]> git.ipfire.org Git - people/ms/u-boot.git/blob - drivers/mmc/mmc.c
mmc: reworked version lookup in mmc_startup_v4
[people/ms/u-boot.git] / drivers / mmc / mmc.c
1 /*
2 * Copyright 2008, Freescale Semiconductor, Inc
3 * Andy Fleming
4 *
5 * Based vaguely on the Linux code
6 *
7 * SPDX-License-Identifier: GPL-2.0+
8 */
9
10 #include <config.h>
11 #include <common.h>
12 #include <command.h>
13 #include <dm.h>
14 #include <dm/device-internal.h>
15 #include <errno.h>
16 #include <mmc.h>
17 #include <part.h>
18 #include <power/regulator.h>
19 #include <malloc.h>
20 #include <memalign.h>
21 #include <linux/list.h>
22 #include <div64.h>
23 #include "mmc_private.h"
24
/*
 * Lookup table of SD allocation-unit sizes in units of 512-byte
 * sectors (each entry is a byte size divided by 512). Presumably
 * indexed by the 4-bit AU_SIZE field of the SD status — confirm at
 * the use site.
 */
static const unsigned int sd_au_size[] = {
	0,		SZ_16K / 512,		SZ_32K / 512,
	SZ_64K / 512,	SZ_128K / 512,		SZ_256K / 512,
	SZ_512K / 512,	SZ_1M / 512,		SZ_2M / 512,
	SZ_4M / 512,	SZ_8M / 512,		(SZ_8M + SZ_4M) / 512,
	SZ_16M / 512,	(SZ_16M + SZ_8M) / 512,	SZ_32M / 512,	SZ_64M / 512,
};
32
33 static int mmc_set_signal_voltage(struct mmc *mmc, uint signal_voltage);
34 static int mmc_power_cycle(struct mmc *mmc);
35 static int mmc_select_mode_and_width(struct mmc *mmc, uint card_caps);
36
#if CONFIG_IS_ENABLED(MMC_TINY)
/* MMC_TINY builds support exactly one, statically allocated device. */
static struct mmc mmc_static;

/* Return the single static device; @dev_num is ignored in MMC_TINY. */
struct mmc *find_mmc_device(int dev_num)
{
	return &mmc_static;
}

/* Start early init on the static device if it requested preinit. */
void mmc_do_preinit(void)
{
	struct mmc *m = &mmc_static;
#ifdef CONFIG_FSL_ESDHC_ADAPTER_IDENT
	mmc_set_preinit(m, 1);
#endif
	if (m->preinit)
		mmc_start_init(m);
}

/* Return the block descriptor embedded in the mmc device. */
struct blk_desc *mmc_get_blk_desc(struct mmc *mmc)
{
	return &mmc->block_dev;
}
#endif
59
#if !CONFIG_IS_ENABLED(DM_MMC)

#if CONFIG_IS_ENABLED(MMC_UHS_SUPPORT)
/* Non-DM stub: waiting on DAT0 is only implemented with DM_MMC. */
static int mmc_wait_dat0(struct mmc *mmc, int state, int timeout)
{
	return -ENOSYS;
}
#endif

/* Weak board hook for write-protect state; -1 means "not implemented". */
__weak int board_mmc_getwp(struct mmc *mmc)
{
	return -1;
}

/*
 * Return the card's write-protect state: board hook first, then the
 * controller's getwp op, defaulting to 0 (not protected).
 */
int mmc_getwp(struct mmc *mmc)
{
	int wp;

	wp = board_mmc_getwp(mmc);

	if (wp < 0) {
		if (mmc->cfg->ops->getwp)
			wp = mmc->cfg->ops->getwp(mmc);
		else
			wp = 0;
	}

	return wp;
}

/* Weak board hook for card-detect state; -1 means "not implemented". */
__weak int board_mmc_getcd(struct mmc *mmc)
{
	return -1;
}
#endif
95
#ifdef CONFIG_MMC_TRACE
/* Log a command's index and argument just before it is sent. */
void mmmc_trace_before_send(struct mmc *mmc, struct mmc_cmd *cmd)
{
	printf("CMD_SEND:%d\n", cmd->cmdidx);
	printf("\t\tARG\t\t\t 0x%08X\n", cmd->cmdarg);
}

/*
 * Log a command's outcome: the error code on failure, otherwise the
 * response word(s) formatted per response type. R2 responses (CID/CSD)
 * additionally get a byte-wise hex dump.
 */
void mmmc_trace_after_send(struct mmc *mmc, struct mmc_cmd *cmd, int ret)
{
	int i;
	u8 *ptr;

	if (ret) {
		printf("\t\tRET\t\t\t %d\n", ret);
	} else {
		switch (cmd->resp_type) {
		case MMC_RSP_NONE:
			printf("\t\tMMC_RSP_NONE\n");
			break;
		case MMC_RSP_R1:
			printf("\t\tMMC_RSP_R1,5,6,7 \t 0x%08X \n",
				cmd->response[0]);
			break;
		case MMC_RSP_R1b:
			printf("\t\tMMC_RSP_R1b\t\t 0x%08X \n",
				cmd->response[0]);
			break;
		case MMC_RSP_R2:
			printf("\t\tMMC_RSP_R2\t\t 0x%08X \n",
				cmd->response[0]);
			printf("\t\t \t\t 0x%08X \n",
				cmd->response[1]);
			printf("\t\t \t\t 0x%08X \n",
				cmd->response[2]);
			printf("\t\t \t\t 0x%08X \n",
				cmd->response[3]);
			printf("\n");
			printf("\t\t\t\t\tDUMPING DATA\n");
			for (i = 0; i < 4; i++) {
				int j;
				printf("\t\t\t\t\t%03d - ", i*4);
				ptr = (u8 *)&cmd->response[i];
				/* dump each response word MSB-first */
				ptr += 3;
				for (j = 0; j < 4; j++)
					printf("%02X ", *ptr--);
				printf("\n");
			}
			break;
		case MMC_RSP_R3:
			printf("\t\tMMC_RSP_R3,4\t\t 0x%08X \n",
				cmd->response[0]);
			break;
		default:
			printf("\t\tERROR MMC rsp not supported\n");
			break;
		}
	}
}

/* Log the card state decoded from a card-status (R1) response word. */
void mmc_trace_state(struct mmc *mmc, struct mmc_cmd *cmd)
{
	int status;

	status = (cmd->response[0] & MMC_STATUS_CURR_STATE) >> 9;
	printf("CURR STATE:%d\n", status);
}
#endif
163
#if CONFIG_IS_ENABLED(MMC_VERBOSE) || defined(DEBUG)
/* Return a printable name for @mode, or "Unknown mode" if out of range. */
const char *mmc_mode_name(enum bus_mode mode)
{
	static const char *const names[] = {
	      [MMC_LEGACY]	= "MMC legacy",
	      [SD_LEGACY]	= "SD Legacy",
	      [MMC_HS]		= "MMC High Speed (26MHz)",
	      [SD_HS]		= "SD High Speed (50MHz)",
	      [UHS_SDR12]	= "UHS SDR12 (25MHz)",
	      [UHS_SDR25]	= "UHS SDR25 (50MHz)",
	      [UHS_SDR50]	= "UHS SDR50 (100MHz)",
	      [UHS_SDR104]	= "UHS SDR104 (208MHz)",
	      [UHS_DDR50]	= "UHS DDR50 (50MHz)",
	      [MMC_HS_52]	= "MMC High Speed (52MHz)",
	      [MMC_DDR_52]	= "MMC DDR52 (52MHz)",
	      [MMC_HS_200]	= "HS200 (200MHz)",
	};

	if (mode >= MMC_MODES_END)
		return "Unknown mode";
	else
		return names[mode];
}
#endif
188
/*
 * Return the nominal bus clock (Hz) for @mode. MMC_LEGACY uses the
 * per-device legacy_speed; out-of-range modes (and modes compiled out
 * of the table) yield 0.
 */
static uint mmc_mode2freq(struct mmc *mmc, enum bus_mode mode)
{
	static const int freqs[] = {
	      [SD_LEGACY]	= 25000000,
	      [MMC_HS]		= 26000000,
	      [SD_HS]		= 50000000,
#if CONFIG_IS_ENABLED(MMC_UHS_SUPPORT)
	      [UHS_SDR12]	= 25000000,
	      [UHS_SDR25]	= 50000000,
	      [UHS_SDR50]	= 100000000,
	      [UHS_DDR50]	= 50000000,
#ifdef MMC_SUPPORTS_TUNING
	      [UHS_SDR104]	= 208000000,
#endif
#endif
	      [MMC_HS_52]	= 52000000,
	      [MMC_DDR_52]	= 52000000,
#if CONFIG_IS_ENABLED(MMC_HS200_SUPPORT)
	      [MMC_HS_200]	= 200000000,
#endif
	      };

	if (mode == MMC_LEGACY)
		return mmc->legacy_speed;
	else if (mode >= MMC_MODES_END)
		return 0;
	else
		return freqs[mode];
}
218
219 static int mmc_select_mode(struct mmc *mmc, enum bus_mode mode)
220 {
221 mmc->selected_mode = mode;
222 mmc->tran_speed = mmc_mode2freq(mmc, mode);
223 mmc->ddr_mode = mmc_is_mode_ddr(mode);
224 debug("selecting mode %s (freq : %d MHz)\n", mmc_mode_name(mode),
225 mmc->tran_speed / 1000000);
226 return 0;
227 }
228
#if !CONFIG_IS_ENABLED(DM_MMC)
/*
 * Send @cmd (with optional @data transfer) through the host's send_cmd
 * op, emitting trace output before and after when tracing is enabled.
 */
int mmc_send_cmd(struct mmc *mmc, struct mmc_cmd *cmd, struct mmc_data *data)
{
	int ret;

	mmmc_trace_before_send(mmc, cmd);
	ret = mmc->cfg->ops->send_cmd(mmc, cmd, data);
	mmmc_trace_after_send(mmc, cmd, ret);

	return ret;
}
#endif
241
/*
 * Poll the card with CMD13 (SEND_STATUS) until it reports ready-for-data
 * and has left the programming state, for at most @timeout milliseconds
 * (1 ms poll interval). Individual command failures are retried up to 5
 * times.
 *
 * Returns 0 when ready, -ECOMM if the card reports a status error,
 * -ETIMEDOUT when it stays busy, or the last command error once the
 * retries are exhausted.
 */
int mmc_send_status(struct mmc *mmc, int timeout)
{
	struct mmc_cmd cmd;
	int err, retries = 5;

	cmd.cmdidx = MMC_CMD_SEND_STATUS;
	cmd.resp_type = MMC_RSP_R1;
	/* NOTE(review): cmdarg is left unset for SPI hosts — confirm intent */
	if (!mmc_host_is_spi(mmc))
		cmd.cmdarg = mmc->rca << 16;

	while (1) {
		err = mmc_send_cmd(mmc, &cmd, NULL);
		if (!err) {
			if ((cmd.response[0] & MMC_STATUS_RDY_FOR_DATA) &&
			    (cmd.response[0] & MMC_STATUS_CURR_STATE) !=
			     MMC_STATE_PRG)
				break;

			if (cmd.response[0] & MMC_STATUS_MASK) {
#if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT)
				pr_err("Status Error: 0x%08X\n",
				       cmd.response[0]);
#endif
				return -ECOMM;
			}
		} else if (--retries < 0)
			return err;

		if (timeout-- <= 0)
			break;

		udelay(1000);
	}

	mmc_trace_state(mmc, &cmd);
	/*
	 * NOTE(review): if the card becomes ready on the iteration where
	 * timeout has already reached 0, the break above still lands here
	 * with timeout <= 0 and -ETIMEDOUT is reported — confirm intent.
	 */
	if (timeout <= 0) {
#if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT)
		pr_err("Timeout waiting card ready\n");
#endif
		return -ETIMEDOUT;
	}

	return 0;
}
286
/*
 * Set the card's block length to @len bytes with CMD16. No-op in DDR
 * mode (CMD16 is not issued there). With the RETRY_SET_BLOCKLEN quirk
 * enabled, a failed command is retried a few times.
 */
int mmc_set_blocklen(struct mmc *mmc, int len)
{
	struct mmc_cmd cmd;
	int err;

	if (mmc->ddr_mode)
		return 0;

	cmd.cmdidx = MMC_CMD_SET_BLOCKLEN;
	cmd.resp_type = MMC_RSP_R1;
	cmd.cmdarg = len;

	err = mmc_send_cmd(mmc, &cmd, NULL);

#ifdef CONFIG_MMC_QUIRKS
	if (err && (mmc->quirks & MMC_QUIRK_RETRY_SET_BLOCKLEN)) {
		int retries = 4;
		/*
		 * It has been seen that SET_BLOCKLEN may fail on the first
		 * attempt, let's try a few more time
		 */
		do {
			err = mmc_send_cmd(mmc, &cmd, NULL);
			if (!err)
				break;
		} while (retries--);
	}
#endif

	return err;
}
318
#ifdef MMC_SUPPORTS_TUNING
/* Expected tuning block contents for a 4-bit bus (64 bytes). */
static const u8 tuning_blk_pattern_4bit[] = {
	0xff, 0x0f, 0xff, 0x00, 0xff, 0xcc, 0xc3, 0xcc,
	0xc3, 0x3c, 0xcc, 0xff, 0xfe, 0xff, 0xfe, 0xef,
	0xff, 0xdf, 0xff, 0xdd, 0xff, 0xfb, 0xff, 0xfb,
	0xbf, 0xff, 0x7f, 0xff, 0x77, 0xf7, 0xbd, 0xef,
	0xff, 0xf0, 0xff, 0xf0, 0x0f, 0xfc, 0xcc, 0x3c,
	0xcc, 0x33, 0xcc, 0xcf, 0xff, 0xef, 0xff, 0xee,
	0xff, 0xfd, 0xff, 0xfd, 0xdf, 0xff, 0xbf, 0xff,
	0xbb, 0xff, 0xf7, 0xff, 0xf7, 0x7f, 0x7b, 0xde,
};

/* Expected tuning block contents for an 8-bit bus (128 bytes). */
static const u8 tuning_blk_pattern_8bit[] = {
	0xff, 0xff, 0x00, 0xff, 0xff, 0xff, 0x00, 0x00,
	0xff, 0xff, 0xcc, 0xcc, 0xcc, 0x33, 0xcc, 0xcc,
	0xcc, 0x33, 0x33, 0xcc, 0xcc, 0xcc, 0xff, 0xff,
	0xff, 0xee, 0xff, 0xff, 0xff, 0xee, 0xee, 0xff,
	0xff, 0xff, 0xdd, 0xff, 0xff, 0xff, 0xdd, 0xdd,
	0xff, 0xff, 0xff, 0xbb, 0xff, 0xff, 0xff, 0xbb,
	0xbb, 0xff, 0xff, 0xff, 0x77, 0xff, 0xff, 0xff,
	0x77, 0x77, 0xff, 0x77, 0xbb, 0xdd, 0xee, 0xff,
	0xff, 0xff, 0xff, 0x00, 0xff, 0xff, 0xff, 0x00,
	0x00, 0xff, 0xff, 0xcc, 0xcc, 0xcc, 0x33, 0xcc,
	0xcc, 0xcc, 0x33, 0x33, 0xcc, 0xcc, 0xcc, 0xff,
	0xff, 0xff, 0xee, 0xff, 0xff, 0xff, 0xee, 0xee,
	0xff, 0xff, 0xff, 0xdd, 0xff, 0xff, 0xff, 0xdd,
	0xdd, 0xff, 0xff, 0xff, 0xbb, 0xff, 0xff, 0xff,
	0xbb, 0xbb, 0xff, 0xff, 0xff, 0x77, 0xff, 0xff,
	0xff, 0x77, 0x77, 0xff, 0x77, 0xbb, 0xdd, 0xee,
};

/*
 * Send tuning command @opcode, read back one tuning block and compare
 * it against the expected pattern for the current bus width (4 or 8
 * bits; other widths return -EINVAL).
 *
 * Returns 0 on a matching pattern, -EIO on mismatch, or the command
 * error. NOTE(review): @cmd_error is never written here — confirm
 * callers do not rely on it.
 */
int mmc_send_tuning(struct mmc *mmc, u32 opcode, int *cmd_error)
{
	struct mmc_cmd cmd;
	struct mmc_data data;
	const u8 *tuning_block_pattern;
	int size, err;

	if (mmc->bus_width == 8) {
		tuning_block_pattern = tuning_blk_pattern_8bit;
		size = sizeof(tuning_blk_pattern_8bit);
	} else if (mmc->bus_width == 4) {
		tuning_block_pattern = tuning_blk_pattern_4bit;
		size = sizeof(tuning_blk_pattern_4bit);
	} else {
		return -EINVAL;
	}

	ALLOC_CACHE_ALIGN_BUFFER(u8, data_buf, size);

	cmd.cmdidx = opcode;
	cmd.cmdarg = 0;
	cmd.resp_type = MMC_RSP_R1;

	data.dest = (void *)data_buf;
	data.blocks = 1;
	data.blocksize = size;
	data.flags = MMC_DATA_READ;

	err = mmc_send_cmd(mmc, &cmd, &data);
	if (err)
		return err;

	if (memcmp(data_buf, tuning_block_pattern, size))
		return -EIO;

	return 0;
}
#endif
388
/*
 * Read @blkcnt blocks starting at @start into @dst using CMD17
 * (single) or CMD18 (multiple). Non-high-capacity cards are addressed
 * in bytes; multi-block reads are terminated with CMD12.
 *
 * Returns the number of blocks read, or 0 on any error.
 */
static int mmc_read_blocks(struct mmc *mmc, void *dst, lbaint_t start,
			   lbaint_t blkcnt)
{
	struct mmc_cmd cmd;
	struct mmc_data data;

	if (blkcnt > 1)
		cmd.cmdidx = MMC_CMD_READ_MULTIPLE_BLOCK;
	else
		cmd.cmdidx = MMC_CMD_READ_SINGLE_BLOCK;

	/* high-capacity cards take a block address, others a byte address */
	if (mmc->high_capacity)
		cmd.cmdarg = start;
	else
		cmd.cmdarg = start * mmc->read_bl_len;

	cmd.resp_type = MMC_RSP_R1;

	data.dest = dst;
	data.blocks = blkcnt;
	data.blocksize = mmc->read_bl_len;
	data.flags = MMC_DATA_READ;

	if (mmc_send_cmd(mmc, &cmd, &data))
		return 0;

	if (blkcnt > 1) {
		cmd.cmdidx = MMC_CMD_STOP_TRANSMISSION;
		cmd.cmdarg = 0;
		cmd.resp_type = MMC_RSP_R1b;
		if (mmc_send_cmd(mmc, &cmd, NULL)) {
#if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT)
			pr_err("mmc fail to send stop cmd\n");
#endif
			return 0;
		}
	}

	return blkcnt;
}
429
/*
 * Block-layer read entry point. Selects the requested hardware
 * partition, bounds-checks the request against the device size, sets
 * the block length and then reads in chunks of at most cfg->b_max
 * blocks.
 *
 * Returns the number of blocks read, or 0 on any error.
 */
#if CONFIG_IS_ENABLED(BLK)
ulong mmc_bread(struct udevice *dev, lbaint_t start, lbaint_t blkcnt, void *dst)
#else
ulong mmc_bread(struct blk_desc *block_dev, lbaint_t start, lbaint_t blkcnt,
		void *dst)
#endif
{
#if CONFIG_IS_ENABLED(BLK)
	struct blk_desc *block_dev = dev_get_uclass_platdata(dev);
#endif
	int dev_num = block_dev->devnum;
	int err;
	lbaint_t cur, blocks_todo = blkcnt;

	if (blkcnt == 0)
		return 0;

	struct mmc *mmc = find_mmc_device(dev_num);
	if (!mmc)
		return 0;

	/* make sure the requested hw partition is selected */
	if (CONFIG_IS_ENABLED(MMC_TINY))
		err = mmc_switch_part(mmc, block_dev->hwpart);
	else
		err = blk_dselect_hwpart(block_dev, block_dev->hwpart);

	if (err < 0)
		return 0;

	if ((start + blkcnt) > block_dev->lba) {
#if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT)
		pr_err("MMC: block number 0x" LBAF " exceeds max(0x" LBAF ")\n",
		       start + blkcnt, block_dev->lba);
#endif
		return 0;
	}

	if (mmc_set_blocklen(mmc, mmc->read_bl_len)) {
		debug("%s: Failed to set blocklen\n", __func__);
		return 0;
	}

	/* transfer in chunks no larger than the host's b_max limit */
	do {
		cur = (blocks_todo > mmc->cfg->b_max) ?
			mmc->cfg->b_max : blocks_todo;
		if (mmc_read_blocks(mmc, dst, start, cur) != cur) {
			debug("%s: Failed to read blocks\n", __func__);
			return 0;
		}
		blocks_todo -= cur;
		start += cur;
		dst += cur * mmc->read_bl_len;
	} while (blocks_todo > 0);

	return blkcnt;
}
486
487 static int mmc_go_idle(struct mmc *mmc)
488 {
489 struct mmc_cmd cmd;
490 int err;
491
492 udelay(1000);
493
494 cmd.cmdidx = MMC_CMD_GO_IDLE_STATE;
495 cmd.cmdarg = 0;
496 cmd.resp_type = MMC_RSP_NONE;
497
498 err = mmc_send_cmd(mmc, &cmd, NULL);
499
500 if (err)
501 return err;
502
503 udelay(2000);
504
505 return 0;
506 }
507
#if CONFIG_IS_ENABLED(MMC_UHS_SUPPORT)
/*
 * Switch SD signalling to @signal_voltage. For 3.3V the regulator is
 * changed directly; for 1.8V the CMD11 sequence is followed: send
 * CMD11, wait for the card to drive DAT0 low, gate the clock, change
 * the supply, ungate, then wait for DAT0 high to confirm the switch.
 */
static int mmc_switch_voltage(struct mmc *mmc, int signal_voltage)
{
	struct mmc_cmd cmd;
	int err = 0;

	/*
	 * Send CMD11 only if the request is to switch the card to
	 * 1.8V signalling.
	 */
	if (signal_voltage == MMC_SIGNAL_VOLTAGE_330)
		return mmc_set_signal_voltage(mmc, signal_voltage);

	cmd.cmdidx = SD_CMD_SWITCH_UHS18V;
	cmd.cmdarg = 0;
	cmd.resp_type = MMC_RSP_R1;

	err = mmc_send_cmd(mmc, &cmd, NULL);
	if (err)
		return err;

	if (!mmc_host_is_spi(mmc) && (cmd.response[0] & MMC_STATUS_ERROR))
		return -EIO;

	/*
	 * The card should drive cmd and dat[0:3] low immediately
	 * after the response of cmd11, but wait 100 us to be sure
	 */
	err = mmc_wait_dat0(mmc, 0, 100);
	if (err == -ENOSYS)
		udelay(100);
	else if (err)
		return -ETIMEDOUT;

	/*
	 * During a signal voltage level switch, the clock must be gated
	 * for 5 ms according to the SD spec
	 */
	mmc_set_clock(mmc, mmc->clock, true);

	err = mmc_set_signal_voltage(mmc, signal_voltage);
	if (err)
		return err;

	/* Keep clock gated for at least 10 ms, though spec only says 5 ms */
	mdelay(10);
	mmc_set_clock(mmc, mmc->clock, false);

	/*
	 * Failure to switch is indicated by the card holding
	 * dat[0:3] low. Wait for at least 1 ms according to spec
	 */
	err = mmc_wait_dat0(mmc, 1, 1000);
	if (err == -ENOSYS)
		udelay(1000);
	else if (err)
		return -ETIMEDOUT;

	return 0;
}
#endif
569
/*
 * SD power-up sequence: loop ACMD41 with the host's voltage window
 * (plus HCS for v2 cards and S18R when @uhs_en) until the card clears
 * its busy flag, then derive the card version, latch the OCR, and —
 * when the card agreed to 1.8V signalling — perform the voltage
 * switch. Also reads the OCR explicitly for SPI hosts.
 */
static int sd_send_op_cond(struct mmc *mmc, bool uhs_en)
{
	int timeout = 1000;
	int err;
	struct mmc_cmd cmd;

	while (1) {
		/* ACMD prefix: announce the next command is application-specific */
		cmd.cmdidx = MMC_CMD_APP_CMD;
		cmd.resp_type = MMC_RSP_R1;
		cmd.cmdarg = 0;

		err = mmc_send_cmd(mmc, &cmd, NULL);

		if (err)
			return err;

		cmd.cmdidx = SD_CMD_APP_SEND_OP_COND;
		cmd.resp_type = MMC_RSP_R3;

		/*
		 * Most cards do not answer if some reserved bits
		 * in the ocr are set. However, Some controller
		 * can set bit 7 (reserved for low voltages), but
		 * how to manage low voltages SD card is not yet
		 * specified.
		 */
		cmd.cmdarg = mmc_host_is_spi(mmc) ? 0 :
			(mmc->cfg->voltages & 0xff8000);

		if (mmc->version == SD_VERSION_2)
			cmd.cmdarg |= OCR_HCS;

		if (uhs_en)
			cmd.cmdarg |= OCR_S18R;

		err = mmc_send_cmd(mmc, &cmd, NULL);

		if (err)
			return err;

		if (cmd.response[0] & OCR_BUSY)
			break;

		if (timeout-- <= 0)
			return -EOPNOTSUPP;

		udelay(1000);
	}

	if (mmc->version != SD_VERSION_2)
		mmc->version = SD_VERSION_1_0;

	if (mmc_host_is_spi(mmc)) { /* read OCR for spi */
		cmd.cmdidx = MMC_CMD_SPI_READ_OCR;
		cmd.resp_type = MMC_RSP_R3;
		cmd.cmdarg = 0;

		err = mmc_send_cmd(mmc, &cmd, NULL);

		if (err)
			return err;
	}

	mmc->ocr = cmd.response[0];

#if CONFIG_IS_ENABLED(MMC_UHS_SUPPORT)
	/* card accepted 1.8V signalling (S18A + power-up done) */
	if (uhs_en && !(mmc_host_is_spi(mmc)) && (cmd.response[0] & 0x41000000)
	    == 0x41000000) {
		err = mmc_switch_voltage(mmc, MMC_SIGNAL_VOLTAGE_180);
		if (err)
			return err;
	}
#endif

	mmc->high_capacity = ((mmc->ocr & OCR_HCS) == OCR_HCS);
	mmc->rca = 0;

	return 0;
}
649
650 static int mmc_send_op_cond_iter(struct mmc *mmc, int use_arg)
651 {
652 struct mmc_cmd cmd;
653 int err;
654
655 cmd.cmdidx = MMC_CMD_SEND_OP_COND;
656 cmd.resp_type = MMC_RSP_R3;
657 cmd.cmdarg = 0;
658 if (use_arg && !mmc_host_is_spi(mmc))
659 cmd.cmdarg = OCR_HCS |
660 (mmc->cfg->voltages &
661 (mmc->ocr & OCR_VOLTAGE_MASK)) |
662 (mmc->ocr & OCR_ACCESS_MODE);
663
664 err = mmc_send_cmd(mmc, &cmd, NULL);
665 if (err)
666 return err;
667 mmc->ocr = cmd.response[0];
668 return 0;
669 }
670
671 static int mmc_send_op_cond(struct mmc *mmc)
672 {
673 int err, i;
674
675 /* Some cards seem to need this */
676 mmc_go_idle(mmc);
677
678 /* Asking to the card its capabilities */
679 for (i = 0; i < 2; i++) {
680 err = mmc_send_op_cond_iter(mmc, i != 0);
681 if (err)
682 return err;
683
684 /* exit if not busy (flag seems to be inverted) */
685 if (mmc->ocr & OCR_BUSY)
686 break;
687 }
688 mmc->op_cond_pending = 1;
689 return 0;
690 }
691
/*
 * Finish the CMD1 handshake started by mmc_send_op_cond(): if the card
 * was still busy, re-issue CMD1 for up to ~1 s until the busy flag
 * clears, read the OCR explicitly on SPI hosts, then record the
 * high-capacity flag and the default RCA of 1. The version is reset
 * to "unknown" here and is presumably refined later during startup —
 * confirm against the callers.
 */
static int mmc_complete_op_cond(struct mmc *mmc)
{
	struct mmc_cmd cmd;
	int timeout = 1000;
	uint start;
	int err;

	mmc->op_cond_pending = 0;
	if (!(mmc->ocr & OCR_BUSY)) {
		/* Some cards seem to need this */
		mmc_go_idle(mmc);

		start = get_timer(0);
		while (1) {
			err = mmc_send_op_cond_iter(mmc, 1);
			if (err)
				return err;
			if (mmc->ocr & OCR_BUSY)
				break;
			if (get_timer(start) > timeout)
				return -EOPNOTSUPP;
			udelay(100);
		}
	}

	if (mmc_host_is_spi(mmc)) { /* read OCR for spi */
		cmd.cmdidx = MMC_CMD_SPI_READ_OCR;
		cmd.resp_type = MMC_RSP_R3;
		cmd.cmdarg = 0;

		err = mmc_send_cmd(mmc, &cmd, NULL);

		if (err)
			return err;

		mmc->ocr = cmd.response[0];
	}

	mmc->version = MMC_VERSION_UNKNOWN;

	mmc->high_capacity = ((mmc->ocr & OCR_HCS) == OCR_HCS);
	mmc->rca = 1;

	return 0;
}
737
738
739 static int mmc_send_ext_csd(struct mmc *mmc, u8 *ext_csd)
740 {
741 struct mmc_cmd cmd;
742 struct mmc_data data;
743 int err;
744
745 /* Get the Card Status Register */
746 cmd.cmdidx = MMC_CMD_SEND_EXT_CSD;
747 cmd.resp_type = MMC_RSP_R1;
748 cmd.cmdarg = 0;
749
750 data.dest = (char *)ext_csd;
751 data.blocks = 1;
752 data.blocksize = MMC_MAX_BLOCK_LEN;
753 data.flags = MMC_DATA_READ;
754
755 err = mmc_send_cmd(mmc, &cmd, &data);
756
757 return err;
758 }
759
760 int mmc_switch(struct mmc *mmc, u8 set, u8 index, u8 value)
761 {
762 struct mmc_cmd cmd;
763 int timeout = 1000;
764 int retries = 3;
765 int ret;
766
767 cmd.cmdidx = MMC_CMD_SWITCH;
768 cmd.resp_type = MMC_RSP_R1b;
769 cmd.cmdarg = (MMC_SWITCH_MODE_WRITE_BYTE << 24) |
770 (index << 16) |
771 (value << 8);
772
773 while (retries > 0) {
774 ret = mmc_send_cmd(mmc, &cmd, NULL);
775
776 /* Waiting for the ready status */
777 if (!ret) {
778 ret = mmc_send_status(mmc, timeout);
779 return ret;
780 }
781
782 retries--;
783 }
784
785 return ret;
786
787 }
788
/*
 * Program the EXT_CSD HS_TIMING byte for @mode. For the plain
 * high-speed modes the EXT_CSD is read back afterwards to verify the
 * card actually accepted high-speed timing.
 */
static int mmc_set_card_speed(struct mmc *mmc, enum bus_mode mode)
{
	int err;
	int speed_bits;

	ALLOC_CACHE_ALIGN_BUFFER(u8, test_csd, MMC_MAX_BLOCK_LEN);

	switch (mode) {
	case MMC_HS:
	case MMC_HS_52:
	case MMC_DDR_52:
		speed_bits = EXT_CSD_TIMING_HS;
		break;
#if CONFIG_IS_ENABLED(MMC_HS200_SUPPORT)
	case MMC_HS_200:
		speed_bits = EXT_CSD_TIMING_HS200;
		break;
#endif
	case MMC_LEGACY:
		speed_bits = EXT_CSD_TIMING_LEGACY;
		break;
	default:
		return -EINVAL;
	}
	err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_HS_TIMING,
			 speed_bits);
	if (err)
		return err;

	if ((mode == MMC_HS) || (mode == MMC_HS_52)) {
		/* Now check to see that it worked */
		err = mmc_send_ext_csd(mmc, test_csd);
		if (err)
			return err;

		/* No high-speed support */
		if (!test_csd[EXT_CSD_HS_TIMING])
			return -ENOTSUPP;
	}

	return 0;
}
831
/*
 * Derive mmc->card_caps from the cached EXT_CSD: bus widths,
 * HS/HS-52/DDR-52 and (when compiled in) HS200. SPI hosts and pre-v4
 * cards are limited to legacy 1-bit mode.
 */
static int mmc_get_capabilities(struct mmc *mmc)
{
	u8 *ext_csd = mmc->ext_csd;
	char cardtype;

	mmc->card_caps = MMC_MODE_1BIT | MMC_CAP(MMC_LEGACY);

	if (mmc_host_is_spi(mmc))
		return 0;

	/* Only version 4 supports high-speed */
	if (mmc->version < MMC_VERSION_4)
		return 0;

	if (!ext_csd) {
		pr_err("No ext_csd found!\n"); /* this should never happen */
		return -ENOTSUPP;
	}

	mmc->card_caps |= MMC_MODE_4BIT | MMC_MODE_8BIT;

	/* low 6 bits of DEVICE_TYPE select the supported timing classes */
	cardtype = ext_csd[EXT_CSD_CARD_TYPE] & 0x3f;
	mmc->cardtype = cardtype;

#if CONFIG_IS_ENABLED(MMC_HS200_SUPPORT)
	if (cardtype & (EXT_CSD_CARD_TYPE_HS200_1_2V |
			EXT_CSD_CARD_TYPE_HS200_1_8V)) {
		mmc->card_caps |= MMC_MODE_HS200;
	}
#endif
	if (cardtype & EXT_CSD_CARD_TYPE_52) {
		if (cardtype & EXT_CSD_CARD_TYPE_DDR_52)
			mmc->card_caps |= MMC_MODE_DDR_52MHz;
		mmc->card_caps |= MMC_MODE_HS_52MHz;
	}
	if (cardtype & EXT_CSD_CARD_TYPE_26)
		mmc->card_caps |= MMC_MODE_HS;

	return 0;
}
872
873 static int mmc_set_capacity(struct mmc *mmc, int part_num)
874 {
875 switch (part_num) {
876 case 0:
877 mmc->capacity = mmc->capacity_user;
878 break;
879 case 1:
880 case 2:
881 mmc->capacity = mmc->capacity_boot;
882 break;
883 case 3:
884 mmc->capacity = mmc->capacity_rpmb;
885 break;
886 case 4:
887 case 5:
888 case 6:
889 case 7:
890 mmc->capacity = mmc->capacity_gp[part_num - 4];
891 break;
892 default:
893 return -1;
894 }
895
896 mmc_get_blk_desc(mmc)->lba = lldiv(mmc->capacity, mmc->read_bl_len);
897
898 return 0;
899 }
900
#if CONFIG_IS_ENABLED(MMC_HS200_SUPPORT)
/*
 * Before switching to a non-user hardware partition, verify that the
 * currently selected bus mode is allowed there: HS200 is forbidden
 * outside the user area. Re-negotiates mode/width when the current
 * choice is forbidden or sub-optimal.
 */
static int mmc_boot_part_access_chk(struct mmc *mmc, unsigned int part_num)
{
	int forbidden = 0;
	bool change = false;

	if (part_num & PART_ACCESS_MASK)
		forbidden = MMC_CAP(MMC_HS_200);

	if (MMC_CAP(mmc->selected_mode) & forbidden) {
		debug("selected mode (%s) is forbidden for part %d\n",
		      mmc_mode_name(mmc->selected_mode), part_num);
		change = true;
	} else if (mmc->selected_mode != mmc->best_mode) {
		debug("selected mode is not optimal\n");
		change = true;
	}

	if (change)
		return mmc_select_mode_and_width(mmc,
						 mmc->card_caps & ~forbidden);

	return 0;
}
#else
/* Without HS200 support, every mode is valid for every partition. */
static inline int mmc_boot_part_access_chk(struct mmc *mmc,
					   unsigned int part_num)
{
	return 0;
}
#endif
932
933 int mmc_switch_part(struct mmc *mmc, unsigned int part_num)
934 {
935 int ret;
936
937 ret = mmc_boot_part_access_chk(mmc, part_num);
938 if (ret)
939 return ret;
940
941 ret = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_PART_CONF,
942 (mmc->part_config & ~PART_ACCESS_MASK)
943 | (part_num & PART_ACCESS_MASK));
944
945 /*
946 * Set the capacity if the switch succeeded or was intended
947 * to return to representing the raw device.
948 */
949 if ((ret == 0) || ((ret == -ENODEV) && (part_num == 0))) {
950 ret = mmc_set_capacity(mmc, part_num);
951 mmc_get_blk_desc(mmc)->hwpart = part_num;
952 }
953
954 return ret;
955 }
956
#if CONFIG_IS_ENABLED(MMC_HW_PARTITIONING)
/*
 * Validate and (depending on @mode) program an eMMC hardware-partition
 * layout: enhanced user-data area, four general-purpose partitions and
 * write-reliability settings.
 *
 * MMC_HWPART_CONF_CHECK only validates @conf against the card's
 * EXT_CSD limits; MMC_HWPART_CONF_SET additionally writes the
 * geometry; MMC_HWPART_CONF_COMPLETE also writes WR_REL_SET and sets
 * PARTITION_SETTING_COMPLETED, which makes the layout permanent after
 * the next power cycle.
 */
int mmc_hwpart_config(struct mmc *mmc,
		      const struct mmc_hwpart_conf *conf,
		      enum mmc_hwpart_conf_mode mode)
{
	u8 part_attrs = 0;
	u32 enh_size_mult;
	u32 enh_start_addr;
	u32 gp_size_mult[4];
	u32 max_enh_size_mult;
	u32 tot_enh_size_mult = 0;
	u8 wr_rel_set;
	int i, pidx, err;
	ALLOC_CACHE_ALIGN_BUFFER(u8, ext_csd, MMC_MAX_BLOCK_LEN);

	if (mode < MMC_HWPART_CONF_CHECK || mode > MMC_HWPART_CONF_COMPLETE)
		return -EINVAL;

	if (IS_SD(mmc) || (mmc->version < MMC_VERSION_4_41)) {
		pr_err("eMMC >= 4.4 required for enhanced user data area\n");
		return -EMEDIUMTYPE;
	}

	if (!(mmc->part_support & PART_SUPPORT)) {
		pr_err("Card does not support partitioning\n");
		return -EMEDIUMTYPE;
	}

	if (!mmc->hc_wp_grp_size) {
		pr_err("Card does not define HC WP group size\n");
		return -EMEDIUMTYPE;
	}

	/* check partition alignment and total enhanced size */
	if (conf->user.enh_size) {
		if (conf->user.enh_size % mmc->hc_wp_grp_size ||
		    conf->user.enh_start % mmc->hc_wp_grp_size) {
			pr_err("User data enhanced area not HC WP group "
			       "size aligned\n");
			return -EINVAL;
		}
		part_attrs |= EXT_CSD_ENH_USR;
		enh_size_mult = conf->user.enh_size / mmc->hc_wp_grp_size;
		/* byte-addressed cards take the start address in bytes */
		if (mmc->high_capacity) {
			enh_start_addr = conf->user.enh_start;
		} else {
			enh_start_addr = (conf->user.enh_start << 9);
		}
	} else {
		enh_size_mult = 0;
		enh_start_addr = 0;
	}
	tot_enh_size_mult += enh_size_mult;

	for (pidx = 0; pidx < 4; pidx++) {
		if (conf->gp_part[pidx].size % mmc->hc_wp_grp_size) {
			pr_err("GP%i partition not HC WP group size "
			       "aligned\n", pidx+1);
			return -EINVAL;
		}
		gp_size_mult[pidx] = conf->gp_part[pidx].size / mmc->hc_wp_grp_size;
		if (conf->gp_part[pidx].size && conf->gp_part[pidx].enhanced) {
			part_attrs |= EXT_CSD_ENH_GP(pidx);
			tot_enh_size_mult += gp_size_mult[pidx];
		}
	}

	if (part_attrs && ! (mmc->part_support & ENHNCD_SUPPORT)) {
		pr_err("Card does not support enhanced attribute\n");
		return -EMEDIUMTYPE;
	}

	err = mmc_send_ext_csd(mmc, ext_csd);
	if (err)
		return err;

	/* 24-bit little-endian field spread over three EXT_CSD bytes */
	max_enh_size_mult =
		(ext_csd[EXT_CSD_MAX_ENH_SIZE_MULT+2] << 16) +
		(ext_csd[EXT_CSD_MAX_ENH_SIZE_MULT+1] << 8) +
		ext_csd[EXT_CSD_MAX_ENH_SIZE_MULT];
	if (tot_enh_size_mult > max_enh_size_mult) {
		pr_err("Total enhanced size exceeds maximum (%u > %u)\n",
		       tot_enh_size_mult, max_enh_size_mult);
		return -EMEDIUMTYPE;
	}

	/* The default value of EXT_CSD_WR_REL_SET is device
	 * dependent, the values can only be changed if the
	 * EXT_CSD_HS_CTRL_REL bit is set. The values can be
	 * changed only once and before partitioning is completed. */
	wr_rel_set = ext_csd[EXT_CSD_WR_REL_SET];
	if (conf->user.wr_rel_change) {
		if (conf->user.wr_rel_set)
			wr_rel_set |= EXT_CSD_WR_DATA_REL_USR;
		else
			wr_rel_set &= ~EXT_CSD_WR_DATA_REL_USR;
	}
	for (pidx = 0; pidx < 4; pidx++) {
		if (conf->gp_part[pidx].wr_rel_change) {
			if (conf->gp_part[pidx].wr_rel_set)
				wr_rel_set |= EXT_CSD_WR_DATA_REL_GP(pidx);
			else
				wr_rel_set &= ~EXT_CSD_WR_DATA_REL_GP(pidx);
		}
	}

	if (wr_rel_set != ext_csd[EXT_CSD_WR_REL_SET] &&
	    !(ext_csd[EXT_CSD_WR_REL_PARAM] & EXT_CSD_HS_CTRL_REL)) {
		puts("Card does not support host controlled partition write "
		     "reliability settings\n");
		return -EMEDIUMTYPE;
	}

	if (ext_csd[EXT_CSD_PARTITION_SETTING] &
	    EXT_CSD_PARTITION_SETTING_COMPLETED) {
		pr_err("Card already partitioned\n");
		return -EPERM;
	}

	if (mode == MMC_HWPART_CONF_CHECK)
		return 0;

	/* Partitioning requires high-capacity size definitions */
	if (!(ext_csd[EXT_CSD_ERASE_GROUP_DEF] & 0x01)) {
		err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
				 EXT_CSD_ERASE_GROUP_DEF, 1);

		if (err)
			return err;

		ext_csd[EXT_CSD_ERASE_GROUP_DEF] = 1;

		/* update erase group size to be high-capacity */
		mmc->erase_grp_size =
			ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE] * 1024;

	}

	/* all OK, write the configuration */
	for (i = 0; i < 4; i++) {
		err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
				 EXT_CSD_ENH_START_ADDR+i,
				 (enh_start_addr >> (i*8)) & 0xFF);
		if (err)
			return err;
	}
	for (i = 0; i < 3; i++) {
		err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
				 EXT_CSD_ENH_SIZE_MULT+i,
				 (enh_size_mult >> (i*8)) & 0xFF);
		if (err)
			return err;
	}
	for (pidx = 0; pidx < 4; pidx++) {
		for (i = 0; i < 3; i++) {
			err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
					 EXT_CSD_GP_SIZE_MULT+pidx*3+i,
					 (gp_size_mult[pidx] >> (i*8)) & 0xFF);
			if (err)
				return err;
		}
	}
	err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
			 EXT_CSD_PARTITIONS_ATTRIBUTE, part_attrs);
	if (err)
		return err;

	if (mode == MMC_HWPART_CONF_SET)
		return 0;

	/* The WR_REL_SET is a write-once register but shall be
	 * written before setting PART_SETTING_COMPLETED. As it is
	 * write-once we can only write it when completing the
	 * partitioning. */
	if (wr_rel_set != ext_csd[EXT_CSD_WR_REL_SET]) {
		err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
				 EXT_CSD_WR_REL_SET, wr_rel_set);
		if (err)
			return err;
	}

	/* Setting PART_SETTING_COMPLETED confirms the partition
	 * configuration but it only becomes effective after power
	 * cycle, so we do not adjust the partition related settings
	 * in the mmc struct. */

	err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
			 EXT_CSD_PARTITION_SETTING,
			 EXT_CSD_PARTITION_SETTING_COMPLETED);
	if (err)
		return err;

	return 0;
}
#endif
1152
1153 #if !CONFIG_IS_ENABLED(DM_MMC)
1154 int mmc_getcd(struct mmc *mmc)
1155 {
1156 int cd;
1157
1158 cd = board_mmc_getcd(mmc);
1159
1160 if (cd < 0) {
1161 if (mmc->cfg->ops->getcd)
1162 cd = mmc->cfg->ops->getcd(mmc);
1163 else
1164 cd = 1;
1165 }
1166
1167 return cd;
1168 }
1169 #endif
1170
1171 static int sd_switch(struct mmc *mmc, int mode, int group, u8 value, u8 *resp)
1172 {
1173 struct mmc_cmd cmd;
1174 struct mmc_data data;
1175
1176 /* Switch the frequency */
1177 cmd.cmdidx = SD_CMD_SWITCH_FUNC;
1178 cmd.resp_type = MMC_RSP_R1;
1179 cmd.cmdarg = (mode << 31) | 0xffffff;
1180 cmd.cmdarg &= ~(0xf << (group * 4));
1181 cmd.cmdarg |= value << (group * 4);
1182
1183 data.dest = (char *)resp;
1184 data.blocksize = 64;
1185 data.blocks = 1;
1186 data.flags = MMC_DATA_READ;
1187
1188 return mmc_send_cmd(mmc, &cmd, &data);
1189 }
1190
1191
/*
 * Probe an SD card's capabilities into mmc->card_caps: read the SCR
 * (retried up to 3 times) to derive the SD version and 4-bit support,
 * then use CMD6 in check mode to detect high-speed and — for SD 3.x
 * cards with UHS enabled — the supported UHS bus modes.
 */
static int sd_get_capabilities(struct mmc *mmc)
{
	int err;
	struct mmc_cmd cmd;
	ALLOC_CACHE_ALIGN_BUFFER(__be32, scr, 2);
	ALLOC_CACHE_ALIGN_BUFFER(__be32, switch_status, 16);
	struct mmc_data data;
	int timeout;
#if CONFIG_IS_ENABLED(MMC_UHS_SUPPORT)
	u32 sd3_bus_mode;
#endif

	mmc->card_caps = MMC_MODE_1BIT | MMC_CAP(SD_LEGACY);

	if (mmc_host_is_spi(mmc))
		return 0;

	/* Read the SCR to find out if this card supports higher speeds */
	cmd.cmdidx = MMC_CMD_APP_CMD;
	cmd.resp_type = MMC_RSP_R1;
	cmd.cmdarg = mmc->rca << 16;

	err = mmc_send_cmd(mmc, &cmd, NULL);

	if (err)
		return err;

	cmd.cmdidx = SD_CMD_APP_SEND_SCR;
	cmd.resp_type = MMC_RSP_R1;
	cmd.cmdarg = 0;

	timeout = 3;

retry_scr:
	data.dest = (char *)scr;
	data.blocksize = 8;
	data.blocks = 1;
	data.flags = MMC_DATA_READ;

	err = mmc_send_cmd(mmc, &cmd, &data);

	if (err) {
		if (timeout--)
			goto retry_scr;

		return err;
	}

	/* SCR is transferred big-endian */
	mmc->scr[0] = __be32_to_cpu(scr[0]);
	mmc->scr[1] = __be32_to_cpu(scr[1]);

	/* SD_SPEC field selects the spec version */
	switch ((mmc->scr[0] >> 24) & 0xf) {
	case 0:
		mmc->version = SD_VERSION_1_0;
		break;
	case 1:
		mmc->version = SD_VERSION_1_10;
		break;
	case 2:
		mmc->version = SD_VERSION_2;
		if ((mmc->scr[0] >> 15) & 0x1)
			mmc->version = SD_VERSION_3;
		break;
	default:
		mmc->version = SD_VERSION_1_0;
		break;
	}

	if (mmc->scr[0] & SD_DATA_4BIT)
		mmc->card_caps |= MMC_MODE_4BIT;

	/* Version 1.0 doesn't support switching */
	if (mmc->version == SD_VERSION_1_0)
		return 0;

	timeout = 4;
	while (timeout--) {
		err = sd_switch(mmc, SD_SWITCH_CHECK, 0, 1,
				(u8 *)switch_status);

		if (err)
			return err;

		/* The high-speed function is busy. Try again */
		if (!(__be32_to_cpu(switch_status[7]) & SD_HIGHSPEED_BUSY))
			break;
	}

	/* If high-speed isn't supported, we return */
	if (__be32_to_cpu(switch_status[3]) & SD_HIGHSPEED_SUPPORTED)
		mmc->card_caps |= MMC_CAP(SD_HS);

#if CONFIG_IS_ENABLED(MMC_UHS_SUPPORT)
	/* Version before 3.0 don't support UHS modes */
	if (mmc->version < SD_VERSION_3)
		return 0;

	sd3_bus_mode = __be32_to_cpu(switch_status[3]) >> 16 & 0x1f;
	if (sd3_bus_mode & SD_MODE_UHS_SDR104)
		mmc->card_caps |= MMC_CAP(UHS_SDR104);
	if (sd3_bus_mode & SD_MODE_UHS_SDR50)
		mmc->card_caps |= MMC_CAP(UHS_SDR50);
	if (sd3_bus_mode & SD_MODE_UHS_SDR25)
		mmc->card_caps |= MMC_CAP(UHS_SDR25);
	if (sd3_bus_mode & SD_MODE_UHS_SDR12)
		mmc->card_caps |= MMC_CAP(UHS_SDR12);
	if (sd3_bus_mode & SD_MODE_UHS_DDR50)
		mmc->card_caps |= MMC_CAP(UHS_DDR50);
#endif

	return 0;
}
1304
1305 static int sd_set_card_speed(struct mmc *mmc, enum bus_mode mode)
1306 {
1307 int err;
1308
1309 ALLOC_CACHE_ALIGN_BUFFER(uint, switch_status, 16);
1310 int speed;
1311
1312 switch (mode) {
1313 case SD_LEGACY:
1314 speed = UHS_SDR12_BUS_SPEED;
1315 break;
1316 case SD_HS:
1317 speed = HIGH_SPEED_BUS_SPEED;
1318 break;
1319 #if CONFIG_IS_ENABLED(MMC_UHS_SUPPORT)
1320 case UHS_SDR12:
1321 speed = UHS_SDR12_BUS_SPEED;
1322 break;
1323 case UHS_SDR25:
1324 speed = UHS_SDR25_BUS_SPEED;
1325 break;
1326 case UHS_SDR50:
1327 speed = UHS_SDR50_BUS_SPEED;
1328 break;
1329 case UHS_DDR50:
1330 speed = UHS_DDR50_BUS_SPEED;
1331 break;
1332 case UHS_SDR104:
1333 speed = UHS_SDR104_BUS_SPEED;
1334 break;
1335 #endif
1336 default:
1337 return -EINVAL;
1338 }
1339
1340 err = sd_switch(mmc, SD_SWITCH_SWITCH, 0, speed, (u8 *)switch_status);
1341 if (err)
1342 return err;
1343
1344 if ((__be32_to_cpu(switch_status[4]) >> 24) != speed)
1345 return -ENOTSUPP;
1346
1347 return 0;
1348 }
1349
1350 int sd_select_bus_width(struct mmc *mmc, int w)
1351 {
1352 int err;
1353 struct mmc_cmd cmd;
1354
1355 if ((w != 4) && (w != 1))
1356 return -EINVAL;
1357
1358 cmd.cmdidx = MMC_CMD_APP_CMD;
1359 cmd.resp_type = MMC_RSP_R1;
1360 cmd.cmdarg = mmc->rca << 16;
1361
1362 err = mmc_send_cmd(mmc, &cmd, NULL);
1363 if (err)
1364 return err;
1365
1366 cmd.cmdidx = SD_CMD_APP_SET_BUS_WIDTH;
1367 cmd.resp_type = MMC_RSP_R1;
1368 if (w == 4)
1369 cmd.cmdarg = 2;
1370 else if (w == 1)
1371 cmd.cmdarg = 0;
1372 err = mmc_send_cmd(mmc, &cmd, NULL);
1373 if (err)
1374 return err;
1375
1376 return 0;
1377 }
1378
/*
 * sd_read_ssr() - read the 64-byte SD Status (ACMD13) and cache the
 * allocation-unit size and erase timing information in mmc->ssr.
 *
 * Return: 0 on success (an invalid AU size is only reported via
 * debug() and still returns 0), or a negative error code if the
 * transfer keeps failing after the retries.
 */
static int sd_read_ssr(struct mmc *mmc)
{
	int err, i;
	struct mmc_cmd cmd;
	/* 16 big-endian words = the 64-byte SD status block */
	ALLOC_CACHE_ALIGN_BUFFER(uint, ssr, 16);
	struct mmc_data data;
	int timeout = 3;
	unsigned int au, eo, et, es;

	/* ACMD13 must be preceded by APP_CMD addressed to the card */
	cmd.cmdidx = MMC_CMD_APP_CMD;
	cmd.resp_type = MMC_RSP_R1;
	cmd.cmdarg = mmc->rca << 16;

	err = mmc_send_cmd(mmc, &cmd, NULL);
	if (err)
		return err;

	cmd.cmdidx = SD_CMD_APP_SD_STATUS;
	cmd.resp_type = MMC_RSP_R1;
	cmd.cmdarg = 0;

retry_ssr:
	data.dest = (char *)ssr;
	data.blocksize = 64;
	data.blocks = 1;
	data.flags = MMC_DATA_READ;

	err = mmc_send_cmd(mmc, &cmd, &data);
	if (err) {
		/* retry the data transfer a few times before giving up */
		if (timeout--)
			goto retry_ssr;

		return err;
	}

	/* convert the big-endian status block to host byte order */
	for (i = 0; i < 16; i++)
		ssr[i] = be32_to_cpu(ssr[i]);

	/* AU_SIZE field; values above 9 are only defined from SD 3.0 on */
	au = (ssr[2] >> 12) & 0xF;
	if ((au <= 9) || (mmc->version == SD_VERSION_3)) {
		mmc->ssr.au = sd_au_size[au];
		es = (ssr[3] >> 24) & 0xFF;
		es |= (ssr[2] & 0xFF) << 8;
		et = (ssr[3] >> 18) & 0x3F;
		if (es && et) {
			eo = (ssr[3] >> 16) & 0x3;
			/* erase timeout/offset are scaled to milliseconds */
			mmc->ssr.erase_timeout = (et * 1000) / es;
			mmc->ssr.erase_offset = eo * 1000;
		}
	} else {
		debug("Invalid Allocation Unit Size.\n");
	}

	return 0;
}
1434
/* frequency bases */
/* divided by 10 to be nice to platforms without floating point */
static const int fbase[] = {
	10000,
	100000,
	1000000,
	10000000,
};

/* Multiplier values for TRAN_SPEED. Multiplied by 10 to be nice
 * to platforms without floating point.
 * Index 0 is reserved by the CSD TRAN_SPEED encoding.
 */
static const u8 multipliers[] = {
	0, /* reserved */
	10,
	12,
	13,
	15,
	20,
	25,
	30,
	35,
	40,
	45,
	50,
	55,
	60,
	70,
	80,
};
1465
1466 static inline int bus_width(uint cap)
1467 {
1468 if (cap == MMC_MODE_8BIT)
1469 return 8;
1470 if (cap == MMC_MODE_4BIT)
1471 return 4;
1472 if (cap == MMC_MODE_1BIT)
1473 return 1;
1474 pr_warn("invalid bus witdh capability 0x%x\n", cap);
1475 return 0;
1476 }
1477
#if !CONFIG_IS_ENABLED(DM_MMC)
#ifdef MMC_SUPPORTS_TUNING
/* Non-DM stub: no tuning hook is available, so report "not supported". */
static int mmc_execute_tuning(struct mmc *mmc, uint opcode)
{
	return -ENOTSUPP;
}
#endif

/* Non-DM stub: nothing to send before the first command. */
static void mmc_send_init_stream(struct mmc *mmc)
{
}
1489
1490 static int mmc_set_ios(struct mmc *mmc)
1491 {
1492 int ret = 0;
1493
1494 if (mmc->cfg->ops->set_ios)
1495 ret = mmc->cfg->ops->set_ios(mmc);
1496
1497 return ret;
1498 }
1499 #endif
1500
1501 int mmc_set_clock(struct mmc *mmc, uint clock, bool disable)
1502 {
1503 if (clock > mmc->cfg->f_max)
1504 clock = mmc->cfg->f_max;
1505
1506 if (clock < mmc->cfg->f_min)
1507 clock = mmc->cfg->f_min;
1508
1509 mmc->clock = clock;
1510 mmc->clk_disable = disable;
1511
1512 return mmc_set_ios(mmc);
1513 }
1514
/* Record the new host bus width and apply it via the set_ios hook. */
static int mmc_set_bus_width(struct mmc *mmc, uint width)
{
	mmc->bus_width = width;

	return mmc_set_ios(mmc);
}
1521
1522 #if CONFIG_IS_ENABLED(MMC_VERBOSE) || defined(DEBUG)
1523 /*
1524 * helper function to display the capabilities in a human
1525 * friendly manner. The capabilities include bus width and
1526 * supported modes.
1527 */
1528 void mmc_dump_capabilities(const char *text, uint caps)
1529 {
1530 enum bus_mode mode;
1531
1532 printf("%s: widths [", text);
1533 if (caps & MMC_MODE_8BIT)
1534 printf("8, ");
1535 if (caps & MMC_MODE_4BIT)
1536 printf("4, ");
1537 if (caps & MMC_MODE_1BIT)
1538 printf("1, ");
1539 printf("\b\b] modes [");
1540 for (mode = MMC_LEGACY; mode < MMC_MODES_END; mode++)
1541 if (MMC_CAP(mode) & caps)
1542 printf("%s, ", mmc_mode_name(mode));
1543 printf("\b\b]\n");
1544 }
1545 #endif
1546
/*
 * One candidate entry for bus-mode negotiation: the mode itself, the
 * bus-width capability mask it may be tried with, and (when tuning
 * support is compiled in) the tuning command to run after switching.
 */
struct mode_width_tuning {
	enum bus_mode mode;
	uint widths;
#ifdef MMC_SUPPORTS_TUNING
	uint tuning;
#endif
};
1554
1555 #if CONFIG_IS_ENABLED(MMC_IO_VOLTAGE)
1556 int mmc_voltage_to_mv(enum mmc_voltage voltage)
1557 {
1558 switch (voltage) {
1559 case MMC_SIGNAL_VOLTAGE_000: return 0;
1560 case MMC_SIGNAL_VOLTAGE_330: return 3300;
1561 case MMC_SIGNAL_VOLTAGE_180: return 1800;
1562 case MMC_SIGNAL_VOLTAGE_120: return 1200;
1563 }
1564 return -EINVAL;
1565 }
1566
/*
 * mmc_set_signal_voltage() - change the I/O signal voltage
 *
 * No-op when the requested voltage is already active; otherwise the
 * new value is recorded and pushed to the host driver via mmc_set_ios().
 *
 * Return: 0 on success or the mmc_set_ios() error.
 */
static int mmc_set_signal_voltage(struct mmc *mmc, uint signal_voltage)
{
	int err;

	if (mmc->signal_voltage == signal_voltage)
		return 0;

	mmc->signal_voltage = signal_voltage;
	err = mmc_set_ios(mmc);
	if (err)
		debug("unable to set voltage (err %d)\n", err);

	return err;
}
#else
/* Stub when I/O voltage switching is not compiled in: always succeed. */
static inline int mmc_set_signal_voltage(struct mmc *mmc, uint signal_voltage)
{
	return 0;
}
1586 #endif
1587
/*
 * SD bus modes in order of preference, fastest first. The negotiation
 * loop walks this table and picks the first mode both card and host
 * support.
 */
static const struct mode_width_tuning sd_modes_by_pref[] = {
#if CONFIG_IS_ENABLED(MMC_UHS_SUPPORT)
#ifdef MMC_SUPPORTS_TUNING
	{
		.mode = UHS_SDR104,
		.widths = MMC_MODE_4BIT | MMC_MODE_1BIT,
		.tuning = MMC_CMD_SEND_TUNING_BLOCK
	},
#endif
	{
		.mode = UHS_SDR50,
		.widths = MMC_MODE_4BIT | MMC_MODE_1BIT,
	},
	{
		.mode = UHS_DDR50,
		.widths = MMC_MODE_4BIT | MMC_MODE_1BIT,
	},
	{
		.mode = UHS_SDR25,
		.widths = MMC_MODE_4BIT | MMC_MODE_1BIT,
	},
#endif
	{
		.mode = SD_HS,
		.widths = MMC_MODE_4BIT | MMC_MODE_1BIT,
	},
#if CONFIG_IS_ENABLED(MMC_UHS_SUPPORT)
	{
		.mode = UHS_SDR12,
		.widths = MMC_MODE_4BIT | MMC_MODE_1BIT,
	},
#endif
	{
		.mode = SD_LEGACY,
		.widths = MMC_MODE_4BIT | MMC_MODE_1BIT,
	}
};

/* Iterate over sd_modes_by_pref entries whose mode is present in caps */
#define for_each_sd_mode_by_pref(caps, mwt) \
	for (mwt = sd_modes_by_pref;\
	    mwt < sd_modes_by_pref + ARRAY_SIZE(sd_modes_by_pref);\
	    mwt++) \
		if (caps & MMC_CAP(mwt->mode))
1631
/*
 * sd_select_mode_and_width() - negotiate the fastest bus mode/width
 * supported by both the SD card and the host.
 *
 * Walks sd_modes_by_pref from fastest to slowest; for each candidate
 * it configures the bus width on card and host, switches the card
 * speed, applies the host-side clock/mode, optionally runs tuning,
 * and validates the link by reading the SD status register. On any
 * failure it falls back to SD_LEGACY and tries the next candidate.
 *
 * @card_caps: capability mask reported by the card
 *
 * Return: 0 once a mode works, -ENOTSUPP if none does.
 */
static int sd_select_mode_and_width(struct mmc *mmc, uint card_caps)
{
	int err;
	uint widths[] = {MMC_MODE_4BIT, MMC_MODE_1BIT};
	const struct mode_width_tuning *mwt;
#if CONFIG_IS_ENABLED(MMC_UHS_SUPPORT)
	/* UHS requires the card to have accepted 1.8V signalling (S18R) */
	bool uhs_en = (mmc->ocr & OCR_S18R) ? true : false;
#else
	bool uhs_en = false;
#endif
	uint caps;

#ifdef DEBUG
	mmc_dump_capabilities("sd card", card_caps);
	mmc_dump_capabilities("host", mmc->host_caps);
#endif

	/* Restrict card's capabilities by what the host can do */
	caps = card_caps & mmc->host_caps;

	if (!uhs_en)
		caps &= ~UHS_CAPS;

	for_each_sd_mode_by_pref(caps, mwt) {
		uint *w;

		for (w = widths; w < widths + ARRAY_SIZE(widths); w++) {
			if (*w & caps & mwt->widths) {
				debug("trying mode %s width %d (at %d MHz)\n",
				      mmc_mode_name(mwt->mode),
				      bus_width(*w),
				      mmc_mode2freq(mmc, mwt->mode) / 1000000);

				/* configure the bus width (card + host) */
				err = sd_select_bus_width(mmc, bus_width(*w));
				if (err)
					goto error;
				mmc_set_bus_width(mmc, bus_width(*w));

				/* configure the bus mode (card) */
				err = sd_set_card_speed(mmc, mwt->mode);
				if (err)
					goto error;

				/* configure the bus mode (host) */
				mmc_select_mode(mmc, mwt->mode);
				mmc_set_clock(mmc, mmc->tran_speed, false);

#ifdef MMC_SUPPORTS_TUNING
				/* execute tuning if needed */
				if (mwt->tuning && !mmc_host_is_spi(mmc)) {
					err = mmc_execute_tuning(mmc,
								 mwt->tuning);
					if (err) {
						debug("tuning failed\n");
						goto error;
					}
				}
#endif

				/* the SSR read doubles as a transfer check */
				err = sd_read_ssr(mmc);
				if (!err)
					return 0;

				pr_warn("bad ssr\n");

error:
				/* revert to a safer bus speed */
				mmc_select_mode(mmc, SD_LEGACY);
				mmc_set_clock(mmc, mmc->tran_speed, false);
			}
		}
	}

	printf("unable to select a mode\n");
	return -ENOTSUPP;
}
1709
1710 /*
1711 * read the compare the part of ext csd that is constant.
1712 * This can be used to check that the transfer is working
1713 * as expected.
1714 */
1715 static int mmc_read_and_compare_ext_csd(struct mmc *mmc)
1716 {
1717 int err;
1718 const u8 *ext_csd = mmc->ext_csd;
1719 ALLOC_CACHE_ALIGN_BUFFER(u8, test_csd, MMC_MAX_BLOCK_LEN);
1720
1721 if (mmc->version < MMC_VERSION_4)
1722 return 0;
1723
1724 err = mmc_send_ext_csd(mmc, test_csd);
1725 if (err)
1726 return err;
1727
1728 /* Only compare read only fields */
1729 if (ext_csd[EXT_CSD_PARTITIONING_SUPPORT]
1730 == test_csd[EXT_CSD_PARTITIONING_SUPPORT] &&
1731 ext_csd[EXT_CSD_HC_WP_GRP_SIZE]
1732 == test_csd[EXT_CSD_HC_WP_GRP_SIZE] &&
1733 ext_csd[EXT_CSD_REV]
1734 == test_csd[EXT_CSD_REV] &&
1735 ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE]
1736 == test_csd[EXT_CSD_HC_ERASE_GRP_SIZE] &&
1737 memcmp(&ext_csd[EXT_CSD_SEC_CNT],
1738 &test_csd[EXT_CSD_SEC_CNT], 4) == 0)
1739 return 0;
1740
1741 return -EBADMSG;
1742 }
1743
#if CONFIG_IS_ENABLED(MMC_IO_VOLTAGE)
/*
 * mmc_set_lowest_voltage() - pick and apply a signal voltage permitted
 * both by the card (per its EXT_CSD card type for @mode) and by the
 * caller's @allowed_mask.
 *
 * Return: 0 once a voltage is applied, -ENOTSUPP if none of the
 * allowed voltages could be set.
 */
static int mmc_set_lowest_voltage(struct mmc *mmc, enum bus_mode mode,
				  uint32_t allowed_mask)
{
	u32 card_mask = 0;

	switch (mode) {
	case MMC_HS_200:
		if (mmc->cardtype & EXT_CSD_CARD_TYPE_HS200_1_8V)
			card_mask |= MMC_SIGNAL_VOLTAGE_180;
		if (mmc->cardtype & EXT_CSD_CARD_TYPE_HS200_1_2V)
			card_mask |= MMC_SIGNAL_VOLTAGE_120;
		break;
	case MMC_DDR_52:
		if (mmc->cardtype & EXT_CSD_CARD_TYPE_DDR_1_8V)
			card_mask |= MMC_SIGNAL_VOLTAGE_330 |
				     MMC_SIGNAL_VOLTAGE_180;
		if (mmc->cardtype & EXT_CSD_CARD_TYPE_DDR_1_2V)
			card_mask |= MMC_SIGNAL_VOLTAGE_120;
		break;
	default:
		card_mask |= MMC_SIGNAL_VOLTAGE_330;
		break;
	}

	while (card_mask & allowed_mask) {
		enum mmc_voltage best_match;

		/*
		 * ffs() picks the lowest set bit. NOTE(review): this
		 * assumes lower MMC_SIGNAL_VOLTAGE_* bits correspond to
		 * lower voltages - confirm against the enum definition.
		 */
		best_match = 1 << (ffs(card_mask & allowed_mask) - 1);
		if (!mmc_set_signal_voltage(mmc, best_match))
			return 0;

		/* that voltage failed; drop it and try the next one */
		allowed_mask &= ~best_match;
	}

	return -ENOTSUPP;
}
#else
/* Stub when I/O voltage switching is not compiled in: always succeed. */
static inline int mmc_set_lowest_voltage(struct mmc *mmc, enum bus_mode mode,
					 uint32_t allowed_mask)
{
	return 0;
}
#endif
1788
/*
 * eMMC bus modes in order of preference, fastest first. The negotiation
 * loop walks this table and tries each mode/width pair both sides
 * support.
 */
static const struct mode_width_tuning mmc_modes_by_pref[] = {
#if CONFIG_IS_ENABLED(MMC_HS200_SUPPORT)
	{
		.mode = MMC_HS_200,
		.widths = MMC_MODE_8BIT | MMC_MODE_4BIT,
		.tuning = MMC_CMD_SEND_TUNING_BLOCK_HS200
	},
#endif
	{
		.mode = MMC_DDR_52,
		.widths = MMC_MODE_8BIT | MMC_MODE_4BIT,
	},
	{
		.mode = MMC_HS_52,
		.widths = MMC_MODE_8BIT | MMC_MODE_4BIT | MMC_MODE_1BIT,
	},
	{
		.mode = MMC_HS,
		.widths = MMC_MODE_8BIT | MMC_MODE_4BIT | MMC_MODE_1BIT,
	},
	{
		.mode = MMC_LEGACY,
		.widths = MMC_MODE_8BIT | MMC_MODE_4BIT | MMC_MODE_1BIT,
	}
};

/* Iterate over mmc_modes_by_pref entries whose mode is present in caps */
#define for_each_mmc_mode_by_pref(caps, mwt) \
	for (mwt = mmc_modes_by_pref;\
	    mwt < mmc_modes_by_pref + ARRAY_SIZE(mmc_modes_by_pref);\
	    mwt++) \
		if (caps & MMC_CAP(mwt->mode))

/*
 * Mapping from a bus-width capability (plus DDR flag) to the value to
 * write into the EXT_CSD BUS_WIDTH byte. Ordered widest/DDR first so
 * iteration prefers the fastest configuration.
 */
static const struct ext_csd_bus_width {
	uint cap;
	bool is_ddr;
	uint ext_csd_bits;
} ext_csd_bus_width[] = {
	{MMC_MODE_8BIT, true, EXT_CSD_DDR_BUS_WIDTH_8},
	{MMC_MODE_4BIT, true, EXT_CSD_DDR_BUS_WIDTH_4},
	{MMC_MODE_8BIT, false, EXT_CSD_BUS_WIDTH_8},
	{MMC_MODE_4BIT, false, EXT_CSD_BUS_WIDTH_4},
	{MMC_MODE_1BIT, false, EXT_CSD_BUS_WIDTH_1},
};

/* Iterate over width entries matching the DDR flag and capability mask */
#define for_each_supported_width(caps, ddr, ecbv) \
	for (ecbv = ext_csd_bus_width;\
	    ecbv < ext_csd_bus_width + ARRAY_SIZE(ext_csd_bus_width);\
	    ecbv++) \
		if ((ddr == ecbv->is_ddr) && (caps & ecbv->cap))
1838
/*
 * mmc_select_mode_and_width() - negotiate the fastest eMMC bus
 * mode/width supported by both the card and the host.
 *
 * Walks mmc_modes_by_pref (fastest first); for each mode/width
 * candidate it selects a suitable signal voltage, programs the bus
 * width and speed in EXT_CSD, applies the host-side settings, runs
 * tuning when required, and validates the link by re-reading EXT_CSD.
 * On failure it reverts to 1-bit legacy mode and tries the next
 * candidate.
 *
 * Return: 0 once a mode works, -ENOTSUPP otherwise.
 */
static int mmc_select_mode_and_width(struct mmc *mmc, uint card_caps)
{
	int err;
	const struct mode_width_tuning *mwt;
	const struct ext_csd_bus_width *ecbw;

#ifdef DEBUG
	mmc_dump_capabilities("mmc", card_caps);
	mmc_dump_capabilities("host", mmc->host_caps);
#endif

	/* Restrict card's capabilities by what the host can do */
	card_caps &= mmc->host_caps;

	/* Only version 4 of MMC supports wider bus widths */
	if (mmc->version < MMC_VERSION_4)
		return 0;

	if (!mmc->ext_csd) {
		debug("No ext_csd found!\n"); /* this should never happen */
		return -ENOTSUPP;
	}

	/* start negotiation from the safe legacy clock */
	mmc_set_clock(mmc, mmc->legacy_speed, false);

	for_each_mmc_mode_by_pref(card_caps, mwt) {
		for_each_supported_width(card_caps & mwt->widths,
					 mmc_is_mode_ddr(mwt->mode), ecbw) {
			enum mmc_voltage old_voltage;
			debug("trying mode %s width %d (at %d MHz)\n",
			      mmc_mode_name(mwt->mode),
			      bus_width(ecbw->cap),
			      mmc_mode2freq(mmc, mwt->mode) / 1000000);
			/* remember the voltage so it can be restored on error */
			old_voltage = mmc->signal_voltage;
			err = mmc_set_lowest_voltage(mmc, mwt->mode,
						     MMC_ALL_SIGNAL_VOLTAGE);
			if (err)
				continue;

			/* configure the bus width (card + host) */
			err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
				    EXT_CSD_BUS_WIDTH,
				    ecbw->ext_csd_bits & ~EXT_CSD_DDR_FLAG);
			if (err)
				goto error;
			mmc_set_bus_width(mmc, bus_width(ecbw->cap));

			/* configure the bus speed (card) */
			err = mmc_set_card_speed(mmc, mwt->mode);
			if (err)
				goto error;

			/*
			 * configure the bus width AND the ddr mode (card)
			 * The host side will be taken care of in the next step
			 */
			if (ecbw->ext_csd_bits & EXT_CSD_DDR_FLAG) {
				err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
						 EXT_CSD_BUS_WIDTH,
						 ecbw->ext_csd_bits);
				if (err)
					goto error;
			}

			/* configure the bus mode (host) */
			mmc_select_mode(mmc, mwt->mode);
			mmc_set_clock(mmc, mmc->tran_speed, false);
#ifdef MMC_SUPPORTS_TUNING

			/* execute tuning if needed */
			if (mwt->tuning) {
				err = mmc_execute_tuning(mmc, mwt->tuning);
				if (err) {
					debug("tuning failed\n");
					goto error;
				}
			}
#endif

			/* do a transfer to check the configuration */
			err = mmc_read_and_compare_ext_csd(mmc);
			if (!err)
				return 0;
error:
			mmc_set_signal_voltage(mmc, old_voltage);
			/* if an error occurred, revert to a safer bus mode */
			mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
				   EXT_CSD_BUS_WIDTH, EXT_CSD_BUS_WIDTH_1);
			mmc_select_mode(mmc, MMC_LEGACY);
			mmc_set_bus_width(mmc, 1);
		}
	}

	pr_err("unable to select a mode\n");

	return -ENOTSUPP;
}
1936
1937 static int mmc_startup_v4(struct mmc *mmc)
1938 {
1939 int err, i;
1940 u64 capacity;
1941 bool has_parts = false;
1942 bool part_completed;
1943 static const u32 mmc_versions[] = {
1944 MMC_VERSION_4,
1945 MMC_VERSION_4_1,
1946 MMC_VERSION_4_2,
1947 MMC_VERSION_4_3,
1948 MMC_VERSION_4_41,
1949 MMC_VERSION_4_5,
1950 MMC_VERSION_5_0,
1951 MMC_VERSION_5_1
1952 };
1953
1954 ALLOC_CACHE_ALIGN_BUFFER(u8, ext_csd, MMC_MAX_BLOCK_LEN);
1955
1956 if (IS_SD(mmc) || (mmc->version < MMC_VERSION_4))
1957 return 0;
1958
1959 /* check ext_csd version and capacity */
1960 err = mmc_send_ext_csd(mmc, ext_csd);
1961 if (err)
1962 goto error;
1963
1964 /* store the ext csd for future reference */
1965 if (!mmc->ext_csd)
1966 mmc->ext_csd = malloc(MMC_MAX_BLOCK_LEN);
1967 if (!mmc->ext_csd)
1968 return -ENOMEM;
1969 memcpy(mmc->ext_csd, ext_csd, MMC_MAX_BLOCK_LEN);
1970
1971 if (ext_csd[EXT_CSD_REV] > ARRAY_SIZE(mmc_versions))
1972 return -EINVAL;
1973
1974 mmc->version = mmc_versions[ext_csd[EXT_CSD_REV]];
1975
1976 if (mmc->version >= MMC_VERSION_4_2) {
1977 /*
1978 * According to the JEDEC Standard, the value of
1979 * ext_csd's capacity is valid if the value is more
1980 * than 2GB
1981 */
1982 capacity = ext_csd[EXT_CSD_SEC_CNT] << 0
1983 | ext_csd[EXT_CSD_SEC_CNT + 1] << 8
1984 | ext_csd[EXT_CSD_SEC_CNT + 2] << 16
1985 | ext_csd[EXT_CSD_SEC_CNT + 3] << 24;
1986 capacity *= MMC_MAX_BLOCK_LEN;
1987 if ((capacity >> 20) > 2 * 1024)
1988 mmc->capacity_user = capacity;
1989 }
1990
1991 /* The partition data may be non-zero but it is only
1992 * effective if PARTITION_SETTING_COMPLETED is set in
1993 * EXT_CSD, so ignore any data if this bit is not set,
1994 * except for enabling the high-capacity group size
1995 * definition (see below).
1996 */
1997 part_completed = !!(ext_csd[EXT_CSD_PARTITION_SETTING] &
1998 EXT_CSD_PARTITION_SETTING_COMPLETED);
1999
2000 /* store the partition info of emmc */
2001 mmc->part_support = ext_csd[EXT_CSD_PARTITIONING_SUPPORT];
2002 if ((ext_csd[EXT_CSD_PARTITIONING_SUPPORT] & PART_SUPPORT) ||
2003 ext_csd[EXT_CSD_BOOT_MULT])
2004 mmc->part_config = ext_csd[EXT_CSD_PART_CONF];
2005 if (part_completed &&
2006 (ext_csd[EXT_CSD_PARTITIONING_SUPPORT] & ENHNCD_SUPPORT))
2007 mmc->part_attr = ext_csd[EXT_CSD_PARTITIONS_ATTRIBUTE];
2008
2009 mmc->capacity_boot = ext_csd[EXT_CSD_BOOT_MULT] << 17;
2010
2011 mmc->capacity_rpmb = ext_csd[EXT_CSD_RPMB_MULT] << 17;
2012
2013 for (i = 0; i < 4; i++) {
2014 int idx = EXT_CSD_GP_SIZE_MULT + i * 3;
2015 uint mult = (ext_csd[idx + 2] << 16) +
2016 (ext_csd[idx + 1] << 8) + ext_csd[idx];
2017 if (mult)
2018 has_parts = true;
2019 if (!part_completed)
2020 continue;
2021 mmc->capacity_gp[i] = mult;
2022 mmc->capacity_gp[i] *=
2023 ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE];
2024 mmc->capacity_gp[i] *= ext_csd[EXT_CSD_HC_WP_GRP_SIZE];
2025 mmc->capacity_gp[i] <<= 19;
2026 }
2027
2028 if (part_completed) {
2029 mmc->enh_user_size =
2030 (ext_csd[EXT_CSD_ENH_SIZE_MULT + 2] << 16) +
2031 (ext_csd[EXT_CSD_ENH_SIZE_MULT + 1] << 8) +
2032 ext_csd[EXT_CSD_ENH_SIZE_MULT];
2033 mmc->enh_user_size *= ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE];
2034 mmc->enh_user_size *= ext_csd[EXT_CSD_HC_WP_GRP_SIZE];
2035 mmc->enh_user_size <<= 19;
2036 mmc->enh_user_start =
2037 (ext_csd[EXT_CSD_ENH_START_ADDR + 3] << 24) +
2038 (ext_csd[EXT_CSD_ENH_START_ADDR + 2] << 16) +
2039 (ext_csd[EXT_CSD_ENH_START_ADDR + 1] << 8) +
2040 ext_csd[EXT_CSD_ENH_START_ADDR];
2041 if (mmc->high_capacity)
2042 mmc->enh_user_start <<= 9;
2043 }
2044
2045 /*
2046 * Host needs to enable ERASE_GRP_DEF bit if device is
2047 * partitioned. This bit will be lost every time after a reset
2048 * or power off. This will affect erase size.
2049 */
2050 if (part_completed)
2051 has_parts = true;
2052 if ((ext_csd[EXT_CSD_PARTITIONING_SUPPORT] & PART_SUPPORT) &&
2053 (ext_csd[EXT_CSD_PARTITIONS_ATTRIBUTE] & PART_ENH_ATTRIB))
2054 has_parts = true;
2055 if (has_parts) {
2056 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
2057 EXT_CSD_ERASE_GROUP_DEF, 1);
2058
2059 if (err)
2060 goto error;
2061
2062 ext_csd[EXT_CSD_ERASE_GROUP_DEF] = 1;
2063 }
2064
2065 if (ext_csd[EXT_CSD_ERASE_GROUP_DEF] & 0x01) {
2066 /* Read out group size from ext_csd */
2067 mmc->erase_grp_size =
2068 ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE] * 1024;
2069 /*
2070 * if high capacity and partition setting completed
2071 * SEC_COUNT is valid even if it is smaller than 2 GiB
2072 * JEDEC Standard JESD84-B45, 6.2.4
2073 */
2074 if (mmc->high_capacity && part_completed) {
2075 capacity = (ext_csd[EXT_CSD_SEC_CNT]) |
2076 (ext_csd[EXT_CSD_SEC_CNT + 1] << 8) |
2077 (ext_csd[EXT_CSD_SEC_CNT + 2] << 16) |
2078 (ext_csd[EXT_CSD_SEC_CNT + 3] << 24);
2079 capacity *= MMC_MAX_BLOCK_LEN;
2080 mmc->capacity_user = capacity;
2081 }
2082 } else {
2083 /* Calculate the group size from the csd value. */
2084 int erase_gsz, erase_gmul;
2085
2086 erase_gsz = (mmc->csd[2] & 0x00007c00) >> 10;
2087 erase_gmul = (mmc->csd[2] & 0x000003e0) >> 5;
2088 mmc->erase_grp_size = (erase_gsz + 1)
2089 * (erase_gmul + 1);
2090 }
2091
2092 mmc->hc_wp_grp_size = 1024
2093 * ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE]
2094 * ext_csd[EXT_CSD_HC_WP_GRP_SIZE];
2095
2096 mmc->wr_rel_set = ext_csd[EXT_CSD_WR_REL_SET];
2097
2098 return 0;
2099 error:
2100 if (mmc->ext_csd) {
2101 free(mmc->ext_csd);
2102 mmc->ext_csd = NULL;
2103 }
2104 return err;
2105 }
2106
2107 static int mmc_startup(struct mmc *mmc)
2108 {
2109 int err, i;
2110 uint mult, freq;
2111 u64 cmult, csize;
2112 struct mmc_cmd cmd;
2113 struct blk_desc *bdesc;
2114
2115 #ifdef CONFIG_MMC_SPI_CRC_ON
2116 if (mmc_host_is_spi(mmc)) { /* enable CRC check for spi */
2117 cmd.cmdidx = MMC_CMD_SPI_CRC_ON_OFF;
2118 cmd.resp_type = MMC_RSP_R1;
2119 cmd.cmdarg = 1;
2120 err = mmc_send_cmd(mmc, &cmd, NULL);
2121 if (err)
2122 return err;
2123 }
2124 #endif
2125
2126 /* Put the Card in Identify Mode */
2127 cmd.cmdidx = mmc_host_is_spi(mmc) ? MMC_CMD_SEND_CID :
2128 MMC_CMD_ALL_SEND_CID; /* cmd not supported in spi */
2129 cmd.resp_type = MMC_RSP_R2;
2130 cmd.cmdarg = 0;
2131
2132 err = mmc_send_cmd(mmc, &cmd, NULL);
2133
2134 #ifdef CONFIG_MMC_QUIRKS
2135 if (err && (mmc->quirks & MMC_QUIRK_RETRY_SEND_CID)) {
2136 int retries = 4;
2137 /*
2138 * It has been seen that SEND_CID may fail on the first
2139 * attempt, let's try a few more time
2140 */
2141 do {
2142 err = mmc_send_cmd(mmc, &cmd, NULL);
2143 if (!err)
2144 break;
2145 } while (retries--);
2146 }
2147 #endif
2148
2149 if (err)
2150 return err;
2151
2152 memcpy(mmc->cid, cmd.response, 16);
2153
2154 /*
2155 * For MMC cards, set the Relative Address.
2156 * For SD cards, get the Relatvie Address.
2157 * This also puts the cards into Standby State
2158 */
2159 if (!mmc_host_is_spi(mmc)) { /* cmd not supported in spi */
2160 cmd.cmdidx = SD_CMD_SEND_RELATIVE_ADDR;
2161 cmd.cmdarg = mmc->rca << 16;
2162 cmd.resp_type = MMC_RSP_R6;
2163
2164 err = mmc_send_cmd(mmc, &cmd, NULL);
2165
2166 if (err)
2167 return err;
2168
2169 if (IS_SD(mmc))
2170 mmc->rca = (cmd.response[0] >> 16) & 0xffff;
2171 }
2172
2173 /* Get the Card-Specific Data */
2174 cmd.cmdidx = MMC_CMD_SEND_CSD;
2175 cmd.resp_type = MMC_RSP_R2;
2176 cmd.cmdarg = mmc->rca << 16;
2177
2178 err = mmc_send_cmd(mmc, &cmd, NULL);
2179
2180 if (err)
2181 return err;
2182
2183 mmc->csd[0] = cmd.response[0];
2184 mmc->csd[1] = cmd.response[1];
2185 mmc->csd[2] = cmd.response[2];
2186 mmc->csd[3] = cmd.response[3];
2187
2188 if (mmc->version == MMC_VERSION_UNKNOWN) {
2189 int version = (cmd.response[0] >> 26) & 0xf;
2190
2191 switch (version) {
2192 case 0:
2193 mmc->version = MMC_VERSION_1_2;
2194 break;
2195 case 1:
2196 mmc->version = MMC_VERSION_1_4;
2197 break;
2198 case 2:
2199 mmc->version = MMC_VERSION_2_2;
2200 break;
2201 case 3:
2202 mmc->version = MMC_VERSION_3;
2203 break;
2204 case 4:
2205 mmc->version = MMC_VERSION_4;
2206 break;
2207 default:
2208 mmc->version = MMC_VERSION_1_2;
2209 break;
2210 }
2211 }
2212
2213 /* divide frequency by 10, since the mults are 10x bigger */
2214 freq = fbase[(cmd.response[0] & 0x7)];
2215 mult = multipliers[((cmd.response[0] >> 3) & 0xf)];
2216
2217 mmc->legacy_speed = freq * mult;
2218 mmc_select_mode(mmc, MMC_LEGACY);
2219
2220 mmc->dsr_imp = ((cmd.response[1] >> 12) & 0x1);
2221 mmc->read_bl_len = 1 << ((cmd.response[1] >> 16) & 0xf);
2222
2223 if (IS_SD(mmc))
2224 mmc->write_bl_len = mmc->read_bl_len;
2225 else
2226 mmc->write_bl_len = 1 << ((cmd.response[3] >> 22) & 0xf);
2227
2228 if (mmc->high_capacity) {
2229 csize = (mmc->csd[1] & 0x3f) << 16
2230 | (mmc->csd[2] & 0xffff0000) >> 16;
2231 cmult = 8;
2232 } else {
2233 csize = (mmc->csd[1] & 0x3ff) << 2
2234 | (mmc->csd[2] & 0xc0000000) >> 30;
2235 cmult = (mmc->csd[2] & 0x00038000) >> 15;
2236 }
2237
2238 mmc->capacity_user = (csize + 1) << (cmult + 2);
2239 mmc->capacity_user *= mmc->read_bl_len;
2240 mmc->capacity_boot = 0;
2241 mmc->capacity_rpmb = 0;
2242 for (i = 0; i < 4; i++)
2243 mmc->capacity_gp[i] = 0;
2244
2245 if (mmc->read_bl_len > MMC_MAX_BLOCK_LEN)
2246 mmc->read_bl_len = MMC_MAX_BLOCK_LEN;
2247
2248 if (mmc->write_bl_len > MMC_MAX_BLOCK_LEN)
2249 mmc->write_bl_len = MMC_MAX_BLOCK_LEN;
2250
2251 if ((mmc->dsr_imp) && (0xffffffff != mmc->dsr)) {
2252 cmd.cmdidx = MMC_CMD_SET_DSR;
2253 cmd.cmdarg = (mmc->dsr & 0xffff) << 16;
2254 cmd.resp_type = MMC_RSP_NONE;
2255 if (mmc_send_cmd(mmc, &cmd, NULL))
2256 pr_warn("MMC: SET_DSR failed\n");
2257 }
2258
2259 /* Select the card, and put it into Transfer Mode */
2260 if (!mmc_host_is_spi(mmc)) { /* cmd not supported in spi */
2261 cmd.cmdidx = MMC_CMD_SELECT_CARD;
2262 cmd.resp_type = MMC_RSP_R1;
2263 cmd.cmdarg = mmc->rca << 16;
2264 err = mmc_send_cmd(mmc, &cmd, NULL);
2265
2266 if (err)
2267 return err;
2268 }
2269
2270 /*
2271 * For SD, its erase group is always one sector
2272 */
2273 mmc->erase_grp_size = 1;
2274 mmc->part_config = MMCPART_NOAVAILABLE;
2275
2276 err = mmc_startup_v4(mmc);
2277 if (err)
2278 return err;
2279
2280 err = mmc_set_capacity(mmc, mmc_get_blk_desc(mmc)->hwpart);
2281 if (err)
2282 return err;
2283
2284 if (IS_SD(mmc)) {
2285 err = sd_get_capabilities(mmc);
2286 if (err)
2287 return err;
2288 err = sd_select_mode_and_width(mmc, mmc->card_caps);
2289 } else {
2290 err = mmc_get_capabilities(mmc);
2291 if (err)
2292 return err;
2293 mmc_select_mode_and_width(mmc, mmc->card_caps);
2294 }
2295
2296 if (err)
2297 return err;
2298
2299 mmc->best_mode = mmc->selected_mode;
2300
2301 /* Fix the block length for DDR mode */
2302 if (mmc->ddr_mode) {
2303 mmc->read_bl_len = MMC_MAX_BLOCK_LEN;
2304 mmc->write_bl_len = MMC_MAX_BLOCK_LEN;
2305 }
2306
2307 /* fill in device description */
2308 bdesc = mmc_get_blk_desc(mmc);
2309 bdesc->lun = 0;
2310 bdesc->hwpart = 0;
2311 bdesc->type = 0;
2312 bdesc->blksz = mmc->read_bl_len;
2313 bdesc->log2blksz = LOG2(bdesc->blksz);
2314 bdesc->lba = lldiv(mmc->capacity, mmc->read_bl_len);
2315 #if !defined(CONFIG_SPL_BUILD) || \
2316 (defined(CONFIG_SPL_LIBCOMMON_SUPPORT) && \
2317 !defined(CONFIG_USE_TINY_PRINTF))
2318 sprintf(bdesc->vendor, "Man %06x Snr %04x%04x",
2319 mmc->cid[0] >> 24, (mmc->cid[2] & 0xffff),
2320 (mmc->cid[3] >> 16) & 0xffff);
2321 sprintf(bdesc->product, "%c%c%c%c%c%c", mmc->cid[0] & 0xff,
2322 (mmc->cid[1] >> 24), (mmc->cid[1] >> 16) & 0xff,
2323 (mmc->cid[1] >> 8) & 0xff, mmc->cid[1] & 0xff,
2324 (mmc->cid[2] >> 24) & 0xff);
2325 sprintf(bdesc->revision, "%d.%d", (mmc->cid[2] >> 20) & 0xf,
2326 (mmc->cid[2] >> 16) & 0xf);
2327 #else
2328 bdesc->vendor[0] = 0;
2329 bdesc->product[0] = 0;
2330 bdesc->revision[0] = 0;
2331 #endif
2332 #if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBDISK_SUPPORT)
2333 part_init(bdesc);
2334 #endif
2335
2336 return 0;
2337 }
2338
2339 static int mmc_send_if_cond(struct mmc *mmc)
2340 {
2341 struct mmc_cmd cmd;
2342 int err;
2343
2344 cmd.cmdidx = SD_CMD_SEND_IF_COND;
2345 /* We set the bit if the host supports voltages between 2.7 and 3.6 V */
2346 cmd.cmdarg = ((mmc->cfg->voltages & 0xff8000) != 0) << 8 | 0xaa;
2347 cmd.resp_type = MMC_RSP_R7;
2348
2349 err = mmc_send_cmd(mmc, &cmd, NULL);
2350
2351 if (err)
2352 return err;
2353
2354 if ((cmd.response[0] & 0xff) != 0xaa)
2355 return -EOPNOTSUPP;
2356 else
2357 mmc->version = SD_VERSION_2;
2358
2359 return 0;
2360 }
2361
#if !CONFIG_IS_ENABLED(DM_MMC)
/*
 * board-specific MMC power initializations.
 * Weak no-op default; non-DM boards may override it to set up power.
 */
__weak void board_mmc_power_init(void)
{
}
#endif
2368
/*
 * mmc_power_init() - look up the card's power supplies
 *
 * With driver model, resolve the vmmc/vqmmc regulators from the device
 * tree (a missing supply is only reported via debug()). Without driver
 * model, fall back to the board hook.
 *
 * Return: always 0; missing regulators are not fatal.
 */
static int mmc_power_init(struct mmc *mmc)
{
#if CONFIG_IS_ENABLED(DM_MMC)
#if CONFIG_IS_ENABLED(DM_REGULATOR)
	int ret;

	ret = device_get_supply_regulator(mmc->dev, "vmmc-supply",
					  &mmc->vmmc_supply);
	if (ret)
		debug("%s: No vmmc supply\n", mmc->dev->name);

	ret = device_get_supply_regulator(mmc->dev, "vqmmc-supply",
					  &mmc->vqmmc_supply);
	if (ret)
		debug("%s: No vqmmc supply\n", mmc->dev->name);
#endif
#else /* !CONFIG_DM_MMC */
	/*
	 * Driver model should use a regulator, as above, rather than calling
	 * out to board code.
	 */
	board_mmc_power_init();
#endif
	return 0;
}
2394
2395 /*
2396 * put the host in the initial state:
2397 * - turn on Vdd (card power supply)
2398 * - configure the bus width and clock to minimal values
2399 */
2400 static void mmc_set_initial_state(struct mmc *mmc)
2401 {
2402 int err;
2403
2404 /* First try to set 3.3V. If it fails set to 1.8V */
2405 err = mmc_set_signal_voltage(mmc, MMC_SIGNAL_VOLTAGE_330);
2406 if (err != 0)
2407 err = mmc_set_signal_voltage(mmc, MMC_SIGNAL_VOLTAGE_180);
2408 if (err != 0)
2409 pr_warn("mmc: failed to set signal voltage\n");
2410
2411 mmc_select_mode(mmc, MMC_LEGACY);
2412 mmc_set_bus_width(mmc, 1);
2413 mmc_set_clock(mmc, 0, false);
2414 }
2415
/*
 * mmc_power_on() - enable the card's Vdd supply, when one is managed
 * through the regulator framework; otherwise a no-op.
 *
 * Return: 0 on success or the regulator error.
 */
static int mmc_power_on(struct mmc *mmc)
{
#if CONFIG_IS_ENABLED(DM_MMC) && CONFIG_IS_ENABLED(DM_REGULATOR)
	if (mmc->vmmc_supply) {
		int ret = regulator_set_enable(mmc->vmmc_supply, true);

		if (ret) {
			puts("Error enabling VMMC supply\n");
			return ret;
		}
	}
#endif
	return 0;
}
2430
/*
 * mmc_power_off() - gate the bus clock, then disable the card's Vdd
 * supply when one is managed through the regulator framework.
 *
 * Return: 0 on success or the regulator error.
 */
static int mmc_power_off(struct mmc *mmc)
{
	/* stop the clock before cutting power */
	mmc_set_clock(mmc, 1, true);
#if CONFIG_IS_ENABLED(DM_MMC) && CONFIG_IS_ENABLED(DM_REGULATOR)
	if (mmc->vmmc_supply) {
		int ret = regulator_set_enable(mmc->vmmc_supply, false);

		if (ret) {
			debug("Error disabling VMMC supply\n");
			return ret;
		}
	}
#endif
	return 0;
}
2446
/*
 * mmc_power_cycle() - power the card off, wait, and power it back on
 *
 * Return: 0 on success or the error from the failing power step.
 */
static int mmc_power_cycle(struct mmc *mmc)
{
	int err = mmc_power_off(mmc);

	if (err)
		return err;
	/*
	 * SD spec recommends at least 1ms of delay. Let's wait for 2ms
	 * to be on the safer side.
	 */
	udelay(2000);

	return mmc_power_on(mmc);
}
2461
/*
 * Begin card initialization: check card presence, power the card up,
 * reset it with CMD0 and start the operating-condition negotiation
 * (ACMD41 for SD, CMD1 for MMC).
 *
 * The op-cond polling may be left pending on the device
 * (init_in_progress / op_cond_pending); mmc_complete_init() finishes
 * the sequence later via mmc_complete_op_cond() and mmc_startup().
 *
 * Returns 0 on success, -ENOMEDIUM when no card is present,
 * -EOPNOTSUPP when the card answers neither SD nor MMC op-cond, or a
 * negative error code from the underlying operations.
 */
int mmc_start_init(struct mmc *mmc)
{
	bool no_card;
	bool uhs_en = supports_uhs(mmc->cfg->host_caps);
	int err;

	/*
	 * all hosts are capable of 1 bit bus-width and able to use the legacy
	 * timings.
	 */
	mmc->host_caps = mmc->cfg->host_caps | MMC_CAP(SD_LEGACY) |
			 MMC_CAP(MMC_LEGACY) | MMC_MODE_1BIT;

	/* we pretend there's no card when init is NULL */
	no_card = mmc_getcd(mmc) == 0;
#if !CONFIG_IS_ENABLED(DM_MMC)
	no_card = no_card || (mmc->cfg->ops->init == NULL);
#endif
	if (no_card) {
		mmc->has_init = 0;
#if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT)
		printf("MMC: no card present\n");
#endif
		return -ENOMEDIUM;
	}

	/* Already fully initialized — nothing to do */
	if (mmc->has_init)
		return 0;

#ifdef CONFIG_FSL_ESDHC_ADAPTER_IDENT
	mmc_adapter_card_type_ident();
#endif
	err = mmc_power_init(mmc);
	if (err)
		return err;

#ifdef CONFIG_MMC_QUIRKS
	/* Enable command-retry workarounds for quirky cards */
	mmc->quirks = MMC_QUIRK_RETRY_SET_BLOCKLEN |
		      MMC_QUIRK_RETRY_SEND_CID;
#endif

	err = mmc_power_cycle(mmc);
	if (err) {
		/*
		 * if power cycling is not supported, we should not try
		 * to use the UHS modes, because we wouldn't be able to
		 * recover from an error during the UHS initialization.
		 */
		debug("Unable to do a full power cycle. Disabling the UHS modes for safety\n");
		uhs_en = false;
		mmc->host_caps &= ~UHS_CAPS;
		err = mmc_power_on(mmc);
	}
	if (err)
		return err;

#if CONFIG_IS_ENABLED(DM_MMC)
	/* The device has already been probed ready for use */
#else
	/* made sure it's not NULL earlier */
	err = mmc->cfg->ops->init(mmc);
	if (err)
		return err;
#endif
	mmc->ddr_mode = 0;

retry:
	/* Re-entered (without UHS) if UHS negotiation below fails */
	mmc_set_initial_state(mmc);
	mmc_send_init_stream(mmc);

	/* Reset the Card */
	err = mmc_go_idle(mmc);

	if (err)
		return err;

	/* The internal partition reset to user partition(0) at every CMD0*/
	mmc_get_blk_desc(mmc)->hwpart = 0;

	/* Test for SD version 2 */
	err = mmc_send_if_cond(mmc);
	/* Note: err is deliberately overwritten below; sd_send_op_cond()
	 * reports whether the card actually responds as an SD card.
	 */

	/* Now try to get the SD card's operating condition */
	err = sd_send_op_cond(mmc, uhs_en);
	if (err && uhs_en) {
		/* UHS negotiation failed: power cycle and retry without it */
		uhs_en = false;
		mmc_power_cycle(mmc);
		goto retry;
	}

	/* If the command timed out, we check for an MMC card */
	if (err == -ETIMEDOUT) {
		err = mmc_send_op_cond(mmc);

		if (err) {
#if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT)
			pr_err("Card did not respond to voltage select!\n");
#endif
			return -EOPNOTSUPP;
		}
	}

	if (!err)
		mmc->init_in_progress = 1;

	return err;
}
2569
2570 static int mmc_complete_init(struct mmc *mmc)
2571 {
2572 int err = 0;
2573
2574 mmc->init_in_progress = 0;
2575 if (mmc->op_cond_pending)
2576 err = mmc_complete_op_cond(mmc);
2577
2578 if (!err)
2579 err = mmc_startup(mmc);
2580 if (err)
2581 mmc->has_init = 0;
2582 else
2583 mmc->has_init = 1;
2584 return err;
2585 }
2586
2587 int mmc_init(struct mmc *mmc)
2588 {
2589 int err = 0;
2590 __maybe_unused unsigned start;
2591 #if CONFIG_IS_ENABLED(DM_MMC)
2592 struct mmc_uclass_priv *upriv = dev_get_uclass_priv(mmc->dev);
2593
2594 upriv->mmc = mmc;
2595 #endif
2596 if (mmc->has_init)
2597 return 0;
2598
2599 start = get_timer(0);
2600
2601 if (!mmc->init_in_progress)
2602 err = mmc_start_init(mmc);
2603
2604 if (!err)
2605 err = mmc_complete_init(mmc);
2606 if (err)
2607 printf("%s: %d, time %lu\n", __func__, err, get_timer(start));
2608
2609 return err;
2610 }
2611
/*
 * Record the driver stage register (DSR) value on the device.
 * Always succeeds (returns 0).
 */
int mmc_set_dsr(struct mmc *mmc, u16 val)
{
	mmc->dsr = val;
	return 0;
}
2617
/* CPU-specific MMC initializations */
__weak int cpu_mmc_init(bd_t *bis)
{
	/* Weak default: no CPU-level controllers; boards/SoCs override this */
	return -1;
}
2623
/* board-specific MMC initializations. */
__weak int board_mmc_init(bd_t *bis)
{
	/* Weak default: signals failure so mmc_probe() falls back to
	 * cpu_mmc_init(); boards override this to register controllers.
	 */
	return -1;
}
2629
/*
 * Mark a device for early initialization: when set, mmc_do_preinit()
 * calls mmc_start_init() on it during mmc_initialize().
 */
void mmc_set_preinit(struct mmc *mmc, int preinit)
{
	mmc->preinit = preinit;
}
2634
#if CONFIG_IS_ENABLED(DM_MMC) && defined(CONFIG_SPL_BUILD)
/* SPL with driver model: no eager probing is done here */
static int mmc_probe(bd_t *bis)
{
	return 0;
}
#elif CONFIG_IS_ENABLED(DM_MMC)
/* Driver model: walk the MMC uclass and probe every bound device */
static int mmc_probe(bd_t *bis)
{
	int ret, i;
	struct uclass *uc;
	struct udevice *dev;

	ret = uclass_get(UCLASS_MMC, &uc);
	if (ret)
		return ret;

	/*
	 * Try to add them in sequence order. Really with driver model we
	 * should allow holes, but the current MMC list does not allow that.
	 * So if we request 0, 1, 3 we will get 0, 1, 2.
	 */
	for (i = 0; ; i++) {
		/* Errors other than -ENODEV are deliberately ignored here;
		 * probe failures are reported by the loop below.
		 */
		ret = uclass_get_device_by_seq(UCLASS_MMC, i, &dev);
		if (ret == -ENODEV)
			break;
	}
	uclass_foreach_dev(dev, uc) {
		ret = device_probe(dev);
		if (ret)
			pr_err("%s - probe failed: %d\n", dev->name, ret);
	}

	/* Individual probe failures are logged but do not fail the call */
	return 0;
}
#else
/* Legacy (non-DM): let the board register controllers, else the CPU code */
static int mmc_probe(bd_t *bis)
{
	if (board_mmc_init(bis) < 0)
		cpu_mmc_init(bis);

	return 0;
}
#endif
2678
2679 int mmc_initialize(bd_t *bis)
2680 {
2681 static int initialized = 0;
2682 int ret;
2683 if (initialized) /* Avoid initializing mmc multiple times */
2684 return 0;
2685 initialized = 1;
2686
2687 #if !CONFIG_IS_ENABLED(BLK)
2688 #if !CONFIG_IS_ENABLED(MMC_TINY)
2689 mmc_list_init();
2690 #endif
2691 #endif
2692 ret = mmc_probe(bis);
2693 if (ret)
2694 return ret;
2695
2696 #ifndef CONFIG_SPL_BUILD
2697 print_mmc_devices(',');
2698 #endif
2699
2700 mmc_do_preinit();
2701 return 0;
2702 }
2703
#ifdef CONFIG_CMD_BKOPS_ENABLE
/*
 * Enable manual background operations (BKOPS) on an eMMC device.
 *
 * Reads EXT_CSD to verify the device advertises BKOPS support and that
 * it is not already enabled, then sets EXT_CSD_BKOPS_EN via SWITCH.
 *
 * Returns 0 on success (or if already enabled), -EMEDIUMTYPE when the
 * device lacks BKOPS support, or a negative error code otherwise.
 */
int mmc_set_bkops_enable(struct mmc *mmc)
{
	int ret;
	ALLOC_CACHE_ALIGN_BUFFER(u8, ext_csd, MMC_MAX_BLOCK_LEN);

	ret = mmc_send_ext_csd(mmc, ext_csd);
	if (ret) {
		puts("Could not get ext_csd register values\n");
		return ret;
	}

	/* Bit 0 of BKOPS_SUPPORT advertises the feature */
	if (!(ext_csd[EXT_CSD_BKOPS_SUPPORT] & 0x1)) {
		puts("Background operations not supported on device\n");
		return -EMEDIUMTYPE;
	}

	/* Nothing to do if manual BKOPS is already on */
	if (ext_csd[EXT_CSD_BKOPS_EN] & 0x1) {
		puts("Background operations already enabled\n");
		return 0;
	}

	ret = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_BKOPS_EN, 1);
	if (ret) {
		puts("Failed to enable manual background operations\n");
		return ret;
	}

	puts("Enabled manual background operations\n");

	return 0;
}
#endif