/*
 * drivers/mmc/mmc.c (u-boot)
 * Commit: "mmc: compile out more code if support for UHS and HS200 is
 * not enabled"
 */
1 /*
2 * Copyright 2008, Freescale Semiconductor, Inc
3 * Andy Fleming
4 *
5 * Based vaguely on the Linux code
6 *
7 * SPDX-License-Identifier: GPL-2.0+
8 */
9
10 #include <config.h>
11 #include <common.h>
12 #include <command.h>
13 #include <dm.h>
14 #include <dm/device-internal.h>
15 #include <errno.h>
16 #include <mmc.h>
17 #include <part.h>
18 #include <power/regulator.h>
19 #include <malloc.h>
20 #include <memalign.h>
21 #include <linux/list.h>
22 #include <div64.h>
23 #include "mmc_private.h"
24
25 static const unsigned int sd_au_size[] = {
26 0, SZ_16K / 512, SZ_32K / 512,
27 SZ_64K / 512, SZ_128K / 512, SZ_256K / 512,
28 SZ_512K / 512, SZ_1M / 512, SZ_2M / 512,
29 SZ_4M / 512, SZ_8M / 512, (SZ_8M + SZ_4M) / 512,
30 SZ_16M / 512, (SZ_16M + SZ_8M) / 512, SZ_32M / 512, SZ_64M / 512,
31 };
32
33 static int mmc_set_signal_voltage(struct mmc *mmc, uint signal_voltage);
34 static int mmc_power_cycle(struct mmc *mmc);
35 static int mmc_select_mode_and_width(struct mmc *mmc, uint card_caps);
36
#if CONFIG_IS_ENABLED(MMC_TINY)
/* MMC_TINY supports a single controller/card; keep it in a static. */
static struct mmc mmc_static;

/* With one static device, every dev_num resolves to the same card. */
struct mmc *find_mmc_device(int dev_num)
{
	return &mmc_static;
}

/* Kick off initialization early for the single device, if requested. */
void mmc_do_preinit(void)
{
	struct mmc *m = &mmc_static;
#ifdef CONFIG_FSL_ESDHC_ADAPTER_IDENT
	/* FSL adapter-card identification requires preinit to be forced */
	mmc_set_preinit(m, 1);
#endif
	if (m->preinit)
		mmc_start_init(m);
}

/* Block descriptor is embedded directly in struct mmc in TINY mode. */
struct blk_desc *mmc_get_blk_desc(struct mmc *mmc)
{
	return &mmc->block_dev;
}
#endif
59
60 #if !CONFIG_IS_ENABLED(DM_MMC)
61
62 #if CONFIG_IS_ENABLED(MMC_UHS_SUPPORT)
63 static int mmc_wait_dat0(struct mmc *mmc, int state, int timeout)
64 {
65 return -ENOSYS;
66 }
67 #endif
68
69 __weak int board_mmc_getwp(struct mmc *mmc)
70 {
71 return -1;
72 }
73
74 int mmc_getwp(struct mmc *mmc)
75 {
76 int wp;
77
78 wp = board_mmc_getwp(mmc);
79
80 if (wp < 0) {
81 if (mmc->cfg->ops->getwp)
82 wp = mmc->cfg->ops->getwp(mmc);
83 else
84 wp = 0;
85 }
86
87 return wp;
88 }
89
90 __weak int board_mmc_getcd(struct mmc *mmc)
91 {
92 return -1;
93 }
94 #endif
95
#ifdef CONFIG_MMC_TRACE
/* Log the command index and argument before it is sent to the card. */
void mmmc_trace_before_send(struct mmc *mmc, struct mmc_cmd *cmd)
{
	printf("CMD_SEND:%d\n", cmd->cmdidx);
	printf("\t\tARG\t\t\t 0x%08X\n", cmd->cmdarg);
}

/* Log the driver return code, or decode the response by its type. */
void mmmc_trace_after_send(struct mmc *mmc, struct mmc_cmd *cmd, int ret)
{
	int i;
	u8 *ptr;

	if (ret) {
		printf("\t\tRET\t\t\t %d\n", ret);
	} else {
		switch (cmd->resp_type) {
		case MMC_RSP_NONE:
			printf("\t\tMMC_RSP_NONE\n");
			break;
		case MMC_RSP_R1:
			printf("\t\tMMC_RSP_R1,5,6,7 \t 0x%08X \n",
			       cmd->response[0]);
			break;
		case MMC_RSP_R1b:
			printf("\t\tMMC_RSP_R1b\t\t 0x%08X \n",
			       cmd->response[0]);
			break;
		case MMC_RSP_R2:
			/* 136-bit response (CID/CSD): four 32-bit words */
			printf("\t\tMMC_RSP_R2\t\t 0x%08X \n",
			       cmd->response[0]);
			printf("\t\t \t\t 0x%08X \n",
			       cmd->response[1]);
			printf("\t\t \t\t 0x%08X \n",
			       cmd->response[2]);
			printf("\t\t \t\t 0x%08X \n",
			       cmd->response[3]);
			printf("\n");
			printf("\t\t\t\t\tDUMPING DATA\n");
			/* Dump the response bytes in bus order (MSB first
			 * within each word) */
			for (i = 0; i < 4; i++) {
				int j;
				printf("\t\t\t\t\t%03d - ", i*4);
				ptr = (u8 *)&cmd->response[i];
				ptr += 3;
				for (j = 0; j < 4; j++)
					printf("%02X ", *ptr--);
				printf("\n");
			}
			break;
		case MMC_RSP_R3:
			printf("\t\tMMC_RSP_R3,4\t\t 0x%08X \n",
			       cmd->response[0]);
			break;
		default:
			printf("\t\tERROR MMC rsp not supported\n");
			break;
		}
	}
}

/* Print the card's current state as decoded from a status response. */
void mmc_trace_state(struct mmc *mmc, struct mmc_cmd *cmd)
{
	int status;

	/* CURRENT_STATE occupies bits [12:9] of the card status word */
	status = (cmd->response[0] & MMC_STATUS_CURR_STATE) >> 9;
	printf("CURR STATE:%d\n", status);
}
#endif
163
164 #if CONFIG_IS_ENABLED(MMC_VERBOSE) || defined(DEBUG)
165 const char *mmc_mode_name(enum bus_mode mode)
166 {
167 static const char *const names[] = {
168 [MMC_LEGACY] = "MMC legacy",
169 [SD_LEGACY] = "SD Legacy",
170 [MMC_HS] = "MMC High Speed (26MHz)",
171 [SD_HS] = "SD High Speed (50MHz)",
172 [UHS_SDR12] = "UHS SDR12 (25MHz)",
173 [UHS_SDR25] = "UHS SDR25 (50MHz)",
174 [UHS_SDR50] = "UHS SDR50 (100MHz)",
175 [UHS_SDR104] = "UHS SDR104 (208MHz)",
176 [UHS_DDR50] = "UHS DDR50 (50MHz)",
177 [MMC_HS_52] = "MMC High Speed (52MHz)",
178 [MMC_DDR_52] = "MMC DDR52 (52MHz)",
179 [MMC_HS_200] = "HS200 (200MHz)",
180 };
181
182 if (mode >= MMC_MODES_END)
183 return "Unknown mode";
184 else
185 return names[mode];
186 }
187 #endif
188
189 static uint mmc_mode2freq(struct mmc *mmc, enum bus_mode mode)
190 {
191 static const int freqs[] = {
192 [SD_LEGACY] = 25000000,
193 [MMC_HS] = 26000000,
194 [SD_HS] = 50000000,
195 #if CONFIG_IS_ENABLED(MMC_UHS_SUPPORT)
196 [UHS_SDR12] = 25000000,
197 [UHS_SDR25] = 50000000,
198 [UHS_SDR50] = 100000000,
199 [UHS_DDR50] = 50000000,
200 #ifdef MMC_SUPPORTS_TUNING
201 [UHS_SDR104] = 208000000,
202 #endif
203 #endif
204 [MMC_HS_52] = 52000000,
205 [MMC_DDR_52] = 52000000,
206 #if CONFIG_IS_ENABLED(MMC_HS200_SUPPORT)
207 [MMC_HS_200] = 200000000,
208 #endif
209 };
210
211 if (mode == MMC_LEGACY)
212 return mmc->legacy_speed;
213 else if (mode >= MMC_MODES_END)
214 return 0;
215 else
216 return freqs[mode];
217 }
218
219 static int mmc_select_mode(struct mmc *mmc, enum bus_mode mode)
220 {
221 mmc->selected_mode = mode;
222 mmc->tran_speed = mmc_mode2freq(mmc, mode);
223 mmc->ddr_mode = mmc_is_mode_ddr(mode);
224 debug("selecting mode %s (freq : %d MHz)\n", mmc_mode_name(mode),
225 mmc->tran_speed / 1000000);
226 return 0;
227 }
228
#if !CONFIG_IS_ENABLED(DM_MMC)
/*
 * Send a command (and optional data transfer) to the card through the
 * controller driver, with tracing hooks around the call.
 */
int mmc_send_cmd(struct mmc *mmc, struct mmc_cmd *cmd, struct mmc_data *data)
{
	int ret;

	mmmc_trace_before_send(mmc, cmd);
	ret = mmc->cfg->ops->send_cmd(mmc, cmd, data);
	mmmc_trace_after_send(mmc, cmd, ret);

	return ret;
}
#endif
241
242 int mmc_send_status(struct mmc *mmc, int timeout)
243 {
244 struct mmc_cmd cmd;
245 int err, retries = 5;
246
247 cmd.cmdidx = MMC_CMD_SEND_STATUS;
248 cmd.resp_type = MMC_RSP_R1;
249 if (!mmc_host_is_spi(mmc))
250 cmd.cmdarg = mmc->rca << 16;
251
252 while (1) {
253 err = mmc_send_cmd(mmc, &cmd, NULL);
254 if (!err) {
255 if ((cmd.response[0] & MMC_STATUS_RDY_FOR_DATA) &&
256 (cmd.response[0] & MMC_STATUS_CURR_STATE) !=
257 MMC_STATE_PRG)
258 break;
259
260 if (cmd.response[0] & MMC_STATUS_MASK) {
261 #if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT)
262 pr_err("Status Error: 0x%08X\n",
263 cmd.response[0]);
264 #endif
265 return -ECOMM;
266 }
267 } else if (--retries < 0)
268 return err;
269
270 if (timeout-- <= 0)
271 break;
272
273 udelay(1000);
274 }
275
276 mmc_trace_state(mmc, &cmd);
277 if (timeout <= 0) {
278 #if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT)
279 pr_err("Timeout waiting card ready\n");
280 #endif
281 return -ETIMEDOUT;
282 }
283
284 return 0;
285 }
286
287 int mmc_set_blocklen(struct mmc *mmc, int len)
288 {
289 struct mmc_cmd cmd;
290 int err;
291
292 if (mmc->ddr_mode)
293 return 0;
294
295 cmd.cmdidx = MMC_CMD_SET_BLOCKLEN;
296 cmd.resp_type = MMC_RSP_R1;
297 cmd.cmdarg = len;
298
299 err = mmc_send_cmd(mmc, &cmd, NULL);
300
301 #ifdef CONFIG_MMC_QUIRKS
302 if (err && (mmc->quirks & MMC_QUIRK_RETRY_SET_BLOCKLEN)) {
303 int retries = 4;
304 /*
305 * It has been seen that SET_BLOCKLEN may fail on the first
306 * attempt, let's try a few more time
307 */
308 do {
309 err = mmc_send_cmd(mmc, &cmd, NULL);
310 if (!err)
311 break;
312 } while (retries--);
313 }
314 #endif
315
316 return err;
317 }
318
#ifdef MMC_SUPPORTS_TUNING
/* Reference tuning block for 4-bit bus width (per SD/eMMC tuning) */
static const u8 tuning_blk_pattern_4bit[] = {
	0xff, 0x0f, 0xff, 0x00, 0xff, 0xcc, 0xc3, 0xcc,
	0xc3, 0x3c, 0xcc, 0xff, 0xfe, 0xff, 0xfe, 0xef,
	0xff, 0xdf, 0xff, 0xdd, 0xff, 0xfb, 0xff, 0xfb,
	0xbf, 0xff, 0x7f, 0xff, 0x77, 0xf7, 0xbd, 0xef,
	0xff, 0xf0, 0xff, 0xf0, 0x0f, 0xfc, 0xcc, 0x3c,
	0xcc, 0x33, 0xcc, 0xcf, 0xff, 0xef, 0xff, 0xee,
	0xff, 0xfd, 0xff, 0xfd, 0xdf, 0xff, 0xbf, 0xff,
	0xbb, 0xff, 0xf7, 0xff, 0xf7, 0x7f, 0x7b, 0xde,
};

/* Reference tuning block for 8-bit bus width */
static const u8 tuning_blk_pattern_8bit[] = {
	0xff, 0xff, 0x00, 0xff, 0xff, 0xff, 0x00, 0x00,
	0xff, 0xff, 0xcc, 0xcc, 0xcc, 0x33, 0xcc, 0xcc,
	0xcc, 0x33, 0x33, 0xcc, 0xcc, 0xcc, 0xff, 0xff,
	0xff, 0xee, 0xff, 0xff, 0xff, 0xee, 0xee, 0xff,
	0xff, 0xff, 0xdd, 0xff, 0xff, 0xff, 0xdd, 0xdd,
	0xff, 0xff, 0xff, 0xbb, 0xff, 0xff, 0xff, 0xbb,
	0xbb, 0xff, 0xff, 0xff, 0x77, 0xff, 0xff, 0xff,
	0x77, 0x77, 0xff, 0x77, 0xbb, 0xdd, 0xee, 0xff,
	0xff, 0xff, 0xff, 0x00, 0xff, 0xff, 0xff, 0x00,
	0x00, 0xff, 0xff, 0xcc, 0xcc, 0xcc, 0x33, 0xcc,
	0xcc, 0xcc, 0x33, 0x33, 0xcc, 0xcc, 0xcc, 0xff,
	0xff, 0xff, 0xee, 0xff, 0xff, 0xff, 0xee, 0xee,
	0xff, 0xff, 0xff, 0xdd, 0xff, 0xff, 0xff, 0xdd,
	0xdd, 0xff, 0xff, 0xff, 0xbb, 0xff, 0xff, 0xff,
	0xbb, 0xbb, 0xff, 0xff, 0xff, 0x77, 0xff, 0xff,
	0xff, 0x77, 0x77, 0xff, 0x77, 0xbb, 0xdd, 0xee,
};

/*
 * Issue a tuning command (@opcode) and compare the returned block
 * against the reference pattern for the current bus width.
 * Returns 0 on match, -EIO on mismatch, -EINVAL for an unsupported
 * bus width, or the transport error code.
 */
int mmc_send_tuning(struct mmc *mmc, u32 opcode, int *cmd_error)
{
	struct mmc_cmd cmd;
	struct mmc_data data;
	const u8 *ref_pattern;
	int len, err;

	switch (mmc->bus_width) {
	case 8:
		ref_pattern = tuning_blk_pattern_8bit;
		len = sizeof(tuning_blk_pattern_8bit);
		break;
	case 4:
		ref_pattern = tuning_blk_pattern_4bit;
		len = sizeof(tuning_blk_pattern_4bit);
		break;
	default:
		return -EINVAL;
	}

	ALLOC_CACHE_ALIGN_BUFFER(u8, data_buf, len);

	cmd.cmdidx = opcode;
	cmd.cmdarg = 0;
	cmd.resp_type = MMC_RSP_R1;

	data.dest = (void *)data_buf;
	data.blocks = 1;
	data.blocksize = len;
	data.flags = MMC_DATA_READ;

	err = mmc_send_cmd(mmc, &cmd, &data);
	if (err)
		return err;

	if (memcmp(data_buf, ref_pattern, len))
		return -EIO;

	return 0;
}
#endif
388
389 static int mmc_read_blocks(struct mmc *mmc, void *dst, lbaint_t start,
390 lbaint_t blkcnt)
391 {
392 struct mmc_cmd cmd;
393 struct mmc_data data;
394
395 if (blkcnt > 1)
396 cmd.cmdidx = MMC_CMD_READ_MULTIPLE_BLOCK;
397 else
398 cmd.cmdidx = MMC_CMD_READ_SINGLE_BLOCK;
399
400 if (mmc->high_capacity)
401 cmd.cmdarg = start;
402 else
403 cmd.cmdarg = start * mmc->read_bl_len;
404
405 cmd.resp_type = MMC_RSP_R1;
406
407 data.dest = dst;
408 data.blocks = blkcnt;
409 data.blocksize = mmc->read_bl_len;
410 data.flags = MMC_DATA_READ;
411
412 if (mmc_send_cmd(mmc, &cmd, &data))
413 return 0;
414
415 if (blkcnt > 1) {
416 cmd.cmdidx = MMC_CMD_STOP_TRANSMISSION;
417 cmd.cmdarg = 0;
418 cmd.resp_type = MMC_RSP_R1b;
419 if (mmc_send_cmd(mmc, &cmd, NULL)) {
420 #if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT)
421 pr_err("mmc fail to send stop cmd\n");
422 #endif
423 return 0;
424 }
425 }
426
427 return blkcnt;
428 }
429
/*
 * Block-layer read entry point: validate the request, select the
 * hardware partition, then read in chunks bounded by the controller's
 * b_max limit. Returns the number of blocks read, or 0 on any error.
 */
#if CONFIG_IS_ENABLED(BLK)
ulong mmc_bread(struct udevice *dev, lbaint_t start, lbaint_t blkcnt, void *dst)
#else
ulong mmc_bread(struct blk_desc *block_dev, lbaint_t start, lbaint_t blkcnt,
		void *dst)
#endif
{
#if CONFIG_IS_ENABLED(BLK)
	struct blk_desc *block_dev = dev_get_uclass_platdata(dev);
#endif
	int dev_num = block_dev->devnum;
	int err;
	lbaint_t cur, blocks_todo = blkcnt;

	if (blkcnt == 0)
		return 0;

	struct mmc *mmc = find_mmc_device(dev_num);
	if (!mmc)
		return 0;

	/* TINY builds have no hwpart uclass helper; switch directly */
	if (CONFIG_IS_ENABLED(MMC_TINY))
		err = mmc_switch_part(mmc, block_dev->hwpart);
	else
		err = blk_dselect_hwpart(block_dev, block_dev->hwpart);

	if (err < 0)
		return 0;

	/* Reject reads past the end of the device */
	if ((start + blkcnt) > block_dev->lba) {
#if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT)
		pr_err("MMC: block number 0x" LBAF " exceeds max(0x" LBAF ")\n",
		       start + blkcnt, block_dev->lba);
#endif
		return 0;
	}

	if (mmc_set_blocklen(mmc, mmc->read_bl_len)) {
		debug("%s: Failed to set blocklen\n", __func__);
		return 0;
	}

	/* Transfer in chunks no larger than the controller's b_max */
	do {
		cur = (blocks_todo > mmc->cfg->b_max) ?
			mmc->cfg->b_max : blocks_todo;
		if (mmc_read_blocks(mmc, dst, start, cur) != cur) {
			debug("%s: Failed to read blocks\n", __func__);
			return 0;
		}
		blocks_todo -= cur;
		start += cur;
		dst += cur * mmc->read_bl_len;
	} while (blocks_todo > 0);

	return blkcnt;
}
486
487 static int mmc_go_idle(struct mmc *mmc)
488 {
489 struct mmc_cmd cmd;
490 int err;
491
492 udelay(1000);
493
494 cmd.cmdidx = MMC_CMD_GO_IDLE_STATE;
495 cmd.cmdarg = 0;
496 cmd.resp_type = MMC_RSP_NONE;
497
498 err = mmc_send_cmd(mmc, &cmd, NULL);
499
500 if (err)
501 return err;
502
503 udelay(2000);
504
505 return 0;
506 }
507
#if CONFIG_IS_ENABLED(MMC_UHS_SUPPORT)
/*
 * Perform the SD voltage-switch sequence (CMD11) to move the signalling
 * level to @signal_voltage. The command/clock-gating/DAT0 handshake
 * ordering below follows the SD spec and must not be reordered.
 */
static int mmc_switch_voltage(struct mmc *mmc, int signal_voltage)
{
	struct mmc_cmd cmd;
	int err = 0;

	/*
	 * Send CMD11 only if the request is to switch the card to
	 * 1.8V signalling.
	 */
	if (signal_voltage == MMC_SIGNAL_VOLTAGE_330)
		return mmc_set_signal_voltage(mmc, signal_voltage);

	cmd.cmdidx = SD_CMD_SWITCH_UHS18V;
	cmd.cmdarg = 0;
	cmd.resp_type = MMC_RSP_R1;

	err = mmc_send_cmd(mmc, &cmd, NULL);
	if (err)
		return err;

	if (!mmc_host_is_spi(mmc) && (cmd.response[0] & MMC_STATUS_ERROR))
		return -EIO;

	/*
	 * The card should drive cmd and dat[0:3] low immediately
	 * after the response of cmd11, but wait 100 us to be sure
	 */
	err = mmc_wait_dat0(mmc, 0, 100);
	if (err == -ENOSYS)
		udelay(100);
	else if (err)
		return -ETIMEDOUT;

	/*
	 * During a signal voltage level switch, the clock must be gated
	 * for 5 ms according to the SD spec
	 */
	mmc_set_clock(mmc, mmc->clock, true);

	err = mmc_set_signal_voltage(mmc, signal_voltage);
	if (err)
		return err;

	/* Keep clock gated for at least 10 ms, though spec only says 5 ms */
	mdelay(10);
	mmc_set_clock(mmc, mmc->clock, false);

	/*
	 * Failure to switch is indicated by the card holding
	 * dat[0:3] low. Wait for at least 1 ms according to spec
	 */
	err = mmc_wait_dat0(mmc, 1, 1000);
	if (err == -ENOSYS)
		udelay(1000);
	else if (err)
		return -ETIMEDOUT;

	return 0;
}
#endif
569
570 static int sd_send_op_cond(struct mmc *mmc, bool uhs_en)
571 {
572 int timeout = 1000;
573 int err;
574 struct mmc_cmd cmd;
575
576 while (1) {
577 cmd.cmdidx = MMC_CMD_APP_CMD;
578 cmd.resp_type = MMC_RSP_R1;
579 cmd.cmdarg = 0;
580
581 err = mmc_send_cmd(mmc, &cmd, NULL);
582
583 if (err)
584 return err;
585
586 cmd.cmdidx = SD_CMD_APP_SEND_OP_COND;
587 cmd.resp_type = MMC_RSP_R3;
588
589 /*
590 * Most cards do not answer if some reserved bits
591 * in the ocr are set. However, Some controller
592 * can set bit 7 (reserved for low voltages), but
593 * how to manage low voltages SD card is not yet
594 * specified.
595 */
596 cmd.cmdarg = mmc_host_is_spi(mmc) ? 0 :
597 (mmc->cfg->voltages & 0xff8000);
598
599 if (mmc->version == SD_VERSION_2)
600 cmd.cmdarg |= OCR_HCS;
601
602 if (uhs_en)
603 cmd.cmdarg |= OCR_S18R;
604
605 err = mmc_send_cmd(mmc, &cmd, NULL);
606
607 if (err)
608 return err;
609
610 if (cmd.response[0] & OCR_BUSY)
611 break;
612
613 if (timeout-- <= 0)
614 return -EOPNOTSUPP;
615
616 udelay(1000);
617 }
618
619 if (mmc->version != SD_VERSION_2)
620 mmc->version = SD_VERSION_1_0;
621
622 if (mmc_host_is_spi(mmc)) { /* read OCR for spi */
623 cmd.cmdidx = MMC_CMD_SPI_READ_OCR;
624 cmd.resp_type = MMC_RSP_R3;
625 cmd.cmdarg = 0;
626
627 err = mmc_send_cmd(mmc, &cmd, NULL);
628
629 if (err)
630 return err;
631 }
632
633 mmc->ocr = cmd.response[0];
634
635 #if CONFIG_IS_ENABLED(MMC_UHS_SUPPORT)
636 if (uhs_en && !(mmc_host_is_spi(mmc)) && (cmd.response[0] & 0x41000000)
637 == 0x41000000) {
638 err = mmc_switch_voltage(mmc, MMC_SIGNAL_VOLTAGE_180);
639 if (err)
640 return err;
641 }
642 #endif
643
644 mmc->high_capacity = ((mmc->ocr & OCR_HCS) == OCR_HCS);
645 mmc->rca = 0;
646
647 return 0;
648 }
649
650 static int mmc_send_op_cond_iter(struct mmc *mmc, int use_arg)
651 {
652 struct mmc_cmd cmd;
653 int err;
654
655 cmd.cmdidx = MMC_CMD_SEND_OP_COND;
656 cmd.resp_type = MMC_RSP_R3;
657 cmd.cmdarg = 0;
658 if (use_arg && !mmc_host_is_spi(mmc))
659 cmd.cmdarg = OCR_HCS |
660 (mmc->cfg->voltages &
661 (mmc->ocr & OCR_VOLTAGE_MASK)) |
662 (mmc->ocr & OCR_ACCESS_MODE);
663
664 err = mmc_send_cmd(mmc, &cmd, NULL);
665 if (err)
666 return err;
667 mmc->ocr = cmd.response[0];
668 return 0;
669 }
670
671 static int mmc_send_op_cond(struct mmc *mmc)
672 {
673 int err, i;
674
675 /* Some cards seem to need this */
676 mmc_go_idle(mmc);
677
678 /* Asking to the card its capabilities */
679 for (i = 0; i < 2; i++) {
680 err = mmc_send_op_cond_iter(mmc, i != 0);
681 if (err)
682 return err;
683
684 /* exit if not busy (flag seems to be inverted) */
685 if (mmc->ocr & OCR_BUSY)
686 break;
687 }
688 mmc->op_cond_pending = 1;
689 return 0;
690 }
691
692 static int mmc_complete_op_cond(struct mmc *mmc)
693 {
694 struct mmc_cmd cmd;
695 int timeout = 1000;
696 uint start;
697 int err;
698
699 mmc->op_cond_pending = 0;
700 if (!(mmc->ocr & OCR_BUSY)) {
701 /* Some cards seem to need this */
702 mmc_go_idle(mmc);
703
704 start = get_timer(0);
705 while (1) {
706 err = mmc_send_op_cond_iter(mmc, 1);
707 if (err)
708 return err;
709 if (mmc->ocr & OCR_BUSY)
710 break;
711 if (get_timer(start) > timeout)
712 return -EOPNOTSUPP;
713 udelay(100);
714 }
715 }
716
717 if (mmc_host_is_spi(mmc)) { /* read OCR for spi */
718 cmd.cmdidx = MMC_CMD_SPI_READ_OCR;
719 cmd.resp_type = MMC_RSP_R3;
720 cmd.cmdarg = 0;
721
722 err = mmc_send_cmd(mmc, &cmd, NULL);
723
724 if (err)
725 return err;
726
727 mmc->ocr = cmd.response[0];
728 }
729
730 mmc->version = MMC_VERSION_UNKNOWN;
731
732 mmc->high_capacity = ((mmc->ocr & OCR_HCS) == OCR_HCS);
733 mmc->rca = 1;
734
735 return 0;
736 }
737
738
739 static int mmc_send_ext_csd(struct mmc *mmc, u8 *ext_csd)
740 {
741 struct mmc_cmd cmd;
742 struct mmc_data data;
743 int err;
744
745 /* Get the Card Status Register */
746 cmd.cmdidx = MMC_CMD_SEND_EXT_CSD;
747 cmd.resp_type = MMC_RSP_R1;
748 cmd.cmdarg = 0;
749
750 data.dest = (char *)ext_csd;
751 data.blocks = 1;
752 data.blocksize = MMC_MAX_BLOCK_LEN;
753 data.flags = MMC_DATA_READ;
754
755 err = mmc_send_cmd(mmc, &cmd, &data);
756
757 return err;
758 }
759
760 int mmc_switch(struct mmc *mmc, u8 set, u8 index, u8 value)
761 {
762 struct mmc_cmd cmd;
763 int timeout = 1000;
764 int retries = 3;
765 int ret;
766
767 cmd.cmdidx = MMC_CMD_SWITCH;
768 cmd.resp_type = MMC_RSP_R1b;
769 cmd.cmdarg = (MMC_SWITCH_MODE_WRITE_BYTE << 24) |
770 (index << 16) |
771 (value << 8);
772
773 while (retries > 0) {
774 ret = mmc_send_cmd(mmc, &cmd, NULL);
775
776 /* Waiting for the ready status */
777 if (!ret) {
778 ret = mmc_send_status(mmc, timeout);
779 return ret;
780 }
781
782 retries--;
783 }
784
785 return ret;
786
787 }
788
/*
 * Program the EXT_CSD HS_TIMING field for @mode and, for the HS modes,
 * read EXT_CSD back to confirm the switch actually took effect.
 * Returns -EINVAL for an unsupported mode, -ENOTSUPP when the card
 * rejected high speed, or a transfer error code.
 */
static int mmc_set_card_speed(struct mmc *mmc, enum bus_mode mode)
{
	int err;
	int speed_bits;

	ALLOC_CACHE_ALIGN_BUFFER(u8, test_csd, MMC_MAX_BLOCK_LEN);

	switch (mode) {
	case MMC_HS:
	case MMC_HS_52:
	case MMC_DDR_52:
		speed_bits = EXT_CSD_TIMING_HS;
		break;
#if CONFIG_IS_ENABLED(MMC_HS200_SUPPORT)
	case MMC_HS_200:
		speed_bits = EXT_CSD_TIMING_HS200;
		break;
#endif
	case MMC_LEGACY:
		speed_bits = EXT_CSD_TIMING_LEGACY;
		break;
	default:
		return -EINVAL;
	}
	err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_HS_TIMING,
			 speed_bits);
	if (err)
		return err;

	if ((mode == MMC_HS) || (mode == MMC_HS_52)) {
		/* Now check to see that it worked */
		err = mmc_send_ext_csd(mmc, test_csd);
		if (err)
			return err;

		/* No high-speed support */
		if (!test_csd[EXT_CSD_HS_TIMING])
			return -ENOTSUPP;
	}

	return 0;
}
831
832 static int mmc_get_capabilities(struct mmc *mmc)
833 {
834 u8 *ext_csd = mmc->ext_csd;
835 char cardtype;
836
837 mmc->card_caps = MMC_MODE_1BIT | MMC_CAP(MMC_LEGACY);
838
839 if (mmc_host_is_spi(mmc))
840 return 0;
841
842 /* Only version 4 supports high-speed */
843 if (mmc->version < MMC_VERSION_4)
844 return 0;
845
846 if (!ext_csd) {
847 pr_err("No ext_csd found!\n"); /* this should enver happen */
848 return -ENOTSUPP;
849 }
850
851 mmc->card_caps |= MMC_MODE_4BIT | MMC_MODE_8BIT;
852
853 cardtype = ext_csd[EXT_CSD_CARD_TYPE] & 0x3f;
854 mmc->cardtype = cardtype;
855
856 #if CONFIG_IS_ENABLED(MMC_HS200_SUPPORT)
857 if (cardtype & (EXT_CSD_CARD_TYPE_HS200_1_2V |
858 EXT_CSD_CARD_TYPE_HS200_1_8V)) {
859 mmc->card_caps |= MMC_MODE_HS200;
860 }
861 #endif
862 if (cardtype & EXT_CSD_CARD_TYPE_52) {
863 if (cardtype & EXT_CSD_CARD_TYPE_DDR_52)
864 mmc->card_caps |= MMC_MODE_DDR_52MHz;
865 mmc->card_caps |= MMC_MODE_HS_52MHz;
866 }
867 if (cardtype & EXT_CSD_CARD_TYPE_26)
868 mmc->card_caps |= MMC_MODE_HS;
869
870 return 0;
871 }
872
873 static int mmc_set_capacity(struct mmc *mmc, int part_num)
874 {
875 switch (part_num) {
876 case 0:
877 mmc->capacity = mmc->capacity_user;
878 break;
879 case 1:
880 case 2:
881 mmc->capacity = mmc->capacity_boot;
882 break;
883 case 3:
884 mmc->capacity = mmc->capacity_rpmb;
885 break;
886 case 4:
887 case 5:
888 case 6:
889 case 7:
890 mmc->capacity = mmc->capacity_gp[part_num - 4];
891 break;
892 default:
893 return -1;
894 }
895
896 mmc_get_blk_desc(mmc)->lba = lldiv(mmc->capacity, mmc->read_bl_len);
897
898 return 0;
899 }
900
#if CONFIG_IS_ENABLED(MMC_HS200_SUPPORT)
/*
 * Before switching to a non-default hardware partition (access bits
 * set in @part_num), drop out of modes that are forbidden there (HS200
 * per this table) and re-select the best allowed mode/width.
 */
static int mmc_boot_part_access_chk(struct mmc *mmc, unsigned int part_num)
{
	int forbidden = 0;
	bool change = false;

	if (part_num & PART_ACCESS_MASK)
		forbidden = MMC_CAP(MMC_HS_200);

	if (MMC_CAP(mmc->selected_mode) & forbidden) {
		debug("selected mode (%s) is forbidden for part %d\n",
		      mmc_mode_name(mmc->selected_mode), part_num);
		change = true;
	} else if (mmc->selected_mode != mmc->best_mode) {
		debug("selected mode is not optimal\n");
		change = true;
	}

	if (change)
		return mmc_select_mode_and_width(mmc,
						 mmc->card_caps & ~forbidden);

	return 0;
}
#else
/* Without HS200 support there is nothing to re-negotiate. */
static inline int mmc_boot_part_access_chk(struct mmc *mmc,
					   unsigned int part_num)
{
	return 0;
}
#endif
932
933 int mmc_switch_part(struct mmc *mmc, unsigned int part_num)
934 {
935 int ret;
936
937 ret = mmc_boot_part_access_chk(mmc, part_num);
938 if (ret)
939 return ret;
940
941 ret = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_PART_CONF,
942 (mmc->part_config & ~PART_ACCESS_MASK)
943 | (part_num & PART_ACCESS_MASK));
944
945 /*
946 * Set the capacity if the switch succeeded or was intended
947 * to return to representing the raw device.
948 */
949 if ((ret == 0) || ((ret == -ENODEV) && (part_num == 0))) {
950 ret = mmc_set_capacity(mmc, part_num);
951 mmc_get_blk_desc(mmc)->hwpart = part_num;
952 }
953
954 return ret;
955 }
956
#if CONFIG_IS_ENABLED(MMC_HW_PARTITIONING)
/*
 * Validate and optionally apply an eMMC hardware-partition layout.
 * @mode selects a dry-run check (MMC_HWPART_CONF_CHECK), writing the
 * configuration (MMC_HWPART_CONF_SET), or writing plus setting the
 * one-time PARTITION_SETTING_COMPLETED bit (MMC_HWPART_CONF_COMPLETE).
 * The sequence of EXT_CSD writes below is order-sensitive; the new
 * layout only takes effect after a power cycle.
 */
int mmc_hwpart_config(struct mmc *mmc,
		      const struct mmc_hwpart_conf *conf,
		      enum mmc_hwpart_conf_mode mode)
{
	u8 part_attrs = 0;
	u32 enh_size_mult;
	u32 enh_start_addr;
	u32 gp_size_mult[4];
	u32 max_enh_size_mult;
	u32 tot_enh_size_mult = 0;
	u8 wr_rel_set;
	int i, pidx, err;
	ALLOC_CACHE_ALIGN_BUFFER(u8, ext_csd, MMC_MAX_BLOCK_LEN);

	if (mode < MMC_HWPART_CONF_CHECK || mode > MMC_HWPART_CONF_COMPLETE)
		return -EINVAL;

	if (IS_SD(mmc) || (mmc->version < MMC_VERSION_4_41)) {
		pr_err("eMMC >= 4.4 required for enhanced user data area\n");
		return -EMEDIUMTYPE;
	}

	if (!(mmc->part_support & PART_SUPPORT)) {
		pr_err("Card does not support partitioning\n");
		return -EMEDIUMTYPE;
	}

	if (!mmc->hc_wp_grp_size) {
		pr_err("Card does not define HC WP group size\n");
		return -EMEDIUMTYPE;
	}

	/* check partition alignment and total enhanced size */
	if (conf->user.enh_size) {
		if (conf->user.enh_size % mmc->hc_wp_grp_size ||
		    conf->user.enh_start % mmc->hc_wp_grp_size) {
			pr_err("User data enhanced area not HC WP group "
			       "size aligned\n");
			return -EINVAL;
		}
		part_attrs |= EXT_CSD_ENH_USR;
		enh_size_mult = conf->user.enh_size / mmc->hc_wp_grp_size;
		if (mmc->high_capacity) {
			enh_start_addr = conf->user.enh_start;
		} else {
			/* byte-addressed cards: convert sectors to bytes */
			enh_start_addr = (conf->user.enh_start << 9);
		}
	} else {
		enh_size_mult = 0;
		enh_start_addr = 0;
	}
	tot_enh_size_mult += enh_size_mult;

	/* validate each GP partition and accumulate enhanced size */
	for (pidx = 0; pidx < 4; pidx++) {
		if (conf->gp_part[pidx].size % mmc->hc_wp_grp_size) {
			pr_err("GP%i partition not HC WP group size "
			       "aligned\n", pidx+1);
			return -EINVAL;
		}
		gp_size_mult[pidx] = conf->gp_part[pidx].size / mmc->hc_wp_grp_size;
		if (conf->gp_part[pidx].size && conf->gp_part[pidx].enhanced) {
			part_attrs |= EXT_CSD_ENH_GP(pidx);
			tot_enh_size_mult += gp_size_mult[pidx];
		}
	}

	if (part_attrs && ! (mmc->part_support & ENHNCD_SUPPORT)) {
		pr_err("Card does not support enhanced attribute\n");
		return -EMEDIUMTYPE;
	}

	err = mmc_send_ext_csd(mmc, ext_csd);
	if (err)
		return err;

	/* MAX_ENH_SIZE_MULT is a 24-bit little-endian field */
	max_enh_size_mult =
		(ext_csd[EXT_CSD_MAX_ENH_SIZE_MULT+2] << 16) +
		(ext_csd[EXT_CSD_MAX_ENH_SIZE_MULT+1] << 8) +
		ext_csd[EXT_CSD_MAX_ENH_SIZE_MULT];
	if (tot_enh_size_mult > max_enh_size_mult) {
		pr_err("Total enhanced size exceeds maximum (%u > %u)\n",
		       tot_enh_size_mult, max_enh_size_mult);
		return -EMEDIUMTYPE;
	}

	/* The default value of EXT_CSD_WR_REL_SET is device
	 * dependent, the values can only be changed if the
	 * EXT_CSD_HS_CTRL_REL bit is set. The values can be
	 * changed only once and before partitioning is completed. */
	wr_rel_set = ext_csd[EXT_CSD_WR_REL_SET];
	if (conf->user.wr_rel_change) {
		if (conf->user.wr_rel_set)
			wr_rel_set |= EXT_CSD_WR_DATA_REL_USR;
		else
			wr_rel_set &= ~EXT_CSD_WR_DATA_REL_USR;
	}
	for (pidx = 0; pidx < 4; pidx++) {
		if (conf->gp_part[pidx].wr_rel_change) {
			if (conf->gp_part[pidx].wr_rel_set)
				wr_rel_set |= EXT_CSD_WR_DATA_REL_GP(pidx);
			else
				wr_rel_set &= ~EXT_CSD_WR_DATA_REL_GP(pidx);
		}
	}

	if (wr_rel_set != ext_csd[EXT_CSD_WR_REL_SET] &&
	    !(ext_csd[EXT_CSD_WR_REL_PARAM] & EXT_CSD_HS_CTRL_REL)) {
		puts("Card does not support host controlled partition write "
		     "reliability settings\n");
		return -EMEDIUMTYPE;
	}

	if (ext_csd[EXT_CSD_PARTITION_SETTING] &
	    EXT_CSD_PARTITION_SETTING_COMPLETED) {
		pr_err("Card already partitioned\n");
		return -EPERM;
	}

	if (mode == MMC_HWPART_CONF_CHECK)
		return 0;

	/* Partitioning requires high-capacity size definitions */
	if (!(ext_csd[EXT_CSD_ERASE_GROUP_DEF] & 0x01)) {
		err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
				 EXT_CSD_ERASE_GROUP_DEF, 1);

		if (err)
			return err;

		ext_csd[EXT_CSD_ERASE_GROUP_DEF] = 1;

		/* update erase group size to be high-capacity */
		mmc->erase_grp_size =
			ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE] * 1024;

	}

	/* all OK, write the configuration */
	for (i = 0; i < 4; i++) {
		err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
				 EXT_CSD_ENH_START_ADDR+i,
				 (enh_start_addr >> (i*8)) & 0xFF);
		if (err)
			return err;
	}
	for (i = 0; i < 3; i++) {
		err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
				 EXT_CSD_ENH_SIZE_MULT+i,
				 (enh_size_mult >> (i*8)) & 0xFF);
		if (err)
			return err;
	}
	for (pidx = 0; pidx < 4; pidx++) {
		for (i = 0; i < 3; i++) {
			err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
					 EXT_CSD_GP_SIZE_MULT+pidx*3+i,
					 (gp_size_mult[pidx] >> (i*8)) & 0xFF);
			if (err)
				return err;
		}
	}
	err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
			 EXT_CSD_PARTITIONS_ATTRIBUTE, part_attrs);
	if (err)
		return err;

	if (mode == MMC_HWPART_CONF_SET)
		return 0;

	/* The WR_REL_SET is a write-once register but shall be
	 * written before setting PART_SETTING_COMPLETED. As it is
	 * write-once we can only write it when completing the
	 * partitioning. */
	if (wr_rel_set != ext_csd[EXT_CSD_WR_REL_SET]) {
		err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
				 EXT_CSD_WR_REL_SET, wr_rel_set);
		if (err)
			return err;
	}

	/* Setting PART_SETTING_COMPLETED confirms the partition
	 * configuration but it only becomes effective after power
	 * cycle, so we do not adjust the partition related settings
	 * in the mmc struct. */

	err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
			 EXT_CSD_PARTITION_SETTING,
			 EXT_CSD_PARTITION_SETTING_COMPLETED);
	if (err)
		return err;

	return 0;
}
#endif
1152
1153 #if !CONFIG_IS_ENABLED(DM_MMC)
1154 int mmc_getcd(struct mmc *mmc)
1155 {
1156 int cd;
1157
1158 cd = board_mmc_getcd(mmc);
1159
1160 if (cd < 0) {
1161 if (mmc->cfg->ops->getcd)
1162 cd = mmc->cfg->ops->getcd(mmc);
1163 else
1164 cd = 1;
1165 }
1166
1167 return cd;
1168 }
1169 #endif
1170
1171 static int sd_switch(struct mmc *mmc, int mode, int group, u8 value, u8 *resp)
1172 {
1173 struct mmc_cmd cmd;
1174 struct mmc_data data;
1175
1176 /* Switch the frequency */
1177 cmd.cmdidx = SD_CMD_SWITCH_FUNC;
1178 cmd.resp_type = MMC_RSP_R1;
1179 cmd.cmdarg = (mode << 31) | 0xffffff;
1180 cmd.cmdarg &= ~(0xf << (group * 4));
1181 cmd.cmdarg |= value << (group * 4);
1182
1183 data.dest = (char *)resp;
1184 data.blocksize = 64;
1185 data.blocks = 1;
1186 data.flags = MMC_DATA_READ;
1187
1188 return mmc_send_cmd(mmc, &cmd, &data);
1189 }
1190
1191
/*
 * Probe an SD card's capabilities: read the SCR (version and bus
 * width), then query CMD6 switch status for high-speed and — with UHS
 * support enabled — the UHS bus-speed modes. Fills mmc->card_caps.
 */
static int sd_get_capabilities(struct mmc *mmc)
{
	int err;
	struct mmc_cmd cmd;
	ALLOC_CACHE_ALIGN_BUFFER(__be32, scr, 2);
	ALLOC_CACHE_ALIGN_BUFFER(__be32, switch_status, 16);
	struct mmc_data data;
	int timeout;
#if CONFIG_IS_ENABLED(MMC_UHS_SUPPORT)
	u32 sd3_bus_mode;
#endif

	mmc->card_caps = MMC_MODE_1BIT | MMC_CAP(SD_LEGACY);

	if (mmc_host_is_spi(mmc))
		return 0;

	/* Read the SCR to find out if this card supports higher speeds */
	cmd.cmdidx = MMC_CMD_APP_CMD;
	cmd.resp_type = MMC_RSP_R1;
	cmd.cmdarg = mmc->rca << 16;

	err = mmc_send_cmd(mmc, &cmd, NULL);

	if (err)
		return err;

	cmd.cmdidx = SD_CMD_APP_SEND_SCR;
	cmd.resp_type = MMC_RSP_R1;
	cmd.cmdarg = 0;

	/* SCR reads have been seen to fail transiently: allow retries */
	timeout = 3;

retry_scr:
	data.dest = (char *)scr;
	data.blocksize = 8;
	data.blocks = 1;
	data.flags = MMC_DATA_READ;

	err = mmc_send_cmd(mmc, &cmd, &data);

	if (err) {
		if (timeout--)
			goto retry_scr;

		return err;
	}

	mmc->scr[0] = __be32_to_cpu(scr[0]);
	mmc->scr[1] = __be32_to_cpu(scr[1]);

	/* SD_SPEC field (SCR bits 59:56) selects the spec version */
	switch ((mmc->scr[0] >> 24) & 0xf) {
	case 0:
		mmc->version = SD_VERSION_1_0;
		break;
	case 1:
		mmc->version = SD_VERSION_1_10;
		break;
	case 2:
		mmc->version = SD_VERSION_2;
		/* SD_SPEC3 bit promotes the card to version 3 */
		if ((mmc->scr[0] >> 15) & 0x1)
			mmc->version = SD_VERSION_3;
		break;
	default:
		mmc->version = SD_VERSION_1_0;
		break;
	}

	if (mmc->scr[0] & SD_DATA_4BIT)
		mmc->card_caps |= MMC_MODE_4BIT;

	/* Version 1.0 doesn't support switching */
	if (mmc->version == SD_VERSION_1_0)
		return 0;

	timeout = 4;
	while (timeout--) {
		err = sd_switch(mmc, SD_SWITCH_CHECK, 0, 1,
				(u8 *)switch_status);

		if (err)
			return err;

		/* The high-speed function is busy. Try again */
		if (!(__be32_to_cpu(switch_status[7]) & SD_HIGHSPEED_BUSY))
			break;
	}

	/* If high-speed isn't supported, we return */
	if (__be32_to_cpu(switch_status[3]) & SD_HIGHSPEED_SUPPORTED)
		mmc->card_caps |= MMC_CAP(SD_HS);

#if CONFIG_IS_ENABLED(MMC_UHS_SUPPORT)
	/* Version before 3.0 don't support UHS modes */
	if (mmc->version < SD_VERSION_3)
		return 0;

	sd3_bus_mode = __be32_to_cpu(switch_status[3]) >> 16 & 0x1f;
	if (sd3_bus_mode & SD_MODE_UHS_SDR104)
		mmc->card_caps |= MMC_CAP(UHS_SDR104);
	if (sd3_bus_mode & SD_MODE_UHS_SDR50)
		mmc->card_caps |= MMC_CAP(UHS_SDR50);
	if (sd3_bus_mode & SD_MODE_UHS_SDR25)
		mmc->card_caps |= MMC_CAP(UHS_SDR25);
	if (sd3_bus_mode & SD_MODE_UHS_SDR12)
		mmc->card_caps |= MMC_CAP(UHS_SDR12);
	if (sd3_bus_mode & SD_MODE_UHS_DDR50)
		mmc->card_caps |= MMC_CAP(UHS_DDR50);
#endif

	return 0;
}
1304
1305 static int sd_set_card_speed(struct mmc *mmc, enum bus_mode mode)
1306 {
1307 int err;
1308
1309 ALLOC_CACHE_ALIGN_BUFFER(uint, switch_status, 16);
1310 int speed;
1311
1312 switch (mode) {
1313 case SD_LEGACY:
1314 speed = UHS_SDR12_BUS_SPEED;
1315 break;
1316 case SD_HS:
1317 speed = HIGH_SPEED_BUS_SPEED;
1318 break;
1319 #if CONFIG_IS_ENABLED(MMC_UHS_SUPPORT)
1320 case UHS_SDR12:
1321 speed = UHS_SDR12_BUS_SPEED;
1322 break;
1323 case UHS_SDR25:
1324 speed = UHS_SDR25_BUS_SPEED;
1325 break;
1326 case UHS_SDR50:
1327 speed = UHS_SDR50_BUS_SPEED;
1328 break;
1329 case UHS_DDR50:
1330 speed = UHS_DDR50_BUS_SPEED;
1331 break;
1332 case UHS_SDR104:
1333 speed = UHS_SDR104_BUS_SPEED;
1334 break;
1335 #endif
1336 default:
1337 return -EINVAL;
1338 }
1339
1340 err = sd_switch(mmc, SD_SWITCH_SWITCH, 0, speed, (u8 *)switch_status);
1341 if (err)
1342 return err;
1343
1344 if ((__be32_to_cpu(switch_status[4]) >> 24) != speed)
1345 return -ENOTSUPP;
1346
1347 return 0;
1348 }
1349
1350 int sd_select_bus_width(struct mmc *mmc, int w)
1351 {
1352 int err;
1353 struct mmc_cmd cmd;
1354
1355 if ((w != 4) && (w != 1))
1356 return -EINVAL;
1357
1358 cmd.cmdidx = MMC_CMD_APP_CMD;
1359 cmd.resp_type = MMC_RSP_R1;
1360 cmd.cmdarg = mmc->rca << 16;
1361
1362 err = mmc_send_cmd(mmc, &cmd, NULL);
1363 if (err)
1364 return err;
1365
1366 cmd.cmdidx = SD_CMD_APP_SET_BUS_WIDTH;
1367 cmd.resp_type = MMC_RSP_R1;
1368 if (w == 4)
1369 cmd.cmdarg = 2;
1370 else if (w == 1)
1371 cmd.cmdarg = 0;
1372 err = mmc_send_cmd(mmc, &cmd, NULL);
1373 if (err)
1374 return err;
1375
1376 return 0;
1377 }
1378
/*
 * Read the 512-bit SD Status register (ACMD13) and derive the
 * allocation unit size plus the erase timeout/offset parameters.
 *
 * Return: 0 on success (an invalid AU size field is only reported via
 * debug(), not as an error), or a negative error code if the transfer
 * keeps failing after the retries.
 */
static int sd_read_ssr(struct mmc *mmc)
{
	int err, i;
	struct mmc_cmd cmd;
	/* 512 bits = 64 bytes, read as 16 big-endian 32-bit words */
	ALLOC_CACHE_ALIGN_BUFFER(uint, ssr, 16);
	struct mmc_data data;
	int timeout = 3;
	unsigned int au, eo, et, es;

	/* CMD55: the next command is application specific */
	cmd.cmdidx = MMC_CMD_APP_CMD;
	cmd.resp_type = MMC_RSP_R1;
	cmd.cmdarg = mmc->rca << 16;

	err = mmc_send_cmd(mmc, &cmd, NULL);
	if (err)
		return err;

	/* ACMD13: fetch the SD status as a 64-byte data block */
	cmd.cmdidx = SD_CMD_APP_SD_STATUS;
	cmd.resp_type = MMC_RSP_R1;
	cmd.cmdarg = 0;

retry_ssr:
	data.dest = (char *)ssr;
	data.blocksize = 64;
	data.blocks = 1;
	data.flags = MMC_DATA_READ;

	err = mmc_send_cmd(mmc, &cmd, &data);
	if (err) {
		/* retry up to 3 times before giving up */
		if (timeout--)
			goto retry_ssr;

		return err;
	}

	/* convert the whole register to host byte order */
	for (i = 0; i < 16; i++)
		ssr[i] = be32_to_cpu(ssr[i]);

	au = (ssr[2] >> 12) & 0xF;
	/* AU size codes above 9 are only valid for SD spec 3.0 cards */
	if ((au <= 9) || (mmc->version == SD_VERSION_3)) {
		mmc->ssr.au = sd_au_size[au];
		es = (ssr[3] >> 24) & 0xFF;
		es |= (ssr[2] & 0xFF) << 8;
		et = (ssr[3] >> 18) & 0x3F;
		if (es && et) {
			eo = (ssr[3] >> 16) & 0x3;
			/*
			 * NOTE(review): appears to scale the per-AU erase
			 * timeout/offset to milliseconds -- confirm against
			 * the SD status field definitions.
			 */
			mmc->ssr.erase_timeout = (et * 1000) / es;
			mmc->ssr.erase_offset = eo * 1000;
		}
	} else {
		debug("Invalid Allocation Unit Size.\n");
	}

	return 0;
}
1434
/* frequency bases */
/* divided by 10 to be nice to platforms without floating point */
/* indexed by the low 3 bits of CSD TRAN_SPEED (see mmc_startup()) */
static const int fbase[] = {
	10000,
	100000,
	1000000,
	10000000,
};
1443
/* Multiplier values for TRAN_SPEED. Multiplied by 10 to be nice
 * to platforms without floating point.
 * Indexed by CSD TRAN_SPEED bits 6:3 (see mmc_startup()).
 */
static const u8 multipliers[] = {
	0,	/* reserved */
	10,
	12,
	13,
	15,
	20,
	25,
	30,
	35,
	40,
	45,
	50,
	55,
	60,
	70,
	80,
};
1465
1466 static inline int bus_width(uint cap)
1467 {
1468 if (cap == MMC_MODE_8BIT)
1469 return 8;
1470 if (cap == MMC_MODE_4BIT)
1471 return 4;
1472 if (cap == MMC_MODE_1BIT)
1473 return 1;
1474 pr_warn("invalid bus witdh capability 0x%x\n", cap);
1475 return 0;
1476 }
1477
1478 #if !CONFIG_IS_ENABLED(DM_MMC)
1479 #ifdef MMC_SUPPORTS_TUNING
/* non-DM fallback: tuning is not implemented, always report failure */
static int mmc_execute_tuning(struct mmc *mmc, uint opcode)
{
	return -ENOTSUPP;
}
1484 #endif
1485
/* non-DM fallback: no init-stream hook, intentionally a no-op */
static void mmc_send_init_stream(struct mmc *mmc)
{
}
1489
1490 static int mmc_set_ios(struct mmc *mmc)
1491 {
1492 int ret = 0;
1493
1494 if (mmc->cfg->ops->set_ios)
1495 ret = mmc->cfg->ops->set_ios(mmc);
1496
1497 return ret;
1498 }
1499 #endif
1500
1501 int mmc_set_clock(struct mmc *mmc, uint clock, bool disable)
1502 {
1503 if (clock > mmc->cfg->f_max)
1504 clock = mmc->cfg->f_max;
1505
1506 if (clock < mmc->cfg->f_min)
1507 clock = mmc->cfg->f_min;
1508
1509 mmc->clock = clock;
1510 mmc->clk_disable = disable;
1511
1512 return mmc_set_ios(mmc);
1513 }
1514
/*
 * Record the new host-side bus width and apply it via mmc_set_ios().
 */
static int mmc_set_bus_width(struct mmc *mmc, uint width)
{
	mmc->bus_width = width;

	return mmc_set_ios(mmc);
}
1521
1522 #if CONFIG_IS_ENABLED(MMC_VERBOSE) || defined(DEBUG)
1523 /*
1524 * helper function to display the capabilities in a human
1525 * friendly manner. The capabilities include bus width and
1526 * supported modes.
1527 */
/*
 * helper function to display the capabilities in a human
 * friendly manner. The capabilities include bus width and
 * supported modes.
 *
 * @text: label printed in front of the capability list
 * @caps: OR-mask of MMC_MODE_* width flags and MMC_CAP(mode) bits
 */
void mmc_dump_capabilities(const char *text, uint caps)
{
	enum bus_mode mode;

	printf("%s: widths [", text);
	if (caps & MMC_MODE_8BIT)
		printf("8, ");
	if (caps & MMC_MODE_4BIT)
		printf("4, ");
	if (caps & MMC_MODE_1BIT)
		printf("1, ");
	/* the two backspaces erase the trailing ", " of the last entry */
	printf("\b\b] modes [");
	for (mode = MMC_LEGACY; mode < MMC_MODES_END; mode++)
		if (MMC_CAP(mode) & caps)
			printf("%s, ", mmc_mode_name(mode));
	printf("\b\b]\n");
}
1545 #endif
1546
/* a candidate bus mode plus the widths and tuning command it uses */
struct mode_width_tuning {
	enum bus_mode mode;	/* bus mode described by this entry */
	uint widths;		/* OR-mask of MMC_MODE_*BIT widths to try */
#ifdef MMC_SUPPORTS_TUNING
	uint tuning;		/* tuning command; 0 when not required */
#endif
};
1554
1555 #if CONFIG_IS_ENABLED(MMC_IO_VOLTAGE)
1556 int mmc_voltage_to_mv(enum mmc_voltage voltage)
1557 {
1558 switch (voltage) {
1559 case MMC_SIGNAL_VOLTAGE_000: return 0;
1560 case MMC_SIGNAL_VOLTAGE_330: return 3300;
1561 case MMC_SIGNAL_VOLTAGE_180: return 1800;
1562 case MMC_SIGNAL_VOLTAGE_120: return 1200;
1563 }
1564 return -EINVAL;
1565 }
1566
/*
 * Switch the I/O signal voltage and apply it through mmc_set_ios().
 *
 * Return: 0 if the voltage is already active or was set successfully,
 * otherwise the error from mmc_set_ios(). Note that
 * mmc->signal_voltage keeps the new value even when applying it
 * failed.
 */
static int mmc_set_signal_voltage(struct mmc *mmc, uint signal_voltage)
{
	int err;

	if (mmc->signal_voltage == signal_voltage)
		return 0;

	mmc->signal_voltage = signal_voltage;
	err = mmc_set_ios(mmc);
	if (err)
		debug("unable to set voltage (err %d)\n", err);

	return err;
}
1581 #else
/* I/O voltage switching compiled out: report success unconditionally */
static inline int mmc_set_signal_voltage(struct mmc *mmc, uint signal_voltage)
{
	return 0;
}
1586 #endif
1587
/*
 * SD bus modes ordered by preference, fastest first; the selection
 * loop in sd_select_mode_and_width() takes the first entry both card
 * and host support.
 */
static const struct mode_width_tuning sd_modes_by_pref[] = {
#if CONFIG_IS_ENABLED(MMC_UHS_SUPPORT)
#ifdef MMC_SUPPORTS_TUNING
	{
		.mode = UHS_SDR104,
		.widths = MMC_MODE_4BIT | MMC_MODE_1BIT,
		.tuning = MMC_CMD_SEND_TUNING_BLOCK
	},
#endif
	{
		.mode = UHS_SDR50,
		.widths = MMC_MODE_4BIT | MMC_MODE_1BIT,
	},
	{
		.mode = UHS_DDR50,
		.widths = MMC_MODE_4BIT | MMC_MODE_1BIT,
	},
	{
		.mode = UHS_SDR25,
		.widths = MMC_MODE_4BIT | MMC_MODE_1BIT,
	},
#endif
	{
		.mode = SD_HS,
		.widths = MMC_MODE_4BIT | MMC_MODE_1BIT,
	},
#if CONFIG_IS_ENABLED(MMC_UHS_SUPPORT)
	{
		.mode = UHS_SDR12,
		.widths = MMC_MODE_4BIT | MMC_MODE_1BIT,
	},
#endif
	{
		.mode = SD_LEGACY,
		.widths = MMC_MODE_4BIT | MMC_MODE_1BIT,
	}
};
1625
/* walk sd_modes_by_pref, visiting only modes present in caps */
#define for_each_sd_mode_by_pref(caps, mwt) \
	for (mwt = sd_modes_by_pref;\
	    mwt < sd_modes_by_pref + ARRAY_SIZE(sd_modes_by_pref);\
	    mwt++) \
		if (caps & MMC_CAP(mwt->mode))
1631
/*
 * Pick the fastest SD bus mode/width combination supported by both
 * card and host, configure it on both sides and verify it by reading
 * the SSR. On any failure the card is dropped back to SD_LEGACY and
 * the next candidate is tried.
 *
 * Return: 0 on success, -ENOTSUPP when no combination worked, or an
 * unrecoverable command error.
 */
static int sd_select_mode_and_width(struct mmc *mmc, uint card_caps)
{
	int err;
	uint widths[] = {MMC_MODE_4BIT, MMC_MODE_1BIT};
	const struct mode_width_tuning *mwt;
#if CONFIG_IS_ENABLED(MMC_UHS_SUPPORT)
	/* UHS is only usable if the card accepted the 1.8V request */
	bool uhs_en = (mmc->ocr & OCR_S18R) ? true : false;
#else
	bool uhs_en = false;
#endif
	uint caps;

#ifdef DEBUG
	mmc_dump_capabilities("sd card", card_caps);
	mmc_dump_capabilities("host", mmc->host_caps);
#endif

	/* Restrict card's capabilities by what the host can do */
	caps = card_caps & mmc->host_caps;

	if (!uhs_en)
		caps &= ~UHS_CAPS;

	for_each_sd_mode_by_pref(caps, mwt) {
		uint *w;

		/* try the widest supported bus width first */
		for (w = widths; w < widths + ARRAY_SIZE(widths); w++) {
			if (*w & caps & mwt->widths) {
				debug("trying mode %s width %d (at %d MHz)\n",
				      mmc_mode_name(mwt->mode),
				      bus_width(*w),
				      mmc_mode2freq(mmc, mwt->mode) / 1000000);

				/* configure the bus width (card + host) */
				err = sd_select_bus_width(mmc, bus_width(*w));
				if (err)
					goto error;
				mmc_set_bus_width(mmc, bus_width(*w));

				/* configure the bus mode (card) */
				err = sd_set_card_speed(mmc, mwt->mode);
				if (err)
					goto error;

				/* configure the bus mode (host) */
				mmc_select_mode(mmc, mwt->mode);
				mmc_set_clock(mmc, mmc->tran_speed, false);

#ifdef MMC_SUPPORTS_TUNING
				/* execute tuning if needed */
				if (mwt->tuning && !mmc_host_is_spi(mmc)) {
					err = mmc_execute_tuning(mmc,
								 mwt->tuning);
					if (err) {
						debug("tuning failed\n");
						goto error;
					}
				}
#endif

				/* reading the SSR doubles as a data-path check */
				err = sd_read_ssr(mmc);
				if (!err)
					return 0;

				pr_warn("bad ssr\n");

error:
				/* revert to a safer bus speed */
				mmc_select_mode(mmc, SD_LEGACY);
				mmc_set_clock(mmc, mmc->tran_speed, false);
			}
		}
	}

	printf("unable to select a mode\n");
	return -ENOTSUPP;
}
1709
1710 /*
1711 * read the compare the part of ext csd that is constant.
1712 * This can be used to check that the transfer is working
1713 * as expected.
1714 */
1715 static int mmc_read_and_compare_ext_csd(struct mmc *mmc)
1716 {
1717 int err;
1718 const u8 *ext_csd = mmc->ext_csd;
1719 ALLOC_CACHE_ALIGN_BUFFER(u8, test_csd, MMC_MAX_BLOCK_LEN);
1720
1721 if (mmc->version < MMC_VERSION_4)
1722 return 0;
1723
1724 err = mmc_send_ext_csd(mmc, test_csd);
1725 if (err)
1726 return err;
1727
1728 /* Only compare read only fields */
1729 if (ext_csd[EXT_CSD_PARTITIONING_SUPPORT]
1730 == test_csd[EXT_CSD_PARTITIONING_SUPPORT] &&
1731 ext_csd[EXT_CSD_HC_WP_GRP_SIZE]
1732 == test_csd[EXT_CSD_HC_WP_GRP_SIZE] &&
1733 ext_csd[EXT_CSD_REV]
1734 == test_csd[EXT_CSD_REV] &&
1735 ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE]
1736 == test_csd[EXT_CSD_HC_ERASE_GRP_SIZE] &&
1737 memcmp(&ext_csd[EXT_CSD_SEC_CNT],
1738 &test_csd[EXT_CSD_SEC_CNT], 4) == 0)
1739 return 0;
1740
1741 return -EBADMSG;
1742 }
1743
1744 #if CONFIG_IS_ENABLED(MMC_IO_VOLTAGE)
/*
 * Choose a signal voltage allowed by both the card (derived from its
 * ext_csd card type for @mode) and @allowed_mask, and try to apply
 * it. Candidates are tried in ffs() order, i.e. lowest set bit first
 * (NOTE(review): assumes the MMC_SIGNAL_VOLTAGE_* bit ordering makes
 * this the lowest voltage -- confirm against the enum definition).
 *
 * Return: 0 once a voltage was applied, -ENOTSUPP if none could be.
 */
static int mmc_set_lowest_voltage(struct mmc *mmc, enum bus_mode mode,
				  uint32_t allowed_mask)
{
	u32 card_mask = 0;

	switch (mode) {
	case MMC_HS_200:
		if (mmc->cardtype & EXT_CSD_CARD_TYPE_HS200_1_8V)
			card_mask |= MMC_SIGNAL_VOLTAGE_180;
		if (mmc->cardtype & EXT_CSD_CARD_TYPE_HS200_1_2V)
			card_mask |= MMC_SIGNAL_VOLTAGE_120;
		break;
	case MMC_DDR_52:
		if (mmc->cardtype & EXT_CSD_CARD_TYPE_DDR_1_8V)
			card_mask |= MMC_SIGNAL_VOLTAGE_330 |
				     MMC_SIGNAL_VOLTAGE_180;
		if (mmc->cardtype & EXT_CSD_CARD_TYPE_DDR_1_2V)
			card_mask |= MMC_SIGNAL_VOLTAGE_120;
		break;
	default:
		card_mask |= MMC_SIGNAL_VOLTAGE_330;
		break;
	}

	/* drop each failed candidate from the mask and try the next one */
	while (card_mask & allowed_mask) {
		enum mmc_voltage best_match;

		best_match = 1 << (ffs(card_mask & allowed_mask) - 1);
		if (!mmc_set_signal_voltage(mmc, best_match))
			return 0;

		allowed_mask &= ~best_match;
	}

	return -ENOTSUPP;
}
1781 #else
/* I/O voltage switching compiled out: report success unconditionally */
static inline int mmc_set_lowest_voltage(struct mmc *mmc, enum bus_mode mode,
					 uint32_t allowed_mask)
{
	return 0;
}
1787 #endif
1788
/*
 * eMMC bus modes ordered by preference, fastest first; the selection
 * loop in mmc_select_mode_and_width() takes the first entry both card
 * and host support.
 */
static const struct mode_width_tuning mmc_modes_by_pref[] = {
#if CONFIG_IS_ENABLED(MMC_HS200_SUPPORT)
	{
		.mode = MMC_HS_200,
		.widths = MMC_MODE_8BIT | MMC_MODE_4BIT,
		.tuning = MMC_CMD_SEND_TUNING_BLOCK_HS200
	},
#endif
	{
		.mode = MMC_DDR_52,
		.widths = MMC_MODE_8BIT | MMC_MODE_4BIT,
	},
	{
		.mode = MMC_HS_52,
		.widths = MMC_MODE_8BIT | MMC_MODE_4BIT | MMC_MODE_1BIT,
	},
	{
		.mode = MMC_HS,
		.widths = MMC_MODE_8BIT | MMC_MODE_4BIT | MMC_MODE_1BIT,
	},
	{
		.mode = MMC_LEGACY,
		.widths = MMC_MODE_8BIT | MMC_MODE_4BIT | MMC_MODE_1BIT,
	}
};
1814
/* walk mmc_modes_by_pref, visiting only modes present in caps */
#define for_each_mmc_mode_by_pref(caps, mwt) \
	for (mwt = mmc_modes_by_pref;\
	    mwt < mmc_modes_by_pref + ARRAY_SIZE(mmc_modes_by_pref);\
	    mwt++) \
		if (caps & MMC_CAP(mwt->mode))
1820
/* map a width capability + DDR flag to the EXT_CSD_BUS_WIDTH value */
static const struct ext_csd_bus_width {
	uint cap;		/* MMC_MODE_*BIT capability flag */
	bool is_ddr;		/* true for the DDR variants */
	uint ext_csd_bits;	/* value programmed into EXT_CSD_BUS_WIDTH */
} ext_csd_bus_width[] = {
	{MMC_MODE_8BIT, true, EXT_CSD_DDR_BUS_WIDTH_8},
	{MMC_MODE_4BIT, true, EXT_CSD_DDR_BUS_WIDTH_4},
	{MMC_MODE_8BIT, false, EXT_CSD_BUS_WIDTH_8},
	{MMC_MODE_4BIT, false, EXT_CSD_BUS_WIDTH_4},
	{MMC_MODE_1BIT, false, EXT_CSD_BUS_WIDTH_1},
};
1832
/* walk ext_csd_bus_width entries matching the ddr flag and caps */
#define for_each_supported_width(caps, ddr, ecbv) \
	for (ecbv = ext_csd_bus_width;\
	    ecbv < ext_csd_bus_width + ARRAY_SIZE(ext_csd_bus_width);\
	    ecbv++) \
		if ((ddr == ecbv->is_ddr) && (caps & ecbv->cap))
1838
/*
 * Pick the fastest eMMC bus mode/width combination supported by both
 * card and host, configure it on both sides and verify it by
 * re-reading the constant part of the ext_csd. On failure the card is
 * dropped back to 1-bit legacy mode and the next candidate is tried.
 *
 * Return: 0 on success, -ENOTSUPP when no combination worked or no
 * ext_csd is cached.
 */
static int mmc_select_mode_and_width(struct mmc *mmc, uint card_caps)
{
	int err;
	const struct mode_width_tuning *mwt;
	const struct ext_csd_bus_width *ecbw;

#ifdef DEBUG
	mmc_dump_capabilities("mmc", card_caps);
	mmc_dump_capabilities("host", mmc->host_caps);
#endif

	/* Restrict card's capabilities by what the host can do */
	card_caps &= mmc->host_caps;

	/* Only version 4 of MMC supports wider bus widths */
	if (mmc->version < MMC_VERSION_4)
		return 0;

	if (!mmc->ext_csd) {
		debug("No ext_csd found!\n"); /* this should never happen */
		return -ENOTSUPP;
	}

	/* start from the legacy speed before reprogramming anything */
	mmc_set_clock(mmc, mmc->legacy_speed, false);

	for_each_mmc_mode_by_pref(card_caps, mwt) {
		for_each_supported_width(card_caps & mwt->widths,
					 mmc_is_mode_ddr(mwt->mode), ecbw) {
			enum mmc_voltage old_voltage;
			debug("trying mode %s width %d (at %d MHz)\n",
			      mmc_mode_name(mwt->mode),
			      bus_width(ecbw->cap),
			      mmc_mode2freq(mmc, mwt->mode) / 1000000);
			/* remember the voltage so it can be restored on error */
			old_voltage = mmc->signal_voltage;
			err = mmc_set_lowest_voltage(mmc, mwt->mode,
						     MMC_ALL_SIGNAL_VOLTAGE);
			if (err)
				continue;

			/* configure the bus width (card + host) */
			err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
				    EXT_CSD_BUS_WIDTH,
				    ecbw->ext_csd_bits & ~EXT_CSD_DDR_FLAG);
			if (err)
				goto error;
			mmc_set_bus_width(mmc, bus_width(ecbw->cap));

			/* configure the bus speed (card) */
			err = mmc_set_card_speed(mmc, mwt->mode);
			if (err)
				goto error;

			/*
			 * configure the bus width AND the ddr mode (card)
			 * The host side will be taken care of in the next step
			 */
			if (ecbw->ext_csd_bits & EXT_CSD_DDR_FLAG) {
				err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
						 EXT_CSD_BUS_WIDTH,
						 ecbw->ext_csd_bits);
				if (err)
					goto error;
			}

			/* configure the bus mode (host) */
			mmc_select_mode(mmc, mwt->mode);
			mmc_set_clock(mmc, mmc->tran_speed, false);
#ifdef MMC_SUPPORTS_TUNING

			/* execute tuning if needed */
			if (mwt->tuning) {
				err = mmc_execute_tuning(mmc, mwt->tuning);
				if (err) {
					debug("tuning failed\n");
					goto error;
				}
			}
#endif

			/* do a transfer to check the configuration */
			err = mmc_read_and_compare_ext_csd(mmc);
			if (!err)
				return 0;
error:
			mmc_set_signal_voltage(mmc, old_voltage);
			/* if an error occurred, revert to a safer bus mode */
			mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
				   EXT_CSD_BUS_WIDTH, EXT_CSD_BUS_WIDTH_1);
			mmc_select_mode(mmc, MMC_LEGACY);
			mmc_set_bus_width(mmc, 1);
		}
	}

	pr_err("unable to select a mode\n");

	return -ENOTSUPP;
}
1936
/*
 * MMC v4+ specific part of the startup sequence: read and cache the
 * ext_csd, then decode the spec revision, user/boot/RPMB/GP
 * capacities, partition configuration and erase/write-protect group
 * sizes from it.
 *
 * Return: 0 on success (also for SD cards and pre-4.0 MMC, which have
 * no ext_csd), -ENOMEM if the cache allocation failed, or a command
 * error; on error the cached ext_csd is freed again.
 */
static int mmc_startup_v4(struct mmc *mmc)
{
	int err, i;
	u64 capacity;
	bool has_parts = false;
	bool part_completed;
	ALLOC_CACHE_ALIGN_BUFFER(u8, ext_csd, MMC_MAX_BLOCK_LEN);

	if (IS_SD(mmc) || (mmc->version < MMC_VERSION_4))
		return 0;

	/* check ext_csd version and capacity */
	err = mmc_send_ext_csd(mmc, ext_csd);
	if (err)
		goto error;

	/* store the ext csd for future reference */
	if (!mmc->ext_csd)
		mmc->ext_csd = malloc(MMC_MAX_BLOCK_LEN);
	if (!mmc->ext_csd)
		return -ENOMEM;
	memcpy(mmc->ext_csd, ext_csd, MMC_MAX_BLOCK_LEN);

	if (ext_csd[EXT_CSD_REV] >= 2) {
		/*
		 * According to the JEDEC Standard, the value of
		 * ext_csd's capacity is valid if the value is more
		 * than 2GB
		 */
		capacity = ext_csd[EXT_CSD_SEC_CNT] << 0
				| ext_csd[EXT_CSD_SEC_CNT + 1] << 8
				| ext_csd[EXT_CSD_SEC_CNT + 2] << 16
				| ext_csd[EXT_CSD_SEC_CNT + 3] << 24;
		capacity *= MMC_MAX_BLOCK_LEN;
		if ((capacity >> 20) > 2 * 1024)
			mmc->capacity_user = capacity;
	}

	/* map the ext_csd revision to the eMMC spec version */
	switch (ext_csd[EXT_CSD_REV]) {
	case 1:
		mmc->version = MMC_VERSION_4_1;
		break;
	case 2:
		mmc->version = MMC_VERSION_4_2;
		break;
	case 3:
		mmc->version = MMC_VERSION_4_3;
		break;
	case 5:
		mmc->version = MMC_VERSION_4_41;
		break;
	case 6:
		mmc->version = MMC_VERSION_4_5;
		break;
	case 7:
		mmc->version = MMC_VERSION_5_0;
		break;
	case 8:
		mmc->version = MMC_VERSION_5_1;
		break;
	}

	/* The partition data may be non-zero but it is only
	 * effective if PARTITION_SETTING_COMPLETED is set in
	 * EXT_CSD, so ignore any data if this bit is not set,
	 * except for enabling the high-capacity group size
	 * definition (see below).
	 */
	part_completed = !!(ext_csd[EXT_CSD_PARTITION_SETTING] &
			    EXT_CSD_PARTITION_SETTING_COMPLETED);

	/* store the partition info of emmc */
	mmc->part_support = ext_csd[EXT_CSD_PARTITIONING_SUPPORT];
	if ((ext_csd[EXT_CSD_PARTITIONING_SUPPORT] & PART_SUPPORT) ||
	    ext_csd[EXT_CSD_BOOT_MULT])
		mmc->part_config = ext_csd[EXT_CSD_PART_CONF];
	if (part_completed &&
	    (ext_csd[EXT_CSD_PARTITIONING_SUPPORT] & ENHNCD_SUPPORT))
		mmc->part_attr = ext_csd[EXT_CSD_PARTITIONS_ATTRIBUTE];

	/* boot and RPMB sizes are given in 128KiB units (mult << 17) */
	mmc->capacity_boot = ext_csd[EXT_CSD_BOOT_MULT] << 17;

	mmc->capacity_rpmb = ext_csd[EXT_CSD_RPMB_MULT] << 17;

	/* decode the four general-purpose partition sizes */
	for (i = 0; i < 4; i++) {
		int idx = EXT_CSD_GP_SIZE_MULT + i * 3;
		uint mult = (ext_csd[idx + 2] << 16) +
			(ext_csd[idx + 1] << 8) + ext_csd[idx];
		if (mult)
			has_parts = true;
		if (!part_completed)
			continue;
		mmc->capacity_gp[i] = mult;
		mmc->capacity_gp[i] *=
			ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE];
		mmc->capacity_gp[i] *= ext_csd[EXT_CSD_HC_WP_GRP_SIZE];
		mmc->capacity_gp[i] <<= 19;
	}

	if (part_completed) {
		mmc->enh_user_size =
			(ext_csd[EXT_CSD_ENH_SIZE_MULT + 2] << 16) +
			(ext_csd[EXT_CSD_ENH_SIZE_MULT + 1] << 8) +
			ext_csd[EXT_CSD_ENH_SIZE_MULT];
		mmc->enh_user_size *= ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE];
		mmc->enh_user_size *= ext_csd[EXT_CSD_HC_WP_GRP_SIZE];
		mmc->enh_user_size <<= 19;
		mmc->enh_user_start =
			(ext_csd[EXT_CSD_ENH_START_ADDR + 3] << 24) +
			(ext_csd[EXT_CSD_ENH_START_ADDR + 2] << 16) +
			(ext_csd[EXT_CSD_ENH_START_ADDR + 1] << 8) +
			ext_csd[EXT_CSD_ENH_START_ADDR];
		if (mmc->high_capacity)
			mmc->enh_user_start <<= 9;
	}

	/*
	 * Host needs to enable ERASE_GRP_DEF bit if device is
	 * partitioned. This bit will be lost every time after a reset
	 * or power off. This will affect erase size.
	 */
	if (part_completed)
		has_parts = true;
	if ((ext_csd[EXT_CSD_PARTITIONING_SUPPORT] & PART_SUPPORT) &&
	    (ext_csd[EXT_CSD_PARTITIONS_ATTRIBUTE] & PART_ENH_ATTRIB))
		has_parts = true;
	if (has_parts) {
		err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
				 EXT_CSD_ERASE_GROUP_DEF, 1);

		if (err)
			goto error;

		ext_csd[EXT_CSD_ERASE_GROUP_DEF] = 1;
	}

	if (ext_csd[EXT_CSD_ERASE_GROUP_DEF] & 0x01) {
		/* Read out group size from ext_csd */
		mmc->erase_grp_size =
			ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE] * 1024;
		/*
		 * if high capacity and partition setting completed
		 * SEC_COUNT is valid even if it is smaller than 2 GiB
		 * JEDEC Standard JESD84-B45, 6.2.4
		 */
		if (mmc->high_capacity && part_completed) {
			capacity = (ext_csd[EXT_CSD_SEC_CNT]) |
				(ext_csd[EXT_CSD_SEC_CNT + 1] << 8) |
				(ext_csd[EXT_CSD_SEC_CNT + 2] << 16) |
				(ext_csd[EXT_CSD_SEC_CNT + 3] << 24);
			capacity *= MMC_MAX_BLOCK_LEN;
			mmc->capacity_user = capacity;
		}
	} else {
		/* Calculate the group size from the csd value. */
		int erase_gsz, erase_gmul;

		erase_gsz = (mmc->csd[2] & 0x00007c00) >> 10;
		erase_gmul = (mmc->csd[2] & 0x000003e0) >> 5;
		mmc->erase_grp_size = (erase_gsz + 1)
			* (erase_gmul + 1);
	}

	/* high-capacity write-protect group size, in 512-byte sectors */
	mmc->hc_wp_grp_size = 1024
		* ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE]
		* ext_csd[EXT_CSD_HC_WP_GRP_SIZE];

	mmc->wr_rel_set = ext_csd[EXT_CSD_WR_REL_SET];

	return 0;
error:
	/* drop the cached ext_csd so a later retry starts clean */
	if (mmc->ext_csd) {
		free(mmc->ext_csd);
		mmc->ext_csd = NULL;
	}
	return err;
}
2114
2115 static int mmc_startup(struct mmc *mmc)
2116 {
2117 int err, i;
2118 uint mult, freq;
2119 u64 cmult, csize;
2120 struct mmc_cmd cmd;
2121 struct blk_desc *bdesc;
2122
2123 #ifdef CONFIG_MMC_SPI_CRC_ON
2124 if (mmc_host_is_spi(mmc)) { /* enable CRC check for spi */
2125 cmd.cmdidx = MMC_CMD_SPI_CRC_ON_OFF;
2126 cmd.resp_type = MMC_RSP_R1;
2127 cmd.cmdarg = 1;
2128 err = mmc_send_cmd(mmc, &cmd, NULL);
2129 if (err)
2130 return err;
2131 }
2132 #endif
2133
2134 /* Put the Card in Identify Mode */
2135 cmd.cmdidx = mmc_host_is_spi(mmc) ? MMC_CMD_SEND_CID :
2136 MMC_CMD_ALL_SEND_CID; /* cmd not supported in spi */
2137 cmd.resp_type = MMC_RSP_R2;
2138 cmd.cmdarg = 0;
2139
2140 err = mmc_send_cmd(mmc, &cmd, NULL);
2141
2142 #ifdef CONFIG_MMC_QUIRKS
2143 if (err && (mmc->quirks & MMC_QUIRK_RETRY_SEND_CID)) {
2144 int retries = 4;
2145 /*
2146 * It has been seen that SEND_CID may fail on the first
2147 * attempt, let's try a few more time
2148 */
2149 do {
2150 err = mmc_send_cmd(mmc, &cmd, NULL);
2151 if (!err)
2152 break;
2153 } while (retries--);
2154 }
2155 #endif
2156
2157 if (err)
2158 return err;
2159
2160 memcpy(mmc->cid, cmd.response, 16);
2161
2162 /*
2163 * For MMC cards, set the Relative Address.
2164 * For SD cards, get the Relatvie Address.
2165 * This also puts the cards into Standby State
2166 */
2167 if (!mmc_host_is_spi(mmc)) { /* cmd not supported in spi */
2168 cmd.cmdidx = SD_CMD_SEND_RELATIVE_ADDR;
2169 cmd.cmdarg = mmc->rca << 16;
2170 cmd.resp_type = MMC_RSP_R6;
2171
2172 err = mmc_send_cmd(mmc, &cmd, NULL);
2173
2174 if (err)
2175 return err;
2176
2177 if (IS_SD(mmc))
2178 mmc->rca = (cmd.response[0] >> 16) & 0xffff;
2179 }
2180
2181 /* Get the Card-Specific Data */
2182 cmd.cmdidx = MMC_CMD_SEND_CSD;
2183 cmd.resp_type = MMC_RSP_R2;
2184 cmd.cmdarg = mmc->rca << 16;
2185
2186 err = mmc_send_cmd(mmc, &cmd, NULL);
2187
2188 if (err)
2189 return err;
2190
2191 mmc->csd[0] = cmd.response[0];
2192 mmc->csd[1] = cmd.response[1];
2193 mmc->csd[2] = cmd.response[2];
2194 mmc->csd[3] = cmd.response[3];
2195
2196 if (mmc->version == MMC_VERSION_UNKNOWN) {
2197 int version = (cmd.response[0] >> 26) & 0xf;
2198
2199 switch (version) {
2200 case 0:
2201 mmc->version = MMC_VERSION_1_2;
2202 break;
2203 case 1:
2204 mmc->version = MMC_VERSION_1_4;
2205 break;
2206 case 2:
2207 mmc->version = MMC_VERSION_2_2;
2208 break;
2209 case 3:
2210 mmc->version = MMC_VERSION_3;
2211 break;
2212 case 4:
2213 mmc->version = MMC_VERSION_4;
2214 break;
2215 default:
2216 mmc->version = MMC_VERSION_1_2;
2217 break;
2218 }
2219 }
2220
2221 /* divide frequency by 10, since the mults are 10x bigger */
2222 freq = fbase[(cmd.response[0] & 0x7)];
2223 mult = multipliers[((cmd.response[0] >> 3) & 0xf)];
2224
2225 mmc->legacy_speed = freq * mult;
2226 mmc_select_mode(mmc, MMC_LEGACY);
2227
2228 mmc->dsr_imp = ((cmd.response[1] >> 12) & 0x1);
2229 mmc->read_bl_len = 1 << ((cmd.response[1] >> 16) & 0xf);
2230
2231 if (IS_SD(mmc))
2232 mmc->write_bl_len = mmc->read_bl_len;
2233 else
2234 mmc->write_bl_len = 1 << ((cmd.response[3] >> 22) & 0xf);
2235
2236 if (mmc->high_capacity) {
2237 csize = (mmc->csd[1] & 0x3f) << 16
2238 | (mmc->csd[2] & 0xffff0000) >> 16;
2239 cmult = 8;
2240 } else {
2241 csize = (mmc->csd[1] & 0x3ff) << 2
2242 | (mmc->csd[2] & 0xc0000000) >> 30;
2243 cmult = (mmc->csd[2] & 0x00038000) >> 15;
2244 }
2245
2246 mmc->capacity_user = (csize + 1) << (cmult + 2);
2247 mmc->capacity_user *= mmc->read_bl_len;
2248 mmc->capacity_boot = 0;
2249 mmc->capacity_rpmb = 0;
2250 for (i = 0; i < 4; i++)
2251 mmc->capacity_gp[i] = 0;
2252
2253 if (mmc->read_bl_len > MMC_MAX_BLOCK_LEN)
2254 mmc->read_bl_len = MMC_MAX_BLOCK_LEN;
2255
2256 if (mmc->write_bl_len > MMC_MAX_BLOCK_LEN)
2257 mmc->write_bl_len = MMC_MAX_BLOCK_LEN;
2258
2259 if ((mmc->dsr_imp) && (0xffffffff != mmc->dsr)) {
2260 cmd.cmdidx = MMC_CMD_SET_DSR;
2261 cmd.cmdarg = (mmc->dsr & 0xffff) << 16;
2262 cmd.resp_type = MMC_RSP_NONE;
2263 if (mmc_send_cmd(mmc, &cmd, NULL))
2264 pr_warn("MMC: SET_DSR failed\n");
2265 }
2266
2267 /* Select the card, and put it into Transfer Mode */
2268 if (!mmc_host_is_spi(mmc)) { /* cmd not supported in spi */
2269 cmd.cmdidx = MMC_CMD_SELECT_CARD;
2270 cmd.resp_type = MMC_RSP_R1;
2271 cmd.cmdarg = mmc->rca << 16;
2272 err = mmc_send_cmd(mmc, &cmd, NULL);
2273
2274 if (err)
2275 return err;
2276 }
2277
2278 /*
2279 * For SD, its erase group is always one sector
2280 */
2281 mmc->erase_grp_size = 1;
2282 mmc->part_config = MMCPART_NOAVAILABLE;
2283
2284 err = mmc_startup_v4(mmc);
2285 if (err)
2286 return err;
2287
2288 err = mmc_set_capacity(mmc, mmc_get_blk_desc(mmc)->hwpart);
2289 if (err)
2290 return err;
2291
2292 if (IS_SD(mmc)) {
2293 err = sd_get_capabilities(mmc);
2294 if (err)
2295 return err;
2296 err = sd_select_mode_and_width(mmc, mmc->card_caps);
2297 } else {
2298 err = mmc_get_capabilities(mmc);
2299 if (err)
2300 return err;
2301 mmc_select_mode_and_width(mmc, mmc->card_caps);
2302 }
2303
2304 if (err)
2305 return err;
2306
2307 mmc->best_mode = mmc->selected_mode;
2308
2309 /* Fix the block length for DDR mode */
2310 if (mmc->ddr_mode) {
2311 mmc->read_bl_len = MMC_MAX_BLOCK_LEN;
2312 mmc->write_bl_len = MMC_MAX_BLOCK_LEN;
2313 }
2314
2315 /* fill in device description */
2316 bdesc = mmc_get_blk_desc(mmc);
2317 bdesc->lun = 0;
2318 bdesc->hwpart = 0;
2319 bdesc->type = 0;
2320 bdesc->blksz = mmc->read_bl_len;
2321 bdesc->log2blksz = LOG2(bdesc->blksz);
2322 bdesc->lba = lldiv(mmc->capacity, mmc->read_bl_len);
2323 #if !defined(CONFIG_SPL_BUILD) || \
2324 (defined(CONFIG_SPL_LIBCOMMON_SUPPORT) && \
2325 !defined(CONFIG_USE_TINY_PRINTF))
2326 sprintf(bdesc->vendor, "Man %06x Snr %04x%04x",
2327 mmc->cid[0] >> 24, (mmc->cid[2] & 0xffff),
2328 (mmc->cid[3] >> 16) & 0xffff);
2329 sprintf(bdesc->product, "%c%c%c%c%c%c", mmc->cid[0] & 0xff,
2330 (mmc->cid[1] >> 24), (mmc->cid[1] >> 16) & 0xff,
2331 (mmc->cid[1] >> 8) & 0xff, mmc->cid[1] & 0xff,
2332 (mmc->cid[2] >> 24) & 0xff);
2333 sprintf(bdesc->revision, "%d.%d", (mmc->cid[2] >> 20) & 0xf,
2334 (mmc->cid[2] >> 16) & 0xf);
2335 #else
2336 bdesc->vendor[0] = 0;
2337 bdesc->product[0] = 0;
2338 bdesc->revision[0] = 0;
2339 #endif
2340 #if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBDISK_SUPPORT)
2341 part_init(bdesc);
2342 #endif
2343
2344 return 0;
2345 }
2346
2347 static int mmc_send_if_cond(struct mmc *mmc)
2348 {
2349 struct mmc_cmd cmd;
2350 int err;
2351
2352 cmd.cmdidx = SD_CMD_SEND_IF_COND;
2353 /* We set the bit if the host supports voltages between 2.7 and 3.6 V */
2354 cmd.cmdarg = ((mmc->cfg->voltages & 0xff8000) != 0) << 8 | 0xaa;
2355 cmd.resp_type = MMC_RSP_R7;
2356
2357 err = mmc_send_cmd(mmc, &cmd, NULL);
2358
2359 if (err)
2360 return err;
2361
2362 if ((cmd.response[0] & 0xff) != 0xaa)
2363 return -EOPNOTSUPP;
2364 else
2365 mmc->version = SD_VERSION_2;
2366
2367 return 0;
2368 }
2369
2370 #if !CONFIG_IS_ENABLED(DM_MMC)
/* board-specific MMC power initializations. */
/* weak no-op default; boards may override it for non-DM setups */
__weak void board_mmc_power_init(void)
{
}
2375 #endif
2376
/*
 * Look up the card's supply regulators (driver model builds) or call
 * the legacy board hook. A missing regulator is only reported via
 * debug() and is not treated as an error.
 *
 * Always returns 0.
 */
static int mmc_power_init(struct mmc *mmc)
{
#if CONFIG_IS_ENABLED(DM_MMC)
#if CONFIG_IS_ENABLED(DM_REGULATOR)
	int ret;

	ret = device_get_supply_regulator(mmc->dev, "vmmc-supply",
					  &mmc->vmmc_supply);
	if (ret)
		debug("%s: No vmmc supply\n", mmc->dev->name);

	ret = device_get_supply_regulator(mmc->dev, "vqmmc-supply",
					  &mmc->vqmmc_supply);
	if (ret)
		debug("%s: No vqmmc supply\n", mmc->dev->name);
#endif
#else /* !CONFIG_DM_MMC */
	/*
	 * Driver model should use a regulator, as above, rather than calling
	 * out to board code.
	 */
	board_mmc_power_init();
#endif
	return 0;
}
2402
2403 /*
2404 * put the host in the initial state:
2405 * - turn on Vdd (card power supply)
2406 * - configure the bus width and clock to minimal values
2407 */
2408 static void mmc_set_initial_state(struct mmc *mmc)
2409 {
2410 int err;
2411
2412 /* First try to set 3.3V. If it fails set to 1.8V */
2413 err = mmc_set_signal_voltage(mmc, MMC_SIGNAL_VOLTAGE_330);
2414 if (err != 0)
2415 err = mmc_set_signal_voltage(mmc, MMC_SIGNAL_VOLTAGE_180);
2416 if (err != 0)
2417 pr_warn("mmc: failed to set signal voltage\n");
2418
2419 mmc_select_mode(mmc, MMC_LEGACY);
2420 mmc_set_bus_width(mmc, 1);
2421 mmc_set_clock(mmc, 0, false);
2422 }
2423
/*
 * Enable the card's Vdd supply (VMMC regulator) when driver-model
 * regulators are compiled in; otherwise a no-op.
 *
 * Return: 0 on success or the regulator error.
 */
static int mmc_power_on(struct mmc *mmc)
{
#if CONFIG_IS_ENABLED(DM_MMC) && CONFIG_IS_ENABLED(DM_REGULATOR)
	if (mmc->vmmc_supply) {
		int ret = regulator_set_enable(mmc->vmmc_supply, true);

		if (ret) {
			puts("Error enabling VMMC supply\n");
			return ret;
		}
	}
#endif
	return 0;
}
2438
/*
 * Gate the bus clock, then disable the card's Vdd supply when
 * driver-model regulators are compiled in.
 *
 * Return: 0 on success or the regulator error.
 */
static int mmc_power_off(struct mmc *mmc)
{
	/* drop to the minimum frequency and gate the clock first */
	mmc_set_clock(mmc, 1, true);
#if CONFIG_IS_ENABLED(DM_MMC) && CONFIG_IS_ENABLED(DM_REGULATOR)
	if (mmc->vmmc_supply) {
		int ret = regulator_set_enable(mmc->vmmc_supply, false);

		if (ret) {
			debug("Error disabling VMMC supply\n");
			return ret;
		}
	}
#endif
	return 0;
}
2454
/*
 * Power-cycle the card: switch Vdd off, wait, switch it back on.
 *
 * Return: 0 on success or the first error encountered.
 */
static int mmc_power_cycle(struct mmc *mmc)
{
	int err = mmc_power_off(mmc);

	if (err)
		return err;

	/*
	 * SD spec recommends at least 1ms of delay. Let's wait for 2ms
	 * to be on the safer side.
	 */
	udelay(2000);

	return mmc_power_on(mmc);
}
2469
/*
 * mmc_start_init() - begin card initialization (phase 1 of 2)
 * @mmc: MMC device to initialize
 *
 * Powers up the card, resets it with CMD0 and starts the SD/eMMC
 * operating-condition negotiation. mmc_complete_init() finishes the
 * job once the card reports ready.
 *
 * Return: 0 on success, -ENOMEDIUM when no card is present,
 * -EOPNOTSUPP when the card answers neither SD nor MMC op-cond, or
 * another negative error code from a lower layer.
 */
int mmc_start_init(struct mmc *mmc)
{
	bool no_card;
	/* UHS only attempted if the host advertises any UHS capability */
	bool uhs_en = supports_uhs(mmc->cfg->host_caps);
	int err;

	/*
	 * all hosts are capable of 1 bit bus-width and able to use the legacy
	 * timings.
	 */
	mmc->host_caps = mmc->cfg->host_caps | MMC_CAP(SD_LEGACY) |
			 MMC_CAP(MMC_LEGACY) | MMC_MODE_1BIT;

	/* we pretend there's no card when init is NULL */
	no_card = mmc_getcd(mmc) == 0;
#if !CONFIG_IS_ENABLED(DM_MMC)
	no_card = no_card || (mmc->cfg->ops->init == NULL);
#endif
	if (no_card) {
		mmc->has_init = 0;
#if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT)
		printf("MMC: no card present\n");
#endif
		return -ENOMEDIUM;
	}

	/* Nothing to do if a previous init already completed */
	if (mmc->has_init)
		return 0;

#ifdef CONFIG_FSL_ESDHC_ADAPTER_IDENT
	mmc_adapter_card_type_ident();
#endif
	err = mmc_power_init(mmc);
	if (err)
		return err;

#ifdef CONFIG_MMC_QUIRKS
	mmc->quirks = MMC_QUIRK_RETRY_SET_BLOCKLEN |
		      MMC_QUIRK_RETRY_SEND_CID;
#endif

	err = mmc_power_cycle(mmc);
	if (err) {
		/*
		 * if power cycling is not supported, we should not try
		 * to use the UHS modes, because we wouldn't be able to
		 * recover from an error during the UHS initialization.
		 */
		debug("Unable to do a full power cycle. Disabling the UHS modes for safety\n");
		uhs_en = false;
		mmc->host_caps &= ~UHS_CAPS;
		err = mmc_power_on(mmc);
	}
	if (err)
		return err;

#if CONFIG_IS_ENABLED(DM_MMC)
	/* The device has already been probed ready for use */
#else
	/* made sure it's not NULL earlier */
	err = mmc->cfg->ops->init(mmc);
	if (err)
		return err;
#endif
	mmc->ddr_mode = 0;

	/* Re-entered (without UHS) if the UHS negotiation below fails */
retry:
	mmc_set_initial_state(mmc);
	mmc_send_init_stream(mmc);

	/* Reset the Card */
	err = mmc_go_idle(mmc);

	if (err)
		return err;

	/* The internal partition reset to user partition(0) at every CMD0*/
	mmc_get_blk_desc(mmc)->hwpart = 0;

	/* Test for SD version 2 */
	err = mmc_send_if_cond(mmc);
	/*
	 * NOTE(review): the CMD8 result above is overwritten by the next
	 * call without being checked — looks intentional (the outcome is
	 * consumed via card state in sd_send_op_cond()); confirm upstream.
	 */

	/* Now try to get the SD card's operating condition */
	err = sd_send_op_cond(mmc, uhs_en);
	if (err && uhs_en) {
		/* UHS negotiation failed: retry once in non-UHS mode */
		uhs_en = false;
		mmc_power_cycle(mmc);
		goto retry;
	}

	/* If the command timed out, we check for an MMC card */
	if (err == -ETIMEDOUT) {
		err = mmc_send_op_cond(mmc);

		if (err) {
#if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT)
			pr_err("Card did not respond to voltage select!\n");
#endif
			return -EOPNOTSUPP;
		}
	}

	if (!err)
		mmc->init_in_progress = 1;

	return err;
}
2577
2578 static int mmc_complete_init(struct mmc *mmc)
2579 {
2580 int err = 0;
2581
2582 mmc->init_in_progress = 0;
2583 if (mmc->op_cond_pending)
2584 err = mmc_complete_op_cond(mmc);
2585
2586 if (!err)
2587 err = mmc_startup(mmc);
2588 if (err)
2589 mmc->has_init = 0;
2590 else
2591 mmc->has_init = 1;
2592 return err;
2593 }
2594
2595 int mmc_init(struct mmc *mmc)
2596 {
2597 int err = 0;
2598 __maybe_unused unsigned start;
2599 #if CONFIG_IS_ENABLED(DM_MMC)
2600 struct mmc_uclass_priv *upriv = dev_get_uclass_priv(mmc->dev);
2601
2602 upriv->mmc = mmc;
2603 #endif
2604 if (mmc->has_init)
2605 return 0;
2606
2607 start = get_timer(0);
2608
2609 if (!mmc->init_in_progress)
2610 err = mmc_start_init(mmc);
2611
2612 if (!err)
2613 err = mmc_complete_init(mmc);
2614 if (err)
2615 printf("%s: %d, time %lu\n", __func__, err, get_timer(start));
2616
2617 return err;
2618 }
2619
/*
 * mmc_set_dsr() - store the 16-bit driver stage register (DSR) value
 * to be programmed into the card during initialization.
 * Return: always 0.
 */
int mmc_set_dsr(struct mmc *mmc, u16 val)
{
	mmc->dsr = val;
	return 0;
}
2625
/*
 * CPU-specific MMC initializations; weak stub that SoC code may
 * override. Returns -1 so callers know nothing was initialized here.
 */
__weak int cpu_mmc_init(bd_t *bis)
{
	return -1;
}
2631
/*
 * board-specific MMC initializations; weak stub that board code may
 * override. Returns -1 so callers know nothing was initialized here.
 */
__weak int board_mmc_init(bd_t *bis)
{
	return -1;
}
2637
/* Request (1) or cancel (0) early init of @mmc by mmc_do_preinit() */
void mmc_set_preinit(struct mmc *mmc, int preinit)
{
	mmc->preinit = preinit;
}
2642
#if CONFIG_IS_ENABLED(DM_MMC) && defined(CONFIG_SPL_BUILD)
/* With driver model in SPL, devices are probed on demand: nothing to do */
static int mmc_probe(bd_t *bis)
{
	return 0;
}
#elif CONFIG_IS_ENABLED(DM_MMC)
/*
 * mmc_probe() - enumerate and probe all MMC devices via driver model
 * @bis: board information (unused on this path)
 *
 * Return: 0, or a negative error if the MMC uclass is unavailable.
 * Individual device probe failures are logged but not treated as fatal.
 */
static int mmc_probe(bd_t *bis)
{
	int ret, i;
	struct uclass *uc;
	struct udevice *dev;

	ret = uclass_get(UCLASS_MMC, &uc);
	if (ret)
		return ret;

	/*
	 * Try to add them in sequence order. Really with driver model we
	 * should allow holes, but the current MMC list does not allow that.
	 * So if we request 0, 1, 3 we will get 0, 1, 2.
	 */
	for (i = 0; ; i++) {
		ret = uclass_get_device_by_seq(UCLASS_MMC, i, &dev);
		/* -ENODEV marks the end of the sequence space */
		if (ret == -ENODEV)
			break;
	}
	uclass_foreach_dev(dev, uc) {
		ret = device_probe(dev);
		if (ret)
			pr_err("%s - probe failed: %d\n", dev->name, ret);
	}

	return 0;
}
#else
/* Legacy (non-DM) path: board code registers controllers, else CPU code */
static int mmc_probe(bd_t *bis)
{
	if (board_mmc_init(bis) < 0)
		cpu_mmc_init(bis);

	return 0;
}
#endif
2686
2687 int mmc_initialize(bd_t *bis)
2688 {
2689 static int initialized = 0;
2690 int ret;
2691 if (initialized) /* Avoid initializing mmc multiple times */
2692 return 0;
2693 initialized = 1;
2694
2695 #if !CONFIG_IS_ENABLED(BLK)
2696 #if !CONFIG_IS_ENABLED(MMC_TINY)
2697 mmc_list_init();
2698 #endif
2699 #endif
2700 ret = mmc_probe(bis);
2701 if (ret)
2702 return ret;
2703
2704 #ifndef CONFIG_SPL_BUILD
2705 print_mmc_devices(',');
2706 #endif
2707
2708 mmc_do_preinit();
2709 return 0;
2710 }
2711
#ifdef CONFIG_CMD_BKOPS_ENABLE
/*
 * mmc_set_bkops_enable() - enable manual background operations (eMMC)
 * @mmc: MMC device
 *
 * Reads EXT_CSD to check that the card supports BKOPS and that it is
 * not already enabled, then sets EXT_CSD_BKOPS_EN via CMD6.
 *
 * Return: 0 on success (or when already enabled), -EMEDIUMTYPE when
 * the card lacks BKOPS support, or a negative error from the bus.
 */
int mmc_set_bkops_enable(struct mmc *mmc)
{
	ALLOC_CACHE_ALIGN_BUFFER(u8, ext_csd, MMC_MAX_BLOCK_LEN);
	int err = mmc_send_ext_csd(mmc, ext_csd);

	if (err) {
		puts("Could not get ext_csd register values\n");
		return err;
	}

	/* Bit 0 of BKOPS_SUPPORT: card implements background operations */
	if (!(ext_csd[EXT_CSD_BKOPS_SUPPORT] & 0x1)) {
		puts("Background operations not supported on device\n");
		return -EMEDIUMTYPE;
	}

	/* Already on: nothing more to do */
	if (ext_csd[EXT_CSD_BKOPS_EN] & 0x1) {
		puts("Background operations already enabled\n");
		return 0;
	}

	err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_BKOPS_EN, 1);
	if (err) {
		puts("Failed to enable manual background operations\n");
		return err;
	}

	puts("Enabled manual background operations\n");

	return 0;
}
#endif