/* drivers/mmc/sdhci.c — generic SDHCI MMC host controller driver */
1 // SPDX-License-Identifier: GPL-2.0+
2 /*
3 * Copyright 2011, Marvell Semiconductor Inc.
4 * Lei Wen <leiwen@marvell.com>
5 *
6 * Back ported to the 8xx platform (from the 8260 platform) by
7 * Murray.Jensen@cmst.csiro.au, 27-Jan-01.
8 */
9
10 #include <common.h>
11 #include <dm.h>
12 #include <errno.h>
13 #include <malloc.h>
14 #include <mmc.h>
15 #include <sdhci.h>
16 #include <dm.h>
17
18 #if defined(CONFIG_FIXED_SDHCI_ALIGNED_BUFFER)
19 void *aligned_buffer = (void *)CONFIG_FIXED_SDHCI_ALIGNED_BUFFER;
20 #else
21 void *aligned_buffer;
22 #endif
23
24 static void sdhci_reset(struct sdhci_host *host, u8 mask)
25 {
26 unsigned long timeout;
27
28 /* Wait max 100 ms */
29 timeout = 100;
30 sdhci_writeb(host, mask, SDHCI_SOFTWARE_RESET);
31 while (sdhci_readb(host, SDHCI_SOFTWARE_RESET) & mask) {
32 if (timeout == 0) {
33 printf("%s: Reset 0x%x never completed.\n",
34 __func__, (int)mask);
35 return;
36 }
37 timeout--;
38 udelay(1000);
39 }
40 }
41
42 static void sdhci_cmd_done(struct sdhci_host *host, struct mmc_cmd *cmd)
43 {
44 int i;
45 if (cmd->resp_type & MMC_RSP_136) {
46 /* CRC is stripped so we need to do some shifting. */
47 for (i = 0; i < 4; i++) {
48 cmd->response[i] = sdhci_readl(host,
49 SDHCI_RESPONSE + (3-i)*4) << 8;
50 if (i != 3)
51 cmd->response[i] |= sdhci_readb(host,
52 SDHCI_RESPONSE + (3-i)*4-1);
53 }
54 } else {
55 cmd->response[0] = sdhci_readl(host, SDHCI_RESPONSE);
56 }
57 }
58
59 static void sdhci_transfer_pio(struct sdhci_host *host, struct mmc_data *data)
60 {
61 int i;
62 char *offs;
63 for (i = 0; i < data->blocksize; i += 4) {
64 offs = data->dest + i;
65 if (data->flags == MMC_DATA_READ)
66 *(u32 *)offs = sdhci_readl(host, SDHCI_BUFFER);
67 else
68 sdhci_writel(host, *(u32 *)offs, SDHCI_BUFFER);
69 }
70 }
71
#if CONFIG_IS_ENABLED(MMC_SDHCI_ADMA)
/*
 * sdhci_adma_desc() - fill the next ADMA2 descriptor in the host table.
 *
 * @host: SDHCI host holding the descriptor table and current slot index
 * @buf:  buffer address for this descriptor
 * @len:  transfer length for this descriptor (at most ADMA_MAX_LEN)
 * @end:  true for the final descriptor of the chain
 */
static void sdhci_adma_desc(struct sdhci_host *host, char *buf, u16 len,
			    bool end)
{
	struct sdhci_adma_desc *desc;
	u8 attr;

	desc = &host->adma_desc_table[host->desc_slot];

	attr = ADMA_DESC_ATTR_VALID | ADMA_DESC_TRANSFER_DATA;
	if (!end)
		host->desc_slot++;	/* advance slot for the next call */
	else
		attr |= ADMA_DESC_ATTR_END;	/* terminates the chain */

	desc->attr = attr;
	desc->len = len;
	desc->reserved = 0;
	desc->addr_lo = (dma_addr_t)buf;
#ifdef CONFIG_DMA_ADDR_T_64BIT
	desc->addr_hi = (u64)buf >> 32;	/* upper half of a 64-bit address */
#endif
}

/*
 * sdhci_prepare_adma_table() - build the ADMA2 descriptor chain for one
 * transfer.
 *
 * Splits the transfer into ADMA_MAX_LEN-sized descriptors (the final one
 * carries the remainder) and flushes the table from the CPU cache so the
 * DMA engine observes it.
 */
static void sdhci_prepare_adma_table(struct sdhci_host *host,
				     struct mmc_data *data)
{
	uint trans_bytes = data->blocksize * data->blocks;
	uint desc_count = DIV_ROUND_UP(trans_bytes, ADMA_MAX_LEN);
	int i = desc_count;
	char *buf;

	host->desc_slot = 0;

	if (data->flags & MMC_DATA_READ)
		buf = data->dest;
	else
		buf = (char *)data->src;

	/* All but the last descriptor carry a full ADMA_MAX_LEN chunk. */
	while (--i) {
		sdhci_adma_desc(host, buf, ADMA_MAX_LEN, false);
		buf += ADMA_MAX_LEN;
		trans_bytes -= ADMA_MAX_LEN;
	}

	/* Last descriptor takes whatever is left and ends the chain. */
	sdhci_adma_desc(host, buf, trans_bytes, true);

	flush_cache((dma_addr_t)host->adma_desc_table,
		    ROUND(desc_count * sizeof(struct sdhci_adma_desc),
			  ARCH_DMA_MINALIGN));
}
#elif defined(CONFIG_MMC_SDHCI_SDMA)
/* SDMA-only build: no descriptor table to prepare. */
static void sdhci_prepare_adma_table(struct sdhci_host *host,
				     struct mmc_data *data)
{}
#endif
#if (defined(CONFIG_MMC_SDHCI_SDMA) || CONFIG_IS_ENABLED(MMC_SDHCI_ADMA))
/*
 * sdhci_prepare_dma() - program the DMA engine for one data transfer.
 *
 * Selects the DMA mode in the host control register, resolves the buffer
 * address (bouncing through aligned_buffer where required), writes the
 * SDMA address or ADMA descriptor table address, and flushes the data
 * buffer from the CPU cache so the DMA engine sees it.
 *
 * @is_aligned is cleared when the bounce buffer is used, so the caller
 * can copy read data back out of it after the transfer.
 */
static void sdhci_prepare_dma(struct sdhci_host *host, struct mmc_data *data,
			      int *is_aligned, int trans_bytes)
{
	unsigned char ctrl;

	if (data->flags == MMC_DATA_READ)
		host->start_addr = (dma_addr_t)data->dest;
	else
		host->start_addr = (dma_addr_t)data->src;

	/* Select ADMA32/ADMA64; clearing the field leaves SDMA selected. */
	ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
	ctrl &= ~SDHCI_CTRL_DMA_MASK;
	if (host->flags & USE_ADMA64)
		ctrl |= SDHCI_CTRL_ADMA64;
	else if (host->flags & USE_ADMA)
		ctrl |= SDHCI_CTRL_ADMA32;
	sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);

	if (host->flags & USE_SDMA) {
		/*
		 * Quirky 32-bit controllers need an 8-byte-aligned SDMA
		 * address: bounce through aligned_buffer instead.
		 */
		if ((host->quirks & SDHCI_QUIRK_32BIT_DMA_ADDR) &&
		    (host->start_addr & 0x7) != 0x0) {
			*is_aligned = 0;
			host->start_addr = (unsigned long)aligned_buffer;
			if (data->flags != MMC_DATA_READ)
				memcpy(aligned_buffer, data->src, trans_bytes);
		}

#if defined(CONFIG_FIXED_SDHCI_ALIGNED_BUFFER)
		/*
		 * Always use this bounce-buffer when
		 * CONFIG_FIXED_SDHCI_ALIGNED_BUFFER is defined
		 */
		*is_aligned = 0;
		host->start_addr = (unsigned long)aligned_buffer;
		if (data->flags != MMC_DATA_READ)
			memcpy(aligned_buffer, data->src, trans_bytes);
#endif
		sdhci_writel(host, host->start_addr, SDHCI_DMA_ADDRESS);

	} else if (host->flags & (USE_ADMA | USE_ADMA64)) {
		/* ADMA: hand the prepared descriptor table to the engine. */
		sdhci_prepare_adma_table(host, data);

		sdhci_writel(host, (u32)host->adma_addr, SDHCI_ADMA_ADDRESS);
		if (host->flags & USE_ADMA64)
			sdhci_writel(host, (u64)host->adma_addr >> 32,
				     SDHCI_ADMA_ADDRESS_HI);
	}

	/* Flush the data buffer so the DMA engine observes its contents. */
	flush_cache(host->start_addr, ROUND(trans_bytes, ARCH_DMA_MINALIGN));
}
#else
/* No DMA support compiled in: transfers fall back to PIO. */
static void sdhci_prepare_dma(struct sdhci_host *host, struct mmc_data *data,
			      int *is_aligned, int trans_bytes)
{}
#endif
/*
 * sdhci_transfer_data() - service an in-flight data transfer to completion.
 *
 * Polls the interrupt status register until SDHCI_INT_DATA_END is seen.
 * In PIO mode it moves one block each time the buffer-ready bits assert;
 * in SDMA mode it re-arms the DMA address register at every boundary
 * interrupt. Overall poll budget: 1000000 iterations of 10 us.
 *
 * Return: 0 on success, -EIO on a controller-reported error,
 * -ETIMEDOUT if the transfer never completes.
 */
static int sdhci_transfer_data(struct sdhci_host *host, struct mmc_data *data)
{
	dma_addr_t start_addr = host->start_addr;
	unsigned int stat, rdy, mask, timeout, block = 0;
	bool transfer_done = false;

	timeout = 1000000;
	rdy = SDHCI_INT_SPACE_AVAIL | SDHCI_INT_DATA_AVAIL;
	mask = SDHCI_DATA_AVAILABLE | SDHCI_SPACE_AVAILABLE;
	do {
		stat = sdhci_readl(host, SDHCI_INT_STATUS);
		if (stat & SDHCI_INT_ERROR) {
			pr_debug("%s: Error detected in status(0x%X)!\n",
				 __func__, stat);
			return -EIO;
		}
		/* PIO path: move one block whenever the buffer is ready. */
		if (!transfer_done && (stat & rdy)) {
			/* Re-check present state before touching the FIFO. */
			if (!(sdhci_readl(host, SDHCI_PRESENT_STATE) & mask))
				continue;
			sdhci_writel(host, rdy, SDHCI_INT_STATUS);
			sdhci_transfer_pio(host, data);
			data->dest += data->blocksize;
			if (++block >= data->blocks) {
				/* Keep looping until the SDHCI_INT_DATA_END is
				 * cleared, even if we finished sending all the
				 * blocks.
				 */
				transfer_done = true;
				continue;
			}
		}
		/* SDMA path: re-arm the address at each boundary interrupt. */
		if ((host->flags & USE_DMA) && !transfer_done &&
		    (stat & SDHCI_INT_DMA_END)) {
			sdhci_writel(host, SDHCI_INT_DMA_END, SDHCI_INT_STATUS);
			if (host->flags & USE_SDMA) {
				start_addr &=
				~(SDHCI_DEFAULT_BOUNDARY_SIZE - 1);
				start_addr += SDHCI_DEFAULT_BOUNDARY_SIZE;
				sdhci_writel(host, start_addr,
					     SDHCI_DMA_ADDRESS);
			}
		}
		if (timeout-- > 0)
			udelay(10);
		else {
			printf("%s: Transfer data timeout\n", __func__);
			return -ETIMEDOUT;
		}
	} while (!(stat & SDHCI_INT_DATA_END));
	return 0;
}
235
236 /*
237 * No command will be sent by driver if card is busy, so driver must wait
238 * for card ready state.
239 * Every time when card is busy after timeout then (last) timeout value will be
240 * increased twice but only if it doesn't exceed global defined maximum.
241 * Each function call will use last timeout value.
242 */
243 #define SDHCI_CMD_MAX_TIMEOUT 3200
244 #define SDHCI_CMD_DEFAULT_TIMEOUT 100
245 #define SDHCI_READ_STATUS_TIMEOUT 1000
246
/*
 * sdhci_send_command() - issue one MMC command with an optional data phase.
 *
 * Waits for the CMD/DAT lines to go idle (using a persistent, doubling
 * busy timeout — see the comment above SDHCI_CMD_MAX_TIMEOUT), programs
 * block size/count, transfer mode and the DMA engine when @data is given,
 * then writes the command and polls the interrupt status for the response
 * and for data completion.
 *
 * Return: 0 on success, -ECOMM on busy/command error, -ETIMEDOUT on a
 * timeout, or the error returned by the data-transfer phase.
 */
#ifdef CONFIG_DM_MMC
static int sdhci_send_command(struct udevice *dev, struct mmc_cmd *cmd,
			      struct mmc_data *data)
{
	struct mmc *mmc = mmc_get_mmc_dev(dev);

#else
static int sdhci_send_command(struct mmc *mmc, struct mmc_cmd *cmd,
			      struct mmc_data *data)
{
#endif
	struct sdhci_host *host = mmc->priv;
	unsigned int stat = 0;
	int ret = 0;
	int trans_bytes = 0, is_aligned = 1;
	u32 mask, flags, mode;
	unsigned int time = 0;
	int mmc_dev = mmc_get_blk_desc(mmc)->devnum;
	ulong start = get_timer(0);

	host->start_addr = 0;
	/* Timeout unit - ms; static so the doubled value persists across calls */
	static unsigned int cmd_timeout = SDHCI_CMD_DEFAULT_TIMEOUT;

	mask = SDHCI_CMD_INHIBIT | SDHCI_DATA_INHIBIT;

	/* We shouldn't wait for data inhibit for stop commands, even
	   though they might use busy signaling */
	if (cmd->cmdidx == MMC_CMD_STOP_TRANSMISSION ||
	    ((cmd->cmdidx == MMC_CMD_SEND_TUNING_BLOCK ||
	      cmd->cmdidx == MMC_CMD_SEND_TUNING_BLOCK_HS200) && !data))
		mask &= ~SDHCI_DATA_INHIBIT;

	/* Busy-wait for idle lines; double the timeout (up to the max)
	 * rather than failing immediately. */
	while (sdhci_readl(host, SDHCI_PRESENT_STATE) & mask) {
		if (time >= cmd_timeout) {
			printf("%s: MMC: %d busy ", __func__, mmc_dev);
			if (2 * cmd_timeout <= SDHCI_CMD_MAX_TIMEOUT) {
				cmd_timeout += cmd_timeout;
				printf("timeout increasing to: %u ms.\n",
				       cmd_timeout);
			} else {
				puts("timeout.\n");
				return -ECOMM;
			}
		}
		time++;
		udelay(1000);
	}

	/* Clear any stale interrupt status before issuing the command. */
	sdhci_writel(host, SDHCI_INT_ALL_MASK, SDHCI_INT_STATUS);

	/* Tuning commands complete with DATA_AVAIL rather than RESPONSE. */
	mask = SDHCI_INT_RESPONSE;
	if ((cmd->cmdidx == MMC_CMD_SEND_TUNING_BLOCK ||
	     cmd->cmdidx == MMC_CMD_SEND_TUNING_BLOCK_HS200) && !data)
		mask = SDHCI_INT_DATA_AVAIL;

	/* Translate the MMC response type into SDHCI command flags. */
	if (!(cmd->resp_type & MMC_RSP_PRESENT))
		flags = SDHCI_CMD_RESP_NONE;
	else if (cmd->resp_type & MMC_RSP_136)
		flags = SDHCI_CMD_RESP_LONG;
	else if (cmd->resp_type & MMC_RSP_BUSY) {
		flags = SDHCI_CMD_RESP_SHORT_BUSY;
		if (data)
			mask |= SDHCI_INT_DATA_END;
	} else
		flags = SDHCI_CMD_RESP_SHORT;

	if (cmd->resp_type & MMC_RSP_CRC)
		flags |= SDHCI_CMD_CRC;
	if (cmd->resp_type & MMC_RSP_OPCODE)
		flags |= SDHCI_CMD_INDEX;
	if (data || cmd->cmdidx == MMC_CMD_SEND_TUNING_BLOCK ||
	    cmd->cmdidx == MMC_CMD_SEND_TUNING_BLOCK_HS200)
		flags |= SDHCI_CMD_DATA;

	/* Set Transfer mode regarding to data flag */
	if (data) {
		sdhci_writeb(host, 0xe, SDHCI_TIMEOUT_CONTROL);
		mode = SDHCI_TRNS_BLK_CNT_EN;
		trans_bytes = data->blocks * data->blocksize;
		if (data->blocks > 1)
			mode |= SDHCI_TRNS_MULTI;

		if (data->flags == MMC_DATA_READ)
			mode |= SDHCI_TRNS_READ;

		if (host->flags & USE_DMA) {
			mode |= SDHCI_TRNS_DMA;
			sdhci_prepare_dma(host, data, &is_aligned, trans_bytes);
		}

		sdhci_writew(host, SDHCI_MAKE_BLKSZ(SDHCI_DEFAULT_BOUNDARY_ARG,
				data->blocksize),
			     SDHCI_BLOCK_SIZE);
		sdhci_writew(host, data->blocks, SDHCI_BLOCK_COUNT);
		sdhci_writew(host, mode, SDHCI_TRANSFER_MODE);
	} else if (cmd->resp_type & MMC_RSP_BUSY) {
		/* R1b without data still needs a data timeout programmed. */
		sdhci_writeb(host, 0xe, SDHCI_TIMEOUT_CONTROL);
	}

	sdhci_writel(host, cmd->cmdarg, SDHCI_ARGUMENT);
	sdhci_writew(host, SDHCI_MAKE_CMD(cmd->cmdidx, flags), SDHCI_COMMAND);
	/* Poll for command completion (or error) with a fixed timeout. */
	start = get_timer(0);
	do {
		stat = sdhci_readl(host, SDHCI_INT_STATUS);
		if (stat & SDHCI_INT_ERROR)
			break;

		if (get_timer(start) >= SDHCI_READ_STATUS_TIMEOUT) {
			if (host->quirks & SDHCI_QUIRK_BROKEN_R1B) {
				/* Broken-R1b controllers never signal
				 * completion: assume success. */
				return 0;
			} else {
				printf("%s: Timeout for status update!\n",
				       __func__);
				return -ETIMEDOUT;
			}
		}
	} while ((stat & mask) != mask);

	if ((stat & (SDHCI_INT_ERROR | mask)) == mask) {
		sdhci_cmd_done(host, cmd);
		sdhci_writel(host, mask, SDHCI_INT_STATUS);
	} else
		ret = -1;

	if (!ret && data)
		ret = sdhci_transfer_data(host, data);

	if (host->quirks & SDHCI_QUIRK_WAIT_SEND_CMD)
		udelay(1000);

	stat = sdhci_readl(host, SDHCI_INT_STATUS);
	sdhci_writel(host, SDHCI_INT_ALL_MASK, SDHCI_INT_STATUS);
	if (!ret) {
		/* Copy back from the bounce buffer on unaligned reads. */
		if ((host->quirks & SDHCI_QUIRK_32BIT_DMA_ADDR) &&
		    !is_aligned && (data->flags == MMC_DATA_READ))
			memcpy(data->dest, aligned_buffer, trans_bytes);
		return 0;
	}

	/* On failure, reset the command and data circuits before returning. */
	sdhci_reset(host, SDHCI_RESET_CMD);
	sdhci_reset(host, SDHCI_RESET_DATA);
	if (stat & SDHCI_INT_TIMEOUT)
		return -ETIMEDOUT;
	else
		return -ECOMM;
}
394
#if defined(CONFIG_DM_MMC) && defined(MMC_SUPPORTS_TUNING)
/*
 * sdhci_execute_tuning() - run the bus tuning procedure.
 *
 * Tuning is entirely delegated to the platform's
 * platform_execute_tuning() hook; without one this is a no-op success.
 */
static int sdhci_execute_tuning(struct udevice *dev, uint opcode)
{
	struct mmc *mmc = mmc_get_mmc_dev(dev);
	struct sdhci_host *host = mmc->priv;
	int ret = 0;

	debug("%s\n", __func__);

	if (host->ops && host->ops->platform_execute_tuning)
		ret = host->ops->platform_execute_tuning(mmc, opcode);

	return ret;
}
#endif
/*
 * sdhci_set_clock() - program the SD clock to at most @clock Hz.
 *
 * Waits for the CMD/DAT lines to go idle, gates the clock, selects a
 * divider according to the controller's spec version (programmable
 * clock mode, even dividers for v3.00, powers of two for v2.00),
 * then waits for the internal clock to stabilise before enabling the
 * card clock. @clock == 0 leaves the clock gated.
 *
 * Return: 0 on success, -EBUSY on an inhibit or stabilisation timeout.
 */
int sdhci_set_clock(struct mmc *mmc, unsigned int clock)
{
	struct sdhci_host *host = mmc->priv;
	unsigned int div, clk = 0, timeout;

	/* Wait max 20 ms */
	timeout = 200;
	while (sdhci_readl(host, SDHCI_PRESENT_STATE) &
	       (SDHCI_CMD_INHIBIT | SDHCI_DATA_INHIBIT)) {
		if (timeout == 0) {
			printf("%s: Timeout to wait cmd & data inhibit\n",
			       __func__);
			return -EBUSY;
		}

		timeout--;
		udelay(100);
	}

	/* Gate the clock before reprogramming the divider. */
	sdhci_writew(host, 0, SDHCI_CLOCK_CONTROL);

	if (clock == 0)
		return 0;

	/* Platform hook for clock-delay/tap tuning, if provided. */
	if (host->ops && host->ops->set_delay)
		host->ops->set_delay(host);

	if (SDHCI_GET_VERSION(host) >= SDHCI_SPEC_300) {
		/*
		 * Check if the Host Controller supports Programmable Clock
		 * Mode.
		 */
		if (host->clk_mul) {
			for (div = 1; div <= 1024; div++) {
				if ((host->max_clk / div) <= clock)
					break;
			}

			/*
			 * Set Programmable Clock Mode in the Clock
			 * Control register.
			 */
			clk = SDHCI_PROG_CLOCK_MODE;
			div--;
		} else {
			/* Version 3.00 divisors must be a multiple of 2. */
			if (host->max_clk <= clock) {
				div = 1;
			} else {
				for (div = 2;
				     div < SDHCI_MAX_DIV_SPEC_300;
				     div += 2) {
					if ((host->max_clk / div) <= clock)
						break;
				}
			}
			div >>= 1;
		}
	} else {
		/* Version 2.00 divisors must be a power of 2. */
		for (div = 1; div < SDHCI_MAX_DIV_SPEC_200; div *= 2) {
			if ((host->max_clk / div) <= clock)
				break;
		}
		div >>= 1;
	}

	if (host->ops && host->ops->set_clock)
		host->ops->set_clock(host, div);

	/* Split the divider across the low and high register fields. */
	clk |= (div & SDHCI_DIV_MASK) << SDHCI_DIVIDER_SHIFT;
	clk |= ((div & SDHCI_DIV_HI_MASK) >> SDHCI_DIV_MASK_LEN)
		<< SDHCI_DIVIDER_HI_SHIFT;
	clk |= SDHCI_CLOCK_INT_EN;
	sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);

	/* Wait max 20 ms */
	timeout = 20;
	while (!((clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL))
		& SDHCI_CLOCK_INT_STABLE)) {
		if (timeout == 0) {
			printf("%s: Internal clock never stabilised.\n",
			       __func__);
			return -EBUSY;
		}
		timeout--;
		udelay(1000);
	}

	/* Internal clock is stable: enable the clock to the card. */
	clk |= SDHCI_CLOCK_CARD_EN;
	sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);
	return 0;
}
506
507 static void sdhci_set_power(struct sdhci_host *host, unsigned short power)
508 {
509 u8 pwr = 0;
510
511 if (power != (unsigned short)-1) {
512 switch (1 << power) {
513 case MMC_VDD_165_195:
514 pwr = SDHCI_POWER_180;
515 break;
516 case MMC_VDD_29_30:
517 case MMC_VDD_30_31:
518 pwr = SDHCI_POWER_300;
519 break;
520 case MMC_VDD_32_33:
521 case MMC_VDD_33_34:
522 pwr = SDHCI_POWER_330;
523 break;
524 }
525 }
526
527 if (pwr == 0) {
528 sdhci_writeb(host, 0, SDHCI_POWER_CONTROL);
529 return;
530 }
531
532 pwr |= SDHCI_POWER_ON;
533
534 sdhci_writeb(host, pwr, SDHCI_POWER_CONTROL);
535 }
536
537 void sdhci_set_uhs_timing(struct sdhci_host *host)
538 {
539 struct mmc *mmc = (struct mmc *)host->mmc;
540 u32 reg;
541
542 reg = sdhci_readw(host, SDHCI_HOST_CONTROL2);
543 reg &= ~SDHCI_CTRL_UHS_MASK;
544
545 switch (mmc->selected_mode) {
546 case UHS_SDR50:
547 case MMC_HS_52:
548 reg |= SDHCI_CTRL_UHS_SDR50;
549 break;
550 case UHS_DDR50:
551 case MMC_DDR_52:
552 reg |= SDHCI_CTRL_UHS_DDR50;
553 break;
554 case UHS_SDR104:
555 case MMC_HS_200:
556 reg |= SDHCI_CTRL_UHS_SDR104;
557 break;
558 default:
559 reg |= SDHCI_CTRL_UHS_SDR12;
560 }
561
562 sdhci_writew(host, reg, SDHCI_HOST_CONTROL2);
563 }
564
/*
 * sdhci_set_ios() - apply the current mmc settings (clock, bus width,
 * high-speed bit) to the controller, then invoke the optional platform
 * set_ios_post() hook.
 *
 * Return: 0, or the value of the platform's set_ios_post() hook.
 */
#ifdef CONFIG_DM_MMC
static int sdhci_set_ios(struct udevice *dev)
{
	struct mmc *mmc = mmc_get_mmc_dev(dev);
#else
static int sdhci_set_ios(struct mmc *mmc)
{
#endif
	u32 ctrl;
	struct sdhci_host *host = mmc->priv;

	/* Let the platform adjust its control registers first, if any. */
	if (host->ops && host->ops->set_control_reg)
		host->ops->set_control_reg(host);

	/* Reprogram the clock only when the requested rate changed. */
	if (mmc->clock != host->clock)
		sdhci_set_clock(mmc, mmc->clock);

	if (mmc->clk_disable)
		sdhci_set_clock(mmc, 0);

	/* Set bus width */
	ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
	if (mmc->bus_width == 8) {
		ctrl &= ~SDHCI_CTRL_4BITBUS;
		/* 8-bit needs spec >= 3.00 or the WIDE8 quirk. */
		if ((SDHCI_GET_VERSION(host) >= SDHCI_SPEC_300) ||
		    (host->quirks & SDHCI_QUIRK_USE_WIDE8))
			ctrl |= SDHCI_CTRL_8BITBUS;
	} else {
		if ((SDHCI_GET_VERSION(host) >= SDHCI_SPEC_300) ||
		    (host->quirks & SDHCI_QUIRK_USE_WIDE8))
			ctrl &= ~SDHCI_CTRL_8BITBUS;
		if (mmc->bus_width == 4)
			ctrl |= SDHCI_CTRL_4BITBUS;
		else
			ctrl &= ~SDHCI_CTRL_4BITBUS;
	}

	/* High-speed bit above 26 MHz, unless a quirk forbids it below. */
	if (mmc->clock > 26000000)
		ctrl |= SDHCI_CTRL_HISPD;
	else
		ctrl &= ~SDHCI_CTRL_HISPD;

	if ((host->quirks & SDHCI_QUIRK_NO_HISPD_BIT) ||
	    (host->quirks & SDHCI_QUIRK_BROKEN_HISPD_MODE))
		ctrl &= ~SDHCI_CTRL_HISPD;

	sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);

	/* If available, call the driver specific "post" set_ios() function */
	if (host->ops && host->ops->set_ios_post)
		return host->ops->set_ios_post(host);

	return 0;
}
619
/*
 * sdhci_init() - one-time controller initialisation.
 *
 * Requests the optional card-detect GPIO, resets the controller,
 * allocates the global bounce buffer for quirky 32-bit SDMA hosts,
 * powers the bus at the highest supported voltage and configures the
 * interrupt enable/signal registers.
 *
 * Return: 0 on success, -ENOMEM if the bounce buffer cannot be allocated.
 */
static int sdhci_init(struct mmc *mmc)
{
	struct sdhci_host *host = mmc->priv;
#if CONFIG_IS_ENABLED(DM_MMC) && CONFIG_IS_ENABLED(DM_GPIO)
	struct udevice *dev = mmc->dev;

	/* Optional "cd-gpio" card-detect line from the device tree. */
	gpio_request_by_name(dev, "cd-gpio", 0,
			     &host->cd_gpio, GPIOD_IS_IN);
#endif

	sdhci_reset(host, SDHCI_RESET_ALL);

	/* 512 KiB bounce buffer, shared by all quirky 32-bit SDMA hosts. */
	if ((host->quirks & SDHCI_QUIRK_32BIT_DMA_ADDR) && !aligned_buffer) {
		aligned_buffer = memalign(8, 512*1024);
		if (!aligned_buffer) {
			printf("%s: Aligned buffer alloc failed!!!\n",
			       __func__);
			return -ENOMEM;
		}
	}

	/* Power the bus at the highest voltage the config advertises. */
	sdhci_set_power(host, fls(mmc->cfg->voltages) - 1);

	if (host->ops && host->ops->get_cd)
		host->ops->get_cd(host);

	/* Enable only interrupts served by the SD controller */
	sdhci_writel(host, SDHCI_INT_DATA_MASK | SDHCI_INT_CMD_MASK,
		     SDHCI_INT_ENABLE);
	/* Mask all sdhci interrupt sources */
	sdhci_writel(host, 0x0, SDHCI_SIGNAL_ENABLE);

	return 0;
}
654
655 #ifdef CONFIG_DM_MMC
/* Driver-model probe: run the common SDHCI init on the bound mmc device. */
int sdhci_probe(struct udevice *dev)
{
	struct mmc *mmc = mmc_get_mmc_dev(dev);

	return sdhci_init(mmc);
}
662
663 int sdhci_get_cd(struct udevice *dev)
664 {
665 struct mmc *mmc = mmc_get_mmc_dev(dev);
666 struct sdhci_host *host = mmc->priv;
667 int value;
668
669 /* If nonremovable, assume that the card is always present. */
670 if (mmc->cfg->host_caps & MMC_CAP_NONREMOVABLE)
671 return 1;
672 /* If polling, assume that the card is always present. */
673 if (mmc->cfg->host_caps & MMC_CAP_NEEDS_POLL)
674 return 1;
675
676 #if CONFIG_IS_ENABLED(DM_GPIO)
677 value = dm_gpio_get_value(&host->cd_gpio);
678 if (value >= 0) {
679 if (mmc->cfg->host_caps & MMC_CAP_CD_ACTIVE_HIGH)
680 return !value;
681 else
682 return value;
683 }
684 #endif
685 value = !!(sdhci_readl(host, SDHCI_PRESENT_STATE) &
686 SDHCI_CARD_PRESENT);
687 if (mmc->cfg->host_caps & MMC_CAP_CD_ACTIVE_HIGH)
688 return !value;
689 else
690 return value;
691 }
692
/* Driver-model MMC operations shared by all generic SDHCI controllers. */
const struct dm_mmc_ops sdhci_ops = {
	.send_cmd = sdhci_send_command,
	.set_ios = sdhci_set_ios,
	.get_cd = sdhci_get_cd,
#ifdef MMC_SUPPORTS_TUNING
	.execute_tuning = sdhci_execute_tuning,
#endif
};
#else
/* Legacy (non-DM) MMC operations; referenced from sdhci_setup_cfg(). */
static const struct mmc_ops sdhci_ops = {
	.send_cmd = sdhci_send_command,
	.set_ios = sdhci_set_ios,
	.init = sdhci_init,
};
#endif
708
709 int sdhci_setup_cfg(struct mmc_config *cfg, struct sdhci_host *host,
710 u32 f_max, u32 f_min)
711 {
712 u32 caps, caps_1 = 0;
713 #if CONFIG_IS_ENABLED(DM_MMC)
714 u32 mask[2] = {0};
715 int ret;
716 ret = dev_read_u32_array(host->mmc->dev, "sdhci-caps-mask",
717 mask, 2);
718 if (ret && ret != -1)
719 return ret;
720
721 caps = ~mask[1] & sdhci_readl(host, SDHCI_CAPABILITIES);
722 #else
723 caps = sdhci_readl(host, SDHCI_CAPABILITIES);
724 #endif
725
726 #ifdef CONFIG_MMC_SDHCI_SDMA
727 if (!(caps & SDHCI_CAN_DO_SDMA)) {
728 printf("%s: Your controller doesn't support SDMA!!\n",
729 __func__);
730 return -EINVAL;
731 }
732
733 host->flags |= USE_SDMA;
734 #endif
735 #if CONFIG_IS_ENABLED(MMC_SDHCI_ADMA)
736 if (!(caps & SDHCI_CAN_DO_ADMA2)) {
737 printf("%s: Your controller doesn't support SDMA!!\n",
738 __func__);
739 return -EINVAL;
740 }
741 host->adma_desc_table = (struct sdhci_adma_desc *)
742 memalign(ARCH_DMA_MINALIGN, ADMA_TABLE_SZ);
743
744 host->adma_addr = (dma_addr_t)host->adma_desc_table;
745 #ifdef CONFIG_DMA_ADDR_T_64BIT
746 host->flags |= USE_ADMA64;
747 #else
748 host->flags |= USE_ADMA;
749 #endif
750 #endif
751 if (host->quirks & SDHCI_QUIRK_REG32_RW)
752 host->version =
753 sdhci_readl(host, SDHCI_HOST_VERSION - 2) >> 16;
754 else
755 host->version = sdhci_readw(host, SDHCI_HOST_VERSION);
756
757 cfg->name = host->name;
758 #ifndef CONFIG_DM_MMC
759 cfg->ops = &sdhci_ops;
760 #endif
761
762 /* Check whether the clock multiplier is supported or not */
763 if (SDHCI_GET_VERSION(host) >= SDHCI_SPEC_300) {
764 #if CONFIG_IS_ENABLED(DM_MMC)
765 caps_1 = ~mask[0] & sdhci_readl(host, SDHCI_CAPABILITIES_1);
766 #else
767 caps_1 = sdhci_readl(host, SDHCI_CAPABILITIES_1);
768 #endif
769 host->clk_mul = (caps_1 & SDHCI_CLOCK_MUL_MASK) >>
770 SDHCI_CLOCK_MUL_SHIFT;
771 }
772
773 if (host->max_clk == 0) {
774 if (SDHCI_GET_VERSION(host) >= SDHCI_SPEC_300)
775 host->max_clk = (caps & SDHCI_CLOCK_V3_BASE_MASK) >>
776 SDHCI_CLOCK_BASE_SHIFT;
777 else
778 host->max_clk = (caps & SDHCI_CLOCK_BASE_MASK) >>
779 SDHCI_CLOCK_BASE_SHIFT;
780 host->max_clk *= 1000000;
781 if (host->clk_mul)
782 host->max_clk *= host->clk_mul;
783 }
784 if (host->max_clk == 0) {
785 printf("%s: Hardware doesn't specify base clock frequency\n",
786 __func__);
787 return -EINVAL;
788 }
789 if (f_max && (f_max < host->max_clk))
790 cfg->f_max = f_max;
791 else
792 cfg->f_max = host->max_clk;
793 if (f_min)
794 cfg->f_min = f_min;
795 else {
796 if (SDHCI_GET_VERSION(host) >= SDHCI_SPEC_300)
797 cfg->f_min = cfg->f_max / SDHCI_MAX_DIV_SPEC_300;
798 else
799 cfg->f_min = cfg->f_max / SDHCI_MAX_DIV_SPEC_200;
800 }
801 cfg->voltages = 0;
802 if (caps & SDHCI_CAN_VDD_330)
803 cfg->voltages |= MMC_VDD_32_33 | MMC_VDD_33_34;
804 if (caps & SDHCI_CAN_VDD_300)
805 cfg->voltages |= MMC_VDD_29_30 | MMC_VDD_30_31;
806 if (caps & SDHCI_CAN_VDD_180)
807 cfg->voltages |= MMC_VDD_165_195;
808
809 if (host->quirks & SDHCI_QUIRK_BROKEN_VOLTAGE)
810 cfg->voltages |= host->voltages;
811
812 cfg->host_caps |= MMC_MODE_HS | MMC_MODE_HS_52MHz | MMC_MODE_4BIT;
813
814 /* Since Host Controller Version3.0 */
815 if (SDHCI_GET_VERSION(host) >= SDHCI_SPEC_300) {
816 if (!(caps & SDHCI_CAN_DO_8BIT))
817 cfg->host_caps &= ~MMC_MODE_8BIT;
818 }
819
820 if (host->quirks & SDHCI_QUIRK_BROKEN_HISPD_MODE) {
821 cfg->host_caps &= ~MMC_MODE_HS;
822 cfg->host_caps &= ~MMC_MODE_HS_52MHz;
823 }
824
825 if (!(cfg->voltages & MMC_VDD_165_195) ||
826 (host->quirks & SDHCI_QUIRK_NO_1_8_V))
827 caps_1 &= ~(SDHCI_SUPPORT_SDR104 | SDHCI_SUPPORT_SDR50 |
828 SDHCI_SUPPORT_DDR50);
829
830 if (caps_1 & (SDHCI_SUPPORT_SDR104 | SDHCI_SUPPORT_SDR50 |
831 SDHCI_SUPPORT_DDR50))
832 cfg->host_caps |= MMC_CAP(UHS_SDR12) | MMC_CAP(UHS_SDR25);
833
834 if (caps_1 & SDHCI_SUPPORT_SDR104) {
835 cfg->host_caps |= MMC_CAP(UHS_SDR104) | MMC_CAP(UHS_SDR50);
836 /*
837 * SD3.0: SDR104 is supported so (for eMMC) the caps2
838 * field can be promoted to support HS200.
839 */
840 cfg->host_caps |= MMC_CAP(MMC_HS_200);
841 } else if (caps_1 & SDHCI_SUPPORT_SDR50) {
842 cfg->host_caps |= MMC_CAP(UHS_SDR50);
843 }
844
845 if (caps_1 & SDHCI_SUPPORT_DDR50)
846 cfg->host_caps |= MMC_CAP(UHS_DDR50);
847
848 if (host->host_caps)
849 cfg->host_caps |= host->host_caps;
850
851 cfg->b_max = CONFIG_SYS_MMC_MAX_BLK_COUNT;
852
853 return 0;
854 }
855
856 #ifdef CONFIG_BLK
/* Bind the mmc device and its block device into the driver-model tree. */
int sdhci_bind(struct udevice *dev, struct mmc *mmc, struct mmc_config *cfg)
{
	return mmc_bind(dev, mmc, cfg);
}
861 #else
862 int add_sdhci(struct sdhci_host *host, u32 f_max, u32 f_min)
863 {
864 int ret;
865
866 ret = sdhci_setup_cfg(&host->cfg, host, f_max, f_min);
867 if (ret)
868 return ret;
869
870 host->mmc = mmc_create(&host->cfg, host);
871 if (host->mmc == NULL) {
872 printf("%s: mmc create fail!\n", __func__);
873 return -ENOMEM;
874 }
875
876 return 0;
877 }
878 #endif