// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright 2011, Marvell Semiconductor Inc.
 * Lei Wen <leiwen@marvell.com>
 *
 * Back ported to the 8xx platform (from the 8260 platform) by
 * Murray.Jensen@cmst.csiro.au, 27-Jan-01.
 */
#if defined(CONFIG_FIXED_SDHCI_ALIGNED_BUFFER)
/* Board-fixed bounce buffer, shared by all SDHCI hosts on this platform */
void *aligned_buffer = (void *)CONFIG_FIXED_SDHCI_ALIGNED_BUFFER;
#else
/* Lazily allocated in sdhci_init() when SDHCI_QUIRK_32BIT_DMA_ADDR is set */
void *aligned_buffer;
#endif
24 static void sdhci_reset(struct sdhci_host
*host
, u8 mask
)
26 unsigned long timeout
;
30 sdhci_writeb(host
, mask
, SDHCI_SOFTWARE_RESET
);
31 while (sdhci_readb(host
, SDHCI_SOFTWARE_RESET
) & mask
) {
33 printf("%s: Reset 0x%x never completed.\n",
42 static void sdhci_cmd_done(struct sdhci_host
*host
, struct mmc_cmd
*cmd
)
45 if (cmd
->resp_type
& MMC_RSP_136
) {
46 /* CRC is stripped so we need to do some shifting. */
47 for (i
= 0; i
< 4; i
++) {
48 cmd
->response
[i
] = sdhci_readl(host
,
49 SDHCI_RESPONSE
+ (3-i
)*4) << 8;
51 cmd
->response
[i
] |= sdhci_readb(host
,
52 SDHCI_RESPONSE
+ (3-i
)*4-1);
55 cmd
->response
[0] = sdhci_readl(host
, SDHCI_RESPONSE
);
59 static void sdhci_transfer_pio(struct sdhci_host
*host
, struct mmc_data
*data
)
63 for (i
= 0; i
< data
->blocksize
; i
+= 4) {
64 offs
= data
->dest
+ i
;
65 if (data
->flags
== MMC_DATA_READ
)
66 *(u32
*)offs
= sdhci_readl(host
, SDHCI_BUFFER
);
68 sdhci_writel(host
, *(u32
*)offs
, SDHCI_BUFFER
);
72 #if CONFIG_IS_ENABLED(MMC_SDHCI_ADMA)
73 static void sdhci_adma_desc(struct sdhci_host
*host
, char *buf
, u16 len
,
76 struct sdhci_adma_desc
*desc
;
79 desc
= &host
->adma_desc_table
[host
->desc_slot
];
81 attr
= ADMA_DESC_ATTR_VALID
| ADMA_DESC_TRANSFER_DATA
;
85 attr
|= ADMA_DESC_ATTR_END
;
90 desc
->addr_lo
= (dma_addr_t
)buf
;
91 #ifdef CONFIG_DMA_ADDR_T_64BIT
92 desc
->addr_hi
= (u64
)buf
>> 32;
96 static void sdhci_prepare_adma_table(struct sdhci_host
*host
,
97 struct mmc_data
*data
)
99 uint trans_bytes
= data
->blocksize
* data
->blocks
;
100 uint desc_count
= DIV_ROUND_UP(trans_bytes
, ADMA_MAX_LEN
);
106 if (data
->flags
& MMC_DATA_READ
)
109 buf
= (char *)data
->src
;
112 sdhci_adma_desc(host
, buf
, ADMA_MAX_LEN
, false);
114 trans_bytes
-= ADMA_MAX_LEN
;
117 sdhci_adma_desc(host
, buf
, trans_bytes
, true);
119 flush_cache((dma_addr_t
)host
->adma_desc_table
,
120 ROUND(desc_count
* sizeof(struct sdhci_adma_desc
),
123 #elif defined(CONFIG_MMC_SDHCI_SDMA)
124 static void sdhci_prepare_adma_table(struct sdhci_host
*host
,
125 struct mmc_data
*data
)
128 #if (defined(CONFIG_MMC_SDHCI_SDMA) || CONFIG_IS_ENABLED(MMC_SDHCI_ADMA))
129 static void sdhci_prepare_dma(struct sdhci_host
*host
, struct mmc_data
*data
,
130 int *is_aligned
, int trans_bytes
)
134 if (data
->flags
== MMC_DATA_READ
)
135 host
->start_addr
= (dma_addr_t
)data
->dest
;
137 host
->start_addr
= (dma_addr_t
)data
->src
;
139 ctrl
= sdhci_readb(host
, SDHCI_HOST_CONTROL
);
140 ctrl
&= ~SDHCI_CTRL_DMA_MASK
;
141 if (host
->flags
& USE_ADMA64
)
142 ctrl
|= SDHCI_CTRL_ADMA64
;
143 else if (host
->flags
& USE_ADMA
)
144 ctrl
|= SDHCI_CTRL_ADMA32
;
145 sdhci_writeb(host
, ctrl
, SDHCI_HOST_CONTROL
);
147 if (host
->flags
& USE_SDMA
) {
148 if ((host
->quirks
& SDHCI_QUIRK_32BIT_DMA_ADDR
) &&
149 (host
->start_addr
& 0x7) != 0x0) {
151 host
->start_addr
= (unsigned long)aligned_buffer
;
152 if (data
->flags
!= MMC_DATA_READ
)
153 memcpy(aligned_buffer
, data
->src
, trans_bytes
);
156 #if defined(CONFIG_FIXED_SDHCI_ALIGNED_BUFFER)
158 * Always use this bounce-buffer when
159 * CONFIG_FIXED_SDHCI_ALIGNED_BUFFER is defined
162 host
->start_addr
= (unsigned long)aligned_buffer
;
163 if (data
->flags
!= MMC_DATA_READ
)
164 memcpy(aligned_buffer
, data
->src
, trans_bytes
);
166 sdhci_writel(host
, host
->start_addr
, SDHCI_DMA_ADDRESS
);
168 } else if (host
->flags
& (USE_ADMA
| USE_ADMA64
)) {
169 sdhci_prepare_adma_table(host
, data
);
171 sdhci_writel(host
, (u32
)host
->adma_addr
, SDHCI_ADMA_ADDRESS
);
172 if (host
->flags
& USE_ADMA64
)
173 sdhci_writel(host
, (u64
)host
->adma_addr
>> 32,
174 SDHCI_ADMA_ADDRESS_HI
);
177 flush_cache(host
->start_addr
, ROUND(trans_bytes
, ARCH_DMA_MINALIGN
));
180 static void sdhci_prepare_dma(struct sdhci_host
*host
, struct mmc_data
*data
,
181 int *is_aligned
, int trans_bytes
)
184 static int sdhci_transfer_data(struct sdhci_host
*host
, struct mmc_data
*data
)
186 dma_addr_t start_addr
= host
->start_addr
;
187 unsigned int stat
, rdy
, mask
, timeout
, block
= 0;
188 bool transfer_done
= false;
191 rdy
= SDHCI_INT_SPACE_AVAIL
| SDHCI_INT_DATA_AVAIL
;
192 mask
= SDHCI_DATA_AVAILABLE
| SDHCI_SPACE_AVAILABLE
;
194 stat
= sdhci_readl(host
, SDHCI_INT_STATUS
);
195 if (stat
& SDHCI_INT_ERROR
) {
196 pr_debug("%s: Error detected in status(0x%X)!\n",
200 if (!transfer_done
&& (stat
& rdy
)) {
201 if (!(sdhci_readl(host
, SDHCI_PRESENT_STATE
) & mask
))
203 sdhci_writel(host
, rdy
, SDHCI_INT_STATUS
);
204 sdhci_transfer_pio(host
, data
);
205 data
->dest
+= data
->blocksize
;
206 if (++block
>= data
->blocks
) {
207 /* Keep looping until the SDHCI_INT_DATA_END is
208 * cleared, even if we finished sending all the
211 transfer_done
= true;
215 if ((host
->flags
& USE_DMA
) && !transfer_done
&&
216 (stat
& SDHCI_INT_DMA_END
)) {
217 sdhci_writel(host
, SDHCI_INT_DMA_END
, SDHCI_INT_STATUS
);
218 if (host
->flags
& USE_SDMA
) {
220 ~(SDHCI_DEFAULT_BOUNDARY_SIZE
- 1);
221 start_addr
+= SDHCI_DEFAULT_BOUNDARY_SIZE
;
222 sdhci_writel(host
, start_addr
,
229 printf("%s: Transfer data timeout\n", __func__
);
232 } while (!(stat
& SDHCI_INT_DATA_END
));
/*
 * No command will be sent by driver if card is busy, so driver must wait
 * for card ready state.
 * Every time when card is busy after timeout then (last) timeout value will be
 * increased twice but only if it doesn't exceed global defined maximum.
 * Each function call will use last timeout value.
 */
#define SDHCI_CMD_MAX_TIMEOUT			3200
#define SDHCI_CMD_DEFAULT_TIMEOUT		100
#define SDHCI_READ_STATUS_TIMEOUT		1000
248 static int sdhci_send_command(struct udevice
*dev
, struct mmc_cmd
*cmd
,
249 struct mmc_data
*data
)
251 struct mmc
*mmc
= mmc_get_mmc_dev(dev
);
254 static int sdhci_send_command(struct mmc
*mmc
, struct mmc_cmd
*cmd
,
255 struct mmc_data
*data
)
258 struct sdhci_host
*host
= mmc
->priv
;
259 unsigned int stat
= 0;
261 int trans_bytes
= 0, is_aligned
= 1;
262 u32 mask
, flags
, mode
;
263 unsigned int time
= 0;
264 int mmc_dev
= mmc_get_blk_desc(mmc
)->devnum
;
265 ulong start
= get_timer(0);
267 host
->start_addr
= 0;
268 /* Timeout unit - ms */
269 static unsigned int cmd_timeout
= SDHCI_CMD_DEFAULT_TIMEOUT
;
271 mask
= SDHCI_CMD_INHIBIT
| SDHCI_DATA_INHIBIT
;
273 /* We shouldn't wait for data inihibit for stop commands, even
274 though they might use busy signaling */
275 if (cmd
->cmdidx
== MMC_CMD_STOP_TRANSMISSION
||
276 ((cmd
->cmdidx
== MMC_CMD_SEND_TUNING_BLOCK
||
277 cmd
->cmdidx
== MMC_CMD_SEND_TUNING_BLOCK_HS200
) && !data
))
278 mask
&= ~SDHCI_DATA_INHIBIT
;
280 while (sdhci_readl(host
, SDHCI_PRESENT_STATE
) & mask
) {
281 if (time
>= cmd_timeout
) {
282 printf("%s: MMC: %d busy ", __func__
, mmc_dev
);
283 if (2 * cmd_timeout
<= SDHCI_CMD_MAX_TIMEOUT
) {
284 cmd_timeout
+= cmd_timeout
;
285 printf("timeout increasing to: %u ms.\n",
296 sdhci_writel(host
, SDHCI_INT_ALL_MASK
, SDHCI_INT_STATUS
);
298 mask
= SDHCI_INT_RESPONSE
;
299 if ((cmd
->cmdidx
== MMC_CMD_SEND_TUNING_BLOCK
||
300 cmd
->cmdidx
== MMC_CMD_SEND_TUNING_BLOCK_HS200
) && !data
)
301 mask
= SDHCI_INT_DATA_AVAIL
;
303 if (!(cmd
->resp_type
& MMC_RSP_PRESENT
))
304 flags
= SDHCI_CMD_RESP_NONE
;
305 else if (cmd
->resp_type
& MMC_RSP_136
)
306 flags
= SDHCI_CMD_RESP_LONG
;
307 else if (cmd
->resp_type
& MMC_RSP_BUSY
) {
308 flags
= SDHCI_CMD_RESP_SHORT_BUSY
;
310 mask
|= SDHCI_INT_DATA_END
;
312 flags
= SDHCI_CMD_RESP_SHORT
;
314 if (cmd
->resp_type
& MMC_RSP_CRC
)
315 flags
|= SDHCI_CMD_CRC
;
316 if (cmd
->resp_type
& MMC_RSP_OPCODE
)
317 flags
|= SDHCI_CMD_INDEX
;
318 if (data
|| cmd
->cmdidx
== MMC_CMD_SEND_TUNING_BLOCK
||
319 cmd
->cmdidx
== MMC_CMD_SEND_TUNING_BLOCK_HS200
)
320 flags
|= SDHCI_CMD_DATA
;
322 /* Set Transfer mode regarding to data flag */
324 sdhci_writeb(host
, 0xe, SDHCI_TIMEOUT_CONTROL
);
325 mode
= SDHCI_TRNS_BLK_CNT_EN
;
326 trans_bytes
= data
->blocks
* data
->blocksize
;
327 if (data
->blocks
> 1)
328 mode
|= SDHCI_TRNS_MULTI
;
330 if (data
->flags
== MMC_DATA_READ
)
331 mode
|= SDHCI_TRNS_READ
;
333 if (host
->flags
& USE_DMA
) {
334 mode
|= SDHCI_TRNS_DMA
;
335 sdhci_prepare_dma(host
, data
, &is_aligned
, trans_bytes
);
338 sdhci_writew(host
, SDHCI_MAKE_BLKSZ(SDHCI_DEFAULT_BOUNDARY_ARG
,
341 sdhci_writew(host
, data
->blocks
, SDHCI_BLOCK_COUNT
);
342 sdhci_writew(host
, mode
, SDHCI_TRANSFER_MODE
);
343 } else if (cmd
->resp_type
& MMC_RSP_BUSY
) {
344 sdhci_writeb(host
, 0xe, SDHCI_TIMEOUT_CONTROL
);
347 sdhci_writel(host
, cmd
->cmdarg
, SDHCI_ARGUMENT
);
348 sdhci_writew(host
, SDHCI_MAKE_CMD(cmd
->cmdidx
, flags
), SDHCI_COMMAND
);
349 start
= get_timer(0);
351 stat
= sdhci_readl(host
, SDHCI_INT_STATUS
);
352 if (stat
& SDHCI_INT_ERROR
)
355 if (get_timer(start
) >= SDHCI_READ_STATUS_TIMEOUT
) {
356 if (host
->quirks
& SDHCI_QUIRK_BROKEN_R1B
) {
359 printf("%s: Timeout for status update!\n",
364 } while ((stat
& mask
) != mask
);
366 if ((stat
& (SDHCI_INT_ERROR
| mask
)) == mask
) {
367 sdhci_cmd_done(host
, cmd
);
368 sdhci_writel(host
, mask
, SDHCI_INT_STATUS
);
373 ret
= sdhci_transfer_data(host
, data
);
375 if (host
->quirks
& SDHCI_QUIRK_WAIT_SEND_CMD
)
378 stat
= sdhci_readl(host
, SDHCI_INT_STATUS
);
379 sdhci_writel(host
, SDHCI_INT_ALL_MASK
, SDHCI_INT_STATUS
);
381 if ((host
->quirks
& SDHCI_QUIRK_32BIT_DMA_ADDR
) &&
382 !is_aligned
&& (data
->flags
== MMC_DATA_READ
))
383 memcpy(data
->dest
, aligned_buffer
, trans_bytes
);
387 sdhci_reset(host
, SDHCI_RESET_CMD
);
388 sdhci_reset(host
, SDHCI_RESET_DATA
);
389 if (stat
& SDHCI_INT_TIMEOUT
)
#if defined(CONFIG_DM_MMC) && defined(MMC_SUPPORTS_TUNING)
/*
 * sdhci_execute_tuning() - delegate bus tuning to the platform hook.
 *
 * Controllers that need tuning provide ops->platform_execute_tuning;
 * without one this is a successful no-op.
 */
static int sdhci_execute_tuning(struct udevice *dev, uint opcode)
{
	int err;
	struct mmc *mmc = mmc_get_mmc_dev(dev);
	struct sdhci_host *host = mmc->priv;

	debug("%s\n", __func__);

	if (host->ops && host->ops->platform_execute_tuning) {
		err = host->ops->platform_execute_tuning(mmc, opcode);
		if (err)
			return err;
		return 0;
	}

	return 0;
}
#endif
413 int sdhci_set_clock(struct mmc
*mmc
, unsigned int clock
)
415 struct sdhci_host
*host
= mmc
->priv
;
416 unsigned int div
, clk
= 0, timeout
;
420 while (sdhci_readl(host
, SDHCI_PRESENT_STATE
) &
421 (SDHCI_CMD_INHIBIT
| SDHCI_DATA_INHIBIT
)) {
423 printf("%s: Timeout to wait cmd & data inhibit\n",
432 sdhci_writew(host
, 0, SDHCI_CLOCK_CONTROL
);
437 if (host
->ops
&& host
->ops
->set_delay
)
438 host
->ops
->set_delay(host
);
440 if (SDHCI_GET_VERSION(host
) >= SDHCI_SPEC_300
) {
442 * Check if the Host Controller supports Programmable Clock
446 for (div
= 1; div
<= 1024; div
++) {
447 if ((host
->max_clk
/ div
) <= clock
)
452 * Set Programmable Clock Mode in the Clock
455 clk
= SDHCI_PROG_CLOCK_MODE
;
458 /* Version 3.00 divisors must be a multiple of 2. */
459 if (host
->max_clk
<= clock
) {
463 div
< SDHCI_MAX_DIV_SPEC_300
;
465 if ((host
->max_clk
/ div
) <= clock
)
472 /* Version 2.00 divisors must be a power of 2. */
473 for (div
= 1; div
< SDHCI_MAX_DIV_SPEC_200
; div
*= 2) {
474 if ((host
->max_clk
/ div
) <= clock
)
480 if (host
->ops
&& host
->ops
->set_clock
)
481 host
->ops
->set_clock(host
, div
);
483 clk
|= (div
& SDHCI_DIV_MASK
) << SDHCI_DIVIDER_SHIFT
;
484 clk
|= ((div
& SDHCI_DIV_HI_MASK
) >> SDHCI_DIV_MASK_LEN
)
485 << SDHCI_DIVIDER_HI_SHIFT
;
486 clk
|= SDHCI_CLOCK_INT_EN
;
487 sdhci_writew(host
, clk
, SDHCI_CLOCK_CONTROL
);
491 while (!((clk
= sdhci_readw(host
, SDHCI_CLOCK_CONTROL
))
492 & SDHCI_CLOCK_INT_STABLE
)) {
494 printf("%s: Internal clock never stabilised.\n",
502 clk
|= SDHCI_CLOCK_CARD_EN
;
503 sdhci_writew(host
, clk
, SDHCI_CLOCK_CONTROL
);
507 static void sdhci_set_power(struct sdhci_host
*host
, unsigned short power
)
511 if (power
!= (unsigned short)-1) {
512 switch (1 << power
) {
513 case MMC_VDD_165_195
:
514 pwr
= SDHCI_POWER_180
;
518 pwr
= SDHCI_POWER_300
;
522 pwr
= SDHCI_POWER_330
;
528 sdhci_writeb(host
, 0, SDHCI_POWER_CONTROL
);
532 pwr
|= SDHCI_POWER_ON
;
534 sdhci_writeb(host
, pwr
, SDHCI_POWER_CONTROL
);
537 void sdhci_set_uhs_timing(struct sdhci_host
*host
)
539 struct mmc
*mmc
= (struct mmc
*)host
->mmc
;
542 reg
= sdhci_readw(host
, SDHCI_HOST_CONTROL2
);
543 reg
&= ~SDHCI_CTRL_UHS_MASK
;
545 switch (mmc
->selected_mode
) {
548 reg
|= SDHCI_CTRL_UHS_SDR50
;
552 reg
|= SDHCI_CTRL_UHS_DDR50
;
556 reg
|= SDHCI_CTRL_UHS_SDR104
;
559 reg
|= SDHCI_CTRL_UHS_SDR12
;
562 sdhci_writew(host
, reg
, SDHCI_HOST_CONTROL2
);
566 static int sdhci_set_ios(struct udevice
*dev
)
568 struct mmc
*mmc
= mmc_get_mmc_dev(dev
);
570 static int sdhci_set_ios(struct mmc
*mmc
)
574 struct sdhci_host
*host
= mmc
->priv
;
576 if (host
->ops
&& host
->ops
->set_control_reg
)
577 host
->ops
->set_control_reg(host
);
579 if (mmc
->clock
!= host
->clock
)
580 sdhci_set_clock(mmc
, mmc
->clock
);
582 if (mmc
->clk_disable
)
583 sdhci_set_clock(mmc
, 0);
586 ctrl
= sdhci_readb(host
, SDHCI_HOST_CONTROL
);
587 if (mmc
->bus_width
== 8) {
588 ctrl
&= ~SDHCI_CTRL_4BITBUS
;
589 if ((SDHCI_GET_VERSION(host
) >= SDHCI_SPEC_300
) ||
590 (host
->quirks
& SDHCI_QUIRK_USE_WIDE8
))
591 ctrl
|= SDHCI_CTRL_8BITBUS
;
593 if ((SDHCI_GET_VERSION(host
) >= SDHCI_SPEC_300
) ||
594 (host
->quirks
& SDHCI_QUIRK_USE_WIDE8
))
595 ctrl
&= ~SDHCI_CTRL_8BITBUS
;
596 if (mmc
->bus_width
== 4)
597 ctrl
|= SDHCI_CTRL_4BITBUS
;
599 ctrl
&= ~SDHCI_CTRL_4BITBUS
;
602 if (mmc
->clock
> 26000000)
603 ctrl
|= SDHCI_CTRL_HISPD
;
605 ctrl
&= ~SDHCI_CTRL_HISPD
;
607 if ((host
->quirks
& SDHCI_QUIRK_NO_HISPD_BIT
) ||
608 (host
->quirks
& SDHCI_QUIRK_BROKEN_HISPD_MODE
))
609 ctrl
&= ~SDHCI_CTRL_HISPD
;
611 sdhci_writeb(host
, ctrl
, SDHCI_HOST_CONTROL
);
613 /* If available, call the driver specific "post" set_ios() function */
614 if (host
->ops
&& host
->ops
->set_ios_post
)
615 return host
->ops
->set_ios_post(host
);
620 static int sdhci_init(struct mmc
*mmc
)
622 struct sdhci_host
*host
= mmc
->priv
;
623 #if CONFIG_IS_ENABLED(DM_MMC) && CONFIG_IS_ENABLED(DM_GPIO)
624 struct udevice
*dev
= mmc
->dev
;
626 gpio_request_by_name(dev
, "cd-gpio", 0,
627 &host
->cd_gpio
, GPIOD_IS_IN
);
630 sdhci_reset(host
, SDHCI_RESET_ALL
);
632 if ((host
->quirks
& SDHCI_QUIRK_32BIT_DMA_ADDR
) && !aligned_buffer
) {
633 aligned_buffer
= memalign(8, 512*1024);
634 if (!aligned_buffer
) {
635 printf("%s: Aligned buffer alloc failed!!!\n",
641 sdhci_set_power(host
, fls(mmc
->cfg
->voltages
) - 1);
643 if (host
->ops
&& host
->ops
->get_cd
)
644 host
->ops
->get_cd(host
);
646 /* Enable only interrupts served by the SD controller */
647 sdhci_writel(host
, SDHCI_INT_DATA_MASK
| SDHCI_INT_CMD_MASK
,
649 /* Mask all sdhci interrupt sources */
650 sdhci_writel(host
, 0x0, SDHCI_SIGNAL_ENABLE
);
656 int sdhci_probe(struct udevice
*dev
)
658 struct mmc
*mmc
= mmc_get_mmc_dev(dev
);
660 return sdhci_init(mmc
);
663 int sdhci_get_cd(struct udevice
*dev
)
665 struct mmc
*mmc
= mmc_get_mmc_dev(dev
);
666 struct sdhci_host
*host
= mmc
->priv
;
669 /* If nonremovable, assume that the card is always present. */
670 if (mmc
->cfg
->host_caps
& MMC_CAP_NONREMOVABLE
)
672 /* If polling, assume that the card is always present. */
673 if (mmc
->cfg
->host_caps
& MMC_CAP_NEEDS_POLL
)
676 #if CONFIG_IS_ENABLED(DM_GPIO)
677 value
= dm_gpio_get_value(&host
->cd_gpio
);
679 if (mmc
->cfg
->host_caps
& MMC_CAP_CD_ACTIVE_HIGH
)
685 value
= !!(sdhci_readl(host
, SDHCI_PRESENT_STATE
) &
687 if (mmc
->cfg
->host_caps
& MMC_CAP_CD_ACTIVE_HIGH
)
693 const struct dm_mmc_ops sdhci_ops
= {
694 .send_cmd
= sdhci_send_command
,
695 .set_ios
= sdhci_set_ios
,
696 .get_cd
= sdhci_get_cd
,
697 #ifdef MMC_SUPPORTS_TUNING
698 .execute_tuning
= sdhci_execute_tuning
,
702 static const struct mmc_ops sdhci_ops
= {
703 .send_cmd
= sdhci_send_command
,
704 .set_ios
= sdhci_set_ios
,
709 int sdhci_setup_cfg(struct mmc_config
*cfg
, struct sdhci_host
*host
,
710 u32 f_max
, u32 f_min
)
712 u32 caps
, caps_1
= 0;
713 #if CONFIG_IS_ENABLED(DM_MMC)
716 ret
= dev_read_u32_array(host
->mmc
->dev
, "sdhci-caps-mask",
718 if (ret
&& ret
!= -1)
721 caps
= ~mask
[1] & sdhci_readl(host
, SDHCI_CAPABILITIES
);
723 caps
= sdhci_readl(host
, SDHCI_CAPABILITIES
);
726 #ifdef CONFIG_MMC_SDHCI_SDMA
727 if (!(caps
& SDHCI_CAN_DO_SDMA
)) {
728 printf("%s: Your controller doesn't support SDMA!!\n",
733 host
->flags
|= USE_SDMA
;
735 #if CONFIG_IS_ENABLED(MMC_SDHCI_ADMA)
736 if (!(caps
& SDHCI_CAN_DO_ADMA2
)) {
737 printf("%s: Your controller doesn't support SDMA!!\n",
741 host
->adma_desc_table
= (struct sdhci_adma_desc
*)
742 memalign(ARCH_DMA_MINALIGN
, ADMA_TABLE_SZ
);
744 host
->adma_addr
= (dma_addr_t
)host
->adma_desc_table
;
745 #ifdef CONFIG_DMA_ADDR_T_64BIT
746 host
->flags
|= USE_ADMA64
;
748 host
->flags
|= USE_ADMA
;
751 if (host
->quirks
& SDHCI_QUIRK_REG32_RW
)
753 sdhci_readl(host
, SDHCI_HOST_VERSION
- 2) >> 16;
755 host
->version
= sdhci_readw(host
, SDHCI_HOST_VERSION
);
757 cfg
->name
= host
->name
;
758 #ifndef CONFIG_DM_MMC
759 cfg
->ops
= &sdhci_ops
;
762 /* Check whether the clock multiplier is supported or not */
763 if (SDHCI_GET_VERSION(host
) >= SDHCI_SPEC_300
) {
764 #if CONFIG_IS_ENABLED(DM_MMC)
765 caps_1
= ~mask
[0] & sdhci_readl(host
, SDHCI_CAPABILITIES_1
);
767 caps_1
= sdhci_readl(host
, SDHCI_CAPABILITIES_1
);
769 host
->clk_mul
= (caps_1
& SDHCI_CLOCK_MUL_MASK
) >>
770 SDHCI_CLOCK_MUL_SHIFT
;
773 if (host
->max_clk
== 0) {
774 if (SDHCI_GET_VERSION(host
) >= SDHCI_SPEC_300
)
775 host
->max_clk
= (caps
& SDHCI_CLOCK_V3_BASE_MASK
) >>
776 SDHCI_CLOCK_BASE_SHIFT
;
778 host
->max_clk
= (caps
& SDHCI_CLOCK_BASE_MASK
) >>
779 SDHCI_CLOCK_BASE_SHIFT
;
780 host
->max_clk
*= 1000000;
782 host
->max_clk
*= host
->clk_mul
;
784 if (host
->max_clk
== 0) {
785 printf("%s: Hardware doesn't specify base clock frequency\n",
789 if (f_max
&& (f_max
< host
->max_clk
))
792 cfg
->f_max
= host
->max_clk
;
796 if (SDHCI_GET_VERSION(host
) >= SDHCI_SPEC_300
)
797 cfg
->f_min
= cfg
->f_max
/ SDHCI_MAX_DIV_SPEC_300
;
799 cfg
->f_min
= cfg
->f_max
/ SDHCI_MAX_DIV_SPEC_200
;
802 if (caps
& SDHCI_CAN_VDD_330
)
803 cfg
->voltages
|= MMC_VDD_32_33
| MMC_VDD_33_34
;
804 if (caps
& SDHCI_CAN_VDD_300
)
805 cfg
->voltages
|= MMC_VDD_29_30
| MMC_VDD_30_31
;
806 if (caps
& SDHCI_CAN_VDD_180
)
807 cfg
->voltages
|= MMC_VDD_165_195
;
809 if (host
->quirks
& SDHCI_QUIRK_BROKEN_VOLTAGE
)
810 cfg
->voltages
|= host
->voltages
;
812 cfg
->host_caps
|= MMC_MODE_HS
| MMC_MODE_HS_52MHz
| MMC_MODE_4BIT
;
814 /* Since Host Controller Version3.0 */
815 if (SDHCI_GET_VERSION(host
) >= SDHCI_SPEC_300
) {
816 if (!(caps
& SDHCI_CAN_DO_8BIT
))
817 cfg
->host_caps
&= ~MMC_MODE_8BIT
;
820 if (host
->quirks
& SDHCI_QUIRK_BROKEN_HISPD_MODE
) {
821 cfg
->host_caps
&= ~MMC_MODE_HS
;
822 cfg
->host_caps
&= ~MMC_MODE_HS_52MHz
;
825 if (!(cfg
->voltages
& MMC_VDD_165_195
) ||
826 (host
->quirks
& SDHCI_QUIRK_NO_1_8_V
))
827 caps_1
&= ~(SDHCI_SUPPORT_SDR104
| SDHCI_SUPPORT_SDR50
|
828 SDHCI_SUPPORT_DDR50
);
830 if (caps_1
& (SDHCI_SUPPORT_SDR104
| SDHCI_SUPPORT_SDR50
|
831 SDHCI_SUPPORT_DDR50
))
832 cfg
->host_caps
|= MMC_CAP(UHS_SDR12
) | MMC_CAP(UHS_SDR25
);
834 if (caps_1
& SDHCI_SUPPORT_SDR104
) {
835 cfg
->host_caps
|= MMC_CAP(UHS_SDR104
) | MMC_CAP(UHS_SDR50
);
837 * SD3.0: SDR104 is supported so (for eMMC) the caps2
838 * field can be promoted to support HS200.
840 cfg
->host_caps
|= MMC_CAP(MMC_HS_200
);
841 } else if (caps_1
& SDHCI_SUPPORT_SDR50
) {
842 cfg
->host_caps
|= MMC_CAP(UHS_SDR50
);
845 if (caps_1
& SDHCI_SUPPORT_DDR50
)
846 cfg
->host_caps
|= MMC_CAP(UHS_DDR50
);
849 cfg
->host_caps
|= host
->host_caps
;
851 cfg
->b_max
= CONFIG_SYS_MMC_MAX_BLK_COUNT
;
/* Bind the generic MMC child device for this SDHCI host (DM builds). */
int sdhci_bind(struct udevice *dev, struct mmc *mmc, struct mmc_config *cfg)
{
	return mmc_bind(dev, mmc, cfg);
}
862 int add_sdhci(struct sdhci_host
*host
, u32 f_max
, u32 f_min
)
866 ret
= sdhci_setup_cfg(&host
->cfg
, host
, f_max
, f_min
);
870 host
->mmc
= mmc_create(&host
->cfg
, host
);
871 if (host
->mmc
== NULL
) {
872 printf("%s: mmc create fail!\n", __func__
);