// SPDX-License-Identifier: GPL-2.0+
/*
 * (C) Copyright 2012 SAMSUNG Electronics
 * Jaehoon Chung <jh80.chung@samsung.com>
 * Rajeshawari Shinde <rajeshwari.s@samsung.com>
 */

#include <bouncebuf.h>
#include <common.h>
#include <cpu_func.h>
#include <errno.h>
#include <malloc.h>
#include <memalign.h>
#include <mmc.h>
#include <dwmmc.h>
#include <wait_bit.h>
#include <asm/cache.h>
#include <power/regulator.h>

#define PAGE_SIZE 4096

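/*
 * Write a reset request to the CTRL register and poll until the reset
 * bits clear. Returns 1 on success and 0 if the controller never comes
 * out of reset within the poll loop.
 */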
static int dwmci_wait_reset(struct dwmci_host *host, u32 value)
{
	unsigned long timeout = 1000;
	u32 ctrl;

	dwmci_writel(host, DWMCI_CTRL, value);

	while (timeout--) {
		ctrl = dwmci_readl(host, DWMCI_CTRL);
		if (!(ctrl & DWMCI_RESET_ALL))
			return 1;
	}
	return 0;
}

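/*
 * Fill one internal-DMA (IDMAC) descriptor: desc0 holds the descriptor
 * flags, desc1 the byte count and desc2 the buffer address. The next
 * descriptor is assumed to follow contiguously in memory.
 */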
static void dwmci_set_idma_desc(struct dwmci_idmac *idmac,
				u32 desc0, u32 desc1, u32 desc2)
{
	struct dwmci_idmac *desc = idmac;

	desc->flags = desc0;
	desc->cnt = desc1;
	desc->addr = desc2;
	desc->next_addr = (ulong)desc + sizeof(struct dwmci_idmac);
}

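/*
 * Build the IDMAC descriptor chain for a transfer staged in the bounce
 * buffer, then enable internal DMA and program the block size and total
 * byte count. Each descriptor covers at most 8 blocks, stepping through
 * the bounce buffer in PAGE_SIZE strides.
 */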
static void dwmci_prepare_data(struct dwmci_host *host,
			       struct mmc_data *data,
			       struct dwmci_idmac *cur_idmac,
			       void *bounce_buffer)
{
	unsigned long ctrl;
	unsigned int i = 0, flags, cnt, blk_cnt;
	ulong data_start, data_end;

	blk_cnt = data->blocks;

	dwmci_wait_reset(host, DWMCI_CTRL_FIFO_RESET);

	/* Clear IDMAC interrupt */
	dwmci_writel(host, DWMCI_IDSTS, 0xFFFFFFFF);

	data_start = (ulong)cur_idmac;
	dwmci_writel(host, DWMCI_DBADDR, (ulong)cur_idmac);

	do {
		flags = DWMCI_IDMAC_OWN | DWMCI_IDMAC_CH;
		flags |= (i == 0) ? DWMCI_IDMAC_FS : 0;
		if (blk_cnt <= 8) {
			flags |= DWMCI_IDMAC_LD;
			cnt = data->blocksize * blk_cnt;
		} else
			cnt = data->blocksize * 8;

		dwmci_set_idma_desc(cur_idmac, flags, cnt,
				    (ulong)bounce_buffer + (i * PAGE_SIZE));

		cur_idmac++;
		if (blk_cnt <= 8)
			break;
		blk_cnt -= 8;
		i++;
	} while (1);

	data_end = (ulong)cur_idmac;
	flush_dcache_range(data_start, roundup(data_end, ARCH_DMA_MINALIGN));

	ctrl = dwmci_readl(host, DWMCI_CTRL);
	ctrl |= DWMCI_IDMAC_EN | DWMCI_DMA_EN;
	dwmci_writel(host, DWMCI_CTRL, ctrl);

	ctrl = dwmci_readl(host, DWMCI_BMOD);
	ctrl |= DWMCI_BMOD_IDMAC_FB | DWMCI_BMOD_IDMAC_EN;
	dwmci_writel(host, DWMCI_BMOD, ctrl);

	dwmci_writel(host, DWMCI_BLKSIZ, data->blocksize);
	dwmci_writel(host, DWMCI_BYTCNT, data->blocksize * data->blocks);
}

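/*
 * Poll the STATUS register until the given FIFO bit (empty/full) is
 * deasserted; the last STATUS value is returned through *len so the
 * caller can extract the current FIFO count from it.
 */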
static int dwmci_fifo_ready(struct dwmci_host *host, u32 bit, u32 *len)
{
	u32 timeout = 20000;

	*len = dwmci_readl(host, DWMCI_STATUS);
	while (--timeout && (*len & bit)) {
		udelay(200);
		*len = dwmci_readl(host, DWMCI_STATUS);
	}

	if (!timeout) {
		debug("%s: FIFO underflow timeout\n", __func__);
		return -ETIMEDOUT;
	}

	return 0;
}

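/*
 * Derive a data-transfer timeout in milliseconds from the transfer size
 * and the current bus clock, width and DDR mode, with a 1000 ms floor.
 */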
static unsigned int dwmci_get_timeout(struct mmc *mmc, const unsigned int size)
{
	unsigned int timeout;

	timeout = size * 8;	/* counting in bits */
	timeout *= 10;		/* wait 10 times as long */
	timeout /= mmc->clock;
	timeout /= mmc->bus_width;
	timeout /= mmc->ddr_mode ? 2 : 1;
	timeout *= 1000;	/* counting in msec */
	timeout = (timeout < 1000) ? 1000 : timeout;

	return timeout;
}

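/*
 * Wait for completion of a data transfer. In FIFO (PIO) mode the data is
 * moved to or from the FIFO here, driven by the RXDR/TXDR interrupt
 * status bits; in DMA mode this only waits for the DTO or error bits.
 */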
static int dwmci_data_transfer(struct dwmci_host *host, struct mmc_data *data)
{
	struct mmc *mmc = host->mmc;
	int ret = 0;
	u32 timeout, mask, size, i, len = 0;
	u32 *buf = NULL;
	ulong start = get_timer(0);
	u32 fifo_depth = (((host->fifoth_val & RX_WMARK_MASK) >>
			    RX_WMARK_SHIFT) + 1) * 2;

	size = data->blocksize * data->blocks;
	if (data->flags == MMC_DATA_READ)
		buf = (unsigned int *)data->dest;
	else
		buf = (unsigned int *)data->src;

	timeout = dwmci_get_timeout(mmc, size);

	size /= 4;

	for (;;) {
		mask = dwmci_readl(host, DWMCI_RINTSTS);
		/* Error during data transfer. */
		if (mask & (DWMCI_DATA_ERR | DWMCI_DATA_TOUT)) {
			debug("%s: DATA ERROR!\n", __func__);
			ret = -EINVAL;
			break;
		}

		if (host->fifo_mode && size) {
			len = 0;
			if (data->flags == MMC_DATA_READ &&
			    (mask & DWMCI_INTMSK_RXDR)) {
				while (size) {
					ret = dwmci_fifo_ready(host,
							DWMCI_FIFO_EMPTY,
							&len);
					if (ret < 0)
						break;

					len = (len >> DWMCI_FIFO_SHIFT) &
						    DWMCI_FIFO_MASK;
					len = min(size, len);
					for (i = 0; i < len; i++)
						*buf++ =
						dwmci_readl(host, DWMCI_DATA);
					size = size > len ? (size - len) : 0;
				}
				dwmci_writel(host, DWMCI_RINTSTS,
					     DWMCI_INTMSK_RXDR);
			} else if (data->flags == MMC_DATA_WRITE &&
				   (mask & DWMCI_INTMSK_TXDR)) {
				while (size) {
					ret = dwmci_fifo_ready(host,
							DWMCI_FIFO_FULL,
							&len);
					if (ret < 0)
						break;

					len = fifo_depth - ((len >>
						   DWMCI_FIFO_SHIFT) &
						   DWMCI_FIFO_MASK);
					len = min(size, len);
					for (i = 0; i < len; i++)
						dwmci_writel(host, DWMCI_DATA,
							     *buf++);
					size = size > len ? (size - len) : 0;
				}
				dwmci_writel(host, DWMCI_RINTSTS,
					     DWMCI_INTMSK_TXDR);
			}
		}

		/* Data arrived correctly. */
		if (mask & DWMCI_INTMSK_DTO) {
			ret = 0;
			break;
		}

		/* Check for timeout. */
		if (get_timer(start) > timeout) {
			debug("%s: Timeout waiting for data!\n",
			      __func__);
			ret = -ETIMEDOUT;
			break;
		}
	}

	dwmci_writel(host, DWMCI_RINTSTS, mask);

	return ret;
}

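/* Build the data-related CMD register flags for a transfer. */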
static int dwmci_set_transfer_mode(struct dwmci_host *host,
				   struct mmc_data *data)
{
	unsigned long mode;

	mode = DWMCI_CMD_DATA_EXP;
	if (data->flags & MMC_DATA_WRITE)
		mode |= DWMCI_CMD_RW;

	return mode;
}

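/*
 * Send a command, optionally with data. In FIFO mode the block registers
 * are programmed directly; otherwise the data is staged through a bounce
 * buffer and the IDMAC descriptor chain set up by dwmci_prepare_data().
 */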
#ifdef CONFIG_DM_MMC
static int dwmci_send_cmd(struct udevice *dev, struct mmc_cmd *cmd,
			  struct mmc_data *data)
{
	struct mmc *mmc = mmc_get_mmc_dev(dev);
#else
static int dwmci_send_cmd(struct mmc *mmc, struct mmc_cmd *cmd,
			  struct mmc_data *data)
{
#endif
	struct dwmci_host *host = mmc->priv;
	ALLOC_CACHE_ALIGN_BUFFER(struct dwmci_idmac, cur_idmac,
				 data ? DIV_ROUND_UP(data->blocks, 8) : 0);
	int ret = 0, flags = 0, i;
	unsigned int timeout = 500;
	u32 retry = 100000;
	u32 mask, ctrl;
	ulong start = get_timer(0);
	struct bounce_buffer bbstate;

	while (dwmci_readl(host, DWMCI_STATUS) & DWMCI_BUSY) {
		if (get_timer(start) > timeout) {
			debug("%s: Timeout on data busy\n", __func__);
			return -ETIMEDOUT;
		}
	}

	dwmci_writel(host, DWMCI_RINTSTS, DWMCI_INTMSK_ALL);

	if (data) {
		if (host->fifo_mode) {
			dwmci_writel(host, DWMCI_BLKSIZ, data->blocksize);
			dwmci_writel(host, DWMCI_BYTCNT,
				     data->blocksize * data->blocks);
			dwmci_wait_reset(host, DWMCI_CTRL_FIFO_RESET);
		} else {
			if (data->flags == MMC_DATA_READ) {
				ret = bounce_buffer_start(&bbstate,
						(void *)data->dest,
						data->blocksize *
						data->blocks, GEN_BB_WRITE);
			} else {
				ret = bounce_buffer_start(&bbstate,
						(void *)data->src,
						data->blocksize *
						data->blocks, GEN_BB_READ);
			}

			if (ret)
				return ret;

			dwmci_prepare_data(host, data, cur_idmac,
					   bbstate.bounce_buffer);
		}
	}

	dwmci_writel(host, DWMCI_CMDARG, cmd->cmdarg);

	if (data)
		flags = dwmci_set_transfer_mode(host, data);

	if ((cmd->resp_type & MMC_RSP_136) && (cmd->resp_type & MMC_RSP_BUSY))
		return -1;

	if (cmd->cmdidx == MMC_CMD_STOP_TRANSMISSION)
		flags |= DWMCI_CMD_ABORT_STOP;
	else
		flags |= DWMCI_CMD_PRV_DAT_WAIT;

	if (cmd->resp_type & MMC_RSP_PRESENT) {
		flags |= DWMCI_CMD_RESP_EXP;
		if (cmd->resp_type & MMC_RSP_136)
			flags |= DWMCI_CMD_RESP_LENGTH;
	}

	if (cmd->resp_type & MMC_RSP_CRC)
		flags |= DWMCI_CMD_CHECK_CRC;

	flags |= (cmd->cmdidx | DWMCI_CMD_START | DWMCI_CMD_USE_HOLD_REG);

	debug("Sending CMD%d\n", cmd->cmdidx);

	dwmci_writel(host, DWMCI_CMD, flags);

	for (i = 0; i < retry; i++) {
		mask = dwmci_readl(host, DWMCI_RINTSTS);
		if (mask & DWMCI_INTMSK_CDONE) {
			if (!data)
				dwmci_writel(host, DWMCI_RINTSTS, mask);
			break;
		}
	}

	if (i == retry) {
		debug("%s: Timeout.\n", __func__);
		return -ETIMEDOUT;
	}

	if (mask & DWMCI_INTMSK_RTO) {
		/*
		 * Timeout here is not necessarily fatal. (e)MMC cards
		 * will splat here when they receive CMD55 as they do
		 * not support this command and that is exactly the way
		 * to tell them apart from SD cards. Thus, this output
		 * below shall be debug(). eMMC cards also do not favor
		 * CMD8, please keep that in mind.
		 */
		debug("%s: Response Timeout.\n", __func__);
		return -ETIMEDOUT;
	} else if (mask & DWMCI_INTMSK_RE) {
		debug("%s: Response Error.\n", __func__);
		return -EIO;
	} else if ((cmd->resp_type & MMC_RSP_CRC) &&
		   (mask & DWMCI_INTMSK_RCRC)) {
		debug("%s: Response CRC Error.\n", __func__);
		return -EIO;
	}

	if (cmd->resp_type & MMC_RSP_PRESENT) {
		if (cmd->resp_type & MMC_RSP_136) {
			cmd->response[0] = dwmci_readl(host, DWMCI_RESP3);
			cmd->response[1] = dwmci_readl(host, DWMCI_RESP2);
			cmd->response[2] = dwmci_readl(host, DWMCI_RESP1);
			cmd->response[3] = dwmci_readl(host, DWMCI_RESP0);
		} else {
			cmd->response[0] = dwmci_readl(host, DWMCI_RESP0);
		}
	}

	if (data) {
		ret = dwmci_data_transfer(host, data);

		/* Only DMA mode needs this. */
		if (!host->fifo_mode) {
			if (data->flags == MMC_DATA_READ)
				mask = DWMCI_IDINTEN_RI;
			else
				mask = DWMCI_IDINTEN_TI;
			ret = wait_for_bit_le32(host->ioaddr + DWMCI_IDSTS,
						mask, true, 1000, false);
			if (ret)
				debug("%s: DWMCI_IDINTEN mask 0x%x timeout.\n",
				      __func__, mask);
			/* clear interrupts */
			dwmci_writel(host, DWMCI_IDSTS, DWMCI_IDINTEN_MASK);

			ctrl = dwmci_readl(host, DWMCI_CTRL);
			ctrl &= ~(DWMCI_DMA_EN);
			dwmci_writel(host, DWMCI_CTRL, ctrl);
			bounce_buffer_stop(&bbstate);
		}
	}

	udelay(100);

	return ret;
}

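/*
 * Program the card clock divider for the requested frequency and re-enable
 * the clock. Each divider/enable update is latched by issuing an
 * update-clock command and waiting for the START bit to clear.
 */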
static int dwmci_setup_bus(struct dwmci_host *host, u32 freq)
{
	u32 div, status;
	int timeout = 10000;
	unsigned long sclk;

	if ((freq == host->clock) || (freq == 0))
		return 0;
	/*
	 * If host->get_mmc_clk isn't defined, assume that host->bus_hz is
	 * the source clock value; host->bus_hz should be set by the user.
	 */
	if (host->get_mmc_clk)
		sclk = host->get_mmc_clk(host, freq);
	else if (host->bus_hz)
		sclk = host->bus_hz;
	else {
		debug("%s: Didn't get source clock value.\n", __func__);
		return -EINVAL;
	}

	if (sclk == freq)
		div = 0;	/* bypass mode */
	else
		div = DIV_ROUND_UP(sclk, 2 * freq);

	dwmci_writel(host, DWMCI_CLKENA, 0);
	dwmci_writel(host, DWMCI_CLKSRC, 0);

	dwmci_writel(host, DWMCI_CLKDIV, div);
	dwmci_writel(host, DWMCI_CMD, DWMCI_CMD_PRV_DAT_WAIT |
		     DWMCI_CMD_UPD_CLK | DWMCI_CMD_START);

	do {
		status = dwmci_readl(host, DWMCI_CMD);
		if (timeout-- < 0) {
			debug("%s: Timeout!\n", __func__);
			return -ETIMEDOUT;
		}
	} while (status & DWMCI_CMD_START);

	dwmci_writel(host, DWMCI_CLKENA, DWMCI_CLKEN_ENABLE |
		     DWMCI_CLKEN_LOW_PWR);

	dwmci_writel(host, DWMCI_CMD, DWMCI_CMD_PRV_DAT_WAIT |
		     DWMCI_CMD_UPD_CLK | DWMCI_CMD_START);

	timeout = 10000;
	do {
		status = dwmci_readl(host, DWMCI_CMD);
		if (timeout-- < 0) {
			debug("%s: Timeout!\n", __func__);
			return -ETIMEDOUT;
		}
	} while (status & DWMCI_CMD_START);

	host->clock = freq;

	return 0;
}

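/*
 * Apply the current bus settings: clock frequency, bus width, DDR mode
 * and, when a vqmmc regulator is available, the I/O signal voltage.
 */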
#ifdef CONFIG_DM_MMC
static int dwmci_set_ios(struct udevice *dev)
{
	struct mmc *mmc = mmc_get_mmc_dev(dev);
#else
static int dwmci_set_ios(struct mmc *mmc)
{
#endif
	struct dwmci_host *host = (struct dwmci_host *)mmc->priv;
	u32 ctype, regs;

	debug("Buswidth = %d, clock: %d\n", mmc->bus_width, mmc->clock);

	dwmci_setup_bus(host, mmc->clock);
	switch (mmc->bus_width) {
	case 8:
		ctype = DWMCI_CTYPE_8BIT;
		break;
	case 4:
		ctype = DWMCI_CTYPE_4BIT;
		break;
	default:
		ctype = DWMCI_CTYPE_1BIT;
		break;
	}

	dwmci_writel(host, DWMCI_CTYPE, ctype);

	regs = dwmci_readl(host, DWMCI_UHS_REG);
	if (mmc->ddr_mode)
		regs |= DWMCI_DDR_MODE;
	else
		regs &= ~DWMCI_DDR_MODE;

	dwmci_writel(host, DWMCI_UHS_REG, regs);

	if (host->clksel)
		host->clksel(host);

#if CONFIG_IS_ENABLED(DM_REGULATOR)
	if (mmc->vqmmc_supply) {
		int ret;

		if (mmc->signal_voltage == MMC_SIGNAL_VOLTAGE_180)
			regulator_set_value(mmc->vqmmc_supply, 1800000);
		else
			regulator_set_value(mmc->vqmmc_supply, 3300000);

		ret = regulator_set_enable_if_allowed(mmc->vqmmc_supply, true);
		if (ret)
			return ret;
	}
#endif

	return 0;
}

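/*
 * Power up and reset the controller, set the enumeration clock, mask all
 * interrupts and program the FIFO thresholds (probing the FIFO depth from
 * the reset value of FIFOTH when the platform did not provide one).
 */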
static int dwmci_init(struct mmc *mmc)
{
	struct dwmci_host *host = mmc->priv;

	if (host->board_init)
		host->board_init(host);

	dwmci_writel(host, DWMCI_PWREN, 1);

	if (!dwmci_wait_reset(host, DWMCI_RESET_ALL)) {
		debug("%s[%d] Fail-reset!!\n", __func__, __LINE__);
		return -EIO;
	}

	/* Enumerate at 400KHz */
	dwmci_setup_bus(host, mmc->cfg->f_min);

	dwmci_writel(host, DWMCI_RINTSTS, 0xFFFFFFFF);
	dwmci_writel(host, DWMCI_INTMASK, 0);

	dwmci_writel(host, DWMCI_TMOUT, 0xFFFFFFFF);

	dwmci_writel(host, DWMCI_IDINTEN, 0);
	dwmci_writel(host, DWMCI_BMOD, 1);

	if (!host->fifoth_val) {
		uint32_t fifo_size;

		fifo_size = dwmci_readl(host, DWMCI_FIFOTH);
		fifo_size = ((fifo_size & RX_WMARK_MASK) >> RX_WMARK_SHIFT) + 1;
		host->fifoth_val = MSIZE(0x2) | RX_WMARK(fifo_size / 2 - 1) |
				   TX_WMARK(fifo_size / 2);
	}
	dwmci_writel(host, DWMCI_FIFOTH, host->fifoth_val);

	dwmci_writel(host, DWMCI_CLKENA, 0);
	dwmci_writel(host, DWMCI_CLKSRC, 0);

	if (!host->fifo_mode)
		dwmci_writel(host, DWMCI_IDINTEN, DWMCI_IDINTEN_MASK);

	return 0;
}

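/*
 * Driver-model entry points; the legacy mmc_ops table below is used by
 * boards that have not been converted to DM_MMC.
 */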
#ifdef CONFIG_DM_MMC
int dwmci_probe(struct udevice *dev)
{
	struct mmc *mmc = mmc_get_mmc_dev(dev);

	return dwmci_init(mmc);
}

const struct dm_mmc_ops dm_dwmci_ops = {
	.send_cmd = dwmci_send_cmd,
	.set_ios = dwmci_set_ios,
};

#else
static const struct mmc_ops dwmci_ops = {
	.send_cmd = dwmci_send_cmd,
	.set_ios = dwmci_set_ios,
	.init = dwmci_init,
};
#endif

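/*
 * Fill in the mmc_config for this host: clock limits, supported voltages
 * and bus-width/high-speed capabilities derived from the dwmci_host.
 */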
void dwmci_setup_cfg(struct mmc_config *cfg, struct dwmci_host *host,
		     u32 max_clk, u32 min_clk)
{
	cfg->name = host->name;
#ifndef CONFIG_DM_MMC
	cfg->ops = &dwmci_ops;
#endif
	cfg->f_min = min_clk;
	cfg->f_max = max_clk;

	cfg->voltages = MMC_VDD_32_33 | MMC_VDD_33_34 | MMC_VDD_165_195;

	cfg->host_caps = host->caps;

	if (host->buswidth == 8) {
		cfg->host_caps |= MMC_MODE_8BIT;
		cfg->host_caps &= ~MMC_MODE_4BIT;
	} else {
		cfg->host_caps |= MMC_MODE_4BIT;
		cfg->host_caps &= ~MMC_MODE_8BIT;
	}
	cfg->host_caps |= MMC_MODE_HS | MMC_MODE_HS_52MHz;

	cfg->b_max = CONFIG_SYS_MMC_MAX_BLK_COUNT;
}

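/*
 * Register the host with the MMC core, either through the driver model
 * (dwmci_bind) or the legacy add_dwmci() path.
 */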
#ifdef CONFIG_BLK
int dwmci_bind(struct udevice *dev, struct mmc *mmc, struct mmc_config *cfg)
{
	return mmc_bind(dev, mmc, cfg);
}
#else
int add_dwmci(struct dwmci_host *host, u32 max_clk, u32 min_clk)
{
	dwmci_setup_cfg(&host->cfg, host, max_clk, min_clk);

	host->mmc = mmc_create(&host->cfg, host);
	if (host->mmc == NULL)
		return -1;

	return 0;
}
#endif