drivers/mmc/dw_mmc.c
/*
 * (C) Copyright 2012 SAMSUNG Electronics
 * Jaehoon Chung <jh80.chung@samsung.com>
 * Rajeshawari Shinde <rajeshwari.s@samsung.com>
 *
 * SPDX-License-Identifier:	GPL-2.0+
 */

#include <bouncebuf.h>
#include <common.h>
#include <errno.h>
#include <malloc.h>
#include <memalign.h>
#include <mmc.h>
#include <dwmmc.h>

#define PAGE_SIZE 4096

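/*
 * dwmci_wait_reset() - request a controller reset and poll for completion
 *
 * Writes the requested reset bits to the CTRL register and busy-waits
 * until the hardware has cleared all DWMCI_RESET_ALL bits.
 *
 * Return: 1 if the reset completed, 0 on timeout.
 */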
static int dwmci_wait_reset(struct dwmci_host *host, u32 value)
{
	unsigned long timeout = 1000;
	u32 ctrl;

	dwmci_writel(host, DWMCI_CTRL, value);

	while (timeout--) {
		ctrl = dwmci_readl(host, DWMCI_CTRL);
		if (!(ctrl & DWMCI_RESET_ALL))
			return 1;
	}
	return 0;
}

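/*
 * dwmci_set_idma_desc() - fill one internal DMA (IDMAC) descriptor
 *
 * Stores the descriptor flags, the byte count and the buffer address,
 * and chains the entry to the next descriptor in the contiguous array.
 */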
static void dwmci_set_idma_desc(struct dwmci_idmac *idmac,
		u32 desc0, u32 desc1, u32 desc2)
{
	struct dwmci_idmac *desc = idmac;

	desc->flags = desc0;
	desc->cnt = desc1;
	desc->addr = desc2;
	desc->next_addr = (ulong)desc + sizeof(struct dwmci_idmac);
}

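/*
 * dwmci_prepare_data() - build the IDMAC descriptor chain for a transfer
 *
 * Splits the transfer into segments of at most 8 blocks, writes one
 * chained descriptor per segment (advancing through the bounce buffer
 * one page at a time), flushes the list to memory, enables internal DMA
 * and programs the block size and total byte count registers.
 */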
static void dwmci_prepare_data(struct dwmci_host *host,
			       struct mmc_data *data,
			       struct dwmci_idmac *cur_idmac,
			       void *bounce_buffer)
{
	unsigned long ctrl;
	unsigned int i = 0, flags, cnt, blk_cnt;
	ulong data_start, data_end;

	blk_cnt = data->blocks;

	dwmci_wait_reset(host, DWMCI_CTRL_FIFO_RESET);

	data_start = (ulong)cur_idmac;
	dwmci_writel(host, DWMCI_DBADDR, (ulong)cur_idmac);

	do {
		flags = DWMCI_IDMAC_OWN | DWMCI_IDMAC_CH;
		flags |= (i == 0) ? DWMCI_IDMAC_FS : 0;
		if (blk_cnt <= 8) {
			flags |= DWMCI_IDMAC_LD;
			cnt = data->blocksize * blk_cnt;
		} else
			cnt = data->blocksize * 8;

		dwmci_set_idma_desc(cur_idmac, flags, cnt,
				    (ulong)bounce_buffer + (i * PAGE_SIZE));

		if (blk_cnt <= 8)
			break;
		blk_cnt -= 8;
		cur_idmac++;
		i++;
	} while (1);

	data_end = (ulong)cur_idmac;
	flush_dcache_range(data_start, data_end + ARCH_DMA_MINALIGN);

	ctrl = dwmci_readl(host, DWMCI_CTRL);
	ctrl |= DWMCI_IDMAC_EN | DWMCI_DMA_EN;
	dwmci_writel(host, DWMCI_CTRL, ctrl);

	ctrl = dwmci_readl(host, DWMCI_BMOD);
	ctrl |= DWMCI_BMOD_IDMAC_FB | DWMCI_BMOD_IDMAC_EN;
	dwmci_writel(host, DWMCI_BMOD, ctrl);

	dwmci_writel(host, DWMCI_BLKSIZ, data->blocksize);
	dwmci_writel(host, DWMCI_BYTCNT, data->blocksize * data->blocks);
}

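/*
 * dwmci_data_transfer() - wait for the data phase of a transfer to finish
 *
 * In FIFO (PIO) mode the FIFO is drained or refilled whenever the RX/TX
 * data-request bit is raised; in DMA mode the function simply waits for
 * the data-transfer-over interrupt. A data error returns -EINVAL, a
 * stalled transfer returns TIMEOUT.
 */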
static int dwmci_data_transfer(struct dwmci_host *host, struct mmc_data *data)
{
	int ret = 0;
	u32 timeout = 240000;
	u32 mask, size, i, len = 0;
	u32 *buf = NULL;
	ulong start = get_timer(0);
	u32 fifo_depth = (((host->fifoth_val & RX_WMARK_MASK) >>
			    RX_WMARK_SHIFT) + 1) * 2;

	size = data->blocksize * data->blocks / 4;
	if (data->flags == MMC_DATA_READ)
		buf = (unsigned int *)data->dest;
	else
		buf = (unsigned int *)data->src;

	for (;;) {
		mask = dwmci_readl(host, DWMCI_RINTSTS);
		/* Error during data transfer. */
		if (mask & (DWMCI_DATA_ERR | DWMCI_DATA_TOUT)) {
			debug("%s: DATA ERROR!\n", __func__);
			ret = -EINVAL;
			break;
		}

		if (host->fifo_mode && size) {
			if (data->flags == MMC_DATA_READ) {
				if ((dwmci_readl(host, DWMCI_RINTSTS) &
				     DWMCI_INTMSK_RXDR)) {
					len = dwmci_readl(host, DWMCI_STATUS);
					len = (len >> DWMCI_FIFO_SHIFT) &
					       DWMCI_FIFO_MASK;
					for (i = 0; i < len; i++)
						*buf++ =
						dwmci_readl(host, DWMCI_DATA);
					dwmci_writel(host, DWMCI_RINTSTS,
						     DWMCI_INTMSK_RXDR);
				}
			} else {
				if ((dwmci_readl(host, DWMCI_RINTSTS) &
				     DWMCI_INTMSK_TXDR)) {
					len = dwmci_readl(host, DWMCI_STATUS);
					len = fifo_depth - ((len >>
						   DWMCI_FIFO_SHIFT) &
						   DWMCI_FIFO_MASK);
					for (i = 0; i < len; i++)
						dwmci_writel(host, DWMCI_DATA,
							     *buf++);
					dwmci_writel(host, DWMCI_RINTSTS,
						     DWMCI_INTMSK_TXDR);
				}
			}
			size = size > len ? (size - len) : 0;
		}

		/* Data arrived correctly. */
		if (mask & DWMCI_INTMSK_DTO) {
			ret = 0;
			break;
		}

		/* Check for timeout. */
		if (get_timer(start) > timeout) {
			debug("%s: Timeout waiting for data!\n",
			      __func__);
			ret = TIMEOUT;
			break;
		}
	}

	dwmci_writel(host, DWMCI_RINTSTS, mask);

	return ret;
}

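/*
 * dwmci_set_transfer_mode() - derive the data-related bits of the CMD word
 *
 * Always marks data as expected and additionally flags the transfer as a
 * write when MMC_DATA_WRITE is set.
 */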
static int dwmci_set_transfer_mode(struct dwmci_host *host,
				   struct mmc_data *data)
{
	unsigned long mode;

	mode = DWMCI_CMD_DATA_EXP;
	if (data->flags & MMC_DATA_WRITE)
		mode |= DWMCI_CMD_RW;

	return mode;
}

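/*
 * dwmci_send_cmd() - send a command and handle an optional data transfer
 *
 * Waits for the controller to leave the busy state, sets up either FIFO
 * or IDMAC transfers for the data phase, programs the argument and
 * command registers and then polls RINTSTS for command completion,
 * response errors and, when data is attached, the end of the data phase.
 */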
#ifdef CONFIG_DM_MMC_OPS
static int dwmci_send_cmd(struct udevice *dev, struct mmc_cmd *cmd,
			  struct mmc_data *data)
{
	struct mmc *mmc = mmc_get_mmc_dev(dev);
#else
static int dwmci_send_cmd(struct mmc *mmc, struct mmc_cmd *cmd,
			  struct mmc_data *data)
{
#endif
	struct dwmci_host *host = mmc->priv;
	ALLOC_CACHE_ALIGN_BUFFER(struct dwmci_idmac, cur_idmac,
				 data ? DIV_ROUND_UP(data->blocks, 8) : 0);
	int ret = 0, flags = 0, i;
	unsigned int timeout = 500;
	u32 retry = 100000;
	u32 mask, ctrl;
	ulong start = get_timer(0);
	struct bounce_buffer bbstate;

	while (dwmci_readl(host, DWMCI_STATUS) & DWMCI_BUSY) {
		if (get_timer(start) > timeout) {
			debug("%s: Timeout on data busy\n", __func__);
			return TIMEOUT;
		}
	}

	dwmci_writel(host, DWMCI_RINTSTS, DWMCI_INTMSK_ALL);

	if (data) {
		if (host->fifo_mode) {
			dwmci_writel(host, DWMCI_BLKSIZ, data->blocksize);
			dwmci_writel(host, DWMCI_BYTCNT,
				     data->blocksize * data->blocks);
			dwmci_wait_reset(host, DWMCI_CTRL_FIFO_RESET);
		} else {
			if (data->flags == MMC_DATA_READ) {
				bounce_buffer_start(&bbstate, (void*)data->dest,
						    data->blocksize *
						    data->blocks, GEN_BB_WRITE);
			} else {
				bounce_buffer_start(&bbstate, (void*)data->src,
						    data->blocksize *
						    data->blocks, GEN_BB_READ);
			}
			dwmci_prepare_data(host, data, cur_idmac,
					   bbstate.bounce_buffer);
		}
	}

	dwmci_writel(host, DWMCI_CMDARG, cmd->cmdarg);

	if (data)
		flags = dwmci_set_transfer_mode(host, data);

	if ((cmd->resp_type & MMC_RSP_136) && (cmd->resp_type & MMC_RSP_BUSY))
		return -1;

	if (cmd->cmdidx == MMC_CMD_STOP_TRANSMISSION)
		flags |= DWMCI_CMD_ABORT_STOP;
	else
		flags |= DWMCI_CMD_PRV_DAT_WAIT;

	if (cmd->resp_type & MMC_RSP_PRESENT) {
		flags |= DWMCI_CMD_RESP_EXP;
		if (cmd->resp_type & MMC_RSP_136)
			flags |= DWMCI_CMD_RESP_LENGTH;
	}

	if (cmd->resp_type & MMC_RSP_CRC)
		flags |= DWMCI_CMD_CHECK_CRC;

	flags |= (cmd->cmdidx | DWMCI_CMD_START | DWMCI_CMD_USE_HOLD_REG);

	debug("Sending CMD%d\n", cmd->cmdidx);

	dwmci_writel(host, DWMCI_CMD, flags);

	for (i = 0; i < retry; i++) {
		mask = dwmci_readl(host, DWMCI_RINTSTS);
		if (mask & DWMCI_INTMSK_CDONE) {
			if (!data)
				dwmci_writel(host, DWMCI_RINTSTS, mask);
			break;
		}
	}

	if (i == retry) {
		debug("%s: Timeout.\n", __func__);
		return TIMEOUT;
	}

	if (mask & DWMCI_INTMSK_RTO) {
		/*
		 * Timeout here is not necessarily fatal. (e)MMC cards
		 * will splat here when they receive CMD55 as they do
		 * not support this command and that is exactly the way
		 * to tell them apart from SD cards. Thus, this output
		 * below shall be debug(). eMMC cards also do not favor
		 * CMD8, please keep that in mind.
		 */
		debug("%s: Response Timeout.\n", __func__);
		return TIMEOUT;
	} else if (mask & DWMCI_INTMSK_RE) {
		debug("%s: Response Error.\n", __func__);
		return -EIO;
	}

	if (cmd->resp_type & MMC_RSP_PRESENT) {
		if (cmd->resp_type & MMC_RSP_136) {
			cmd->response[0] = dwmci_readl(host, DWMCI_RESP3);
			cmd->response[1] = dwmci_readl(host, DWMCI_RESP2);
			cmd->response[2] = dwmci_readl(host, DWMCI_RESP1);
			cmd->response[3] = dwmci_readl(host, DWMCI_RESP0);
		} else {
			cmd->response[0] = dwmci_readl(host, DWMCI_RESP0);
		}
	}

	if (data) {
		ret = dwmci_data_transfer(host, data);

		/* Only DMA mode needs this. */
		if (!host->fifo_mode) {
			ctrl = dwmci_readl(host, DWMCI_CTRL);
			ctrl &= ~(DWMCI_DMA_EN);
			dwmci_writel(host, DWMCI_CTRL, ctrl);
			bounce_buffer_stop(&bbstate);
		}
	}

	udelay(100);

	return ret;
}

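/*
 * dwmci_setup_bus() - program the card clock for the requested frequency
 *
 * Derives the divider from the source clock (host->get_mmc_clk() if set,
 * otherwise host->bus_hz) and the requested frequency, then disables the
 * clock, updates the divider and re-enables the clock, issuing a
 * clock-update command after each step and waiting for the controller
 * to accept it.
 */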
static int dwmci_setup_bus(struct dwmci_host *host, u32 freq)
{
	u32 div, status;
	int timeout = 10000;
	unsigned long sclk;

	if ((freq == host->clock) || (freq == 0))
		return 0;
	/*
	 * If host->get_mmc_clk isn't defined,
	 * then assume that host->bus_hz is the source clock value.
	 * host->bus_hz should be set by the user.
	 */
	if (host->get_mmc_clk)
		sclk = host->get_mmc_clk(host, freq);
	else if (host->bus_hz)
		sclk = host->bus_hz;
	else {
		debug("%s: Didn't get source clock value.\n", __func__);
		return -EINVAL;
	}

	if (sclk == freq)
		div = 0;	/* bypass mode */
	else
		div = DIV_ROUND_UP(sclk, 2 * freq);

	dwmci_writel(host, DWMCI_CLKENA, 0);
	dwmci_writel(host, DWMCI_CLKSRC, 0);

	dwmci_writel(host, DWMCI_CLKDIV, div);
	dwmci_writel(host, DWMCI_CMD, DWMCI_CMD_PRV_DAT_WAIT |
		     DWMCI_CMD_UPD_CLK | DWMCI_CMD_START);

	do {
		status = dwmci_readl(host, DWMCI_CMD);
		if (timeout-- < 0) {
			debug("%s: Timeout!\n", __func__);
			return -ETIMEDOUT;
		}
	} while (status & DWMCI_CMD_START);

	dwmci_writel(host, DWMCI_CLKENA, DWMCI_CLKEN_ENABLE |
		     DWMCI_CLKEN_LOW_PWR);

	dwmci_writel(host, DWMCI_CMD, DWMCI_CMD_PRV_DAT_WAIT |
		     DWMCI_CMD_UPD_CLK | DWMCI_CMD_START);

	timeout = 10000;
	do {
		status = dwmci_readl(host, DWMCI_CMD);
		if (timeout-- < 0) {
			debug("%s: Timeout!\n", __func__);
			return -ETIMEDOUT;
		}
	} while (status & DWMCI_CMD_START);

	host->clock = freq;

	return 0;
}

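/*
 * dwmci_set_ios() - apply the requested clock, bus width and DDR mode
 */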
#ifdef CONFIG_DM_MMC_OPS
static int dwmci_set_ios(struct udevice *dev)
{
	struct mmc *mmc = mmc_get_mmc_dev(dev);
#else
static void dwmci_set_ios(struct mmc *mmc)
{
#endif
	struct dwmci_host *host = (struct dwmci_host *)mmc->priv;
	u32 ctype, regs;

	debug("Buswidth = %d, clock: %d\n", mmc->bus_width, mmc->clock);

	dwmci_setup_bus(host, mmc->clock);
	switch (mmc->bus_width) {
	case 8:
		ctype = DWMCI_CTYPE_8BIT;
		break;
	case 4:
		ctype = DWMCI_CTYPE_4BIT;
		break;
	default:
		ctype = DWMCI_CTYPE_1BIT;
		break;
	}

	dwmci_writel(host, DWMCI_CTYPE, ctype);

	regs = dwmci_readl(host, DWMCI_UHS_REG);
	if (mmc->ddr_mode)
		regs |= DWMCI_DDR_MODE;
	else
		regs &= ~DWMCI_DDR_MODE;

	dwmci_writel(host, DWMCI_UHS_REG, regs);

	if (host->clksel)
		host->clksel(host);
#ifdef CONFIG_DM_MMC_OPS
	return 0;
#endif
}

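/*
 * dwmci_init() - bring the controller into a known state
 *
 * Runs the optional board hook, enables card power, performs a full
 * controller reset, programs an initial enumeration clock, masks and
 * clears all interrupts, sets the data timeout and configures the FIFO
 * watermarks (computing a default when host->fifoth_val is not set).
 */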
static int dwmci_init(struct mmc *mmc)
{
	struct dwmci_host *host = mmc->priv;

	if (host->board_init)
		host->board_init(host);

	dwmci_writel(host, DWMCI_PWREN, 1);

	if (!dwmci_wait_reset(host, DWMCI_RESET_ALL)) {
		debug("%s[%d] Fail-reset!!\n", __func__, __LINE__);
		return -EIO;
	}

	/* Enumerate at 400KHz */
	dwmci_setup_bus(host, mmc->cfg->f_min);

	dwmci_writel(host, DWMCI_RINTSTS, 0xFFFFFFFF);
	dwmci_writel(host, DWMCI_INTMASK, 0);

	dwmci_writel(host, DWMCI_TMOUT, 0xFFFFFFFF);

	dwmci_writel(host, DWMCI_IDINTEN, 0);
	dwmci_writel(host, DWMCI_BMOD, 1);

	if (!host->fifoth_val) {
		uint32_t fifo_size;

		fifo_size = dwmci_readl(host, DWMCI_FIFOTH);
		fifo_size = ((fifo_size & RX_WMARK_MASK) >> RX_WMARK_SHIFT) + 1;
		host->fifoth_val = MSIZE(0x2) | RX_WMARK(fifo_size / 2 - 1) |
				   TX_WMARK(fifo_size / 2);
	}
	dwmci_writel(host, DWMCI_FIFOTH, host->fifoth_val);

	dwmci_writel(host, DWMCI_CLKENA, 0);
	dwmci_writel(host, DWMCI_CLKSRC, 0);

	return 0;
}

#ifdef CONFIG_DM_MMC_OPS
int dwmci_probe(struct udevice *dev)
{
	struct mmc *mmc = mmc_get_mmc_dev(dev);

	return dwmci_init(mmc);
}

const struct dm_mmc_ops dm_dwmci_ops = {
	.send_cmd = dwmci_send_cmd,
	.set_ios = dwmci_set_ios,
};

#else
static const struct mmc_ops dwmci_ops = {
	.send_cmd = dwmci_send_cmd,
	.set_ios = dwmci_set_ios,
	.init = dwmci_init,
};
#endif

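/*
 * dwmci_setup_cfg() - fill in an mmc_config for a DesignWare MMC host
 *
 * Sets the frequency limits, supported voltages, bus-width and
 * high-speed capabilities and the maximum block count; the result is
 * what gets registered through mmc_bind()/mmc_create() below.
 */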
void dwmci_setup_cfg(struct mmc_config *cfg, const char *name, int buswidth,
		     uint caps, u32 max_clk, u32 min_clk)
{
	cfg->name = name;
#ifndef CONFIG_DM_MMC_OPS
	cfg->ops = &dwmci_ops;
#endif
	cfg->f_min = min_clk;
	cfg->f_max = max_clk;

	cfg->voltages = MMC_VDD_32_33 | MMC_VDD_33_34 | MMC_VDD_165_195;

	cfg->host_caps = caps;

	if (buswidth == 8) {
		cfg->host_caps |= MMC_MODE_8BIT;
		cfg->host_caps &= ~MMC_MODE_4BIT;
	} else {
		cfg->host_caps |= MMC_MODE_4BIT;
		cfg->host_caps &= ~MMC_MODE_8BIT;
	}
	cfg->host_caps |= MMC_MODE_HS | MMC_MODE_HS_52MHz;

	cfg->b_max = CONFIG_SYS_MMC_MAX_BLK_COUNT;
}

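/*
 * With CONFIG_BLK the MMC device is bound through dwmci_bind(); in the
 * legacy path add_dwmci() builds the configuration from the dwmci_host
 * fields and registers the device with mmc_create().
 */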
#ifdef CONFIG_BLK
int dwmci_bind(struct udevice *dev, struct mmc *mmc, struct mmc_config *cfg)
{
	return mmc_bind(dev, mmc, cfg);
}
#else
int add_dwmci(struct dwmci_host *host, u32 max_clk, u32 min_clk)
{
	dwmci_setup_cfg(&host->cfg, host->name, host->buswidth, host->caps,
			max_clk, min_clk);

	host->mmc = mmc_create(&host->cfg, host);
	if (host->mmc == NULL)
		return -1;

	return 0;
}
#endif