drivers/mmc/dw_mmc.c
/*
 * (C) Copyright 2012 SAMSUNG Electronics
 * Jaehoon Chung <jh80.chung@samsung.com>
 * Rajeshawari Shinde <rajeshwari.s@samsung.com>
 *
 * SPDX-License-Identifier:	GPL-2.0+
 */

#include <bouncebuf.h>
#include <common.h>
#include <errno.h>
#include <malloc.h>
#include <memalign.h>
#include <mmc.h>
#include <dwmmc.h>

#define PAGE_SIZE 4096

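/*
 * Write the requested reset bits to CTRL and poll until the controller has
 * cleared all reset bits. Returns 1 on success, 0 if the bits are still set
 * when the polling loop expires.
 */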
static int dwmci_wait_reset(struct dwmci_host *host, u32 value)
{
	unsigned long timeout = 1000;
	u32 ctrl;

	dwmci_writel(host, DWMCI_CTRL, value);

	while (timeout--) {
		ctrl = dwmci_readl(host, DWMCI_CTRL);
		if (!(ctrl & DWMCI_RESET_ALL))
			return 1;
	}
	return 0;
}

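/*
 * Fill one internal DMA controller (IDMAC) descriptor: control flags, byte
 * count, buffer address, and the link to the next descriptor in the chain.
 */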
static void dwmci_set_idma_desc(struct dwmci_idmac *idmac,
				u32 desc0, u32 desc1, u32 desc2)
{
	struct dwmci_idmac *desc = idmac;

	desc->flags = desc0;
	desc->cnt = desc1;
	desc->addr = desc2;
	desc->next_addr = (ulong)desc + sizeof(struct dwmci_idmac);
}

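/*
 * Build the IDMAC descriptor chain for a transfer (up to eight blocks per
 * descriptor), flush the chain out of the data cache, enable the internal
 * DMA engine, and program the block size and total byte count.
 */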
static void dwmci_prepare_data(struct dwmci_host *host,
			       struct mmc_data *data,
			       struct dwmci_idmac *cur_idmac,
			       void *bounce_buffer)
{
	unsigned long ctrl;
	unsigned int i = 0, flags, cnt, blk_cnt;
	ulong data_start, data_end;

	blk_cnt = data->blocks;

	dwmci_wait_reset(host, DWMCI_CTRL_FIFO_RESET);

	data_start = (ulong)cur_idmac;
	dwmci_writel(host, DWMCI_DBADDR, (ulong)cur_idmac);

	do {
		flags = DWMCI_IDMAC_OWN | DWMCI_IDMAC_CH;
		flags |= (i == 0) ? DWMCI_IDMAC_FS : 0;
		if (blk_cnt <= 8) {
			flags |= DWMCI_IDMAC_LD;
			cnt = data->blocksize * blk_cnt;
		} else
			cnt = data->blocksize * 8;

		dwmci_set_idma_desc(cur_idmac, flags, cnt,
				    (ulong)bounce_buffer + (i * PAGE_SIZE));

		if (blk_cnt <= 8)
			break;
		blk_cnt -= 8;
		cur_idmac++;
		i++;
	} while (1);

	data_end = (ulong)cur_idmac;
	flush_dcache_range(data_start, data_end + ARCH_DMA_MINALIGN);

	ctrl = dwmci_readl(host, DWMCI_CTRL);
	ctrl |= DWMCI_IDMAC_EN | DWMCI_DMA_EN;
	dwmci_writel(host, DWMCI_CTRL, ctrl);

	ctrl = dwmci_readl(host, DWMCI_BMOD);
	ctrl |= DWMCI_BMOD_IDMAC_FB | DWMCI_BMOD_IDMAC_EN;
	dwmci_writel(host, DWMCI_BMOD, ctrl);

	dwmci_writel(host, DWMCI_BLKSIZ, data->blocksize);
	dwmci_writel(host, DWMCI_BYTCNT, data->blocksize * data->blocks);
}

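/*
 * Poll RINTSTS until the transfer completes (DTO), fails (data error or
 * data timeout) or the software timeout expires. In FIFO (PIO) mode the
 * data FIFO is drained or filled here, one word at a time, whenever the
 * controller raises the RX/TX data-request status bit.
 */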
static int dwmci_data_transfer(struct dwmci_host *host, struct mmc_data *data)
{
	int ret = 0;
	u32 timeout = 240000;
	u32 mask, size, i, len = 0;
	u32 *buf = NULL;
	ulong start = get_timer(0);
	u32 fifo_depth = (((host->fifoth_val & RX_WMARK_MASK) >>
			   RX_WMARK_SHIFT) + 1) * 2;

	size = data->blocksize * data->blocks / 4;
	if (data->flags == MMC_DATA_READ)
		buf = (unsigned int *)data->dest;
	else
		buf = (unsigned int *)data->src;

	for (;;) {
		mask = dwmci_readl(host, DWMCI_RINTSTS);
		/* Error during data transfer. */
		if (mask & (DWMCI_DATA_ERR | DWMCI_DATA_TOUT)) {
			debug("%s: DATA ERROR!\n", __func__);
			ret = -EINVAL;
			break;
		}

		if (host->fifo_mode && size) {
			len = 0;
			if (data->flags == MMC_DATA_READ &&
			    (mask & DWMCI_INTMSK_RXDR)) {
				while (size) {
					len = dwmci_readl(host, DWMCI_STATUS);
					len = (len >> DWMCI_FIFO_SHIFT) &
					      DWMCI_FIFO_MASK;
					len = min(size, len);
					for (i = 0; i < len; i++)
						*buf++ =
						dwmci_readl(host, DWMCI_DATA);
					size = size > len ? (size - len) : 0;
				}
				dwmci_writel(host, DWMCI_RINTSTS,
					     DWMCI_INTMSK_RXDR);
			} else if (data->flags == MMC_DATA_WRITE &&
				   (mask & DWMCI_INTMSK_TXDR)) {
				while (size) {
					len = dwmci_readl(host, DWMCI_STATUS);
					len = fifo_depth - ((len >>
						   DWMCI_FIFO_SHIFT) &
						   DWMCI_FIFO_MASK);
					len = min(size, len);
					for (i = 0; i < len; i++)
						dwmci_writel(host, DWMCI_DATA,
							     *buf++);
					size = size > len ? (size - len) : 0;
				}
				dwmci_writel(host, DWMCI_RINTSTS,
					     DWMCI_INTMSK_TXDR);
			}
		}

		/* Data arrived correctly. */
		if (mask & DWMCI_INTMSK_DTO) {
			ret = 0;
			break;
		}

		/* Check for timeout. */
		if (get_timer(start) > timeout) {
			debug("%s: Timeout waiting for data!\n", __func__);
			ret = -ETIMEDOUT;
			break;
		}
	}

	dwmci_writel(host, DWMCI_RINTSTS, mask);

	return ret;
}

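/*
 * Build the command flags that describe the data phase: data expected,
 * plus the write-direction bit for host-to-card transfers.
 */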
static int dwmci_set_transfer_mode(struct dwmci_host *host,
				   struct mmc_data *data)
{
	unsigned long mode;

	mode = DWMCI_CMD_DATA_EXP;
	if (data->flags & MMC_DATA_WRITE)
		mode |= DWMCI_CMD_RW;

	return mode;
}

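/*
 * Send one command (and optional data) to the card: wait for the busy
 * state to clear, set up the data path (FIFO/PIO, or IDMAC through a
 * bounce buffer), program CMDARG and CMD, then poll for command
 * completion and read back the response registers.
 */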
#ifdef CONFIG_DM_MMC
static int dwmci_send_cmd(struct udevice *dev, struct mmc_cmd *cmd,
			  struct mmc_data *data)
{
	struct mmc *mmc = mmc_get_mmc_dev(dev);
#else
static int dwmci_send_cmd(struct mmc *mmc, struct mmc_cmd *cmd,
			  struct mmc_data *data)
{
#endif
	struct dwmci_host *host = mmc->priv;
	ALLOC_CACHE_ALIGN_BUFFER(struct dwmci_idmac, cur_idmac,
				 data ? DIV_ROUND_UP(data->blocks, 8) : 0);
	int ret = 0, flags = 0, i;
	unsigned int timeout = 500;
	u32 retry = 100000;
	u32 mask, ctrl;
	ulong start = get_timer(0);
	struct bounce_buffer bbstate;

	while (dwmci_readl(host, DWMCI_STATUS) & DWMCI_BUSY) {
		if (get_timer(start) > timeout) {
			debug("%s: Timeout on data busy\n", __func__);
			return -ETIMEDOUT;
		}
	}

	dwmci_writel(host, DWMCI_RINTSTS, DWMCI_INTMSK_ALL);

	if (data) {
		if (host->fifo_mode) {
			dwmci_writel(host, DWMCI_BLKSIZ, data->blocksize);
			dwmci_writel(host, DWMCI_BYTCNT,
				     data->blocksize * data->blocks);
			dwmci_wait_reset(host, DWMCI_CTRL_FIFO_RESET);
		} else {
			if (data->flags == MMC_DATA_READ) {
				bounce_buffer_start(&bbstate,
						    (void *)data->dest,
						    data->blocksize *
						    data->blocks, GEN_BB_WRITE);
			} else {
				bounce_buffer_start(&bbstate,
						    (void *)data->src,
						    data->blocksize *
						    data->blocks, GEN_BB_READ);
			}
			dwmci_prepare_data(host, data, cur_idmac,
					   bbstate.bounce_buffer);
		}
	}

	dwmci_writel(host, DWMCI_CMDARG, cmd->cmdarg);

	if (data)
		flags = dwmci_set_transfer_mode(host, data);

	if ((cmd->resp_type & MMC_RSP_136) && (cmd->resp_type & MMC_RSP_BUSY))
		return -1;

	if (cmd->cmdidx == MMC_CMD_STOP_TRANSMISSION)
		flags |= DWMCI_CMD_ABORT_STOP;
	else
		flags |= DWMCI_CMD_PRV_DAT_WAIT;

	if (cmd->resp_type & MMC_RSP_PRESENT) {
		flags |= DWMCI_CMD_RESP_EXP;
		if (cmd->resp_type & MMC_RSP_136)
			flags |= DWMCI_CMD_RESP_LENGTH;
	}

	if (cmd->resp_type & MMC_RSP_CRC)
		flags |= DWMCI_CMD_CHECK_CRC;

	flags |= (cmd->cmdidx | DWMCI_CMD_START | DWMCI_CMD_USE_HOLD_REG);

	debug("Sending CMD%d\n", cmd->cmdidx);

	dwmci_writel(host, DWMCI_CMD, flags);

	for (i = 0; i < retry; i++) {
		mask = dwmci_readl(host, DWMCI_RINTSTS);
		if (mask & DWMCI_INTMSK_CDONE) {
			if (!data)
				dwmci_writel(host, DWMCI_RINTSTS, mask);
			break;
		}
	}

	if (i == retry) {
		debug("%s: Timeout.\n", __func__);
		return -ETIMEDOUT;
	}

	if (mask & DWMCI_INTMSK_RTO) {
		/*
		 * Timeout here is not necessarily fatal. (e)MMC cards
		 * will splat here when they receive CMD55 as they do
		 * not support this command and that is exactly the way
		 * to tell them apart from SD cards. Thus, this output
		 * below shall be debug(). eMMC cards also do not favor
		 * CMD8, please keep that in mind.
		 */
		debug("%s: Response Timeout.\n", __func__);
		return -ETIMEDOUT;
	} else if (mask & DWMCI_INTMSK_RE) {
		debug("%s: Response Error.\n", __func__);
		return -EIO;
	}

	if (cmd->resp_type & MMC_RSP_PRESENT) {
		if (cmd->resp_type & MMC_RSP_136) {
			cmd->response[0] = dwmci_readl(host, DWMCI_RESP3);
			cmd->response[1] = dwmci_readl(host, DWMCI_RESP2);
			cmd->response[2] = dwmci_readl(host, DWMCI_RESP1);
			cmd->response[3] = dwmci_readl(host, DWMCI_RESP0);
		} else {
			cmd->response[0] = dwmci_readl(host, DWMCI_RESP0);
		}
	}

	if (data) {
		ret = dwmci_data_transfer(host, data);

		/* Only DMA mode needs this. */
		if (!host->fifo_mode) {
			ctrl = dwmci_readl(host, DWMCI_CTRL);
			ctrl &= ~(DWMCI_DMA_EN);
			dwmci_writel(host, DWMCI_CTRL, ctrl);
			bounce_buffer_stop(&bbstate);
		}
	}

	udelay(100);

	return ret;
}

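/*
 * Program the card clock: derive the divider from the source clock
 * (div = DIV_ROUND_UP(sclk, 2 * freq), 0 means bypass), then issue the
 * "update clock" command sequence the controller requires whenever
 * CLKDIV/CLKENA change.
 */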
static int dwmci_setup_bus(struct dwmci_host *host, u32 freq)
{
	u32 div, status;
	int timeout = 10000;
	unsigned long sclk;

	if ((freq == host->clock) || (freq == 0))
		return 0;
	/*
	 * If host->get_mmc_clk isn't defined, assume that host->bus_hz is
	 * the source clock value. host->bus_hz should be set by the user.
	 */
	if (host->get_mmc_clk)
		sclk = host->get_mmc_clk(host, freq);
	else if (host->bus_hz)
		sclk = host->bus_hz;
	else {
		debug("%s: Didn't get source clock value.\n", __func__);
		return -EINVAL;
	}

	if (sclk == freq)
		div = 0;	/* bypass mode */
	else
		div = DIV_ROUND_UP(sclk, 2 * freq);

	dwmci_writel(host, DWMCI_CLKENA, 0);
	dwmci_writel(host, DWMCI_CLKSRC, 0);

	dwmci_writel(host, DWMCI_CLKDIV, div);
	dwmci_writel(host, DWMCI_CMD, DWMCI_CMD_PRV_DAT_WAIT |
		     DWMCI_CMD_UPD_CLK | DWMCI_CMD_START);

	do {
		status = dwmci_readl(host, DWMCI_CMD);
		if (timeout-- < 0) {
			debug("%s: Timeout!\n", __func__);
			return -ETIMEDOUT;
		}
	} while (status & DWMCI_CMD_START);

	dwmci_writel(host, DWMCI_CLKENA, DWMCI_CLKEN_ENABLE |
		     DWMCI_CLKEN_LOW_PWR);

	dwmci_writel(host, DWMCI_CMD, DWMCI_CMD_PRV_DAT_WAIT |
		     DWMCI_CMD_UPD_CLK | DWMCI_CMD_START);

	timeout = 10000;
	do {
		status = dwmci_readl(host, DWMCI_CMD);
		if (timeout-- < 0) {
			debug("%s: Timeout!\n", __func__);
			return -ETIMEDOUT;
		}
	} while (status & DWMCI_CMD_START);

	host->clock = freq;

	return 0;
}

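/*
 * Apply the bus settings requested by the MMC core: card clock, bus width
 * (1/4/8 bit) and DDR mode, then let the board adjust its clock selection
 * via the clksel() hook if one is provided.
 */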
#ifdef CONFIG_DM_MMC
static int dwmci_set_ios(struct udevice *dev)
{
	struct mmc *mmc = mmc_get_mmc_dev(dev);
#else
static int dwmci_set_ios(struct mmc *mmc)
{
#endif
	struct dwmci_host *host = (struct dwmci_host *)mmc->priv;
	u32 ctype, regs;

	debug("Buswidth = %d, clock: %d\n", mmc->bus_width, mmc->clock);

	dwmci_setup_bus(host, mmc->clock);
	switch (mmc->bus_width) {
	case 8:
		ctype = DWMCI_CTYPE_8BIT;
		break;
	case 4:
		ctype = DWMCI_CTYPE_4BIT;
		break;
	default:
		ctype = DWMCI_CTYPE_1BIT;
		break;
	}

	dwmci_writel(host, DWMCI_CTYPE, ctype);

	regs = dwmci_readl(host, DWMCI_UHS_REG);
	if (mmc->ddr_mode)
		regs |= DWMCI_DDR_MODE;
	else
		regs &= ~DWMCI_DDR_MODE;

	dwmci_writel(host, DWMCI_UHS_REG, regs);

	if (host->clksel)
		host->clksel(host);

	return 0;
}

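/*
 * One-time controller initialisation: power the card, reset the
 * controller, drop the bus to the enumeration frequency, clear and mask
 * all interrupts, and program the FIFO watermarks (computed from the
 * detected FIFO depth unless the board supplied fifoth_val).
 */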
static int dwmci_init(struct mmc *mmc)
{
	struct dwmci_host *host = mmc->priv;

	if (host->board_init)
		host->board_init(host);

	dwmci_writel(host, DWMCI_PWREN, 1);

	if (!dwmci_wait_reset(host, DWMCI_RESET_ALL)) {
		debug("%s[%d] Fail-reset!!\n", __func__, __LINE__);
		return -EIO;
	}

	/* Enumerate at 400KHz */
	dwmci_setup_bus(host, mmc->cfg->f_min);

	dwmci_writel(host, DWMCI_RINTSTS, 0xFFFFFFFF);
	dwmci_writel(host, DWMCI_INTMASK, 0);

	dwmci_writel(host, DWMCI_TMOUT, 0xFFFFFFFF);

	dwmci_writel(host, DWMCI_IDINTEN, 0);
	dwmci_writel(host, DWMCI_BMOD, 1);

	if (!host->fifoth_val) {
		uint32_t fifo_size;

		fifo_size = dwmci_readl(host, DWMCI_FIFOTH);
		fifo_size = ((fifo_size & RX_WMARK_MASK) >> RX_WMARK_SHIFT) + 1;
		host->fifoth_val = MSIZE(0x2) | RX_WMARK(fifo_size / 2 - 1) |
				   TX_WMARK(fifo_size / 2);
	}
	dwmci_writel(host, DWMCI_FIFOTH, host->fifoth_val);

	dwmci_writel(host, DWMCI_CLKENA, 0);
	dwmci_writel(host, DWMCI_CLKSRC, 0);

	return 0;
}

#ifdef CONFIG_DM_MMC
int dwmci_probe(struct udevice *dev)
{
	struct mmc *mmc = mmc_get_mmc_dev(dev);

	return dwmci_init(mmc);
}

const struct dm_mmc_ops dm_dwmci_ops = {
	.send_cmd = dwmci_send_cmd,
	.set_ios = dwmci_set_ios,
};

#else
static const struct mmc_ops dwmci_ops = {
	.send_cmd = dwmci_send_cmd,
	.set_ios = dwmci_set_ios,
	.init = dwmci_init,
};
#endif

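/*
 * Fill in the mmc_config for this host: clock limits, supported voltages,
 * bus-width capabilities and the maximum block count per transfer.
 */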
void dwmci_setup_cfg(struct mmc_config *cfg, struct dwmci_host *host,
		     u32 max_clk, u32 min_clk)
{
	cfg->name = host->name;
#ifndef CONFIG_DM_MMC
	cfg->ops = &dwmci_ops;
#endif
	cfg->f_min = min_clk;
	cfg->f_max = max_clk;

	cfg->voltages = MMC_VDD_32_33 | MMC_VDD_33_34 | MMC_VDD_165_195;

	cfg->host_caps = host->caps;

	if (host->buswidth == 8) {
		cfg->host_caps |= MMC_MODE_8BIT;
		cfg->host_caps &= ~MMC_MODE_4BIT;
	} else {
		cfg->host_caps |= MMC_MODE_4BIT;
		cfg->host_caps &= ~MMC_MODE_8BIT;
	}
	cfg->host_caps |= MMC_MODE_HS | MMC_MODE_HS_52MHz;

	cfg->b_max = CONFIG_SYS_MMC_MAX_BLK_COUNT;
}

#ifdef CONFIG_BLK
int dwmci_bind(struct udevice *dev, struct mmc *mmc, struct mmc_config *cfg)
{
	return mmc_bind(dev, mmc, cfg);
}
#else
int add_dwmci(struct dwmci_host *host, u32 max_clk, u32 min_clk)
{
	dwmci_setup_cfg(&host->cfg, host, max_clk, min_clk);

	host->mmc = mmc_create(&host->cfg, host);
	if (host->mmc == NULL)
		return -1;

	return 0;
}
#endif