/*
 * (C) Copyright 2012 SAMSUNG Electronics
 * Jaehoon Chung <jh80.chung@samsung.com>
 * Rajeshawari Shinde <rajeshwari.s@samsung.com>
 *
 * SPDX-License-Identifier:	GPL-2.0+
 */

#include <bouncebuf.h>
#include <common.h>
#include <errno.h>
#include <malloc.h>
#include <memalign.h>
#include <mmc.h>
#include <dwmmc.h>
#include <asm-generic/errno.h>

#define PAGE_SIZE	4096

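/*
 * Write a reset request to the CTRL register and poll until the
 * controller clears the reset bits. Returns 1 on success, 0 if the
 * reset does not complete within the polling budget.
 */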
static int dwmci_wait_reset(struct dwmci_host *host, u32 value)
{
	unsigned long timeout = 1000;
	u32 ctrl;

	dwmci_writel(host, DWMCI_CTRL, value);

	while (timeout--) {
		ctrl = dwmci_readl(host, DWMCI_CTRL);
		if (!(ctrl & DWMCI_RESET_ALL))
			return 1;
	}
	return 0;
}

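/*
 * Fill in one internal DMA (IDMAC) descriptor: flags, byte count,
 * buffer address, and a link to the next descriptor in the chain.
 */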
static void dwmci_set_idma_desc(struct dwmci_idmac *idmac,
		u32 desc0, u32 desc1, u32 desc2)
{
	struct dwmci_idmac *desc = idmac;

	desc->flags = desc0;
	desc->cnt = desc1;
	desc->addr = desc2;
	desc->next_addr = (ulong)desc + sizeof(struct dwmci_idmac);
}

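/*
 * Build the IDMAC descriptor chain for a transfer and point the
 * controller at it. Each descriptor covers at most 8 blocks of the
 * bounce buffer; the final descriptor is marked with the "last
 * descriptor" flag.
 */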
static void dwmci_prepare_data(struct dwmci_host *host,
			       struct mmc_data *data,
			       struct dwmci_idmac *cur_idmac,
			       void *bounce_buffer)
{
	unsigned long ctrl;
	unsigned int i = 0, flags, cnt, blk_cnt;
	ulong data_start, data_end;

	blk_cnt = data->blocks;

	dwmci_wait_reset(host, DWMCI_CTRL_FIFO_RESET);

	data_start = (ulong)cur_idmac;
	dwmci_writel(host, DWMCI_DBADDR, (ulong)cur_idmac);

	do {
		flags = DWMCI_IDMAC_OWN | DWMCI_IDMAC_CH;
		flags |= (i == 0) ? DWMCI_IDMAC_FS : 0;
		if (blk_cnt <= 8) {
			flags |= DWMCI_IDMAC_LD;
			cnt = data->blocksize * blk_cnt;
		} else
			cnt = data->blocksize * 8;

		dwmci_set_idma_desc(cur_idmac, flags, cnt,
				    (ulong)bounce_buffer + (i * PAGE_SIZE));

		if (blk_cnt <= 8)
			break;
		blk_cnt -= 8;
		cur_idmac++;
		i++;
	} while (1);

	data_end = (ulong)cur_idmac;
	flush_dcache_range(data_start, data_end + ARCH_DMA_MINALIGN);

	ctrl = dwmci_readl(host, DWMCI_CTRL);
	ctrl |= DWMCI_IDMAC_EN | DWMCI_DMA_EN;
	dwmci_writel(host, DWMCI_CTRL, ctrl);

	ctrl = dwmci_readl(host, DWMCI_BMOD);
	ctrl |= DWMCI_BMOD_IDMAC_FB | DWMCI_BMOD_IDMAC_EN;
	dwmci_writel(host, DWMCI_BMOD, ctrl);

	dwmci_writel(host, DWMCI_BLKSIZ, data->blocksize);
	dwmci_writel(host, DWMCI_BYTCNT, data->blocksize * data->blocks);
}

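/*
 * Wait for a data transfer to finish. In FIFO (PIO) mode the data
 * FIFO is drained or filled here as the RXDR/TXDR status bits are
 * raised; in DMA mode only the interrupt status is polled for
 * completion, error or timeout.
 */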
static int dwmci_data_transfer(struct dwmci_host *host, struct mmc_data *data)
{
	int ret = 0;
	u32 timeout = 240000;
	u32 mask, size, i, len = 0;
	u32 *buf = NULL;
	ulong start = get_timer(0);
	u32 fifo_depth = (((host->fifoth_val & RX_WMARK_MASK) >>
			    RX_WMARK_SHIFT) + 1) * 2;

	size = data->blocksize * data->blocks / 4;
	if (data->flags == MMC_DATA_READ)
		buf = (unsigned int *)data->dest;
	else
		buf = (unsigned int *)data->src;

	for (;;) {
		mask = dwmci_readl(host, DWMCI_RINTSTS);
		/* Error during data transfer. */
		if (mask & (DWMCI_DATA_ERR | DWMCI_DATA_TOUT)) {
			debug("%s: DATA ERROR!\n", __func__);
			ret = -EINVAL;
			break;
		}

		if (host->fifo_mode && size) {
			if (data->flags == MMC_DATA_READ) {
				if ((dwmci_readl(host, DWMCI_RINTSTS) &
				     DWMCI_INTMSK_RXDR)) {
					len = dwmci_readl(host, DWMCI_STATUS);
					len = (len >> DWMCI_FIFO_SHIFT) &
						DWMCI_FIFO_MASK;
					for (i = 0; i < len; i++)
						*buf++ =
						   dwmci_readl(host, DWMCI_DATA);
					dwmci_writel(host, DWMCI_RINTSTS,
						     DWMCI_INTMSK_RXDR);
				}
			} else {
				if ((dwmci_readl(host, DWMCI_RINTSTS) &
				     DWMCI_INTMSK_TXDR)) {
					len = dwmci_readl(host, DWMCI_STATUS);
					len = fifo_depth - ((len >>
						   DWMCI_FIFO_SHIFT) &
						   DWMCI_FIFO_MASK);
					for (i = 0; i < len; i++)
						dwmci_writel(host, DWMCI_DATA,
							     *buf++);
					dwmci_writel(host, DWMCI_RINTSTS,
						     DWMCI_INTMSK_TXDR);
				}
			}
			size = size > len ? (size - len) : 0;
		}

		/* Data arrived correctly. */
		if (mask & DWMCI_INTMSK_DTO) {
			ret = 0;
			break;
		}

		/* Check for timeout. */
		if (get_timer(start) > timeout) {
			debug("%s: Timeout waiting for data!\n", __func__);
			ret = TIMEOUT;
			break;
		}
	}

	dwmci_writel(host, DWMCI_RINTSTS, mask);

	return ret;
}

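/* Build the data-related bits of the CMD register value. */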
static int dwmci_set_transfer_mode(struct dwmci_host *host,
				   struct mmc_data *data)
{
	unsigned long mode;

	mode = DWMCI_CMD_DATA_EXP;
	if (data->flags & MMC_DATA_WRITE)
		mode |= DWMCI_CMD_RW;

	return mode;
}

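/*
 * Send a single command, optionally with data. Data is moved either
 * through the FIFO (fifo_mode) or via the internal DMA engine, using
 * a bounce buffer for cache-safe, aligned accesses.
 */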
static int dwmci_send_cmd(struct mmc *mmc, struct mmc_cmd *cmd,
			  struct mmc_data *data)
{
	struct dwmci_host *host = mmc->priv;
	ALLOC_CACHE_ALIGN_BUFFER(struct dwmci_idmac, cur_idmac,
				 data ? DIV_ROUND_UP(data->blocks, 8) : 0);
	int ret = 0, flags = 0, i;
	unsigned int timeout = 100000;
	u32 retry = 100000;
	u32 mask, ctrl;
	ulong start = get_timer(0);
	struct bounce_buffer bbstate;

	while (dwmci_readl(host, DWMCI_STATUS) & DWMCI_BUSY) {
		if (get_timer(start) > timeout) {
			debug("%s: Timeout on data busy\n", __func__);
			return TIMEOUT;
		}
	}

	dwmci_writel(host, DWMCI_RINTSTS, DWMCI_INTMSK_ALL);

	if (data) {
		if (host->fifo_mode) {
			dwmci_writel(host, DWMCI_BLKSIZ, data->blocksize);
			dwmci_writel(host, DWMCI_BYTCNT,
				     data->blocksize * data->blocks);
			dwmci_wait_reset(host, DWMCI_CTRL_FIFO_RESET);
		} else {
			if (data->flags == MMC_DATA_READ) {
				bounce_buffer_start(&bbstate, (void*)data->dest,
						    data->blocksize *
						    data->blocks, GEN_BB_WRITE);
			} else {
				bounce_buffer_start(&bbstate, (void*)data->src,
						    data->blocksize *
						    data->blocks, GEN_BB_READ);
			}
			dwmci_prepare_data(host, data, cur_idmac,
					   bbstate.bounce_buffer);
		}
	}

	dwmci_writel(host, DWMCI_CMDARG, cmd->cmdarg);

	if (data)
		flags = dwmci_set_transfer_mode(host, data);

	if ((cmd->resp_type & MMC_RSP_136) && (cmd->resp_type & MMC_RSP_BUSY))
		return -1;

	if (cmd->cmdidx == MMC_CMD_STOP_TRANSMISSION)
		flags |= DWMCI_CMD_ABORT_STOP;
	else
		flags |= DWMCI_CMD_PRV_DAT_WAIT;

	if (cmd->resp_type & MMC_RSP_PRESENT) {
		flags |= DWMCI_CMD_RESP_EXP;
		if (cmd->resp_type & MMC_RSP_136)
			flags |= DWMCI_CMD_RESP_LENGTH;
	}

	if (cmd->resp_type & MMC_RSP_CRC)
		flags |= DWMCI_CMD_CHECK_CRC;

	flags |= (cmd->cmdidx | DWMCI_CMD_START | DWMCI_CMD_USE_HOLD_REG);

	debug("Sending CMD%d\n", cmd->cmdidx);

	dwmci_writel(host, DWMCI_CMD, flags);

	for (i = 0; i < retry; i++) {
		mask = dwmci_readl(host, DWMCI_RINTSTS);
		if (mask & DWMCI_INTMSK_CDONE) {
			if (!data)
				dwmci_writel(host, DWMCI_RINTSTS, mask);
			break;
		}
	}

	if (i == retry) {
		debug("%s: Timeout.\n", __func__);
		return TIMEOUT;
	}

	if (mask & DWMCI_INTMSK_RTO) {
		/*
		 * Timeout here is not necessarily fatal. (e)MMC cards
		 * will splat here when they receive CMD55 as they do
		 * not support this command and that is exactly the way
		 * to tell them apart from SD cards. Thus, this output
		 * below shall be debug(). eMMC cards also do not favor
		 * CMD8, please keep that in mind.
		 */
		debug("%s: Response Timeout.\n", __func__);
		return TIMEOUT;
	} else if (mask & DWMCI_INTMSK_RE) {
		debug("%s: Response Error.\n", __func__);
		return -EIO;
	}

	if (cmd->resp_type & MMC_RSP_PRESENT) {
		if (cmd->resp_type & MMC_RSP_136) {
			cmd->response[0] = dwmci_readl(host, DWMCI_RESP3);
			cmd->response[1] = dwmci_readl(host, DWMCI_RESP2);
			cmd->response[2] = dwmci_readl(host, DWMCI_RESP1);
			cmd->response[3] = dwmci_readl(host, DWMCI_RESP0);
		} else {
			cmd->response[0] = dwmci_readl(host, DWMCI_RESP0);
		}
	}

	if (data) {
		ret = dwmci_data_transfer(host, data);

		/* Only needed in DMA mode */
		if (!host->fifo_mode) {
			ctrl = dwmci_readl(host, DWMCI_CTRL);
			ctrl &= ~(DWMCI_DMA_EN);
			dwmci_writel(host, DWMCI_CTRL, ctrl);
			bounce_buffer_stop(&bbstate);
		}
	}

	udelay(100);

	return ret;
}

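/*
 * Program the card clock: pick a divider from the source clock
 * (bypass when the source already matches the requested frequency),
 * then issue clock-update commands and wait for the controller to
 * accept them.
 */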
static int dwmci_setup_bus(struct dwmci_host *host, u32 freq)
{
	u32 div, status;
	int timeout = 10000;
	unsigned long sclk;

	if ((freq == host->clock) || (freq == 0))
		return 0;
	/*
	 * If host->get_mmc_clk isn't defined, assume that host->bus_hz
	 * holds the source clock frequency; in that case it must have
	 * been set by the caller.
	 */
	if (host->get_mmc_clk)
		sclk = host->get_mmc_clk(host, freq);
	else if (host->bus_hz)
		sclk = host->bus_hz;
	else {
		debug("%s: Didn't get source clock value.\n", __func__);
		return -EINVAL;
	}

	if (sclk == freq)
		div = 0;	/* bypass mode */
	else
		div = DIV_ROUND_UP(sclk, 2 * freq);

	dwmci_writel(host, DWMCI_CLKENA, 0);
	dwmci_writel(host, DWMCI_CLKSRC, 0);

	dwmci_writel(host, DWMCI_CLKDIV, div);
	dwmci_writel(host, DWMCI_CMD, DWMCI_CMD_PRV_DAT_WAIT |
		     DWMCI_CMD_UPD_CLK | DWMCI_CMD_START);

	do {
		status = dwmci_readl(host, DWMCI_CMD);
		if (timeout-- < 0) {
			debug("%s: Timeout!\n", __func__);
			return -ETIMEDOUT;
		}
	} while (status & DWMCI_CMD_START);

	dwmci_writel(host, DWMCI_CLKENA, DWMCI_CLKEN_ENABLE |
		     DWMCI_CLKEN_LOW_PWR);

	dwmci_writel(host, DWMCI_CMD, DWMCI_CMD_PRV_DAT_WAIT |
		     DWMCI_CMD_UPD_CLK | DWMCI_CMD_START);

	timeout = 10000;
	do {
		status = dwmci_readl(host, DWMCI_CMD);
		if (timeout-- < 0) {
			debug("%s: Timeout!\n", __func__);
			return -ETIMEDOUT;
		}
	} while (status & DWMCI_CMD_START);

	host->clock = freq;

	return 0;
}

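/* Apply the bus width, clock and DDR mode requested by the MMC core. */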
static void dwmci_set_ios(struct mmc *mmc)
{
	struct dwmci_host *host = (struct dwmci_host *)mmc->priv;
	u32 ctype, regs;

	debug("Buswidth = %d, clock: %d\n", mmc->bus_width, mmc->clock);

	dwmci_setup_bus(host, mmc->clock);
	switch (mmc->bus_width) {
	case 8:
		ctype = DWMCI_CTYPE_8BIT;
		break;
	case 4:
		ctype = DWMCI_CTYPE_4BIT;
		break;
	default:
		ctype = DWMCI_CTYPE_1BIT;
		break;
	}

	dwmci_writel(host, DWMCI_CTYPE, ctype);

	regs = dwmci_readl(host, DWMCI_UHS_REG);
	if (mmc->ddr_mode)
		regs |= DWMCI_DDR_MODE;
	else
		regs &= ~DWMCI_DDR_MODE;

	dwmci_writel(host, DWMCI_UHS_REG, regs);

	if (host->clksel)
		host->clksel(host);
}

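/*
 * Basic controller bring-up: power on, reset, set the enumeration
 * clock, mask interrupts and configure the FIFO thresholds.
 */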
static int dwmci_init(struct mmc *mmc)
{
	struct dwmci_host *host = mmc->priv;

	if (host->board_init)
		host->board_init(host);

	dwmci_writel(host, DWMCI_PWREN, 1);

	if (!dwmci_wait_reset(host, DWMCI_RESET_ALL)) {
		debug("%s[%d] Fail-reset!!\n", __func__, __LINE__);
		return -EIO;
	}

	/* Enumerate at 400 kHz */
	dwmci_setup_bus(host, mmc->cfg->f_min);

	dwmci_writel(host, DWMCI_RINTSTS, 0xFFFFFFFF);
	dwmci_writel(host, DWMCI_INTMASK, 0);

	dwmci_writel(host, DWMCI_TMOUT, 0xFFFFFFFF);

	dwmci_writel(host, DWMCI_IDINTEN, 0);
	dwmci_writel(host, DWMCI_BMOD, 1);

	if (!host->fifoth_val) {
		uint32_t fifo_size;

		fifo_size = dwmci_readl(host, DWMCI_FIFOTH);
		fifo_size = ((fifo_size & RX_WMARK_MASK) >> RX_WMARK_SHIFT) + 1;
		host->fifoth_val = MSIZE(0x2) | RX_WMARK(fifo_size / 2 - 1) |
				   TX_WMARK(fifo_size / 2);
	}
	dwmci_writel(host, DWMCI_FIFOTH, host->fifoth_val);

	dwmci_writel(host, DWMCI_CLKENA, 0);
	dwmci_writel(host, DWMCI_CLKSRC, 0);

	return 0;
}

static const struct mmc_ops dwmci_ops = {
	.send_cmd	= dwmci_send_cmd,
	.set_ios	= dwmci_set_ios,
	.init		= dwmci_init,
};

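/* Fill in the generic mmc_config for a DesignWare MMC host. */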
void dwmci_setup_cfg(struct mmc_config *cfg, const char *name, int buswidth,
		     uint caps, u32 max_clk, u32 min_clk)
{
	cfg->name = name;
	cfg->ops = &dwmci_ops;
	cfg->f_min = min_clk;
	cfg->f_max = max_clk;

	cfg->voltages = MMC_VDD_32_33 | MMC_VDD_33_34 | MMC_VDD_165_195;

	cfg->host_caps = caps;

	if (buswidth == 8) {
		cfg->host_caps |= MMC_MODE_8BIT;
		cfg->host_caps &= ~MMC_MODE_4BIT;
	} else {
		cfg->host_caps |= MMC_MODE_4BIT;
		cfg->host_caps &= ~MMC_MODE_8BIT;
	}
	cfg->host_caps |= MMC_MODE_HS | MMC_MODE_HS_52MHz;

	cfg->b_max = CONFIG_SYS_MMC_MAX_BLK_COUNT;
}

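/*
 * With CONFIG_BLK the device is bound through driver model; otherwise
 * add_dwmci() registers the host with the legacy MMC framework.
 */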
#ifdef CONFIG_BLK
int dwmci_bind(struct udevice *dev, struct mmc *mmc, struct mmc_config *cfg)
{
	return mmc_bind(dev, mmc, cfg);
}
#else
int add_dwmci(struct dwmci_host *host, u32 max_clk, u32 min_clk)
{
	dwmci_setup_cfg(&host->cfg, host->name, host->buswidth, host->caps,
			max_clk, min_clk);

	host->mmc = mmc_create(&host->cfg, host);
	if (host->mmc == NULL)
		return -1;

	return 0;
}
#endif