]>
Commit | Line | Data |
---|---|---|
83d290c5 | 1 | // SPDX-License-Identifier: GPL-2.0+ |
e94cad93 MV |
2 | /* |
3 | * Copyright (C) 2016 Socionext Inc. | |
4 | * Author: Masahiro Yamada <yamada.masahiro@socionext.com> | |
e94cad93 MV |
5 | */ |
6 | ||
7 | #include <common.h> | |
8 | #include <clk.h> | |
9 | #include <fdtdec.h> | |
10 | #include <mmc.h> | |
11 | #include <dm.h> | |
e10422f1 | 12 | #include <dm/pinctrl.h> |
e94cad93 MV |
13 | #include <linux/compat.h> |
14 | #include <linux/dma-direction.h> | |
15 | #include <linux/io.h> | |
16 | #include <linux/sizes.h> | |
17 | #include <power/regulator.h> | |
18 | #include <asm/unaligned.h> | |
19 | ||
cb0b6b03 | 20 | #include "tmio-common.h" |
e94cad93 MV |
21 | |
22 | DECLARE_GLOBAL_DATA_PTR; | |
23 | ||
cb0b6b03 | 24 | static u64 tmio_sd_readq(struct tmio_sd_priv *priv, unsigned int reg) |
e94cad93 | 25 | { |
620fd85c | 26 | return readq(priv->regbase + (reg << 1)); |
e94cad93 MV |
27 | } |
28 | ||
cb0b6b03 | 29 | static void tmio_sd_writeq(struct tmio_sd_priv *priv, |
e94cad93 MV |
30 | u64 val, unsigned int reg) |
31 | { | |
620fd85c | 32 | writeq(val, priv->regbase + (reg << 1)); |
e94cad93 MV |
33 | } |
34 | ||
cb0b6b03 | 35 | static u16 tmio_sd_readw(struct tmio_sd_priv *priv, unsigned int reg) |
db1266d6 MV |
36 | { |
37 | return readw(priv->regbase + (reg >> 1)); | |
38 | } | |
39 | ||
cb0b6b03 | 40 | static void tmio_sd_writew(struct tmio_sd_priv *priv, |
db1266d6 MV |
41 | u16 val, unsigned int reg) |
42 | { | |
43 | writew(val, priv->regbase + (reg >> 1)); | |
44 | } | |
45 | ||
cb0b6b03 | 46 | u32 tmio_sd_readl(struct tmio_sd_priv *priv, unsigned int reg) |
e94cad93 | 47 | { |
db1266d6 MV |
48 | u32 val; |
49 | ||
cb0b6b03 | 50 | if (priv->caps & TMIO_SD_CAP_64BIT) |
e94cad93 | 51 | return readl(priv->regbase + (reg << 1)); |
cb0b6b03 | 52 | else if (priv->caps & TMIO_SD_CAP_16BIT) { |
db1266d6 | 53 | val = readw(priv->regbase + (reg >> 1)) & 0xffff; |
cb0b6b03 MV |
54 | if ((reg == TMIO_SD_RSP10) || (reg == TMIO_SD_RSP32) || |
55 | (reg == TMIO_SD_RSP54) || (reg == TMIO_SD_RSP76)) { | |
db1266d6 MV |
56 | val |= readw(priv->regbase + (reg >> 1) + 2) << 16; |
57 | } | |
58 | return val; | |
59 | } else | |
e94cad93 MV |
60 | return readl(priv->regbase + reg); |
61 | } | |
62 | ||
cb0b6b03 | 63 | void tmio_sd_writel(struct tmio_sd_priv *priv, |
e94cad93 MV |
64 | u32 val, unsigned int reg) |
65 | { | |
cb0b6b03 | 66 | if (priv->caps & TMIO_SD_CAP_64BIT) |
e94cad93 | 67 | writel(val, priv->regbase + (reg << 1)); |
cb0b6b03 | 68 | else if (priv->caps & TMIO_SD_CAP_16BIT) { |
db1266d6 | 69 | writew(val & 0xffff, priv->regbase + (reg >> 1)); |
cb0b6b03 MV |
70 | if (reg == TMIO_SD_INFO1 || reg == TMIO_SD_INFO1_MASK || |
71 | reg == TMIO_SD_INFO2 || reg == TMIO_SD_INFO2_MASK || | |
72 | reg == TMIO_SD_ARG) | |
db1266d6 MV |
73 | writew(val >> 16, priv->regbase + (reg >> 1) + 2); |
74 | } else | |
e94cad93 MV |
75 | writel(val, priv->regbase + reg); |
76 | } | |
77 | ||
78 | static dma_addr_t __dma_map_single(void *ptr, size_t size, | |
79 | enum dma_data_direction dir) | |
80 | { | |
81 | unsigned long addr = (unsigned long)ptr; | |
82 | ||
83 | if (dir == DMA_FROM_DEVICE) | |
84 | invalidate_dcache_range(addr, addr + size); | |
85 | else | |
86 | flush_dcache_range(addr, addr + size); | |
87 | ||
88 | return addr; | |
89 | } | |
90 | ||
91 | static void __dma_unmap_single(dma_addr_t addr, size_t size, | |
92 | enum dma_data_direction dir) | |
93 | { | |
94 | if (dir != DMA_TO_DEVICE) | |
95 | invalidate_dcache_range(addr, addr + size); | |
96 | } | |
97 | ||
/*
 * Translate pending error bits in TMIO_SD_INFO2 into a negative errno.
 * Returns 0 when no error flag is set. The checks are ordered by
 * priority; do not reorder them.
 */
static int tmio_sd_check_error(struct udevice *dev)
{
	struct tmio_sd_priv *priv = dev_get_priv(dev);
	u32 info2 = tmio_sd_readl(priv, TMIO_SD_INFO2);

	if (info2 & TMIO_SD_INFO2_ERR_RTO) {
		/*
		 * TIMEOUT must be returned for unsupported command. Do not
		 * display error log since this might be a part of sequence to
		 * distinguish between SD and MMC.
		 */
		return -ETIMEDOUT;
	}

	/* data timeout */
	if (info2 & TMIO_SD_INFO2_ERR_TO) {
		dev_err(dev, "timeout error\n");
		return -ETIMEDOUT;
	}

	/* END/CRC/IDX errors indicate a corrupted exchange on the bus */
	if (info2 & (TMIO_SD_INFO2_ERR_END | TMIO_SD_INFO2_ERR_CRC |
		     TMIO_SD_INFO2_ERR_IDX)) {
		dev_err(dev, "communication out of sync\n");
		return -EILSEQ;
	}

	/* illegal access/read/write attempts */
	if (info2 & (TMIO_SD_INFO2_ERR_ILA | TMIO_SD_INFO2_ERR_ILR |
		     TMIO_SD_INFO2_ERR_ILW)) {
		dev_err(dev, "illegal access\n");
		return -EIO;
	}

	return 0;
}
131 | ||
cb0b6b03 | 132 | static int tmio_sd_wait_for_irq(struct udevice *dev, unsigned int reg, |
e94cad93 MV |
133 | u32 flag) |
134 | { | |
cb0b6b03 | 135 | struct tmio_sd_priv *priv = dev_get_priv(dev); |
e94cad93 MV |
136 | long wait = 1000000; |
137 | int ret; | |
138 | ||
cb0b6b03 | 139 | while (!(tmio_sd_readl(priv, reg) & flag)) { |
e94cad93 MV |
140 | if (wait-- < 0) { |
141 | dev_err(dev, "timeout\n"); | |
142 | return -ETIMEDOUT; | |
143 | } | |
144 | ||
cb0b6b03 | 145 | ret = tmio_sd_check_error(dev); |
e94cad93 MV |
146 | if (ret) |
147 | return ret; | |
148 | ||
149 | udelay(1); | |
150 | } | |
151 | ||
152 | return 0; | |
153 | } | |
154 | ||
/*
 * Generate a PIO FIFO drain helper for a given access width.
 * The fast path uses direct stores when the destination buffer is
 * naturally aligned for the access width; otherwise each word goes
 * through put_unaligned(). Instantiated below for 64/32/16-bit accesses.
 */
#define tmio_pio_read_fifo(__width, __suffix)				\
static void tmio_pio_read_fifo_##__width(struct tmio_sd_priv *priv,	\
					 char *pbuf, uint blksz)	\
{									\
	u##__width *buf = (u##__width *)pbuf;				\
	int i;								\
									\
	if (likely(IS_ALIGNED((uintptr_t)buf, ((__width) / 8)))) {	\
		for (i = 0; i < blksz / ((__width) / 8); i++) {		\
			*buf++ = tmio_sd_read##__suffix(priv,		\
							TMIO_SD_BUF);	\
		}							\
	} else {							\
		for (i = 0; i < blksz / ((__width) / 8); i++) {		\
			u##__width data;				\
			data = tmio_sd_read##__suffix(priv,		\
						      TMIO_SD_BUF);	\
			put_unaligned(data, buf++);			\
		}							\
	}								\
}

tmio_pio_read_fifo(64, q)
tmio_pio_read_fifo(32, l)
tmio_pio_read_fifo(16, w)
12a510e2 | 180 | |
cb0b6b03 | 181 | static int tmio_sd_pio_read_one_block(struct udevice *dev, char *pbuf, |
e94cad93 MV |
182 | uint blocksize) |
183 | { | |
cb0b6b03 | 184 | struct tmio_sd_priv *priv = dev_get_priv(dev); |
12a510e2 | 185 | int ret; |
e94cad93 MV |
186 | |
187 | /* wait until the buffer is filled with data */ | |
cb0b6b03 MV |
188 | ret = tmio_sd_wait_for_irq(dev, TMIO_SD_INFO2, |
189 | TMIO_SD_INFO2_BRE); | |
e94cad93 MV |
190 | if (ret) |
191 | return ret; | |
192 | ||
193 | /* | |
194 | * Clear the status flag _before_ read the buffer out because | |
cb0b6b03 | 195 | * TMIO_SD_INFO2_BRE is edge-triggered, not level-triggered. |
e94cad93 | 196 | */ |
cb0b6b03 | 197 | tmio_sd_writel(priv, 0, TMIO_SD_INFO2); |
e94cad93 | 198 | |
cb0b6b03 MV |
199 | if (priv->caps & TMIO_SD_CAP_64BIT) |
200 | tmio_pio_read_fifo_64(priv, pbuf, blocksize); | |
201 | else if (priv->caps & TMIO_SD_CAP_16BIT) | |
202 | tmio_pio_read_fifo_16(priv, pbuf, blocksize); | |
12a510e2 | 203 | else |
cb0b6b03 | 204 | tmio_pio_read_fifo_32(priv, pbuf, blocksize); |
e94cad93 MV |
205 | |
206 | return 0; | |
207 | } | |
208 | ||
/*
 * Generate a PIO FIFO fill helper for a given access width.
 * Mirrors tmio_pio_read_fifo: direct loads for aligned source buffers,
 * get_unaligned() otherwise. Instantiated below for 64/32/16-bit accesses.
 */
#define tmio_pio_write_fifo(__width, __suffix)				\
static void tmio_pio_write_fifo_##__width(struct tmio_sd_priv *priv,	\
					  const char *pbuf, uint blksz)	\
{									\
	const u##__width *buf = (const u##__width *)pbuf;		\
	int i;								\
									\
	if (likely(IS_ALIGNED((uintptr_t)buf, ((__width) / 8)))) {	\
		for (i = 0; i < blksz / ((__width) / 8); i++) {		\
			tmio_sd_write##__suffix(priv, *buf++,		\
						TMIO_SD_BUF);		\
		}							\
	} else {							\
		for (i = 0; i < blksz / ((__width) / 8); i++) {		\
			u##__width data = get_unaligned(buf++);		\
			tmio_sd_write##__suffix(priv, data,		\
						TMIO_SD_BUF);		\
		}							\
	}								\
}

tmio_pio_write_fifo(64, q)
tmio_pio_write_fifo(32, l)
tmio_pio_write_fifo(16, w)
12a510e2 | 233 | |
cb0b6b03 | 234 | static int tmio_sd_pio_write_one_block(struct udevice *dev, |
e94cad93 MV |
235 | const char *pbuf, uint blocksize) |
236 | { | |
cb0b6b03 | 237 | struct tmio_sd_priv *priv = dev_get_priv(dev); |
12a510e2 | 238 | int ret; |
e94cad93 MV |
239 | |
240 | /* wait until the buffer becomes empty */ | |
cb0b6b03 MV |
241 | ret = tmio_sd_wait_for_irq(dev, TMIO_SD_INFO2, |
242 | TMIO_SD_INFO2_BWE); | |
e94cad93 MV |
243 | if (ret) |
244 | return ret; | |
245 | ||
cb0b6b03 | 246 | tmio_sd_writel(priv, 0, TMIO_SD_INFO2); |
e94cad93 | 247 | |
cb0b6b03 MV |
248 | if (priv->caps & TMIO_SD_CAP_64BIT) |
249 | tmio_pio_write_fifo_64(priv, pbuf, blocksize); | |
250 | else if (priv->caps & TMIO_SD_CAP_16BIT) | |
251 | tmio_pio_write_fifo_16(priv, pbuf, blocksize); | |
12a510e2 | 252 | else |
cb0b6b03 | 253 | tmio_pio_write_fifo_32(priv, pbuf, blocksize); |
e94cad93 MV |
254 | |
255 | return 0; | |
256 | } | |
257 | ||
cb0b6b03 | 258 | static int tmio_sd_pio_xfer(struct udevice *dev, struct mmc_data *data) |
e94cad93 MV |
259 | { |
260 | const char *src = data->src; | |
261 | char *dest = data->dest; | |
262 | int i, ret; | |
263 | ||
264 | for (i = 0; i < data->blocks; i++) { | |
265 | if (data->flags & MMC_DATA_READ) | |
cb0b6b03 | 266 | ret = tmio_sd_pio_read_one_block(dev, dest, |
e94cad93 MV |
267 | data->blocksize); |
268 | else | |
cb0b6b03 | 269 | ret = tmio_sd_pio_write_one_block(dev, src, |
e94cad93 MV |
270 | data->blocksize); |
271 | if (ret) | |
272 | return ret; | |
273 | ||
274 | if (data->flags & MMC_DATA_READ) | |
275 | dest += data->blocksize; | |
276 | else | |
277 | src += data->blocksize; | |
278 | } | |
279 | ||
280 | return 0; | |
281 | } | |
282 | ||
cb0b6b03 | 283 | static void tmio_sd_dma_start(struct tmio_sd_priv *priv, |
e94cad93 MV |
284 | dma_addr_t dma_addr) |
285 | { | |
286 | u32 tmp; | |
287 | ||
cb0b6b03 MV |
288 | tmio_sd_writel(priv, 0, TMIO_SD_DMA_INFO1); |
289 | tmio_sd_writel(priv, 0, TMIO_SD_DMA_INFO2); | |
e94cad93 MV |
290 | |
291 | /* enable DMA */ | |
cb0b6b03 MV |
292 | tmp = tmio_sd_readl(priv, TMIO_SD_EXTMODE); |
293 | tmp |= TMIO_SD_EXTMODE_DMA_EN; | |
294 | tmio_sd_writel(priv, tmp, TMIO_SD_EXTMODE); | |
e94cad93 | 295 | |
cb0b6b03 | 296 | tmio_sd_writel(priv, dma_addr & U32_MAX, TMIO_SD_DMA_ADDR_L); |
e94cad93 MV |
297 | |
298 | /* suppress the warning "right shift count >= width of type" */ | |
299 | dma_addr >>= min_t(int, 32, 8 * sizeof(dma_addr)); | |
300 | ||
cb0b6b03 | 301 | tmio_sd_writel(priv, dma_addr & U32_MAX, TMIO_SD_DMA_ADDR_H); |
e94cad93 | 302 | |
cb0b6b03 | 303 | tmio_sd_writel(priv, TMIO_SD_DMA_CTL_START, TMIO_SD_DMA_CTL); |
e94cad93 MV |
304 | } |
305 | ||
cb0b6b03 | 306 | static int tmio_sd_dma_wait_for_irq(struct udevice *dev, u32 flag, |
e94cad93 MV |
307 | unsigned int blocks) |
308 | { | |
cb0b6b03 | 309 | struct tmio_sd_priv *priv = dev_get_priv(dev); |
e94cad93 MV |
310 | long wait = 1000000 + 10 * blocks; |
311 | ||
cb0b6b03 | 312 | while (!(tmio_sd_readl(priv, TMIO_SD_DMA_INFO1) & flag)) { |
e94cad93 MV |
313 | if (wait-- < 0) { |
314 | dev_err(dev, "timeout during DMA\n"); | |
315 | return -ETIMEDOUT; | |
316 | } | |
317 | ||
318 | udelay(10); | |
319 | } | |
320 | ||
cb0b6b03 | 321 | if (tmio_sd_readl(priv, TMIO_SD_DMA_INFO2)) { |
e94cad93 MV |
322 | dev_err(dev, "error during DMA\n"); |
323 | return -EIO; | |
324 | } | |
325 | ||
326 | return 0; | |
327 | } | |
328 | ||
/*
 * Run one data transfer with the controller's internal DMA engine.
 * Configures the transfer direction, maps the buffer for DMA, kicks the
 * engine and busy-waits for the direction-specific completion flag.
 * Returns 0 on success or a negative errno.
 */
static int tmio_sd_dma_xfer(struct udevice *dev, struct mmc_data *data)
{
	struct tmio_sd_priv *priv = dev_get_priv(dev);
	size_t len = data->blocks * data->blocksize;
	void *buf;
	enum dma_data_direction dir;
	dma_addr_t dma_addr;
	u32 poll_flag, tmp;
	int ret;

	tmp = tmio_sd_readl(priv, TMIO_SD_DMA_MODE);

	if (data->flags & MMC_DATA_READ) {
		buf = data->dest;
		dir = DMA_FROM_DEVICE;
		/*
		 * The DMA READ completion flag position differs on Socionext
		 * and Renesas SoCs. It is bit 20 on Socionext SoCs and using
		 * bit 17 is a hardware bug and forbidden. It is bit 17 on
		 * Renesas SoCs and bit 20 does not work on them.
		 */
		poll_flag = (priv->caps & TMIO_SD_CAP_RCAR) ?
			    TMIO_SD_DMA_INFO1_END_RD :
			    TMIO_SD_DMA_INFO1_END_RD2;
		tmp |= TMIO_SD_DMA_MODE_DIR_RD;
	} else {
		buf = (void *)data->src;
		dir = DMA_TO_DEVICE;
		poll_flag = TMIO_SD_DMA_INFO1_END_WR;
		tmp &= ~TMIO_SD_DMA_MODE_DIR_RD;
	}

	tmio_sd_writel(priv, tmp, TMIO_SD_DMA_MODE);

	/* flush/invalidate the buffer and obtain its bus address */
	dma_addr = __dma_map_single(buf, len, dir);

	tmio_sd_dma_start(priv, dma_addr);

	ret = tmio_sd_dma_wait_for_irq(dev, poll_flag, data->blocks);

	/* for reads, drop stale cache lines so the CPU sees fresh data */
	__dma_unmap_single(dma_addr, len, dir);

	return ret;
}
373 | ||
/* check if the address is DMA'able */
static bool tmio_sd_addr_is_dmaable(unsigned long addr)
{
	/* the internal DMA engine requires aligned buffers */
	if (!IS_ALIGNED(addr, TMIO_SD_DMA_MINALIGN))
		return false;

#if defined(CONFIG_ARCH_UNIPHIER) && !defined(CONFIG_ARM64) && \
	defined(CONFIG_SPL_BUILD)
	/*
	 * For UniPhier ARMv7 SoCs, the stack is allocated in the locked ways
	 * of L2, which is unreachable from the DMA engine.
	 */
	if (addr < CONFIG_SPL_STACK)
		return false;
#endif

	return true;
}
392 | ||
cb0b6b03 | 393 | int tmio_sd_send_cmd(struct udevice *dev, struct mmc_cmd *cmd, |
e94cad93 MV |
394 | struct mmc_data *data) |
395 | { | |
cb0b6b03 | 396 | struct tmio_sd_priv *priv = dev_get_priv(dev); |
e94cad93 MV |
397 | int ret; |
398 | u32 tmp; | |
399 | ||
cb0b6b03 | 400 | if (tmio_sd_readl(priv, TMIO_SD_INFO2) & TMIO_SD_INFO2_CBSY) { |
e94cad93 MV |
401 | dev_err(dev, "command busy\n"); |
402 | return -EBUSY; | |
403 | } | |
404 | ||
405 | /* clear all status flags */ | |
cb0b6b03 MV |
406 | tmio_sd_writel(priv, 0, TMIO_SD_INFO1); |
407 | tmio_sd_writel(priv, 0, TMIO_SD_INFO2); | |
e94cad93 MV |
408 | |
409 | /* disable DMA once */ | |
cb0b6b03 MV |
410 | tmp = tmio_sd_readl(priv, TMIO_SD_EXTMODE); |
411 | tmp &= ~TMIO_SD_EXTMODE_DMA_EN; | |
412 | tmio_sd_writel(priv, tmp, TMIO_SD_EXTMODE); | |
e94cad93 | 413 | |
cb0b6b03 | 414 | tmio_sd_writel(priv, cmd->cmdarg, TMIO_SD_ARG); |
e94cad93 MV |
415 | |
416 | tmp = cmd->cmdidx; | |
417 | ||
418 | if (data) { | |
cb0b6b03 MV |
419 | tmio_sd_writel(priv, data->blocksize, TMIO_SD_SIZE); |
420 | tmio_sd_writel(priv, data->blocks, TMIO_SD_SECCNT); | |
e94cad93 MV |
421 | |
422 | /* Do not send CMD12 automatically */ | |
cb0b6b03 | 423 | tmp |= TMIO_SD_CMD_NOSTOP | TMIO_SD_CMD_DATA; |
e94cad93 MV |
424 | |
425 | if (data->blocks > 1) | |
cb0b6b03 | 426 | tmp |= TMIO_SD_CMD_MULTI; |
e94cad93 MV |
427 | |
428 | if (data->flags & MMC_DATA_READ) | |
cb0b6b03 | 429 | tmp |= TMIO_SD_CMD_RD; |
e94cad93 MV |
430 | } |
431 | ||
432 | /* | |
433 | * Do not use the response type auto-detection on this hardware. | |
434 | * CMD8, for example, has different response types on SD and eMMC, | |
435 | * while this controller always assumes the response type for SD. | |
436 | * Set the response type manually. | |
437 | */ | |
438 | switch (cmd->resp_type) { | |
439 | case MMC_RSP_NONE: | |
cb0b6b03 | 440 | tmp |= TMIO_SD_CMD_RSP_NONE; |
e94cad93 MV |
441 | break; |
442 | case MMC_RSP_R1: | |
cb0b6b03 | 443 | tmp |= TMIO_SD_CMD_RSP_R1; |
e94cad93 MV |
444 | break; |
445 | case MMC_RSP_R1b: | |
cb0b6b03 | 446 | tmp |= TMIO_SD_CMD_RSP_R1B; |
e94cad93 MV |
447 | break; |
448 | case MMC_RSP_R2: | |
cb0b6b03 | 449 | tmp |= TMIO_SD_CMD_RSP_R2; |
e94cad93 MV |
450 | break; |
451 | case MMC_RSP_R3: | |
cb0b6b03 | 452 | tmp |= TMIO_SD_CMD_RSP_R3; |
e94cad93 MV |
453 | break; |
454 | default: | |
455 | dev_err(dev, "unknown response type\n"); | |
456 | return -EINVAL; | |
457 | } | |
458 | ||
459 | dev_dbg(dev, "sending CMD%d (SD_CMD=%08x, SD_ARG=%08x)\n", | |
460 | cmd->cmdidx, tmp, cmd->cmdarg); | |
cb0b6b03 | 461 | tmio_sd_writel(priv, tmp, TMIO_SD_CMD); |
e94cad93 | 462 | |
cb0b6b03 MV |
463 | ret = tmio_sd_wait_for_irq(dev, TMIO_SD_INFO1, |
464 | TMIO_SD_INFO1_RSP); | |
e94cad93 MV |
465 | if (ret) |
466 | return ret; | |
467 | ||
468 | if (cmd->resp_type & MMC_RSP_136) { | |
cb0b6b03 MV |
469 | u32 rsp_127_104 = tmio_sd_readl(priv, TMIO_SD_RSP76); |
470 | u32 rsp_103_72 = tmio_sd_readl(priv, TMIO_SD_RSP54); | |
471 | u32 rsp_71_40 = tmio_sd_readl(priv, TMIO_SD_RSP32); | |
472 | u32 rsp_39_8 = tmio_sd_readl(priv, TMIO_SD_RSP10); | |
e94cad93 MV |
473 | |
474 | cmd->response[0] = ((rsp_127_104 & 0x00ffffff) << 8) | | |
475 | ((rsp_103_72 & 0xff000000) >> 24); | |
476 | cmd->response[1] = ((rsp_103_72 & 0x00ffffff) << 8) | | |
477 | ((rsp_71_40 & 0xff000000) >> 24); | |
478 | cmd->response[2] = ((rsp_71_40 & 0x00ffffff) << 8) | | |
479 | ((rsp_39_8 & 0xff000000) >> 24); | |
480 | cmd->response[3] = (rsp_39_8 & 0xffffff) << 8; | |
481 | } else { | |
482 | /* bit 39-8 */ | |
cb0b6b03 | 483 | cmd->response[0] = tmio_sd_readl(priv, TMIO_SD_RSP10); |
e94cad93 MV |
484 | } |
485 | ||
486 | if (data) { | |
487 | /* use DMA if the HW supports it and the buffer is aligned */ | |
cb0b6b03 MV |
488 | if (priv->caps & TMIO_SD_CAP_DMA_INTERNAL && |
489 | tmio_sd_addr_is_dmaable((long)data->src)) | |
490 | ret = tmio_sd_dma_xfer(dev, data); | |
e94cad93 | 491 | else |
cb0b6b03 | 492 | ret = tmio_sd_pio_xfer(dev, data); |
e94cad93 | 493 | |
cb0b6b03 MV |
494 | ret = tmio_sd_wait_for_irq(dev, TMIO_SD_INFO1, |
495 | TMIO_SD_INFO1_CMP); | |
e94cad93 MV |
496 | if (ret) |
497 | return ret; | |
498 | } | |
499 | ||
cb0b6b03 | 500 | tmio_sd_wait_for_irq(dev, TMIO_SD_INFO2, TMIO_SD_INFO2_SCLKDIVEN); |
f23b208e | 501 | |
e94cad93 MV |
502 | return ret; |
503 | } | |
504 | ||
cb0b6b03 | 505 | static int tmio_sd_set_bus_width(struct tmio_sd_priv *priv, |
e94cad93 MV |
506 | struct mmc *mmc) |
507 | { | |
508 | u32 val, tmp; | |
509 | ||
510 | switch (mmc->bus_width) { | |
a7b7401c | 511 | case 0: |
e94cad93 | 512 | case 1: |
cb0b6b03 | 513 | val = TMIO_SD_OPTION_WIDTH_1; |
e94cad93 MV |
514 | break; |
515 | case 4: | |
cb0b6b03 | 516 | val = TMIO_SD_OPTION_WIDTH_4; |
e94cad93 MV |
517 | break; |
518 | case 8: | |
cb0b6b03 | 519 | val = TMIO_SD_OPTION_WIDTH_8; |
e94cad93 MV |
520 | break; |
521 | default: | |
522 | return -EINVAL; | |
523 | } | |
524 | ||
cb0b6b03 MV |
525 | tmp = tmio_sd_readl(priv, TMIO_SD_OPTION); |
526 | tmp &= ~TMIO_SD_OPTION_WIDTH_MASK; | |
e94cad93 | 527 | tmp |= val; |
cb0b6b03 | 528 | tmio_sd_writel(priv, tmp, TMIO_SD_OPTION); |
e94cad93 MV |
529 | |
530 | return 0; | |
531 | } | |
532 | ||
cb0b6b03 | 533 | static void tmio_sd_set_ddr_mode(struct tmio_sd_priv *priv, |
e94cad93 MV |
534 | struct mmc *mmc) |
535 | { | |
536 | u32 tmp; | |
537 | ||
cb0b6b03 | 538 | tmp = tmio_sd_readl(priv, TMIO_SD_IF_MODE); |
e94cad93 | 539 | if (mmc->ddr_mode) |
cb0b6b03 | 540 | tmp |= TMIO_SD_IF_MODE_DDR; |
e94cad93 | 541 | else |
cb0b6b03 MV |
542 | tmp &= ~TMIO_SD_IF_MODE_DDR; |
543 | tmio_sd_writel(priv, tmp, TMIO_SD_IF_MODE); | |
e94cad93 MV |
544 | } |
545 | ||
/*
 * Program the SD clock divider for the requested rate.
 * Picks the smallest power-of-two divider whose resulting clock does not
 * exceed mmc->clock, then gates the clock, changes the divider and
 * re-enables it — the stop/start sequence must not be reordered.
 */
static void tmio_sd_set_clk_rate(struct tmio_sd_priv *priv,
				 struct mmc *mmc)
{
	unsigned int divisor;
	u32 val, tmp;

	/* a zero rate request means "leave the clock alone" */
	if (!mmc->clock)
		return;

	/* round up so the resulting clock never exceeds the request */
	divisor = DIV_ROUND_UP(priv->mclk, mmc->clock);

	if (divisor <= 1)
		/* R-Car uses a different encoding for the /1 setting */
		val = (priv->caps & TMIO_SD_CAP_RCAR) ?
		      TMIO_SD_CLKCTL_RCAR_DIV1 : TMIO_SD_CLKCTL_DIV1;
	else if (divisor <= 2)
		val = TMIO_SD_CLKCTL_DIV2;
	else if (divisor <= 4)
		val = TMIO_SD_CLKCTL_DIV4;
	else if (divisor <= 8)
		val = TMIO_SD_CLKCTL_DIV8;
	else if (divisor <= 16)
		val = TMIO_SD_CLKCTL_DIV16;
	else if (divisor <= 32)
		val = TMIO_SD_CLKCTL_DIV32;
	else if (divisor <= 64)
		val = TMIO_SD_CLKCTL_DIV64;
	else if (divisor <= 128)
		val = TMIO_SD_CLKCTL_DIV128;
	else if (divisor <= 256)
		val = TMIO_SD_CLKCTL_DIV256;
	else if (divisor <= 512 || !(priv->caps & TMIO_SD_CAP_DIV1024))
		/* /512 is the largest divider without the DIV1024 cap */
		val = TMIO_SD_CLKCTL_DIV512;
	else
		val = TMIO_SD_CLKCTL_DIV1024;

	tmp = tmio_sd_readl(priv, TMIO_SD_CLKCTL);
	/* nothing to do if the clock already runs at the wanted divider */
	if (tmp & TMIO_SD_CLKCTL_SCLKEN &&
	    (tmp & TMIO_SD_CLKCTL_DIV_MASK) == val)
		return;

	/* stop the clock before changing its rate to avoid a glitch signal */
	tmp &= ~TMIO_SD_CLKCTL_SCLKEN;
	tmio_sd_writel(priv, tmp, TMIO_SD_CLKCTL);

	tmp &= ~TMIO_SD_CLKCTL_DIV_MASK;
	tmp |= val | TMIO_SD_CLKCTL_OFFEN;
	tmio_sd_writel(priv, tmp, TMIO_SD_CLKCTL);

	/* re-enable the clock with the new divider */
	tmp |= TMIO_SD_CLKCTL_SCLKEN;
	tmio_sd_writel(priv, tmp, TMIO_SD_CLKCTL);

	/* give the clock time to stabilize */
	udelay(1000);
}
599 | ||
/*
 * Configure I/O voltage and pinctrl state for the selected bus mode:
 * 1.8V for UHS-class signaling, 3.3V otherwise, and the matching
 * "default"/"state_uhs" pinctrl states when CONFIG_PINCTRL is enabled.
 */
static void tmio_sd_set_pins(struct udevice *dev)
{
	__maybe_unused struct mmc *mmc = mmc_get_mmc_dev(dev);

#ifdef CONFIG_DM_REGULATOR
	struct tmio_sd_priv *priv = dev_get_priv(dev);

	/* vqmmc regulator is optional; skip silently when absent */
	if (priv->vqmmc_dev) {
		if (mmc->signal_voltage == MMC_SIGNAL_VOLTAGE_180)
			regulator_set_value(priv->vqmmc_dev, 1800000);
		else
			regulator_set_value(priv->vqmmc_dev, 3300000);
		regulator_set_enable(priv->vqmmc_dev, true);
	}
#endif

#ifdef CONFIG_PINCTRL
	switch (mmc->selected_mode) {
	case MMC_LEGACY:
	case SD_LEGACY:
	case MMC_HS:
	case SD_HS:
	case MMC_HS_52:
	case MMC_DDR_52:
		pinctrl_select_state(dev, "default");
		break;
	case UHS_SDR12:
	case UHS_SDR25:
	case UHS_SDR50:
	case UHS_DDR50:
	case UHS_SDR104:
	case MMC_HS_200:
		pinctrl_select_state(dev, "state_uhs");
		break;
	default:
		/* leave pins untouched for modes without a dedicated state */
		break;
	}
#endif
}
639 | ||
cb0b6b03 | 640 | int tmio_sd_set_ios(struct udevice *dev) |
e94cad93 | 641 | { |
cb0b6b03 | 642 | struct tmio_sd_priv *priv = dev_get_priv(dev); |
e94cad93 MV |
643 | struct mmc *mmc = mmc_get_mmc_dev(dev); |
644 | int ret; | |
645 | ||
646 | dev_dbg(dev, "clock %uHz, DDRmode %d, width %u\n", | |
647 | mmc->clock, mmc->ddr_mode, mmc->bus_width); | |
648 | ||
cb0b6b03 | 649 | ret = tmio_sd_set_bus_width(priv, mmc); |
e94cad93 MV |
650 | if (ret) |
651 | return ret; | |
cb0b6b03 MV |
652 | tmio_sd_set_ddr_mode(priv, mmc); |
653 | tmio_sd_set_clk_rate(priv, mmc); | |
654 | tmio_sd_set_pins(dev); | |
e94cad93 MV |
655 | |
656 | return 0; | |
657 | } | |
658 | ||
cb0b6b03 | 659 | int tmio_sd_get_cd(struct udevice *dev) |
e94cad93 | 660 | { |
cb0b6b03 | 661 | struct tmio_sd_priv *priv = dev_get_priv(dev); |
e94cad93 | 662 | |
cb0b6b03 | 663 | if (priv->caps & TMIO_SD_CAP_NONREMOVABLE) |
e94cad93 MV |
664 | return 1; |
665 | ||
cb0b6b03 MV |
666 | return !!(tmio_sd_readl(priv, TMIO_SD_INFO1) & |
667 | TMIO_SD_INFO1_CD); | |
e94cad93 MV |
668 | } |
669 | ||
/*
 * Bring the controller to a known state: soft reset, stop-control setup,
 * host bus-mode programming and (when the internal DMA engine exists)
 * address-increment configuration.
 */
static void tmio_sd_host_init(struct tmio_sd_priv *priv)
{
	u32 tmp;

	/* soft reset of the host: pulse RSTX low, then release it */
	tmp = tmio_sd_readl(priv, TMIO_SD_SOFT_RST);
	tmp &= ~TMIO_SD_SOFT_RST_RSTX;
	tmio_sd_writel(priv, tmp, TMIO_SD_SOFT_RST);
	tmp |= TMIO_SD_SOFT_RST_RSTX;
	tmio_sd_writel(priv, tmp, TMIO_SD_SOFT_RST);

	/* FIXME: implement eMMC hw_reset */

	tmio_sd_writel(priv, TMIO_SD_STOP_SEC, TMIO_SD_STOP);

	/*
	 * Connected to 32bit AXI.
	 * This register dropped backward compatibility at version 0x10.
	 * Write an appropriate value depending on the IP version.
	 */
	if (priv->version >= 0x10)
		tmio_sd_writel(priv, 0x101, TMIO_SD_HOST_MODE);
	else
		tmio_sd_writel(priv, 0x0, TMIO_SD_HOST_MODE);

	if (priv->caps & TMIO_SD_CAP_DMA_INTERNAL) {
		/* have the DMA engine auto-increment the buffer address */
		tmp = tmio_sd_readl(priv, TMIO_SD_DMA_MODE);
		tmp |= TMIO_SD_DMA_MODE_ADDR_INC;
		tmio_sd_writel(priv, tmp, TMIO_SD_DMA_MODE);
	}
}
701 | ||
cb0b6b03 | 702 | int tmio_sd_bind(struct udevice *dev) |
e94cad93 | 703 | { |
cb0b6b03 | 704 | struct tmio_sd_plat *plat = dev_get_platdata(dev); |
e94cad93 MV |
705 | |
706 | return mmc_bind(dev, &plat->mmc, &plat->cfg); | |
707 | } | |
708 | ||
/*
 * Common probe for TMIO-derived SD/MMC controllers.
 * Maps the register space, parses host capabilities from the device
 * tree, detects the IP version to enable version-gated features, runs
 * the host init sequence and fills in the mmc_config limits.
 *
 * @quirks: caller-supplied capability override; non-zero replaces any
 *          previously set priv->caps.
 * Returns 0 on success or a negative errno.
 */
int tmio_sd_probe(struct udevice *dev, u32 quirks)
{
	struct tmio_sd_plat *plat = dev_get_platdata(dev);
	struct tmio_sd_priv *priv = dev_get_priv(dev);
	struct mmc_uclass_priv *upriv = dev_get_uclass_priv(dev);
	fdt_addr_t base;
	int ret;

	base = devfdt_get_addr(dev);
	if (base == FDT_ADDR_T_NONE)
		return -EINVAL;

	priv->regbase = devm_ioremap(dev, base, SZ_2K);
	if (!priv->regbase)
		return -ENOMEM;

#ifdef CONFIG_DM_REGULATOR
	/* vqmmc regulator is optional; failure to find it is not fatal */
	device_get_supply_regulator(dev, "vqmmc-supply", &priv->vqmmc_dev);
#endif

	ret = mmc_of_parse(dev, &plat->cfg);
	if (ret < 0) {
		dev_err(dev, "failed to parse host caps\n");
		return ret;
	}

	plat->cfg.name = dev->name;
	plat->cfg.host_caps |= MMC_MODE_HS_52MHz | MMC_MODE_HS;

	/* caller-supplied quirks override the capability flags */
	if (quirks)
		priv->caps = quirks;

	priv->version = tmio_sd_readl(priv, TMIO_SD_VERSION) &
			TMIO_SD_VERSION_IP;
	dev_dbg(dev, "version %x\n", priv->version);
	if (priv->version >= 0x10) {
		/* IP >= 0x10 provides internal DMA and the /1024 divider */
		priv->caps |= TMIO_SD_CAP_DMA_INTERNAL;
		priv->caps |= TMIO_SD_CAP_DIV1024;
	}

	if (fdt_get_property(gd->fdt_blob, dev_of_offset(dev), "non-removable",
			     NULL))
		priv->caps |= TMIO_SD_CAP_NONREMOVABLE;

	tmio_sd_host_init(priv);

	plat->cfg.voltages = MMC_VDD_165_195 | MMC_VDD_32_33 | MMC_VDD_33_34;
	/* NOTE(review): assumes priv->mclk was set by the SoC glue driver
	 * before this common probe runs — confirm in the wrapper drivers. */
	plat->cfg.f_min = priv->mclk /
			  (priv->caps & TMIO_SD_CAP_DIV1024 ? 1024 : 512);
	plat->cfg.f_max = priv->mclk;
	plat->cfg.b_max = U32_MAX; /* max value of TMIO_SD_SECCNT */

	upriv->mmc = &plat->mmc;

	return 0;
}