/*
 * MTK ECC controller driver.
 * Copyright (C) 2016 MediaTek Inc.
 * Authors: Xiaolei Li           <xiaolei.li@mediatek.com>
 *          Jorge Ramirez-Ortiz  <jorge.ramirez-ortiz@linaro.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#include <linux/platform_device.h>
#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <linux/clk.h>
#include <linux/module.h>
#include <linux/iopoll.h>
#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/mutex.h>

#include "mtk_ecc.h"

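/*
 * Register map: the encoder block starts at offset 0x00, the decoder block
 * at offset 0x100.
 */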
#define ECC_IDLE_MASK           BIT(0)
#define ECC_IRQ_EN              BIT(0)
#define ECC_OP_ENABLE           (1)
#define ECC_OP_DISABLE          (0)

#define ECC_ENCCON              (0x00)
#define ECC_ENCCNFG             (0x04)
#define ECC_CNFG_4BIT           (0)
#define ECC_CNFG_6BIT           (1)
#define ECC_CNFG_8BIT           (2)
#define ECC_CNFG_10BIT          (3)
#define ECC_CNFG_12BIT          (4)
#define ECC_CNFG_14BIT          (5)
#define ECC_CNFG_16BIT          (6)
#define ECC_CNFG_18BIT          (7)
#define ECC_CNFG_20BIT          (8)
#define ECC_CNFG_22BIT          (9)
#define ECC_CNFG_24BIT          (0xa)
#define ECC_CNFG_28BIT          (0xb)
#define ECC_CNFG_32BIT          (0xc)
#define ECC_CNFG_36BIT          (0xd)
#define ECC_CNFG_40BIT          (0xe)
#define ECC_CNFG_44BIT          (0xf)
#define ECC_CNFG_48BIT          (0x10)
#define ECC_CNFG_52BIT          (0x11)
#define ECC_CNFG_56BIT          (0x12)
#define ECC_CNFG_60BIT          (0x13)
#define ECC_MODE_SHIFT          (5)
#define ECC_MS_SHIFT            (16)
#define ECC_ENCDIADDR           (0x08)
#define ECC_ENCIDLE             (0x0C)
#define ECC_ENCPAR(x)           (0x10 + (x) * sizeof(u32))
#define ECC_ENCIRQ_EN           (0x80)
#define ECC_ENCIRQ_STA          (0x84)
#define ECC_DECCON              (0x100)
#define ECC_DECCNFG             (0x104)
#define DEC_EMPTY_EN            BIT(31)
#define DEC_CNFG_CORRECT        (0x3 << 12)
#define ECC_DECIDLE             (0x10C)
#define ECC_DECENUM0            (0x114)
#define ERR_MASK                (0x3f)
#define ECC_DECDONE             (0x124)
#define ECC_DECIRQ_EN           (0x200)
#define ECC_DECIRQ_STA          (0x204)

#define ECC_TIMEOUT             (500000)

#define ECC_IDLE_REG(op)        ((op) == ECC_ENCODE ? ECC_ENCIDLE : ECC_DECIDLE)
#define ECC_CTL_REG(op)         ((op) == ECC_ENCODE ? ECC_ENCCON : ECC_DECCON)
#define ECC_IRQ_REG(op)         ((op) == ECC_ENCODE ? \
                                 ECC_ENCIRQ_EN : ECC_DECIRQ_EN)

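/**
 * struct mtk_ecc - state of one ECC engine instance
 * @dev:     device that registered the engine
 * @regs:    mapped engine registers
 * @clk:     engine clock
 * @done:    completion signalled from the interrupt handler
 * @lock:    held from mtk_ecc_enable() until mtk_ecc_disable() to serialize users
 * @sectors: decode-done bit(s) that must appear in ECC_DECDONE before @done is completed
 */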
struct mtk_ecc {
        struct device *dev;
        void __iomem *regs;
        struct clk *clk;

        struct completion done;
        struct mutex lock;
        u32 sectors;
};

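/* Poll the encoder/decoder idle bit; warn if the engine does not go idle in time. */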
static inline void mtk_ecc_wait_idle(struct mtk_ecc *ecc,
                                     enum mtk_ecc_operation op)
{
        struct device *dev = ecc->dev;
        u32 val;
        int ret;

        ret = readl_poll_timeout_atomic(ecc->regs + ECC_IDLE_REG(op), val,
                                        val & ECC_IDLE_MASK,
                                        10, ECC_TIMEOUT);
        if (ret)
                dev_warn(dev, "%s NOT idle\n",
                         op == ECC_ENCODE ? "encoder" : "decoder");
}

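/*
 * Interrupt handler: a decoder interrupt only completes @done once the
 * expected sectors are reported in ECC_DECDONE; an encoder interrupt
 * completes @done immediately.  On completion the corresponding interrupt
 * enable bit is cleared.
 */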
static irqreturn_t mtk_ecc_irq(int irq, void *id)
{
        struct mtk_ecc *ecc = id;
        enum mtk_ecc_operation op;
        u32 dec, enc;

        dec = readw(ecc->regs + ECC_DECIRQ_STA) & ECC_IRQ_EN;
        if (dec) {
                op = ECC_DECODE;
                dec = readw(ecc->regs + ECC_DECDONE);
                if (dec & ecc->sectors) {
                        ecc->sectors = 0;
                        complete(&ecc->done);
                } else {
                        return IRQ_HANDLED;
                }
        } else {
                enc = readl(ecc->regs + ECC_ENCIRQ_STA) & ECC_IRQ_EN;
                if (enc) {
                        op = ECC_ENCODE;
                        complete(&ecc->done);
                } else {
                        return IRQ_NONE;
                }
        }

        writel(0, ecc->regs + ECC_IRQ_REG(op));

        return IRQ_HANDLED;
}

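/*
 * Translate the requested correction strength into its hardware encoding and
 * program either the encoder (ECC_ENCCNFG) or the decoder (ECC_DECCNFG) for
 * the coming operation.  Unsupported strengths fall back to 4 bits.
 */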
static void mtk_ecc_config(struct mtk_ecc *ecc, struct mtk_ecc_config *config)
{
        u32 ecc_bit = ECC_CNFG_4BIT, dec_sz, enc_sz;
        u32 reg;

        switch (config->strength) {
        case 4:
                ecc_bit = ECC_CNFG_4BIT;
                break;
        case 6:
                ecc_bit = ECC_CNFG_6BIT;
                break;
        case 8:
                ecc_bit = ECC_CNFG_8BIT;
                break;
        case 10:
                ecc_bit = ECC_CNFG_10BIT;
                break;
        case 12:
                ecc_bit = ECC_CNFG_12BIT;
                break;
        case 14:
                ecc_bit = ECC_CNFG_14BIT;
                break;
        case 16:
                ecc_bit = ECC_CNFG_16BIT;
                break;
        case 18:
                ecc_bit = ECC_CNFG_18BIT;
                break;
        case 20:
                ecc_bit = ECC_CNFG_20BIT;
                break;
        case 22:
                ecc_bit = ECC_CNFG_22BIT;
                break;
        case 24:
                ecc_bit = ECC_CNFG_24BIT;
                break;
        case 28:
                ecc_bit = ECC_CNFG_28BIT;
                break;
        case 32:
                ecc_bit = ECC_CNFG_32BIT;
                break;
        case 36:
                ecc_bit = ECC_CNFG_36BIT;
                break;
        case 40:
                ecc_bit = ECC_CNFG_40BIT;
                break;
        case 44:
                ecc_bit = ECC_CNFG_44BIT;
                break;
        case 48:
                ecc_bit = ECC_CNFG_48BIT;
                break;
        case 52:
                ecc_bit = ECC_CNFG_52BIT;
                break;
        case 56:
                ecc_bit = ECC_CNFG_56BIT;
                break;
        case 60:
                ecc_bit = ECC_CNFG_60BIT;
                break;
        default:
                dev_err(ecc->dev, "invalid strength %d, default to 4 bits\n",
                        config->strength);
        }

        if (config->op == ECC_ENCODE) {
                /* configure ECC encoder (in bits) */
                enc_sz = config->len << 3;

                reg = ecc_bit | (config->mode << ECC_MODE_SHIFT);
                reg |= (enc_sz << ECC_MS_SHIFT);
                writel(reg, ecc->regs + ECC_ENCCNFG);

                if (config->mode != ECC_NFI_MODE)
                        writel(lower_32_bits(config->addr),
                               ecc->regs + ECC_ENCDIADDR);

        } else {
                /* configure ECC decoder (in bits) */
                dec_sz = (config->len << 3) +
                         config->strength * ECC_PARITY_BITS;

                reg = ecc_bit | (config->mode << ECC_MODE_SHIFT);
                reg |= (dec_sz << ECC_MS_SHIFT) | DEC_CNFG_CORRECT;
                reg |= DEC_EMPTY_EN;
                writel(reg, ecc->regs + ECC_DECCNFG);

                if (config->sectors)
                        ecc->sectors = 1 << (config->sectors - 1);
        }
}

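/*
 * Read per-sector error counts from the decoder status registers: a value of
 * ERR_MASK marks an uncorrectable sector, anything else is the number of
 * corrected bitflips in that sector.
 */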
void mtk_ecc_get_stats(struct mtk_ecc *ecc, struct mtk_ecc_stats *stats,
                       int sectors)
{
        u32 offset, i, err;
        u32 bitflips = 0;

        stats->corrected = 0;
        stats->failed = 0;

        for (i = 0; i < sectors; i++) {
                offset = (i >> 2) << 2;
                err = readl(ecc->regs + ECC_DECENUM0 + offset);
                err = err >> ((i % 4) * 8);
                err &= ERR_MASK;
                if (err == ERR_MASK) {
                        /* uncorrectable errors */
                        stats->failed++;
                        continue;
                }

                stats->corrected += err;
                bitflips = max_t(u32, bitflips, err);
        }

        stats->bitflips = bitflips;
}
EXPORT_SYMBOL(mtk_ecc_get_stats);

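/* Drop the clock and device reference taken by mtk_ecc_get()/of_mtk_ecc_get(). */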
void mtk_ecc_release(struct mtk_ecc *ecc)
{
        clk_disable_unprepare(ecc->clk);
        put_device(ecc->dev);
}
EXPORT_SYMBOL(mtk_ecc_release);

static void mtk_ecc_hw_init(struct mtk_ecc *ecc)
{
        mtk_ecc_wait_idle(ecc, ECC_ENCODE);
        writew(ECC_OP_DISABLE, ecc->regs + ECC_ENCCON);

        mtk_ecc_wait_idle(ecc, ECC_DECODE);
        writel(ECC_OP_DISABLE, ecc->regs + ECC_DECCON);
}

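/*
 * Resolve the ECC platform device behind @np, take a reference on it,
 * enable its clock and reset the engine.  Returns ERR_PTR(-EPROBE_DEFER)
 * while the ECC driver has not probed yet.
 */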
static struct mtk_ecc *mtk_ecc_get(struct device_node *np)
{
        struct platform_device *pdev;
        struct mtk_ecc *ecc;

        pdev = of_find_device_by_node(np);
        if (!pdev || !platform_get_drvdata(pdev))
                return ERR_PTR(-EPROBE_DEFER);

        get_device(&pdev->dev);
        ecc = platform_get_drvdata(pdev);
        clk_prepare_enable(ecc->clk);
        mtk_ecc_hw_init(ecc);

        return ecc;
}

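/*
 * Follow the "ecc-engine" phandle of @of_node and return the engine behind
 * it; returns NULL if the property is absent.
 */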
struct mtk_ecc *of_mtk_ecc_get(struct device_node *of_node)
{
        struct mtk_ecc *ecc = NULL;
        struct device_node *np;

        np = of_parse_phandle(of_node, "ecc-engine", 0);
        if (np) {
                ecc = mtk_ecc_get(np);
                of_node_put(np);
        }

        return ecc;
}
EXPORT_SYMBOL(of_mtk_ecc_get);

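/*
 * Claim the engine (ecc->lock stays held until mtk_ecc_disable()), program
 * it via mtk_ecc_config(), enable the selected encoder or decoder and unmask
 * its completion interrupt.
 *
 * Illustrative usage sketch only, not code from this driver: the caller, its
 * surrounding error handling and the exact mtk_ecc_config field values below
 * are assumptions; the calls themselves are the API exported by this file.
 *
 *      struct mtk_ecc_config cfg = {
 *              .op = ECC_DECODE,
 *              .mode = ECC_NFI_MODE,
 *              .strength = 12,
 *              .sectors = 4,
 *              .len = 512,
 *      };
 *      struct mtk_ecc_stats stats;
 *      struct mtk_ecc *engine = of_mtk_ecc_get(dev->of_node);
 *
 *      if (!IS_ERR_OR_NULL(engine) && !mtk_ecc_enable(engine, &cfg)) {
 *              ... start the NFI transfer that feeds the decoder here ...
 *              if (!mtk_ecc_wait_done(engine, ECC_DECODE))
 *                      mtk_ecc_get_stats(engine, &stats, cfg.sectors);
 *              mtk_ecc_disable(engine);
 *      }
 */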
int mtk_ecc_enable(struct mtk_ecc *ecc, struct mtk_ecc_config *config)
{
        enum mtk_ecc_operation op = config->op;
        int ret;

        ret = mutex_lock_interruptible(&ecc->lock);
        if (ret) {
                dev_err(ecc->dev, "interrupted when attempting to lock\n");
                return ret;
        }

        mtk_ecc_wait_idle(ecc, op);
        mtk_ecc_config(ecc, config);
        writew(ECC_OP_ENABLE, ecc->regs + ECC_CTL_REG(op));

        init_completion(&ecc->done);
        writew(ECC_IRQ_EN, ecc->regs + ECC_IRQ_REG(op));

        return 0;
}
EXPORT_SYMBOL(mtk_ecc_enable);

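/*
 * Stop whichever engine is currently enabled, mask its interrupt and release
 * the lock taken in mtk_ecc_enable().
 */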
void mtk_ecc_disable(struct mtk_ecc *ecc)
{
        enum mtk_ecc_operation op = ECC_ENCODE;

        /* find out the running operation */
        if (readw(ecc->regs + ECC_CTL_REG(op)) != ECC_OP_ENABLE)
                op = ECC_DECODE;

        /* disable it */
        mtk_ecc_wait_idle(ecc, op);
        writew(0, ecc->regs + ECC_IRQ_REG(op));
        writew(ECC_OP_DISABLE, ecc->regs + ECC_CTL_REG(op));

        mutex_unlock(&ecc->lock);
}
EXPORT_SYMBOL(mtk_ecc_disable);

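/* Wait up to 500 ms for the interrupt handler to signal completion of @op. */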
int mtk_ecc_wait_done(struct mtk_ecc *ecc, enum mtk_ecc_operation op)
{
        int ret;

        ret = wait_for_completion_timeout(&ecc->done, msecs_to_jiffies(500));
        if (!ret) {
                dev_err(ecc->dev, "%s timeout - interrupt did not arrive\n",
                        (op == ECC_ENCODE) ? "encoder" : "decoder");
                return -ETIMEDOUT;
        }

        return 0;
}
EXPORT_SYMBOL(mtk_ecc_wait_done);

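/*
 * Generate ECC parity for @bytes of @data, feeding the encoder via DMA, then
 * read the parity from the ECC_ENCPAR registers back into the OOB region
 * that directly follows @data.
 */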
int mtk_ecc_encode(struct mtk_ecc *ecc, struct mtk_ecc_config *config,
                   u8 *data, u32 bytes)
{
        dma_addr_t addr;
        u32 *p, len, i;
        int ret = 0;

        addr = dma_map_single(ecc->dev, data, bytes, DMA_TO_DEVICE);
        ret = dma_mapping_error(ecc->dev, addr);
        if (ret) {
                dev_err(ecc->dev, "dma mapping error\n");
                return -EINVAL;
        }

        config->op = ECC_ENCODE;
        config->addr = addr;
        ret = mtk_ecc_enable(ecc, config);
        if (ret) {
                dma_unmap_single(ecc->dev, addr, bytes, DMA_TO_DEVICE);
                return ret;
        }

        ret = mtk_ecc_wait_done(ecc, ECC_ENCODE);
        if (ret)
                goto timeout;

        mtk_ecc_wait_idle(ecc, ECC_ENCODE);

        /* Program ECC bytes to OOB: per sector oob = FDM + ECC + SPARE */
        len = (config->strength * ECC_PARITY_BITS + 7) >> 3;
        p = (u32 *)(data + bytes);

        /* write the parity bytes generated by the ECC back to the OOB region */
        for (i = 0; i < len; i++)
                p[i] = readl(ecc->regs + ECC_ENCPAR(i));
timeout:

        dma_unmap_single(ecc->dev, addr, bytes, DMA_TO_DEVICE);
        mtk_ecc_disable(ecc);

        return ret;
}
EXPORT_SYMBOL(mtk_ecc_encode);

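/*
 * Round the strength requested in *p to a value the hardware supports: the
 * closest supported strength that does not exceed the request, clamped to
 * the minimum of 4 and the maximum of 60.
 */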
void mtk_ecc_adjust_strength(u32 *p)
{
        u32 ecc[] = {4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 28, 32, 36,
                     40, 44, 48, 52, 56, 60};
        int i;

        for (i = 0; i < ARRAY_SIZE(ecc); i++) {
                if (*p <= ecc[i]) {
                        if (!i)
                                *p = ecc[i];
                        else if (*p != ecc[i])
                                *p = ecc[i - 1];
                        return;
                }
        }

        *p = ecc[ARRAY_SIZE(ecc) - 1];
}
EXPORT_SYMBOL(mtk_ecc_adjust_strength);

static int mtk_ecc_probe(struct platform_device *pdev)
{
        struct device *dev = &pdev->dev;
        struct mtk_ecc *ecc;
        struct resource *res;
        int irq, ret;

        ecc = devm_kzalloc(dev, sizeof(*ecc), GFP_KERNEL);
        if (!ecc)
                return -ENOMEM;

        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        ecc->regs = devm_ioremap_resource(dev, res);
        if (IS_ERR(ecc->regs)) {
                dev_err(dev, "failed to map regs: %ld\n", PTR_ERR(ecc->regs));
                return PTR_ERR(ecc->regs);
        }

        ecc->clk = devm_clk_get(dev, NULL);
        if (IS_ERR(ecc->clk)) {
                dev_err(dev, "failed to get clock: %ld\n", PTR_ERR(ecc->clk));
                return PTR_ERR(ecc->clk);
        }

        irq = platform_get_irq(pdev, 0);
        if (irq < 0) {
                dev_err(dev, "failed to get irq\n");
                return -EINVAL;
        }

        ret = dma_set_mask(dev, DMA_BIT_MASK(32));
        if (ret) {
                dev_err(dev, "failed to set DMA mask\n");
                return ret;
        }

        ret = devm_request_irq(dev, irq, mtk_ecc_irq, 0x0, "mtk-ecc", ecc);
        if (ret) {
                dev_err(dev, "failed to request irq\n");
                return -EINVAL;
        }

        ecc->dev = dev;
        mutex_init(&ecc->lock);
        platform_set_drvdata(pdev, ecc);
        dev_info(dev, "probed\n");

        return 0;
}

#ifdef CONFIG_PM_SLEEP
static int mtk_ecc_suspend(struct device *dev)
{
        struct mtk_ecc *ecc = dev_get_drvdata(dev);

        clk_disable_unprepare(ecc->clk);

        return 0;
}

static int mtk_ecc_resume(struct device *dev)
{
        struct mtk_ecc *ecc = dev_get_drvdata(dev);
        int ret;

        ret = clk_prepare_enable(ecc->clk);
        if (ret) {
                dev_err(dev, "failed to enable clk\n");
                return ret;
        }

        mtk_ecc_hw_init(ecc);

        return 0;
}

static SIMPLE_DEV_PM_OPS(mtk_ecc_pm_ops, mtk_ecc_suspend, mtk_ecc_resume);
#endif

static const struct of_device_id mtk_ecc_dt_match[] = {
        { .compatible = "mediatek,mt2701-ecc" },
        {},
};

MODULE_DEVICE_TABLE(of, mtk_ecc_dt_match);

static struct platform_driver mtk_ecc_driver = {
        .probe  = mtk_ecc_probe,
        .driver = {
                .name  = "mtk-ecc",
                .of_match_table = of_match_ptr(mtk_ecc_dt_match),
#ifdef CONFIG_PM_SLEEP
                .pm = &mtk_ecc_pm_ops,
#endif
        },
};

module_platform_driver(mtk_ecc_driver);

MODULE_AUTHOR("Xiaolei Li <xiaolei.li@mediatek.com>");
MODULE_DESCRIPTION("MTK Nand ECC Driver");
MODULE_LICENSE("GPL");