// SPDX-License-Identifier: GPL-2.0
/*
 * NAND Flash Controller Device Driver
 * Copyright © 2009-2010, Intel Corporation and its suppliers.
 *
 * Copyright (c) 2017 Socionext Inc.
 * Reworked by Masahiro Yamada <yamada.masahiro@socionext.com>
 */

#include <linux/bitfield.h>
#include <linux/completion.h>
#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/rawnand.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

#include "denali.h"

#define DENALI_NAND_NAME		"denali-nand"
#define DENALI_DEFAULT_OOB_SKIP_BYTES	8

/* for Indexed Addressing */
#define DENALI_INDEXED_CTRL	0x00
#define DENALI_INDEXED_DATA	0x10

#define DENALI_MAP00		(0 << 26)	/* direct access to buffer */
#define DENALI_MAP01		(1 << 26)	/* read/write pages in PIO */
#define DENALI_MAP10		(2 << 26)	/* high-level control plane */
#define DENALI_MAP11		(3 << 26)	/* direct controller access */

/* MAP11 access cycle type */
#define DENALI_MAP11_CMD	((DENALI_MAP11) | 0)	/* command cycle */
#define DENALI_MAP11_ADDR	((DENALI_MAP11) | 1)	/* address cycle */
#define DENALI_MAP11_DATA	((DENALI_MAP11) | 2)	/* data cycle */

#define DENALI_BANK(denali)	((denali)->active_bank << 24)
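
/*
 * Added commentary (illustrative, not from the original source): MAP01 and
 * MAP10 host addresses are composed as  mode | (bank << 24) | page, while
 * MAP11 encodes the access cycle type in the low bits instead of a page.
 * For example, with active_bank = 1 and page = 0x40, a MAP01 access would
 * use (1 << 26) | (1 << 24) | 0x40 = 0x05000040.
 */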

#define DENALI_INVALID_BANK	-1
#define DENALI_NR_BANKS		4

static inline struct denali_nand_info *mtd_to_denali(struct mtd_info *mtd)
{
	return container_of(mtd_to_nand(mtd), struct denali_nand_info, nand);
}

static struct denali_nand_info *to_denali(struct nand_chip *chip)
{
	return container_of(chip, struct denali_nand_info, nand);
}

/*
 * Direct Addressing - the slave address forms the control information (command
 * type, bank, block, and page address). The slave data is the actual data to
 * be transferred. This mode requires a 28-bit address region to be allocated.
 */
static u32 denali_direct_read(struct denali_nand_info *denali, u32 addr)
{
	return ioread32(denali->host + addr);
}

static void denali_direct_write(struct denali_nand_info *denali, u32 addr,
				u32 data)
{
	iowrite32(data, denali->host + addr);
}

/*
 * Indexed Addressing - the address translation module intervenes in passing
 * the control information. This mode reduces the required address range. The
 * control information and transferred data are latched by the registers in
 * the translation module.
 */
static u32 denali_indexed_read(struct denali_nand_info *denali, u32 addr)
{
	iowrite32(addr, denali->host + DENALI_INDEXED_CTRL);
	return ioread32(denali->host + DENALI_INDEXED_DATA);
}

static void denali_indexed_write(struct denali_nand_info *denali, u32 addr,
				 u32 data)
{
	iowrite32(addr, denali->host + DENALI_INDEXED_CTRL);
	iowrite32(data, denali->host + DENALI_INDEXED_DATA);
}

/*
 * Use the configuration feature register to determine the maximum number of
 * banks that the hardware supports.
 */
static void denali_detect_max_banks(struct denali_nand_info *denali)
{
	uint32_t features = ioread32(denali->reg + FEATURES);

	denali->max_banks = 1 << FIELD_GET(FEATURES__N_BANKS, features);

	/* the encoding changed from rev 5.0 to 5.1 */
	if (denali->revision < 0x0501)
		denali->max_banks <<= 1;
}

static void denali_enable_irq(struct denali_nand_info *denali)
{
	int i;

	for (i = 0; i < DENALI_NR_BANKS; i++)
		iowrite32(U32_MAX, denali->reg + INTR_EN(i));
	iowrite32(GLOBAL_INT_EN_FLAG, denali->reg + GLOBAL_INT_ENABLE);
}

static void denali_disable_irq(struct denali_nand_info *denali)
{
	int i;

	for (i = 0; i < DENALI_NR_BANKS; i++)
		iowrite32(0, denali->reg + INTR_EN(i));
	iowrite32(0, denali->reg + GLOBAL_INT_ENABLE);
}

static void denali_clear_irq(struct denali_nand_info *denali,
			     int bank, uint32_t irq_status)
{
	/* write one to clear bits */
	iowrite32(irq_status, denali->reg + INTR_STATUS(bank));
}

static void denali_clear_irq_all(struct denali_nand_info *denali)
{
	int i;

	for (i = 0; i < DENALI_NR_BANKS; i++)
		denali_clear_irq(denali, i, U32_MAX);
}
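
/*
 * Added commentary: each bank has its own INTR_EN/INTR_STATUS register pair.
 * The ISR below clears every bank's status bits, but only the bits raised by
 * the currently active bank are accumulated into denali->irq_status and used
 * to wake a denali_wait_for_irq() caller via the completion.
 */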
static irqreturn_t denali_isr(int irq, void *dev_id)
{
	struct denali_nand_info *denali = dev_id;
	irqreturn_t ret = IRQ_NONE;
	uint32_t irq_status;
	int i;

	spin_lock(&denali->irq_lock);

	for (i = 0; i < DENALI_NR_BANKS; i++) {
		irq_status = ioread32(denali->reg + INTR_STATUS(i));
		if (irq_status)
			ret = IRQ_HANDLED;

		denali_clear_irq(denali, i, irq_status);

		if (i != denali->active_bank)
			continue;

		denali->irq_status |= irq_status;

		if (denali->irq_status & denali->irq_mask)
			complete(&denali->complete);
	}

	spin_unlock(&denali->irq_lock);

	return ret;
}

static void denali_reset_irq(struct denali_nand_info *denali)
{
	unsigned long flags;

	spin_lock_irqsave(&denali->irq_lock, flags);
	denali->irq_status = 0;
	denali->irq_mask = 0;
	spin_unlock_irqrestore(&denali->irq_lock, flags);
}

static uint32_t denali_wait_for_irq(struct denali_nand_info *denali,
				    uint32_t irq_mask)
{
	unsigned long time_left, flags;
	uint32_t irq_status;

	spin_lock_irqsave(&denali->irq_lock, flags);

	irq_status = denali->irq_status;

	if (irq_mask & irq_status) {
		/* return immediately if the IRQ has already happened. */
		spin_unlock_irqrestore(&denali->irq_lock, flags);
		return irq_status;
	}

	denali->irq_mask = irq_mask;
	reinit_completion(&denali->complete);
	spin_unlock_irqrestore(&denali->irq_lock, flags);

	time_left = wait_for_completion_timeout(&denali->complete,
						msecs_to_jiffies(1000));
	if (!time_left) {
		dev_err(denali->dev, "timeout while waiting for irq 0x%x\n",
			irq_mask);
		return 0;
	}

	return denali->irq_status;
}

static void denali_select_target(struct nand_chip *chip, int cs)
{
	struct denali_nand_info *denali = to_denali(chip);

	denali->active_bank = cs;
}

static int denali_change_column(struct nand_chip *chip, unsigned int offset,
				void *buf, unsigned int len, bool write)
{
	if (write)
		return nand_change_write_column_op(chip, offset, buf, len,
						   false);
	else
		return nand_change_read_column_op(chip, offset, buf, len,
						  false);
}
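
/*
 * Added commentary (illustrative, based on the transfer loops below): the
 * controller stores each page as ECC-sized data chunks interleaved with
 * their ECC bytes, and reserves oob_skip_bytes for the bad block marker at
 * the very start of the spare area. For example, with writesize = 2048,
 * ecc.size = 512, ecc.bytes = 14 and oob_skip_bytes = 8, chunk 3 starts at
 * offset 3 * 526 = 1578; its last 42 bytes would collide with the BBM, so
 * the transfer is split: 470 bytes at 1578, the rest at 2048 + 8 = 2056.
 */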
static int denali_payload_xfer(struct nand_chip *chip, void *buf, bool write)
{
	struct denali_nand_info *denali = to_denali(chip);
	struct mtd_info *mtd = nand_to_mtd(chip);
	struct nand_ecc_ctrl *ecc = &chip->ecc;
	int writesize = mtd->writesize;
	int oob_skip = denali->oob_skip_bytes;
	int ret, i, pos, len;

	for (i = 0; i < ecc->steps; i++) {
		pos = i * (ecc->size + ecc->bytes);
		len = ecc->size;

		if (pos >= writesize) {
			pos += oob_skip;
		} else if (pos + len > writesize) {
			/* This chunk overlaps the BBM area. It must be split. */
			ret = denali_change_column(chip, pos, buf,
						   writesize - pos, write);
			if (ret)
				return ret;

			buf += writesize - pos;
			len -= writesize - pos;
			pos = writesize + oob_skip;
		}

		ret = denali_change_column(chip, pos, buf, len, write);
		if (ret)
			return ret;

		buf += len;
	}

	return 0;
}

static int denali_oob_xfer(struct nand_chip *chip, void *buf, bool write)
{
	struct denali_nand_info *denali = to_denali(chip);
	struct mtd_info *mtd = nand_to_mtd(chip);
	struct nand_ecc_ctrl *ecc = &chip->ecc;
	int writesize = mtd->writesize;
	int oobsize = mtd->oobsize;
	int oob_skip = denali->oob_skip_bytes;
	int ret, i, pos, len;

	/* BBM at the beginning of the OOB area */
	ret = denali_change_column(chip, writesize, buf, oob_skip, write);
	if (ret)
		return ret;

	buf += oob_skip;

	for (i = 0; i < ecc->steps; i++) {
		pos = ecc->size + i * (ecc->size + ecc->bytes);

		if (i == ecc->steps - 1)
			/* The last chunk includes OOB free */
			len = writesize + oobsize - pos - oob_skip;
		else
			len = ecc->bytes;

		if (pos >= writesize) {
			pos += oob_skip;
		} else if (pos + len > writesize) {
			/* This chunk overlaps the BBM area. It must be split. */
			ret = denali_change_column(chip, pos, buf,
						   writesize - pos, write);
			if (ret)
				return ret;

			buf += writesize - pos;
			len -= writesize - pos;
			pos = writesize + oob_skip;
		}

		ret = denali_change_column(chip, pos, buf, len, write);
		if (ret)
			return ret;

		buf += len;
	}

	return 0;
}

static int denali_read_raw(struct nand_chip *chip, void *buf, void *oob_buf,
			   int page)
{
	int ret;

	if (!buf && !oob_buf)
		return -EINVAL;

	ret = nand_read_page_op(chip, page, 0, NULL, 0);
	if (ret)
		return ret;

	if (buf) {
		ret = denali_payload_xfer(chip, buf, false);
		if (ret)
			return ret;
	}

	if (oob_buf) {
		ret = denali_oob_xfer(chip, oob_buf, false);
		if (ret)
			return ret;
	}

	return 0;
}

static int denali_write_raw(struct nand_chip *chip, const void *buf,
			    const void *oob_buf, int page)
{
	int ret;

	if (!buf && !oob_buf)
		return -EINVAL;

	ret = nand_prog_page_begin_op(chip, page, 0, NULL, 0);
	if (ret)
		return ret;

	if (buf) {
		ret = denali_payload_xfer(chip, (void *)buf, true);
		if (ret)
			return ret;
	}

	if (oob_buf) {
		ret = denali_oob_xfer(chip, (void *)oob_buf, true);
		if (ret)
			return ret;
	}

	return nand_prog_page_end_op(chip);
}

static int denali_read_page_raw(struct nand_chip *chip, u8 *buf,
				int oob_required, int page)
{
	return denali_read_raw(chip, buf, oob_required ? chip->oob_poi : NULL,
			       page);
}

static int denali_write_page_raw(struct nand_chip *chip, const u8 *buf,
				 int oob_required, int page)
{
	return denali_write_raw(chip, buf, oob_required ? chip->oob_poi : NULL,
				page);
}

static int denali_read_oob(struct nand_chip *chip, int page)
{
	return denali_read_raw(chip, NULL, chip->oob_poi, page);
}

static int denali_write_oob(struct nand_chip *chip, int page)
{
	return denali_write_raw(chip, NULL, chip->oob_poi, page);
}

static int denali_check_erased_page(struct nand_chip *chip, u8 *buf,
				    unsigned long uncor_ecc_flags,
				    unsigned int max_bitflips)
{
	struct denali_nand_info *denali = to_denali(chip);
	struct mtd_ecc_stats *ecc_stats = &nand_to_mtd(chip)->ecc_stats;
	uint8_t *ecc_code = chip->oob_poi + denali->oob_skip_bytes;
	int ecc_steps = chip->ecc.steps;
	int ecc_size = chip->ecc.size;
	int ecc_bytes = chip->ecc.bytes;
	int i, stat;

	for (i = 0; i < ecc_steps; i++) {
		if (!(uncor_ecc_flags & BIT(i)))
			continue;

		stat = nand_check_erased_ecc_chunk(buf, ecc_size,
						   ecc_code, ecc_bytes,
						   NULL, 0,
						   chip->ecc.strength);
		if (stat < 0) {
			ecc_stats->failed++;
		} else {
			ecc_stats->corrected += stat;
			max_bitflips = max_t(unsigned int, max_bitflips, stat);
		}

		buf += ecc_size;
		ecc_code += ecc_bytes;
	}

	return max_bitflips;
}

static int denali_hw_ecc_fixup(struct nand_chip *chip,
			       unsigned long *uncor_ecc_flags)
{
	struct denali_nand_info *denali = to_denali(chip);
	struct mtd_ecc_stats *ecc_stats = &nand_to_mtd(chip)->ecc_stats;
	int bank = denali->active_bank;
	uint32_t ecc_cor;
	unsigned int max_bitflips;

	ecc_cor = ioread32(denali->reg + ECC_COR_INFO(bank));
	ecc_cor >>= ECC_COR_INFO__SHIFT(bank);

	if (ecc_cor & ECC_COR_INFO__UNCOR_ERR) {
		/*
		 * This flag is set when an uncorrectable error occurs in at
		 * least one ECC sector. We can not know "how many sectors" or
		 * "which sector(s)", so the erased-page check must cover all
		 * sectors.
		 */
		*uncor_ecc_flags = GENMASK(chip->ecc.steps - 1, 0);
		return 0;
	}

	max_bitflips = FIELD_GET(ECC_COR_INFO__MAX_ERRORS, ecc_cor);

	/*
	 * The register holds the maximum of per-sector corrected bitflips.
	 * This is suitable for the return value of the ->read_page() callback.
	 * Unfortunately, we can not know the total number of corrected bits in
	 * the page. Increase the stats by max_bitflips as a compromise.
	 */
	ecc_stats->corrected += max_bitflips;

	return max_bitflips;
}
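
/*
 * Added commentary (illustrative): in the software fixup path below, the
 * controller reports each correctable error as a (sector, byte, device)
 * tuple. The buffer offset is reconstructed as
 *   (err_sector * ecc_size + err_byte) * devs_per_cs + err_device,
 * e.g. sector 2, byte 10, ecc_size 512 and a single device per chip select
 * gives offset 2 * 512 + 10 = 1034.
 */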
static int denali_sw_ecc_fixup(struct nand_chip *chip,
			       unsigned long *uncor_ecc_flags, uint8_t *buf)
{
	struct denali_nand_info *denali = to_denali(chip);
	struct mtd_ecc_stats *ecc_stats = &nand_to_mtd(chip)->ecc_stats;
	unsigned int ecc_size = chip->ecc.size;
	unsigned int bitflips = 0;
	unsigned int max_bitflips = 0;
	uint32_t err_addr, err_cor_info;
	unsigned int err_byte, err_sector, err_device;
	uint8_t err_cor_value;
	unsigned int prev_sector = 0;
	uint32_t irq_status;

	denali_reset_irq(denali);

	do {
		err_addr = ioread32(denali->reg + ECC_ERROR_ADDRESS);
		err_sector = FIELD_GET(ECC_ERROR_ADDRESS__SECTOR, err_addr);
		err_byte = FIELD_GET(ECC_ERROR_ADDRESS__OFFSET, err_addr);

		err_cor_info = ioread32(denali->reg + ERR_CORRECTION_INFO);
		err_cor_value = FIELD_GET(ERR_CORRECTION_INFO__BYTE,
					  err_cor_info);
		err_device = FIELD_GET(ERR_CORRECTION_INFO__DEVICE,
				       err_cor_info);

		/* reset the bitflip counter when crossing ECC sector */
		if (err_sector != prev_sector)
			bitflips = 0;

		if (err_cor_info & ERR_CORRECTION_INFO__UNCOR) {
			/*
			 * Check later if this is a real ECC error, or
			 * an erased sector.
			 */
			*uncor_ecc_flags |= BIT(err_sector);
		} else if (err_byte < ecc_size) {
			/*
			 * If err_byte is larger than ecc_size, the error
			 * happened in the OOB area, so there is no need to
			 * correct it. err_device tells which NAND device the
			 * error bits occurred in when more than one NAND is
			 * connected.
			 */
			int offset;
			unsigned int flips_in_byte;

			offset = (err_sector * ecc_size + err_byte) *
					denali->devs_per_cs + err_device;

			/* correct the ECC error */
			flips_in_byte = hweight8(buf[offset] ^ err_cor_value);
			buf[offset] ^= err_cor_value;
			ecc_stats->corrected += flips_in_byte;
			bitflips += flips_in_byte;

			max_bitflips = max(max_bitflips, bitflips);
		}

		prev_sector = err_sector;
	} while (!(err_cor_info & ERR_CORRECTION_INFO__LAST_ERR));

	/*
	 * Once all ECC errors have been handled, the controller triggers an
	 * ECC_TRANSACTION_DONE interrupt.
	 */
	irq_status = denali_wait_for_irq(denali, INTR__ECC_TRANSACTION_DONE);
	if (!(irq_status & INTR__ECC_TRANSACTION_DONE))
		return -EIO;

	return max_bitflips;
}

static void denali_setup_dma64(struct denali_nand_info *denali,
			       dma_addr_t dma_addr, int page, bool write)
{
	uint32_t mode;
	const int page_count = 1;

	mode = DENALI_MAP10 | DENALI_BANK(denali) | page;

	/* DMA is a three step process */

	/*
	 * 1. setup transfer type, interrupt when complete,
	 *    burst len = 64 bytes, the number of pages
	 */
	denali->host_write(denali, mode,
			   0x01002000 | (64 << 16) |
			   (write ? BIT(8) : 0) | page_count);

	/* 2. set memory low address */
	denali->host_write(denali, mode, lower_32_bits(dma_addr));

	/* 3. set memory high address */
	denali->host_write(denali, mode, upper_32_bits(dma_addr));
}

static void denali_setup_dma32(struct denali_nand_info *denali,
			       dma_addr_t dma_addr, int page, bool write)
{
	uint32_t mode;
	const int page_count = 1;

	mode = DENALI_MAP10 | DENALI_BANK(denali);

	/* DMA is a four step process */

	/* 1. setup transfer type and # of pages */
	denali->host_write(denali, mode | page,
			   0x2000 | (write ? BIT(8) : 0) | page_count);

	/* 2. set memory high address bits 23:8 */
	denali->host_write(denali, mode | ((dma_addr >> 16) << 8), 0x2200);

	/* 3. set memory low address bits 23:8 */
	denali->host_write(denali, mode | ((dma_addr & 0xffff) << 8), 0x2300);

	/* 4. interrupt when complete, burst len = 64 bytes */
	denali->host_write(denali, mode | 0x14000, 0x2400);
}
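
/*
 * Added commentary (illustrative): in MAP01 PIO mode the whole page is moved
 * by repeatedly accessing the same MAP01 address, one 32-bit word at a time.
 * A 2048-byte page therefore takes 512 host reads or writes.
 */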
static int denali_pio_read(struct denali_nand_info *denali, u32 *buf,
			   size_t size, int page)
{
	u32 addr = DENALI_MAP01 | DENALI_BANK(denali) | page;
	uint32_t irq_status, ecc_err_mask;
	int i;

	if (denali->caps & DENALI_CAP_HW_ECC_FIXUP)
		ecc_err_mask = INTR__ECC_UNCOR_ERR;
	else
		ecc_err_mask = INTR__ECC_ERR;

	denali_reset_irq(denali);

	for (i = 0; i < size / 4; i++)
		buf[i] = denali->host_read(denali, addr);

	irq_status = denali_wait_for_irq(denali, INTR__PAGE_XFER_INC);
	if (!(irq_status & INTR__PAGE_XFER_INC))
		return -EIO;

	if (irq_status & INTR__ERASED_PAGE)
		memset(buf, 0xff, size);

	return irq_status & ecc_err_mask ? -EBADMSG : 0;
}

static int denali_pio_write(struct denali_nand_info *denali, const u32 *buf,
			    size_t size, int page)
{
	u32 addr = DENALI_MAP01 | DENALI_BANK(denali) | page;
	uint32_t irq_status;
	int i;

	denali_reset_irq(denali);

	for (i = 0; i < size / 4; i++)
		denali->host_write(denali, addr, buf[i]);

	irq_status = denali_wait_for_irq(denali,
					 INTR__PROGRAM_COMP | INTR__PROGRAM_FAIL);
	if (!(irq_status & INTR__PROGRAM_COMP))
		return -EIO;

	return 0;
}

static int denali_pio_xfer(struct denali_nand_info *denali, void *buf,
			   size_t size, int page, bool write)
{
	if (write)
		return denali_pio_write(denali, buf, size, page);
	else
		return denali_pio_read(denali, buf, size, page);
}

static int denali_dma_xfer(struct denali_nand_info *denali, void *buf,
			   size_t size, int page, bool write)
{
	dma_addr_t dma_addr;
	uint32_t irq_mask, irq_status, ecc_err_mask;
	enum dma_data_direction dir = write ? DMA_TO_DEVICE : DMA_FROM_DEVICE;
	int ret = 0;

	dma_addr = dma_map_single(denali->dev, buf, size, dir);
	if (dma_mapping_error(denali->dev, dma_addr)) {
		dev_dbg(denali->dev, "Failed to DMA-map buffer. Trying PIO.\n");
		return denali_pio_xfer(denali, buf, size, page, write);
	}

	if (write) {
		/*
		 * INTR__PROGRAM_COMP is never asserted for the DMA transfer.
		 * We can use INTR__DMA_CMD_COMP instead. This flag is asserted
		 * when the page program is completed.
		 */
		irq_mask = INTR__DMA_CMD_COMP | INTR__PROGRAM_FAIL;
		ecc_err_mask = 0;
	} else if (denali->caps & DENALI_CAP_HW_ECC_FIXUP) {
		irq_mask = INTR__DMA_CMD_COMP;
		ecc_err_mask = INTR__ECC_UNCOR_ERR;
	} else {
		irq_mask = INTR__DMA_CMD_COMP;
		ecc_err_mask = INTR__ECC_ERR;
	}

	iowrite32(DMA_ENABLE__FLAG, denali->reg + DMA_ENABLE);
	/*
	 * The ->setup_dma() hook kicks DMA by using the data/command
	 * interface, which belongs to a different AXI port from the
	 * register interface. Read back the register to avoid a race.
	 */
	ioread32(denali->reg + DMA_ENABLE);

	denali_reset_irq(denali);
	denali->setup_dma(denali, dma_addr, page, write);

	irq_status = denali_wait_for_irq(denali, irq_mask);
	if (!(irq_status & INTR__DMA_CMD_COMP))
		ret = -EIO;
	else if (irq_status & ecc_err_mask)
		ret = -EBADMSG;

	iowrite32(0, denali->reg + DMA_ENABLE);

	dma_unmap_single(denali->dev, dma_addr, size, dir);

	if (irq_status & INTR__ERASED_PAGE)
		memset(buf, 0xff, size);

	return ret;
}

static int denali_page_xfer(struct nand_chip *chip, void *buf, size_t size,
			    int page, bool write)
{
	struct denali_nand_info *denali = to_denali(chip);

	denali_select_target(chip, chip->cur_cs);

	if (denali->dma_avail)
		return denali_dma_xfer(denali, buf, size, page, write);
	else
		return denali_pio_xfer(denali, buf, size, page, write);
}
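
/*
 * Added commentary: on the ECC read path below, the transfer itself returns
 * -EBADMSG when the hardware flags ECC errors. Depending on the IP
 * configuration, the errors are then resolved either by the hardware fixup
 * (only correction statistics are read back) or by the software fixup (each
 * reported bitflip is corrected in the buffer), and chunks flagged as
 * uncorrectable are re-checked against the erased-page pattern.
 */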
static int denali_read_page(struct nand_chip *chip, uint8_t *buf,
			    int oob_required, int page)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	struct denali_nand_info *denali = mtd_to_denali(mtd);
	unsigned long uncor_ecc_flags = 0;
	int stat = 0;
	int ret;

	ret = denali_page_xfer(chip, buf, mtd->writesize, page, false);
	if (ret && ret != -EBADMSG)
		return ret;

	if (denali->caps & DENALI_CAP_HW_ECC_FIXUP)
		stat = denali_hw_ecc_fixup(chip, &uncor_ecc_flags);
	else if (ret == -EBADMSG)
		stat = denali_sw_ecc_fixup(chip, &uncor_ecc_flags, buf);

	if (stat < 0)
		return stat;

	if (uncor_ecc_flags) {
		ret = denali_read_oob(chip, page);
		if (ret)
			return ret;

		stat = denali_check_erased_page(chip, buf,
						uncor_ecc_flags, stat);
	}

	return stat;
}

static int denali_write_page(struct nand_chip *chip, const uint8_t *buf,
			     int oob_required, int page)
{
	struct mtd_info *mtd = nand_to_mtd(chip);

	return denali_page_xfer(chip, (void *)buf, mtd->writesize, page, true);
}
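
/*
 * Added commentary (illustrative): the timing setup below converts SDR
 * timings from picoseconds into bus-clock (clk_x) cycles. For instance,
 * with clk_x running at 200 MHz, t_x = 5000 ps; a tREA_max of 20000 ps then
 * yields ACC_CLKS = DIV_ROUND_UP(20000, 5000) = 4 cycles.
 */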
static int denali_setup_data_interface(struct nand_chip *chip, int chipnr,
				       const struct nand_data_interface *conf)
{
	struct denali_nand_info *denali = mtd_to_denali(nand_to_mtd(chip));
	const struct nand_sdr_timings *timings;
	unsigned long t_x, mult_x;
	int acc_clks, re_2_we, re_2_re, we_2_re, addr_2_data;
	int rdwr_en_lo, rdwr_en_hi, rdwr_en_lo_hi, cs_setup;
	int addr_2_data_mask;
	uint32_t tmp;

	timings = nand_get_sdr_timings(conf);
	if (IS_ERR(timings))
		return PTR_ERR(timings);

	/* clk_x period in picoseconds */
	t_x = DIV_ROUND_DOWN_ULL(1000000000000ULL, denali->clk_x_rate);
	if (!t_x)
		return -EINVAL;

	/*
	 * The bus interface clock, clk_x, is phase aligned with the core clock.
	 * The clk_x is an integral multiple N of the core clk. The value N is
	 * configured at IP delivery time, and its available value is 4, 5, 6.
	 */
	mult_x = DIV_ROUND_CLOSEST_ULL(denali->clk_x_rate, denali->clk_rate);
	if (mult_x < 4 || mult_x > 6)
		return -EINVAL;

	if (chipnr == NAND_DATA_IFACE_CHECK_ONLY)
		return 0;

	/* tREA -> ACC_CLKS */
	acc_clks = DIV_ROUND_UP(timings->tREA_max, t_x);
	acc_clks = min_t(int, acc_clks, ACC_CLKS__VALUE);

	tmp = ioread32(denali->reg + ACC_CLKS);
	tmp &= ~ACC_CLKS__VALUE;
	tmp |= FIELD_PREP(ACC_CLKS__VALUE, acc_clks);
	iowrite32(tmp, denali->reg + ACC_CLKS);

	/* tRHW -> RE_2_WE */
	re_2_we = DIV_ROUND_UP(timings->tRHW_min, t_x);
	re_2_we = min_t(int, re_2_we, RE_2_WE__VALUE);

	tmp = ioread32(denali->reg + RE_2_WE);
	tmp &= ~RE_2_WE__VALUE;
	tmp |= FIELD_PREP(RE_2_WE__VALUE, re_2_we);
	iowrite32(tmp, denali->reg + RE_2_WE);

	/* tRHZ -> RE_2_RE */
	re_2_re = DIV_ROUND_UP(timings->tRHZ_max, t_x);
	re_2_re = min_t(int, re_2_re, RE_2_RE__VALUE);

	tmp = ioread32(denali->reg + RE_2_RE);
	tmp &= ~RE_2_RE__VALUE;
	tmp |= FIELD_PREP(RE_2_RE__VALUE, re_2_re);
	iowrite32(tmp, denali->reg + RE_2_RE);

	/*
	 * tCCS, tWHR -> WE_2_RE
	 *
	 * With WE_2_RE properly set, the Denali controller automatically takes
	 * care of the delay; the driver need not set NAND_WAIT_TCCS.
	 */
	we_2_re = DIV_ROUND_UP(max(timings->tCCS_min, timings->tWHR_min), t_x);
	we_2_re = min_t(int, we_2_re, TWHR2_AND_WE_2_RE__WE_2_RE);

	tmp = ioread32(denali->reg + TWHR2_AND_WE_2_RE);
	tmp &= ~TWHR2_AND_WE_2_RE__WE_2_RE;
	tmp |= FIELD_PREP(TWHR2_AND_WE_2_RE__WE_2_RE, we_2_re);
	iowrite32(tmp, denali->reg + TWHR2_AND_WE_2_RE);

	/* tADL -> ADDR_2_DATA */

	/* for older versions, ADDR_2_DATA is only 6 bit wide */
	addr_2_data_mask = TCWAW_AND_ADDR_2_DATA__ADDR_2_DATA;
	if (denali->revision < 0x0501)
		addr_2_data_mask >>= 1;

	addr_2_data = DIV_ROUND_UP(timings->tADL_min, t_x);
	addr_2_data = min_t(int, addr_2_data, addr_2_data_mask);

	tmp = ioread32(denali->reg + TCWAW_AND_ADDR_2_DATA);
	tmp &= ~TCWAW_AND_ADDR_2_DATA__ADDR_2_DATA;
	tmp |= FIELD_PREP(TCWAW_AND_ADDR_2_DATA__ADDR_2_DATA, addr_2_data);
	iowrite32(tmp, denali->reg + TCWAW_AND_ADDR_2_DATA);

	/* tREH, tWH -> RDWR_EN_HI_CNT */
	rdwr_en_hi = DIV_ROUND_UP(max(timings->tREH_min, timings->tWH_min),
				  t_x);
	rdwr_en_hi = min_t(int, rdwr_en_hi, RDWR_EN_HI_CNT__VALUE);

	tmp = ioread32(denali->reg + RDWR_EN_HI_CNT);
	tmp &= ~RDWR_EN_HI_CNT__VALUE;
	tmp |= FIELD_PREP(RDWR_EN_HI_CNT__VALUE, rdwr_en_hi);
	iowrite32(tmp, denali->reg + RDWR_EN_HI_CNT);

	/* tRP, tWP -> RDWR_EN_LO_CNT */
	rdwr_en_lo = DIV_ROUND_UP(max(timings->tRP_min, timings->tWP_min), t_x);
	rdwr_en_lo_hi = DIV_ROUND_UP(max(timings->tRC_min, timings->tWC_min),
				     t_x);
	rdwr_en_lo_hi = max_t(int, rdwr_en_lo_hi, mult_x);
	rdwr_en_lo = max(rdwr_en_lo, rdwr_en_lo_hi - rdwr_en_hi);
	rdwr_en_lo = min_t(int, rdwr_en_lo, RDWR_EN_LO_CNT__VALUE);

	tmp = ioread32(denali->reg + RDWR_EN_LO_CNT);
	tmp &= ~RDWR_EN_LO_CNT__VALUE;
	tmp |= FIELD_PREP(RDWR_EN_LO_CNT__VALUE, rdwr_en_lo);
	iowrite32(tmp, denali->reg + RDWR_EN_LO_CNT);

	/* tCS, tCEA -> CS_SETUP_CNT */
	cs_setup = max3((int)DIV_ROUND_UP(timings->tCS_min, t_x) - rdwr_en_lo,
			(int)DIV_ROUND_UP(timings->tCEA_max, t_x) - acc_clks,
			0);
	cs_setup = min_t(int, cs_setup, CS_SETUP_CNT__VALUE);

	tmp = ioread32(denali->reg + CS_SETUP_CNT);
	tmp &= ~CS_SETUP_CNT__VALUE;
	tmp |= FIELD_PREP(CS_SETUP_CNT__VALUE, cs_setup);
	iowrite32(tmp, denali->reg + CS_SETUP_CNT);

	return 0;
}

static void denali_hw_init(struct denali_nand_info *denali)
{
	/*
	 * The REVISION register may not be reliable. Platforms are allowed to
	 * override it.
	 */
	if (!denali->revision)
		denali->revision = swab16(ioread32(denali->reg + REVISION));

	/*
	 * Set how many bytes should be skipped before writing data in OOB.
	 * If a non-zero value has already been set (by firmware or something),
	 * just use it. Otherwise, set the driver default.
	 */
	denali->oob_skip_bytes = ioread32(denali->reg + SPARE_AREA_SKIP_BYTES);
	if (!denali->oob_skip_bytes) {
		denali->oob_skip_bytes = DENALI_DEFAULT_OOB_SKIP_BYTES;
		iowrite32(denali->oob_skip_bytes,
			  denali->reg + SPARE_AREA_SKIP_BYTES);
	}

	denali_detect_max_banks(denali);
	iowrite32(0, denali->reg + TRANSFER_SPARE_REG);
	iowrite32(0x0F, denali->reg + RB_PIN_ENABLED);
	iowrite32(CHIP_EN_DONT_CARE__FLAG, denali->reg + CHIP_ENABLE_DONT_CARE);
	iowrite32(ECC_ENABLE__FLAG, denali->reg + ECC_ENABLE);
	iowrite32(0xffff, denali->reg + SPARE_AREA_MARKER);
}
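
/*
 * Added commentary (illustrative): for a 512-byte ECC step with strength 8,
 * fls(512 * 8) = 13, so DIV_ROUND_UP(8 * 13, 16) * 2 = 14 ECC bytes per step.
 */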
int denali_calc_ecc_bytes(int step_size, int strength)
{
	/* BCH code. Denali requires ecc.bytes to be a multiple of 2. */
	return DIV_ROUND_UP(strength * fls(step_size * 8), 16) * 2;
}
EXPORT_SYMBOL(denali_calc_ecc_bytes);
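
/*
 * Added commentary (illustrative): the OOB layout exposed to MTD is
 * [BBM: oob_skip_bytes][ECC: chip->ecc.total][free: the rest]. For example,
 * with a 128-byte OOB, 8 skipped bytes and 56 ECC bytes in total, the ECC
 * region spans bytes 8-63 and the free region bytes 64-127.
 */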
static int denali_ooblayout_ecc(struct mtd_info *mtd, int section,
				struct mtd_oob_region *oobregion)
{
	struct denali_nand_info *denali = mtd_to_denali(mtd);
	struct nand_chip *chip = mtd_to_nand(mtd);

	if (section)
		return -ERANGE;

	oobregion->offset = denali->oob_skip_bytes;
	oobregion->length = chip->ecc.total;

	return 0;
}

static int denali_ooblayout_free(struct mtd_info *mtd, int section,
				 struct mtd_oob_region *oobregion)
{
	struct denali_nand_info *denali = mtd_to_denali(mtd);
	struct nand_chip *chip = mtd_to_nand(mtd);

	if (section)
		return -ERANGE;

	oobregion->offset = chip->ecc.total + denali->oob_skip_bytes;
	oobregion->length = mtd->oobsize - oobregion->offset;

	return 0;
}

static const struct mtd_ooblayout_ops denali_ooblayout_ops = {
	.ecc = denali_ooblayout_ecc,
	.free = denali_ooblayout_free,
};
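
/*
 * Added commentary (illustrative): with two x8 devices behind one chip
 * select, every geometry parameter below is doubled, so a pair of chips with
 * 2KiB pages and 64-byte OOB is presented to the core as a single device
 * with 4KiB pages and 128-byte OOB.
 */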
750f69b8 947static int denali_multidev_fixup(struct nand_chip *chip)
6da27b46 948{
750f69b8 949 struct denali_nand_info *denali = to_denali(chip);
6da27b46 950 struct mtd_info *mtd = nand_to_mtd(chip);
629a442c
BB
951 struct nand_memory_organization *memorg;
952
953 memorg = nanddev_get_memorg(&chip->base);
6da27b46
MY
954
955 /*
956 * Support for multi device:
957 * When the IP configuration is x16 capable and two x8 chips are
958 * connected in parallel, DEVICES_CONNECTED should be set to 2.
959 * In this case, the core framework knows nothing about this fact,
960 * so we should tell it the _logical_ pagesize and anything necessary.
961 */
0d3a966d 962 denali->devs_per_cs = ioread32(denali->reg + DEVICES_CONNECTED);
6da27b46 963
cc5d8031
MY
964 /*
965 * On some SoCs, DEVICES_CONNECTED is not auto-detected.
966 * For those, DEVICES_CONNECTED is left to 0. Set 1 if it is the case.
967 */
0d3a966d
MY
968 if (denali->devs_per_cs == 0) {
969 denali->devs_per_cs = 1;
970 iowrite32(1, denali->reg + DEVICES_CONNECTED);
cc5d8031
MY
971 }
972
0d3a966d 973 if (denali->devs_per_cs == 1)
e93c1640
MY
974 return 0;
975
0d3a966d 976 if (denali->devs_per_cs != 2) {
e93c1640 977 dev_err(denali->dev, "unsupported number of devices %d\n",
0d3a966d 978 denali->devs_per_cs);
e93c1640
MY
979 return -EINVAL;
980 }
981
982 /* 2 chips in parallel */
629a442c
BB
983 memorg->pagesize <<= 1;
984 memorg->oobsize <<= 1;
e93c1640
MY
985 mtd->size <<= 1;
986 mtd->erasesize <<= 1;
987 mtd->writesize <<= 1;
988 mtd->oobsize <<= 1;
e93c1640
MY
989 chip->page_shift += 1;
990 chip->phys_erase_shift += 1;
991 chip->bbt_erase_shift += 1;
992 chip->chip_shift += 1;
993 chip->pagemask <<= 1;
994 chip->ecc.size <<= 1;
995 chip->ecc.bytes <<= 1;
996 chip->ecc.strength <<= 1;
0d3a966d 997 denali->oob_skip_bytes <<= 1;
e93c1640
MY
998
999 return 0;
6da27b46
MY
1000}
1001
d03af162 1002static int denali_attach_chip(struct nand_chip *chip)
ce082596 1003{
1394a726 1004 struct mtd_info *mtd = nand_to_mtd(chip);
d03af162 1005 struct denali_nand_info *denali = mtd_to_denali(mtd);
2a0a288e 1006 int ret;
ce082596 1007
0d3a966d 1008 if (ioread32(denali->reg + FEATURES) & FEATURES__DMA)
f4f16fd3 1009 denali->dma_avail = true;
26d266e1
MY
1010
1011 if (denali->dma_avail) {
1012 int dma_bit = denali->caps & DENALI_CAP_DMA_64BIT ? 64 : 32;
1013
1014 ret = dma_set_mask(denali->dev, DMA_BIT_MASK(dma_bit));
1015 if (ret) {
1016 dev_info(denali->dev,
1017 "Failed to set DMA mask. Disabling DMA.\n");
f4f16fd3 1018 denali->dma_avail = false;
26d266e1 1019 }
e07caa36
HS
1020 }
1021
26d266e1 1022 if (denali->dma_avail) {
997cde2a
MY
1023 chip->options |= NAND_USE_BOUNCE_BUFFER;
1024 chip->buf_align = 16;
89dcb27b
MY
1025 if (denali->caps & DENALI_CAP_DMA_64BIT)
1026 denali->setup_dma = denali_setup_dma64;
1027 else
1028 denali->setup_dma = denali_setup_dma32;
66406524
CD
1029 }
1030
1394a726 1031 chip->bbt_options |= NAND_BBT_USE_FLASH;
777f2d49 1032 chip->bbt_options |= NAND_BBT_NO_OOB;
1394a726 1033 chip->ecc.mode = NAND_ECC_HW_SYNDROME;
1394a726 1034 chip->options |= NAND_NO_SUBPAGE_WRITE;
d99d7282 1035
f9801fda
AS
1036 ret = nand_ecc_choose_conf(chip, denali->ecc_caps,
1037 mtd->oobsize - denali->oob_skip_bytes);
7de117fd
MY
1038 if (ret) {
1039 dev_err(denali->dev, "Failed to setup ECC settings.\n");
d03af162 1040 return ret;
ce082596
JR
1041 }
1042
7de117fd
MY
1043 dev_dbg(denali->dev,
1044 "chosen ECC settings: step=%d, strength=%d, bytes=%d\n",
1045 chip->ecc.size, chip->ecc.strength, chip->ecc.bytes);
1046
e0d53b3f
MY
1047 iowrite32(FIELD_PREP(ECC_CORRECTION__ERASE_THRESHOLD, 1) |
1048 FIELD_PREP(ECC_CORRECTION__VALUE, chip->ecc.strength),
0d3a966d 1049 denali->reg + ECC_CORRECTION);
0615e7ad 1050 iowrite32(mtd->erasesize / mtd->writesize,
0d3a966d 1051 denali->reg + PAGES_PER_BLOCK);
0615e7ad 1052 iowrite32(chip->options & NAND_BUSWIDTH_16 ? 1 : 0,
0d3a966d 1053 denali->reg + DEVICE_WIDTH);
a3750a64
MY
1054 iowrite32(chip->options & NAND_ROW_ADDR_3 ? 0 : TWO_ROW_ADDR_CYCLES__FLAG,
1055 denali->reg + TWO_ROW_ADDR_CYCLES);
0d3a966d
MY
1056 iowrite32(mtd->writesize, denali->reg + DEVICE_MAIN_AREA_SIZE);
1057 iowrite32(mtd->oobsize, denali->reg + DEVICE_SPARE_AREA_SIZE);
7de117fd 1058
0d3a966d
MY
1059 iowrite32(chip->ecc.size, denali->reg + CFG_DATA_BLOCK_SIZE);
1060 iowrite32(chip->ecc.size, denali->reg + CFG_LAST_DATA_BLOCK_SIZE);
7de117fd
MY
1061 /* chip->ecc.steps is set by nand_scan_tail(); not available here */
1062 iowrite32(mtd->writesize / chip->ecc.size,
0d3a966d 1063 denali->reg + CFG_NUM_DATA_BLOCKS);
7de117fd 1064
14fad62b 1065 mtd_set_ooblayout(mtd, &denali_ooblayout_ops);
db9a3210 1066
1394a726
MY
1067 chip->ecc.read_page = denali_read_page;
1068 chip->ecc.read_page_raw = denali_read_page_raw;
1069 chip->ecc.write_page = denali_write_page;
1070 chip->ecc.write_page_raw = denali_write_page_raw;
1071 chip->ecc.read_oob = denali_read_oob;
1072 chip->ecc.write_oob = denali_write_oob;
ce082596 1073
750f69b8 1074 ret = denali_multidev_fixup(chip);
e93c1640 1075 if (ret)
d03af162 1076 return ret;
6da27b46 1077
d03af162
MR
1078 return 0;
1079}
1080
f5541142
MY
1081static void denali_exec_in8(struct denali_nand_info *denali, u32 type,
1082 u8 *buf, unsigned int len)
1083{
1084 int i;
1085
1086 for (i = 0; i < len; i++)
1087 buf[i] = denali->host_read(denali, type | DENALI_BANK(denali));
1088}
1089
1090static void denali_exec_in16(struct denali_nand_info *denali, u32 type,
1091 u8 *buf, unsigned int len)
1092{
1093 u32 data;
1094 int i;
1095
1096 for (i = 0; i < len; i += 2) {
1097 data = denali->host_read(denali, type | DENALI_BANK(denali));
1098 /* bit 31:24 and 15:8 are used for DDR */
1099 buf[i] = data;
1100 buf[i + 1] = data >> 16;
1101 }
1102}
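
/*
 * Added commentary (illustrative): on a 16-bit bus, two consecutive buffer
 * bytes map to one host word as  buf[i + 1] << 16 | buf[i]  (and back), so
 * buf = { 0xAA, 0xBB } is transferred as 0x00BB00AA.
 */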

static void denali_exec_in(struct denali_nand_info *denali, u32 type,
			   u8 *buf, unsigned int len, bool width16)
{
	if (width16)
		denali_exec_in16(denali, type, buf, len);
	else
		denali_exec_in8(denali, type, buf, len);
}

static void denali_exec_out8(struct denali_nand_info *denali, u32 type,
			     const u8 *buf, unsigned int len)
{
	int i;

	for (i = 0; i < len; i++)
		denali->host_write(denali, type | DENALI_BANK(denali), buf[i]);
}

static void denali_exec_out16(struct denali_nand_info *denali, u32 type,
			      const u8 *buf, unsigned int len)
{
	int i;

	for (i = 0; i < len; i += 2)
		denali->host_write(denali, type | DENALI_BANK(denali),
				   buf[i + 1] << 16 | buf[i]);
}

static void denali_exec_out(struct denali_nand_info *denali, u32 type,
			    const u8 *buf, unsigned int len, bool width16)
{
	if (width16)
		denali_exec_out16(denali, type, buf, len);
	else
		denali_exec_out8(denali, type, buf, len);
}

static int denali_exec_waitrdy(struct denali_nand_info *denali)
{
	u32 irq_stat;

	/* R/B# pin transitioned from low to high? */
	irq_stat = denali_wait_for_irq(denali, INTR__INT_ACT);

	/* Just in case nand_operation has multiple NAND_OP_WAITRDY_INSTR. */
	denali_reset_irq(denali);

	return irq_stat & INTR__INT_ACT ? 0 : -EIO;
}

static int denali_exec_instr(struct nand_chip *chip,
			     const struct nand_op_instr *instr)
{
	struct denali_nand_info *denali = to_denali(chip);

	switch (instr->type) {
	case NAND_OP_CMD_INSTR:
		denali_exec_out8(denali, DENALI_MAP11_CMD,
				 &instr->ctx.cmd.opcode, 1);
		return 0;
	case NAND_OP_ADDR_INSTR:
		denali_exec_out8(denali, DENALI_MAP11_ADDR,
				 instr->ctx.addr.addrs,
				 instr->ctx.addr.naddrs);
		return 0;
	case NAND_OP_DATA_IN_INSTR:
		denali_exec_in(denali, DENALI_MAP11_DATA,
			       instr->ctx.data.buf.in,
			       instr->ctx.data.len,
			       !instr->ctx.data.force_8bit &&
			       chip->options & NAND_BUSWIDTH_16);
		return 0;
	case NAND_OP_DATA_OUT_INSTR:
		denali_exec_out(denali, DENALI_MAP11_DATA,
				instr->ctx.data.buf.out,
				instr->ctx.data.len,
				!instr->ctx.data.force_8bit &&
				chip->options & NAND_BUSWIDTH_16);
		return 0;
	case NAND_OP_WAITRDY_INSTR:
		return denali_exec_waitrdy(denali);
	default:
		WARN_ONCE(1, "unsupported NAND instruction type: %d\n",
			  instr->type);

		return -EINVAL;
	}
}

static int denali_exec_op(struct nand_chip *chip,
			  const struct nand_operation *op, bool check_only)
{
	int i, ret;

	if (check_only)
		return 0;

	denali_select_target(chip, op->cs);

	/*
	 * Some commands contain NAND_OP_WAITRDY_INSTR.
	 * The IRQ status must be cleared here to catch the R/B# interrupt
	 * there.
	 */
	denali_reset_irq(to_denali(chip));

	for (i = 0; i < op->ninstrs; i++) {
		ret = denali_exec_instr(chip, &op->instrs[i]);
		if (ret)
			return ret;
	}

	return 0;
}

static const struct nand_controller_ops denali_controller_ops = {
	.attach_chip = denali_attach_chip,
	.exec_op = denali_exec_op,
	.setup_data_interface = denali_setup_data_interface,
};

int denali_init(struct denali_nand_info *denali)
{
	struct nand_chip *chip = &denali->nand;
	struct mtd_info *mtd = nand_to_mtd(chip);
	u32 features = ioread32(denali->reg + FEATURES);
	int ret;

	mtd->dev.parent = denali->dev;
	denali_hw_init(denali);

	init_completion(&denali->complete);
	spin_lock_init(&denali->irq_lock);

	denali_clear_irq_all(denali);

	ret = devm_request_irq(denali->dev, denali->irq, denali_isr,
			       IRQF_SHARED, DENALI_NAND_NAME, denali);
	if (ret) {
		dev_err(denali->dev, "Unable to request IRQ\n");
		return ret;
	}

	denali_enable_irq(denali);

	denali->active_bank = DENALI_INVALID_BANK;

	nand_set_flash_node(chip, denali->dev->of_node);
	/* Fall back to the default name if DT did not give a "label" property */
	if (!mtd->name)
		mtd->name = "denali-nand";

	if (features & FEATURES__INDEX_ADDR) {
		denali->host_read = denali_indexed_read;
		denali->host_write = denali_indexed_write;
	} else {
		denali->host_read = denali_direct_read;
		denali->host_write = denali_direct_write;
	}

	/* clk rate info is needed for setup_data_interface */
	if (!denali->clk_rate || !denali->clk_x_rate)
		chip->options |= NAND_KEEP_TIMINGS;

	chip->legacy.dummy_controller.ops = &denali_controller_ops;
	ret = nand_scan(chip, denali->max_banks);
	if (ret)
		goto disable_irq;

	ret = mtd_device_register(mtd, NULL, 0);
	if (ret) {
		dev_err(denali->dev, "Failed to register MTD: %d\n", ret);
		goto cleanup_nand;
	}

	return 0;

cleanup_nand:
	nand_cleanup(chip);
disable_irq:
	denali_disable_irq(denali);

	return ret;
}
EXPORT_SYMBOL(denali_init);

void denali_remove(struct denali_nand_info *denali)
{
	nand_release(&denali->nand);
	denali_disable_irq(denali);
}
EXPORT_SYMBOL(denali_remove);

MODULE_DESCRIPTION("Driver core for Denali NAND controller");
MODULE_AUTHOR("Intel Corporation and its suppliers");
MODULE_LICENSE("GPL v2");