1 /*
2 * drivers/mtd/nand/pxa3xx_nand.c
3 *
4 * Copyright © 2005 Intel Corporation
5 * Copyright © 2006 Marvell International Ltd.
6 *
7 * SPDX-License-Identifier: GPL-2.0
8 */
9
10 #include <common.h>
11 #include <malloc.h>
12 #include <nand.h>
13 #include <asm/errno.h>
14 #include <asm/io.h>
15 #include <asm/arch/cpu.h>
16 #include <linux/mtd/mtd.h>
17 #include <linux/mtd/nand.h>
18 #include <linux/types.h>
19
20 #include "pxa3xx_nand.h"
21
22 /* Some U-Boot compatibility macros */
23 #define writesl(a, d, s) __raw_writesl((unsigned long)a, d, s)
24 #define readsl(a, d, s) __raw_readsl((unsigned long)a, d, s)
25 #define writesw(a, d, s) __raw_writesw((unsigned long)a, d, s)
26 #define readsw(a, d, s) __raw_readsw((unsigned long)a, d, s)
27 #define writesb(a, d, s) __raw_writesb((unsigned long)a, d, s)
28 #define readsb(a, d, s) __raw_readsb((unsigned long)a, d, s)
29
30 #define TIMEOUT_DRAIN_FIFO 5 /* in ms */
31 #define CHIP_DELAY_TIMEOUT 200
32 #define NAND_STOP_DELAY 40
33 #define PAGE_CHUNK_SIZE (2048)
34
35 /*
36 * Define a buffer size for the initial command that detects the flash device:
37 * STATUS, READID and PARAM. The largest of these is the PARAM command,
38 * needing 256 bytes.
39 */
40 #define INIT_BUFFER_SIZE 256
41
42 /* registers and bit definitions */
43 #define NDCR (0x00) /* Control register */
44 #define NDTR0CS0 (0x04) /* Timing Parameter 0 for CS0 */
45 #define NDTR1CS0 (0x0C) /* Timing Parameter 1 for CS0 */
46 #define NDSR (0x14) /* Status Register */
47 #define NDPCR (0x18) /* Page Count Register */
48 #define NDBDR0 (0x1C) /* Bad Block Register 0 */
49 #define NDBDR1 (0x20) /* Bad Block Register 1 */
50 #define NDECCCTRL (0x28) /* ECC control */
51 #define NDDB (0x40) /* Data Buffer */
52 #define NDCB0 (0x48) /* Command Buffer0 */
53 #define NDCB1 (0x4C) /* Command Buffer1 */
54 #define NDCB2 (0x50) /* Command Buffer2 */
55
56 #define NDCR_SPARE_EN (0x1 << 31)
57 #define NDCR_ECC_EN (0x1 << 30)
58 #define NDCR_DMA_EN (0x1 << 29)
59 #define NDCR_ND_RUN (0x1 << 28)
60 #define NDCR_DWIDTH_C (0x1 << 27)
61 #define NDCR_DWIDTH_M (0x1 << 26)
62 #define NDCR_PAGE_SZ (0x1 << 24)
63 #define NDCR_NCSX (0x1 << 23)
64 #define NDCR_ND_MODE (0x3 << 21)
65 #define NDCR_NAND_MODE (0x0)
66 #define NDCR_CLR_PG_CNT (0x1 << 20)
67 #define NDCR_STOP_ON_UNCOR (0x1 << 19)
68 #define NDCR_RD_ID_CNT_MASK (0x7 << 16)
69 #define NDCR_RD_ID_CNT(x) (((x) << 16) & NDCR_RD_ID_CNT_MASK)
70
71 #define NDCR_RA_START (0x1 << 15)
72 #define NDCR_PG_PER_BLK (0x1 << 14)
73 #define NDCR_ND_ARB_EN (0x1 << 12)
74 #define NDCR_INT_MASK (0xFFF)
75
76 #define NDSR_MASK (0xfff)
77 #define NDSR_ERR_CNT_OFF (16)
78 #define NDSR_ERR_CNT_MASK (0x1f)
79 #define NDSR_ERR_CNT(sr) ((sr >> NDSR_ERR_CNT_OFF) & NDSR_ERR_CNT_MASK)
80 #define NDSR_RDY (0x1 << 12)
81 #define NDSR_FLASH_RDY (0x1 << 11)
82 #define NDSR_CS0_PAGED (0x1 << 10)
83 #define NDSR_CS1_PAGED (0x1 << 9)
84 #define NDSR_CS0_CMDD (0x1 << 8)
85 #define NDSR_CS1_CMDD (0x1 << 7)
86 #define NDSR_CS0_BBD (0x1 << 6)
87 #define NDSR_CS1_BBD (0x1 << 5)
88 #define NDSR_UNCORERR (0x1 << 4)
89 #define NDSR_CORERR (0x1 << 3)
90 #define NDSR_WRDREQ (0x1 << 2)
91 #define NDSR_RDDREQ (0x1 << 1)
92 #define NDSR_WRCMDREQ (0x1)
93
94 #define NDCB0_LEN_OVRD (0x1 << 28)
95 #define NDCB0_ST_ROW_EN (0x1 << 26)
96 #define NDCB0_AUTO_RS (0x1 << 25)
97 #define NDCB0_CSEL (0x1 << 24)
98 #define NDCB0_EXT_CMD_TYPE_MASK (0x7 << 29)
99 #define NDCB0_EXT_CMD_TYPE(x) (((x) << 29) & NDCB0_EXT_CMD_TYPE_MASK)
100 #define NDCB0_CMD_TYPE_MASK (0x7 << 21)
101 #define NDCB0_CMD_TYPE(x) (((x) << 21) & NDCB0_CMD_TYPE_MASK)
102 #define NDCB0_NC (0x1 << 20)
103 #define NDCB0_DBC (0x1 << 19)
104 #define NDCB0_ADDR_CYC_MASK (0x7 << 16)
105 #define NDCB0_ADDR_CYC(x) (((x) << 16) & NDCB0_ADDR_CYC_MASK)
106 #define NDCB0_CMD2_MASK (0xff << 8)
107 #define NDCB0_CMD1_MASK (0xff)
108 #define NDCB0_ADDR_CYC_SHIFT (16)
109
110 #define EXT_CMD_TYPE_DISPATCH 6 /* Command dispatch */
111 #define EXT_CMD_TYPE_NAKED_RW 5 /* Naked read or Naked write */
112 #define EXT_CMD_TYPE_READ 4 /* Read */
113 #define EXT_CMD_TYPE_DISP_WR 4 /* Command dispatch with write */
114 #define EXT_CMD_TYPE_FINAL 3 /* Final command */
115 #define EXT_CMD_TYPE_LAST_RW 1 /* Last naked read/write */
116 #define EXT_CMD_TYPE_MONO 0 /* Monolithic read/write */
117
118 /* macros for registers read/write */
119 #define nand_writel(info, off, val) \
120 writel((val), (info)->mmio_base + (off))
121
122 #define nand_readl(info, off) \
123 readl((info)->mmio_base + (off))
124
125 /* error code and state */
126 enum {
127 ERR_NONE = 0,
128 ERR_DMABUSERR = -1,
129 ERR_SENDCMD = -2,
130 ERR_UNCORERR = -3,
131 ERR_BBERR = -4,
132 ERR_CORERR = -5,
133 };
134
135 enum {
136 STATE_IDLE = 0,
137 STATE_PREPARED,
138 STATE_CMD_HANDLE,
139 STATE_DMA_READING,
140 STATE_DMA_WRITING,
141 STATE_DMA_DONE,
142 STATE_PIO_READING,
143 STATE_PIO_WRITING,
144 STATE_CMD_DONE,
145 STATE_READY,
146 };
147
148 enum pxa3xx_nand_variant {
149 PXA3XX_NAND_VARIANT_PXA,
150 PXA3XX_NAND_VARIANT_ARMADA370,
151 };
152
153 struct pxa3xx_nand_host {
154 struct nand_chip chip;
155 struct mtd_info *mtd;
156 void *info_data;
157
158 /* page size of attached chip */
159 int use_ecc;
160 int cs;
161
162 /* calculated from pxa3xx_nand_flash data */
163 unsigned int col_addr_cycles;
164 unsigned int row_addr_cycles;
165 size_t read_id_bytes;
166
167 };
168
169 struct pxa3xx_nand_info {
170 struct nand_hw_control controller;
171 struct pxa3xx_nand_platform_data *pdata;
172
173 struct clk *clk;
174 void __iomem *mmio_base;
175 unsigned long mmio_phys;
176 int cmd_complete, dev_ready;
177
178 unsigned int buf_start;
179 unsigned int buf_count;
180 unsigned int buf_size;
181 unsigned int data_buff_pos;
182 unsigned int oob_buff_pos;
183
184 unsigned char *data_buff;
185 unsigned char *oob_buff;
186
187 struct pxa3xx_nand_host *host[NUM_CHIP_SELECT];
188 unsigned int state;
189
190 /*
191 * This driver supports NFCv1 (as found in PXA SoC)
192 * and NFCv2 (as found in Armada 370/XP SoC).
193 */
194 enum pxa3xx_nand_variant variant;
195
196 int cs;
197 int use_ecc; /* use HW ECC ? */
198 int ecc_bch; /* using BCH ECC? */
199 int use_spare; /* use spare ? */
200 int need_wait;
201
202 unsigned int data_size; /* data to be read from FIFO */
203 unsigned int chunk_size; /* split commands chunk size */
204 unsigned int oob_size;
205 unsigned int spare_size;
206 unsigned int ecc_size;
207 unsigned int ecc_err_cnt;
208 unsigned int max_bitflips;
209 int retcode;
210
211 /* cached register value */
212 uint32_t reg_ndcr;
213 uint32_t ndtr0cs0;
214 uint32_t ndtr1cs0;
215
216 /* generated NDCBx register values */
217 uint32_t ndcb0;
218 uint32_t ndcb1;
219 uint32_t ndcb2;
220 uint32_t ndcb3;
221 };
222
223 static struct pxa3xx_nand_timing timing[] = {
224 { 40, 80, 60, 100, 80, 100, 90000, 400, 40, },
225 { 10, 0, 20, 40, 30, 40, 11123, 110, 10, },
226 { 10, 25, 15, 25, 15, 30, 25000, 60, 10, },
227 { 10, 35, 15, 25, 15, 25, 25000, 60, 10, },
228 };
229
230 static struct pxa3xx_nand_flash builtin_flash_types[] = {
231 { 0x46ec, 16, 16, &timing[1] },
232 { 0xdaec, 8, 8, &timing[1] },
233 { 0xd7ec, 8, 8, &timing[1] },
234 { 0xa12c, 8, 8, &timing[2] },
235 { 0xb12c, 16, 16, &timing[2] },
236 { 0xdc2c, 8, 8, &timing[2] },
237 { 0xcc2c, 16, 16, &timing[2] },
238 { 0xba20, 16, 16, &timing[3] },
239 };
240
241 static u8 bbt_pattern[] = {'M', 'V', 'B', 'b', 't', '0' };
242 static u8 bbt_mirror_pattern[] = {'1', 't', 'b', 'B', 'V', 'M' };
243
244 static struct nand_bbt_descr bbt_main_descr = {
245 .options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE
246 | NAND_BBT_2BIT | NAND_BBT_VERSION,
247 .offs = 8,
248 .len = 6,
249 .veroffs = 14,
250 .maxblocks = 8, /* Last 8 blocks in each chip */
251 .pattern = bbt_pattern
252 };
253
254 static struct nand_bbt_descr bbt_mirror_descr = {
255 .options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE
256 | NAND_BBT_2BIT | NAND_BBT_VERSION,
257 .offs = 8,
258 .len = 6,
259 .veroffs = 14,
260 .maxblocks = 8, /* Last 8 blocks in each chip */
261 .pattern = bbt_mirror_pattern
262 };
263
264 static struct nand_ecclayout ecc_layout_2KB_bch4bit = {
265 .eccbytes = 32,
266 .eccpos = {
267 32, 33, 34, 35, 36, 37, 38, 39,
268 40, 41, 42, 43, 44, 45, 46, 47,
269 48, 49, 50, 51, 52, 53, 54, 55,
270 56, 57, 58, 59, 60, 61, 62, 63},
271 .oobfree = { {2, 30} }
272 };
273
274 static struct nand_ecclayout ecc_layout_4KB_bch4bit = {
275 .eccbytes = 64,
276 .eccpos = {
277 32, 33, 34, 35, 36, 37, 38, 39,
278 40, 41, 42, 43, 44, 45, 46, 47,
279 48, 49, 50, 51, 52, 53, 54, 55,
280 56, 57, 58, 59, 60, 61, 62, 63,
281 96, 97, 98, 99, 100, 101, 102, 103,
282 104, 105, 106, 107, 108, 109, 110, 111,
283 112, 113, 114, 115, 116, 117, 118, 119,
284 120, 121, 122, 123, 124, 125, 126, 127},
285 /* Bootrom looks in bytes 0 & 5 for bad blocks */
286 .oobfree = { {6, 26}, { 64, 32} }
287 };
288
289 static struct nand_ecclayout ecc_layout_4KB_bch8bit = {
290 .eccbytes = 128,
291 .eccpos = {
292 32, 33, 34, 35, 36, 37, 38, 39,
293 40, 41, 42, 43, 44, 45, 46, 47,
294 48, 49, 50, 51, 52, 53, 54, 55,
295 56, 57, 58, 59, 60, 61, 62, 63},
296 .oobfree = { }
297 };
298
299 #define NDTR0_tCH(c) (min((c), 7) << 19)
300 #define NDTR0_tCS(c) (min((c), 7) << 16)
301 #define NDTR0_tWH(c) (min((c), 7) << 11)
302 #define NDTR0_tWP(c) (min((c), 7) << 8)
303 #define NDTR0_tRH(c) (min((c), 7) << 3)
304 #define NDTR0_tRP(c) (min((c), 7) << 0)
305
306 #define NDTR1_tR(c) (min((c), 65535) << 16)
307 #define NDTR1_tWHR(c) (min((c), 15) << 4)
308 #define NDTR1_tAR(c) (min((c), 15) << 0)
309
310 /* convert nano-seconds to nand flash controller clock cycles */
311 #define ns2cycle(ns, clk) (int)((ns) * (clk / 1000000) / 1000)
312
313 static enum pxa3xx_nand_variant pxa3xx_nand_get_variant(void)
314 {
315 /* We only support the Armada 370/XP/38x for now */
316 return PXA3XX_NAND_VARIANT_ARMADA370;
317 }
318
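/*
 * Program the CS0 timing registers from a legacy pxa3xx_nand_timing
 * entry: each value is given in nanoseconds and converted into NAND
 * controller clock cycles before being packed into NDTR0/NDTR1.
 */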
319 static void pxa3xx_nand_set_timing(struct pxa3xx_nand_host *host,
320 const struct pxa3xx_nand_timing *t)
321 {
322 struct pxa3xx_nand_info *info = host->info_data;
323 unsigned long nand_clk = mvebu_get_nand_clock();
324 uint32_t ndtr0, ndtr1;
325
326 ndtr0 = NDTR0_tCH(ns2cycle(t->tCH, nand_clk)) |
327 NDTR0_tCS(ns2cycle(t->tCS, nand_clk)) |
328 NDTR0_tWH(ns2cycle(t->tWH, nand_clk)) |
329 NDTR0_tWP(ns2cycle(t->tWP, nand_clk)) |
330 NDTR0_tRH(ns2cycle(t->tRH, nand_clk)) |
331 NDTR0_tRP(ns2cycle(t->tRP, nand_clk));
332
333 ndtr1 = NDTR1_tR(ns2cycle(t->tR, nand_clk)) |
334 NDTR1_tWHR(ns2cycle(t->tWHR, nand_clk)) |
335 NDTR1_tAR(ns2cycle(t->tAR, nand_clk));
336
337 info->ndtr0cs0 = ndtr0;
338 info->ndtr1cs0 = ndtr1;
339 nand_writel(info, NDTR0CS0, ndtr0);
340 nand_writel(info, NDTR1CS0, ndtr1);
341 }
342
343 static void pxa3xx_nand_set_sdr_timing(struct pxa3xx_nand_host *host,
344 const struct nand_sdr_timings *t)
345 {
346 struct pxa3xx_nand_info *info = host->info_data;
347 struct nand_chip *chip = &host->chip;
348 unsigned long nand_clk = mvebu_get_nand_clock();
349 uint32_t ndtr0, ndtr1;
350
351 u32 tCH_min = DIV_ROUND_UP(t->tCH_min, 1000);
352 u32 tCS_min = DIV_ROUND_UP(t->tCS_min, 1000);
353 u32 tWH_min = DIV_ROUND_UP(t->tWH_min, 1000);
354 u32 tWP_min = DIV_ROUND_UP(t->tWC_min - t->tWH_min, 1000);
355 u32 tREH_min = DIV_ROUND_UP(t->tREH_min, 1000);
356 u32 tRP_min = DIV_ROUND_UP(t->tRC_min - t->tREH_min, 1000);
357 u32 tR = chip->chip_delay * 1000;
358 u32 tWHR_min = DIV_ROUND_UP(t->tWHR_min, 1000);
359 u32 tAR_min = DIV_ROUND_UP(t->tAR_min, 1000);
360
361 /* fallback to a default value if tR = 0 */
362 if (!tR)
363 tR = 20000;
364
365 ndtr0 = NDTR0_tCH(ns2cycle(tCH_min, nand_clk)) |
366 NDTR0_tCS(ns2cycle(tCS_min, nand_clk)) |
367 NDTR0_tWH(ns2cycle(tWH_min, nand_clk)) |
368 NDTR0_tWP(ns2cycle(tWP_min, nand_clk)) |
369 NDTR0_tRH(ns2cycle(tREH_min, nand_clk)) |
370 NDTR0_tRP(ns2cycle(tRP_min, nand_clk));
371
372 ndtr1 = NDTR1_tR(ns2cycle(tR, nand_clk)) |
373 NDTR1_tWHR(ns2cycle(tWHR_min, nand_clk)) |
374 NDTR1_tAR(ns2cycle(tAR_min, nand_clk));
375
376 info->ndtr0cs0 = ndtr0;
377 info->ndtr1cs0 = ndtr1;
378 nand_writel(info, NDTR0CS0, ndtr0);
379 nand_writel(info, NDTR1CS0, ndtr1);
380 }
381
382 static int pxa3xx_nand_init_timings(struct pxa3xx_nand_host *host)
383 {
384 const struct nand_sdr_timings *timings;
385 struct nand_chip *chip = &host->chip;
386 struct pxa3xx_nand_info *info = host->info_data;
387 const struct pxa3xx_nand_flash *f = NULL;
388 int mode, id, ntypes, i;
389
390 mode = onfi_get_async_timing_mode(chip);
391 if (mode == ONFI_TIMING_MODE_UNKNOWN) {
392 ntypes = ARRAY_SIZE(builtin_flash_types);
393
394 chip->cmdfunc(host->mtd, NAND_CMD_READID, 0x00, -1);
395
396 id = chip->read_byte(host->mtd);
397 id |= chip->read_byte(host->mtd) << 0x8;
398
399 for (i = 0; i < ntypes; i++) {
400 f = &builtin_flash_types[i];
401
402 if (f->chip_id == id)
403 break;
404 }
405
406 if (i == ntypes) {
407 dev_err(&info->pdev->dev, "Error: timings not found\n");
408 return -EINVAL;
409 }
410
411 pxa3xx_nand_set_timing(host, f->timing);
412
413 if (f->flash_width == 16) {
414 info->reg_ndcr |= NDCR_DWIDTH_M;
415 chip->options |= NAND_BUSWIDTH_16;
416 }
417
418 info->reg_ndcr |= (f->dfc_width == 16) ? NDCR_DWIDTH_C : 0;
419 } else {
420 mode = fls(mode) - 1;
421 if (mode < 0)
422 mode = 0;
423
424 timings = onfi_async_timing_mode_to_sdr_timings(mode);
425 if (IS_ERR(timings))
426 return PTR_ERR(timings);
427
428 pxa3xx_nand_set_sdr_timing(host, timings);
429 }
430
431 return 0;
432 }
433
434 /*
435 * Set the data and OOB size, depending on the selected
436 * spare and ECC configuration.
437 * Only applicable to READ0, READOOB and PAGEPROG commands.
438 */
439 static void pxa3xx_set_datasize(struct pxa3xx_nand_info *info,
440 struct mtd_info *mtd)
441 {
442 int oob_enable = info->reg_ndcr & NDCR_SPARE_EN;
443
444 info->data_size = mtd->writesize;
445 if (!oob_enable)
446 return;
447
448 info->oob_size = info->spare_size;
449 if (!info->use_ecc)
450 info->oob_size += info->ecc_size;
451 }
452
453 /**
454 * NOTE: ND_RUN must be set first and the command buffer written
455 * afterwards, otherwise the operation does not start.
456 * All interrupts are enabled at the same time, and
457 * pxa3xx_nand_irq() handles the rest of the logic.
458 */
459 static void pxa3xx_nand_start(struct pxa3xx_nand_info *info)
460 {
461 uint32_t ndcr;
462
463 ndcr = info->reg_ndcr;
464
465 if (info->use_ecc) {
466 ndcr |= NDCR_ECC_EN;
467 if (info->ecc_bch)
468 nand_writel(info, NDECCCTRL, 0x1);
469 } else {
470 ndcr &= ~NDCR_ECC_EN;
471 if (info->ecc_bch)
472 nand_writel(info, NDECCCTRL, 0x0);
473 }
474
475 ndcr &= ~NDCR_DMA_EN;
476
477 if (info->use_spare)
478 ndcr |= NDCR_SPARE_EN;
479 else
480 ndcr &= ~NDCR_SPARE_EN;
481
482 ndcr |= NDCR_ND_RUN;
483
484 /* clear status bits and run */
485 nand_writel(info, NDCR, 0);
486 nand_writel(info, NDSR, NDSR_MASK);
487 nand_writel(info, NDCR, ndcr);
488 }
489
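/*
 * The low NDCR bits are interrupt mask bits: setting a bit masks
 * (disables) the corresponding interrupt source.
 */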
490 static void disable_int(struct pxa3xx_nand_info *info, uint32_t int_mask)
491 {
492 uint32_t ndcr;
493
494 ndcr = nand_readl(info, NDCR);
495 nand_writel(info, NDCR, ndcr | int_mask);
496 }
497
498 static void drain_fifo(struct pxa3xx_nand_info *info, void *data, int len)
499 {
500 if (info->ecc_bch) {
501 u32 ts;
502
503 /*
504 * According to the datasheet, when reading from NDDB
505 * with BCH enabled, after every 32 bytes read we
506 * have to make sure that the NDSR.RDDREQ bit is set.
507 *
508 * Drain the FIFO eight 32-bit reads at a time, and skip
509 * the polling on the last read.
510 */
511 while (len > 8) {
512 readsl(info->mmio_base + NDDB, data, 8);
513
514 ts = get_timer(0);
515 while (!(nand_readl(info, NDSR) & NDSR_RDDREQ)) {
516 if (get_timer(ts) > TIMEOUT_DRAIN_FIFO) {
517 dev_err(&info->pdev->dev,
518 "Timeout on RDDREQ while draining the FIFO\n");
519 return;
520 }
521 }
522
523 data += 32;
524 len -= 8;
525 }
526 }
527
528 readsl(info->mmio_base + NDDB, data, len);
529 }
530
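/*
 * Transfer one chunk of page data (plus OOB, when enabled) between
 * the driver buffers and the controller FIFO by PIO, then advance
 * the buffer positions for the next chunk.
 */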
531 static void handle_data_pio(struct pxa3xx_nand_info *info)
532 {
533 unsigned int do_bytes = min(info->data_size, info->chunk_size);
534
535 switch (info->state) {
536 case STATE_PIO_WRITING:
537 writesl(info->mmio_base + NDDB,
538 info->data_buff + info->data_buff_pos,
539 DIV_ROUND_UP(do_bytes, 4));
540
541 if (info->oob_size > 0)
542 writesl(info->mmio_base + NDDB,
543 info->oob_buff + info->oob_buff_pos,
544 DIV_ROUND_UP(info->oob_size, 4));
545 break;
546 case STATE_PIO_READING:
547 drain_fifo(info,
548 info->data_buff + info->data_buff_pos,
549 DIV_ROUND_UP(do_bytes, 4));
550
551 if (info->oob_size > 0)
552 drain_fifo(info,
553 info->oob_buff + info->oob_buff_pos,
554 DIV_ROUND_UP(info->oob_size, 4));
555 break;
556 default:
557 dev_err(&info->pdev->dev, "%s: invalid state %d\n", __func__,
558 info->state);
559 BUG();
560 }
561
562 /* Update buffer pointers for multi-page read/write */
563 info->data_buff_pos += do_bytes;
564 info->oob_buff_pos += info->oob_size;
565 info->data_size -= do_bytes;
566 }
567
568 static void pxa3xx_nand_irq_thread(struct pxa3xx_nand_info *info)
569 {
570 handle_data_pio(info);
571
572 info->state = STATE_CMD_DONE;
573 nand_writel(info, NDSR, NDSR_WRDREQ | NDSR_RDDREQ);
574 }
575
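/*
 * Polled replacement for the Linux interrupt handler: decode NDSR,
 * record ECC results, service PIO data requests, load the NDCBx
 * command buffer on WRCMDREQ and flag command-done/ready events.
 */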
576 static irqreturn_t pxa3xx_nand_irq(struct pxa3xx_nand_info *info)
577 {
578 unsigned int status, is_completed = 0, is_ready = 0;
579 unsigned int ready, cmd_done;
580 irqreturn_t ret = IRQ_HANDLED;
581
582 if (info->cs == 0) {
583 ready = NDSR_FLASH_RDY;
584 cmd_done = NDSR_CS0_CMDD;
585 } else {
586 ready = NDSR_RDY;
587 cmd_done = NDSR_CS1_CMDD;
588 }
589
590 status = nand_readl(info, NDSR);
591
592 if (status & NDSR_UNCORERR)
593 info->retcode = ERR_UNCORERR;
594 if (status & NDSR_CORERR) {
595 info->retcode = ERR_CORERR;
596 if (info->variant == PXA3XX_NAND_VARIANT_ARMADA370 &&
597 info->ecc_bch)
598 info->ecc_err_cnt = NDSR_ERR_CNT(status);
599 else
600 info->ecc_err_cnt = 1;
601
602 /*
603 * Each chunk composing a page is corrected independently,
604 * and we need to store the maximum number of corrected bitflips
605 * to return it to the MTD layer in ecc.read_page().
606 */
607 info->max_bitflips = max_t(unsigned int,
608 info->max_bitflips,
609 info->ecc_err_cnt);
610 }
611 if (status & (NDSR_RDDREQ | NDSR_WRDREQ)) {
612 info->state = (status & NDSR_RDDREQ) ?
613 STATE_PIO_READING : STATE_PIO_WRITING;
614 /* Call the IRQ thread in U-Boot directly */
615 pxa3xx_nand_irq_thread(info);
616 return 0;
617 }
618 if (status & cmd_done) {
619 info->state = STATE_CMD_DONE;
620 is_completed = 1;
621 }
622 if (status & ready) {
623 info->state = STATE_READY;
624 is_ready = 1;
625 }
626
627 if (status & NDSR_WRCMDREQ) {
628 nand_writel(info, NDSR, NDSR_WRCMDREQ);
629 status &= ~NDSR_WRCMDREQ;
630 info->state = STATE_CMD_HANDLE;
631
632 /*
633 * Command buffer registers NDCB{0-2} (and optionally NDCB3)
634 * must be loaded by writing either 12 or 16 bytes
635 * directly to NDCB0, four bytes at a time.
636 *
637 * Direct write access to NDCB1, NDCB2 and NDCB3 is ignored
638 * but each NDCBx register can be read.
639 */
640 nand_writel(info, NDCB0, info->ndcb0);
641 nand_writel(info, NDCB0, info->ndcb1);
642 nand_writel(info, NDCB0, info->ndcb2);
643
644 /* NDCB3 register is available in NFCv2 (Armada 370/XP SoC) */
645 if (info->variant == PXA3XX_NAND_VARIANT_ARMADA370)
646 nand_writel(info, NDCB0, info->ndcb3);
647 }
648
649 /* clear NDSR to let the controller exit the IRQ */
650 nand_writel(info, NDSR, status);
651 if (is_completed)
652 info->cmd_complete = 1;
653 if (is_ready)
654 info->dev_ready = 1;
655
656 return ret;
657 }
658
659 static inline int is_buf_blank(uint8_t *buf, size_t len)
660 {
661 for (; len > 0; len--)
662 if (*buf++ != 0xff)
663 return 0;
664 return 1;
665 }
666
667 static void set_command_address(struct pxa3xx_nand_info *info,
668 unsigned int page_size, uint16_t column, int page_addr)
669 {
670 /* small page addr setting */
671 if (page_size < PAGE_CHUNK_SIZE) {
672 info->ndcb1 = ((page_addr & 0xFFFFFF) << 8)
673 | (column & 0xFF);
674
675 info->ndcb2 = 0;
676 } else {
677 info->ndcb1 = ((page_addr & 0xFFFF) << 16)
678 | (column & 0xFFFF);
679
680 if (page_addr & 0xFF0000)
681 info->ndcb2 = (page_addr & 0xFF0000) >> 16;
682 else
683 info->ndcb2 = 0;
684 }
685 }
686
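/*
 * Reset the per-command state (buffer positions, ECC/spare usage,
 * error counters) and pre-fill the data buffer with 0xFF for
 * commands that transfer page data.
 */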
687 static void prepare_start_command(struct pxa3xx_nand_info *info, int command)
688 {
689 struct pxa3xx_nand_host *host = info->host[info->cs];
690 struct mtd_info *mtd = host->mtd;
691
692 /* reset data and oob column pointers before handling data */
693 info->buf_start = 0;
694 info->buf_count = 0;
695 info->oob_size = 0;
696 info->data_buff_pos = 0;
697 info->oob_buff_pos = 0;
698 info->use_ecc = 0;
699 info->use_spare = 1;
700 info->retcode = ERR_NONE;
701 info->ecc_err_cnt = 0;
702 info->ndcb3 = 0;
703 info->need_wait = 0;
704
705 switch (command) {
706 case NAND_CMD_READ0:
707 case NAND_CMD_PAGEPROG:
708 info->use_ecc = 1;
709 case NAND_CMD_READOOB:
710 pxa3xx_set_datasize(info, mtd);
711 break;
712 case NAND_CMD_PARAM:
713 info->use_spare = 0;
714 break;
715 default:
716 info->ndcb1 = 0;
717 info->ndcb2 = 0;
718 break;
719 }
720
721 /*
722 * If we are about to issue a read command, or about to set
723 * the write address, then clean the data buffer.
724 */
725 if (command == NAND_CMD_READ0 ||
726 command == NAND_CMD_READOOB ||
727 command == NAND_CMD_SEQIN) {
728 info->buf_count = mtd->writesize + mtd->oobsize;
729 memset(info->data_buff, 0xFF, info->buf_count);
730 }
731 }
732
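/*
 * Translate an MTD/NAND command into NDCB0..NDCB3 register values.
 * Returns 1 if the controller actually has to execute a command,
 * 0 if nothing needs to be issued (e.g. SEQIN, ERASE2, blank write).
 */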
733 static int prepare_set_command(struct pxa3xx_nand_info *info, int command,
734 int ext_cmd_type, uint16_t column, int page_addr)
735 {
736 int addr_cycle, exec_cmd;
737 struct pxa3xx_nand_host *host;
738 struct mtd_info *mtd;
739
740 host = info->host[info->cs];
741 mtd = host->mtd;
742 addr_cycle = 0;
743 exec_cmd = 1;
744
745 if (info->cs != 0)
746 info->ndcb0 = NDCB0_CSEL;
747 else
748 info->ndcb0 = 0;
749
750 if (command == NAND_CMD_SEQIN)
751 exec_cmd = 0;
752
753 addr_cycle = NDCB0_ADDR_CYC(host->row_addr_cycles
754 + host->col_addr_cycles);
755
756 switch (command) {
757 case NAND_CMD_READOOB:
758 case NAND_CMD_READ0:
759 info->buf_start = column;
760 info->ndcb0 |= NDCB0_CMD_TYPE(0)
761 | addr_cycle
762 | NAND_CMD_READ0;
763
764 if (command == NAND_CMD_READOOB)
765 info->buf_start += mtd->writesize;
766
767 /*
768 * Multiple page read needs an 'extended command type' field,
769 * which is either naked-read or last-read according to the
770 * state.
771 */
772 if (mtd->writesize == PAGE_CHUNK_SIZE) {
773 info->ndcb0 |= NDCB0_DBC | (NAND_CMD_READSTART << 8);
774 } else if (mtd->writesize > PAGE_CHUNK_SIZE) {
775 info->ndcb0 |= NDCB0_DBC | (NAND_CMD_READSTART << 8)
776 | NDCB0_LEN_OVRD
777 | NDCB0_EXT_CMD_TYPE(ext_cmd_type);
778 info->ndcb3 = info->chunk_size +
779 info->oob_size;
780 }
781
782 set_command_address(info, mtd->writesize, column, page_addr);
783 break;
784
785 case NAND_CMD_SEQIN:
786
787 info->buf_start = column;
788 set_command_address(info, mtd->writesize, 0, page_addr);
789
790 /*
791 * Multiple page programming needs to execute the initial
792 * SEQIN command that sets the page address.
793 */
794 if (mtd->writesize > PAGE_CHUNK_SIZE) {
795 info->ndcb0 |= NDCB0_CMD_TYPE(0x1)
796 | NDCB0_EXT_CMD_TYPE(ext_cmd_type)
797 | addr_cycle
798 | command;
799 /* No data transfer in this case */
800 info->data_size = 0;
801 exec_cmd = 1;
802 }
803 break;
804
805 case NAND_CMD_PAGEPROG:
806 if (is_buf_blank(info->data_buff,
807 (mtd->writesize + mtd->oobsize))) {
808 exec_cmd = 0;
809 break;
810 }
811
812 /* Second command setting for large pages */
813 if (mtd->writesize > PAGE_CHUNK_SIZE) {
814 /*
815 * Multiple page write uses the 'extended command'
816 * field. This can be used to issue a command dispatch
817 * or a naked-write depending on the current stage.
818 */
819 info->ndcb0 |= NDCB0_CMD_TYPE(0x1)
820 | NDCB0_LEN_OVRD
821 | NDCB0_EXT_CMD_TYPE(ext_cmd_type);
822 info->ndcb3 = info->chunk_size +
823 info->oob_size;
824
825 /*
826 * This is the command dispatch that completes a chunked
827 * page program operation.
828 */
829 if (info->data_size == 0) {
830 info->ndcb0 = NDCB0_CMD_TYPE(0x1)
831 | NDCB0_EXT_CMD_TYPE(ext_cmd_type)
832 | command;
833 info->ndcb1 = 0;
834 info->ndcb2 = 0;
835 info->ndcb3 = 0;
836 }
837 } else {
838 info->ndcb0 |= NDCB0_CMD_TYPE(0x1)
839 | NDCB0_AUTO_RS
840 | NDCB0_ST_ROW_EN
841 | NDCB0_DBC
842 | (NAND_CMD_PAGEPROG << 8)
843 | NAND_CMD_SEQIN
844 | addr_cycle;
845 }
846 break;
847
848 case NAND_CMD_PARAM:
849 info->buf_count = 256;
850 info->ndcb0 |= NDCB0_CMD_TYPE(0)
851 | NDCB0_ADDR_CYC(1)
852 | NDCB0_LEN_OVRD
853 | command;
854 info->ndcb1 = (column & 0xFF);
855 info->ndcb3 = 256;
856 info->data_size = 256;
857 break;
858
859 case NAND_CMD_READID:
860 info->buf_count = host->read_id_bytes;
861 info->ndcb0 |= NDCB0_CMD_TYPE(3)
862 | NDCB0_ADDR_CYC(1)
863 | command;
864 info->ndcb1 = (column & 0xFF);
865
866 info->data_size = 8;
867 break;
868 case NAND_CMD_STATUS:
869 info->buf_count = 1;
870 info->ndcb0 |= NDCB0_CMD_TYPE(4)
871 | NDCB0_ADDR_CYC(1)
872 | command;
873
874 info->data_size = 8;
875 break;
876
877 case NAND_CMD_ERASE1:
878 info->ndcb0 |= NDCB0_CMD_TYPE(2)
879 | NDCB0_AUTO_RS
880 | NDCB0_ADDR_CYC(3)
881 | NDCB0_DBC
882 | (NAND_CMD_ERASE2 << 8)
883 | NAND_CMD_ERASE1;
884 info->ndcb1 = page_addr;
885 info->ndcb2 = 0;
886
887 break;
888 case NAND_CMD_RESET:
889 info->ndcb0 |= NDCB0_CMD_TYPE(5)
890 | command;
891
892 break;
893
894 case NAND_CMD_ERASE2:
895 exec_cmd = 0;
896 break;
897
898 default:
899 exec_cmd = 0;
900 dev_err(&info->pdev->dev, "non-supported command %x\n",
901 command);
902 break;
903 }
904
905 return exec_cmd;
906 }
907
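/*
 * cmdfunc hook used when a full page fits into the controller FIFO:
 * build the command, start the controller and poll NDSR until the
 * command completes or times out.
 */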
908 static void nand_cmdfunc(struct mtd_info *mtd, unsigned command,
909 int column, int page_addr)
910 {
911 struct pxa3xx_nand_host *host = mtd->priv;
912 struct pxa3xx_nand_info *info = host->info_data;
913 int exec_cmd;
914
915 /*
916 * If this is an x16 device, then convert the input
917 * "byte" address into a "word" address appropriate
918 * for indexing a word-oriented device.
919 */
920 if (info->reg_ndcr & NDCR_DWIDTH_M)
921 column /= 2;
922
923 /*
924 * Different NAND chips may be attached to different
925 * chip selects, so check whether the chip select has
926 * changed and, if so, reprogram the timing registers.
927 */
928 if (info->cs != host->cs) {
929 info->cs = host->cs;
930 nand_writel(info, NDTR0CS0, info->ndtr0cs0);
931 nand_writel(info, NDTR1CS0, info->ndtr1cs0);
932 }
933
934 prepare_start_command(info, command);
935
936 info->state = STATE_PREPARED;
937 exec_cmd = prepare_set_command(info, command, 0, column, page_addr);
938
939 if (exec_cmd) {
940 u32 ts;
941
942 info->cmd_complete = 0;
943 info->dev_ready = 0;
944 info->need_wait = 1;
945 pxa3xx_nand_start(info);
946
947 ts = get_timer(0);
948 while (1) {
949 u32 status;
950
951 status = nand_readl(info, NDSR);
952 if (status)
953 pxa3xx_nand_irq(info);
954
955 if (info->cmd_complete)
956 break;
957
958 if (get_timer(ts) > CHIP_DELAY_TIMEOUT) {
959 dev_err(&info->pdev->dev, "Wait timeout!!!\n");
960 return;
961 }
962 }
963 }
964 info->state = STATE_IDLE;
965 }
966
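/*
 * cmdfunc hook used for pages larger than the controller FIFO
 * (NFCv2 only): the access is split into chunk-sized naked
 * read/write commands, ending with a last-naked-read or a
 * command-dispatch step.
 */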
967 static void nand_cmdfunc_extended(struct mtd_info *mtd,
968 const unsigned command,
969 int column, int page_addr)
970 {
971 struct pxa3xx_nand_host *host = mtd->priv;
972 struct pxa3xx_nand_info *info = host->info_data;
973 int exec_cmd, ext_cmd_type;
974
975 /*
976 * If this is an x16 device, then convert the input
977 * "byte" address into a "word" address appropriate
978 * for indexing a word-oriented device.
979 */
980 if (info->reg_ndcr & NDCR_DWIDTH_M)
981 column /= 2;
982
983 /*
984 * Different NAND chips may be attached to different
985 * chip selects, so check whether the chip select has
986 * changed and, if so, reprogram the timing registers.
987 */
988 if (info->cs != host->cs) {
989 info->cs = host->cs;
990 nand_writel(info, NDTR0CS0, info->ndtr0cs0);
991 nand_writel(info, NDTR1CS0, info->ndtr1cs0);
992 }
993
994 /* Select the extended command for the first command */
995 switch (command) {
996 case NAND_CMD_READ0:
997 case NAND_CMD_READOOB:
998 ext_cmd_type = EXT_CMD_TYPE_MONO;
999 break;
1000 case NAND_CMD_SEQIN:
1001 ext_cmd_type = EXT_CMD_TYPE_DISPATCH;
1002 break;
1003 case NAND_CMD_PAGEPROG:
1004 ext_cmd_type = EXT_CMD_TYPE_NAKED_RW;
1005 break;
1006 default:
1007 ext_cmd_type = 0;
1008 break;
1009 }
1010
1011 prepare_start_command(info, command);
1012
1013 /*
1014 * Prepare the "is ready" flags before starting a command
1015 * transaction sequence. If the command is not executed, the
1016 * ready flags are set immediately, see below.
1017 *
1018 * This can be done before the loop because the command variable
1019 * is invariant and thus so is exec_cmd.
1020 */
1021 info->need_wait = 1;
1022 info->dev_ready = 0;
1023
1024 do {
1025 u32 ts;
1026
1027 info->state = STATE_PREPARED;
1028 exec_cmd = prepare_set_command(info, command, ext_cmd_type,
1029 column, page_addr);
1030 if (!exec_cmd) {
1031 info->need_wait = 0;
1032 info->dev_ready = 1;
1033 break;
1034 }
1035
1036 info->cmd_complete = 0;
1037 pxa3xx_nand_start(info);
1038
1039 ts = get_timer(0);
1040 while (1) {
1041 u32 status;
1042
1043 status = nand_readl(info, NDSR);
1044 if (status)
1045 pxa3xx_nand_irq(info);
1046
1047 if (info->cmd_complete)
1048 break;
1049
1050 if (get_timer(ts) > CHIP_DELAY_TIMEOUT) {
1051 dev_err(&info->pdev->dev, "Wait timeout!!!\n");
1052 return;
1053 }
1054 }
1055
1056 /* Check if the sequence is complete */
1057 if (info->data_size == 0 && command != NAND_CMD_PAGEPROG)
1058 break;
1059
1060 /*
1061 * After a split program command sequence has issued
1062 * the command dispatch, the command sequence is complete.
1063 */
1064 if (info->data_size == 0 &&
1065 command == NAND_CMD_PAGEPROG &&
1066 ext_cmd_type == EXT_CMD_TYPE_DISPATCH)
1067 break;
1068
1069 if (command == NAND_CMD_READ0 || command == NAND_CMD_READOOB) {
1070 /* Last read: issue a 'last naked read' */
1071 if (info->data_size == info->chunk_size)
1072 ext_cmd_type = EXT_CMD_TYPE_LAST_RW;
1073 else
1074 ext_cmd_type = EXT_CMD_TYPE_NAKED_RW;
1075
1076 /*
1077 * If a split program command has no more data to transfer,
1078 * the command dispatch must be issued to complete it.
1079 */
1080 } else if (command == NAND_CMD_PAGEPROG &&
1081 info->data_size == 0) {
1082 ext_cmd_type = EXT_CMD_TYPE_DISPATCH;
1083 }
1084 } while (1);
1085
1086 info->state = STATE_IDLE;
1087 }
1088
1089 static int pxa3xx_nand_write_page_hwecc(struct mtd_info *mtd,
1090 struct nand_chip *chip, const uint8_t *buf, int oob_required)
1091 {
1092 chip->write_buf(mtd, buf, mtd->writesize);
1093 chip->write_buf(mtd, chip->oob_poi, mtd->oobsize);
1094
1095 return 0;
1096 }
1097
1098 static int pxa3xx_nand_read_page_hwecc(struct mtd_info *mtd,
1099 struct nand_chip *chip, uint8_t *buf, int oob_required,
1100 int page)
1101 {
1102 struct pxa3xx_nand_host *host = mtd->priv;
1103 struct pxa3xx_nand_info *info = host->info_data;
1104
1105 chip->read_buf(mtd, buf, mtd->writesize);
1106 chip->read_buf(mtd, chip->oob_poi, mtd->oobsize);
1107
1108 if (info->retcode == ERR_CORERR && info->use_ecc) {
1109 mtd->ecc_stats.corrected += info->ecc_err_cnt;
1110
1111 } else if (info->retcode == ERR_UNCORERR) {
1112 /*
1113 * For a blank page (all 0xff), the HW calculates its ECC as
1114 * 0, which differs from the ECC information within the
1115 * OOB area, so ignore such uncorrectable errors.
1116 */
1117 if (is_buf_blank(buf, mtd->writesize))
1118 info->retcode = ERR_NONE;
1119 else
1120 mtd->ecc_stats.failed++;
1121 }
1122
1123 return info->max_bitflips;
1124 }
1125
1126 static uint8_t pxa3xx_nand_read_byte(struct mtd_info *mtd)
1127 {
1128 struct pxa3xx_nand_host *host = mtd->priv;
1129 struct pxa3xx_nand_info *info = host->info_data;
1130 char retval = 0xFF;
1131
1132 if (info->buf_start < info->buf_count)
1133 /* Has a new command just been sent? */
1134 retval = info->data_buff[info->buf_start++];
1135
1136 return retval;
1137 }
1138
1139 static u16 pxa3xx_nand_read_word(struct mtd_info *mtd)
1140 {
1141 struct pxa3xx_nand_host *host = mtd->priv;
1142 struct pxa3xx_nand_info *info = host->info_data;
1143 u16 retval = 0xFFFF;
1144
1145 if (!(info->buf_start & 0x01) && info->buf_start < info->buf_count) {
1146 retval = *((u16 *)(info->data_buff+info->buf_start));
1147 info->buf_start += 2;
1148 }
1149 return retval;
1150 }
1151
1152 static void pxa3xx_nand_read_buf(struct mtd_info *mtd, uint8_t *buf, int len)
1153 {
1154 struct pxa3xx_nand_host *host = mtd->priv;
1155 struct pxa3xx_nand_info *info = host->info_data;
1156 int real_len = min_t(size_t, len, info->buf_count - info->buf_start);
1157
1158 memcpy(buf, info->data_buff + info->buf_start, real_len);
1159 info->buf_start += real_len;
1160 }
1161
1162 static void pxa3xx_nand_write_buf(struct mtd_info *mtd,
1163 const uint8_t *buf, int len)
1164 {
1165 struct pxa3xx_nand_host *host = mtd->priv;
1166 struct pxa3xx_nand_info *info = host->info_data;
1167 int real_len = min_t(size_t, len, info->buf_count - info->buf_start);
1168
1169 memcpy(info->data_buff + info->buf_start, buf, real_len);
1170 info->buf_start += real_len;
1171 }
1172
1173 static void pxa3xx_nand_select_chip(struct mtd_info *mtd, int chip)
1174 {
1175 return;
1176 }
1177
1178 static int pxa3xx_nand_waitfunc(struct mtd_info *mtd, struct nand_chip *this)
1179 {
1180 struct pxa3xx_nand_host *host = mtd->priv;
1181 struct pxa3xx_nand_info *info = host->info_data;
1182
1183 if (info->need_wait) {
1184 u32 ts;
1185
1186 info->need_wait = 0;
1187
1188 ts = get_timer(0);
1189 while (1) {
1190 u32 status;
1191
1192 status = nand_readl(info, NDSR);
1193 if (status)
1194 pxa3xx_nand_irq(info);
1195
1196 if (info->dev_ready)
1197 break;
1198
1199 if (get_timer(ts) > CHIP_DELAY_TIMEOUT) {
1200 dev_err(&info->pdev->dev, "Ready timeout!!!\n");
1201 return NAND_STATUS_FAIL;
1202 }
1203 }
1204 }
1205
1206 /* nand_cmdfunc() has already waited for command completion */
1207 if (this->state == FL_WRITING || this->state == FL_ERASING) {
1208 if (info->retcode == ERR_NONE)
1209 return 0;
1210 else
1211 return NAND_STATUS_FAIL;
1212 }
1213
1214 return NAND_STATUS_READY;
1215 }
1216
1217 static int pxa3xx_nand_config_flash(struct pxa3xx_nand_info *info)
1218 {
1219 struct pxa3xx_nand_host *host = info->host[info->cs];
1220 struct mtd_info *mtd = host->mtd;
1221 struct nand_chip *chip = mtd->priv;
1222
1223 info->reg_ndcr |= (host->col_addr_cycles == 2) ? NDCR_RA_START : 0;
1224 info->reg_ndcr |= (chip->page_shift == 6) ? NDCR_PG_PER_BLK : 0;
1225 info->reg_ndcr |= (mtd->writesize == 2048) ? NDCR_PAGE_SZ : 0;
1226
1227 return 0;
1228 }
1229
1230 static int pxa3xx_nand_detect_config(struct pxa3xx_nand_info *info)
1231 {
1232 /*
1233 * Chip select 0 is hard-coded here, as keep_config is not
1234 * supported when more than one chip is attached to the controller.
1235 */
1236 struct pxa3xx_nand_host *host = info->host[0];
1237 uint32_t ndcr = nand_readl(info, NDCR);
1238
1239 if (ndcr & NDCR_PAGE_SZ) {
1240 /* Controller's FIFO size */
1241 info->chunk_size = 2048;
1242 host->read_id_bytes = 4;
1243 } else {
1244 info->chunk_size = 512;
1245 host->read_id_bytes = 2;
1246 }
1247
1248 /* Cache the current controller configuration and timings */
1249 info->reg_ndcr = ndcr & ~NDCR_INT_MASK;
1250 info->ndtr0cs0 = nand_readl(info, NDTR0CS0);
1251 info->ndtr1cs0 = nand_readl(info, NDTR1CS0);
1252 return 0;
1253 }
1254
1255 static int pxa3xx_nand_init_buff(struct pxa3xx_nand_info *info)
1256 {
1257 info->data_buff = kmalloc(info->buf_size, GFP_KERNEL);
1258 if (info->data_buff == NULL)
1259 return -ENOMEM;
1260 return 0;
1261 }
1262
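/*
 * Probe for a chip on the current chip select: program safe default
 * settings and ONFI mode 0 timings, then issue a RESET and check the
 * resulting status to see whether a device answers.
 */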
1263 static int pxa3xx_nand_sensing(struct pxa3xx_nand_host *host)
1264 {
1265 struct pxa3xx_nand_info *info = host->info_data;
1266 struct pxa3xx_nand_platform_data *pdata = info->pdata;
1267 struct mtd_info *mtd;
1268 struct nand_chip *chip;
1269 const struct nand_sdr_timings *timings;
1270 int ret;
1271
1272 mtd = info->host[info->cs]->mtd;
1273 chip = mtd->priv;
1274
1275 /* configure default flash values */
1276 info->reg_ndcr = 0x0; /* enable all interrupts */
1277 info->reg_ndcr |= (pdata->enable_arbiter) ? NDCR_ND_ARB_EN : 0;
1278 info->reg_ndcr |= NDCR_RD_ID_CNT(host->read_id_bytes);
1279 info->reg_ndcr |= NDCR_SPARE_EN; /* enable spare by default */
1280
1281 /* use the common ONFI mode 0 timings as a first try */
1282 timings = onfi_async_timing_mode_to_sdr_timings(0);
1283 if (IS_ERR(timings))
1284 return PTR_ERR(timings);
1285
1286 pxa3xx_nand_set_sdr_timing(host, timings);
1287
1288 chip->cmdfunc(mtd, NAND_CMD_RESET, 0, 0);
1289 ret = chip->waitfunc(mtd, chip);
1290 if (ret & NAND_STATUS_FAIL)
1291 return -ENODEV;
1292
1293 return 0;
1294 }
1295
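/*
 * Select the ECC engine configuration (chunk size, spare/ECC bytes,
 * Hamming or BCH) matching the required strength and step size for
 * the given page size.
 */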
1296 static int pxa_ecc_init(struct pxa3xx_nand_info *info,
1297 struct nand_ecc_ctrl *ecc,
1298 int strength, int ecc_stepsize, int page_size)
1299 {
1300 if (strength == 1 && ecc_stepsize == 512 && page_size == 2048) {
1301 info->chunk_size = 2048;
1302 info->spare_size = 40;
1303 info->ecc_size = 24;
1304 ecc->mode = NAND_ECC_HW;
1305 ecc->size = 512;
1306 ecc->strength = 1;
1307
1308 } else if (strength == 1 && ecc_stepsize == 512 && page_size == 512) {
1309 info->chunk_size = 512;
1310 info->spare_size = 8;
1311 info->ecc_size = 8;
1312 ecc->mode = NAND_ECC_HW;
1313 ecc->size = 512;
1314 ecc->strength = 1;
1315
1316 /*
1317 * Required ECC: 4-bit correction per 512 bytes
1318 * Select: 16-bit correction per 2048 bytes
1319 */
1320 } else if (strength == 4 && ecc_stepsize == 512 && page_size == 2048) {
1321 info->ecc_bch = 1;
1322 info->chunk_size = 2048;
1323 info->spare_size = 32;
1324 info->ecc_size = 32;
1325 ecc->mode = NAND_ECC_HW;
1326 ecc->size = info->chunk_size;
1327 ecc->layout = &ecc_layout_2KB_bch4bit;
1328 ecc->strength = 16;
1329
1330 } else if (strength == 4 && ecc_stepsize == 512 && page_size == 4096) {
1331 info->ecc_bch = 1;
1332 info->chunk_size = 2048;
1333 info->spare_size = 32;
1334 info->ecc_size = 32;
1335 ecc->mode = NAND_ECC_HW;
1336 ecc->size = info->chunk_size;
1337 ecc->layout = &ecc_layout_4KB_bch4bit;
1338 ecc->strength = 16;
1339
1340 /*
1341 * Required ECC: 8-bit correction per 512 bytes
1342 * Select: 16-bit correction per 1024 bytes
1343 */
1344 } else if (strength == 8 && ecc_stepsize == 512 && page_size == 4096) {
1345 info->ecc_bch = 1;
1346 info->chunk_size = 1024;
1347 info->spare_size = 0;
1348 info->ecc_size = 32;
1349 ecc->mode = NAND_ECC_HW;
1350 ecc->size = info->chunk_size;
1351 ecc->layout = &ecc_layout_4KB_bch8bit;
1352 ecc->strength = 16;
1353 } else {
1354 dev_err(&info->pdev->dev,
1355 "ECC strength %d at page size %d is not supported\n",
1356 strength, page_size);
1357 return -ENODEV;
1358 }
1359
1360 return 0;
1361 }
1362
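/*
 * Detect and configure the attached chip: identify it, set up
 * timings, command handling, ECC and buffers, then hand over to
 * nand_scan_tail().
 */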
1363 static int pxa3xx_nand_scan(struct mtd_info *mtd)
1364 {
1365 struct pxa3xx_nand_host *host = mtd->priv;
1366 struct pxa3xx_nand_info *info = host->info_data;
1367 struct pxa3xx_nand_platform_data *pdata = info->pdata;
1368 struct nand_chip *chip = mtd->priv;
1369 int ret;
1370 uint16_t ecc_strength, ecc_step;
1371
1372 if (pdata->keep_config && !pxa3xx_nand_detect_config(info))
1373 goto KEEP_CONFIG;
1374
1375 /* Set a default chunk size */
1376 info->chunk_size = 512;
1377
1378 ret = pxa3xx_nand_sensing(host);
1379 if (ret) {
1380 dev_info(&info->pdev->dev, "There is no chip on cs %d!\n",
1381 info->cs);
1382
1383 return ret;
1384 }
1385
1386 KEEP_CONFIG:
1387 /* Device detection must be done with ECC disabled */
1388 if (info->variant == PXA3XX_NAND_VARIANT_ARMADA370)
1389 nand_writel(info, NDECCCTRL, 0x0);
1390
1391 if (nand_scan_ident(mtd, 1, NULL))
1392 return -ENODEV;
1393
1394 if (!pdata->keep_config) {
1395 ret = pxa3xx_nand_init_timings(host);
1396 if (ret) {
1397 dev_err(&info->pdev->dev,
1398 "Failed to set timings: %d\n", ret);
1399 return ret;
1400 }
1401 }
1402
1403 ret = pxa3xx_nand_config_flash(info);
1404 if (ret)
1405 return ret;
1406
1407 #ifdef CONFIG_SYS_NAND_USE_FLASH_BBT
1408 /*
1409 * We'll use a bad block table stored in-flash and don't
1410 * allow writing the bad block marker to the flash.
1411 */
1412 chip->bbt_options |= NAND_BBT_USE_FLASH | NAND_BBT_NO_OOB_BBM;
1413 chip->bbt_td = &bbt_main_descr;
1414 chip->bbt_md = &bbt_mirror_descr;
1415 #endif
1416
1417 /*
1418 * If the page size is bigger than the FIFO size, check that
1419 * we are given the right variant and then switch to the extended
1420 * (aka split) command handling.
1421 */
1422 if (mtd->writesize > PAGE_CHUNK_SIZE) {
1423 if (info->variant == PXA3XX_NAND_VARIANT_ARMADA370) {
1424 chip->cmdfunc = nand_cmdfunc_extended;
1425 } else {
1426 dev_err(&info->pdev->dev,
1427 "unsupported page size on this variant\n");
1428 return -ENODEV;
1429 }
1430 }
1431
1432 if (pdata->ecc_strength && pdata->ecc_step_size) {
1433 ecc_strength = pdata->ecc_strength;
1434 ecc_step = pdata->ecc_step_size;
1435 } else {
1436 ecc_strength = chip->ecc_strength_ds;
1437 ecc_step = chip->ecc_step_ds;
1438 }
1439
1440 /* Set default ECC strength requirements on non-ONFI devices */
1441 if (ecc_strength < 1 && ecc_step < 1) {
1442 ecc_strength = 1;
1443 ecc_step = 512;
1444 }
1445
1446 ret = pxa_ecc_init(info, &chip->ecc, ecc_strength,
1447 ecc_step, mtd->writesize);
1448 if (ret)
1449 return ret;
1450
1451 /* calculate addressing information */
1452 if (mtd->writesize >= 2048)
1453 host->col_addr_cycles = 2;
1454 else
1455 host->col_addr_cycles = 1;
1456
1457 /* release the initial buffer */
1458 kfree(info->data_buff);
1459
1460 /* allocate the real data + oob buffer */
1461 info->buf_size = mtd->writesize + mtd->oobsize;
1462 ret = pxa3xx_nand_init_buff(info);
1463 if (ret)
1464 return ret;
1465 info->oob_buff = info->data_buff + mtd->writesize;
1466
1467 if ((mtd->size >> chip->page_shift) > 65536)
1468 host->row_addr_cycles = 3;
1469 else
1470 host->row_addr_cycles = 2;
1471 return nand_scan_tail(mtd);
1472 }
1473
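/*
 * Set up the per-chip-select host structures and nand_chip hooks,
 * map the controller registers and allocate the small buffer used
 * for the initial device detection commands.
 */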
1474 static int alloc_nand_resource(struct pxa3xx_nand_info *info)
1475 {
1476 struct pxa3xx_nand_platform_data *pdata;
1477 struct pxa3xx_nand_host *host;
1478 struct nand_chip *chip = NULL;
1479 struct mtd_info *mtd;
1480 int ret, cs;
1481
1482 pdata = info->pdata;
1483 if (pdata->num_cs <= 0)
1484 return -ENODEV;
1485
1486 info->variant = pxa3xx_nand_get_variant();
1487 for (cs = 0; cs < pdata->num_cs; cs++) {
1488 mtd = &nand_info[cs];
1489 chip = (struct nand_chip *)info +
1490 sizeof(struct pxa3xx_nand_host);
1491 host = (struct pxa3xx_nand_host *)chip;
1492 info->host[cs] = host;
1493 host->mtd = mtd;
1494 host->cs = cs;
1495 host->info_data = info;
1496 host->read_id_bytes = 4;
1497 mtd->priv = host;
1498 mtd->owner = THIS_MODULE;
1499
1500 chip->ecc.read_page = pxa3xx_nand_read_page_hwecc;
1501 chip->ecc.write_page = pxa3xx_nand_write_page_hwecc;
1502 chip->controller = &info->controller;
1503 chip->waitfunc = pxa3xx_nand_waitfunc;
1504 chip->select_chip = pxa3xx_nand_select_chip;
1505 chip->read_word = pxa3xx_nand_read_word;
1506 chip->read_byte = pxa3xx_nand_read_byte;
1507 chip->read_buf = pxa3xx_nand_read_buf;
1508 chip->write_buf = pxa3xx_nand_write_buf;
1509 chip->options |= NAND_NO_SUBPAGE_WRITE;
1510 chip->cmdfunc = nand_cmdfunc;
1511 }
1512
1513 info->mmio_base = (void __iomem *)MVEBU_NAND_BASE;
1514
1515 /* Allocate a buffer to allow flash detection */
1516 info->buf_size = INIT_BUFFER_SIZE;
1517 info->data_buff = kmalloc(info->buf_size, GFP_KERNEL);
1518 if (info->data_buff == NULL) {
1519 ret = -ENOMEM;
1520 goto fail_disable_clk;
1521 }
1522
1523 /* initialize all interrupts to be disabled */
1524 disable_int(info, NDSR_MASK);
1525
1526 return 0;
1527
1528 fail_disable_clk:
1529 kfree(info->data_buff);
1530 return ret;
1531 }
1532
1533 static int pxa3xx_nand_probe_dt(struct pxa3xx_nand_info *info)
1534 {
1535 struct pxa3xx_nand_platform_data *pdata;
1536
1537 pdata = kzalloc(sizeof(*pdata), GFP_KERNEL);
1538 if (!pdata)
1539 return -ENOMEM;
1540
1541 pdata->enable_arbiter = 1;
1542 pdata->num_cs = 1;
1543
1544 info->pdata = pdata;
1545
1546 return 0;
1547 }
1548
1549 static int pxa3xx_nand_probe(struct pxa3xx_nand_info *info)
1550 {
1551 struct pxa3xx_nand_platform_data *pdata;
1552 int ret, cs, probe_success;
1553
1554 ret = pxa3xx_nand_probe_dt(info);
1555 if (ret)
1556 return ret;
1557
1558 pdata = info->pdata;
1559
1560 ret = alloc_nand_resource(info);
1561 if (ret) {
1562 dev_err(&pdev->dev, "alloc nand resource failed\n");
1563 return ret;
1564 }
1565
1566 probe_success = 0;
1567 for (cs = 0; cs < pdata->num_cs; cs++) {
1568 struct mtd_info *mtd = info->host[cs]->mtd;
1569
1570 /*
1571 * The mtd name matches the one used in the 'mtdparts' kernel
1572 * parameter. This name cannot be changed, otherwise the
1573 * user's mtd partition configuration would get broken.
1574 */
1575 mtd->name = "pxa3xx_nand-0";
1576 info->cs = cs;
1577 ret = pxa3xx_nand_scan(mtd);
1578 if (ret) {
1579 dev_info(&pdev->dev, "failed to scan nand at cs %d\n",
1580 cs);
1581 continue;
1582 }
1583
1584 if (!ret)
1585 probe_success = 1;
1586 }
1587
1588 if (!probe_success)
1589 return -ENODEV;
1590
1591 return 0;
1592 }
1593
1594 /*
1595 * Main initialization routine
1596 */
1597 void board_nand_init(void)
1598 {
1599 struct pxa3xx_nand_info *info;
1600 struct pxa3xx_nand_host *host;
1601 int ret;
1602
1603 info = kzalloc(sizeof(*info) + (sizeof(struct mtd_info) +
1604 sizeof(*host)) *
1605 CONFIG_SYS_MAX_NAND_DEVICE, GFP_KERNEL);
1606 if (!info)
1607 return;
1608
1609 /*
1610 * If CONFIG_SYS_NAND_SELF_INIT is defined, each driver is responsible
1611 * for instantiating struct nand_chip, while drivers/mtd/nand/nand.c
1612 * still provides a "struct mtd_info nand_info" instance.
1613 */
1614 info->host[0]->mtd = &nand_info[0];
1615
1616 ret = pxa3xx_nand_probe(info);
1617 if (ret)
1618 return;
1619
1620 nand_register(0);
1621 }