]> git.ipfire.org Git - people/ms/u-boot.git/blob - drivers/mtd/nand/pxa3xx_nand.c
mtd: nand: Rename nand.h into rawnand.h
[people/ms/u-boot.git] / drivers / mtd / nand / pxa3xx_nand.c
1 /*
2 * drivers/mtd/nand/pxa3xx_nand.c
3 *
4 * Copyright © 2005 Intel Corporation
5 * Copyright © 2006 Marvell International Ltd.
6 *
7 * SPDX-License-Identifier: GPL-2.0
8 */
9
10 #include <common.h>
11 #include <malloc.h>
12 #include <fdtdec.h>
13 #include <nand.h>
14 #include <linux/errno.h>
15 #include <asm/io.h>
16 #include <asm/arch/cpu.h>
17 #include <linux/mtd/mtd.h>
18 #include <linux/mtd/rawnand.h>
19 #include <linux/types.h>
20
21 #include "pxa3xx_nand.h"
22
23 DECLARE_GLOBAL_DATA_PTR;
24
25 #define TIMEOUT_DRAIN_FIFO 5 /* in ms */
26 #define CHIP_DELAY_TIMEOUT 200
27 #define NAND_STOP_DELAY 40
28 #define PAGE_CHUNK_SIZE (2048)
29
30 /*
31 * Define a buffer size for the initial command that detects the flash device:
32 * STATUS, READID and PARAM. The largest of these is the PARAM command,
33 * needing 256 bytes.
34 */
35 #define INIT_BUFFER_SIZE 256
36
37 /* registers and bit definitions */
38 #define NDCR (0x00) /* Control register */
39 #define NDTR0CS0 (0x04) /* Timing Parameter 0 for CS0 */
40 #define NDTR1CS0 (0x0C) /* Timing Parameter 1 for CS0 */
41 #define NDSR (0x14) /* Status Register */
42 #define NDPCR (0x18) /* Page Count Register */
43 #define NDBDR0 (0x1C) /* Bad Block Register 0 */
44 #define NDBDR1 (0x20) /* Bad Block Register 1 */
45 #define NDECCCTRL (0x28) /* ECC control */
46 #define NDDB (0x40) /* Data Buffer */
47 #define NDCB0 (0x48) /* Command Buffer0 */
48 #define NDCB1 (0x4C) /* Command Buffer1 */
49 #define NDCB2 (0x50) /* Command Buffer2 */
50
51 #define NDCR_SPARE_EN (0x1 << 31)
52 #define NDCR_ECC_EN (0x1 << 30)
53 #define NDCR_DMA_EN (0x1 << 29)
54 #define NDCR_ND_RUN (0x1 << 28)
55 #define NDCR_DWIDTH_C (0x1 << 27)
56 #define NDCR_DWIDTH_M (0x1 << 26)
57 #define NDCR_PAGE_SZ (0x1 << 24)
58 #define NDCR_NCSX (0x1 << 23)
59 #define NDCR_ND_MODE (0x3 << 21)
60 #define NDCR_NAND_MODE (0x0)
61 #define NDCR_CLR_PG_CNT (0x1 << 20)
62 #define NDCR_STOP_ON_UNCOR (0x1 << 19)
63 #define NDCR_RD_ID_CNT_MASK (0x7 << 16)
64 #define NDCR_RD_ID_CNT(x) (((x) << 16) & NDCR_RD_ID_CNT_MASK)
65
66 #define NDCR_RA_START (0x1 << 15)
67 #define NDCR_PG_PER_BLK (0x1 << 14)
68 #define NDCR_ND_ARB_EN (0x1 << 12)
69 #define NDCR_INT_MASK (0xFFF)
70
71 #define NDSR_MASK (0xfff)
72 #define NDSR_ERR_CNT_OFF (16)
73 #define NDSR_ERR_CNT_MASK (0x1f)
74 #define NDSR_ERR_CNT(sr) ((sr >> NDSR_ERR_CNT_OFF) & NDSR_ERR_CNT_MASK)
75 #define NDSR_RDY (0x1 << 12)
76 #define NDSR_FLASH_RDY (0x1 << 11)
77 #define NDSR_CS0_PAGED (0x1 << 10)
78 #define NDSR_CS1_PAGED (0x1 << 9)
79 #define NDSR_CS0_CMDD (0x1 << 8)
80 #define NDSR_CS1_CMDD (0x1 << 7)
81 #define NDSR_CS0_BBD (0x1 << 6)
82 #define NDSR_CS1_BBD (0x1 << 5)
83 #define NDSR_UNCORERR (0x1 << 4)
84 #define NDSR_CORERR (0x1 << 3)
85 #define NDSR_WRDREQ (0x1 << 2)
86 #define NDSR_RDDREQ (0x1 << 1)
87 #define NDSR_WRCMDREQ (0x1)
88
89 #define NDCB0_LEN_OVRD (0x1 << 28)
90 #define NDCB0_ST_ROW_EN (0x1 << 26)
91 #define NDCB0_AUTO_RS (0x1 << 25)
92 #define NDCB0_CSEL (0x1 << 24)
93 #define NDCB0_EXT_CMD_TYPE_MASK (0x7 << 29)
94 #define NDCB0_EXT_CMD_TYPE(x) (((x) << 29) & NDCB0_EXT_CMD_TYPE_MASK)
95 #define NDCB0_CMD_TYPE_MASK (0x7 << 21)
96 #define NDCB0_CMD_TYPE(x) (((x) << 21) & NDCB0_CMD_TYPE_MASK)
97 #define NDCB0_NC (0x1 << 20)
98 #define NDCB0_DBC (0x1 << 19)
99 #define NDCB0_ADDR_CYC_MASK (0x7 << 16)
100 #define NDCB0_ADDR_CYC(x) (((x) << 16) & NDCB0_ADDR_CYC_MASK)
101 #define NDCB0_CMD2_MASK (0xff << 8)
102 #define NDCB0_CMD1_MASK (0xff)
103 #define NDCB0_ADDR_CYC_SHIFT (16)
104
105 #define EXT_CMD_TYPE_DISPATCH 6 /* Command dispatch */
106 #define EXT_CMD_TYPE_NAKED_RW 5 /* Naked read or Naked write */
107 #define EXT_CMD_TYPE_READ 4 /* Read */
108 #define EXT_CMD_TYPE_DISP_WR 4 /* Command dispatch with write */
109 #define EXT_CMD_TYPE_FINAL 3 /* Final command */
110 #define EXT_CMD_TYPE_LAST_RW 1 /* Last naked read/write */
111 #define EXT_CMD_TYPE_MONO 0 /* Monolithic read/write */
112
/* macros for registers read/write */
/* Write a 32-bit value to a controller register at byte offset 'off' */
#define nand_writel(info, off, val)	\
	writel((val), (info)->mmio_base + (off))

/* Read a 32-bit controller register at byte offset 'off' */
#define nand_readl(info, off)		\
	readl((info)->mmio_base + (off))
119
120 /* error code and state */
/* Driver-internal result codes stored in info->retcode after a command */
enum {
	ERR_NONE	= 0,
	ERR_DMABUSERR	= -1,	/* DMA bus error */
	ERR_SENDCMD	= -2,	/* failed to issue command */
	ERR_UNCORERR	= -3,	/* uncorrectable ECC error */
	ERR_BBERR	= -4,	/* bad block detected */
	ERR_CORERR	= -5,	/* corrected ECC error(s) */
};
129
/* Command execution state machine, tracked in info->state */
enum {
	STATE_IDLE = 0,
	STATE_PREPARED,
	STATE_CMD_HANDLE,
	STATE_DMA_READING,
	STATE_DMA_WRITING,
	STATE_DMA_DONE,
	STATE_PIO_READING,
	STATE_PIO_WRITING,
	STATE_CMD_DONE,
	STATE_READY,
};
142
/* Controller flavour: NFCv1 (PXA SoCs) or NFCv2 (Armada 370/XP/38x) */
enum pxa3xx_nand_variant {
	PXA3XX_NAND_VARIANT_PXA,
	PXA3XX_NAND_VARIANT_ARMADA370,
};
147
/* Per-chip-select state: one instance per attached NAND chip */
struct pxa3xx_nand_host {
	struct nand_chip	chip;
	struct mtd_info         *mtd;
	void			*info_data;	/* back-pointer to pxa3xx_nand_info */

	/* page size of attached chip */
	int			use_ecc;
	int			cs;		/* chip select this host drives */

	/* calculated from pxa3xx_nand_flash data */
	unsigned int		col_addr_cycles;	/* column address cycles */
	unsigned int		row_addr_cycles;	/* row (page) address cycles */
	size_t			read_id_bytes;		/* bytes to fetch on READID */

};
163
/* Controller-wide driver state, shared by all chip selects */
struct pxa3xx_nand_info {
	struct nand_hw_control	controller;
	struct pxa3xx_nand_platform_data *pdata;

	struct clk		*clk;
	void __iomem		*mmio_base;	/* register window base */
	unsigned long		mmio_phys;
	int			cmd_complete, dev_ready;	/* polled completion flags */

	unsigned int 		buf_start;	/* read cursor into data_buff */
	unsigned int		buf_count;	/* valid bytes in data_buff */
	unsigned int		buf_size;
	unsigned int		data_buff_pos;	/* PIO write position (data) */
	unsigned int		oob_buff_pos;	/* PIO write position (OOB) */

	unsigned char		*data_buff;
	unsigned char		*oob_buff;

	struct pxa3xx_nand_host *host[NUM_CHIP_SELECT];
	unsigned int		state;		/* one of STATE_* */

	/*
	 * This driver supports NFCv1 (as found in PXA SoC)
	 * and NFCv2 (as found in Armada 370/XP SoC).
	 */
	enum pxa3xx_nand_variant variant;

	int			cs;		/* currently selected chip select */
	int			use_ecc;	/* use HW ECC ? */
	int			ecc_bch;	/* using BCH ECC? */
	int			use_spare;	/* use spare ? */
	int			need_wait;

	unsigned int		data_size;	/* data to be read from FIFO */
	unsigned int		chunk_size;	/* split commands chunk size */
	unsigned int		oob_size;
	unsigned int		spare_size;
	unsigned int		ecc_size;
	unsigned int		ecc_err_cnt;	/* bitflips corrected in last chunk */
	unsigned int		max_bitflips;	/* max bitflips per chunk in last page */
	int 			retcode;	/* one of ERR_* */

	/* cached register value */
	uint32_t		reg_ndcr;
	uint32_t		ndtr0cs0;
	uint32_t		ndtr1cs0;

	/* generated NDCBx register values */
	uint32_t		ndcb0;
	uint32_t		ndcb1;
	uint32_t		ndcb2;
	uint32_t		ndcb3;
};
217
/*
 * Fallback timing sets (in ns) for known non-ONFI chips; indexed from
 * builtin_flash_types below. Field order matches struct pxa3xx_nand_timing.
 */
static struct pxa3xx_nand_timing timing[] = {
	{ 40, 80, 60, 100, 80, 100, 90000, 400, 40, },
	{ 10,  0, 20,  40, 30,  40, 11123, 110, 10, },
	{ 10, 25, 15,  25, 15,  30, 25000,  60, 10, },
	{ 10, 35, 15,  25, 15,  25, 25000,  60, 10, },
};
224
/*
 * Known chips that do not report ONFI timing modes, matched by the
 * 16-bit (device-id << 8 | maker-id) value read back from READID.
 * Fields: chip_id, flash bus width, controller data width, timing set.
 */
static struct pxa3xx_nand_flash builtin_flash_types[] = {
	{ 0x46ec, 16, 16, &timing[1] },
	{ 0xdaec,  8,  8, &timing[1] },
	{ 0xd7ec,  8,  8, &timing[1] },
	{ 0xa12c,  8,  8, &timing[2] },
	{ 0xb12c, 16, 16, &timing[2] },
	{ 0xdc2c,  8,  8, &timing[2] },
	{ 0xcc2c, 16, 16, &timing[2] },
	{ 0xba20, 16, 16, &timing[3] },
};
235
#ifdef CONFIG_SYS_NAND_USE_FLASH_BBT
/* On-flash bad-block-table signatures ("MVBbt0" / mirrored "1btBVM") */
static u8 bbt_pattern[] = {'M', 'V', 'B', 'b', 't', '0' };
static u8 bbt_mirror_pattern[] = {'1', 't', 'b', 'B', 'V', 'M' };

/* Primary BBT: stored in the last blocks of the chip, OOB bytes 8..13 */
static struct nand_bbt_descr bbt_main_descr = {
	.options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE
		| NAND_BBT_2BIT | NAND_BBT_VERSION,
	.offs =	8,
	.len = 6,
	.veroffs = 14,
	.maxblocks = 8,		/* Last 8 blocks in each chip */
	.pattern = bbt_pattern
};

/* Mirror BBT: same placement rules, reversed signature */
static struct nand_bbt_descr bbt_mirror_descr = {
	.options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE
		| NAND_BBT_2BIT | NAND_BBT_VERSION,
	.offs =	8,
	.len = 6,
	.veroffs = 14,
	.maxblocks = 8,		/* Last 8 blocks in each chip */
	.pattern = bbt_mirror_pattern
};
#endif
260
/* OOB layout for 2KB pages with 4-bit BCH: ECC in bytes 32-63 of 64-byte OOB */
static struct nand_ecclayout ecc_layout_2KB_bch4bit = {
	.eccbytes = 32,
	.eccpos = {
		32, 33, 34, 35, 36, 37, 38, 39,
		40, 41, 42, 43, 44, 45, 46, 47,
		48, 49, 50, 51, 52, 53, 54, 55,
		56, 57, 58, 59, 60, 61, 62, 63},
	.oobfree = { {2, 30} }
};
270
/* OOB layout for 4KB pages with 4-bit BCH: two 32-byte ECC regions */
static struct nand_ecclayout ecc_layout_4KB_bch4bit = {
	.eccbytes = 64,
	.eccpos = {
		32,  33,  34,  35,  36,  37,  38,  39,
		40,  41,  42,  43,  44,  45,  46,  47,
		48,  49,  50,  51,  52,  53,  54,  55,
		56,  57,  58,  59,  60,  61,  62,  63,
		96,  97,  98,  99,  100, 101, 102, 103,
		104, 105, 106, 107, 108, 109, 110, 111,
		112, 113, 114, 115, 116, 117, 118, 119,
		120, 121, 122, 123, 124, 125, 126, 127},
	/* Bootrom looks in bytes 0 & 5 for bad blocks */
	.oobfree = { {6, 26}, { 64, 32} }
};
285
/*
 * OOB layout for 4KB pages with 8-bit BCH. Only the first 32 eccpos
 * entries are listed; the controller consumes the rest of the spare
 * area, so no free OOB bytes are exposed.
 */
static struct nand_ecclayout ecc_layout_4KB_bch8bit = {
	.eccbytes = 128,
	.eccpos = {
		32, 33, 34, 35, 36, 37, 38, 39,
		40, 41, 42, 43, 44, 45, 46, 47,
		48, 49, 50, 51, 52, 53, 54, 55,
		56, 57, 58, 59, 60, 61, 62, 63},
	.oobfree = { }
};
295
/*
 * Pack a cycle count into its NDTR0/NDTR1 bit-field, clamping to the
 * field's maximum representable value (3-bit fields max 7, 4-bit max 15,
 * tR is a 16-bit field).
 */
#define NDTR0_tCH(c)	(min((c), 7) << 19)
#define NDTR0_tCS(c)	(min((c), 7) << 16)
#define NDTR0_tWH(c)	(min((c), 7) << 11)
#define NDTR0_tWP(c)	(min((c), 7) << 8)
#define NDTR0_tRH(c)	(min((c), 7) << 3)
#define NDTR0_tRP(c)	(min((c), 7) << 0)

#define NDTR1_tR(c)	(min((c), 65535) << 16)
#define NDTR1_tWHR(c)	(min((c), 15) << 4)
#define NDTR1_tAR(c)	(min((c), 15) << 0)

/* convert nano-seconds to nand flash controller clock cycles */
/* Integer arithmetic truncates: result is rounded toward zero */
#define ns2cycle(ns, clk)	(int)((ns) * (clk / 1000000) / 1000)
309
/*
 * Report the controller flavour. This U-Boot port only supports the
 * NFCv2 controller found in Armada 370/XP/38x, so the value is fixed.
 */
static enum pxa3xx_nand_variant pxa3xx_nand_get_variant(void)
{
	/* We only support the Armada 370/XP/38x for now */
	return PXA3XX_NAND_VARIANT_ARMADA370;
}
315
/*
 * Program NDTR0/NDTR1 from a legacy pxa3xx_nand_timing table entry
 * (values in ns), caching the computed register values so they can be
 * replayed on chip-select switches in nand_cmdfunc().
 */
static void pxa3xx_nand_set_timing(struct pxa3xx_nand_host *host,
				   const struct pxa3xx_nand_timing *t)
{
	struct pxa3xx_nand_info *info = host->info_data;
	unsigned long nand_clk = mvebu_get_nand_clock();
	uint32_t ndtr0, ndtr1;

	ndtr0 = NDTR0_tCH(ns2cycle(t->tCH, nand_clk)) |
		NDTR0_tCS(ns2cycle(t->tCS, nand_clk)) |
		NDTR0_tWH(ns2cycle(t->tWH, nand_clk)) |
		NDTR0_tWP(ns2cycle(t->tWP, nand_clk)) |
		NDTR0_tRH(ns2cycle(t->tRH, nand_clk)) |
		NDTR0_tRP(ns2cycle(t->tRP, nand_clk));

	ndtr1 = NDTR1_tR(ns2cycle(t->tR, nand_clk)) |
		NDTR1_tWHR(ns2cycle(t->tWHR, nand_clk)) |
		NDTR1_tAR(ns2cycle(t->tAR, nand_clk));

	/* Cache for later restore when switching chip selects */
	info->ndtr0cs0 = ndtr0;
	info->ndtr1cs0 = ndtr1;
	nand_writel(info, NDTR0CS0, ndtr0);
	nand_writel(info, NDTR1CS0, ndtr1);
}
339
340 static void pxa3xx_nand_set_sdr_timing(struct pxa3xx_nand_host *host,
341 const struct nand_sdr_timings *t)
342 {
343 struct pxa3xx_nand_info *info = host->info_data;
344 struct nand_chip *chip = &host->chip;
345 unsigned long nand_clk = mvebu_get_nand_clock();
346 uint32_t ndtr0, ndtr1;
347
348 u32 tCH_min = DIV_ROUND_UP(t->tCH_min, 1000);
349 u32 tCS_min = DIV_ROUND_UP(t->tCS_min, 1000);
350 u32 tWH_min = DIV_ROUND_UP(t->tWH_min, 1000);
351 u32 tWP_min = DIV_ROUND_UP(t->tWC_min - tWH_min, 1000);
352 u32 tREH_min = DIV_ROUND_UP(t->tREH_min, 1000);
353 u32 tRP_min = DIV_ROUND_UP(t->tRC_min - tREH_min, 1000);
354 u32 tR = chip->chip_delay * 1000;
355 u32 tWHR_min = DIV_ROUND_UP(t->tWHR_min, 1000);
356 u32 tAR_min = DIV_ROUND_UP(t->tAR_min, 1000);
357
358 /* fallback to a default value if tR = 0 */
359 if (!tR)
360 tR = 20000;
361
362 ndtr0 = NDTR0_tCH(ns2cycle(tCH_min, nand_clk)) |
363 NDTR0_tCS(ns2cycle(tCS_min, nand_clk)) |
364 NDTR0_tWH(ns2cycle(tWH_min, nand_clk)) |
365 NDTR0_tWP(ns2cycle(tWP_min, nand_clk)) |
366 NDTR0_tRH(ns2cycle(tREH_min, nand_clk)) |
367 NDTR0_tRP(ns2cycle(tRP_min, nand_clk));
368
369 ndtr1 = NDTR1_tR(ns2cycle(tR, nand_clk)) |
370 NDTR1_tWHR(ns2cycle(tWHR_min, nand_clk)) |
371 NDTR1_tAR(ns2cycle(tAR_min, nand_clk));
372
373 info->ndtr0cs0 = ndtr0;
374 info->ndtr1cs0 = ndtr1;
375 nand_writel(info, NDTR0CS0, ndtr0);
376 nand_writel(info, NDTR1CS0, ndtr1);
377 }
378
/*
 * Detect and apply chip timings. ONFI chips use the best advertised
 * async timing mode; otherwise the chip is looked up by READID in the
 * builtin_flash_types table, which also fixes up the bus-width bits.
 *
 * Returns 0 on success, -EINVAL if the chip is unknown, or the error
 * from the ONFI timing-mode conversion.
 */
static int pxa3xx_nand_init_timings(struct pxa3xx_nand_host *host)
{
	const struct nand_sdr_timings *timings;
	struct nand_chip *chip = &host->chip;
	struct pxa3xx_nand_info *info = host->info_data;
	const struct pxa3xx_nand_flash *f = NULL;
	int mode, id, ntypes, i;

	mode = onfi_get_async_timing_mode(chip);
	if (mode == ONFI_TIMING_MODE_UNKNOWN) {
		/* Non-ONFI chip: identify it by its 2-byte READID answer */
		ntypes = ARRAY_SIZE(builtin_flash_types);

		chip->cmdfunc(host->mtd, NAND_CMD_READID, 0x00, -1);

		id = chip->read_byte(host->mtd);
		id |= chip->read_byte(host->mtd) << 0x8;

		for (i = 0; i < ntypes; i++) {
			f = &builtin_flash_types[i];

			if (f->chip_id == id)
				break;
		}

		if (i == ntypes) {
			dev_err(&info->pdev->dev, "Error: timings not found\n");
			return -EINVAL;
		}

		pxa3xx_nand_set_timing(host, f->timing);

		if (f->flash_width == 16) {
			info->reg_ndcr |= NDCR_DWIDTH_M;
			chip->options |= NAND_BUSWIDTH_16;
		}

		info->reg_ndcr |= (f->dfc_width == 16) ? NDCR_DWIDTH_C : 0;
	} else {
		/* mode is a bitmask of supported modes; pick the highest */
		mode = fls(mode) - 1;
		if (mode < 0)
			mode = 0;

		timings = onfi_async_timing_mode_to_sdr_timings(mode);
		if (IS_ERR(timings))
			return PTR_ERR(timings);

		pxa3xx_nand_set_sdr_timing(host, timings);
	}

	return 0;
}
430
431 /*
432 * Set the data and OOB size, depending on the selected
433 * spare and ECC configuration.
434 * Only applicable to READ0, READOOB and PAGEPROG commands.
435 */
436 static void pxa3xx_set_datasize(struct pxa3xx_nand_info *info,
437 struct mtd_info *mtd)
438 {
439 int oob_enable = info->reg_ndcr & NDCR_SPARE_EN;
440
441 info->data_size = mtd->writesize;
442 if (!oob_enable)
443 return;
444
445 info->oob_size = info->spare_size;
446 if (!info->use_ecc)
447 info->oob_size += info->ecc_size;
448 }
449
450 /**
451 * NOTE: it is a must to set ND_RUN first, then write
452 * command buffer, otherwise, it does not work.
453 * We enable all the interrupt at the same time, and
454 * let pxa3xx_nand_irq to handle all logic.
455 */
static void pxa3xx_nand_start(struct pxa3xx_nand_info *info)
{
	uint32_t ndcr;

	ndcr = info->reg_ndcr;

	/* Enable/disable HW ECC, and select BCH vs Hamming via NDECCCTRL */
	if (info->use_ecc) {
		ndcr |= NDCR_ECC_EN;
		if (info->ecc_bch)
			nand_writel(info, NDECCCTRL, 0x1);
	} else {
		ndcr &= ~NDCR_ECC_EN;
		if (info->ecc_bch)
			nand_writel(info, NDECCCTRL, 0x0);
	}

	/* This driver only does PIO transfers */
	ndcr &= ~NDCR_DMA_EN;

	if (info->use_spare)
		ndcr |= NDCR_SPARE_EN;
	else
		ndcr &= ~NDCR_SPARE_EN;

	ndcr |= NDCR_ND_RUN;

	/* clear status bits and run */
	/* NOTE: order matters — stop, clear NDSR, then start with ND_RUN */
	nand_writel(info, NDCR, 0);
	nand_writel(info, NDSR, NDSR_MASK);
	nand_writel(info, NDCR, ndcr);
}
486
/*
 * Mask (disable) the interrupt sources given in int_mask. The NDCR
 * interrupt bits are mask bits, so SETTING a bit disables the
 * corresponding interrupt — hence the OR despite the function name.
 */
static void disable_int(struct pxa3xx_nand_info *info, uint32_t int_mask)
{
	uint32_t ndcr;

	ndcr = nand_readl(info, NDCR);
	nand_writel(info, NDCR, ndcr | int_mask);
}
494
/*
 * Copy 'len' 32-bit words from the controller data FIFO (NDDB) into
 * 'data'. With BCH enabled the FIFO must be drained in bursts of 8
 * words, waiting for NDSR.RDDREQ between bursts.
 *
 * NOTE: 'len' is a count of 32-bit words, not bytes (callers pass
 * DIV_ROUND_UP(bytes, 4)); hence data advances by 32 bytes per burst.
 */
static void drain_fifo(struct pxa3xx_nand_info *info, void *data, int len)
{
	if (info->ecc_bch) {
		u32 ts;

		/*
		 * According to the datasheet, when reading from NDDB
		 * with BCH enabled, after each 32 bytes reads, we
		 * have to make sure that the NDSR.RDDREQ bit is set.
		 *
		 * Drain the FIFO 8 32 bits reads at a time, and skip
		 * the polling on the last read.
		 */
		while (len > 8) {
			readsl(info->mmio_base + NDDB, data, 8);

			ts = get_timer(0);
			while (!(nand_readl(info, NDSR) & NDSR_RDDREQ)) {
				if (get_timer(ts) > TIMEOUT_DRAIN_FIFO) {
					dev_err(&info->pdev->dev,
						"Timeout on RDDREQ while draining the FIFO\n");
					return;
				}
			}

			data += 32;
			len -= 8;
		}
	}

	/* Final (or only) burst: no RDDREQ polling needed */
	readsl(info->mmio_base + NDDB, data, len);
}
527
/*
 * Transfer one chunk of data (and its OOB, if any) between the driver
 * buffers and the controller FIFO by PIO, in the direction selected by
 * info->state, then advance the buffer cursors.
 */
static void handle_data_pio(struct pxa3xx_nand_info *info)
{
	/* Never move more than one chunk at a time */
	unsigned int do_bytes = min(info->data_size, info->chunk_size);

	switch (info->state) {
	case STATE_PIO_WRITING:
		writesl(info->mmio_base + NDDB,
			info->data_buff + info->data_buff_pos,
			DIV_ROUND_UP(do_bytes, 4));

		if (info->oob_size > 0)
			writesl(info->mmio_base + NDDB,
				info->oob_buff + info->oob_buff_pos,
				DIV_ROUND_UP(info->oob_size, 4));
		break;
	case STATE_PIO_READING:
		drain_fifo(info,
			   info->data_buff + info->data_buff_pos,
			   DIV_ROUND_UP(do_bytes, 4));

		if (info->oob_size > 0)
			drain_fifo(info,
				   info->oob_buff + info->oob_buff_pos,
				   DIV_ROUND_UP(info->oob_size, 4));
		break;
	default:
		dev_err(&info->pdev->dev, "%s: invalid state %d\n", __func__,
			info->state);
		BUG();
	}

	/* Update buffer pointers for multi-page read/write */
	info->data_buff_pos += do_bytes;
	info->oob_buff_pos += info->oob_size;
	info->data_size -= do_bytes;
}
564
/*
 * Bottom half of the interrupt handler: performs the actual PIO data
 * transfer, then acknowledges the data-request status bits. Called
 * synchronously from pxa3xx_nand_irq() since U-Boot has no threads.
 */
static void pxa3xx_nand_irq_thread(struct pxa3xx_nand_info *info)
{
	handle_data_pio(info);

	info->state = STATE_CMD_DONE;
	nand_writel(info, NDSR, NDSR_WRDREQ | NDSR_RDDREQ);
}
572
/*
 * Polled "interrupt" handler: decodes NDSR, records ECC results,
 * performs PIO transfers, loads the NDCBx command buffer when the
 * controller requests it, and sets the cmd_complete/dev_ready flags
 * that the command loops poll on.
 */
static irqreturn_t pxa3xx_nand_irq(struct pxa3xx_nand_info *info)
{
	unsigned int status, is_completed = 0, is_ready = 0;
	unsigned int ready, cmd_done;
	irqreturn_t ret = IRQ_HANDLED;

	/* Ready/command-done status bits differ per chip select */
	if (info->cs == 0) {
		ready           = NDSR_FLASH_RDY;
		cmd_done        = NDSR_CS0_CMDD;
	} else {
		ready           = NDSR_RDY;
		cmd_done        = NDSR_CS1_CMDD;
	}

	status = nand_readl(info, NDSR);

	if (status & NDSR_UNCORERR)
		info->retcode = ERR_UNCORERR;
	if (status & NDSR_CORERR) {
		info->retcode = ERR_CORERR;
		/* Only NFCv2 with BCH reports an actual error count */
		if (info->variant == PXA3XX_NAND_VARIANT_ARMADA370 &&
		    info->ecc_bch)
			info->ecc_err_cnt = NDSR_ERR_CNT(status);
		else
			info->ecc_err_cnt = 1;

		/*
		 * Each chunk composing a page is corrected independently,
		 * and we need to store maximum number of corrected bitflips
		 * to return it to the MTD layer in ecc.read_page().
		 */
		info->max_bitflips = max_t(unsigned int,
					   info->max_bitflips,
					   info->ecc_err_cnt);
	}
	if (status & (NDSR_RDDREQ | NDSR_WRDREQ)) {
		info->state = (status & NDSR_RDDREQ) ?
			STATE_PIO_READING : STATE_PIO_WRITING;
		/* Call the IRQ thread in U-Boot directly */
		pxa3xx_nand_irq_thread(info);
		/* NOTE(review): returns 0 here rather than IRQ_HANDLED —
		 * callers ignore the return value, so this is benign. */
		return 0;
	}
	if (status & cmd_done) {
		info->state = STATE_CMD_DONE;
		is_completed = 1;
	}
	if (status & ready) {
		info->state = STATE_READY;
		is_ready = 1;
	}

	if (status & NDSR_WRCMDREQ) {
		nand_writel(info, NDSR, NDSR_WRCMDREQ);
		status &= ~NDSR_WRCMDREQ;
		info->state = STATE_CMD_HANDLE;

		/*
		 * Command buffer registers NDCB{0-2} (and optionally NDCB3)
		 * must be loaded by writing directly either 12 or 16
		 * bytes directly to NDCB0, four bytes at a time.
		 *
		 * Direct write access to NDCB1, NDCB2 and NDCB3 is ignored
		 * but each NDCBx register can be read.
		 */
		nand_writel(info, NDCB0, info->ndcb0);
		nand_writel(info, NDCB0, info->ndcb1);
		nand_writel(info, NDCB0, info->ndcb2);

		/* NDCB3 register is available in NFCv2 (Armada 370/XP SoC) */
		if (info->variant == PXA3XX_NAND_VARIANT_ARMADA370)
			nand_writel(info, NDCB0, info->ndcb3);
	}

	/* clear NDSR to let the controller exit the IRQ */
	nand_writel(info, NDSR, status);
	if (is_completed)
		info->cmd_complete = 1;
	if (is_ready)
		info->dev_ready = 1;

	return ret;
}
655
/* Return 1 if every byte of buf[0..len) is 0xff (erased), else 0 */
static inline int is_buf_blank(uint8_t *buf, size_t len)
{
	size_t i;

	for (i = 0; i < len; i++) {
		if (buf[i] != 0xff)
			return 0;
	}

	return 1;
}
663
664 static void set_command_address(struct pxa3xx_nand_info *info,
665 unsigned int page_size, uint16_t column, int page_addr)
666 {
667 /* small page addr setting */
668 if (page_size < PAGE_CHUNK_SIZE) {
669 info->ndcb1 = ((page_addr & 0xFFFFFF) << 8)
670 | (column & 0xFF);
671
672 info->ndcb2 = 0;
673 } else {
674 info->ndcb1 = ((page_addr & 0xFFFF) << 16)
675 | (column & 0xFFFF);
676
677 if (page_addr & 0xFF0000)
678 info->ndcb2 = (page_addr & 0xFF0000) >> 16;
679 else
680 info->ndcb2 = 0;
681 }
682 }
683
/*
 * Reset all per-command driver state and configure the defaults
 * (ECC, spare, data sizes) appropriate for 'command' before
 * prepare_set_command() builds the NDCBx values.
 */
static void prepare_start_command(struct pxa3xx_nand_info *info, int command)
{
	struct pxa3xx_nand_host *host = info->host[info->cs];
	struct mtd_info *mtd = host->mtd;

	/* reset data and oob column point to handle data */
	info->buf_start		= 0;
	info->buf_count		= 0;
	info->oob_size		= 0;
	info->data_buff_pos	= 0;
	info->oob_buff_pos	= 0;
	info->use_ecc		= 0;
	info->use_spare		= 1;
	info->retcode		= ERR_NONE;
	info->ecc_err_cnt	= 0;
	info->ndcb3		= 0;
	info->need_wait		= 0;

	switch (command) {
	case NAND_CMD_READ0:
	case NAND_CMD_PAGEPROG:
		info->use_ecc = 1;
		/* fall through: READ0/PAGEPROG also need the data sizes */
	case NAND_CMD_READOOB:
		pxa3xx_set_datasize(info, mtd);
		break;
	case NAND_CMD_PARAM:
		/* PARAM data is raw; the spare area must stay disabled */
		info->use_spare = 0;
		break;
	default:
		info->ndcb1 = 0;
		info->ndcb2 = 0;
		break;
	}

	/*
	 * If we are about to issue a read command, or about to set
	 * the write address, then clean the data buffer.
	 */
	if (command == NAND_CMD_READ0 ||
	    command == NAND_CMD_READOOB ||
	    command == NAND_CMD_SEQIN) {
		info->buf_count = mtd->writesize + mtd->oobsize;
		memset(info->data_buff, 0xFF, info->buf_count);
	}
}
729
/*
 * Build the NDCB0-NDCB3 command-buffer values for 'command'.
 * ext_cmd_type selects the NFCv2 extended command subtype for chunked
 * (multi-command) large-page transfers.
 *
 * Returns 1 if the command must actually be issued to the controller,
 * 0 if there is nothing to execute (e.g. SEQIN on small pages, blank
 * page program, ERASE2, unsupported commands).
 */
static int prepare_set_command(struct pxa3xx_nand_info *info, int command,
		int ext_cmd_type, uint16_t column, int page_addr)
{
	int addr_cycle, exec_cmd;
	struct pxa3xx_nand_host *host;
	struct mtd_info *mtd;

	host = info->host[info->cs];
	mtd = host->mtd;
	addr_cycle = 0;
	exec_cmd = 1;

	if (info->cs != 0)
		info->ndcb0 = NDCB0_CSEL;
	else
		info->ndcb0 = 0;

	/* SEQIN only latches the address; PAGEPROG issues the write */
	if (command == NAND_CMD_SEQIN)
		exec_cmd = 0;

	addr_cycle = NDCB0_ADDR_CYC(host->row_addr_cycles
				    + host->col_addr_cycles);

	switch (command) {
	case NAND_CMD_READOOB:
	case NAND_CMD_READ0:
		info->buf_start = column;
		info->ndcb0 |= NDCB0_CMD_TYPE(0)
				| addr_cycle
				| NAND_CMD_READ0;

		/* OOB is read as the tail of the page buffer */
		if (command == NAND_CMD_READOOB)
			info->buf_start += mtd->writesize;

		/*
		 * Multiple page read needs an 'extended command type' field,
		 * which is either naked-read or last-read according to the
		 * state.
		 */
		if (mtd->writesize == PAGE_CHUNK_SIZE) {
			info->ndcb0 |= NDCB0_DBC | (NAND_CMD_READSTART << 8);
		} else if (mtd->writesize > PAGE_CHUNK_SIZE) {
			info->ndcb0 |= NDCB0_DBC | (NAND_CMD_READSTART << 8)
					| NDCB0_LEN_OVRD
					| NDCB0_EXT_CMD_TYPE(ext_cmd_type);
			info->ndcb3 = info->chunk_size +
				      info->oob_size;
		}

		set_command_address(info, mtd->writesize, column, page_addr);
		break;

	case NAND_CMD_SEQIN:

		info->buf_start = column;
		set_command_address(info, mtd->writesize, 0, page_addr);

		/*
		 * Multiple page programming needs to execute the initial
		 * SEQIN command that sets the page address.
		 */
		if (mtd->writesize > PAGE_CHUNK_SIZE) {
			info->ndcb0 |= NDCB0_CMD_TYPE(0x1)
				| NDCB0_EXT_CMD_TYPE(ext_cmd_type)
				| addr_cycle
				| command;
			/* No data transfer in this case */
			info->data_size = 0;
			exec_cmd = 1;
		}
		break;

	case NAND_CMD_PAGEPROG:
		/* Skip the program entirely for an all-0xFF buffer */
		if (is_buf_blank(info->data_buff,
				 (mtd->writesize + mtd->oobsize))) {
			exec_cmd = 0;
			break;
		}

		/* Second command setting for large pages */
		if (mtd->writesize > PAGE_CHUNK_SIZE) {
			/*
			 * Multiple page write uses the 'extended command'
			 * field. This can be used to issue a command dispatch
			 * or a naked-write depending on the current stage.
			 */
			info->ndcb0 |= NDCB0_CMD_TYPE(0x1)
					| NDCB0_LEN_OVRD
					| NDCB0_EXT_CMD_TYPE(ext_cmd_type);
			info->ndcb3 = info->chunk_size +
				      info->oob_size;

			/*
			 * This is the command dispatch that completes a chunked
			 * page program operation.
			 */
			if (info->data_size == 0) {
				info->ndcb0 = NDCB0_CMD_TYPE(0x1)
					| NDCB0_EXT_CMD_TYPE(ext_cmd_type)
					| command;
				info->ndcb1 = 0;
				info->ndcb2 = 0;
				info->ndcb3 = 0;
			}
		} else {
			/* Small page: single SEQIN+PAGEPROG double-byte cmd */
			info->ndcb0 |= NDCB0_CMD_TYPE(0x1)
					| NDCB0_AUTO_RS
					| NDCB0_ST_ROW_EN
					| NDCB0_DBC
					| (NAND_CMD_PAGEPROG << 8)
					| NAND_CMD_SEQIN
					| addr_cycle;
		}
		break;

	case NAND_CMD_PARAM:
		info->buf_count = 256;
		info->ndcb0 |= NDCB0_CMD_TYPE(0)
				| NDCB0_ADDR_CYC(1)
				| NDCB0_LEN_OVRD
				| command;
		info->ndcb1 = (column & 0xFF);
		info->ndcb3 = 256;
		info->data_size = 256;
		break;

	case NAND_CMD_READID:
		info->buf_count = host->read_id_bytes;
		info->ndcb0 |= NDCB0_CMD_TYPE(3)
				| NDCB0_ADDR_CYC(1)
				| command;
		info->ndcb1 = (column & 0xFF);

		info->data_size = 8;
		break;
	case NAND_CMD_STATUS:
		info->buf_count = 1;
		info->ndcb0 |= NDCB0_CMD_TYPE(4)
				| NDCB0_ADDR_CYC(1)
				| command;

		info->data_size = 8;
		break;

	case NAND_CMD_ERASE1:
		info->ndcb0 |= NDCB0_CMD_TYPE(2)
				| NDCB0_AUTO_RS
				| NDCB0_ADDR_CYC(3)
				| NDCB0_DBC
				| (NAND_CMD_ERASE2 << 8)
				| NAND_CMD_ERASE1;
		info->ndcb1 = page_addr;
		info->ndcb2 = 0;

		break;
	case NAND_CMD_RESET:
		info->ndcb0 |= NDCB0_CMD_TYPE(5)
				| command;

		break;

	case NAND_CMD_ERASE2:
		/* ERASE2 is issued together with ERASE1 via NDCB0_DBC */
		exec_cmd = 0;
		break;

	default:
		exec_cmd = 0;
		dev_err(&info->pdev->dev, "non-supported command %x\n",
			command);
		break;
	}

	return exec_cmd;
}
904
/*
 * ->cmdfunc hook used for small-page devices (single monolithic
 * commands): prepares the NDCBx values, starts the controller and
 * polls NDSR until the command completes or times out.
 */
static void nand_cmdfunc(struct mtd_info *mtd, unsigned command,
			 int column, int page_addr)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	struct pxa3xx_nand_host *host = nand_get_controller_data(chip);
	struct pxa3xx_nand_info *info = host->info_data;
	int exec_cmd;

	/*
	 * if this is a x16 device ,then convert the input
	 * "byte" address into a "word" address appropriate
	 * for indexing a word-oriented device
	 */
	if (info->reg_ndcr & NDCR_DWIDTH_M)
		column /= 2;

	/*
	 * There may be different NAND chip hooked to
	 * different chip select, so check whether
	 * chip select has been changed, if yes, reset the timing
	 */
	if (info->cs != host->cs) {
		info->cs = host->cs;
		nand_writel(info, NDTR0CS0, info->ndtr0cs0);
		nand_writel(info, NDTR1CS0, info->ndtr1cs0);
	}

	prepare_start_command(info, command);

	info->state = STATE_PREPARED;
	exec_cmd = prepare_set_command(info, command, 0, column, page_addr);

	if (exec_cmd) {
		u32 ts;

		info->cmd_complete = 0;
		info->dev_ready = 0;
		info->need_wait = 1;
		pxa3xx_nand_start(info);

		/* Poll the status register, dispatching to the IRQ handler */
		ts = get_timer(0);
		while (1) {
			u32 status;

			status = nand_readl(info, NDSR);
			if (status)
				pxa3xx_nand_irq(info);

			if (info->cmd_complete)
				break;

			if (get_timer(ts) > CHIP_DELAY_TIMEOUT) {
				dev_err(&info->pdev->dev, "Wait timeout!!!\n");
				return;
			}
		}
	}
	info->state = STATE_IDLE;
}
964
/*
 * ->cmdfunc hook used for large-page devices on NFCv2: splits a page
 * operation into a sequence of chunked extended commands (naked
 * read/write, last read/write, final dispatch), issuing and polling
 * each one in turn until the whole page has been transferred.
 */
static void nand_cmdfunc_extended(struct mtd_info *mtd,
				  const unsigned command,
				  int column, int page_addr)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	struct pxa3xx_nand_host *host = nand_get_controller_data(chip);
	struct pxa3xx_nand_info *info = host->info_data;
	int exec_cmd, ext_cmd_type;

	/*
	 * if this is a x16 device then convert the input
	 * "byte" address into a "word" address appropriate
	 * for indexing a word-oriented device
	 */
	if (info->reg_ndcr & NDCR_DWIDTH_M)
		column /= 2;

	/*
	 * There may be different NAND chip hooked to
	 * different chip select, so check whether
	 * chip select has been changed, if yes, reset the timing
	 */
	if (info->cs != host->cs) {
		info->cs = host->cs;
		nand_writel(info, NDTR0CS0, info->ndtr0cs0);
		nand_writel(info, NDTR1CS0, info->ndtr1cs0);
	}

	/* Select the extended command for the first command */
	switch (command) {
	case NAND_CMD_READ0:
	case NAND_CMD_READOOB:
		ext_cmd_type = EXT_CMD_TYPE_MONO;
		break;
	case NAND_CMD_SEQIN:
		ext_cmd_type = EXT_CMD_TYPE_DISPATCH;
		break;
	case NAND_CMD_PAGEPROG:
		ext_cmd_type = EXT_CMD_TYPE_NAKED_RW;
		break;
	default:
		ext_cmd_type = 0;
		break;
	}

	prepare_start_command(info, command);

	/*
	 * Prepare the "is ready" completion before starting a command
	 * transaction sequence. If the command is not executed the
	 * completion will be completed, see below.
	 *
	 * We can do that inside the loop because the command variable
	 * is invariant and thus so is the exec_cmd.
	 */
	info->need_wait = 1;
	info->dev_ready = 0;

	do {
		u32 ts;

		info->state = STATE_PREPARED;
		exec_cmd = prepare_set_command(info, command, ext_cmd_type,
					       column, page_addr);
		if (!exec_cmd) {
			/* Nothing to issue: mark ready so waitfunc returns */
			info->need_wait = 0;
			info->dev_ready = 1;
			break;
		}

		info->cmd_complete = 0;
		pxa3xx_nand_start(info);

		/* Poll the status register, dispatching to the IRQ handler */
		ts = get_timer(0);
		while (1) {
			u32 status;

			status = nand_readl(info, NDSR);
			if (status)
				pxa3xx_nand_irq(info);

			if (info->cmd_complete)
				break;

			if (get_timer(ts) > CHIP_DELAY_TIMEOUT) {
				dev_err(&info->pdev->dev, "Wait timeout!!!\n");
				return;
			}
		}

		/* Check if the sequence is complete */
		if (info->data_size == 0 && command != NAND_CMD_PAGEPROG)
			break;

		/*
		 * After a splitted program command sequence has issued
		 * the command dispatch, the command sequence is complete.
		 */
		if (info->data_size == 0 &&
		    command == NAND_CMD_PAGEPROG &&
		    ext_cmd_type == EXT_CMD_TYPE_DISPATCH)
			break;

		if (command == NAND_CMD_READ0 || command == NAND_CMD_READOOB) {
			/* Last read: issue a 'last naked read' */
			if (info->data_size == info->chunk_size)
				ext_cmd_type = EXT_CMD_TYPE_LAST_RW;
			else
				ext_cmd_type = EXT_CMD_TYPE_NAKED_RW;

		/*
		 * If a splitted program command has no more data to transfer,
		 * the command dispatch must be issued to complete.
		 */
		} else if (command == NAND_CMD_PAGEPROG &&
			   info->data_size == 0) {
			ext_cmd_type = EXT_CMD_TYPE_DISPATCH;
		}
	} while (1);

	info->state = STATE_IDLE;
}
1087
/*
 * ->ecc.write_page hook: stage page data and OOB into the driver
 * buffer; the controller computes and appends ECC in hardware when
 * PAGEPROG is issued. Always returns 0.
 */
static int pxa3xx_nand_write_page_hwecc(struct mtd_info *mtd,
		struct nand_chip *chip, const uint8_t *buf, int oob_required,
		int page)
{
	chip->write_buf(mtd, buf, mtd->writesize);
	chip->write_buf(mtd, chip->oob_poi, mtd->oobsize);

	return 0;
}
1097
/*
 * ->ecc.read_page hook: copy the already-transferred page and OOB out
 * of the driver buffer and fold the controller's ECC verdict into the
 * MTD statistics. Returns the max bitflip count for the page.
 */
static int pxa3xx_nand_read_page_hwecc(struct mtd_info *mtd,
		struct nand_chip *chip, uint8_t *buf, int oob_required,
		int page)
{
	struct pxa3xx_nand_host *host = nand_get_controller_data(chip);
	struct pxa3xx_nand_info *info = host->info_data;

	chip->read_buf(mtd, buf, mtd->writesize);
	chip->read_buf(mtd, chip->oob_poi, mtd->oobsize);

	if (info->retcode == ERR_CORERR && info->use_ecc) {
		mtd->ecc_stats.corrected += info->ecc_err_cnt;

	} else if (info->retcode == ERR_UNCORERR) {
		/*
		 * for blank page (all 0xff), HW will calculate its ECC as
		 * 0, which is different from the ECC information within
		 * OOB, ignore such uncorrectable errors
		 */
		if (is_buf_blank(buf, mtd->writesize))
			info->retcode = ERR_NONE;
		else
			mtd->ecc_stats.failed++;
	}

	return info->max_bitflips;
}
1125
1126 static uint8_t pxa3xx_nand_read_byte(struct mtd_info *mtd)
1127 {
1128 struct nand_chip *chip = mtd_to_nand(mtd);
1129 struct pxa3xx_nand_host *host = nand_get_controller_data(chip);
1130 struct pxa3xx_nand_info *info = host->info_data;
1131 char retval = 0xFF;
1132
1133 if (info->buf_start < info->buf_count)
1134 /* Has just send a new command? */
1135 retval = info->data_buff[info->buf_start++];
1136
1137 return retval;
1138 }
1139
1140 static u16 pxa3xx_nand_read_word(struct mtd_info *mtd)
1141 {
1142 struct nand_chip *chip = mtd_to_nand(mtd);
1143 struct pxa3xx_nand_host *host = nand_get_controller_data(chip);
1144 struct pxa3xx_nand_info *info = host->info_data;
1145 u16 retval = 0xFFFF;
1146
1147 if (!(info->buf_start & 0x01) && info->buf_start < info->buf_count) {
1148 retval = *((u16 *)(info->data_buff+info->buf_start));
1149 info->buf_start += 2;
1150 }
1151 return retval;
1152 }
1153
1154 static void pxa3xx_nand_read_buf(struct mtd_info *mtd, uint8_t *buf, int len)
1155 {
1156 struct nand_chip *chip = mtd_to_nand(mtd);
1157 struct pxa3xx_nand_host *host = nand_get_controller_data(chip);
1158 struct pxa3xx_nand_info *info = host->info_data;
1159 int real_len = min_t(size_t, len, info->buf_count - info->buf_start);
1160
1161 memcpy(buf, info->data_buff + info->buf_start, real_len);
1162 info->buf_start += real_len;
1163 }
1164
1165 static void pxa3xx_nand_write_buf(struct mtd_info *mtd,
1166 const uint8_t *buf, int len)
1167 {
1168 struct nand_chip *chip = mtd_to_nand(mtd);
1169 struct pxa3xx_nand_host *host = nand_get_controller_data(chip);
1170 struct pxa3xx_nand_info *info = host->info_data;
1171 int real_len = min_t(size_t, len, info->buf_count - info->buf_start);
1172
1173 memcpy(info->data_buff + info->buf_start, buf, real_len);
1174 info->buf_start += real_len;
1175 }
1176
/*
 * Intentionally a no-op: the driver tracks the active chip select in
 * info->cs rather than toggling anything per-access.
 */
static void pxa3xx_nand_select_chip(struct mtd_info *mtd, int chip)
{
}
1181
1182 static int pxa3xx_nand_waitfunc(struct mtd_info *mtd, struct nand_chip *this)
1183 {
1184 struct nand_chip *chip = mtd_to_nand(mtd);
1185 struct pxa3xx_nand_host *host = nand_get_controller_data(chip);
1186 struct pxa3xx_nand_info *info = host->info_data;
1187
1188 if (info->need_wait) {
1189 u32 ts;
1190
1191 info->need_wait = 0;
1192
1193 ts = get_timer(0);
1194 while (1) {
1195 u32 status;
1196
1197 status = nand_readl(info, NDSR);
1198 if (status)
1199 pxa3xx_nand_irq(info);
1200
1201 if (info->dev_ready)
1202 break;
1203
1204 if (get_timer(ts) > CHIP_DELAY_TIMEOUT) {
1205 dev_err(&info->pdev->dev, "Ready timeout!!!\n");
1206 return NAND_STATUS_FAIL;
1207 }
1208 }
1209 }
1210
1211 /* pxa3xx_nand_send_command has waited for command complete */
1212 if (this->state == FL_WRITING || this->state == FL_ERASING) {
1213 if (info->retcode == ERR_NONE)
1214 return 0;
1215 else
1216 return NAND_STATUS_FAIL;
1217 }
1218
1219 return NAND_STATUS_READY;
1220 }
1221
1222 static int pxa3xx_nand_config_flash(struct pxa3xx_nand_info *info)
1223 {
1224 struct pxa3xx_nand_host *host = info->host[info->cs];
1225 struct mtd_info *mtd = host->mtd;
1226 struct nand_chip *chip = mtd_to_nand(mtd);
1227
1228 info->reg_ndcr |= (host->col_addr_cycles == 2) ? NDCR_RA_START : 0;
1229 info->reg_ndcr |= (chip->page_shift == 6) ? NDCR_PG_PER_BLK : 0;
1230 info->reg_ndcr |= (mtd->writesize == 2048) ? NDCR_PAGE_SZ : 0;
1231
1232 return 0;
1233 }
1234
/*
 * Read back the configuration already programmed into the controller
 * instead of redetecting the flash (used in "keep config" mode):
 * derive the FIFO chunk size and READID length from NDCR, and cache
 * the current NDCR and timing registers. Always returns 0.
 */
static int pxa3xx_nand_detect_config(struct pxa3xx_nand_info *info)
{
	/*
	 * We set 0 by hard coding here, for we don't support keep_config
	 * when there is more than one chip attached to the controller
	 */
	struct pxa3xx_nand_host *host = info->host[0];
	uint32_t ndcr = nand_readl(info, NDCR);

	if (ndcr & NDCR_PAGE_SZ) {
		/* Controller's FIFO size */
		info->chunk_size = 2048;
		host->read_id_bytes = 4;
	} else {
		info->chunk_size = 512;
		host->read_id_bytes = 2;
	}

	/* Cache NDCR (with all interrupts masked) and the current timings */
	info->reg_ndcr = ndcr & ~NDCR_INT_MASK;
	info->ndtr0cs0 = nand_readl(info, NDTR0CS0);
	info->ndtr1cs0 = nand_readl(info, NDTR1CS0);
	return 0;
}
1259
1260 static int pxa3xx_nand_init_buff(struct pxa3xx_nand_info *info)
1261 {
1262 info->data_buff = kmalloc(info->buf_size, GFP_KERNEL);
1263 if (info->data_buff == NULL)
1264 return -ENOMEM;
1265 return 0;
1266 }
1267
/*
 * Probe for a flash chip on the current chip select: program safe
 * default control values and conservative timings, then issue a RESET
 * and check that the chip reports ready.
 *
 * Returns 0 if a chip responded, -ENODEV if the reset failed, or the
 * error from the timing-mode lookup.
 */
static int pxa3xx_nand_sensing(struct pxa3xx_nand_host *host)
{
	struct pxa3xx_nand_info *info = host->info_data;
	struct pxa3xx_nand_platform_data *pdata = info->pdata;
	struct mtd_info *mtd;
	struct nand_chip *chip;
	const struct nand_sdr_timings *timings;
	int ret;

	mtd = info->host[info->cs]->mtd;
	chip = mtd_to_nand(mtd);

	/* configure default flash values */
	info->reg_ndcr = 0x0; /* enable all interrupts */
	info->reg_ndcr |= (pdata->enable_arbiter) ? NDCR_ND_ARB_EN : 0;
	info->reg_ndcr |= NDCR_RD_ID_CNT(host->read_id_bytes);
	info->reg_ndcr |= NDCR_SPARE_EN; /* enable spare by default */

	/* probe with ONFI asynchronous timing mode 0 (most conservative) */
	timings = onfi_async_timing_mode_to_sdr_timings(0);
	if (IS_ERR(timings))
		return PTR_ERR(timings);

	pxa3xx_nand_set_sdr_timing(host, timings);

	chip->cmdfunc(mtd, NAND_CMD_RESET, 0, 0);
	ret = chip->waitfunc(mtd, chip);
	if (ret & NAND_STATUS_FAIL)
		return -ENODEV;

	return 0;
}
1300
1301 static int pxa_ecc_init(struct pxa3xx_nand_info *info,
1302 struct nand_ecc_ctrl *ecc,
1303 int strength, int ecc_stepsize, int page_size)
1304 {
1305 if (strength == 1 && ecc_stepsize == 512 && page_size == 2048) {
1306 info->chunk_size = 2048;
1307 info->spare_size = 40;
1308 info->ecc_size = 24;
1309 ecc->mode = NAND_ECC_HW;
1310 ecc->size = 512;
1311 ecc->strength = 1;
1312
1313 } else if (strength == 1 && ecc_stepsize == 512 && page_size == 512) {
1314 info->chunk_size = 512;
1315 info->spare_size = 8;
1316 info->ecc_size = 8;
1317 ecc->mode = NAND_ECC_HW;
1318 ecc->size = 512;
1319 ecc->strength = 1;
1320
1321 /*
1322 * Required ECC: 4-bit correction per 512 bytes
1323 * Select: 16-bit correction per 2048 bytes
1324 */
1325 } else if (strength == 4 && ecc_stepsize == 512 && page_size == 2048) {
1326 info->ecc_bch = 1;
1327 info->chunk_size = 2048;
1328 info->spare_size = 32;
1329 info->ecc_size = 32;
1330 ecc->mode = NAND_ECC_HW;
1331 ecc->size = info->chunk_size;
1332 ecc->layout = &ecc_layout_2KB_bch4bit;
1333 ecc->strength = 16;
1334
1335 } else if (strength == 4 && ecc_stepsize == 512 && page_size == 4096) {
1336 info->ecc_bch = 1;
1337 info->chunk_size = 2048;
1338 info->spare_size = 32;
1339 info->ecc_size = 32;
1340 ecc->mode = NAND_ECC_HW;
1341 ecc->size = info->chunk_size;
1342 ecc->layout = &ecc_layout_4KB_bch4bit;
1343 ecc->strength = 16;
1344
1345 /*
1346 * Required ECC: 8-bit correction per 512 bytes
1347 * Select: 16-bit correction per 1024 bytes
1348 */
1349 } else if (strength == 8 && ecc_stepsize == 512 && page_size == 4096) {
1350 info->ecc_bch = 1;
1351 info->chunk_size = 1024;
1352 info->spare_size = 0;
1353 info->ecc_size = 32;
1354 ecc->mode = NAND_ECC_HW;
1355 ecc->size = info->chunk_size;
1356 ecc->layout = &ecc_layout_4KB_bch8bit;
1357 ecc->strength = 16;
1358 } else {
1359 dev_err(&info->pdev->dev,
1360 "ECC strength %d at page size %d is not supported\n",
1361 strength, page_size);
1362 return -ENODEV;
1363 }
1364
1365 return 0;
1366 }
1367
/*
 * Full device scan for the current chip select: either reuse the
 * bootloader's controller configuration (keep_config) or sense the
 * chip, then identify it, program timings, pick an ECC scheme and
 * finish MTD registration via nand_scan_tail().
 *
 * Returns 0 on success or a negative error code.
 */
static int pxa3xx_nand_scan(struct mtd_info *mtd)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	struct pxa3xx_nand_host *host = nand_get_controller_data(chip);
	struct pxa3xx_nand_info *info = host->info_data;
	struct pxa3xx_nand_platform_data *pdata = info->pdata;
	int ret;
	uint16_t ecc_strength, ecc_step;

	if (pdata->keep_config && !pxa3xx_nand_detect_config(info))
		goto KEEP_CONFIG;

	/* Set a default chunk size */
	info->chunk_size = 512;

	ret = pxa3xx_nand_sensing(host);
	if (ret) {
		dev_info(&info->pdev->dev, "There is no chip on cs %d!\n",
			 info->cs);

		return ret;
	}

KEEP_CONFIG:
	/* Device detection must be done with ECC disabled */
	if (info->variant == PXA3XX_NAND_VARIANT_ARMADA370)
		nand_writel(info, NDECCCTRL, 0x0);

	if (nand_scan_ident(mtd, 1, NULL))
		return -ENODEV;

	if (!pdata->keep_config) {
		/* Program chip-specific timings now that the chip is known */
		ret = pxa3xx_nand_init_timings(host);
		if (ret) {
			dev_err(&info->pdev->dev,
				"Failed to set timings: %d\n", ret);
			return ret;
		}
	}

	ret = pxa3xx_nand_config_flash(info);
	if (ret)
		return ret;

#ifdef CONFIG_SYS_NAND_USE_FLASH_BBT
	/*
	 * We'll use a bad block table stored in-flash and don't
	 * allow writing the bad block marker to the flash.
	 */
	chip->bbt_options |= NAND_BBT_USE_FLASH | NAND_BBT_NO_OOB_BBM;
	chip->bbt_td = &bbt_main_descr;
	chip->bbt_md = &bbt_mirror_descr;
#endif

	/*
	 * If the page size is bigger than the FIFO size, let's check
	 * we are given the right variant and then switch to the extended
	 * (aka split) command handling,
	 */
	if (mtd->writesize > PAGE_CHUNK_SIZE) {
		if (info->variant == PXA3XX_NAND_VARIANT_ARMADA370) {
			chip->cmdfunc = nand_cmdfunc_extended;
		} else {
			dev_err(&info->pdev->dev,
				"unsupported page size on this variant\n");
			return -ENODEV;
		}
	}

	/* Platform data overrides the chip's advertised ECC requirements */
	if (pdata->ecc_strength && pdata->ecc_step_size) {
		ecc_strength = pdata->ecc_strength;
		ecc_step = pdata->ecc_step_size;
	} else {
		ecc_strength = chip->ecc_strength_ds;
		ecc_step = chip->ecc_step_ds;
	}

	/* Set default ECC strength requirements on non-ONFI devices */
	if (ecc_strength < 1 && ecc_step < 1) {
		ecc_strength = 1;
		ecc_step = 512;
	}

	ret = pxa_ecc_init(info, &chip->ecc, ecc_strength,
			   ecc_step, mtd->writesize);
	if (ret)
		return ret;

	/* calculate addressing information */
	if (mtd->writesize >= 2048)
		host->col_addr_cycles = 2;
	else
		host->col_addr_cycles = 1;

	/* release the initial buffer */
	kfree(info->data_buff);

	/* allocate the real data + oob buffer */
	info->buf_size = mtd->writesize + mtd->oobsize;
	ret = pxa3xx_nand_init_buff(info);
	if (ret)
		return ret;
	info->oob_buff = info->data_buff + mtd->writesize;

	/* more than 65536 pages needs a third row address cycle */
	if ((mtd->size >> chip->page_shift) > 65536)
		host->row_addr_cycles = 3;
	else
		host->row_addr_cycles = 2;
	return nand_scan_tail(mtd);
}
1478
1479 static int alloc_nand_resource(struct pxa3xx_nand_info *info)
1480 {
1481 struct pxa3xx_nand_platform_data *pdata;
1482 struct pxa3xx_nand_host *host;
1483 struct nand_chip *chip = NULL;
1484 struct mtd_info *mtd;
1485 int ret, cs;
1486
1487 pdata = info->pdata;
1488 if (pdata->num_cs <= 0)
1489 return -ENODEV;
1490
1491 info->variant = pxa3xx_nand_get_variant();
1492 for (cs = 0; cs < pdata->num_cs; cs++) {
1493 chip = (struct nand_chip *)
1494 ((u8 *)&info[1] + sizeof(*host) * cs);
1495 mtd = nand_to_mtd(chip);
1496 host = (struct pxa3xx_nand_host *)chip;
1497 info->host[cs] = host;
1498 host->mtd = mtd;
1499 host->cs = cs;
1500 host->info_data = info;
1501 host->read_id_bytes = 4;
1502 mtd->owner = THIS_MODULE;
1503
1504 nand_set_controller_data(chip, host);
1505 chip->ecc.read_page = pxa3xx_nand_read_page_hwecc;
1506 chip->ecc.write_page = pxa3xx_nand_write_page_hwecc;
1507 chip->controller = &info->controller;
1508 chip->waitfunc = pxa3xx_nand_waitfunc;
1509 chip->select_chip = pxa3xx_nand_select_chip;
1510 chip->read_word = pxa3xx_nand_read_word;
1511 chip->read_byte = pxa3xx_nand_read_byte;
1512 chip->read_buf = pxa3xx_nand_read_buf;
1513 chip->write_buf = pxa3xx_nand_write_buf;
1514 chip->options |= NAND_NO_SUBPAGE_WRITE;
1515 chip->cmdfunc = nand_cmdfunc;
1516 }
1517
1518 /* Allocate a buffer to allow flash detection */
1519 info->buf_size = INIT_BUFFER_SIZE;
1520 info->data_buff = kmalloc(info->buf_size, GFP_KERNEL);
1521 if (info->data_buff == NULL) {
1522 ret = -ENOMEM;
1523 goto fail_disable_clk;
1524 }
1525
1526 /* initialize all interrupts to be disabled */
1527 disable_int(info, NDSR_MASK);
1528
1529 return 0;
1530
1531 kfree(info->data_buff);
1532 fail_disable_clk:
1533 return ret;
1534 }
1535
1536 static int pxa3xx_nand_probe_dt(struct pxa3xx_nand_info *info)
1537 {
1538 struct pxa3xx_nand_platform_data *pdata;
1539 const void *blob = gd->fdt_blob;
1540 int node = -1;
1541
1542 pdata = kzalloc(sizeof(*pdata), GFP_KERNEL);
1543 if (!pdata)
1544 return -ENOMEM;
1545
1546 /* Get address decoding nodes from the FDT blob */
1547 do {
1548 node = fdt_node_offset_by_compatible(blob, node,
1549 "marvell,mvebu-pxa3xx-nand");
1550 if (node < 0)
1551 break;
1552
1553 /* Bypass disabeld nodes */
1554 if (!fdtdec_get_is_enabled(blob, node))
1555 continue;
1556
1557 /* Get the first enabled NAND controler base address */
1558 info->mmio_base =
1559 (void __iomem *)fdtdec_get_addr_size_auto_noparent(
1560 blob, node, "reg", 0, NULL, true);
1561
1562 pdata->num_cs = fdtdec_get_int(blob, node, "num-cs", 1);
1563 if (pdata->num_cs != 1) {
1564 pr_err("pxa3xx driver supports single CS only\n");
1565 break;
1566 }
1567
1568 if (fdtdec_get_bool(blob, node, "nand-enable-arbiter"))
1569 pdata->enable_arbiter = 1;
1570
1571 if (fdtdec_get_bool(blob, node, "nand-keep-config"))
1572 pdata->keep_config = 1;
1573
1574 /*
1575 * ECC parameters.
1576 * If these are not set, they will be selected according
1577 * to the detected flash type.
1578 */
1579 /* ECC strength */
1580 pdata->ecc_strength = fdtdec_get_int(blob, node,
1581 "nand-ecc-strength", 0);
1582
1583 /* ECC step size */
1584 pdata->ecc_step_size = fdtdec_get_int(blob, node,
1585 "nand-ecc-step-size", 0);
1586
1587 info->pdata = pdata;
1588
1589 /* Currently support only a single NAND controller */
1590 return 0;
1591
1592 } while (node >= 0);
1593
1594 return -EINVAL;
1595 }
1596
1597 static int pxa3xx_nand_probe(struct pxa3xx_nand_info *info)
1598 {
1599 struct pxa3xx_nand_platform_data *pdata;
1600 int ret, cs, probe_success;
1601
1602 ret = pxa3xx_nand_probe_dt(info);
1603 if (ret)
1604 return ret;
1605
1606 pdata = info->pdata;
1607
1608 ret = alloc_nand_resource(info);
1609 if (ret) {
1610 dev_err(&pdev->dev, "alloc nand resource failed\n");
1611 return ret;
1612 }
1613
1614 probe_success = 0;
1615 for (cs = 0; cs < pdata->num_cs; cs++) {
1616 struct mtd_info *mtd = info->host[cs]->mtd;
1617
1618 /*
1619 * The mtd name matches the one used in 'mtdparts' kernel
1620 * parameter. This name cannot be changed or otherwise
1621 * user's mtd partitions configuration would get broken.
1622 */
1623 mtd->name = "pxa3xx_nand-0";
1624 info->cs = cs;
1625 ret = pxa3xx_nand_scan(mtd);
1626 if (ret) {
1627 dev_info(&pdev->dev, "failed to scan nand at cs %d\n",
1628 cs);
1629 continue;
1630 }
1631
1632 if (nand_register(cs, mtd))
1633 continue;
1634
1635 probe_success = 1;
1636 }
1637
1638 if (!probe_success)
1639 return -ENODEV;
1640
1641 return 0;
1642 }
1643
1644 /*
1645 * Main initialization routine
1646 */
1647 void board_nand_init(void)
1648 {
1649 struct pxa3xx_nand_info *info;
1650 struct pxa3xx_nand_host *host;
1651 int ret;
1652
1653 info = kzalloc(sizeof(*info) +
1654 sizeof(*host) * CONFIG_SYS_MAX_NAND_DEVICE,
1655 GFP_KERNEL);
1656 if (!info)
1657 return;
1658
1659 ret = pxa3xx_nand_probe(info);
1660 if (ret)
1661 return;
1662 }