]> git.ipfire.org Git - thirdparty/u-boot.git/blame - drivers/mtd/nand/raw/mxs_nand.c
nand: raw: mxs_nand: Fix specific hook registration
[thirdparty/u-boot.git] / drivers / mtd / nand / raw / mxs_nand.c
CommitLineData
83d290c5 1// SPDX-License-Identifier: GPL-2.0+
0d4e8509
MV
2/*
3 * Freescale i.MX28 NAND flash driver
4 *
5 * Copyright (C) 2011 Marek Vasut <marek.vasut@gmail.com>
6 * on behalf of DENX Software Engineering GmbH
7 *
8 * Based on code from LTIB:
9 * Freescale GPMI NFC NAND Flash Driver
10 *
11 * Copyright (C) 2010 Freescale Semiconductor, Inc.
12 * Copyright (C) 2008 Embedded Alley Solutions, Inc.
29f40c07 13 * Copyright 2017-2019 NXP
0d4e8509
MV
14 */
15
651eb732 16#include <common.h>
1eb69ae4 17#include <cpu_func.h>
68748340 18#include <dm.h>
baed179d 19#include <dm/device_compat.h>
0d4e8509 20#include <malloc.h>
baed179d 21#include <mxs_nand.h>
0d4e8509
MV
22#include <asm/arch/clock.h>
23#include <asm/arch/imx-regs.h>
baed179d
SA
24#include <asm/arch/sys_proto.h>
25#include <asm/cache.h>
26#include <asm/io.h>
552a848e
SB
27#include <asm/mach-imx/regs-bch.h>
28#include <asm/mach-imx/regs-gpmi.h>
baed179d
SA
29#include <linux/errno.h>
30#include <linux/mtd/rawnand.h>
31#include <linux/sizes.h>
32#include <linux/types.h>
0d4e8509
MV
33
34#define MXS_NAND_DMA_DESCRIPTOR_COUNT 4
35
39320e72
PF
36#if defined(CONFIG_MX6) || defined(CONFIG_MX7) || defined(CONFIG_IMX8) || \
37 defined(CONFIG_IMX8M)
ae695b18
SR
38#define MXS_NAND_CHUNK_DATA_CHUNK_SIZE_SHIFT 2
39#else
40#define MXS_NAND_CHUNK_DATA_CHUNK_SIZE_SHIFT 0
41#endif
0d4e8509 42#define MXS_NAND_METADATA_SIZE 10
1fbdb706 43#define MXS_NAND_BITS_PER_ECC_LEVEL 13
2a83c95f
SA
44
45#if !defined(CONFIG_SYS_CACHELINE_SIZE) || CONFIG_SYS_CACHELINE_SIZE < 32
0d4e8509 46#define MXS_NAND_COMMAND_BUFFER_SIZE 32
2a83c95f
SA
47#else
48#define MXS_NAND_COMMAND_BUFFER_SIZE CONFIG_SYS_CACHELINE_SIZE
49#endif
0d4e8509
MV
50
51#define MXS_NAND_BCH_TIMEOUT 10000
52
0d4e8509
MV
/*
 * Placeholder (zero-filled) ECC layout. The BCH engine dictates the real
 * on-flash layout; presumably this is handed to MTD where a layout struct
 * is mandatory — registration happens outside this chunk, TODO confirm.
 */
struct nand_ecclayout fake_ecc_layout;
54
6b9408ed
MV
/*
 * Cache management functions
 *
 * The GPMI/BCH blocks DMA directly to/from data_buf, cmd_buf and oob_buf,
 * so the CPU caches must be flushed before the hardware reads a buffer and
 * invalidated before the CPU reads back DMA-written data. When the data
 * cache is disabled, the helpers compile to no-ops.
 */
#if !CONFIG_IS_ENABLED(SYS_DCACHE_OFF)
/* Write back the page-data buffer so DMA sees the CPU's latest bytes. */
static void mxs_nand_flush_data_buf(struct mxs_nand_info *info)
{
	/*
	 * NOTE(review): address is narrowed to uint32_t after the uintptr_t
	 * cast — assumes DMA buffers live below 4 GiB; verify for 64-bit
	 * (i.MX8) configurations.
	 */
	uint32_t addr = (uintptr_t)info->data_buf;

	flush_dcache_range(addr, addr + info->data_buf_size);
}

/* Discard cached copies of the page-data buffer before reading DMA output. */
static void mxs_nand_inval_data_buf(struct mxs_nand_info *info)
{
	uint32_t addr = (uintptr_t)info->data_buf;

	invalidate_dcache_range(addr, addr + info->data_buf_size);
}

/* Write back the queued command bytes before the command DMA descriptor runs. */
static void mxs_nand_flush_cmd_buf(struct mxs_nand_info *info)
{
	uint32_t addr = (uintptr_t)info->cmd_buf;

	flush_dcache_range(addr, addr + MXS_NAND_COMMAND_BUFFER_SIZE);
}
#else
static inline void mxs_nand_flush_data_buf(struct mxs_nand_info *info) {}
static inline void mxs_nand_inval_data_buf(struct mxs_nand_info *info) {}
static inline void mxs_nand_flush_cmd_buf(struct mxs_nand_info *info) {}
#endif
84
0d4e8509
MV
85static struct mxs_dma_desc *mxs_nand_get_dma_desc(struct mxs_nand_info *info)
86{
87 struct mxs_dma_desc *desc;
88
89 if (info->desc_index >= MXS_NAND_DMA_DESCRIPTOR_COUNT) {
90 printf("MXS NAND: Too many DMA descriptors requested\n");
91 return NULL;
92 }
93
94 desc = info->desc[info->desc_index];
95 info->desc_index++;
96
97 return desc;
98}
99
100static void mxs_nand_return_dma_descs(struct mxs_nand_info *info)
101{
102 int i;
103 struct mxs_dma_desc *desc;
104
105 for (i = 0; i < info->desc_index; i++) {
106 desc = info->desc[i];
107 memset(desc, 0, sizeof(struct mxs_dma_desc));
108 desc->address = (dma_addr_t)desc;
109 }
110
111 info->desc_index = 0;
112}
113
0d4e8509
MV
114static uint32_t mxs_nand_aux_status_offset(void)
115{
116 return (MXS_NAND_METADATA_SIZE + 0x3) & ~0x3;
117}
118
baed179d
SA
/*
 * Check whether, at the current geometry, the factory bad-block marker
 * (byte 0 of the OOB area of the physical page) lands inside a data chunk
 * rather than inside an ECC parity region.
 *
 * @geo:       BCH geometry under evaluation (chunk sizes, gf_len, strength)
 * @mtd:       owning MTD device (supplies writesize)
 * @chunk_num: on success, set to the 1-based index of the chunk holding
 *             the marker
 *
 * Returns true (and fills *chunk_num) if the marker falls in a data chunk,
 * false otherwise or if chunk0/chunkn sizes differ (this computation only
 * holds for uniform chunk sizes).
 */
static inline bool mxs_nand_bbm_in_data_chunk(struct bch_geometry *geo,
					      struct mtd_info *mtd,
					      unsigned int *chunk_num)
{
	unsigned int i, j;

	if (geo->ecc_chunk0_size != geo->ecc_chunkn_size) {
		dev_err(mtd->dev, "The size of chunk0 must equal to chunkn\n");
		return false;
	}

	/* i: number of whole (data + parity) units before the marker bit. */
	i = (mtd->writesize * 8 - MXS_NAND_METADATA_SIZE * 8) /
		(geo->gf_len * geo->ecc_strength +
		 geo->ecc_chunkn_size * 8);

	/* j: bit position of the marker within unit i. */
	j = (mtd->writesize * 8 - MXS_NAND_METADATA_SIZE * 8) -
		(geo->gf_len * geo->ecc_strength +
		 geo->ecc_chunkn_size * 8) * i;

	/* Marker is in the data portion of unit i (not its parity tail). */
	if (j < geo->ecc_chunkn_size * 8) {
		*chunk_num = i + 1;
		dev_dbg(mtd->dev, "Set ecc to %d and bbm in chunk %d\n",
			geo->ecc_strength, *chunk_num);
		return true;
	}

	return false;
}
147
984df7ad 148static inline int mxs_nand_calc_ecc_layout_by_info(struct bch_geometry *geo,
62754450
SA
149 struct mtd_info *mtd,
150 unsigned int ecc_strength,
151 unsigned int ecc_step)
984df7ad
SA
152{
153 struct nand_chip *chip = mtd_to_nand(mtd);
502bdc6b 154 struct mxs_nand_info *nand_info = nand_get_controller_data(chip);
616f03da 155 unsigned int block_mark_bit_offset;
984df7ad 156
62754450 157 switch (ecc_step) {
984df7ad
SA
158 case SZ_512:
159 geo->gf_len = 13;
160 break;
161 case SZ_1K:
162 geo->gf_len = 14;
163 break;
164 default:
165 return -EINVAL;
166 }
167
616f03da
YL
168 geo->ecc_chunk0_size = ecc_step;
169 geo->ecc_chunkn_size = ecc_step;
62754450 170 geo->ecc_strength = round_up(ecc_strength, 2);
984df7ad
SA
171
172 /* Keep the C >= O */
616f03da 173 if (geo->ecc_chunkn_size < mtd->oobsize)
984df7ad
SA
174 return -EINVAL;
175
502bdc6b 176 if (geo->ecc_strength > nand_info->max_ecc_strength_supported)
984df7ad
SA
177 return -EINVAL;
178
616f03da
YL
179 geo->ecc_chunk_count = mtd->writesize / geo->ecc_chunkn_size;
180
181 /* For bit swap. */
182 block_mark_bit_offset = mtd->writesize * 8 -
183 (geo->ecc_strength * geo->gf_len * (geo->ecc_chunk_count - 1)
184 + MXS_NAND_METADATA_SIZE * 8);
185
186 geo->block_mark_byte_offset = block_mark_bit_offset / 8;
187 geo->block_mark_bit_offset = block_mark_bit_offset % 8;
984df7ad
SA
188
189 return 0;
190}
191
616f03da 192static inline int mxs_nand_legacy_calc_ecc_layout(struct bch_geometry *geo,
28897e8d 193 struct mtd_info *mtd)
0d4e8509 194{
502bdc6b
SA
195 struct nand_chip *chip = mtd_to_nand(mtd);
196 struct mxs_nand_info *nand_info = nand_get_controller_data(chip);
616f03da 197 unsigned int block_mark_bit_offset;
ed48490f 198 int corr, ds_corr;
502bdc6b 199
28897e8d
SA
200 /* The default for the length of Galois Field. */
201 geo->gf_len = 13;
202
203 /* The default for chunk size. */
616f03da
YL
204 geo->ecc_chunk0_size = 512;
205 geo->ecc_chunkn_size = 512;
28897e8d 206
616f03da 207 if (geo->ecc_chunkn_size < mtd->oobsize) {
28897e8d 208 geo->gf_len = 14;
616f03da
YL
209 geo->ecc_chunk0_size *= 2;
210 geo->ecc_chunkn_size *= 2;
28897e8d
SA
211 }
212
616f03da 213 geo->ecc_chunk_count = mtd->writesize / geo->ecc_chunkn_size;
28897e8d 214
28897e8d
SA
215 /*
216 * Determine the ECC layout with the formula:
217 * ECC bits per chunk = (total page spare data bits) /
218 * (bits per ECC level) / (chunks per page)
219 * where:
220 * total page spare data bits =
221 * (page oob size - meta data size) * (bits per byte)
222 */
223 geo->ecc_strength = ((mtd->oobsize - MXS_NAND_METADATA_SIZE) * 8)
224 / (geo->gf_len * geo->ecc_chunk_count);
225
984df7ad 226 geo->ecc_strength = min(round_down(geo->ecc_strength, 2),
502bdc6b 227 nand_info->max_ecc_strength_supported);
28897e8d 228
ed48490f
HX
229 /* check ecc strength, same as nand_ecc_is_strong_enough() did*/
230 if (chip->ecc_step_ds) {
231 corr = mtd->writesize * geo->ecc_strength /
232 geo->ecc_chunkn_size;
233 ds_corr = mtd->writesize * chip->ecc_strength_ds /
234 chip->ecc_step_ds;
235 if (corr < ds_corr ||
236 geo->ecc_strength < chip->ecc_strength_ds)
237 return -EINVAL;
238 }
239
616f03da
YL
240 block_mark_bit_offset = mtd->writesize * 8 -
241 (geo->ecc_strength * geo->gf_len * (geo->ecc_chunk_count - 1)
242 + MXS_NAND_METADATA_SIZE * 8);
243
244 geo->block_mark_byte_offset = block_mark_bit_offset / 8;
245 geo->block_mark_bit_offset = block_mark_bit_offset % 8;
246
247 return 0;
248}
249
/*
 * Geometry calculation for chips whose OOB is too large for the legacy
 * scheme: start from the datasheet-minimum strength and search upward for
 * a strength that keeps the bad-block mark inside a data chunk. If none
 * fits, fall back to the minimum strength with a modified layout that
 * dedicates an extra ECC chunk to the metadata (ecc_for_meta).
 *
 * Returns 0 on success, -EINVAL if the chip advertises no ECC requirement
 * or the OOB cannot hold the extra metadata ECC chunk.
 */
static inline int mxs_nand_calc_ecc_for_large_oob(struct bch_geometry *geo,
						  struct mtd_info *mtd)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	struct mxs_nand_info *nand_info = nand_get_controller_data(chip);
	unsigned int block_mark_bit_offset;
	unsigned int max_ecc;
	unsigned int bbm_chunk;
	unsigned int i;

	/* sanity check for the minimum ecc nand required */
	if (!(chip->ecc_strength_ds > 0 && chip->ecc_step_ds > 0))
		return -EINVAL;
	geo->ecc_strength = chip->ecc_strength_ds;

	/* calculate the maximum ecc platform can support*/
	geo->gf_len = 14;
	geo->ecc_chunk0_size = 1024;
	geo->ecc_chunkn_size = 1024;
	geo->ecc_chunk_count = mtd->writesize / geo->ecc_chunkn_size;
	max_ecc = ((mtd->oobsize - MXS_NAND_METADATA_SIZE) * 8)
		  / (geo->gf_len * geo->ecc_chunk_count);
	max_ecc = min(round_down(max_ecc, 2),
		      nand_info->max_ecc_strength_supported);


	/* search a supported ecc strength that makes bbm */
	/* located in data chunk */
	geo->ecc_strength = chip->ecc_strength_ds;
	while (!(geo->ecc_strength > max_ecc)) {
		if (mxs_nand_bbm_in_data_chunk(geo, mtd, &bbm_chunk))
			break;
		geo->ecc_strength += 2;	/* strengths are even-valued */
	}

	/* if none of them works, keep using the minimum ecc */
	/* nand required but changing ecc page layout */
	if (geo->ecc_strength > max_ecc) {
		geo->ecc_strength = chip->ecc_strength_ds;
		/* add extra ecc for meta data */
		geo->ecc_chunk0_size = 0;
		geo->ecc_chunk_count = (mtd->writesize / geo->ecc_chunkn_size) + 1;
		geo->ecc_for_meta = 1;
		/* check if oob can afford this extra ecc chunk */
		if (mtd->oobsize * 8 < MXS_NAND_METADATA_SIZE * 8 +
		    geo->gf_len * geo->ecc_strength
		    * geo->ecc_chunk_count) {
			printf("unsupported NAND chip with new layout\n");
			return -EINVAL;
		}

		/* calculate in which chunk bbm located */
		bbm_chunk = (mtd->writesize * 8 - MXS_NAND_METADATA_SIZE * 8 -
			     geo->gf_len * geo->ecc_strength) /
			     (geo->gf_len * geo->ecc_strength +
			      geo->ecc_chunkn_size * 8) + 1;
	}

	/* calculate the number of ecc chunk behind the bbm */
	i = (mtd->writesize / geo->ecc_chunkn_size) - bbm_chunk + 1;

	block_mark_bit_offset = mtd->writesize * 8 -
		(geo->ecc_strength * geo->gf_len * (geo->ecc_chunk_count - i)
				+ MXS_NAND_METADATA_SIZE * 8);

	geo->block_mark_byte_offset = block_mark_bit_offset / 8;
	geo->block_mark_bit_offset = block_mark_bit_offset % 8;

	return 0;
}
320
321/*
322 * Wait for BCH complete IRQ and clear the IRQ
323 */
931747e5 324static int mxs_nand_wait_for_bch_complete(struct mxs_nand_info *nand_info)
0d4e8509 325{
0d4e8509
MV
326 int timeout = MXS_NAND_BCH_TIMEOUT;
327 int ret;
328
931747e5 329 ret = mxs_wait_mask_set(&nand_info->bch_regs->hw_bch_ctrl_reg,
0d4e8509
MV
330 BCH_CTRL_COMPLETE_IRQ, timeout);
331
931747e5 332 writel(BCH_CTRL_COMPLETE_IRQ, &nand_info->bch_regs->hw_bch_ctrl_clr);
0d4e8509
MV
333
334 return ret;
335}
336
/*
 * This is the function that we install in the cmd_ctrl function pointer of the
 * owning struct nand_chip. The only functions in the reference implementation
 * that use these functions pointers are cmdfunc and select_chip.
 *
 * In this driver, we implement our own select_chip, so this function will only
 * be called by the reference implementation's cmdfunc. For this reason, we can
 * ignore the chip enable bit and concentrate only on sending bytes to the NAND
 * Flash.
 */
static void mxs_nand_cmd_ctrl(struct mtd_info *mtd, int data, unsigned int ctrl)
{
	struct nand_chip *nand = mtd_to_nand(mtd);
	struct mxs_nand_info *nand_info = nand_get_controller_data(nand);
	struct mxs_dma_desc *d;
	uint32_t channel = MXS_DMA_CHANNEL_AHB_APBH_GPMI0 + nand_info->cur_chip;
	int ret;

	/*
	 * If this condition is true, something is _VERY_ wrong in MTD
	 * subsystem!
	 */
	if (nand_info->cmd_queue_len == MXS_NAND_COMMAND_BUFFER_SIZE) {
		printf("MXS NAND: Command queue too long\n");
		return;
	}

	/*
	 * Every operation begins with a command byte and a series of zero or
	 * more address bytes. These are distinguished by either the Address
	 * Latch Enable (ALE) or Command Latch Enable (CLE) signals being
	 * asserted. When MTD is ready to execute the command, it will
	 * deasert both latch enables.
	 *
	 * Rather than run a separate DMA operation for every single byte, we
	 * queue them up and run a single DMA operation for the entire series
	 * of command and data bytes.
	 */
	if (ctrl & (NAND_ALE | NAND_CLE)) {
		if (data != NAND_CMD_NONE)
			nand_info->cmd_buf[nand_info->cmd_queue_len++] = data;
		return;
	}

	/*
	 * If control arrives here, MTD has deasserted both the ALE and CLE,
	 * which means it's ready to run an operation. Check if we have any
	 * bytes to send.
	 */
	if (nand_info->cmd_queue_len == 0)
		return;

	/* Compile the DMA descriptor -- a descriptor that sends command. */
	d = mxs_nand_get_dma_desc(nand_info);
	d->cmd.data =
		MXS_DMA_DESC_COMMAND_DMA_READ | MXS_DMA_DESC_IRQ |
		MXS_DMA_DESC_CHAIN | MXS_DMA_DESC_DEC_SEM |
		MXS_DMA_DESC_WAIT4END | (3 << MXS_DMA_DESC_PIO_WORDS_OFFSET) |
		(nand_info->cmd_queue_len << MXS_DMA_DESC_BYTES_OFFSET);

	d->cmd.address = (dma_addr_t)nand_info->cmd_buf;

	/* CLE write with auto-increment: first byte is the command, rest are addresses. */
	d->cmd.pio_words[0] =
		GPMI_CTRL0_COMMAND_MODE_WRITE |
		GPMI_CTRL0_WORD_LENGTH |
		(nand_info->cur_chip << GPMI_CTRL0_CS_OFFSET) |
		GPMI_CTRL0_ADDRESS_NAND_CLE |
		GPMI_CTRL0_ADDRESS_INCREMENT |
		nand_info->cmd_queue_len;

	mxs_dma_desc_append(channel, d);

	/* Flush caches */
	/* Write back the queued bytes before the DMA engine fetches them. */
	mxs_nand_flush_cmd_buf(nand_info);

	/* Execute the DMA chain. */
	ret = mxs_dma_go(channel);
	if (ret)
		printf("MXS NAND: Error sending command\n");

	mxs_nand_return_dma_descs(nand_info);

	/* Reset the command queue. */
	nand_info->cmd_queue_len = 0;
}
422
423/*
424 * Test if the NAND flash is ready.
425 */
426static int mxs_nand_device_ready(struct mtd_info *mtd)
427{
17cb4b8f
SW
428 struct nand_chip *chip = mtd_to_nand(mtd);
429 struct mxs_nand_info *nand_info = nand_get_controller_data(chip);
0d4e8509
MV
430 uint32_t tmp;
431
931747e5 432 tmp = readl(&nand_info->gpmi_regs->hw_gpmi_stat);
0d4e8509
MV
433 tmp >>= (GPMI_STAT_READY_BUSY_OFFSET + nand_info->cur_chip);
434
435 return tmp & 1;
436}
437
438/*
439 * Select the NAND chip.
440 */
441static void mxs_nand_select_chip(struct mtd_info *mtd, int chip)
442{
17cb4b8f
SW
443 struct nand_chip *nand = mtd_to_nand(mtd);
444 struct mxs_nand_info *nand_info = nand_get_controller_data(nand);
0d4e8509
MV
445
446 nand_info->cur_chip = chip;
447}
448
/*
 * Handle block mark swapping.
 *
 * Note that, when this function is called, it doesn't know whether it's
 * swapping the block mark, or swapping it *back* -- but it doesn't matter
 * because the operation is the same: exchange oob_buf[0] with the
 * (bit-unaligned) byte of data_buf that the ECC view places at the
 * physical block-mark position.
 */
static void mxs_nand_swap_block_mark(struct bch_geometry *geo,
				     uint8_t *data_buf, uint8_t *oob_buf)
{
	uint32_t bit_offset = geo->block_mark_bit_offset;
	uint32_t buf_offset = geo->block_mark_byte_offset;

	uint32_t src;
	uint32_t dst;

	/*
	 * Get the byte from the data area that overlays the block mark. Since
	 * the ECC engine applies its own view to the bits in the page, the
	 * physical block mark won't (in general) appear on a byte boundary in
	 * the data.
	 */
	src = data_buf[buf_offset] >> bit_offset;
	src |= data_buf[buf_offset + 1] << (8 - bit_offset);

	dst = oob_buf[0];

	oob_buf[0] = src;

	/* Clear the bit span that overlays the mark, then splice in dst. */
	data_buf[buf_offset] &= ~(0xff << bit_offset);
	data_buf[buf_offset + 1] &= 0xff << bit_offset;

	data_buf[buf_offset] |= dst << bit_offset;
	data_buf[buf_offset + 1] |= dst >> (8 - bit_offset);
}
484
485/*
486 * Read data from NAND.
487 */
488static void mxs_nand_read_buf(struct mtd_info *mtd, uint8_t *buf, int length)
489{
17cb4b8f
SW
490 struct nand_chip *nand = mtd_to_nand(mtd);
491 struct mxs_nand_info *nand_info = nand_get_controller_data(nand);
0d4e8509
MV
492 struct mxs_dma_desc *d;
493 uint32_t channel = MXS_DMA_CHANNEL_AHB_APBH_GPMI0 + nand_info->cur_chip;
494 int ret;
495
496 if (length > NAND_MAX_PAGESIZE) {
497 printf("MXS NAND: DMA buffer too big\n");
498 return;
499 }
500
501 if (!buf) {
502 printf("MXS NAND: DMA buffer is NULL\n");
503 return;
504 }
505
506 /* Compile the DMA descriptor - a descriptor that reads data. */
507 d = mxs_nand_get_dma_desc(nand_info);
508 d->cmd.data =
509 MXS_DMA_DESC_COMMAND_DMA_WRITE | MXS_DMA_DESC_IRQ |
510 MXS_DMA_DESC_DEC_SEM | MXS_DMA_DESC_WAIT4END |
511 (1 << MXS_DMA_DESC_PIO_WORDS_OFFSET) |
512 (length << MXS_DMA_DESC_BYTES_OFFSET);
513
514 d->cmd.address = (dma_addr_t)nand_info->data_buf;
515
516 d->cmd.pio_words[0] =
517 GPMI_CTRL0_COMMAND_MODE_READ |
518 GPMI_CTRL0_WORD_LENGTH |
519 (nand_info->cur_chip << GPMI_CTRL0_CS_OFFSET) |
520 GPMI_CTRL0_ADDRESS_NAND_DATA |
521 length;
522
523 mxs_dma_desc_append(channel, d);
524
525 /*
526 * A DMA descriptor that waits for the command to end and the chip to
527 * become ready.
528 *
529 * I think we actually should *not* be waiting for the chip to become
530 * ready because, after all, we don't care. I think the original code
531 * did that and no one has re-thought it yet.
532 */
533 d = mxs_nand_get_dma_desc(nand_info);
534 d->cmd.data =
535 MXS_DMA_DESC_COMMAND_NO_DMAXFER | MXS_DMA_DESC_IRQ |
536 MXS_DMA_DESC_NAND_WAIT_4_READY | MXS_DMA_DESC_DEC_SEM |
5263a02e 537 MXS_DMA_DESC_WAIT4END | (1 << MXS_DMA_DESC_PIO_WORDS_OFFSET);
0d4e8509
MV
538
539 d->cmd.address = 0;
540
541 d->cmd.pio_words[0] =
542 GPMI_CTRL0_COMMAND_MODE_WAIT_FOR_READY |
543 GPMI_CTRL0_WORD_LENGTH |
544 (nand_info->cur_chip << GPMI_CTRL0_CS_OFFSET) |
545 GPMI_CTRL0_ADDRESS_NAND_DATA;
546
547 mxs_dma_desc_append(channel, d);
548
ecfb8768
PF
549 /* Invalidate caches */
550 mxs_nand_inval_data_buf(nand_info);
551
0d4e8509
MV
552 /* Execute the DMA chain. */
553 ret = mxs_dma_go(channel);
554 if (ret) {
555 printf("MXS NAND: DMA read error\n");
556 goto rtn;
557 }
558
6b9408ed
MV
559 /* Invalidate caches */
560 mxs_nand_inval_data_buf(nand_info);
561
0d4e8509
MV
562 memcpy(buf, nand_info->data_buf, length);
563
564rtn:
565 mxs_nand_return_dma_descs(nand_info);
566}
567
/*
 * Write data to NAND.
 *
 * Raw (non-ECC) write: `length` bytes are staged in the bounce buffer,
 * flushed from cache, and streamed to the flash by a single DMA
 * descriptor. Errors are reported on the console only (void hook).
 */
static void mxs_nand_write_buf(struct mtd_info *mtd, const uint8_t *buf,
				int length)
{
	struct nand_chip *nand = mtd_to_nand(mtd);
	struct mxs_nand_info *nand_info = nand_get_controller_data(nand);
	struct mxs_dma_desc *d;
	uint32_t channel = MXS_DMA_CHANNEL_AHB_APBH_GPMI0 + nand_info->cur_chip;
	int ret;

	if (length > NAND_MAX_PAGESIZE) {
		printf("MXS NAND: DMA buffer too big\n");
		return;
	}

	if (!buf) {
		printf("MXS NAND: DMA buffer is NULL\n");
		return;
	}

	memcpy(nand_info->data_buf, buf, length);

	/* Compile the DMA descriptor - a descriptor that writes data. */
	d = mxs_nand_get_dma_desc(nand_info);
	d->cmd.data =
		MXS_DMA_DESC_COMMAND_DMA_READ | MXS_DMA_DESC_IRQ |
		MXS_DMA_DESC_DEC_SEM | MXS_DMA_DESC_WAIT4END |
		(1 << MXS_DMA_DESC_PIO_WORDS_OFFSET) |
		(length << MXS_DMA_DESC_BYTES_OFFSET);

	d->cmd.address = (dma_addr_t)nand_info->data_buf;

	d->cmd.pio_words[0] =
		GPMI_CTRL0_COMMAND_MODE_WRITE |
		GPMI_CTRL0_WORD_LENGTH |
		(nand_info->cur_chip << GPMI_CTRL0_CS_OFFSET) |
		GPMI_CTRL0_ADDRESS_NAND_DATA |
		length;

	mxs_dma_desc_append(channel, d);

	/* Flush caches */
	/* Write back the staged bytes before the DMA engine fetches them. */
	mxs_nand_flush_data_buf(nand_info);

	/* Execute the DMA chain. */
	ret = mxs_dma_go(channel);
	if (ret)
		printf("MXS NAND: DMA write error\n");

	mxs_nand_return_dma_descs(nand_info);
}
621
622/*
623 * Read a single byte from NAND.
624 */
625static uint8_t mxs_nand_read_byte(struct mtd_info *mtd)
626{
627 uint8_t buf;
628 mxs_nand_read_buf(mtd, &buf, 1);
629 return buf;
630}
631
552c8827
PF
/*
 * Decide whether a chunk the BCH engine flagged as uncorrectable is in
 * fact an erased (all-0xff) page with a few bitflips.
 *
 * First counts zero bits in the suspect chunk of the ECC-decoded buffer;
 * then re-reads the whole page raw and counts zero bits again. If both
 * counts stay within `threshold` (half the GF length, capped at the ECC
 * strength), the page is treated as erased: `buf` is filled with 0xff,
 * the flips are accounted as corrected, and true is returned. Returns
 * false as soon as either count exceeds the threshold.
 *
 * Note: the raw re-read via nand->read_buf overwrites `buf` with raw page
 * data on the false path as well.
 */
static bool mxs_nand_erased_page(struct mtd_info *mtd, struct nand_chip *nand,
				 u8 *buf, int chunk, int page)
{
	struct mxs_nand_info *nand_info = nand_get_controller_data(nand);
	struct bch_geometry *geo = &nand_info->bch_geometry;
	unsigned int flip_bits = 0, flip_bits_noecc = 0;
	unsigned int threshold;
	unsigned int base = geo->ecc_chunkn_size * chunk;
	u32 *dma_buf = (u32 *)buf;
	int i;

	threshold = geo->gf_len / 2;
	if (threshold > geo->ecc_strength)
		threshold = geo->ecc_strength;

	/* Count zero bits in the decoded chunk. */
	for (i = 0; i < geo->ecc_chunkn_size; i++) {
		flip_bits += hweight8(~buf[base + i]);
		if (flip_bits > threshold)
			return false;
	}

	/* Re-read the page without ECC and re-count over the full page. */
	nand->cmdfunc(mtd, NAND_CMD_READ0, 0, page);
	nand->read_buf(mtd, buf, mtd->writesize);

	for (i = 0; i < mtd->writesize / 4; i++) {
		flip_bits_noecc += hweight32(~dma_buf[i]);
		if (flip_bits_noecc > threshold)
			return false;
	}

	mtd->ecc_stats.corrected += flip_bits;

	/* Present the page to the caller as cleanly erased. */
	memset(buf, 0xff, mtd->writesize);

	printf("The page(%d) is an erased page(%d,%d,%d,%d).\n", page, chunk, threshold, flip_bits, flip_bits_noecc);

	return true;
}
670
0d4e8509
MV
/*
 * Read a page from NAND.
 *
 * ECC-based page read: a four-descriptor DMA chain (wait-ready, BCH
 * decode+read, BCH disable, IRQ/unlock) moves the page into the bounce
 * buffers, then the per-chunk BCH status bytes are folded into MTD's ECC
 * statistics. Suspected-uncorrectable chunks (status 0xfe) are given a
 * second chance via the erased-page heuristic.
 */
static int mxs_nand_ecc_read_page(struct mtd_info *mtd, struct nand_chip *nand,
					uint8_t *buf, int oob_required,
					int page)
{
	struct mxs_nand_info *nand_info = nand_get_controller_data(nand);
	struct bch_geometry *geo = &nand_info->bch_geometry;
	struct mxs_bch_regs *bch_regs = nand_info->bch_regs;
	struct mxs_dma_desc *d;
	uint32_t channel = MXS_DMA_CHANNEL_AHB_APBH_GPMI0 + nand_info->cur_chip;
	uint32_t corrected = 0, failed = 0;
	uint8_t	*status;
	int i, ret;
	int flag = 0;

	/* Compile the DMA descriptor - wait for ready. */
	d = mxs_nand_get_dma_desc(nand_info);
	d->cmd.data =
		MXS_DMA_DESC_COMMAND_NO_DMAXFER | MXS_DMA_DESC_CHAIN |
		MXS_DMA_DESC_NAND_WAIT_4_READY | MXS_DMA_DESC_WAIT4END |
		(1 << MXS_DMA_DESC_PIO_WORDS_OFFSET);

	d->cmd.address = 0;

	d->cmd.pio_words[0] =
		GPMI_CTRL0_COMMAND_MODE_WAIT_FOR_READY |
		GPMI_CTRL0_WORD_LENGTH |
		(nand_info->cur_chip << GPMI_CTRL0_CS_OFFSET) |
		GPMI_CTRL0_ADDRESS_NAND_DATA;

	mxs_dma_desc_append(channel, d);

	/* Compile the DMA descriptor - enable the BCH block and read. */
	d = mxs_nand_get_dma_desc(nand_info);
	d->cmd.data =
		MXS_DMA_DESC_COMMAND_NO_DMAXFER | MXS_DMA_DESC_CHAIN |
		MXS_DMA_DESC_WAIT4END | (6 << MXS_DMA_DESC_PIO_WORDS_OFFSET);

	d->cmd.address = 0;

	d->cmd.pio_words[0] =
		GPMI_CTRL0_COMMAND_MODE_READ |
		GPMI_CTRL0_WORD_LENGTH |
		(nand_info->cur_chip << GPMI_CTRL0_CS_OFFSET) |
		GPMI_CTRL0_ADDRESS_NAND_DATA |
		(mtd->writesize + mtd->oobsize);
	d->cmd.pio_words[1] = 0;
	d->cmd.pio_words[2] =
		GPMI_ECCCTRL_ENABLE_ECC |
		GPMI_ECCCTRL_ECC_CMD_DECODE |
		GPMI_ECCCTRL_BUFFER_MASK_BCH_PAGE;
	d->cmd.pio_words[3] = mtd->writesize + mtd->oobsize;
	d->cmd.pio_words[4] = (dma_addr_t)nand_info->data_buf;
	d->cmd.pio_words[5] = (dma_addr_t)nand_info->oob_buf;

	if (nand_info->en_randomizer) {
		/* Seed the de-randomizer with the page number (0-255). */
		d->cmd.pio_words[2] |= GPMI_ECCCTRL_RANDOMIZER_ENABLE |
				       GPMI_ECCCTRL_RANDOMIZER_TYPE2;
		d->cmd.pio_words[3] |= (page % 256) << 16;
	}

	mxs_dma_desc_append(channel, d);

	/* Compile the DMA descriptor - disable the BCH block. */
	d = mxs_nand_get_dma_desc(nand_info);
	d->cmd.data =
		MXS_DMA_DESC_COMMAND_NO_DMAXFER | MXS_DMA_DESC_CHAIN |
		MXS_DMA_DESC_NAND_WAIT_4_READY | MXS_DMA_DESC_WAIT4END |
		(3 << MXS_DMA_DESC_PIO_WORDS_OFFSET);

	d->cmd.address = 0;

	d->cmd.pio_words[0] =
		GPMI_CTRL0_COMMAND_MODE_WAIT_FOR_READY |
		GPMI_CTRL0_WORD_LENGTH |
		(nand_info->cur_chip << GPMI_CTRL0_CS_OFFSET) |
		GPMI_CTRL0_ADDRESS_NAND_DATA |
		(mtd->writesize + mtd->oobsize);
	d->cmd.pio_words[1] = 0;
	d->cmd.pio_words[2] = 0;

	mxs_dma_desc_append(channel, d);

	/* Compile the DMA descriptor - deassert the NAND lock and interrupt. */
	d = mxs_nand_get_dma_desc(nand_info);
	d->cmd.data =
		MXS_DMA_DESC_COMMAND_NO_DMAXFER | MXS_DMA_DESC_IRQ |
		MXS_DMA_DESC_DEC_SEM;

	d->cmd.address = 0;

	mxs_dma_desc_append(channel, d);

	/* Invalidate caches */
	/* Pre-DMA invalidate of the bounce buffer. */
	mxs_nand_inval_data_buf(nand_info);

	/* Execute the DMA chain. */
	ret = mxs_dma_go(channel);
	if (ret) {
		printf("MXS NAND: DMA read error\n");
		goto rtn;
	}

	ret = mxs_nand_wait_for_bch_complete(nand_info);
	if (ret) {
		printf("MXS NAND: BCH read timeout\n");
		goto rtn;
	}

	mxs_nand_return_dma_descs(nand_info);

	/* Invalidate caches */
	/* Post-DMA invalidate so the CPU sees the decoded page. */
	mxs_nand_inval_data_buf(nand_info);

	/* Read DMA completed, now do the mark swapping. */
	mxs_nand_swap_block_mark(geo, nand_info->data_buf, nand_info->oob_buf);

	/* Loop over status bytes, accumulating ECC status. */
	status = nand_info->oob_buf + mxs_nand_aux_status_offset();
	for (i = 0; i < geo->ecc_chunk_count; i++) {
		/* 0x00: chunk decoded with no errors. */
		if (status[i] == 0x00)
			continue;

		/*
		 * 0xff: chunk looked erased to the BCH block. On the listed
		 * SoCs, consult the debug register to detect a false-erased
		 * page and, if so, force the returned data to 0xff below.
		 */
		if (status[i] == 0xff) {
			if (!nand_info->en_randomizer &&
			    (is_mx6dqp() || is_mx7() || is_mx6ul() ||
			     is_imx8() || is_imx8m()))
				if (readl(&bch_regs->hw_bch_debug1))
					flag = 1;
			continue;
		}

		/* 0xfe: uncorrectable — unless it is really an erased page. */
		if (status[i] == 0xfe) {
			if (mxs_nand_erased_page(mtd, nand,
						 nand_info->data_buf, i, page))
				break;
			failed++;
			continue;
		}

		/* Otherwise the status byte is the number of corrected bits. */
		corrected += status[i];
	}

	/* Propagate ECC status to the owning MTD. */
	mtd->ecc_stats.failed += failed;
	mtd->ecc_stats.corrected += corrected;

	/*
	 * It's time to deliver the OOB bytes. See mxs_nand_ecc_read_oob() for
	 * details about our policy for delivering the OOB.
	 *
	 * We fill the caller's buffer with set bits, and then copy the block
	 * mark to the caller's buffer. Note that, if block mark swapping was
	 * necessary, it has already been done, so we can rely on the first
	 * byte of the auxiliary buffer to contain the block mark.
	 */
	memset(nand->oob_poi, 0xff, mtd->oobsize);

	nand->oob_poi[0] = nand_info->oob_buf[0];

	memcpy(buf, nand_info->data_buf, mtd->writesize);

	if (flag)
		memset(buf, 0xff, mtd->writesize);
rtn:
	/* No-op if the descriptors were already returned above. */
	mxs_nand_return_dma_descs(nand_info);

	return ret;
}
842
/*
 * Write a page to NAND.
 *
 * ECC-based page write: stages data and OOB in the bounce buffers, swaps
 * the block mark into its physical position, then runs a single DMA
 * descriptor that streams the page through the BCH encoder.
 */
static int mxs_nand_ecc_write_page(struct mtd_info *mtd,
				struct nand_chip *nand, const uint8_t *buf,
				int oob_required, int page)
{
	struct mxs_nand_info *nand_info = nand_get_controller_data(nand);
	struct bch_geometry *geo = &nand_info->bch_geometry;
	struct mxs_dma_desc *d;
	uint32_t channel = MXS_DMA_CHANNEL_AHB_APBH_GPMI0 + nand_info->cur_chip;
	int ret;

	memcpy(nand_info->data_buf, buf, mtd->writesize);
	memcpy(nand_info->oob_buf, nand->oob_poi, mtd->oobsize);

	/* Handle block mark swapping. */
	mxs_nand_swap_block_mark(geo, nand_info->data_buf, nand_info->oob_buf);

	/* Compile the DMA descriptor - write data. */
	d = mxs_nand_get_dma_desc(nand_info);
	d->cmd.data =
		MXS_DMA_DESC_COMMAND_NO_DMAXFER | MXS_DMA_DESC_IRQ |
		MXS_DMA_DESC_DEC_SEM | MXS_DMA_DESC_WAIT4END |
		(6 << MXS_DMA_DESC_PIO_WORDS_OFFSET);

	d->cmd.address = 0;

	d->cmd.pio_words[0] =
		GPMI_CTRL0_COMMAND_MODE_WRITE |
		GPMI_CTRL0_WORD_LENGTH |
		(nand_info->cur_chip << GPMI_CTRL0_CS_OFFSET) |
		GPMI_CTRL0_ADDRESS_NAND_DATA;
	d->cmd.pio_words[1] = 0;
	d->cmd.pio_words[2] =
		GPMI_ECCCTRL_ENABLE_ECC |
		GPMI_ECCCTRL_ECC_CMD_ENCODE |
		GPMI_ECCCTRL_BUFFER_MASK_BCH_PAGE;
	d->cmd.pio_words[3] = (mtd->writesize + mtd->oobsize);
	d->cmd.pio_words[4] = (dma_addr_t)nand_info->data_buf;
	d->cmd.pio_words[5] = (dma_addr_t)nand_info->oob_buf;

	if (nand_info->en_randomizer) {
		d->cmd.pio_words[2] |= GPMI_ECCCTRL_RANDOMIZER_ENABLE |
				       GPMI_ECCCTRL_RANDOMIZER_TYPE2;
		/*
		 * Write NAND page number needed to be randomized
		 * to GPMI_ECCCOUNT register.
		 *
		 * The value is between 0-255. For additional details
		 * check 9.6.6.4 of i.MX7D Applications Processor reference
		 */
		d->cmd.pio_words[3] |= (page % 256) << 16;
	}

	mxs_dma_desc_append(channel, d);

	/* Flush caches */
	/* Write back the staged page before the DMA engine fetches it. */
	mxs_nand_flush_data_buf(nand_info);

	/* Execute the DMA chain. */
	ret = mxs_dma_go(channel);
	if (ret) {
		printf("MXS NAND: DMA write error\n");
		goto rtn;
	}

	ret = mxs_nand_wait_for_bch_complete(nand_info);
	if (ret) {
		printf("MXS NAND: BCH write timeout\n");
		goto rtn;
	}

rtn:
	mxs_nand_return_dma_descs(nand_info);
	/*
	 * NOTE(review): errors are only printed; 0 is returned even on DMA/
	 * BCH failure (ret is discarded) — presumably deliberate for this
	 * hook's contract, TODO confirm against the rawnand core.
	 */
	return 0;
}
920
921/*
922 * Read OOB from NAND.
923 *
924 * This function is a veneer that replaces the function originally installed by
925 * the NAND Flash MTD code.
926 */
927static int mxs_nand_hook_read_oob(struct mtd_info *mtd, loff_t from,
928 struct mtd_oob_ops *ops)
929{
17cb4b8f
SW
930 struct nand_chip *chip = mtd_to_nand(mtd);
931 struct mxs_nand_info *nand_info = nand_get_controller_data(chip);
0d4e8509
MV
932 int ret;
933
dfe64e2c 934 if (ops->mode == MTD_OPS_RAW)
0d4e8509
MV
935 nand_info->raw_oob_mode = 1;
936 else
937 nand_info->raw_oob_mode = 0;
938
939 ret = nand_info->hooked_read_oob(mtd, from, ops);
940
941 nand_info->raw_oob_mode = 0;
942
943 return ret;
944}
945
946/*
947 * Write OOB to NAND.
948 *
949 * This function is a veneer that replaces the function originally installed by
950 * the NAND Flash MTD code.
951 */
952static int mxs_nand_hook_write_oob(struct mtd_info *mtd, loff_t to,
953 struct mtd_oob_ops *ops)
954{
17cb4b8f
SW
955 struct nand_chip *chip = mtd_to_nand(mtd);
956 struct mxs_nand_info *nand_info = nand_get_controller_data(chip);
0d4e8509
MV
957 int ret;
958
dfe64e2c 959 if (ops->mode == MTD_OPS_RAW)
0d4e8509
MV
960 nand_info->raw_oob_mode = 1;
961 else
962 nand_info->raw_oob_mode = 0;
963
964 ret = nand_info->hooked_write_oob(mtd, to, ops);
965
966 nand_info->raw_oob_mode = 0;
967
968 return ret;
969}
970
971/*
972 * Mark a block bad in NAND.
973 *
974 * This function is a veneer that replaces the function originally installed by
975 * the NAND Flash MTD code.
976 */
977static int mxs_nand_hook_block_markbad(struct mtd_info *mtd, loff_t ofs)
978{
17cb4b8f
SW
979 struct nand_chip *chip = mtd_to_nand(mtd);
980 struct mxs_nand_info *nand_info = nand_get_controller_data(chip);
0d4e8509
MV
981 int ret;
982
983 nand_info->marking_block_bad = 1;
984
985 ret = nand_info->hooked_block_markbad(mtd, ofs);
986
987 nand_info->marking_block_bad = 0;
988
989 return ret;
990}
991
/*
 * There are several places in this driver where we have to handle the OOB and
 * block marks. This is the function where things are the most complicated, so
 * this is where we try to explain it all. All the other places refer back to
 * here.
 *
 * These are the rules, in order of decreasing importance:
 *
 * 1) Nothing the caller does can be allowed to imperil the block mark, so all
 *    write operations take measures to protect it.
 *
 * 2) In read operations, the first byte of the OOB we return must reflect the
 *    true state of the block mark, no matter where that block mark appears in
 *    the physical page.
 *
 * 3) ECC-based read operations return an OOB full of set bits (since we never
 *    allow ECC-based writes to the OOB, it doesn't matter what ECC-based reads
 *    return).
 *
 * 4) "Raw" read operations return a direct view of the physical bytes in the
 *    page, using the conventional definition of which bytes are data and which
 *    are OOB. This gives the caller a way to see the actual, physical bytes
 *    in the page, without the distortions applied by our ECC engine.
 *
 * What we do for this specific read operation depends on whether we're doing
 * "raw" read, or an ECC-based read.
 *
 * It turns out that knowing whether we want an "ECC-based" or "raw" read is not
 * easy. When reading a page, for example, the NAND Flash MTD code calls our
 * ecc.read_page or ecc.read_page_raw function. Thus, the fact that MTD wants an
 * ECC-based or raw view of the page is implicit in which function it calls
 * (there is a similar pair of ECC-based/raw functions for writing).
 *
 * Since MTD assumes the OOB is not covered by ECC, there is no pair of
 * ECC-based/raw functions for reading or writing the OOB. The fact that the
 * caller wants an ECC-based or raw view of the page is not propagated down to
 * this driver.
 *
 * Since our OOB *is* covered by ECC, we need this information. So, we hook the
 * ecc.read_oob and ecc.write_oob function pointers in the owning
 * struct mtd_info with our own functions. These hook functions set the
 * raw_oob_mode field so that, when control finally arrives here, we'll know
 * what to do.
 */
static int mxs_nand_ecc_read_oob(struct mtd_info *mtd, struct nand_chip *nand,
				 int page)
{
	struct mxs_nand_info *nand_info = nand_get_controller_data(nand);

	/*
	 * First, fill in the OOB buffer. If we're doing a raw read, we need to
	 * get the bytes from the physical page. If we're not doing a raw read,
	 * we need to fill the buffer with set bits.
	 */
	if (nand_info->raw_oob_mode) {
		/*
		 * If control arrives here, we're doing a "raw" read. Send the
		 * command to read the conventional OOB and read it.
		 */
		nand->cmdfunc(mtd, NAND_CMD_READ0, mtd->writesize, page);
		nand->read_buf(mtd, nand->oob_poi, mtd->oobsize);
	} else {
		/*
		 * If control arrives here, we're not doing a "raw" read. Fill
		 * the OOB buffer with set bits and correct the block mark.
		 * Reading a single byte at column mtd->writesize fetches the
		 * physical block-mark byte into oob_poi[0] (rule 2 above).
		 */
		memset(nand->oob_poi, 0xff, mtd->oobsize);

		nand->cmdfunc(mtd, NAND_CMD_READ0, mtd->writesize, page);
		mxs_nand_read_buf(mtd, nand->oob_poi, 1);
	}

	return 0;

}
1067
1068/*
1069 * Write OOB data to NAND.
1070 */
1071static int mxs_nand_ecc_write_oob(struct mtd_info *mtd, struct nand_chip *nand,
1072 int page)
1073{
17cb4b8f 1074 struct mxs_nand_info *nand_info = nand_get_controller_data(nand);
0d4e8509
MV
1075 uint8_t block_mark = 0;
1076
1077 /*
1078 * There are fundamental incompatibilities between the i.MX GPMI NFC and
1079 * the NAND Flash MTD model that make it essentially impossible to write
1080 * the out-of-band bytes.
1081 *
1082 * We permit *ONE* exception. If the *intent* of writing the OOB is to
1083 * mark a block bad, we can do that.
1084 */
1085
1086 if (!nand_info->marking_block_bad) {
1087 printf("NXS NAND: Writing OOB isn't supported\n");
1088 return -EIO;
1089 }
1090
1091 /* Write the block mark. */
1092 nand->cmdfunc(mtd, NAND_CMD_SEQIN, mtd->writesize, page);
1093 nand->write_buf(mtd, &block_mark, 1);
1094 nand->cmdfunc(mtd, NAND_CMD_PAGEPROG, -1, -1);
1095
1096 /* Check if it worked. */
1097 if (nand->waitfunc(mtd, nand) & NAND_STATUS_FAIL)
1098 return -EIO;
1099
1100 return 0;
1101}
1102
/*
 * Claims all blocks are good.
 *
 * In principle, this function is *only* called when the NAND Flash MTD system
 * isn't allowed to keep an in-memory bad block table, so it is forced to ask
 * the driver for bad block information.
 *
 * In fact, we permit the NAND Flash MTD system to have an in-memory BBT, so
 * this function is *only* called when we take it away.
 *
 * Thus, this function is only called when we want *all* blocks to look good,
 * so it *always* returns success.
 */
static int mxs_nand_block_bad(struct mtd_info *mtd, loff_t ofs)
{
	return 0;
}
1120
62754450
SA
1121static int mxs_nand_set_geometry(struct mtd_info *mtd, struct bch_geometry *geo)
1122{
1123 struct nand_chip *chip = mtd_to_nand(mtd);
1124 struct nand_chip *nand = mtd_to_nand(mtd);
1125 struct mxs_nand_info *nand_info = nand_get_controller_data(nand);
ed48490f 1126 int err;
62754450 1127
616f03da
YL
1128 if (chip->ecc_strength_ds > nand_info->max_ecc_strength_supported) {
1129 printf("unsupported NAND chip, minimum ecc required %d\n"
1130 , chip->ecc_strength_ds);
1131 return -EINVAL;
1132 }
1133
ed48490f
HX
1134 /* use the legacy bch setting by default */
1135 if ((!nand_info->use_minimum_ecc && mtd->oobsize < 1024) ||
1136 !(chip->ecc_strength_ds > 0 && chip->ecc_step_ds > 0)) {
1137 dev_dbg(mtd->dev, "use legacy bch geometry\n");
1138 err = mxs_nand_legacy_calc_ecc_layout(geo, mtd);
1139 if (!err)
1140 return 0;
616f03da 1141 }
62754450 1142
ed48490f
HX
1143 /* for large oob nand */
1144 if (mtd->oobsize > 1024) {
1145 dev_dbg(mtd->dev, "use large oob bch geometry\n");
1146 err = mxs_nand_calc_ecc_for_large_oob(geo, mtd);
1147 if (!err)
1148 return 0;
1149 }
62754450 1150
ed48490f
HX
1151 /* otherwise use the minimum ecc nand chips required */
1152 dev_dbg(mtd->dev, "use minimum ecc bch geometry\n");
1153 err = mxs_nand_calc_ecc_layout_by_info(geo, mtd, chip->ecc_strength_ds,
1154 chip->ecc_step_ds);
62754450 1155
ed48490f
HX
1156 if (err)
1157 dev_err(mtd->dev, "none of the bch geometry setting works\n");
1158
1159 return err;
1160}
1161
/* Dump the active BCH geometry at debug level for bring-up/diagnostics. */
void mxs_nand_dump_geo(struct mtd_info *mtd)
{
	struct nand_chip *nand = mtd_to_nand(mtd);
	struct mxs_nand_info *nand_info = nand_get_controller_data(nand);
	struct bch_geometry *geo = &nand_info->bch_geometry;

	dev_dbg(mtd->dev, "BCH Geometry :\n"
		"GF Length\t\t: %u\n"
		"ECC Strength\t\t: %u\n"
		"ECC for Meta\t\t: %u\n"
		"ECC Chunk0 Size\t\t: %u\n"
		"ECC Chunkn Size\t\t: %u\n"
		"ECC Chunk Count\t\t: %u\n"
		"Block Mark Byte Offset\t: %u\n"
		"Block Mark Bit Offset\t: %u\n",
		geo->gf_len,
		geo->ecc_strength,
		geo->ecc_for_meta,
		geo->ecc_chunk0_size,
		geo->ecc_chunkn_size,
		geo->ecc_chunk_count,
		geo->block_mark_byte_offset,
		geo->block_mark_bit_offset);
}
1186
/*
 * At this point, the physical NAND Flash chips have been identified and
 * counted, so we know the physical geometry. This enables us to make some
 * important configuration decisions.
 *
 * The return value of this function propagates directly back to this driver's
 * board_nand_init(). Anything other than zero will cause this driver to
 * tear everything down and declare failure.
 */
int mxs_nand_setup_ecc(struct mtd_info *mtd)
{
	struct nand_chip *nand = mtd_to_nand(mtd);
	struct mxs_nand_info *nand_info = nand_get_controller_data(nand);
	struct bch_geometry *geo = &nand_info->bch_geometry;
	struct mxs_bch_regs *bch_regs = nand_info->bch_regs;
	uint32_t tmp;
	int ret;

	/* Remember the normal-mode page geometry so fcb modes can restore it. */
	nand_info->en_randomizer = 0;
	nand_info->oobsize = mtd->oobsize;
	nand_info->writesize = mtd->writesize;

	ret = mxs_nand_set_geometry(mtd, geo);
	if (ret)
		return ret;

	mxs_nand_dump_geo(mtd);

	/* Configure BCH and set NFC geometry */
	mxs_reset_block(&bch_regs->hw_bch_ctrl_reg);

	/*
	 * Configure layout 0. The ECC0/ECCN fields are programmed as
	 * strength/2 (hence the >> 1), and the chunk sizes are scaled by the
	 * SoC-dependent MXS_NAND_CHUNK_DATA_CHUNK_SIZE_SHIFT.
	 */
	tmp = (geo->ecc_chunk_count - 1) << BCH_FLASHLAYOUT0_NBLOCKS_OFFSET;
	tmp |= MXS_NAND_METADATA_SIZE << BCH_FLASHLAYOUT0_META_SIZE_OFFSET;
	tmp |= (geo->ecc_strength >> 1) << BCH_FLASHLAYOUT0_ECC0_OFFSET;
	tmp |= geo->ecc_chunk0_size >> MXS_NAND_CHUNK_DATA_CHUNK_SIZE_SHIFT;
	tmp |= (geo->gf_len == 14 ? 1 : 0) <<
		BCH_FLASHLAYOUT0_GF13_0_GF14_1_OFFSET;
	writel(tmp, &bch_regs->hw_bch_flash0layout0);
	/* Saved so mxs_nand_mode_normal() can restore these registers. */
	nand_info->bch_flash0layout0 = tmp;

	tmp = (mtd->writesize + mtd->oobsize)
		<< BCH_FLASHLAYOUT1_PAGE_SIZE_OFFSET;
	tmp |= (geo->ecc_strength >> 1) << BCH_FLASHLAYOUT1_ECCN_OFFSET;
	tmp |= geo->ecc_chunkn_size >> MXS_NAND_CHUNK_DATA_CHUNK_SIZE_SHIFT;
	tmp |= (geo->gf_len == 14 ? 1 : 0) <<
		BCH_FLASHLAYOUT1_GF13_0_GF14_1_OFFSET;
	writel(tmp, &bch_regs->hw_bch_flash0layout1);
	nand_info->bch_flash0layout1 = tmp;

	/* Set erase threshold to ecc strength for mx6ul, mx6qp and mx7 */
	if (is_mx6dqp() || is_mx7() ||
	    is_mx6ul() || is_imx8() || is_imx8m())
		writel(BCH_MODE_ERASE_THRESHOLD(geo->ecc_strength),
		       &bch_regs->hw_bch_mode);

	/* Set *all* chip selects to use layout 0 */
	writel(0, &bch_regs->hw_bch_layoutselect);

	/* Enable BCH complete interrupt */
	writel(BCH_CTRL_COMPLETE_IRQ_EN, &bch_regs->hw_bch_ctrl_set);

	return 0;
}
1251
1252/*
1253 * Allocate DMA buffers
1254 */
1255int mxs_nand_alloc_buffers(struct mxs_nand_info *nand_info)
1256{
1257 uint8_t *buf;
1258 const int size = NAND_MAX_PAGESIZE + NAND_MAX_OOBSIZE;
1259
6b9408ed
MV
1260 nand_info->data_buf_size = roundup(size, MXS_DMA_ALIGNMENT);
1261
0d4e8509 1262 /* DMA buffers */
6b9408ed 1263 buf = memalign(MXS_DMA_ALIGNMENT, nand_info->data_buf_size);
0d4e8509
MV
1264 if (!buf) {
1265 printf("MXS NAND: Error allocating DMA buffers\n");
1266 return -ENOMEM;
1267 }
1268
6b9408ed 1269 memset(buf, 0, nand_info->data_buf_size);
0d4e8509
MV
1270
1271 nand_info->data_buf = buf;
1272 nand_info->oob_buf = buf + NAND_MAX_PAGESIZE;
0d4e8509
MV
1273 /* Command buffers */
1274 nand_info->cmd_buf = memalign(MXS_DMA_ALIGNMENT,
1275 MXS_NAND_COMMAND_BUFFER_SIZE);
1276 if (!nand_info->cmd_buf) {
1277 free(buf);
1278 printf("MXS NAND: Error allocating command buffers\n");
1279 return -ENOMEM;
1280 }
1281 memset(nand_info->cmd_buf, 0, MXS_NAND_COMMAND_BUFFER_SIZE);
1282 nand_info->cmd_queue_len = 0;
1283
1284 return 0;
1285}
1286
/*
 * Initializes the NFC hardware.
 *
 * Allocates the DMA descriptor array, initializes one APBH DMA channel per
 * possible GPMI chip select, resets the GPMI and BCH blocks, and programs
 * the GPMI control register for BCH-assisted NAND mode.
 *
 * Returns 0 on success; on failure, everything acquired so far is unwound
 * in reverse order via the err3/err2/err1 labels.
 */
static int mxs_nand_init_dma(struct mxs_nand_info *info)
{
	int i = 0, j, ret = 0;

	info->desc = malloc(sizeof(struct mxs_dma_desc *) *
				MXS_NAND_DMA_DESCRIPTOR_COUNT);
	if (!info->desc) {
		ret = -ENOMEM;
		goto err1;
	}

	/* Allocate the DMA descriptors. */
	for (i = 0; i < MXS_NAND_DMA_DESCRIPTOR_COUNT; i++) {
		info->desc[i] = mxs_dma_desc_alloc();
		if (!info->desc[i]) {
			ret = -ENOMEM;
			goto err2;
		}
	}

	/* Init the DMA controller. */
	mxs_dma_init();
	for (j = MXS_DMA_CHANNEL_AHB_APBH_GPMI0;
		j <= MXS_DMA_CHANNEL_AHB_APBH_GPMI7; j++) {
		ret = mxs_dma_init_channel(j);
		if (ret)
			goto err3;
	}

	/* Reset the GPMI block. */
	mxs_reset_block(&info->gpmi_regs->hw_gpmi_ctrl0_reg);
	mxs_reset_block(&info->bch_regs->hw_bch_ctrl_reg);

	/*
	 * Choose NAND mode, set IRQ polarity, disable write protection and
	 * select BCH ECC.
	 */
	clrsetbits_le32(&info->gpmi_regs->hw_gpmi_ctrl1,
			GPMI_CTRL1_GPMI_MODE,
			GPMI_CTRL1_ATA_IRQRDY_POLARITY | GPMI_CTRL1_DEV_RESET |
			GPMI_CTRL1_BCH_MODE);

	return 0;

err3:
	/* Release only the channels that were successfully initialized. */
	for (--j; j >= MXS_DMA_CHANNEL_AHB_APBH_GPMI0; j--)
		mxs_dma_release(j);
err2:
	/* Free only the descriptors allocated before the failure. */
	for (--i; i >= 0; i--)
		mxs_dma_desc_free(info->desc[i]);
	free(info->desc);
err1:
	if (ret == -ENOMEM)
		printf("MXS NAND: Unable to allocate DMA descriptors\n");
	return ret;
}
1346
/*
 * Minimal controller setup for SPL: allocate the private data and DMA
 * resources, then install only the read-side chip callbacks needed to load
 * an image (no write support, no full nand_scan()).
 *
 * Returns 0 on success or a negative error code.
 */
int mxs_nand_init_spl(struct nand_chip *nand)
{
	struct mxs_nand_info *nand_info;
	int err;

	nand_info = malloc(sizeof(struct mxs_nand_info));
	if (!nand_info) {
		printf("MXS NAND: Failed to allocate private data\n");
		return -ENOMEM;
	}
	memset(nand_info, 0, sizeof(struct mxs_nand_info));

	nand_info->gpmi_regs = (struct mxs_gpmi_regs *)MXS_GPMI_BASE;
	nand_info->bch_regs = (struct mxs_bch_regs *)MXS_BCH_BASE;

	if (is_mx6sx() || is_mx7() || is_imx8() || is_imx8m())
		nand_info->max_ecc_strength_supported = 62;
	else
		nand_info->max_ecc_strength_supported = 40;

	if (IS_ENABLED(CONFIG_NAND_MXS_USE_MINIMUM_ECC))
		nand_info->use_minimum_ecc = true;

	/*
	 * NOTE(review): nand_info (and, below, the DMA buffers) are not freed
	 * on the error paths; in SPL a failure here aborts boot anyway.
	 */
	err = mxs_nand_alloc_buffers(nand_info);
	if (err)
		return err;

	err = mxs_nand_init_dma(nand_info);
	if (err)
		return err;

	nand_set_controller_data(nand, nand_info);

	nand->options |= NAND_NO_SUBPAGE_WRITE;

	nand->cmd_ctrl = mxs_nand_cmd_ctrl;
	nand->dev_ready = mxs_nand_device_ready;
	nand->select_chip = mxs_nand_select_chip;

	nand->read_byte = mxs_nand_read_byte;
	nand->read_buf = mxs_nand_read_buf;

	nand->ecc.read_page = mxs_nand_ecc_read_page;

	nand->ecc.mode = NAND_ECC_HW;

	return 0;
}
1395
/*
 * Full controller bring-up: allocate buffers and DMA resources, run the
 * two-phase NAND scan, program the BCH engine, hook the MTD-level OOB and
 * bad-block operations (see the long comment above mxs_nand_ecc_read_oob()),
 * and register the device with the MTD core.
 *
 * Returns 0 on success or a negative error code; on failure the DMA buffers
 * are freed before returning.
 */
int mxs_nand_init_ctrl(struct mxs_nand_info *nand_info)
{
	struct mtd_info *mtd;
	struct nand_chip *nand;
	int err;

	nand = &nand_info->chip;
	mtd = nand_to_mtd(nand);
	err = mxs_nand_alloc_buffers(nand_info);
	if (err)
		return err;

	err = mxs_nand_init_dma(nand_info);
	if (err)
		goto err_free_buffers;

	memset(&fake_ecc_layout, 0, sizeof(fake_ecc_layout));

#ifdef CONFIG_SYS_NAND_USE_FLASH_BBT
	nand->bbt_options |= NAND_BBT_USE_FLASH | NAND_BBT_NO_OOB;
#endif

	nand_set_controller_data(nand, nand_info);
	nand->options |= NAND_NO_SUBPAGE_WRITE;

	if (nand_info->dev)
		nand->flash_node = dev_ofnode(nand_info->dev);

	nand->cmd_ctrl = mxs_nand_cmd_ctrl;

	nand->dev_ready = mxs_nand_device_ready;
	nand->select_chip = mxs_nand_select_chip;
	nand->block_bad = mxs_nand_block_bad;

	nand->read_byte = mxs_nand_read_byte;

	nand->read_buf = mxs_nand_read_buf;
	nand->write_buf = mxs_nand_write_buf;

	/* first scan to find the device and get the page size */
	if (nand_scan_ident(mtd, CONFIG_SYS_MAX_NAND_DEVICE, NULL))
		goto err_free_buffers;

	/* Geometry is known after ident; program the BCH engine from it. */
	if (mxs_nand_setup_ecc(mtd))
		goto err_free_buffers;

	nand->ecc.read_page = mxs_nand_ecc_read_page;
	nand->ecc.write_page = mxs_nand_ecc_write_page;
	nand->ecc.read_oob = mxs_nand_ecc_read_oob;
	nand->ecc.write_oob = mxs_nand_ecc_write_oob;

	nand->ecc.layout = &fake_ecc_layout;
	nand->ecc.mode = NAND_ECC_HW;
	nand->ecc.size = nand_info->bch_geometry.ecc_chunkn_size;
	nand->ecc.strength = nand_info->bch_geometry.ecc_strength;

	/* second phase scan */
	err = nand_scan_tail(mtd);
	if (err)
		goto err_free_buffers;

	/*
	 * Hook some operations at the MTD level. The guards keep us from
	 * re-hooking (and thereby losing the original pointer) if this
	 * function runs more than once for the same mtd.
	 */
	if (mtd->_read_oob != mxs_nand_hook_read_oob) {
		nand_info->hooked_read_oob = mtd->_read_oob;
		mtd->_read_oob = mxs_nand_hook_read_oob;
	}

	if (mtd->_write_oob != mxs_nand_hook_write_oob) {
		nand_info->hooked_write_oob = mtd->_write_oob;
		mtd->_write_oob = mxs_nand_hook_write_oob;
	}

	if (mtd->_block_markbad != mxs_nand_hook_block_markbad) {
		nand_info->hooked_block_markbad = mtd->_block_markbad;
		mtd->_block_markbad = mxs_nand_hook_block_markbad;
	}

	err = nand_register(0, mtd);
	if (err)
		goto err_free_buffers;

	return 0;

err_free_buffers:
	free(nand_info->data_buf);
	free(nand_info->cmd_buf);

	return err;
}
1485
#ifndef CONFIG_NAND_MXS_DT
/*
 * Legacy (non-devicetree) entry point: allocate and populate the driver's
 * private data from hard-coded register bases, then hand off to
 * mxs_nand_init_ctrl(). Errors are not propagated (the board init API
 * returns void); the private data is freed on failure.
 */
void board_nand_init(void)
{
	struct mxs_nand_info *nand_info;

	nand_info = malloc(sizeof(struct mxs_nand_info));
	if (!nand_info) {
		printf("MXS NAND: Failed to allocate private data\n");
		return;
	}
	memset(nand_info, 0, sizeof(struct mxs_nand_info));

	nand_info->gpmi_regs = (struct mxs_gpmi_regs *)MXS_GPMI_BASE;
	nand_info->bch_regs = (struct mxs_bch_regs *)MXS_BCH_BASE;

	/* Refer to Chapter 17 for i.MX6DQ, Chapter 18 for i.MX6SX */
	if (is_mx6sx() || is_mx7())
		nand_info->max_ecc_strength_supported = 62;
	else
		nand_info->max_ecc_strength_supported = 40;

#ifdef CONFIG_NAND_MXS_USE_MINIMUM_ECC
	nand_info->use_minimum_ecc = true;
#endif

	if (mxs_nand_init_ctrl(nand_info) < 0)
		goto err;

	return;

err:
	free(nand_info);
}
#endif
/*
 * Read NAND layout for FCB block generation.
 *
 * Decodes the live BCH FLASHLAYOUT0/1 registers into *l. The DATA0/DATAN
 * size fields are stored in units of 4 bytes, hence the * 4 scaling.
 */
void mxs_nand_get_layout(struct mtd_info *mtd, struct mxs_nand_layout *l)
{
	struct mxs_bch_regs *bch_regs = (struct mxs_bch_regs *)MXS_BCH_BASE;
	u32 tmp;

	tmp = readl(&bch_regs->hw_bch_flash0layout0);
	l->nblocks = (tmp & BCH_FLASHLAYOUT0_NBLOCKS_MASK) >>
			BCH_FLASHLAYOUT0_NBLOCKS_OFFSET;
	l->meta_size = (tmp & BCH_FLASHLAYOUT0_META_SIZE_MASK) >>
			BCH_FLASHLAYOUT0_META_SIZE_OFFSET;

	tmp = readl(&bch_regs->hw_bch_flash0layout1);
	l->data0_size = 4 * ((tmp & BCH_FLASHLAYOUT0_DATA0_SIZE_MASK) >>
			BCH_FLASHLAYOUT0_DATA0_SIZE_OFFSET);
	l->ecc0 = (tmp & BCH_FLASHLAYOUT0_ECC0_MASK) >>
			BCH_FLASHLAYOUT0_ECC0_OFFSET;
	l->datan_size = 4 * ((tmp & BCH_FLASHLAYOUT1_DATAN_SIZE_MASK) >>
			BCH_FLASHLAYOUT1_DATAN_SIZE_OFFSET);
	l->eccn = (tmp & BCH_FLASHLAYOUT1_ECCN_MASK) >>
			BCH_FLASHLAYOUT1_ECCN_OFFSET;
	l->gf_len = (tmp & BCH_FLASHLAYOUT1_GF13_0_GF14_1_MASK) >>
		     BCH_FLASHLAYOUT1_GF13_0_GF14_1_OFFSET;
}
1547
/*
 * Set BCH to specific layout used by ROM bootloader to read FCB.
 *
 * Programs the 62-bit-ECC FCB layout (1024 data + 838 OOB bytes, randomizer
 * enabled). Call mxs_nand_mode_normal() afterwards to restore the regular
 * geometry saved by mxs_nand_setup_ecc().
 */
void mxs_nand_mode_fcb_62bit(struct mtd_info *mtd)
{
	u32 tmp;
	struct mxs_bch_regs *bch_regs = (struct mxs_bch_regs *)MXS_BCH_BASE;
	struct nand_chip *nand = mtd_to_nand(mtd);
	struct mxs_nand_info *nand_info = nand_get_controller_data(nand);

	nand_info->en_randomizer = 1;

	mtd->writesize = 1024;
	mtd->oobsize = 1862 - 1024;

	/* 8 ecc_chunks_*/
	tmp = 7 << BCH_FLASHLAYOUT0_NBLOCKS_OFFSET;
	/* 32 bytes for metadata */
	tmp |= 32 << BCH_FLASHLAYOUT0_META_SIZE_OFFSET;
	/* using ECC62 level to be performed (field holds strength/2 = 0x1F) */
	tmp |= 0x1F << BCH_FLASHLAYOUT0_ECC0_OFFSET;
	/* 0x20 * 4 bytes of the data0 block */
	tmp |= 0x20 << BCH_FLASHLAYOUT0_DATA0_SIZE_OFFSET;
	tmp |= 0 << BCH_FLASHLAYOUT0_GF13_0_GF14_1_OFFSET;
	writel(tmp, &bch_regs->hw_bch_flash0layout0);

	/* 1024 for data + 838 for OOB */
	tmp = 1862 << BCH_FLASHLAYOUT1_PAGE_SIZE_OFFSET;
	/* using ECC62 level to be performed */
	tmp |= 0x1F << BCH_FLASHLAYOUT1_ECCN_OFFSET;
	/* 0x20 * 4 bytes of the data0 block */
	tmp |= 0x20 << BCH_FLASHLAYOUT1_DATAN_SIZE_OFFSET;
	tmp |= 0 << BCH_FLASHLAYOUT1_GF13_0_GF14_1_OFFSET;
	writel(tmp, &bch_regs->hw_bch_flash0layout1);
}
1583
/*
 * Set BCH to specific layout used by ROM bootloader to read FCB.
 *
 * Programs the 40-bit-ECC FCB layout (1024 data + 552 OOB bytes, randomizer
 * disabled). Call mxs_nand_mode_normal() afterwards to restore the regular
 * geometry saved by mxs_nand_setup_ecc().
 */
void mxs_nand_mode_fcb_40bit(struct mtd_info *mtd)
{
	u32 tmp;
	struct mxs_bch_regs *bch_regs = (struct mxs_bch_regs *)MXS_BCH_BASE;
	struct nand_chip *nand = mtd_to_nand(mtd);
	struct mxs_nand_info *nand_info = nand_get_controller_data(nand);

	/* no randomizer in this setting*/
	nand_info->en_randomizer = 0;

	mtd->writesize = 1024;
	mtd->oobsize = 1576 - 1024;

	/* 8 ecc_chunks_*/
	tmp = 7 << BCH_FLASHLAYOUT0_NBLOCKS_OFFSET;
	/* 32 bytes for metadata */
	tmp |= 32 << BCH_FLASHLAYOUT0_META_SIZE_OFFSET;
	/* using ECC40 level to be performed (field holds strength/2 = 0x14) */
	tmp |= 0x14 << BCH_FLASHLAYOUT0_ECC0_OFFSET;
	/* 0x20 * 4 bytes of the data0 block */
	tmp |= 0x20 << BCH_FLASHLAYOUT0_DATA0_SIZE_OFFSET;
	tmp |= 0 << BCH_FLASHLAYOUT0_GF13_0_GF14_1_OFFSET;
	writel(tmp, &bch_regs->hw_bch_flash0layout0);

	/* 1024 for data + 552 for OOB */
	tmp = 1576 << BCH_FLASHLAYOUT1_PAGE_SIZE_OFFSET;
	/* using ECC40 level to be performed */
	tmp |= 0x14 << BCH_FLASHLAYOUT1_ECCN_OFFSET;
	/* 0x20 * 4 bytes of the data0 block */
	tmp |= 0x20 << BCH_FLASHLAYOUT1_DATAN_SIZE_OFFSET;
	tmp |= 0 << BCH_FLASHLAYOUT1_GF13_0_GF14_1_OFFSET;
	writel(tmp, &bch_regs->hw_bch_flash0layout1);
}
1620
/*
 * Restore BCH to normal settings.
 *
 * Undoes an FCB-mode switch by restoring the page geometry and the
 * FLASHLAYOUT0/1 register values saved in mxs_nand_setup_ecc().
 */
void mxs_nand_mode_normal(struct mtd_info *mtd)
{
	struct mxs_bch_regs *bch_regs = (struct mxs_bch_regs *)MXS_BCH_BASE;
	struct nand_chip *nand = mtd_to_nand(mtd);
	struct mxs_nand_info *nand_info = nand_get_controller_data(nand);

	nand_info->en_randomizer = 0;

	mtd->writesize = nand_info->writesize;
	mtd->oobsize = nand_info->oobsize;

	writel(nand_info->bch_flash0layout0, &bch_regs->hw_bch_flash0layout0);
	writel(nand_info->bch_flash0layout1, &bch_regs->hw_bch_flash0layout1);
}
1638
1639uint32_t mxs_nand_mark_byte_offset(struct mtd_info *mtd)
1640{
1641 struct nand_chip *chip = mtd_to_nand(mtd);
1642 struct mxs_nand_info *nand_info = nand_get_controller_data(chip);
1643 struct bch_geometry *geo = &nand_info->bch_geometry;
1644
1645 return geo->block_mark_byte_offset;
1646}
1647
1648uint32_t mxs_nand_mark_bit_offset(struct mtd_info *mtd)
1649{
1650 struct nand_chip *chip = mtd_to_nand(mtd);
1651 struct mxs_nand_info *nand_info = nand_get_controller_data(chip);
1652 struct bch_geometry *geo = &nand_info->bch_geometry;
1653
1654 return geo->block_mark_bit_offset;
1655}