	struct nand_device *nand = mtd_to_nanddev(mtd);
	unsigned int max_bitflips = 0;
	struct nand_io_iter iter;
-	bool enable_ecc = false;
+	bool disable_ecc = false;
	bool ecc_failed = false;
	int ret = 0;

-	if (ops->mode != MTD_OPS_RAW && spinand->eccinfo.ooblayout)
-		enable_ecc = true;
+	if (ops->mode == MTD_OPS_RAW || !spinand->eccinfo.ooblayout)
+		disable_ecc = true;

#ifndef __UBOOT__
	mutex_lock(&spinand->lock);
#endif

	nanddev_io_for_each_page(nand, NAND_PAGE_READ, from, ops, &iter) {
		schedule();
+		if (disable_ecc)
+			iter.req.mode = MTD_OPS_RAW;
+
		ret = spinand_select_target(spinand, iter.req.pos.target);
		if (ret)
			break;

-		ret = spinand_ecc_enable(spinand, enable_ecc);
+		ret = spinand_ecc_enable(spinand, !disable_ecc);
		if (ret)
			break;

-		ret = spinand_read_page(spinand, &iter.req, enable_ecc);
+		ret = spinand_read_page(spinand, &iter.req, !disable_ecc);
		if (ret < 0 && ret != -EBADMSG)
			break;
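
The effect of the inversion in spinand_mtd_read() is that every per-page request issued while disable_ecc is set is stamped MTD_OPS_RAW, so spinand_read_page() bypasses the on-die ECC engine and the OOB layout translation for that transfer. As a minimal sketch of how a caller reaches this raw path through the generic MTD API (the helper name and buffer sizing below are illustrative assumptions, not part of the patch):

#include <linux/mtd/mtd.h>

/* Hypothetical helper: read one full page with ECC bypassed. Setting
 * ops.mode to MTD_OPS_RAW is what makes spinand_mtd_read() take the
 * disable_ecc branch shown above. */
static int example_raw_page_read(struct mtd_info *mtd, loff_t from,
				 u8 *data, u8 *oob)
{
	struct mtd_oob_ops ops = {
		.mode = MTD_OPS_RAW,
		.datbuf = data,
		.len = mtd->writesize,	/* one page of main data */
		.oobbuf = oob,
		.ooblen = mtd->oobsize,	/* plus the raw OOB area */
	};

	/* mtd_read_oob() dispatches to spinand_mtd_read() for SPI NAND. */
	return mtd_read_oob(mtd, from, &ops);
}

The second hunk applies the same enable_ecc -> disable_ecc inversion to the write path, spinand_mtd_write():
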
	struct spinand_device *spinand = mtd_to_spinand(mtd);
	struct nand_device *nand = mtd_to_nanddev(mtd);
	struct nand_io_iter iter;
-	bool enable_ecc = false;
+	bool disable_ecc = false;
	int ret = 0;

-	if (ops->mode != MTD_OPS_RAW && mtd->ooblayout)
-		enable_ecc = true;
+	if (ops->mode == MTD_OPS_RAW || !mtd->ooblayout)
+		disable_ecc = true;

#ifndef __UBOOT__
	mutex_lock(&spinand->lock);
#endif

	nanddev_io_for_each_page(nand, NAND_PAGE_WRITE, to, ops, &iter) {
		schedule();
+		if (disable_ecc)
+			iter.req.mode = MTD_OPS_RAW;
+
		ret = spinand_select_target(spinand, iter.req.pos.target);
		if (ret)
			break;

-		ret = spinand_ecc_enable(spinand, enable_ecc);
+		ret = spinand_ecc_enable(spinand, !disable_ecc);
		if (ret)
			break;
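
On the write side, the same MTD_OPS_RAW stamping means the page is programmed exactly as supplied, with no ECC parity generated by the on-die engine. A matching caller-side sketch, again with an illustrative helper name rather than anything from the patch:

#include <linux/mtd/mtd.h>

/* Hypothetical helper: program one page raw, i.e. without the on-die
 * ECC engine computing parity bytes for it. */
static int example_raw_page_write(struct mtd_info *mtd, loff_t to,
				  u8 *data)
{
	struct mtd_oob_ops ops = {
		.mode = MTD_OPS_RAW,	/* takes the disable_ecc branch above */
		.datbuf = data,
		.len = mtd->writesize,
	};

	/* mtd_write_oob() dispatches to spinand_mtd_write() for SPI NAND. */
	return mtd_write_oob(mtd, to, &ops);
}

Raw accesses like these are what flash test and bad-block-scanning tools rely on, since an ECC-corrected view would mask the underlying bit errors.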