]> git.ipfire.org Git - thirdparty/kernel/linux.git/commitdiff
mtd: spi-nand: Isolate the MTD read logic in a helper
author: Miquel Raynal <miquel.raynal@bootlin.com>
Mon, 26 Aug 2024 10:14:06 +0000 (12:14 +0200)
committer: Miquel Raynal <miquel.raynal@bootlin.com>
Fri, 6 Sep 2024 15:00:04 +0000 (17:00 +0200)
There is currently only a single path for performing page reads as
requested by the MTD layer. Soon there will be two:
- a "regular" page read
- a continuous page read

Let's extract the page read logic in a dedicated helper, so the
introduction of continuous page reads will be as easy as checking whether
continuous reads shall/can be used and calling one helper or the other.

There is no behavioral change intended.

Signed-off-by: Miquel Raynal <miquel.raynal@bootlin.com>
Link: https://lore.kernel.org/linux-mtd/20240826101412.20644-4-miquel.raynal@bootlin.com
drivers/mtd/nand/spi/core.c

index 018c854d06193bbe2c8f2b2e0668dc65a08600bc..1f468ed93c8e57ed72d1faebedc201a3e3b8b021 100644 (file)
@@ -630,25 +630,20 @@ static int spinand_write_page(struct spinand_device *spinand,
        return nand_ecc_finish_io_req(nand, (struct nand_page_io_req *)req);
 }
 
-static int spinand_mtd_read(struct mtd_info *mtd, loff_t from,
-                           struct mtd_oob_ops *ops)
+static int spinand_mtd_regular_page_read(struct mtd_info *mtd, loff_t from,
+                                        struct mtd_oob_ops *ops,
+                                        unsigned int *max_bitflips)
 {
        struct spinand_device *spinand = mtd_to_spinand(mtd);
        struct nand_device *nand = mtd_to_nanddev(mtd);
-       struct mtd_ecc_stats old_stats;
-       unsigned int max_bitflips = 0;
        struct nand_io_iter iter;
        bool disable_ecc = false;
        bool ecc_failed = false;
-       int ret = 0;
+       int ret;
 
-       if (ops->mode == MTD_OPS_RAW || !spinand->eccinfo.ooblayout)
+       if (ops->mode == MTD_OPS_RAW || !mtd->ooblayout)
                disable_ecc = true;
 
-       mutex_lock(&spinand->lock);
-
-       old_stats = mtd->ecc_stats;
-
        nanddev_io_for_each_page(nand, NAND_PAGE_READ, from, ops, &iter) {
                if (disable_ecc)
                        iter.req.mode = MTD_OPS_RAW;
@@ -664,13 +659,33 @@ static int spinand_mtd_read(struct mtd_info *mtd, loff_t from,
                if (ret == -EBADMSG)
                        ecc_failed = true;
                else
-                       max_bitflips = max_t(unsigned int, max_bitflips, ret);
+                       *max_bitflips = max_t(unsigned int, *max_bitflips, ret);
 
                ret = 0;
                ops->retlen += iter.req.datalen;
                ops->oobretlen += iter.req.ooblen;
        }
 
+       if (ecc_failed && !ret)
+               ret = -EBADMSG;
+
+       return ret;
+}
+
+static int spinand_mtd_read(struct mtd_info *mtd, loff_t from,
+                           struct mtd_oob_ops *ops)
+{
+       struct spinand_device *spinand = mtd_to_spinand(mtd);
+       struct mtd_ecc_stats old_stats;
+       unsigned int max_bitflips = 0;
+       int ret;
+
+       mutex_lock(&spinand->lock);
+
+       old_stats = mtd->ecc_stats;
+
+       ret = spinand_mtd_regular_page_read(mtd, from, ops, &max_bitflips);
+
        if (ops->stats) {
                ops->stats->uncorrectable_errors +=
                        mtd->ecc_stats.failed - old_stats.failed;
@@ -680,9 +695,6 @@ static int spinand_mtd_read(struct mtd_info *mtd, loff_t from,
 
        mutex_unlock(&spinand->lock);
 
-       if (ecc_failed && !ret)
-               ret = -EBADMSG;
-
        return ret ? ret : max_bitflips;
 }