git.ipfire.org Git - thirdparty/linux.git/commitdiff
mmc: block: use single block write in retry
author Bin Liu <b-liu@ti.com>
Wed, 25 Mar 2026 13:49:47 +0000 (08:49 -0500)
committer Ulf Hansson <ulf.hansson@linaro.org>
Thu, 26 Mar 2026 10:19:57 +0000 (11:19 +0100)
Due to errata i2493[0], multi-block write would still fail in retries.

With i2493, the MMC interface has the potential of write failures when
issuing multi-block writes operating in HS200 mode with excessive IO
supply noise.

While the errata provides guidance in hardware design and layout to
minimize the IO supply noise, in theory the write failure cannot be
resolved in hardware. The software solution to ensure the data integrity
is to add a minimum 5 us delay between block writes. Single-block write is
the practical way to introduce the delay.

This patch reuses the recovery_mode flag, and switches to single-block
write in retry when multi-block write fails. It covers both CQE and
non-CQE cases.

[0] https://www.ti.com/lit/pdf/sprz582
Cc: stable@vger.kernel.org
Suggested-by: Jens Axboe <axboe@kernel.dk>
Signed-off-by: Bin Liu <b-liu@ti.com>
Signed-off-by: Ulf Hansson <ulf.hansson@linaro.org>
drivers/mmc/core/block.c
drivers/mmc/core/queue.h

index 05ee76cb0a08e9bfded128834029baeb4286205e..db8c99c73a61739b35f94049a73e074d48e3c26d 100644 (file)
@@ -1401,6 +1401,9 @@ static void mmc_blk_data_prep(struct mmc_queue *mq, struct mmc_queue_req *mqrq,
                    rq_data_dir(req) == WRITE &&
                    (md->flags & MMC_BLK_REL_WR);
 
+       if (mqrq->flags & MQRQ_XFER_SINGLE_BLOCK)
+               recovery_mode = 1;
+
        memset(brq, 0, sizeof(struct mmc_blk_request));
 
        mmc_crypto_prepare_req(mqrq);
@@ -1540,10 +1543,13 @@ static void mmc_blk_cqe_complete_rq(struct mmc_queue *mq, struct request *req)
                err = 0;
 
        if (err) {
-               if (mqrq->retries++ < MMC_CQE_RETRIES)
+               if (mqrq->retries++ < MMC_CQE_RETRIES) {
+                       if (rq_data_dir(req) == WRITE)
+                               mqrq->flags |= MQRQ_XFER_SINGLE_BLOCK;
                        blk_mq_requeue_request(req, true);
-               else
+               } else {
                        blk_mq_end_request(req, BLK_STS_IOERR);
+               }
        } else if (mrq->data) {
                if (blk_update_request(req, BLK_STS_OK, mrq->data->bytes_xfered))
                        blk_mq_requeue_request(req, true);
@@ -2085,6 +2091,8 @@ static void mmc_blk_mq_complete_rq(struct mmc_queue *mq, struct request *req)
        } else if (!blk_rq_bytes(req)) {
                __blk_mq_end_request(req, BLK_STS_IOERR);
        } else if (mqrq->retries++ < MMC_MAX_RETRIES) {
+               if (rq_data_dir(req) == WRITE)
+                       mqrq->flags |= MQRQ_XFER_SINGLE_BLOCK;
                blk_mq_requeue_request(req, true);
        } else {
                if (mmc_card_removed(mq->card))
index 1498840a4ea008db502ea07ad6c17923af24dca1..c254e6580afd62481afcf04eb6e13fddffd63503 100644 (file)
@@ -61,6 +61,8 @@ enum mmc_drv_op {
        MMC_DRV_OP_GET_EXT_CSD,
 };
 
+#define        MQRQ_XFER_SINGLE_BLOCK          BIT(0)
+
 struct mmc_queue_req {
        struct mmc_blk_request  brq;
        struct scatterlist      *sg;
@@ -69,6 +71,7 @@ struct mmc_queue_req {
        void                    *drv_op_data;
        unsigned int            ioc_count;
        int                     retries;
+       u32                     flags;
 };
 
 struct mmc_queue {