git.ipfire.org Git - thirdparty/kernel/linux.git/commitdiff
mmc: block: Use MQRQ_XFER_SINGLE_BLOCK for both read and write recovery
author Shawn Lin <shawn.lin@rock-chips.com>
Mon, 30 Mar 2026 03:28:32 +0000 (11:28 +0800)
committer Ulf Hansson <ulf.hansson@linaro.org>
Tue, 31 Mar 2026 10:41:32 +0000 (12:41 +0200)
Currently, the code uses the MQRQ_XFER_SINGLE_BLOCK flag to handle write
failures by retrying with single-block transfers. However, read failures
bypass this mechanism and instead use a dedicated legacy path,
mmc_blk_read_single(), that performs sector-by-sector retries.

Extend the MQRQ_XFER_SINGLE_BLOCK logic to cover multi-block read failures
as well. By doing so, we can remove the redundant and complex mmc_blk_read_single()
function, unifying the retry logic for both read and write operations under
a single, consistent, easier-to-maintain mechanism.

Signed-off-by: Shawn Lin <shawn.lin@rock-chips.com>
Signed-off-by: Ulf Hansson <ulf.hansson@linaro.org>
drivers/mmc/core/block.c

index 53c1b04f19163a30f7aef430a9e4f98a2b26e0cb..0274e8d07660dd0e3af237da6ba91480d2c0a9da 100644 (file)
@@ -1544,8 +1544,7 @@ static void mmc_blk_cqe_complete_rq(struct mmc_queue *mq, struct request *req)
 
        if (err) {
                if (mqrq->retries++ < MMC_CQE_RETRIES) {
-                       if (rq_data_dir(req) == WRITE)
-                               mqrq->flags |= MQRQ_XFER_SINGLE_BLOCK;
+                       mqrq->flags |= MQRQ_XFER_SINGLE_BLOCK;
                        blk_mq_requeue_request(req, true);
                } else {
                        blk_mq_end_request(req, BLK_STS_IOERR);
@@ -1782,63 +1781,6 @@ static int mmc_blk_fix_state(struct mmc_card *card, struct request *req)
        return err;
 }
 
-#define MMC_READ_SINGLE_RETRIES        2
-
-/* Single (native) sector read during recovery */
-static void mmc_blk_read_single(struct mmc_queue *mq, struct request *req)
-{
-       struct mmc_queue_req *mqrq = req_to_mmc_queue_req(req);
-       struct mmc_request *mrq = &mqrq->brq.mrq;
-       struct mmc_card *card = mq->card;
-       struct mmc_host *host = card->host;
-       blk_status_t error = BLK_STS_OK;
-       size_t bytes_per_read = queue_physical_block_size(mq->queue);
-
-       do {
-               u32 status;
-               int err;
-               int retries = 0;
-
-               while (retries++ <= MMC_READ_SINGLE_RETRIES) {
-                       mmc_blk_rw_rq_prep(mqrq, card, 1, mq);
-
-                       mmc_wait_for_req(host, mrq);
-
-                       err = mmc_send_status(card, &status);
-                       if (err)
-                               goto error_exit;
-
-                       if (!mmc_host_is_spi(host) &&
-                           !mmc_ready_for_data(status)) {
-                               err = mmc_blk_fix_state(card, req);
-                               if (err)
-                                       goto error_exit;
-                       }
-
-                       if (!mrq->cmd->error)
-                               break;
-               }
-
-               if (mrq->cmd->error ||
-                   mrq->data->error ||
-                   (!mmc_host_is_spi(host) &&
-                    (mrq->cmd->resp[0] & CMD_ERRORS || status & CMD_ERRORS)))
-                       error = BLK_STS_IOERR;
-               else
-                       error = BLK_STS_OK;
-
-       } while (blk_update_request(req, error, bytes_per_read));
-
-       return;
-
-error_exit:
-       mrq->data->bytes_xfered = 0;
-       blk_update_request(req, BLK_STS_IOERR, bytes_per_read);
-       /* Let it try the remaining request again */
-       if (mqrq->retries > MMC_MAX_RETRIES - 1)
-               mqrq->retries = MMC_MAX_RETRIES - 1;
-}
-
 static inline bool mmc_blk_oor_valid(struct mmc_blk_request *brq)
 {
        return !!brq->mrq.sbc;
@@ -1974,13 +1916,6 @@ static void mmc_blk_mq_rw_recovery(struct mmc_queue *mq, struct request *req)
                mqrq->retries = MMC_MAX_RETRIES - MMC_DATA_RETRIES;
                return;
        }
-
-       if (rq_data_dir(req) == READ && brq->data.blocks >
-                       queue_physical_block_size(mq->queue) >> SECTOR_SHIFT) {
-               /* Read one (native) sector at a time */
-               mmc_blk_read_single(mq, req);
-               return;
-       }
 }
 
 static inline bool mmc_blk_rq_error(struct mmc_blk_request *brq)
@@ -2091,8 +2026,7 @@ static void mmc_blk_mq_complete_rq(struct mmc_queue *mq, struct request *req)
        } else if (!blk_rq_bytes(req)) {
                __blk_mq_end_request(req, BLK_STS_IOERR);
        } else if (mqrq->retries++ < MMC_MAX_RETRIES) {
-               if (rq_data_dir(req) == WRITE)
-                       mqrq->flags |= MQRQ_XFER_SINGLE_BLOCK;
+               mqrq->flags |= MQRQ_XFER_SINGLE_BLOCK;
                blk_mq_requeue_request(req, true);
        } else {
                if (mmc_card_removed(mq->card))