mmc: block: Issue a cache flush only when it's enabled
author     Avri Altman <avri.altman@wdc.com>
           Sun, 25 Apr 2021 06:02:06 +0000 (09:02 +0300)
committer  Greg Kroah-Hartman <gregkh@linuxfoundation.org>
           Wed, 12 May 2021 06:39:31 +0000 (08:39 +0200)
commit 97fce126e279690105ee15be652b465fd96f9997 upstream.

In command queueing mode, the cache isn't flushed via the mmc_flush_cache()
function, but instead by issuing a CMDQ_TASK_MGMT (CMD48) with a
FLUSH_CACHE opcode. In this path, we need to check whether the cache has
been enabled before deciding to flush it, along the lines of what is
being done in mmc_flush_cache().
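
For reference, the guard that mmc_flush_cache() already applies boils down
to the following (condensed from the mmc_ops.c hunk below); the CQE flush
path had no equivalent check before this change:

    /*
     * Pre-patch check in mmc_flush_cache(): only issue the EXT_CSD flush
     * when the device advertises a cache (cache_size > 0) and CACHE_CTRL
     * shows it is switched on (bit 0 set).
     */
    if (mmc_card_mmc(card) &&
        card->ext_csd.cache_size > 0 &&
        (card->ext_csd.cache_ctrl & 1))
            err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
                             EXT_CSD_FLUSH_CACHE, 1,
                             MMC_CACHE_FLUSH_TIMEOUT_MS);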

To fix this problem, let's add a new bus ops callback, ->cache_enabled(), and
implement it for the mmc bus type. This lets the mmc block device driver ask
whether a cache flush actually needs to be issued.
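
Sketched end to end, the fix wires things together as follows. This is a
condensed view of the hunks below, not a literal copy of the patch:

    /* core.h: optional per-bus-type hook; bus types that never expose a
     * device cache simply leave it unset, and the helper reports false. */
    static inline bool mmc_cache_enabled(struct mmc_host *host)
    {
            if (host->bus_ops->cache_enabled)
                    return host->bus_ops->cache_enabled(host);

            return false;
    }

    /* block.c, CQE path: complete the flush request right away when there
     * is no enabled cache, instead of issuing CMD48 with FLUSH_CACHE. */
    case REQ_OP_FLUSH:
            if (!mmc_cache_enabled(host)) {
                    blk_mq_end_request(req, BLK_STS_OK);
                    return MMC_REQ_FINISHED;
            }
            ret = mmc_blk_cqe_issue_flush(mq, req);
            break;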

Fixes: 1e8e55b67030 ("mmc: block: Add CQE support")
Cc: stable@vger.kernel.org
Reported-by: Brendan Peter <bpeter@lytx.com>
Signed-off-by: Avri Altman <avri.altman@wdc.com>
Tested-by: Brendan Peter <bpeter@lytx.com>
Acked-by: Adrian Hunter <adrian.hunter@intel.com>
Link: https://lore.kernel.org/r/20210425060207.2591-2-avri.altman@wdc.com
Link: https://lore.kernel.org/r/20210425060207.2591-3-avri.altman@wdc.com
[Ulf: Squashed the two patches and made some minor updates]
Signed-off-by: Ulf Hansson <ulf.hansson@linaro.org>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
drivers/mmc/core/block.c
drivers/mmc/core/core.h
drivers/mmc/core/mmc.c
drivers/mmc/core/mmc_ops.c

diff --git a/drivers/mmc/core/block.c b/drivers/mmc/core/block.c
index baaaf266c0f966ba0de26ab6eabf3a9b2dd9b591..a4c06ef673943d5d1cfe59c577e54ec542706746 100644
--- a/drivers/mmc/core/block.c
+++ b/drivers/mmc/core/block.c
@@ -2236,6 +2236,10 @@ enum mmc_issued mmc_blk_mq_issue_rq(struct mmc_queue *mq, struct request *req)
        case MMC_ISSUE_ASYNC:
                switch (req_op(req)) {
                case REQ_OP_FLUSH:
+                       if (!mmc_cache_enabled(host)) {
+                               blk_mq_end_request(req, BLK_STS_OK);
+                               return MMC_REQ_FINISHED;
+                       }
                        ret = mmc_blk_cqe_issue_flush(mq, req);
                        break;
                case REQ_OP_READ:
diff --git a/drivers/mmc/core/core.h b/drivers/mmc/core/core.h
index 575ac0257af2f1f50a16cb0a7c2e238063b90a62..ca554d08f7e1dc8906e9442ef7af90b08137d126 100644
--- a/drivers/mmc/core/core.h
+++ b/drivers/mmc/core/core.h
@@ -29,6 +29,7 @@ struct mmc_bus_ops {
        int (*shutdown)(struct mmc_host *);
        int (*hw_reset)(struct mmc_host *);
        int (*sw_reset)(struct mmc_host *);
+       bool (*cache_enabled)(struct mmc_host *);
 };
 
 void mmc_attach_bus(struct mmc_host *host, const struct mmc_bus_ops *ops);
@@ -171,4 +172,12 @@ static inline void mmc_post_req(struct mmc_host *host, struct mmc_request *mrq,
                host->ops->post_req(host, mrq, err);
 }
 
+static inline bool mmc_cache_enabled(struct mmc_host *host)
+{
+       if (host->bus_ops->cache_enabled)
+               return host->bus_ops->cache_enabled(host);
+
+       return false;
+}
+
 #endif
diff --git a/drivers/mmc/core/mmc.c b/drivers/mmc/core/mmc.c
index 8741271d39712b4950a039c3244c0f26fbb08575..4d2b4b0da93cdb00bd0fc4e5a2b7fbed4860cafc 100644
--- a/drivers/mmc/core/mmc.c
+++ b/drivers/mmc/core/mmc.c
@@ -2029,6 +2029,12 @@ static void mmc_detect(struct mmc_host *host)
        }
 }
 
+static bool _mmc_cache_enabled(struct mmc_host *host)
+{
+       return host->card->ext_csd.cache_size > 0 &&
+              host->card->ext_csd.cache_ctrl & 1;
+}
+
 static int _mmc_suspend(struct mmc_host *host, bool is_suspend)
 {
        int err = 0;
@@ -2208,6 +2214,7 @@ static const struct mmc_bus_ops mmc_ops = {
        .alive = mmc_alive,
        .shutdown = mmc_shutdown,
        .hw_reset = _mmc_hw_reset,
+       .cache_enabled = _mmc_cache_enabled,
 };
 
 /*
diff --git a/drivers/mmc/core/mmc_ops.c b/drivers/mmc/core/mmc_ops.c
index 265d95ec82ce16f063accfce5d0c84af7314ea45..c458f6b626a2f1b251ad047d2a05c03e1c616e42 100644
--- a/drivers/mmc/core/mmc_ops.c
+++ b/drivers/mmc/core/mmc_ops.c
@@ -988,9 +988,7 @@ int mmc_flush_cache(struct mmc_card *card)
 {
        int err = 0;
 
-       if (mmc_card_mmc(card) &&
-                       (card->ext_csd.cache_size > 0) &&
-                       (card->ext_csd.cache_ctrl & 1)) {
+       if (mmc_cache_enabled(card->host)) {
                err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
                                 EXT_CSD_FLUSH_CACHE, 1,
                                 MMC_CACHE_FLUSH_TIMEOUT_MS);