From: Greg Kroah-Hartman
Date: Sat, 8 Dec 2018 08:38:38 +0000 (+0100)
Subject: 4.19-stable patches
X-Git-Tag: v4.19.8~2
X-Git-Url: http://git.ipfire.org/?a=commitdiff_plain;h=0e7e7af0f268f4c18f963032f95a54e2d6912bf2;p=thirdparty%2Fkernel%2Fstable-queue.git

4.19-stable patches

added patches:
	blk-mq-punt-failed-direct-issue-to-dispatch-list.patch
---

diff --git a/queue-4.19/blk-mq-punt-failed-direct-issue-to-dispatch-list.patch b/queue-4.19/blk-mq-punt-failed-direct-issue-to-dispatch-list.patch
new file mode 100644
index 00000000000..adf054eef4d
--- /dev/null
+++ b/queue-4.19/blk-mq-punt-failed-direct-issue-to-dispatch-list.patch
@@ -0,0 +1,124 @@
+From c616cbee97aed4bc6178f148a7240206dcdb85a6 Mon Sep 17 00:00:00 2001
+From: Jens Axboe
+Date: Thu, 6 Dec 2018 22:17:44 -0700
+Subject: blk-mq: punt failed direct issue to dispatch list
+
+From: Jens Axboe
+
+commit c616cbee97aed4bc6178f148a7240206dcdb85a6 upstream.
+
+After the direct dispatch corruption fix, we permanently disallow direct
+dispatch of non read/write requests. This works fine off the normal IO
+path, as they will be retried like any other failed direct dispatch
+request. But for the blk_insert_cloned_request() that only DM uses to
+bypass the bottom level scheduler, we always first attempt direct
+dispatch. For some types of requests, that's now a permanent failure,
+and no amount of retrying will make that succeed. This results in a
+livelock.
+
+Instead of making special cases for what we can direct issue, and now
+having to deal with DM solving the livelock while still retaining a BUSY
+condition feedback loop, always just add a request that has been through
+->queue_rq() to the hardware queue dispatch list. These are safe to use
+as no merging can take place there. Additionally, if requests do have
+prepped data from drivers, we aren't dependent on them not sharing space
+in the request structure to safely add them to the IO scheduler lists.
+
+This basically reverts ffe81d45322c and is based on a patch from Ming,
+but with the list insert case covered as well.
+
+Fixes: ffe81d45322c ("blk-mq: fix corruption with direct issue")
+Cc: stable@vger.kernel.org
+Suggested-by: Ming Lei
+Reported-by: Bart Van Assche
+Tested-by: Ming Lei
+Acked-by: Mike Snitzer
+Signed-off-by: Jens Axboe
+Signed-off-by: Greg Kroah-Hartman
+
+---
+ block/blk-mq.c |   33 +++++----------------------------
+ 1 file changed, 5 insertions(+), 28 deletions(-)
+
+--- a/block/blk-mq.c
++++ b/block/blk-mq.c
+@@ -1698,15 +1698,6 @@ static blk_status_t __blk_mq_issue_direc
+ 		break;
+ 	case BLK_STS_RESOURCE:
+ 	case BLK_STS_DEV_RESOURCE:
+-		/*
+-		 * If direct dispatch fails, we cannot allow any merging on
+-		 * this IO. Drivers (like SCSI) may have set up permanent state
+-		 * for this request, like SG tables and mappings, and if we
+-		 * merge to it later on then we'll still only do IO to the
+-		 * original part.
+-		 */
+-		rq->cmd_flags |= REQ_NOMERGE;
+-
+ 		blk_mq_update_dispatch_busy(hctx, true);
+ 		__blk_mq_requeue_request(rq);
+ 		break;
+@@ -1719,18 +1710,6 @@ static blk_status_t __blk_mq_issue_direc
+ 	return ret;
+ }
+ 
+-/*
+- * Don't allow direct dispatch of anything but regular reads/writes,
+- * as some of the other commands can potentially share request space
+- * with data we need for the IO scheduler. If we attempt a direct dispatch
+- * on those and fail, we can't safely add it to the scheduler afterwards
+- * without potentially overwriting data that the driver has already written.
+- */
+-static bool blk_rq_can_direct_dispatch(struct request *rq)
+-{
+-	return req_op(rq) == REQ_OP_READ || req_op(rq) == REQ_OP_WRITE;
+-}
+-
+ static blk_status_t __blk_mq_try_issue_directly(struct blk_mq_hw_ctx *hctx,
+ 						struct request *rq,
+ 						blk_qc_t *cookie,
+@@ -1752,7 +1731,7 @@ static blk_status_t __blk_mq_try_issue_d
+ 		goto insert;
+ 	}
+ 
+-	if (!blk_rq_can_direct_dispatch(rq) || (q->elevator && !bypass_insert))
++	if (q->elevator && !bypass_insert)
+ 		goto insert;
+ 
+ 	if (!blk_mq_get_dispatch_budget(hctx))
+@@ -1768,7 +1747,7 @@ insert:
+ 	if (bypass_insert)
+ 		return BLK_STS_RESOURCE;
+ 
+-	blk_mq_sched_insert_request(rq, false, run_queue, false);
++	blk_mq_request_bypass_insert(rq, run_queue);
+ 	return BLK_STS_OK;
+ }
+ 
+@@ -1784,7 +1763,7 @@ static void blk_mq_try_issue_directly(st
+ 
+ 	ret = __blk_mq_try_issue_directly(hctx, rq, cookie, false);
+ 	if (ret == BLK_STS_RESOURCE || ret == BLK_STS_DEV_RESOURCE)
+-		blk_mq_sched_insert_request(rq, false, true, false);
++		blk_mq_request_bypass_insert(rq, true);
+ 	else if (ret != BLK_STS_OK)
+ 		blk_mq_end_request(rq, ret);
+ 
+@@ -1814,15 +1793,13 @@ void blk_mq_try_issue_list_directly(stru
+ 		struct request *rq = list_first_entry(list, struct request,
+ 				queuelist);
+ 
+-		if (!blk_rq_can_direct_dispatch(rq))
+-			break;
+-
+ 		list_del_init(&rq->queuelist);
+ 		ret = blk_mq_request_issue_directly(rq);
+ 		if (ret != BLK_STS_OK) {
+ 			if (ret == BLK_STS_RESOURCE ||
+ 					ret == BLK_STS_DEV_RESOURCE) {
+-				list_add(&rq->queuelist, list);
++				blk_mq_request_bypass_insert(rq,
++							list_empty(list));
+ 				break;
+ 			}
+ 			blk_mq_end_request(rq, ret);
diff --git a/queue-4.19/series b/queue-4.19/series
index 28b6bcd4ab2..c1bb9675d7e 100644
--- a/queue-4.19/series
+++ b/queue-4.19/series
@@ -38,3 +38,4 @@ btrfs-tree-checker-don-t-check-max-block-group-size-as-current-max-chunk-size-li
 arc-change-defconfig-defaults-to-arcv2.patch
 arc-add-support-of-nfsv3-acl.patch
 tipc-use-destination-length-for-copy-string.patch
+blk-mq-punt-failed-direct-issue-to-dispatch-list.patch
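
For readers tracing the logic of the commit message above, the following
stand-alone C sketch models the decision the patch implements: once a
request has been through the driver's ->queue_rq() and come back busy,
it is punted to the hardware queue dispatch list (what
blk_mq_request_bypass_insert() does in the real code) rather than to an
IO scheduler list, where merging could corrupt driver-prepped state.
This is a simplified userspace model, not kernel code: fake_queue_rq(),
the struct fields, and the two insert helpers are all invented stand-ins
for illustration only.

	#include <stdbool.h>
	#include <stdio.h>

	enum blk_status { BLK_STS_OK, BLK_STS_RESOURCE };

	/* Stand-in request: tracks whether the driver's ->queue_rq() has
	 * seen it and may therefore have prepped state (SG tables,
	 * mappings) living in the request structure. */
	struct request {
		int id;
		bool seen_queue_rq;
	};

	/* Stand-in for the driver's ->queue_rq(): pretend the device is
	 * busy, the case this patch is concerned with. */
	static enum blk_status fake_queue_rq(struct request *rq)
	{
		rq->seen_queue_rq = true;
		return BLK_STS_RESOURCE;
	}

	/* Stand-in for blk_mq_request_bypass_insert(): the hardware queue
	 * dispatch list, where no merging can take place. */
	static void dispatch_list_insert(struct request *rq)
	{
		printf("rq %d -> hctx dispatch list (merge-safe)\n", rq->id);
	}

	/* Stand-in for blk_mq_sched_insert_request(): the IO scheduler
	 * lists, which may merge requests and can share request space
	 * with data the driver has already written. */
	static void sched_insert(struct request *rq)
	{
		printf("rq %d -> IO scheduler list (may merge)\n", rq->id);
	}

	/* The rule the patch adopts: any request that failed direct issue
	 * after going through ->queue_rq() goes to the dispatch list.
	 * The insert can no longer permanently fail for "special" request
	 * types, which is what turned DM's blk_insert_cloned_request()
	 * retry loop into a livelock. */
	static void punt_failed_direct_issue(struct request *rq)
	{
		if (fake_queue_rq(rq) == BLK_STS_OK)
			return;		/* device accepted it */
		if (rq->seen_queue_rq)
			dispatch_list_insert(rq);
		else
			sched_insert(rq);
	}

	int main(void)
	{
		struct request rq = { .id = 1, .seen_queue_rq = false };
		punt_failed_direct_issue(&rq);
		return 0;
	}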