--- /dev/null
+From 79d628a726895ad82c2a5135a90aecc4fc7e222b Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 8 Dec 2020 21:29:51 -0800
+Subject: scsi: block: Do not accept any requests while suspended
+
+From: Alan Stern <stern@rowland.harvard.edu>
+
+[ Upstream commit 52abca64fd9410ea6c9a3a74eab25663b403d7da ]
+
+blk_queue_enter() accepts BLK_MQ_REQ_PM requests independent of the runtime
+power management state. Now that SCSI domain validation no longer depends
+on this behavior, modify the behavior of blk_queue_enter() as follows:
+
+ - Do not accept any requests while suspended.
+
+ - Only process power management requests while suspending or resuming.
+
+Submitting BLK_MQ_REQ_PM requests to a device that is runtime suspended
+causes runtime-suspended devices not to resume as they should. The request
+which should cause a runtime resume instead gets issued directly, without
+resuming the device first. Of course the device can't handle it properly,
+the I/O fails, and the device remains suspended.
+
+The problem is fixed by checking that the queue's runtime-PM status isn't
+RPM_SUSPENDED before allowing a request to be issued, and queuing a
+runtime-resume request if it is. In particular, the inline
+blk_pm_request_resume() routine is renamed blk_pm_resume_queue() and the
+code is unified by merging the surrounding checks into the routine. If the
+queue isn't set up for runtime PM, or there currently is no restriction on
+allowed requests, the request is allowed. Likewise if the BLK_MQ_REQ_PM
+flag is set and the status isn't RPM_SUSPENDED. Otherwise a runtime resume
+is queued and the request is blocked until conditions are more suitable.
+
+[ bvanassche: modified commit message and removed Cc: stable because
+ without the previous patches from this series this patch would break
+ parallel SCSI domain validation + introduced queue_rpm_status() ]
+
+Link: https://lore.kernel.org/r/20201209052951.16136-9-bvanassche@acm.org
+Cc: Jens Axboe <axboe@kernel.dk>
+Cc: Christoph Hellwig <hch@lst.de>
+Cc: Hannes Reinecke <hare@suse.de>
+Cc: Can Guo <cang@codeaurora.org>
+Cc: Stanley Chu <stanley.chu@mediatek.com>
+Cc: Ming Lei <ming.lei@redhat.com>
+Cc: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
+Reported-and-tested-by: Martin Kepplinger <martin.kepplinger@puri.sm>
+Reviewed-by: Hannes Reinecke <hare@suse.de>
+Reviewed-by: Can Guo <cang@codeaurora.org>
+Signed-off-by: Alan Stern <stern@rowland.harvard.edu>
+Signed-off-by: Bart Van Assche <bvanassche@acm.org>
+Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ block/blk-core.c | 7 ++++---
+ block/blk-pm.h | 14 +++++++++-----
+ include/linux/blkdev.h | 12 ++++++++++++
+ 3 files changed, 25 insertions(+), 8 deletions(-)
+
+diff --git a/block/blk-core.c b/block/blk-core.c
+index a00bce9f46d88..2d53e2ff48ff8 100644
+--- a/block/blk-core.c
++++ b/block/blk-core.c
+@@ -18,6 +18,7 @@
+ #include <linux/bio.h>
+ #include <linux/blkdev.h>
+ #include <linux/blk-mq.h>
++#include <linux/blk-pm.h>
+ #include <linux/highmem.h>
+ #include <linux/mm.h>
+ #include <linux/pagemap.h>
+@@ -440,7 +441,8 @@ int blk_queue_enter(struct request_queue *q, blk_mq_req_flags_t flags)
+ * responsible for ensuring that that counter is
+ * globally visible before the queue is unfrozen.
+ */
+- if (pm || !blk_queue_pm_only(q)) {
++ if ((pm && queue_rpm_status(q) != RPM_SUSPENDED) ||
++ !blk_queue_pm_only(q)) {
+ success = true;
+ } else {
+ percpu_ref_put(&q->q_usage_counter);
+@@ -465,8 +467,7 @@ int blk_queue_enter(struct request_queue *q, blk_mq_req_flags_t flags)
+
+ wait_event(q->mq_freeze_wq,
+ (!q->mq_freeze_depth &&
+- (pm || (blk_pm_request_resume(q),
+- !blk_queue_pm_only(q)))) ||
++ blk_pm_resume_queue(pm, q)) ||
+ blk_queue_dying(q));
+ if (blk_queue_dying(q))
+ return -ENODEV;
+diff --git a/block/blk-pm.h b/block/blk-pm.h
+index ea5507d23e759..a2283cc9f716d 100644
+--- a/block/blk-pm.h
++++ b/block/blk-pm.h
+@@ -6,11 +6,14 @@
+ #include <linux/pm_runtime.h>
+
+ #ifdef CONFIG_PM
+-static inline void blk_pm_request_resume(struct request_queue *q)
++static inline int blk_pm_resume_queue(const bool pm, struct request_queue *q)
+ {
+- if (q->dev && (q->rpm_status == RPM_SUSPENDED ||
+- q->rpm_status == RPM_SUSPENDING))
+- pm_request_resume(q->dev);
++ if (!q->dev || !blk_queue_pm_only(q))
++ return 1; /* Nothing to do */
++ if (pm && q->rpm_status != RPM_SUSPENDED)
++ return 1; /* Request allowed */
++ pm_request_resume(q->dev);
++ return 0;
+ }
+
+ static inline void blk_pm_mark_last_busy(struct request *rq)
+@@ -44,8 +47,9 @@ static inline void blk_pm_put_request(struct request *rq)
+ --rq->q->nr_pending;
+ }
+ #else
+-static inline void blk_pm_request_resume(struct request_queue *q)
++static inline int blk_pm_resume_queue(const bool pm, struct request_queue *q)
+ {
++ return 1;
+ }
+
+ static inline void blk_pm_mark_last_busy(struct request *rq)
+diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
+index 4a6e33d382429..542471b76f410 100644
+--- a/include/linux/blkdev.h
++++ b/include/linux/blkdev.h
+@@ -692,6 +692,18 @@ static inline bool queue_is_mq(struct request_queue *q)
+ return q->mq_ops;
+ }
+
++#ifdef CONFIG_PM
++static inline enum rpm_status queue_rpm_status(struct request_queue *q)
++{
++ return q->rpm_status;
++}
++#else
++static inline enum rpm_status queue_rpm_status(struct request_queue *q)
++{
++ return RPM_ACTIVE;
++}
++#endif
++
+ static inline enum blk_zoned_model
+ blk_queue_zoned_model(struct request_queue *q)
+ {
+--
+2.27.0
+
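For reference, the request-gating policy that the reworked blk_pm_resume_queue()
implements can be modeled outside the kernel. The C program below is a standalone
simplification, not kernel code: struct queue_model and its fields stand in for
struct request_queue, and only the decision logic from the hunk above is
reproduced (the real routine additionally calls pm_request_resume() before
returning 0).

/*
 * Standalone model of the gating logic in blk_pm_resume_queue() after this
 * patch.  The enum ordering mirrors include/linux/pm.h; everything else here
 * is illustrative only.
 */
#include <stdbool.h>
#include <stdio.h>

enum rpm_status { RPM_ACTIVE = 0, RPM_RESUMING, RPM_SUSPENDED, RPM_SUSPENDING };

struct queue_model {
	bool has_dev;		/* q->dev != NULL: runtime PM is set up */
	bool pm_only;		/* blk_queue_pm_only(q) */
	enum rpm_status rpm;	/* q->rpm_status */
};

/* Nonzero: the request may proceed.  Zero: it must wait (in the kernel this
 * case also queues a runtime resume via pm_request_resume()). */
static int resume_queue_model(bool pm, const struct queue_model *q)
{
	if (!q->has_dev || !q->pm_only)
		return 1;		/* nothing restricts requests */
	if (pm && q->rpm != RPM_SUSPENDED)
		return 1;		/* PM request while suspending/resuming */
	return 0;			/* block until the queue resumes */
}

int main(void)
{
	struct queue_model q = { .has_dev = true, .pm_only = true,
				 .rpm = RPM_SUSPENDED };

	/* A PM request against a fully suspended queue is now blocked... */
	printf("PM request, RPM_SUSPENDED:  allowed=%d\n",
	       resume_queue_model(true, &q));

	/* ...but still allowed while the queue is suspending or resuming. */
	q.rpm = RPM_SUSPENDING;
	printf("PM request, RPM_SUSPENDING: allowed=%d\n",
	       resume_queue_model(true, &q));
	return 0;
}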
--- /dev/null
+From dbf57e5986491017310cf174c6250bc98c6703f1 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 8 Dec 2020 21:29:50 -0800
+Subject: scsi: block: Remove RQF_PREEMPT and BLK_MQ_REQ_PREEMPT
+
+From: Bart Van Assche <bvanassche@acm.org>
+
+[ Upstream commit a4d34da715e3cb7e0741fe603dcd511bed067e00 ]
+
+Remove flag RQF_PREEMPT and BLK_MQ_REQ_PREEMPT since these are no longer
+used by any kernel code.
+
+Link: https://lore.kernel.org/r/20201209052951.16136-8-bvanassche@acm.org
+Cc: Can Guo <cang@codeaurora.org>
+Cc: Stanley Chu <stanley.chu@mediatek.com>
+Cc: Alan Stern <stern@rowland.harvard.edu>
+Cc: Ming Lei <ming.lei@redhat.com>
+Cc: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
+Cc: Martin Kepplinger <martin.kepplinger@puri.sm>
+Reviewed-by: Christoph Hellwig <hch@lst.de>
+Reviewed-by: Hannes Reinecke <hare@suse.de>
+Reviewed-by: Jens Axboe <axboe@kernel.dk>
+Reviewed-by: Can Guo <cang@codeaurora.org>
+Signed-off-by: Bart Van Assche <bvanassche@acm.org>
+Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ block/blk-core.c | 7 +++----
+ block/blk-mq-debugfs.c | 1 -
+ block/blk-mq.c | 2 --
+ include/linux/blk-mq.h | 2 --
+ include/linux/blkdev.h | 6 +-----
+ 5 files changed, 4 insertions(+), 14 deletions(-)
+
+diff --git a/block/blk-core.c b/block/blk-core.c
+index 10696f9fb6ac6..a00bce9f46d88 100644
+--- a/block/blk-core.c
++++ b/block/blk-core.c
+@@ -424,11 +424,11 @@ EXPORT_SYMBOL(blk_cleanup_queue);
+ /**
+ * blk_queue_enter() - try to increase q->q_usage_counter
+ * @q: request queue pointer
+- * @flags: BLK_MQ_REQ_NOWAIT, BLK_MQ_REQ_PM and/or BLK_MQ_REQ_PREEMPT
++ * @flags: BLK_MQ_REQ_NOWAIT and/or BLK_MQ_REQ_PM
+ */
+ int blk_queue_enter(struct request_queue *q, blk_mq_req_flags_t flags)
+ {
+- const bool pm = flags & (BLK_MQ_REQ_PM | BLK_MQ_REQ_PREEMPT);
++ const bool pm = flags & BLK_MQ_REQ_PM;
+
+ while (true) {
+ bool success = false;
+@@ -630,8 +630,7 @@ struct request *blk_get_request(struct request_queue *q, unsigned int op,
+ struct request *req;
+
+ WARN_ON_ONCE(op & REQ_NOWAIT);
+- WARN_ON_ONCE(flags & ~(BLK_MQ_REQ_NOWAIT | BLK_MQ_REQ_PM |
+- BLK_MQ_REQ_PREEMPT));
++ WARN_ON_ONCE(flags & ~(BLK_MQ_REQ_NOWAIT | BLK_MQ_REQ_PM));
+
+ req = blk_mq_alloc_request(q, op, flags);
+ if (!IS_ERR(req) && q->mq_ops->initialize_rq_fn)
+diff --git a/block/blk-mq-debugfs.c b/block/blk-mq-debugfs.c
+index e21eed20a1551..4d6e83e5b4429 100644
+--- a/block/blk-mq-debugfs.c
++++ b/block/blk-mq-debugfs.c
+@@ -298,7 +298,6 @@ static const char *const rqf_name[] = {
+ RQF_NAME(MIXED_MERGE),
+ RQF_NAME(MQ_INFLIGHT),
+ RQF_NAME(DONTPREP),
+- RQF_NAME(PREEMPT),
+ RQF_NAME(FAILED),
+ RQF_NAME(QUIET),
+ RQF_NAME(ELVPRIV),
+diff --git a/block/blk-mq.c b/block/blk-mq.c
+index 0072ffa50b46e..2a1eff60c7975 100644
+--- a/block/blk-mq.c
++++ b/block/blk-mq.c
+@@ -294,8 +294,6 @@ static struct request *blk_mq_rq_ctx_init(struct blk_mq_alloc_data *data,
+ rq->cmd_flags = data->cmd_flags;
+ if (data->flags & BLK_MQ_REQ_PM)
+ rq->rq_flags |= RQF_PM;
+- if (data->flags & BLK_MQ_REQ_PREEMPT)
+- rq->rq_flags |= RQF_PREEMPT;
+ if (blk_queue_io_stat(data->q))
+ rq->rq_flags |= RQF_IO_STAT;
+ INIT_LIST_HEAD(&rq->queuelist);
+diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h
+index c9ecfd8b03381..f8ea27423d1d8 100644
+--- a/include/linux/blk-mq.h
++++ b/include/linux/blk-mq.h
+@@ -448,8 +448,6 @@ enum {
+ BLK_MQ_REQ_RESERVED = (__force blk_mq_req_flags_t)(1 << 1),
+ /* set RQF_PM */
+ BLK_MQ_REQ_PM = (__force blk_mq_req_flags_t)(1 << 2),
+- /* set RQF_PREEMPT */
+- BLK_MQ_REQ_PREEMPT = (__force blk_mq_req_flags_t)(1 << 3),
+ };
+
+ struct request *blk_mq_alloc_request(struct request_queue *q, unsigned int op,
+diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
+index 033eb5f73b654..4a6e33d382429 100644
+--- a/include/linux/blkdev.h
++++ b/include/linux/blkdev.h
+@@ -79,9 +79,6 @@ typedef __u32 __bitwise req_flags_t;
+ #define RQF_MQ_INFLIGHT ((__force req_flags_t)(1 << 6))
+ /* don't call prep for this one */
+ #define RQF_DONTPREP ((__force req_flags_t)(1 << 7))
+-/* set for "ide_preempt" requests and also for requests for which the SCSI
+- "quiesce" state must be ignored. */
+-#define RQF_PREEMPT ((__force req_flags_t)(1 << 8))
+ /* vaguely specified driver internal error. Ignored by the block layer */
+ #define RQF_FAILED ((__force req_flags_t)(1 << 10))
+ /* don't warn about errors */
+@@ -430,8 +427,7 @@ struct request_queue {
+ unsigned long queue_flags;
+ /*
+ * Number of contexts that have called blk_set_pm_only(). If this
+- * counter is above zero then only RQF_PM and RQF_PREEMPT requests are
+- * processed.
++ * counter is above zero then only RQF_PM requests are processed.
+ */
+ atomic_t pm_only;
+
+--
+2.27.0
+
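With RQF_PREEMPT and BLK_MQ_REQ_PREEMPT gone, a request that must be processed
while a queue is pm-only is marked with BLK_MQ_REQ_PM alone. The sketch below
shows that allocation pattern under stated assumptions: issue_pm_request_example()
is a hypothetical caller, not part of this series; it only mirrors how
scsi_execute() passes BLK_MQ_REQ_PM for RQF_PM requests in this kernel version.

#include <linux/blkdev.h>
#include <linux/blk-mq.h>
#include <linux/err.h>

static int issue_pm_request_example(struct request_queue *q)
{
	struct request *rq;

	/* BLK_MQ_REQ_PM sets RQF_PM; there is no separate PREEMPT flag any more. */
	rq = blk_get_request(q, REQ_OP_DRV_IN, BLK_MQ_REQ_PM);
	if (IS_ERR(rq))
		return PTR_ERR(rq);

	/* ... fill in and submit the request here ... */

	blk_put_request(rq);
	return 0;
}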