--- /dev/null
+From aa0818c6ee8d8e4772725a43550823347bc1ad30 Mon Sep 17 00:00:00 2001
+From: Ming Lei <tom.leiming@gmail.com>
+Date: Fri, 16 May 2014 23:31:21 +0800
+Subject: virtio_blk: fix race between start and stop queue
+
+From: Ming Lei <tom.leiming@gmail.com>
+
+commit aa0818c6ee8d8e4772725a43550823347bc1ad30 upstream.
+
+When there aren't enough vring descriptors for adding to the vq,
+blk-mq will be put into the stopped state until some of the
+pending descriptors are completed & freed.
+
+Unfortunately, the vq's interrupt may come just before
+blk-mq's BLK_MQ_S_STOPPED flag is set, so the blk-mq will
+still be kept stopped even though lots of descriptors
+are completed and freed in the interrupt handler. The worst
+case is that all pending descriptors are freed in the
+interrupt handler, and the queue is kept stopped forever.
+
+This patch fixes the problem by starting/stopping blk-mq
+while holding vq_lock.
+
+Cc: Jens Axboe <axboe@kernel.dk>
+Cc: Rusty Russell <rusty@rustcorp.com.au>
+Signed-off-by: Ming Lei <tom.leiming@gmail.com>
+Signed-off-by: Jens Axboe <axboe@fb.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+
+---
+ drivers/block/virtio_blk.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+--- a/drivers/block/virtio_blk.c
++++ b/drivers/block/virtio_blk.c
+@@ -144,11 +144,11 @@ static void virtblk_done(struct virtqueu
+ if (unlikely(virtqueue_is_broken(vq)))
+ break;
+ } while (!virtqueue_enable_cb(vq));
+- spin_unlock_irqrestore(&vblk->vq_lock, flags);
+
+ /* In case queue is stopped waiting for more buffers. */
+ if (req_done)
+ blk_mq_start_stopped_hw_queues(vblk->disk->queue);
++ spin_unlock_irqrestore(&vblk->vq_lock, flags);
+ }
+
+ static int virtio_queue_rq(struct blk_mq_hw_ctx *hctx, struct request *req)
+@@ -200,8 +200,8 @@ static int virtio_queue_rq(struct blk_mq
+ spin_lock_irqsave(&vblk->vq_lock, flags);
+ if (__virtblk_add_req(vblk->vq, vbr, vbr->sg, num) < 0) {
+ virtqueue_kick(vblk->vq);
+- spin_unlock_irqrestore(&vblk->vq_lock, flags);
+ blk_mq_stop_hw_queue(hctx);
++ spin_unlock_irqrestore(&vblk->vq_lock, flags);
+ return BLK_MQ_RQ_QUEUE_BUSY;
+ }
+