git.ipfire.org Git - thirdparty/linux.git/blobdiff - drivers/nvme/host/pci.c
Merge tag 'for-5.8/drivers-2020-06-01' of git://git.kernel.dk/linux-block
[thirdparty/linux.git] / drivers / nvme / host / pci.c
index b307c06a783dca5795c372caacb2664826090a2e..d690d5593a8095971a73965305250d87d613302e 100644 (file)
@@ -984,6 +984,11 @@ static inline int nvme_process_cq(struct nvme_queue *nvmeq)
 
        while (nvme_cqe_pending(nvmeq)) {
                found++;
+               /*
+                * load-load control dependency between phase and the rest of
+                * the cqe requires a full read memory barrier
+                */
+               dma_rmb();
                nvme_handle_cqe(nvmeq, nvmeq->cq_head);
                nvme_update_cq_head(nvmeq);
        }
@@ -1372,16 +1377,19 @@ static void nvme_disable_admin_queue(struct nvme_dev *dev, bool shutdown)
 
 /*
  * Called only on a device that has been disabled and after all other threads
- * that can check this device's completion queues have synced. This is the
- * last chance for the driver to see a natural completion before
- * nvme_cancel_request() terminates all incomplete requests.
+ * that can check this device's completion queues have synced, except
+ * nvme_poll(). This is the last chance for the driver to see a natural
+ * completion before nvme_cancel_request() terminates all incomplete requests.
  */
 static void nvme_reap_pending_cqes(struct nvme_dev *dev)
 {
        int i;
 
-       for (i = dev->ctrl.queue_count - 1; i > 0; i--)
+       for (i = dev->ctrl.queue_count - 1; i > 0; i--) {
+               spin_lock(&dev->queues[i].cq_poll_lock);
                nvme_process_cq(&dev->queues[i]);
+               spin_unlock(&dev->queues[i].cq_poll_lock);
+       }
 }
 
 static int nvme_cmb_qdepth(struct nvme_dev *dev, int nr_io_queues,