]> git.ipfire.org Git - thirdparty/kernel/linux.git/commitdiff
dmaengine: idxd: Flush kernel workqueues on Function Level Reset
author: Vinicius Costa Gomes <vinicius.gomes@intel.com>
Wed, 21 Jan 2026 18:34:30 +0000 (10:34 -0800)
committer: Vinod Koul <vkoul@kernel.org>
Wed, 25 Feb 2026 11:09:17 +0000 (16:39 +0530)
When a Function Level Reset (FLR) happens, terminate the pending
descriptors that were issued by in-kernel users and disable the
interrupts associated with those. They will be re-enabled after FLR
finishes.

idxd_wq_flush_descs() is declared in idxd.h because it's going to be
used by the DMA backend in a future patch.

Signed-off-by: Vinicius Costa Gomes <vinicius.gomes@intel.com>
Reviewed-by: Dave Jiang <dave.jiang@intel.com>
Link: https://patch.msgid.link/20260121-idxd-fix-flr-on-kernel-queues-v3-v3-4-7ed70658a9d1@intel.com
Signed-off-by: Vinod Koul <vkoul@kernel.org>
drivers/dma/idxd/device.c
drivers/dma/idxd/idxd.h
drivers/dma/idxd/irq.c

index 5265925f30764233374f41e148b25badb8f79840..5e890b6771cb5635a854a520b7cb5f7ecd9333ec 100644 (file)
@@ -1339,6 +1339,11 @@ void idxd_wq_free_irq(struct idxd_wq *wq)
 
        free_irq(ie->vector, ie);
        idxd_flush_pending_descs(ie);
+
+       /* The interrupt might have been already released by FLR */
+       if (ie->int_handle == INVALID_INT_HANDLE)
+               return;
+
        if (idxd->request_int_handles)
                idxd_device_release_int_handle(idxd, ie->int_handle, IDXD_IRQ_MSIX);
        idxd_device_clear_perm_entry(idxd, ie);
@@ -1347,6 +1352,23 @@ void idxd_wq_free_irq(struct idxd_wq *wq)
        ie->pasid = IOMMU_PASID_INVALID;
 }
 
+/*
+ * idxd_wq_flush_descs() - Abort pending descriptors on a kernel-type WQ
+ * and release its interrupt resources, used when an FLR tears the device
+ * state down underneath in-kernel users.
+ *
+ * No-op for WQs that are not enabled or not of kernel type.
+ */
+void idxd_wq_flush_descs(struct idxd_wq *wq)
+{
+       struct idxd_irq_entry *ie = &wq->ie;
+       struct idxd_device *idxd = wq->idxd;
+
+       /* Serialize against concurrent WQ state transitions; auto-unlocked. */
+       guard(mutex)(&wq->wq_lock);
+
+       if (wq->state != IDXD_WQ_ENABLED || wq->type != IDXD_WQT_KERNEL)
+               return;
+
+       idxd_flush_pending_descs(ie);
+       if (idxd->request_int_handles)
+               idxd_device_release_int_handle(idxd, ie->int_handle, IDXD_IRQ_MSIX);
+       idxd_device_clear_perm_entry(idxd, ie);
+       /* Mark the handle invalid so idxd_wq_free_irq() skips a second release. */
+       ie->int_handle = INVALID_INT_HANDLE;
+}
+
 int idxd_wq_request_irq(struct idxd_wq *wq)
 {
        struct idxd_device *idxd = wq->idxd;
index ea8c4daed38d4dc738b954db778a6e59ed4d033e..ce78b9a7c641c8ac8f3d1b3d1e8c75658d9059ea 100644 (file)
@@ -803,6 +803,7 @@ void idxd_wq_quiesce(struct idxd_wq *wq);
 int idxd_wq_init_percpu_ref(struct idxd_wq *wq);
 void idxd_wq_free_irq(struct idxd_wq *wq);
 int idxd_wq_request_irq(struct idxd_wq *wq);
+void idxd_wq_flush_descs(struct idxd_wq *wq);
 
 /* submission */
 int idxd_submit_desc(struct idxd_wq *wq, struct idxd_desc *desc);
index 7782f8c51c32aa598befb7bd0dd48a8f5230b050..6a25e1fd0e62c52ab3ce45d59c89b7135502106d 100644 (file)
@@ -397,6 +397,17 @@ static void idxd_device_flr(struct work_struct *work)
                dev_err(&idxd->pdev->dev, "FLR failed\n");
 }
 
+/*
+ * idxd_wqs_flush_descs() - Flush pending descriptors on every WQ of the
+ * device ahead of FLR handling. idxd_wq_flush_descs() itself skips WQs
+ * that are not enabled kernel-type queues, so all slots can be walked
+ * unconditionally.
+ */
+static void idxd_wqs_flush_descs(struct idxd_device *idxd)
+{
+       int i;
+
+       for (i = 0; i < idxd->max_wqs; i++) {
+               struct idxd_wq *wq = idxd->wqs[i];
+
+               idxd_wq_flush_descs(wq);
+       }
+}
+
 static irqreturn_t idxd_halt(struct idxd_device *idxd)
 {
        union gensts_reg gensts;
@@ -415,6 +426,11 @@ static irqreturn_t idxd_halt(struct idxd_device *idxd)
                } else if (gensts.reset_type == IDXD_DEVICE_RESET_FLR) {
                        idxd->state = IDXD_DEV_HALTED;
                        idxd_mask_error_interrupts(idxd);
+                       /* Flush all pending descriptors, and disable
+                        * interrupts, they will be re-enabled when FLR
+                        * concludes.
+                        */
+                       idxd_wqs_flush_descs(idxd);
                        dev_dbg(&idxd->pdev->dev,
                                "idxd halted, doing FLR. After FLR, configs are restored\n");
                        INIT_WORK(&idxd->work, idxd_device_flr);