iommu/vt-d: Use device rbtree in iopf reporting path
author    Lu Baolu <baolu.lu@linux.intel.com>
          Tue, 27 Feb 2024 02:14:41 +0000 (10:14 +0800)
committer Sasha Levin <sashal@kernel.org>
          Tue, 26 Mar 2024 22:16:52 +0000 (18:16 -0400)
[ Upstream commit def054b01a867822254e1dda13d587f5c7a99e2a ]

The I/O page fault handler currently locates the faulting PCI device
by calling pci_get_domain_bus_and_slot(), which linearly walks the
list of all PCI devices in the system until the desired one is found.
To improve lookup efficiency, replace it with device_rbtree_find(),
which searches only the rbtree of devices probed on this IOMMU.
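
For reference, a condensed sketch of the rbtree lookup, paraphrased
from the earlier patch in this series that introduced the probed-device
rbtree (it is not part of this diff); the tree is keyed by the PCI
request ID (RID), i.e. bus number plus devfn:

    /* Compare a RID key against a node in the probed-device rbtree. */
    static int device_rid_cmp_key(const void *key, const struct rb_node *node)
    {
            struct device_domain_info *info =
                    rb_entry(node, struct device_domain_info, node);
            const u16 *rid = key;
            u16 node_rid = PCI_DEVID(info->bus, info->devfn);

            return *rid < node_rid ? -1 : *rid > node_rid;
    }

    struct device *device_rbtree_find(struct intel_iommu *iommu, u16 rid)
    {
            struct device_domain_info *info = NULL;
            struct rb_node *node;
            unsigned long flags;

            /*
             * O(log n) over the devices probed on this IOMMU, instead
             * of a linear walk over every PCI device in the system.
             */
            spin_lock_irqsave(&iommu->device_rbtree_lock, flags);
            node = rb_find(&rid, &iommu->device_rbtree, device_rid_cmp_key);
            if (node)
                    info = rb_entry(node, struct device_domain_info, node);
            spin_unlock_irqrestore(&iommu->device_rbtree_lock, flags);

            return info ? info->dev : NULL;
    }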

An I/O page fault is initiated by the device itself, and there is no
synchronization mechanism between the device and software to ensure
that the device stays in the probed device rbtree while a fault is
being reported. In theory, a device could be released by the IOMMU
subsystem after device_rbtree_find() returns and before
iopf_get_dev_fault_param() is called, causing a use-after-free.

Add a mutex to synchronize the I/O page fault reporting path with the
IOMMU device release path. The mutex also takes over the lifetime
guarantee previously provided by the device reference held between
pci_get_domain_bus_and_slot() and pci_dev_put(). It introduces no
meaningful performance overhead, as contention between fault reporting
and device release is very rare.
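
Condensed from the hunks below, the resulting locking pattern is:

    /* Fault reporting path (prq_event_thread, svm.c) */
    mutex_lock(&iommu->iopf_lock);
    dev = device_rbtree_find(iommu, req->rid);
    if (dev)
            intel_svm_prq_report(iommu, dev, req); /* dev cannot be released here */
    mutex_unlock(&iommu->iopf_lock);

    /* Device release path (intel_iommu_release_device, iommu.c) */
    mutex_lock(&iommu->iopf_lock);
    device_rbtree_remove(info);    /* later faults can no longer find this dev */
    mutex_unlock(&iommu->iopf_lock);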

Signed-off-by: Lu Baolu <baolu.lu@linux.intel.com>
Reviewed-by: Jason Gunthorpe <jgg@nvidia.com>
Link: https://lore.kernel.org/r/20240220065939.121116-3-baolu.lu@linux.intel.com
Signed-off-by: Joerg Roedel <jroedel@suse.de>
Stable-dep-of: 81e921fd3216 ("iommu/vt-d: Fix NULL domain on device release")
Signed-off-by: Sasha Levin <sashal@kernel.org>
drivers/iommu/intel/dmar.c
drivers/iommu/intel/iommu.c
drivers/iommu/intel/iommu.h
drivers/iommu/intel/svm.c

diff --git a/drivers/iommu/intel/dmar.c b/drivers/iommu/intel/dmar.c
index ad8a340fc7f1d25a83e5c36c614fbf79c1360361..36d7427b12026ca943e3beb660e2866e19e687a1 100644
@@ -1097,6 +1097,7 @@ static int alloc_iommu(struct dmar_drhd_unit *drhd)
        iommu->segment = drhd->segment;
        iommu->device_rbtree = RB_ROOT;
        spin_lock_init(&iommu->device_rbtree_lock);
+       mutex_init(&iommu->iopf_lock);
        iommu->node = NUMA_NO_NODE;
 
        ver = readl(iommu->reg + DMAR_VER_REG);
diff --git a/drivers/iommu/intel/iommu.c b/drivers/iommu/intel/iommu.c
index 9e07e4425ff65e4a8b2ce3a489b1fc1004039b8f..31b5d852ba73280467c08233eaa2ad0639bad901 100644
@@ -4431,8 +4431,11 @@ free:
 static void intel_iommu_release_device(struct device *dev)
 {
        struct device_domain_info *info = dev_iommu_priv_get(dev);
+       struct intel_iommu *iommu = info->iommu;
 
+       mutex_lock(&iommu->iopf_lock);
        device_rbtree_remove(info);
+       mutex_unlock(&iommu->iopf_lock);
        dmar_remove_one_dev_info(dev);
        intel_pasid_free_table(dev);
        intel_iommu_debugfs_remove_dev(info);
diff --git a/drivers/iommu/intel/iommu.h b/drivers/iommu/intel/iommu.h
index df00240ebe90bffb57457a7debc84debe4f539ad..cd267ba64eda1eef3687b31c6b48e127b7a9902b 100644
@@ -719,6 +719,8 @@ struct intel_iommu {
 #endif
        struct iopf_queue *iopf_queue;
        unsigned char iopfq_name[16];
+       /* Synchronization between fault report and iommu device release. */
+       struct mutex iopf_lock;
        struct q_inval  *qi;            /* Queued invalidation info */
        u32 iommu_state[MAX_SR_DMAR_REGS]; /* Store iommu states between suspend and resume.*/
 
diff --git a/drivers/iommu/intel/svm.c b/drivers/iommu/intel/svm.c
index 40edd282903fbe7c804512819aa95c3a1ae9d43e..ec47ec81f0ecd1362fb14d6a9186fa5a6306bd5b 100644
@@ -650,7 +650,7 @@ static irqreturn_t prq_event_thread(int irq, void *d)
        struct intel_iommu *iommu = d;
        struct page_req_dsc *req;
        int head, tail, handled;
-       struct pci_dev *pdev;
+       struct device *dev;
        u64 address;
 
        /*
@@ -696,23 +696,24 @@ bad_req:
                if (unlikely(req->lpig && !req->rd_req && !req->wr_req))
                        goto prq_advance;
 
-               pdev = pci_get_domain_bus_and_slot(iommu->segment,
-                                                  PCI_BUS_NUM(req->rid),
-                                                  req->rid & 0xff);
                /*
                 * If prq is to be handled outside iommu driver via receiver of
                 * the fault notifiers, we skip the page response here.
                 */
-               if (!pdev)
+               mutex_lock(&iommu->iopf_lock);
+               dev = device_rbtree_find(iommu, req->rid);
+               if (!dev) {
+                       mutex_unlock(&iommu->iopf_lock);
                        goto bad_req;
+               }
 
-               if (intel_svm_prq_report(iommu, &pdev->dev, req))
+               if (intel_svm_prq_report(iommu, dev, req))
                        handle_bad_prq_event(iommu, req, QI_RESP_INVALID);
                else
-                       trace_prq_report(iommu, &pdev->dev, req->qw_0, req->qw_1,
+                       trace_prq_report(iommu, dev, req->qw_0, req->qw_1,
                                         req->priv_data[0], req->priv_data[1],
                                         iommu->prq_seq_number++);
-               pci_dev_put(pdev);
+               mutex_unlock(&iommu->iopf_lock);
 prq_advance:
                head = (head + sizeof(*req)) & PRQ_RING_MASK;
        }