iommu/amd: Consolidate amd_iommu_domain_flush_complete() call
author Vasant Hegde <vasant.hegde@amd.com>
Wed, 22 Nov 2023 09:02:13 +0000 (09:02 +0000)
committer Joerg Roedel <jroedel@suse.de>
Mon, 11 Dec 2023 14:25:36 +0000 (15:25 +0100)
Call amd_iommu_domain_flush_complete() from domain_flush_pages().
That way we can remove the explicit calls to amd_iommu_domain_flush_complete()
from various places.

Signed-off-by: Vasant Hegde <vasant.hegde@amd.com>
Reviewed-by: Jason Gunthorpe <jgg@nvidia.com>
Link: https://lore.kernel.org/r/20231122090215.6191-8-vasant.hegde@amd.com
Signed-off-by: Joerg Roedel <jroedel@suse.de>
drivers/iommu/amd/io_pgtable.c
drivers/iommu/amd/iommu.c
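
A condensed, non-compilable sketch of what domain_flush_pages() looks like after this patch. The parameter list is an assumption inferred from the calls visible in the hunks, and the np_cache chunking loop is elided:

static void domain_flush_pages(struct protection_domain *domain,
			       u64 address, size_t size)	/* signature assumed */
{
	if (likely(!amd_iommu_np_cache)) {
		__domain_flush_pages(domain, address, size);

		/* Wait until IOMMU TLB and all device IOTLB flushes are complete */
		amd_iommu_domain_flush_complete(domain);

		return;
	}

	/*
	 * np_cache case: flush the range in flush_size-sized chunks
	 * (second hunk below), then wait for completion once at the end.
	 */
}

Since amd_iommu_domain_flush_tlb_pde() flushes through this same path, callers such as do_detach(), amd_iommu_domain_update() and amd_iommu_flush_iotlb_all() can drop their explicit amd_iommu_domain_flush_complete() calls, as the hunks below show.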

drivers/iommu/amd/io_pgtable.c
index 6c0621f6f572a4c4c0fb72ea1bdb5abe9d504311..ca22546e4d1a51e2097a5f3a29325caab88a6cb1 100644 (file)
@@ -425,7 +425,6 @@ out:
                 * increase_address_space().
                 */
                amd_iommu_domain_flush_tlb_pde(dom);
-               amd_iommu_domain_flush_complete(dom);
                spin_unlock_irqrestore(&dom->lock, flags);
        }
 
drivers/iommu/amd/iommu.c
index a071e1d52c3e8b9db5ddeebabc7fe79fad3c4966..cb8f51d5f83dde803b8f8c339b6fa4b665505762 100644 (file)
@@ -1489,6 +1489,10 @@ static void domain_flush_pages(struct protection_domain *domain,
 {
        if (likely(!amd_iommu_np_cache)) {
                __domain_flush_pages(domain, address, size);
+
+               /* Wait until IOMMU TLB and all device IOTLB flushes are complete */
+               amd_iommu_domain_flush_complete(domain);
+
                return;
        }
 
@@ -1525,6 +1529,9 @@ static void domain_flush_pages(struct protection_domain *domain,
                address += flush_size;
                size -= flush_size;
        }
+
+       /* Wait until IOMMU TLB and all device IOTLB flushes are complete */
+       amd_iommu_domain_flush_complete(domain);
 }
 
 /* Flush the whole IO/TLB for a given protection domain - including PDE */
@@ -1558,7 +1565,6 @@ static void domain_flush_np_cache(struct protection_domain *domain,
 
                spin_lock_irqsave(&domain->lock, flags);
                domain_flush_pages(domain, iova, size);
-               amd_iommu_domain_flush_complete(domain);
                spin_unlock_irqrestore(&domain->lock, flags);
        }
 }
@@ -1836,12 +1842,9 @@ static void do_detach(struct iommu_dev_data *dev_data)
        /* Flush the DTE entry */
        device_flush_dte(dev_data);
 
-       /* Flush IOTLB */
+       /* Flush IOTLB and wait for the flushes to finish */
        amd_iommu_domain_flush_tlb_pde(domain);
 
-       /* Wait for the flushes to finish */
-       amd_iommu_domain_flush_complete(domain);
-
        /* decrease reference counters - needs to happen after the flushes */
        domain->dev_iommu[iommu->index] -= 1;
        domain->dev_cnt                 -= 1;
@@ -2018,7 +2021,6 @@ void amd_iommu_domain_update(struct protection_domain *domain)
 
        /* Flush domain TLB(s) and wait for completion */
        amd_iommu_domain_flush_tlb_pde(domain);
-       amd_iommu_domain_flush_complete(domain);
 }
 
 /*****************************************************************************
@@ -2451,10 +2453,9 @@ static int amd_iommu_set_dirty_tracking(struct iommu_domain *domain,
        }
 
        /* Flush IOTLB to mark IOPTE dirty on the next translation(s) */
-       if (domain_flush) {
+       if (domain_flush)
                amd_iommu_domain_flush_tlb_pde(pdomain);
-               amd_iommu_domain_flush_complete(pdomain);
-       }
+
        pdomain->dirty_tracking = enable;
        spin_unlock_irqrestore(&pdomain->lock, flags);
 
@@ -2558,7 +2559,6 @@ static void amd_iommu_flush_iotlb_all(struct iommu_domain *domain)
 
        spin_lock_irqsave(&dom->lock, flags);
        amd_iommu_domain_flush_tlb_pde(dom);
-       amd_iommu_domain_flush_complete(dom);
        spin_unlock_irqrestore(&dom->lock, flags);
 }
 
@@ -2570,7 +2570,6 @@ static void amd_iommu_iotlb_sync(struct iommu_domain *domain,
 
        spin_lock_irqsave(&dom->lock, flags);
        domain_flush_pages(dom, gather->start, gather->end - gather->start + 1);
-       amd_iommu_domain_flush_complete(dom);
        spin_unlock_irqrestore(&dom->lock, flags);
 }