iommu/amd: Make domain_flush_pages a global function
author: Vasant Hegde <vasant.hegde@amd.com>
Wed, 22 Nov 2023 09:02:14 +0000 (09:02 +0000)
committer: Joerg Roedel <jroedel@suse.de>
Mon, 11 Dec 2023 14:25:37 +0000 (15:25 +0100)
- Rename domain_flush_pages() -> amd_iommu_domain_flush_pages() and make
  it a global function (see the usage sketch below).

- Rename amd_iommu_domain_flush_tlb_pde() -> amd_iommu_domain_flush_all()
  and make it static.

- Convert the v1 page table (io_pgtable.c) to use
  amd_iommu_domain_flush_pages().

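For illustration, a minimal sketch of how a caller can use the newly
exported helper to flush just the IOVA range it touched, instead of the
whole domain TLB. The wrapper name below is hypothetical and not part of
this commit; the lock/flush/unlock pattern mirrors domain_flush_np_cache()
and amd_iommu_iotlb_sync() in iommu.c:

	/* Hypothetical caller sketch -- not part of this commit. */
	static void example_flush_range(struct protection_domain *dom,
					u64 iova, size_t size)
	{
		unsigned long flags;

		/* Callers serialize flushes with the domain lock. */
		spin_lock_irqsave(&dom->lock, flags);
		amd_iommu_domain_flush_pages(dom, iova, size);
		spin_unlock_irqrestore(&dom->lock, flags);
	}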
Signed-off-by: Vasant Hegde <vasant.hegde@amd.com>
Reviewed-by: Jason Gunthorpe <jgg@nvidia.com>
Link: https://lore.kernel.org/r/20231122090215.6191-9-vasant.hegde@amd.com
Signed-off-by: Joerg Roedel <jroedel@suse.de>
drivers/iommu/amd/amd_iommu.h
drivers/iommu/amd/io_pgtable.c
drivers/iommu/amd/iommu.c

diff --git a/drivers/iommu/amd/amd_iommu.h b/drivers/iommu/amd/amd_iommu.h
index 234db57cd3209a1c45b2b7b3765709436d7b7881..8b3601f285fd699dd4d9d4d32e1ac62c2c5e3058 100644
@@ -61,7 +61,8 @@ void amd_iommu_flush_all_caches(struct amd_iommu *iommu);
 void amd_iommu_update_and_flush_device_table(struct protection_domain *domain);
 void amd_iommu_domain_update(struct protection_domain *domain);
 void amd_iommu_domain_flush_complete(struct protection_domain *domain);
-void amd_iommu_domain_flush_tlb_pde(struct protection_domain *domain);
+void amd_iommu_domain_flush_pages(struct protection_domain *domain,
+                                 u64 address, size_t size);
 int amd_iommu_flush_tlb(struct iommu_domain *dom, u32 pasid);
 int amd_iommu_domain_set_gcr3(struct iommu_domain *dom, u32 pasid,
                              unsigned long cr3);
diff --git a/drivers/iommu/amd/io_pgtable.c b/drivers/iommu/amd/io_pgtable.c
index ca22546e4d1a51e2097a5f3a29325caab88a6cb1..2a0d1e97e52fdfe2375c3988c698260ae520cb9f 100644
@@ -369,6 +369,8 @@ static int iommu_v1_map_pages(struct io_pgtable_ops *ops, unsigned long iova,
        bool updated = false;
        u64 __pte, *pte;
        int ret, i, count;
+       size_t size = pgcount << __ffs(pgsize);
+       unsigned long o_iova = iova;
 
        BUG_ON(!IS_ALIGNED(iova, pgsize));
        BUG_ON(!IS_ALIGNED(paddr, pgsize));
@@ -424,7 +426,7 @@ out:
                 * Updates and flushing already happened in
                 * increase_address_space().
                 */
-               amd_iommu_domain_flush_tlb_pde(dom);
+               amd_iommu_domain_flush_pages(dom, o_iova, size);
                spin_unlock_irqrestore(&dom->lock, flags);
        }
 
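A note on the size computation added above: iommu_v1_map_pages() is only
called with a power-of-two pgsize (the BUG_ON() alignment checks depend on
that), so __ffs(pgsize) is log2(pgsize) and the shift is equivalent to
pgcount * pgsize. Illustrative arithmetic, assuming pgsize = 4 KiB:

	/* pgsize = 4096 = 1 << 12, pgcount = 8 */
	size_t size = 8 << __ffs(4096);	/* __ffs(4096) == 12 */
	/* size == 8 << 12 == 32768 == 8 * 4096 */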
diff --git a/drivers/iommu/amd/iommu.c b/drivers/iommu/amd/iommu.c
index cb8f51d5f83dde803b8f8c339b6fa4b665505762..77cf1e3de053bb710606867f6f291d8ed0d71ce2 100644
@@ -1484,8 +1484,8 @@ static void __domain_flush_pages(struct protection_domain *domain,
        WARN_ON(ret);
 }
 
-static void domain_flush_pages(struct protection_domain *domain,
-                              u64 address, size_t size)
+void amd_iommu_domain_flush_pages(struct protection_domain *domain,
+                                 u64 address, size_t size)
 {
        if (likely(!amd_iommu_np_cache)) {
                __domain_flush_pages(domain, address, size);
@@ -1535,9 +1535,10 @@ static void domain_flush_pages(struct protection_domain *domain,
 }
 
 /* Flush the whole IO/TLB for a given protection domain - including PDE */
-void amd_iommu_domain_flush_tlb_pde(struct protection_domain *domain)
+static void amd_iommu_domain_flush_all(struct protection_domain *domain)
 {
-       domain_flush_pages(domain, 0, CMD_INV_IOMMU_ALL_PAGES_ADDRESS);
+       amd_iommu_domain_flush_pages(domain, 0,
+                                    CMD_INV_IOMMU_ALL_PAGES_ADDRESS);
 }
 
 void amd_iommu_domain_flush_complete(struct protection_domain *domain)
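For context on the new static wrapper: CMD_INV_IOMMU_ALL_PAGES_ADDRESS is
the driver's sentinel value from amd_iommu_types.h; passing it as the size
makes the flush path emit the hardware's whole-address-space invalidation,
which also drops cached page-directory entries -- hence the old _tlb_pde
name. A sketch of the equivalence, using only names visible in this diff:

	/* Flushing "all" is a range flush with the sentinel size. */
	amd_iommu_domain_flush_all(domain);
	/* ...is shorthand for: */
	amd_iommu_domain_flush_pages(domain, 0,
				     CMD_INV_IOMMU_ALL_PAGES_ADDRESS);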
@@ -1564,7 +1565,7 @@ static void domain_flush_np_cache(struct protection_domain *domain,
                unsigned long flags;
 
                spin_lock_irqsave(&domain->lock, flags);
-               domain_flush_pages(domain, iova, size);
+               amd_iommu_domain_flush_pages(domain, iova, size);
                spin_unlock_irqrestore(&domain->lock, flags);
        }
 }
@@ -1843,7 +1844,7 @@ static void do_detach(struct iommu_dev_data *dev_data)
        device_flush_dte(dev_data);
 
        /* Flush IOTLB and wait for the flushes to finish */
-       amd_iommu_domain_flush_tlb_pde(domain);
+       amd_iommu_domain_flush_all(domain);
 
        /* decrease reference counters - needs to happen after the flushes */
        domain->dev_iommu[iommu->index] -= 1;
@@ -2020,7 +2021,7 @@ void amd_iommu_domain_update(struct protection_domain *domain)
        amd_iommu_update_and_flush_device_table(domain);
 
        /* Flush domain TLB(s) and wait for completion */
-       amd_iommu_domain_flush_tlb_pde(domain);
+       amd_iommu_domain_flush_all(domain);
 }
 
 /*****************************************************************************
@@ -2454,7 +2455,7 @@ static int amd_iommu_set_dirty_tracking(struct iommu_domain *domain,
 
        /* Flush IOTLB to mark IOPTE dirty on the next translation(s) */
        if (domain_flush)
-               amd_iommu_domain_flush_tlb_pde(pdomain);
+               amd_iommu_domain_flush_all(pdomain);
 
        pdomain->dirty_tracking = enable;
        spin_unlock_irqrestore(&pdomain->lock, flags);
@@ -2558,7 +2559,7 @@ static void amd_iommu_flush_iotlb_all(struct iommu_domain *domain)
        unsigned long flags;
 
        spin_lock_irqsave(&dom->lock, flags);
-       amd_iommu_domain_flush_tlb_pde(dom);
+       amd_iommu_domain_flush_all(dom);
        spin_unlock_irqrestore(&dom->lock, flags);
 }
 
@@ -2569,7 +2570,8 @@ static void amd_iommu_iotlb_sync(struct iommu_domain *domain,
        unsigned long flags;
 
        spin_lock_irqsave(&dom->lock, flags);
-       domain_flush_pages(dom, gather->start, gather->end - gather->start + 1);
+       amd_iommu_domain_flush_pages(dom, gather->start,
+                                    gather->end - gather->start + 1);
        spin_unlock_irqrestore(&dom->lock, flags);
 }
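
One detail in the last hunk: iommu_iotlb_gather tracks an inclusive
[start, end] byte range, so converting it to a length needs the + 1.
A worked example, assuming a single gathered 4 KiB page at IOVA 0x1000:

	/* gather->start == 0x1000, gather->end == 0x1fff */
	size_t len = 0x1fff - 0x1000 + 1;	/* 0x1000 == 4096 */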