]> git.ipfire.org Git - thirdparty/kernel/linux.git/commitdiff
iommu/amd: Make amd_iommu_domain_flush_complete() static
authorVasant Hegde <vasant.hegde@amd.com>
Wed, 28 Aug 2024 11:10:27 +0000 (11:10 +0000)
committerJoerg Roedel <jroedel@suse.de>
Wed, 4 Sep 2024 09:35:56 +0000 (11:35 +0200)
AMD driver uses amd_iommu_domain_flush_complete() function to make sure
IOMMU processed invalidation commands before proceeding. Ideally this
should be called from functions which update the DTE or invalidate caches.
There is no need to call this function explicitly. This patch makes the
following changes:

- Rename amd_iommu_domain_flush_complete() -> domain_flush_complete()
  and make it a static function.

- Rearrange domain_flush_complete() to avoid a forward declaration.

- Update amd_iommu_update_and_flush_device_table() to call
  domain_flush_complete().

Signed-off-by: Vasant Hegde <vasant.hegde@amd.com>
Reviewed-by: Suravee Suthikulpanit <suravee.suthikulpanit@amd.com>
Link: https://lore.kernel.org/r/20240828111029.5429-7-vasant.hegde@amd.com
Signed-off-by: Joerg Roedel <jroedel@suse.de>
drivers/iommu/amd/amd_iommu.h
drivers/iommu/amd/io_pgtable.c
drivers/iommu/amd/iommu.c

index d0a24ec3ada2a89559c4f64af7e87ec5f3bc0a28..94402b88789dc171d666c8f622d552d489821df2 100644 (file)
@@ -88,7 +88,6 @@ void amd_iommu_flush_all_caches(struct amd_iommu *iommu);
 void amd_iommu_update_and_flush_device_table(struct protection_domain *domain);
 void amd_iommu_domain_update(struct protection_domain *domain);
 void amd_iommu_dev_update_dte(struct iommu_dev_data *dev_data, bool set);
-void amd_iommu_domain_flush_complete(struct protection_domain *domain);
 void amd_iommu_domain_flush_pages(struct protection_domain *domain,
                                  u64 address, size_t size);
 void amd_iommu_dev_flush_pasid_pages(struct iommu_dev_data *dev_data,
index 1074ee25064d06024b2f7857522bbc791abed8ba..bfbcec68efb94410cb03077656dc457711040da3 100644 (file)
@@ -175,7 +175,6 @@ static bool increase_address_space(struct protection_domain *domain,
        domain->iop.root  = pte;
        domain->iop.mode += 1;
        amd_iommu_update_and_flush_device_table(domain);
-       amd_iommu_domain_flush_complete(domain);
 
        /*
         * Device Table needs to be updated and flushed before the new root can
index ddd63c2b659428a4d9c8588c8f31d123337c6c89..9af084fa6dd48cd3e1e216b00d395cedc713e0ef 100644 (file)
@@ -1249,6 +1249,22 @@ out_unlock:
        return ret;
 }
 
+static void domain_flush_complete(struct protection_domain *domain)
+{
+       int i;
+
+       for (i = 0; i < amd_iommu_get_num_iommus(); ++i) {
+               if (domain && !domain->dev_iommu[i])
+                       continue;
+
+               /*
+                * Devices of this domain are behind this IOMMU
+                * We need to wait for completion of all commands.
+                */
+               iommu_completion_wait(amd_iommus[i]);
+       }
+}
+
 static int iommu_flush_dte(struct amd_iommu *iommu, u16 devid)
 {
        struct iommu_cmd cmd;
@@ -1485,7 +1501,7 @@ void amd_iommu_domain_flush_pages(struct protection_domain *domain,
                __domain_flush_pages(domain, address, size);
 
                /* Wait until IOMMU TLB and all device IOTLB flushes are complete */
-               amd_iommu_domain_flush_complete(domain);
+               domain_flush_complete(domain);
 
                return;
        }
@@ -1525,7 +1541,7 @@ void amd_iommu_domain_flush_pages(struct protection_domain *domain,
        }
 
        /* Wait until IOMMU TLB and all device IOTLB flushes are complete */
-       amd_iommu_domain_flush_complete(domain);
+       domain_flush_complete(domain);
 }
 
 /* Flush the whole IO/TLB for a given protection domain - including PDE */
@@ -1558,22 +1574,6 @@ static void dev_flush_pasid_all(struct iommu_dev_data *dev_data,
                                        CMD_INV_IOMMU_ALL_PAGES_ADDRESS, pasid);
 }
 
-void amd_iommu_domain_flush_complete(struct protection_domain *domain)
-{
-       int i;
-
-       for (i = 0; i < amd_iommu_get_num_iommus(); ++i) {
-               if (domain && !domain->dev_iommu[i])
-                       continue;
-
-               /*
-                * Devices of this domain are behind this IOMMU
-                * We need to wait for completion of all commands.
-                */
-               iommu_completion_wait(amd_iommus[i]);
-       }
-}
-
 /* Flush the not present cache if it exists */
 static void domain_flush_np_cache(struct protection_domain *domain,
                dma_addr_t iova, size_t size)
@@ -1615,6 +1615,7 @@ void amd_iommu_update_and_flush_device_table(struct protection_domain *domain)
 {
        update_device_table(domain);
        domain_flush_devices(domain);
+       domain_flush_complete(domain);
 }
 
 void amd_iommu_domain_update(struct protection_domain *domain)