git.ipfire.org Git - thirdparty/kernel/linux.git/commitdiff
iommu: Rename iommu_tlb_* functions to iommu_iotlb_*
authorTom Murphy <murphyt7@tcd.ie>
Mon, 17 Aug 2020 21:00:49 +0000 (22:00 +0100)
committerJoerg Roedel <jroedel@suse.de>
Fri, 4 Sep 2020 09:16:09 +0000 (11:16 +0200)
To keep naming consistent, we should stick with *iotlb*. This patch
renames a few remaining functions.

Signed-off-by: Tom Murphy <murphyt7@tcd.ie>
Link: https://lore.kernel.org/r/20200817210051.13546-1-murphyt7@tcd.ie
Signed-off-by: Joerg Roedel <jroedel@suse.de>
drivers/iommu/dma-iommu.c
drivers/iommu/iommu.c
drivers/vfio/vfio_iommu_type1.c
include/linux/io-pgtable.h
include/linux/iommu.h

index 5141d49a046baa5ab80d408c06ebb15685c93712..3afd076e9f3665b566f0a6a699705471a62108cf 100644 (file)
@@ -471,7 +471,7 @@ static void __iommu_dma_unmap(struct device *dev, dma_addr_t dma_addr,
        WARN_ON(unmapped != size);
 
        if (!cookie->fq_domain)
-               iommu_tlb_sync(domain, &iotlb_gather);
+               iommu_iotlb_sync(domain, &iotlb_gather);
        iommu_dma_free_iova(cookie, dma_addr, size);
 }
 
index 609bd25bf154b5bc550996dd259749338c8f04a8..6c14c88cd525834bcec006533bb68492d86b7a46 100644 (file)
@@ -762,7 +762,7 @@ static int iommu_create_device_direct_mappings(struct iommu_group *group,
 
        }
 
-       iommu_flush_tlb_all(domain);
+       iommu_flush_iotlb_all(domain);
 
 out:
        iommu_put_resv_regions(dev, &mappings);
@@ -2316,7 +2316,7 @@ size_t iommu_unmap(struct iommu_domain *domain,
 
        iommu_iotlb_gather_init(&iotlb_gather);
        ret = __iommu_unmap(domain, iova, size, &iotlb_gather);
-       iommu_tlb_sync(domain, &iotlb_gather);
+       iommu_iotlb_sync(domain, &iotlb_gather);
 
        return ret;
 }
index 5fbf0c1f7433808b4414d91331c1c3f561d0b3ff..c255a6683f319138364bebae344c58dbbbe8b434 100644 (file)
@@ -774,7 +774,7 @@ static long vfio_sync_unpin(struct vfio_dma *dma, struct vfio_domain *domain,
        long unlocked = 0;
        struct vfio_regions *entry, *next;
 
-       iommu_tlb_sync(domain->domain, iotlb_gather);
+       iommu_iotlb_sync(domain->domain, iotlb_gather);
 
        list_for_each_entry_safe(entry, next, regions, list) {
                unlocked += vfio_unpin_pages_remote(dma,
index 23285ba645dbaa8fb1e843aaaa477597d647f8d8..4cde111e425b600d03cb047a1df80319e3d728aa 100644 (file)
@@ -31,7 +31,7 @@ enum io_pgtable_fmt {
  *                  single page.  IOMMUs that cannot batch TLB invalidation
  *                  operations efficiently will typically issue them here, but
  *                  others may decide to update the iommu_iotlb_gather structure
- *                  and defer the invalidation until iommu_tlb_sync() instead.
+ *                  and defer the invalidation until iommu_iotlb_sync() instead.
  *
  * Note that these can all be called in atomic context and must therefore
  * not block.
index fee209efb7568f34163e0070c2976385ae869b44..2ad26d8b4ab935f66d0c26a7b8afc0b19537aba5 100644 (file)
@@ -514,13 +514,13 @@ extern void iommu_domain_window_disable(struct iommu_domain *domain, u32 wnd_nr)
 extern int report_iommu_fault(struct iommu_domain *domain, struct device *dev,
                              unsigned long iova, int flags);
 
-static inline void iommu_flush_tlb_all(struct iommu_domain *domain)
+static inline void iommu_flush_iotlb_all(struct iommu_domain *domain)
 {
        if (domain->ops->flush_iotlb_all)
                domain->ops->flush_iotlb_all(domain);
 }
 
-static inline void iommu_tlb_sync(struct iommu_domain *domain,
+static inline void iommu_iotlb_sync(struct iommu_domain *domain,
                                  struct iommu_iotlb_gather *iotlb_gather)
 {
        if (domain->ops->iotlb_sync)
@@ -543,7 +543,7 @@ static inline void iommu_iotlb_gather_add_page(struct iommu_domain *domain,
        if (gather->pgsize != size ||
            end < gather->start || start > gather->end) {
                if (gather->pgsize)
-                       iommu_tlb_sync(domain, gather);
+                       iommu_iotlb_sync(domain, gather);
                gather->pgsize = size;
        }
 
@@ -725,11 +725,11 @@ static inline size_t iommu_map_sg_atomic(struct iommu_domain *domain,
        return 0;
 }
 
-static inline void iommu_flush_tlb_all(struct iommu_domain *domain)
+static inline void iommu_flush_iotlb_all(struct iommu_domain *domain)
 {
 }
 
-static inline void iommu_tlb_sync(struct iommu_domain *domain,
+static inline void iommu_iotlb_sync(struct iommu_domain *domain,
                                  struct iommu_iotlb_gather *iotlb_gather)
 {
 }