iommu/amd: Remove the confusing dummy iommu_flush_ops tlb ops
author     Jason Gunthorpe <jgg@nvidia.com>
           Fri, 30 Aug 2024 00:06:21 +0000 (21:06 -0300)
committer  Joerg Roedel <jroedel@suse.de>
           Wed, 4 Sep 2024 09:39:02 +0000 (11:39 +0200)
The iommu driver is supposed to provide these ops to its io_pgtable
implementation so that it can hook the invalidations and do the right
thing.

They are called through wrapper functions such as io_pgtable_tlb_add_page(),
which the AMD code never calls.
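
For reference, those helpers only dispatch through the ops hooked at
cfg->tlb, roughly along these lines (a paraphrased sketch of the
include/linux/io-pgtable.h inlines, not the exact kernel code):

    /* Sketch: the io-pgtable core forwards invalidations via cfg->tlb. */
    static inline void io_pgtable_tlb_flush_all(struct io_pgtable *iop)
    {
            if (iop->cfg.tlb && iop->cfg.tlb->tlb_flush_all)
                    iop->cfg.tlb->tlb_flush_all(iop->cookie);
    }

    static inline void io_pgtable_tlb_add_page(struct io_pgtable *iop,
                                               struct iommu_iotlb_gather *gather,
                                               unsigned long iova, size_t granule)
    {
            if (iop->cfg.tlb && iop->cfg.tlb->tlb_add_page)
                    iop->cfg.tlb->tlb_add_page(gather, iova, granule, iop->cookie);
    }

Since the AMD map/unmap paths never invoke these helpers, the empty v1/v2
callbacks removed below were dead code.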

Instead, it calls the AMD IOMMU invalidation functions directly, casting the
io-pgtable cookie to the struct protection_domain. Remove it all.
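
As an illustration of that direct-call pattern (a hypothetical sketch; the
function and field names are approximations, not the actual driver code),
the page-table code treats the io-pgtable cookie as the owning
protection_domain and flushes through the driver's own routines:

    /* Hypothetical sketch of the direct invalidation path described above.
     * The cookie handed to the io-pgtable code is assumed to be the
     * protection_domain; amd_iommu_domain_flush_pages() stands in for the
     * driver's own flush helpers and its exact signature may differ.
     */
    static void amd_direct_flush_example(struct io_pgtable *iop,
                                         unsigned long iova, size_t size)
    {
            struct protection_domain *pdom = iop->cookie;

            amd_iommu_domain_flush_pages(pdom, iova, size);
    }

Because invalidations already go through this path, dropping cfg->tlb and
the empty ops does not change any flushing behaviour.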

Reviewed-by: Vasant Hegde <vasant.hegde@amd.com>
Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
Link: https://lore.kernel.org/r/12-v2-831cdc4d00f3+1a315-amd_iopgtbl_jgg@nvidia.com
Signed-off-by: Joerg Roedel <jroedel@suse.de>
drivers/iommu/amd/io_pgtable.c
drivers/iommu/amd/io_pgtable_v2.c

index 1cf3d580a55107a3fc2bc11afa071fa99d37ddb9..14f62c420e4a9c9e034635121eaf4abd3ff35071 100644 (file)
 #include "amd_iommu.h"
 #include "../iommu-pages.h"
 
-static void v1_tlb_flush_all(void *cookie)
-{
-}
-
-static void v1_tlb_flush_walk(unsigned long iova, size_t size,
-                                 size_t granule, void *cookie)
-{
-}
-
-static void v1_tlb_add_page(struct iommu_iotlb_gather *gather,
-                                        unsigned long iova, size_t granule,
-                                        void *cookie)
-{
-}
-
-static const struct iommu_flush_ops v1_flush_ops = {
-       .tlb_flush_all  = v1_tlb_flush_all,
-       .tlb_flush_walk = v1_tlb_flush_walk,
-       .tlb_add_page   = v1_tlb_add_page,
-};
-
 /*
  * Helper function to get the first pte of a large mapping
  */
@@ -572,7 +551,6 @@ static struct io_pgtable *v1_alloc_pgtable(struct io_pgtable_cfg *cfg, void *coo
        cfg->pgsize_bitmap  = AMD_IOMMU_PGSIZES;
        cfg->ias            = IOMMU_IN_ADDR_BIT_SIZE;
        cfg->oas            = IOMMU_OUT_ADDR_BIT_SIZE;
-       cfg->tlb            = &v1_flush_ops;
 
        pgtable->pgtbl.ops.map_pages    = iommu_v1_map_pages;
        pgtable->pgtbl.ops.unmap_pages  = iommu_v1_unmap_pages;
index 910fe1879f3e41a3938ebbb5cf55649cc0aa350a..77cc1b4a3f02258fbe01662c3126f75fadc2b3e5 100644 (file)
@@ -326,27 +326,6 @@ static phys_addr_t iommu_v2_iova_to_phys(struct io_pgtable_ops *ops, unsigned lo
 /*
  * ----------------------------------------------------
  */
-static void v2_tlb_flush_all(void *cookie)
-{
-}
-
-static void v2_tlb_flush_walk(unsigned long iova, size_t size,
-                             size_t granule, void *cookie)
-{
-}
-
-static void v2_tlb_add_page(struct iommu_iotlb_gather *gather,
-                           unsigned long iova, size_t granule,
-                           void *cookie)
-{
-}
-
-static const struct iommu_flush_ops v2_flush_ops = {
-       .tlb_flush_all  = v2_tlb_flush_all,
-       .tlb_flush_walk = v2_tlb_flush_walk,
-       .tlb_add_page   = v2_tlb_add_page,
-};
-
 static void v2_free_pgtable(struct io_pgtable *iop)
 {
        struct amd_io_pgtable *pgtable = container_of(iop, struct amd_io_pgtable, pgtbl);
@@ -378,7 +357,6 @@ static struct io_pgtable *v2_alloc_pgtable(struct io_pgtable_cfg *cfg, void *coo
        cfg->pgsize_bitmap = AMD_IOMMU_PGSIZES_V2;
        cfg->ias           = ias;
        cfg->oas           = IOMMU_OUT_ADDR_BIT_SIZE;
-       cfg->tlb           = &v2_flush_ops;
 
        return &pgtable->pgtbl;
 }