iommu/tegra: Do not use struct page as the handle for pts
author    Jason Gunthorpe <jgg@nvidia.com>
          Tue, 8 Apr 2025 16:53:50 +0000 (13:53 -0300)
committer Joerg Roedel <jroedel@suse.de>
          Thu, 17 Apr 2025 14:22:32 +0000 (16:22 +0200)
Instead, use the virtual address and dma_map_single(), as as->pd already
does. Introduce a small struct tegra_pt instead of void * to make it clear
what is using this API and to add compile-time safety during the
conversion.

Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
Link: https://lore.kernel.org/r/2-v4-c8663abbb606+3f7-iommu_pages_jgg@nvidia.com
Signed-off-by: Joerg Roedel <jroedel@suse.de>
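
For context, the core of the change is the mapping pattern shown in the sketch
below: a page table is handled through a typed pointer and mapped with
dma_map_single() on its virtual address, where the old code passed a struct
page to dma_map_page(). This is a minimal, hypothetical illustration, not the
driver code itself; example_pt, example_map_pt() and EXAMPLE_NUM_PTE are
placeholder names, and get_zeroed_page() stands in for the driver's
iommu_alloc_page() helper used in the actual patch.

    /* Hypothetical sketch of the dma_map_single()-on-virtual-address pattern. */
    #include <linux/types.h>
    #include <linux/gfp.h>
    #include <linux/dma-mapping.h>

    #define EXAMPLE_NUM_PTE 1024    /* stand-in for SMMU_NUM_PTE */

    /* Typed handle for a page table, mirroring struct tegra_pt. */
    struct example_pt {
            u32 val[EXAMPLE_NUM_PTE];
    };

    static struct example_pt *example_map_pt(struct device *dev, gfp_t gfp,
                                             dma_addr_t *dma_out)
    {
            struct example_pt *pt;
            dma_addr_t dma;

            /* The driver allocates via iommu_alloc_page(); a plain zeroed
             * page-sized allocation stands in for it here. */
            pt = (struct example_pt *)get_zeroed_page(gfp | __GFP_DMA);
            if (!pt)
                    return NULL;

            /* Map by virtual address instead of dma_map_page(page, 0, ...). */
            dma = dma_map_single(dev, pt, sizeof(*pt), DMA_TO_DEVICE);
            if (dma_mapping_error(dev, dma)) {
                    free_page((unsigned long)pt);
                    return NULL;
            }

            *dma_out = dma;
            return pt;
    }

The typed pointer means callers can no longer confuse a page-table handle with
an arbitrary struct page, which is the compile-time safety the commit message
refers to.
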
drivers/iommu/tegra-smmu.c

index d040400ee34ac7503c2dc9788d58df091f891e0b..95e97d1e4004688f7c3a5f10c2c9e1122170f883 100644
@@ -52,6 +52,7 @@ struct tegra_smmu {
 };
 
 struct tegra_pd;
+struct tegra_pt;
 
 struct tegra_smmu_as {
        struct iommu_domain domain;
@@ -59,7 +60,7 @@ struct tegra_smmu_as {
        unsigned int use_count;
        spinlock_t lock;
        u32 *count;
-       struct page **pts;
+       struct tegra_pt **pts;
        struct tegra_pd *pd;
        dma_addr_t pd_dma;
        unsigned id;
@@ -161,6 +162,10 @@ struct tegra_pd {
        u32 val[SMMU_NUM_PDE];
 };
 
+struct tegra_pt {
+       u32 val[SMMU_NUM_PTE];
+};
+
 static unsigned int iova_pd_index(unsigned long iova)
 {
        return (iova >> SMMU_PDE_SHIFT) & (SMMU_NUM_PDE - 1);
@@ -570,11 +575,9 @@ static void tegra_smmu_set_pde(struct tegra_smmu_as *as, unsigned long iova,
        smmu_flush(smmu);
 }
 
-static u32 *tegra_smmu_pte_offset(struct page *pt_page, unsigned long iova)
+static u32 *tegra_smmu_pte_offset(struct tegra_pt *pt, unsigned long iova)
 {
-       u32 *pt = page_address(pt_page);
-
-       return pt + iova_pt_index(iova);
+       return &pt->val[iova_pt_index(iova)];
 }
 
 static u32 *tegra_smmu_pte_lookup(struct tegra_smmu_as *as, unsigned long iova,
@@ -582,19 +585,19 @@ static u32 *tegra_smmu_pte_lookup(struct tegra_smmu_as *as, unsigned long iova,
 {
        unsigned int pd_index = iova_pd_index(iova);
        struct tegra_smmu *smmu = as->smmu;
-       struct page *pt_page;
+       struct tegra_pt *pt;
 
-       pt_page = as->pts[pd_index];
-       if (!pt_page)
+       pt = as->pts[pd_index];
+       if (!pt)
                return NULL;
 
        *dmap = smmu_pde_to_dma(smmu, as->pd->val[pd_index]);
 
-       return tegra_smmu_pte_offset(pt_page, iova);
+       return tegra_smmu_pte_offset(pt, iova);
 }
 
 static u32 *as_get_pte(struct tegra_smmu_as *as, dma_addr_t iova,
-                      dma_addr_t *dmap, struct page *page)
+                      dma_addr_t *dmap, struct tegra_pt *pt)
 {
        unsigned int pde = iova_pd_index(iova);
        struct tegra_smmu *smmu = as->smmu;
@@ -602,21 +605,21 @@ static u32 *as_get_pte(struct tegra_smmu_as *as, dma_addr_t iova,
        if (!as->pts[pde]) {
                dma_addr_t dma;
 
-               dma = dma_map_page(smmu->dev, page, 0, SMMU_SIZE_PT,
-                                  DMA_TO_DEVICE);
+               dma = dma_map_single(smmu->dev, pt, SMMU_SIZE_PT,
+                                    DMA_TO_DEVICE);
                if (dma_mapping_error(smmu->dev, dma)) {
-                       __iommu_free_pages(page, 0);
+                       iommu_free_page(pt);
                        return NULL;
                }
 
                if (!smmu_dma_addr_valid(smmu, dma)) {
-                       dma_unmap_page(smmu->dev, dma, SMMU_SIZE_PT,
-                                      DMA_TO_DEVICE);
-                       __iommu_free_pages(page, 0);
+                       dma_unmap_single(smmu->dev, dma, SMMU_SIZE_PT,
+                                        DMA_TO_DEVICE);
+                       iommu_free_page(pt);
                        return NULL;
                }
 
-               as->pts[pde] = page;
+               as->pts[pde] = pt;
 
                tegra_smmu_set_pde(as, iova, SMMU_MK_PDE(dma, SMMU_PDE_ATTR |
                                                              SMMU_PDE_NEXT));
@@ -639,7 +642,7 @@ static void tegra_smmu_pte_get_use(struct tegra_smmu_as *as, unsigned long iova)
 static void tegra_smmu_pte_put_use(struct tegra_smmu_as *as, unsigned long iova)
 {
        unsigned int pde = iova_pd_index(iova);
-       struct page *page = as->pts[pde];
+       struct tegra_pt *pt = as->pts[pde];
 
        /*
         * When no entries in this page table are used anymore, return the
@@ -651,8 +654,9 @@ static void tegra_smmu_pte_put_use(struct tegra_smmu_as *as, unsigned long iova)
 
                tegra_smmu_set_pde(as, iova, 0);
 
-               dma_unmap_page(smmu->dev, pte_dma, SMMU_SIZE_PT, DMA_TO_DEVICE);
-               __iommu_free_pages(page, 0);
+               dma_unmap_single(smmu->dev, pte_dma, SMMU_SIZE_PT,
+                                DMA_TO_DEVICE);
+               iommu_free_page(pt);
                as->pts[pde] = NULL;
        }
 }
@@ -672,16 +676,16 @@ static void tegra_smmu_set_pte(struct tegra_smmu_as *as, unsigned long iova,
        smmu_flush(smmu);
 }
 
-static struct page *as_get_pde_page(struct tegra_smmu_as *as,
-                                   unsigned long iova, gfp_t gfp,
-                                   unsigned long *flags)
+static struct tegra_pt *as_get_pde_page(struct tegra_smmu_as *as,
+                                       unsigned long iova, gfp_t gfp,
+                                       unsigned long *flags)
 {
        unsigned int pde = iova_pd_index(iova);
-       struct page *page = as->pts[pde];
+       struct tegra_pt *pt = as->pts[pde];
 
        /* at first check whether allocation needs to be done at all */
-       if (page)
-               return page;
+       if (pt)
+               return pt;
 
        /*
         * In order to prevent exhaustion of the atomic memory pool, we
@@ -691,7 +695,7 @@ static struct page *as_get_pde_page(struct tegra_smmu_as *as,
        if (gfpflags_allow_blocking(gfp))
                spin_unlock_irqrestore(&as->lock, *flags);
 
-       page = __iommu_alloc_pages(gfp | __GFP_DMA, 0);
+       pt = iommu_alloc_page(gfp | __GFP_DMA);
 
        if (gfpflags_allow_blocking(gfp))
                spin_lock_irqsave(&as->lock, *flags);
@@ -702,13 +706,13 @@ static struct page *as_get_pde_page(struct tegra_smmu_as *as,
         * if allocation succeeded and the allocation failure isn't fatal.
         */
        if (as->pts[pde]) {
-               if (page)
-                       __iommu_free_pages(page, 0);
+               if (pt)
+                       iommu_free_page(pt);
 
-               page = as->pts[pde];
+               pt = as->pts[pde];
        }
 
-       return page;
+       return pt;
 }
 
 static int
@@ -718,15 +722,15 @@ __tegra_smmu_map(struct iommu_domain *domain, unsigned long iova,
 {
        struct tegra_smmu_as *as = to_smmu_as(domain);
        dma_addr_t pte_dma;
-       struct page *page;
+       struct tegra_pt *pt;
        u32 pte_attrs;
        u32 *pte;
 
-       page = as_get_pde_page(as, iova, gfp, flags);
-       if (!page)
+       pt = as_get_pde_page(as, iova, gfp, flags);
+       if (!pt)
                return -ENOMEM;
 
-       pte = as_get_pte(as, iova, &pte_dma, page);
+       pte = as_get_pte(as, iova, &pte_dma, pt);
        if (!pte)
                return -ENOMEM;