git.ipfire.org Git - thirdparty/kernel/linux.git/commitdiff
iommu/rockchip: use page allocation function provided by iommu-pages.h
Author: Pasha Tatashin <pasha.tatashin@soleen.com>
Sat, 13 Apr 2024 00:25:18 +0000 (00:25 +0000)
Committer: Joerg Roedel <jroedel@suse.de>
Mon, 15 Apr 2024 12:31:45 +0000 (14:31 +0200)
Convert iommu/rockchip-iommu.c to use the new page allocation functions
provided in iommu-pages.h.

Signed-off-by: Pasha Tatashin <pasha.tatashin@soleen.com>
Acked-by: David Rientjes <rientjes@google.com>
Tested-by: Bagas Sanjaya <bagasdotme@gmail.com>
Link: https://lore.kernel.org/r/20240413002522.1101315-8-pasha.tatashin@soleen.com
Signed-off-by: Joerg Roedel <jroedel@suse.de>
drivers/iommu/rockchip-iommu.c

index da79d9f4cf637197f87691bee579fa976dc75ae9..4b369419b32ce1b571f836e11f92452943a753c6 100644 (file)
@@ -26,6 +26,8 @@
 #include <linux/slab.h>
 #include <linux/spinlock.h>
 
+#include "iommu-pages.h"
+
 /** MMU register offsets */
 #define RK_MMU_DTE_ADDR                0x00    /* Directory table address */
 #define RK_MMU_STATUS          0x04
@@ -727,14 +729,14 @@ static u32 *rk_dte_get_page_table(struct rk_iommu_domain *rk_domain,
        if (rk_dte_is_pt_valid(dte))
                goto done;
 
-       page_table = (u32 *)get_zeroed_page(GFP_ATOMIC | rk_ops->gfp_flags);
+       page_table = iommu_alloc_page(GFP_ATOMIC | rk_ops->gfp_flags);
        if (!page_table)
                return ERR_PTR(-ENOMEM);
 
        pt_dma = dma_map_single(dma_dev, page_table, SPAGE_SIZE, DMA_TO_DEVICE);
        if (dma_mapping_error(dma_dev, pt_dma)) {
                dev_err(dma_dev, "DMA mapping error while allocating page table\n");
-               free_page((unsigned long)page_table);
+               iommu_free_page(page_table);
                return ERR_PTR(-ENOMEM);
        }
 
@@ -1061,7 +1063,7 @@ static struct iommu_domain *rk_iommu_domain_alloc_paging(struct device *dev)
         * Each level1 (dt) and level2 (pt) table has 1024 4-byte entries.
         * Allocate one 4 KiB page for each table.
         */
-       rk_domain->dt = (u32 *)get_zeroed_page(GFP_KERNEL | rk_ops->gfp_flags);
+       rk_domain->dt = iommu_alloc_page(GFP_KERNEL | rk_ops->gfp_flags);
        if (!rk_domain->dt)
                goto err_free_domain;
 
@@ -1083,7 +1085,7 @@ static struct iommu_domain *rk_iommu_domain_alloc_paging(struct device *dev)
        return &rk_domain->domain;
 
 err_free_dt:
-       free_page((unsigned long)rk_domain->dt);
+       iommu_free_page(rk_domain->dt);
 err_free_domain:
        kfree(rk_domain);
 
@@ -1104,13 +1106,13 @@ static void rk_iommu_domain_free(struct iommu_domain *domain)
                        u32 *page_table = phys_to_virt(pt_phys);
                        dma_unmap_single(dma_dev, pt_phys,
                                         SPAGE_SIZE, DMA_TO_DEVICE);
-                       free_page((unsigned long)page_table);
+                       iommu_free_page(page_table);
                }
        }
 
        dma_unmap_single(dma_dev, rk_domain->dt_dma,
                         SPAGE_SIZE, DMA_TO_DEVICE);
-       free_page((unsigned long)rk_domain->dt);
+       iommu_free_page(rk_domain->dt);
 
        kfree(rk_domain);
 }