From: Pasha Tatashin <pasha.tatashin@soleen.com>
Date: Sat, 13 Apr 2024 00:25:18 +0000 (+0000)
Subject: iommu/rockchip: use page allocation function provided by iommu-pages.h
X-Git-Tag: v6.10-rc1~114^2^4~2^2~4
X-Git-Url: http://git.ipfire.org/?a=commitdiff_plain;h=5404ccaaff357d2d8d40e1a879446da9c9da5879;p=thirdparty%2Fkernel%2Flinux.git

iommu/rockchip: use page allocation function provided by iommu-pages.h

Convert iommu/rockchip-iommu.c to use the new page allocation functions
provided in iommu-pages.h.

Signed-off-by: Pasha Tatashin <pasha.tatashin@soleen.com>
Acked-by: David Rientjes
Tested-by: Bagas Sanjaya
Link: https://lore.kernel.org/r/20240413002522.1101315-8-pasha.tatashin@soleen.com
Signed-off-by: Joerg Roedel
---

diff --git a/drivers/iommu/rockchip-iommu.c b/drivers/iommu/rockchip-iommu.c
index da79d9f4cf637..4b369419b32ce 100644
--- a/drivers/iommu/rockchip-iommu.c
+++ b/drivers/iommu/rockchip-iommu.c
@@ -26,6 +26,8 @@
 #include <linux/slab.h>
 #include <linux/spinlock.h>
 
+#include "iommu-pages.h"
+
 /** MMU register offsets */
 #define RK_MMU_DTE_ADDR		0x00	/* Directory table address */
 #define RK_MMU_STATUS		0x04
@@ -727,14 +729,14 @@ static u32 *rk_dte_get_page_table(struct rk_iommu_domain *rk_domain,
 	if (rk_dte_is_pt_valid(dte))
 		goto done;
 
-	page_table = (u32 *)get_zeroed_page(GFP_ATOMIC | rk_ops->gfp_flags);
+	page_table = iommu_alloc_page(GFP_ATOMIC | rk_ops->gfp_flags);
 	if (!page_table)
 		return ERR_PTR(-ENOMEM);
 
 	pt_dma = dma_map_single(dma_dev, page_table, SPAGE_SIZE, DMA_TO_DEVICE);
 	if (dma_mapping_error(dma_dev, pt_dma)) {
 		dev_err(dma_dev, "DMA mapping error while allocating page table\n");
-		free_page((unsigned long)page_table);
+		iommu_free_page(page_table);
 		return ERR_PTR(-ENOMEM);
 	}
 
@@ -1061,7 +1063,7 @@ static struct iommu_domain *rk_iommu_domain_alloc_paging(struct device *dev)
 	 * Each level1 (dt) and level2 (pt) table has 1024 4-byte entries.
 	 * Allocate one 4 KiB page for each table.
 	 */
-	rk_domain->dt = (u32 *)get_zeroed_page(GFP_KERNEL | rk_ops->gfp_flags);
+	rk_domain->dt = iommu_alloc_page(GFP_KERNEL | rk_ops->gfp_flags);
 	if (!rk_domain->dt)
 		goto err_free_domain;
 
@@ -1083,7 +1085,7 @@ static struct iommu_domain *rk_iommu_domain_alloc_paging(struct device *dev)
 	return &rk_domain->domain;
 
 err_free_dt:
-	free_page((unsigned long)rk_domain->dt);
+	iommu_free_page(rk_domain->dt);
 err_free_domain:
 	kfree(rk_domain);
 
@@ -1104,13 +1106,13 @@ static void rk_iommu_domain_free(struct iommu_domain *domain)
 			u32 *page_table = phys_to_virt(pt_phys);
 
 			dma_unmap_single(dma_dev, pt_phys, SPAGE_SIZE, DMA_TO_DEVICE);
-			free_page((unsigned long)page_table);
+			iommu_free_page(page_table);
 		}
 	}
 
 	dma_unmap_single(dma_dev, rk_domain->dt_dma,
 			 SPAGE_SIZE, DMA_TO_DEVICE);
-	free_page((unsigned long)rk_domain->dt);
+	iommu_free_page(rk_domain->dt);
 
 	kfree(rk_domain);
 }
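
For reference, the converted allocation/error path looks like the sketch
below when pulled out of the driver. This is an illustrative fragment, not
code from the patch: the example_alloc_pt() name is made up, SPAGE_SIZE is
re-declared here as the 4 KiB page size used by rockchip-iommu.c, and
iommu_alloc_page()/iommu_free_page() are assumed to behave as the drop-in
replacements for get_zeroed_page()/free_page() that the hunks above show
(a zeroed page-sized allocation, freed by its virtual address).

	/*
	 * Illustrative sketch only: allocate one zeroed page for a
	 * second-level page table, DMA-map it, and release it with the
	 * matching iommu-pages.h helper if the mapping fails.
	 */
	#include <linux/dma-mapping.h>
	#include <linux/err.h>
	#include <linux/gfp.h>
	#include <linux/types.h>

	#include "iommu-pages.h"

	#define SPAGE_SIZE	(1 << 12)	/* 4 KiB, matching rockchip-iommu.c */

	static u32 *example_alloc_pt(struct device *dma_dev, gfp_t gfp_flags)
	{
		u32 *page_table;
		dma_addr_t pt_dma;

		/* was: (u32 *)get_zeroed_page(GFP_ATOMIC | gfp_flags) */
		page_table = iommu_alloc_page(GFP_ATOMIC | gfp_flags);
		if (!page_table)
			return ERR_PTR(-ENOMEM);

		pt_dma = dma_map_single(dma_dev, page_table, SPAGE_SIZE,
					DMA_TO_DEVICE);
		if (dma_mapping_error(dma_dev, pt_dma)) {
			/* was: free_page((unsigned long)page_table) */
			iommu_free_page(page_table);
			return ERR_PTR(-ENOMEM);
		}

		return page_table;
	}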