git.ipfire.org Git - thirdparty/kernel/linux.git/commitdiff
iommu/pages: Move the __GFP_HIGHMEM checks into the common code
author Jason Gunthorpe <jgg@nvidia.com>
Tue, 8 Apr 2025 16:54:02 +0000 (13:54 -0300)
committer Joerg Roedel <jroedel@suse.de>
Thu, 17 Apr 2025 14:22:45 +0000 (16:22 +0200)
The entire allocator API is built around using the kernel virtual address,
so it is illegal to pass __GFP_HIGHMEM in as a GFP flag. Block it in the
common code. Remove the duplicated checks from drivers.

Reviewed-by: Lu Baolu <baolu.lu@linux.intel.com>
Reviewed-by: Mostafa Saleh <smostafa@google.com>
Tested-by: Nicolin Chen <nicolinc@nvidia.com>
Tested-by: Alejandro Jimenez <alejandro.j.jimenez@oracle.com>
Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
Link: https://lore.kernel.org/r/14-v4-c8663abbb606+3f7-iommu_pages_jgg@nvidia.com
Signed-off-by: Joerg Roedel <jroedel@suse.de>
drivers/iommu/io-pgtable-arm.c
drivers/iommu/io-pgtable-dart.c
drivers/iommu/iommu-pages.c

index 62df2528d020b2c96ae5768328f60a8e444c623d..08d0f62abe8a093347e9b7ce70b3c0cec4820a84 100644 (file)
@@ -267,8 +267,6 @@ static void *__arm_lpae_alloc_pages(size_t size, gfp_t gfp,
        dma_addr_t dma;
        void *pages;
 
-       VM_BUG_ON((gfp & __GFP_HIGHMEM));
-
        if (cfg->alloc)
                pages = cfg->alloc(cookie, size, gfp);
        else
index c80ccc753b5e60618d66414004910699e22b5d95..8b5403aae8c0ca5dafcab430bc140e92e47ededa 100644 (file)
@@ -111,7 +111,6 @@ static void *__dart_alloc_pages(size_t size, gfp_t gfp)
 {
        int order = get_order(size);
 
-       VM_BUG_ON((gfp & __GFP_HIGHMEM));
        return iommu_alloc_pages(gfp, order);
 }
 
index 3077df642adb1f1d40f41c1578f040edc13fd593..a7eed09420a2311d2963c34291b87fa8df43063d 100644 (file)
@@ -37,6 +37,10 @@ void *iommu_alloc_pages_node(int nid, gfp_t gfp, unsigned int order)
        const unsigned long pgcnt = 1UL << order;
        struct folio *folio;
 
+       /* This uses page_address() on the memory. */
+       if (WARN_ON(gfp & __GFP_HIGHMEM))
+               return NULL;
+
        /*
         * __folio_alloc_node() does not handle NUMA_NO_NODE like
         * alloc_pages_node() did.