arm64: Honor __GFP_ZERO in dma allocations
author    Suzuki K. Poulose <suzuki.poulose@arm.com>
          Thu, 19 Mar 2015 18:17:09 +0000 (18:17 +0000)
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>
          Thu, 26 Mar 2015 12:59:36 +0000 (13:59 +0100)
commit 7132813c384515c9dede1ae20e56f3895feb7f1e upstream.

The current implementation does not zero out the pages it allocates.
Honor the __GFP_ZERO flag and zero the buffer when the flag is set.

Acked-by: Will Deacon <will.deacon@arm.com>
Signed-off-by: Suzuki K. Poulose <suzuki.poulose@arm.com>
Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
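
For context, a caller that passes __GFP_ZERO expects the returned coherent
buffer to come back zero-filled without an explicit memset of its own. A
minimal, hypothetical caller sketch (the helper name, device, size and handle
are illustrative and not part of this patch):

#include <linux/dma-mapping.h>
#include <linux/gfp.h>

/* Hypothetical helper: allocate a zero-filled coherent DMA buffer.
 * After this patch, the arm64 atomic-pool and CMA paths honor
 * __GFP_ZERO, so the returned memory is guaranteed to be zeroed.
 */
static void *alloc_zeroed_dma_buf(struct device *dev, size_t size,
                                  dma_addr_t *handle)
{
        return dma_alloc_coherent(dev, size, handle,
                                  GFP_KERNEL | __GFP_ZERO);
}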
diff --git a/arch/arm64/mm/dma-mapping.c b/arch/arm64/mm/dma-mapping.c
index d9209420391381d41aa97665c3ce1dd08456c466..df34a70caca19982491c605139d243c8bef70d5c 100644
--- a/arch/arm64/mm/dma-mapping.c
+++ b/arch/arm64/mm/dma-mapping.c
@@ -51,7 +51,7 @@ static int __init early_coherent_pool(char *p)
 }
 early_param("coherent_pool", early_coherent_pool);
 
-static void *__alloc_from_pool(size_t size, struct page **ret_page)
+static void *__alloc_from_pool(size_t size, struct page **ret_page, gfp_t flags)
 {
        unsigned long val;
        void *ptr = NULL;
@@ -67,6 +67,8 @@ static void *__alloc_from_pool(size_t size, struct page **ret_page)
 
                *ret_page = phys_to_page(phys);
                ptr = (void *)val;
+               if (flags & __GFP_ZERO)
+                       memset(ptr, 0, size);
        }
 
        return ptr;
@@ -101,6 +103,7 @@ static void *__dma_alloc_coherent(struct device *dev, size_t size,
                flags |= GFP_DMA;
        if (IS_ENABLED(CONFIG_DMA_CMA) && (flags & __GFP_WAIT)) {
                struct page *page;
+               void *addr;
 
                size = PAGE_ALIGN(size);
                page = dma_alloc_from_contiguous(dev, size >> PAGE_SHIFT,
@@ -109,7 +112,10 @@ static void *__dma_alloc_coherent(struct device *dev, size_t size,
                        return NULL;
 
                *dma_handle = phys_to_dma(dev, page_to_phys(page));
-               return page_address(page);
+               addr = page_address(page);
+               if (flags & __GFP_ZERO)
+                       memset(addr, 0, size);
+               return addr;
        } else {
                return swiotlb_alloc_coherent(dev, size, dma_handle, flags);
        }
@@ -145,7 +151,7 @@ static void *__dma_alloc_noncoherent(struct device *dev, size_t size,
 
        if (!(flags & __GFP_WAIT)) {
                struct page *page = NULL;
-               void *addr = __alloc_from_pool(size, &page);
+               void *addr = __alloc_from_pool(size, &page, flags);
 
                if (addr)
                        *dma_handle = phys_to_dma(dev, page_to_phys(page));
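
Honoring the flag also matters for indirect callers: the generic
dma_zalloc_coherent() wrapper of kernels from this era simply ORs __GFP_ZERO
into the gfp mask and relies on the architecture backend to act on it. Roughly
(a paraphrase of the include/linux/dma-mapping.h wrapper, shown only as an
illustration):

static inline void *dma_zalloc_coherent(struct device *dev, size_t size,
                                        dma_addr_t *dma_handle, gfp_t flag)
{
        /* Zeroing is delegated to the arch backend via __GFP_ZERO,
         * which the arm64 atomic-pool and CMA paths ignored before
         * this patch.
         */
        return dma_alloc_coherent(dev, size, dma_handle,
                                  flag | __GFP_ZERO);
}

With this change, both __alloc_from_pool() and the CMA path zero the buffer
when __GFP_ZERO is set, so such wrappers behave as documented on arm64.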