 	return min(zone_limit, memblock_end_of_DRAM() - 1) + 1;
 }
 
-static void __init zone_sizes_init(void)
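+/*
+ * Fill @max_zone_pfns with the highest PFN usable by each zone: ZONE_DMA
+ * is capped by zone_dma_limit, ZONE_DMA32 by the 32-bit DMA mask, and
+ * ZONE_NORMAL extends to max_pfn.
+ */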
+void __init arch_zone_limits_init(unsigned long *max_zone_pfns)
+{
+	phys_addr_t __maybe_unused dma32_phys_limit =
+		max_zone_phys(DMA_BIT_MASK(32));
+
+#ifdef CONFIG_ZONE_DMA
+	max_zone_pfns[ZONE_DMA] = PFN_DOWN(max_zone_phys(zone_dma_limit));
+#endif
+#ifdef CONFIG_ZONE_DMA32
+	max_zone_pfns[ZONE_DMA32] = PFN_DOWN(dma32_phys_limit);
+#endif
+	max_zone_pfns[ZONE_NORMAL] = max_pfn;
+}
+
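+/*
+ * Derive arm64_dma_phys_limit from the DMA addressing constraints and
+ * hand the per-zone PFN limits to free_area_init().
+ */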
+static void __init dma_limits_init(void)
 {
 	unsigned long max_zone_pfns[MAX_NR_ZONES] = {0};
 	phys_addr_t __maybe_unused acpi_zone_dma_limit;
 	if (memblock_start_of_DRAM() < U32_MAX)
 		zone_dma_limit = min(zone_dma_limit, U32_MAX);
 	arm64_dma_phys_limit = max_zone_phys(zone_dma_limit);
-	max_zone_pfns[ZONE_DMA] = PFN_DOWN(arm64_dma_phys_limit);
 #endif
 #ifdef CONFIG_ZONE_DMA32
-	max_zone_pfns[ZONE_DMA32] = PFN_DOWN(dma32_phys_limit);
 	if (!arm64_dma_phys_limit)
 		arm64_dma_phys_limit = dma32_phys_limit;
 #endif
 	if (!arm64_dma_phys_limit)
 		arm64_dma_phys_limit = PHYS_MASK + 1;
-	max_zone_pfns[ZONE_NORMAL] = max_pfn;
 
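+	/* Per-zone PFN limits are now filled in by arch_zone_limits_init() */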
+	arch_zone_limits_init(max_zone_pfns);
 	free_area_init(max_zone_pfns);
 }
 	 * done after the fixed reservations
 	 */
 	sparse_init();
-	zone_sizes_init();
+	dma_limits_init();
 
 	/*
 	 * Reserve the CMA area after arm64_dma_phys_limit was initialised.