__vmcache_idsync(addr, PAGE_SIZE);
}
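+
+/*
+ * arch_zone_limits_init - fill in this architecture's maximum zone PFNs
+ * @max_zone_pfns: per-zone array of highest page frame numbers, passed
+ *                 on to free_area_init() by the caller
+ */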
+void __init arch_zone_limits_init(unsigned long *max_zone_pfns)
+{
+ /*
+ * This is not particularly well documented anywhere, but
+ * give ZONE_NORMAL all the memory, including the big holes
+ * left by the kernel+bootmem_map, which are already marked
+ * reserved in the bootmem_map; free_area_init() should see
+ * those reservations and adjust accordingly.
+ */
+ max_zone_pfns[ZONE_NORMAL] = max_low_pfn;
+}
+
/*
* In order to set up page allocator "nodes",
* somebody has to call free_area_init() for UMA.
 */
void __init paging_init(void)
{
unsigned long max_zone_pfn[MAX_NR_ZONES] = {0, };
- /*
- * This is not particularly well documented anywhere, but
- * give ZONE_NORMAL all the memory, including the big holes
- * left by the kernel+bootmem_map which are already left as reserved
- * in the bootmem_map; free_area_init should see those bits and
- * adjust accordingly.
- */
-
- max_zone_pfn[ZONE_NORMAL] = max_low_pfn;
-
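+ /* Let the architecture fill in the zone PFN limits. */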
+ arch_zone_limits_init(max_zone_pfn);
free_area_init(max_zone_pfn); /* sets up the zonelists and mem_map */
/*