}
/*
- * paging_init() sets up the memory map.
+ * paging_init() initializes the kernel's ZERO_PGE.
*/
void __init paging_init(void)
{
- unsigned long max_zone_pfn[MAX_NR_ZONES] = {0, };
-
- /* Initialize mem_map[]. */
- arch_zone_limits_init(max_zone_pfn);
- free_area_init(max_zone_pfn);
-
- /* Initialize the kernel's ZERO_PGE. */
memset(absolute_pointer(ZERO_PGE), 0, PAGE_SIZE);
}
*/
void __init setup_arch_memory(void)
{
- unsigned long max_zone_pfn[MAX_NR_ZONES] = { 0 };
-
setup_initial_init_mm(_text, _etext, _edata, _end);
/* first page of system - kernel .vector starts here */
arch_pfn_offset = min(min_low_pfn, min_high_pfn);
kmap_init();
#endif /* CONFIG_HIGHMEM */
-
- arch_zone_limits_init(max_zone_pfn);
- free_area_init(max_zone_pfn);
}
void __init arch_mm_preinit(void)
#endif
}
-static void __init zone_sizes_init(unsigned long min, unsigned long max_low,
- unsigned long max_high)
-{
- unsigned long max_zone_pfn[MAX_NR_ZONES] = { 0 };
-
- arch_zone_limits_init(max_zone_pfn);
- free_area_init(max_zone_pfn);
-}
-
#ifdef CONFIG_HAVE_ARCH_PFN_VALID
int pfn_valid(unsigned long pfn)
{
* done after the fixed reservations
*/
sparse_init();
-
- /*
- * Now free the memory - free_area_init needs
- * the sparse mem_map arrays initialized by sparse_init()
- * for memmap_init_zone(), otherwise all PFNs are invalid.
- */
- zone_sizes_init(min_low_pfn, max_low_pfn, max_pfn);
}
/*
static void __init dma_limits_init(void)
{
- unsigned long max_zone_pfns[MAX_NR_ZONES] = {0};
phys_addr_t __maybe_unused acpi_zone_dma_limit;
phys_addr_t __maybe_unused dt_zone_dma_limit;
phys_addr_t __maybe_unused dma32_phys_limit =
#endif
if (!arm64_dma_phys_limit)
arm64_dma_phys_limit = PHYS_MASK + 1;
-
- arch_zone_limits_init(max_zone_pfns);
- free_area_init(max_zone_pfns);
}
int pfn_is_map_memory(unsigned long pfn)
{
unsigned long lowmem_size = PFN_DOWN(LOWMEM_LIMIT - PHYS_OFFSET_OFFSET);
unsigned long sseg_size = PFN_DOWN(SSEG_SIZE - PHYS_OFFSET_OFFSET);
- unsigned long max_zone_pfn[MAX_NR_ZONES] = { 0 };
signed long size;
memblock_reserve(__pa(_start), _end - _start);
memblock_set_current_limit(PFN_PHYS(max_low_pfn));
dma_contiguous_reserve(0);
-
- arch_zone_limits_init(max_zone_pfn);
- free_area_init(max_zone_pfn);
}
void __init setup_arch(char **cmdline_p)
max_zone_pfns[ZONE_NORMAL] = max_low_pfn;
}
-/*
- * In order to set up page allocator "nodes",
- * somebody has to call free_area_init() for UMA.
- *
- * In this mode, we only have one pg_data_t
- * structure: contig_mem_data.
- */
static void __init paging_init(void)
{
- unsigned long max_zone_pfn[MAX_NR_ZONES] = {0, };
-
- arch_zone_limits_init(max_zone_pfn);
- free_area_init(max_zone_pfn); /* sets up the zonelists and mem_map */
-
/*
* Set the init_mm descriptors "context" value to point to the
* initial kernel segment table's physical address.
return pte;
}
-extern void paging_init(void);
-
#define pte_none(pte) (!(pte_val(pte) & ~_PAGE_GLOBAL))
#define pte_present(pte) (pte_val(pte) & (_PAGE_PRESENT | _PAGE_PROTNONE))
#define pte_no_exec(pte) (pte_val(pte) & _PAGE_NO_EXEC)
prefill_possible_map();
#endif
- paging_init();
-
#ifdef CONFIG_KASAN
kasan_init();
#endif
max_zone_pfns[ZONE_NORMAL] = max_low_pfn;
}
-void __init paging_init(void)
-{
- unsigned long max_zone_pfns[MAX_NR_ZONES];
-
- arch_zone_limits_init(max_zone_pfns);
- free_area_init(max_zone_pfns);
-}
-
void __ref free_initmem(void)
{
free_initmem_default(POISON_FREE_INITMEM);
* page_alloc get different views of the world.
*/
unsigned long end_mem = memory_end & PAGE_MASK;
- unsigned long max_zone_pfn[MAX_NR_ZONES] = { 0, };
high_memory = (void *) end_mem;
empty_zero_page = memblock_alloc_or_panic(PAGE_SIZE, PAGE_SIZE);
- arch_zone_limits_init(max_zone_pfn);
- free_area_init(max_zone_pfn);
}
#endif /* CONFIG_MMU */
pte_t *pg_table;
unsigned long address, size;
unsigned long next_pgtable;
- unsigned long max_zone_pfn[MAX_NR_ZONES] = { 0 };
int i;
empty_zero_page = memblock_alloc_or_panic(PAGE_SIZE, PAGE_SIZE);
}
current->mm = NULL;
- arch_zone_limits_init(max_zone_pfn);
- free_area_init(max_zone_pfn);
}
int cf_tlb_miss(struct pt_regs *regs, int write, int dtlb, int extension_word)
*/
void __init paging_init(void)
{
- unsigned long max_zone_pfn[MAX_NR_ZONES] = { 0, };
unsigned long min_addr, max_addr;
unsigned long addr;
int i;
set_fc(USER_DATA);
#ifdef DEBUG
- printk ("before free_area_init\n");
+ printk ("before node_set_state\n");
#endif
for (i = 0; i < m68k_num_memory; i++)
if (node_present_pages(i))
node_set_state(i, N_NORMAL_MEMORY);
-
- arch_zone_limits_init(max_zone_pfn);
- free_area_init(max_zone_pfn);
}
unsigned long address;
unsigned long next_pgtable;
unsigned long bootmem_end;
- unsigned long max_zone_pfn[MAX_NR_ZONES] = { 0, };
unsigned long size;
empty_zero_page = memblock_alloc_or_panic(PAGE_SIZE, PAGE_SIZE);
mmu_emu_init(bootmem_end);
current->mm = NULL;
-
- /* memory sizing is a hack stolen from motorola.c.. hope it works for us */
- arch_zone_limits_init(max_zone_pfn);
-
- /* I really wish I knew why the following change made things better... -- Sam */
- free_area_init(max_zone_pfn);
-
-
}
static const pgprot_t protection_map[16] = {
*/
static void __init paging_init(void)
{
- unsigned long zones_size[MAX_NR_ZONES];
int idx;
/* Setup fixmaps */
for (idx = 0; idx < __end_of_fixed_addresses; idx++)
clear_fixmap(idx);
- /* Clean every zones */
- memset(zones_size, 0, sizeof(zones_size));
-
#ifdef CONFIG_HIGHMEM
highmem_init();
#endif
- arch_zone_limits_init(zones_size);
- /* We don't have holes in memory map */
- free_area_init(zones_size);
}
void __init setup_memory(void)
void __init paging_init(void)
{
- unsigned long zones_size[MAX_NR_ZONES] = {0, };
-
pagetable_init();
- arch_zone_limits_init(zones_size);
- free_area_init(zones_size);
}
/* All PCI device belongs to logical Node-0 */
void __init paging_init(void)
{
- unsigned long max_zone_pfns[MAX_NR_ZONES];
-
pagetable_init();
-
- arch_zone_limits_init(max_zone_pfns);
- free_area_init(max_zone_pfns);
}
#ifdef CONFIG_64BIT
void __init paging_init(void)
{
- unsigned long zones_size[MAX_NR_ZONES] = {0, };
-
pagetable_init();
- arch_zone_limits_init(zones_size);
- free_area_init(zones_size);
}
*/
void __init paging_init(void)
{
- unsigned long max_zone_pfn[MAX_NR_ZONES] = { 0 };
-
pagetable_init();
pgd_current = swapper_pg_dir;
- arch_zone_limits_init(max_zone_pfn);
- /* pass the memory from the bootmem allocator to the main allocator */
- free_area_init(max_zone_pfn);
-
flush_dcache_range((unsigned long)empty_zero_page,
(unsigned long)empty_zero_page + PAGE_SIZE);
}
max_zone_pfns[ZONE_NORMAL] = max_low_pfn;
}
-static void __init zone_sizes_init(void)
-{
- unsigned long max_zone_pfn[MAX_NR_ZONES] = { 0 };
-
- arch_zone_limits_init(max_zone_pfn);
- free_area_init(max_zone_pfn);
-}
-
extern const char _s_kernel_ro[], _e_kernel_ro[];
/*
map_ram();
- zone_sizes_init();
-
/* self modifying code ;) */
/* Since the old TLB miss handler has been running up until now,
* the kernel pages are still all RW, so we can still modify the
max_zone_pfns[ZONE_NORMAL] = PFN_DOWN(memblock_end_of_DRAM());
}
-static void __init parisc_bootmem_free(void)
-{
- unsigned long max_zone_pfn[MAX_NR_ZONES] = { 0, };
-
- arch_zone_limits_init(max_zone_pfn);
- free_area_init(max_zone_pfn);
-}
-
void __init paging_init(void)
{
setup_bootmem();
flush_tlb_all_local(NULL);
sparse_init();
- parisc_bootmem_free();
}
static void alloc_btlb(unsigned long start, unsigned long end, int *slot,
*/
void __init paging_init(void)
{
- unsigned long max_zone_pfns[MAX_NR_ZONES] = { 0 };
unsigned long long total_ram = memblock_phys_mem_size();
phys_addr_t top_of_ram = memblock_end_of_DRAM();
int zone_dma_bits;
zone_dma_limit = DMA_BIT_MASK(zone_dma_bits);
- arch_zone_limits_init(max_zone_pfns);
- free_area_init(max_zone_pfns);
-
mark_nonram_nosave();
}
max_zone_pfns[ZONE_NORMAL] = max_low_pfn;
}
-static void __init zone_sizes_init(void)
-{
- unsigned long max_zone_pfns[MAX_NR_ZONES] = { 0, };
-
- arch_zone_limits_init(max_zone_pfns);
- free_area_init(max_zone_pfns);
-}
-
#if defined(CONFIG_MMU) && defined(CONFIG_DEBUG_VM)
#define LOG2_SZ_1K ilog2(SZ_1K)
/* The entire VMEMMAP region has been populated. Flush TLB for this region */
local_flush_tlb_kernel_range(VMEMMAP_START, VMEMMAP_END);
#endif
- zone_sizes_init();
arch_reserve_crashkernel();
memblock_dump_all();
}
*/
void __init paging_init(void)
{
- unsigned long max_zone_pfns[MAX_NR_ZONES];
-
vmem_map_init();
sparse_init();
zone_dma_limit = DMA_BIT_MASK(31);
- memset(max_zone_pfns, 0, sizeof(max_zone_pfns));
- arch_zone_limits_init(max_zone_pfns);
- free_area_init(max_zone_pfns);
}
void mark_rodata_ro(void)
void __init paging_init(void)
{
- unsigned long max_zone_pfns[MAX_NR_ZONES];
unsigned long vaddr, end;
sh_mv.mv_mem_init();
page_table_range_init(vaddr, end, swapper_pg_dir);
kmap_coherent_init();
-
- memset(max_zone_pfns, 0, sizeof(max_zone_pfns));
- arch_zone_limits_init(max_zone_pfns);
- free_area_init(max_zone_pfns);
}
unsigned int mem_init_done = 0;
kernel_physical_mapping_init();
- {
- unsigned long max_zone_pfns[MAX_NR_ZONES];
-
- memset(max_zone_pfns, 0, sizeof(max_zone_pfns));
-
- max_zone_pfns[ZONE_NORMAL] = end_pfn;
-
- arch_zone_limits_init(max_zone_pfns);
- free_area_init(max_zone_pfns);
- }
-
printk("Booting Linux...\n");
}
flush_tlb_all();
sparc_context_init(num_contexts);
-
- {
- unsigned long max_zone_pfn[MAX_NR_ZONES] = { 0 };
-
- arch_zone_limits_init(max_zone_pfn);
- free_area_init(max_zone_pfn);
- }
}
void mmu_info(struct seq_file *m)
void __init paging_init(void)
{
- unsigned long max_zone_pfn[MAX_NR_ZONES] = { 0 };
-
empty_zero_page = (unsigned long *) memblock_alloc_low(PAGE_SIZE,
PAGE_SIZE);
if (!empty_zero_page)
panic("%s: Failed to allocate %lu bytes align=%lx\n",
__func__, PAGE_SIZE, PAGE_SIZE);
-
- arch_zone_limits_init(max_zone_pfn);
- free_area_init(max_zone_pfn);
}
/*
#endif
}
-void __init zone_sizes_init(void)
-{
- unsigned long max_zone_pfns[MAX_NR_ZONES];
-
- memset(max_zone_pfns, 0, sizeof(max_zone_pfns));
-
- arch_zone_limits_init(max_zone_pfns);
- free_area_init(max_zone_pfns);
-}
-
__visible DEFINE_PER_CPU_ALIGNED(struct tlb_state, cpu_tlbstate) = {
.loaded_mm = &init_mm,
.next_asid = 1,
*/
olpc_dt_build_devicetree();
sparse_init();
- zone_sizes_init();
}
/*
*/
node_clear_state(0, N_MEMORY);
node_clear_state(0, N_NORMAL_MEMORY);
-
- zone_sizes_init();
}
#define PAGE_UNUSED 0xFD
unsigned long kernel_physical_mapping_change(unsigned long start,
unsigned long end,
unsigned long page_size_mask);
-void zone_sizes_init(void);
extern int after_bootmem;
void __init zones_init(void)
{
- unsigned long max_zone_pfn[MAX_NR_ZONES] = { 0, };
-
- arch_zone_limits_init(max_zone_pfn);
- free_area_init(max_zone_pfn);
print_vm_layout();
}
struct folio_batch;
void arch_mm_preinit(void);
+void mm_core_init_early(void);
void mm_core_init(void);
void init_mm_internals(void);
}
/*
- * Using memblock node mappings, an architecture may initialise its
+ * FIXME: stale now that free_area_init() is static; see the note at the
+ * end of this comment.
+ *
+ * Using memblock node mappings, an architecture may initialise its
* zones, allocate the backing mem_map and account for memory holes in an
* architecture independent manner.
*
* memblock_add_node(base, size, nid, MEMBLOCK_NONE)
* free_area_init(max_zone_pfns);
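+ *
+ * free_area_init() is now called from generic code (mm_core_init_early());
+ * architectures only need to implement arch_zone_limits_init().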
*/
-void free_area_init(unsigned long *max_zone_pfn);
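+/*
+ * Implemented by each architecture; typically it just records the highest
+ * PFN of each populated zone, e.g. max_zone_pfn[ZONE_NORMAL] = max_low_pfn.
+ */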
void arch_zone_limits_init(unsigned long *max_zone_pfn);
unsigned long node_map_pfn_alignment(void);
extern unsigned long absent_pages_in_range(unsigned long start_pfn,
page_address_init();
pr_notice("%s", linux_banner);
setup_arch(&command_line);
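+ /* Set up zones and mem_map now that setup_arch() has populated memblock */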
+ mm_core_init_early();
/* Static keys and static calls are needed by LSMs */
jump_label_init();
static_call_init();
/**
* free_area_init - Initialise all pg_data_t and zone data
- * @max_zone_pfn: an array of max PFNs for each zone
*
* This will call free_area_init_node() for each active node in the system.
* Using the page ranges provided by memblock_set_node(), the size of each
* starts where the previous one ended. For example, ZONE_DMA32 starts
* at arch_max_dma_pfn.
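+ *
+ * Zone PFN limits are filled in by arch_zone_limits_init().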
*/
-void __init free_area_init(unsigned long *max_zone_pfn)
+static void __init free_area_init(void)
{
+ unsigned long max_zone_pfn[MAX_NR_ZONES] = { 0 };
unsigned long start_pfn, end_pfn;
int i, nid, zone;
bool descending;
- /* Record where the zone boundaries are */
- memset(arch_zone_lowest_possible_pfn, 0,
- sizeof(arch_zone_lowest_possible_pfn));
- memset(arch_zone_highest_possible_pfn, 0,
- sizeof(arch_zone_highest_possible_pfn));
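+ /* Have the architecture record where the zone boundaries are */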
+ arch_zone_limits_init(max_zone_pfn);
start_pfn = PHYS_PFN(memblock_start_of_DRAM());
descending = arch_has_descending_max_zone_pfns();
{
}
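+/*
+ * Early memory-management setup, called right after setup_arch(): size the
+ * zones and initialize the mem_map before the rest of mm_core_init() runs.
+ */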
+void __init mm_core_init_early(void)
+{
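+ /* Allocate boot-time hugetlb pages before the zones are set up */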
+ hugetlb_bootmem_alloc();
+
+ free_area_init();
+}
+
/*
* Set up kernel memory allocators
*/
void __init mm_core_init(void)
{
arch_mm_preinit();
- hugetlb_bootmem_alloc();
/* Initializations relying on SMP setup */
BUILD_BUG_ON(MAX_ZONELISTS > 2);