// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
 */
#include <linux/init.h>
#include <linux/export.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/pagemap.h>
#include <linux/memblock.h>
#include <linux/memremap.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/highmem.h>
#include <linux/swap.h>
#include <linux/proc_fs.h>
#include <linux/pfn.h>
#include <linux/hardirq.h>
#include <linux/gfp.h>
#include <linux/hugetlb.h>
#include <linux/mmzone.h>

#include <asm/asm-offsets.h>
#include <asm/bootinfo.h>
#include <asm/dma.h>
#include <asm/mmu_context.h>
#include <asm/sections.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/tlb.h>

unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)] __page_aligned_bss;
EXPORT_SYMBOL(empty_zero_page);
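
/*
 * Copy a user page through short-lived kernel mappings.  Both pages are
 * mapped with kmap_atomic(), so this also works when the pages are not
 * permanently mapped; the mappings are released in reverse order of
 * acquisition.
 */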
void copy_user_highpage(struct page *to, struct page *from,
	unsigned long vaddr, struct vm_area_struct *vma)
{
	void *vfrom, *vto;

	vto = kmap_atomic(to);
	vfrom = kmap_atomic(from);
	copy_page(vto, vfrom);
	kunmap_atomic(vfrom);
	kunmap_atomic(vto);

	/* Make sure this page's new contents are visible on other CPUs before it is used */
	smp_wmb();
}

int __ref page_is_ram(unsigned long pfn)
{
	unsigned long addr = PFN_PHYS(pfn);

	return memblock_is_memory(addr) && !memblock_is_reserved(addr);
}
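
/*
 * On !NUMA configurations the zone limits are computed here: ZONE_DMA
 * and ZONE_DMA32 are capped at their architectural PFN limits and
 * everything else falls into ZONE_NORMAL.
 */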
#ifndef CONFIG_NUMA
void __init paging_init(void)
{
	unsigned long max_zone_pfns[MAX_NR_ZONES];

#ifdef CONFIG_ZONE_DMA
	max_zone_pfns[ZONE_DMA] = MAX_DMA_PFN;
#endif
#ifdef CONFIG_ZONE_DMA32
	max_zone_pfns[ZONE_DMA32] = MAX_DMA32_PFN;
#endif
	max_zone_pfns[ZONE_NORMAL] = max_low_pfn;

	free_area_init(max_zone_pfns);
}

void __init mem_init(void)
{
	max_mapnr = max_low_pfn;
	high_memory = (void *) __va(max_low_pfn << PAGE_SHIFT);

	memblock_free_all();
}
#endif /* !CONFIG_NUMA */

void __ref free_initmem(void)
{
	free_initmem_default(POISON_FREE_INITMEM);
}
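
/*
 * Memory hotplug: hotplugged ranges only get their memmap created
 * (__add_pages()) and destroyed (__remove_pages()) here; no additional
 * page table work is done at the arch level.
 */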
#ifdef CONFIG_MEMORY_HOTPLUG
int arch_add_memory(int nid, u64 start, u64 size, struct mhp_params *params)
{
	unsigned long start_pfn = start >> PAGE_SHIFT;
	unsigned long nr_pages = size >> PAGE_SHIFT;
	int ret;

	ret = __add_pages(nid, start_pfn, nr_pages, params);
	if (ret)
		pr_warn("%s: Problem encountered in __add_pages() as ret=%d\n",
				__func__, ret);

	return ret;
}

void arch_remove_memory(u64 start, u64 size, struct vmem_altmap *altmap)
{
	unsigned long start_pfn = start >> PAGE_SHIFT;
	unsigned long nr_pages = size >> PAGE_SHIFT;
	struct page *page = pfn_to_page(start_pfn);

	/* With altmap the first mapped page is offset from @start */
	if (altmap)
		page += vmem_altmap_offset(altmap);
	__remove_pages(start_pfn, nr_pages, altmap);
}
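
/*
 * Map a physical address back to its NUMA node for hotplugged memory;
 * pa_to_nid() decodes the node bits that LoongArch encodes in the
 * physical address.
 */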
#ifdef CONFIG_NUMA
int memory_add_physaddr_to_nid(u64 start)
{
	return pa_to_nid(start);
}
EXPORT_SYMBOL_GPL(memory_add_physaddr_to_nid);
#endif
#endif /* CONFIG_MEMORY_HOTPLUG */
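
/*
 * The struct page array (vmemmap) is mapped with PMD-sized huge pages
 * where the page table layout allows it.  vmemmap_set_pmd() installs one
 * such mapping: the PMD entry gets _PAGE_HUGE plus _PAGE_HGLOBAL, the
 * huge-page counterpart of the global bit on LoongArch.
 */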
#ifdef CONFIG_SPARSEMEM_VMEMMAP
void __meminit vmemmap_set_pmd(pmd_t *pmd, void *p, int node,
			       unsigned long addr, unsigned long next)
{
	pmd_t entry;

	entry = pfn_pmd(virt_to_pfn(p), PAGE_KERNEL);
	pmd_val(entry) |= _PAGE_HUGE | _PAGE_HGLOBAL;
	set_pmd_at(&init_mm, addr, pmd, entry);
}

int __meminit vmemmap_check_pmd(pmd_t *pmd, int node,
				unsigned long addr, unsigned long next)
{
	int huge = pmd_val(*pmd) & _PAGE_HUGE;

	if (huge)
		vmemmap_verify((pte_t *)pmd, node, addr, next);

	return huge;
}

int __meminit vmemmap_populate(unsigned long start, unsigned long end,
			       int node, struct vmem_altmap *altmap)
{
#if CONFIG_PGTABLE_LEVELS == 2
	/* Two-level page tables have no PMD level to carry huge mappings */
	return vmemmap_populate_basepages(start, end, node, NULL);
#else
	return vmemmap_populate_hugepages(start, end, node, NULL);
#endif
}

#ifdef CONFIG_MEMORY_HOTPLUG
void vmemmap_free(unsigned long start, unsigned long end, struct vmem_altmap *altmap)
{
}
#endif
#endif /* CONFIG_SPARSEMEM_VMEMMAP */
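
/*
 * Walk the kernel page tables for @addr, allocating any missing
 * intermediate levels from memblock, and return the PTE slot.  Only
 * usable during early boot (__init): allocation failure panics, and the
 * memblock allocator itself goes away once the buddy allocator is up.
 */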
pte_t * __init populate_kernel_pte(unsigned long addr)
{
	pgd_t *pgd = pgd_offset_k(addr);
	p4d_t *p4d = p4d_offset(pgd, addr);
	pud_t *pud;
	pmd_t *pmd;

	if (p4d_none(*p4d)) {
		pud = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
		if (!pud)
			panic("%s: Failed to allocate memory\n", __func__);
		p4d_populate(&init_mm, p4d, pud);
#ifndef __PAGETABLE_PUD_FOLDED
		pud_init(pud);
#endif
	}

	pud = pud_offset(p4d, addr);
	if (pud_none(*pud)) {
		pmd = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
		if (!pmd)
			panic("%s: Failed to allocate memory\n", __func__);
		pud_populate(&init_mm, pud, pmd);
#ifndef __PAGETABLE_PMD_FOLDED
		pmd_init(pmd);
#endif
	}

	pmd = pmd_offset(pud, addr);
	if (!pmd_present(*pmd)) {
		pte_t *pte;

		pte = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
		if (!pte)
			panic("%s: Failed to allocate memory\n", __func__);
		pmd_populate_kernel(&init_mm, pmd, pte);
	}

	return pte_offset_kernel(pmd, addr);
}
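
/*
 * Install (or clear, when pgprot_val(flags) == 0) the mapping for one
 * fixmap slot.  Fixmap virtual addresses are compile-time constants, so
 * only the backing PTE has to change at runtime.
 */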
void __init __set_fixmap(enum fixed_addresses idx,
			       phys_addr_t phys, pgprot_t flags)
{
	unsigned long addr = __fix_to_virt(idx);
	pte_t *ptep;

	BUG_ON(idx <= FIX_HOLE || idx >= __end_of_fixed_addresses);

	ptep = populate_kernel_pte(addr);
	if (!pte_none(*ptep)) {
		pte_ERROR(*ptep);
		return;
	}

	if (pgprot_val(flags))
		set_pte(ptep, pfn_pte(phys >> PAGE_SHIFT, flags));
	else {
		pte_clear(&init_mm, addr, ptep);
		flush_tlb_kernel_range(addr, addr + PAGE_SIZE);
	}
}
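
/*
 * Usage sketch (illustrative, not taken from this file): early console
 * code could map its MMIO page through a fixmap slot and unmap it again
 * by passing an empty pgprot.  FIX_EARLYCON_MEM_BASE and FIXMAP_PAGE_IO
 * are assumed here to exist in the fixmap layout.
 *
 *	__set_fixmap(FIX_EARLYCON_MEM_BASE, mmio_phys, FIXMAP_PAGE_IO);
 *	...
 *	__set_fixmap(FIX_EARLYCON_MEM_BASE, 0, __pgprot(0));
 */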

/*
 * Align swapper_pg_dir to 64K, which allows its address to be loaded
 * with a single LUI instruction in the TLB handlers.  If we used
 * __aligned(64K), its size would get rounded up to the alignment
 * size, and waste space.  So we place it in its own section and align
 * it in the linker script.
 */
pgd_t swapper_pg_dir[_PTRS_PER_PGD] __section(".bss..swapper_pg_dir");

pgd_t invalid_pg_dir[_PTRS_PER_PGD] __page_aligned_bss;
#ifndef __PAGETABLE_PUD_FOLDED
pud_t invalid_pud_table[PTRS_PER_PUD] __page_aligned_bss;
#endif
#ifndef __PAGETABLE_PMD_FOLDED
pmd_t invalid_pmd_table[PTRS_PER_PMD] __page_aligned_bss;
EXPORT_SYMBOL(invalid_pmd_table);
#endif
pte_t invalid_pte_table[PTRS_PER_PTE] __page_aligned_bss;
EXPORT_SYMBOL(invalid_pte_table);