static inline hpa_t kvm_mmu_get_dummy_root(void)
{
- return my_zero_pfn(0) << PAGE_SHIFT;
+ return zero_pfn(0) << PAGE_SHIFT;
}
static inline bool kvm_mmu_is_dummy_root(hpa_t shadow_page)
{
struct inode *inode = iter->inode;
unsigned long vaddr = vmf->address;
- unsigned long pfn = my_zero_pfn(vaddr);
+ unsigned long pfn = zero_pfn(vaddr);
vm_fault_t ret;
*entry = dax_insert_entry(xas, vmf, iter, *entry, pfn, DAX_ZERO_PAGE);
{
unsigned long map_size;
unsigned long pos_start, pos_end, pos;
- unsigned long zeropage_pfn = my_zero_pfn(0);
+ unsigned long zeropage_pfn = zero_pfn(0);
size_t len = 0;
pos_start = pfn;
pfnmap_setup_cachemode(pfn, PAGE_SIZE, prot);
}
+/*
+ * ZERO_PAGE() is a global shared page (or pages) that is always zero. It is
+ * used for zero-mapped memory areas, CoW, etc.
+ *
+ * On architectures that define __HAVE_COLOR_ZERO_PAGE there are several such
+ * pages, used for different ranges of the virtual address space.
+ *
+ * zero_page_pfn identifies the first (or only) pfn for these pages.
+ */
#ifdef __HAVE_COLOR_ZERO_PAGE
static inline int is_zero_pfn(unsigned long pfn)
{
- extern unsigned long zero_pfn;
- unsigned long offset_from_zero_pfn = pfn - zero_pfn;
+ extern unsigned long zero_page_pfn;
+ unsigned long offset_from_zero_pfn = pfn - zero_page_pfn;
+
return offset_from_zero_pfn <= (zero_page_mask >> PAGE_SHIFT);
}
-#define my_zero_pfn(addr) page_to_pfn(ZERO_PAGE(addr))
+#define zero_pfn(addr) page_to_pfn(ZERO_PAGE(addr))
#else
static inline int is_zero_pfn(unsigned long pfn)
{
- extern unsigned long zero_pfn;
- return pfn == zero_pfn;
+ extern unsigned long zero_page_pfn;
+
+ return pfn == zero_page_pfn;
}
-static inline unsigned long my_zero_pfn(unsigned long addr)
+static inline unsigned long zero_pfn(unsigned long addr)
{
- extern unsigned long zero_pfn;
- return zero_pfn;
+ extern unsigned long zero_page_pfn;
+
+ return zero_page_pfn;
}
#endif /* __HAVE_COLOR_ZERO_PAGE */
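For reference, every converted call site below consumes the renamed helper the same way: resolve the zero-page pfn for a faulting address and turn it into a special PTE. A minimal sketch of that pattern, not part of this patch; install_zero_page_pte() is a hypothetical name used only for illustration:

#include <linux/mm.h>
#include <linux/pgtable.h>

static pte_t install_zero_page_pte(struct vm_area_struct *vma,
				   unsigned long address)
{
	/* Pick the (possibly coloured) zero page pfn for this address. */
	unsigned long pfn = zero_pfn(address);

	/*
	 * The zero page is not treated as a normal refcounted page by
	 * vm_normal_page(), so the mapping must be marked special.
	 */
	return pte_mkspecial(pfn_pte(pfn, vma->vm_page_prot));
}

The pte_mkspecial(pfn_pte(zero_pfn(addr), prot)) step is what the hunks below share; is_zero_pfn() is the matching test used when such a mapping is encountered later.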
for (i = 0, addr = haddr; i < HPAGE_PMD_NR; i++, addr += PAGE_SIZE) {
pte_t entry;
- entry = pfn_pte(my_zero_pfn(addr), vma->vm_page_prot);
+ entry = pfn_pte(zero_pfn(addr), vma->vm_page_prot);
entry = pte_mkspecial(entry);
if (pmd_uffd_wp(old_pmd))
entry = pte_mkuffd_wp(entry);
/* Use the zero-page for reads */
if (!(vmf->flags & FAULT_FLAG_WRITE) &&
!mm_forbids_zeropage(vma->vm_mm)) {
- entry = pte_mkspecial(pfn_pte(my_zero_pfn(vmf->address),
+ entry = pte_mkspecial(pfn_pte(zero_pfn(vmf->address),
vma->vm_page_prot));
vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd,
vmf->address, &vmf->ptl);
if (!pages_identical(page, ZERO_PAGE(0)))
return false;
- newpte = pte_mkspecial(pfn_pte(my_zero_pfn(pvmw->address),
+ newpte = pte_mkspecial(pfn_pte(zero_pfn(pvmw->address),
pvmw->vma->vm_page_prot));
if (pte_swp_soft_dirty(old_pte))
void *high_memory;
EXPORT_SYMBOL(high_memory);
-unsigned long zero_pfn __ro_after_init;
-EXPORT_SYMBOL(zero_pfn);
+unsigned long zero_page_pfn __ro_after_init;
+EXPORT_SYMBOL(zero_page_pfn);
#ifdef CONFIG_DEBUG_MEMORY_INIT
int __meminitdata mminit_loglevel;
);
}
-static int __init init_zero_pfn(void)
+static int __init init_zero_page_pfn(void)
{
- zero_pfn = page_to_pfn(ZERO_PAGE(0));
+ zero_page_pfn = page_to_pfn(ZERO_PAGE(0));
return 0;
}
-early_initcall(init_zero_pfn);
+early_initcall(init_zero_page_pfn);
void __init __weak arch_mm_preinit(void)
{
if (mm_forbids_zeropage(dst_vma->vm_mm))
return mfill_atomic_pte_zeroed_folio(dst_pmd, dst_vma, dst_addr);
- _dst_pte = pte_mkspecial(pfn_pte(my_zero_pfn(dst_addr),
+ _dst_pte = pte_mkspecial(pfn_pte(zero_pfn(dst_addr),
dst_vma->vm_page_prot));
ret = -EAGAIN;
dst_pte = pte_offset_map_lock(dst_vma->vm_mm, dst_pmd, dst_addr, &ptl);
return -EAGAIN;
}
- zero_pte = pte_mkspecial(pfn_pte(my_zero_pfn(dst_addr),
+ zero_pte = pte_mkspecial(pfn_pte(zero_pfn(dst_addr),
dst_vma->vm_page_prot));
ptep_clear_flush(src_vma, src_addr, src_pte);
set_pte_at(mm, dst_addr, dst_pte, zero_pte);