Support of split page table lock by an architecture
===================================================
There is no need to specially enable the PTE split page table lock: everything
-required is done by pagetable_pte_ctor() and pagetable_pte_dtor(), which
+required is done by pagetable_pte_ctor() and pagetable_dtor(), which
must be called on PTE table allocation / freeing.
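For illustration, a minimal sketch of the expected pairing, modeled on the
asm-generic pgalloc helpers (the example_* names are hypothetical)::

	pgtable_t example_pte_alloc_one(struct mm_struct *mm)
	{
		struct ptdesc *ptdesc = pagetable_alloc(GFP_PGTABLE_USER, 0);

		if (!ptdesc)
			return NULL;
		if (!pagetable_pte_ctor(ptdesc)) {
			/* ctor failed: free the page without running the dtor */
			pagetable_free(ptdesc);
			return NULL;
		}
		return ptdesc_page(ptdesc);
	}

	void example_pte_free(struct mm_struct *mm, pgtable_t pte_page)
	{
		struct ptdesc *ptdesc = page_ptdesc(pte_page);

		pagetable_dtor(ptdesc);		/* was pagetable_pte_dtor() */
		pagetable_free(ptdesc);
	}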
Make sure the architecture doesn't use the slab allocator for page table
levels.
Enabling the PMD split lock requires a pagetable_pmd_ctor() call on PMD table
-allocation and pagetable_pmd_dtor() on freeing.
+allocation and pagetable_dtor() on freeing.
Allocation usually happens in pmd_alloc_one(), freeing in pmd_free() and
pmd_free_tlb(), but make sure you cover all PMD table allocation / freeing paths.
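The same pairing applies at the PMD level; a sketch with hypothetical
example_* names, again following the asm-generic pattern::

	pmd_t *example_pmd_alloc_one(struct mm_struct *mm, unsigned long addr)
	{
		gfp_t gfp = GFP_PGTABLE_USER;
		struct ptdesc *ptdesc;

		if (mm == &init_mm)
			gfp = GFP_PGTABLE_KERNEL;
		ptdesc = pagetable_alloc(gfp, 0);
		if (!ptdesc)
			return NULL;
		if (!pagetable_pmd_ctor(ptdesc)) {
			pagetable_free(ptdesc);
			return NULL;
		}
		return (pmd_t *)ptdesc_address(ptdesc);
	}

	void example_pmd_free(struct mm_struct *mm, pmd_t *pmd)
	{
		struct ptdesc *ptdesc = virt_to_ptdesc(pmd);

		pagetable_dtor(ptdesc);		/* was pagetable_pmd_dtor() */
		pagetable_free(ptdesc);
	}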
{
struct ptdesc *ptdesc = page_ptdesc(pte);
- pagetable_pte_dtor(ptdesc);
+ pagetable_dtor(ptdesc);
#ifndef CONFIG_ARM_LPAE
/*
#ifdef CONFIG_ARM_LPAE
struct ptdesc *ptdesc = virt_to_ptdesc(pmdp);
- pagetable_pmd_dtor(ptdesc);
+ pagetable_dtor(ptdesc);
tlb_remove_ptdesc(tlb, ptdesc);
#endif
}
{
struct ptdesc *ptdesc = page_ptdesc(pte);
- pagetable_pte_dtor(ptdesc);
+ pagetable_dtor(ptdesc);
tlb_remove_ptdesc(tlb, ptdesc);
}
{
struct ptdesc *ptdesc = virt_to_ptdesc(pmdp);
- pagetable_pmd_dtor(ptdesc);
+ pagetable_dtor(ptdesc);
tlb_remove_ptdesc(tlb, ptdesc);
}
#endif
if (!pgtable_l4_enabled())
return;
- pagetable_pud_dtor(ptdesc);
+ pagetable_dtor(ptdesc);
tlb_remove_ptdesc(tlb, ptdesc);
}
#endif
if (!pgtable_l5_enabled())
return;
- pagetable_p4d_dtor(ptdesc);
+ pagetable_dtor(ptdesc);
tlb_remove_ptdesc(tlb, ptdesc);
}
#endif
#define __pte_free_tlb(tlb, pte, address) \
do { \
- pagetable_pte_dtor(page_ptdesc(pte)); \
+ pagetable_dtor(page_ptdesc(pte)); \
tlb_remove_page_ptdesc(tlb, page_ptdesc(pte)); \
} while (0)
#define __pte_free_tlb(tlb, pte, addr) \
do { \
- pagetable_pte_dtor((page_ptdesc(pte))); \
+ pagetable_dtor((page_ptdesc(pte))); \
tlb_remove_page_ptdesc((tlb), (page_ptdesc(pte))); \
} while (0)
#define __pte_free_tlb(tlb, pte, address) \
do { \
- pagetable_pte_dtor(page_ptdesc(pte)); \
+ pagetable_dtor(page_ptdesc(pte)); \
tlb_remove_page_ptdesc((tlb), page_ptdesc(pte)); \
} while (0)
{
struct ptdesc *ptdesc = virt_to_ptdesc(pgtable);
- pagetable_pte_dtor(ptdesc);
+ pagetable_dtor(ptdesc);
pagetable_free(ptdesc);
}
{
struct ptdesc *ptdesc = virt_to_ptdesc(pgtable);
- pagetable_pte_dtor(ptdesc);
+ pagetable_dtor(ptdesc);
pagetable_free(ptdesc);
}
#define __pte_free_tlb(tlb, pte, addr) \
do { \
- pagetable_pte_dtor(page_ptdesc(pte)); \
+ pagetable_dtor(page_ptdesc(pte)); \
tlb_remove_page_ptdesc((tlb), page_ptdesc(pte)); \
} while (0)
list_del(dp);
mmu_page_dtor((void *)page);
if (type == TABLE_PTE)
- pagetable_pte_dtor(virt_to_ptdesc((void *)page));
+ pagetable_dtor(virt_to_ptdesc((void *)page));
free_page(page);
return 1;
} else if (ptable_list[type].next != dp) {
#define __pte_free_tlb(tlb, pte, address) \
do { \
- pagetable_pte_dtor(page_ptdesc(pte)); \
+ pagetable_dtor(page_ptdesc(pte)); \
tlb_remove_page_ptdesc((tlb), page_ptdesc(pte)); \
} while (0)
#define __pte_free_tlb(tlb, pte, addr) \
do { \
- pagetable_pte_dtor(page_ptdesc(pte)); \
+ pagetable_dtor(page_ptdesc(pte)); \
tlb_remove_page_ptdesc((tlb), (page_ptdesc(pte))); \
} while (0)
#define __pte_free_tlb(tlb, pte, addr) \
do { \
- pagetable_pte_dtor(page_ptdesc(pte)); \
+ pagetable_dtor(page_ptdesc(pte)); \
tlb_remove_page_ptdesc((tlb), (page_ptdesc(pte))); \
} while (0)
count = ((unsigned long)pmd_frag & ~PAGE_MASK) >> PMD_FRAG_SIZE_SHIFT;
/* We allow PMD_FRAG_NR fragments from a PMD page */
if (atomic_sub_and_test(PMD_FRAG_NR - count, &ptdesc->pt_frag_refcount)) {
- pagetable_pmd_dtor(ptdesc);
+ pagetable_dtor(ptdesc);
pagetable_free(ptdesc);
}
}
BUG_ON(atomic_read(&ptdesc->pt_frag_refcount) <= 0);
if (atomic_dec_and_test(&ptdesc->pt_frag_refcount)) {
- pagetable_pmd_dtor(ptdesc);
+ pagetable_dtor(ptdesc);
pagetable_free(ptdesc);
}
}
count = ((unsigned long)pte_frag & ~PAGE_MASK) >> PTE_FRAG_SIZE_SHIFT;
/* We allow PTE_FRAG_NR fragments from a PTE page */
if (atomic_sub_and_test(PTE_FRAG_NR - count, &ptdesc->pt_frag_refcount)) {
- pagetable_pte_dtor(ptdesc);
+ pagetable_dtor(ptdesc);
pagetable_free(ptdesc);
}
}
struct ptdesc *ptdesc;
ptdesc = container_of(head, struct ptdesc, pt_rcu_head);
- pagetable_pte_dtor(ptdesc);
+ pagetable_dtor(ptdesc);
pagetable_free(ptdesc);
}
if (pgtable_l4_enabled) {
struct ptdesc *ptdesc = virt_to_ptdesc(pud);
- pagetable_pud_dtor(ptdesc);
+ pagetable_dtor(ptdesc);
riscv_tlb_remove_ptdesc(tlb, ptdesc);
}
}
if (pgtable_l5_enabled) {
struct ptdesc *ptdesc = virt_to_ptdesc(p4d);
- pagetable_p4d_dtor(ptdesc);
+ pagetable_dtor(ptdesc);
riscv_tlb_remove_ptdesc(tlb, ptdesc);
}
}
{
struct ptdesc *ptdesc = virt_to_ptdesc(pmd);
- pagetable_pmd_dtor(ptdesc);
+ pagetable_dtor(ptdesc);
riscv_tlb_remove_ptdesc(tlb, ptdesc);
}
{
struct ptdesc *ptdesc = page_ptdesc(pte);
- pagetable_pte_dtor(ptdesc);
+ pagetable_dtor(ptdesc);
riscv_tlb_remove_ptdesc(tlb, ptdesc);
}
#endif /* CONFIG_MMU */
return;
}
- pagetable_pte_dtor(ptdesc);
+ pagetable_dtor(ptdesc);
if (PageReserved(page))
free_reserved_page(page);
else
}
if (!is_vmemmap)
- pagetable_pmd_dtor(ptdesc);
+ pagetable_dtor(ptdesc);
if (PageReserved(page))
free_reserved_page(page);
else
if (mm_p4d_folded(mm))
return;
- pagetable_p4d_dtor(virt_to_ptdesc(p4d));
+ pagetable_dtor(virt_to_ptdesc(p4d));
crst_table_free(mm, (unsigned long *) p4d);
}
if (mm_pud_folded(mm))
return;
- pagetable_pud_dtor(virt_to_ptdesc(pud));
+ pagetable_dtor(virt_to_ptdesc(pud));
crst_table_free(mm, (unsigned long *) pud);
}
{
if (mm_pmd_folded(mm))
return;
- pagetable_pmd_dtor(virt_to_ptdesc(pmd));
+ pagetable_dtor(virt_to_ptdesc(pmd));
crst_table_free(mm, (unsigned long *) pmd);
}
{
if (mm_pmd_folded(tlb->mm))
return;
- pagetable_pmd_dtor(virt_to_ptdesc(pmd));
+ pagetable_dtor(virt_to_ptdesc(pmd));
__tlb_adjust_range(tlb, address, PAGE_SIZE);
tlb->mm->context.flush_mm = 1;
tlb->freed_tables = 1;
{
if (mm_p4d_folded(tlb->mm))
return;
- pagetable_p4d_dtor(virt_to_ptdesc(p4d));
+ pagetable_dtor(virt_to_ptdesc(p4d));
__tlb_adjust_range(tlb, address, PAGE_SIZE);
tlb->mm->context.flush_mm = 1;
tlb->freed_tables = 1;
{
if (mm_pud_folded(tlb->mm))
return;
- pagetable_pud_dtor(virt_to_ptdesc(pud));
+ pagetable_dtor(virt_to_ptdesc(pud));
tlb->mm->context.flush_mm = 1;
tlb->freed_tables = 1;
tlb->cleared_p4ds = 1;
static void pagetable_pte_dtor_free(struct ptdesc *ptdesc)
{
- pagetable_pte_dtor(ptdesc);
+ pagetable_dtor(ptdesc);
pagetable_free(ptdesc);
}
#define __pte_free_tlb(tlb, pte, addr) \
do { \
- pagetable_pte_dtor(page_ptdesc(pte)); \
+ pagetable_dtor(page_ptdesc(pte)); \
tlb_remove_page_ptdesc((tlb), (page_ptdesc(pte))); \
} while (0)
{
struct ptdesc *ptdesc = virt_to_ptdesc(pte);
- pagetable_pte_dtor(ptdesc);
+ pagetable_dtor(ptdesc);
pagetable_free(ptdesc);
}
page = pfn_to_page(__nocache_pa((unsigned long)ptep) >> PAGE_SHIFT);
spin_lock(&mm->page_table_lock);
if (page_ref_dec_return(page) == 1)
- pagetable_pte_dtor(page_ptdesc(page));
+ pagetable_dtor(page_ptdesc(page));
spin_unlock(&mm->page_table_lock);
srmmu_free_nocache(ptep, SRMMU_PTE_TABLE_SIZE);
#define __pte_free_tlb(tlb, pte, address) \
do { \
- pagetable_pte_dtor(page_ptdesc(pte)); \
+ pagetable_dtor(page_ptdesc(pte)); \
tlb_remove_page_ptdesc((tlb), (page_ptdesc(pte))); \
} while (0)
#define __pmd_free_tlb(tlb, pmd, address) \
do { \
- pagetable_pmd_dtor(virt_to_ptdesc(pmd)); \
+ pagetable_dtor(virt_to_ptdesc(pmd)); \
tlb_remove_page_ptdesc((tlb), virt_to_ptdesc(pmd)); \
} while (0)
#define __pud_free_tlb(tlb, pud, address) \
do { \
- pagetable_pud_dtor(virt_to_ptdesc(pud)); \
+ pagetable_dtor(virt_to_ptdesc(pud)); \
tlb_remove_page_ptdesc((tlb), virt_to_ptdesc(pud)); \
} while (0)
void ___pte_free_tlb(struct mmu_gather *tlb, struct page *pte)
{
- pagetable_pte_dtor(page_ptdesc(pte));
+ pagetable_dtor(page_ptdesc(pte));
paravirt_release_pte(page_to_pfn(pte));
paravirt_tlb_remove_table(tlb, pte);
}
#ifdef CONFIG_X86_PAE
tlb->need_flush_all = 1;
#endif
- pagetable_pmd_dtor(ptdesc);
+ pagetable_dtor(ptdesc);
paravirt_tlb_remove_table(tlb, ptdesc_page(ptdesc));
}
{
struct ptdesc *ptdesc = virt_to_ptdesc(pud);
- pagetable_pud_dtor(ptdesc);
+ pagetable_dtor(ptdesc);
paravirt_release_pud(__pa(pud) >> PAGE_SHIFT);
paravirt_tlb_remove_table(tlb, virt_to_page(pud));
}
{
struct ptdesc *ptdesc = virt_to_ptdesc(p4d);
- pagetable_p4d_dtor(ptdesc);
+ pagetable_dtor(ptdesc);
paravirt_release_p4d(__pa(p4d) >> PAGE_SHIFT);
paravirt_tlb_remove_table(tlb, virt_to_page(p4d));
}
if (pmds[i]) {
ptdesc = virt_to_ptdesc(pmds[i]);
- pagetable_pmd_dtor(ptdesc);
+ pagetable_dtor(ptdesc);
pagetable_free(ptdesc);
mm_dec_nr_pmds(mm);
}
free_page((unsigned long)pmd_sv);
- pagetable_pmd_dtor(virt_to_ptdesc(pmd));
+ pagetable_dtor(virt_to_ptdesc(pmd));
free_page((unsigned long)pmd);
return 1;
{
struct ptdesc *ptdesc = page_ptdesc(pte_page);
- pagetable_pte_dtor(ptdesc);
+ pagetable_dtor(ptdesc);
pagetable_free(ptdesc);
}
struct ptdesc *ptdesc = virt_to_ptdesc(pmd);
BUG_ON((unsigned long)pmd & (PAGE_SIZE-1));
- pagetable_pmd_dtor(ptdesc);
+ pagetable_dtor(ptdesc);
pagetable_free(ptdesc);
}
#endif
struct ptdesc *ptdesc = virt_to_ptdesc(pud);
BUG_ON((unsigned long)pud & (PAGE_SIZE-1));
- pagetable_pud_dtor(ptdesc);
+ pagetable_dtor(ptdesc);
pagetable_free(ptdesc);
}
struct ptdesc *ptdesc = virt_to_ptdesc(p4d);
BUG_ON((unsigned long)p4d & (PAGE_SIZE-1));
- pagetable_p4d_dtor(ptdesc);
+ pagetable_dtor(ptdesc);
pagetable_free(ptdesc);
}
static inline void ptlock_free(struct ptdesc *ptdesc) {}
#endif /* defined(CONFIG_SPLIT_PTE_PTLOCKS) */
+static inline void pagetable_dtor(struct ptdesc *ptdesc)
+{
+ struct folio *folio = ptdesc_folio(ptdesc);
+
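+ /* one dtor for all levels: ptlock_free() does nothing if no ptl was set up */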
+ ptlock_free(ptdesc);
+ __folio_clear_pgtable(folio);
+ lruvec_stat_sub_folio(folio, NR_PAGETABLE);
+}
+
static inline bool pagetable_pte_ctor(struct ptdesc *ptdesc)
{
struct folio *folio = ptdesc_folio(ptdesc);
return true;
}
-static inline void pagetable_pte_dtor(struct ptdesc *ptdesc)
-{
- struct folio *folio = ptdesc_folio(ptdesc);
-
- ptlock_free(ptdesc);
- __folio_clear_pgtable(folio);
- lruvec_stat_sub_folio(folio, NR_PAGETABLE);
-}
-
pte_t *___pte_offset_map(pmd_t *pmd, unsigned long addr, pmd_t *pmdvalp);
static inline pte_t *__pte_offset_map(pmd_t *pmd, unsigned long addr,
pmd_t *pmdvalp)
return ptlock_init(ptdesc);
}
-static inline void pmd_ptlock_free(struct ptdesc *ptdesc)
-{
-#ifdef CONFIG_TRANSPARENT_HUGEPAGE
- VM_BUG_ON_PAGE(ptdesc->pmd_huge_pte, ptdesc_page(ptdesc));
-#endif
- ptlock_free(ptdesc);
-}
-
#define pmd_huge_pte(mm, pmd) (pmd_ptdesc(pmd)->pmd_huge_pte)
#else
}
static inline bool pmd_ptlock_init(struct ptdesc *ptdesc) { return true; }
-static inline void pmd_ptlock_free(struct ptdesc *ptdesc) {}
#define pmd_huge_pte(mm, pmd) ((mm)->pmd_huge_pte)
return true;
}
-static inline void pagetable_pmd_dtor(struct ptdesc *ptdesc)
-{
- struct folio *folio = ptdesc_folio(ptdesc);
-
- pmd_ptlock_free(ptdesc);
- __folio_clear_pgtable(folio);
- lruvec_stat_sub_folio(folio, NR_PAGETABLE);
-}
-
/*
* No scalability reason to split PUD locks yet, but follow the same pattern
* as the PMD locks to make it easier if we decide to. The VM should not be
lruvec_stat_add_folio(folio, NR_PAGETABLE);
}
-static inline void pagetable_pud_dtor(struct ptdesc *ptdesc)
-{
- struct folio *folio = ptdesc_folio(ptdesc);
-
- __folio_clear_pgtable(folio);
- lruvec_stat_sub_folio(folio, NR_PAGETABLE);
-}
-
static inline void pagetable_p4d_ctor(struct ptdesc *ptdesc)
{
struct folio *folio = ptdesc_folio(ptdesc);
lruvec_stat_add_folio(folio, NR_PAGETABLE);
}
-static inline void pagetable_p4d_dtor(struct ptdesc *ptdesc)
-{
- struct folio *folio = ptdesc_folio(ptdesc);
-
- __folio_clear_pgtable(folio);
- lruvec_stat_sub_folio(folio, NR_PAGETABLE);
-}
-
extern void __init pagecache_init(void);
extern void free_initmem(void);
void ptlock_free(struct ptdesc *ptdesc)
{
- kmem_cache_free(page_ptl_cachep, ptdesc->ptl);
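+ /*
+  * pagetable_dtor() now calls ptlock_free() for every page table level;
+  * tables that never ran ptlock_init() have a NULL ptl.
+  */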
+ if (ptdesc->ptl)
+ kmem_cache_free(page_ptl_cachep, ptdesc->ptl);
}
#endif