This reverts commit 6d144436d954 ("mm/page_table_check: remove unused
parameter in [__]page_table_check_pud_set").
Reinstate the previously unused address parameter in order to support
powerpc platforms, many of which do not encode user/kernel ownership of
the page in the pte, but instead in the address of the access.
Apply this to __page_table_check_puds_set(), page_table_check_puds_set()
and the page_table_check_pud_set() wrapper macro.
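
For illustration only, the sketch below is plain userspace C, not kernel
code; the constants and helper names are invented.  It shows the
difference between deriving user/kernel ownership from a bit in the pte
and deriving it from the address of the access, which is why the address
has to be threaded through the checking helpers on such platforms:

  /* Hedged, self-contained sketch; build with any C compiler. */
  #include <stdbool.h>
  #include <stdio.h>

  #define FAKE_PAGE_OFFSET  0xc000000000000000UL /* pretend kernel base    */
  #define FAKE_PTE_USER_BIT 0x4UL                /* pretend per-pte bit    */

  /* x86-like layout: ownership can be read straight out of the pte value. */
  static bool pte_is_user(unsigned long pteval)
  {
          return pteval & FAKE_PTE_USER_BIT;
  }

  /* powerpc-like layout: the pte carries no such bit, so ownership must
   * be classified from the virtual address being mapped instead. */
  static bool addr_is_user(unsigned long addr)
  {
          return addr < FAKE_PAGE_OFFSET;
  }

  int main(void)
  {
          unsigned long pteval       = 0x8000000000000183UL; /* no user bit */
          unsigned long user_vaddr   = 0x00007fffdeadb000UL;
          unsigned long kernel_vaddr = 0xc000000001230000UL;

          /* On the pte-bit layout, the pte alone answers the question... */
          printf("pte-only check: %s\n",
                 pte_is_user(pteval) ? "user" : "kernel");

          /* ...on the address-based layout, the same pte could back either
           * mapping, so the checker needs the address as well. */
          printf("addr 0x%lx -> %s mapping\n", user_vaddr,
                 addr_is_user(user_vaddr) ? "user" : "kernel");
          printf("addr 0x%lx -> %s mapping\n", kernel_vaddr,
                 addr_is_user(kernel_vaddr) ? "user" : "kernel");
          return 0;
  }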
[ajd@linux.ibm.com: rebase on riscv + arm64 changes, update commit message]
Link: https://lkml.kernel.org/r/20251219-pgtable_check_v18rebase-v18-3-755bc151a50b@linux.ibm.com
Signed-off-by: Rohan McLure <rmclure@linux.ibm.com>
Signed-off-by: Andrew Donnellan <ajd@linux.ibm.com>
Reviewed-by: Pasha Tatashin <pasha.tatashin@soleen.com>
Acked-by: Ingo Molnar <mingo@kernel.org> # x86
Acked-by: Alexandre Ghiti <alexghiti@rivosinc.com> # riscv
Cc: Alexander Gordeev <agordeev@linux.ibm.com>
Cc: Alexandre Ghiti <alex@ghiti.fr>
Cc: Alistair Popple <apopple@nvidia.com>
Cc: Christophe Leroy <christophe.leroy@csgroup.eu>
Cc: "Christophe Leroy (CS GROUP)" <chleroy@kernel.org>
Cc: David Hildenbrand <david@kernel.org>
Cc: Donet Tom <donettom@linux.ibm.com>
Cc: Guo Weikang <guoweikang.kernel@gmail.com>
Cc: Jason Gunthorpe <jgg@ziepe.ca>
Cc: Kevin Brodsky <kevin.brodsky@arm.com>
Cc: Madhavan Srinivasan <maddy@linux.ibm.com>
Cc: Magnus Lindholm <linmag7@gmail.com>
Cc: "Matthew Wilcox (Oracle)" <willy@infradead.org>
Cc: Michael Ellerman <mpe@ellerman.id.au>
Cc: Nicholas Miehlbradt <nicholas@linux.ibm.com>
Cc: Nicholas Piggin <npiggin@gmail.com>
Cc: Paul Mackerras <paulus@ozlabs.org>
Cc: Qi Zheng <zhengqi.arch@bytedance.com>
Cc: "Ritesh Harjani (IBM)" <ritesh.list@gmail.com>
Cc: Sweet Tea Dorminy <sweettea-kernel@dorminy.me>
Cc: Thomas Huth <thuth@redhat.com>
Cc: "Vishal Moola (Oracle)" <vishal.moola@gmail.com>
Cc: Zi Yan <ziy@nvidia.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
break;
#ifndef __PAGETABLE_PMD_FOLDED
case PUD_SIZE:
- page_table_check_puds_set(mm, (pud_t *)ptep, pte_pud(pte), nr);
+ page_table_check_puds_set(mm, addr, (pud_t *)ptep,
+ pte_pud(pte), nr);
break;
#endif
default:
static inline void set_pud_at(struct mm_struct *mm, unsigned long addr,
pud_t *pudp, pud_t pud)
{
- page_table_check_pud_set(mm, pudp, pud);
+ page_table_check_pud_set(mm, addr, pudp, pud);
return __set_pte_at(mm, (pte_t *)pudp, pud_pte(pud));
}
static inline pud_t pudp_establish(struct vm_area_struct *vma,
unsigned long address, pud_t *pudp, pud_t pud)
{
- page_table_check_pud_set(vma->vm_mm, pudp, pud);
+ page_table_check_pud_set(vma->vm_mm, address, pudp, pud);
return __pud(atomic_long_xchg((atomic_long_t *)pudp, pud_val(pud)));
}
static inline void set_pud_at(struct mm_struct *mm, unsigned long addr,
pud_t *pudp, pud_t pud)
{
- page_table_check_pud_set(mm, pudp, pud);
+ page_table_check_pud_set(mm, addr, pudp, pud);
native_set_pud(pudp, pud);
}
static inline pud_t pudp_establish(struct vm_area_struct *vma,
unsigned long address, pud_t *pudp, pud_t pud)
{
- page_table_check_pud_set(vma->vm_mm, pudp, pud);
+ page_table_check_pud_set(vma->vm_mm, address, pudp, pud);
if (IS_ENABLED(CONFIG_SMP)) {
return xchg(pudp, pud);
} else {
unsigned int nr);
void __page_table_check_pmds_set(struct mm_struct *mm, pmd_t *pmdp, pmd_t pmd,
unsigned int nr);
-void __page_table_check_puds_set(struct mm_struct *mm, pud_t *pudp, pud_t pud,
- unsigned int nr);
+void __page_table_check_puds_set(struct mm_struct *mm, unsigned long addr,
+ pud_t *pudp, pud_t pud, unsigned int nr);
void __page_table_check_pte_clear_range(struct mm_struct *mm,
unsigned long addr,
pmd_t pmd);
}
static inline void page_table_check_puds_set(struct mm_struct *mm,
- pud_t *pudp, pud_t pud, unsigned int nr)
+ unsigned long addr, pud_t *pudp, pud_t pud, unsigned int nr)
{
if (static_branch_likely(&page_table_check_disabled))
return;
- __page_table_check_puds_set(mm, pudp, pud, nr);
+ __page_table_check_puds_set(mm, addr, pudp, pud, nr);
}
static inline void page_table_check_pte_clear_range(struct mm_struct *mm,
}
static inline void page_table_check_puds_set(struct mm_struct *mm,
- pud_t *pudp, pud_t pud, unsigned int nr)
+ unsigned long addr, pud_t *pudp, pud_t pud, unsigned int nr)
{
}
#endif /* CONFIG_PAGE_TABLE_CHECK */
#define page_table_check_pmd_set(mm, pmdp, pmd) page_table_check_pmds_set(mm, pmdp, pmd, 1)
-#define page_table_check_pud_set(mm, pudp, pud) page_table_check_puds_set(mm, pudp, pud, 1)
+#define page_table_check_pud_set(mm, addr, pudp, pud) page_table_check_puds_set(mm, addr, pudp, pud, 1)
#endif /* __LINUX_PAGE_TABLE_CHECK_H */
}
EXPORT_SYMBOL(__page_table_check_pmds_set);
-void __page_table_check_puds_set(struct mm_struct *mm, pud_t *pudp, pud_t pud,
- unsigned int nr)
+void __page_table_check_puds_set(struct mm_struct *mm, unsigned long addr,
+ pud_t *pudp, pud_t pud, unsigned int nr)
{
unsigned long stride = PUD_SIZE >> PAGE_SHIFT;
unsigned int i;