#include <linux/mm_types.h>
#include <linux/cpufeature.h>
#include <linux/page-flags.h>
+#include <linux/page_table_check.h>
#include <linux/radix-tree.h>
#include <linux/atomic.h>
+#include <linux/mmap_lock.h>
#include <asm/ctlreg.h>
#include <asm/bug.h>
#include <asm/page.h>
/* At this point the reference through the mapping is still present */
if (mm_is_protected(mm) && pte_present(res))
WARN_ON_ONCE(uv_convert_from_secure_pte(res));
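+	/* Report the cleared PTE so PAGE_TABLE_CHECK can update its counters */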
+ page_table_check_pte_clear(mm, addr, res);
return res;
}
/* At this point the reference through the mapping is still present */
if (mm_is_protected(vma->vm_mm) && pte_present(res))
WARN_ON_ONCE(uv_convert_from_secure_pte(res));
+ page_table_check_pte_clear(vma->vm_mm, addr, res);
return res;
}
} else {
res = ptep_xchg_lazy(mm, addr, ptep, __pte(_PAGE_INVALID));
}
+
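+	/* The old PTE has been cleared on either path above; record it */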
+ page_table_check_pte_clear(mm, addr, res);
+
/* Nothing to do */
if (!mm_is_protected(mm) || !pte_present(res))
return res;
static inline void set_ptes(struct mm_struct *mm, unsigned long addr,
			    pte_t *ptep, pte_t entry, unsigned int nr)
{
if (pte_present(entry))
entry = clear_pte_bit(entry, __pgprot(_PAGE_UNUSED));
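+	/* Check the new mapping once for all nr entries before they go live */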
+ page_table_check_ptes_set(mm, addr, ptep, entry, nr);
for (;;) {
set_pte(ptep, entry);
if (--nr == 0)
static inline void set_pmd_at(struct mm_struct *mm, unsigned long addr,
pmd_t *pmdp, pmd_t entry)
{
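+	/* Account the new huge mapping before installing it */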
+ page_table_check_pmd_set(mm, addr, pmdp, entry);
set_pmd(pmdp, entry);
}
static inline pmd_t pmdp_huge_get_and_clear(struct mm_struct *mm,
unsigned long addr, pmd_t *pmdp)
{
- return pmdp_xchg_direct(mm, addr, pmdp, __pmd(_SEGMENT_ENTRY_EMPTY));
+ pmd_t pmd;
+
+ pmd = pmdp_xchg_direct(mm, addr, pmdp, __pmd(_SEGMENT_ENTRY_EMPTY));
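+	/* pmd now holds the old entry; report the clear */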
+ page_table_check_pmd_clear(mm, addr, pmd);
+ return pmd;
}
#define __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR_FULL
static inline pmd_t pmdp_huge_get_and_clear_full(struct vm_area_struct *vma,
						 unsigned long addr,
pmd_t *pmdp, int full)
{
+ pmd_t pmd;
+
if (full) {
- pmd_t pmd = *pmdp;
+ pmd = *pmdp;
set_pmd(pmdp, __pmd(_SEGMENT_ENTRY_EMPTY));
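+	/* mm teardown: plain set_pmd() suffices, but the clear still counts */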
+ page_table_check_pmd_clear(vma->vm_mm, addr, pmd);
return pmd;
}
- return pmdp_xchg_lazy(vma->vm_mm, addr, pmdp, __pmd(_SEGMENT_ENTRY_EMPTY));
+ pmd = pmdp_xchg_lazy(vma->vm_mm, addr, pmdp, __pmd(_SEGMENT_ENTRY_EMPTY));
+ page_table_check_pmd_clear(vma->vm_mm, addr, pmd);
+ return pmd;
}
#define __HAVE_ARCH_PMDP_HUGE_CLEAR_FLUSH
static inline pmd_t pmdp_invalidate(struct vm_area_struct *vma,
				    unsigned long addr, pmd_t *pmdp)
{
	pmd_t pmd = *pmdp;

	VM_WARN_ON_ONCE(!pmd_present(pmd));
pmd = set_pmd_bit(pmd, __pgprot(_SEGMENT_ENTRY_INVALID));
- return pmdp_xchg_direct(vma->vm_mm, addr, pmdp, pmd);
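+	/*
+	 * Strip the software READ bit so pmd_user_accessible_page() does
+	 * not treat the transiently invalidated entry as a second live
+	 * user mapping of the same page.
+	 */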
+#ifdef CONFIG_PAGE_TABLE_CHECK
+ pmd = clear_pmd_bit(pmd, __pgprot(_SEGMENT_ENTRY_READ));
+#endif
+ page_table_check_pmd_set(vma->vm_mm, addr, pmdp, pmd);
+ pmd = pmdp_xchg_direct(vma->vm_mm, addr, pmdp, pmd);
+ return pmd;
}
#define __HAVE_ARCH_PMDP_SET_WRPROTECT
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
+#ifdef CONFIG_PAGE_TABLE_CHECK
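+/*
+ * Page table check only covers user address spaces; seeing init_mm
+ * here means a caller is checking kernel page tables by mistake.
+ */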
+static inline bool pte_user_accessible_page(struct mm_struct *mm, unsigned long addr, pte_t pte)
+{
+ VM_BUG_ON(mm == &init_mm);
+
+ return pte_present(pte);
+}
+
+static inline bool pmd_user_accessible_page(struct mm_struct *mm, unsigned long addr, pmd_t pmd)
+{
+ VM_BUG_ON(mm == &init_mm);
+
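+	/* pmdp_invalidate() strips READ, so invalidated entries fail this test */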
+ return pmd_leaf(pmd) && (pmd_val(pmd) & _SEGMENT_ENTRY_READ);
+}
+
+static inline bool pud_user_accessible_page(struct mm_struct *mm, unsigned long addr, pud_t pud)
+{
+ VM_BUG_ON(mm == &init_mm);
+
+ return pud_leaf(pud);
+}
+#endif
+
/*
* 64 bit swap entry format:
* A page-table entry has some bits we have to treat in a special way.