// SPDX-License-Identifier: GPL-2.0

/*
 * Copyright (c) 2021, Google LLC.
 * Pasha Tatashin <pasha.tatashin@soleen.com>
 */
#include <linux/kstrtox.h>
#include <linux/page_table_check.h>

#define pr_fmt(fmt)	"page_table_check: " fmt
14 struct page_table_check
{
15 atomic_t anon_map_count
;
16 atomic_t file_map_count
;
19 static bool __page_table_check_enabled __initdata
=
20 IS_ENABLED(CONFIG_PAGE_TABLE_CHECK_ENFORCED
);
22 DEFINE_STATIC_KEY_TRUE(page_table_check_disabled
);
23 EXPORT_SYMBOL(page_table_check_disabled
);
25 static int __init
early_page_table_check_param(char *buf
)
27 return kstrtobool(buf
, &__page_table_check_enabled
);
30 early_param("page_table_check", early_page_table_check_param
);
32 static bool __init
need_page_table_check(void)
34 return __page_table_check_enabled
;
37 static void __init
init_page_table_check(void)
39 if (!__page_table_check_enabled
)
41 static_branch_disable(&page_table_check_disabled
);
44 struct page_ext_operations page_table_check_ops
= {
45 .size
= sizeof(struct page_table_check
),
46 .need
= need_page_table_check
,
47 .init
= init_page_table_check
,
48 .need_shared_flags
= false,
51 static struct page_table_check
*get_page_table_check(struct page_ext
*page_ext
)
54 return page_ext_data(page_ext
, &page_table_check_ops
);
58 * An entry is removed from the page table, decrement the counters for that page
59 * verify that it is of correct type and counters do not become negative.
61 static void page_table_check_clear(unsigned long pfn
, unsigned long pgcnt
)
63 struct page_ext
*page_ext
;
71 page
= pfn_to_page(pfn
);
72 page_ext
= page_ext_get(page
);
74 BUG_ON(PageSlab(page
));
75 anon
= PageAnon(page
);
77 for (i
= 0; i
< pgcnt
; i
++) {
78 struct page_table_check
*ptc
= get_page_table_check(page_ext
);
81 BUG_ON(atomic_read(&ptc
->file_map_count
));
82 BUG_ON(atomic_dec_return(&ptc
->anon_map_count
) < 0);
84 BUG_ON(atomic_read(&ptc
->anon_map_count
));
85 BUG_ON(atomic_dec_return(&ptc
->file_map_count
) < 0);
87 page_ext
= page_ext_next(page_ext
);
89 page_ext_put(page_ext
);
93 * A new entry is added to the page table, increment the counters for that page
94 * verify that it is of correct type and is not being mapped with a different
95 * type to a different process.
97 static void page_table_check_set(unsigned long pfn
, unsigned long pgcnt
,
100 struct page_ext
*page_ext
;
108 page
= pfn_to_page(pfn
);
109 page_ext
= page_ext_get(page
);
111 BUG_ON(PageSlab(page
));
112 anon
= PageAnon(page
);
114 for (i
= 0; i
< pgcnt
; i
++) {
115 struct page_table_check
*ptc
= get_page_table_check(page_ext
);
118 BUG_ON(atomic_read(&ptc
->file_map_count
));
119 BUG_ON(atomic_inc_return(&ptc
->anon_map_count
) > 1 && rw
);
121 BUG_ON(atomic_read(&ptc
->anon_map_count
));
122 BUG_ON(atomic_inc_return(&ptc
->file_map_count
) < 0);
124 page_ext
= page_ext_next(page_ext
);
126 page_ext_put(page_ext
);
130 * page is on free list, or is being allocated, verify that counters are zeroes
131 * crash if they are not.
133 void __page_table_check_zero(struct page
*page
, unsigned int order
)
135 struct page_ext
*page_ext
;
138 BUG_ON(PageSlab(page
));
140 page_ext
= page_ext_get(page
);
142 for (i
= 0; i
< (1ul << order
); i
++) {
143 struct page_table_check
*ptc
= get_page_table_check(page_ext
);
145 BUG_ON(atomic_read(&ptc
->anon_map_count
));
146 BUG_ON(atomic_read(&ptc
->file_map_count
));
147 page_ext
= page_ext_next(page_ext
);
149 page_ext_put(page_ext
);
152 void __page_table_check_pte_clear(struct mm_struct
*mm
, pte_t pte
)
157 if (pte_user_accessible_page(pte
)) {
158 page_table_check_clear(pte_pfn(pte
), PAGE_SIZE
>> PAGE_SHIFT
);
161 EXPORT_SYMBOL(__page_table_check_pte_clear
);
163 void __page_table_check_pmd_clear(struct mm_struct
*mm
, pmd_t pmd
)
168 if (pmd_user_accessible_page(pmd
)) {
169 page_table_check_clear(pmd_pfn(pmd
), PMD_SIZE
>> PAGE_SHIFT
);
172 EXPORT_SYMBOL(__page_table_check_pmd_clear
);
174 void __page_table_check_pud_clear(struct mm_struct
*mm
, pud_t pud
)
179 if (pud_user_accessible_page(pud
)) {
180 page_table_check_clear(pud_pfn(pud
), PUD_SIZE
>> PAGE_SHIFT
);
183 EXPORT_SYMBOL(__page_table_check_pud_clear
);
185 void __page_table_check_ptes_set(struct mm_struct
*mm
, pte_t
*ptep
, pte_t pte
,
193 for (i
= 0; i
< nr
; i
++)
194 __page_table_check_pte_clear(mm
, ptep_get(ptep
+ i
));
195 if (pte_user_accessible_page(pte
))
196 page_table_check_set(pte_pfn(pte
), nr
, pte_write(pte
));
198 EXPORT_SYMBOL(__page_table_check_ptes_set
);
200 void __page_table_check_pmd_set(struct mm_struct
*mm
, pmd_t
*pmdp
, pmd_t pmd
)
205 __page_table_check_pmd_clear(mm
, *pmdp
);
206 if (pmd_user_accessible_page(pmd
)) {
207 page_table_check_set(pmd_pfn(pmd
), PMD_SIZE
>> PAGE_SHIFT
,
211 EXPORT_SYMBOL(__page_table_check_pmd_set
);
213 void __page_table_check_pud_set(struct mm_struct
*mm
, pud_t
*pudp
, pud_t pud
)
218 __page_table_check_pud_clear(mm
, *pudp
);
219 if (pud_user_accessible_page(pud
)) {
220 page_table_check_set(pud_pfn(pud
), PUD_SIZE
>> PAGE_SHIFT
,
224 EXPORT_SYMBOL(__page_table_check_pud_set
);
226 void __page_table_check_pte_clear_range(struct mm_struct
*mm
,
233 if (!pmd_bad(pmd
) && !pmd_leaf(pmd
)) {
234 pte_t
*ptep
= pte_offset_map(&pmd
, addr
);
239 for (i
= 0; i
< PTRS_PER_PTE
; i
++) {
240 __page_table_check_pte_clear(mm
, ptep_get(ptep
));
244 pte_unmap(ptep
- PTRS_PER_PTE
);