/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_KASAN_H
#define _LINUX_KASAN_H

#include <linux/bug.h>
#include <linux/kasan-enabled.h>
#include <linux/kasan-tags.h>
#include <linux/kernel.h>
#include <linux/static_key.h>
#include <linux/types.h>

struct kmem_cache;
struct page;
struct slab;
struct vm_struct;
struct task_struct;

#ifdef CONFIG_KASAN

#include <linux/linkage.h>
#include <asm/kasan.h>

#endif

typedef unsigned int __bitwise kasan_vmalloc_flags_t;

#define KASAN_VMALLOC_NONE		((__force kasan_vmalloc_flags_t)0x00u)
#define KASAN_VMALLOC_INIT		((__force kasan_vmalloc_flags_t)0x01u)
#define KASAN_VMALLOC_VM_ALLOC		((__force kasan_vmalloc_flags_t)0x02u)
#define KASAN_VMALLOC_PROT_NORMAL	((__force kasan_vmalloc_flags_t)0x04u)

#define KASAN_VMALLOC_PAGE_RANGE 0x1 /* Apply existing page range */
#define KASAN_VMALLOC_TLB_FLUSH 0x2 /* TLB flush */

#if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)

#include <linux/pgtable.h>

/* Software KASAN implementations use shadow memory. */

#ifdef CONFIG_KASAN_SW_TAGS
/* This matches KASAN_TAG_INVALID. */
#define KASAN_SHADOW_INIT 0xFE
#else
#define KASAN_SHADOW_INIT 0
#endif

#ifndef PTE_HWTABLE_PTRS
#define PTE_HWTABLE_PTRS 0
#endif

extern unsigned char kasan_early_shadow_page[PAGE_SIZE];
extern pte_t kasan_early_shadow_pte[MAX_PTRS_PER_PTE + PTE_HWTABLE_PTRS];
extern pmd_t kasan_early_shadow_pmd[MAX_PTRS_PER_PMD];
extern pud_t kasan_early_shadow_pud[MAX_PTRS_PER_PUD];
extern p4d_t kasan_early_shadow_p4d[MAX_PTRS_PER_P4D];

int kasan_populate_early_shadow(const void *shadow_start,
				const void *shadow_end);

#ifndef kasan_mem_to_shadow
static inline void *kasan_mem_to_shadow(const void *addr)
{
	return (void *)((unsigned long)addr >> KASAN_SHADOW_SCALE_SHIFT)
		+ KASAN_SHADOW_OFFSET;
}
#endif
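
/*
 * Worked example (illustrative): with the generic mode's shadow scale
 * shift of 3, one shadow byte covers 8 bytes of kernel memory, so the
 * eight addresses addr through addr + 7 all map to the same shadow byte
 * at (addr >> 3) + KASAN_SHADOW_OFFSET; that byte's value encodes how
 * many of the covered bytes are currently accessible.
 */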

int kasan_add_zero_shadow(void *start, unsigned long size);
void kasan_remove_zero_shadow(void *start, unsigned long size);

/* Enable reporting bugs after kasan_disable_current() */
extern void kasan_enable_current(void);

/* Disable reporting bugs for current task */
extern void kasan_disable_current(void);

#else /* CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS */

static inline int kasan_add_zero_shadow(void *start, unsigned long size)
{
	return 0;
}
static inline void kasan_remove_zero_shadow(void *start,
					unsigned long size)
{}

static inline void kasan_enable_current(void) {}
static inline void kasan_disable_current(void) {}

#endif /* CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS */

#ifdef CONFIG_KASAN_HW_TAGS

#else /* CONFIG_KASAN_HW_TAGS */

#endif /* CONFIG_KASAN_HW_TAGS */

static inline bool kasan_has_integrated_init(void)
{
	return kasan_hw_tags_enabled();
}

#ifdef CONFIG_KASAN

void __kasan_unpoison_range(const void *addr, size_t size);
static __always_inline void kasan_unpoison_range(const void *addr, size_t size)
{
	if (kasan_enabled())
		__kasan_unpoison_range(addr, size);
}

void __kasan_poison_pages(struct page *page, unsigned int order, bool init);
static __always_inline void kasan_poison_pages(struct page *page,
						unsigned int order, bool init)
{
	if (kasan_enabled())
		__kasan_poison_pages(page, order, init);
}

bool __kasan_unpoison_pages(struct page *page, unsigned int order, bool init);
static __always_inline bool kasan_unpoison_pages(struct page *page,
						 unsigned int order, bool init)
{
	if (kasan_enabled())
		return __kasan_unpoison_pages(page, order, init);
	return false;
}

void __kasan_poison_slab(struct slab *slab);
static __always_inline void kasan_poison_slab(struct slab *slab)
{
	if (kasan_enabled())
		__kasan_poison_slab(slab);
}

void __kasan_unpoison_new_object(struct kmem_cache *cache, void *object);
/**
 * kasan_unpoison_new_object - Temporarily unpoison a new slab object.
 * @cache: Cache the object belongs to.
 * @object: Pointer to the object.
 *
 * This function is intended for the slab allocator's internal use. It
 * temporarily unpoisons an object from a newly allocated slab without doing
 * anything else. The object must later be repoisoned by
 * kasan_poison_new_object().
 */
static __always_inline void kasan_unpoison_new_object(struct kmem_cache *cache,
							void *object)
{
	if (kasan_enabled())
		__kasan_unpoison_new_object(cache, object);
}

void __kasan_poison_new_object(struct kmem_cache *cache, void *object);
/**
 * kasan_poison_new_object - Repoison a new slab object.
 * @cache: Cache the object belongs to.
 * @object: Pointer to the object.
 *
 * This function is intended for the slab allocator's internal use. It
 * repoisons an object that was previously unpoisoned by
 * kasan_unpoison_new_object() without doing anything else.
 */
static __always_inline void kasan_poison_new_object(struct kmem_cache *cache,
							void *object)
{
	if (kasan_enabled())
		__kasan_poison_new_object(cache, object);
}

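/*
 * Illustrative sketch (not kernel API): a slab allocator that writes
 * object metadata right after allocating a new slab is expected to
 * bracket the access with this pair; setup_object() is a hypothetical
 * helper:
 *
 *	kasan_unpoison_new_object(cache, object);
 *	setup_object(cache, object);
 *	kasan_poison_new_object(cache, object);
 */
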
void * __must_check __kasan_init_slab_obj(struct kmem_cache *cache,
					const void *object);
static __always_inline void * __must_check kasan_init_slab_obj(
				struct kmem_cache *cache, const void *object)
{
	if (kasan_enabled())
		return __kasan_init_slab_obj(cache, object);
	return (void *)object;
}

bool __kasan_slab_pre_free(struct kmem_cache *s, void *object,
			unsigned long ip);
/**
 * kasan_slab_pre_free - Check whether freeing a slab object is safe.
 * @object: Object to be freed.
 *
 * This function checks whether freeing the given object is safe. It may
 * check for double-free and invalid-free bugs and report them.
 *
 * This function is intended only for use by the slab allocator.
 *
 * Return: true if freeing the object is unsafe; false otherwise.
 */
static __always_inline bool kasan_slab_pre_free(struct kmem_cache *s,
						void *object)
{
	if (kasan_enabled())
		return __kasan_slab_pre_free(s, object, _RET_IP_);
	return false;
}

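/*
 * Illustrative sketch (not kernel API): callers are expected to abort
 * the free path when this check fails:
 *
 *	if (kasan_slab_pre_free(s, object))
 *		return;		// double-free or invalid-free was reported
 */
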
bool __kasan_slab_free(struct kmem_cache *s, void *object, bool init,
		       bool still_accessible);
/**
 * kasan_slab_free - Poison, initialize, and quarantine a slab object.
 * @object: Object to be freed.
 * @init: Whether to initialize the object.
 * @still_accessible: Whether the object contents are still accessible.
 *
 * This function informs KASAN that a slab object has been freed and is not
 * supposed to be accessed anymore, except when @still_accessible is set
 * (indicating that the object is in a SLAB_TYPESAFE_BY_RCU cache and an RCU
 * grace period might not have passed yet).
 *
 * For KASAN modes that have integrated memory initialization
 * (kasan_has_integrated_init() == true), this function also initializes
 * the object's memory. For other modes, the @init argument is ignored.
 *
 * This function might also take ownership of the object to quarantine it.
 * When this happens, KASAN will defer freeing the object to a later
 * stage and handle it internally until then. The return value indicates
 * whether KASAN took ownership of the object.
 *
 * This function is intended only for use by the slab allocator.
 *
 * Return: true if KASAN took ownership of the object; false otherwise.
 */
static __always_inline bool kasan_slab_free(struct kmem_cache *s,
						void *object, bool init,
						bool still_accessible)
{
	if (kasan_enabled())
		return __kasan_slab_free(s, object, init, still_accessible);
	return false;
}

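/*
 * Illustrative sketch (not kernel API): because kasan_slab_free() may
 * take ownership of the object (quarantine), the caller must stop
 * touching the object once it returns true:
 *
 *	if (kasan_slab_free(s, object, init, false))
 *		return;		// object is now owned by KASAN
 *	...return the object to the slab freelist...
 */
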
void __kasan_kfree_large(void *ptr, unsigned long ip);
static __always_inline void kasan_kfree_large(void *ptr)
{
	if (kasan_enabled())
		__kasan_kfree_large(ptr, _RET_IP_);
}

void * __must_check __kasan_slab_alloc(struct kmem_cache *s,
				       void *object, gfp_t flags, bool init);
static __always_inline void * __must_check kasan_slab_alloc(
		struct kmem_cache *s, void *object, gfp_t flags, bool init)
{
	if (kasan_enabled())
		return __kasan_slab_alloc(s, object, flags, init);
	return object;
}

void * __must_check __kasan_kmalloc(struct kmem_cache *s, const void *object,
				    size_t size, gfp_t flags);
static __always_inline void * __must_check kasan_kmalloc(struct kmem_cache *s,
				const void *object, size_t size, gfp_t flags)
{
	if (kasan_enabled())
		return __kasan_kmalloc(s, object, size, flags);
	return (void *)object;
}

void * __must_check __kasan_kmalloc_large(const void *ptr,
					  size_t size, gfp_t flags);
static __always_inline void * __must_check kasan_kmalloc_large(const void *ptr,
						      size_t size, gfp_t flags)
{
	if (kasan_enabled())
		return __kasan_kmalloc_large(ptr, size, flags);
	return (void *)ptr;
}

void * __must_check __kasan_krealloc(const void *object,
				     size_t new_size, gfp_t flags);
static __always_inline void * __must_check kasan_krealloc(const void *object,
						 size_t new_size, gfp_t flags)
{
	if (kasan_enabled())
		return __kasan_krealloc(object, new_size, flags);
	return (void *)object;
}

bool __kasan_mempool_poison_pages(struct page *page, unsigned int order,
				  unsigned long ip);
/**
 * kasan_mempool_poison_pages - Check and poison a mempool page allocation.
 * @page: Pointer to the page allocation.
 * @order: Order of the allocation.
 *
 * This function is intended for kernel subsystems that cache page allocations
 * to reuse them instead of freeing them back to page_alloc (e.g. mempool).
 *
 * This function is similar to kasan_mempool_poison_object() but operates on
 * page allocations.
 *
 * Before the poisoned allocation can be reused, it must be unpoisoned via
 * kasan_mempool_unpoison_pages().
 *
 * Return: true if the allocation can be safely reused; false otherwise.
 */
static __always_inline bool kasan_mempool_poison_pages(struct page *page,
						       unsigned int order)
{
	if (kasan_enabled())
		return __kasan_mempool_poison_pages(page, order, _RET_IP_);
	return true;
}

void __kasan_mempool_unpoison_pages(struct page *page, unsigned int order,
				    unsigned long ip);
/**
 * kasan_mempool_unpoison_pages - Unpoison a mempool page allocation.
 * @page: Pointer to the page allocation.
 * @order: Order of the allocation.
 *
 * This function is intended for kernel subsystems that cache page allocations
 * to reuse them instead of freeing them back to page_alloc (e.g. mempool).
 *
 * This function unpoisons a page allocation that was previously poisoned by
 * kasan_mempool_poison_pages() without zeroing the allocation's memory. For
 * the tag-based modes, this function assigns a new tag to the allocation.
 */
static __always_inline void kasan_mempool_unpoison_pages(struct page *page,
							 unsigned int order)
{
	if (kasan_enabled())
		__kasan_mempool_unpoison_pages(page, order, _RET_IP_);
}

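/*
 * Illustrative sketch (not kernel API): a mempool-style cache of page
 * allocations; add_to_cache() and take_from_cache() are hypothetical
 * helpers:
 *
 *	if (kasan_mempool_poison_pages(page, order))
 *		add_to_cache(page);		// safe to stash for reuse
 *	else
 *		__free_pages(page, order);	// buggy, do not reuse
 *	...
 *	page = take_from_cache();
 *	kasan_mempool_unpoison_pages(page, order);
 */
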
bool __kasan_mempool_poison_object(void *ptr, unsigned long ip);
/**
 * kasan_mempool_poison_object - Check and poison a mempool slab allocation.
 * @ptr: Pointer to the slab allocation.
 *
 * This function is intended for kernel subsystems that cache slab allocations
 * to reuse them instead of freeing them back to the slab allocator (e.g.
 * mempool).
 *
 * This function poisons a slab allocation and saves a free stack trace for it
 * without initializing the allocation's memory and without putting it into the
 * quarantine (for the Generic mode).
 *
 * This function also performs checks to detect double-free and invalid-free
 * bugs and reports them. The caller can use the return value of this function
 * to find out if the allocation is buggy.
 *
 * Before the poisoned allocation can be reused, it must be unpoisoned via
 * kasan_mempool_unpoison_object().
 *
 * This function operates on all slab allocations including large kmalloc
 * allocations (the ones returned by kmalloc_large() or by kmalloc() with the
 * size > KMALLOC_MAX_SIZE).
 *
 * Return: true if the allocation can be safely reused; false otherwise.
 */
static __always_inline bool kasan_mempool_poison_object(void *ptr)
{
	if (kasan_enabled())
		return __kasan_mempool_poison_object(ptr, _RET_IP_);
	return true;
}

void __kasan_mempool_unpoison_object(void *ptr, size_t size, unsigned long ip);
/**
 * kasan_mempool_unpoison_object - Unpoison a mempool slab allocation.
 * @ptr: Pointer to the slab allocation.
 * @size: Size to be unpoisoned.
 *
 * This function is intended for kernel subsystems that cache slab allocations
 * to reuse them instead of freeing them back to the slab allocator (e.g.
 * mempool).
 *
 * This function unpoisons a slab allocation that was previously poisoned via
 * kasan_mempool_poison_object() and saves an alloc stack trace for it without
 * initializing the allocation's memory. For the tag-based modes, this function
 * does not assign a new tag to the allocation and instead restores the
 * original tags based on the pointer value.
 *
 * This function operates on all slab allocations including large kmalloc
 * allocations (the ones returned by kmalloc_large() or by kmalloc() with the
 * size > KMALLOC_MAX_SIZE).
 */
static __always_inline void kasan_mempool_unpoison_object(void *ptr,
							   size_t size)
{
	if (kasan_enabled())
		__kasan_mempool_unpoison_object(ptr, size, _RET_IP_);
}

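/*
 * Illustrative sketch (not kernel API): the slab-allocation counterpart
 * of the page example above; add_element() and remove_element() are
 * hypothetical helpers:
 *
 *	if (kasan_mempool_poison_object(element))
 *		add_element(pool, element);	// safe to stash for reuse
 *	...
 *	element = remove_element(pool);
 *	kasan_mempool_unpoison_object(element, elem_size);
 */
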
/*
 * Unlike kasan_check_read/write(), kasan_check_byte() is performed even for
 * the hardware tag-based mode that doesn't rely on compiler instrumentation.
 */
bool __kasan_check_byte(const void *addr, unsigned long ip);
static __always_inline bool kasan_check_byte(const void *addr)
{
	if (kasan_enabled())
		return __kasan_check_byte(addr, _RET_IP_);
	return true;
}

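/*
 * Illustrative sketch (not kernel API): probing whether a byte is
 * accessible before touching it; if it is poisoned, a KASAN report has
 * already been printed by the time this returns false:
 *
 *	if (!kasan_check_byte(ptr))
 *		return;		// poisoned, do not dereference
 */
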
#else /* CONFIG_KASAN */

static inline void kasan_unpoison_range(const void *address, size_t size) {}
static inline void kasan_poison_pages(struct page *page, unsigned int order,
				      bool init) {}
static inline bool kasan_unpoison_pages(struct page *page, unsigned int order,
					bool init)
{
	return false;
}
static inline void kasan_poison_slab(struct slab *slab) {}
static inline void kasan_unpoison_new_object(struct kmem_cache *cache,
					void *object) {}
static inline void kasan_poison_new_object(struct kmem_cache *cache,
					void *object) {}
static inline void *kasan_init_slab_obj(struct kmem_cache *cache,
				const void *object)
{
	return (void *)object;
}

static inline bool kasan_slab_pre_free(struct kmem_cache *s, void *object)
{
	return false;
}

static inline bool kasan_slab_free(struct kmem_cache *s, void *object,
				   bool init, bool still_accessible)
{
	return false;
}
static inline void kasan_kfree_large(void *ptr) {}
static inline void *kasan_slab_alloc(struct kmem_cache *s, void *object,
				   gfp_t flags, bool init)
{
	return object;
}
static inline void *kasan_kmalloc(struct kmem_cache *s, const void *object,
				size_t size, gfp_t flags)
{
	return (void *)object;
}
static inline void *kasan_kmalloc_large(const void *ptr, size_t size, gfp_t flags)
{
	return (void *)ptr;
}
static inline void *kasan_krealloc(const void *object, size_t new_size,
				 gfp_t flags)
{
	return (void *)object;
}
static inline bool kasan_mempool_poison_pages(struct page *page, unsigned int order)
{
	return true;
}
static inline void kasan_mempool_unpoison_pages(struct page *page, unsigned int order) {}
static inline bool kasan_mempool_poison_object(void *ptr)
{
	return true;
}
static inline void kasan_mempool_unpoison_object(void *ptr, size_t size) {}

static inline bool kasan_check_byte(const void *address)
{
	return true;
}

#endif /* CONFIG_KASAN */

#if defined(CONFIG_KASAN) && defined(CONFIG_KASAN_STACK)
void kasan_unpoison_task_stack(struct task_struct *task);
asmlinkage void kasan_unpoison_task_stack_below(const void *watermark);
#else
static inline void kasan_unpoison_task_stack(struct task_struct *task) {}
static inline void kasan_unpoison_task_stack_below(const void *watermark) {}
#endif

#ifdef CONFIG_KASAN_GENERIC

struct kasan_cache {
	int alloc_meta_offset;
	int free_meta_offset;
};

size_t kasan_metadata_size(struct kmem_cache *cache, bool in_object);
void kasan_cache_create(struct kmem_cache *cache, unsigned int *size,
			slab_flags_t *flags);

void kasan_cache_shrink(struct kmem_cache *cache);
void kasan_cache_shutdown(struct kmem_cache *cache);
void kasan_record_aux_stack(void *ptr);

#else /* CONFIG_KASAN_GENERIC */

/* Tag-based KASAN modes do not use per-object metadata. */
static inline size_t kasan_metadata_size(struct kmem_cache *cache,
					 bool in_object)
{
	return 0;
}
/* And no cache-related metadata initialization is required. */
static inline void kasan_cache_create(struct kmem_cache *cache,
				      unsigned int *size,
				      slab_flags_t *flags) {}

static inline void kasan_cache_shrink(struct kmem_cache *cache) {}
static inline void kasan_cache_shutdown(struct kmem_cache *cache) {}
static inline void kasan_record_aux_stack(void *ptr) {}

#endif /* CONFIG_KASAN_GENERIC */

#if defined(CONFIG_KASAN_SW_TAGS) || defined(CONFIG_KASAN_HW_TAGS)

static inline void *kasan_reset_tag(const void *addr)
{
	return (void *)arch_kasan_reset_tag(addr);
}

/**
 * kasan_report - print a report about a bad memory access detected by KASAN
 * @addr: address of the bad access
 * @size: size of the bad access
 * @is_write: whether the bad access is a write or a read
 * @ip: instruction pointer for the accessibility check or the bad access itself
 */
bool kasan_report(const void *addr, size_t size,
		bool is_write, unsigned long ip);

#else /* CONFIG_KASAN_SW_TAGS || CONFIG_KASAN_HW_TAGS */

static inline void *kasan_reset_tag(const void *addr)
{
	return (void *)addr;
}

#endif /* CONFIG_KASAN_SW_TAGS || CONFIG_KASAN_HW_TAGS */
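
/*
 * Illustrative sketch (not kernel API): the tag-based modes keep a tag
 * in the top bits of a pointer, so comparisons against untagged
 * addresses should strip the tag first:
 *
 *	if (kasan_reset_tag(ptr) == kasan_reset_tag(other))
 *		...		// same underlying address
 */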

#ifdef CONFIG_KASAN_HW_TAGS

void kasan_report_async(void);

#endif /* CONFIG_KASAN_HW_TAGS */

#ifdef CONFIG_KASAN_SW_TAGS
void __init kasan_init_sw_tags(void);
#else
static inline void kasan_init_sw_tags(void) { }
#endif

#ifdef CONFIG_KASAN_HW_TAGS
void kasan_init_hw_tags_cpu(void);
void __init kasan_init_hw_tags(void);
#else
static inline void kasan_init_hw_tags_cpu(void) { }
static inline void kasan_init_hw_tags(void) { }
#endif

#ifdef CONFIG_KASAN_VMALLOC

#if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)

void kasan_populate_early_vm_area_shadow(void *start, unsigned long size);
int kasan_populate_vmalloc(unsigned long addr, unsigned long size);
void kasan_release_vmalloc(unsigned long start, unsigned long end,
			   unsigned long free_region_start,
			   unsigned long free_region_end,
			   unsigned long flags);

#else /* CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS */

static inline void kasan_populate_early_vm_area_shadow(void *start,
							unsigned long size)
{ }
static inline int kasan_populate_vmalloc(unsigned long start,
					unsigned long size)
{
	return 0;
}
static inline void kasan_release_vmalloc(unsigned long start,
					 unsigned long end,
					 unsigned long free_region_start,
					 unsigned long free_region_end,
					 unsigned long flags) { }

#endif /* CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS */

void *__kasan_unpoison_vmalloc(const void *start, unsigned long size,
			       kasan_vmalloc_flags_t flags);
static __always_inline void *kasan_unpoison_vmalloc(const void *start,
						    unsigned long size,
						    kasan_vmalloc_flags_t flags)
{
	if (kasan_enabled())
		return __kasan_unpoison_vmalloc(start, size, flags);
	return (void *)start;
}

void __kasan_poison_vmalloc(const void *start, unsigned long size);
static __always_inline void kasan_poison_vmalloc(const void *start,
						unsigned long size)
{
	if (kasan_enabled())
		__kasan_poison_vmalloc(start, size);
}

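/*
 * Illustrative sketch (not kernel API): a vmalloc-style allocator is
 * expected to mark a freshly mapped area accessible, combining the
 * kasan_vmalloc_flags_t bits that describe how the mapping was created,
 * and to use the returned (possibly tagged) pointer from then on:
 *
 *	addr = kasan_unpoison_vmalloc(addr, size,
 *			KASAN_VMALLOC_VM_ALLOC | KASAN_VMALLOC_PROT_NORMAL);
 *
 * On free, the area is poisoned again via kasan_poison_vmalloc(addr, size).
 */
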
#else /* CONFIG_KASAN_VMALLOC */

static inline void kasan_populate_early_vm_area_shadow(void *start,
						       unsigned long size) { }
static inline int kasan_populate_vmalloc(unsigned long start,
					unsigned long size)
{
	return 0;
}
static inline void kasan_release_vmalloc(unsigned long start,
					 unsigned long end,
					 unsigned long free_region_start,
					 unsigned long free_region_end,
					 unsigned long flags) { }

static inline void *kasan_unpoison_vmalloc(const void *start,
					   unsigned long size,
					   kasan_vmalloc_flags_t flags)
{
	return (void *)start;
}
static inline void kasan_poison_vmalloc(const void *start, unsigned long size)
{ }

#endif /* CONFIG_KASAN_VMALLOC */

#if (defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)) && \
		!defined(CONFIG_KASAN_VMALLOC)

/*
 * These functions allocate and free shadow memory for kernel modules.
 * They are only required when KASAN_VMALLOC is not supported, as otherwise
 * shadow memory is allocated by the generic vmalloc handlers.
 */
int kasan_alloc_module_shadow(void *addr, size_t size, gfp_t gfp_mask);
void kasan_free_module_shadow(const struct vm_struct *vm);

#else /* (CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS) && !CONFIG_KASAN_VMALLOC */

static inline int kasan_alloc_module_shadow(void *addr, size_t size, gfp_t gfp_mask) { return 0; }
static inline void kasan_free_module_shadow(const struct vm_struct *vm) {}

#endif /* (CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS) && !CONFIG_KASAN_VMALLOC */

#if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)
void kasan_non_canonical_hook(unsigned long addr);
#else /* CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS */
static inline void kasan_non_canonical_hook(unsigned long addr) { }
#endif /* CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS */

#endif /* LINUX_KASAN_H */