/* KASAN (Kernel Address Sanitizer) core API. */
b2441318 | 1 | /* SPDX-License-Identifier: GPL-2.0 */ |
0b24becc AR |
2 | #ifndef _LINUX_KASAN_H |
3 | #define _LINUX_KASAN_H | |
4 | ||
7a3b8353 | 5 | #include <linux/bug.h> |
f9b5e46f | 6 | #include <linux/kasan-enabled.h> |
2db710cc | 7 | #include <linux/kernel.h> |
34303244 | 8 | #include <linux/static_key.h> |
0b24becc AR |
9 | #include <linux/types.h> |
10 | ||
/*
 * Forward declarations: the hooks below only take pointers to these
 * types, so their full definitions are not needed in this header.
 */
struct kmem_cache;
struct page;
struct slab;
struct vm_struct;
struct task_struct;

#ifdef CONFIG_KASAN

#include <linux/linkage.h>
#include <asm/kasan.h>

#endif
23689e91 AK |
24 | typedef unsigned int __bitwise kasan_vmalloc_flags_t; |
25 | ||
ec2a0f9c AK |
26 | #define KASAN_VMALLOC_NONE ((__force kasan_vmalloc_flags_t)0x00u) |
27 | #define KASAN_VMALLOC_INIT ((__force kasan_vmalloc_flags_t)0x01u) | |
28 | #define KASAN_VMALLOC_VM_ALLOC ((__force kasan_vmalloc_flags_t)0x02u) | |
29 | #define KASAN_VMALLOC_PROT_NORMAL ((__force kasan_vmalloc_flags_t)0x04u) | |
23689e91 | 30 | |
d5750edf AK |
31 | #if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS) |
32 | ||
33 | #include <linux/pgtable.h> | |
34 | ||
35 | /* Software KASAN implementations use shadow memory. */ | |
36 | ||
37 | #ifdef CONFIG_KASAN_SW_TAGS | |
a064cb00 AK |
38 | /* This matches KASAN_TAG_INVALID. */ |
39 | #define KASAN_SHADOW_INIT 0xFE | |
d5750edf AK |
40 | #else |
41 | #define KASAN_SHADOW_INIT 0 | |
42 | #endif | |
43 | ||
29970dc2 HL |
44 | #ifndef PTE_HWTABLE_PTRS |
45 | #define PTE_HWTABLE_PTRS 0 | |
46 | #endif | |
47 | ||
9577dd74 | 48 | extern unsigned char kasan_early_shadow_page[PAGE_SIZE]; |
cb32c9c5 DA |
49 | extern pte_t kasan_early_shadow_pte[MAX_PTRS_PER_PTE + PTE_HWTABLE_PTRS]; |
50 | extern pmd_t kasan_early_shadow_pmd[MAX_PTRS_PER_PMD]; | |
51 | extern pud_t kasan_early_shadow_pud[MAX_PTRS_PER_PUD]; | |
9577dd74 | 52 | extern p4d_t kasan_early_shadow_p4d[MAX_PTRS_PER_P4D]; |
69786cdb | 53 | |
9577dd74 | 54 | int kasan_populate_early_shadow(const void *shadow_start, |
69786cdb AR |
55 | const void *shadow_end); |
56 | ||
2a86f1b5 | 57 | #ifndef kasan_mem_to_shadow |
0b24becc AR |
58 | static inline void *kasan_mem_to_shadow(const void *addr) |
59 | { | |
60 | return (void *)((unsigned long)addr >> KASAN_SHADOW_SCALE_SHIFT) | |
61 | + KASAN_SHADOW_OFFSET; | |
62 | } | |
9b04c764 | 63 | #endif |
0b24becc | 64 | |
d5750edf AK |
65 | int kasan_add_zero_shadow(void *start, unsigned long size); |
66 | void kasan_remove_zero_shadow(void *start, unsigned long size); | |
67 | ||
d73b4936 AK |
68 | /* Enable reporting bugs after kasan_disable_current() */ |
69 | extern void kasan_enable_current(void); | |
70 | ||
71 | /* Disable reporting bugs for current task */ | |
72 | extern void kasan_disable_current(void); | |
73 | ||
d5750edf AK |
74 | #else /* CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS */ |
75 | ||
76 | static inline int kasan_add_zero_shadow(void *start, unsigned long size) | |
77 | { | |
78 | return 0; | |
79 | } | |
80 | static inline void kasan_remove_zero_shadow(void *start, | |
81 | unsigned long size) | |
82 | {} | |
83 | ||
d73b4936 AK |
84 | static inline void kasan_enable_current(void) {} |
85 | static inline void kasan_disable_current(void) {} | |
86 | ||
d5750edf AK |
87 | #endif /* CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS */ |
88 | ||
34303244 | 89 | #ifdef CONFIG_KASAN_HW_TAGS |
e86f8b09 | 90 | |
e86f8b09 AK |
91 | #else /* CONFIG_KASAN_HW_TAGS */ |
92 | ||
e86f8b09 AK |
93 | #endif /* CONFIG_KASAN_HW_TAGS */ |
94 | ||
e5af50a5 PC |
95 | static inline bool kasan_has_integrated_init(void) |
96 | { | |
97 | return kasan_hw_tags_enabled(); | |
98 | } | |
99 | ||
7a3b8353 | 100 | #ifdef CONFIG_KASAN |
34303244 AK |
101 | void __kasan_unpoison_range(const void *addr, size_t size); |
102 | static __always_inline void kasan_unpoison_range(const void *addr, size_t size) | |
103 | { | |
104 | if (kasan_enabled()) | |
105 | __kasan_unpoison_range(addr, size); | |
106 | } | |
7ed2f9e6 | 107 | |
7a3b8353 PC |
108 | void __kasan_poison_pages(struct page *page, unsigned int order, bool init); |
109 | static __always_inline void kasan_poison_pages(struct page *page, | |
1bb5eab3 | 110 | unsigned int order, bool init) |
34303244 AK |
111 | { |
112 | if (kasan_enabled()) | |
7a3b8353 | 113 | __kasan_poison_pages(page, order, init); |
34303244 | 114 | } |
0316bec2 | 115 | |
44383cef AK |
116 | bool __kasan_unpoison_pages(struct page *page, unsigned int order, bool init); |
117 | static __always_inline bool kasan_unpoison_pages(struct page *page, | |
7a3b8353 | 118 | unsigned int order, bool init) |
34303244 AK |
119 | { |
120 | if (kasan_enabled()) | |
44383cef AK |
121 | return __kasan_unpoison_pages(page, order, init); |
122 | return false; | |
34303244 | 123 | } |
0316bec2 | 124 | |
6e48a966 MWO |
125 | void __kasan_poison_slab(struct slab *slab); |
126 | static __always_inline void kasan_poison_slab(struct slab *slab) | |
34303244 AK |
127 | { |
128 | if (kasan_enabled()) | |
6e48a966 | 129 | __kasan_poison_slab(slab); |
34303244 AK |
130 | } |
131 | ||
132 | void __kasan_unpoison_object_data(struct kmem_cache *cache, void *object); | |
133 | static __always_inline void kasan_unpoison_object_data(struct kmem_cache *cache, | |
134 | void *object) | |
135 | { | |
136 | if (kasan_enabled()) | |
137 | __kasan_unpoison_object_data(cache, object); | |
138 | } | |
139 | ||
140 | void __kasan_poison_object_data(struct kmem_cache *cache, void *object); | |
141 | static __always_inline void kasan_poison_object_data(struct kmem_cache *cache, | |
142 | void *object) | |
143 | { | |
144 | if (kasan_enabled()) | |
145 | __kasan_poison_object_data(cache, object); | |
146 | } | |
147 | ||
148 | void * __must_check __kasan_init_slab_obj(struct kmem_cache *cache, | |
149 | const void *object); | |
150 | static __always_inline void * __must_check kasan_init_slab_obj( | |
151 | struct kmem_cache *cache, const void *object) | |
152 | { | |
153 | if (kasan_enabled()) | |
154 | return __kasan_init_slab_obj(cache, object); | |
155 | return (void *)object; | |
156 | } | |
157 | ||
d57a964e AK |
158 | bool __kasan_slab_free(struct kmem_cache *s, void *object, |
159 | unsigned long ip, bool init); | |
160 | static __always_inline bool kasan_slab_free(struct kmem_cache *s, | |
161 | void *object, bool init) | |
34303244 AK |
162 | { |
163 | if (kasan_enabled()) | |
d57a964e | 164 | return __kasan_slab_free(s, object, _RET_IP_, init); |
34303244 AK |
165 | return false; |
166 | } | |
167 | ||
200072ce AK |
168 | void __kasan_kfree_large(void *ptr, unsigned long ip); |
169 | static __always_inline void kasan_kfree_large(void *ptr) | |
170 | { | |
171 | if (kasan_enabled()) | |
172 | __kasan_kfree_large(ptr, _RET_IP_); | |
173 | } | |
174 | ||
eeb3160c | 175 | void __kasan_slab_free_mempool(void *ptr, unsigned long ip); |
027b37b5 | 176 | static __always_inline void kasan_slab_free_mempool(void *ptr) |
eeb3160c AK |
177 | { |
178 | if (kasan_enabled()) | |
027b37b5 | 179 | __kasan_slab_free_mempool(ptr, _RET_IP_); |
eeb3160c AK |
180 | } |
181 | ||
34303244 | 182 | void * __must_check __kasan_slab_alloc(struct kmem_cache *s, |
da844b78 | 183 | void *object, gfp_t flags, bool init); |
34303244 | 184 | static __always_inline void * __must_check kasan_slab_alloc( |
da844b78 | 185 | struct kmem_cache *s, void *object, gfp_t flags, bool init) |
34303244 AK |
186 | { |
187 | if (kasan_enabled()) | |
da844b78 | 188 | return __kasan_slab_alloc(s, object, flags, init); |
34303244 AK |
189 | return object; |
190 | } | |
191 | ||
192 | void * __must_check __kasan_kmalloc(struct kmem_cache *s, const void *object, | |
193 | size_t size, gfp_t flags); | |
194 | static __always_inline void * __must_check kasan_kmalloc(struct kmem_cache *s, | |
195 | const void *object, size_t size, gfp_t flags) | |
196 | { | |
197 | if (kasan_enabled()) | |
198 | return __kasan_kmalloc(s, object, size, flags); | |
199 | return (void *)object; | |
200 | } | |
201 | ||
202 | void * __must_check __kasan_kmalloc_large(const void *ptr, | |
203 | size_t size, gfp_t flags); | |
204 | static __always_inline void * __must_check kasan_kmalloc_large(const void *ptr, | |
205 | size_t size, gfp_t flags) | |
206 | { | |
207 | if (kasan_enabled()) | |
208 | return __kasan_kmalloc_large(ptr, size, flags); | |
209 | return (void *)ptr; | |
210 | } | |
211 | ||
212 | void * __must_check __kasan_krealloc(const void *object, | |
213 | size_t new_size, gfp_t flags); | |
214 | static __always_inline void * __must_check kasan_krealloc(const void *object, | |
215 | size_t new_size, gfp_t flags) | |
216 | { | |
217 | if (kasan_enabled()) | |
218 | return __kasan_krealloc(object, new_size, flags); | |
219 | return (void *)object; | |
220 | } | |
221 | ||
611806b4 AK |
222 | /* |
223 | * Unlike kasan_check_read/write(), kasan_check_byte() is performed even for | |
224 | * the hardware tag-based mode that doesn't rely on compiler instrumentation. | |
225 | */ | |
226 | bool __kasan_check_byte(const void *addr, unsigned long ip); | |
227 | static __always_inline bool kasan_check_byte(const void *addr) | |
228 | { | |
229 | if (kasan_enabled()) | |
230 | return __kasan_check_byte(addr, _RET_IP_); | |
231 | return true; | |
232 | } | |
233 | ||
0b24becc AR |
234 | #else /* CONFIG_KASAN */ |
235 | ||
cebd0eb2 | 236 | static inline void kasan_unpoison_range(const void *address, size_t size) {} |
7a3b8353 PC |
237 | static inline void kasan_poison_pages(struct page *page, unsigned int order, |
238 | bool init) {} | |
44383cef AK |
239 | static inline bool kasan_unpoison_pages(struct page *page, unsigned int order, |
240 | bool init) | |
241 | { | |
242 | return false; | |
243 | } | |
6e48a966 | 244 | static inline void kasan_poison_slab(struct slab *slab) {} |
0316bec2 AR |
245 | static inline void kasan_unpoison_object_data(struct kmem_cache *cache, |
246 | void *object) {} | |
247 | static inline void kasan_poison_object_data(struct kmem_cache *cache, | |
248 | void *object) {} | |
0116523c AK |
249 | static inline void *kasan_init_slab_obj(struct kmem_cache *cache, |
250 | const void *object) | |
251 | { | |
252 | return (void *)object; | |
253 | } | |
d57a964e | 254 | static inline bool kasan_slab_free(struct kmem_cache *s, void *object, bool init) |
34303244 AK |
255 | { |
256 | return false; | |
257 | } | |
200072ce | 258 | static inline void kasan_kfree_large(void *ptr) {} |
027b37b5 | 259 | static inline void kasan_slab_free_mempool(void *ptr) {} |
34303244 | 260 | static inline void *kasan_slab_alloc(struct kmem_cache *s, void *object, |
da844b78 | 261 | gfp_t flags, bool init) |
0116523c | 262 | { |
34303244 | 263 | return object; |
0116523c | 264 | } |
0116523c AK |
265 | static inline void *kasan_kmalloc(struct kmem_cache *s, const void *object, |
266 | size_t size, gfp_t flags) | |
267 | { | |
268 | return (void *)object; | |
269 | } | |
34303244 AK |
270 | static inline void *kasan_kmalloc_large(const void *ptr, size_t size, gfp_t flags) |
271 | { | |
272 | return (void *)ptr; | |
273 | } | |
0116523c AK |
274 | static inline void *kasan_krealloc(const void *object, size_t new_size, |
275 | gfp_t flags) | |
276 | { | |
277 | return (void *)object; | |
278 | } | |
611806b4 AK |
279 | static inline bool kasan_check_byte(const void *address) |
280 | { | |
281 | return true; | |
282 | } | |
9b75a867 | 283 | |
0b24becc AR |
284 | #endif /* CONFIG_KASAN */ |
285 | ||
#if defined(CONFIG_KASAN) && defined(CONFIG_KASAN_STACK)
void kasan_unpoison_task_stack(struct task_struct *task);
#else
static inline void kasan_unpoison_task_stack(struct task_struct *task) { }
#endif
2bd926b4 AK |
292 | #ifdef CONFIG_KASAN_GENERIC |
293 | ||
bbc61844 FT |
294 | struct kasan_cache { |
295 | int alloc_meta_offset; | |
296 | int free_meta_offset; | |
297 | }; | |
298 | ||
5d1ba310 | 299 | size_t kasan_metadata_size(struct kmem_cache *cache, bool in_object); |
3b7f8813 | 300 | slab_flags_t kasan_never_merge(void); |
682ed089 AK |
301 | void kasan_cache_create(struct kmem_cache *cache, unsigned int *size, |
302 | slab_flags_t *flags); | |
f372bde9 | 303 | |
2bd926b4 AK |
304 | void kasan_cache_shrink(struct kmem_cache *cache); |
305 | void kasan_cache_shutdown(struct kmem_cache *cache); | |
26e760c9 | 306 | void kasan_record_aux_stack(void *ptr); |
7cb3007c | 307 | void kasan_record_aux_stack_noalloc(void *ptr); |
2bd926b4 AK |
308 | |
309 | #else /* CONFIG_KASAN_GENERIC */ | |
310 | ||
f372bde9 | 311 | /* Tag-based KASAN modes do not use per-object metadata. */ |
5d1ba310 FT |
312 | static inline size_t kasan_metadata_size(struct kmem_cache *cache, |
313 | bool in_object) | |
f372bde9 AK |
314 | { |
315 | return 0; | |
316 | } | |
3b7f8813 AK |
317 | /* And thus nothing prevents cache merging. */ |
318 | static inline slab_flags_t kasan_never_merge(void) | |
319 | { | |
320 | return 0; | |
321 | } | |
682ed089 AK |
322 | /* And no cache-related metadata initialization is required. */ |
323 | static inline void kasan_cache_create(struct kmem_cache *cache, | |
324 | unsigned int *size, | |
325 | slab_flags_t *flags) {} | |
f372bde9 | 326 | |
2bd926b4 AK |
327 | static inline void kasan_cache_shrink(struct kmem_cache *cache) {} |
328 | static inline void kasan_cache_shutdown(struct kmem_cache *cache) {} | |
26e760c9 | 329 | static inline void kasan_record_aux_stack(void *ptr) {} |
7cb3007c | 330 | static inline void kasan_record_aux_stack_noalloc(void *ptr) {} |
2bd926b4 AK |
331 | |
332 | #endif /* CONFIG_KASAN_GENERIC */ | |
333 | ||
#if defined(CONFIG_KASAN_SW_TAGS) || defined(CONFIG_KASAN_HW_TAGS)

/* Strip the tag bits from a pointer (arch-specific implementation). */
static inline void *kasan_reset_tag(const void *addr)
{
	return (void *)arch_kasan_reset_tag(addr);
}

/**
 * kasan_report - print a report about a bad memory access detected by KASAN
 * @addr: address of the bad access
 * @size: size of the bad access
 * @is_write: whether the bad access is a write or a read
 * @ip: instruction pointer for the accessibility check or the bad access itself
 */
bool kasan_report(const void *addr, size_t size,
		  bool is_write, unsigned long ip);

#else /* CONFIG_KASAN_SW_TAGS || CONFIG_KASAN_HW_TAGS */

/* No tags in use: the pointer is already untagged. */
static inline void *kasan_reset_tag(const void *addr)
{
	return (void *)addr;
}

#endif /* CONFIG_KASAN_SW_TAGS || CONFIG_KASAN_HW_TAGS */

#ifdef CONFIG_KASAN_HW_TAGS

void kasan_report_async(void);

#endif /* CONFIG_KASAN_HW_TAGS */
#ifdef CONFIG_KASAN_SW_TAGS
void __init kasan_init_sw_tags(void);
#else
static inline void kasan_init_sw_tags(void) { }
#endif

#ifdef CONFIG_KASAN_HW_TAGS
void kasan_init_hw_tags_cpu(void);
void __init kasan_init_hw_tags(void);
#else
static inline void kasan_init_hw_tags_cpu(void) { }
static inline void kasan_init_hw_tags(void) { }
#endif
3c5c3cfb | 380 | #ifdef CONFIG_KASAN_VMALLOC |
3b1a4a86 | 381 | |
23689e91 AK |
382 | #if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS) |
383 | ||
5bd9bae2 | 384 | void kasan_populate_early_vm_area_shadow(void *start, unsigned long size); |
d98c9e83 | 385 | int kasan_populate_vmalloc(unsigned long addr, unsigned long size); |
3c5c3cfb DA |
386 | void kasan_release_vmalloc(unsigned long start, unsigned long end, |
387 | unsigned long free_region_start, | |
388 | unsigned long free_region_end); | |
3b1a4a86 | 389 | |
23689e91 AK |
390 | #else /* CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS */ |
391 | ||
392 | static inline void kasan_populate_early_vm_area_shadow(void *start, | |
393 | unsigned long size) | |
394 | { } | |
395 | static inline int kasan_populate_vmalloc(unsigned long start, | |
396 | unsigned long size) | |
397 | { | |
398 | return 0; | |
399 | } | |
400 | static inline void kasan_release_vmalloc(unsigned long start, | |
401 | unsigned long end, | |
402 | unsigned long free_region_start, | |
403 | unsigned long free_region_end) { } | |
404 | ||
405 | #endif /* CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS */ | |
406 | ||
407 | void *__kasan_unpoison_vmalloc(const void *start, unsigned long size, | |
408 | kasan_vmalloc_flags_t flags); | |
1d96320f | 409 | static __always_inline void *kasan_unpoison_vmalloc(const void *start, |
23689e91 AK |
410 | unsigned long size, |
411 | kasan_vmalloc_flags_t flags) | |
579fb0ac AK |
412 | { |
413 | if (kasan_enabled()) | |
23689e91 | 414 | return __kasan_unpoison_vmalloc(start, size, flags); |
1d96320f | 415 | return (void *)start; |
579fb0ac AK |
416 | } |
417 | ||
418 | void __kasan_poison_vmalloc(const void *start, unsigned long size); | |
419 | static __always_inline void kasan_poison_vmalloc(const void *start, | |
420 | unsigned long size) | |
421 | { | |
422 | if (kasan_enabled()) | |
423 | __kasan_poison_vmalloc(start, size); | |
424 | } | |
3252b1d8 | 425 | |
3b1a4a86 AK |
426 | #else /* CONFIG_KASAN_VMALLOC */ |
427 | ||
5bd9bae2 AK |
428 | static inline void kasan_populate_early_vm_area_shadow(void *start, |
429 | unsigned long size) { } | |
d98c9e83 AR |
430 | static inline int kasan_populate_vmalloc(unsigned long start, |
431 | unsigned long size) | |
3c5c3cfb DA |
432 | { |
433 | return 0; | |
434 | } | |
3c5c3cfb DA |
435 | static inline void kasan_release_vmalloc(unsigned long start, |
436 | unsigned long end, | |
437 | unsigned long free_region_start, | |
5bd9bae2 | 438 | unsigned long free_region_end) { } |
3b1a4a86 | 439 | |
1d96320f | 440 | static inline void *kasan_unpoison_vmalloc(const void *start, |
23689e91 AK |
441 | unsigned long size, |
442 | kasan_vmalloc_flags_t flags) | |
1d96320f AK |
443 | { |
444 | return (void *)start; | |
445 | } | |
5bd9bae2 | 446 | static inline void kasan_poison_vmalloc(const void *start, unsigned long size) |
3252b1d8 KW |
447 | { } |
448 | ||
3b1a4a86 AK |
449 | #endif /* CONFIG_KASAN_VMALLOC */ |
450 | ||
0fea6e9a AK |
451 | #if (defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)) && \ |
452 | !defined(CONFIG_KASAN_VMALLOC) | |
3b1a4a86 AK |
453 | |
454 | /* | |
63840de2 AK |
455 | * These functions allocate and free shadow memory for kernel modules. |
456 | * They are only required when KASAN_VMALLOC is not supported, as otherwise | |
457 | * shadow memory is allocated by the generic vmalloc handlers. | |
3b1a4a86 | 458 | */ |
63840de2 AK |
459 | int kasan_alloc_module_shadow(void *addr, size_t size, gfp_t gfp_mask); |
460 | void kasan_free_module_shadow(const struct vm_struct *vm); | |
3b1a4a86 | 461 | |
0fea6e9a | 462 | #else /* (CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS) && !CONFIG_KASAN_VMALLOC */ |
3b1a4a86 | 463 | |
63840de2 AK |
464 | static inline int kasan_alloc_module_shadow(void *addr, size_t size, gfp_t gfp_mask) { return 0; } |
465 | static inline void kasan_free_module_shadow(const struct vm_struct *vm) {} | |
3b1a4a86 | 466 | |
0fea6e9a | 467 | #endif /* (CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS) && !CONFIG_KASAN_VMALLOC */ |
3c5c3cfb | 468 | |
17c17567 | 469 | #if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS) |
2f004eea | 470 | void kasan_non_canonical_hook(unsigned long addr); |
17c17567 | 471 | #else /* CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS */ |
2f004eea | 472 | static inline void kasan_non_canonical_hook(unsigned long addr) { } |
17c17567 | 473 | #endif /* CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS */ |
2f004eea | 474 | |
0b24becc | 475 | #endif /* LINUX_KASAN_H */ |