/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_VMALLOC_H
#define _LINUX_VMALLOC_H

#include <linux/spinlock.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/llist.h>
#include <asm/page.h>		/* pgprot_t */
#include <linux/rbtree.h>
#include <linux/overflow.h>

#include <asm/vmalloc.h>

struct vm_area_struct;		/* vma defining user mapping in mm_types.h */
struct notifier_block;		/* in notifier.h */

/* bits in flags of vmalloc's vm_struct below */
#define VM_IOREMAP		0x00000001	/* ioremap() and friends */
#define VM_ALLOC		0x00000002	/* vmalloc() */
#define VM_MAP			0x00000004	/* vmap()ed pages */
#define VM_USERMAP		0x00000008	/* suitable for remap_vmalloc_range */
#define VM_DMA_COHERENT		0x00000010	/* dma_alloc_coherent */
#define VM_UNINITIALIZED	0x00000020	/* vm_struct is not fully initialized */
#define VM_NO_GUARD		0x00000040	/* ***DANGEROUS*** don't add guard page */
#define VM_KASAN		0x00000080	/* has allocated kasan shadow memory */
#define VM_FLUSH_RESET_PERMS	0x00000100	/* reset direct map and flush TLB on unmap, can't be freed in atomic context */
#define VM_MAP_PUT_PAGES	0x00000200	/* put pages and free array in vfree */
#define VM_ALLOW_HUGE_VMAP	0x00000400	/* Allow for huge pages on archs with HAVE_ARCH_HUGE_VMALLOC */

#if (defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)) && \
	!defined(CONFIG_KASAN_VMALLOC)
#define VM_DEFER_KMEMLEAK	0x00000800	/* defer kmemleak object creation */
#else
#define VM_DEFER_KMEMLEAK	0
#endif

/* bits [20..32] reserved for arch specific ioremap internals */

/*
 * Maximum alignment for ioremap() regions.
 * Can be overridden by arch-specific value.
 */
#ifndef IOREMAP_MAX_ORDER
#define IOREMAP_MAX_ORDER	(7 + PAGE_SHIFT)	/* 128 pages */
#endif

struct vm_struct {
	struct vm_struct	*next;
	void			*addr;
	unsigned long		size;
	unsigned long		flags;
	struct page		**pages;
#ifdef CONFIG_HAVE_ARCH_HUGE_VMALLOC
	unsigned int		page_order;
#endif
	unsigned int		nr_pages;
	phys_addr_t		phys_addr;
	const void		*caller;
};

struct vmap_area {
	unsigned long va_start;
	unsigned long va_end;

	struct rb_node rb_node;		/* address sorted rbtree */
	struct list_head list;		/* address sorted list */

	/*
	 * The following two variables can be packed, because
	 * a vmap_area object can be either:
	 *    1) in "free" tree (root is free_vmap_area_root)
	 *    2) or "busy" tree (root is vmap_area_root)
	 */
	union {
		unsigned long subtree_max_size;	/* in "free" tree */
		struct vm_struct *vm;		/* in "busy" tree */
	};
};

/* archs that select HAVE_ARCH_HUGE_VMAP should override one or more of these */
#ifndef arch_vmap_p4d_supported
static inline bool arch_vmap_p4d_supported(pgprot_t prot)
{
	return false;
}
#endif

#ifndef arch_vmap_pud_supported
static inline bool arch_vmap_pud_supported(pgprot_t prot)
{
	return false;
}
#endif

#ifndef arch_vmap_pmd_supported
static inline bool arch_vmap_pmd_supported(pgprot_t prot)
{
	return false;
}
#endif

#ifndef arch_vmap_pte_range_map_size
static inline unsigned long arch_vmap_pte_range_map_size(unsigned long addr, unsigned long end,
							 u64 pfn, unsigned int max_page_shift)
{
	return PAGE_SIZE;
}
#endif

#ifndef arch_vmap_pte_supported_shift
static inline int arch_vmap_pte_supported_shift(unsigned long size)
{
	return PAGE_SHIFT;
}
#endif

#ifndef arch_vmap_pgprot_tagged
static inline pgprot_t arch_vmap_pgprot_tagged(pgprot_t prot)
{
	return prot;
}
#endif

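/*
 * Illustrative sketch only: an architecture that selects
 * CONFIG_HAVE_ARCH_HUGE_VMAP would normally provide its own versions of
 * the hooks above from <asm/vmalloc.h>, roughly along the lines of the
 * hypothetical override below. The condition is made up; a real
 * architecture would check its own page-table capabilities and prot bits.
 *
 *	#define arch_vmap_pmd_supported arch_vmap_pmd_supported
 *	static inline bool arch_vmap_pmd_supported(pgprot_t prot)
 *	{
 *		// e.g. allow PMD-sized vmalloc mappings unconditionally
 *		return true;
 *	}
 */
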
/*
 * High-level APIs for driver use
 */
extern void vm_unmap_ram(const void *mem, unsigned int count);
extern void *vm_map_ram(struct page **pages, unsigned int count, int node);
extern void vm_unmap_aliases(void);

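/*
 * Example (illustrative sketch, not part of this header): temporarily
 * mapping an array of pages that a driver already holds references to.
 * The identifiers pages and nr_pages are hypothetical.
 *
 *	void *va = vm_map_ram(pages, nr_pages, NUMA_NO_NODE);
 *	if (!va)
 *		return -ENOMEM;
 *	// ... access the pages through the contiguous mapping at va ...
 *	vm_unmap_ram(va, nr_pages);
 */
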
#ifdef CONFIG_MMU
extern void __init vmalloc_init(void);
extern unsigned long vmalloc_nr_pages(void);
#else
static inline void vmalloc_init(void)
{
}
static inline unsigned long vmalloc_nr_pages(void) { return 0; }
#endif

extern void *vmalloc(unsigned long size) __alloc_size(1);
extern void *vzalloc(unsigned long size) __alloc_size(1);
extern void *vmalloc_user(unsigned long size) __alloc_size(1);
extern void *vmalloc_node(unsigned long size, int node) __alloc_size(1);
extern void *vzalloc_node(unsigned long size, int node) __alloc_size(1);
extern void *vmalloc_32(unsigned long size) __alloc_size(1);
extern void *vmalloc_32_user(unsigned long size) __alloc_size(1);
extern void *__vmalloc(unsigned long size, gfp_t gfp_mask) __alloc_size(1);
extern void *__vmalloc_node_range(unsigned long size, unsigned long align,
			unsigned long start, unsigned long end, gfp_t gfp_mask,
			pgprot_t prot, unsigned long vm_flags, int node,
			const void *caller) __alloc_size(1);
void *__vmalloc_node(unsigned long size, unsigned long align, gfp_t gfp_mask,
		int node, const void *caller) __alloc_size(1);
void *vmalloc_huge(unsigned long size, gfp_t gfp_mask) __alloc_size(1);

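/*
 * Example (illustrative sketch): allocating a large, virtually contiguous
 * zeroed buffer from process context and releasing it later. The struct
 * name and element count are hypothetical; array_size() comes from
 * <linux/overflow.h>, which is included above.
 *
 *	struct foo *table = vzalloc(array_size(nr_entries, sizeof(*table)));
 *	if (!table)
 *		return -ENOMEM;
 *	// ... use table; prefer vfree_atomic() when freeing from a
 *	// context that cannot sleep ...
 *	vfree(table);
 */
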
extern void *__vmalloc_array(size_t n, size_t size, gfp_t flags) __alloc_size(1, 2);
extern void *vmalloc_array(size_t n, size_t size) __alloc_size(1, 2);
extern void *__vcalloc(size_t n, size_t size, gfp_t flags) __alloc_size(1, 2);
extern void *vcalloc(size_t n, size_t size) __alloc_size(1, 2);

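/*
 * Example (illustrative sketch): the *_array()/*calloc() variants fold the
 * element-count multiplication and its overflow check into the allocator,
 * mirroring kmalloc_array()/kcalloc(). The identifiers are hypothetical.
 *
 *	struct bar *slots = vcalloc(nr_slots, sizeof(*slots));
 *	if (!slots)
 *		return -ENOMEM;
 *	// ...
 *	vfree(slots);
 */
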
extern void vfree(const void *addr);
extern void vfree_atomic(const void *addr);

extern void *vmap(struct page **pages, unsigned int count,
			unsigned long flags, pgprot_t prot);
void *vmap_pfn(unsigned long *pfns, unsigned int count, pgprot_t prot);
extern void vunmap(const void *addr);

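/*
 * Example (illustrative sketch): making a long-lived virtually contiguous
 * mapping of pages the caller already owns. VM_MAP and PAGE_KERNEL are
 * real flags/protections; pages and nr_pages are hypothetical.
 *
 *	void *va = vmap(pages, nr_pages, VM_MAP, PAGE_KERNEL);
 *	if (!va)
 *		return -ENOMEM;
 *	// ...
 *	vunmap(va);	// does not drop the caller's page references
 */
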
extern int remap_vmalloc_range_partial(struct vm_area_struct *vma,
				       unsigned long uaddr, void *kaddr,
				       unsigned long pgoff, unsigned long size);

extern int remap_vmalloc_range(struct vm_area_struct *vma, void *addr,
							unsigned long pgoff);

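/*
 * Example (illustrative sketch): exposing a buffer to userspace from a
 * driver's ->mmap() handler. foo_mmap and buf are hypothetical; buf is
 * assumed to have been allocated with vmalloc_user(), which sets
 * VM_USERMAP as required by remap_vmalloc_range().
 *
 *	static int foo_mmap(struct file *file, struct vm_area_struct *vma)
 *	{
 *		// map the whole buffer starting at its first page
 *		return remap_vmalloc_range(vma, buf, 0);
 *	}
 */
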
/*
 * Architectures can set this mask to a combination of PGTBL_P?D_MODIFIED values
 * and let generic vmalloc and ioremap code know when arch_sync_kernel_mappings()
 * needs to be called.
 */
#ifndef ARCH_PAGE_TABLE_SYNC_MASK
#define ARCH_PAGE_TABLE_SYNC_MASK 0
#endif

/*
 * There is no default implementation for arch_sync_kernel_mappings(). The
 * compiler is relied upon to optimize the calls out if
 * ARCH_PAGE_TABLE_SYNC_MASK is 0.
 */
void arch_sync_kernel_mappings(unsigned long start, unsigned long end);

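/*
 * Illustrative sketch only (not taken from a real architecture): an arch
 * whose per-process page tables need kernel entries copied in might wire
 * this up roughly as follows, with PGTBL_PMD_MODIFIED coming from
 * <linux/pgtable.h>.
 *
 *	// in an arch header:
 *	#define ARCH_PAGE_TABLE_SYNC_MASK	PGTBL_PMD_MODIFIED
 *
 *	// in arch code:
 *	void arch_sync_kernel_mappings(unsigned long start, unsigned long end)
 *	{
 *		// propagate the new kernel PMD entries covering
 *		// [start, end) into every process's page tables
 *	}
 */
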
/*
 * Low-level APIs (not for driver use!)
 */

static inline size_t get_vm_area_size(const struct vm_struct *area)
{
	if (!(area->flags & VM_NO_GUARD))
		/* return actual size without guard page */
		return area->size - PAGE_SIZE;
	else
		return area->size;
}

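/*
 * Worked example (illustrative): a three-page allocation made without
 * VM_NO_GUARD has area->size == 4 * PAGE_SIZE, because one trailing guard
 * page is accounted in the area; get_vm_area_size() returns the usable
 * 3 * PAGE_SIZE.
 */
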
extern struct vm_struct *get_vm_area(unsigned long size, unsigned long flags);
extern struct vm_struct *get_vm_area_caller(unsigned long size,
					unsigned long flags, const void *caller);
extern struct vm_struct *__get_vm_area_caller(unsigned long size,
					unsigned long flags,
					unsigned long start, unsigned long end,
					const void *caller);
void free_vm_area(struct vm_struct *area);
extern struct vm_struct *remove_vm_area(const void *addr);
extern struct vm_struct *find_vm_area(const void *addr);
struct vmap_area *find_vmap_area(unsigned long addr);

static inline bool is_vm_area_hugepages(const void *addr)
{
	/*
	 * This may not tell with certainty whether the area is mapped
	 * with > PAGE_SIZE page table entries: even when an architecture
	 * indicates that larger sizes are available, nothing prevents it
	 * from deciding not to use them. This only indicates the size of
	 * the physical pages allocated in the vmalloc layer.
	 */
#ifdef CONFIG_HAVE_ARCH_HUGE_VMALLOC
	return find_vm_area(addr)->page_order > 0;
#else
	return false;
#endif
}

#ifdef CONFIG_MMU
void vunmap_range(unsigned long addr, unsigned long end);
static inline void set_vm_flush_reset_perms(void *addr)
{
	struct vm_struct *vm = find_vm_area(addr);

	if (vm)
		vm->flags |= VM_FLUSH_RESET_PERMS;
}

#else
static inline void set_vm_flush_reset_perms(void *addr)
{
}
#endif

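/*
 * Example (illustrative sketch): code that later changes the permissions
 * of a vmalloc'ed region (e.g. with the set_memory_*() helpers from
 * <asm/set_memory.h>) is expected to mark the area first, so that vfree()
 * resets the direct map and flushes the TLB. buf and size are
 * hypothetical.
 *
 *	void *buf = __vmalloc(size, GFP_KERNEL);
 *	if (!buf)
 *		return NULL;
 *	set_vm_flush_reset_perms(buf);
 *	// ... apply set_memory_*() changes, then later simply vfree(buf) ...
 */
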
/* for /proc/kcore */
extern long vread(char *buf, char *addr, unsigned long count);

/*
 * Internals. Don't use.
 */
extern struct list_head vmap_area_list;
extern __init void vm_area_add_early(struct vm_struct *vm);
extern __init void vm_area_register_early(struct vm_struct *vm, size_t align);

#ifdef CONFIG_SMP
# ifdef CONFIG_MMU
struct vm_struct **pcpu_get_vm_areas(const unsigned long *offsets,
				     const size_t *sizes, int nr_vms,
				     size_t align);

void pcpu_free_vm_areas(struct vm_struct **vms, int nr_vms);
# else
static inline struct vm_struct **
pcpu_get_vm_areas(const unsigned long *offsets,
		const size_t *sizes, int nr_vms,
		size_t align)
{
	return NULL;
}

static inline void
pcpu_free_vm_areas(struct vm_struct **vms, int nr_vms)
{
}
# endif
#endif

#ifdef CONFIG_MMU
#define VMALLOC_TOTAL (VMALLOC_END - VMALLOC_START)
#else
#define VMALLOC_TOTAL 0UL
#endif

int register_vmap_purge_notifier(struct notifier_block *nb);
int unregister_vmap_purge_notifier(struct notifier_block *nb);

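/*
 * Example (illustrative sketch): a driver that caches vmap space can drop
 * its mappings when the vmap allocator purges lazily freed areas. The
 * callback and its registration are hypothetical; struct notifier_block
 * and NOTIFY_DONE come from <linux/notifier.h>.
 *
 *	static int foo_vmap_purge(struct notifier_block *nb,
 *				  unsigned long event, void *ptr)
 *	{
 *		// release cached vmap()/vm_map_ram() mappings here
 *		return NOTIFY_DONE;
 *	}
 *	static struct notifier_block foo_vmap_purge_nb = {
 *		.notifier_call = foo_vmap_purge,
 *	};
 *	...
 *	register_vmap_purge_notifier(&foo_vmap_purge_nb);
 */
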
#if defined(CONFIG_MMU) && defined(CONFIG_PRINTK)
bool vmalloc_dump_obj(void *object);
#else
static inline bool vmalloc_dump_obj(void *object) { return false; }
#endif

#endif	/* _LINUX_VMALLOC_H */