/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_VMALLOC_H
#define _LINUX_VMALLOC_H

#include <linux/spinlock.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/llist.h>
#include <asm/page.h>		/* pgprot_t */
#include <linux/rbtree.h>
#include <linux/overflow.h>

#include <asm/vmalloc.h>

struct vm_area_struct;		/* vma defining user mapping in mm_types.h */
struct notifier_block;		/* in notifier.h */

/* bits in flags of vmalloc's vm_struct below */
#define VM_IOREMAP		0x00000001	/* ioremap() and friends */
#define VM_ALLOC		0x00000002	/* vmalloc() */
#define VM_MAP			0x00000004	/* vmap()ed pages */
#define VM_USERMAP		0x00000008	/* suitable for remap_vmalloc_range */
#define VM_DMA_COHERENT		0x00000010	/* dma_alloc_coherent */
#define VM_UNINITIALIZED	0x00000020	/* vm_struct is not fully initialized */
#define VM_NO_GUARD		0x00000040	/* ***DANGEROUS*** don't add guard page */
#define VM_KASAN		0x00000080	/* has allocated kasan shadow memory */
#define VM_FLUSH_RESET_PERMS	0x00000100	/* reset direct map and flush TLB on unmap, can't be freed in atomic context */
#define VM_MAP_PUT_PAGES	0x00000200	/* put pages and free array in vfree */
#define VM_ALLOW_HUGE_VMAP	0x00000400	/* Allow for huge pages on archs with HAVE_ARCH_HUGE_VMALLOC */

#if (defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)) && \
	!defined(CONFIG_KASAN_VMALLOC)
#define VM_DEFER_KMEMLEAK	0x00000800	/* defer kmemleak object creation */
#else
#define VM_DEFER_KMEMLEAK	0
#endif

/* bits [20..32] reserved for arch specific ioremap internals */

/*
 * Maximum alignment for ioremap() regions.
 * Can be overridden by arch-specific value.
 */
#ifndef IOREMAP_MAX_ORDER
#define IOREMAP_MAX_ORDER	(7 + PAGE_SHIFT)	/* 128 pages */
#endif
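
/*
 * Worked example (informative): with 4 KiB pages (PAGE_SHIFT == 12) the
 * default is IOREMAP_MAX_ORDER == 19, i.e. ioremap() alignment is capped
 * at 1 << 19 == 512 KiB == 128 pages.
 */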

struct vm_struct {
	struct vm_struct	*next;
	void			*addr;
	unsigned long		size;
	unsigned long		flags;
	struct page		**pages;
#ifdef CONFIG_HAVE_ARCH_HUGE_VMALLOC
	unsigned int		page_order;
#endif
	unsigned int		nr_pages;
	phys_addr_t		phys_addr;
	const void		*caller;
};

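/*
 * Illustrative sketch (not part of this header): inspecting the vm_struct
 * behind a vmalloc'ed pointer, e.g. for debug output.
 *
 *	struct vm_struct *area = find_vm_area(ptr);
 *
 *	if (area)
 *		pr_debug("%px: %lu bytes, %u pages, flags %#lx\n",
 *			 area->addr, area->size, area->nr_pages, area->flags);
 */
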
struct vmap_area {
	unsigned long va_start;
	unsigned long va_end;

	struct rb_node rb_node;		/* address sorted rbtree */
	struct list_head list;		/* address sorted list */

	/*
	 * The following two variables can be packed, because
	 * a vmap_area object can be either:
	 *    1) in "free" tree (root is free_vmap_area_root)
	 *    2) or "busy" tree (root is vmap_area_root)
	 */
	union {
		unsigned long subtree_max_size;	/* in "free" tree */
		struct vm_struct *vm;		/* in "busy" tree */
	};
};

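/*
 * Illustrative sketch (not part of this header): only "busy" areas are
 * backed by a vm_struct, so walkers must check va->vm before using it:
 *
 *	struct vmap_area *va = find_vmap_area(addr);
 *
 *	if (va && va->vm)
 *		pr_debug("area [%lx..%lx)\n", va->va_start, va->va_end);
 */
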
/* archs that select HAVE_ARCH_HUGE_VMAP should override one or more of these */
#ifndef arch_vmap_p4d_supported
static inline bool arch_vmap_p4d_supported(pgprot_t prot)
{
	return false;
}
#endif

#ifndef arch_vmap_pud_supported
static inline bool arch_vmap_pud_supported(pgprot_t prot)
{
	return false;
}
#endif

#ifndef arch_vmap_pmd_supported
static inline bool arch_vmap_pmd_supported(pgprot_t prot)
{
	return false;
}
#endif

#ifndef arch_vmap_pte_range_map_size
static inline unsigned long arch_vmap_pte_range_map_size(unsigned long addr, unsigned long end,
							 u64 pfn, unsigned int max_page_shift)
{
	return PAGE_SIZE;
}
#endif

#ifndef arch_vmap_pte_supported_shift
static inline int arch_vmap_pte_supported_shift(unsigned long size)
{
	return PAGE_SHIFT;
}
#endif

#ifndef arch_vmap_pgprot_tagged
static inline pgprot_t arch_vmap_pgprot_tagged(pgprot_t prot)
{
	return prot;
}
#endif

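/*
 * Illustrative sketch (not part of this header): an arch selecting
 * HAVE_ARCH_HUGE_VMAP overrides a hook in its <asm/vmalloc.h> by defining
 * the macro guard along with the function, roughly:
 *
 *	#define arch_vmap_pmd_supported arch_vmap_pmd_supported
 *	static inline bool arch_vmap_pmd_supported(pgprot_t prot)
 *	{
 *		return cpu_supports_pmd_leaves();	// hypothetical helper
 *	}
 */
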
/*
 * Highlevel APIs for driver use
 */
extern void vm_unmap_ram(const void *mem, unsigned int count);
extern void *vm_map_ram(struct page **pages, unsigned int count, int node);
extern void vm_unmap_aliases(void);

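/*
 * Illustrative sketch (not part of this header): vm_map_ram() creates a
 * transient mapping of pages the caller already owns; pair it with
 * vm_unmap_ram() using the same page count.
 *
 *	void *va = vm_map_ram(pages, nr_pages, NUMA_NO_NODE);
 *
 *	if (!va)
 *		return -ENOMEM;
 *	memcpy(va, src, len);
 *	vm_unmap_ram(va, nr_pages);
 */
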
#ifdef CONFIG_MMU
extern void __init vmalloc_init(void);
extern unsigned long vmalloc_nr_pages(void);
#else
static inline void vmalloc_init(void)
{
}
static inline unsigned long vmalloc_nr_pages(void) { return 0; }
#endif

extern void *vmalloc(unsigned long size) __alloc_size(1);
extern void *vzalloc(unsigned long size) __alloc_size(1);
extern void *vmalloc_user(unsigned long size) __alloc_size(1);
extern void *vmalloc_node(unsigned long size, int node) __alloc_size(1);
extern void *vzalloc_node(unsigned long size, int node) __alloc_size(1);
extern void *vmalloc_32(unsigned long size) __alloc_size(1);
extern void *vmalloc_32_user(unsigned long size) __alloc_size(1);
extern void *__vmalloc(unsigned long size, gfp_t gfp_mask) __alloc_size(1);
extern void *__vmalloc_node_range(unsigned long size, unsigned long align,
		unsigned long start, unsigned long end, gfp_t gfp_mask,
		pgprot_t prot, unsigned long vm_flags, int node,
		const void *caller) __alloc_size(1);
void *__vmalloc_node(unsigned long size, unsigned long align, gfp_t gfp_mask,
		int node, const void *caller) __alloc_size(1);
void *vmalloc_huge(unsigned long size, gfp_t gfp_mask) __alloc_size(1);

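/*
 * Illustrative sketch (not part of this header): vmalloc() and friends
 * return page-aligned, virtually contiguous (physically scattered) memory
 * and may sleep. A typical allocation of a hypothetical flex-array struct:
 *
 *	struct big_table *tbl = vzalloc(struct_size(tbl, rows, nr_rows));
 *
 *	if (!tbl)
 *		return -ENOMEM;
 *	...
 *	vfree(tbl);
 */
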
extern void *__vmalloc_array(size_t n, size_t size, gfp_t flags) __alloc_size(1, 2);
extern void *vmalloc_array(size_t n, size_t size) __alloc_size(1, 2);
extern void *__vcalloc(size_t n, size_t size, gfp_t flags) __alloc_size(1, 2);
extern void *vcalloc(size_t n, size_t size) __alloc_size(1, 2);

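/*
 * Informative: the array variants fold the n * size overflow check into
 * the allocator, so prefer
 *
 *	buf = vmalloc_array(nents, sizeof(*buf));
 *
 * over the overflow-prone open-coded vmalloc(nents * sizeof(*buf)).
 */
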
extern void vfree(const void *addr);
extern void vfree_atomic(const void *addr);

extern void *vmap(struct page **pages, unsigned int count,
			unsigned long flags, pgprot_t prot);
void *vmap_pfn(unsigned long *pfns, unsigned int count, pgprot_t prot);
extern void vunmap(const void *addr);

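/*
 * Illustrative sketch (not part of this header): vmap() builds a
 * long-lived contiguous view of caller-owned pages; vunmap() drops the
 * mapping without freeing the pages themselves.
 *
 *	void *va = vmap(pages, nr_pages, VM_MAP, PAGE_KERNEL);
 *
 *	if (!va)
 *		return -ENOMEM;
 *	...
 *	vunmap(va);
 */
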
extern int remap_vmalloc_range_partial(struct vm_area_struct *vma,
				       unsigned long uaddr, void *kaddr,
				       unsigned long pgoff, unsigned long size);

extern int remap_vmalloc_range(struct vm_area_struct *vma, void *addr,
				unsigned long pgoff);

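/*
 * Illustrative sketch (not part of this header): a driver mmap() handler
 * exposing a vmalloc_user() buffer (vmalloc_user() sets VM_USERMAP, which
 * remap_vmalloc_range() requires). foo_mmap/foo_buf are hypothetical:
 *
 *	static int foo_mmap(struct file *file, struct vm_area_struct *vma)
 *	{
 *		return remap_vmalloc_range(vma, foo_buf, vma->vm_pgoff);
 *	}
 */
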
/*
 * Architectures can set this mask to a combination of PGTBL_P?D_MODIFIED values
 * and let generic vmalloc and ioremap code know when arch_sync_kernel_mappings()
 * needs to be called.
 */
#ifndef ARCH_PAGE_TABLE_SYNC_MASK
#define ARCH_PAGE_TABLE_SYNC_MASK 0
#endif

/*
 * There is no default implementation for arch_sync_kernel_mappings(). The
 * compiler is relied upon to optimize the calls out when
 * ARCH_PAGE_TABLE_SYNC_MASK is 0.
 */
void arch_sync_kernel_mappings(unsigned long start, unsigned long end);

/*
 * Lowlevel-APIs (not for driver use!)
 */

static inline size_t get_vm_area_size(const struct vm_struct *area)
{
	if (!(area->flags & VM_NO_GUARD))
		/* return actual size without guard page */
		return area->size - PAGE_SIZE;
	else
		return area->size;
}

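/*
 * Worked example (informative): a vmalloc(PAGE_SIZE) area without
 * VM_NO_GUARD has area->size == 2 * PAGE_SIZE (payload plus one guard
 * page); get_vm_area_size() returns the usable PAGE_SIZE.
 */
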
extern struct vm_struct *get_vm_area(unsigned long size, unsigned long flags);
extern struct vm_struct *get_vm_area_caller(unsigned long size,
					unsigned long flags, const void *caller);
extern struct vm_struct *__get_vm_area_caller(unsigned long size,
					unsigned long flags,
					unsigned long start, unsigned long end,
					const void *caller);
void free_vm_area(struct vm_struct *area);
extern struct vm_struct *remove_vm_area(const void *addr);
extern struct vm_struct *find_vm_area(const void *addr);
struct vmap_area *find_vmap_area(unsigned long addr);

static inline bool is_vm_area_hugepages(const void *addr)
{
	/*
	 * This does not tell with certainty whether the area is mapped
	 * with > PAGE_SIZE page table entries: if the architecture
	 * indicates that larger sizes are available but decides not to
	 * use them, nothing prevents that. This only reports the size
	 * of the physical page allocated in the vmalloc layer.
	 */
#ifdef CONFIG_HAVE_ARCH_HUGE_VMALLOC
	return find_vm_area(addr)->page_order > 0;
#else
	return false;
#endif
}

#ifdef CONFIG_MMU
void vunmap_range(unsigned long addr, unsigned long end);

static inline void set_vm_flush_reset_perms(void *addr)
{
	struct vm_struct *vm = find_vm_area(addr);

	if (vm)
		vm->flags |= VM_FLUSH_RESET_PERMS;
}

#else
static inline void set_vm_flush_reset_perms(void *addr)
{
}
#endif

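/*
 * Illustrative sketch (not part of this header): callers changing the
 * permissions of vmalloc'ed memory mark the area first, so vfree() later
 * resets the direct map and flushes the TLB:
 *
 *	set_vm_flush_reset_perms(addr);
 *	set_memory_ro((unsigned long)addr, nr_pages);
 *	set_memory_x((unsigned long)addr, nr_pages);
 */
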
/* for /proc/kcore */
extern long vread(char *buf, char *addr, unsigned long count);

/*
 * Internals. Don't use.
 */
extern struct list_head vmap_area_list;
extern __init void vm_area_add_early(struct vm_struct *vm);
extern __init void vm_area_register_early(struct vm_struct *vm, size_t align);

#ifdef CONFIG_SMP
# ifdef CONFIG_MMU
struct vm_struct **pcpu_get_vm_areas(const unsigned long *offsets,
				     const size_t *sizes, int nr_vms,
				     size_t align);

void pcpu_free_vm_areas(struct vm_struct **vms, int nr_vms);
# else
static inline struct vm_struct **
pcpu_get_vm_areas(const unsigned long *offsets,
		const size_t *sizes, int nr_vms,
		size_t align)
{
	return NULL;
}

static inline void
pcpu_free_vm_areas(struct vm_struct **vms, int nr_vms)
{
}
# endif
#endif

#ifdef CONFIG_MMU
#define VMALLOC_TOTAL (VMALLOC_END - VMALLOC_START)
#else
#define VMALLOC_TOTAL 0UL
#endif

int register_vmap_purge_notifier(struct notifier_block *nb);
int unregister_vmap_purge_notifier(struct notifier_block *nb);

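/*
 * Illustrative sketch (not part of this header): a purge notifier lets a
 * heavy vmap user drop cached mappings when vmap space runs short.
 * foo_vmap_purge/foo_drop_cache are hypothetical:
 *
 *	static int foo_vmap_purge(struct notifier_block *nb,
 *				  unsigned long event, void *ptr)
 *	{
 *		foo_drop_cache();
 *		return NOTIFY_DONE;
 *	}
 *	static struct notifier_block foo_vmap_nb = {
 *		.notifier_call	= foo_vmap_purge,
 *	};
 *	...
 *	err = register_vmap_purge_notifier(&foo_vmap_nb);
 */
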
#if defined(CONFIG_MMU) && defined(CONFIG_PRINTK)
bool vmalloc_dump_obj(void *object);
#else
static inline bool vmalloc_dump_obj(void *object) { return false; }
#endif

#endif /* _LINUX_VMALLOC_H */