/* SPDX-License-Identifier: GPL-2.0 */
/*
 * NUMA memory policies for Linux.
 * Copyright 2003,2004 Andi Kleen SuSE Labs
 */
#ifndef _LINUX_MEMPOLICY_H
#define _LINUX_MEMPOLICY_H 1

#include <linux/mmzone.h>
#include <linux/dax.h>
#include <linux/slab.h>
#include <linux/rbtree.h>
#include <linux/spinlock.h>
#include <linux/nodemask.h>
#include <linux/pagemap.h>
#include <uapi/linux/mempolicy.h>

struct mm_struct;

#ifdef CONFIG_NUMA
/*
 * Describe a memory policy.
 *
 * A mempolicy can be either associated with a process or with a VMA.
 * For VMA related allocations the VMA policy is preferred, otherwise
 * the process policy is used. Interrupts ignore the memory policy
 * of the current process.
 *
 * Locking policy for interleave:
 * In process context there is no locking because only the process accesses
 * its own state. All vma manipulation is somewhat protected by a down_read on
 * mmap_lock.
 *
 * Freeing policy:
 * Mempolicy objects are reference counted.  A mempolicy will be freed when
 * mpol_put() decrements the reference count to zero.  A lifecycle sketch
 * follows mpol_dup() below.
 *
 * Duplicating policy objects:
 * mpol_dup() allocates a new mempolicy and copies the specified mempolicy
 * to the new storage.  The reference count of the new object is initialized
 * to 1, representing the caller of mpol_dup().
 */
struct mempolicy {
	atomic_t refcnt;
	unsigned short mode;	/* See MPOL_* above */
	unsigned short flags;	/* See set_mempolicy() MPOL_F_* above */
	union {
		short		 preferred_node;	/* preferred */
		nodemask_t	 nodes;			/* interleave/bind */
		/* undefined for default */
	} v;
	union {
		nodemask_t cpuset_mems_allowed;	/* relative to these nodes */
		nodemask_t user_nodemask;	/* nodemask passed by user */
	} w;
};
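
/*
 * Illustrative sketch, not part of the original header: which union
 * member is live depends on ->mode.  v.preferred_node is used by
 * MPOL_PREFERRED, v.nodes by MPOL_BIND and MPOL_INTERLEAVE, and
 * MPOL_DEFAULT uses neither.  example_policy_nodes() is a hypothetical
 * helper, not a kernel API.
 */
static inline nodemask_t *example_policy_nodes(struct mempolicy *pol)
{
	switch (pol->mode) {
	case MPOL_BIND:
	case MPOL_INTERLEAVE:
		return &pol->v.nodes;		/* nodemask is meaningful */
	default:
		return NULL;			/* no nodemask to report */
	}
}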
/*
 * Support for managing mempolicy data objects (clone, copy, destroy)
 * The default fast path of a NULL MPOL_DEFAULT policy is always inlined.
 */

extern void __mpol_put(struct mempolicy *pol);
static inline void mpol_put(struct mempolicy *pol)
{
	if (pol)
		__mpol_put(pol);
}
/*
 * Does mempolicy pol need explicit unref after use?
 * Currently only needed for shared policies.
 */
static inline int mpol_needs_cond_ref(struct mempolicy *pol)
{
	return (pol && (pol->flags & MPOL_F_SHARED));
}
static inline void mpol_cond_put(struct mempolicy *pol)
{
	if (mpol_needs_cond_ref(pol))
		__mpol_put(pol);
}
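
/*
 * Illustrative sketch, not part of the original header: pairing a
 * conditional reference with mpol_cond_put().  Only policies tagged
 * MPOL_F_SHARED (e.g. those returned by mpol_shared_policy_lookup(),
 * declared further down) carry a reference the caller must drop;
 * borrowed task/VMA policies are left alone.  example_policy_mode()
 * is a hypothetical helper.
 */
static inline unsigned short example_policy_mode(struct mempolicy *pol)
{
	unsigned short mode = pol ? pol->mode : MPOL_DEFAULT;

	mpol_cond_put(pol);	/* no-op unless pol has MPOL_F_SHARED */
	return mode;
}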
extern struct mempolicy *__mpol_dup(struct mempolicy *pol);
static inline struct mempolicy *mpol_dup(struct mempolicy *pol)
{
	if (pol)
		pol = __mpol_dup(pol);
	return pol;
}
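
/*
 * Illustrative sketch, not part of the original header: the
 * dup/use/put lifecycle described at the top of this file.  mpol_dup()
 * returns a copy with refcnt == 1 owned by the caller (or an ERR_PTR
 * on allocation failure), and the owner releases it with mpol_put().
 * example_clone_policy() is a hypothetical helper.
 */
static inline struct mempolicy *example_clone_policy(struct mempolicy *src)
{
	struct mempolicy *copy = mpol_dup(src);

	if (IS_ERR(copy))
		return NULL;		/* allocation failed */
	return copy;			/* caller must mpol_put() it */
}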
#define vma_policy(vma) ((vma)->vm_policy)

static inline void mpol_get(struct mempolicy *pol)
{
	if (pol)
		atomic_inc(&pol->refcnt);
}
extern bool __mpol_equal(struct mempolicy *a, struct mempolicy *b);
static inline bool mpol_equal(struct mempolicy *a, struct mempolicy *b)
{
	if (a == b)
		return true;
	return __mpol_equal(a, b);
}
/*
 * Tree of shared policies for a shared memory region.
 * Maintain the policies in a pseudo mm that contains vmas. The vmas
 * carry the policy. As a special twist the pseudo mm is indexed in pages, not
 * bytes, so that we can work with shared memory segments bigger than
 * unsigned long.
 */

struct sp_node {
	struct rb_node nd;
	unsigned long start, end;
	struct mempolicy *policy;
};

struct shared_policy {
	struct rb_root root;
	rwlock_t lock;
};
int vma_dup_policy(struct vm_area_struct *src, struct vm_area_struct *dst);
void mpol_shared_policy_init(struct shared_policy *sp, struct mempolicy *mpol);
int mpol_set_shared_policy(struct shared_policy *info,
				struct vm_area_struct *vma,
				struct mempolicy *new);
void mpol_free_shared_policy(struct shared_policy *p);
struct mempolicy *mpol_shared_policy_lookup(struct shared_policy *sp,
					    unsigned long idx);
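
/*
 * Illustrative sketch, not part of the original header: because the
 * pseudo mm above is indexed in pages, lookups take a page offset
 * (pgoff), not a byte offset.  The returned policy, if any, is
 * MPOL_F_SHARED and referenced, so it pairs with mpol_cond_put().
 * example_shmem_policy() is a hypothetical helper.
 */
static inline struct mempolicy *example_shmem_policy(struct shared_policy *sp,
						     pgoff_t pgoff)
{
	return mpol_shared_policy_lookup(sp, pgoff);	/* may return NULL */
}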
struct mempolicy *get_task_policy(struct task_struct *p);
struct mempolicy *__get_vma_policy(struct vm_area_struct *vma,
		unsigned long addr);
struct mempolicy *get_vma_policy(struct vm_area_struct *vma,
		unsigned long addr);
bool vma_policy_mof(struct vm_area_struct *vma);
extern void numa_default_policy(void);
extern void numa_policy_init(void);
extern void mpol_rebind_task(struct task_struct *tsk, const nodemask_t *new);
extern void mpol_rebind_mm(struct mm_struct *mm, nodemask_t *new);
extern int huge_node(struct vm_area_struct *vma,
				unsigned long addr, gfp_t gfp_flags,
				struct mempolicy **mpol, nodemask_t **nodemask);
extern bool init_nodemask_of_mempolicy(nodemask_t *mask);
extern bool mempolicy_nodemask_intersects(struct task_struct *tsk,
				const nodemask_t *mask);
extern unsigned int mempolicy_slab_node(void);
extern enum zone_type policy_zone;

static inline void check_highest_zone(enum zone_type k)
{
	if (k > policy_zone && k != ZONE_MOVABLE)
		policy_zone = k;
}
int do_migrate_pages(struct mm_struct *mm, const nodemask_t *from,
		     const nodemask_t *to, int flags);
#ifdef CONFIG_TMPFS
extern int mpol_parse_str(char *str, struct mempolicy **mpol);
#endif

extern void mpol_to_str(char *buffer, int maxlen, struct mempolicy *pol);
/* Check if a vma is migratable */
static inline bool vma_migratable(struct vm_area_struct *vma)
{
	if (vma->vm_flags & (VM_IO | VM_PFNMAP))
		return false;

	/*
	 * DAX device mappings require predictable access latency, so avoid
	 * incurring periodic faults.
	 */
	if (vma_is_dax(vma))
		return false;

#ifndef CONFIG_ARCH_ENABLE_HUGEPAGE_MIGRATION
	if (vma->vm_flags & VM_HUGETLB)
		return false;
#endif

	/*
	 * Migration allocates pages in the highest zone. If we cannot
	 * do so then migration (at least from node to node) is not
	 * possible.
	 */
	if (vma->vm_file &&
		gfp_zone(mapping_gfp_mask(vma->vm_file->f_mapping))
								< policy_zone)
		return false;
	return true;
}
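
/*
 * Illustrative sketch, not part of the original header: callers
 * typically use vma_migratable() to skip unmovable mappings while
 * walking an mm.  The vm_next walk below matches pre-maple-tree
 * kernels; real callers hold mmap_lock.  example_count_migratable()
 * is a hypothetical helper.
 */
static inline unsigned long example_count_migratable(struct mm_struct *mm)
{
	struct vm_area_struct *vma;
	unsigned long n = 0;

	for (vma = mm->mmap; vma; vma = vma->vm_next)
		if (vma_migratable(vma))
			n++;
	return n;
}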
extern int mpol_misplaced(struct page *, struct vm_area_struct *, unsigned long);
extern void mpol_put_task_policy(struct task_struct *);

#else

struct mempolicy {};
static inline bool mpol_equal(struct mempolicy *a, struct mempolicy *b)
{
	return true;
}

static inline void mpol_put(struct mempolicy *p)
{
}

static inline void mpol_cond_put(struct mempolicy *pol)
{
}

static inline void mpol_get(struct mempolicy *pol)
{
}
struct shared_policy {};

static inline void mpol_shared_policy_init(struct shared_policy *sp,
						struct mempolicy *mpol)
{
}

static inline void mpol_free_shared_policy(struct shared_policy *p)
{
}

static inline struct mempolicy *
mpol_shared_policy_lookup(struct shared_policy *sp, unsigned long idx)
{
	return NULL;
}
#define vma_policy(vma) NULL

static inline int
vma_dup_policy(struct vm_area_struct *src, struct vm_area_struct *dst)
{
	return 0;
}
static inline void numa_policy_init(void)
{
}

static inline void numa_default_policy(void)
{
}

static inline void mpol_rebind_task(struct task_struct *tsk,
				const nodemask_t *new)
{
}

static inline void mpol_rebind_mm(struct mm_struct *mm, nodemask_t *new)
{
}
static inline int huge_node(struct vm_area_struct *vma,
				unsigned long addr, gfp_t gfp_flags,
				struct mempolicy **mpol, nodemask_t **nodemask)
{
	*mpol = NULL;
	*nodemask = NULL;
	return 0;
}

static inline bool init_nodemask_of_mempolicy(nodemask_t *m)
{
	return false;
}
static inline int do_migrate_pages(struct mm_struct *mm, const nodemask_t *from,
				   const nodemask_t *to, int flags)
{
	return 0;
}

static inline void check_highest_zone(int k)
{
}
#ifdef CONFIG_TMPFS
static inline int mpol_parse_str(char *str, struct mempolicy **mpol)
{
	return 1;	/* error */
}
#endif
static inline int mpol_misplaced(struct page *page, struct vm_area_struct *vma,
				 unsigned long address)
{
	return -1; /* no node preference */
}

static inline void mpol_put_task_policy(struct task_struct *task)
{
}

#endif /* CONFIG_NUMA */
#endif