/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_SCHED_MM_H
#define _LINUX_SCHED_MM_H

#include <linux/kernel.h>
#include <linux/atomic.h>
#include <linux/sched.h>
#include <linux/mm_types.h>
#include <linux/gfp.h>

/*
 * Routines for handling mm_structs
 */
extern struct mm_struct *mm_alloc(void);

/**
 * mmgrab() - Pin a &struct mm_struct.
 * @mm: The &struct mm_struct to pin.
 *
 * Make sure that @mm will not get freed even after the owning task
 * exits. This doesn't guarantee that the associated address space
 * will still exist later on and mmget_not_zero() has to be used before
 * accessing it.
 *
 * This is the preferred way to pin @mm for a longer/unbounded amount
 * of time.
 *
 * Use mmdrop() to release the reference acquired by mmgrab().
 *
 * See also <Documentation/vm/active_mm.txt> for an in-depth explanation
 * of &mm_struct.mm_count vs &mm_struct.mm_users.
 */
static inline void mmgrab(struct mm_struct *mm)
{
	atomic_inc(&mm->mm_count);
}

/* mmdrop drops the mm and the page tables */
extern void __mmdrop(struct mm_struct *);
static inline void mmdrop(struct mm_struct *mm)
{
	if (unlikely(atomic_dec_and_test(&mm->mm_count)))
		__mmdrop(mm);
}
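
/*
 * Usage sketch (illustrative only, not kernel code; "my_ctx" and the
 * helpers below are invented names): a long-lived reference to an
 * mm_struct pairs mmgrab() with mmdrop(). The pin only keeps the
 * structure itself allocated, not the address space behind it.
 *
 *	struct my_ctx {
 *		struct mm_struct *mm;
 *	};
 *
 *	static void my_ctx_attach(struct my_ctx *ctx, struct mm_struct *mm)
 *	{
 *		mmgrab(mm);
 *		ctx->mm = mm;
 *	}
 *
 *	static void my_ctx_detach(struct my_ctx *ctx)
 *	{
 *		mmdrop(ctx->mm);
 *		ctx->mm = NULL;
 *	}
 */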

static inline void mmdrop_async_fn(struct work_struct *work)
{
	struct mm_struct *mm = container_of(work, struct mm_struct, async_put_work);
	__mmdrop(mm);
}

static inline void mmdrop_async(struct mm_struct *mm)
{
	if (unlikely(atomic_dec_and_test(&mm->mm_count))) {
		INIT_WORK(&mm->async_put_work, mmdrop_async_fn);
		schedule_work(&mm->async_put_work);
	}
}

/**
 * mmget() - Pin the address space associated with a &struct mm_struct.
 * @mm: The address space to pin.
 *
 * Make sure that the address space of the given &struct mm_struct doesn't
 * go away. This does not protect against parts of the address space being
 * modified or freed, however.
 *
 * Never use this function to pin this address space for an
 * unbounded/indefinite amount of time.
 *
 * Use mmput() to release the reference acquired by mmget().
 *
 * See also <Documentation/vm/active_mm.txt> for an in-depth explanation
 * of &mm_struct.mm_count vs &mm_struct.mm_users.
 */
static inline void mmget(struct mm_struct *mm)
{
	atomic_inc(&mm->mm_users);
}

static inline bool mmget_not_zero(struct mm_struct *mm)
{
	return atomic_inc_not_zero(&mm->mm_users);
}

/* mmput gets rid of the mappings and all user-space */
extern void mmput(struct mm_struct *);
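
/*
 * Usage sketch (illustrative only): before touching the address space
 * behind an mm_struct that is merely pinned with mmgrab(), take an
 * mm_users reference with mmget_not_zero() and drop it with mmput().
 * If mm_users has already reached zero, the address space is gone and
 * the operation must be skipped.
 *
 *	if (mmget_not_zero(mm)) {
 *		... access the address space ...
 *		mmput(mm);
 *	}
 */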
#ifdef CONFIG_MMU
/*
 * Same as mmput() but performs the slow path from the async context.
 * Can be called from atomic context as well.
 */
void mmput_async(struct mm_struct *);
#endif

/* Grab a reference to a task's mm, if it is not already going away */
extern struct mm_struct *get_task_mm(struct task_struct *task);
/*
 * Grab a reference to a task's mm, if it is not already going away
 * and ptrace_may_access() with the mode parameter passed to it
 * succeeds.
 */
extern struct mm_struct *mm_access(struct task_struct *task, unsigned int mode);
/* Remove the current task's stale references to the old mm_struct */
extern void mm_release(struct task_struct *, struct mm_struct *);
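
/*
 * Usage sketch (illustrative only): a typical short-lived use of a
 * foreign task's mm. get_task_mm() returns NULL if the mm is already
 * going away, e.g. when the task is a kernel thread or is exiting.
 *
 *	struct mm_struct *mm = get_task_mm(task);
 *
 *	if (mm) {
 *		... operate on the address space ...
 *		mmput(mm);
 *	}
 */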

#ifdef CONFIG_MEMCG
extern void mm_update_next_owner(struct mm_struct *mm);
#else
static inline void mm_update_next_owner(struct mm_struct *mm)
{
}
#endif /* CONFIG_MEMCG */

#ifdef CONFIG_MMU
extern void arch_pick_mmap_layout(struct mm_struct *mm);
extern unsigned long
arch_get_unmapped_area(struct file *, unsigned long, unsigned long,
		       unsigned long, unsigned long);
extern unsigned long
arch_get_unmapped_area_topdown(struct file *filp, unsigned long addr,
			       unsigned long len, unsigned long pgoff,
			       unsigned long flags);
#else
static inline void arch_pick_mmap_layout(struct mm_struct *mm) {}
#endif

static inline bool in_vfork(struct task_struct *tsk)
{
	bool ret;

	/*
	 * We need RCU to access ->real_parent if CLONE_VM was used along
	 * with CLONE_PARENT.
	 *
	 * We check real_parent->mm == tsk->mm because CLONE_VFORK does not
	 * imply CLONE_VM.
	 *
	 * CLONE_VFORK can be used with CLONE_PARENT/CLONE_THREAD and thus
	 * ->real_parent is not necessarily the task doing vfork(), so in
	 * theory we can't rely on task_lock() if we want to dereference it.
	 *
	 * And in this case we can't trust the real_parent->mm == tsk->mm
	 * check, it can be a false negative. But we do not care; if init or
	 * another oom-unkillable task does this, it should blame itself.
	 */
	rcu_read_lock();
	ret = tsk->vfork_done && tsk->real_parent->mm == tsk->mm;
	rcu_read_unlock();

	return ret;
}

/*
 * Applies per-task gfp context to the given allocation flags.
 * PF_MEMALLOC_NOIO implies GFP_NOIO
 * PF_MEMALLOC_NOFS implies GFP_NOFS
 */
static inline gfp_t current_gfp_context(gfp_t flags)
{
	/*
	 * NOIO implies both NOIO and NOFS and it is a weaker context
	 * so always make sure it takes precedence.
	 */
	if (unlikely(current->flags & PF_MEMALLOC_NOIO))
		flags &= ~(__GFP_IO | __GFP_FS);
	else if (unlikely(current->flags & PF_MEMALLOC_NOFS))
		flags &= ~__GFP_FS;
	return flags;
}
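
/*
 * For instance, with PF_MEMALLOC_NOFS set,
 * current_gfp_context(GFP_KERNEL) strips __GFP_FS, which is equivalent
 * to GFP_NOFS; with PF_MEMALLOC_NOIO set it strips both __GFP_IO and
 * __GFP_FS, which is equivalent to GFP_NOIO.
 */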

#ifdef CONFIG_LOCKDEP
extern void fs_reclaim_acquire(gfp_t gfp_mask);
extern void fs_reclaim_release(gfp_t gfp_mask);
#else
static inline void fs_reclaim_acquire(gfp_t gfp_mask) { }
static inline void fs_reclaim_release(gfp_t gfp_mask) { }
#endif

static inline unsigned int memalloc_noio_save(void)
{
	unsigned int flags = current->flags & PF_MEMALLOC_NOIO;
	current->flags |= PF_MEMALLOC_NOIO;
	return flags;
}

static inline void memalloc_noio_restore(unsigned int flags)
{
	current->flags = (current->flags & ~PF_MEMALLOC_NOIO) | flags;
}
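
/*
 * Usage sketch (illustrative only; my_reset_device() is an invented
 * name): mark a scope in which allocations must not start any I/O,
 * e.g. while handling an error on the very block device that would be
 * needed to complete such I/O. The allocator applies the restriction
 * via current_gfp_context().
 *
 *	unsigned int noio_flags = memalloc_noio_save();
 *
 *	err = my_reset_device(dev);
 *	memalloc_noio_restore(noio_flags);
 */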

static inline unsigned int memalloc_nofs_save(void)
{
	unsigned int flags = current->flags & PF_MEMALLOC_NOFS;
	current->flags |= PF_MEMALLOC_NOFS;
	return flags;
}

static inline void memalloc_nofs_restore(unsigned int flags)
{
	current->flags = (current->flags & ~PF_MEMALLOC_NOFS) | flags;
}
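
/*
 * Usage sketch (illustrative only; my_fs_commit() is an invented name):
 * filesystem code that holds locks which reclaim could recurse on,
 * e.g. while a transaction is open, can scope GFP_NOFS behaviour this
 * way instead of spreading GFP_NOFS through every callee.
 *
 *	unsigned int nofs_flags = memalloc_nofs_save();
 *
 *	err = my_fs_commit(sb);
 *	memalloc_nofs_restore(nofs_flags);
 */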

static inline unsigned int memalloc_noreclaim_save(void)
{
	unsigned int flags = current->flags & PF_MEMALLOC;
	current->flags |= PF_MEMALLOC;
	return flags;
}

static inline void memalloc_noreclaim_restore(unsigned int flags)
{
	current->flags = (current->flags & ~PF_MEMALLOC) | flags;
}
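
/*
 * Usage sketch (illustrative only): code that is itself making progress
 * on behalf of memory reclaim may set PF_MEMALLOC so its allocations
 * can dip into the memory reserves and never recurse into direct
 * reclaim. Use sparingly; exhausting the reserves can deadlock.
 *
 *	unsigned int noreclaim_flags = memalloc_noreclaim_save();
 *
 *	... write a page out to its backing store ...
 *	memalloc_noreclaim_restore(noreclaim_flags);
 */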

#ifdef CONFIG_MEMBARRIER
enum {
	MEMBARRIER_STATE_PRIVATE_EXPEDITED_READY = (1U << 0),
	MEMBARRIER_STATE_SWITCH_MM = (1U << 1),
};

static inline void membarrier_execve(struct task_struct *t)
{
	atomic_set(&t->mm->membarrier_state, 0);
}
#else
static inline void membarrier_execve(struct task_struct *t)
{
}
#endif

#endif /* _LINUX_SCHED_MM_H */