/* Annotated (git-blame) listing of include/linux/sched/task.h: columns are commit, line number, source. */
b2441318 | 1 | /* SPDX-License-Identifier: GPL-2.0 */ |
29930025 IM |
2 | #ifndef _LINUX_SCHED_TASK_H |
3 | #define _LINUX_SCHED_TASK_H | |
4 | ||
901b14bd IM |
5 | /* |
6 | * Interface between the scheduler and various task lifetime (fork()/exit()) | |
7 | * functionality: | |
8 | */ | |
9 | ||
#include <linux/sched.h>

/* Forward declarations: only pointers/references to these are needed here. */
struct task_struct;
struct rusage;
union thread_union;

/*
 * This serializes "schedule()" and also protects
 * the run-queue from deletions/modifications (but
 * _adding_ to the beginning of the run-queue has
 * a separate lock).
 */
extern rwlock_t tasklist_lock;
extern spinlock_t mmlist_lock;

/* The boot-time idle task ("swapper") and its stack/thread_info storage. */
extern union thread_union init_thread_union;
extern struct task_struct init_task;
27 | ||
#ifdef CONFIG_PROVE_RCU
/* Lockdep helper so RCU proofs can assert tasklist_lock is held. */
extern int lockdep_tasklist_lock_is_held(void);
#endif /* #ifdef CONFIG_PROVE_RCU */

/* Finish the context switch on a newly created task's first schedule. */
extern asmlinkage void schedule_tail(struct task_struct *prev);
extern void init_idle(struct task_struct *idle, int cpu);

/* Scheduler hooks for task creation and teardown (fork()/exit() paths). */
extern int sched_fork(unsigned long clone_flags, struct task_struct *p);
extern void sched_dead(struct task_struct *p);

/* Final step of exit: never returns to the caller. */
void __noreturn do_task_dead(void);

extern void proc_caches_init(void);

extern void release_task(struct task_struct * p);
43 | ||
#ifdef CONFIG_HAVE_COPY_THREAD_TLS
extern int copy_thread_tls(unsigned long, unsigned long, unsigned long,
			   struct task_struct *, unsigned long);
#else
extern int copy_thread(unsigned long, unsigned long, unsigned long,
		       struct task_struct *);

/*
 * Compatibility shim: architectures that have not opted into
 * copy_thread_tls() receive the TLS value via pt_regs instead, so the
 * C-level tls argument is deliberately ignored here.
 */
static inline int copy_thread_tls(unsigned long clone_flags, unsigned long sp,
				  unsigned long arg, struct task_struct *p,
				  unsigned long tls)
{
	return copy_thread(clone_flags, sp, arg, p);
}
#endif
extern void flush_thread(void);
61 | ||
/*
 * exit_thread() lets an architecture release per-thread state at exit;
 * architectures that keep none get an empty inline stub.
 */
#ifdef CONFIG_HAVE_EXIT_THREAD
extern void exit_thread(struct task_struct *tsk);
#else
static inline void exit_thread(struct task_struct *tsk) { }
#endif
extern void do_group_exit(int);
70 | ||
extern void exit_files(struct task_struct *);
extern void exit_itimers(struct signal_struct *);

/* Fork entry points; _do_fork additionally takes the TLS argument. */
extern long _do_fork(unsigned long, unsigned long, unsigned long, int __user *, int __user *, unsigned long);
extern long do_fork(unsigned long, unsigned long, unsigned long, int __user *, int __user *);
struct task_struct *fork_idle(int);
/* Spawn a kernel thread running fn(arg); returns its pid. */
extern pid_t kernel_thread(int (*fn)(void *), void *arg, unsigned long flags);
/* In-kernel wait4(); fills *ru when non-NULL (rusage forward-declared above). */
extern long kernel_wait4(pid_t, int *, int, struct rusage *);

/* Free a task_struct whose refcount handling is already complete. */
extern void free_task(struct task_struct *tsk);
/* sched_exec is called by processes performing an exec */
#ifdef CONFIG_SMP
extern void sched_exec(void);
#else
/*
 * Use do { } while (0) rather than a bare {} so the no-op expands safely
 * in every statement context — a bare {} followed by the caller's ';'
 * breaks an unbraced if/else ("if (x) sched_exec(); else ...").
 */
#define sched_exec() do { } while (0)
#endif
88 | ||
/* Take a reference on @tsk (evaluates tsk exactly once). */
#define get_task_struct(tsk) do { atomic_inc(&(tsk)->usage); } while(0)

extern void __put_task_struct(struct task_struct *t);

/*
 * Drop a reference on @t; the caller that drops ->usage to zero hands the
 * task to __put_task_struct() for release.
 */
static inline void put_task_struct(struct task_struct *t)
{
	if (atomic_dec_and_test(&t->usage))
		__put_task_struct(t);
}

/* NOTE(review): presumably an RCU-safe load of *ptask that tolerates the
 * task exiting concurrently — confirm semantics at the definition site. */
struct task_struct *task_rcu_dereference(struct task_struct **ptask);
/*
 * Size of task_struct: architectures that append dynamic state
 * (CONFIG_ARCH_WANTS_DYNAMIC_TASK_STRUCT) publish the size in a runtime
 * variable; otherwise it is the plain compile-time sizeof.
 */
#ifdef CONFIG_ARCH_WANTS_DYNAMIC_TASK_STRUCT
extern int arch_task_struct_size __read_mostly;
#else
# define arch_task_struct_size (sizeof(struct task_struct))
#endif
106 | ||
/*
 * Return the vm_struct backing @t's stack when stacks are vmapped
 * (CONFIG_VMAP_STACK), NULL otherwise.
 */
static inline struct vm_struct *task_stack_vm_area(const struct task_struct *t)
{
#ifdef CONFIG_VMAP_STACK
	return t->stack_vm_area;
#else
	return NULL;
#endif
}
118 | ||
/*
 * Protects ->fs, ->files, ->mm, ->group_info, ->comm, keyring
 * subscriptions and synchronises with wait4(). Also used in procfs. Also
 * pins the final release of task.io_context. Also protects ->cpuset and
 * ->cgroup.subsys[]. And ->vfork_done.
 *
 * Nests both inside and outside of read_lock(&tasklist_lock).
 * It must not be nested with write_lock_irq(&tasklist_lock),
 * neither inside nor outside.
 */
static inline void task_lock(struct task_struct *p)
{
	/* Acquire p->alloc_lock; see the ordering rules in the comment above. */
	spin_lock(&p->alloc_lock);
}

static inline void task_unlock(struct task_struct *p)
{
	spin_unlock(&p->alloc_lock);
}
138 | ||
29930025 | 139 | #endif /* _LINUX_SCHED_TASK_H */ |