/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_SCHED_SIGNAL_H
#define _LINUX_SCHED_SIGNAL_H

#include <linux/rculist.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/sched/jobctl.h>
#include <linux/sched/task.h>
#include <linux/cred.h>
#include <linux/refcount.h>
#include <linux/posix-timers.h>
#include <linux/mm_types.h>
#include <asm/ptrace.h>

/*
 * Types defining task->signal and task->sighand and APIs using them:
 */

struct sighand_struct {
	spinlock_t siglock;
	refcount_t count;
	wait_queue_head_t signalfd_wqh;
	struct k_sigaction action[_NSIG];
};

/*
 * Per-process accounting stats:
 */
struct pacct_struct {
	int ac_flag;
	long ac_exitcode;
	unsigned long ac_mem;
	u64 ac_utime, ac_stime;
	unsigned long ac_minflt, ac_majflt;
};

struct cpu_itimer {
	u64 expires;
	u64 incr;
};

/*
 * This is the atomic variant of task_cputime, which can be used for
 * storing and updating task_cputime statistics without locking.
 */
struct task_cputime_atomic {
	atomic64_t utime;
	atomic64_t stime;
	atomic64_t sum_exec_runtime;
};

#define INIT_CPUTIME_ATOMIC \
	(struct task_cputime_atomic) {			\
		.utime = ATOMIC64_INIT(0),		\
		.stime = ATOMIC64_INIT(0),		\
		.sum_exec_runtime = ATOMIC64_INIT(0),	\
	}
/**
 * struct thread_group_cputimer - thread group interval timer counts
 * @cputime_atomic: atomic thread group interval timers.
 *
 * This structure contains the version of task_cputime, above, that is
 * used for thread group CPU timer calculations.
 */
struct thread_group_cputimer {
	struct task_cputime_atomic cputime_atomic;
};

struct multiprocess_signals {
	sigset_t signal;
	struct hlist_node node;
};

struct core_thread {
	struct task_struct *task;
	struct core_thread *next;
};

struct core_state {
	atomic_t nr_threads;
	struct core_thread dumper;
	struct completion startup;
};

/*
 * NOTE! "signal_struct" does not have its own
 * locking, because a shared signal_struct always
 * implies a shared sighand_struct, so locking
 * sighand_struct is always a proper superset of
 * the locking of signal_struct.
 */
struct signal_struct {
	refcount_t sigcnt;
	atomic_t live;
	int nr_threads;
	int quick_threads;
	struct list_head thread_head;

	wait_queue_head_t wait_chldexit;	/* for wait4() */

	/* current thread group signal load-balancing target: */
	struct task_struct *curr_target;

	/* shared signal handling: */
	struct sigpending shared_pending;

	/* For collecting multiprocess signals during fork */
	struct hlist_head multiprocess;

	/* thread group exit support */
	int group_exit_code;
	/* notify group_exec_task when notify_count is less than or equal to 0 */
	int notify_count;
	struct task_struct *group_exec_task;

	/* thread group stop support, overloads group_exit_code too */
	int group_stop_count;
	unsigned int flags;	/* see SIGNAL_* flags below */

	struct core_state *core_state;	/* coredumping support */

	/*
	 * PR_SET_CHILD_SUBREAPER marks a process, like a service
	 * manager, to re-parent orphan (double-forking) child processes
	 * to this process instead of 'init'. The service manager is
	 * able to receive SIGCHLD signals and is able to investigate
	 * the process until it calls wait(). All children of this
	 * process will inherit a flag if they should look for a
	 * child_subreaper process at exit.
	 */
	unsigned int is_child_subreaper:1;
	unsigned int has_child_subreaper:1;

#ifdef CONFIG_POSIX_TIMERS

	/* POSIX.1b Interval Timers */
	int posix_timer_id;
	struct list_head posix_timers;

	/* ITIMER_REAL timer for the process */
	struct hrtimer real_timer;
	ktime_t it_real_incr;

	/*
	 * ITIMER_PROF and ITIMER_VIRTUAL timers for the process; we use
	 * CPUCLOCK_PROF and CPUCLOCK_VIRT for indexing the array, as these
	 * values are defined to 0 and 1 respectively.
	 */
	struct cpu_itimer it[2];

	/*
	 * Thread group totals for process CPU timers.
	 * See thread_group_cputimer(), et al, for details.
	 */
	struct thread_group_cputimer cputimer;

#endif
	/* Empty if CONFIG_POSIX_TIMERS=n */
	struct posix_cputimers posix_cputimers;

	/* PID/PID hash table linkage. */
	struct pid *pids[PIDTYPE_MAX];

#ifdef CONFIG_NO_HZ_FULL
	atomic_t tick_dep_mask;
#endif

	struct pid *tty_old_pgrp;

	/* boolean value for session group leader */
	int leader;

	struct tty_struct *tty;	/* NULL if no tty */

#ifdef CONFIG_SCHED_AUTOGROUP
	struct autogroup *autogroup;
#endif
	/*
	 * Cumulative resource counters for dead threads in the group,
	 * and for reaped dead child processes forked by this group.
	 * Live threads maintain their own counters and add to these
	 * in __exit_signal, except for the group leader.
	 */
	seqlock_t stats_lock;
	u64 utime, stime, cutime, cstime;
	u64 gtime;
	u64 cgtime;
	struct prev_cputime prev_cputime;
	unsigned long nvcsw, nivcsw, cnvcsw, cnivcsw;
	unsigned long min_flt, maj_flt, cmin_flt, cmaj_flt;
	unsigned long inblock, oublock, cinblock, coublock;
	unsigned long maxrss, cmaxrss;
	struct task_io_accounting ioac;

	/*
	 * Cumulative ns of scheduled CPU time of dead threads in the
	 * group, not including a zombie group leader. (This only differs
	 * from jiffies_to_ns(utime + stime) if sched_clock uses something
	 * other than jiffies.)
	 */
	unsigned long long sum_sched_runtime;

	/*
	 * We don't bother to synchronize most readers of this at all,
	 * because there is no reader checking a limit that actually needs
	 * to get both rlim_cur and rlim_max atomically, and either one
	 * alone is a single word that can safely be read normally.
	 * getrlimit/setrlimit use task_lock(current->group_leader) to
	 * protect this instead of the siglock, because they really
	 * have no need to disable irqs.
	 */
	struct rlimit rlim[RLIM_NLIMITS];

#ifdef CONFIG_BSD_PROCESS_ACCT
	struct pacct_struct pacct;	/* per-process accounting information */
#endif
#ifdef CONFIG_TASKSTATS
	struct taskstats *stats;
#endif
#ifdef CONFIG_AUDIT
	unsigned audit_tty;
	struct tty_audit_buf *tty_audit_buf;
#endif

	/*
	 * Thread is the potential origin of an oom condition; kill first on
	 * oom
	 */
	bool oom_flag_origin;
	short oom_score_adj;		/* OOM kill score adjustment */
	short oom_score_adj_min;	/* OOM kill score adjustment min value.
					 * Only settable by CAP_SYS_RESOURCE. */
	struct mm_struct *oom_mm;	/* recorded mm when the thread group got
					 * killed by the oom killer */

	struct mutex cred_guard_mutex;	/* guard against foreign influences on
					 * credential calculations
					 * (notably ptrace).
					 * Deprecated; do not use in new code.
					 * Use exec_update_lock instead.
					 */
	struct rw_semaphore exec_update_lock;	/* Held while task_struct is
						 * being updated during exec,
						 * and may have inconsistent
						 * permissions.
						 */
} __randomize_layout;
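
/*
 * Illustrative sketch (not part of this header): readers of the cumulative
 * counters above sample them under the stats_lock seqlock, e.g.:
 *
 *	static u64 read_group_utime(struct signal_struct *sig)	// hypothetical
 *	{
 *		unsigned int seq;
 *		u64 utime;
 *
 *		do {
 *			seq = read_seqbegin(&sig->stats_lock);
 *			utime = sig->utime;
 *		} while (read_seqretry(&sig->stats_lock, seq));
 *		return utime;
 *	}
 */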

/*
 * Bits in flags field of signal_struct.
 */
#define SIGNAL_STOP_STOPPED	0x00000001 /* job control stop in effect */
#define SIGNAL_STOP_CONTINUED	0x00000002 /* SIGCONT since WCONTINUED reap */
#define SIGNAL_GROUP_EXIT	0x00000004 /* group exit in progress */
/*
 * Pending notifications to parent.
 */
#define SIGNAL_CLD_STOPPED	0x00000010
#define SIGNAL_CLD_CONTINUED	0x00000020
#define SIGNAL_CLD_MASK		(SIGNAL_CLD_STOPPED|SIGNAL_CLD_CONTINUED)

#define SIGNAL_UNKILLABLE	0x00000040 /* for init: ignore fatal signals */

#define SIGNAL_STOP_MASK (SIGNAL_CLD_MASK | SIGNAL_STOP_STOPPED | \
			  SIGNAL_STOP_CONTINUED)

static inline void signal_set_stop_flags(struct signal_struct *sig,
					 unsigned int flags)
{
	WARN_ON(sig->flags & SIGNAL_GROUP_EXIT);
	sig->flags = (sig->flags & ~SIGNAL_STOP_MASK) | flags;
}

extern void flush_signals(struct task_struct *);
extern void ignore_signals(struct task_struct *);
extern void flush_signal_handlers(struct task_struct *, int force_default);
extern int dequeue_signal(struct task_struct *task, sigset_t *mask,
			  kernel_siginfo_t *info, enum pid_type *type);

static inline int kernel_dequeue_signal(void)
{
	struct task_struct *task = current;
	kernel_siginfo_t __info;
	enum pid_type __type;
	int ret;

	spin_lock_irq(&task->sighand->siglock);
	ret = dequeue_signal(task, &task->blocked, &__info, &__type);
	spin_unlock_irq(&task->sighand->siglock);

	return ret;
}
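
/*
 * Illustrative sketch (not from this header): a kernel thread that has
 * opted in to a signal with allow_signal() can drain a pending instance
 * of it like this:
 *
 *	allow_signal(SIGTERM);
 *	...
 *	if (signal_pending(current))
 *		kernel_dequeue_signal();	// consume it so it isn't left pending
 */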

static inline void kernel_signal_stop(void)
{
	spin_lock_irq(&current->sighand->siglock);
	if (current->jobctl & JOBCTL_STOP_DEQUEUED) {
		current->jobctl |= JOBCTL_STOPPED;
		set_special_state(TASK_STOPPED);
	}
	spin_unlock_irq(&current->sighand->siglock);

	schedule();
}

#ifdef __ia64__
# define ___ARCH_SI_IA64(_a1, _a2, _a3) , _a1, _a2, _a3
#else
# define ___ARCH_SI_IA64(_a1, _a2, _a3)
#endif

int force_sig_fault_to_task(int sig, int code, void __user *addr
	___ARCH_SI_IA64(int imm, unsigned int flags, unsigned long isr)
	, struct task_struct *t);
int force_sig_fault(int sig, int code, void __user *addr
	___ARCH_SI_IA64(int imm, unsigned int flags, unsigned long isr));
int send_sig_fault(int sig, int code, void __user *addr
	___ARCH_SI_IA64(int imm, unsigned int flags, unsigned long isr)
	, struct task_struct *t);

int force_sig_mceerr(int code, void __user *, short);
int send_sig_mceerr(int code, void __user *, short, struct task_struct *);

int force_sig_bnderr(void __user *addr, void __user *lower, void __user *upper);
int force_sig_pkuerr(void __user *addr, u32 pkey);
int send_sig_perf(void __user *addr, u32 type, u64 sig_data);

int force_sig_ptrace_errno_trap(int errno, void __user *addr);
int force_sig_fault_trapno(int sig, int code, void __user *addr, int trapno);
int send_sig_fault_trapno(int sig, int code, void __user *addr, int trapno,
			  struct task_struct *t);
int force_sig_seccomp(int syscall, int reason, bool force_coredump);

extern int send_sig_info(int, struct kernel_siginfo *, struct task_struct *);
extern void force_sigsegv(int sig);
extern int force_sig_info(struct kernel_siginfo *);
extern int __kill_pgrp_info(int sig, struct kernel_siginfo *info, struct pid *pgrp);
extern int kill_pid_info(int sig, struct kernel_siginfo *info, struct pid *pid);
extern int kill_pid_usb_asyncio(int sig, int errno, sigval_t addr, struct pid *,
				const struct cred *);
extern int kill_pgrp(struct pid *pid, int sig, int priv);
extern int kill_pid(struct pid *pid, int sig, int priv);
extern __must_check bool do_notify_parent(struct task_struct *, int);
extern void __wake_up_parent(struct task_struct *p, struct task_struct *parent);
extern void force_sig(int);
extern void force_fatal_sig(int);
extern void force_exit_sig(int);
extern int send_sig(int, struct task_struct *, int);
extern int zap_other_threads(struct task_struct *p);
extern struct sigqueue *sigqueue_alloc(void);
extern void sigqueue_free(struct sigqueue *);
extern int send_sigqueue(struct sigqueue *, struct pid *, enum pid_type);
extern int do_sigaction(int, struct k_sigaction *, struct k_sigaction *);

static inline void clear_notify_signal(void)
{
	clear_thread_flag(TIF_NOTIFY_SIGNAL);
	smp_mb__after_atomic();
}

/*
 * Returns 'true' if kick_process() is needed to force a transition from
 * user -> kernel to guarantee expedient run of TWA_SIGNAL based task_work.
 */
static inline bool __set_notify_signal(struct task_struct *task)
{
	return !test_and_set_tsk_thread_flag(task, TIF_NOTIFY_SIGNAL) &&
	       !wake_up_state(task, TASK_INTERRUPTIBLE);
}

/*
 * Called to break out of interruptible wait loops, and enter the
 * exit_to_user_mode_loop().
 */
static inline void set_notify_signal(struct task_struct *task)
{
	if (__set_notify_signal(task))
		kick_process(task);
}
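
/*
 * Illustrative sketch (not from this header): this is the notification
 * path that task_work_add(..., TWA_SIGNAL) relies on. Queue a callback
 * and nudge the task so it runs the work on its way back to user mode:
 *
 *	static struct callback_head cb;		// hypothetical
 *
 *	init_task_work(&cb, my_callback);	// my_callback: hypothetical
 *	if (!task_work_add(task, &cb, TWA_SIGNAL))
 *		;	// task will run my_callback() via TIF_NOTIFY_SIGNAL
 */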

static inline int restart_syscall(void)
{
	set_tsk_thread_flag(current, TIF_SIGPENDING);
	return -ERESTARTNOINTR;
}

static inline int task_sigpending(struct task_struct *p)
{
	return unlikely(test_tsk_thread_flag(p, TIF_SIGPENDING));
}

static inline int signal_pending(struct task_struct *p)
{
	/*
	 * TIF_NOTIFY_SIGNAL isn't really a signal, but it requires the same
	 * behavior in terms of ensuring that we break out of wait loops
	 * so that notify signal callbacks can be processed.
	 */
	if (unlikely(test_tsk_thread_flag(p, TIF_NOTIFY_SIGNAL)))
		return 1;
	return task_sigpending(p);
}

static inline int __fatal_signal_pending(struct task_struct *p)
{
	return unlikely(sigismember(&p->pending.signal, SIGKILL));
}

static inline int fatal_signal_pending(struct task_struct *p)
{
	return task_sigpending(p) && __fatal_signal_pending(p);
}

static inline int signal_pending_state(unsigned int state, struct task_struct *p)
{
	if (!(state & (TASK_INTERRUPTIBLE | TASK_WAKEKILL)))
		return 0;
	if (!signal_pending(p))
		return 0;

	return (state & TASK_INTERRUPTIBLE) || __fatal_signal_pending(p);
}

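/*
 * Illustrative sketch: the classic interruptible-wait pattern these
 * helpers support:
 *
 *	set_current_state(TASK_INTERRUPTIBLE);
 *	while (!condition) {			// condition: hypothetical
 *		if (signal_pending(current)) {
 *			ret = -ERESTARTSYS;
 *			break;
 *		}
 *		schedule();
 *		set_current_state(TASK_INTERRUPTIBLE);
 *	}
 *	__set_current_state(TASK_RUNNING);
 */
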
/*
 * This should only be used in fault handlers to decide whether we
 * should stop the current fault routine to handle the signals
 * instead, especially in the case where the fault has been interrupted
 * and returned VM_FAULT_RETRY.
 */
static inline bool fault_signal_pending(vm_fault_t fault_flags,
					struct pt_regs *regs)
{
	return unlikely((fault_flags & VM_FAULT_RETRY) &&
			(fatal_signal_pending(current) ||
			 (user_mode(regs) && signal_pending(current))));
}

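/*
 * Illustrative sketch (modeled on arch page-fault handlers):
 *
 *	fault = handle_mm_fault(vma, address, flags, regs);
 *	if (fault_signal_pending(fault, regs))
 *		return;		// handle the signal before retrying the fault
 */
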
/*
 * Reevaluate whether the task has signals pending delivery.
 * Wake the task if so.
 * This is required every time the blocked sigset_t changes.
 * Callers must hold sighand->siglock.
 */
extern void recalc_sigpending_and_wake(struct task_struct *t);
extern void recalc_sigpending(void);
extern void calculate_sigpending(void);

extern void signal_wake_up_state(struct task_struct *t, unsigned int state);

static inline void signal_wake_up(struct task_struct *t, bool fatal)
{
	unsigned int state = 0;

	if (fatal && !(t->jobctl & JOBCTL_PTRACE_FROZEN)) {
		t->jobctl &= ~(JOBCTL_STOPPED | JOBCTL_TRACED);
		state = TASK_WAKEKILL | __TASK_TRACED;
	}
	signal_wake_up_state(t, state);
}

static inline void ptrace_signal_wake_up(struct task_struct *t, bool resume)
{
	unsigned int state = 0;

	if (resume) {
		t->jobctl &= ~JOBCTL_TRACED;
		state = __TASK_TRACED;
	}
	signal_wake_up_state(t, state);
}

void task_join_group_stop(struct task_struct *task);

#ifdef TIF_RESTORE_SIGMASK
/*
 * Legacy restore_sigmask accessors. These are inefficient on
 * SMP architectures because they require atomic operations.
 */

/**
 * set_restore_sigmask() - make sure saved_sigmask processing gets done
 *
 * This sets TIF_RESTORE_SIGMASK and ensures that the arch signal code
 * will run before returning to user mode, to process the flag. For
 * all callers, TIF_SIGPENDING is already set or it's no harm to set
 * it. TIF_RESTORE_SIGMASK need not be in the set of bits that the
 * arch code will notice on return to user mode, in case those bits
 * are scarce. We set TIF_SIGPENDING here to ensure that the arch
 * signal code always gets run when TIF_RESTORE_SIGMASK is set.
 */
static inline void set_restore_sigmask(void)
{
	set_thread_flag(TIF_RESTORE_SIGMASK);
}

static inline void clear_tsk_restore_sigmask(struct task_struct *task)
{
	clear_tsk_thread_flag(task, TIF_RESTORE_SIGMASK);
}

static inline void clear_restore_sigmask(void)
{
	clear_thread_flag(TIF_RESTORE_SIGMASK);
}

static inline bool test_tsk_restore_sigmask(struct task_struct *task)
{
	return test_tsk_thread_flag(task, TIF_RESTORE_SIGMASK);
}

static inline bool test_restore_sigmask(void)
{
	return test_thread_flag(TIF_RESTORE_SIGMASK);
}

static inline bool test_and_clear_restore_sigmask(void)
{
	return test_and_clear_thread_flag(TIF_RESTORE_SIGMASK);
}

#else /* TIF_RESTORE_SIGMASK */

/* Higher-quality implementation, used if TIF_RESTORE_SIGMASK doesn't exist. */
static inline void set_restore_sigmask(void)
{
	current->restore_sigmask = true;
}

static inline void clear_tsk_restore_sigmask(struct task_struct *task)
{
	task->restore_sigmask = false;
}

static inline void clear_restore_sigmask(void)
{
	current->restore_sigmask = false;
}

static inline bool test_restore_sigmask(void)
{
	return current->restore_sigmask;
}

static inline bool test_tsk_restore_sigmask(struct task_struct *task)
{
	return task->restore_sigmask;
}

static inline bool test_and_clear_restore_sigmask(void)
{
	if (!current->restore_sigmask)
		return false;
	current->restore_sigmask = false;
	return true;
}
#endif

static inline void restore_saved_sigmask(void)
{
	if (test_and_clear_restore_sigmask())
		__set_current_blocked(&current->saved_sigmask);
}

extern int set_user_sigmask(const sigset_t __user *umask, size_t sigsetsize);

static inline void restore_saved_sigmask_unless(bool interrupted)
{
	if (interrupted)
		WARN_ON(!signal_pending(current));
	else
		restore_saved_sigmask();
}

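/*
 * Illustrative sketch of the saved-sigmask protocol as ppoll()/pselect()
 * style syscalls use it: install the user's temporary mask, do the wait,
 * then restore the old mask unless a signal must first be delivered with
 * the temporary mask still in place:
 *
 *	ret = set_user_sigmask(umask, sigsetsize);
 *	if (ret)
 *		return ret;
 *	ret = do_the_wait(...);			// hypothetical syscall body
 *	restore_saved_sigmask_unless(ret == -ERESTARTNOHAND);
 */
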
static inline sigset_t *sigmask_to_save(void)
{
	sigset_t *res = &current->blocked;

	if (unlikely(test_restore_sigmask()))
		res = &current->saved_sigmask;
	return res;
}

static inline int kill_cad_pid(int sig, int priv)
{
	return kill_pid(cad_pid, sig, priv);
}

/* These can be the second arg to send_sig_info/send_group_sig_info. */
#define SEND_SIG_NOINFO	((struct kernel_siginfo *) 0)
#define SEND_SIG_PRIV	((struct kernel_siginfo *) 1)

static inline int __on_sig_stack(unsigned long sp)
{
#ifdef CONFIG_STACK_GROWSUP
	return sp >= current->sas_ss_sp &&
	       sp - current->sas_ss_sp < current->sas_ss_size;
#else
	return sp > current->sas_ss_sp &&
	       sp - current->sas_ss_sp <= current->sas_ss_size;
#endif
}

/*
 * True if we are on the alternate signal stack.
 */
static inline int on_sig_stack(unsigned long sp)
{
	/*
	 * If the signal stack is SS_AUTODISARM then, by construction, we
	 * can't be on the signal stack unless user code deliberately set
	 * SS_AUTODISARM when we were already on it.
	 *
	 * This improves reliability: if user state gets corrupted such that
	 * the stack pointer points very close to the end of the signal stack,
	 * then this check will enable the signal to be handled anyway.
	 */
	if (current->sas_ss_flags & SS_AUTODISARM)
		return 0;

	return __on_sig_stack(sp);
}

static inline int sas_ss_flags(unsigned long sp)
{
	if (!current->sas_ss_size)
		return SS_DISABLE;

	return on_sig_stack(sp) ? SS_ONSTACK : 0;
}

static inline void sas_ss_reset(struct task_struct *p)
{
	p->sas_ss_sp = 0;
	p->sas_ss_size = 0;
	p->sas_ss_flags = SS_DISABLE;
}

static inline unsigned long sigsp(unsigned long sp, struct ksignal *ksig)
{
	if (unlikely((ksig->ka.sa.sa_flags & SA_ONSTACK)) && !sas_ss_flags(sp))
#ifdef CONFIG_STACK_GROWSUP
		return current->sas_ss_sp;
#else
		return current->sas_ss_sp + current->sas_ss_size;
#endif
	return sp;
}

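/*
 * Illustrative sketch (modeled on arch signal-frame setup): choose the
 * stack for the handler's frame, honouring SA_ONSTACK:
 *
 *	unsigned long sp = sigsp(user_stack_pointer(regs), ksig);
 *
 *	frame = (struct my_sigframe __user *)	// my_sigframe: hypothetical
 *		((sp - sizeof(*frame)) & ~15UL);
 */
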
extern void __cleanup_sighand(struct sighand_struct *);
extern void flush_itimer_signals(void);

#define tasklist_empty() \
	list_empty(&init_task.tasks)

#define next_task(p) \
	list_entry_rcu((p)->tasks.next, struct task_struct, tasks)

#define for_each_process(p) \
	for (p = &init_task ; (p = next_task(p)) != &init_task ; )

extern bool current_is_single_threaded(void);

/*
 * Careful: do_each_thread/while_each_thread is a double loop so
 * 'break' will not work as expected - use goto instead.
 */
#define do_each_thread(g, t) \
	for (g = t = &init_task ; (g = t = next_task(g)) != &init_task ; ) do

#define while_each_thread(g, t) \
	while ((t = next_thread(t)) != g)

#define __for_each_thread(signal, t) \
	list_for_each_entry_rcu(t, &(signal)->thread_head, thread_node)

#define for_each_thread(p, t) \
	__for_each_thread((p)->signal, t)

/* Careful: this is a double loop, 'break' won't work as expected. */
#define for_each_process_thread(p, t) \
	for_each_process(p) for_each_thread(p, t)

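/*
 * Illustrative sketch: the _rcu list iteration behind these macros means
 * callers hold rcu_read_lock() (or tasklist_lock) while walking:
 *
 *	struct task_struct *p, *t;
 *
 *	rcu_read_lock();
 *	for_each_process_thread(p, t)
 *		inspect_thread(t);	// inspect_thread: hypothetical
 *	rcu_read_unlock();
 */
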
typedef int (*proc_visitor)(struct task_struct *p, void *data);
void walk_process_tree(struct task_struct *top, proc_visitor, void *);

static inline
struct pid *task_pid_type(struct task_struct *task, enum pid_type type)
{
	struct pid *pid;

	if (type == PIDTYPE_PID)
		pid = task_pid(task);
	else
		pid = task->signal->pids[type];
	return pid;
}

static inline struct pid *task_tgid(struct task_struct *task)
{
	return task->signal->pids[PIDTYPE_TGID];
}

/*
 * Without tasklist or RCU lock it is not safe to dereference
 * the result of task_pgrp/task_session even if task == current;
 * we can race with another thread doing sys_setsid/sys_setpgid.
 */
static inline struct pid *task_pgrp(struct task_struct *task)
{
	return task->signal->pids[PIDTYPE_PGID];
}

static inline struct pid *task_session(struct task_struct *task)
{
	return task->signal->pids[PIDTYPE_SID];
}

static inline int get_nr_threads(struct task_struct *task)
{
	return task->signal->nr_threads;
}

static inline bool thread_group_leader(struct task_struct *p)
{
	return p->exit_signal >= 0;
}

static inline
bool same_thread_group(struct task_struct *p1, struct task_struct *p2)
{
	return p1->signal == p2->signal;
}

static inline struct task_struct *next_thread(const struct task_struct *p)
{
	return list_entry_rcu(p->thread_group.next,
			      struct task_struct, thread_group);
}

static inline int thread_group_empty(struct task_struct *p)
{
	return list_empty(&p->thread_group);
}

#define delay_group_leader(p) \
	(thread_group_leader(p) && !thread_group_empty(p))

extern bool thread_group_exited(struct pid *pid);

extern struct sighand_struct *__lock_task_sighand(struct task_struct *task,
						  unsigned long *flags);

static inline struct sighand_struct *lock_task_sighand(struct task_struct *task,
						       unsigned long *flags)
{
	struct sighand_struct *ret;

	ret = __lock_task_sighand(task, flags);
	(void)__cond_lock(&task->sighand->siglock, ret);
	return ret;
}

static inline void unlock_task_sighand(struct task_struct *task,
				       unsigned long *flags)
{
	spin_unlock_irqrestore(&task->sighand->siglock, *flags);
}

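/*
 * Illustrative sketch: lock_task_sighand() returns NULL once the task
 * has been reaped and its sighand detached, so always check the result:
 *
 *	unsigned long flags;
 *
 *	if (lock_task_sighand(task, &flags)) {
 *		...			// access task->signal under siglock
 *		unlock_task_sighand(task, &flags);
 *	}
 */
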
#ifdef CONFIG_LOCKDEP
extern void lockdep_assert_task_sighand_held(struct task_struct *task);
#else
static inline void lockdep_assert_task_sighand_held(struct task_struct *task) { }
#endif

static inline unsigned long task_rlimit(const struct task_struct *task,
					unsigned int limit)
{
	return READ_ONCE(task->signal->rlim[limit].rlim_cur);
}

static inline unsigned long task_rlimit_max(const struct task_struct *task,
					    unsigned int limit)
{
	return READ_ONCE(task->signal->rlim[limit].rlim_max);
}

static inline unsigned long rlimit(unsigned int limit)
{
	return task_rlimit(current, limit);
}

static inline unsigned long rlimit_max(unsigned int limit)
{
	return task_rlimit_max(current, limit);
}

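/*
 * Illustrative sketch: checking the current task's soft limit on open
 * files:
 *
 *	if (nr_open >= rlimit(RLIMIT_NOFILE))	// nr_open: hypothetical
 *		return -EMFILE;
 */
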
#endif /* _LINUX_SCHED_SIGNAL_H */