// SPDX-License-Identifier: GPL-2.0-only
/*
 *  linux/kernel/signal.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 *  1997-11-02  Modified for POSIX.1b signals by Richard Henderson
 *
 *  2003-06-02  Jim Houston - Concurrent Computer Corp.
 *		Changes to use preallocated sigqueue structures
 *		to allow signals to be sent reliably.
 */

#include <linux/slab.h>
#include <linux/export.h>
#include <linux/init.h>
#include <linux/sched/mm.h>
#include <linux/sched/user.h>
#include <linux/sched/debug.h>
#include <linux/sched/task.h>
#include <linux/sched/task_stack.h>
#include <linux/sched/cputime.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/proc_fs.h>
#include <linux/tty.h>
#include <linux/binfmts.h>
#include <linux/coredump.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/ptrace.h>
#include <linux/signal.h>
#include <linux/signalfd.h>
#include <linux/ratelimit.h>
#include <linux/task_work.h>
#include <linux/capability.h>
#include <linux/freezer.h>
#include <linux/pid_namespace.h>
#include <linux/nsproxy.h>
#include <linux/user_namespace.h>
#include <linux/uprobes.h>
#include <linux/compat.h>
#include <linux/cn_proc.h>
#include <linux/compiler.h>
#include <linux/posix-timers.h>
#include <linux/cgroup.h>
#include <linux/audit.h>
#include <linux/sysctl.h>
#include <uapi/linux/pidfd.h>

#define CREATE_TRACE_POINTS
#include <trace/events/signal.h>

#include <asm/param.h>
#include <linux/uaccess.h>
#include <asm/unistd.h>
#include <asm/siginfo.h>
#include <asm/cacheflush.h>
#include <asm/syscall.h>	/* for syscall_get_* */

/*
 * SLAB caches for signal bits.
 */

static struct kmem_cache *sigqueue_cachep;

int print_fatal_signals __read_mostly;

static void __user *sig_handler(struct task_struct *t, int sig)
{
	return t->sighand->action[sig - 1].sa.sa_handler;
}

static inline bool sig_handler_ignored(void __user *handler, int sig)
{
	/* Is it explicitly or implicitly ignored? */
	return handler == SIG_IGN ||
	       (handler == SIG_DFL && sig_kernel_ignore(sig));
}

static bool sig_task_ignored(struct task_struct *t, int sig, bool force)
{
	void __user *handler;

	handler = sig_handler(t, sig);

	/* SIGKILL and SIGSTOP may not be sent to the global init */
	if (unlikely(is_global_init(t) && sig_kernel_only(sig)))
		return true;

	if (unlikely(t->signal->flags & SIGNAL_UNKILLABLE) &&
	    handler == SIG_DFL && !(force && sig_kernel_only(sig)))
		return true;

	/* Only allow kernel generated signals to this kthread */
	if (unlikely((t->flags & PF_KTHREAD) &&
		     (handler == SIG_KTHREAD_KERNEL) && !force))
		return true;

	return sig_handler_ignored(handler, sig);
}

static bool sig_ignored(struct task_struct *t, int sig, bool force)
{
	/*
	 * Blocked signals are never ignored, since the
	 * signal handler may change by the time it is
	 * unblocked.
	 */
	if (sigismember(&t->blocked, sig) || sigismember(&t->real_blocked, sig))
		return false;

	/*
	 * Tracers may want to know about even ignored signals, unless the
	 * signal is SIGKILL, which can't be reported anyway but can be
	 * ignored by a SIGNAL_UNKILLABLE task.
	 */
	if (t->ptrace && sig != SIGKILL)
		return false;

	return sig_task_ignored(t, sig, force);
}
/*
 * Re-calculate pending state from the set of locally pending
 * signals, globally pending signals, and blocked signals.
 */
static inline bool has_pending_signals(sigset_t *signal, sigset_t *blocked)
{
	unsigned long ready;
	long i;

	switch (_NSIG_WORDS) {
	default:
		for (i = _NSIG_WORDS, ready = 0; --i >= 0 ;)
			ready |= signal->sig[i] &~ blocked->sig[i];
		break;

	case 4: ready  = signal->sig[3] &~ blocked->sig[3];
		ready |= signal->sig[2] &~ blocked->sig[2];
		ready |= signal->sig[1] &~ blocked->sig[1];
		ready |= signal->sig[0] &~ blocked->sig[0];
		break;

	case 2: ready  = signal->sig[1] &~ blocked->sig[1];
		ready |= signal->sig[0] &~ blocked->sig[0];
		break;

	case 1: ready  = signal->sig[0] &~ blocked->sig[0];
	}
	return ready != 0;
}

#define PENDING(p,b) has_pending_signals(&(p)->signal, (b))
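/*
 * Minimal standalone sketch (illustrative, not kernel code) of the
 * word-wise computation above, for a hypothetical two-word signal set
 * mirroring the _NSIG_WORDS == 2 case:
 */
#if 0
#include <stdbool.h>

static bool demo_has_pending(const unsigned long sig[2],
			     const unsigned long blocked[2])
{
	unsigned long ready;

	ready  = sig[1] & ~blocked[1];	// pending and not blocked
	ready |= sig[0] & ~blocked[0];
	return ready != 0;
}
#endif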
static bool recalc_sigpending_tsk(struct task_struct *t)
{
	if ((t->jobctl & (JOBCTL_PENDING_MASK | JOBCTL_TRAP_FREEZE)) ||
	    PENDING(&t->pending, &t->blocked) ||
	    PENDING(&t->signal->shared_pending, &t->blocked) ||
	    cgroup_task_frozen(t)) {
		set_tsk_thread_flag(t, TIF_SIGPENDING);
		return true;
	}

	/*
	 * We must never clear the flag in another thread, or in current
	 * when it's possible the current syscall is returning -ERESTART*.
	 * So we don't clear it here, and only callers who know they should do.
	 */
	return false;
}

void recalc_sigpending(void)
{
	if (!recalc_sigpending_tsk(current) && !freezing(current))
		clear_thread_flag(TIF_SIGPENDING);
}
EXPORT_SYMBOL(recalc_sigpending);

void calculate_sigpending(void)
{
	/* Have any signals or users of TIF_SIGPENDING been delayed
	 * until after fork?
	 */
	spin_lock_irq(&current->sighand->siglock);
	set_tsk_thread_flag(current, TIF_SIGPENDING);
	recalc_sigpending();
	spin_unlock_irq(&current->sighand->siglock);
}

/* Given the mask, find the first available signal that should be serviced. */

#define SYNCHRONOUS_MASK \
	(sigmask(SIGSEGV) | sigmask(SIGBUS) | sigmask(SIGILL) | \
	 sigmask(SIGTRAP) | sigmask(SIGFPE) | sigmask(SIGSYS))

int next_signal(struct sigpending *pending, sigset_t *mask)
{
	unsigned long i, *s, *m, x;
	int sig = 0;

	s = pending->signal.sig;
	m = mask->sig;

	/*
	 * Handle the first word specially: it contains the
	 * synchronous signals that need to be dequeued first.
	 */
	x = *s &~ *m;
	if (x) {
		if (x & SYNCHRONOUS_MASK)
			x &= SYNCHRONOUS_MASK;
		sig = ffz(~x) + 1;
		return sig;
	}

	switch (_NSIG_WORDS) {
	default:
		for (i = 1; i < _NSIG_WORDS; ++i) {
			x = *++s &~ *++m;
			if (!x)
				continue;
			sig = ffz(~x) + i*_NSIG_BPW + 1;
			break;
		}
		break;

	case 2:
		x = s[1] &~ m[1];
		if (!x)
			break;
		sig = ffz(~x) + _NSIG_BPW + 1;
		break;

	case 1:
		/* Nothing to do */
		break;
	}

	return sig;
}
static inline void print_dropped_signal(int sig)
{
	static DEFINE_RATELIMIT_STATE(ratelimit_state, 5 * HZ, 10);

	if (!print_fatal_signals)
		return;

	if (!__ratelimit(&ratelimit_state))
		return;

	pr_info("%s/%d: reached RLIMIT_SIGPENDING, dropped signal %d\n",
		current->comm, current->pid, sig);
}
/**
 * task_set_jobctl_pending - set jobctl pending bits
 * @task: target task
 * @mask: pending bits to set
 *
 * Set @mask in @task->jobctl.  @mask must be a subset of
 * %JOBCTL_PENDING_MASK | %JOBCTL_STOP_CONSUME | %JOBCTL_STOP_SIGMASK |
 * %JOBCTL_TRAPPING.  If stop signo is being set, the existing signo is
 * cleared.  If @task is already being killed or exiting, this function
 * becomes a noop.
 *
 * CONTEXT:
 * Must be called with @task->sighand->siglock held.
 *
 * RETURNS:
 * %true if @mask is set, %false if made noop because @task was dying.
 */
bool task_set_jobctl_pending(struct task_struct *task, unsigned long mask)
{
	BUG_ON(mask & ~(JOBCTL_PENDING_MASK | JOBCTL_STOP_CONSUME |
			JOBCTL_STOP_SIGMASK | JOBCTL_TRAPPING));
	BUG_ON((mask & JOBCTL_TRAPPING) && !(mask & JOBCTL_PENDING_MASK));

	if (unlikely(fatal_signal_pending(task) || (task->flags & PF_EXITING)))
		return false;

	if (mask & JOBCTL_STOP_SIGMASK)
		task->jobctl &= ~JOBCTL_STOP_SIGMASK;

	task->jobctl |= mask;
	return true;
}
/**
 * task_clear_jobctl_trapping - clear jobctl trapping bit
 * @task: target task
 *
 * If JOBCTL_TRAPPING is set, a ptracer is waiting for us to enter TRACED.
 * Clear it and wake up the ptracer.  Note that we don't need any further
 * locking.  @task->siglock guarantees that @task->parent points to the
 * ptracer.
 *
 * CONTEXT:
 * Must be called with @task->sighand->siglock held.
 */
void task_clear_jobctl_trapping(struct task_struct *task)
{
	if (unlikely(task->jobctl & JOBCTL_TRAPPING)) {
		task->jobctl &= ~JOBCTL_TRAPPING;
		smp_mb();	/* advised by wake_up_bit() */
		wake_up_bit(&task->jobctl, JOBCTL_TRAPPING_BIT);
	}
}

/**
 * task_clear_jobctl_pending - clear jobctl pending bits
 * @task: target task
 * @mask: pending bits to clear
 *
 * Clear @mask from @task->jobctl.  @mask must be a subset of
 * %JOBCTL_PENDING_MASK.  If %JOBCTL_STOP_PENDING is being cleared, other
 * STOP bits are cleared together.
 *
 * If clearing of @mask leaves no stop or trap pending, this function calls
 * task_clear_jobctl_trapping().
 *
 * CONTEXT:
 * Must be called with @task->sighand->siglock held.
 */
void task_clear_jobctl_pending(struct task_struct *task, unsigned long mask)
{
	BUG_ON(mask & ~JOBCTL_PENDING_MASK);

	if (mask & JOBCTL_STOP_PENDING)
		mask |= JOBCTL_STOP_CONSUME | JOBCTL_STOP_DEQUEUED;

	task->jobctl &= ~mask;

	if (!(task->jobctl & JOBCTL_PENDING_MASK))
		task_clear_jobctl_trapping(task);
}
/**
 * task_participate_group_stop - participate in a group stop
 * @task: task participating in a group stop
 *
 * @task has %JOBCTL_STOP_PENDING set and is participating in a group stop.
 * Group stop states are cleared and the group stop count is consumed if
 * %JOBCTL_STOP_CONSUME was set.  If the consumption completes the group
 * stop, the appropriate `SIGNAL_*` flags are set.
 *
 * CONTEXT:
 * Must be called with @task->sighand->siglock held.
 *
 * RETURNS:
 * %true if group stop completion should be notified to the parent, %false
 * otherwise.
 */
static bool task_participate_group_stop(struct task_struct *task)
{
	struct signal_struct *sig = task->signal;
	bool consume = task->jobctl & JOBCTL_STOP_CONSUME;

	WARN_ON_ONCE(!(task->jobctl & JOBCTL_STOP_PENDING));

	task_clear_jobctl_pending(task, JOBCTL_STOP_PENDING);

	if (!consume)
		return false;

	if (!WARN_ON_ONCE(sig->group_stop_count == 0))
		sig->group_stop_count--;

	/*
	 * Tell the caller to notify completion iff we are entering into a
	 * fresh group stop.  Read comment in do_signal_stop() for details.
	 */
	if (!sig->group_stop_count && !(sig->flags & SIGNAL_STOP_STOPPED)) {
		signal_set_stop_flags(sig, SIGNAL_STOP_STOPPED);
		return true;
	}
	return false;
}

void task_join_group_stop(struct task_struct *task)
{
	unsigned long mask = current->jobctl & JOBCTL_STOP_SIGMASK;
	struct signal_struct *sig = current->signal;

	if (sig->group_stop_count) {
		sig->group_stop_count++;
		mask |= JOBCTL_STOP_CONSUME;
	} else if (!(sig->flags & SIGNAL_STOP_STOPPED))
		return;

	/* Have the new thread join an on-going signal group stop */
	task_set_jobctl_pending(task, mask | JOBCTL_STOP_PENDING);
}
/*
 * allocate a new signal queue record
 * - this may be called without locks if and only if t == current, otherwise an
 *   appropriate lock must be held to stop the target task from exiting
 */
static struct sigqueue *
__sigqueue_alloc(int sig, struct task_struct *t, gfp_t gfp_flags,
		 int override_rlimit, const unsigned int sigqueue_flags)
{
	struct sigqueue *q = NULL;
	struct ucounts *ucounts;
	long sigpending;

	/*
	 * Protect access to @t credentials. This can go away when all
	 * callers hold rcu read lock.
	 *
	 * NOTE! A pending signal will hold on to the user refcount,
	 * and we get/put the refcount only when the sigpending count
	 * changes from/to zero.
	 */
	rcu_read_lock();
	ucounts = task_ucounts(t);
	sigpending = inc_rlimit_get_ucounts(ucounts, UCOUNT_RLIMIT_SIGPENDING);
	rcu_read_unlock();
	if (!sigpending)
		return NULL;

	if (override_rlimit || likely(sigpending <= task_rlimit(t, RLIMIT_SIGPENDING))) {
		q = kmem_cache_alloc(sigqueue_cachep, gfp_flags);
	} else {
		print_dropped_signal(sig);
	}

	if (unlikely(q == NULL)) {
		dec_rlimit_put_ucounts(ucounts, UCOUNT_RLIMIT_SIGPENDING);
	} else {
		INIT_LIST_HEAD(&q->list);
		q->flags = sigqueue_flags;
		q->ucounts = ucounts;
	}
	return q;
}

static void __sigqueue_free(struct sigqueue *q)
{
	if (q->flags & SIGQUEUE_PREALLOC)
		return;
	if (q->ucounts) {
		dec_rlimit_put_ucounts(q->ucounts, UCOUNT_RLIMIT_SIGPENDING);
		q->ucounts = NULL;
	}
	kmem_cache_free(sigqueue_cachep, q);
}
void flush_sigqueue(struct sigpending *queue)
{
	struct sigqueue *q;

	sigemptyset(&queue->signal);
	while (!list_empty(&queue->list)) {
		q = list_entry(queue->list.next, struct sigqueue, list);
		list_del_init(&q->list);
		__sigqueue_free(q);
	}
}

/*
 * Flush all pending signals for this kthread.
 */
void flush_signals(struct task_struct *t)
{
	unsigned long flags;

	spin_lock_irqsave(&t->sighand->siglock, flags);
	clear_tsk_thread_flag(t, TIF_SIGPENDING);
	flush_sigqueue(&t->pending);
	flush_sigqueue(&t->signal->shared_pending);
	spin_unlock_irqrestore(&t->sighand->siglock, flags);
}
EXPORT_SYMBOL(flush_signals);
#ifdef CONFIG_POSIX_TIMERS
static void __flush_itimer_signals(struct sigpending *pending)
{
	sigset_t signal, retain;
	struct sigqueue *q, *n;

	signal = pending->signal;
	sigemptyset(&retain);

	list_for_each_entry_safe(q, n, &pending->list, list) {
		int sig = q->info.si_signo;

		if (likely(q->info.si_code != SI_TIMER)) {
			sigaddset(&retain, sig);
		} else {
			sigdelset(&signal, sig);
			list_del_init(&q->list);
			__sigqueue_free(q);
		}
	}

	sigorsets(&pending->signal, &signal, &retain);
}

void flush_itimer_signals(void)
{
	struct task_struct *tsk = current;
	unsigned long flags;

	spin_lock_irqsave(&tsk->sighand->siglock, flags);
	__flush_itimer_signals(&tsk->pending);
	__flush_itimer_signals(&tsk->signal->shared_pending);
	spin_unlock_irqrestore(&tsk->sighand->siglock, flags);
}
#endif

void ignore_signals(struct task_struct *t)
{
	int i;

	for (i = 0; i < _NSIG; ++i)
		t->sighand->action[i].sa.sa_handler = SIG_IGN;

	flush_signals(t);
}
/*
 * Flush all handlers for a task.
 */

void
flush_signal_handlers(struct task_struct *t, int force_default)
{
	int i;
	struct k_sigaction *ka = &t->sighand->action[0];
	for (i = _NSIG ; i != 0 ; i--) {
		if (force_default || ka->sa.sa_handler != SIG_IGN)
			ka->sa.sa_handler = SIG_DFL;
		ka->sa.sa_flags = 0;
#ifdef __ARCH_HAS_SA_RESTORER
		ka->sa.sa_restorer = NULL;
#endif
		sigemptyset(&ka->sa.sa_mask);
		ka++;
	}
}

bool unhandled_signal(struct task_struct *tsk, int sig)
{
	void __user *handler = tsk->sighand->action[sig-1].sa.sa_handler;
	if (is_global_init(tsk))
		return true;

	if (handler != SIG_IGN && handler != SIG_DFL)
		return false;

	/* If dying, we handle all new signals by ignoring them */
	if (fatal_signal_pending(tsk))
		return false;

	/* if ptraced, let the tracer determine */
	return !tsk->ptrace;
}
static void collect_signal(int sig, struct sigpending *list, kernel_siginfo_t *info,
			   bool *resched_timer)
{
	struct sigqueue *q, *first = NULL;

	/*
	 * Collect the siginfo appropriate to this signal.  Check if
	 * there is another siginfo for the same signal.
	 */
	list_for_each_entry(q, &list->list, list) {
		if (q->info.si_signo == sig) {
			if (first)
				goto still_pending;
			first = q;
		}
	}

	sigdelset(&list->signal, sig);

	if (first) {
still_pending:
		list_del_init(&first->list);
		copy_siginfo(info, &first->info);

		*resched_timer =
			(first->flags & SIGQUEUE_PREALLOC) &&
			(info->si_code == SI_TIMER) &&
			(info->si_sys_private);

		__sigqueue_free(first);
	} else {
		/*
		 * Ok, it wasn't in the queue.  This must be
		 * a fast-pathed signal or we must have been
		 * out of queue space.  So zero out the info.
		 */
		clear_siginfo(info);
		info->si_signo = sig;
		info->si_errno = 0;
		info->si_code = SI_USER;
		info->si_pid = 0;
		info->si_uid = 0;
	}
}

static int __dequeue_signal(struct sigpending *pending, sigset_t *mask,
			    kernel_siginfo_t *info, bool *resched_timer)
{
	int sig = next_signal(pending, mask);

	if (sig)
		collect_signal(sig, pending, info, resched_timer);
	return sig;
}
/*
 * Dequeue a signal and return the element to the caller, which is
 * expected to free it.
 *
 * All callers have to hold the siglock.
 */
int dequeue_signal(struct task_struct *tsk, sigset_t *mask,
		   kernel_siginfo_t *info, enum pid_type *type)
{
	bool resched_timer = false;
	int signr;

	/* We only dequeue private signals from ourselves, we don't let
	 * signalfd steal them
	 */
	*type = PIDTYPE_PID;
	signr = __dequeue_signal(&tsk->pending, mask, info, &resched_timer);
	if (!signr) {
		*type = PIDTYPE_TGID;
		signr = __dequeue_signal(&tsk->signal->shared_pending,
					 mask, info, &resched_timer);
#ifdef CONFIG_POSIX_TIMERS
		/*
		 * itimer signal ?
		 *
		 * itimers are process shared and we restart periodic
		 * itimers in the signal delivery path to prevent DoS
		 * attacks in the high resolution timer case. This is
		 * compliant with the old way of self-restarting
		 * itimers, as the SIGALRM is a legacy signal and only
		 * queued once. Changing the restart behaviour to
		 * restart the timer in the signal dequeue path is
		 * reducing the timer noise on heavy loaded !highres
		 * systems too.
		 */
		if (unlikely(signr == SIGALRM)) {
			struct hrtimer *tmr = &tsk->signal->real_timer;

			if (!hrtimer_is_queued(tmr) &&
			    tsk->signal->it_real_incr != 0) {
				hrtimer_forward(tmr, tmr->base->get_time(),
						tsk->signal->it_real_incr);
				hrtimer_restart(tmr);
			}
		}
#endif
	}

	recalc_sigpending();
	if (!signr)
		return 0;

	if (unlikely(sig_kernel_stop(signr))) {
		/*
		 * Set a marker that we have dequeued a stop signal.  Our
		 * caller might release the siglock and then the pending
		 * stop signal it is about to process is no longer in the
		 * pending bitmasks, but must still be cleared by a SIGCONT
		 * (and overruled by a SIGKILL).  So those cases clear this
		 * shared flag after we've set it.  Note that this flag may
		 * remain set after the signal we return is ignored or
		 * handled.  That doesn't matter because its only purpose
		 * is to alert stop-signal processing code when another
		 * processor has come along and cleared the flag.
		 */
		current->jobctl |= JOBCTL_STOP_DEQUEUED;
	}
#ifdef CONFIG_POSIX_TIMERS
	if (resched_timer) {
		/*
		 * Release the siglock to ensure proper locking order
		 * of timer locks outside of siglocks.  Note, we leave
		 * irqs disabled here, since the posix-timers code is
		 * about to disable them again anyway.
		 */
		spin_unlock(&tsk->sighand->siglock);
		posixtimer_rearm(info);
		spin_lock(&tsk->sighand->siglock);

		/* Don't expose the si_sys_private value to userspace */
		info->si_sys_private = 0;
	}
#endif
	return signr;
}
EXPORT_SYMBOL_GPL(dequeue_signal);
static int dequeue_synchronous_signal(kernel_siginfo_t *info)
{
	struct task_struct *tsk = current;
	struct sigpending *pending = &tsk->pending;
	struct sigqueue *q, *sync = NULL;

	/*
	 * Might a synchronous signal be in the queue?
	 */
	if (!((pending->signal.sig[0] & ~tsk->blocked.sig[0]) & SYNCHRONOUS_MASK))
		return 0;

	/*
	 * Return the first synchronous signal in the queue.
	 */
	list_for_each_entry(q, &pending->list, list) {
		/* Synchronous signals have a positive si_code */
		if ((q->info.si_code > SI_USER) &&
		    (sigmask(q->info.si_signo) & SYNCHRONOUS_MASK)) {
			sync = q;
			goto next;
		}
	}
	return 0;
next:
	/*
	 * Check if there is another siginfo for the same signal.
	 */
	list_for_each_entry_continue(q, &pending->list, list) {
		if (q->info.si_signo == sync->info.si_signo)
			goto still_pending;
	}

	sigdelset(&pending->signal, sync->info.si_signo);
	recalc_sigpending();
still_pending:
	list_del_init(&sync->list);
	copy_siginfo(info, &sync->info);
	__sigqueue_free(sync);
	return info->si_signo;
}

/*
 * Tell a process that it has a new active signal..
 *
 * NOTE! we rely on the previous spin_lock to
 * lock interrupts for us! We can only be called with
 * "siglock" held, and the local interrupt must
 * have been disabled when that got acquired!
 *
 * No need to set need_resched since signal event passing
 * goes through ->blocked
 */
void signal_wake_up_state(struct task_struct *t, unsigned int state)
{
	lockdep_assert_held(&t->sighand->siglock);

	set_tsk_thread_flag(t, TIF_SIGPENDING);

	/*
	 * TASK_WAKEKILL also means wake it up in the stopped/traced/killable
	 * case. We don't check t->state here because there is a race with it
	 * executing on another processor and just now entering stopped state.
	 * By using wake_up_state, we ensure the process will wake up and
	 * handle its death signal.
	 */
	if (!wake_up_state(t, state | TASK_INTERRUPTIBLE))
		kick_process(t);
}
/*
 * Remove signals in mask from the pending set and queue.
 *
 * All callers must be holding the siglock.
 */
static void flush_sigqueue_mask(sigset_t *mask, struct sigpending *s)
{
	struct sigqueue *q, *n;
	sigset_t m;

	sigandsets(&m, mask, &s->signal);
	if (sigisemptyset(&m))
		return;

	sigandnsets(&s->signal, &s->signal, mask);
	list_for_each_entry_safe(q, n, &s->list, list) {
		if (sigismember(mask, q->info.si_signo)) {
			list_del_init(&q->list);
			__sigqueue_free(q);
		}
	}
}
static inline int is_si_special(const struct kernel_siginfo *info)
{
	return info <= SEND_SIG_PRIV;
}

static inline bool si_fromuser(const struct kernel_siginfo *info)
{
	return info == SEND_SIG_NOINFO ||
		(!is_si_special(info) && SI_FROMUSER(info));
}

/*
 * called with RCU read lock from check_kill_permission()
 */
static bool kill_ok_by_cred(struct task_struct *t)
{
	const struct cred *cred = current_cred();
	const struct cred *tcred = __task_cred(t);

	return uid_eq(cred->euid, tcred->suid) ||
	       uid_eq(cred->euid, tcred->uid) ||
	       uid_eq(cred->uid, tcred->suid) ||
	       uid_eq(cred->uid, tcred->uid) ||
	       ns_capable(tcred->user_ns, CAP_KILL);
}
/*
 * Bad permissions for sending the signal
 * - the caller must hold the RCU read lock
 */
static int check_kill_permission(int sig, struct kernel_siginfo *info,
				 struct task_struct *t)
{
	struct pid *sid;
	int error;

	if (!valid_signal(sig))
		return -EINVAL;

	if (!si_fromuser(info))
		return 0;

	error = audit_signal_info(sig, t); /* Let audit system see the signal */
	if (error)
		return error;

	if (!same_thread_group(current, t) &&
	    !kill_ok_by_cred(t)) {
		switch (sig) {
		case SIGCONT:
			sid = task_session(t);
			/*
			 * We don't return the error if sid == NULL. The
			 * task was unhashed, the caller must notice this.
			 */
			if (!sid || sid == task_session(current))
				break;
			fallthrough;
		default:
			return -EPERM;
		}
	}

	return security_task_kill(t, info, sig, NULL);
}

/**
 * ptrace_trap_notify - schedule trap to notify ptracer
 * @t: tracee wanting to notify tracer
 *
 * This function schedules a sticky ptrace trap which is cleared on the next
 * TRAP_STOP to notify ptracer of an event.  @t must have been seized by
 * ptracer.
 *
 * If @t is running, STOP trap will be taken.  If trapped for STOP and
 * ptracer is listening for events, tracee is woken up so that it can
 * re-trap for the new event.  If trapped otherwise, STOP trap will be
 * eventually taken without returning to userland after the existing traps
 * are finished by PTRACE_CONT.
 *
 * CONTEXT:
 * Must be called with @t->sighand->siglock held.
 */
static void ptrace_trap_notify(struct task_struct *t)
{
	WARN_ON_ONCE(!(t->ptrace & PT_SEIZED));
	lockdep_assert_held(&t->sighand->siglock);

	task_set_jobctl_pending(t, JOBCTL_TRAP_NOTIFY);
	ptrace_signal_wake_up(t, t->jobctl & JOBCTL_LISTENING);
}
/*
 * Handle magic process-wide effects of stop/continue signals. Unlike
 * the signal actions, these happen immediately at signal-generation
 * time regardless of blocking, ignoring, or handling.  This does the
 * actual continuing for SIGCONT, but not the actual stopping for stop
 * signals. The process stop is done as a signal action for SIG_DFL.
 *
 * Returns true if the signal should be actually delivered, otherwise
 * it should be dropped.
 */
static bool prepare_signal(int sig, struct task_struct *p, bool force)
{
	struct signal_struct *signal = p->signal;
	struct task_struct *t;
	sigset_t flush;

	if (signal->flags & SIGNAL_GROUP_EXIT) {
		if (signal->core_state)
			return sig == SIGKILL;
		/*
		 * The process is in the middle of dying, drop the signal.
		 */
		return false;
	} else if (sig_kernel_stop(sig)) {
		/*
		 * This is a stop signal.  Remove SIGCONT from all queues.
		 */
		siginitset(&flush, sigmask(SIGCONT));
		flush_sigqueue_mask(&flush, &signal->shared_pending);
		for_each_thread(p, t)
			flush_sigqueue_mask(&flush, &t->pending);
	} else if (sig == SIGCONT) {
		unsigned int why;
		/*
		 * Remove all stop signals from all queues, wake all threads.
		 */
		siginitset(&flush, SIG_KERNEL_STOP_MASK);
		flush_sigqueue_mask(&flush, &signal->shared_pending);
		for_each_thread(p, t) {
			flush_sigqueue_mask(&flush, &t->pending);
			task_clear_jobctl_pending(t, JOBCTL_STOP_PENDING);
			if (likely(!(t->ptrace & PT_SEIZED))) {
				t->jobctl &= ~JOBCTL_STOPPED;
				wake_up_state(t, __TASK_STOPPED);
			} else
				ptrace_trap_notify(t);
		}

		/*
		 * Notify the parent with CLD_CONTINUED if we were stopped.
		 *
		 * If we were in the middle of a group stop, we pretend it
		 * was already finished, and then continued. Since SIGCHLD
		 * doesn't queue we report only CLD_STOPPED, as if the next
		 * CLD_CONTINUED was dropped.
		 */
		why = 0;
		if (signal->flags & SIGNAL_STOP_STOPPED)
			why |= SIGNAL_CLD_CONTINUED;
		else if (signal->group_stop_count)
			why |= SIGNAL_CLD_STOPPED;

		if (why) {
			/*
			 * The first thread which returns from do_signal_stop()
			 * will take ->siglock, notice SIGNAL_CLD_MASK, and
			 * notify its parent. See get_signal().
			 */
			signal_set_stop_flags(signal, why | SIGNAL_STOP_CONTINUED);
			signal->group_stop_count = 0;
			signal->group_exit_code = 0;
		}
	}

	return !sig_ignored(p, sig, force);
}
/*
 * Test if P wants to take SIG.  After we've checked all threads with this,
 * it's equivalent to finding no threads not blocking SIG.  Any threads not
 * blocking SIG were ruled out because they are not running and already
 * have pending signals.  Such threads will dequeue from the shared queue
 * as soon as they're available, so putting the signal on the shared queue
 * will be equivalent to sending it to one such thread.
 */
static inline bool wants_signal(int sig, struct task_struct *p)
{
	if (sigismember(&p->blocked, sig))
		return false;

	if (p->flags & PF_EXITING)
		return false;

	if (sig == SIGKILL)
		return true;

	if (task_is_stopped_or_traced(p))
		return false;

	return task_curr(p) || !task_sigpending(p);
}
static void complete_signal(int sig, struct task_struct *p, enum pid_type type)
{
	struct signal_struct *signal = p->signal;
	struct task_struct *t;

	/*
	 * Now find a thread we can wake up to take the signal off the queue.
	 *
	 * Try the suggested task first (may or may not be the main thread).
	 */
	if (wants_signal(sig, p))
		t = p;
	else if ((type == PIDTYPE_PID) || thread_group_empty(p))
		/*
		 * There is just one thread and it does not need to be woken.
		 * It will dequeue unblocked signals before it runs again.
		 */
		return;
	else {
		/*
		 * Otherwise try to find a suitable thread.
		 */
		t = signal->curr_target;
		while (!wants_signal(sig, t)) {
			t = next_thread(t);
			if (t == signal->curr_target)
				/*
				 * No thread needs to be woken.
				 * Any eligible threads will see
				 * the signal in the queue soon.
				 */
				return;
		}
		signal->curr_target = t;
	}

	/*
	 * Found a killable thread.  If the signal will be fatal,
	 * then start taking the whole group down immediately.
	 */
	if (sig_fatal(p, sig) &&
	    (signal->core_state || !(signal->flags & SIGNAL_GROUP_EXIT)) &&
	    !sigismember(&t->real_blocked, sig) &&
	    (sig == SIGKILL || !p->ptrace)) {
		/*
		 * This signal will be fatal to the whole group.
		 */
		if (!sig_kernel_coredump(sig)) {
			/*
			 * Start a group exit and wake everybody up.
			 * This way we don't have other threads
			 * running and doing things after a slower
			 * thread has the fatal signal pending.
			 */
			signal->flags = SIGNAL_GROUP_EXIT;
			signal->group_exit_code = sig;
			signal->group_stop_count = 0;
			__for_each_thread(signal, t) {
				task_clear_jobctl_pending(t, JOBCTL_PENDING_MASK);
				sigaddset(&t->pending.signal, SIGKILL);
				signal_wake_up(t, 1);
			}
			return;
		}
	}

	/*
	 * The signal is already in the shared-pending queue.
	 * Tell the chosen thread to wake up and dequeue it.
	 */
	signal_wake_up(t, sig == SIGKILL);
	return;
}
static inline bool legacy_queue(struct sigpending *signals, int sig)
{
	return (sig < SIGRTMIN) && sigismember(&signals->signal, sig);
}
static int __send_signal_locked(int sig, struct kernel_siginfo *info,
				struct task_struct *t, enum pid_type type, bool force)
{
	struct sigpending *pending;
	struct sigqueue *q;
	int override_rlimit;
	int ret = 0, result;

	lockdep_assert_held(&t->sighand->siglock);

	result = TRACE_SIGNAL_IGNORED;
	if (!prepare_signal(sig, t, force))
		goto ret;

	pending = (type != PIDTYPE_PID) ? &t->signal->shared_pending : &t->pending;
	/*
	 * Short-circuit ignored signals and support queuing
	 * exactly one non-rt signal, so that we can get more
	 * detailed information about the cause of the signal.
	 */
	result = TRACE_SIGNAL_ALREADY_PENDING;
	if (legacy_queue(pending, sig))
		goto ret;

	result = TRACE_SIGNAL_DELIVERED;
	/*
	 * Skip useless siginfo allocation for SIGKILL and kernel threads.
	 */
	if ((sig == SIGKILL) || (t->flags & PF_KTHREAD))
		goto out_set;

	/*
	 * Real-time signals must be queued if sent by sigqueue, or
	 * some other real-time mechanism.  It is implementation
	 * defined whether kill() does so.  We attempt to do so, on
	 * the principle of least surprise, but since kill is not
	 * allowed to fail with EAGAIN when low on memory we just
	 * make sure at least one signal gets delivered and don't
	 * pass on the info struct.
	 */
	if (sig < SIGRTMIN)
		override_rlimit = (is_si_special(info) || info->si_code >= 0);
	else
		override_rlimit = 0;

	q = __sigqueue_alloc(sig, t, GFP_ATOMIC, override_rlimit, 0);

	if (q) {
		list_add_tail(&q->list, &pending->list);
		switch ((unsigned long) info) {
		case (unsigned long) SEND_SIG_NOINFO:
			clear_siginfo(&q->info);
			q->info.si_signo = sig;
			q->info.si_errno = 0;
			q->info.si_code = SI_USER;
			q->info.si_pid = task_tgid_nr_ns(current,
							task_active_pid_ns(t));
			rcu_read_lock();
			q->info.si_uid =
				from_kuid_munged(task_cred_xxx(t, user_ns),
						 current_uid());
			rcu_read_unlock();
			break;
		case (unsigned long) SEND_SIG_PRIV:
			clear_siginfo(&q->info);
			q->info.si_signo = sig;
			q->info.si_errno = 0;
			q->info.si_code = SI_KERNEL;
			q->info.si_pid = 0;
			q->info.si_uid = 0;
			break;
		default:
			copy_siginfo(&q->info, info);
			break;
		}
	} else if (!is_si_special(info) &&
		   sig >= SIGRTMIN && info->si_code != SI_USER) {
		/*
		 * Queue overflow, abort.  We may abort if the
		 * signal was rt and sent by user using something
		 * other than kill().
		 */
		result = TRACE_SIGNAL_OVERFLOW_FAIL;
		ret = -EAGAIN;
		goto ret;
	} else {
		/*
		 * This is a silent loss of information.  We still
		 * send the signal, but the *info bits are lost.
		 */
		result = TRACE_SIGNAL_LOSE_INFO;
	}

out_set:
	signalfd_notify(t, sig);
	sigaddset(&pending->signal, sig);

	/* Let multiprocess signals appear after on-going forks */
	if (type > PIDTYPE_TGID) {
		struct multiprocess_signals *delayed;
		hlist_for_each_entry(delayed, &t->signal->multiprocess, node) {
			sigset_t *signal = &delayed->signal;
			/* Can't queue both a stop and a continue signal */
			if (sig == SIGCONT)
				sigdelsetmask(signal, SIG_KERNEL_STOP_MASK);
			else if (sig_kernel_stop(sig))
				sigdelset(signal, SIGCONT);
			sigaddset(signal, sig);
		}
	}

	complete_signal(sig, t, type);
ret:
	trace_signal_generate(sig, info, t, type != PIDTYPE_PID, result);
	return ret;
}
static inline bool has_si_pid_and_uid(struct kernel_siginfo *info)
{
	bool ret = false;

	switch (siginfo_layout(info->si_signo, info->si_code)) {
	case SIL_KILL:
	case SIL_CHLD:
	case SIL_RT:
		ret = true;
		break;
	case SIL_TIMER:
	case SIL_POLL:
	case SIL_FAULT:
	case SIL_FAULT_TRAPNO:
	case SIL_FAULT_MCEERR:
	case SIL_FAULT_BNDERR:
	case SIL_FAULT_PKUERR:
	case SIL_FAULT_PERF_EVENT:
	case SIL_SYS:
		ret = false;
		break;
	}
	return ret;
}

int send_signal_locked(int sig, struct kernel_siginfo *info,
		       struct task_struct *t, enum pid_type type)
{
	/* Should SIGKILL or SIGSTOP be received by a pid namespace init? */
	bool force = false;

	if (info == SEND_SIG_NOINFO) {
		/* Force if sent from an ancestor pid namespace */
		force = !task_pid_nr_ns(current, task_active_pid_ns(t));
	} else if (info == SEND_SIG_PRIV) {
		/* Don't ignore kernel generated signals */
		force = true;
	} else if (has_si_pid_and_uid(info)) {
		/* SIGKILL and SIGSTOP are special, or the siginfo carries ids */
		struct user_namespace *t_user_ns;

		rcu_read_lock();
		t_user_ns = task_cred_xxx(t, user_ns);
		if (current_user_ns() != t_user_ns) {
			kuid_t uid = make_kuid(current_user_ns(), info->si_uid);
			info->si_uid = from_kuid_munged(t_user_ns, uid);
		}
		rcu_read_unlock();

		/* A kernel generated signal? */
		force = (info->si_code == SI_KERNEL);

		/* From an ancestor pid namespace? */
		if (!task_pid_nr_ns(current, task_active_pid_ns(t))) {
			info->si_pid = 0;
			force = true;
		}
	}
	return __send_signal_locked(sig, info, t, type, force);
}
static void print_fatal_signal(int signr)
{
	struct pt_regs *regs = task_pt_regs(current);
	struct file *exe_file;

	exe_file = get_task_exe_file(current);
	if (exe_file) {
		pr_info("%pD: %s: potentially unexpected fatal signal %d.\n",
			exe_file, current->comm, signr);
		fput(exe_file);
	} else {
		pr_info("%s: potentially unexpected fatal signal %d.\n",
			current->comm, signr);
	}

#if defined(__i386__) && !defined(__arch_um__)
	pr_info("code at %08lx: ", regs->ip);
	{
		int i;
		for (i = 0; i < 16; i++) {
			unsigned char insn;

			if (get_user(insn, (unsigned char *)(regs->ip + i)))
				break;
			pr_cont("%02x ", insn);
		}
	}
	pr_cont("\n");
#endif
	preempt_disable();
	show_regs(regs);
	preempt_enable();
}

static int __init setup_print_fatal_signals(char *str)
{
	get_option(&str, &print_fatal_signals);

	return 1;
}

__setup("print-fatal-signals=", setup_print_fatal_signals);
int do_send_sig_info(int sig, struct kernel_siginfo *info, struct task_struct *p,
			enum pid_type type)
{
	unsigned long flags;
	int ret = -ESRCH;

	if (lock_task_sighand(p, &flags)) {
		ret = send_signal_locked(sig, info, p, type);
		unlock_task_sighand(p, &flags);
	}

	return ret;
}

enum sig_handler {
	HANDLER_CURRENT, /* If reachable use the current handler */
	HANDLER_SIG_DFL, /* Always use SIG_DFL handler semantics */
	HANDLER_EXIT,	 /* Only visible as the process exit code */
};
/*
 * Force a signal that the process can't ignore: if necessary
 * we unblock the signal and change any SIG_IGN to SIG_DFL.
 *
 * Note: If we unblock the signal, we always reset it to SIG_DFL,
 * since we do not want to have a signal handler that was blocked
 * be invoked when user space had explicitly blocked it.
 *
 * We don't want to have recursive SIGSEGV's etc, for example,
 * that is why we also clear SIGNAL_UNKILLABLE.
 */
static int
force_sig_info_to_task(struct kernel_siginfo *info, struct task_struct *t,
	enum sig_handler handler)
{
	unsigned long int flags;
	int ret, blocked, ignored;
	struct k_sigaction *action;
	int sig = info->si_signo;

	spin_lock_irqsave(&t->sighand->siglock, flags);
	action = &t->sighand->action[sig-1];
	ignored = action->sa.sa_handler == SIG_IGN;
	blocked = sigismember(&t->blocked, sig);
	if (blocked || ignored || (handler != HANDLER_CURRENT)) {
		action->sa.sa_handler = SIG_DFL;
		if (handler == HANDLER_EXIT)
			action->sa.sa_flags |= SA_IMMUTABLE;
		if (blocked)
			sigdelset(&t->blocked, sig);
	}
	/*
	 * Don't clear SIGNAL_UNKILLABLE for traced tasks, users won't expect
	 * debugging to leave init killable. But HANDLER_EXIT is always fatal.
	 */
	if (action->sa.sa_handler == SIG_DFL &&
	    (!t->ptrace || (handler == HANDLER_EXIT)))
		t->signal->flags &= ~SIGNAL_UNKILLABLE;
	ret = send_signal_locked(sig, info, t, PIDTYPE_PID);
	/* This can happen if the signal was already pending and blocked */
	if (!task_sigpending(t))
		signal_wake_up(t, 0);
	spin_unlock_irqrestore(&t->sighand->siglock, flags);

	return ret;
}

int force_sig_info(struct kernel_siginfo *info)
{
	return force_sig_info_to_task(info, current, HANDLER_CURRENT);
}
/*
 * Nuke all other threads in the group.
 */
int zap_other_threads(struct task_struct *p)
{
	struct task_struct *t;
	int count = 0;

	p->signal->group_stop_count = 0;

	for_other_threads(p, t) {
		task_clear_jobctl_pending(t, JOBCTL_PENDING_MASK);
		/* Don't require de_thread to wait for the vhost_worker */
		if ((t->flags & (PF_IO_WORKER | PF_USER_WORKER)) != PF_USER_WORKER)
			count++;

		/* Don't bother with already dead threads */
		if (t->exit_state)
			continue;
		sigaddset(&t->pending.signal, SIGKILL);
		signal_wake_up(t, 1);
	}

	return count;
}
struct sighand_struct *__lock_task_sighand(struct task_struct *tsk,
					   unsigned long *flags)
{
	struct sighand_struct *sighand;

	rcu_read_lock();
	for (;;) {
		sighand = rcu_dereference(tsk->sighand);
		if (unlikely(sighand == NULL))
			break;

		/*
		 * This sighand can be already freed and even reused, but
		 * we rely on SLAB_TYPESAFE_BY_RCU and sighand_ctor() which
		 * initializes ->siglock: this slab can't go away, it has
		 * the same object type, ->siglock can't be reinitialized.
		 *
		 * We need to ensure that tsk->sighand is still the same
		 * after we take the lock, we can race with de_thread() or
		 * __exit_signal(). In the latter case the next iteration
		 * must see ->sighand == NULL.
		 */
		spin_lock_irqsave(&sighand->siglock, *flags);
		if (likely(sighand == rcu_access_pointer(tsk->sighand)))
			break;
		spin_unlock_irqrestore(&sighand->siglock, *flags);
	}
	rcu_read_unlock();

	return sighand;
}

#ifdef CONFIG_LOCKDEP
void lockdep_assert_task_sighand_held(struct task_struct *task)
{
	struct sighand_struct *sighand;

	rcu_read_lock();
	sighand = rcu_dereference(task->sighand);
	if (sighand)
		lockdep_assert_held(&sighand->siglock);
	else
		WARN_ON_ONCE(1);
	rcu_read_unlock();
}
#endif
/*
 * send signal info to all the members of a thread group or to the
 * individual thread if type == PIDTYPE_PID.
 */
int group_send_sig_info(int sig, struct kernel_siginfo *info,
			struct task_struct *p, enum pid_type type)
{
	int ret;

	rcu_read_lock();
	ret = check_kill_permission(sig, info, p);
	rcu_read_unlock();

	if (!ret && sig)
		ret = do_send_sig_info(sig, info, p, type);

	return ret;
}

/*
 * __kill_pgrp_info() sends a signal to a process group: this is what the tty
 * control characters do (^C, ^Z etc)
 * - the caller must hold at least a readlock on tasklist_lock
 */
int __kill_pgrp_info(int sig, struct kernel_siginfo *info, struct pid *pgrp)
{
	struct task_struct *p = NULL;
	int ret = -ESRCH;

	do_each_pid_task(pgrp, PIDTYPE_PGID, p) {
		int err = group_send_sig_info(sig, info, p, PIDTYPE_PGID);
		/*
		 * If group_send_sig_info() succeeds at least once ret
		 * becomes 0 and after that the code below has no effect.
		 * Otherwise we return the last err or -ESRCH if this
		 * process group is empty.
		 */
		if (ret)
			ret = err;
	} while_each_pid_task(pgrp, PIDTYPE_PGID, p);

	return ret;
}
static int kill_pid_info_type(int sig, struct kernel_siginfo *info,
				struct pid *pid, enum pid_type type)
{
	int error = -ESRCH;
	struct task_struct *p;

	for (;;) {
		rcu_read_lock();
		p = pid_task(pid, PIDTYPE_PID);
		if (p)
			error = group_send_sig_info(sig, info, p, type);
		rcu_read_unlock();
		if (likely(!p || error != -ESRCH))
			return error;
		/*
		 * The task was unhashed in between, try again.  If it
		 * is dead, pid_task() will return NULL, if we race with
		 * de_thread() it will find the new leader.
		 */
	}
}

int kill_pid_info(int sig, struct kernel_siginfo *info, struct pid *pid)
{
	return kill_pid_info_type(sig, info, pid, PIDTYPE_TGID);
}

static int kill_proc_info(int sig, struct kernel_siginfo *info, pid_t pid)
{
	int error;

	rcu_read_lock();
	error = kill_pid_info(sig, info, find_vpid(pid));
	rcu_read_unlock();

	return error;
}
static inline bool kill_as_cred_perm(const struct cred *cred,
				     struct task_struct *target)
{
	const struct cred *pcred = __task_cred(target);

	return uid_eq(cred->euid, pcred->suid) ||
	       uid_eq(cred->euid, pcred->uid) ||
	       uid_eq(cred->uid, pcred->suid) ||
	       uid_eq(cred->uid, pcred->uid);
}
/*
 * The usb asyncio usage of siginfo is wrong.  The glibc support
 * for asyncio which uses SI_ASYNCIO assumes the layout is SIL_RT.
 * AKA after the generic fields:
 *	kernel_pid_t	si_pid;
 *	kernel_uid32_t	si_uid;
 *	sigval_t	si_value;
 *
 * Unfortunately when usb generates SI_ASYNCIO it assumes the layout
 * after the generic fields is:
 *	void __user 	*si_addr;
 *
 * This is a practical problem when there is a 64bit big endian kernel
 * and a 32bit userspace.  As the 32bit address will be encoded in the
 * low 32bits of the pointer.  Those low 32bits will be stored at a
 * higher address than appear in a 32 bit pointer.  So userspace will
 * not see the address it was expecting for its completions.
 *
 * There is nothing in the encoding that can allow
 * copy_siginfo_to_user32 to detect this confusion of formats, so
 * handle this by requiring the caller of kill_pid_usb_asyncio to
 * notice when this situation takes place and to store the 32bit
 * pointer in sival_int, instead of sival_addr of the sigval_t addr
 * member.
 */
int kill_pid_usb_asyncio(int sig, int errno, sigval_t addr,
			 struct pid *pid, const struct cred *cred)
{
	struct kernel_siginfo info;
	struct task_struct *p;
	unsigned long flags;
	int ret = -EINVAL;

	if (!valid_signal(sig))
		goto ret;

	clear_siginfo(&info);
	info.si_signo = sig;
	info.si_errno = errno;
	info.si_code = SI_ASYNCIO;
	*((sigval_t *)&info.si_pid) = addr;

	rcu_read_lock();
	p = pid_task(pid, PIDTYPE_PID);
	if (!p) {
		ret = -ESRCH;
		goto out_unlock;
	}
	if (!kill_as_cred_perm(cred, p)) {
		ret = -EPERM;
		goto out_unlock;
	}
	ret = security_task_kill(p, &info, sig, cred);
	if (ret)
		goto out_unlock;

	if (sig) {
		if (lock_task_sighand(p, &flags)) {
			ret = __send_signal_locked(sig, &info, p, PIDTYPE_TGID, false);
			unlock_task_sighand(p, &flags);
		} else
			ret = -ESRCH;
	}
out_unlock:
	rcu_read_unlock();
ret:
	return ret;
}
EXPORT_SYMBOL_GPL(kill_pid_usb_asyncio);
/*
 * kill_something_info() interprets pid in interesting ways just like kill(2).
 *
 * POSIX specifies that kill(-1,sig) is unspecified, but what we have
 * is probably wrong.  Should make it like BSD or SYSV.
 */

static int kill_something_info(int sig, struct kernel_siginfo *info, pid_t pid)
{
	int ret;

	if (pid > 0)
		return kill_proc_info(sig, info, pid);

	/* -INT_MIN is undefined.  Exclude this case to avoid a UBSAN warning */
	if (pid == INT_MIN)
		return -ESRCH;

	read_lock(&tasklist_lock);
	if (pid != -1) {
		ret = __kill_pgrp_info(sig, info,
				pid ? find_vpid(-pid) : task_pgrp(current));
	} else {
		int retval = 0, count = 0;
		struct task_struct * p;

		for_each_process(p) {
			if (task_pid_vnr(p) > 1 &&
					!same_thread_group(p, current)) {
				int err = group_send_sig_info(sig, info, p,
							      PIDTYPE_MAX);
				++count;
				if (err != -EPERM)
					retval = err;
			}
		}
		ret = count ? retval : -ESRCH;
	}
	read_unlock(&tasklist_lock);

	return ret;
}
/*
 * These are for backward compatibility with the rest of the kernel source.
 */

int send_sig_info(int sig, struct kernel_siginfo *info, struct task_struct *p)
{
	/*
	 * Make sure legacy kernel users don't send in bad values
	 * (normal paths check this in check_kill_permission).
	 */
	if (!valid_signal(sig))
		return -EINVAL;

	return do_send_sig_info(sig, info, p, PIDTYPE_PID);
}
EXPORT_SYMBOL(send_sig_info);

#define __si_special(priv) \
	((priv) ? SEND_SIG_PRIV : SEND_SIG_NOINFO)

int
send_sig(int sig, struct task_struct *p, int priv)
{
	return send_sig_info(sig, __si_special(priv), p);
}
EXPORT_SYMBOL(send_sig);

void force_sig(int sig)
{
	struct kernel_siginfo info;

	clear_siginfo(&info);
	info.si_signo = sig;
	info.si_errno = 0;
	info.si_code = SI_KERNEL;
	info.si_pid = 0;
	info.si_uid = 0;
	force_sig_info(&info);
}
EXPORT_SYMBOL(force_sig);

void force_fatal_sig(int sig)
{
	struct kernel_siginfo info;

	clear_siginfo(&info);
	info.si_signo = sig;
	info.si_errno = 0;
	info.si_code = SI_KERNEL;
	info.si_pid = 0;
	info.si_uid = 0;
	force_sig_info_to_task(&info, current, HANDLER_SIG_DFL);
}

void force_exit_sig(int sig)
{
	struct kernel_siginfo info;

	clear_siginfo(&info);
	info.si_signo = sig;
	info.si_errno = 0;
	info.si_code = SI_KERNEL;
	info.si_pid = 0;
	info.si_uid = 0;
	force_sig_info_to_task(&info, current, HANDLER_EXIT);
}

/*
 * When things go south during signal handling, we
 * will force a SIGSEGV. And if the signal that caused
 * the problem was already a SIGSEGV, we'll want to
 * make sure we don't even try to deliver the signal..
 */
void force_sigsegv(int sig)
{
	if (sig == SIGSEGV)
		force_fatal_sig(SIGSEGV);
	else
		force_sig(SIGSEGV);
}
int force_sig_fault_to_task(int sig, int code, void __user *addr,
			    struct task_struct *t)
{
	struct kernel_siginfo info;

	clear_siginfo(&info);
	info.si_signo = sig;
	info.si_errno = 0;
	info.si_code  = code;
	info.si_addr  = addr;
	return force_sig_info_to_task(&info, t, HANDLER_CURRENT);
}

int force_sig_fault(int sig, int code, void __user *addr)
{
	return force_sig_fault_to_task(sig, code, addr, current);
}

int send_sig_fault(int sig, int code, void __user *addr, struct task_struct *t)
{
	struct kernel_siginfo info;

	clear_siginfo(&info);
	info.si_signo = sig;
	info.si_errno = 0;
	info.si_code  = code;
	info.si_addr  = addr;
	return send_sig_info(info.si_signo, &info, t);
}

int force_sig_mceerr(int code, void __user *addr, short lsb)
{
	struct kernel_siginfo info;

	WARN_ON((code != BUS_MCEERR_AO) && (code != BUS_MCEERR_AR));
	clear_siginfo(&info);
	info.si_signo = SIGBUS;
	info.si_errno = 0;
	info.si_code = code;
	info.si_addr = addr;
	info.si_addr_lsb = lsb;
	return force_sig_info(&info);
}

int send_sig_mceerr(int code, void __user *addr, short lsb, struct task_struct *t)
{
	struct kernel_siginfo info;

	WARN_ON((code != BUS_MCEERR_AO) && (code != BUS_MCEERR_AR));
	clear_siginfo(&info);
	info.si_signo = SIGBUS;
	info.si_errno = 0;
	info.si_code = code;
	info.si_addr = addr;
	info.si_addr_lsb = lsb;
	return send_sig_info(info.si_signo, &info, t);
}
EXPORT_SYMBOL(send_sig_mceerr);
int force_sig_bnderr(void __user *addr, void __user *lower, void __user *upper)
{
	struct kernel_siginfo info;

	clear_siginfo(&info);
	info.si_signo = SIGSEGV;
	info.si_errno = 0;
	info.si_code  = SEGV_BNDERR;
	info.si_addr  = addr;
	info.si_lower = lower;
	info.si_upper = upper;
	return force_sig_info(&info);
}

#ifdef SEGV_PKUERR
int force_sig_pkuerr(void __user *addr, u32 pkey)
{
	struct kernel_siginfo info;

	clear_siginfo(&info);
	info.si_signo = SIGSEGV;
	info.si_errno = 0;
	info.si_code  = SEGV_PKUERR;
	info.si_addr  = addr;
	info.si_pkey  = pkey;
	return force_sig_info(&info);
}
#endif

int send_sig_perf(void __user *addr, u32 type, u64 sig_data)
{
	struct kernel_siginfo info;

	clear_siginfo(&info);
	info.si_signo     = SIGTRAP;
	info.si_errno     = 0;
	info.si_code      = TRAP_PERF;
	info.si_addr      = addr;
	info.si_perf_data = sig_data;
	info.si_perf_type = type;

	/*
	 * Signals generated by perf events should not terminate the whole
	 * process if SIGTRAP is blocked, however, delivering the signal
	 * asynchronously is better than not delivering at all. But tell user
	 * space if the signal was asynchronous, so it can clearly be
	 * distinguished from normal synchronous ones.
	 */
	info.si_perf_flags = sigismember(&current->blocked, info.si_signo) ?
				     TRAP_PERF_FLAG_ASYNC :
				     0;

	return send_sig_info(info.si_signo, &info, current);
}
/**
 * force_sig_seccomp - signals the task to allow in-process syscall emulation
 * @syscall: syscall number to send to userland
 * @reason: filter-supplied reason code to send to userland (via si_errno)
 * @force_coredump: true to trigger a coredump
 *
 * Forces a SIGSYS with a code of SYS_SECCOMP and related sigsys info.
 */
int force_sig_seccomp(int syscall, int reason, bool force_coredump)
{
	struct kernel_siginfo info;

	clear_siginfo(&info);
	info.si_signo = SIGSYS;
	info.si_code = SYS_SECCOMP;
	info.si_call_addr = (void __user *)KSTK_EIP(current);
	info.si_errno = reason;
	info.si_arch = syscall_get_arch(current);
	info.si_syscall = syscall;
	return force_sig_info_to_task(&info, current,
		force_coredump ? HANDLER_EXIT : HANDLER_CURRENT);
}

/* For the crazy architectures that include trap information in
 * the errno field, instead of an actual errno value.
 */
int force_sig_ptrace_errno_trap(int errno, void __user *addr)
{
	struct kernel_siginfo info;

	clear_siginfo(&info);
	info.si_signo = SIGTRAP;
	info.si_errno = errno;
	info.si_code = TRAP_HWBKPT;
	info.si_addr = addr;
	return force_sig_info(&info);
}
/* For the rare architectures that include trap information using
 * si_trapno.
 */
int force_sig_fault_trapno(int sig, int code, void __user *addr, int trapno)
{
	struct kernel_siginfo info;

	clear_siginfo(&info);
	info.si_signo = sig;
	info.si_errno = 0;
	info.si_code  = code;
	info.si_addr  = addr;
	info.si_trapno = trapno;
	return force_sig_info(&info);
}

/* For the rare architectures that include trap information using
 * si_trapno.
 */
int send_sig_fault_trapno(int sig, int code, void __user *addr, int trapno,
			  struct task_struct *t)
{
	struct kernel_siginfo info;

	clear_siginfo(&info);
	info.si_signo = sig;
	info.si_errno = 0;
	info.si_code  = code;
	info.si_addr  = addr;
	info.si_trapno = trapno;
	return send_sig_info(info.si_signo, &info, t);
}
static int kill_pgrp_info(int sig, struct kernel_siginfo *info, struct pid *pgrp)
{
	int ret;

	read_lock(&tasklist_lock);
	ret = __kill_pgrp_info(sig, info, pgrp);
	read_unlock(&tasklist_lock);

	return ret;
}

int kill_pgrp(struct pid *pid, int sig, int priv)
{
	return kill_pgrp_info(sig, __si_special(priv), pid);
}
EXPORT_SYMBOL(kill_pgrp);

int kill_pid(struct pid *pid, int sig, int priv)
{
	return kill_pid_info(sig, __si_special(priv), pid);
}
EXPORT_SYMBOL(kill_pid);
/*
 * These functions support sending signals using preallocated sigqueue
 * structures.  This is needed "because realtime applications cannot
 * afford to lose notifications of asynchronous events, like timer
 * expirations or I/O completions".  In the case of POSIX Timers
 * we allocate the sigqueue structure from the timer_create.  If this
 * allocation fails we are able to report the failure to the application
 * with an EAGAIN error.
 */
struct sigqueue *sigqueue_alloc(void)
{
	return __sigqueue_alloc(-1, current, GFP_KERNEL, 0, SIGQUEUE_PREALLOC);
}

void sigqueue_free(struct sigqueue *q)
{
	unsigned long flags;
	spinlock_t *lock = &current->sighand->siglock;

	BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));
	/*
	 * We must hold ->siglock while testing q->list
	 * to serialize with collect_signal() or with
	 * __exit_signal()->flush_sigqueue().
	 */
	spin_lock_irqsave(lock, flags);
	q->flags &= ~SIGQUEUE_PREALLOC;
	/*
	 * If it is queued it will be freed when dequeued,
	 * like the "regular" sigqueue.
	 */
	if (!list_empty(&q->list))
		q = NULL;
	spin_unlock_irqrestore(lock, flags);

	if (q)
		__sigqueue_free(q);
}
int send_sigqueue(struct sigqueue *q, struct pid *pid, enum pid_type type)
{
	int sig = q->info.si_signo;
	struct sigpending *pending;
	struct task_struct *t;
	unsigned long flags;
	int ret, result;

	BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));

	ret = -1;
	rcu_read_lock();

	/*
	 * This function is used by POSIX timers to deliver a timer signal.
	 * Where type is PIDTYPE_PID (such as for timers with SIGEV_THREAD_ID
	 * set), the signal must be delivered to the specific thread (queues
	 * into t->pending).
	 *
	 * Where type is not PIDTYPE_PID, signals must be delivered to the
	 * process. In this case, prefer to deliver to current if it is in
	 * the same thread group as the target process, which avoids
	 * unnecessarily waking up a potentially idle task.
	 */
	t = pid_task(pid, type);
	if (!t)
		goto ret;
	if (type != PIDTYPE_PID && same_thread_group(t, current))
		t = current;
	if (!likely(lock_task_sighand(t, &flags)))
		goto ret;

	ret = 1; /* the signal is ignored */
	result = TRACE_SIGNAL_IGNORED;
	if (!prepare_signal(sig, t, false))
		goto out;

	ret = 0;
	if (unlikely(!list_empty(&q->list))) {
		/*
		 * If an SI_TIMER entry is already queued, just increment
		 * the overrun count.
		 */
		BUG_ON(q->info.si_code != SI_TIMER);
		q->info.si_overrun++;
		result = TRACE_SIGNAL_ALREADY_PENDING;
		goto out;
	}
	q->info.si_overrun = 0;

	signalfd_notify(t, sig);
	pending = (type != PIDTYPE_PID) ? &t->signal->shared_pending : &t->pending;
	list_add_tail(&q->list, &pending->list);
	sigaddset(&pending->signal, sig);
	complete_signal(sig, t, type);
	result = TRACE_SIGNAL_DELIVERED;
out:
	trace_signal_generate(sig, &q->info, t, type != PIDTYPE_PID, result);
	unlock_task_sighand(t, &flags);
ret:
	rcu_read_unlock();
	return ret;
}
void do_notify_pidfd(struct task_struct *task)
{
	struct pid *pid = task_pid(task);

	WARN_ON(task->exit_state == 0);

	__wake_up(&pid->wait_pidfd, TASK_NORMAL, 0,
		  poll_to_key(EPOLLIN | EPOLLRDNORM));
}
/*
 * Let a parent know about the death of a child.
 * For a stopped/continued status change, use do_notify_parent_cldstop instead.
 *
 * Returns true if our parent ignored us and so we've switched to
 * self-reaping.
 */
bool do_notify_parent(struct task_struct *tsk, int sig)
{
	struct kernel_siginfo info;
	unsigned long flags;
	struct sighand_struct *psig;
	bool autoreap = false;
	u64 utime, stime;

	WARN_ON_ONCE(sig == -1);

	/* do_notify_parent_cldstop should have been called instead. */
	WARN_ON_ONCE(task_is_stopped_or_traced(tsk));

	WARN_ON_ONCE(!tsk->ptrace &&
	       (tsk->group_leader != tsk || !thread_group_empty(tsk)));
	/*
	 * tsk is a group leader and has no threads, wake up the
	 * non-PIDFD_THREAD waiters.
	 */
	if (thread_group_empty(tsk))
		do_notify_pidfd(tsk);

	if (sig != SIGCHLD) {
		/*
		 * This is only possible if parent == real_parent.
		 * Check if it has changed security domain.
		 */
		if (tsk->parent_exec_id != READ_ONCE(tsk->parent->self_exec_id))
			sig = SIGCHLD;
	}

	clear_siginfo(&info);
	info.si_signo = sig;
	info.si_errno = 0;
	/*
	 * We are under tasklist_lock here so our parent is tied to
	 * us and cannot change.
	 *
	 * task_active_pid_ns will always return the same pid namespace
	 * until a task passes through release_task.
	 *
	 * write_lock() currently calls preempt_disable() which is the
	 * same as rcu_read_lock(), but according to Oleg, this is not
	 * correct to rely on this
	 */
	rcu_read_lock();
	info.si_pid = task_pid_nr_ns(tsk, task_active_pid_ns(tsk->parent));
	info.si_uid = from_kuid_munged(task_cred_xxx(tsk->parent, user_ns),
				       task_uid(tsk));
	rcu_read_unlock();

	task_cputime(tsk, &utime, &stime);
	info.si_utime = nsec_to_clock_t(utime + tsk->signal->utime);
	info.si_stime = nsec_to_clock_t(stime + tsk->signal->stime);

	info.si_status = tsk->exit_code & 0x7f;
	if (tsk->exit_code & 0x80)
		info.si_code = CLD_DUMPED;
	else if (tsk->exit_code & 0x7f)
		info.si_code = CLD_KILLED;
	else {
		info.si_code = CLD_EXITED;
		info.si_status = tsk->exit_code >> 8;
	}

	psig = tsk->parent->sighand;
	spin_lock_irqsave(&psig->siglock, flags);
	if (!tsk->ptrace && sig == SIGCHLD &&
	    (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN ||
	     (psig->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDWAIT))) {
		/*
		 * We are exiting and our parent doesn't care.  POSIX.1
		 * defines special semantics for setting SIGCHLD to SIG_IGN
		 * or setting the SA_NOCLDWAIT flag: we should be reaped
		 * automatically and not left for our parent's wait4 call.
		 * Rather than having the parent do it as a magic kind of
		 * signal handler, we just set this to tell do_exit that we
		 * can be cleaned up without becoming a zombie.  Note that
		 * we still call __wake_up_parent in this case, because a
		 * blocked sys_wait4 might now return -ECHILD.
		 *
		 * Whether we send SIGCHLD or not for SA_NOCLDWAIT
		 * is implementation-defined: we do (if you don't want
		 * it, just use SIG_IGN instead).
		 */
		autoreap = true;
		if (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN)
			sig = 0;
	}
	/*
	 * Send with __send_signal as si_pid and si_uid are in the
	 * parent's namespaces.
	 */
	if (valid_signal(sig) && sig)
		__send_signal_locked(sig, &info, tsk->parent, PIDTYPE_TGID, false);
	__wake_up_parent(tsk, tsk->parent);
	spin_unlock_irqrestore(&psig->siglock, flags);

	return autoreap;
}
/**
 * do_notify_parent_cldstop - notify parent of stopped/continued state change
 * @tsk: task reporting the state change
 * @for_ptracer: the notification is for ptracer
 * @why: CLD_{CONTINUED|STOPPED|TRAPPED} to report
 *
 * Notify @tsk's parent that the stopped/continued state has changed.  If
 * @for_ptracer is %false, @tsk's group leader notifies to its real parent.
 * If %true, @tsk reports to @tsk->parent which should be the ptracer.
 *
 * CONTEXT:
 * Must be called with tasklist_lock at least read locked.
 */
static void do_notify_parent_cldstop(struct task_struct *tsk,
				     bool for_ptracer, int why)
{
	struct kernel_siginfo info;
	unsigned long flags;
	struct task_struct *parent;
	struct sighand_struct *sighand;
	u64 utime, stime;

	if (for_ptracer) {
		parent = tsk->parent;
	} else {
		tsk = tsk->group_leader;
		parent = tsk->real_parent;
	}

	clear_siginfo(&info);
	info.si_signo = SIGCHLD;
	info.si_errno = 0;
	/*
	 * see comment in do_notify_parent() about the following 4 lines
	 */
	rcu_read_lock();
	info.si_pid = task_pid_nr_ns(tsk, task_active_pid_ns(parent));
	info.si_uid = from_kuid_munged(task_cred_xxx(parent, user_ns), task_uid(tsk));
	rcu_read_unlock();

	task_cputime(tsk, &utime, &stime);
	info.si_utime = nsec_to_clock_t(utime);
	info.si_stime = nsec_to_clock_t(stime);

	info.si_code = why;
	switch (why) {
	case CLD_CONTINUED:
		info.si_status = SIGCONT;
		break;
	case CLD_STOPPED:
		info.si_status = tsk->signal->group_exit_code & 0x7f;
		break;
	case CLD_TRAPPED:
		info.si_status = tsk->exit_code & 0x7f;
		break;
	default:
		BUG();
	}

	sighand = parent->sighand;
	spin_lock_irqsave(&sighand->siglock, flags);
	if (sighand->action[SIGCHLD-1].sa.sa_handler != SIG_IGN &&
	    !(sighand->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDSTOP))
		send_signal_locked(SIGCHLD, &info, parent, PIDTYPE_TGID);
	/*
	 * Even if SIGCHLD is not generated, we must wake up wait4 calls.
	 */
	__wake_up_parent(tsk, parent);
	spin_unlock_irqrestore(&sighand->siglock, flags);
}
2222 * This must be called with current->sighand->siglock held.
2224 * This should be the path for all ptrace stops.
2225 * We always set current->last_siginfo while stopped here.
2226 * That makes it a way to test a stopped process for
2227 * being ptrace-stopped vs being job-control-stopped.
2229 * Returns the signal the ptracer requested the code resume
2230 * with. If the code did not stop because the tracer is gone,
2231 * the stop signal remains unchanged unless clear_code.
static int ptrace_stop(int exit_code, int why, unsigned long message,
		       kernel_siginfo_t *info)
	__releases(&current->sighand->siglock)
	__acquires(&current->sighand->siglock)
{
	bool gstop_done = false;

	if (arch_ptrace_stop_needed()) {
		/*
		 * The arch code has something special to do before a
		 * ptrace stop.  This is allowed to block, e.g. for faults
		 * on user stack pages.  We can't keep the siglock while
		 * calling arch_ptrace_stop, so we must release it now.
		 * To preserve proper semantics, we must do this before
		 * any signal bookkeeping like checking group_stop_count.
		 */
		spin_unlock_irq(&current->sighand->siglock);
		arch_ptrace_stop();
		spin_lock_irq(&current->sighand->siglock);
	}

	/*
	 * After this point ptrace_signal_wake_up or signal_wake_up
	 * will clear TASK_TRACED if ptrace_unlink happens or a fatal
	 * signal comes in.  Handle previous ptrace_unlinks and fatal
	 * signals here to prevent ptrace_stop sleeping in schedule.
	 */
	if (!current->ptrace || __fatal_signal_pending(current))
		return exit_code;

	set_special_state(TASK_TRACED);
	current->jobctl |= JOBCTL_TRACED;

	/*
	 * We're committing to trapping.  TRACED should be visible before
	 * TRAPPING is cleared; otherwise, the tracer might fail do_wait().
	 * Also, transition to TRACED and updates to ->jobctl should be
	 * atomic with respect to siglock and should be done after the arch
	 * hook as siglock is released and regrabbed across it.
	 *
	 *     TRACER				    TRACEE
	 *
	 *     ptrace_attach()
	 * [L]   wait_on_bit(JOBCTL_TRAPPING)	[S] set_special_state(TRACED)
	 *     do_wait()
	 *       set_current_state()                smp_wmb();
	 *       ptrace_do_wait()
	 *         wait_task_stopped()
	 *           task_stopped_code()
	 * [L]         task_is_traced()		[S] task_clear_jobctl_trapping();
	 */
	smp_wmb();

	current->ptrace_message = message;
	current->last_siginfo = info;
	current->exit_code = exit_code;

	/*
	 * If @why is CLD_STOPPED, we're trapping to participate in a group
	 * stop.  Do the bookkeeping.  Note that if SIGCONT was delivered
	 * across siglock relocks since INTERRUPT was scheduled, PENDING
	 * could be clear now.  We act as if SIGCONT is received after
	 * TASK_TRACED is entered - ignore it.
	 */
	if (why == CLD_STOPPED && (current->jobctl & JOBCTL_STOP_PENDING))
		gstop_done = task_participate_group_stop(current);

	/* any trap clears pending STOP trap, STOP trap clears NOTIFY */
	task_clear_jobctl_pending(current, JOBCTL_TRAP_STOP);
	if (info && info->si_code >> 8 == PTRACE_EVENT_STOP)
		task_clear_jobctl_pending(current, JOBCTL_TRAP_NOTIFY);

	/* entering a trap, clear TRAPPING */
	task_clear_jobctl_trapping(current);

	spin_unlock_irq(&current->sighand->siglock);
	read_lock(&tasklist_lock);
	/*
	 * Notify parents of the stop.
	 *
	 * While ptraced, there are two parents - the ptracer and
	 * the real_parent of the group_leader.  The ptracer should
	 * know about every stop while the real parent is only
	 * interested in the completion of group stop.  The states
	 * for the two don't interact with each other.  Notify
	 * separately unless they're gonna be duplicates.
	 */
	if (current->ptrace)
		do_notify_parent_cldstop(current, true, why);
	if (gstop_done && (!current->ptrace || ptrace_reparented(current)))
		do_notify_parent_cldstop(current, false, why);

	/*
	 * The previous do_notify_parent_cldstop() invocation woke ptracer.
	 * On a PREEMPTION kernel this can result in a preemption requirement
	 * which will be fulfilled after read_unlock() and the ptracer will be
	 * put on the CPU.
	 * The ptracer is in wait_task_inactive(, __TASK_TRACED) waiting for
	 * this task to wait in schedule(). If this task gets preempted then it
	 * remains enqueued on the runqueue. The ptracer will observe this and
	 * then sleep for a delay of one HZ tick. In the meantime this task
	 * gets scheduled, enters schedule() and will wait for the ptracer.
	 *
	 * This preemption point is not bad from a correctness point of
	 * view but extends the runtime by one HZ tick due to the
	 * ptracer's sleep.  The preempt-disable section ensures that there
	 * will be no preemption between unlock and schedule() and so
	 * improves the performance since the ptracer will observe that
	 * the tracee is scheduled out once it gets on the CPU.
	 *
	 * On PREEMPT_RT locking tasklist_lock does not disable preemption.
	 * Therefore the task can be preempted after do_notify_parent_cldstop()
	 * before unlocking tasklist_lock so there is no benefit in doing this.
	 *
	 * In fact disabling preemption is harmful on PREEMPT_RT because
	 * the spinlock_t in cgroup_enter_frozen() must not be acquired
	 * with preemption disabled due to the 'sleeping' spinlock
	 * substitution of RT.
	 */
	if (!IS_ENABLED(CONFIG_PREEMPT_RT))
		preempt_disable();
	read_unlock(&tasklist_lock);
	cgroup_enter_frozen();
	if (!IS_ENABLED(CONFIG_PREEMPT_RT))
		preempt_enable_no_resched();
	schedule();
	cgroup_leave_frozen(true);

	/*
	 * We are back.  Now reacquire the siglock before touching
	 * last_siginfo, so that we are sure to have synchronized with
	 * any signal-sending on another CPU that wants to examine it.
	 */
	spin_lock_irq(&current->sighand->siglock);
	exit_code = current->exit_code;
	current->last_siginfo = NULL;
	current->ptrace_message = 0;
	current->exit_code = 0;

	/* LISTENING can be set only during STOP traps, clear it */
	current->jobctl &= ~(JOBCTL_LISTENING | JOBCTL_PTRACE_FROZEN);

	/*
	 * Queued signals ignored us while we were stopped for tracing.
	 * So check for any that we should take before resuming user mode.
	 * This sets TIF_SIGPENDING, but never clears it.
	 */
	recalc_sigpending_tsk(current);
	return exit_code;
}
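/*
 * Illustrative userspace sketch (not part of this file): the tracer-side
 * counterpart of ptrace_stop().  A debugger observes the trap via
 * waitpid() and resumes the tracee, optionally replacing the signal -
 * which is what feeds the exit_code read back after schedule() above.
 *
 *	#include <sys/ptrace.h>
 *	#include <sys/wait.h>
 *
 *	int status;
 *	waitpid(tracee, &status, 0);	// tracee entered ptrace_stop()
 *	if (WIFSTOPPED(status))
 *		// 0 cancels the signal; a signal number injects it
 *		ptrace(PTRACE_CONT, tracee, 0, WSTOPSIG(status));
 */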
static int ptrace_do_notify(int signr, int exit_code, int why, unsigned long message)
{
	kernel_siginfo_t info;

	clear_siginfo(&info);
	info.si_signo = signr;
	info.si_code = exit_code;
	info.si_pid = task_pid_vnr(current);
	info.si_uid = from_kuid_munged(current_user_ns(), current_uid());

	/* Let the debugger run.  */
	return ptrace_stop(exit_code, why, message, &info);
}

int ptrace_notify(int exit_code, unsigned long message)
{
	int signr;

	BUG_ON((exit_code & (0x7f | ~0xffff)) != SIGTRAP);
	if (unlikely(task_work_pending(current)))
		task_work_run();

	spin_lock_irq(&current->sighand->siglock);
	signr = ptrace_do_notify(SIGTRAP, exit_code, CLD_TRAPPED, message);
	spin_unlock_irq(&current->sighand->siglock);
	return signr;
}
/**
 * do_signal_stop - handle group stop for SIGSTOP and other stop signals
 * @signr: signr causing group stop if initiating
 *
 * If %JOBCTL_STOP_PENDING is not set yet, initiate group stop with @signr
 * and participate in it.  If already set, participate in the existing
 * group stop.  If participated in a group stop (and thus slept), %true is
 * returned with siglock released.
 *
 * If ptraced, this function doesn't handle stop itself.  Instead,
 * %JOBCTL_TRAP_STOP is scheduled and %false is returned with siglock
 * untouched.  The caller must ensure that INTERRUPT trap handling takes
 * place afterwards.
 *
 * CONTEXT:
 * Must be called with @current->sighand->siglock held, which is released
 * on %true return.
 *
 * RETURNS:
 * %false if group stop is already cancelled or ptrace trap is scheduled.
 * %true if participated in group stop.
 */
static bool do_signal_stop(int signr)
	__releases(&current->sighand->siglock)
{
	struct signal_struct *sig = current->signal;

	if (!(current->jobctl & JOBCTL_STOP_PENDING)) {
		unsigned long gstop = JOBCTL_STOP_PENDING | JOBCTL_STOP_CONSUME;
		struct task_struct *t;

		/* signr will be recorded in task->jobctl for retries */
		WARN_ON_ONCE(signr & ~JOBCTL_STOP_SIGMASK);

		if (!likely(current->jobctl & JOBCTL_STOP_DEQUEUED) ||
		    unlikely(sig->flags & SIGNAL_GROUP_EXIT) ||
		    unlikely(sig->group_exec_task))
			return false;
		/*
		 * There is no group stop already in progress.  We must
		 * initiate one now.
		 *
		 * While ptraced, a task may be resumed while group stop is
		 * still in effect and then receive a stop signal and
		 * initiate another group stop.  This deviates from the
		 * usual behavior as two consecutive stop signals can't
		 * cause two group stops when !ptraced.  That is why we
		 * also check !task_is_stopped(t) below.
		 *
		 * The condition can be distinguished by testing whether
		 * SIGNAL_STOP_STOPPED is already set.  Don't generate
		 * group_exit_code in such case.
		 *
		 * This is not necessary for SIGNAL_STOP_CONTINUED because
		 * an intervening stop signal is required to cause two
		 * continued events regardless of ptrace.
		 */
		if (!(sig->flags & SIGNAL_STOP_STOPPED))
			sig->group_exit_code = signr;

		sig->group_stop_count = 0;
		if (task_set_jobctl_pending(current, signr | gstop))
			sig->group_stop_count++;

		for_other_threads(current, t) {
			/*
			 * Setting state to TASK_STOPPED for a group
			 * stop is always done with the siglock held,
			 * so this check has no races.
			 */
			if (!task_is_stopped(t) &&
			    task_set_jobctl_pending(t, signr | gstop)) {
				sig->group_stop_count++;
				if (likely(!(t->ptrace & PT_SEIZED)))
					signal_wake_up(t, 0);
				else
					ptrace_trap_notify(t);
			}
		}
	}

	if (likely(!current->ptrace)) {
		int notify = 0;

		/*
		 * If there are no other threads in the group, or if there
		 * is a group stop in progress and we are the last to stop,
		 * report to the parent.
		 */
		if (task_participate_group_stop(current))
			notify = CLD_STOPPED;

		current->jobctl |= JOBCTL_STOPPED;
		set_special_state(TASK_STOPPED);
		spin_unlock_irq(&current->sighand->siglock);

		/*
		 * Notify the parent of the group stop completion.  Because
		 * we're not holding either the siglock or tasklist_lock
		 * here, ptracer may attach in between; however, this is for
		 * group stop and should always be delivered to the real
		 * parent of the group leader.  The new ptracer will get
		 * its notification when this task transitions into
		 * TASK_TRACED.
		 */
		if (notify) {
			read_lock(&tasklist_lock);
			do_notify_parent_cldstop(current, false, notify);
			read_unlock(&tasklist_lock);
		}

		/* Now we don't run again until woken by SIGCONT or SIGKILL */
		cgroup_enter_frozen();
		schedule();
		return true;
	} else {
		/*
		 * While ptraced, group stop is handled by STOP trap.
		 * Schedule it and let the caller deal with it.
		 */
		task_set_jobctl_pending(current, JOBCTL_TRAP_STOP);
		return false;
	}
}
/**
 * do_jobctl_trap - take care of ptrace jobctl traps
 *
 * When PT_SEIZED, it's used for both group stop and explicit
 * SEIZE/INTERRUPT traps.  Both generate PTRACE_EVENT_STOP trap with
 * accompanying siginfo.  If stopped, lower eight bits of exit_code contain
 * the stop signal; otherwise, %SIGTRAP.
 *
 * When !PT_SEIZED, it's used only for group stop trap with stop signal
 * number as exit_code and no siginfo.
 *
 * CONTEXT:
 * Must be called with @current->sighand->siglock held, which may be
 * released and re-acquired before returning with intervening sleep.
 */
static void do_jobctl_trap(void)
{
	struct signal_struct *signal = current->signal;
	int signr = current->jobctl & JOBCTL_STOP_SIGMASK;

	if (current->ptrace & PT_SEIZED) {
		if (!signal->group_stop_count &&
		    !(signal->flags & SIGNAL_STOP_STOPPED))
			signr = SIGTRAP;
		WARN_ON_ONCE(!signr);
		ptrace_do_notify(signr, signr | (PTRACE_EVENT_STOP << 8),
				 CLD_STOPPED, 0);
	} else {
		WARN_ON_ONCE(!signr);
		ptrace_stop(signr, CLD_STOPPED, 0, NULL);
	}
}
/**
 * do_freezer_trap - handle the freezer jobctl trap
 *
 * Puts the task into frozen state, but only if the task is not about to
 * quit.  In that case it drops %JOBCTL_TRAP_FREEZE.
 *
 * CONTEXT:
 * Must be called with @current->sighand->siglock held,
 * which is always released before returning.
 */
static void do_freezer_trap(void)
	__releases(&current->sighand->siglock)
{
	/*
	 * If there are other trap bits pending except JOBCTL_TRAP_FREEZE,
	 * let's make another loop to give it a chance to be handled.
	 * In any case, we'll return back.
	 */
	if ((current->jobctl & (JOBCTL_PENDING_MASK | JOBCTL_TRAP_FREEZE)) !=
	     JOBCTL_TRAP_FREEZE) {
		spin_unlock_irq(&current->sighand->siglock);
		return;
	}

	/*
	 * Now we're sure that there is no pending fatal signal and no
	 * pending traps.  Clear TIF_SIGPENDING to not get out of schedule()
	 * immediately (if there is a non-fatal signal pending), and
	 * put the task into sleep.
	 */
	__set_current_state(TASK_INTERRUPTIBLE|TASK_FREEZABLE);
	clear_thread_flag(TIF_SIGPENDING);
	spin_unlock_irq(&current->sighand->siglock);
	cgroup_enter_frozen();
	schedule();
}
static int ptrace_signal(int signr, kernel_siginfo_t *info, enum pid_type type)
{
	/*
	 * We do not check sig_kernel_stop(signr) but set this marker
	 * unconditionally because we do not know whether debugger will
	 * change signr. This flag has no meaning unless we are going
	 * to stop after return from ptrace_stop(). In this case it will
	 * be checked in do_signal_stop(), we should only stop if it was
	 * not cleared by SIGCONT while we were sleeping. See also the
	 * comment in dequeue_signal().
	 */
	current->jobctl |= JOBCTL_STOP_DEQUEUED;
	signr = ptrace_stop(signr, CLD_TRAPPED, 0, info);

	/* We're back.  Did the debugger cancel the sig?  */
	if (signr == 0)
		return signr;

	/*
	 * Update the siginfo structure if the signal has
	 * changed.  If the debugger wanted something
	 * specific in the siginfo structure then it should
	 * have updated *info via PTRACE_SETSIGINFO.
	 */
	if (signr != info->si_signo) {
		clear_siginfo(info);
		info->si_signo = signr;
		info->si_errno = 0;
		info->si_code = SI_USER;
		rcu_read_lock();
		info->si_pid = task_pid_vnr(current->parent);
		info->si_uid = from_kuid_munged(current_user_ns(),
						task_uid(current->parent));
		rcu_read_unlock();
	}

	/* If the (new) signal is now blocked, requeue it.  */
	if (sigismember(&current->blocked, signr) ||
	    fatal_signal_pending(current)) {
		send_signal_locked(signr, info, current, type);
		signr = 0;
	}

	return signr;
}
static void hide_si_addr_tag_bits(struct ksignal *ksig)
{
	switch (siginfo_layout(ksig->sig, ksig->info.si_code)) {
	case SIL_FAULT:
	case SIL_FAULT_TRAPNO:
	case SIL_FAULT_MCEERR:
	case SIL_FAULT_BNDERR:
	case SIL_FAULT_PKUERR:
	case SIL_FAULT_PERF_EVENT:
		ksig->info.si_addr = arch_untagged_si_addr(
			ksig->info.si_addr, ksig->sig, ksig->info.si_code);
		break;
	default:
		break;
	}
}
bool get_signal(struct ksignal *ksig)
{
	struct sighand_struct *sighand = current->sighand;
	struct signal_struct *signal = current->signal;
	int signr;

	clear_notify_signal();
	if (unlikely(task_work_pending(current)))
		task_work_run();

	if (!task_sigpending(current))
		return false;

	if (unlikely(uprobe_deny_signal()))
		return false;

	/*
	 * Do this once, we can't return to user-mode if freezing() == T.
	 * do_signal_stop() and ptrace_stop() do freezable_schedule() and
	 * thus do not need another check after return.
	 */
	try_to_freeze();

relock:
	spin_lock_irq(&sighand->siglock);

	/*
	 * Every stopped thread goes here after wakeup. Check to see if
	 * we should notify the parent, prepare_signal(SIGCONT) encodes
	 * the CLD_ si_code into SIGNAL_CLD_MASK bits.
	 */
	if (unlikely(signal->flags & SIGNAL_CLD_MASK)) {
		int why;

		if (signal->flags & SIGNAL_CLD_CONTINUED)
			why = CLD_CONTINUED;
		else
			why = CLD_STOPPED;

		signal->flags &= ~SIGNAL_CLD_MASK;

		spin_unlock_irq(&sighand->siglock);

		/*
		 * Notify the parent that we're continuing.  This event is
		 * always per-process and doesn't make a whole lot of sense
		 * for ptracers, who shouldn't consume the state via
		 * wait(2) either, but, for backward compatibility, notify
		 * the ptracer of the group leader too unless it's gonna be
		 * a duplicate.
		 */
		read_lock(&tasklist_lock);
		do_notify_parent_cldstop(current, false, why);

		if (ptrace_reparented(current->group_leader))
			do_notify_parent_cldstop(current->group_leader,
						 true, why);
		read_unlock(&tasklist_lock);

		goto relock;
	}

	for (;;) {
		struct k_sigaction *ka;
		enum pid_type type;

		/* Has this task already been marked for death? */
		if ((signal->flags & SIGNAL_GROUP_EXIT) ||
		     signal->group_exec_task) {
			clear_siginfo(&ksig->info);
			ksig->info.si_signo = signr = SIGKILL;
			sigdelset(&current->pending.signal, SIGKILL);
			trace_signal_deliver(SIGKILL, SEND_SIG_NOINFO,
					     &sighand->action[SIGKILL - 1]);
			recalc_sigpending();
			goto fatal;
		}

		if (unlikely(current->jobctl & JOBCTL_STOP_PENDING) &&
		    do_signal_stop(0))
			goto relock;

		if (unlikely(current->jobctl &
			     (JOBCTL_TRAP_MASK | JOBCTL_TRAP_FREEZE))) {
			if (current->jobctl & JOBCTL_TRAP_MASK) {
				do_jobctl_trap();
				spin_unlock_irq(&sighand->siglock);
			} else if (current->jobctl & JOBCTL_TRAP_FREEZE)
				do_freezer_trap();

			goto relock;
		}

		/*
		 * If the task is leaving the frozen state, let's update
		 * cgroup counters and reset the frozen bit.
		 */
		if (unlikely(cgroup_task_frozen(current))) {
			spin_unlock_irq(&sighand->siglock);
			cgroup_leave_frozen(false);
			goto relock;
		}

		/*
		 * Signals generated by the execution of an instruction
		 * need to be delivered before any other pending signals
		 * so that the instruction pointer in the signal stack
		 * frame points to the faulting instruction.
		 */
		type = PIDTYPE_PID;
		signr = dequeue_synchronous_signal(&ksig->info);
		if (!signr)
			signr = dequeue_signal(current, &current->blocked,
					       &ksig->info, &type);

		if (!signr)
			break; /* will return 0 */

		if (unlikely(current->ptrace) && (signr != SIGKILL) &&
		    !(sighand->action[signr -1].sa.sa_flags & SA_IMMUTABLE)) {
			signr = ptrace_signal(signr, &ksig->info, type);
			if (!signr)
				continue;
		}

		ka = &sighand->action[signr-1];

		/* Trace actually delivered signals. */
		trace_signal_deliver(signr, &ksig->info, ka);

		if (ka->sa.sa_handler == SIG_IGN) /* Do nothing.  */
			continue;
		if (ka->sa.sa_handler != SIG_DFL) {
			/* Run the handler.  */
			ksig->ka = *ka;

			if (ka->sa.sa_flags & SA_ONESHOT)
				ka->sa.sa_handler = SIG_DFL;

			break; /* will return non-zero "signr" value */
		}

		/*
		 * Now we are doing the default action for this signal.
		 */
		if (sig_kernel_ignore(signr)) /* Default is nothing. */
			continue;

		/*
		 * Global init gets no signals it doesn't want.
		 * Container-init gets no signals it doesn't want from same
		 * container.
		 *
		 * Note that if global/container-init sees a sig_kernel_only()
		 * signal here, the signal must have been generated internally
		 * or must have come from an ancestor namespace. In either
		 * case, the signal cannot be dropped.
		 */
		if (unlikely(signal->flags & SIGNAL_UNKILLABLE) &&
				!sig_kernel_only(signr))
			continue;

		if (sig_kernel_stop(signr)) {
			/*
			 * The default action is to stop all threads in
			 * the thread group.  The job control signals
			 * do nothing in an orphaned pgrp, but SIGSTOP
			 * always works.  Note that siglock needs to be
			 * dropped during the call to is_orphaned_pgrp()
			 * because of lock ordering with tasklist_lock.
			 * This allows an intervening SIGCONT to be posted.
			 * We need to check for that and bail out if necessary.
			 */
			if (signr != SIGSTOP) {
				spin_unlock_irq(&sighand->siglock);

				/* signals can be posted during this window */

				if (is_current_pgrp_orphaned())
					goto relock;

				spin_lock_irq(&sighand->siglock);
			}

			if (likely(do_signal_stop(ksig->info.si_signo))) {
				/* It released the siglock.  */
				goto relock;
			}

			/*
			 * We didn't actually stop, due to a race
			 * with SIGCONT or something like that.
			 */
			continue;
		}

	fatal:
		spin_unlock_irq(&sighand->siglock);
		if (unlikely(cgroup_task_frozen(current)))
			cgroup_leave_frozen(true);

		/*
		 * Anything else is fatal, maybe with a core dump.
		 */
		current->flags |= PF_SIGNALED;

		if (sig_kernel_coredump(signr)) {
			if (print_fatal_signals)
				print_fatal_signal(ksig->info.si_signo);
			proc_coredump_connector(current);
			/*
			 * If it was able to dump core, this kills all
			 * other threads in the group and synchronizes with
			 * their demise.  If we lost the race with another
			 * thread getting here, it set group_exit_code
			 * first and our do_group_exit call below will use
			 * that value and ignore the one we pass it.
			 */
			do_coredump(&ksig->info);
		}

		/*
		 * PF_USER_WORKER threads will catch and exit on fatal signals
		 * themselves. They have cleanup that must be performed, so
		 * we cannot call do_exit() on their behalf.
		 */
		if (current->flags & PF_USER_WORKER)
			goto out;

		/*
		 * Death signals, no core dump.
		 */
		do_group_exit(ksig->info.si_signo);
		/* NOTREACHED */
	}
	spin_unlock_irq(&sighand->siglock);

out:
	ksig->sig = signr;

	if (!(ksig->ka.sa.sa_flags & SA_EXPOSE_TAGBITS))
		hide_si_addr_tag_bits(ksig);

	return ksig->sig > 0;
}
/**
 * signal_delivered - called after signal delivery to update blocked signals
 * @ksig:		kernel signal struct
 * @stepping:		nonzero if debugger single-step or block-step in use
 *
 * This function should be called when a signal has successfully been
 * delivered. It updates the blocked signals accordingly (@ksig->ka.sa.sa_mask
 * is always blocked), and the signal itself is blocked unless %SA_NODEFER
 * is set in @ksig->ka.sa.sa_flags.  Tracing is notified.
 */
static void signal_delivered(struct ksignal *ksig, int stepping)
{
	sigset_t blocked;

	/* A signal was successfully delivered, and the
	   saved sigmask was stored on the signal frame,
	   and will be restored by sigreturn.  So we can
	   simply clear the restore sigmask flag.  */
	clear_restore_sigmask();

	sigorsets(&blocked, &current->blocked, &ksig->ka.sa.sa_mask);
	if (!(ksig->ka.sa.sa_flags & SA_NODEFER))
		sigaddset(&blocked, ksig->sig);
	set_current_blocked(&blocked);
	if (current->sas_ss_flags & SS_AUTODISARM)
		sas_ss_reset(current);
	if (stepping)
		ptrace_notify(SIGTRAP, 0);
}

void signal_setup_done(int failed, struct ksignal *ksig, int stepping)
{
	if (failed)
		force_sigsegv(ksig->sig);
	else
		signal_delivered(ksig, stepping);
}
/*
 * It could be that complete_signal() picked us to notify about the
 * group-wide signal. Other threads should be notified now to take
 * the shared signals in @which since we will not.
 */
static void retarget_shared_pending(struct task_struct *tsk, sigset_t *which)
{
	sigset_t retarget;
	struct task_struct *t;

	sigandsets(&retarget, &tsk->signal->shared_pending.signal, which);
	if (sigisemptyset(&retarget))
		return;

	for_other_threads(tsk, t) {
		if (t->flags & PF_EXITING)
			continue;

		if (!has_pending_signals(&retarget, &t->blocked))
			continue;
		/* Remove the signals this thread can handle. */
		sigandsets(&retarget, &retarget, &t->blocked);

		if (!task_sigpending(t))
			signal_wake_up(t, 0);

		if (sigisemptyset(&retarget))
			break;
	}
}
void exit_signals(struct task_struct *tsk)
{
	int group_stop = 0;
	sigset_t unblocked;

	/*
	 * @tsk is about to have PF_EXITING set - lock out users which
	 * expect stable threadgroup.
	 */
	cgroup_threadgroup_change_begin(tsk);

	if (thread_group_empty(tsk) || (tsk->signal->flags & SIGNAL_GROUP_EXIT)) {
		sched_mm_cid_exit_signals(tsk);
		tsk->flags |= PF_EXITING;
		cgroup_threadgroup_change_end(tsk);
		return;
	}

	spin_lock_irq(&tsk->sighand->siglock);
	/*
	 * From now this task is not visible for group-wide signals,
	 * see wants_signal(), do_signal_stop().
	 */
	sched_mm_cid_exit_signals(tsk);
	tsk->flags |= PF_EXITING;

	cgroup_threadgroup_change_end(tsk);

	if (!task_sigpending(tsk))
		goto out;

	unblocked = tsk->blocked;
	signotset(&unblocked);
	retarget_shared_pending(tsk, &unblocked);

	if (unlikely(tsk->jobctl & JOBCTL_STOP_PENDING) &&
	    task_participate_group_stop(tsk))
		group_stop = CLD_STOPPED;
out:
	spin_unlock_irq(&tsk->sighand->siglock);

	/*
	 * If group stop has completed, deliver the notification.  This
	 * should always go to the real parent of the group leader.
	 */
	if (unlikely(group_stop)) {
		read_lock(&tasklist_lock);
		do_notify_parent_cldstop(tsk, false, group_stop);
		read_unlock(&tasklist_lock);
	}
}
/*
 * System call entry points.
 */

/**
 *  sys_restart_syscall - restart a system call
 */
SYSCALL_DEFINE0(restart_syscall)
{
	struct restart_block *restart = &current->restart_block;
	return restart->fn(restart);
}

long do_no_restart_syscall(struct restart_block *param)
{
	return -EINTR;
}

static void __set_task_blocked(struct task_struct *tsk, const sigset_t *newset)
{
	if (task_sigpending(tsk) && !thread_group_empty(tsk)) {
		sigset_t newblocked;
		/* A set of now blocked but previously unblocked signals. */
		sigandnsets(&newblocked, newset, &current->blocked);
		retarget_shared_pending(tsk, &newblocked);
	}
	tsk->blocked = *newset;
	recalc_sigpending();
}

/**
 * set_current_blocked - change current->blocked mask
 * @newset: new mask
 *
 * It is wrong to change ->blocked directly, this helper should be used
 * to ensure the process can't miss a shared signal we are going to block.
 */
void set_current_blocked(sigset_t *newset)
{
	sigdelsetmask(newset, sigmask(SIGKILL) | sigmask(SIGSTOP));
	__set_current_blocked(newset);
}
void __set_current_blocked(const sigset_t *newset)
{
	struct task_struct *tsk = current;

	/*
	 * In case the signal mask hasn't changed, there is nothing we need
	 * to do.  The current->blocked shouldn't be modified by another
	 * task.
	 */
	if (sigequalsets(&tsk->blocked, newset))
		return;

	spin_lock_irq(&tsk->sighand->siglock);
	__set_task_blocked(tsk, newset);
	spin_unlock_irq(&tsk->sighand->siglock);
}
/*
 * This is also useful for kernel threads that want to temporarily
 * (or permanently) block certain signals.
 *
 * NOTE! Unlike the user-mode sys_sigprocmask(), the kernel
 * interface happily blocks "unblockable" signals like SIGKILL
 * and friends.
 */
int sigprocmask(int how, sigset_t *set, sigset_t *oldset)
{
	struct task_struct *tsk = current;
	sigset_t newset;

	/* Lockless, only current can change ->blocked, never from irq */
	if (oldset)
		*oldset = tsk->blocked;

	switch (how) {
	case SIG_BLOCK:
		sigorsets(&newset, &tsk->blocked, set);
		break;
	case SIG_UNBLOCK:
		sigandnsets(&newset, &tsk->blocked, set);
		break;
	case SIG_SETMASK:
		newset = *set;
		break;
	default:
		return -EINVAL;
	}

	__set_current_blocked(&newset);
	return 0;
}
EXPORT_SYMBOL(sigprocmask);
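/*
 * Minimal in-kernel usage sketch (illustrative only, not part of this
 * file): a kernel thread that wants to react to SIGTERM can unblock it
 * with this interface.  "my_kthread_fn" is a hypothetical thread
 * function; most kthreads use the allow_signal() wrapper instead.
 *
 *	static int my_kthread_fn(void *unused)
 *	{
 *		sigset_t set;
 *
 *		siginitset(&set, sigmask(SIGTERM));
 *		sigprocmask(SIG_UNBLOCK, &set, NULL);
 *		...
 *	}
 */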
/*
 * The API helps set app-provided sigmasks.
 *
 * This is useful for syscalls such as ppoll, pselect, io_pgetevents and
 * epoll_pwait where a new sigmask is passed from userland for the syscalls.
 *
 * Note that it does set_restore_sigmask() in advance, so it must always be
 * paired with restore_saved_sigmask_unless() before return from syscall.
 */
int set_user_sigmask(const sigset_t __user *umask, size_t sigsetsize)
{
	sigset_t kmask;

	if (!umask)
		return 0;
	if (sigsetsize != sizeof(sigset_t))
		return -EINVAL;
	if (copy_from_user(&kmask, umask, sizeof(sigset_t)))
		return -EFAULT;

	set_restore_sigmask();
	current->saved_sigmask = current->blocked;
	set_current_blocked(&kmask);

	return 0;
}
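/*
 * Usage sketch (illustrative): callers such as ppoll() bracket their
 * work like this, so the original mask is either put back or parked in
 * ->saved_sigmask for the signal delivery path.  "do_the_actual_wait"
 * is a hypothetical worker standing in for the syscall body.
 *
 *	ret = set_user_sigmask(umask, sigsetsize);
 *	if (ret)
 *		return ret;
 *	ret = do_the_actual_wait(...);
 *	restore_saved_sigmask_unless(ret == -EINTR);
 *	return ret;
 */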
#ifdef CONFIG_COMPAT
int set_compat_user_sigmask(const compat_sigset_t __user *umask,
			    size_t sigsetsize)
{
	sigset_t kmask;

	if (!umask)
		return 0;
	if (sigsetsize != sizeof(compat_sigset_t))
		return -EINVAL;
	if (get_compat_sigset(&kmask, umask))
		return -EFAULT;

	set_restore_sigmask();
	current->saved_sigmask = current->blocked;
	set_current_blocked(&kmask);

	return 0;
}
#endif
/**
 *  sys_rt_sigprocmask - change the list of currently blocked signals
 *  @how: whether to add, remove, or set signals
 *  @nset: new set of blocked signals, or NULL to leave the mask unchanged
 *  @oset: previous value of signal mask if non-null
 *  @sigsetsize: size of sigset_t type
 */
SYSCALL_DEFINE4(rt_sigprocmask, int, how, sigset_t __user *, nset,
		sigset_t __user *, oset, size_t, sigsetsize)
{
	sigset_t old_set, new_set;
	int error;

	/* XXX: Don't preclude handling different sized sigset_t's.  */
	if (sigsetsize != sizeof(sigset_t))
		return -EINVAL;

	old_set = current->blocked;

	if (nset) {
		if (copy_from_user(&new_set, nset, sizeof(sigset_t)))
			return -EFAULT;
		sigdelsetmask(&new_set, sigmask(SIGKILL)|sigmask(SIGSTOP));

		error = sigprocmask(how, &new_set, NULL);
		if (error)
			return error;
	}

	if (oset) {
		if (copy_to_user(oset, &old_set, sizeof(sigset_t)))
			return -EFAULT;
	}

	return 0;
}
#ifdef CONFIG_COMPAT
COMPAT_SYSCALL_DEFINE4(rt_sigprocmask, int, how, compat_sigset_t __user *, nset,
		compat_sigset_t __user *, oset, compat_size_t, sigsetsize)
{
	sigset_t old_set = current->blocked;

	/* XXX: Don't preclude handling different sized sigset_t's.  */
	if (sigsetsize != sizeof(sigset_t))
		return -EINVAL;

	if (nset) {
		sigset_t new_set;
		int error;
		if (get_compat_sigset(&new_set, nset))
			return -EFAULT;
		sigdelsetmask(&new_set, sigmask(SIGKILL)|sigmask(SIGSTOP));

		error = sigprocmask(how, &new_set, NULL);
		if (error)
			return error;
	}
	return oset ? put_compat_sigset(oset, &old_set, sizeof(*oset)) : 0;
}
#endif

static void do_sigpending(sigset_t *set)
{
	spin_lock_irq(&current->sighand->siglock);
	sigorsets(set, &current->pending.signal,
		  &current->signal->shared_pending.signal);
	spin_unlock_irq(&current->sighand->siglock);

	/* Outside the lock because only this thread touches it.  */
	sigandsets(set, &current->blocked, set);
}
/**
 *  sys_rt_sigpending - examine a pending signal that has been raised
 *			while blocked
 *  @uset: stores pending signals
 *  @sigsetsize: size of sigset_t type or larger
 */
SYSCALL_DEFINE2(rt_sigpending, sigset_t __user *, uset, size_t, sigsetsize)
{
	sigset_t set;

	if (sigsetsize > sizeof(*uset))
		return -EINVAL;

	do_sigpending(&set);

	if (copy_to_user(uset, &set, sigsetsize))
		return -EFAULT;

	return 0;
}

#ifdef CONFIG_COMPAT
COMPAT_SYSCALL_DEFINE2(rt_sigpending, compat_sigset_t __user *, uset,
		compat_size_t, sigsetsize)
{
	sigset_t set;

	if (sigsetsize > sizeof(*uset))
		return -EINVAL;

	do_sigpending(&set);

	return put_compat_sigset(uset, &set, sigsetsize);
}
#endif
static const struct {
	unsigned char limit, layout;
} sig_sicodes[] = {
	[SIGILL]  = { NSIGILL,  SIL_FAULT },
	[SIGFPE]  = { NSIGFPE,  SIL_FAULT },
	[SIGSEGV] = { NSIGSEGV, SIL_FAULT },
	[SIGBUS]  = { NSIGBUS,  SIL_FAULT },
	[SIGTRAP] = { NSIGTRAP, SIL_FAULT },
#ifdef SIGEMT
	[SIGEMT]  = { NSIGEMT,  SIL_FAULT },
#endif
	[SIGCHLD] = { NSIGCHLD, SIL_CHLD },
	[SIGPOLL] = { NSIGPOLL, SIL_POLL },
	[SIGSYS]  = { NSIGSYS,  SIL_SYS },
};

static bool known_siginfo_layout(unsigned sig, int si_code)
{
	if (si_code == SI_KERNEL)
		return true;
	else if ((si_code > SI_USER)) {
		if (sig_specific_sicodes(sig)) {
			if (si_code <= sig_sicodes[sig].limit)
				return true;
		}
		else if (si_code <= NSIGPOLL)
			return true;
	}
	else if (si_code >= SI_DETHREAD)
		return true;
	else if (si_code == SI_ASYNCNL)
		return true;
	return false;
}

enum siginfo_layout siginfo_layout(unsigned sig, int si_code)
{
	enum siginfo_layout layout = SIL_KILL;

	if ((si_code > SI_USER) && (si_code < SI_KERNEL)) {
		if ((sig < ARRAY_SIZE(sig_sicodes)) &&
		    (si_code <= sig_sicodes[sig].limit)) {
			layout = sig_sicodes[sig].layout;
			/* Handle the exceptions */
			if ((sig == SIGBUS) &&
			    (si_code >= BUS_MCEERR_AR) && (si_code <= BUS_MCEERR_AO))
				layout = SIL_FAULT_MCEERR;
			else if ((sig == SIGSEGV) && (si_code == SEGV_BNDERR))
				layout = SIL_FAULT_BNDERR;
			else if ((sig == SIGSEGV) && (si_code == SEGV_PKUERR))
				layout = SIL_FAULT_PKUERR;
			else if ((sig == SIGTRAP) && (si_code == TRAP_PERF))
				layout = SIL_FAULT_PERF_EVENT;
			else if (IS_ENABLED(CONFIG_SPARC) &&
				 (sig == SIGILL) && (si_code == ILL_ILLTRP))
				layout = SIL_FAULT_TRAPNO;
			else if (IS_ENABLED(CONFIG_ALPHA) &&
				 ((sig == SIGFPE) ||
				  ((sig == SIGTRAP) && (si_code == TRAP_UNK))))
				layout = SIL_FAULT_TRAPNO;
		}
		else if (si_code <= NSIGPOLL)
			layout = SIL_POLL;
	} else {
		if (si_code == SI_TIMER)
			layout = SIL_TIMER;
		else if (si_code == SI_SIGIO)
			layout = SIL_POLL;
		else if (si_code < 0)
			layout = SIL_RT;
	}
	return layout;
}
static inline char __user *si_expansion(const siginfo_t __user *info)
{
	return ((char __user *)info) + sizeof(struct kernel_siginfo);
}

int copy_siginfo_to_user(siginfo_t __user *to, const kernel_siginfo_t *from)
{
	char __user *expansion = si_expansion(to);
	if (copy_to_user(to, from, sizeof(struct kernel_siginfo)))
		return -EFAULT;
	if (clear_user(expansion, SI_EXPANSION_SIZE))
		return -EFAULT;
	return 0;
}

static int post_copy_siginfo_from_user(kernel_siginfo_t *info,
				       const siginfo_t __user *from)
{
	if (unlikely(!known_siginfo_layout(info->si_signo, info->si_code))) {
		char __user *expansion = si_expansion(from);
		char buf[SI_EXPANSION_SIZE];
		int i;
		/*
		 * An unknown si_code might need more than
		 * sizeof(struct kernel_siginfo) bytes.  Verify all of the
		 * extra bytes are 0.  This guarantees copy_siginfo_to_user
		 * will return this data to userspace exactly.
		 */
		if (copy_from_user(&buf, expansion, SI_EXPANSION_SIZE))
			return -EFAULT;
		for (i = 0; i < SI_EXPANSION_SIZE; i++) {
			if (buf[i] != 0)
				return -E2BIG;
		}
	}
	return 0;
}

static int __copy_siginfo_from_user(int signo, kernel_siginfo_t *to,
				    const siginfo_t __user *from)
{
	if (copy_from_user(to, from, sizeof(struct kernel_siginfo)))
		return -EFAULT;
	to->si_signo = signo;
	return post_copy_siginfo_from_user(to, from);
}

int copy_siginfo_from_user(kernel_siginfo_t *to, const siginfo_t __user *from)
{
	if (copy_from_user(to, from, sizeof(struct kernel_siginfo)))
		return -EFAULT;
	return post_copy_siginfo_from_user(to, from);
}
#ifdef CONFIG_COMPAT
/**
 * copy_siginfo_to_external32 - copy a kernel siginfo into a compat user siginfo
 * @to: compat siginfo destination
 * @from: kernel siginfo source
 *
 * Note: This function does not work properly for the SIGCHLD on x32, but
 * fortunately it doesn't have to.  The only valid callers for this function
 * are copy_siginfo_to_user32, which is overridden for x32 and the coredump
 * code.  The latter does not care because SIGCHLD will never cause a
 * coredump.
 */
void copy_siginfo_to_external32(struct compat_siginfo *to,
		const struct kernel_siginfo *from)
{
	memset(to, 0, sizeof(*to));

	to->si_signo = from->si_signo;
	to->si_errno = from->si_errno;
	to->si_code  = from->si_code;
	switch(siginfo_layout(from->si_signo, from->si_code)) {
	case SIL_KILL:
		to->si_pid = from->si_pid;
		to->si_uid = from->si_uid;
		break;
	case SIL_TIMER:
		to->si_tid     = from->si_tid;
		to->si_overrun = from->si_overrun;
		to->si_int     = from->si_int;
		break;
	case SIL_POLL:
		to->si_band = from->si_band;
		to->si_fd   = from->si_fd;
		break;
	case SIL_FAULT:
		to->si_addr = ptr_to_compat(from->si_addr);
		break;
	case SIL_FAULT_TRAPNO:
		to->si_addr = ptr_to_compat(from->si_addr);
		to->si_trapno = from->si_trapno;
		break;
	case SIL_FAULT_MCEERR:
		to->si_addr = ptr_to_compat(from->si_addr);
		to->si_addr_lsb = from->si_addr_lsb;
		break;
	case SIL_FAULT_BNDERR:
		to->si_addr = ptr_to_compat(from->si_addr);
		to->si_lower = ptr_to_compat(from->si_lower);
		to->si_upper = ptr_to_compat(from->si_upper);
		break;
	case SIL_FAULT_PKUERR:
		to->si_addr = ptr_to_compat(from->si_addr);
		to->si_pkey = from->si_pkey;
		break;
	case SIL_FAULT_PERF_EVENT:
		to->si_addr = ptr_to_compat(from->si_addr);
		to->si_perf_data = from->si_perf_data;
		to->si_perf_type = from->si_perf_type;
		to->si_perf_flags = from->si_perf_flags;
		break;
	case SIL_CHLD:
		to->si_pid = from->si_pid;
		to->si_uid = from->si_uid;
		to->si_status = from->si_status;
		to->si_utime = from->si_utime;
		to->si_stime = from->si_stime;
		break;
	case SIL_RT:
		to->si_pid = from->si_pid;
		to->si_uid = from->si_uid;
		to->si_int = from->si_int;
		break;
	case SIL_SYS:
		to->si_call_addr = ptr_to_compat(from->si_call_addr);
		to->si_syscall   = from->si_syscall;
		to->si_arch      = from->si_arch;
		break;
	}
}
int __copy_siginfo_to_user32(struct compat_siginfo __user *to,
			   const struct kernel_siginfo *from)
{
	struct compat_siginfo new;

	copy_siginfo_to_external32(&new, from);
	if (copy_to_user(to, &new, sizeof(struct compat_siginfo)))
		return -EFAULT;
	return 0;
}

static int post_copy_siginfo_from_user32(kernel_siginfo_t *to,
					 const struct compat_siginfo *from)
{
	clear_siginfo(to);
	to->si_signo = from->si_signo;
	to->si_errno = from->si_errno;
	to->si_code  = from->si_code;
	switch(siginfo_layout(from->si_signo, from->si_code)) {
	case SIL_KILL:
		to->si_pid = from->si_pid;
		to->si_uid = from->si_uid;
		break;
	case SIL_TIMER:
		to->si_tid     = from->si_tid;
		to->si_overrun = from->si_overrun;
		to->si_int     = from->si_int;
		break;
	case SIL_POLL:
		to->si_band = from->si_band;
		to->si_fd   = from->si_fd;
		break;
	case SIL_FAULT:
		to->si_addr = compat_ptr(from->si_addr);
		break;
	case SIL_FAULT_TRAPNO:
		to->si_addr = compat_ptr(from->si_addr);
		to->si_trapno = from->si_trapno;
		break;
	case SIL_FAULT_MCEERR:
		to->si_addr = compat_ptr(from->si_addr);
		to->si_addr_lsb = from->si_addr_lsb;
		break;
	case SIL_FAULT_BNDERR:
		to->si_addr = compat_ptr(from->si_addr);
		to->si_lower = compat_ptr(from->si_lower);
		to->si_upper = compat_ptr(from->si_upper);
		break;
	case SIL_FAULT_PKUERR:
		to->si_addr = compat_ptr(from->si_addr);
		to->si_pkey = from->si_pkey;
		break;
	case SIL_FAULT_PERF_EVENT:
		to->si_addr = compat_ptr(from->si_addr);
		to->si_perf_data = from->si_perf_data;
		to->si_perf_type = from->si_perf_type;
		to->si_perf_flags = from->si_perf_flags;
		break;
	case SIL_CHLD:
		to->si_pid    = from->si_pid;
		to->si_uid    = from->si_uid;
		to->si_status = from->si_status;
#ifdef CONFIG_X86_X32_ABI
		if (in_x32_syscall()) {
			to->si_utime = from->_sifields._sigchld_x32._utime;
			to->si_stime = from->_sifields._sigchld_x32._stime;
		} else
#endif
		{
			to->si_utime = from->si_utime;
			to->si_stime = from->si_stime;
		}
		break;
	case SIL_RT:
		to->si_pid = from->si_pid;
		to->si_uid = from->si_uid;
		to->si_int = from->si_int;
		break;
	case SIL_SYS:
		to->si_call_addr = compat_ptr(from->si_call_addr);
		to->si_syscall   = from->si_syscall;
		to->si_arch      = from->si_arch;
		break;
	}
	return 0;
}

static int __copy_siginfo_from_user32(int signo, struct kernel_siginfo *to,
				      const struct compat_siginfo __user *ufrom)
{
	struct compat_siginfo from;

	if (copy_from_user(&from, ufrom, sizeof(struct compat_siginfo)))
		return -EFAULT;

	from.si_signo = signo;
	return post_copy_siginfo_from_user32(to, &from);
}

int copy_siginfo_from_user32(struct kernel_siginfo *to,
			     const struct compat_siginfo __user *ufrom)
{
	struct compat_siginfo from;

	if (copy_from_user(&from, ufrom, sizeof(struct compat_siginfo)))
		return -EFAULT;

	return post_copy_siginfo_from_user32(to, &from);
}
#endif /* CONFIG_COMPAT */
/**
 *  do_sigtimedwait - wait for queued signals specified in @which
 *  @which: queued signals to wait for
 *  @info: if non-null, the signal's siginfo is returned here
 *  @ts: upper bound on process time suspension
 */
static int do_sigtimedwait(const sigset_t *which, kernel_siginfo_t *info,
		    const struct timespec64 *ts)
{
	ktime_t *to = NULL, timeout = KTIME_MAX;
	struct task_struct *tsk = current;
	sigset_t mask = *which;
	enum pid_type type;
	int sig, ret = 0;

	if (ts) {
		if (!timespec64_valid(ts))
			return -EINVAL;
		timeout = timespec64_to_ktime(*ts);
		to = &timeout;
	}

	/*
	 * Invert the set of allowed signals to get those we want to block.
	 */
	sigdelsetmask(&mask, sigmask(SIGKILL) | sigmask(SIGSTOP));
	signotset(&mask);

	spin_lock_irq(&tsk->sighand->siglock);
	sig = dequeue_signal(tsk, &mask, info, &type);
	if (!sig && timeout) {
		/*
		 * None ready, temporarily unblock those we're interested
		 * in while we sleep, so that we'll be awakened when they
		 * arrive.  Unblocking is always fine, we can avoid
		 * set_current_blocked().
		 */
		tsk->real_blocked = tsk->blocked;
		sigandsets(&tsk->blocked, &tsk->blocked, &mask);
		recalc_sigpending();
		spin_unlock_irq(&tsk->sighand->siglock);

		__set_current_state(TASK_INTERRUPTIBLE|TASK_FREEZABLE);
		ret = schedule_hrtimeout_range(to, tsk->timer_slack_ns,
					       HRTIMER_MODE_REL);
		spin_lock_irq(&tsk->sighand->siglock);
		__set_task_blocked(tsk, &tsk->real_blocked);
		sigemptyset(&tsk->real_blocked);
		sig = dequeue_signal(tsk, &mask, info, &type);
	}
	spin_unlock_irq(&tsk->sighand->siglock);

	if (sig)
		return sig;
	return ret ? -EINTR : -EAGAIN;
}
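/*
 * Userspace sketch (illustrative): the libc sigtimedwait() wrapper lands
 * here.  Waiting synchronously for SIGUSR1 for up to two seconds:
 *
 *	#include <signal.h>
 *
 *	sigset_t set;
 *	siginfo_t si;
 *	struct timespec ts = { .tv_sec = 2 };
 *
 *	sigemptyset(&set);
 *	sigaddset(&set, SIGUSR1);
 *	sigprocmask(SIG_BLOCK, &set, NULL);	// must be blocked first
 *	int sig = sigtimedwait(&set, &si, &ts);	// -1/EAGAIN on timeout
 */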
/**
 *  sys_rt_sigtimedwait - synchronously wait for queued signals specified
 *			in @uthese
 *  @uthese: queued signals to wait for
 *  @uinfo: if non-null, the signal's siginfo is returned here
 *  @uts: upper bound on process time suspension
 *  @sigsetsize: size of sigset_t type
 */
SYSCALL_DEFINE4(rt_sigtimedwait, const sigset_t __user *, uthese,
		siginfo_t __user *, uinfo,
		const struct __kernel_timespec __user *, uts,
		size_t, sigsetsize)
{
	sigset_t these;
	struct timespec64 ts;
	kernel_siginfo_t info;
	int ret;

	/* XXX: Don't preclude handling different sized sigset_t's.  */
	if (sigsetsize != sizeof(sigset_t))
		return -EINVAL;

	if (copy_from_user(&these, uthese, sizeof(these)))
		return -EFAULT;

	if (uts) {
		if (get_timespec64(&ts, uts))
			return -EFAULT;
	}

	ret = do_sigtimedwait(&these, &info, uts ? &ts : NULL);

	if (ret > 0 && uinfo) {
		if (copy_siginfo_to_user(uinfo, &info))
			ret = -EFAULT;
	}

	return ret;
}

#ifdef CONFIG_COMPAT_32BIT_TIME
SYSCALL_DEFINE4(rt_sigtimedwait_time32, const sigset_t __user *, uthese,
		siginfo_t __user *, uinfo,
		const struct old_timespec32 __user *, uts,
		size_t, sigsetsize)
{
	sigset_t these;
	struct timespec64 ts;
	kernel_siginfo_t info;
	int ret;

	if (sigsetsize != sizeof(sigset_t))
		return -EINVAL;

	if (copy_from_user(&these, uthese, sizeof(these)))
		return -EFAULT;

	if (uts) {
		if (get_old_timespec32(&ts, uts))
			return -EFAULT;
	}

	ret = do_sigtimedwait(&these, &info, uts ? &ts : NULL);

	if (ret > 0 && uinfo) {
		if (copy_siginfo_to_user(uinfo, &info))
			ret = -EFAULT;
	}

	return ret;
}
#endif

#ifdef CONFIG_COMPAT
COMPAT_SYSCALL_DEFINE4(rt_sigtimedwait_time64, compat_sigset_t __user *, uthese,
		struct compat_siginfo __user *, uinfo,
		struct __kernel_timespec __user *, uts, compat_size_t, sigsetsize)
{
	sigset_t s;
	struct timespec64 t;
	kernel_siginfo_t info;
	long ret;

	if (sigsetsize != sizeof(sigset_t))
		return -EINVAL;

	if (get_compat_sigset(&s, uthese))
		return -EFAULT;

	if (uts) {
		if (get_timespec64(&t, uts))
			return -EFAULT;
	}

	ret = do_sigtimedwait(&s, &info, uts ? &t : NULL);

	if (ret > 0 && uinfo) {
		if (copy_siginfo_to_user32(uinfo, &info))
			ret = -EFAULT;
	}

	return ret;
}

#ifdef CONFIG_COMPAT_32BIT_TIME
COMPAT_SYSCALL_DEFINE4(rt_sigtimedwait_time32, compat_sigset_t __user *, uthese,
		struct compat_siginfo __user *, uinfo,
		struct old_timespec32 __user *, uts, compat_size_t, sigsetsize)
{
	sigset_t s;
	struct timespec64 t;
	kernel_siginfo_t info;
	long ret;

	if (sigsetsize != sizeof(sigset_t))
		return -EINVAL;

	if (get_compat_sigset(&s, uthese))
		return -EFAULT;

	if (uts) {
		if (get_old_timespec32(&t, uts))
			return -EFAULT;
	}

	ret = do_sigtimedwait(&s, &info, uts ? &t : NULL);

	if (ret > 0 && uinfo) {
		if (copy_siginfo_to_user32(uinfo, &info))
			ret = -EFAULT;
	}

	return ret;
}
#endif
#endif
static void prepare_kill_siginfo(int sig, struct kernel_siginfo *info,
				 enum pid_type type)
{
	clear_siginfo(info);
	info->si_signo = sig;
	info->si_errno = 0;
	info->si_code = (type == PIDTYPE_PID) ? SI_TKILL : SI_USER;
	info->si_pid = task_tgid_vnr(current);
	info->si_uid = from_kuid_munged(current_user_ns(), current_uid());
}

/**
 *  sys_kill - send a signal to a process
 *  @pid: the PID of the process
 *  @sig: signal to be sent
 */
SYSCALL_DEFINE2(kill, pid_t, pid, int, sig)
{
	struct kernel_siginfo info;

	prepare_kill_siginfo(sig, &info, PIDTYPE_TGID);

	return kill_something_info(sig, &info, pid);
}
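/*
 * Illustrative note: kill_something_info() gives the pid argument its
 * POSIX kill(2) semantics.  From userspace:
 *
 *	kill(1234, SIGTERM);	// pid > 0: that process
 *	kill(0, SIGTERM);	// pid == 0: caller's process group
 *	kill(-1, SIGTERM);	// pid == -1: every process we may signal
 *	kill(-5678, SIGTERM);	// pid < -1: process group 5678
 *	kill(1234, 0);		// sig == 0: existence/permission probe only
 */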
/*
 * Verify that the signaler and signalee either are in the same pid namespace
 * or that the signaler's pid namespace is an ancestor of the signalee's pid
 * namespace.
 */
static bool access_pidfd_pidns(struct pid *pid)
{
	struct pid_namespace *active = task_active_pid_ns(current);
	struct pid_namespace *p = ns_of_pid(pid);

	for (;;) {
		if (!p)
			return false;
		if (p == active)
			break;
		p = p->parent;
	}

	return true;
}

static int copy_siginfo_from_user_any(kernel_siginfo_t *kinfo,
		siginfo_t __user *info)
{
#ifdef CONFIG_COMPAT
	/*
	 * Avoid hooking up compat syscalls and instead handle necessary
	 * conversions here. Note, this is a stop-gap measure and should not be
	 * considered a generic solution.
	 */
	if (in_compat_syscall())
		return copy_siginfo_from_user32(
			kinfo, (struct compat_siginfo __user *)info);
#endif
	return copy_siginfo_from_user(kinfo, info);
}

static struct pid *pidfd_to_pid(const struct file *file)
{
	struct pid *pid;

	pid = pidfd_pid(file);
	if (!IS_ERR(pid))
		return pid;

	return tgid_pidfd_to_pid(file);
}
#define PIDFD_SEND_SIGNAL_FLAGS                            \
	(PIDFD_SIGNAL_THREAD | PIDFD_SIGNAL_THREAD_GROUP | \
	 PIDFD_SIGNAL_PROCESS_GROUP)

/**
 * sys_pidfd_send_signal - Signal a process through a pidfd
 * @pidfd:  file descriptor of the process
 * @sig:    signal to send
 * @info:   signal info
 * @flags:  future flags
 *
 * Send the signal to the thread group or to the individual thread depending
 * on PIDFD_THREAD.
 * In the future extension to @flags may be used to override the default scope
 * of @pidfd.
 *
 * Return: 0 on success, negative errno on failure
 */
SYSCALL_DEFINE4(pidfd_send_signal, int, pidfd, int, sig,
		siginfo_t __user *, info, unsigned int, flags)
{
	int ret;
	struct fd f;
	struct pid *pid;
	kernel_siginfo_t kinfo;
	enum pid_type type;

	/* Enforce flags be set to 0 until we add an extension. */
	if (flags & ~PIDFD_SEND_SIGNAL_FLAGS)
		return -EINVAL;

	/* Ensure that only a single signal scope determining flag is set. */
	if (hweight32(flags & PIDFD_SEND_SIGNAL_FLAGS) > 1)
		return -EINVAL;

	f = fdget(pidfd);
	if (!f.file)
		return -EBADF;

	/* Is this a pidfd? */
	pid = pidfd_to_pid(f.file);
	if (IS_ERR(pid)) {
		ret = PTR_ERR(pid);
		goto err;
	}

	ret = -EINVAL;
	if (!access_pidfd_pidns(pid))
		goto err;

	switch (flags) {
	case 0:
		/* Infer scope from the type of pidfd. */
		if (f.file->f_flags & PIDFD_THREAD)
			type = PIDTYPE_PID;
		else
			type = PIDTYPE_TGID;
		break;
	case PIDFD_SIGNAL_THREAD:
		type = PIDTYPE_PID;
		break;
	case PIDFD_SIGNAL_THREAD_GROUP:
		type = PIDTYPE_TGID;
		break;
	case PIDFD_SIGNAL_PROCESS_GROUP:
		type = PIDTYPE_PGID;
		break;
	}

	if (info) {
		ret = copy_siginfo_from_user_any(&kinfo, info);
		if (unlikely(ret))
			goto err;

		ret = -EINVAL;
		if (unlikely(sig != kinfo.si_signo))
			goto err;

		/* Only allow sending arbitrary signals to yourself. */
		ret = -EPERM;
		if ((task_pid(current) != pid || type > PIDTYPE_TGID) &&
		    (kinfo.si_code >= 0 || kinfo.si_code == SI_TKILL))
			goto err;
	} else {
		prepare_kill_siginfo(sig, &kinfo, type);
	}

	if (type == PIDTYPE_PGID)
		ret = kill_pgrp_info(sig, &kinfo, pid);
	else
		ret = kill_pid_info_type(sig, &kinfo, pid, type);
err:
	fdput(f);
	return ret;
}
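/*
 * Userspace sketch (illustrative, assumes a kernel with pidfd support):
 *
 *	#include <sys/syscall.h>
 *	#include <signal.h>
 *	#include <unistd.h>
 *
 *	int pidfd = syscall(SYS_pidfd_open, pid, 0);
 *	// NULL info behaves like kill(2); flags 0 infers the scope
 *	// (thread vs thread group) from the pidfd itself
 *	syscall(SYS_pidfd_send_signal, pidfd, SIGTERM, NULL, 0);
 */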
static int
do_send_specific(pid_t tgid, pid_t pid, int sig, struct kernel_siginfo *info)
{
	struct task_struct *p;
	int error = -ESRCH;

	rcu_read_lock();
	p = find_task_by_vpid(pid);
	if (p && (tgid <= 0 || task_tgid_vnr(p) == tgid)) {
		error = check_kill_permission(sig, info, p);
		/*
		 * The null signal is a permissions and process existence
		 * probe.  No signal is actually delivered.
		 */
		if (!error && sig) {
			error = do_send_sig_info(sig, info, p, PIDTYPE_PID);
			/*
			 * If lock_task_sighand() failed we pretend the task
			 * dies after receiving the signal. The window is tiny,
			 * and the signal is private anyway.
			 */
			if (unlikely(error == -ESRCH))
				error = 0;
		}
	}
	rcu_read_unlock();

	return error;
}

static int do_tkill(pid_t tgid, pid_t pid, int sig)
{
	struct kernel_siginfo info;

	prepare_kill_siginfo(sig, &info, PIDTYPE_PID);

	return do_send_specific(tgid, pid, sig, &info);
}
/**
 *  sys_tgkill - send signal to one specific thread
 *  @tgid: the thread group ID of the thread
 *  @pid: the PID of the thread
 *  @sig: signal to be sent
 *
 *  This syscall also checks the @tgid and returns -ESRCH even if the PID
 *  exists but no longer belongs to the target process.  This solves the
 *  problem of threads exiting and PIDs getting reused.
 */
SYSCALL_DEFINE3(tgkill, pid_t, tgid, pid_t, pid, int, sig)
{
	/* This is only valid for single tasks */
	if (pid <= 0 || tgid <= 0)
		return -EINVAL;

	return do_tkill(tgid, pid, sig);
}
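/*
 * Userspace sketch (illustrative): signalling one thread of a process.
 * glibc exposes this via tgkill() (and pthread_kill() builds on it):
 *
 *	#include <sys/syscall.h>
 *	#include <unistd.h>
 *
 *	pid_t tgid = getpid();
 *	pid_t tid  = gettid();		// kernel task id of the thread
 *	syscall(SYS_tgkill, tgid, tid, SIGUSR1);
 */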
/**
 *  sys_tkill - send signal to one specific task
 *  @pid: the PID of the task
 *  @sig: signal to be sent
 *
 *  Send a signal to only one task, even if it's a CLONE_THREAD task.
 */
SYSCALL_DEFINE2(tkill, pid_t, pid, int, sig)
{
	/* This is only valid for single tasks */
	if (pid <= 0)
		return -EINVAL;

	return do_tkill(0, pid, sig);
}

static int do_rt_sigqueueinfo(pid_t pid, int sig, kernel_siginfo_t *info)
{
	/* Not even root can pretend to send signals from the kernel.
	 * Nor can they impersonate a kill()/tgkill(), which adds source info.
	 */
	if ((info->si_code >= 0 || info->si_code == SI_TKILL) &&
	    (task_pid_vnr(current) != pid))
		return -EPERM;

	/* POSIX.1b doesn't mention process groups.  */
	return kill_proc_info(sig, info, pid);
}
/**
 *  sys_rt_sigqueueinfo - queue a signal and accompanying siginfo to a process
 *  @pid: the PID of the target process
 *  @sig: signal to be sent
 *  @uinfo: signal info to be sent
 */
SYSCALL_DEFINE3(rt_sigqueueinfo, pid_t, pid, int, sig,
		siginfo_t __user *, uinfo)
{
	kernel_siginfo_t info;
	int ret = __copy_siginfo_from_user(sig, &info, uinfo);
	if (unlikely(ret))
		return ret;
	return do_rt_sigqueueinfo(pid, sig, &info);
}
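/*
 * Userspace sketch (illustrative): sigqueue(3) is the usual entry point.
 * It builds a siginfo with si_code = SI_QUEUE (negative, so the
 * "pretend to be the kernel" check above does not reject it) plus a
 * caller-chosen payload:
 *
 *	#include <signal.h>
 *
 *	union sigval v = { .sival_int = 42 };
 *	sigqueue(pid, SIGRTMIN, v);	// receiver sees si_value/si_int == 42
 */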
#ifdef CONFIG_COMPAT
COMPAT_SYSCALL_DEFINE3(rt_sigqueueinfo,
			compat_pid_t, pid,
			int, sig,
			struct compat_siginfo __user *, uinfo)
{
	kernel_siginfo_t info;
	int ret = __copy_siginfo_from_user32(sig, &info, uinfo);
	if (unlikely(ret))
		return ret;
	return do_rt_sigqueueinfo(pid, sig, &info);
}
#endif

static int do_rt_tgsigqueueinfo(pid_t tgid, pid_t pid, int sig, kernel_siginfo_t *info)
{
	/* This is only valid for single tasks */
	if (pid <= 0 || tgid <= 0)
		return -EINVAL;

	/* Not even root can pretend to send signals from the kernel.
	 * Nor can they impersonate a kill()/tgkill(), which adds source info.
	 */
	if ((info->si_code >= 0 || info->si_code == SI_TKILL) &&
	    (task_pid_vnr(current) != pid))
		return -EPERM;

	return do_send_specific(tgid, pid, sig, info);
}

SYSCALL_DEFINE4(rt_tgsigqueueinfo, pid_t, tgid, pid_t, pid, int, sig,
		siginfo_t __user *, uinfo)
{
	kernel_siginfo_t info;
	int ret = __copy_siginfo_from_user(sig, &info, uinfo);
	if (unlikely(ret))
		return ret;
	return do_rt_tgsigqueueinfo(tgid, pid, sig, &info);
}

#ifdef CONFIG_COMPAT
COMPAT_SYSCALL_DEFINE4(rt_tgsigqueueinfo,
			compat_pid_t, tgid,
			compat_pid_t, pid,
			int, sig,
			struct compat_siginfo __user *, uinfo)
{
	kernel_siginfo_t info;
	int ret = __copy_siginfo_from_user32(sig, &info, uinfo);
	if (unlikely(ret))
		return ret;
	return do_rt_tgsigqueueinfo(tgid, pid, sig, &info);
}
#endif
/*
 * For kthreads only, must not be used if cloned with CLONE_SIGHAND
 */
void kernel_sigaction(int sig, __sighandler_t action)
{
	spin_lock_irq(&current->sighand->siglock);
	current->sighand->action[sig - 1].sa.sa_handler = action;
	if (action == SIG_IGN) {
		sigset_t mask;

		sigemptyset(&mask);
		sigaddset(&mask, sig);

		flush_sigqueue_mask(&mask, &current->signal->shared_pending);
		flush_sigqueue_mask(&mask, &current->pending);
		recalc_sigpending();
	}
	spin_unlock_irq(&current->sighand->siglock);
}
EXPORT_SYMBOL(kernel_sigaction);
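/*
 * In-kernel usage sketch (illustrative): kthreads normally reach this
 * through the allow_signal()/disallow_signal() wrappers rather than
 * calling it directly.  Roughly:
 *
 *	allow_signal(SIGTERM);		// mark SIGTERM as kthread-handled
 *	...
 *	if (signal_pending(current))
 *		break;			// react to the signal in the kthread
 *	...
 *	disallow_signal(SIGTERM);	// back to SIG_IGN; per the code
 *					// above this flushes pending ones
 */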
void __weak sigaction_compat_abi(struct k_sigaction *act,
		struct k_sigaction *oact)
{
}
int do_sigaction(int sig, struct k_sigaction *act, struct k_sigaction *oact)
{
	struct task_struct *p = current, *t;
	struct k_sigaction *k;
	sigset_t mask;

	if (!valid_signal(sig) || sig < 1 || (act && sig_kernel_only(sig)))
		return -EINVAL;

	k = &p->sighand->action[sig-1];

	spin_lock_irq(&p->sighand->siglock);
	if (k->sa.sa_flags & SA_IMMUTABLE) {
		spin_unlock_irq(&p->sighand->siglock);
		return -EINVAL;
	}
	if (oact)
		*oact = *k;

	/*
	 * Make sure that we never accidentally claim to support SA_UNSUPPORTED,
	 * e.g. by having an architecture use the bit in their uapi.
	 */
	BUILD_BUG_ON(UAPI_SA_FLAGS & SA_UNSUPPORTED);

	/*
	 * Clear unknown flag bits in order to allow userspace to detect missing
	 * support for flag bits and to allow the kernel to use non-uapi bits
	 * internally.
	 */
	if (act)
		act->sa.sa_flags &= UAPI_SA_FLAGS;
	if (oact)
		oact->sa.sa_flags &= UAPI_SA_FLAGS;

	sigaction_compat_abi(act, oact);

	if (act) {
		sigdelsetmask(&act->sa.sa_mask,
			      sigmask(SIGKILL) | sigmask(SIGSTOP));
		*k = *act;
		/*
		 * POSIX 3.3.1.3:
		 *  "Setting a signal action to SIG_IGN for a signal that is
		 *   pending shall cause the pending signal to be discarded,
		 *   whether or not it is blocked."
		 *
		 *  "Setting a signal action to SIG_DFL for a signal that is
		 *   pending and whose default action is to ignore the signal
		 *   (for example, SIGCHLD), shall cause the pending signal to
		 *   be discarded, whether or not it is blocked"
		 */
		if (sig_handler_ignored(sig_handler(p, sig), sig)) {
			sigemptyset(&mask);
			sigaddset(&mask, sig);
			flush_sigqueue_mask(&mask, &p->signal->shared_pending);
			for_each_thread(p, t)
				flush_sigqueue_mask(&mask, &t->pending);
		}
	}

	spin_unlock_irq(&p->sighand->siglock);
	return 0;
}
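/*
 * Userspace sketch (illustrative) of the POSIX discard rule implemented
 * above: a blocked, pending signal vanishes once its action becomes
 * SIG_IGN.
 *
 *	sigset_t set;
 *	sigemptyset(&set);
 *	sigaddset(&set, SIGUSR1);
 *	sigprocmask(SIG_BLOCK, &set, NULL);
 *	raise(SIGUSR1);			// now pending and blocked
 *	signal(SIGUSR1, SIG_IGN);	// pending SIGUSR1 is discarded
 *	sigpending(&set);		// SIGUSR1 no longer reported
 */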
#ifdef CONFIG_DYNAMIC_SIGFRAME
static inline void sigaltstack_lock(void)
	__acquires(&current->sighand->siglock)
{
	spin_lock_irq(&current->sighand->siglock);
}

static inline void sigaltstack_unlock(void)
	__releases(&current->sighand->siglock)
{
	spin_unlock_irq(&current->sighand->siglock);
}
#else
static inline void sigaltstack_lock(void) { }
static inline void sigaltstack_unlock(void) { }
#endif
static int
do_sigaltstack (const stack_t *ss, stack_t *oss, unsigned long sp,
		size_t min_ss_size)
{
	struct task_struct *t = current;
	int ret = 0;

	if (oss) {
		memset(oss, 0, sizeof(stack_t));
		oss->ss_sp = (void __user *) t->sas_ss_sp;
		oss->ss_size = t->sas_ss_size;
		oss->ss_flags = sas_ss_flags(sp) |
			(current->sas_ss_flags & SS_FLAG_BITS);
	}

	if (ss) {
		void __user *ss_sp = ss->ss_sp;
		size_t ss_size = ss->ss_size;
		unsigned ss_flags = ss->ss_flags;
		int ss_mode;

		if (unlikely(on_sig_stack(sp)))
			return -EPERM;

		ss_mode = ss_flags & ~SS_FLAG_BITS;
		if (unlikely(ss_mode != SS_DISABLE && ss_mode != SS_ONSTACK &&
				ss_mode != 0))
			return -EINVAL;

		/*
		 * Return before taking any locks if no actual
		 * sigaltstack changes were requested.
		 */
		if (t->sas_ss_sp == (unsigned long)ss_sp &&
		    t->sas_ss_size == ss_size &&
		    t->sas_ss_flags == ss_flags)
			return 0;

		sigaltstack_lock();
		if (ss_mode == SS_DISABLE) {
			ss_size = 0;
			ss_sp = NULL;
		} else {
			if (unlikely(ss_size < min_ss_size))
				ret = -ENOMEM;
			if (!sigaltstack_size_valid(ss_size))
				ret = -ENOMEM;
		}
		if (!ret) {
			t->sas_ss_sp = (unsigned long) ss_sp;
			t->sas_ss_size = ss_size;
			t->sas_ss_flags = ss_flags;
		}
		sigaltstack_unlock();
	}
	return ret;
}
SYSCALL_DEFINE2(sigaltstack,const stack_t __user *,uss, stack_t __user *,uoss)
{
	stack_t new, old;
	int err;
	if (uss && copy_from_user(&new, uss, sizeof(stack_t)))
		return -EFAULT;
	err = do_sigaltstack(uss ? &new : NULL, uoss ? &old : NULL,
			      current_user_stack_pointer(),
			      MINSIGSTKSZ);
	if (!err && uoss && copy_to_user(uoss, &old, sizeof(stack_t)))
		err = -EFAULT;
	return err;
}
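/*
 * Userspace sketch (illustrative): pairing sigaltstack(2) with an
 * SA_ONSTACK handler, e.g. so a SIGSEGV caused by stack overflow can
 * still run its handler.  "handler" is a hypothetical signal handler.
 *
 *	#include <signal.h>
 *	#include <stdlib.h>
 *
 *	stack_t ss = {
 *		.ss_sp = malloc(SIGSTKSZ),
 *		.ss_size = SIGSTKSZ,	// must be >= MINSIGSTKSZ
 *		.ss_flags = 0,
 *	};
 *	sigaltstack(&ss, NULL);
 *
 *	struct sigaction sa = { .sa_handler = handler,
 *				.sa_flags = SA_ONSTACK };
 *	sigaction(SIGSEGV, &sa, NULL);
 */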
int restore_altstack(const stack_t __user *uss)
{
	stack_t new;
	if (copy_from_user(&new, uss, sizeof(stack_t)))
		return -EFAULT;
	(void)do_sigaltstack(&new, NULL, current_user_stack_pointer(),
			     MINSIGSTKSZ);
	/* squash all but EFAULT for now */
	return 0;
}

int __save_altstack(stack_t __user *uss, unsigned long sp)
{
	struct task_struct *t = current;
	int err = __put_user((void __user *)t->sas_ss_sp, &uss->ss_sp) |
		__put_user(t->sas_ss_flags, &uss->ss_flags) |
		__put_user(t->sas_ss_size, &uss->ss_size);
	return err;
}

#ifdef CONFIG_COMPAT
static int do_compat_sigaltstack(const compat_stack_t __user *uss_ptr,
				 compat_stack_t __user *uoss_ptr)
{
	stack_t uss, uoss;
	int ret;

	if (uss_ptr) {
		compat_stack_t uss32;
		if (copy_from_user(&uss32, uss_ptr, sizeof(compat_stack_t)))
			return -EFAULT;
		uss.ss_sp = compat_ptr(uss32.ss_sp);
		uss.ss_flags = uss32.ss_flags;
		uss.ss_size = uss32.ss_size;
	}
	ret = do_sigaltstack(uss_ptr ? &uss : NULL, &uoss,
			     compat_user_stack_pointer(),
			     COMPAT_MINSIGSTKSZ);
	if (ret >= 0 && uoss_ptr)  {
		compat_stack_t old;
		memset(&old, 0, sizeof(old));
		old.ss_sp = ptr_to_compat(uoss.ss_sp);
		old.ss_flags = uoss.ss_flags;
		old.ss_size = uoss.ss_size;
		if (copy_to_user(uoss_ptr, &old, sizeof(compat_stack_t)))
			ret = -EFAULT;
	}
	return ret;
}

COMPAT_SYSCALL_DEFINE2(sigaltstack,
			const compat_stack_t __user *, uss_ptr,
			compat_stack_t __user *, uoss_ptr)
{
	return do_compat_sigaltstack(uss_ptr, uoss_ptr);
}

int compat_restore_altstack(const compat_stack_t __user *uss)
{
	int err = do_compat_sigaltstack(uss, NULL);
	/* squash all but -EFAULT for now */
	return err == -EFAULT ? err : 0;
}

int __compat_save_altstack(compat_stack_t __user *uss, unsigned long sp)
{
	int err;
	struct task_struct *t = current;
	err = __put_user(ptr_to_compat((void __user *)t->sas_ss_sp),
			 &uss->ss_sp) |
		__put_user(t->sas_ss_flags, &uss->ss_flags) |
		__put_user(t->sas_ss_size, &uss->ss_size);
	return err;
}
#endif
#ifdef __ARCH_WANT_SYS_SIGPENDING

/**
 *  sys_sigpending - examine pending signals
 *  @uset: where mask of pending signal is returned
 */
SYSCALL_DEFINE1(sigpending, old_sigset_t __user *, uset)
{
	sigset_t set;

	if (sizeof(old_sigset_t) > sizeof(*uset))
		return -EINVAL;

	do_sigpending(&set);

	if (copy_to_user(uset, &set, sizeof(old_sigset_t)))
		return -EFAULT;

	return 0;
}

#ifdef CONFIG_COMPAT
COMPAT_SYSCALL_DEFINE1(sigpending, compat_old_sigset_t __user *, set32)
{
	sigset_t set;

	do_sigpending(&set);

	return put_user(set.sig[0], set32);
}
#endif

#endif /* __ARCH_WANT_SYS_SIGPENDING */
#ifdef __ARCH_WANT_SYS_SIGPROCMASK

/**
 *  sys_sigprocmask - examine and change blocked signals
 *  @how: whether to add, remove, or set signals
 *  @nset: signals to add or remove (if non-null)
 *  @oset: previous value of signal mask if non-null
 *
 * Some platforms have their own version with special arguments;
 * others support only sys_rt_sigprocmask.
 */

SYSCALL_DEFINE3(sigprocmask, int, how, old_sigset_t __user *, nset,
		old_sigset_t __user *, oset)
{
	old_sigset_t old_set, new_set;
	sigset_t new_blocked;

	old_set = current->blocked.sig[0];

	if (nset) {
		if (copy_from_user(&new_set, nset, sizeof(*nset)))
			return -EFAULT;

		new_blocked = current->blocked;

		switch (how) {
		case SIG_BLOCK:
			sigaddsetmask(&new_blocked, new_set);
			break;
		case SIG_UNBLOCK:
			sigdelsetmask(&new_blocked, new_set);
			break;
		case SIG_SETMASK:
			new_blocked.sig[0] = new_set;
			break;
		default:
			return -EINVAL;
		}

		set_current_blocked(&new_blocked);
	}

	if (oset) {
		if (copy_to_user(oset, &old_set, sizeof(*oset)))
			return -EFAULT;
	}

	return 0;
}
#endif /* __ARCH_WANT_SYS_SIGPROCMASK */
#ifndef CONFIG_ODD_RT_SIGACTION
/**
 *  sys_rt_sigaction - alter an action taken by a process
 *  @sig: signal whose action is to be changed
 *  @act: new sigaction
 *  @oact: used to save the previous sigaction
 *  @sigsetsize: size of sigset_t type
 */
SYSCALL_DEFINE4(rt_sigaction, int, sig,
		const struct sigaction __user *, act,
		struct sigaction __user *, oact,
		size_t, sigsetsize)
{
	struct k_sigaction new_sa, old_sa;
	int ret;

	/* XXX: Don't preclude handling different sized sigset_t's.  */
	if (sigsetsize != sizeof(sigset_t))
		return -EINVAL;

	if (act && copy_from_user(&new_sa.sa, act, sizeof(new_sa.sa)))
		return -EFAULT;

	ret = do_sigaction(sig, act ? &new_sa : NULL, oact ? &old_sa : NULL);
	if (ret)
		return ret;

	if (oact && copy_to_user(oact, &old_sa.sa, sizeof(old_sa.sa)))
		return -EFAULT;

	return 0;
}
#ifdef CONFIG_COMPAT
COMPAT_SYSCALL_DEFINE4(rt_sigaction, int, sig,
		const struct compat_sigaction __user *, act,
		struct compat_sigaction __user *, oact,
		compat_size_t, sigsetsize)
{
	struct k_sigaction new_ka, old_ka;
#ifdef __ARCH_HAS_SA_RESTORER
	compat_uptr_t restorer;
#endif
	int ret;

	/* XXX: Don't preclude handling different sized sigset_t's.  */
	if (sigsetsize != sizeof(compat_sigset_t))
		return -EINVAL;

	if (act) {
		compat_uptr_t handler;
		ret = get_user(handler, &act->sa_handler);
		new_ka.sa.sa_handler = compat_ptr(handler);
#ifdef __ARCH_HAS_SA_RESTORER
		ret |= get_user(restorer, &act->sa_restorer);
		new_ka.sa.sa_restorer = compat_ptr(restorer);
#endif
		ret |= get_compat_sigset(&new_ka.sa.sa_mask, &act->sa_mask);
		ret |= get_user(new_ka.sa.sa_flags, &act->sa_flags);
		if (ret)
			return -EFAULT;
	}

	ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
	if (!ret && oact) {
		ret = put_user(ptr_to_compat(old_ka.sa.sa_handler),
			       &oact->sa_handler);
		ret |= put_compat_sigset(&oact->sa_mask, &old_ka.sa.sa_mask,
					 sizeof(oact->sa_mask));
		ret |= put_user(old_ka.sa.sa_flags, &oact->sa_flags);
#ifdef __ARCH_HAS_SA_RESTORER
		ret |= put_user(ptr_to_compat(old_ka.sa.sa_restorer),
				&oact->sa_restorer);
#endif
		if (ret)
			ret = -EFAULT;
	}
	return ret;
}
#endif
#endif /* !CONFIG_ODD_RT_SIGACTION */

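/*
 * Note on the conversions above: a compat (32-bit) sigaction stores its
 * handler and restorer as 32-bit compat_uptr_t values and lays out its
 * fields for the 32-bit ABI, so the structure cannot be copied wholesale.
 * Each member goes through compat_ptr()/ptr_to_compat() and
 * get_compat_sigset()/put_compat_sigset() instead.
 */
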
#ifdef CONFIG_OLD_SIGACTION
SYSCALL_DEFINE3(sigaction, int, sig,
		const struct old_sigaction __user *, act,
		struct old_sigaction __user *, oact)
{
	struct k_sigaction new_ka, old_ka;
	int ret;

	if (act) {
		old_sigset_t mask;

		if (!access_ok(act, sizeof(*act)) ||
		    __get_user(new_ka.sa.sa_handler, &act->sa_handler) ||
		    __get_user(new_ka.sa.sa_restorer, &act->sa_restorer) ||
		    __get_user(new_ka.sa.sa_flags, &act->sa_flags) ||
		    __get_user(mask, &act->sa_mask))
			return -EFAULT;
#ifdef __ARCH_HAS_KA_RESTORER
		new_ka.ka_restorer = NULL;
#endif
		siginitset(&new_ka.sa.sa_mask, mask);
	}

	ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);

	if (!ret && oact) {
		if (!access_ok(oact, sizeof(*oact)) ||
		    __put_user(old_ka.sa.sa_handler, &oact->sa_handler) ||
		    __put_user(old_ka.sa.sa_restorer, &oact->sa_restorer) ||
		    __put_user(old_ka.sa.sa_flags, &oact->sa_flags) ||
		    __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask))
			return -EFAULT;
	}

	return ret;
}
#endif

#ifdef CONFIG_COMPAT_OLD_SIGACTION
COMPAT_SYSCALL_DEFINE3(sigaction, int, sig,
		const struct compat_old_sigaction __user *, act,
		struct compat_old_sigaction __user *, oact)
{
	struct k_sigaction new_ka, old_ka;
	int ret;
	compat_old_sigset_t mask;
	compat_uptr_t handler, restorer;

	if (act) {
		if (!access_ok(act, sizeof(*act)) ||
		    __get_user(handler, &act->sa_handler) ||
		    __get_user(restorer, &act->sa_restorer) ||
		    __get_user(new_ka.sa.sa_flags, &act->sa_flags) ||
		    __get_user(mask, &act->sa_mask))
			return -EFAULT;

#ifdef __ARCH_HAS_KA_RESTORER
		new_ka.ka_restorer = NULL;
#endif
		new_ka.sa.sa_handler = compat_ptr(handler);
		new_ka.sa.sa_restorer = compat_ptr(restorer);
		siginitset(&new_ka.sa.sa_mask, mask);
	}

	ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);

	if (!ret && oact) {
		if (!access_ok(oact, sizeof(*oact)) ||
		    __put_user(ptr_to_compat(old_ka.sa.sa_handler),
			       &oact->sa_handler) ||
		    __put_user(ptr_to_compat(old_ka.sa.sa_restorer),
			       &oact->sa_restorer) ||
		    __put_user(old_ka.sa.sa_flags, &oact->sa_flags) ||
		    __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask))
			return -EFAULT;
	}
	return ret;
}
#endif

#ifdef CONFIG_SGETMASK_SYSCALL

/*
 * For backwards compatibility.  Functionality superseded by sigprocmask.
 */
SYSCALL_DEFINE0(sgetmask)
{
	/* SMP safe */
	return current->blocked.sig[0];
}

SYSCALL_DEFINE1(ssetmask, int, newmask)
{
	int old = current->blocked.sig[0];
	sigset_t newset;

	siginitset(&newset, newmask);
	set_current_blocked(&newset);

	return old;
}
#endif /* CONFIG_SGETMASK_SYSCALL */

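/*
 * Illustrative equivalence (assumption, userspace view): the pair above
 * behaves like sigprocmask(SIG_SETMASK, ...) restricted to the first 32
 * signals, i.e. to blocked.sig[0]:
 *
 *	long old = syscall(SYS_sgetmask);
 *	// ...
 *	syscall(SYS_ssetmask, old);	// restores the low 32 bits only
 */
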
#ifdef __ARCH_WANT_SYS_SIGNAL
/*
 * For backwards compatibility.  Functionality superseded by sigaction.
 */
SYSCALL_DEFINE2(signal, int, sig, __sighandler_t, handler)
{
	struct k_sigaction new_sa, old_sa;
	int ret;

	new_sa.sa.sa_handler = handler;
	new_sa.sa.sa_flags = SA_ONESHOT | SA_NOMASK;
	sigemptyset(&new_sa.sa.sa_mask);

	ret = do_sigaction(sig, &new_sa, &old_sa);

	return ret ? ret : (unsigned long)old_sa.sa.sa_handler;
}
#endif /* __ARCH_WANT_SYS_SIGNAL */

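/*
 * SA_ONESHOT | SA_NOMASK above is what gives the raw signal() syscall its
 * historical System V behaviour: the handler is reset to SIG_DFL on
 * delivery and the signal is not blocked while the handler runs.  A
 * handler that wants to persist must re-install itself (illustrative):
 *
 *	static void on_int(int sig)
 *	{
 *		signal(SIGINT, on_int);	// re-arm, or the next SIGINT
 *					// gets the default action
 *	}
 */
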
#ifdef __ARCH_WANT_SYS_PAUSE

SYSCALL_DEFINE0(pause)
{
	while (!signal_pending(current)) {
		__set_current_state(TASK_INTERRUPTIBLE);
		schedule();
	}
	return -ERESTARTNOHAND;
}

#endif

static int sigsuspend(sigset_t *set)
{
	current->saved_sigmask = current->blocked;
	set_current_blocked(set);

	while (!signal_pending(current)) {
		__set_current_state(TASK_INTERRUPTIBLE);
		schedule();
	}
	set_restore_sigmask();
	return -ERESTARTNOHAND;
}

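/*
 * The mask switch and the sleep above happen under a single syscall, which
 * closes the classic race in the unblock-then-wait pattern (illustrative):
 *
 *	sigprocmask(SIG_UNBLOCK, &set, NULL);
 *	pause();			// signal can land between the calls,
 *					// and pause() then sleeps forever
 *
 * versus
 *
 *	sigsuspend(&mask_without_sig);	// atomic: no lost-wakeup window
 */
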
/**
 *  sys_rt_sigsuspend - replace the signal mask with the @unewset value
 *	until a signal is received
 *  @unewset: new signal mask value
 *  @sigsetsize: size of sigset_t type
 */
SYSCALL_DEFINE2(rt_sigsuspend, sigset_t __user *, unewset, size_t, sigsetsize)
{
	sigset_t newset;

	/* XXX: Don't preclude handling different sized sigset_t's.  */
	if (sigsetsize != sizeof(sigset_t))
		return -EINVAL;

	if (copy_from_user(&newset, unewset, sizeof(newset)))
		return -EFAULT;
	return sigsuspend(&newset);
}

#ifdef CONFIG_COMPAT
COMPAT_SYSCALL_DEFINE2(rt_sigsuspend, compat_sigset_t __user *, unewset,
		       compat_size_t, sigsetsize)
{
	sigset_t newset;

	/* XXX: Don't preclude handling different sized sigset_t's.  */
	if (sigsetsize != sizeof(sigset_t))
		return -EINVAL;

	if (get_compat_sigset(&newset, unewset))
		return -EFAULT;
	return sigsuspend(&newset);
}
#endif

#ifdef CONFIG_OLD_SIGSUSPEND
SYSCALL_DEFINE1(sigsuspend, old_sigset_t, mask)
{
	sigset_t blocked;

	siginitset(&blocked, mask);
	return sigsuspend(&blocked);
}
#endif

#ifdef CONFIG_OLD_SIGSUSPEND3
SYSCALL_DEFINE3(sigsuspend, int, unused1, int, unused2, old_sigset_t, mask)
{
	sigset_t blocked;

	siginitset(&blocked, mask);
	return sigsuspend(&blocked);
}
#endif

__weak const char *arch_vma_name(struct vm_area_struct *vma)
{
	return NULL;
}

static inline void siginfo_buildtime_checks(void)
{
	BUILD_BUG_ON(sizeof(struct siginfo) != SI_MAX_SIZE);

	/* Verify the offsets in the two siginfos match */
#define CHECK_OFFSET(field) \
	BUILD_BUG_ON(offsetof(siginfo_t, field) != offsetof(kernel_siginfo_t, field))

	/* kill */
	CHECK_OFFSET(si_pid);
	CHECK_OFFSET(si_uid);

	/* timer */
	CHECK_OFFSET(si_tid);
	CHECK_OFFSET(si_overrun);
	CHECK_OFFSET(si_value);

	/* rt */
	CHECK_OFFSET(si_pid);
	CHECK_OFFSET(si_uid);
	CHECK_OFFSET(si_value);

	/* sigchld */
	CHECK_OFFSET(si_pid);
	CHECK_OFFSET(si_uid);
	CHECK_OFFSET(si_status);
	CHECK_OFFSET(si_utime);
	CHECK_OFFSET(si_stime);

	/* sigfault */
	CHECK_OFFSET(si_addr);
	CHECK_OFFSET(si_trapno);
	CHECK_OFFSET(si_addr_lsb);
	CHECK_OFFSET(si_lower);
	CHECK_OFFSET(si_upper);
	CHECK_OFFSET(si_pkey);
	CHECK_OFFSET(si_perf_data);
	CHECK_OFFSET(si_perf_type);
	CHECK_OFFSET(si_perf_flags);

	/* sigpoll */
	CHECK_OFFSET(si_band);
	CHECK_OFFSET(si_fd);

	/* sigsys */
	CHECK_OFFSET(si_call_addr);
	CHECK_OFFSET(si_syscall);
	CHECK_OFFSET(si_arch);
#undef CHECK_OFFSET

	/* usb asyncio */
	BUILD_BUG_ON(offsetof(struct siginfo, si_pid) !=
		     offsetof(struct siginfo, si_addr));
	if (sizeof(int) == sizeof(void __user *)) {
		BUILD_BUG_ON(sizeof_field(struct siginfo, si_pid) !=
			     sizeof(void __user *));
	} else {
		BUILD_BUG_ON((sizeof_field(struct siginfo, si_pid) +
			      sizeof_field(struct siginfo, si_uid)) !=
			     sizeof(void __user *));
		BUILD_BUG_ON(offsetofend(struct siginfo, si_pid) !=
			     offsetof(struct siginfo, si_uid));
	}
#ifdef CONFIG_COMPAT
	BUILD_BUG_ON(offsetof(struct compat_siginfo, si_pid) !=
		     offsetof(struct compat_siginfo, si_addr));
	BUILD_BUG_ON(sizeof_field(struct compat_siginfo, si_pid) !=
		     sizeof(compat_uptr_t));
	BUILD_BUG_ON(sizeof_field(struct compat_siginfo, si_pid) !=
		     sizeof_field(struct siginfo, si_pid));
#endif
}

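/*
 * For reference, CHECK_OFFSET(si_pid) above expands to
 *
 *	BUILD_BUG_ON(offsetof(siginfo_t, si_pid) !=
 *		     offsetof(kernel_siginfo_t, si_pid));
 *
 * so any drift between the UAPI siginfo layout and the trimmed in-kernel
 * kernel_siginfo_t breaks the build instead of corrupting signal frames
 * at run time.
 */
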
#if defined(CONFIG_SYSCTL)
static struct ctl_table signal_debug_table[] = {
#ifdef CONFIG_SYSCTL_EXCEPTION_TRACE
	{
		.procname	= "exception-trace",
		.data		= &show_unhandled_signals,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec
	},
#endif
};

static int __init init_signal_sysctls(void)
{
	register_sysctl_init("debug", signal_debug_table);
	return 0;
}
early_initcall(init_signal_sysctls);
#endif /* CONFIG_SYSCTL */

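/*
 * With CONFIG_SYSCTL_EXCEPTION_TRACE, the entry registered above shows up
 * as /proc/sys/debug/exception-trace; toggling it (shell, illustrative)
 *
 *	echo 1 > /proc/sys/debug/exception-trace
 *
 * controls show_unhandled_signals, i.e. whether unhandled fatal signals
 * are logged.
 */
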
void __init signals_init(void)
{
	siginfo_buildtime_checks();

	sigqueue_cachep = KMEM_CACHE(sigqueue, SLAB_PANIC | SLAB_ACCOUNT);
}

#ifdef CONFIG_KGDB_KDB
#include <linux/kdb.h>
/*
 * kdb_send_sig - Allows kdb to send signals without exposing
 * signal internals.  This function checks if the required locks are
 * available before calling the main signal code, to avoid kdb
 * deadlocks.
 */
void kdb_send_sig(struct task_struct *t, int sig)
{
	static struct task_struct *kdb_prev_t;
	int new_t, ret;

	if (!spin_trylock(&t->sighand->siglock)) {
		kdb_printf("Can't do kill command now.\n"
			   "The sigmask lock is held somewhere else in "
			   "kernel, try again later\n");
		return;
	}
	new_t = kdb_prev_t != t;
	kdb_prev_t = t;
	if (!task_is_running(t) && new_t) {
		spin_unlock(&t->sighand->siglock);
		kdb_printf("Process is not RUNNING, sending a signal from "
			   "kdb risks deadlock\n"
			   "on the run queue locks. "
			   "The signal has _not_ been sent.\n"
			   "Reissue the kill command if you want to risk "
			   "the deadlock.\n");
		return;
	}
	ret = send_signal_locked(sig, SEND_SIG_PRIV, t, PIDTYPE_PID);
	spin_unlock(&t->sighand->siglock);
	if (ret)
		kdb_printf("Fail to deliver Signal %d to process %d.\n",
			   sig, t->pid);
	else
		kdb_printf("Signal %d is sent to process %d.\n", sig, t->pid);
}
#endif	/* CONFIG_KGDB_KDB */