1 /*
2 * linux/kernel/signal.c
3 *
4 * Copyright (C) 1991, 1992 Linus Torvalds
5 *
6 * 1997-11-02 Modified for POSIX.1b signals by Richard Henderson
7 *
8 * 2003-06-02 Jim Houston - Concurrent Computer Corp.
9 * Changes to use preallocated sigqueue structures
10 * to allow signals to be sent reliably.
11 */
12
13 #include <linux/slab.h>
14 #include <linux/export.h>
15 #include <linux/init.h>
16 #include <linux/sched.h>
17 #include <linux/fs.h>
18 #include <linux/tty.h>
19 #include <linux/binfmts.h>
20 #include <linux/coredump.h>
21 #include <linux/security.h>
22 #include <linux/syscalls.h>
23 #include <linux/ptrace.h>
24 #include <linux/signal.h>
25 #include <linux/signalfd.h>
26 #include <linux/ratelimit.h>
27 #include <linux/tracehook.h>
28 #include <linux/capability.h>
29 #include <linux/freezer.h>
30 #include <linux/pid_namespace.h>
31 #include <linux/nsproxy.h>
32 #include <linux/user_namespace.h>
33 #include <linux/uprobes.h>
34 #include <linux/compat.h>
35 #include <linux/cn_proc.h>
36 #include <linux/compiler.h>
37
38 #define CREATE_TRACE_POINTS
39 #include <trace/events/signal.h>
40
41 #include <asm/param.h>
42 #include <asm/uaccess.h>
43 #include <asm/unistd.h>
44 #include <asm/siginfo.h>
45 #include <asm/cacheflush.h>
46 #include "audit.h" /* audit_signal_info() */
47
48 /*
49 * SLAB caches for signal bits.
50 */
51
52 static struct kmem_cache *sigqueue_cachep;
53
54 int print_fatal_signals __read_mostly;
55
56 static void __user *sig_handler(struct task_struct *t, int sig)
57 {
58 return t->sighand->action[sig - 1].sa.sa_handler;
59 }
60
61 static int sig_handler_ignored(void __user *handler, int sig)
62 {
63 /* Is it explicitly or implicitly ignored? */
64 return handler == SIG_IGN ||
65 (handler == SIG_DFL && sig_kernel_ignore(sig));
66 }
67
68 static int sig_task_ignored(struct task_struct *t, int sig, bool force)
69 {
70 void __user *handler;
71
72 handler = sig_handler(t, sig);
73
74 if (unlikely(t->signal->flags & SIGNAL_UNKILLABLE) &&
75 handler == SIG_DFL && !force)
76 return 1;
77
78 return sig_handler_ignored(handler, sig);
79 }
80
81 static int sig_ignored(struct task_struct *t, int sig, bool force)
82 {
83 /*
84 * Blocked signals are never ignored, since the
85 * signal handler may change by the time it is
86 * unblocked.
87 */
88 if (sigismember(&t->blocked, sig) || sigismember(&t->real_blocked, sig))
89 return 0;
90
91 if (!sig_task_ignored(t, sig, force))
92 return 0;
93
94 /*
95 * Tracers may want to know about even ignored signals.
96 */
97 return !t->ptrace;
98 }
99
100 /*
101 * Re-calculate pending state from the set of locally pending
102 * signals, globally pending signals, and blocked signals.
103 */
104 static inline int has_pending_signals(sigset_t *signal, sigset_t *blocked)
105 {
106 unsigned long ready;
107 long i;
108
109 switch (_NSIG_WORDS) {
110 default:
111 for (i = _NSIG_WORDS, ready = 0; --i >= 0 ;)
112 ready |= signal->sig[i] &~ blocked->sig[i];
113 break;
114
115 case 4: ready = signal->sig[3] &~ blocked->sig[3];
116 ready |= signal->sig[2] &~ blocked->sig[2];
117 ready |= signal->sig[1] &~ blocked->sig[1];
118 ready |= signal->sig[0] &~ blocked->sig[0];
119 break;
120
121 case 2: ready = signal->sig[1] &~ blocked->sig[1];
122 ready |= signal->sig[0] &~ blocked->sig[0];
123 break;
124
125 case 1: ready = signal->sig[0] &~ blocked->sig[0];
126 }
127 return ready != 0;
128 }
129
130 #define PENDING(p,b) has_pending_signals(&(p)->signal, (b))
131
132 static int recalc_sigpending_tsk(struct task_struct *t)
133 {
134 if ((t->jobctl & JOBCTL_PENDING_MASK) ||
135 PENDING(&t->pending, &t->blocked) ||
136 PENDING(&t->signal->shared_pending, &t->blocked)) {
137 set_tsk_thread_flag(t, TIF_SIGPENDING);
138 return 1;
139 }
140 /*
141 * We must never clear the flag in another thread, or in current
142 * when it's possible the current syscall is returning -ERESTART*.
143 	 * So we don't clear it here; only the callers that know they can safely clear it do so.
144 */
145 return 0;
146 }
147
148 /*
149 * After recalculating TIF_SIGPENDING, we need to make sure the task wakes up.
150 	 * This is superfluous when called on current; the wakeup is a harmless no-op.
151 */
152 void recalc_sigpending_and_wake(struct task_struct *t)
153 {
154 if (recalc_sigpending_tsk(t))
155 signal_wake_up(t, 0);
156 }
157
158 void recalc_sigpending(void)
159 {
160 if (!recalc_sigpending_tsk(current) && !freezing(current))
161 clear_thread_flag(TIF_SIGPENDING);
162
163 }
164
165 /* Given the mask, find the first available signal that should be serviced. */
166
167 #define SYNCHRONOUS_MASK \
168 (sigmask(SIGSEGV) | sigmask(SIGBUS) | sigmask(SIGILL) | \
169 sigmask(SIGTRAP) | sigmask(SIGFPE) | sigmask(SIGSYS))
170
171 int next_signal(struct sigpending *pending, sigset_t *mask)
172 {
173 unsigned long i, *s, *m, x;
174 int sig = 0;
175
176 s = pending->signal.sig;
177 m = mask->sig;
178
179 /*
180 * Handle the first word specially: it contains the
181 * synchronous signals that need to be dequeued first.
182 */
183 x = *s &~ *m;
184 if (x) {
185 if (x & SYNCHRONOUS_MASK)
186 x &= SYNCHRONOUS_MASK;
187 sig = ffz(~x) + 1;
188 return sig;
189 }
190
191 switch (_NSIG_WORDS) {
192 default:
193 for (i = 1; i < _NSIG_WORDS; ++i) {
194 x = *++s &~ *++m;
195 if (!x)
196 continue;
197 sig = ffz(~x) + i*_NSIG_BPW + 1;
198 break;
199 }
200 break;
201
202 case 2:
203 x = s[1] &~ m[1];
204 if (!x)
205 break;
206 sig = ffz(~x) + _NSIG_BPW + 1;
207 break;
208
209 case 1:
210 /* Nothing to do */
211 break;
212 }
213
214 return sig;
215 }
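
/*
 * Worked example of the ordering above (informational, not code from this
 * file): if SIGSEGV and SIGUSR1 are both pending and unblocked, SIGSEGV is
 * returned first because the first word is restricted to SYNCHRONOUS_MASK,
 * even though SIGUSR1 has the lower signal number.  A pending SIGUSR1 alone
 * would simply be picked as the lowest set bit of the first word.
 */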
216
217 static inline void print_dropped_signal(int sig)
218 {
219 static DEFINE_RATELIMIT_STATE(ratelimit_state, 5 * HZ, 10);
220
221 if (!print_fatal_signals)
222 return;
223
224 if (!__ratelimit(&ratelimit_state))
225 return;
226
227 printk(KERN_INFO "%s/%d: reached RLIMIT_SIGPENDING, dropped signal %d\n",
228 current->comm, current->pid, sig);
229 }
230
231 /**
232 * task_set_jobctl_pending - set jobctl pending bits
233 * @task: target task
234 * @mask: pending bits to set
235 *
236  * Set @mask in @task->jobctl.  @mask must be a subset of
237  * %JOBCTL_PENDING_MASK | %JOBCTL_STOP_CONSUME | %JOBCTL_STOP_SIGMASK |
238  * %JOBCTL_TRAPPING.  If a stop signo is being set, the existing signo is
239  * cleared.  If @task is already being killed or exiting, this function
240  * becomes a no-op.
241 *
242 * CONTEXT:
243 * Must be called with @task->sighand->siglock held.
244 *
245 * RETURNS:
246  * %true if @mask was set, %false if it became a no-op because @task was dying.
247 */
248 bool task_set_jobctl_pending(struct task_struct *task, unsigned int mask)
249 {
250 BUG_ON(mask & ~(JOBCTL_PENDING_MASK | JOBCTL_STOP_CONSUME |
251 JOBCTL_STOP_SIGMASK | JOBCTL_TRAPPING));
252 BUG_ON((mask & JOBCTL_TRAPPING) && !(mask & JOBCTL_PENDING_MASK));
253
254 if (unlikely(fatal_signal_pending(task) || (task->flags & PF_EXITING)))
255 return false;
256
257 if (mask & JOBCTL_STOP_SIGMASK)
258 task->jobctl &= ~JOBCTL_STOP_SIGMASK;
259
260 task->jobctl |= mask;
261 return true;
262 }
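
/*
 * Illustrative call pattern (a sketch, not code from this file): callers
 * hold siglock and pair the set with a wakeup, roughly the way the ptrace
 * code schedules a STOP trap.  The exact flags and wakeup used by real
 * callers differ.
 *
 *	spin_lock_irq(&task->sighand->siglock);
 *	if (task_set_jobctl_pending(task, JOBCTL_TRAP_STOP))
 *		signal_wake_up(task, 0);
 *	spin_unlock_irq(&task->sighand->siglock);
 */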
263
264 /**
265 * task_clear_jobctl_trapping - clear jobctl trapping bit
266 * @task: target task
267 *
268 * If JOBCTL_TRAPPING is set, a ptracer is waiting for us to enter TRACED.
269 * Clear it and wake up the ptracer. Note that we don't need any further
270 * locking. @task->siglock guarantees that @task->parent points to the
271 * ptracer.
272 *
273 * CONTEXT:
274 * Must be called with @task->sighand->siglock held.
275 */
276 void task_clear_jobctl_trapping(struct task_struct *task)
277 {
278 if (unlikely(task->jobctl & JOBCTL_TRAPPING)) {
279 task->jobctl &= ~JOBCTL_TRAPPING;
280 smp_mb(); /* advised by wake_up_bit() */
281 wake_up_bit(&task->jobctl, JOBCTL_TRAPPING_BIT);
282 }
283 }
284
285 /**
286 * task_clear_jobctl_pending - clear jobctl pending bits
287 * @task: target task
288 * @mask: pending bits to clear
289 *
290  * Clear @mask from @task->jobctl.  @mask must be a subset of
291 * %JOBCTL_PENDING_MASK. If %JOBCTL_STOP_PENDING is being cleared, other
292 * STOP bits are cleared together.
293 *
294 * If clearing of @mask leaves no stop or trap pending, this function calls
295 * task_clear_jobctl_trapping().
296 *
297 * CONTEXT:
298 * Must be called with @task->sighand->siglock held.
299 */
300 void task_clear_jobctl_pending(struct task_struct *task, unsigned int mask)
301 {
302 BUG_ON(mask & ~JOBCTL_PENDING_MASK);
303
304 if (mask & JOBCTL_STOP_PENDING)
305 mask |= JOBCTL_STOP_CONSUME | JOBCTL_STOP_DEQUEUED;
306
307 task->jobctl &= ~mask;
308
309 if (!(task->jobctl & JOBCTL_PENDING_MASK))
310 task_clear_jobctl_trapping(task);
311 }
312
313 /**
314 * task_participate_group_stop - participate in a group stop
315 * @task: task participating in a group stop
316 *
317 * @task has %JOBCTL_STOP_PENDING set and is participating in a group stop.
318 * Group stop states are cleared and the group stop count is consumed if
319 * %JOBCTL_STOP_CONSUME was set. If the consumption completes the group
320 * stop, the appropriate %SIGNAL_* flags are set.
321 *
322 * CONTEXT:
323 * Must be called with @task->sighand->siglock held.
324 *
325 * RETURNS:
326 * %true if group stop completion should be notified to the parent, %false
327 * otherwise.
328 */
329 static bool task_participate_group_stop(struct task_struct *task)
330 {
331 struct signal_struct *sig = task->signal;
332 bool consume = task->jobctl & JOBCTL_STOP_CONSUME;
333
334 WARN_ON_ONCE(!(task->jobctl & JOBCTL_STOP_PENDING));
335
336 task_clear_jobctl_pending(task, JOBCTL_STOP_PENDING);
337
338 if (!consume)
339 return false;
340
341 if (!WARN_ON_ONCE(sig->group_stop_count == 0))
342 sig->group_stop_count--;
343
344 /*
345 * Tell the caller to notify completion iff we are entering into a
346 * fresh group stop. Read comment in do_signal_stop() for details.
347 */
348 if (!sig->group_stop_count && !(sig->flags & SIGNAL_STOP_STOPPED)) {
349 sig->flags = SIGNAL_STOP_STOPPED;
350 return true;
351 }
352 return false;
353 }
354
355 /*
356 * allocate a new signal queue record
357 * - this may be called without locks if and only if t == current, otherwise an
358 * appropriate lock must be held to stop the target task from exiting
359 */
360 static struct sigqueue *
361 __sigqueue_alloc(int sig, struct task_struct *t, gfp_t flags, int override_rlimit)
362 {
363 struct sigqueue *q = NULL;
364 struct user_struct *user;
365
366 /*
367 * Protect access to @t credentials. This can go away when all
368 * callers hold rcu read lock.
369 */
370 rcu_read_lock();
371 user = get_uid(__task_cred(t)->user);
372 atomic_inc(&user->sigpending);
373 rcu_read_unlock();
374
375 if (override_rlimit ||
376 atomic_read(&user->sigpending) <=
377 task_rlimit(t, RLIMIT_SIGPENDING)) {
378 q = kmem_cache_alloc(sigqueue_cachep, flags);
379 } else {
380 print_dropped_signal(sig);
381 }
382
383 if (unlikely(q == NULL)) {
384 atomic_dec(&user->sigpending);
385 free_uid(user);
386 } else {
387 INIT_LIST_HEAD(&q->list);
388 q->flags = 0;
389 q->user = user;
390 }
391
392 return q;
393 }
394
395 static void __sigqueue_free(struct sigqueue *q)
396 {
397 if (q->flags & SIGQUEUE_PREALLOC)
398 return;
399 atomic_dec(&q->user->sigpending);
400 free_uid(q->user);
401 kmem_cache_free(sigqueue_cachep, q);
402 }
403
404 void flush_sigqueue(struct sigpending *queue)
405 {
406 struct sigqueue *q;
407
408 sigemptyset(&queue->signal);
409 while (!list_empty(&queue->list)) {
410 q = list_entry(queue->list.next, struct sigqueue , list);
411 list_del_init(&q->list);
412 __sigqueue_free(q);
413 }
414 }
415
416 /*
417 * Flush all pending signals for a task.
418 */
419 void __flush_signals(struct task_struct *t)
420 {
421 clear_tsk_thread_flag(t, TIF_SIGPENDING);
422 flush_sigqueue(&t->pending);
423 flush_sigqueue(&t->signal->shared_pending);
424 }
425
426 void flush_signals(struct task_struct *t)
427 {
428 unsigned long flags;
429
430 spin_lock_irqsave(&t->sighand->siglock, flags);
431 __flush_signals(t);
432 spin_unlock_irqrestore(&t->sighand->siglock, flags);
433 }
434
435 static void __flush_itimer_signals(struct sigpending *pending)
436 {
437 sigset_t signal, retain;
438 struct sigqueue *q, *n;
439
440 signal = pending->signal;
441 sigemptyset(&retain);
442
443 list_for_each_entry_safe(q, n, &pending->list, list) {
444 int sig = q->info.si_signo;
445
446 if (likely(q->info.si_code != SI_TIMER)) {
447 sigaddset(&retain, sig);
448 } else {
449 sigdelset(&signal, sig);
450 list_del_init(&q->list);
451 __sigqueue_free(q);
452 }
453 }
454
455 sigorsets(&pending->signal, &signal, &retain);
456 }
457
458 void flush_itimer_signals(void)
459 {
460 struct task_struct *tsk = current;
461 unsigned long flags;
462
463 spin_lock_irqsave(&tsk->sighand->siglock, flags);
464 __flush_itimer_signals(&tsk->pending);
465 __flush_itimer_signals(&tsk->signal->shared_pending);
466 spin_unlock_irqrestore(&tsk->sighand->siglock, flags);
467 }
468
469 void ignore_signals(struct task_struct *t)
470 {
471 int i;
472
473 for (i = 0; i < _NSIG; ++i)
474 t->sighand->action[i].sa.sa_handler = SIG_IGN;
475
476 flush_signals(t);
477 }
478
479 /*
480 * Flush all handlers for a task.
481 */
482
483 void
484 flush_signal_handlers(struct task_struct *t, int force_default)
485 {
486 int i;
487 struct k_sigaction *ka = &t->sighand->action[0];
488 for (i = _NSIG ; i != 0 ; i--) {
489 if (force_default || ka->sa.sa_handler != SIG_IGN)
490 ka->sa.sa_handler = SIG_DFL;
491 ka->sa.sa_flags = 0;
492 #ifdef __ARCH_HAS_SA_RESTORER
493 ka->sa.sa_restorer = NULL;
494 #endif
495 sigemptyset(&ka->sa.sa_mask);
496 ka++;
497 }
498 }
499
500 int unhandled_signal(struct task_struct *tsk, int sig)
501 {
502 void __user *handler = tsk->sighand->action[sig-1].sa.sa_handler;
503 if (is_global_init(tsk))
504 return 1;
505 if (handler != SIG_IGN && handler != SIG_DFL)
506 return 0;
507 /* if ptraced, let the tracer determine */
508 return !tsk->ptrace;
509 }
510
511 /*
512 * Notify the system that a driver wants to block all signals for this
513 * process, and wants to be notified if any signals at all were to be
514 * sent/acted upon. If the notifier routine returns non-zero, then the
515 * signal will be acted upon after all. If the notifier routine returns 0,
516  * then the signal will be blocked.  Only one block per process is
517 * allowed. priv is a pointer to private data that the notifier routine
518 * can use to determine if the signal should be blocked or not.
519 */
520 void
521 block_all_signals(int (*notifier)(void *priv), void *priv, sigset_t *mask)
522 {
523 unsigned long flags;
524
525 spin_lock_irqsave(&current->sighand->siglock, flags);
526 current->notifier_mask = mask;
527 current->notifier_data = priv;
528 current->notifier = notifier;
529 spin_unlock_irqrestore(&current->sighand->siglock, flags);
530 }
531
532 /* Notify the system that blocking has ended. */
533
534 void
535 unblock_all_signals(void)
536 {
537 unsigned long flags;
538
539 spin_lock_irqsave(&current->sighand->siglock, flags);
540 current->notifier = NULL;
541 current->notifier_data = NULL;
542 recalc_sigpending();
543 spin_unlock_irqrestore(&current->sighand->siglock, flags);
544 }
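
/*
 * Illustrative use of the notifier interface above (a sketch only; the
 * callback, device structure and field names are hypothetical).  The
 * notifier returns non-zero to let a signal through and 0 to keep it
 * blocked:
 *
 *	static int my_notifier(void *priv)
 *	{
 *		struct my_dev *dev = priv;	// hypothetical driver data
 *		return !dev->busy;		// 0 == keep the signal blocked
 *	}
 *
 *	sigset_t mask;
 *	siginitset(&mask, sigmask(SIGINT) | sigmask(SIGTSTP));
 *	block_all_signals(my_notifier, dev, &mask);
 *	...
 *	unblock_all_signals();
 */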
545
546 static void collect_signal(int sig, struct sigpending *list, siginfo_t *info,
547 bool *resched_timer)
548 {
549 struct sigqueue *q, *first = NULL;
550
551 /*
552 * Collect the siginfo appropriate to this signal. Check if
553 * there is another siginfo for the same signal.
554 */
555 list_for_each_entry(q, &list->list, list) {
556 if (q->info.si_signo == sig) {
557 if (first)
558 goto still_pending;
559 first = q;
560 }
561 }
562
563 sigdelset(&list->signal, sig);
564
565 if (first) {
566 still_pending:
567 list_del_init(&first->list);
568 copy_siginfo(info, &first->info);
569
570 *resched_timer =
571 (first->flags & SIGQUEUE_PREALLOC) &&
572 (info->si_code == SI_TIMER) &&
573 (info->si_sys_private);
574
575 __sigqueue_free(first);
576 } else {
577 /*
578 * Ok, it wasn't in the queue. This must be
579 * a fast-pathed signal or we must have been
580 * out of queue space. So zero out the info.
581 */
582 info->si_signo = sig;
583 info->si_errno = 0;
584 info->si_code = SI_USER;
585 info->si_pid = 0;
586 info->si_uid = 0;
587 }
588 }
589
590 static int __dequeue_signal(struct sigpending *pending, sigset_t *mask,
591 siginfo_t *info, bool *resched_timer)
592 {
593 int sig = next_signal(pending, mask);
594
595 if (sig) {
596 if (current->notifier) {
597 if (sigismember(current->notifier_mask, sig)) {
598 if (!(current->notifier)(current->notifier_data)) {
599 clear_thread_flag(TIF_SIGPENDING);
600 return 0;
601 }
602 }
603 }
604
605 collect_signal(sig, pending, info, resched_timer);
606 }
607
608 return sig;
609 }
610
611 /*
612 * Dequeue a signal and return the element to the caller, which is
613 * expected to free it.
614 *
615 * All callers have to hold the siglock.
616 */
617 int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info)
618 {
619 bool resched_timer = false;
620 int signr;
621
622 /* We only dequeue private signals from ourselves, we don't let
623 * signalfd steal them
624 */
625 signr = __dequeue_signal(&tsk->pending, mask, info, &resched_timer);
626 if (!signr) {
627 signr = __dequeue_signal(&tsk->signal->shared_pending,
628 mask, info, &resched_timer);
629 /*
630 * itimer signal ?
631 *
632 * itimers are process shared and we restart periodic
633 * itimers in the signal delivery path to prevent DoS
634 * attacks in the high resolution timer case. This is
635 * compliant with the old way of self-restarting
636 * itimers, as the SIGALRM is a legacy signal and only
637 * queued once. Changing the restart behaviour to
638 		 * restart the timer in the signal dequeue path also
639 		 * reduces the timer noise on heavily loaded !highres
640 		 * systems.
641 */
642 if (unlikely(signr == SIGALRM)) {
643 struct hrtimer *tmr = &tsk->signal->real_timer;
644
645 if (!hrtimer_is_queued(tmr) &&
646 tsk->signal->it_real_incr.tv64 != 0) {
647 hrtimer_forward(tmr, tmr->base->get_time(),
648 tsk->signal->it_real_incr);
649 hrtimer_restart(tmr);
650 }
651 }
652 }
653
654 recalc_sigpending();
655 if (!signr)
656 return 0;
657
658 if (unlikely(sig_kernel_stop(signr))) {
659 /*
660 * Set a marker that we have dequeued a stop signal. Our
661 * caller might release the siglock and then the pending
662 * stop signal it is about to process is no longer in the
663 * pending bitmasks, but must still be cleared by a SIGCONT
664 * (and overruled by a SIGKILL). So those cases clear this
665 * shared flag after we've set it. Note that this flag may
666 * remain set after the signal we return is ignored or
667 * handled. That doesn't matter because its only purpose
668 * is to alert stop-signal processing code when another
669 * processor has come along and cleared the flag.
670 */
671 current->jobctl |= JOBCTL_STOP_DEQUEUED;
672 }
673 if (resched_timer) {
674 /*
675 * Release the siglock to ensure proper locking order
676 * of timer locks outside of siglocks. Note, we leave
677 * irqs disabled here, since the posix-timers code is
678 * about to disable them again anyway.
679 */
680 spin_unlock(&tsk->sighand->siglock);
681 do_schedule_next_timer(info);
682 spin_lock(&tsk->sighand->siglock);
683 }
684 return signr;
685 }
686
687 static int dequeue_synchronous_signal(siginfo_t *info)
688 {
689 struct task_struct *tsk = current;
690 struct sigpending *pending = &tsk->pending;
691 struct sigqueue *q, *sync = NULL;
692
693 /*
694 * Might a synchronous signal be in the queue?
695 */
696 if (!((pending->signal.sig[0] & ~tsk->blocked.sig[0]) & SYNCHRONOUS_MASK))
697 return 0;
698
699 /*
700 * Return the first synchronous signal in the queue.
701 */
702 list_for_each_entry(q, &pending->list, list) {
703 		/* Synchronous signals have a positive si_code */
704 if ((q->info.si_code > SI_USER) &&
705 (sigmask(q->info.si_signo) & SYNCHRONOUS_MASK)) {
706 sync = q;
707 goto next;
708 }
709 }
710 return 0;
711 next:
712 /*
713 * Check if there is another siginfo for the same signal.
714 */
715 list_for_each_entry_continue(q, &pending->list, list) {
716 if (q->info.si_signo == sync->info.si_signo)
717 goto still_pending;
718 }
719
720 sigdelset(&pending->signal, sync->info.si_signo);
721 recalc_sigpending();
722 still_pending:
723 list_del_init(&sync->list);
724 copy_siginfo(info, &sync->info);
725 __sigqueue_free(sync);
726 return info->si_signo;
727 }
728
729 /*
730  * Tell a process that it has a new active signal.
731  *
732  * NOTE! We rely on the previous spin_lock to
733  * lock interrupts for us! We can only be called with
734  * "siglock" held, and local interrupts must
735 * have been disabled when that got acquired!
736 *
737 * No need to set need_resched since signal event passing
738 * goes through ->blocked
739 */
740 void signal_wake_up_state(struct task_struct *t, unsigned int state)
741 {
742 set_tsk_thread_flag(t, TIF_SIGPENDING);
743 /*
744 * TASK_WAKEKILL also means wake it up in the stopped/traced/killable
745 * case. We don't check t->state here because there is a race with it
746 	 * executing on another processor and just now entering stopped state.
747 * By using wake_up_state, we ensure the process will wake up and
748 * handle its death signal.
749 */
750 if (!wake_up_state(t, state | TASK_INTERRUPTIBLE))
751 kick_process(t);
752 }
753
754 /*
755 * Remove signals in mask from the pending set and queue.
756 * Returns 1 if any signals were found.
757 *
758 * All callers must be holding the siglock.
759 */
760 static int flush_sigqueue_mask(sigset_t *mask, struct sigpending *s)
761 {
762 struct sigqueue *q, *n;
763 sigset_t m;
764
765 sigandsets(&m, mask, &s->signal);
766 if (sigisemptyset(&m))
767 return 0;
768
769 sigandnsets(&s->signal, &s->signal, mask);
770 list_for_each_entry_safe(q, n, &s->list, list) {
771 if (sigismember(mask, q->info.si_signo)) {
772 list_del_init(&q->list);
773 __sigqueue_free(q);
774 }
775 }
776 return 1;
777 }
778
779 static inline int is_si_special(const struct siginfo *info)
780 {
781 return info <= SEND_SIG_FORCED;
782 }
783
784 static inline bool si_fromuser(const struct siginfo *info)
785 {
786 return info == SEND_SIG_NOINFO ||
787 (!is_si_special(info) && SI_FROMUSER(info));
788 }
789
790 /*
791 * called with RCU read lock from check_kill_permission()
792 */
793 static int kill_ok_by_cred(struct task_struct *t)
794 {
795 const struct cred *cred = current_cred();
796 const struct cred *tcred = __task_cred(t);
797
798 if (uid_eq(cred->euid, tcred->suid) ||
799 uid_eq(cred->euid, tcred->uid) ||
800 uid_eq(cred->uid, tcred->suid) ||
801 uid_eq(cred->uid, tcred->uid))
802 return 1;
803
804 if (ns_capable(tcred->user_ns, CAP_KILL))
805 return 1;
806
807 return 0;
808 }
809
810 /*
811 * Bad permissions for sending the signal
812 * - the caller must hold the RCU read lock
813 */
814 static int check_kill_permission(int sig, struct siginfo *info,
815 struct task_struct *t)
816 {
817 struct pid *sid;
818 int error;
819
820 if (!valid_signal(sig))
821 return -EINVAL;
822
823 if (!si_fromuser(info))
824 return 0;
825
826 error = audit_signal_info(sig, t); /* Let audit system see the signal */
827 if (error)
828 return error;
829
830 if (!same_thread_group(current, t) &&
831 !kill_ok_by_cred(t)) {
832 switch (sig) {
833 case SIGCONT:
834 sid = task_session(t);
835 /*
836 * We don't return the error if sid == NULL. The
837 * task was unhashed, the caller must notice this.
838 */
839 if (!sid || sid == task_session(current))
840 break;
841 default:
842 return -EPERM;
843 }
844 }
845
846 return security_task_kill(t, info, sig, 0);
847 }
848
849 /**
850 * ptrace_trap_notify - schedule trap to notify ptracer
851 * @t: tracee wanting to notify tracer
852 *
853 * This function schedules sticky ptrace trap which is cleared on the next
854 * TRAP_STOP to notify ptracer of an event. @t must have been seized by
855 * ptracer.
856 *
857 * If @t is running, STOP trap will be taken. If trapped for STOP and
858 * ptracer is listening for events, tracee is woken up so that it can
859 * re-trap for the new event. If trapped otherwise, STOP trap will be
860 * eventually taken without returning to userland after the existing traps
861 * are finished by PTRACE_CONT.
862 *
863 * CONTEXT:
864 * Must be called with @task->sighand->siglock held.
865 */
866 static void ptrace_trap_notify(struct task_struct *t)
867 {
868 WARN_ON_ONCE(!(t->ptrace & PT_SEIZED));
869 assert_spin_locked(&t->sighand->siglock);
870
871 task_set_jobctl_pending(t, JOBCTL_TRAP_NOTIFY);
872 ptrace_signal_wake_up(t, t->jobctl & JOBCTL_LISTENING);
873 }
874
875 /*
876 * Handle magic process-wide effects of stop/continue signals. Unlike
877 * the signal actions, these happen immediately at signal-generation
878 * time regardless of blocking, ignoring, or handling. This does the
879 * actual continuing for SIGCONT, but not the actual stopping for stop
880 * signals. The process stop is done as a signal action for SIG_DFL.
881 *
882 * Returns true if the signal should be actually delivered, otherwise
883 * it should be dropped.
884 */
885 static bool prepare_signal(int sig, struct task_struct *p, bool force)
886 {
887 struct signal_struct *signal = p->signal;
888 struct task_struct *t;
889 sigset_t flush;
890
891 if (signal->flags & (SIGNAL_GROUP_EXIT | SIGNAL_GROUP_COREDUMP)) {
892 if (signal->flags & SIGNAL_GROUP_COREDUMP)
893 return sig == SIGKILL;
894 /*
895 * The process is in the middle of dying, nothing to do.
896 */
897 } else if (sig_kernel_stop(sig)) {
898 /*
899 * This is a stop signal. Remove SIGCONT from all queues.
900 */
901 siginitset(&flush, sigmask(SIGCONT));
902 flush_sigqueue_mask(&flush, &signal->shared_pending);
903 for_each_thread(p, t)
904 flush_sigqueue_mask(&flush, &t->pending);
905 } else if (sig == SIGCONT) {
906 unsigned int why;
907 /*
908 * Remove all stop signals from all queues, wake all threads.
909 */
910 siginitset(&flush, SIG_KERNEL_STOP_MASK);
911 flush_sigqueue_mask(&flush, &signal->shared_pending);
912 for_each_thread(p, t) {
913 flush_sigqueue_mask(&flush, &t->pending);
914 task_clear_jobctl_pending(t, JOBCTL_STOP_PENDING);
915 if (likely(!(t->ptrace & PT_SEIZED)))
916 wake_up_state(t, __TASK_STOPPED);
917 else
918 ptrace_trap_notify(t);
919 }
920
921 /*
922 * Notify the parent with CLD_CONTINUED if we were stopped.
923 *
924 * If we were in the middle of a group stop, we pretend it
925 * was already finished, and then continued. Since SIGCHLD
926 		 * doesn't queue, we report only CLD_STOPPED, as if the next
927 * CLD_CONTINUED was dropped.
928 */
929 why = 0;
930 if (signal->flags & SIGNAL_STOP_STOPPED)
931 why |= SIGNAL_CLD_CONTINUED;
932 else if (signal->group_stop_count)
933 why |= SIGNAL_CLD_STOPPED;
934
935 if (why) {
936 /*
937 * The first thread which returns from do_signal_stop()
938 * will take ->siglock, notice SIGNAL_CLD_MASK, and
939 * notify its parent. See get_signal_to_deliver().
940 */
941 signal->flags = why | SIGNAL_STOP_CONTINUED;
942 signal->group_stop_count = 0;
943 signal->group_exit_code = 0;
944 }
945 }
946
947 return !sig_ignored(p, sig, force);
948 }
949
950 /*
951 * Test if P wants to take SIG. After we've checked all threads with this,
952 * it's equivalent to finding no threads not blocking SIG. Any threads not
953 * blocking SIG were ruled out because they are not running and already
954 * have pending signals. Such threads will dequeue from the shared queue
955 * as soon as they're available, so putting the signal on the shared queue
956 * will be equivalent to sending it to one such thread.
957 */
958 static inline int wants_signal(int sig, struct task_struct *p)
959 {
960 if (sigismember(&p->blocked, sig))
961 return 0;
962 if (p->flags & PF_EXITING)
963 return 0;
964 if (sig == SIGKILL)
965 return 1;
966 if (task_is_stopped_or_traced(p))
967 return 0;
968 return task_curr(p) || !signal_pending(p);
969 }
970
971 static void complete_signal(int sig, struct task_struct *p, int group)
972 {
973 struct signal_struct *signal = p->signal;
974 struct task_struct *t;
975
976 /*
977 * Now find a thread we can wake up to take the signal off the queue.
978 *
979 * If the main thread wants the signal, it gets first crack.
980 * Probably the least surprising to the average bear.
981 */
982 if (wants_signal(sig, p))
983 t = p;
984 else if (!group || thread_group_empty(p))
985 /*
986 * There is just one thread and it does not need to be woken.
987 * It will dequeue unblocked signals before it runs again.
988 */
989 return;
990 else {
991 /*
992 * Otherwise try to find a suitable thread.
993 */
994 t = signal->curr_target;
995 while (!wants_signal(sig, t)) {
996 t = next_thread(t);
997 if (t == signal->curr_target)
998 /*
999 * No thread needs to be woken.
1000 * Any eligible threads will see
1001 * the signal in the queue soon.
1002 */
1003 return;
1004 }
1005 signal->curr_target = t;
1006 }
1007
1008 /*
1009 * Found a killable thread. If the signal will be fatal,
1010 * then start taking the whole group down immediately.
1011 */
1012 if (sig_fatal(p, sig) &&
1013 !(signal->flags & (SIGNAL_UNKILLABLE | SIGNAL_GROUP_EXIT)) &&
1014 !sigismember(&t->real_blocked, sig) &&
1015 (sig == SIGKILL || !t->ptrace)) {
1016 /*
1017 * This signal will be fatal to the whole group.
1018 */
1019 if (!sig_kernel_coredump(sig)) {
1020 /*
1021 * Start a group exit and wake everybody up.
1022 * This way we don't have other threads
1023 * running and doing things after a slower
1024 * thread has the fatal signal pending.
1025 */
1026 signal->flags = SIGNAL_GROUP_EXIT;
1027 signal->group_exit_code = sig;
1028 signal->group_stop_count = 0;
1029 t = p;
1030 do {
1031 task_clear_jobctl_pending(t, JOBCTL_PENDING_MASK);
1032 sigaddset(&t->pending.signal, SIGKILL);
1033 signal_wake_up(t, 1);
1034 } while_each_thread(p, t);
1035 return;
1036 }
1037 }
1038
1039 /*
1040 * The signal is already in the shared-pending queue.
1041 * Tell the chosen thread to wake up and dequeue it.
1042 */
1043 signal_wake_up(t, sig == SIGKILL);
1044 return;
1045 }
1046
1047 static inline int legacy_queue(struct sigpending *signals, int sig)
1048 {
1049 return (sig < SIGRTMIN) && sigismember(&signals->signal, sig);
1050 }
1051
1052 #ifdef CONFIG_USER_NS
1053 static inline void userns_fixup_signal_uid(struct siginfo *info, struct task_struct *t)
1054 {
1055 if (current_user_ns() == task_cred_xxx(t, user_ns))
1056 return;
1057
1058 if (SI_FROMKERNEL(info))
1059 return;
1060
1061 rcu_read_lock();
1062 info->si_uid = from_kuid_munged(task_cred_xxx(t, user_ns),
1063 make_kuid(current_user_ns(), info->si_uid));
1064 rcu_read_unlock();
1065 }
1066 #else
1067 static inline void userns_fixup_signal_uid(struct siginfo *info, struct task_struct *t)
1068 {
1069 return;
1070 }
1071 #endif
1072
1073 static int __send_signal(int sig, struct siginfo *info, struct task_struct *t,
1074 int group, int from_ancestor_ns)
1075 {
1076 struct sigpending *pending;
1077 struct sigqueue *q;
1078 int override_rlimit;
1079 int ret = 0, result;
1080
1081 assert_spin_locked(&t->sighand->siglock);
1082
1083 result = TRACE_SIGNAL_IGNORED;
1084 if (!prepare_signal(sig, t,
1085 from_ancestor_ns || (info == SEND_SIG_PRIV) || (info == SEND_SIG_FORCED)))
1086 goto ret;
1087
1088 pending = group ? &t->signal->shared_pending : &t->pending;
1089 /*
1090 * Short-circuit ignored signals and support queuing
1091 * exactly one non-rt signal, so that we can get more
1092 * detailed information about the cause of the signal.
1093 */
1094 result = TRACE_SIGNAL_ALREADY_PENDING;
1095 if (legacy_queue(pending, sig))
1096 goto ret;
1097
1098 result = TRACE_SIGNAL_DELIVERED;
1099 /*
1100 * fast-pathed signals for kernel-internal things like SIGSTOP
1101 * or SIGKILL.
1102 */
1103 if (info == SEND_SIG_FORCED)
1104 goto out_set;
1105
1106 /*
1107 * Real-time signals must be queued if sent by sigqueue, or
1108 * some other real-time mechanism. It is implementation
1109 * defined whether kill() does so. We attempt to do so, on
1110 * the principle of least surprise, but since kill is not
1111 * allowed to fail with EAGAIN when low on memory we just
1112 * make sure at least one signal gets delivered and don't
1113 * pass on the info struct.
1114 */
1115 if (sig < SIGRTMIN)
1116 override_rlimit = (is_si_special(info) || info->si_code >= 0);
1117 else
1118 override_rlimit = 0;
1119
1120 q = __sigqueue_alloc(sig, t, GFP_ATOMIC | __GFP_NOTRACK_FALSE_POSITIVE,
1121 override_rlimit);
1122 if (q) {
1123 list_add_tail(&q->list, &pending->list);
1124 switch ((unsigned long) info) {
1125 case (unsigned long) SEND_SIG_NOINFO:
1126 q->info.si_signo = sig;
1127 q->info.si_errno = 0;
1128 q->info.si_code = SI_USER;
1129 q->info.si_pid = task_tgid_nr_ns(current,
1130 task_active_pid_ns(t));
1131 q->info.si_uid = from_kuid_munged(current_user_ns(), current_uid());
1132 break;
1133 case (unsigned long) SEND_SIG_PRIV:
1134 q->info.si_signo = sig;
1135 q->info.si_errno = 0;
1136 q->info.si_code = SI_KERNEL;
1137 q->info.si_pid = 0;
1138 q->info.si_uid = 0;
1139 break;
1140 default:
1141 copy_siginfo(&q->info, info);
1142 if (from_ancestor_ns)
1143 q->info.si_pid = 0;
1144 break;
1145 }
1146
1147 userns_fixup_signal_uid(&q->info, t);
1148
1149 } else if (!is_si_special(info)) {
1150 if (sig >= SIGRTMIN && info->si_code != SI_USER) {
1151 /*
1152 * Queue overflow, abort. We may abort if the
1153 * signal was rt and sent by user using something
1154 * other than kill().
1155 */
1156 result = TRACE_SIGNAL_OVERFLOW_FAIL;
1157 ret = -EAGAIN;
1158 goto ret;
1159 } else {
1160 /*
1161 * This is a silent loss of information. We still
1162 * send the signal, but the *info bits are lost.
1163 */
1164 result = TRACE_SIGNAL_LOSE_INFO;
1165 }
1166 }
1167
1168 out_set:
1169 signalfd_notify(t, sig);
1170 sigaddset(&pending->signal, sig);
1171 complete_signal(sig, t, group);
1172 ret:
1173 trace_signal_generate(sig, info, t, group, result);
1174 return ret;
1175 }
1176
1177 static int send_signal(int sig, struct siginfo *info, struct task_struct *t,
1178 int group)
1179 {
1180 int from_ancestor_ns = 0;
1181
1182 #ifdef CONFIG_PID_NS
1183 from_ancestor_ns = si_fromuser(info) &&
1184 !task_pid_nr_ns(current, task_active_pid_ns(t));
1185 #endif
1186
1187 return __send_signal(sig, info, t, group, from_ancestor_ns);
1188 }
1189
1190 static void print_fatal_signal(int signr)
1191 {
1192 struct pt_regs *regs = signal_pt_regs();
1193 printk(KERN_INFO "potentially unexpected fatal signal %d.\n", signr);
1194
1195 #if defined(__i386__) && !defined(__arch_um__)
1196 printk(KERN_INFO "code at %08lx: ", regs->ip);
1197 {
1198 int i;
1199 for (i = 0; i < 16; i++) {
1200 unsigned char insn;
1201
1202 if (get_user(insn, (unsigned char *)(regs->ip + i)))
1203 break;
1204 printk(KERN_CONT "%02x ", insn);
1205 }
1206 }
1207 printk(KERN_CONT "\n");
1208 #endif
1209 preempt_disable();
1210 show_regs(regs);
1211 preempt_enable();
1212 }
1213
1214 static int __init setup_print_fatal_signals(char *str)
1215 {
1216 get_option (&str, &print_fatal_signals);
1217
1218 return 1;
1219 }
1220
1221 __setup("print-fatal-signals=", setup_print_fatal_signals);
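
/*
 * Usage note (informational): with the __setup() hook above, appending
 * "print-fatal-signals=1" to the kernel command line enables the
 * diagnostic; kernels that also wire print_fatal_signals into sysctl
 * expose the same knob at run time (typically
 * /proc/sys/kernel/print-fatal-signals).
 */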
1222
1223 int
1224 __group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
1225 {
1226 return send_signal(sig, info, p, 1);
1227 }
1228
1229 static int
1230 specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t)
1231 {
1232 return send_signal(sig, info, t, 0);
1233 }
1234
1235 int do_send_sig_info(int sig, struct siginfo *info, struct task_struct *p,
1236 bool group)
1237 {
1238 unsigned long flags;
1239 int ret = -ESRCH;
1240
1241 if (lock_task_sighand(p, &flags)) {
1242 ret = send_signal(sig, info, p, group);
1243 unlock_task_sighand(p, &flags);
1244 }
1245
1246 return ret;
1247 }
1248
1249 /*
1250 * Force a signal that the process can't ignore: if necessary
1251 * we unblock the signal and change any SIG_IGN to SIG_DFL.
1252 *
1253 * Note: If we unblock the signal, we always reset it to SIG_DFL,
1254 * since we do not want to have a signal handler that was blocked
1255 * be invoked when user space had explicitly blocked it.
1256 *
1257 * We don't want to have recursive SIGSEGV's etc, for example,
1258 * that is why we also clear SIGNAL_UNKILLABLE.
1259 */
1260 int
1261 force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
1262 {
1263 unsigned long int flags;
1264 int ret, blocked, ignored;
1265 struct k_sigaction *action;
1266
1267 spin_lock_irqsave(&t->sighand->siglock, flags);
1268 action = &t->sighand->action[sig-1];
1269 ignored = action->sa.sa_handler == SIG_IGN;
1270 blocked = sigismember(&t->blocked, sig);
1271 if (blocked || ignored) {
1272 action->sa.sa_handler = SIG_DFL;
1273 if (blocked) {
1274 sigdelset(&t->blocked, sig);
1275 recalc_sigpending_and_wake(t);
1276 }
1277 }
1278 if (action->sa.sa_handler == SIG_DFL)
1279 t->signal->flags &= ~SIGNAL_UNKILLABLE;
1280 ret = specific_send_sig_info(sig, info, t);
1281 spin_unlock_irqrestore(&t->sighand->siglock, flags);
1282
1283 return ret;
1284 }
1285
1286 /*
1287 * Nuke all other threads in the group.
1288 */
1289 int zap_other_threads(struct task_struct *p)
1290 {
1291 struct task_struct *t = p;
1292 int count = 0;
1293
1294 p->signal->group_stop_count = 0;
1295
1296 while_each_thread(p, t) {
1297 task_clear_jobctl_pending(t, JOBCTL_PENDING_MASK);
1298 count++;
1299
1300 /* Don't bother with already dead threads */
1301 if (t->exit_state)
1302 continue;
1303 sigaddset(&t->pending.signal, SIGKILL);
1304 signal_wake_up(t, 1);
1305 }
1306
1307 return count;
1308 }
1309
1310 struct sighand_struct *__lock_task_sighand(struct task_struct *tsk,
1311 unsigned long *flags)
1312 {
1313 struct sighand_struct *sighand;
1314
1315 for (;;) {
1316 local_irq_save(*flags);
1317 rcu_read_lock();
1318 sighand = rcu_dereference(tsk->sighand);
1319 if (unlikely(sighand == NULL)) {
1320 rcu_read_unlock();
1321 local_irq_restore(*flags);
1322 break;
1323 }
1324
1325 spin_lock(&sighand->siglock);
1326 if (likely(sighand == tsk->sighand)) {
1327 rcu_read_unlock();
1328 break;
1329 }
1330 spin_unlock(&sighand->siglock);
1331 rcu_read_unlock();
1332 local_irq_restore(*flags);
1333 }
1334
1335 return sighand;
1336 }
1337
1338 /*
1339 * send signal info to all the members of a group
1340 */
1341 int group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
1342 {
1343 int ret;
1344
1345 rcu_read_lock();
1346 ret = check_kill_permission(sig, info, p);
1347 rcu_read_unlock();
1348
1349 if (!ret && sig)
1350 ret = do_send_sig_info(sig, info, p, true);
1351
1352 return ret;
1353 }
1354
1355 /*
1356 * __kill_pgrp_info() sends a signal to a process group: this is what the tty
1357 * control characters do (^C, ^Z etc)
1358 * - the caller must hold at least a readlock on tasklist_lock
1359 */
1360 int __kill_pgrp_info(int sig, struct siginfo *info, struct pid *pgrp)
1361 {
1362 struct task_struct *p = NULL;
1363 int retval, success;
1364
1365 success = 0;
1366 retval = -ESRCH;
1367 do_each_pid_task(pgrp, PIDTYPE_PGID, p) {
1368 int err = group_send_sig_info(sig, info, p);
1369 success |= !err;
1370 retval = err;
1371 } while_each_pid_task(pgrp, PIDTYPE_PGID, p);
1372 return success ? 0 : retval;
1373 }
1374
1375 int kill_pid_info(int sig, struct siginfo *info, struct pid *pid)
1376 {
1377 int error = -ESRCH;
1378 struct task_struct *p;
1379
1380 rcu_read_lock();
1381 retry:
1382 p = pid_task(pid, PIDTYPE_PID);
1383 if (p) {
1384 error = group_send_sig_info(sig, info, p);
1385 if (unlikely(error == -ESRCH))
1386 /*
1387 * The task was unhashed in between, try again.
1388 * If it is dead, pid_task() will return NULL,
1389 * if we race with de_thread() it will find the
1390 * new leader.
1391 */
1392 goto retry;
1393 }
1394 rcu_read_unlock();
1395
1396 return error;
1397 }
1398
1399 int kill_proc_info(int sig, struct siginfo *info, pid_t pid)
1400 {
1401 int error;
1402 rcu_read_lock();
1403 error = kill_pid_info(sig, info, find_vpid(pid));
1404 rcu_read_unlock();
1405 return error;
1406 }
1407
1408 static int kill_as_cred_perm(const struct cred *cred,
1409 struct task_struct *target)
1410 {
1411 const struct cred *pcred = __task_cred(target);
1412 if (!uid_eq(cred->euid, pcred->suid) && !uid_eq(cred->euid, pcred->uid) &&
1413 !uid_eq(cred->uid, pcred->suid) && !uid_eq(cred->uid, pcred->uid))
1414 return 0;
1415 return 1;
1416 }
1417
1418 /* like kill_pid_info(), but doesn't use uid/euid of "current" */
1419 int kill_pid_info_as_cred(int sig, struct siginfo *info, struct pid *pid,
1420 const struct cred *cred, u32 secid)
1421 {
1422 int ret = -EINVAL;
1423 struct task_struct *p;
1424 unsigned long flags;
1425
1426 if (!valid_signal(sig))
1427 return ret;
1428
1429 rcu_read_lock();
1430 p = pid_task(pid, PIDTYPE_PID);
1431 if (!p) {
1432 ret = -ESRCH;
1433 goto out_unlock;
1434 }
1435 if (si_fromuser(info) && !kill_as_cred_perm(cred, p)) {
1436 ret = -EPERM;
1437 goto out_unlock;
1438 }
1439 ret = security_task_kill(p, info, sig, secid);
1440 if (ret)
1441 goto out_unlock;
1442
1443 if (sig) {
1444 if (lock_task_sighand(p, &flags)) {
1445 ret = __send_signal(sig, info, p, 1, 0);
1446 unlock_task_sighand(p, &flags);
1447 } else
1448 ret = -ESRCH;
1449 }
1450 out_unlock:
1451 rcu_read_unlock();
1452 return ret;
1453 }
1454 EXPORT_SYMBOL_GPL(kill_pid_info_as_cred);
1455
1456 /*
1457 * kill_something_info() interprets pid in interesting ways just like kill(2).
1458 *
1459 * POSIX specifies that kill(-1,sig) is unspecified, but what we have
1460 * is probably wrong. Should make it like BSD or SYSV.
1461 */
1462
1463 static int kill_something_info(int sig, struct siginfo *info, pid_t pid)
1464 {
1465 int ret;
1466
1467 if (pid > 0) {
1468 rcu_read_lock();
1469 ret = kill_pid_info(sig, info, find_vpid(pid));
1470 rcu_read_unlock();
1471 return ret;
1472 }
1473
1474 read_lock(&tasklist_lock);
1475 if (pid != -1) {
1476 ret = __kill_pgrp_info(sig, info,
1477 pid ? find_vpid(-pid) : task_pgrp(current));
1478 } else {
1479 int retval = 0, count = 0;
1480 struct task_struct * p;
1481
1482 for_each_process(p) {
1483 if (task_pid_vnr(p) > 1 &&
1484 !same_thread_group(p, current)) {
1485 int err = group_send_sig_info(sig, info, p);
1486 ++count;
1487 if (err != -EPERM)
1488 retval = err;
1489 }
1490 }
1491 ret = count ? retval : -ESRCH;
1492 }
1493 read_unlock(&tasklist_lock);
1494
1495 return ret;
1496 }
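
/*
 * Summary of the pid encoding handled above, matching kill(2):
 *
 *	pid > 0		signal the process with that pid
 *	pid == 0	signal every process in the caller's process group
 *	pid == -1	signal every process the caller may signal, except
 *			the caller's own thread group and pid 1 of the
 *			caller's pid namespace
 *	pid < -1	signal every process in process group -pid
 */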
1497
1498 /*
1499 * These are for backward compatibility with the rest of the kernel source.
1500 */
1501
1502 int send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
1503 {
1504 /*
1505 * Make sure legacy kernel users don't send in bad values
1506 * (normal paths check this in check_kill_permission).
1507 */
1508 if (!valid_signal(sig))
1509 return -EINVAL;
1510
1511 return do_send_sig_info(sig, info, p, false);
1512 }
1513
1514 #define __si_special(priv) \
1515 ((priv) ? SEND_SIG_PRIV : SEND_SIG_NOINFO)
1516
1517 int
1518 send_sig(int sig, struct task_struct *p, int priv)
1519 {
1520 return send_sig_info(sig, __si_special(priv), p);
1521 }
1522
1523 void
1524 force_sig(int sig, struct task_struct *p)
1525 {
1526 force_sig_info(sig, SEND_SIG_PRIV, p);
1527 }
1528
1529 /*
1530 * When things go south during signal handling, we
1531 * will force a SIGSEGV. And if the signal that caused
1532 * the problem was already a SIGSEGV, we'll want to
1533  * make sure we don't even try to deliver the signal.
1534 */
1535 int
1536 force_sigsegv(int sig, struct task_struct *p)
1537 {
1538 if (sig == SIGSEGV) {
1539 unsigned long flags;
1540 spin_lock_irqsave(&p->sighand->siglock, flags);
1541 p->sighand->action[sig - 1].sa.sa_handler = SIG_DFL;
1542 spin_unlock_irqrestore(&p->sighand->siglock, flags);
1543 }
1544 force_sig(SIGSEGV, p);
1545 return 0;
1546 }
1547
1548 int kill_pgrp(struct pid *pid, int sig, int priv)
1549 {
1550 int ret;
1551
1552 read_lock(&tasklist_lock);
1553 ret = __kill_pgrp_info(sig, __si_special(priv), pid);
1554 read_unlock(&tasklist_lock);
1555
1556 return ret;
1557 }
1558 EXPORT_SYMBOL(kill_pgrp);
1559
1560 int kill_pid(struct pid *pid, int sig, int priv)
1561 {
1562 return kill_pid_info(sig, __si_special(priv), pid);
1563 }
1564 EXPORT_SYMBOL(kill_pid);
1565
1566 /*
1567 * These functions support sending signals using preallocated sigqueue
1568 * structures. This is needed "because realtime applications cannot
1569 * afford to lose notifications of asynchronous events, like timer
1570 * expirations or I/O completions". In the case of POSIX Timers
1571 * we allocate the sigqueue structure from the timer_create. If this
1572 * allocation fails we are able to report the failure to the application
1573 * with an EAGAIN error.
1574 */
1575 struct sigqueue *sigqueue_alloc(void)
1576 {
1577 struct sigqueue *q = __sigqueue_alloc(-1, current, GFP_KERNEL, 0);
1578
1579 if (q)
1580 q->flags |= SIGQUEUE_PREALLOC;
1581
1582 return q;
1583 }
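
/*
 * Rough lifecycle of a preallocated sigqueue (a sketch of the intended
 * usage described above; the posix-timers code is the real user and its
 * details differ):
 *
 *	struct sigqueue *q = sigqueue_alloc();	// at timer_create() time
 *	if (!q)
 *		return -EAGAIN;			// report the failure up front
 *	...
 *	q->info.si_signo = sig;			// fill in siginfo once
 *	q->info.si_code  = SI_TIMER;
 *	send_sigqueue(q, task, group);		// per expiry, no allocation
 *	...
 *	sigqueue_free(q);			// at timer_delete() time
 */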
1584
1585 void sigqueue_free(struct sigqueue *q)
1586 {
1587 unsigned long flags;
1588 spinlock_t *lock = &current->sighand->siglock;
1589
1590 BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));
1591 /*
1592 * We must hold ->siglock while testing q->list
1593 * to serialize with collect_signal() or with
1594 * __exit_signal()->flush_sigqueue().
1595 */
1596 spin_lock_irqsave(lock, flags);
1597 q->flags &= ~SIGQUEUE_PREALLOC;
1598 /*
1599 * If it is queued it will be freed when dequeued,
1600 * like the "regular" sigqueue.
1601 */
1602 if (!list_empty(&q->list))
1603 q = NULL;
1604 spin_unlock_irqrestore(lock, flags);
1605
1606 if (q)
1607 __sigqueue_free(q);
1608 }
1609
1610 int send_sigqueue(struct sigqueue *q, struct task_struct *t, int group)
1611 {
1612 int sig = q->info.si_signo;
1613 struct sigpending *pending;
1614 unsigned long flags;
1615 int ret, result;
1616
1617 BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));
1618
1619 ret = -1;
1620 if (!likely(lock_task_sighand(t, &flags)))
1621 goto ret;
1622
1623 ret = 1; /* the signal is ignored */
1624 result = TRACE_SIGNAL_IGNORED;
1625 if (!prepare_signal(sig, t, false))
1626 goto out;
1627
1628 ret = 0;
1629 if (unlikely(!list_empty(&q->list))) {
1630 /*
1631 		 * If an SI_TIMER entry is already queued, just increment
1632 * the overrun count.
1633 */
1634 BUG_ON(q->info.si_code != SI_TIMER);
1635 q->info.si_overrun++;
1636 result = TRACE_SIGNAL_ALREADY_PENDING;
1637 goto out;
1638 }
1639 q->info.si_overrun = 0;
1640
1641 signalfd_notify(t, sig);
1642 pending = group ? &t->signal->shared_pending : &t->pending;
1643 list_add_tail(&q->list, &pending->list);
1644 sigaddset(&pending->signal, sig);
1645 complete_signal(sig, t, group);
1646 result = TRACE_SIGNAL_DELIVERED;
1647 out:
1648 trace_signal_generate(sig, &q->info, t, group, result);
1649 unlock_task_sighand(t, &flags);
1650 ret:
1651 return ret;
1652 }
1653
1654 /*
1655 * Let a parent know about the death of a child.
1656 * For a stopped/continued status change, use do_notify_parent_cldstop instead.
1657 *
1658 * Returns true if our parent ignored us and so we've switched to
1659 * self-reaping.
1660 */
1661 bool do_notify_parent(struct task_struct *tsk, int sig)
1662 {
1663 struct siginfo info;
1664 unsigned long flags;
1665 struct sighand_struct *psig;
1666 bool autoreap = false;
1667 cputime_t utime, stime;
1668
1669 BUG_ON(sig == -1);
1670
1671 /* do_notify_parent_cldstop should have been called instead. */
1672 BUG_ON(task_is_stopped_or_traced(tsk));
1673
1674 BUG_ON(!tsk->ptrace &&
1675 (tsk->group_leader != tsk || !thread_group_empty(tsk)));
1676
1677 if (sig != SIGCHLD) {
1678 /*
1679 * This is only possible if parent == real_parent.
1680 * Check if it has changed security domain.
1681 */
1682 if (tsk->parent_exec_id != tsk->parent->self_exec_id)
1683 sig = SIGCHLD;
1684 }
1685
1686 info.si_signo = sig;
1687 info.si_errno = 0;
1688 /*
1689 * We are under tasklist_lock here so our parent is tied to
1690 * us and cannot change.
1691 *
1692 * task_active_pid_ns will always return the same pid namespace
1693 * until a task passes through release_task.
1694 *
1695 * write_lock() currently calls preempt_disable() which is the
1696 	 * same as rcu_read_lock(), but according to Oleg, it is not
1697 	 * correct to rely on this.
1698 */
1699 rcu_read_lock();
1700 info.si_pid = task_pid_nr_ns(tsk, task_active_pid_ns(tsk->parent));
1701 info.si_uid = from_kuid_munged(task_cred_xxx(tsk->parent, user_ns),
1702 task_uid(tsk));
1703 rcu_read_unlock();
1704
1705 task_cputime(tsk, &utime, &stime);
1706 info.si_utime = cputime_to_clock_t(utime + tsk->signal->utime);
1707 info.si_stime = cputime_to_clock_t(stime + tsk->signal->stime);
1708
1709 info.si_status = tsk->exit_code & 0x7f;
1710 if (tsk->exit_code & 0x80)
1711 info.si_code = CLD_DUMPED;
1712 else if (tsk->exit_code & 0x7f)
1713 info.si_code = CLD_KILLED;
1714 else {
1715 info.si_code = CLD_EXITED;
1716 info.si_status = tsk->exit_code >> 8;
1717 }
1718
1719 psig = tsk->parent->sighand;
1720 spin_lock_irqsave(&psig->siglock, flags);
1721 if (!tsk->ptrace && sig == SIGCHLD &&
1722 (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN ||
1723 (psig->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDWAIT))) {
1724 /*
1725 * We are exiting and our parent doesn't care. POSIX.1
1726 * defines special semantics for setting SIGCHLD to SIG_IGN
1727 * or setting the SA_NOCLDWAIT flag: we should be reaped
1728 * automatically and not left for our parent's wait4 call.
1729 * Rather than having the parent do it as a magic kind of
1730 * signal handler, we just set this to tell do_exit that we
1731 * can be cleaned up without becoming a zombie. Note that
1732 * we still call __wake_up_parent in this case, because a
1733 * blocked sys_wait4 might now return -ECHILD.
1734 *
1735 * Whether we send SIGCHLD or not for SA_NOCLDWAIT
1736 * is implementation-defined: we do (if you don't want
1737 * it, just use SIG_IGN instead).
1738 */
1739 autoreap = true;
1740 if (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN)
1741 sig = 0;
1742 }
1743 if (valid_signal(sig) && sig)
1744 __group_send_sig_info(sig, &info, tsk->parent);
1745 __wake_up_parent(tsk, tsk->parent);
1746 spin_unlock_irqrestore(&psig->siglock, flags);
1747
1748 return autoreap;
1749 }
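
/*
 * Userspace view of the autoreap case above (illustrative only): a parent
 * that sets SIGCHLD to SIG_IGN, or installs its disposition with
 * SA_NOCLDWAIT, has its children reaped automatically, and a later wait()
 * ends up failing with ECHILD:
 *
 *	struct sigaction sa = { .sa_handler = SIG_DFL,
 *				.sa_flags   = SA_NOCLDWAIT };
 *	sigaction(SIGCHLD, &sa, NULL);
 *	if (fork() == 0)
 *		_exit(0);	// child: reaped by the kernel, no zombie
 *	wait(NULL);		// parent: eventually returns -1, errno == ECHILD
 */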
1750
1751 /**
1752 * do_notify_parent_cldstop - notify parent of stopped/continued state change
1753 * @tsk: task reporting the state change
1754 * @for_ptracer: the notification is for ptracer
1755 * @why: CLD_{CONTINUED|STOPPED|TRAPPED} to report
1756 *
1757 * Notify @tsk's parent that the stopped/continued state has changed. If
1758 * @for_ptracer is %false, @tsk's group leader notifies to its real parent.
1759 * If %true, @tsk reports to @tsk->parent which should be the ptracer.
1760 *
1761 * CONTEXT:
1762 * Must be called with tasklist_lock at least read locked.
1763 */
1764 static void do_notify_parent_cldstop(struct task_struct *tsk,
1765 bool for_ptracer, int why)
1766 {
1767 struct siginfo info;
1768 unsigned long flags;
1769 struct task_struct *parent;
1770 struct sighand_struct *sighand;
1771 cputime_t utime, stime;
1772
1773 if (for_ptracer) {
1774 parent = tsk->parent;
1775 } else {
1776 tsk = tsk->group_leader;
1777 parent = tsk->real_parent;
1778 }
1779
1780 info.si_signo = SIGCHLD;
1781 info.si_errno = 0;
1782 /*
1783 * see comment in do_notify_parent() about the following 4 lines
1784 */
1785 rcu_read_lock();
1786 info.si_pid = task_pid_nr_ns(tsk, task_active_pid_ns(parent));
1787 info.si_uid = from_kuid_munged(task_cred_xxx(parent, user_ns), task_uid(tsk));
1788 rcu_read_unlock();
1789
1790 task_cputime(tsk, &utime, &stime);
1791 info.si_utime = cputime_to_clock_t(utime);
1792 info.si_stime = cputime_to_clock_t(stime);
1793
1794 info.si_code = why;
1795 switch (why) {
1796 case CLD_CONTINUED:
1797 info.si_status = SIGCONT;
1798 break;
1799 case CLD_STOPPED:
1800 info.si_status = tsk->signal->group_exit_code & 0x7f;
1801 break;
1802 case CLD_TRAPPED:
1803 info.si_status = tsk->exit_code & 0x7f;
1804 break;
1805 default:
1806 BUG();
1807 }
1808
1809 sighand = parent->sighand;
1810 spin_lock_irqsave(&sighand->siglock, flags);
1811 if (sighand->action[SIGCHLD-1].sa.sa_handler != SIG_IGN &&
1812 !(sighand->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDSTOP))
1813 __group_send_sig_info(SIGCHLD, &info, parent);
1814 /*
1815 * Even if SIGCHLD is not generated, we must wake up wait4 calls.
1816 */
1817 __wake_up_parent(tsk, parent);
1818 spin_unlock_irqrestore(&sighand->siglock, flags);
1819 }
1820
1821 static inline int may_ptrace_stop(void)
1822 {
1823 if (!likely(current->ptrace))
1824 return 0;
1825 /*
1826 * Are we in the middle of do_coredump?
1827 	 * If so, and our tracer is also part of the coredump, stopping
1828 	 * is a deadlock situation and pointless because our tracer
1829 	 * is dead, so don't allow us to stop.
1830 * If SIGKILL was already sent before the caller unlocked
1831 * ->siglock we must see ->core_state != NULL. Otherwise it
1832 * is safe to enter schedule().
1833 *
1834 	 * This is almost outdated: a task with a pending SIGKILL can't
1835 * block in TASK_TRACED. But PTRACE_EVENT_EXIT can be reported
1836 * after SIGKILL was already dequeued.
1837 */
1838 if (unlikely(current->mm->core_state) &&
1839 unlikely(current->mm == current->parent->mm))
1840 return 0;
1841
1842 return 1;
1843 }
1844
1845 /*
1846 * Return non-zero if there is a SIGKILL that should be waking us up.
1847 * Called with the siglock held.
1848 */
1849 static int sigkill_pending(struct task_struct *tsk)
1850 {
1851 return sigismember(&tsk->pending.signal, SIGKILL) ||
1852 sigismember(&tsk->signal->shared_pending.signal, SIGKILL);
1853 }
1854
1855 /*
1856 * This must be called with current->sighand->siglock held.
1857 *
1858 * This should be the path for all ptrace stops.
1859 * We always set current->last_siginfo while stopped here.
1860 * That makes it a way to test a stopped process for
1861 * being ptrace-stopped vs being job-control-stopped.
1862 *
1863 * If we actually decide not to stop at all because the tracer
1864 * is gone, we keep current->exit_code unless clear_code.
1865 */
1866 static void ptrace_stop(int exit_code, int why, int clear_code, siginfo_t *info)
1867 __releases(&current->sighand->siglock)
1868 __acquires(&current->sighand->siglock)
1869 {
1870 bool gstop_done = false;
1871
1872 if (arch_ptrace_stop_needed(exit_code, info)) {
1873 /*
1874 * The arch code has something special to do before a
1875 * ptrace stop. This is allowed to block, e.g. for faults
1876 * on user stack pages. We can't keep the siglock while
1877 * calling arch_ptrace_stop, so we must release it now.
1878 * To preserve proper semantics, we must do this before
1879 * any signal bookkeeping like checking group_stop_count.
1880 * Meanwhile, a SIGKILL could come in before we retake the
1881 * siglock. That must prevent us from sleeping in TASK_TRACED.
1882 * So after regaining the lock, we must check for SIGKILL.
1883 */
1884 spin_unlock_irq(&current->sighand->siglock);
1885 arch_ptrace_stop(exit_code, info);
1886 spin_lock_irq(&current->sighand->siglock);
1887 if (sigkill_pending(current))
1888 return;
1889 }
1890
1891 /*
1892 * We're committing to trapping. TRACED should be visible before
1893 * TRAPPING is cleared; otherwise, the tracer might fail do_wait().
1894 * Also, transition to TRACED and updates to ->jobctl should be
1895 * atomic with respect to siglock and should be done after the arch
1896 * hook as siglock is released and regrabbed across it.
1897 */
1898 set_current_state(TASK_TRACED);
1899
1900 current->last_siginfo = info;
1901 current->exit_code = exit_code;
1902
1903 /*
1904 * If @why is CLD_STOPPED, we're trapping to participate in a group
1905 	 * stop.  Do the bookkeeping.  Note that if SIGCONT was delivered
1906 * across siglock relocks since INTERRUPT was scheduled, PENDING
1907 * could be clear now. We act as if SIGCONT is received after
1908 * TASK_TRACED is entered - ignore it.
1909 */
1910 if (why == CLD_STOPPED && (current->jobctl & JOBCTL_STOP_PENDING))
1911 gstop_done = task_participate_group_stop(current);
1912
1913 /* any trap clears pending STOP trap, STOP trap clears NOTIFY */
1914 task_clear_jobctl_pending(current, JOBCTL_TRAP_STOP);
1915 if (info && info->si_code >> 8 == PTRACE_EVENT_STOP)
1916 task_clear_jobctl_pending(current, JOBCTL_TRAP_NOTIFY);
1917
1918 /* entering a trap, clear TRAPPING */
1919 task_clear_jobctl_trapping(current);
1920
1921 spin_unlock_irq(&current->sighand->siglock);
1922 read_lock(&tasklist_lock);
1923 if (may_ptrace_stop()) {
1924 /*
1925 * Notify parents of the stop.
1926 *
1927 * While ptraced, there are two parents - the ptracer and
1928 * the real_parent of the group_leader. The ptracer should
1929 * know about every stop while the real parent is only
1930 * interested in the completion of group stop. The states
1931 * for the two don't interact with each other. Notify
1932 * separately unless they're gonna be duplicates.
1933 */
1934 do_notify_parent_cldstop(current, true, why);
1935 if (gstop_done && ptrace_reparented(current))
1936 do_notify_parent_cldstop(current, false, why);
1937
1938 /*
1939 * Don't want to allow preemption here, because
1940 * sys_ptrace() needs this task to be inactive.
1941 *
1942 * XXX: implement read_unlock_no_resched().
1943 */
1944 preempt_disable();
1945 read_unlock(&tasklist_lock);
1946 preempt_enable_no_resched();
1947 freezable_schedule();
1948 } else {
1949 /*
1950 * By the time we got the lock, our tracer went away.
1951 * Don't drop the lock yet, another tracer may come.
1952 *
1953 * If @gstop_done, the ptracer went away between group stop
1954 * completion and here. During detach, it would have set
1955 * JOBCTL_STOP_PENDING on us and we'll re-enter
1956 * TASK_STOPPED in do_signal_stop() on return, so notifying
1957 * the real parent of the group stop completion is enough.
1958 */
1959 if (gstop_done)
1960 do_notify_parent_cldstop(current, false, why);
1961
1962 /* tasklist protects us from ptrace_freeze_traced() */
1963 __set_current_state(TASK_RUNNING);
1964 if (clear_code)
1965 current->exit_code = 0;
1966 read_unlock(&tasklist_lock);
1967 }
1968
1969 /*
1970 * We are back. Now reacquire the siglock before touching
1971 * last_siginfo, so that we are sure to have synchronized with
1972 * any signal-sending on another CPU that wants to examine it.
1973 */
1974 spin_lock_irq(&current->sighand->siglock);
1975 current->last_siginfo = NULL;
1976
1977 /* LISTENING can be set only during STOP traps, clear it */
1978 current->jobctl &= ~JOBCTL_LISTENING;
1979
1980 /*
1981 * Queued signals ignored us while we were stopped for tracing.
1982 * So check for any that we should take before resuming user mode.
1983 * This sets TIF_SIGPENDING, but never clears it.
1984 */
1985 recalc_sigpending_tsk(current);
1986 }
1987
1988 static void ptrace_do_notify(int signr, int exit_code, int why)
1989 {
1990 siginfo_t info;
1991
1992 memset(&info, 0, sizeof info);
1993 info.si_signo = signr;
1994 info.si_code = exit_code;
1995 info.si_pid = task_pid_vnr(current);
1996 info.si_uid = from_kuid_munged(current_user_ns(), current_uid());
1997
1998 /* Let the debugger run. */
1999 ptrace_stop(exit_code, why, 1, &info);
2000 }
2001
2002 void ptrace_notify(int exit_code)
2003 {
2004 BUG_ON((exit_code & (0x7f | ~0xffff)) != SIGTRAP);
2005 if (unlikely(current->task_works))
2006 task_work_run();
2007
2008 spin_lock_irq(&current->sighand->siglock);
2009 ptrace_do_notify(SIGTRAP, exit_code, CLD_TRAPPED);
2010 spin_unlock_irq(&current->sighand->siglock);
2011 }
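
/*
 * Illustrative user-space sketch (not kernel code): a tracer can observe
 * the traps raised through ptrace_notify() by enabling the matching
 * PTRACE_O_TRACE* option and decoding the waitpid() status as described
 * in ptrace(2). Assumes <sys/ptrace.h> and <sys/wait.h>; handle_exec_stop()
 * is hypothetical.
 *
 *	ptrace(PTRACE_SETOPTIONS, pid, 0, PTRACE_O_TRACEEXEC);
 *	ptrace(PTRACE_CONT, pid, 0, 0);
 *	waitpid(pid, &status, 0);
 *	if (WIFSTOPPED(status) &&
 *	    status >> 8 == (SIGTRAP | (PTRACE_EVENT_EXEC << 8)))
 *		handle_exec_stop(pid);		(tracee stopped here)
 */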
2012
2013 /**
2014 * do_signal_stop - handle group stop for SIGSTOP and other stop signals
2015 * @signr: signr causing group stop if initiating
2016 *
2017 * If %JOBCTL_STOP_PENDING is not set yet, initiate group stop with @signr
2018 * and participate in it. If already set, participate in the existing
2019 * group stop. If participated in a group stop (and thus slept), %true is
2020 * returned with siglock released.
2021 *
2022 * If ptraced, this function doesn't handle stop itself. Instead,
2023 * %JOBCTL_TRAP_STOP is scheduled and %false is returned with siglock
2024 * untouched. The caller must ensure that INTERRUPT trap handling takes
2025 * place afterwards.
2026 *
2027 * CONTEXT:
2028 * Must be called with @current->sighand->siglock held, which is released
2029 * on %true return.
2030 *
2031 * RETURNS:
2032 * %false if group stop is already cancelled or ptrace trap is scheduled.
2033 * %true if participated in group stop.
2034 */
2035 static bool do_signal_stop(int signr)
2036 __releases(&current->sighand->siglock)
2037 {
2038 struct signal_struct *sig = current->signal;
2039
2040 if (!(current->jobctl & JOBCTL_STOP_PENDING)) {
2041 unsigned int gstop = JOBCTL_STOP_PENDING | JOBCTL_STOP_CONSUME;
2042 struct task_struct *t;
2043
2044 /* signr will be recorded in task->jobctl for retries */
2045 WARN_ON_ONCE(signr & ~JOBCTL_STOP_SIGMASK);
2046
2047 if (!likely(current->jobctl & JOBCTL_STOP_DEQUEUED) ||
2048 unlikely(signal_group_exit(sig)))
2049 return false;
2050 /*
2051 * There is no group stop already in progress. We must
2052 * initiate one now.
2053 *
2054 * While ptraced, a task may be resumed while group stop is
2055 * still in effect and then receive a stop signal and
2056 * initiate another group stop. This deviates from the
2057 * usual behavior as two consecutive stop signals can't
2058 * cause two group stops when !ptraced. That is why we
2059 * also check !task_is_stopped(t) below.
2060 *
2061 * The condition can be distinguished by testing whether
2062 * SIGNAL_STOP_STOPPED is already set. Don't generate
2063 * group_exit_code in such case.
2064 *
2065 * This is not necessary for SIGNAL_STOP_CONTINUED because
2066 * an intervening stop signal is required to cause two
2067 * continued events regardless of ptrace.
2068 */
2069 if (!(sig->flags & SIGNAL_STOP_STOPPED))
2070 sig->group_exit_code = signr;
2071
2072 sig->group_stop_count = 0;
2073
2074 if (task_set_jobctl_pending(current, signr | gstop))
2075 sig->group_stop_count++;
2076
2077 t = current;
2078 while_each_thread(current, t) {
2079 /*
2080 * Setting state to TASK_STOPPED for a group
2081 * stop is always done with the siglock held,
2082 * so this check has no races.
2083 */
2084 if (!task_is_stopped(t) &&
2085 task_set_jobctl_pending(t, signr | gstop)) {
2086 sig->group_stop_count++;
2087 if (likely(!(t->ptrace & PT_SEIZED)))
2088 signal_wake_up(t, 0);
2089 else
2090 ptrace_trap_notify(t);
2091 }
2092 }
2093 }
2094
2095 if (likely(!current->ptrace)) {
2096 int notify = 0;
2097
2098 /*
2099 * If there are no other threads in the group, or if there
2100 * is a group stop in progress and we are the last to stop,
2101 * report to the parent.
2102 */
2103 if (task_participate_group_stop(current))
2104 notify = CLD_STOPPED;
2105
2106 __set_current_state(TASK_STOPPED);
2107 spin_unlock_irq(&current->sighand->siglock);
2108
2109 /*
2110 * Notify the parent of the group stop completion. Because
2111 * we're not holding either the siglock or tasklist_lock
2112 * here, a ptracer may attach in between; however, this is for
2113 * group stop and should always be delivered to the real
2114 * parent of the group leader. The new ptracer will get
2115 * its notification when this task transitions into
2116 * TASK_TRACED.
2117 */
2118 if (notify) {
2119 read_lock(&tasklist_lock);
2120 do_notify_parent_cldstop(current, false, notify);
2121 read_unlock(&tasklist_lock);
2122 }
2123
2124 /* Now we don't run again until woken by SIGCONT or SIGKILL */
2125 freezable_schedule();
2126 return true;
2127 } else {
2128 /*
2129 * While ptraced, group stop is handled by STOP trap.
2130 * Schedule it and let the caller deal with it.
2131 */
2132 task_set_jobctl_pending(current, JOBCTL_TRAP_STOP);
2133 return false;
2134 }
2135 }
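
/*
 * A minimal user-space sketch (not kernel code) of the group stop this
 * function implements, as seen from the parent through the standard
 * job-control interface; assumes <signal.h> and <sys/wait.h>:
 *
 *	kill(child, SIGTSTP);			initiate a group stop
 *	waitpid(child, &status, WUNTRACED);	WIFSTOPPED(status) is true,
 *						WSTOPSIG(status) == SIGTSTP
 *	kill(child, SIGCONT);			resume the whole group
 *	waitpid(child, &status, WCONTINUED);	WIFCONTINUED(status) is true
 */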
2136
2137 /**
2138 * do_jobctl_trap - take care of ptrace jobctl traps
2139 *
2140 * When PT_SEIZED, it's used for both group stop and explicit
2141 * SEIZE/INTERRUPT traps. Both generate PTRACE_EVENT_STOP trap with
2142 * accompanying siginfo. If stopped, lower eight bits of exit_code contain
2143 * the stop signal; otherwise, %SIGTRAP.
2144 *
2145 * When !PT_SEIZED, it's used only for group stop trap with stop signal
2146 * number as exit_code and no siginfo.
2147 *
2148 * CONTEXT:
2149 * Must be called with @current->sighand->siglock held, which may be
2150 * released and re-acquired before returning with intervening sleep.
2151 */
2152 static void do_jobctl_trap(void)
2153 {
2154 struct signal_struct *signal = current->signal;
2155 int signr = current->jobctl & JOBCTL_STOP_SIGMASK;
2156
2157 if (current->ptrace & PT_SEIZED) {
2158 if (!signal->group_stop_count &&
2159 !(signal->flags & SIGNAL_STOP_STOPPED))
2160 signr = SIGTRAP;
2161 WARN_ON_ONCE(!signr);
2162 ptrace_do_notify(signr, signr | (PTRACE_EVENT_STOP << 8),
2163 CLD_STOPPED);
2164 } else {
2165 WARN_ON_ONCE(!signr);
2166 ptrace_stop(signr, CLD_STOPPED, 0, NULL);
2167 current->exit_code = 0;
2168 }
2169 }
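
/*
 * A hedged user-space sketch (not kernel code): with PTRACE_SEIZE the
 * trap taken in do_jobctl_trap() is reported to the tracer as
 * PTRACE_EVENT_STOP, per ptrace(2):
 *
 *	waitpid(tid, &status, __WALL);
 *	if (WIFSTOPPED(status) && status >> 16 == PTRACE_EVENT_STOP)
 *		sig = WSTOPSIG(status);		group-stop signal, or SIGTRAP
 *						for an explicit SEIZE/INTERRUPT
 *						trap
 */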
2170
2171 static int ptrace_signal(int signr, siginfo_t *info)
2172 {
2173 ptrace_signal_deliver();
2174 /*
2175 * We do not check sig_kernel_stop(signr) but set this marker
2176 * unconditionally because we do not know whether debugger will
2177 * change signr. This flag has no meaning unless we are going
2178 * to stop after return from ptrace_stop(). In this case it will
2179 * be checked in do_signal_stop(), we should only stop if it was
2180 * not cleared by SIGCONT while we were sleeping. See also the
2181 * comment in dequeue_signal().
2182 */
2183 current->jobctl |= JOBCTL_STOP_DEQUEUED;
2184 ptrace_stop(signr, CLD_TRAPPED, 0, info);
2185
2186 /* We're back. Did the debugger cancel the sig? */
2187 signr = current->exit_code;
2188 if (signr == 0)
2189 return signr;
2190
2191 current->exit_code = 0;
2192
2193 /*
2194 * Update the siginfo structure if the signal has
2195 * changed. If the debugger wanted something
2196 * specific in the siginfo structure then it should
2197 * have updated *info via PTRACE_SETSIGINFO.
2198 */
2199 if (signr != info->si_signo) {
2200 info->si_signo = signr;
2201 info->si_errno = 0;
2202 info->si_code = SI_USER;
2203 rcu_read_lock();
2204 info->si_pid = task_pid_vnr(current->parent);
2205 info->si_uid = from_kuid_munged(current_user_ns(),
2206 task_uid(current->parent));
2207 rcu_read_unlock();
2208 }
2209
2210 /* If the (new) signal is now blocked, requeue it. */
2211 if (sigismember(&current->blocked, signr)) {
2212 specific_send_sig_info(signr, info, current);
2213 signr = 0;
2214 }
2215
2216 return signr;
2217 }
2218
2219 int get_signal(struct ksignal *ksig)
2220 {
2221 struct sighand_struct *sighand = current->sighand;
2222 struct signal_struct *signal = current->signal;
2223 int signr;
2224
2225 if (unlikely(current->task_works))
2226 task_work_run();
2227
2228 if (unlikely(uprobe_deny_signal()))
2229 return 0;
2230
2231 /*
2232 * Do this once, we can't return to user-mode if freezing() == T.
2233 * do_signal_stop() and ptrace_stop() do freezable_schedule() and
2234 * thus do not need another check after return.
2235 */
2236 try_to_freeze();
2237
2238 relock:
2239 spin_lock_irq(&sighand->siglock);
2240 /*
2241 * Every stopped thread goes here after wakeup. Check to see if
2242 * we should notify the parent, prepare_signal(SIGCONT) encodes
2243 * the CLD_ si_code into SIGNAL_CLD_MASK bits.
2244 */
2245 if (unlikely(signal->flags & SIGNAL_CLD_MASK)) {
2246 int why;
2247
2248 if (signal->flags & SIGNAL_CLD_CONTINUED)
2249 why = CLD_CONTINUED;
2250 else
2251 why = CLD_STOPPED;
2252
2253 signal->flags &= ~SIGNAL_CLD_MASK;
2254
2255 spin_unlock_irq(&sighand->siglock);
2256
2257 /*
2258 * Notify the parent that we're continuing. This event is
2259 * always per-process and doesn't make a whole lot of sense
2260 * for ptracers, who shouldn't consume the state via
2261 * wait(2) either, but, for backward compatibility, notify
2262 * the ptracer of the group leader too unless it's gonna be
2263 * a duplicate.
2264 */
2265 read_lock(&tasklist_lock);
2266 do_notify_parent_cldstop(current, false, why);
2267
2268 if (ptrace_reparented(current->group_leader))
2269 do_notify_parent_cldstop(current->group_leader,
2270 true, why);
2271 read_unlock(&tasklist_lock);
2272
2273 goto relock;
2274 }
2275
2276 /* Has this task already been marked for death? */
2277 if (signal_group_exit(signal)) {
2278 ksig->info.si_signo = signr = SIGKILL;
2279 sigdelset(&current->pending.signal, SIGKILL);
2280 recalc_sigpending();
2281 goto fatal;
2282 }
2283
2284 for (;;) {
2285 struct k_sigaction *ka;
2286
2287 if (unlikely(current->jobctl & JOBCTL_STOP_PENDING) &&
2288 do_signal_stop(0))
2289 goto relock;
2290
2291 if (unlikely(current->jobctl & JOBCTL_TRAP_MASK)) {
2292 do_jobctl_trap();
2293 spin_unlock_irq(&sighand->siglock);
2294 goto relock;
2295 }
2296
2297 /*
2298 * Signals generated by the execution of an instruction
2299 * need to be delivered before any other pending signals
2300 * so that the instruction pointer in the signal stack
2301 * frame points to the faulting instruction.
2302 */
2303 signr = dequeue_synchronous_signal(&ksig->info);
2304 if (!signr)
2305 signr = dequeue_signal(current, &current->blocked, &ksig->info);
2306
2307 if (!signr)
2308 break; /* will return 0 */
2309
2310 if (unlikely(current->ptrace) && signr != SIGKILL) {
2311 signr = ptrace_signal(signr, &ksig->info);
2312 if (!signr)
2313 continue;
2314 }
2315
2316 ka = &sighand->action[signr-1];
2317
2318 /* Trace actually delivered signals. */
2319 trace_signal_deliver(signr, &ksig->info, ka);
2320
2321 if (ka->sa.sa_handler == SIG_IGN) /* Do nothing. */
2322 continue;
2323 if (ka->sa.sa_handler != SIG_DFL) {
2324 /* Run the handler. */
2325 ksig->ka = *ka;
2326
2327 if (ka->sa.sa_flags & SA_ONESHOT)
2328 ka->sa.sa_handler = SIG_DFL;
2329
2330 break; /* will return non-zero "signr" value */
2331 }
2332
2333 /*
2334 * Now we are doing the default action for this signal.
2335 */
2336 if (sig_kernel_ignore(signr)) /* Default is nothing. */
2337 continue;
2338
2339 /*
2340 * Global init gets no signals it doesn't want.
2341 * Container-init gets no signals it doesn't want from same
2342 * container.
2343 *
2344 * Note that if global/container-init sees a sig_kernel_only()
2345 * signal here, the signal must have been generated internally
2346 * or must have come from an ancestor namespace. In either
2347 * case, the signal cannot be dropped.
2348 */
2349 if (unlikely(signal->flags & SIGNAL_UNKILLABLE) &&
2350 !sig_kernel_only(signr))
2351 continue;
2352
2353 if (sig_kernel_stop(signr)) {
2354 /*
2355 * The default action is to stop all threads in
2356 * the thread group. The job control signals
2357 * do nothing in an orphaned pgrp, but SIGSTOP
2358 * always works. Note that siglock needs to be
2359 * dropped during the call to is_orphaned_pgrp()
2360 * because of lock ordering with tasklist_lock.
2361 * This allows an intervening SIGCONT to be posted.
2362 * We need to check for that and bail out if necessary.
2363 */
2364 if (signr != SIGSTOP) {
2365 spin_unlock_irq(&sighand->siglock);
2366
2367 /* signals can be posted during this window */
2368
2369 if (is_current_pgrp_orphaned())
2370 goto relock;
2371
2372 spin_lock_irq(&sighand->siglock);
2373 }
2374
2375 if (likely(do_signal_stop(ksig->info.si_signo))) {
2376 /* It released the siglock. */
2377 goto relock;
2378 }
2379
2380 /*
2381 * We didn't actually stop, due to a race
2382 * with SIGCONT or something like that.
2383 */
2384 continue;
2385 }
2386
2387 fatal:
2388 spin_unlock_irq(&sighand->siglock);
2389
2390 /*
2391 * Anything else is fatal, maybe with a core dump.
2392 */
2393 current->flags |= PF_SIGNALED;
2394
2395 if (sig_kernel_coredump(signr)) {
2396 if (print_fatal_signals)
2397 print_fatal_signal(ksig->info.si_signo);
2398 proc_coredump_connector(current);
2399 /*
2400 * If it was able to dump core, this kills all
2401 * other threads in the group and synchronizes with
2402 * their demise. If we lost the race with another
2403 * thread getting here, it set group_exit_code
2404 * first and our do_group_exit call below will use
2405 * that value and ignore the one we pass it.
2406 */
2407 do_coredump(&ksig->info);
2408 }
2409
2410 /*
2411 * Death signals, no core dump.
2412 */
2413 do_group_exit(ksig->info.si_signo);
2414 /* NOTREACHED */
2415 }
2416 spin_unlock_irq(&sighand->siglock);
2417
2418 ksig->sig = signr;
2419 return ksig->sig > 0;
2420 }
2421
2422 /**
2423 * signal_delivered - a signal has been successfully delivered
2424 * @ksig: kernel signal struct
2425 * @stepping: nonzero if debugger single-step or block-step in use
2426 *
2427 * This function should be called when a signal has successfully been
2428 * delivered. It updates the blocked signals accordingly (@ksig->ka.sa.sa_mask
2429 * is always blocked, and the signal itself is blocked unless %SA_NODEFER
2430 * is set in @ksig->ka.sa.sa_flags). Tracing is notified.
2431 */
2432 static void signal_delivered(struct ksignal *ksig, int stepping)
2433 {
2434 sigset_t blocked;
2435
2436 /* A signal was successfully delivered, and the
2437 saved sigmask was stored on the signal frame,
2438 and will be restored by sigreturn. So we can
2439 simply clear the restore sigmask flag. */
2440 clear_restore_sigmask();
2441
2442 sigorsets(&blocked, &current->blocked, &ksig->ka.sa.sa_mask);
2443 if (!(ksig->ka.sa.sa_flags & SA_NODEFER))
2444 sigaddset(&blocked, ksig->sig);
2445 set_current_blocked(&blocked);
2446 tracehook_signal_handler(stepping);
2447 }
2448
2449 void signal_setup_done(int failed, struct ksignal *ksig, int stepping)
2450 {
2451 if (failed)
2452 force_sigsegv(ksig->sig, current);
2453 else
2454 signal_delivered(ksig, stepping);
2455 }
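
/*
 * A minimal user-space sketch (not kernel code) of the masking that
 * signal_delivered() applies: the handler's sa_mask is blocked for the
 * duration, and the delivered signal itself is only left unblocked when
 * SA_NODEFER is requested. Assumes <signal.h> and <string.h>; handler()
 * is hypothetical.
 *
 *	struct sigaction sa;
 *
 *	memset(&sa, 0, sizeof(sa));
 *	sa.sa_handler = handler;
 *	sigemptyset(&sa.sa_mask);
 *	sigaddset(&sa.sa_mask, SIGUSR2);	also blocked while handling
 *	sa.sa_flags = SA_NODEFER;		don't block SIGUSR1 itself
 *	sigaction(SIGUSR1, &sa, NULL);
 */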
2456
2457 /*
2458 * It could be that complete_signal() picked us to notify about the
2459 * group-wide signal. Other threads should be notified now to take
2460 * the shared signals in @which since we will not.
2461 */
2462 static void retarget_shared_pending(struct task_struct *tsk, sigset_t *which)
2463 {
2464 sigset_t retarget;
2465 struct task_struct *t;
2466
2467 sigandsets(&retarget, &tsk->signal->shared_pending.signal, which);
2468 if (sigisemptyset(&retarget))
2469 return;
2470
2471 t = tsk;
2472 while_each_thread(tsk, t) {
2473 if (t->flags & PF_EXITING)
2474 continue;
2475
2476 if (!has_pending_signals(&retarget, &t->blocked))
2477 continue;
2478 /* Remove the signals this thread can handle. */
2479 sigandsets(&retarget, &retarget, &t->blocked);
2480
2481 if (!signal_pending(t))
2482 signal_wake_up(t, 0);
2483
2484 if (sigisemptyset(&retarget))
2485 break;
2486 }
2487 }
2488
2489 void exit_signals(struct task_struct *tsk)
2490 {
2491 int group_stop = 0;
2492 sigset_t unblocked;
2493
2494 /*
2495 * @tsk is about to have PF_EXITING set - lock out users which
2496 * expect stable threadgroup.
2497 */
2498 threadgroup_change_begin(tsk);
2499
2500 if (thread_group_empty(tsk) || signal_group_exit(tsk->signal)) {
2501 tsk->flags |= PF_EXITING;
2502 threadgroup_change_end(tsk);
2503 return;
2504 }
2505
2506 spin_lock_irq(&tsk->sighand->siglock);
2507 /*
2508 * From now this task is not visible for group-wide signals,
2509 * see wants_signal(), do_signal_stop().
2510 */
2511 tsk->flags |= PF_EXITING;
2512
2513 threadgroup_change_end(tsk);
2514
2515 if (!signal_pending(tsk))
2516 goto out;
2517
2518 unblocked = tsk->blocked;
2519 signotset(&unblocked);
2520 retarget_shared_pending(tsk, &unblocked);
2521
2522 if (unlikely(tsk->jobctl & JOBCTL_STOP_PENDING) &&
2523 task_participate_group_stop(tsk))
2524 group_stop = CLD_STOPPED;
2525 out:
2526 spin_unlock_irq(&tsk->sighand->siglock);
2527
2528 /*
2529 * If group stop has completed, deliver the notification. This
2530 * should always go to the real parent of the group leader.
2531 */
2532 if (unlikely(group_stop)) {
2533 read_lock(&tasklist_lock);
2534 do_notify_parent_cldstop(tsk, false, group_stop);
2535 read_unlock(&tasklist_lock);
2536 }
2537 }
2538
2539 EXPORT_SYMBOL(recalc_sigpending);
2540 EXPORT_SYMBOL_GPL(dequeue_signal);
2541 EXPORT_SYMBOL(flush_signals);
2542 EXPORT_SYMBOL(force_sig);
2543 EXPORT_SYMBOL(send_sig);
2544 EXPORT_SYMBOL(send_sig_info);
2545 EXPORT_SYMBOL(sigprocmask);
2546 EXPORT_SYMBOL(block_all_signals);
2547 EXPORT_SYMBOL(unblock_all_signals);
2548
2549
2550 /*
2551 * System call entry points.
2552 */
2553
2554 /**
2555 * sys_restart_syscall - restart a system call
2556 */
2557 SYSCALL_DEFINE0(restart_syscall)
2558 {
2559 struct restart_block *restart = &current_thread_info()->restart_block;
2560 return restart->fn(restart);
2561 }
2562
2563 long do_no_restart_syscall(struct restart_block *param)
2564 {
2565 return -EINTR;
2566 }
2567
2568 static void __set_task_blocked(struct task_struct *tsk, const sigset_t *newset)
2569 {
2570 if (signal_pending(tsk) && !thread_group_empty(tsk)) {
2571 sigset_t newblocked;
2572 /* A set of now blocked but previously unblocked signals. */
2573 sigandnsets(&newblocked, newset, &current->blocked);
2574 retarget_shared_pending(tsk, &newblocked);
2575 }
2576 tsk->blocked = *newset;
2577 recalc_sigpending();
2578 }
2579
2580 /**
2581 * set_current_blocked - change current->blocked mask
2582 * @newset: new mask
2583 *
2584 * It is wrong to change ->blocked directly; this helper should be used
2585 * to ensure the process can't miss a shared signal we are going to block.
2586 */
2587 void set_current_blocked(sigset_t *newset)
2588 {
2589 sigdelsetmask(newset, sigmask(SIGKILL) | sigmask(SIGSTOP));
2590 __set_current_blocked(newset);
2591 }
2592
2593 void __set_current_blocked(const sigset_t *newset)
2594 {
2595 struct task_struct *tsk = current;
2596
2597 spin_lock_irq(&tsk->sighand->siglock);
2598 __set_task_blocked(tsk, newset);
2599 spin_unlock_irq(&tsk->sighand->siglock);
2600 }
2601
2602 /*
2603 * This is also useful for kernel threads that want to temporarily
2604 * (or permanently) block certain signals.
2605 *
2606 * NOTE! Unlike the user-mode sys_sigprocmask(), the kernel
2607 * interface happily blocks "unblockable" signals like SIGKILL
2608 * and friends.
2609 */
2610 int sigprocmask(int how, sigset_t *set, sigset_t *oldset)
2611 {
2612 struct task_struct *tsk = current;
2613 sigset_t newset;
2614
2615 /* Lockless, only current can change ->blocked, never from irq */
2616 if (oldset)
2617 *oldset = tsk->blocked;
2618
2619 switch (how) {
2620 case SIG_BLOCK:
2621 sigorsets(&newset, &tsk->blocked, set);
2622 break;
2623 case SIG_UNBLOCK:
2624 sigandnsets(&newset, &tsk->blocked, set);
2625 break;
2626 case SIG_SETMASK:
2627 newset = *set;
2628 break;
2629 default:
2630 return -EINVAL;
2631 }
2632
2633 __set_current_blocked(&newset);
2634 return 0;
2635 }
2636
2637 /**
2638 * sys_rt_sigprocmask - change the list of currently blocked signals
2639 * @how: whether to add, remove, or set signals
2640 * @nset: new signal mask to apply, if non-null
2641 * @oset: previous value of signal mask if non-null
2642 * @sigsetsize: size of sigset_t type
2643 */
2644 SYSCALL_DEFINE4(rt_sigprocmask, int, how, sigset_t __user *, nset,
2645 sigset_t __user *, oset, size_t, sigsetsize)
2646 {
2647 sigset_t old_set, new_set;
2648 int error;
2649
2650 /* XXX: Don't preclude handling different sized sigset_t's. */
2651 if (sigsetsize != sizeof(sigset_t))
2652 return -EINVAL;
2653
2654 old_set = current->blocked;
2655
2656 if (nset) {
2657 if (copy_from_user(&new_set, nset, sizeof(sigset_t)))
2658 return -EFAULT;
2659 sigdelsetmask(&new_set, sigmask(SIGKILL)|sigmask(SIGSTOP));
2660
2661 error = sigprocmask(how, &new_set, NULL);
2662 if (error)
2663 return error;
2664 }
2665
2666 if (oset) {
2667 if (copy_to_user(oset, &old_set, sizeof(sigset_t)))
2668 return -EFAULT;
2669 }
2670
2671 return 0;
2672 }
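
/*
 * A minimal user-space sketch (not kernel code) of the three @how modes
 * as reached through the glibc sigprocmask() wrapper; assumes <signal.h>:
 *
 *	sigset_t set, old;
 *
 *	sigemptyset(&set);
 *	sigaddset(&set, SIGUSR1);
 *	sigprocmask(SIG_BLOCK, &set, &old);	add SIGUSR1 to the mask
 *	sigprocmask(SIG_UNBLOCK, &set, NULL);	remove it again
 *	sigprocmask(SIG_SETMASK, &old, NULL);	restore the saved mask
 */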
2673
2674 #ifdef CONFIG_COMPAT
2675 COMPAT_SYSCALL_DEFINE4(rt_sigprocmask, int, how, compat_sigset_t __user *, nset,
2676 compat_sigset_t __user *, oset, compat_size_t, sigsetsize)
2677 {
2678 #ifdef __BIG_ENDIAN
2679 sigset_t old_set = current->blocked;
2680
2681 /* XXX: Don't preclude handling different sized sigset_t's. */
2682 if (sigsetsize != sizeof(sigset_t))
2683 return -EINVAL;
2684
2685 if (nset) {
2686 compat_sigset_t new32;
2687 sigset_t new_set;
2688 int error;
2689 if (copy_from_user(&new32, nset, sizeof(compat_sigset_t)))
2690 return -EFAULT;
2691
2692 sigset_from_compat(&new_set, &new32);
2693 sigdelsetmask(&new_set, sigmask(SIGKILL)|sigmask(SIGSTOP));
2694
2695 error = sigprocmask(how, &new_set, NULL);
2696 if (error)
2697 return error;
2698 }
2699 if (oset) {
2700 compat_sigset_t old32;
2701 sigset_to_compat(&old32, &old_set);
2702 if (copy_to_user(oset, &old32, sizeof(compat_sigset_t)))
2703 return -EFAULT;
2704 }
2705 return 0;
2706 #else
2707 return sys_rt_sigprocmask(how, (sigset_t __user *)nset,
2708 (sigset_t __user *)oset, sigsetsize);
2709 #endif
2710 }
2711 #endif
2712
2713 static int do_sigpending(void *set, unsigned long sigsetsize)
2714 {
2715 if (sigsetsize > sizeof(sigset_t))
2716 return -EINVAL;
2717
2718 spin_lock_irq(&current->sighand->siglock);
2719 sigorsets(set, &current->pending.signal,
2720 &current->signal->shared_pending.signal);
2721 spin_unlock_irq(&current->sighand->siglock);
2722
2723 /* Outside the lock because only this thread touches it. */
2724 sigandsets(set, &current->blocked, set);
2725 return 0;
2726 }
2727
2728 /**
2729 * sys_rt_sigpending - examine the set of signals that are pending
2730 * (raised while blocked)
2731 * @uset: where the set of pending signals is stored
2732 * @sigsetsize: size of sigset_t type (must not exceed sizeof(sigset_t))
2733 */
2734 SYSCALL_DEFINE2(rt_sigpending, sigset_t __user *, uset, size_t, sigsetsize)
2735 {
2736 sigset_t set;
2737 int err = do_sigpending(&set, sigsetsize);
2738 if (!err && copy_to_user(uset, &set, sigsetsize))
2739 err = -EFAULT;
2740 return err;
2741 }
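
/*
 * A minimal user-space sketch (not kernel code) of observing a signal
 * raised while blocked, via the sigpending() wrapper; assumes <signal.h>:
 *
 *	sigset_t block, pend;
 *
 *	sigemptyset(&block);
 *	sigaddset(&block, SIGUSR1);
 *	sigprocmask(SIG_BLOCK, &block, NULL);
 *	raise(SIGUSR1);				queued, not delivered
 *	sigpending(&pend);			sigismember(&pend, SIGUSR1)
 *						now returns 1
 */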
2742
2743 #ifdef CONFIG_COMPAT
2744 COMPAT_SYSCALL_DEFINE2(rt_sigpending, compat_sigset_t __user *, uset,
2745 compat_size_t, sigsetsize)
2746 {
2747 #ifdef __BIG_ENDIAN
2748 sigset_t set;
2749 int err = do_sigpending(&set, sigsetsize);
2750 if (!err) {
2751 compat_sigset_t set32;
2752 sigset_to_compat(&set32, &set);
2753 /* we can get here only if sigsetsize <= sizeof(set) */
2754 if (copy_to_user(uset, &set32, sigsetsize))
2755 err = -EFAULT;
2756 }
2757 return err;
2758 #else
2759 return sys_rt_sigpending((sigset_t __user *)uset, sigsetsize);
2760 #endif
2761 }
2762 #endif
2763
2764 #ifndef HAVE_ARCH_COPY_SIGINFO_TO_USER
2765
2766 int copy_siginfo_to_user(siginfo_t __user *to, const siginfo_t *from)
2767 {
2768 int err;
2769
2770 if (!access_ok (VERIFY_WRITE, to, sizeof(siginfo_t)))
2771 return -EFAULT;
2772 if (from->si_code < 0)
2773 return __copy_to_user(to, from, sizeof(siginfo_t))
2774 ? -EFAULT : 0;
2775 /*
2776 * If you change siginfo_t structure, please be sure
2777 * this code is fixed accordingly.
2778 * Please remember to update the signalfd_copyinfo() function
2779 * inside fs/signalfd.c too, in case siginfo_t changes.
2780 * It should never copy any pad contained in the structure
2781 * to avoid security leaks, but must copy the generic
2782 * 3 ints plus the relevant union member.
2783 */
2784 err = __put_user(from->si_signo, &to->si_signo);
2785 err |= __put_user(from->si_errno, &to->si_errno);
2786 err |= __put_user((short)from->si_code, &to->si_code);
2787 switch (from->si_code & __SI_MASK) {
2788 case __SI_KILL:
2789 err |= __put_user(from->si_pid, &to->si_pid);
2790 err |= __put_user(from->si_uid, &to->si_uid);
2791 break;
2792 case __SI_TIMER:
2793 err |= __put_user(from->si_tid, &to->si_tid);
2794 err |= __put_user(from->si_overrun, &to->si_overrun);
2795 err |= __put_user(from->si_ptr, &to->si_ptr);
2796 break;
2797 case __SI_POLL:
2798 err |= __put_user(from->si_band, &to->si_band);
2799 err |= __put_user(from->si_fd, &to->si_fd);
2800 break;
2801 case __SI_FAULT:
2802 err |= __put_user(from->si_addr, &to->si_addr);
2803 #ifdef __ARCH_SI_TRAPNO
2804 err |= __put_user(from->si_trapno, &to->si_trapno);
2805 #endif
2806 #ifdef BUS_MCEERR_AO
2807 /*
2808 * Other callers might not initialize the si_lsb field,
2809 * so check explicitly for the right codes here.
2810 */
2811 if (from->si_signo == SIGBUS &&
2812 (from->si_code == BUS_MCEERR_AR || from->si_code == BUS_MCEERR_AO))
2813 err |= __put_user(from->si_addr_lsb, &to->si_addr_lsb);
2814 #endif
2815 break;
2816 case __SI_CHLD:
2817 err |= __put_user(from->si_pid, &to->si_pid);
2818 err |= __put_user(from->si_uid, &to->si_uid);
2819 err |= __put_user(from->si_status, &to->si_status);
2820 err |= __put_user(from->si_utime, &to->si_utime);
2821 err |= __put_user(from->si_stime, &to->si_stime);
2822 break;
2823 case __SI_RT: /* This is not generated by the kernel as of now. */
2824 case __SI_MESGQ: /* But this is */
2825 err |= __put_user(from->si_pid, &to->si_pid);
2826 err |= __put_user(from->si_uid, &to->si_uid);
2827 err |= __put_user(from->si_ptr, &to->si_ptr);
2828 break;
2829 #ifdef __ARCH_SIGSYS
2830 case __SI_SYS:
2831 err |= __put_user(from->si_call_addr, &to->si_call_addr);
2832 err |= __put_user(from->si_syscall, &to->si_syscall);
2833 err |= __put_user(from->si_arch, &to->si_arch);
2834 break;
2835 #endif
2836 default: /* this is just in case for now ... */
2837 err |= __put_user(from->si_pid, &to->si_pid);
2838 err |= __put_user(from->si_uid, &to->si_uid);
2839 break;
2840 }
2841 return err;
2842 }
2843
2844 #endif
2845
2846 /**
2847 * do_sigtimedwait - wait for queued signals specified in @which
2848 * @which: queued signals to wait for
2849 * @info: if non-null, the signal's siginfo is returned here
2850 * @ts: upper bound on process time suspension
2851 */
2852 int do_sigtimedwait(const sigset_t *which, siginfo_t *info,
2853 const struct timespec *ts)
2854 {
2855 struct task_struct *tsk = current;
2856 long timeout = MAX_SCHEDULE_TIMEOUT;
2857 sigset_t mask = *which;
2858 int sig;
2859
2860 if (ts) {
2861 if (!timespec_valid(ts))
2862 return -EINVAL;
2863 timeout = timespec_to_jiffies(ts);
2864 /*
2865 * We can be close to the next tick, add another one
2866 * to ensure we will wait at least the time asked for.
2867 */
2868 if (ts->tv_sec || ts->tv_nsec)
2869 timeout++;
2870 }
2871
2872 /*
2873 * Invert the set of allowed signals to get those we want to block.
2874 */
2875 sigdelsetmask(&mask, sigmask(SIGKILL) | sigmask(SIGSTOP));
2876 signotset(&mask);
2877
2878 spin_lock_irq(&tsk->sighand->siglock);
2879 sig = dequeue_signal(tsk, &mask, info);
2880 if (!sig && timeout) {
2881 /*
2882 * None ready, temporarily unblock those we're interested in
2883 * while we sleep, so that we'll be awakened when they
2884 * arrive. Unblocking is always fine; we can avoid
2885 * set_current_blocked().
2886 */
2887 tsk->real_blocked = tsk->blocked;
2888 sigandsets(&tsk->blocked, &tsk->blocked, &mask);
2889 recalc_sigpending();
2890 spin_unlock_irq(&tsk->sighand->siglock);
2891
2892 timeout = freezable_schedule_timeout_interruptible(timeout);
2893
2894 spin_lock_irq(&tsk->sighand->siglock);
2895 __set_task_blocked(tsk, &tsk->real_blocked);
2896 sigemptyset(&tsk->real_blocked);
2897 sig = dequeue_signal(tsk, &mask, info);
2898 }
2899 spin_unlock_irq(&tsk->sighand->siglock);
2900
2901 if (sig)
2902 return sig;
2903 return timeout ? -EINTR : -EAGAIN;
2904 }
2905
2906 /**
2907 * sys_rt_sigtimedwait - synchronously wait for queued signals specified
2908 * in @uthese
2909 * @uthese: queued signals to wait for
2910 * @uinfo: if non-null, the signal's siginfo is returned here
2911 * @uts: upper bound on process time suspension
2912 * @sigsetsize: size of sigset_t type
2913 */
2914 SYSCALL_DEFINE4(rt_sigtimedwait, const sigset_t __user *, uthese,
2915 siginfo_t __user *, uinfo, const struct timespec __user *, uts,
2916 size_t, sigsetsize)
2917 {
2918 sigset_t these;
2919 struct timespec ts;
2920 siginfo_t info;
2921 int ret;
2922
2923 /* XXX: Don't preclude handling different sized sigset_t's. */
2924 if (sigsetsize != sizeof(sigset_t))
2925 return -EINVAL;
2926
2927 if (copy_from_user(&these, uthese, sizeof(these)))
2928 return -EFAULT;
2929
2930 if (uts) {
2931 if (copy_from_user(&ts, uts, sizeof(ts)))
2932 return -EFAULT;
2933 }
2934
2935 ret = do_sigtimedwait(&these, &info, uts ? &ts : NULL);
2936
2937 if (ret > 0 && uinfo) {
2938 if (copy_siginfo_to_user(uinfo, &info))
2939 ret = -EFAULT;
2940 }
2941
2942 return ret;
2943 }
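
/*
 * A minimal user-space sketch (not kernel code) of the synchronous wait
 * served by do_sigtimedwait(), via the sigtimedwait() wrapper. The signal
 * must be blocked first so it is queued rather than delivered; assumes
 * <signal.h>, <errno.h> and <time.h>:
 *
 *	sigset_t set;
 *	siginfo_t si;
 *	struct timespec ts = { .tv_sec = 5, .tv_nsec = 0 };
 *
 *	sigemptyset(&set);
 *	sigaddset(&set, SIGUSR1);
 *	sigprocmask(SIG_BLOCK, &set, NULL);
 *	if (sigtimedwait(&set, &si, &ts) == SIGUSR1)
 *		...				si.si_pid is the sender
 *	else if (errno == EAGAIN)
 *		...				the five-second timeout hit
 */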
2944
2945 /**
2946 * sys_kill - send a signal to a process
2947 * @pid: the PID of the process
2948 * @sig: signal to be sent
2949 */
2950 SYSCALL_DEFINE2(kill, pid_t, pid, int, sig)
2951 {
2952 struct siginfo info;
2953
2954 info.si_signo = sig;
2955 info.si_errno = 0;
2956 info.si_code = SI_USER;
2957 info.si_pid = task_tgid_vnr(current);
2958 info.si_uid = from_kuid_munged(current_user_ns(), current_uid());
2959
2960 return kill_something_info(sig, &info, pid);
2961 }
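
/*
 * A minimal user-space sketch (not kernel code) of the "null signal"
 * probe described in do_send_specific() below - signal 0 runs the
 * permission and existence checks without delivering anything; assumes
 * <signal.h> and <errno.h>:
 *
 *	if (kill(pid, 0) == 0)
 *		...			pid exists and we may signal it
 *	else if (errno == ESRCH)
 *		...			no such process
 *	else if (errno == EPERM)
 *		...			it exists, but we lack permission
 */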
2962
2963 static int
2964 do_send_specific(pid_t tgid, pid_t pid, int sig, struct siginfo *info)
2965 {
2966 struct task_struct *p;
2967 int error = -ESRCH;
2968
2969 rcu_read_lock();
2970 p = find_task_by_vpid(pid);
2971 if (p && (tgid <= 0 || task_tgid_vnr(p) == tgid)) {
2972 error = check_kill_permission(sig, info, p);
2973 /*
2974 * The null signal is a permissions and process existence
2975 * probe. No signal is actually delivered.
2976 */
2977 if (!error && sig) {
2978 error = do_send_sig_info(sig, info, p, false);
2979 /*
2980 * If lock_task_sighand() failed we pretend the task
2981 * dies after receiving the signal. The window is tiny,
2982 * and the signal is private anyway.
2983 */
2984 if (unlikely(error == -ESRCH))
2985 error = 0;
2986 }
2987 }
2988 rcu_read_unlock();
2989
2990 return error;
2991 }
2992
2993 static int do_tkill(pid_t tgid, pid_t pid, int sig)
2994 {
2995 struct siginfo info = {};
2996
2997 info.si_signo = sig;
2998 info.si_errno = 0;
2999 info.si_code = SI_TKILL;
3000 info.si_pid = task_tgid_vnr(current);
3001 info.si_uid = from_kuid_munged(current_user_ns(), current_uid());
3002
3003 return do_send_specific(tgid, pid, sig, &info);
3004 }
3005
3006 /**
3007 * sys_tgkill - send signal to one specific thread
3008 * @tgid: the thread group ID of the thread
3009 * @pid: the PID of the thread
3010 * @sig: signal to be sent
3011 *
3012 * This syscall also checks the @tgid and returns -ESRCH even if the PID
3013 * exists but no longer belongs to the target process. This
3014 * method solves the problem of threads exiting and PIDs getting reused.
3015 */
3016 SYSCALL_DEFINE3(tgkill, pid_t, tgid, pid_t, pid, int, sig)
3017 {
3018 /* This is only valid for single tasks */
3019 if (pid <= 0 || tgid <= 0)
3020 return -EINVAL;
3021
3022 return do_tkill(tgid, pid, sig);
3023 }
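
/*
 * A hedged user-space sketch (not kernel code): tgkill() has no dedicated
 * glibc wrapper in many versions (pthread_kill() is the portable
 * equivalent within a process), so a raw call looks like this, assuming
 * <unistd.h>, <sys/syscall.h> and <signal.h>:
 *
 *	pid_t tgid = getpid();
 *	pid_t tid = syscall(SYS_gettid);
 *	syscall(SYS_tgkill, tgid, tid, SIGUSR1);	signal one thread only
 */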
3024
3025 /**
3026 * sys_tkill - send signal to one specific task
3027 * @pid: the PID of the task
3028 * @sig: signal to be sent
3029 *
3030 * Send a signal to only one task, even if it's a CLONE_THREAD task.
3031 */
3032 SYSCALL_DEFINE2(tkill, pid_t, pid, int, sig)
3033 {
3034 /* This is only valid for single tasks */
3035 if (pid <= 0)
3036 return -EINVAL;
3037
3038 return do_tkill(0, pid, sig);
3039 }
3040
3041 static int do_rt_sigqueueinfo(pid_t pid, int sig, siginfo_t *info)
3042 {
3043 /* Not even root can pretend to send signals from the kernel.
3044 * Nor can they impersonate a kill()/tgkill(), which adds source info.
3045 */
3046 if ((info->si_code >= 0 || info->si_code == SI_TKILL) &&
3047 (task_pid_vnr(current) != pid)) {
3048 /* We used to allow any < 0 si_code */
3049 WARN_ON_ONCE(info->si_code < 0);
3050 return -EPERM;
3051 }
3052 info->si_signo = sig;
3053
3054 /* POSIX.1b doesn't mention process groups. */
3055 return kill_proc_info(sig, info, pid);
3056 }
3057
3058 /**
3059 * sys_rt_sigqueueinfo - queue a signal and data to a process
3060 * @pid: the PID of the target process
3061 * @sig: signal to be sent
3062 * @uinfo: signal info to be sent
3063 */
3064 SYSCALL_DEFINE3(rt_sigqueueinfo, pid_t, pid, int, sig,
3065 siginfo_t __user *, uinfo)
3066 {
3067 siginfo_t info;
3068 if (copy_from_user(&info, uinfo, sizeof(siginfo_t)))
3069 return -EFAULT;
3070 return do_rt_sigqueueinfo(pid, sig, &info);
3071 }
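
/*
 * A minimal user-space sketch (not kernel code): rt_sigqueueinfo is
 * normally reached through sigqueue(), which fills in a legitimate
 * si_code (SI_QUEUE) and carries one word of payload; assumes <signal.h>:
 *
 *	union sigval val = { .sival_int = 42 };
 *
 *	sigqueue(pid, SIGUSR1, val);
 *
 * The receiver sees the payload in si_value when using an SA_SIGINFO
 * handler or sigwaitinfo().
 */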
3072
3073 #ifdef CONFIG_COMPAT
3074 COMPAT_SYSCALL_DEFINE3(rt_sigqueueinfo,
3075 compat_pid_t, pid,
3076 int, sig,
3077 struct compat_siginfo __user *, uinfo)
3078 {
3079 siginfo_t info = {};
3080 int ret = copy_siginfo_from_user32(&info, uinfo);
3081 if (unlikely(ret))
3082 return ret;
3083 return do_rt_sigqueueinfo(pid, sig, &info);
3084 }
3085 #endif
3086
3087 static int do_rt_tgsigqueueinfo(pid_t tgid, pid_t pid, int sig, siginfo_t *info)
3088 {
3089 /* This is only valid for single tasks */
3090 if (pid <= 0 || tgid <= 0)
3091 return -EINVAL;
3092
3093 /* Not even root can pretend to send signals from the kernel.
3094 * Nor can they impersonate a kill()/tgkill(), which adds source info.
3095 */
3096 if (((info->si_code >= 0 || info->si_code == SI_TKILL)) &&
3097 (task_pid_vnr(current) != pid)) {
3098 /* We used to allow any < 0 si_code */
3099 WARN_ON_ONCE(info->si_code < 0);
3100 return -EPERM;
3101 }
3102 info->si_signo = sig;
3103
3104 return do_send_specific(tgid, pid, sig, info);
3105 }
3106
3107 SYSCALL_DEFINE4(rt_tgsigqueueinfo, pid_t, tgid, pid_t, pid, int, sig,
3108 siginfo_t __user *, uinfo)
3109 {
3110 siginfo_t info;
3111
3112 if (copy_from_user(&info, uinfo, sizeof(siginfo_t)))
3113 return -EFAULT;
3114
3115 return do_rt_tgsigqueueinfo(tgid, pid, sig, &info);
3116 }
3117
3118 #ifdef CONFIG_COMPAT
3119 COMPAT_SYSCALL_DEFINE4(rt_tgsigqueueinfo,
3120 compat_pid_t, tgid,
3121 compat_pid_t, pid,
3122 int, sig,
3123 struct compat_siginfo __user *, uinfo)
3124 {
3125 siginfo_t info = {};
3126
3127 if (copy_siginfo_from_user32(&info, uinfo))
3128 return -EFAULT;
3129 return do_rt_tgsigqueueinfo(tgid, pid, sig, &info);
3130 }
3131 #endif
3132
3133 /*
3134 * For kthreads only; must not be used if cloned with CLONE_SIGHAND
3135 */
3136 void kernel_sigaction(int sig, __sighandler_t action)
3137 {
3138 spin_lock_irq(&current->sighand->siglock);
3139 current->sighand->action[sig - 1].sa.sa_handler = action;
3140 if (action == SIG_IGN) {
3141 sigset_t mask;
3142
3143 sigemptyset(&mask);
3144 sigaddset(&mask, sig);
3145
3146 flush_sigqueue_mask(&mask, &current->signal->shared_pending);
3147 flush_sigqueue_mask(&mask, &current->pending);
3148 recalc_sigpending();
3149 }
3150 spin_unlock_irq(&current->sighand->siglock);
3151 }
3152 EXPORT_SYMBOL(kernel_sigaction);
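
/*
 * A hedged in-kernel sketch: kthreads normally reach kernel_sigaction()
 * through the allow_signal()/disallow_signal() helpers rather than
 * calling it directly, and then poll for the signal themselves:
 *
 *	allow_signal(SIGTERM);
 *	while (!kthread_should_stop()) {
 *		... do work, sleeping interruptibly ...
 *		if (signal_pending(current))
 *			break;
 *	}
 */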
3153
3154 int do_sigaction(int sig, struct k_sigaction *act, struct k_sigaction *oact)
3155 {
3156 struct task_struct *p = current, *t;
3157 struct k_sigaction *k;
3158 sigset_t mask;
3159
3160 if (!valid_signal(sig) || sig < 1 || (act && sig_kernel_only(sig)))
3161 return -EINVAL;
3162
3163 k = &p->sighand->action[sig-1];
3164
3165 spin_lock_irq(&p->sighand->siglock);
3166 if (oact)
3167 *oact = *k;
3168
3169 if (act) {
3170 sigdelsetmask(&act->sa.sa_mask,
3171 sigmask(SIGKILL) | sigmask(SIGSTOP));
3172 *k = *act;
3173 /*
3174 * POSIX 3.3.1.3:
3175 * "Setting a signal action to SIG_IGN for a signal that is
3176 * pending shall cause the pending signal to be discarded,
3177 * whether or not it is blocked."
3178 *
3179 * "Setting a signal action to SIG_DFL for a signal that is
3180 * pending and whose default action is to ignore the signal
3181 * (for example, SIGCHLD), shall cause the pending signal to
3182 * be discarded, whether or not it is blocked"
3183 */
3184 if (sig_handler_ignored(sig_handler(p, sig), sig)) {
3185 sigemptyset(&mask);
3186 sigaddset(&mask, sig);
3187 flush_sigqueue_mask(&mask, &p->signal->shared_pending);
3188 for_each_thread(p, t)
3189 flush_sigqueue_mask(&mask, &t->pending);
3190 }
3191 }
3192
3193 spin_unlock_irq(&p->sighand->siglock);
3194 return 0;
3195 }
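
/*
 * A minimal user-space sketch (not kernel code) of the POSIX rule quoted
 * above: installing SIG_IGN discards an instance of the signal that is
 * already pending, even while blocked. Assumes <signal.h> and <string.h>:
 *
 *	sigset_t set;
 *	struct sigaction sa;
 *
 *	sigemptyset(&set);
 *	sigaddset(&set, SIGUSR1);
 *	sigprocmask(SIG_BLOCK, &set, NULL);
 *	raise(SIGUSR1);				SIGUSR1 is now pending
 *
 *	memset(&sa, 0, sizeof(sa));
 *	sa.sa_handler = SIG_IGN;
 *	sigaction(SIGUSR1, &sa, NULL);		pending SIGUSR1 is discarded
 */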
3196
3197 static int
3198 do_sigaltstack (const stack_t __user *uss, stack_t __user *uoss, unsigned long sp)
3199 {
3200 stack_t oss;
3201 int error;
3202
3203 oss.ss_sp = (void __user *) current->sas_ss_sp;
3204 oss.ss_size = current->sas_ss_size;
3205 oss.ss_flags = sas_ss_flags(sp);
3206
3207 if (uss) {
3208 void __user *ss_sp;
3209 size_t ss_size;
3210 int ss_flags;
3211
3212 error = -EFAULT;
3213 if (!access_ok(VERIFY_READ, uss, sizeof(*uss)))
3214 goto out;
3215 error = __get_user(ss_sp, &uss->ss_sp) |
3216 __get_user(ss_flags, &uss->ss_flags) |
3217 __get_user(ss_size, &uss->ss_size);
3218 if (error)
3219 goto out;
3220
3221 error = -EPERM;
3222 if (on_sig_stack(sp))
3223 goto out;
3224
3225 error = -EINVAL;
3226 /*
3227 * Note - this code used to test ss_flags incorrectly:
3228 * old code may have been written using ss_flags==0
3229 * to mean ss_flags==SS_ONSTACK (as this was the only
3230 * way that worked) - this fix preserves that older
3231 * mechanism.
3232 */
3233 if (ss_flags != SS_DISABLE && ss_flags != SS_ONSTACK && ss_flags != 0)
3234 goto out;
3235
3236 if (ss_flags == SS_DISABLE) {
3237 ss_size = 0;
3238 ss_sp = NULL;
3239 } else {
3240 error = -ENOMEM;
3241 if (ss_size < MINSIGSTKSZ)
3242 goto out;
3243 }
3244
3245 current->sas_ss_sp = (unsigned long) ss_sp;
3246 current->sas_ss_size = ss_size;
3247 }
3248
3249 error = 0;
3250 if (uoss) {
3251 error = -EFAULT;
3252 if (!access_ok(VERIFY_WRITE, uoss, sizeof(*uoss)))
3253 goto out;
3254 error = __put_user(oss.ss_sp, &uoss->ss_sp) |
3255 __put_user(oss.ss_size, &uoss->ss_size) |
3256 __put_user(oss.ss_flags, &uoss->ss_flags);
3257 }
3258
3259 out:
3260 return error;
3261 }
3262 SYSCALL_DEFINE2(sigaltstack, const stack_t __user *, uss, stack_t __user *, uoss)
3263 {
3264 return do_sigaltstack(uss, uoss, current_user_stack_pointer());
3265 }
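
/*
 * A minimal user-space sketch (not kernel code) of setting up an
 * alternate stack and a handler that runs on it (useful for surviving
 * stack-overflow SIGSEGVs). Assumes <signal.h>, <stdlib.h> and
 * <string.h>; segv_handler() is hypothetical.
 *
 *	stack_t ss = { .ss_sp = malloc(SIGSTKSZ),
 *		       .ss_size = SIGSTKSZ,
 *		       .ss_flags = 0 };
 *	struct sigaction sa;
 *
 *	sigaltstack(&ss, NULL);
 *	memset(&sa, 0, sizeof(sa));
 *	sa.sa_handler = segv_handler;
 *	sa.sa_flags = SA_ONSTACK;
 *	sigaction(SIGSEGV, &sa, NULL);
 */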
3266
3267 int restore_altstack(const stack_t __user *uss)
3268 {
3269 int err = do_sigaltstack(uss, NULL, current_user_stack_pointer());
3270 /* squash all but EFAULT for now */
3271 return err == -EFAULT ? err : 0;
3272 }
3273
3274 int __save_altstack(stack_t __user *uss, unsigned long sp)
3275 {
3276 struct task_struct *t = current;
3277 return __put_user((void __user *)t->sas_ss_sp, &uss->ss_sp) |
3278 __put_user(sas_ss_flags(sp), &uss->ss_flags) |
3279 __put_user(t->sas_ss_size, &uss->ss_size);
3280 }
3281
3282 #ifdef CONFIG_COMPAT
3283 COMPAT_SYSCALL_DEFINE2(sigaltstack,
3284 const compat_stack_t __user *, uss_ptr,
3285 compat_stack_t __user *, uoss_ptr)
3286 {
3287 stack_t uss, uoss;
3288 int ret;
3289 mm_segment_t seg;
3290
3291 if (uss_ptr) {
3292 compat_stack_t uss32;
3293
3294 memset(&uss, 0, sizeof(stack_t));
3295 if (copy_from_user(&uss32, uss_ptr, sizeof(compat_stack_t)))
3296 return -EFAULT;
3297 uss.ss_sp = compat_ptr(uss32.ss_sp);
3298 uss.ss_flags = uss32.ss_flags;
3299 uss.ss_size = uss32.ss_size;
3300 }
3301 seg = get_fs();
3302 set_fs(KERNEL_DS);
3303 ret = do_sigaltstack((stack_t __force __user *) (uss_ptr ? &uss : NULL),
3304 (stack_t __force __user *) &uoss,
3305 compat_user_stack_pointer());
3306 set_fs(seg);
3307 if (ret >= 0 && uoss_ptr) {
3308 if (!access_ok(VERIFY_WRITE, uoss_ptr, sizeof(compat_stack_t)) ||
3309 __put_user(ptr_to_compat(uoss.ss_sp), &uoss_ptr->ss_sp) ||
3310 __put_user(uoss.ss_flags, &uoss_ptr->ss_flags) ||
3311 __put_user(uoss.ss_size, &uoss_ptr->ss_size))
3312 ret = -EFAULT;
3313 }
3314 return ret;
3315 }
3316
3317 int compat_restore_altstack(const compat_stack_t __user *uss)
3318 {
3319 int err = compat_sys_sigaltstack(uss, NULL);
3320 /* squash all but -EFAULT for now */
3321 return err == -EFAULT ? err : 0;
3322 }
3323
3324 int __compat_save_altstack(compat_stack_t __user *uss, unsigned long sp)
3325 {
3326 struct task_struct *t = current;
3327 return __put_user(ptr_to_compat((void __user *)t->sas_ss_sp), &uss->ss_sp) |
3328 __put_user(sas_ss_flags(sp), &uss->ss_flags) |
3329 __put_user(t->sas_ss_size, &uss->ss_size);
3330 }
3331 #endif
3332
3333 #ifdef __ARCH_WANT_SYS_SIGPENDING
3334
3335 /**
3336 * sys_sigpending - examine pending signals
3337 * @set: where the mask of pending signals is returned
3338 */
3339 SYSCALL_DEFINE1(sigpending, old_sigset_t __user *, set)
3340 {
3341 return sys_rt_sigpending((sigset_t __user *)set, sizeof(old_sigset_t));
3342 }
3343
3344 #endif
3345
3346 #ifdef __ARCH_WANT_SYS_SIGPROCMASK
3347 /**
3348 * sys_sigprocmask - examine and change blocked signals
3349 * @how: whether to add, remove, or set signals
3350 * @nset: signals to add or remove (if non-null)
3351 * @oset: previous value of signal mask if non-null
3352 *
3353 * Some platforms have their own version with special arguments;
3354 * others support only sys_rt_sigprocmask.
3355 */
3356
3357 SYSCALL_DEFINE3(sigprocmask, int, how, old_sigset_t __user *, nset,
3358 old_sigset_t __user *, oset)
3359 {
3360 old_sigset_t old_set, new_set;
3361 sigset_t new_blocked;
3362
3363 old_set = current->blocked.sig[0];
3364
3365 if (nset) {
3366 if (copy_from_user(&new_set, nset, sizeof(*nset)))
3367 return -EFAULT;
3368
3369 new_blocked = current->blocked;
3370
3371 switch (how) {
3372 case SIG_BLOCK:
3373 sigaddsetmask(&new_blocked, new_set);
3374 break;
3375 case SIG_UNBLOCK:
3376 sigdelsetmask(&new_blocked, new_set);
3377 break;
3378 case SIG_SETMASK:
3379 new_blocked.sig[0] = new_set;
3380 break;
3381 default:
3382 return -EINVAL;
3383 }
3384
3385 set_current_blocked(&new_blocked);
3386 }
3387
3388 if (oset) {
3389 if (copy_to_user(oset, &old_set, sizeof(*oset)))
3390 return -EFAULT;
3391 }
3392
3393 return 0;
3394 }
3395 #endif /* __ARCH_WANT_SYS_SIGPROCMASK */
3396
3397 #ifndef CONFIG_ODD_RT_SIGACTION
3398 /**
3399 * sys_rt_sigaction - alter an action taken by a process
3400 * @sig: signal whose action is to be changed
3401 * @act: new sigaction
3402 * @oact: used to save the previous sigaction
3403 * @sigsetsize: size of sigset_t type
3404 */
3405 SYSCALL_DEFINE4(rt_sigaction, int, sig,
3406 const struct sigaction __user *, act,
3407 struct sigaction __user *, oact,
3408 size_t, sigsetsize)
3409 {
3410 struct k_sigaction new_sa, old_sa;
3411 int ret = -EINVAL;
3412
3413 /* XXX: Don't preclude handling different sized sigset_t's. */
3414 if (sigsetsize != sizeof(sigset_t))
3415 goto out;
3416
3417 if (act) {
3418 if (copy_from_user(&new_sa.sa, act, sizeof(new_sa.sa)))
3419 return -EFAULT;
3420 }
3421
3422 ret = do_sigaction(sig, act ? &new_sa : NULL, oact ? &old_sa : NULL);
3423
3424 if (!ret && oact) {
3425 if (copy_to_user(oact, &old_sa.sa, sizeof(old_sa.sa)))
3426 return -EFAULT;
3427 }
3428 out:
3429 return ret;
3430 }
3431 #ifdef CONFIG_COMPAT
3432 COMPAT_SYSCALL_DEFINE4(rt_sigaction, int, sig,
3433 const struct compat_sigaction __user *, act,
3434 struct compat_sigaction __user *, oact,
3435 compat_size_t, sigsetsize)
3436 {
3437 struct k_sigaction new_ka, old_ka;
3438 compat_sigset_t mask;
3439 #ifdef __ARCH_HAS_SA_RESTORER
3440 compat_uptr_t restorer;
3441 #endif
3442 int ret;
3443
3444 /* XXX: Don't preclude handling different sized sigset_t's. */
3445 if (sigsetsize != sizeof(compat_sigset_t))
3446 return -EINVAL;
3447
3448 if (act) {
3449 compat_uptr_t handler;
3450 ret = get_user(handler, &act->sa_handler);
3451 new_ka.sa.sa_handler = compat_ptr(handler);
3452 #ifdef __ARCH_HAS_SA_RESTORER
3453 ret |= get_user(restorer, &act->sa_restorer);
3454 new_ka.sa.sa_restorer = compat_ptr(restorer);
3455 #endif
3456 ret |= copy_from_user(&mask, &act->sa_mask, sizeof(mask));
3457 ret |= get_user(new_ka.sa.sa_flags, &act->sa_flags);
3458 if (ret)
3459 return -EFAULT;
3460 sigset_from_compat(&new_ka.sa.sa_mask, &mask);
3461 }
3462
3463 ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
3464 if (!ret && oact) {
3465 sigset_to_compat(&mask, &old_ka.sa.sa_mask);
3466 ret = put_user(ptr_to_compat(old_ka.sa.sa_handler),
3467 &oact->sa_handler);
3468 ret |= copy_to_user(&oact->sa_mask, &mask, sizeof(mask));
3469 ret |= put_user(old_ka.sa.sa_flags, &oact->sa_flags);
3470 #ifdef __ARCH_HAS_SA_RESTORER
3471 ret |= put_user(ptr_to_compat(old_ka.sa.sa_restorer),
3472 &oact->sa_restorer);
3473 #endif
3474 }
3475 return ret;
3476 }
3477 #endif
3478 #endif /* !CONFIG_ODD_RT_SIGACTION */
3479
3480 #ifdef CONFIG_OLD_SIGACTION
3481 SYSCALL_DEFINE3(sigaction, int, sig,
3482 const struct old_sigaction __user *, act,
3483 struct old_sigaction __user *, oact)
3484 {
3485 struct k_sigaction new_ka, old_ka;
3486 int ret;
3487
3488 if (act) {
3489 old_sigset_t mask;
3490 if (!access_ok(VERIFY_READ, act, sizeof(*act)) ||
3491 __get_user(new_ka.sa.sa_handler, &act->sa_handler) ||
3492 __get_user(new_ka.sa.sa_restorer, &act->sa_restorer) ||
3493 __get_user(new_ka.sa.sa_flags, &act->sa_flags) ||
3494 __get_user(mask, &act->sa_mask))
3495 return -EFAULT;
3496 #ifdef __ARCH_HAS_KA_RESTORER
3497 new_ka.ka_restorer = NULL;
3498 #endif
3499 siginitset(&new_ka.sa.sa_mask, mask);
3500 }
3501
3502 ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
3503
3504 if (!ret && oact) {
3505 if (!access_ok(VERIFY_WRITE, oact, sizeof(*oact)) ||
3506 __put_user(old_ka.sa.sa_handler, &oact->sa_handler) ||
3507 __put_user(old_ka.sa.sa_restorer, &oact->sa_restorer) ||
3508 __put_user(old_ka.sa.sa_flags, &oact->sa_flags) ||
3509 __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask))
3510 return -EFAULT;
3511 }
3512
3513 return ret;
3514 }
3515 #endif
3516 #ifdef CONFIG_COMPAT_OLD_SIGACTION
3517 COMPAT_SYSCALL_DEFINE3(sigaction, int, sig,
3518 const struct compat_old_sigaction __user *, act,
3519 struct compat_old_sigaction __user *, oact)
3520 {
3521 struct k_sigaction new_ka, old_ka;
3522 int ret;
3523 compat_old_sigset_t mask;
3524 compat_uptr_t handler, restorer;
3525
3526 if (act) {
3527 if (!access_ok(VERIFY_READ, act, sizeof(*act)) ||
3528 __get_user(handler, &act->sa_handler) ||
3529 __get_user(restorer, &act->sa_restorer) ||
3530 __get_user(new_ka.sa.sa_flags, &act->sa_flags) ||
3531 __get_user(mask, &act->sa_mask))
3532 return -EFAULT;
3533
3534 #ifdef __ARCH_HAS_KA_RESTORER
3535 new_ka.ka_restorer = NULL;
3536 #endif
3537 new_ka.sa.sa_handler = compat_ptr(handler);
3538 new_ka.sa.sa_restorer = compat_ptr(restorer);
3539 siginitset(&new_ka.sa.sa_mask, mask);
3540 }
3541
3542 ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
3543
3544 if (!ret && oact) {
3545 if (!access_ok(VERIFY_WRITE, oact, sizeof(*oact)) ||
3546 __put_user(ptr_to_compat(old_ka.sa.sa_handler),
3547 &oact->sa_handler) ||
3548 __put_user(ptr_to_compat(old_ka.sa.sa_restorer),
3549 &oact->sa_restorer) ||
3550 __put_user(old_ka.sa.sa_flags, &oact->sa_flags) ||
3551 __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask))
3552 return -EFAULT;
3553 }
3554 return ret;
3555 }
3556 #endif
3557
3558 #ifdef CONFIG_SGETMASK_SYSCALL
3559
3560 /*
3561 * For backwards compatibility. Functionality superseded by sigprocmask.
3562 */
3563 SYSCALL_DEFINE0(sgetmask)
3564 {
3565 /* SMP safe */
3566 return current->blocked.sig[0];
3567 }
3568
3569 SYSCALL_DEFINE1(ssetmask, int, newmask)
3570 {
3571 int old = current->blocked.sig[0];
3572 sigset_t newset;
3573
3574 siginitset(&newset, newmask);
3575 set_current_blocked(&newset);
3576
3577 return old;
3578 }
3579 #endif /* CONFIG_SGETMASK_SYSCALL */
3580
3581 #ifdef __ARCH_WANT_SYS_SIGNAL
3582 /*
3583 * For backwards compatibility. Functionality superseded by sigaction.
3584 */
3585 SYSCALL_DEFINE2(signal, int, sig, __sighandler_t, handler)
3586 {
3587 struct k_sigaction new_sa, old_sa;
3588 int ret;
3589
3590 new_sa.sa.sa_handler = handler;
3591 new_sa.sa.sa_flags = SA_ONESHOT | SA_NOMASK;
3592 sigemptyset(&new_sa.sa.sa_mask);
3593
3594 ret = do_sigaction(sig, &new_sa, &old_sa);
3595
3596 return ret ? ret : (unsigned long)old_sa.sa.sa_handler;
3597 }
3598 #endif /* __ARCH_WANT_SYS_SIGNAL */
3599
3600 #ifdef __ARCH_WANT_SYS_PAUSE
3601
3602 SYSCALL_DEFINE0(pause)
3603 {
3604 while (!signal_pending(current)) {
3605 current->state = TASK_INTERRUPTIBLE;
3606 schedule();
3607 }
3608 return -ERESTARTNOHAND;
3609 }
3610
3611 #endif
3612
3613 static int sigsuspend(sigset_t *set)
3614 {
3615 current->saved_sigmask = current->blocked;
3616 set_current_blocked(set);
3617
3618 current->state = TASK_INTERRUPTIBLE;
3619 schedule();
3620 set_restore_sigmask();
3621 return -ERESTARTNOHAND;
3622 }
3623
3624 /**
3625 * sys_rt_sigsuspend - replace the signal mask with the @unewset value
3626 * until a signal is received
3627 * @unewset: new signal mask value
3628 * @sigsetsize: size of sigset_t type
3629 */
3630 SYSCALL_DEFINE2(rt_sigsuspend, sigset_t __user *, unewset, size_t, sigsetsize)
3631 {
3632 sigset_t newset;
3633
3634 /* XXX: Don't preclude handling different sized sigset_t's. */
3635 if (sigsetsize != sizeof(sigset_t))
3636 return -EINVAL;
3637
3638 if (copy_from_user(&newset, unewset, sizeof(newset)))
3639 return -EFAULT;
3640 return sigsuspend(&newset);
3641 }
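
/*
 * A minimal user-space sketch (not kernel code) of the race-free wait
 * sigsuspend exists for: test a flag and sleep atomically with respect
 * to the signal that sets it. Assumes <signal.h> and a handler that sets
 * the volatile sig_atomic_t flag got_signal:
 *
 *	sigset_t block, prev;
 *
 *	sigemptyset(&block);
 *	sigaddset(&block, SIGUSR1);
 *	sigprocmask(SIG_BLOCK, &block, &prev);
 *	while (!got_signal)
 *		sigsuspend(&prev);		unblock and sleep atomically
 *	sigprocmask(SIG_SETMASK, &prev, NULL);
 */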
3642
3643 #ifdef CONFIG_COMPAT
3644 COMPAT_SYSCALL_DEFINE2(rt_sigsuspend, compat_sigset_t __user *, unewset, compat_size_t, sigsetsize)
3645 {
3646 #ifdef __BIG_ENDIAN
3647 sigset_t newset;
3648 compat_sigset_t newset32;
3649
3650 /* XXX: Don't preclude handling different sized sigset_t's. */
3651 if (sigsetsize != sizeof(sigset_t))
3652 return -EINVAL;
3653
3654 if (copy_from_user(&newset32, unewset, sizeof(compat_sigset_t)))
3655 return -EFAULT;
3656 sigset_from_compat(&newset, &newset32);
3657 return sigsuspend(&newset);
3658 #else
3659 /* on little-endian bitmaps don't care about granularity */
3660 return sys_rt_sigsuspend((sigset_t __user *)unewset, sigsetsize);
3661 #endif
3662 }
3663 #endif
3664
3665 #ifdef CONFIG_OLD_SIGSUSPEND
3666 SYSCALL_DEFINE1(sigsuspend, old_sigset_t, mask)
3667 {
3668 sigset_t blocked;
3669 siginitset(&blocked, mask);
3670 return sigsuspend(&blocked);
3671 }
3672 #endif
3673 #ifdef CONFIG_OLD_SIGSUSPEND3
3674 SYSCALL_DEFINE3(sigsuspend, int, unused1, int, unused2, old_sigset_t, mask)
3675 {
3676 sigset_t blocked;
3677 siginitset(&blocked, mask);
3678 return sigsuspend(&blocked);
3679 }
3680 #endif
3681
3682 __weak const char *arch_vma_name(struct vm_area_struct *vma)
3683 {
3684 return NULL;
3685 }
3686
3687 void __init signals_init(void)
3688 {
3689 sigqueue_cachep = KMEM_CACHE(sigqueue, SLAB_PANIC);
3690 }
3691
3692 #ifdef CONFIG_KGDB_KDB
3693 #include <linux/kdb.h>
3694 /*
3695 * kdb_send_sig_info - Allows kdb to send signals without exposing
3696 * signal internals. This function checks if the required locks are
3697 * available before calling the main signal code, to avoid kdb
3698 * deadlocks.
3699 */
3700 void
3701 kdb_send_sig_info(struct task_struct *t, struct siginfo *info)
3702 {
3703 static struct task_struct *kdb_prev_t;
3704 int sig, new_t;
3705 if (!spin_trylock(&t->sighand->siglock)) {
3706 kdb_printf("Can't do kill command now.\n"
3707 "The sigmask lock is held somewhere else in "
3708 "kernel, try again later\n");
3709 return;
3710 }
3711 spin_unlock(&t->sighand->siglock);
3712 new_t = kdb_prev_t != t;
3713 kdb_prev_t = t;
3714 if (t->state != TASK_RUNNING && new_t) {
3715 kdb_printf("Process is not RUNNING, sending a signal from "
3716 "kdb risks deadlock\n"
3717 "on the run queue locks. "
3718 "The signal has _not_ been sent.\n"
3719 "Reissue the kill command if you want to risk "
3720 "the deadlock.\n");
3721 return;
3722 }
3723 sig = info->si_signo;
3724 if (send_sig_info(sig, info, t))
3725 kdb_printf("Failed to deliver signal %d to process %d.\n",
3726 sig, t->pid);
3727 else
3728 kdb_printf("Signal %d is sent to process %d.\n", sig, t->pid);
3729 }
3730 #endif /* CONFIG_KGDB_KDB */