/*
 *  linux/kernel/exit.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 */

#include <linux/config.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/smp_lock.h>
#include <linux/module.h>
#include <linux/capability.h>
#include <linux/completion.h>
#include <linux/personality.h>
#include <linux/tty.h>
#include <linux/namespace.h>
#include <linux/key.h>
#include <linux/security.h>
#include <linux/cpu.h>
#include <linux/acct.h>
#include <linux/file.h>
#include <linux/binfmts.h>
#include <linux/ptrace.h>
#include <linux/profile.h>
#include <linux/mount.h>
#include <linux/proc_fs.h>
#include <linux/mempolicy.h>
#include <linux/cpuset.h>
#include <linux/syscalls.h>
#include <linux/signal.h>
#include <linux/posix-timers.h>
#include <linux/cn_proc.h>
#include <linux/mutex.h>
#include <linux/futex.h>
#include <linux/compat.h>

#include <asm/uaccess.h>
#include <asm/unistd.h>
#include <asm/pgtable.h>
#include <asm/mmu_context.h>

extern void sem_exit (void);
extern struct task_struct *child_reaper;

int getrusage(struct task_struct *, int, struct rusage __user *);

static void exit_mm(struct task_struct * tsk);

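/*
 * Descriptive note (not in the original source): remove the dying task
 * from the pid hashes and the global task list.  Called from
 * release_task() below with tasklist_lock write-locked.
 */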
static void __unhash_process(struct task_struct *p)
{
	nr_threads--;
	detach_pid(p, PIDTYPE_PID);
	detach_pid(p, PIDTYPE_TGID);
	if (thread_group_leader(p)) {
		detach_pid(p, PIDTYPE_PGID);
		detach_pid(p, PIDTYPE_SID);

		list_del_init(&p->tasks);
		__get_cpu_var(process_counts)--;
	}

	remove_parent(p);
}

/*
 * This function expects the tasklist_lock write-locked.
 */
static void __exit_signal(struct task_struct *tsk)
{
	struct signal_struct *sig = tsk->signal;
	struct sighand_struct *sighand;

	BUG_ON(!sig);
	BUG_ON(!atomic_read(&sig->count));

	rcu_read_lock();
	sighand = rcu_dereference(tsk->sighand);
	spin_lock(&sighand->siglock);

	posix_cpu_timers_exit(tsk);
	if (atomic_dec_and_test(&sig->count))
		posix_cpu_timers_exit_group(tsk);
	else {
		/*
		 * If there is any task waiting for the group exit
		 * then notify it:
		 */
		if (sig->group_exit_task && atomic_read(&sig->count) == sig->notify_count) {
			wake_up_process(sig->group_exit_task);
			sig->group_exit_task = NULL;
		}
		if (tsk == sig->curr_target)
			sig->curr_target = next_thread(tsk);
		/*
		 * Accumulate here the counters for all threads but the
		 * group leader as they die, so they can be added into
		 * the process-wide totals when those are taken.
		 * The group leader stays around as a zombie as long
		 * as there are other threads.  When it gets reaped,
		 * the exit.c code will add its counts into these totals.
		 * We won't ever get here for the group leader, since it
		 * will have been the last reference on the signal_struct.
		 */
		sig->utime = cputime_add(sig->utime, tsk->utime);
		sig->stime = cputime_add(sig->stime, tsk->stime);
		sig->min_flt += tsk->min_flt;
		sig->maj_flt += tsk->maj_flt;
		sig->nvcsw += tsk->nvcsw;
		sig->nivcsw += tsk->nivcsw;
		sig->sched_time += tsk->sched_time;
		sig = NULL; /* Marker for below. */
	}

	tsk->signal = NULL;
	cleanup_sighand(tsk);
	spin_unlock(&sighand->siglock);
	rcu_read_unlock();

	clear_tsk_thread_flag(tsk, TIF_SIGPENDING);
	flush_sigqueue(&tsk->pending);
	if (sig) {
		flush_sigqueue(&sig->shared_pending);
		__cleanup_signal(sig);
	}
}

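/*
 * Descriptive note (not in the original source): final release of a
 * dead task.  Detaches it from the pid hashes and the task list,
 * notifies (or reaps) a zombie group leader left with no other
 * threads, and drops the task_struct reference.  Takes and releases
 * tasklist_lock itself, so callers must not hold it.
 */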
void release_task(struct task_struct * p)
{
	int zap_leader;
	task_t *leader;
	struct dentry *proc_dentry;

repeat:
	atomic_dec(&p->user->processes);
	spin_lock(&p->proc_lock);
	proc_dentry = proc_pid_unhash(p);
	write_lock_irq(&tasklist_lock);
	ptrace_unlink(p);
	BUG_ON(!list_empty(&p->ptrace_list) || !list_empty(&p->ptrace_children));
	__exit_signal(p);
	/*
	 * Note that the fastpath in sys_times depends on __exit_signal having
	 * updated the counters before a task is removed from the tasklist of
	 * the process by __unhash_process.
	 */
	__unhash_process(p);

	/*
	 * If we are the last non-leader member of the thread
	 * group, and the leader is zombie, then notify the
	 * group leader's parent process (if it wants notification).
	 */
	zap_leader = 0;
	leader = p->group_leader;
	if (leader != p && thread_group_empty(leader) && leader->exit_state == EXIT_ZOMBIE) {
		BUG_ON(leader->exit_signal == -1);
		do_notify_parent(leader, leader->exit_signal);
		/*
		 * If we were the last child thread and the leader has
		 * exited already, and the leader's parent ignores SIGCHLD,
		 * then we are the one who should release the leader.
		 *
		 * do_notify_parent() will have marked it self-reaping in
		 * that case.
		 */
		zap_leader = (leader->exit_signal == -1);
	}

	sched_exit(p);
	write_unlock_irq(&tasklist_lock);
	spin_unlock(&p->proc_lock);
	proc_pid_flush(proc_dentry);
	release_thread(p);
	put_task_struct(p);

	p = leader;
	if (unlikely(zap_leader))
		goto repeat;
}

/*
 * This checks not only the pgrp, but falls back on the pid if no
 * satisfactory pgrp is found.  I dunno - gdb doesn't work correctly
 * without this...
 */
int session_of_pgrp(int pgrp)
{
	struct task_struct *p;
	int sid = -1;

	read_lock(&tasklist_lock);
	do_each_task_pid(pgrp, PIDTYPE_PGID, p) {
		if (p->signal->session > 0) {
			sid = p->signal->session;
			goto out;
		}
	} while_each_task_pid(pgrp, PIDTYPE_PGID, p);
	p = find_task_by_pid(pgrp);
	if (p)
		sid = p->signal->session;
out:
	read_unlock(&tasklist_lock);

	return sid;
}

/*
 * Determine if a process group is "orphaned", according to the POSIX
 * definition in 2.2.2.52.  Orphaned process groups are not to be affected
 * by terminal-generated stop signals.  Newly orphaned process groups are
 * to receive a SIGHUP and a SIGCONT.
 *
 * "I ask you, have you ever known what it is to be an orphan?"
 */
static int will_become_orphaned_pgrp(int pgrp, task_t *ignored_task)
{
	struct task_struct *p;
	int ret = 1;

	do_each_task_pid(pgrp, PIDTYPE_PGID, p) {
		if (p == ignored_task
				|| p->exit_state
				|| p->real_parent->pid == 1)
			continue;
		if (process_group(p->real_parent) != pgrp
			    && p->real_parent->signal->session == p->signal->session) {
			ret = 0;
			break;
		}
	} while_each_task_pid(pgrp, PIDTYPE_PGID, p);
	return ret;	/* (sighing) "Often!" */
}

int is_orphaned_pgrp(int pgrp)
{
	int retval;

	read_lock(&tasklist_lock);
	retval = will_become_orphaned_pgrp(pgrp, NULL);
	read_unlock(&tasklist_lock);

	return retval;
}

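/*
 * Descriptive note (not in the original source): return nonzero if any
 * member of process group @pgrp is stopped, ignoring tasks that are
 * only held by a debugger on a signal that would not normally stop
 * them.  The pid-hash walk below requires tasklist_lock to be held.
 */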
static int has_stopped_jobs(int pgrp)
{
	int retval = 0;
	struct task_struct *p;

	do_each_task_pid(pgrp, PIDTYPE_PGID, p) {
		if (p->state != TASK_STOPPED)
			continue;

		/*
		 * If p is stopped by a debugger on a signal that won't
		 * stop it, then don't count p as stopped.  This isn't
		 * perfect but it's a good approximation.
		 */
		if (unlikely(p->ptrace)
		    && p->exit_code != SIGSTOP
		    && p->exit_code != SIGTSTP
		    && p->exit_code != SIGTTOU
		    && p->exit_code != SIGTTIN)
			continue;

		retval = 1;
		break;
	} while_each_task_pid(pgrp, PIDTYPE_PGID, p);
	return retval;
}

/**
 * reparent_to_init - Reparent the calling kernel thread to the init task.
 *
 * If a kernel thread is launched as a result of a system call, or if
 * it ever exits, it should generally reparent itself to init so that
 * it is correctly cleaned up on exit.
 *
 * Various pieces of task state, such as scheduling policy and priority,
 * may have been inherited from a user process, so we reset them to sane
 * values here.
 *
 * NOTE that reparent_to_init() gives the caller full capabilities.
 */
static void reparent_to_init(void)
{
	write_lock_irq(&tasklist_lock);

	ptrace_unlink(current);
	/* Reparent to init */
	remove_parent(current);
	current->parent = child_reaper;
	current->real_parent = child_reaper;
	add_parent(current);

	/* Set the exit signal to SIGCHLD so we signal init on exit */
	current->exit_signal = SIGCHLD;

	if ((current->policy == SCHED_NORMAL ||
			current->policy == SCHED_BATCH)
				&& (task_nice(current) < 0))
		set_user_nice(current, 0);
	/* cpus_allowed? */
	/* rt_priority? */
	/* signals? */
	security_task_reparent_to_init(current);
	memcpy(current->signal->rlim, init_task.signal->rlim,
	       sizeof(current->signal->rlim));
	atomic_inc(&(INIT_USER->__count));
	write_unlock_irq(&tasklist_lock);
	switch_uid(INIT_USER);
}

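/*
 * Descriptive note (not in the original source): move the calling
 * process's group leader into session @session and process group
 * @pgrp.  The caller must hold tasklist_lock write-locked;
 * set_special_pids() below is the locking wrapper.
 */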
void __set_special_pids(pid_t session, pid_t pgrp)
{
	struct task_struct *curr = current->group_leader;

	if (curr->signal->session != session) {
		detach_pid(curr, PIDTYPE_SID);
		curr->signal->session = session;
		attach_pid(curr, PIDTYPE_SID, session);
	}
	if (process_group(curr) != pgrp) {
		detach_pid(curr, PIDTYPE_PGID);
		curr->signal->pgrp = pgrp;
		attach_pid(curr, PIDTYPE_PGID, pgrp);
	}
}

void set_special_pids(pid_t session, pid_t pgrp)
{
	write_lock_irq(&tasklist_lock);
	__set_special_pids(session, pgrp);
	write_unlock_irq(&tasklist_lock);
}

/*
 * Let kernel threads use this to say that they allow a certain signal
 * (since daemonize() will have disabled all of them by default).
 */
int allow_signal(int sig)
{
	if (!valid_signal(sig) || sig < 1)
		return -EINVAL;

	spin_lock_irq(&current->sighand->siglock);
	sigdelset(&current->blocked, sig);
	if (!current->mm) {
		/*
		 * Kernel threads handle their own signals.  Let the
		 * signal code know it'll be handled, so that they
		 * don't get converted to SIGKILL or just silently
		 * dropped.
		 */
		current->sighand->action[(sig)-1].sa.sa_handler = (void __user *)2;
	}
	recalc_sigpending();
	spin_unlock_irq(&current->sighand->siglock);
	return 0;
}

EXPORT_SYMBOL(allow_signal);

int disallow_signal(int sig)
{
	if (!valid_signal(sig) || sig < 1)
		return -EINVAL;

	spin_lock_irq(&current->sighand->siglock);
	sigaddset(&current->blocked, sig);
	recalc_sigpending();
	spin_unlock_irq(&current->sighand->siglock);
	return 0;
}

EXPORT_SYMBOL(disallow_signal);

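/*
 * Illustrative sketch (not from the original source): how a 2.6-era
 * kernel thread would typically combine daemonize() below with
 * allow_signal() above.  The thread function, its name, and its work
 * loop are hypothetical:
 *
 *	static int my_kthread(void *unused)
 *	{
 *		daemonize("my_kthread");
 *		allow_signal(SIGKILL);
 *
 *		while (!signal_pending(current))
 *			schedule_timeout_interruptible(HZ);
 *
 *		return 0;
 *	}
 *
 * Such a thread would usually be started from module init code with
 * kernel_thread(my_kthread, NULL, CLONE_KERNEL).
 */
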
/*
 * Put all the gunge required to become a kernel thread without
 * attached user resources in one place where it belongs.
 */

void daemonize(const char *name, ...)
{
	va_list args;
	struct fs_struct *fs;
	sigset_t blocked;

	va_start(args, name);
	vsnprintf(current->comm, sizeof(current->comm), name, args);
	va_end(args);

	/*
	 * If we were started as a result of loading a module, close all of
	 * the user space pages.  We don't need them, and if we didn't close
	 * them they would be locked into memory.
	 */
	exit_mm(current);

	set_special_pids(1, 1);
	mutex_lock(&tty_mutex);
	current->signal->tty = NULL;
	mutex_unlock(&tty_mutex);

	/* Block and flush all signals */
	sigfillset(&blocked);
	sigprocmask(SIG_BLOCK, &blocked, NULL);
	flush_signals(current);

	/* Become as one with the init task */

	exit_fs(current);	/* current->fs->count--; */
	fs = init_task.fs;
	current->fs = fs;
	atomic_inc(&fs->count);