// SPDX-License-Identifier: GPL-2.0-only
/*
 * Generic pidhash and scalable, time-bounded PID allocator
 *
 * (C) 2002-2003 Nadia Yvette Chambers, IBM
 * (C) 2004 Nadia Yvette Chambers, Oracle
 * (C) 2002-2004 Ingo Molnar, Red Hat
 *
 * pid-structures are backing objects for tasks sharing a given ID to chain
 * against. There is very little to them aside from hashing them and
 * parking tasks using given IDs on a list.
 *
 * The task lists hanging off each pid are only changed with the
 * tasklist_lock write-acquired and are traversed either with the
 * tasklist_lock at least read-acquired or under rcu_read_lock(), so
 * there's no additional SMP locking needed here.
 *
 * PIDs themselves are allocated out of a per-namespace IDR (see
 * alloc_pid() below); the bitmap-page allocator this paragraph once
 * described is gone. Allocation and freeing run under pidmap_lock,
 * with the typical fastpath being a single cyclic IDR allocation;
 * freeing is one idr_remove() per namespace level.
 *
 * Pid namespaces:
 *    (C) 2007 Pavel Emelyanov <xemul@openvz.org>, OpenVZ, SWsoft Inc.
 *    (C) 2007 Sukadev Bhattiprolu <sukadev@us.ibm.com>, IBM
 *     Many thanks to Oleg Nesterov for comments and help
 *
 */

#include <linux/mm.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/rculist.h>
#include <linux/memblock.h>
#include <linux/pid_namespace.h>
#include <linux/init_task.h>
#include <linux/syscalls.h>
#include <linux/proc_ns.h>
#include <linux/proc_fs.h>
#include <linux/anon_inodes.h>
#include <linux/sched/signal.h>
#include <linux/sched/task.h>
#include <linux/idr.h>

struct pid init_struct_pid = {
        .count          = ATOMIC_INIT(1),
        .tasks          = {
                { .first = NULL },
                { .first = NULL },
                { .first = NULL },
        },
        .level          = 0,
        .numbers        = { {
                .nr             = 0,
                .ns             = &init_pid_ns,
        }, }
};

int pid_max = PID_MAX_DEFAULT;

#define RESERVED_PIDS           300

int pid_max_min = RESERVED_PIDS + 1;
int pid_max_max = PID_MAX_LIMIT;

/*
 * The IDR nodes backing each namespace's PID space start out
 * unallocated; they get allocated upon first use and are never
 * deallocated. This way a low pid_max value does not cause much
 * memory to be allocated up front, but the scheme still scales up
 * to 4 million PIDs at runtime.
 */
struct pid_namespace init_pid_ns = {
        .kref = KREF_INIT(2),
        .idr = IDR_INIT(init_pid_ns.idr),
        .pid_allocated = PIDNS_ADDING,
        .level = 0,
        .child_reaper = &init_task,
        .user_ns = &init_user_ns,
        .ns.inum = PROC_PID_INIT_INO,
#ifdef CONFIG_PID_NS
        .ns.ops = &pidns_operations,
#endif
};
EXPORT_SYMBOL_GPL(init_pid_ns);

/*
 * Note: disable interrupts while the pidmap_lock is held as an
 * interrupt might come in and do read_lock(&tasklist_lock).
 *
 * If we don't disable interrupts there is a nasty deadlock between
 * detach_pid()->free_pid() and another cpu that does
 * spin_lock(&pidmap_lock) followed by an interrupt routine that does
 * read_lock(&tasklist_lock);
 *
 * After we clean up the tasklist_lock and know there are no
 * irq handlers that take it we can leave the interrupts enabled.
 * For now it is easier to be safe than to prove it can't happen.
 */

static __cacheline_aligned_in_smp DEFINE_SPINLOCK(pidmap_lock);
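
/*
 * A minimal sketch of the deadlock the comment above describes, assuming
 * interrupts were left enabled while pidmap_lock is held:
 *
 *      CPU A                                   CPU B
 *      -----                                   -----
 *      write_lock_irq(&tasklist_lock);         spin_lock(&pidmap_lock);
 *      detach_pid() -> free_pid():             <interrupt arrives>
 *        spin_lock(&pidmap_lock);                read_lock(&tasklist_lock);
 *        ... spins, B holds pidmap_lock ...      ... spins, A holds tasklist_lock ...
 *
 * Disabling interrupts while pidmap_lock is held breaks the cycle on CPU B.
 */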

void put_pid(struct pid *pid)
{
        struct pid_namespace *ns;

        if (!pid)
                return;

        ns = pid->numbers[pid->level].ns;
        if ((atomic_read(&pid->count) == 1) ||
             atomic_dec_and_test(&pid->count)) {
                kmem_cache_free(ns->pid_cachep, pid);
                put_pid_ns(ns);
        }
}
EXPORT_SYMBOL_GPL(put_pid);
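
/*
 * A minimal usage sketch: struct pid is reference counted, so a caller
 * that wants to keep a pid alive beyond an RCU read section pairs a get
 * with a later put_pid(). The helper name example_stash_pid() is
 * hypothetical.
 *
 *      static struct pid *example_stash_pid(struct task_struct *task)
 *      {
 *              struct pid *pid;
 *
 *              pid = get_task_pid(task, PIDTYPE_PID); // takes a reference
 *              return pid;     // caller must eventually put_pid(pid)
 *      }
 */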

static void delayed_put_pid(struct rcu_head *rhp)
{
        struct pid *pid = container_of(rhp, struct pid, rcu);
        put_pid(pid);
}

void free_pid(struct pid *pid)
{
        /* We can be called with write_lock_irq(&tasklist_lock) held */
        int i;
        unsigned long flags;

        spin_lock_irqsave(&pidmap_lock, flags);
        for (i = 0; i <= pid->level; i++) {
                struct upid *upid = pid->numbers + i;
                struct pid_namespace *ns = upid->ns;
                switch (--ns->pid_allocated) {
                case 2:
                case 1:
                        /* When all that is left in the pid namespace
                         * is the reaper, wake up the reaper. The reaper
                         * may be sleeping in zap_pid_ns_processes().
                         */
                        wake_up_process(ns->child_reaper);
                        break;
                case PIDNS_ADDING:
                        /* Handle a fork failure of the first process */
                        WARN_ON(ns->child_reaper);
                        ns->pid_allocated = 0;
                        /* fall through */
                case 0:
                        schedule_work(&ns->proc_work);
                        break;
                }

                idr_remove(&ns->idr, upid->nr);
        }
        spin_unlock_irqrestore(&pidmap_lock, flags);

        call_rcu(&pid->rcu, delayed_put_pid);
}
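
/*
 * Why call_rcu() rather than freeing immediately: lockless readers such
 * as find_pid_ns() may still hold a pointer obtained from the IDR. A
 * sketch of the protected pattern (find_get_pid() below is the canonical
 * instance):
 *
 *      rcu_read_lock();
 *      pid = find_vpid(nr);            // may race with free_pid()
 *      if (pid)
 *              get_pid(pid);           // memory stays valid for the whole
 *      rcu_read_unlock();              // RCU read-side critical section
 */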

struct pid *alloc_pid(struct pid_namespace *ns)
{
        struct pid *pid;
        enum pid_type type;
        int i, nr;
        struct pid_namespace *tmp;
        struct upid *upid;
        int retval = -ENOMEM;

        pid = kmem_cache_alloc(ns->pid_cachep, GFP_KERNEL);
        if (!pid)
                return ERR_PTR(retval);

        tmp = ns;
        pid->level = ns->level;

        for (i = ns->level; i >= 0; i--) {
                int pid_min = 1;

                idr_preload(GFP_KERNEL);
                spin_lock_irq(&pidmap_lock);

                /*
                 * init really needs pid 1, but after reaching the maximum
                 * wrap back to RESERVED_PIDS
                 */
                if (idr_get_cursor(&tmp->idr) > RESERVED_PIDS)
                        pid_min = RESERVED_PIDS;

                /*
                 * Store a null pointer so find_pid_ns does not find
                 * a partially initialized PID (see below).
                 */
                nr = idr_alloc_cyclic(&tmp->idr, NULL, pid_min,
                                      pid_max, GFP_ATOMIC);
                spin_unlock_irq(&pidmap_lock);
                idr_preload_end();

                if (nr < 0) {
                        retval = (nr == -ENOSPC) ? -EAGAIN : nr;
                        goto out_free;
                }

                pid->numbers[i].nr = nr;
                pid->numbers[i].ns = tmp;
                tmp = tmp->parent;
        }

        if (unlikely(is_child_reaper(pid))) {
                if (pid_ns_prepare_proc(ns))
                        goto out_free;
        }

        get_pid_ns(ns);
        atomic_set(&pid->count, 1);
        for (type = 0; type < PIDTYPE_MAX; ++type)
                INIT_HLIST_HEAD(&pid->tasks[type]);

        init_waitqueue_head(&pid->wait_pidfd);

        upid = pid->numbers + ns->level;
        spin_lock_irq(&pidmap_lock);
        if (!(ns->pid_allocated & PIDNS_ADDING))
                goto out_unlock;
        for ( ; upid >= pid->numbers; --upid) {
                /* Make the PID visible to find_pid_ns. */
                idr_replace(&upid->ns->idr, pid, upid->nr);
                upid->ns->pid_allocated++;
        }
        spin_unlock_irq(&pidmap_lock);

        return pid;

out_unlock:
        spin_unlock_irq(&pidmap_lock);
        put_pid_ns(ns);

out_free:
        spin_lock_irq(&pidmap_lock);
        while (++i <= ns->level) {
                upid = pid->numbers + i;
                idr_remove(&upid->ns->idr, upid->nr);
        }

        /* On failure to allocate the first pid, reset the state */
        if (ns->pid_allocated == PIDNS_ADDING)
                idr_set_cursor(&ns->idr, 0);

        spin_unlock_irq(&pidmap_lock);

        kmem_cache_free(ns->pid_cachep, pid);
        return ERR_PTR(retval);
}
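
/*
 * A rough caller-side sketch: alloc_pid() reports failure ERR_PTR()-style,
 * so callers check with IS_ERR()/PTR_ERR(), roughly the way copy_process()
 * in kernel/fork.c consumes it (the error label here is illustrative):
 *
 *      pid = alloc_pid(p->nsproxy->pid_ns_for_children);
 *      if (IS_ERR(pid)) {
 *              retval = PTR_ERR(pid);
 *              goto bad_fork_cleanup;
 *      }
 */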

void disable_pid_allocation(struct pid_namespace *ns)
{
        spin_lock_irq(&pidmap_lock);
        ns->pid_allocated &= ~PIDNS_ADDING;
        spin_unlock_irq(&pidmap_lock);
}

struct pid *find_pid_ns(int nr, struct pid_namespace *ns)
{
        return idr_find(&ns->idr, nr);
}
EXPORT_SYMBOL_GPL(find_pid_ns);

struct pid *find_vpid(int nr)
{
        return find_pid_ns(nr, task_active_pid_ns(current));
}
EXPORT_SYMBOL_GPL(find_vpid);
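
/*
 * A minimal lookup sketch: find_pid_ns()/find_vpid() return a pointer
 * without taking a reference, so the caller must hold rcu_read_lock():
 *
 *      rcu_read_lock();
 *      pid = find_vpid(nr);    // valid only inside this read section,
 *      ...                     // or get_pid(pid) to keep it afterwards
 *      rcu_read_unlock();
 */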

static struct pid **task_pid_ptr(struct task_struct *task, enum pid_type type)
{
        return (type == PIDTYPE_PID) ?
                &task->thread_pid :
                &task->signal->pids[type];
}

/*
 * attach_pid() must be called with the tasklist_lock write-held.
 */
void attach_pid(struct task_struct *task, enum pid_type type)
{
        struct pid *pid = *task_pid_ptr(task, type);
        hlist_add_head_rcu(&task->pid_links[type], &pid->tasks[type]);
}

static void __change_pid(struct task_struct *task, enum pid_type type,
                        struct pid *new)
{
        struct pid **pid_ptr = task_pid_ptr(task, type);
        struct pid *pid;
        int tmp;

        pid = *pid_ptr;

        hlist_del_rcu(&task->pid_links[type]);
        *pid_ptr = new;

        for (tmp = PIDTYPE_MAX; --tmp >= 0; )
                if (!hlist_empty(&pid->tasks[tmp]))
                        return;

        free_pid(pid);
}

void detach_pid(struct task_struct *task, enum pid_type type)
{
        __change_pid(task, type, NULL);
}

void change_pid(struct task_struct *task, enum pid_type type,
                struct pid *pid)
{
        __change_pid(task, type, pid);
        attach_pid(task, type);
}

/* transfer_pid is an optimization of attach_pid(new), detach_pid(old) */
void transfer_pid(struct task_struct *old, struct task_struct *new,
                           enum pid_type type)
{
        if (type == PIDTYPE_PID)
                new->thread_pid = old->thread_pid;
        hlist_replace_rcu(&old->pid_links[type], &new->pid_links[type]);
}

struct task_struct *pid_task(struct pid *pid, enum pid_type type)
{
        struct task_struct *result = NULL;
        if (pid) {
                struct hlist_node *first;
                first = rcu_dereference_check(hlist_first_rcu(&pid->tasks[type]),
                                              lockdep_tasklist_lock_is_held());
                if (first)
                        result = hlist_entry(first, struct task_struct, pid_links[(type)]);
        }
        return result;
}
EXPORT_SYMBOL(pid_task);

/*
 * Must be called under rcu_read_lock().
 */
struct task_struct *find_task_by_pid_ns(pid_t nr, struct pid_namespace *ns)
{
        RCU_LOCKDEP_WARN(!rcu_read_lock_held(),
                         "find_task_by_pid_ns() needs rcu_read_lock() protection");
        return pid_task(find_pid_ns(nr, ns), PIDTYPE_PID);
}

struct task_struct *find_task_by_vpid(pid_t vnr)
{
        return find_task_by_pid_ns(vnr, task_active_pid_ns(current));
}

struct task_struct *find_get_task_by_vpid(pid_t nr)
{
        struct task_struct *task;

        rcu_read_lock();
        task = find_task_by_vpid(nr);
        if (task)
                get_task_struct(task);
        rcu_read_unlock();

        return task;
}
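
/*
 * A caller-side sketch: a successful find_get_task_by_vpid() elevates the
 * task's reference count, so it must be paired with put_task_struct():
 *
 *      struct task_struct *task;
 *
 *      task = find_get_task_by_vpid(nr);
 *      if (!task)
 *              return -ESRCH;
 *      // ... task cannot be released under us here ...
 *      put_task_struct(task);
 */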

struct pid *get_task_pid(struct task_struct *task, enum pid_type type)
{
        struct pid *pid;
        rcu_read_lock();
        pid = get_pid(rcu_dereference(*task_pid_ptr(task, type)));
        rcu_read_unlock();
        return pid;
}
EXPORT_SYMBOL_GPL(get_task_pid);
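
/*
 * A short sketch: combining get_task_pid() with pid_vnr() (below) answers
 * "what is this task's ID as seen from my pid namespace":
 *
 *      struct pid *pid = get_task_pid(task, PIDTYPE_TGID);
 *      pid_t nr = pid_vnr(pid);        // 0 if not visible in this namespace
 *      put_pid(pid);
 */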

struct task_struct *get_pid_task(struct pid *pid, enum pid_type type)
{
        struct task_struct *result;
        rcu_read_lock();
        result = pid_task(pid, type);
        if (result)
                get_task_struct(result);
        rcu_read_unlock();
        return result;
}
EXPORT_SYMBOL_GPL(get_pid_task);

struct pid *find_get_pid(pid_t nr)
{
        struct pid *pid;

        rcu_read_lock();
        pid = get_pid(find_vpid(nr));
        rcu_read_unlock();

        return pid;
}
EXPORT_SYMBOL_GPL(find_get_pid);

pid_t pid_nr_ns(struct pid *pid, struct pid_namespace *ns)
{
        struct upid *upid;
        pid_t nr = 0;

        if (pid && ns->level <= pid->level) {
                upid = &pid->numbers[ns->level];
                if (upid->ns == ns)
                        nr = upid->nr;
        }
        return nr;
}
EXPORT_SYMBOL_GPL(pid_nr_ns);
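
/*
 * A worked example: a pid living in a level-2 namespace carries one
 * struct upid per level in pid->numbers[]. If its numbers are 1042 at
 * level 0, 512 at level 1 and 7 at level 2 (values illustrative):
 *
 *      pid_nr_ns(pid, level2_ns) == 7          // its own namespace
 *      pid_nr_ns(pid, level1_ns) == 512        // as seen one level up
 *      pid_nr_ns(pid, level3_ns) == 0          // deeper ns: rejected by
 *                                              // the level check above
 */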

pid_t pid_vnr(struct pid *pid)
{
        return pid_nr_ns(pid, task_active_pid_ns(current));
}
EXPORT_SYMBOL_GPL(pid_vnr);

pid_t __task_pid_nr_ns(struct task_struct *task, enum pid_type type,
                        struct pid_namespace *ns)
{
        pid_t nr = 0;

        rcu_read_lock();
        if (!ns)
                ns = task_active_pid_ns(current);
        if (likely(pid_alive(task)))
                nr = pid_nr_ns(rcu_dereference(*task_pid_ptr(task, type)), ns);
        rcu_read_unlock();

        return nr;
}
EXPORT_SYMBOL(__task_pid_nr_ns);

struct pid_namespace *task_active_pid_ns(struct task_struct *tsk)
{
        return ns_of_pid(task_pid(tsk));
}
EXPORT_SYMBOL_GPL(task_active_pid_ns);

/*
 * Used by proc to find the first pid that is greater than or equal to nr.
 *
 * If there is a pid at nr this function is exactly the same as find_pid_ns.
 */
struct pid *find_ge_pid(int nr, struct pid_namespace *ns)
{
        return idr_get_next(&ns->idr, &nr);
}
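
/*
 * An iteration sketch: proc walks the whole pid space with find_ge_pid(),
 * roughly like next_tgid() in fs/proc/base.c:
 *
 *      for (pid = find_ge_pid(nr, ns); pid; pid = find_ge_pid(nr + 1, ns)) {
 *              nr = pid_nr_ns(pid, ns);
 *              // ... emit one entry for nr ...
 *      }
 */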

/**
 * pidfd_create() - Create a new pid file descriptor.
 *
 * @pid:  struct pid that the pidfd will reference
 *
 * This creates a new pid file descriptor with the O_CLOEXEC flag set.
 *
 * Note that this function can only be called after the fd table has
 * been unshared to avoid leaking the pidfd to the new process.
 *
 * Return: On success, a cloexec pidfd is returned.
 *         On error, a negative errno number will be returned.
 */
static int pidfd_create(struct pid *pid)
{
        int fd;

        fd = anon_inode_getfd("[pidfd]", &pidfd_fops, get_pid(pid),
                              O_RDWR | O_CLOEXEC);
        if (fd < 0)
                put_pid(pid);

        return fd;
}

/**
 * pidfd_open() - Open new pid file descriptor.
 *
 * @pid:   pid for which to retrieve a pidfd
 * @flags: flags to pass
 *
 * This creates a new pid file descriptor with the O_CLOEXEC flag set for
 * the process identified by @pid. Currently, the process identified by
 * @pid must be a thread-group leader. This restriction currently exists
 * for all aspects of pidfds including pidfd creation (CLONE_PIDFD cannot
 * be used with CLONE_THREAD) and pidfd polling (only supports thread group
 * leaders).
 *
 * Return: On success, a cloexec pidfd is returned.
 *         On error, a negative errno number will be returned.
 */
SYSCALL_DEFINE2(pidfd_open, pid_t, pid, unsigned int, flags)
{
        int fd, ret;
        struct pid *p;

        if (flags)
                return -EINVAL;

        if (pid <= 0)
                return -EINVAL;

        p = find_get_pid(pid);
        if (!p)
                return -ESRCH;

        ret = 0;
        rcu_read_lock();
        if (!pid_task(p, PIDTYPE_TGID))
                ret = -EINVAL;
        rcu_read_unlock();

        fd = ret ?: pidfd_create(p);
        put_pid(p);
        return fd;
}
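
/*
 * A userspace sketch, assuming no libc wrapper exists yet (true when this
 * syscall was introduced), so pidfd_open() is reached via raw syscall();
 * the returned fd can be handed to poll() and becomes readable when the
 * process exits:
 *
 *      int pidfd = syscall(__NR_pidfd_open, pid, 0);   // flags must be 0
 *      struct pollfd pfd = { .fd = pidfd, .events = POLLIN };
 *
 *      poll(&pfd, 1, -1);      // wakes up once the process has exited
 *      close(pidfd);
 */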

void __init pid_idr_init(void)
{
        /* Verify no one has done anything silly: */
        BUILD_BUG_ON(PID_MAX_LIMIT >= PIDNS_ADDING);

        /* bump default and minimum pid_max based on number of cpus */
        pid_max = min(pid_max_max, max_t(int, pid_max,
                                PIDS_PER_CPU_DEFAULT * num_possible_cpus()));
        pid_max_min = max_t(int, pid_max_min,
                                PIDS_PER_CPU_MIN * num_possible_cpus());
        pr_info("pid_max: default: %u minimum: %u\n", pid_max, pid_max_min);

        idr_init(&init_pid_ns.idr);

        init_pid_ns.pid_cachep = KMEM_CACHE(pid,
                        SLAB_HWCACHE_ALIGN | SLAB_PANIC | SLAB_ACCOUNT);
}
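
/*
 * A worked example, assuming the usual <linux/threads.h> values
 * PIDS_PER_CPU_DEFAULT == 1024 and PID_MAX_DEFAULT == 32768: on a box
 * with 64 possible CPUs,
 *
 *      pid_max = min(pid_max_max, max(32768, 1024 * 64)) = 65536
 *
 * so large machines get a wider pid space than the default, capped at
 * PID_MAX_LIMIT.
 */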