// SPDX-License-Identifier: GPL-2.0
/*
 *  linux/kernel/sys.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 */

#include <linux/export.h>
#include <linux/mm_inline.h>
#include <linux/utsname.h>
#include <linux/mman.h>
#include <linux/reboot.h>
#include <linux/prctl.h>
#include <linux/highuid.h>
#include <linux/kmod.h>
#include <linux/ksm.h>
#include <linux/perf_event.h>
#include <linux/resource.h>
#include <linux/kernel.h>
#include <linux/workqueue.h>
#include <linux/capability.h>
#include <linux/device.h>
#include <linux/key.h>
#include <linux/times.h>
#include <linux/posix-timers.h>
#include <linux/security.h>
#include <linux/random.h>
#include <linux/suspend.h>
#include <linux/tty.h>
#include <linux/signal.h>
#include <linux/cn_proc.h>
#include <linux/getcpu.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/seccomp.h>
#include <linux/cpu.h>
#include <linux/personality.h>
#include <linux/ptrace.h>
#include <linux/fs_struct.h>
#include <linux/file.h>
#include <linux/mount.h>
#include <linux/gfp.h>
#include <linux/syscore_ops.h>
#include <linux/version.h>
#include <linux/ctype.h>
#include <linux/syscall_user_dispatch.h>

#include <linux/compat.h>
#include <linux/syscalls.h>
#include <linux/kprobes.h>
#include <linux/user_namespace.h>
#include <linux/time_namespace.h>
#include <linux/binfmts.h>

#include <linux/sched.h>
#include <linux/sched/autogroup.h>
#include <linux/sched/loadavg.h>
#include <linux/sched/stat.h>
#include <linux/sched/mm.h>
#include <linux/sched/coredump.h>
#include <linux/sched/task.h>
#include <linux/sched/cputime.h>
#include <linux/rcupdate.h>
#include <linux/uidgid.h>
#include <linux/cred.h>

#include <linux/nospec.h>

#include <linux/kmsg_dump.h>
/* Move somewhere else to avoid recompiling? */
#include <generated/utsrelease.h>

#include <linux/uaccess.h>
#include <asm/unistd.h>
#ifndef SET_UNALIGN_CTL
# define SET_UNALIGN_CTL(a, b)	(-EINVAL)
#endif
#ifndef GET_UNALIGN_CTL
# define GET_UNALIGN_CTL(a, b)	(-EINVAL)
#endif
#ifndef SET_FPEMU_CTL
# define SET_FPEMU_CTL(a, b)	(-EINVAL)
#endif
#ifndef GET_FPEMU_CTL
# define GET_FPEMU_CTL(a, b)	(-EINVAL)
#endif
#ifndef SET_FPEXC_CTL
# define SET_FPEXC_CTL(a, b)	(-EINVAL)
#endif
#ifndef GET_FPEXC_CTL
# define GET_FPEXC_CTL(a, b)	(-EINVAL)
#endif
#ifndef GET_ENDIAN
# define GET_ENDIAN(a, b)	(-EINVAL)
#endif
#ifndef SET_ENDIAN
# define SET_ENDIAN(a, b)	(-EINVAL)
#endif
#ifndef GET_TSC_CTL
# define GET_TSC_CTL(a)		(-EINVAL)
#endif
#ifndef SET_TSC_CTL
# define SET_TSC_CTL(a)		(-EINVAL)
#endif
#ifndef GET_FP_MODE
# define GET_FP_MODE(a)		(-EINVAL)
#endif
#ifndef SET_FP_MODE
# define SET_FP_MODE(a,b)	(-EINVAL)
#endif
#ifndef SVE_SET_VL
# define SVE_SET_VL(a)		(-EINVAL)
#endif
#ifndef SVE_GET_VL
# define SVE_GET_VL()		(-EINVAL)
#endif
#ifndef SME_SET_VL
# define SME_SET_VL(a)		(-EINVAL)
#endif
#ifndef SME_GET_VL
# define SME_GET_VL()		(-EINVAL)
#endif
#ifndef PAC_RESET_KEYS
# define PAC_RESET_KEYS(a, b)	(-EINVAL)
#endif
#ifndef PAC_SET_ENABLED_KEYS
# define PAC_SET_ENABLED_KEYS(a, b, c)	(-EINVAL)
#endif
#ifndef PAC_GET_ENABLED_KEYS
# define PAC_GET_ENABLED_KEYS(a)	(-EINVAL)
#endif
#ifndef SET_TAGGED_ADDR_CTRL
# define SET_TAGGED_ADDR_CTRL(a)	(-EINVAL)
#endif
#ifndef GET_TAGGED_ADDR_CTRL
# define GET_TAGGED_ADDR_CTRL()		(-EINVAL)
#endif
#ifndef RISCV_V_SET_CONTROL
# define RISCV_V_SET_CONTROL(a)		(-EINVAL)
#endif
#ifndef RISCV_V_GET_CONTROL
# define RISCV_V_GET_CONTROL()		(-EINVAL)
#endif
/*
 * this is where the system-wide overflow UID and GID are defined, for
 * architectures that now have 32-bit UID/GID but didn't in the past
 */

int overflowuid = DEFAULT_OVERFLOWUID;
int overflowgid = DEFAULT_OVERFLOWGID;

EXPORT_SYMBOL(overflowuid);
EXPORT_SYMBOL(overflowgid);

/*
 * the same as above, but for filesystems which can only store a 16-bit
 * UID and GID. as such, this is needed on all architectures
 */

int fs_overflowuid = DEFAULT_FS_OVERFLOWUID;
int fs_overflowgid = DEFAULT_FS_OVERFLOWGID;

EXPORT_SYMBOL(fs_overflowuid);
EXPORT_SYMBOL(fs_overflowgid);
/*
 * Returns true if current's euid is same as p's uid or euid,
 * or has CAP_SYS_NICE to p's user_ns.
 *
 * Called with rcu_read_lock, creds are safe
 */
static bool set_one_prio_perm(struct task_struct *p)
{
	const struct cred *cred = current_cred(), *pcred = __task_cred(p);

	if (uid_eq(pcred->uid,  cred->euid) ||
	    uid_eq(pcred->euid, cred->euid))
		return true;
	if (ns_capable(pcred->user_ns, CAP_SYS_NICE))
		return true;
	return false;
}

/*
 * set the priority of a task
 * - the caller must hold the RCU read lock
 */
static int set_one_prio(struct task_struct *p, int niceval, int error)
{
	int no_nice;

	if (!set_one_prio_perm(p)) {
		error = -EPERM;
		goto out;
	}
	if (niceval < task_nice(p) && !can_nice(p, niceval)) {
		error = -EACCES;
		goto out;
	}
	no_nice = security_task_setnice(p, niceval);
	if (no_nice) {
		error = no_nice;
		goto out;
	}
	if (error == -ESRCH)
		error = 0;
	set_user_nice(p, niceval);
out:
	return error;
}
SYSCALL_DEFINE3(setpriority, int, which, int, who, int, niceval)
{
	struct task_struct *g, *p;
	struct user_struct *user;
	const struct cred *cred = current_cred();
	int error = -EINVAL;
	struct pid *pgrp;
	kuid_t uid;

	if (which > PRIO_USER || which < PRIO_PROCESS)
		goto out;

	/* normalize: avoid signed division (rounding problems) */
	error = -ESRCH;
	if (niceval < MIN_NICE)
		niceval = MIN_NICE;
	if (niceval > MAX_NICE)
		niceval = MAX_NICE;

	rcu_read_lock();
	switch (which) {
	case PRIO_PROCESS:
		if (who)
			p = find_task_by_vpid(who);
		else
			p = current;
		if (p)
			error = set_one_prio(p, niceval, error);
		break;
	case PRIO_PGRP:
		if (who)
			pgrp = find_vpid(who);
		else
			pgrp = task_pgrp(current);
		read_lock(&tasklist_lock);
		do_each_pid_thread(pgrp, PIDTYPE_PGID, p) {
			error = set_one_prio(p, niceval, error);
		} while_each_pid_thread(pgrp, PIDTYPE_PGID, p);
		read_unlock(&tasklist_lock);
		break;
	case PRIO_USER:
		uid = make_kuid(cred->user_ns, who);
		user = cred->user;
		if (!who)
			uid = cred->uid;
		else if (!uid_eq(uid, cred->uid)) {
			user = find_user(uid);
			if (!user)
				goto out_unlock;	/* No processes for this user */
		}
		for_each_process_thread(g, p) {
			if (uid_eq(task_uid(p), uid) && task_pid_vnr(p))
				error = set_one_prio(p, niceval, error);
		}
		if (!uid_eq(uid, cred->uid))
			free_uid(user);		/* For find_user() */
		break;
	}
out_unlock:
	rcu_read_unlock();
out:
	return error;
}
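
/*
 * Illustrative userspace sketch (not kernel code): renice every process in
 * the caller's own process group by passing who == 0, which the PRIO_PGRP
 * branch above resolves to task_pgrp(current).
 *
 *	#include <sys/resource.h>
 *	#include <stdio.h>
 *
 *	int main(void)
 *	{
 *		if (setpriority(PRIO_PGRP, 0, 10) == -1)
 *			perror("setpriority");
 *		return 0;
 *	}
 */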
/*
 * Ugh. To avoid negative return values, "getpriority()" will
 * not return the normal nice-value, but a negated value that
 * has been offset by 20 (ie it returns 40..1 instead of -20..19)
 * to stay compatible.
 */
SYSCALL_DEFINE2(getpriority, int, which, int, who)
{
	struct task_struct *g, *p;
	struct user_struct *user;
	const struct cred *cred = current_cred();
	long niceval, retval = -ESRCH;
	struct pid *pgrp;
	kuid_t uid;

	if (which > PRIO_USER || which < PRIO_PROCESS)
		return -EINVAL;

	rcu_read_lock();
	switch (which) {
	case PRIO_PROCESS:
		if (who)
			p = find_task_by_vpid(who);
		else
			p = current;
		if (p) {
			niceval = nice_to_rlimit(task_nice(p));
			if (niceval > retval)
				retval = niceval;
		}
		break;
	case PRIO_PGRP:
		if (who)
			pgrp = find_vpid(who);
		else
			pgrp = task_pgrp(current);
		read_lock(&tasklist_lock);
		do_each_pid_thread(pgrp, PIDTYPE_PGID, p) {
			niceval = nice_to_rlimit(task_nice(p));
			if (niceval > retval)
				retval = niceval;
		} while_each_pid_thread(pgrp, PIDTYPE_PGID, p);
		read_unlock(&tasklist_lock);
		break;
	case PRIO_USER:
		uid = make_kuid(cred->user_ns, who);
		user = cred->user;
		if (!who)
			uid = cred->uid;
		else if (!uid_eq(uid, cred->uid)) {
			user = find_user(uid);
			if (!user)
				goto out_unlock;	/* No processes for this user */
		}
		for_each_process_thread(g, p) {
			if (uid_eq(task_uid(p), uid) && task_pid_vnr(p)) {
				niceval = nice_to_rlimit(task_nice(p));
				if (niceval > retval)
					retval = niceval;
			}
		}
		if (!uid_eq(uid, cred->uid))
			free_uid(user);		/* for find_user() */
		break;
	}
out_unlock:
	rcu_read_unlock();

	return retval;
}
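
/*
 * Worked example of the offset described above (illustrative, not kernel
 * code): for a task at nice -5, the raw syscall returns
 * nice_to_rlimit(-5) == 20 - (-5) == 25, and the caller maps it back with
 * 20 - 25 == -5. The glibc wrapper performs this conversion, so
 * getpriority(3) still yields -20..19.
 *
 *	#include <sys/syscall.h>
 *	#include <unistd.h>
 *
 *	long ret = syscall(SYS_getpriority, PRIO_PROCESS, 0);
 *	int nice = 20 - (int)ret;	// valid only when ret > 0 (no error)
 */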
/*
 * Unprivileged users may change the real gid to the effective gid
 * or vice versa.  (BSD-style)
 *
 * If you set the real gid at all, or set the effective gid to a value not
 * equal to the real gid, then the saved gid is set to the new effective gid.
 *
 * This makes it possible for a setgid program to completely drop its
 * privileges, which is often a useful assertion to make when you are doing
 * a security audit over a program.
 *
 * The general idea is that a program which uses just setregid() will be
 * 100% compatible with BSD.  A program which uses just setgid() will be
 * 100% compatible with POSIX with saved IDs.
 *
 * SMP: There are no races, the GIDs are checked only by filesystem
 *      operations (as far as semantic preservation is concerned).
 */
#ifdef CONFIG_MULTIUSER
long __sys_setregid(gid_t rgid, gid_t egid)
{
	struct user_namespace *ns = current_user_ns();
	const struct cred *old;
	struct cred *new;
	int retval;
	kgid_t krgid, kegid;

	krgid = make_kgid(ns, rgid);
	kegid = make_kgid(ns, egid);

	if ((rgid != (gid_t) -1) && !gid_valid(krgid))
		return -EINVAL;
	if ((egid != (gid_t) -1) && !gid_valid(kegid))
		return -EINVAL;

	new = prepare_creds();
	if (!new)
		return -ENOMEM;
	old = current_cred();

	retval = -EPERM;
	if (rgid != (gid_t) -1) {
		if (gid_eq(old->gid, krgid) ||
		    gid_eq(old->egid, krgid) ||
		    ns_capable_setid(old->user_ns, CAP_SETGID))
			new->gid = krgid;
		else
			goto error;
	}
	if (egid != (gid_t) -1) {
		if (gid_eq(old->gid, kegid) ||
		    gid_eq(old->egid, kegid) ||
		    gid_eq(old->sgid, kegid) ||
		    ns_capable_setid(old->user_ns, CAP_SETGID))
			new->egid = kegid;
		else
			goto error;
	}

	if (rgid != (gid_t) -1 ||
	    (egid != (gid_t) -1 && !gid_eq(kegid, old->gid)))
		new->sgid = new->egid;
	new->fsgid = new->egid;

	retval = security_task_fix_setgid(new, old, LSM_SETID_RE);
	if (retval < 0)
		goto error;

	return commit_creds(new);

error:
	abort_creds(new);
	return retval;
}

SYSCALL_DEFINE2(setregid, gid_t, rgid, gid_t, egid)
{
	return __sys_setregid(rgid, egid);
}
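
/*
 * Illustrative userspace sketch (not kernel code) of the permanent drop
 * described above: a setgid program gives up its effective and saved gid
 * by setting the real gid, which forces sgid to the new egid per the rule
 * implemented in __sys_setregid().
 *
 *	gid_t rgid = getgid();
 *	if (setregid(rgid, rgid) == -1)
 *		abort();	// refuse to keep running half-privileged
 */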
/*
 * setgid() is implemented like SysV w/ SAVED_IDS
 *
 * SMP: Same implicit races as above.
 */
long __sys_setgid(gid_t gid)
{
	struct user_namespace *ns = current_user_ns();
	const struct cred *old;
	struct cred *new;
	int retval;
	kgid_t kgid;

	kgid = make_kgid(ns, gid);
	if (!gid_valid(kgid))
		return -EINVAL;

	new = prepare_creds();
	if (!new)
		return -ENOMEM;
	old = current_cred();

	retval = -EPERM;
	if (ns_capable_setid(old->user_ns, CAP_SETGID))
		new->gid = new->egid = new->sgid = new->fsgid = kgid;
	else if (gid_eq(kgid, old->gid) || gid_eq(kgid, old->sgid))
		new->egid = new->fsgid = kgid;
	else
		goto error;

	retval = security_task_fix_setgid(new, old, LSM_SETID_ID);
	if (retval < 0)
		goto error;

	return commit_creds(new);

error:
	abort_creds(new);
	return retval;
}

SYSCALL_DEFINE1(setgid, gid_t, gid)
{
	return __sys_setgid(gid);
}
/*
 * change the user struct in a credentials set to match the new UID
 */
static int set_user(struct cred *new)
{
	struct user_struct *new_user;

	new_user = alloc_uid(new->uid);
	if (!new_user)
		return -EAGAIN;

	free_uid(new->user);
	new->user = new_user;
	return 0;
}

static void flag_nproc_exceeded(struct cred *new)
{
	if (new->ucounts == current_ucounts())
		return;

	/*
	 * We don't fail in case of NPROC limit excess here because too many
	 * poorly written programs don't check set*uid() return code, assuming
	 * it never fails if called by root.  We may still enforce NPROC limit
	 * for programs doing set*uid()+execve() by harmlessly deferring the
	 * failure to the execve() stage.
	 */
	if (is_rlimit_overlimit(new->ucounts, UCOUNT_RLIMIT_NPROC, rlimit(RLIMIT_NPROC)) &&
			new->user != INIT_USER)
		current->flags |= PF_NPROC_EXCEEDED;
	else
		current->flags &= ~PF_NPROC_EXCEEDED;
}
/*
 * Unprivileged users may change the real uid to the effective uid
 * or vice versa.  (BSD-style)
 *
 * If you set the real uid at all, or set the effective uid to a value not
 * equal to the real uid, then the saved uid is set to the new effective uid.
 *
 * This makes it possible for a setuid program to completely drop its
 * privileges, which is often a useful assertion to make when you are doing
 * a security audit over a program.
 *
 * The general idea is that a program which uses just setreuid() will be
 * 100% compatible with BSD.  A program which uses just setuid() will be
 * 100% compatible with POSIX with saved IDs.
 */
long __sys_setreuid(uid_t ruid, uid_t euid)
{
	struct user_namespace *ns = current_user_ns();
	const struct cred *old;
	struct cred *new;
	int retval;
	kuid_t kruid, keuid;

	kruid = make_kuid(ns, ruid);
	keuid = make_kuid(ns, euid);

	if ((ruid != (uid_t) -1) && !uid_valid(kruid))
		return -EINVAL;
	if ((euid != (uid_t) -1) && !uid_valid(keuid))
		return -EINVAL;

	new = prepare_creds();
	if (!new)
		return -ENOMEM;
	old = current_cred();

	retval = -EPERM;
	if (ruid != (uid_t) -1) {
		new->uid = kruid;
		if (!uid_eq(old->uid, kruid) &&
		    !uid_eq(old->euid, kruid) &&
		    !ns_capable_setid(old->user_ns, CAP_SETUID))
			goto error;
	}

	if (euid != (uid_t) -1) {
		new->euid = keuid;
		if (!uid_eq(old->uid, keuid) &&
		    !uid_eq(old->euid, keuid) &&
		    !uid_eq(old->suid, keuid) &&
		    !ns_capable_setid(old->user_ns, CAP_SETUID))
			goto error;
	}

	if (!uid_eq(new->uid, old->uid)) {
		retval = set_user(new);
		if (retval < 0)
			goto error;
	}
	if (ruid != (uid_t) -1 ||
	    (euid != (uid_t) -1 && !uid_eq(keuid, old->uid)))
		new->suid = new->euid;
	new->fsuid = new->euid;

	retval = security_task_fix_setuid(new, old, LSM_SETID_RE);
	if (retval < 0)
		goto error;

	retval = set_cred_ucounts(new);
	if (retval < 0)
		goto error;

	flag_nproc_exceeded(new);
	return commit_creds(new);

error:
	abort_creds(new);
	return retval;
}

SYSCALL_DEFINE2(setreuid, uid_t, ruid, uid_t, euid)
{
	return __sys_setreuid(ruid, euid);
}
/*
 * setuid() is implemented like SysV with SAVED_IDS
 *
 * Note that SAVED_ID's is deficient in that a setuid root program
 * like sendmail, for example, cannot set its uid to be a normal
 * user and then switch back, because if you're root, setuid() sets
 * the saved uid too.  If you don't like this, blame the bright people
 * in the POSIX committee and/or USG.  Note that the BSD-style setreuid()
 * will allow a root program to temporarily drop privileges and be able to
 * regain them by swapping the real and effective uid.
 */
long __sys_setuid(uid_t uid)
{
	struct user_namespace *ns = current_user_ns();
	const struct cred *old;
	struct cred *new;
	int retval;
	kuid_t kuid;

	kuid = make_kuid(ns, uid);
	if (!uid_valid(kuid))
		return -EINVAL;

	new = prepare_creds();
	if (!new)
		return -ENOMEM;
	old = current_cred();

	retval = -EPERM;
	if (ns_capable_setid(old->user_ns, CAP_SETUID)) {
		new->suid = new->uid = kuid;
		if (!uid_eq(kuid, old->uid)) {
			retval = set_user(new);
			if (retval < 0)
				goto error;
		}
	} else if (!uid_eq(kuid, old->uid) && !uid_eq(kuid, new->suid)) {
		goto error;
	}

	new->fsuid = new->euid = kuid;

	retval = security_task_fix_setuid(new, old, LSM_SETID_ID);
	if (retval < 0)
		goto error;

	retval = set_cred_ucounts(new);
	if (retval < 0)
		goto error;

	flag_nproc_exceeded(new);
	return commit_creds(new);

error:
	abort_creds(new);
	return retval;
}

SYSCALL_DEFINE1(setuid, uid_t, uid)
{
	return __sys_setuid(uid);
}
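
/*
 * Illustrative userspace sketch (not kernel code) of the BSD-style swap the
 * comment above contrasts with setuid(): a setuid-root program started by
 * an ordinary user temporarily drops privileges and later regains them by
 * swapping the real and effective uid - setuid() could not do this, because
 * it would overwrite the saved uid as well.
 *
 *	setreuid(geteuid(), getuid());	// real=root, effective=user
 *	// ... do unprivileged work ...
 *	setreuid(geteuid(), getuid());	// swap back: effective=root
 */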
/*
 * This function implements a generic ability to update ruid, euid,
 * and suid.  This allows you to implement the 4.4 compatible seteuid().
 */
long __sys_setresuid(uid_t ruid, uid_t euid, uid_t suid)
{
	struct user_namespace *ns = current_user_ns();
	const struct cred *old;
	struct cred *new;
	int retval;
	kuid_t kruid, keuid, ksuid;
	bool ruid_new, euid_new, suid_new;

	kruid = make_kuid(ns, ruid);
	keuid = make_kuid(ns, euid);
	ksuid = make_kuid(ns, suid);

	if ((ruid != (uid_t) -1) && !uid_valid(kruid))
		return -EINVAL;

	if ((euid != (uid_t) -1) && !uid_valid(keuid))
		return -EINVAL;

	if ((suid != (uid_t) -1) && !uid_valid(ksuid))
		return -EINVAL;

	old = current_cred();

	/* check for no-op */
	if ((ruid == (uid_t) -1 || uid_eq(kruid, old->uid)) &&
	    (euid == (uid_t) -1 || (uid_eq(keuid, old->euid) &&
				    uid_eq(keuid, old->fsuid))) &&
	    (suid == (uid_t) -1 || uid_eq(ksuid, old->suid)))
		return 0;

	ruid_new = ruid != (uid_t) -1        && !uid_eq(kruid, old->uid) &&
		   !uid_eq(kruid, old->euid) && !uid_eq(kruid, old->suid);
	euid_new = euid != (uid_t) -1        && !uid_eq(keuid, old->uid) &&
		   !uid_eq(keuid, old->euid) && !uid_eq(keuid, old->suid);
	suid_new = suid != (uid_t) -1        && !uid_eq(ksuid, old->uid) &&
		   !uid_eq(ksuid, old->euid) && !uid_eq(ksuid, old->suid);
	if ((ruid_new || euid_new || suid_new) &&
	    !ns_capable_setid(old->user_ns, CAP_SETUID))
		return -EPERM;

	new = prepare_creds();
	if (!new)
		return -ENOMEM;

	if (ruid != (uid_t) -1) {
		new->uid = kruid;
		if (!uid_eq(kruid, old->uid)) {
			retval = set_user(new);
			if (retval < 0)
				goto error;
		}
	}
	if (euid != (uid_t) -1)
		new->euid = keuid;
	if (suid != (uid_t) -1)
		new->suid = ksuid;
	new->fsuid = new->euid;

	retval = security_task_fix_setuid(new, old, LSM_SETID_RES);
	if (retval < 0)
		goto error;

	retval = set_cred_ucounts(new);
	if (retval < 0)
		goto error;

	flag_nproc_exceeded(new);
	return commit_creds(new);

error:
	abort_creds(new);
	return retval;
}

SYSCALL_DEFINE3(setresuid, uid_t, ruid, uid_t, euid, uid_t, suid)
{
	return __sys_setresuid(ruid, euid, suid);
}
SYSCALL_DEFINE3(getresuid, uid_t __user *, ruidp, uid_t __user *, euidp, uid_t __user *, suidp)
{
	const struct cred *cred = current_cred();
	int retval;
	uid_t ruid, euid, suid;

	ruid = from_kuid_munged(cred->user_ns, cred->uid);
	euid = from_kuid_munged(cred->user_ns, cred->euid);
	suid = from_kuid_munged(cred->user_ns, cred->suid);

	retval = put_user(ruid, ruidp);
	if (!retval) {
		retval = put_user(euid, euidp);
		if (!retval)
			return put_user(suid, suidp);
	}
	return retval;
}
/*
 * Same as above, but for rgid, egid, sgid.
 */
long __sys_setresgid(gid_t rgid, gid_t egid, gid_t sgid)
{
	struct user_namespace *ns = current_user_ns();
	const struct cred *old;
	struct cred *new;
	int retval;
	kgid_t krgid, kegid, ksgid;
	bool rgid_new, egid_new, sgid_new;

	krgid = make_kgid(ns, rgid);
	kegid = make_kgid(ns, egid);
	ksgid = make_kgid(ns, sgid);

	if ((rgid != (gid_t) -1) && !gid_valid(krgid))
		return -EINVAL;
	if ((egid != (gid_t) -1) && !gid_valid(kegid))
		return -EINVAL;
	if ((sgid != (gid_t) -1) && !gid_valid(ksgid))
		return -EINVAL;

	old = current_cred();

	/* check for no-op */
	if ((rgid == (gid_t) -1 || gid_eq(krgid, old->gid)) &&
	    (egid == (gid_t) -1 || (gid_eq(kegid, old->egid) &&
				    gid_eq(kegid, old->fsgid))) &&
	    (sgid == (gid_t) -1 || gid_eq(ksgid, old->sgid)))
		return 0;

	rgid_new = rgid != (gid_t) -1        && !gid_eq(krgid, old->gid) &&
		   !gid_eq(krgid, old->egid) && !gid_eq(krgid, old->sgid);
	egid_new = egid != (gid_t) -1        && !gid_eq(kegid, old->gid) &&
		   !gid_eq(kegid, old->egid) && !gid_eq(kegid, old->sgid);
	sgid_new = sgid != (gid_t) -1        && !gid_eq(ksgid, old->gid) &&
		   !gid_eq(ksgid, old->egid) && !gid_eq(ksgid, old->sgid);
	if ((rgid_new || egid_new || sgid_new) &&
	    !ns_capable_setid(old->user_ns, CAP_SETGID))
		return -EPERM;

	new = prepare_creds();
	if (!new)
		return -ENOMEM;

	if (rgid != (gid_t) -1)
		new->gid = krgid;
	if (egid != (gid_t) -1)
		new->egid = kegid;
	if (sgid != (gid_t) -1)
		new->sgid = ksgid;
	new->fsgid = new->egid;

	retval = security_task_fix_setgid(new, old, LSM_SETID_RES);
	if (retval < 0)
		goto error;

	return commit_creds(new);

error:
	abort_creds(new);
	return retval;
}

SYSCALL_DEFINE3(setresgid, gid_t, rgid, gid_t, egid, gid_t, sgid)
{
	return __sys_setresgid(rgid, egid, sgid);
}
SYSCALL_DEFINE3(getresgid, gid_t __user *, rgidp, gid_t __user *, egidp, gid_t __user *, sgidp)
{
	const struct cred *cred = current_cred();
	int retval;
	gid_t rgid, egid, sgid;

	rgid = from_kgid_munged(cred->user_ns, cred->gid);
	egid = from_kgid_munged(cred->user_ns, cred->egid);
	sgid = from_kgid_munged(cred->user_ns, cred->sgid);

	retval = put_user(rgid, rgidp);
	if (!retval) {
		retval = put_user(egid, egidp);
		if (!retval)
			retval = put_user(sgid, sgidp);
	}
	return retval;
}
/*
 * "setfsuid()" sets the fsuid - the uid used for filesystem checks. This
 * is used for "access()" and for the NFS daemon (letting nfsd stay at
 * whatever uid it wants to). It normally shadows "euid", except when
 * explicitly set by setfsuid() or for access..
 */
long __sys_setfsuid(uid_t uid)
{
	const struct cred *old;
	struct cred *new;
	uid_t old_fsuid;
	kuid_t kuid;

	old = current_cred();
	old_fsuid = from_kuid_munged(old->user_ns, old->fsuid);

	kuid = make_kuid(old->user_ns, uid);
	if (!uid_valid(kuid))
		return old_fsuid;

	new = prepare_creds();
	if (!new)
		return old_fsuid;

	if (uid_eq(kuid, old->uid)  || uid_eq(kuid, old->euid)  ||
	    uid_eq(kuid, old->suid) || uid_eq(kuid, old->fsuid) ||
	    ns_capable_setid(old->user_ns, CAP_SETUID)) {
		if (!uid_eq(kuid, old->fsuid)) {
			new->fsuid = kuid;
			if (security_task_fix_setuid(new, old, LSM_SETID_FS) == 0)
				goto change_okay;
		}
	}

	abort_creds(new);
	return old_fsuid;

change_okay:
	commit_creds(new);
	return old_fsuid;
}

SYSCALL_DEFINE1(setfsuid, uid_t, uid)
{
	return __sys_setfsuid(uid);
}
/*
 * "Samma på svenska.." ("same thing, in Swedish") - the setfsgid()
 * counterpart of setfsuid() above.
 */
long __sys_setfsgid(gid_t gid)
{
	const struct cred *old;
	struct cred *new;
	gid_t old_fsgid;
	kgid_t kgid;

	old = current_cred();
	old_fsgid = from_kgid_munged(old->user_ns, old->fsgid);

	kgid = make_kgid(old->user_ns, gid);
	if (!gid_valid(kgid))
		return old_fsgid;

	new = prepare_creds();
	if (!new)
		return old_fsgid;

	if (gid_eq(kgid, old->gid)  || gid_eq(kgid, old->egid)  ||
	    gid_eq(kgid, old->sgid) || gid_eq(kgid, old->fsgid) ||
	    ns_capable_setid(old->user_ns, CAP_SETGID)) {
		if (!gid_eq(kgid, old->fsgid)) {
			new->fsgid = kgid;
			if (security_task_fix_setgid(new, old, LSM_SETID_FS) == 0)
				goto change_okay;
		}
	}

	abort_creds(new);
	return old_fsgid;

change_okay:
	commit_creds(new);
	return old_fsgid;
}

SYSCALL_DEFINE1(setfsgid, gid_t, gid)
{
	return __sys_setfsgid(gid);
}
#endif /* CONFIG_MULTIUSER */
/**
 * sys_getpid - return the thread group id of the current process
 *
 * Note, despite the name, this returns the tgid not the pid.  The tgid and
 * the pid are identical unless CLONE_THREAD was specified on clone() in
 * which case the tgid is the same in all threads of the same group.
 *
 * This is SMP safe as current->tgid does not change.
 */
SYSCALL_DEFINE0(getpid)
{
	return task_tgid_vnr(current);
}

/* Thread ID - the internal kernel "pid" */
SYSCALL_DEFINE0(gettid)
{
	return task_pid_vnr(current);
}

/*
 * Accessing ->real_parent is not SMP-safe, it could
 * change from under us. However, we can use a stale
 * value of ->real_parent under rcu_read_lock(), see
 * release_task()->call_rcu(delayed_put_task_struct).
 */
SYSCALL_DEFINE0(getppid)
{
	int pid;

	rcu_read_lock();
	pid = task_tgid_vnr(rcu_dereference(current->real_parent));
	rcu_read_unlock();

	return pid;
}

SYSCALL_DEFINE0(getuid)
{
	/* Only we change this so SMP safe */
	return from_kuid_munged(current_user_ns(), current_uid());
}

SYSCALL_DEFINE0(geteuid)
{
	/* Only we change this so SMP safe */
	return from_kuid_munged(current_user_ns(), current_euid());
}

SYSCALL_DEFINE0(getgid)
{
	/* Only we change this so SMP safe */
	return from_kgid_munged(current_user_ns(), current_gid());
}

SYSCALL_DEFINE0(getegid)
{
	/* Only we change this so SMP safe */
	return from_kgid_munged(current_user_ns(), current_egid());
}
static void do_sys_times(struct tms *tms)
{
	u64 tgutime, tgstime, cutime, cstime;

	thread_group_cputime_adjusted(current, &tgutime, &tgstime);
	cutime = current->signal->cutime;
	cstime = current->signal->cstime;
	tms->tms_utime = nsec_to_clock_t(tgutime);
	tms->tms_stime = nsec_to_clock_t(tgstime);
	tms->tms_cutime = nsec_to_clock_t(cutime);
	tms->tms_cstime = nsec_to_clock_t(cstime);
}

SYSCALL_DEFINE1(times, struct tms __user *, tbuf)
{
	if (tbuf) {
		struct tms tmp;

		do_sys_times(&tmp);
		if (copy_to_user(tbuf, &tmp, sizeof(struct tms)))
			return -EFAULT;
	}
	force_successful_syscall_return();
	return (long) jiffies_64_to_clock_t(get_jiffies_64());
}
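
/*
 * Illustrative userspace sketch (not kernel code): the tick counts filled
 * in by do_sys_times() are converted to seconds with the tick rate the
 * kernel exposes as sysconf(_SC_CLK_TCK).
 *
 *	struct tms t;
 *	times(&t);
 *	long hz = sysconf(_SC_CLK_TCK);
 *	double user_sec = (double)t.tms_utime / hz;
 *	double sys_sec  = (double)t.tms_stime / hz;
 */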
#ifdef CONFIG_COMPAT
static compat_clock_t clock_t_to_compat_clock_t(clock_t x)
{
	return compat_jiffies_to_clock_t(clock_t_to_jiffies(x));
}

COMPAT_SYSCALL_DEFINE1(times, struct compat_tms __user *, tbuf)
{
	if (tbuf) {
		struct tms tms;
		struct compat_tms tmp;

		do_sys_times(&tms);
		/* Convert our struct tms to the compat version. */
		tmp.tms_utime = clock_t_to_compat_clock_t(tms.tms_utime);
		tmp.tms_stime = clock_t_to_compat_clock_t(tms.tms_stime);
		tmp.tms_cutime = clock_t_to_compat_clock_t(tms.tms_cutime);
		tmp.tms_cstime = clock_t_to_compat_clock_t(tms.tms_cstime);
		if (copy_to_user(tbuf, &tmp, sizeof(tmp)))
			return -EFAULT;
	}
	force_successful_syscall_return();
	return compat_jiffies_to_clock_t(jiffies);
}
#endif
/*
 * This needs some heavy checking ...
 * I just haven't the stomach for it. I also don't fully
 * understand sessions/pgrp etc. Let somebody who does explain it.
 *
 * OK, I think I have the protection semantics right.... this is really
 * only important on a multi-user system anyway, to make sure one user
 * can't send a signal to a process owned by another.  -TYT, 12/12/91
 *
 * !PF_FORKNOEXEC check to conform completely to POSIX.
 */
SYSCALL_DEFINE2(setpgid, pid_t, pid, pid_t, pgid)
{
	struct task_struct *p;
	struct task_struct *group_leader = current->group_leader;
	struct pid *pgrp;
	int err;

	if (!pid)
		pid = task_pid_vnr(group_leader);
	if (!pgid)
		pgid = pid;
	if (pgid < 0)
		return -EINVAL;

	/* From this point forward we keep holding onto the tasklist lock
	 * so that our parent does not change from under us. -DaveM
	 */
	write_lock_irq(&tasklist_lock);

	err = -ESRCH;
	p = find_task_by_vpid(pid);
	if (!p)
		goto out;

	err = -EINVAL;
	if (!thread_group_leader(p))
		goto out;

	if (same_thread_group(p->real_parent, group_leader)) {
		err = -EPERM;
		if (task_session(p) != task_session(group_leader))
			goto out;
		err = -EACCES;
		if (!(p->flags & PF_FORKNOEXEC))
			goto out;
	} else {
		err = -ESRCH;
		if (p != group_leader)
			goto out;
	}

	err = -EPERM;
	if (p->signal->leader)
		goto out;

	pgrp = task_pid(p);
	if (pgid != pid) {
		struct task_struct *g;

		pgrp = find_vpid(pgid);
		g = pid_task(pgrp, PIDTYPE_PGID);
		if (!g || task_session(g) != task_session(group_leader))
			goto out;
	}

	err = security_task_setpgid(p, pgid);
	if (err)
		goto out;

	if (task_pgrp(p) != pgrp)
		change_pid(p, PIDTYPE_PGID, pgrp);

	err = 0;
out:
	/* All paths lead to here, thus we are safe. -DaveM */
	write_unlock_irq(&tasklist_lock);
	return err;
}
static int do_getpgid(pid_t pid)
{
	struct task_struct *p;
	struct pid *grp;
	int retval;

	rcu_read_lock();
	if (!pid)
		grp = task_pgrp(current);
	else {
		retval = -ESRCH;
		p = find_task_by_vpid(pid);
		if (!p)
			goto out;
		grp = task_pgrp(p);
		if (!grp)
			goto out;

		retval = security_task_getpgid(p);
		if (retval)
			goto out;
	}
	retval = pid_vnr(grp);
out:
	rcu_read_unlock();
	return retval;
}

SYSCALL_DEFINE1(getpgid, pid_t, pid)
{
	return do_getpgid(pid);
}

#ifdef __ARCH_WANT_SYS_GETPGRP

SYSCALL_DEFINE0(getpgrp)
{
	return do_getpgid(0);
}

#endif
SYSCALL_DEFINE1(getsid, pid_t, pid)
{
	struct task_struct *p;
	struct pid *sid;
	int retval;

	rcu_read_lock();
	if (!pid)
		sid = task_session(current);
	else {
		retval = -ESRCH;
		p = find_task_by_vpid(pid);
		if (!p)
			goto out;
		sid = task_session(p);
		if (!sid)
			goto out;

		retval = security_task_getsid(p);
		if (retval)
			goto out;
	}
	retval = pid_vnr(sid);
out:
	rcu_read_unlock();
	return retval;
}
static void set_special_pids(struct pid *pid)
{
	struct task_struct *curr = current->group_leader;

	if (task_session(curr) != pid)
		change_pid(curr, PIDTYPE_SID, pid);

	if (task_pgrp(curr) != pid)
		change_pid(curr, PIDTYPE_PGID, pid);
}

int ksys_setsid(void)
{
	struct task_struct *group_leader = current->group_leader;
	struct pid *sid = task_pid(group_leader);
	pid_t session = pid_vnr(sid);
	int err = -EPERM;

	write_lock_irq(&tasklist_lock);
	/* Fail if I am already a session leader */
	if (group_leader->signal->leader)
		goto out;

	/* Fail if a process group id already exists that equals the
	 * proposed session id.
	 */
	if (pid_task(sid, PIDTYPE_PGID))
		goto out;

	group_leader->signal->leader = 1;
	set_special_pids(sid);

	proc_clear_tty(group_leader);

	err = session;
out:
	write_unlock_irq(&tasklist_lock);
	if (err > 0) {
		proc_sid_connector(group_leader);
		sched_autogroup_create_attach(group_leader);
	}
	return err;
}

SYSCALL_DEFINE0(setsid)
{
	return ksys_setsid();
}
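
/*
 * Illustrative userspace sketch (not kernel code): the classic
 * daemonization sequence. The fork() guarantees the child is not a process
 * group leader, so its setsid() cannot hit the "already a session leader"
 * or "pgrp with my pid exists" failures checked above.
 *
 *	if (fork() > 0)
 *		_exit(0);		// parent exits
 *	if (setsid() == -1)		// child becomes session and group
 *		_exit(1);		// leader, with no controlling tty
 */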
DECLARE_RWSEM(uts_sem);

#ifdef COMPAT_UTS_MACHINE
#define override_architecture(name) \
	(personality(current->personality) == PER_LINUX32 && \
	 copy_to_user(name->machine, COMPAT_UTS_MACHINE, \
		      sizeof(COMPAT_UTS_MACHINE)))
#else
#define override_architecture(name)	0
#endif
/*
 * Work around broken programs that cannot handle "Linux 3.0".
 * Instead we map 3.x to 2.6.40+x, so e.g. 3.0 would be 2.6.40.
 * And we map 4.x and later versions to 2.6.60+x, so 4.0/5.0/6.0/... would be
 * 2.6.60.
 */
static int override_release(char __user *release, size_t len)
{
	int ret = 0;

	if (current->personality & UNAME26) {
		const char *rest = UTS_RELEASE;
		char buf[65] = { 0 };
		int ndots = 0;
		unsigned v;
		size_t copy;

		while (*rest) {
			if (*rest == '.' && ++ndots >= 3)
				break;
			if (!isdigit(*rest) && *rest != '.')
				break;
			rest++;
		}
		v = LINUX_VERSION_PATCHLEVEL + 60;
		copy = clamp_t(size_t, len, 1, sizeof(buf));
		copy = scnprintf(buf, copy, "2.6.%u%s", v, rest);
		ret = copy_to_user(release, buf, copy + 1);
	}
	return ret;
}
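
/*
 * Worked example (assuming a 6.5.x kernel): LINUX_VERSION_PATCHLEVEL is 5,
 * so v == 65, and a process running under the UNAME26 personality - e.g.
 * started via "setarch --uname-2.6" - sees "2.6.65" plus whatever suffix
 * of the real release string survived the digits-and-dots scan above.
 */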
SYSCALL_DEFINE1(newuname, struct new_utsname __user *, name)
{
	struct new_utsname tmp;

	down_read(&uts_sem);
	memcpy(&tmp, utsname(), sizeof(tmp));
	up_read(&uts_sem);
	if (copy_to_user(name, &tmp, sizeof(tmp)))
		return -EFAULT;

	if (override_release(name->release, sizeof(name->release)))
		return -EFAULT;
	if (override_architecture(name))
		return -EFAULT;
	return 0;
}
#ifdef __ARCH_WANT_SYS_OLD_UNAME
/*
 * Old cruft
 */
SYSCALL_DEFINE1(uname, struct old_utsname __user *, name)
{
	struct old_utsname tmp;

	if (!name)
		return -EFAULT;

	down_read(&uts_sem);
	memcpy(&tmp, utsname(), sizeof(tmp));
	up_read(&uts_sem);
	if (copy_to_user(name, &tmp, sizeof(tmp)))
		return -EFAULT;

	if (override_release(name->release, sizeof(name->release)))
		return -EFAULT;
	if (override_architecture(name))
		return -EFAULT;
	return 0;
}

SYSCALL_DEFINE1(olduname, struct oldold_utsname __user *, name)
{
	struct oldold_utsname tmp;

	if (!name)
		return -EFAULT;

	memset(&tmp, 0, sizeof(tmp));

	down_read(&uts_sem);
	memcpy(&tmp.sysname, &utsname()->sysname, __OLD_UTS_LEN);
	memcpy(&tmp.nodename, &utsname()->nodename, __OLD_UTS_LEN);
	memcpy(&tmp.release, &utsname()->release, __OLD_UTS_LEN);
	memcpy(&tmp.version, &utsname()->version, __OLD_UTS_LEN);
	memcpy(&tmp.machine, &utsname()->machine, __OLD_UTS_LEN);
	up_read(&uts_sem);
	if (copy_to_user(name, &tmp, sizeof(tmp)))
		return -EFAULT;

	if (override_architecture(name))
		return -EFAULT;
	if (override_release(name->release, sizeof(name->release)))
		return -EFAULT;
	return 0;
}
#endif
SYSCALL_DEFINE2(sethostname, char __user *, name, int, len)
{
	int errno;
	char tmp[__NEW_UTS_LEN];

	if (!ns_capable(current->nsproxy->uts_ns->user_ns, CAP_SYS_ADMIN))
		return -EPERM;

	if (len < 0 || len > __NEW_UTS_LEN)
		return -EINVAL;
	errno = -EFAULT;
	if (!copy_from_user(tmp, name, len)) {
		struct new_utsname *u;

		add_device_randomness(tmp, len);
		down_write(&uts_sem);
		u = utsname();
		memcpy(u->nodename, tmp, len);
		memset(u->nodename + len, 0, sizeof(u->nodename) - len);
		errno = 0;
		uts_proc_notify(UTS_PROC_HOSTNAME);
		up_write(&uts_sem);
	}
	return errno;
}
#ifdef __ARCH_WANT_SYS_GETHOSTNAME

SYSCALL_DEFINE2(gethostname, char __user *, name, int, len)
{
	int i;
	struct new_utsname *u;
	char tmp[__NEW_UTS_LEN + 1];

	if (len < 0)
		return -EINVAL;
	down_read(&uts_sem);
	u = utsname();
	i = 1 + strlen(u->nodename);
	if (i > len)
		i = len;
	memcpy(tmp, u->nodename, i);
	up_read(&uts_sem);
	if (copy_to_user(name, tmp, i))
		return -EFAULT;
	return 0;
}

#endif
/*
 * Only setdomainname; getdomainname can be implemented by calling
 * uname()
 */
SYSCALL_DEFINE2(setdomainname, char __user *, name, int, len)
{
	int errno;
	char tmp[__NEW_UTS_LEN];

	if (!ns_capable(current->nsproxy->uts_ns->user_ns, CAP_SYS_ADMIN))
		return -EPERM;
	if (len < 0 || len > __NEW_UTS_LEN)
		return -EINVAL;

	errno = -EFAULT;
	if (!copy_from_user(tmp, name, len)) {
		struct new_utsname *u;

		add_device_randomness(tmp, len);
		down_write(&uts_sem);
		u = utsname();
		memcpy(u->domainname, tmp, len);
		memset(u->domainname + len, 0, sizeof(u->domainname) - len);
		errno = 0;
		uts_proc_notify(UTS_PROC_DOMAINNAME);
		up_write(&uts_sem);
	}
	return errno;
}
/* make sure you are allowed to change @tsk limits before calling this */
static int do_prlimit(struct task_struct *tsk, unsigned int resource,
		      struct rlimit *new_rlim, struct rlimit *old_rlim)
{
	struct rlimit *rlim;
	int retval = 0;

	if (resource >= RLIM_NLIMITS)
		return -EINVAL;
	resource = array_index_nospec(resource, RLIM_NLIMITS);

	if (new_rlim) {
		if (new_rlim->rlim_cur > new_rlim->rlim_max)
			return -EINVAL;
		if (resource == RLIMIT_NOFILE &&
		    new_rlim->rlim_max > sysctl_nr_open)
			return -EPERM;
	}

	/* Holding a refcount on tsk protects tsk->signal from disappearing. */
	rlim = tsk->signal->rlim + resource;
	task_lock(tsk->group_leader);
	if (new_rlim) {
		/*
		 * Keep the capable check against init_user_ns until cgroups can
		 * contain all limits.
		 */
		if (new_rlim->rlim_max > rlim->rlim_max &&
		    !capable(CAP_SYS_RESOURCE))
			retval = -EPERM;
		if (!retval)
			retval = security_task_setrlimit(tsk, resource, new_rlim);
	}
	if (!retval) {
		if (old_rlim)
			*old_rlim = *rlim;
		if (new_rlim)
			*rlim = *new_rlim;
	}
	task_unlock(tsk->group_leader);

	/*
	 * RLIMIT_CPU handling. Arm the posix CPU timer if the limit is not
	 * infinite. In case of RLIM_INFINITY the posix CPU timer code
	 * ignores the rlimit.
	 */
	if (!retval && new_rlim && resource == RLIMIT_CPU &&
	    new_rlim->rlim_cur != RLIM_INFINITY &&
	    IS_ENABLED(CONFIG_POSIX_TIMERS)) {
		/*
		 * update_rlimit_cpu can fail if the task is exiting, but there
		 * may be other tasks in the thread group that are not exiting,
		 * and they need their cpu timers adjusted.
		 *
		 * The group_leader is the last task to be released, so if we
		 * cannot update_rlimit_cpu on it, then the entire process is
		 * exiting and we do not need to update at all.
		 */
		update_rlimit_cpu(tsk->group_leader, new_rlim->rlim_cur);
	}

	return retval;
}
SYSCALL_DEFINE2(getrlimit, unsigned int, resource, struct rlimit __user *, rlim)
{
	struct rlimit value;
	int ret;

	ret = do_prlimit(current, resource, NULL, &value);
	if (!ret)
		ret = copy_to_user(rlim, &value, sizeof(*rlim)) ? -EFAULT : 0;

	return ret;
}
#ifdef CONFIG_COMPAT

COMPAT_SYSCALL_DEFINE2(setrlimit, unsigned int, resource,
		       struct compat_rlimit __user *, rlim)
{
	struct rlimit r;
	struct compat_rlimit r32;

	if (copy_from_user(&r32, rlim, sizeof(struct compat_rlimit)))
		return -EFAULT;

	if (r32.rlim_cur == COMPAT_RLIM_INFINITY)
		r.rlim_cur = RLIM_INFINITY;
	else
		r.rlim_cur = r32.rlim_cur;
	if (r32.rlim_max == COMPAT_RLIM_INFINITY)
		r.rlim_max = RLIM_INFINITY;
	else
		r.rlim_max = r32.rlim_max;
	return do_prlimit(current, resource, &r, NULL);
}
COMPAT_SYSCALL_DEFINE2(getrlimit, unsigned int, resource,
		       struct compat_rlimit __user *, rlim)
{
	struct rlimit r;
	int ret;

	ret = do_prlimit(current, resource, NULL, &r);
	if (!ret) {
		struct compat_rlimit r32;
		if (r.rlim_cur > COMPAT_RLIM_INFINITY)
			r32.rlim_cur = COMPAT_RLIM_INFINITY;
		else
			r32.rlim_cur = r.rlim_cur;
		if (r.rlim_max > COMPAT_RLIM_INFINITY)
			r32.rlim_max = COMPAT_RLIM_INFINITY;
		else
			r32.rlim_max = r.rlim_max;

		if (copy_to_user(rlim, &r32, sizeof(struct compat_rlimit)))
			return -EFAULT;
	}
	return ret;
}

#endif
#ifdef __ARCH_WANT_SYS_OLD_GETRLIMIT

/*
 *	Back compatibility for getrlimit. Needed for some apps.
 */
SYSCALL_DEFINE2(old_getrlimit, unsigned int, resource,
		struct rlimit __user *, rlim)
{
	struct rlimit x;
	if (resource >= RLIM_NLIMITS)
		return -EINVAL;

	resource = array_index_nospec(resource, RLIM_NLIMITS);
	task_lock(current->group_leader);
	x = current->signal->rlim[resource];
	task_unlock(current->group_leader);
	if (x.rlim_cur > 0x7FFFFFFF)
		x.rlim_cur = 0x7FFFFFFF;
	if (x.rlim_max > 0x7FFFFFFF)
		x.rlim_max = 0x7FFFFFFF;
	return copy_to_user(rlim, &x, sizeof(x)) ? -EFAULT : 0;
}
#ifdef CONFIG_COMPAT
COMPAT_SYSCALL_DEFINE2(old_getrlimit, unsigned int, resource,
		       struct compat_rlimit __user *, rlim)
{
	struct rlimit r;

	if (resource >= RLIM_NLIMITS)
		return -EINVAL;

	resource = array_index_nospec(resource, RLIM_NLIMITS);
	task_lock(current->group_leader);
	r = current->signal->rlim[resource];
	task_unlock(current->group_leader);
	if (r.rlim_cur > 0x7FFFFFFF)
		r.rlim_cur = 0x7FFFFFFF;
	if (r.rlim_max > 0x7FFFFFFF)
		r.rlim_max = 0x7FFFFFFF;

	if (put_user(r.rlim_cur, &rlim->rlim_cur) ||
	    put_user(r.rlim_max, &rlim->rlim_max))
		return -EFAULT;
	return 0;
}
#endif

#endif
static inline bool rlim64_is_infinity(__u64 rlim64)
{
#if BITS_PER_LONG < 64
	return rlim64 >= ULONG_MAX;
#else
	return rlim64 == RLIM64_INFINITY;
#endif
}

static void rlim_to_rlim64(const struct rlimit *rlim, struct rlimit64 *rlim64)
{
	if (rlim->rlim_cur == RLIM_INFINITY)
		rlim64->rlim_cur = RLIM64_INFINITY;
	else
		rlim64->rlim_cur = rlim->rlim_cur;
	if (rlim->rlim_max == RLIM_INFINITY)
		rlim64->rlim_max = RLIM64_INFINITY;
	else
		rlim64->rlim_max = rlim->rlim_max;
}

static void rlim64_to_rlim(const struct rlimit64 *rlim64, struct rlimit *rlim)
{
	if (rlim64_is_infinity(rlim64->rlim_cur))
		rlim->rlim_cur = RLIM_INFINITY;
	else
		rlim->rlim_cur = (unsigned long)rlim64->rlim_cur;
	if (rlim64_is_infinity(rlim64->rlim_max))
		rlim->rlim_max = RLIM_INFINITY;
	else
		rlim->rlim_max = (unsigned long)rlim64->rlim_max;
}
/* rcu lock must be held */
static int check_prlimit_permission(struct task_struct *task,
				    unsigned int flags)
{
	const struct cred *cred = current_cred(), *tcred;
	bool id_match;

	if (current == task)
		return 0;

	tcred = __task_cred(task);
	id_match = (uid_eq(cred->uid, tcred->euid) &&
		    uid_eq(cred->uid, tcred->suid) &&
		    uid_eq(cred->uid, tcred->uid)  &&
		    gid_eq(cred->gid, tcred->egid) &&
		    gid_eq(cred->gid, tcred->sgid) &&
		    gid_eq(cred->gid, tcred->gid));
	if (!id_match && !ns_capable(tcred->user_ns, CAP_SYS_RESOURCE))
		return -EPERM;

	return security_task_prlimit(cred, tcred, flags);
}
SYSCALL_DEFINE4(prlimit64, pid_t, pid, unsigned int, resource,
		const struct rlimit64 __user *, new_rlim,
		struct rlimit64 __user *, old_rlim)
{
	struct rlimit64 old64, new64;
	struct rlimit old, new;
	struct task_struct *tsk;
	unsigned int checkflags = 0;
	int ret;

	if (old_rlim)
		checkflags |= LSM_PRLIMIT_READ;

	if (new_rlim) {
		if (copy_from_user(&new64, new_rlim, sizeof(new64)))
			return -EFAULT;
		rlim64_to_rlim(&new64, &new);
		checkflags |= LSM_PRLIMIT_WRITE;
	}

	rcu_read_lock();
	tsk = pid ? find_task_by_vpid(pid) : current;
	if (!tsk) {
		rcu_read_unlock();
		return -ESRCH;
	}
	ret = check_prlimit_permission(tsk, checkflags);
	if (ret) {
		rcu_read_unlock();
		return ret;
	}
	get_task_struct(tsk);
	rcu_read_unlock();

	ret = do_prlimit(tsk, resource, new_rlim ? &new : NULL,
			old_rlim ? &old : NULL);

	if (!ret && old_rlim) {
		rlim_to_rlim64(&old, &old64);
		if (copy_to_user(old_rlim, &old64, sizeof(old64)))
			ret = -EFAULT;
	}

	put_task_struct(tsk);
	return ret;
}
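
/*
 * Illustrative userspace sketch (not kernel code): the glibc prlimit()
 * wrapper drives this syscall. A NULL new limit makes it a pure read
 * (only LSM_PRLIMIT_READ is checked); pid 0 means the calling process.
 * target_pid is a hypothetical variable supplied by the caller.
 *
 *	struct rlimit old;
 *	if (prlimit(target_pid, RLIMIT_NOFILE, NULL, &old) == 0)
 *		printf("soft=%ju hard=%ju\n",
 *		       (uintmax_t)old.rlim_cur, (uintmax_t)old.rlim_max);
 */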
SYSCALL_DEFINE2(setrlimit, unsigned int, resource, struct rlimit __user *, rlim)
{
	struct rlimit new_rlim;

	if (copy_from_user(&new_rlim, rlim, sizeof(*rlim)))
		return -EFAULT;
	return do_prlimit(current, resource, &new_rlim, NULL);
}
/*
 * It would make sense to put struct rusage in the task_struct,
 * except that would make the task_struct be *really big*.  After
 * task_struct gets moved into malloc'ed memory, it would
 * make sense to do this.  It will make moving the rest of the information
 * a lot simpler!  (Which we're not doing right now because we're not
 * measuring them yet).
 *
 * When sampling multiple threads for RUSAGE_SELF, under SMP we might have
 * races with threads incrementing their own counters.  But since word
 * reads are atomic, we either get new values or old values and we don't
 * care which for the sums.  We always take the siglock to protect reading
 * the c* fields from p->signal from races with exit.c updating those
 * fields when reaping, so a sample either gets all the additions of a
 * given child after it's reaped, or none so this sample is before reaping.
 *
 * Locking:
 * We need to take the siglock for CHILDREN, SELF and BOTH
 * for the cases current multithreaded, non-current single threaded and
 * non-current multithreaded.  Thread traversal is now safe with
 * the siglock held.
 * Strictly speaking, we do not need to take the siglock if we are current and
 * single threaded, as no one else can take our signal_struct away, no one
 * else can reap the children to update signal->c* counters, and no one else
 * can race with the signal-> fields. If we do not take any lock, the
 * signal-> fields could be read out of order while another thread was just
 * exiting. So we should place a read memory barrier when we avoid the lock.
 * On the writer side, write memory barrier is implied in __exit_signal
 * as __exit_signal releases the siglock spinlock after updating the signal->
 * fields. But we don't do this yet to keep things simple.
 */
static void accumulate_thread_rusage(struct task_struct *t, struct rusage *r)
{
	r->ru_nvcsw += t->nvcsw;
	r->ru_nivcsw += t->nivcsw;
	r->ru_minflt += t->min_flt;
	r->ru_majflt += t->maj_flt;
	r->ru_inblock += task_io_get_inblock(t);
	r->ru_oublock += task_io_get_oublock(t);
}
void getrusage(struct task_struct *p, int who, struct rusage *r)
{
	struct task_struct *t;
	unsigned long flags;
	u64 tgutime, tgstime, utime, stime;
	unsigned long maxrss;
	struct mm_struct *mm;
	struct signal_struct *sig = p->signal;
	unsigned int seq = 0;

retry:
	memset(r, 0, sizeof(*r));
	utime = stime = 0;
	maxrss = 0;

	if (who == RUSAGE_THREAD) {
		task_cputime_adjusted(current, &utime, &stime);
		accumulate_thread_rusage(p, r);
		maxrss = sig->maxrss;
		goto out_thread;
	}

	flags = read_seqbegin_or_lock_irqsave(&sig->stats_lock, &seq);

	switch (who) {
	case RUSAGE_BOTH:
	case RUSAGE_CHILDREN:
		utime = sig->cutime;
		stime = sig->cstime;
		r->ru_nvcsw = sig->cnvcsw;
		r->ru_nivcsw = sig->cnivcsw;
		r->ru_minflt = sig->cmin_flt;
		r->ru_majflt = sig->cmaj_flt;
		r->ru_inblock = sig->cinblock;
		r->ru_oublock = sig->coublock;
		maxrss = sig->cmaxrss;

		if (who == RUSAGE_CHILDREN)
			break;
		fallthrough;

	case RUSAGE_SELF:
		r->ru_nvcsw += sig->nvcsw;
		r->ru_nivcsw += sig->nivcsw;
		r->ru_minflt += sig->min_flt;
		r->ru_majflt += sig->maj_flt;
		r->ru_inblock += sig->inblock;
		r->ru_oublock += sig->oublock;
		if (maxrss < sig->maxrss)
			maxrss = sig->maxrss;

		rcu_read_lock();
		__for_each_thread(sig, t)
			accumulate_thread_rusage(t, r);
		rcu_read_unlock();
		break;

	default:
		BUG();
	}

	if (need_seqretry(&sig->stats_lock, seq)) {
		seq = 1;
		goto retry;
	}
	done_seqretry_irqrestore(&sig->stats_lock, seq, flags);

	if (who == RUSAGE_CHILDREN)
		goto out_children;

	thread_group_cputime_adjusted(p, &tgutime, &tgstime);
	utime += tgutime;
	stime += tgstime;

out_thread:
	mm = get_task_mm(p);
	if (mm) {
		setmax_mm_hiwater_rss(&maxrss, mm);
		mmput(mm);
	}

out_children:
	r->ru_maxrss = maxrss * (PAGE_SIZE / 1024); /* convert pages to KBs */
	r->ru_utime = ns_to_kernel_old_timeval(utime);
	r->ru_stime = ns_to_kernel_old_timeval(stime);
}
SYSCALL_DEFINE2(getrusage, int, who, struct rusage __user *, ru)
{
	struct rusage r;

	if (who != RUSAGE_SELF && who != RUSAGE_CHILDREN &&
	    who != RUSAGE_THREAD)
		return -EINVAL;

	getrusage(current, who, &r);
	return copy_to_user(ru, &r, sizeof(r)) ? -EFAULT : 0;
}
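
/*
 * Illustrative userspace sketch (not kernel code): ru_maxrss is filled in
 * above as pages * (PAGE_SIZE / 1024), i.e. already in kilobytes, so the
 * caller needs no further scaling.
 *
 *	struct rusage ru;
 *	if (getrusage(RUSAGE_SELF, &ru) == 0)
 *		printf("peak RSS: %ld KiB\n", ru.ru_maxrss);
 */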
#ifdef CONFIG_COMPAT
COMPAT_SYSCALL_DEFINE2(getrusage, int, who, struct compat_rusage __user *, ru)
{
	struct rusage r;

	if (who != RUSAGE_SELF && who != RUSAGE_CHILDREN &&
	    who != RUSAGE_THREAD)
		return -EINVAL;

	getrusage(current, who, &r);
	return put_compat_rusage(&r, ru);
}
#endif
SYSCALL_DEFINE1(umask, int, mask)
{
	mask = xchg(&current->fs->umask, mask & S_IRWXUGO);
	return mask;
}
static int prctl_set_mm_exe_file(struct mm_struct *mm, unsigned int fd)
{
	struct fd exe;
	struct inode *inode;
	int err;

	exe = fdget(fd);
	if (!exe.file)
		return -EBADF;

	inode = file_inode(exe.file);

	/*
	 * Because the original mm->exe_file points to executable file, make
	 * sure that this one is executable as well, to avoid breaking an
	 * overall picture.
	 */
	err = -EACCES;
	if (!S_ISREG(inode->i_mode) || path_noexec(&exe.file->f_path))
		goto exit;

	err = file_permission(exe.file, MAY_EXEC);
	if (err)
		goto exit;

	err = replace_mm_exe_file(mm, exe.file);
exit:
	fdput(exe);
	return err;
}
/*
 * Check arithmetic relations of passed addresses.
 *
 * WARNING: we don't require any capability here so be very careful
 * in what is allowed for modification from userspace.
 */
static int validate_prctl_map_addr(struct prctl_mm_map *prctl_map)
{
	unsigned long mmap_max_addr = TASK_SIZE;
	int error = -EINVAL, i;

	static const unsigned char offsets[] = {
		offsetof(struct prctl_mm_map, start_code),
		offsetof(struct prctl_mm_map, end_code),
		offsetof(struct prctl_mm_map, start_data),
		offsetof(struct prctl_mm_map, end_data),
		offsetof(struct prctl_mm_map, start_brk),
		offsetof(struct prctl_mm_map, brk),
		offsetof(struct prctl_mm_map, start_stack),
		offsetof(struct prctl_mm_map, arg_start),
		offsetof(struct prctl_mm_map, arg_end),
		offsetof(struct prctl_mm_map, env_start),
		offsetof(struct prctl_mm_map, env_end),
	};

	/*
	 * Make sure the members are not somewhere outside
	 * of allowed address space.
	 */
	for (i = 0; i < ARRAY_SIZE(offsets); i++) {
		u64 val = *(u64 *)((char *)prctl_map + offsets[i]);

		if ((unsigned long)val >= mmap_max_addr ||
		    (unsigned long)val < mmap_min_addr)
			goto out;
	}

	/*
	 * Make sure the pairs are ordered.
	 */
#define __prctl_check_order(__m1, __op, __m2)				\
	((unsigned long)prctl_map->__m1 __op				\
	 (unsigned long)prctl_map->__m2) ? 0 : -EINVAL
	error  = __prctl_check_order(start_code, <, end_code);
	error |= __prctl_check_order(start_data,<=, end_data);
	error |= __prctl_check_order(start_brk, <=, brk);
	error |= __prctl_check_order(arg_start, <=, arg_end);
	error |= __prctl_check_order(env_start, <=, env_end);
	if (error)
		goto out;
#undef __prctl_check_order

	error = -EINVAL;

	/*
	 * Nor should we allow the limits to be overridden when they are set.
	 */
	if (check_data_rlimit(rlimit(RLIMIT_DATA), prctl_map->brk,
			      prctl_map->start_brk, prctl_map->end_data,
			      prctl_map->start_data))
		goto out;

	error = 0;
out:
	return error;
}
#ifdef CONFIG_CHECKPOINT_RESTORE
static int prctl_set_mm_map(int opt, const void __user *addr, unsigned long data_size)
{
	struct prctl_mm_map prctl_map = { .exe_fd = (u32)-1, };
	unsigned long user_auxv[AT_VECTOR_SIZE];
	struct mm_struct *mm = current->mm;
	int error;

	BUILD_BUG_ON(sizeof(user_auxv) != sizeof(mm->saved_auxv));
	BUILD_BUG_ON(sizeof(struct prctl_mm_map) > 256);

	if (opt == PR_SET_MM_MAP_SIZE)
		return put_user((unsigned int)sizeof(prctl_map),
				(unsigned int __user *)addr);

	if (data_size != sizeof(prctl_map))
		return -EINVAL;

	if (copy_from_user(&prctl_map, addr, sizeof(prctl_map)))
		return -EFAULT;

	error = validate_prctl_map_addr(&prctl_map);
	if (error)
		return error;

	if (prctl_map.auxv_size) {
		/*
		 * Someone is trying to cheat the auxv vector.
		 */
		if (!prctl_map.auxv ||
				prctl_map.auxv_size > sizeof(mm->saved_auxv))
			return -EINVAL;

		memset(user_auxv, 0, sizeof(user_auxv));
		if (copy_from_user(user_auxv,
				   (const void __user *)prctl_map.auxv,
				   prctl_map.auxv_size))
			return -EFAULT;

		/* Last entry must be AT_NULL as specification requires */
		user_auxv[AT_VECTOR_SIZE - 2] = AT_NULL;
		user_auxv[AT_VECTOR_SIZE - 1] = AT_NULL;
	}

	if (prctl_map.exe_fd != (u32)-1) {
		/*
		 * Check if the current user is checkpoint/restore capable.
		 * At the time of this writing, it checks for CAP_SYS_ADMIN
		 * or CAP_CHECKPOINT_RESTORE.
		 * Note that a user with access to ptrace can masquerade an
		 * arbitrary program as any executable, even setuid ones.
		 * This may have implications in the tomoyo subsystem.
		 */
		if (!checkpoint_restore_ns_capable(current_user_ns()))
			return -EPERM;

		error = prctl_set_mm_exe_file(mm, prctl_map.exe_fd);
		if (error)
			return error;
	}

	/*
	 * arg_lock protects concurrent updates but we still need mmap_lock for
	 * read to exclude races with sys_brk.
	 */
	mmap_read_lock(mm);

	/*
	 * We don't validate that these members point to real, currently
	 * present VMAs, because the application may already have unmapped
	 * the corresponding VMAs; the kernel uses these members mostly for
	 * statistics output in procfs, except:
	 *
	 *  - @start_brk/@brk, which are used in do_brk_flags; the kernel
	 *    looks up the VMAs when updating these members, so a bad value
	 *    written here makes the kernel complain about the userspace
	 *    program but won't cause any problem in the kernel itself.
	 */

	spin_lock(&mm->arg_lock);
	mm->start_code	= prctl_map.start_code;
	mm->end_code	= prctl_map.end_code;
	mm->start_data	= prctl_map.start_data;
	mm->end_data	= prctl_map.end_data;
	mm->start_brk	= prctl_map.start_brk;
	mm->brk		= prctl_map.brk;
	mm->start_stack	= prctl_map.start_stack;
	mm->arg_start	= prctl_map.arg_start;
	mm->arg_end	= prctl_map.arg_end;
	mm->env_start	= prctl_map.env_start;
	mm->env_end	= prctl_map.env_end;
	spin_unlock(&mm->arg_lock);

	/*
	 * Note this update of @saved_auxv is lockless, so if someone reads
	 * this member in procfs while we're updating it they may get partly
	 * updated results. It's a known and acceptable trade-off: we leave
	 * it as is rather than introduce additional locks here and make the
	 * kernel more complex.
	 */
	if (prctl_map.auxv_size)
		memcpy(mm->saved_auxv, user_auxv, sizeof(user_auxv));

	mmap_read_unlock(mm);
	return 0;
}
#endif /* CONFIG_CHECKPOINT_RESTORE */
static int prctl_set_auxv(struct mm_struct *mm, unsigned long addr,
			  unsigned long len)
{
	/*
	 * This doesn't move the auxiliary vector itself since it's pinned to
	 * mm_struct, but it permits filling the vector with new values.  It's
	 * up to the caller to provide sane values here, otherwise userspace
	 * tools which use this vector might be unhappy.
	 */
	unsigned long user_auxv[AT_VECTOR_SIZE] = {};

	if (len > sizeof(user_auxv))
		return -EINVAL;

	if (copy_from_user(user_auxv, (const void __user *)addr, len))
		return -EFAULT;

	/* Make sure the last entry is always AT_NULL */
	user_auxv[AT_VECTOR_SIZE - 2] = 0;
	user_auxv[AT_VECTOR_SIZE - 1] = 0;

	BUILD_BUG_ON(sizeof(user_auxv) != sizeof(mm->saved_auxv));

	task_lock(current);
	memcpy(mm->saved_auxv, user_auxv, len);
	task_unlock(current);

	return 0;
}
static int prctl_set_mm(int opt, unsigned long addr,
			unsigned long arg4, unsigned long arg5)
{
	struct mm_struct *mm = current->mm;
	struct prctl_mm_map prctl_map = {
		.auxv = NULL,
		.auxv_size = 0,
		.exe_fd = -1,
	};
	struct vm_area_struct *vma;
	int error;

	if (arg5 || (arg4 && (opt != PR_SET_MM_AUXV &&
			      opt != PR_SET_MM_MAP &&
			      opt != PR_SET_MM_MAP_SIZE)))
		return -EINVAL;

#ifdef CONFIG_CHECKPOINT_RESTORE
	if (opt == PR_SET_MM_MAP || opt == PR_SET_MM_MAP_SIZE)
		return prctl_set_mm_map(opt, (const void __user *)addr, arg4);
#endif

	if (!capable(CAP_SYS_RESOURCE))
		return -EPERM;

	if (opt == PR_SET_MM_EXE_FILE)
		return prctl_set_mm_exe_file(mm, (unsigned int)addr);

	if (opt == PR_SET_MM_AUXV)
		return prctl_set_auxv(mm, addr, arg4);

	if (addr >= TASK_SIZE || addr < mmap_min_addr)
		return -EINVAL;

	error = -EINVAL;

	/*
	 * arg_lock protects concurrent updates of arg boundaries, we need
	 * mmap_lock for a) concurrent sys_brk, b) finding VMA for addr
	 * validation.
	 */
	mmap_read_lock(mm);
	vma = find_vma(mm, addr);

	spin_lock(&mm->arg_lock);
	prctl_map.start_code	= mm->start_code;
	prctl_map.end_code	= mm->end_code;
	prctl_map.start_data	= mm->start_data;
	prctl_map.end_data	= mm->end_data;
	prctl_map.start_brk	= mm->start_brk;
	prctl_map.brk		= mm->brk;
	prctl_map.start_stack	= mm->start_stack;
	prctl_map.arg_start	= mm->arg_start;
	prctl_map.arg_end	= mm->arg_end;
	prctl_map.env_start	= mm->env_start;
	prctl_map.env_end	= mm->env_end;

	switch (opt) {
	case PR_SET_MM_START_CODE:
		prctl_map.start_code = addr;
		break;
	case PR_SET_MM_END_CODE:
		prctl_map.end_code = addr;
		break;
	case PR_SET_MM_START_DATA:
		prctl_map.start_data = addr;
		break;
	case PR_SET_MM_END_DATA:
		prctl_map.end_data = addr;
		break;
	case PR_SET_MM_START_STACK:
		prctl_map.start_stack = addr;
		break;
	case PR_SET_MM_START_BRK:
		prctl_map.start_brk = addr;
		break;
	case PR_SET_MM_BRK:
		prctl_map.brk = addr;
		break;
	case PR_SET_MM_ARG_START:
		prctl_map.arg_start = addr;
		break;
	case PR_SET_MM_ARG_END:
		prctl_map.arg_end = addr;
		break;
	case PR_SET_MM_ENV_START:
		prctl_map.env_start = addr;
		break;
	case PR_SET_MM_ENV_END:
		prctl_map.env_end = addr;
		break;
	default:
		goto out;
	}

	error = validate_prctl_map_addr(&prctl_map);
	if (error)
		goto out;

	switch (opt) {
	/*
	 * If command line arguments and environment
	 * are placed somewhere else on stack, we can
	 * set them up here, ARG_START/END to setup
	 * command line arguments and ENV_START/END
	 * for environment.
	 */
	case PR_SET_MM_START_STACK:
	case PR_SET_MM_ARG_START:
	case PR_SET_MM_ARG_END:
	case PR_SET_MM_ENV_START:
	case PR_SET_MM_ENV_END:
		if (!vma) {
			error = -EFAULT;
			goto out;
		}
	}

	mm->start_code	= prctl_map.start_code;
	mm->end_code	= prctl_map.end_code;
	mm->start_data	= prctl_map.start_data;
	mm->end_data	= prctl_map.end_data;
	mm->start_brk	= prctl_map.start_brk;
	mm->brk		= prctl_map.brk;
	mm->start_stack	= prctl_map.start_stack;
	mm->arg_start	= prctl_map.arg_start;
	mm->arg_end	= prctl_map.arg_end;
	mm->env_start	= prctl_map.env_start;
	mm->env_end	= prctl_map.env_end;

	error = 0;
out:
	spin_unlock(&mm->arg_lock);
	mmap_read_unlock(mm);
	return error;
}
#ifdef CONFIG_CHECKPOINT_RESTORE
static int prctl_get_tid_address(struct task_struct *me, int __user * __user *tid_addr)
{
	return put_user(me->clear_child_tid, tid_addr);
}
#else
static int prctl_get_tid_address(struct task_struct *me, int __user * __user *tid_addr)
{
	return -EINVAL;
}
#endif

static int propagate_has_child_subreaper(struct task_struct *p, void *data)
{
	/*
	 * If the task has has_child_subreaper set, all its descendants
	 * already have this flag too, and new descendants will inherit it
	 * on fork, so skip them.
	 *
	 * If we've found a child_reaper, skip the descendants in its
	 * subtree as they will never get out of their pid namespace.
	 */
	if (p->signal->has_child_subreaper ||
	    is_child_reaper(task_pid(p)))
		return 0;

	p->signal->has_child_subreaper = 1;
	return 1;
}
int __weak arch_prctl_spec_ctrl_get(struct task_struct *t, unsigned long which)
{
	return -EINVAL;
}

int __weak arch_prctl_spec_ctrl_set(struct task_struct *t, unsigned long which,
				    unsigned long ctrl)
{
	return -EINVAL;
}

#define PR_IO_FLUSHER (PF_MEMALLOC_NOIO | PF_LOCAL_THROTTLE)
#ifdef CONFIG_ANON_VMA_NAME

#define ANON_VMA_NAME_MAX_LEN		80
#define ANON_VMA_NAME_INVALID_CHARS	"\\`$[]"

static inline bool is_valid_name_char(char ch)
{
	/* printable ascii characters, excluding ANON_VMA_NAME_INVALID_CHARS */
	return ch > 0x1f && ch < 0x7f &&
		!strchr(ANON_VMA_NAME_INVALID_CHARS, ch);
}

static int prctl_set_vma(unsigned long opt, unsigned long addr,
			 unsigned long size, unsigned long arg)
{
	struct mm_struct *mm = current->mm;
	const char __user *uname;
	struct anon_vma_name *anon_name = NULL;
	int error;

	switch (opt) {
	case PR_SET_VMA_ANON_NAME:
		uname = (const char __user *)arg;
		if (uname) {
			char *name, *pch;

			name = strndup_user(uname, ANON_VMA_NAME_MAX_LEN);
			if (IS_ERR(name))
				return PTR_ERR(name);

			for (pch = name; *pch != '\0'; pch++) {
				if (!is_valid_name_char(*pch)) {
					kfree(name);
					return -EINVAL;
				}
			}
			/* anon_vma has its own copy */
			anon_name = anon_vma_name_alloc(name);
			kfree(name);
			if (!anon_name)
				return -ENOMEM;
		}

		mmap_write_lock(mm);
		error = madvise_set_anon_name(mm, addr, size, anon_name);
		mmap_write_unlock(mm);
		anon_vma_name_put(anon_name);
		break;
	default:
		error = -EINVAL;
	}

	return error;
}

#else /* CONFIG_ANON_VMA_NAME */
static int prctl_set_vma(unsigned long opt, unsigned long start,
			 unsigned long size, unsigned long arg)
{
	return -EINVAL;
}
#endif /* CONFIG_ANON_VMA_NAME */
static inline unsigned long get_current_mdwe(void)
{
	unsigned long ret = 0;

	if (test_bit(MMF_HAS_MDWE, &current->mm->flags))
		ret |= PR_MDWE_REFUSE_EXEC_GAIN;
	if (test_bit(MMF_HAS_MDWE_NO_INHERIT, &current->mm->flags))
		ret |= PR_MDWE_NO_INHERIT;

	return ret;
}
static inline int prctl_set_mdwe(unsigned long bits, unsigned long arg3,
				 unsigned long arg4, unsigned long arg5)
{
	unsigned long current_bits;

	if (arg3 || arg4 || arg5)
		return -EINVAL;

	if (bits & ~(PR_MDWE_REFUSE_EXEC_GAIN | PR_MDWE_NO_INHERIT))
		return -EINVAL;

	/* NO_INHERIT only makes sense with REFUSE_EXEC_GAIN */
	if (bits & PR_MDWE_NO_INHERIT && !(bits & PR_MDWE_REFUSE_EXEC_GAIN))
		return -EINVAL;

	/*
	 * EOPNOTSUPP might be more appropriate here in principle, but
	 * existing userspace depends on EINVAL specifically.
	 */
	if (!arch_memory_deny_write_exec_supported())
		return -EINVAL;

	current_bits = get_current_mdwe();
	if (current_bits && current_bits != bits)
		return -EPERM; /* Cannot unset the flags */

	if (bits & PR_MDWE_NO_INHERIT)
		set_bit(MMF_HAS_MDWE_NO_INHERIT, &current->mm->flags);
	if (bits & PR_MDWE_REFUSE_EXEC_GAIN)
		set_bit(MMF_HAS_MDWE, &current->mm->flags);

	return 0;
}
static inline int prctl_get_mdwe(unsigned long arg2, unsigned long arg3,
				 unsigned long arg4, unsigned long arg5)
{
	if (arg2 || arg3 || arg4 || arg5)
		return -EINVAL;
	return get_current_mdwe();
}
static int prctl_get_auxv(void __user *addr, unsigned long len)
{
	struct mm_struct *mm = current->mm;
	unsigned long size = min_t(unsigned long, sizeof(mm->saved_auxv), len);

	if (size && copy_to_user(addr, mm->saved_auxv, size))
		return -EFAULT;
	return sizeof(mm->saved_auxv);
}
SYSCALL_DEFINE5(prctl, int, option, unsigned long, arg2, unsigned long, arg3,
		unsigned long, arg4, unsigned long, arg5)
{
	struct task_struct *me = current;
	unsigned char comm[sizeof(me->comm)];
	long error;

	error = security_task_prctl(option, arg2, arg3, arg4, arg5);
	if (error != -ENOSYS)
		return error;

	error = 0;
	switch (option) {
	case PR_SET_PDEATHSIG:
		if (!valid_signal(arg2)) {
			error = -EINVAL;
			break;
		}
		me->pdeath_signal = arg2;
		break;
	case PR_GET_PDEATHSIG:
		error = put_user(me->pdeath_signal, (int __user *)arg2);
		break;
	case PR_GET_DUMPABLE:
		error = get_dumpable(me->mm);
		break;
	case PR_SET_DUMPABLE:
		if (arg2 != SUID_DUMP_DISABLE && arg2 != SUID_DUMP_USER) {
			error = -EINVAL;
			break;
		}
		set_dumpable(me->mm, arg2);
		break;

	case PR_SET_UNALIGN:
		error = SET_UNALIGN_CTL(me, arg2);
		break;
	case PR_GET_UNALIGN:
		error = GET_UNALIGN_CTL(me, arg2);
		break;
	case PR_SET_FPEMU:
		error = SET_FPEMU_CTL(me, arg2);
		break;
	case PR_GET_FPEMU:
		error = GET_FPEMU_CTL(me, arg2);
		break;
	case PR_SET_FPEXC:
		error = SET_FPEXC_CTL(me, arg2);
		break;
	case PR_GET_FPEXC:
		error = GET_FPEXC_CTL(me, arg2);
		break;
	case PR_GET_TIMING:
		error = PR_TIMING_STATISTICAL;
		break;
	case PR_SET_TIMING:
		if (arg2 != PR_TIMING_STATISTICAL)
			error = -EINVAL;
		break;
	case PR_SET_NAME:
		comm[sizeof(me->comm) - 1] = 0;
		if (strncpy_from_user(comm, (char __user *)arg2,
				      sizeof(me->comm) - 1) < 0)
			return -EFAULT;
		set_task_comm(me, comm);
		proc_comm_connector(me);
		break;
	case PR_GET_NAME:
		get_task_comm(comm, me);
		if (copy_to_user((char __user *)arg2, comm, sizeof(comm)))
			return -EFAULT;
		break;
	case PR_GET_ENDIAN:
		error = GET_ENDIAN(me, arg2);
		break;
	case PR_SET_ENDIAN:
		error = SET_ENDIAN(me, arg2);
		break;
	case PR_GET_SECCOMP:
		error = prctl_get_seccomp();
		break;
	case PR_SET_SECCOMP:
		error = prctl_set_seccomp(arg2, (char __user *)arg3);
		break;
	case PR_GET_TSC:
		error = GET_TSC_CTL(arg2);
		break;
	case PR_SET_TSC:
		error = SET_TSC_CTL(arg2);
		break;
	case PR_TASK_PERF_EVENTS_DISABLE:
		error = perf_event_task_disable();
		break;
	case PR_TASK_PERF_EVENTS_ENABLE:
		error = perf_event_task_enable();
		break;
	case PR_GET_TIMERSLACK:
		if (current->timer_slack_ns > ULONG_MAX)
			error = ULONG_MAX;
		else
			error = current->timer_slack_ns;
		break;
	case PR_SET_TIMERSLACK:
		if (arg2 <= 0)
			current->timer_slack_ns =
					current->default_timer_slack_ns;
		else
			current->timer_slack_ns = arg2;
		break;
	case PR_MCE_KILL:
		if (arg4 | arg5)
			return -EINVAL;
		switch (arg2) {
		case PR_MCE_KILL_CLEAR:
			if (arg3 != 0)
				return -EINVAL;
			current->flags &= ~PF_MCE_PROCESS;
			break;
		case PR_MCE_KILL_SET:
			current->flags |= PF_MCE_PROCESS;
			if (arg3 == PR_MCE_KILL_EARLY)
				current->flags |= PF_MCE_EARLY;
			else if (arg3 == PR_MCE_KILL_LATE)
				current->flags &= ~PF_MCE_EARLY;
			else if (arg3 == PR_MCE_KILL_DEFAULT)
				current->flags &=
						~(PF_MCE_EARLY|PF_MCE_PROCESS);
			else
				return -EINVAL;
			break;
		default:
			return -EINVAL;
		}
		break;
	case PR_MCE_KILL_GET:
		if (arg2 | arg3 | arg4 | arg5)
			return -EINVAL;
		if (current->flags & PF_MCE_PROCESS)
			error = (current->flags & PF_MCE_EARLY) ?
				PR_MCE_KILL_EARLY : PR_MCE_KILL_LATE;
		else
			error = PR_MCE_KILL_DEFAULT;
		break;
	case PR_SET_MM:
		error = prctl_set_mm(arg2, arg3, arg4, arg5);
		break;
	case PR_GET_TID_ADDRESS:
		error = prctl_get_tid_address(me, (int __user * __user *)arg2);
		break;
	case PR_SET_CHILD_SUBREAPER:
		me->signal->is_child_subreaper = !!arg2;
		if (!arg2)
			break;

		walk_process_tree(me, propagate_has_child_subreaper, NULL);
		break;
	case PR_GET_CHILD_SUBREAPER:
		error = put_user(me->signal->is_child_subreaper,
				 (int __user *)arg2);
		break;
	case PR_SET_NO_NEW_PRIVS:
		if (arg2 != 1 || arg3 || arg4 || arg5)
			return -EINVAL;

		task_set_no_new_privs(current);
		break;
	case PR_GET_NO_NEW_PRIVS:
		if (arg2 || arg3 || arg4 || arg5)
			return -EINVAL;
		return task_no_new_privs(current) ? 1 : 0;
	case PR_GET_THP_DISABLE:
		if (arg2 || arg3 || arg4 || arg5)
			return -EINVAL;
		error = !!test_bit(MMF_DISABLE_THP, &me->mm->flags);
		break;
	case PR_SET_THP_DISABLE:
		if (arg3 || arg4 || arg5)
			return -EINVAL;
		if (mmap_write_lock_killable(me->mm))
			return -EINTR;
		if (arg2)
			set_bit(MMF_DISABLE_THP, &me->mm->flags);
		else
			clear_bit(MMF_DISABLE_THP, &me->mm->flags);
		mmap_write_unlock(me->mm);
		break;
	case PR_MPX_ENABLE_MANAGEMENT:
	case PR_MPX_DISABLE_MANAGEMENT:
		/* No longer implemented: */
		return -EINVAL;
	case PR_SET_FP_MODE:
		error = SET_FP_MODE(me, arg2);
		break;
	case PR_GET_FP_MODE:
		error = GET_FP_MODE(me);
		break;
	case PR_SVE_SET_VL:
		error = SVE_SET_VL(arg2);
		break;
	case PR_SVE_GET_VL:
		error = SVE_GET_VL();
		break;
	case PR_SME_SET_VL:
		error = SME_SET_VL(arg2);
		break;
	case PR_SME_GET_VL:
		error = SME_GET_VL();
		break;
	case PR_GET_SPECULATION_CTRL:
		if (arg3 || arg4 || arg5)
			return -EINVAL;
		error = arch_prctl_spec_ctrl_get(me, arg2);
		break;
	case PR_SET_SPECULATION_CTRL:
		if (arg4 || arg5)
			return -EINVAL;
		error = arch_prctl_spec_ctrl_set(me, arg2, arg3);
		break;
	case PR_PAC_RESET_KEYS:
		if (arg3 || arg4 || arg5)
			return -EINVAL;
		error = PAC_RESET_KEYS(me, arg2);
		break;
	case PR_PAC_SET_ENABLED_KEYS:
		if (arg4 || arg5)
			return -EINVAL;
		error = PAC_SET_ENABLED_KEYS(me, arg2, arg3);
		break;
	case PR_PAC_GET_ENABLED_KEYS:
		if (arg2 || arg3 || arg4 || arg5)
			return -EINVAL;
		error = PAC_GET_ENABLED_KEYS(me);
		break;
	case PR_SET_TAGGED_ADDR_CTRL:
		if (arg3 || arg4 || arg5)
			return -EINVAL;
		error = SET_TAGGED_ADDR_CTRL(arg2);
		break;
	case PR_GET_TAGGED_ADDR_CTRL:
		if (arg2 || arg3 || arg4 || arg5)
			return -EINVAL;
		error = GET_TAGGED_ADDR_CTRL();
		break;
	case PR_SET_IO_FLUSHER:
		if (!capable(CAP_SYS_RESOURCE))
			return -EPERM;

		if (arg3 || arg4 || arg5)
			return -EINVAL;

		if (arg2 == 1)
			current->flags |= PR_IO_FLUSHER;
		else if (!arg2)
			current->flags &= ~PR_IO_FLUSHER;
		else
			return -EINVAL;
		break;
	case PR_GET_IO_FLUSHER:
		if (!capable(CAP_SYS_RESOURCE))
			return -EPERM;

		if (arg2 || arg3 || arg4 || arg5)
			return -EINVAL;

		error = (current->flags & PR_IO_FLUSHER) == PR_IO_FLUSHER;
		break;
	case PR_SET_SYSCALL_USER_DISPATCH:
		error = set_syscall_user_dispatch(arg2, arg3, arg4,
						  (char __user *) arg5);
		break;
#ifdef CONFIG_SCHED_CORE
	case PR_SCHED_CORE:
		error = sched_core_share_pid(arg2, arg3, arg4, arg5);
		break;
#endif
	case PR_SET_MDWE:
		error = prctl_set_mdwe(arg2, arg3, arg4, arg5);
		break;
	case PR_GET_MDWE:
		error = prctl_get_mdwe(arg2, arg3, arg4, arg5);
		break;
	case PR_SET_VMA:
		error = prctl_set_vma(arg2, arg3, arg4, arg5);
		break;
	case PR_GET_AUXV:
		if (arg4 || arg5)
			return -EINVAL;
		error = prctl_get_auxv((void __user *)arg2, arg3);
		break;
#ifdef CONFIG_KSM
	case PR_SET_MEMORY_MERGE:
		if (arg3 || arg4 || arg5)
			return -EINVAL;
		if (mmap_write_lock_killable(me->mm))
			return -EINTR;

		if (arg2)
			error = ksm_enable_merge_any(me->mm);
		else
			error = ksm_disable_merge_any(me->mm);
		mmap_write_unlock(me->mm);
		break;
	case PR_GET_MEMORY_MERGE:
		if (arg2 || arg3 || arg4 || arg5)
			return -EINVAL;

		error = !!test_bit(MMF_VM_MERGE_ANY, &me->mm->flags);
		break;
#endif
	case PR_RISCV_V_SET_CONTROL:
		error = RISCV_V_SET_CONTROL(arg2);
		break;
	case PR_RISCV_V_GET_CONTROL:
		error = RISCV_V_GET_CONTROL();
		break;
	default:
		error = -EINVAL;
		break;
	}
	return error;
}
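
/*
 * Illustrative userspace sketch (not kernel code): two common prctl() uses
 * handled by the switch above. PR_SET_NAME silently truncates to the
 * 16-byte comm field (15 characters plus the terminating NUL).
 *
 *	prctl(PR_SET_NAME, "worker-thread");	// rename this thread
 *	prctl(PR_SET_PDEATHSIG, SIGTERM);	// signal me when parent dies
 */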
SYSCALL_DEFINE3(getcpu, unsigned __user *, cpup, unsigned __user *, nodep,
		struct getcpu_cache __user *, unused)
{
	int err = 0;
	int cpu = raw_smp_processor_id();

	if (cpup)
		err |= put_user(cpu, cpup);
	if (nodep)
		err |= put_user(cpu_to_node(cpu), nodep);
	return err ? -EFAULT : 0;
}
/**
 * do_sysinfo - fill in sysinfo struct
 * @info: pointer to buffer to fill
 */
static int do_sysinfo(struct sysinfo *info)
{
	unsigned long mem_total, sav_total;
	unsigned int mem_unit, bitcount;
	struct timespec64 tp;

	memset(info, 0, sizeof(struct sysinfo));

	ktime_get_boottime_ts64(&tp);
	timens_add_boottime(&tp);
	info->uptime = tp.tv_sec + (tp.tv_nsec ? 1 : 0);

	get_avenrun(info->loads, 0, SI_LOAD_SHIFT - FSHIFT);

	info->procs = nr_threads;

	si_meminfo(info);
	si_swapinfo(info);

	/*
	 * If the sum of all the available memory (i.e. ram + swap)
	 * is less than can be stored in a 32 bit unsigned long then
	 * we can be binary compatible with 2.2.x kernels.  If not,
	 * well, in that case 2.2.x was broken anyways...
	 *
	 *  -Erik Andersen <andersee@debian.org>
	 */

	mem_total = info->totalram + info->totalswap;
	if (mem_total < info->totalram || mem_total < info->totalswap)
		goto out;
	bitcount = 0;
	mem_unit = info->mem_unit;
	while (mem_unit > 1) {
		bitcount++;
		mem_unit >>= 1;
		sav_total = mem_total;
		mem_total <<= 1;
		if (mem_total < sav_total)
			goto out;
	}

	/*
	 * If mem_total did not overflow, multiply all memory values by
	 * info->mem_unit and set it to 1.  This leaves things compatible
	 * with 2.2.x, and also retains compatibility with earlier 2.4.x
	 * kernels...
	 */

	info->mem_unit = 1;
	info->totalram <<= bitcount;
	info->freeram <<= bitcount;
	info->sharedram <<= bitcount;
	info->bufferram <<= bitcount;
	info->totalswap <<= bitcount;
	info->freeswap <<= bitcount;
	info->totalhigh <<= bitcount;
	info->freehigh <<= bitcount;

out:
	return 0;
}
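
/*
 * Worked example of the scaling above (illustrative, not kernel code):
 * total memory in bytes is always totalram * mem_unit. When everything
 * fits in an unsigned long, the loop folds mem_unit down to 1 so totalram
 * is already in bytes; otherwise mem_unit stays larger and the caller
 * multiplies.
 *
 *	struct sysinfo si;
 *	sysinfo(&si);
 *	unsigned long long bytes =
 *		(unsigned long long)si.totalram * si.mem_unit;
 */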
SYSCALL_DEFINE1(sysinfo, struct sysinfo __user *, info)
{
	struct sysinfo val;

	do_sysinfo(&val);

	if (copy_to_user(info, &val, sizeof(struct sysinfo)))
		return -EFAULT;

	return 0;
}
#ifdef CONFIG_COMPAT
struct compat_sysinfo {
	s32 uptime;
	u32 loads[3];
	u32 totalram;
	u32 freeram;
	u32 sharedram;
	u32 bufferram;
	u32 totalswap;
	u32 freeswap;
	u16 procs;
	u16 pad;
	u32 totalhigh;
	u32 freehigh;
	u32 mem_unit;
	char _f[20 - 2 * sizeof(u32) - sizeof(int)];
};

COMPAT_SYSCALL_DEFINE1(sysinfo, struct compat_sysinfo __user *, info)
{
	struct sysinfo s;
	struct compat_sysinfo s_32;

	do_sysinfo(&s);

	/* Check to see if any memory value is too large for 32-bit and scale
	 * down if needed
	 */
	if (upper_32_bits(s.totalram) || upper_32_bits(s.totalswap)) {
		int bitcount = 0;

		while (s.mem_unit < PAGE_SIZE) {
			s.mem_unit <<= 1;
			bitcount++;
		}

		s.totalram >>= bitcount;
		s.freeram >>= bitcount;
		s.sharedram >>= bitcount;
		s.bufferram >>= bitcount;
		s.totalswap >>= bitcount;
		s.freeswap >>= bitcount;
		s.totalhigh >>= bitcount;
		s.freehigh >>= bitcount;
	}

	memset(&s_32, 0, sizeof(s_32));
	s_32.uptime = s.uptime;
	s_32.loads[0] = s.loads[0];
	s_32.loads[1] = s.loads[1];
	s_32.loads[2] = s.loads[2];
	s_32.totalram = s.totalram;
	s_32.freeram = s.freeram;
	s_32.sharedram = s.sharedram;
	s_32.bufferram = s.bufferram;
	s_32.totalswap = s.totalswap;
	s_32.freeswap = s.freeswap;
	s_32.procs = s.procs;
	s_32.totalhigh = s.totalhigh;
	s_32.freehigh = s.freehigh;
	s_32.mem_unit = s.mem_unit;
	if (copy_to_user(info, &s_32, sizeof(s_32)))
		return -EFAULT;

	return 0;
}
#endif /* CONFIG_COMPAT */