/*
 * linux/kernel/seccomp.c
 *
 * Copyright 2004-2005  Andrea Arcangeli <andrea@cpushare.com>
 *
 * Copyright (C) 2012 Google, Inc.
 * Will Drewry <wad@chromium.org>
 *
 * This defines a simple but solid secure-computing facility.
 *
 * Mode 1 uses a fixed list of allowed system calls.
 * Mode 2 allows user-defined system call filters in the form
 * of Berkeley Packet Filters/Linux Socket Filters.
 */
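/*
 * Illustrative sketch, not part of this file: userspace typically enters
 * mode 2 with no_new_privs set and a classic BPF program, roughly
 *
 *	struct sock_fprog prog = { .len = cnt, .filter = insns };
 *	prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
 *	prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &prog);
 *
 * where "insns" (an array of struct sock_filter) and "cnt" are hypothetical
 * names used only for this example.
 */
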
#include <linux/atomic.h>
#include <linux/audit.h>
#include <linux/compat.h>
#include <linux/nospec.h>
#include <linux/prctl.h>
#include <linux/sched.h>
#include <linux/seccomp.h>
#include <linux/syscalls.h>

/* #define SECCOMP_DEBUG 1 */

#ifdef CONFIG_SECCOMP_FILTER
#include <asm/syscall.h>
#include <linux/filter.h>
#include <linux/ptrace.h>
#include <linux/security.h>
#include <linux/slab.h>
#include <linux/tracehook.h>
#include <linux/uaccess.h>

/**
 * struct seccomp_filter - container for seccomp BPF programs
 *
 * @usage: reference count to manage the object lifetime.
 *         get/put helpers should be used when accessing an instance
 *         outside of a lifetime-guarded section.  In general, this
 *         is only needed for handling filters shared across tasks.
 * @prev: points to a previously installed, or inherited, filter
 * @prog: the sk_filter holding the BPF program instructions to evaluate
 *        and their number
 *
 * seccomp_filter objects are organized in a tree linked via the @prev
 * pointer.  For any task, it appears to be a singly-linked list starting
 * with current->seccomp.filter, the most recently attached or inherited filter.
 * However, multiple filters may share a @prev node, by way of fork(), which
 * results in a unidirectional tree existing in memory.  This is similar to
 * how namespaces work.
 *
 * seccomp_filter objects should never be modified after being attached
 * to a task_struct (other than @usage).
 */
struct seccomp_filter {
	atomic_t usage;
	struct seccomp_filter *prev;
	struct sk_filter *prog;
};

/* Limit any path through the tree to 256KB worth of instructions. */
#define MAX_INSNS_PER_PATH ((1 << 18) / sizeof(struct sock_filter))
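/*
 * With the 8-byte struct sock_filter this works out to (1 << 18) / 8 = 32768
 * classic BPF instructions along any one filter chain.
 */
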
/*
 * Endianness is explicitly ignored and left for BPF program authors to manage
 * as per the specific architecture.
 */
static void populate_seccomp_data(struct seccomp_data *sd)
{
	struct task_struct *task = current;
	struct pt_regs *regs = task_pt_regs(task);
	unsigned long args[6];

	sd->nr = syscall_get_nr(task, regs);
	sd->arch = syscall_get_arch();
	syscall_get_arguments(task, regs, 0, 6, args);
	sd->args[0] = args[0];
	sd->args[1] = args[1];
	sd->args[2] = args[2];
	sd->args[3] = args[3];
	sd->args[4] = args[4];
	sd->args[5] = args[5];
	sd->instruction_pointer = KSTK_EIP(task);
}

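/*
 * For reference, the layout filters read from (uapi <linux/seccomp.h>):
 *
 *	struct seccomp_data {
 *		int nr;
 *		__u32 arch;
 *		__u64 instruction_pointer;
 *		__u64 args[6];
 *	};
 *
 * The checker below only admits 32-bit aligned loads within this struct.
 */
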
/**
 * seccomp_check_filter - verify seccomp filter code
 * @filter: filter to verify
 * @flen: length of filter
 *
 * Takes a previously checked filter (by sk_chk_filter) and
 * redirects all filter code that loads struct sk_buff data
 * and related data through seccomp_bpf_load.  It also
 * enforces length and alignment checking of those loads.
 *
 * Returns 0 if the rule set is legal or -EINVAL if not.
 */
static int seccomp_check_filter(struct sock_filter *filter, unsigned int flen)
{
	int pc;

	for (pc = 0; pc < flen; pc++) {
		struct sock_filter *ftest = &filter[pc];
		u16 code = ftest->code;
		u32 k = ftest->k;

		switch (code) {
		case BPF_LD | BPF_W | BPF_ABS:
			ftest->code = BPF_LDX | BPF_W | BPF_ABS;
			/* 32-bit aligned and not out of bounds. */
			if (k >= sizeof(struct seccomp_data) || k & 3)
				return -EINVAL;
			continue;
		case BPF_LD | BPF_W | BPF_LEN:
			ftest->code = BPF_LD | BPF_IMM;
			ftest->k = sizeof(struct seccomp_data);
			continue;
		case BPF_LDX | BPF_W | BPF_LEN:
			ftest->code = BPF_LDX | BPF_IMM;
			ftest->k = sizeof(struct seccomp_data);
			continue;
		/* Explicitly include allowed calls. */
		case BPF_RET | BPF_K:
		case BPF_RET | BPF_A:
		case BPF_ALU | BPF_ADD | BPF_K:
		case BPF_ALU | BPF_ADD | BPF_X:
		case BPF_ALU | BPF_SUB | BPF_K:
		case BPF_ALU | BPF_SUB | BPF_X:
		case BPF_ALU | BPF_MUL | BPF_K:
		case BPF_ALU | BPF_MUL | BPF_X:
		case BPF_ALU | BPF_DIV | BPF_K:
		case BPF_ALU | BPF_DIV | BPF_X:
		case BPF_ALU | BPF_AND | BPF_K:
		case BPF_ALU | BPF_AND | BPF_X:
		case BPF_ALU | BPF_OR | BPF_K:
		case BPF_ALU | BPF_OR | BPF_X:
		case BPF_ALU | BPF_XOR | BPF_K:
		case BPF_ALU | BPF_XOR | BPF_X:
		case BPF_ALU | BPF_LSH | BPF_K:
		case BPF_ALU | BPF_LSH | BPF_X:
		case BPF_ALU | BPF_RSH | BPF_K:
		case BPF_ALU | BPF_RSH | BPF_X:
		case BPF_ALU | BPF_NEG:
		case BPF_LD | BPF_IMM:
		case BPF_LDX | BPF_IMM:
		case BPF_MISC | BPF_TAX:
		case BPF_MISC | BPF_TXA:
		case BPF_LD | BPF_MEM:
		case BPF_LDX | BPF_MEM:
		case BPF_JMP | BPF_JA:
		case BPF_JMP | BPF_JEQ | BPF_K:
		case BPF_JMP | BPF_JEQ | BPF_X:
		case BPF_JMP | BPF_JGE | BPF_K:
		case BPF_JMP | BPF_JGE | BPF_X:
		case BPF_JMP | BPF_JGT | BPF_K:
		case BPF_JMP | BPF_JGT | BPF_X:
		case BPF_JMP | BPF_JSET | BPF_K:
		case BPF_JMP | BPF_JSET | BPF_X:
			continue;
		default:
			return -EINVAL;
		}
	}
	return 0;
}

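/*
 * Illustrative sketch, not part of this file: a minimal program accepted by
 * the checker above loads the syscall number and returns an action word,
 * e.g. (using the uapi BPF_STMT/BPF_JUMP macros)
 *
 *	BPF_STMT(BPF_LD | BPF_W | BPF_ABS, offsetof(struct seccomp_data, nr)),
 *	BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, __NR_getpid, 0, 1),
 *	BPF_STMT(BPF_RET | BPF_K, SECCOMP_RET_ERRNO | (EPERM & SECCOMP_RET_DATA)),
 *	BPF_STMT(BPF_RET | BPF_K, SECCOMP_RET_ALLOW),
 *
 * The first load matches the BPF_LD|BPF_W|BPF_ABS case and passes the bounds
 * and alignment check because offsetof(struct seccomp_data, nr) is 0.
 */
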
/**
 * seccomp_run_filters - evaluates all seccomp filters against @syscall
 * @syscall: number of the current system call
 *
 * Returns valid seccomp BPF response codes.
 */
static u32 seccomp_run_filters(int syscall)
{
	struct seccomp_filter *f;
	struct seccomp_data sd;
	u32 ret = SECCOMP_RET_ALLOW;

	/* Ensure unexpected behavior doesn't result in failing open. */
	if (WARN_ON(current->seccomp.filter == NULL))
		return SECCOMP_RET_KILL;

	populate_seccomp_data(&sd);

	/*
	 * All filters in the list are evaluated and the lowest BPF return
	 * value always takes priority (ignoring the DATA).
	 */
	for (f = current->seccomp.filter; f; f = f->prev) {
		u32 cur_ret = SK_RUN_FILTER(f->prog, (void *)&sd);

		if ((cur_ret & SECCOMP_RET_ACTION) < (ret & SECCOMP_RET_ACTION))
			ret = cur_ret;
	}
	return ret;
}

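/*
 * The "lowest value wins" rule above relies on the uapi action ordering:
 * SECCOMP_RET_KILL (0x00000000) < SECCOMP_RET_TRAP (0x00030000) <
 * SECCOMP_RET_ERRNO (0x00050000) < SECCOMP_RET_TRACE (0x7ff00000) <
 * SECCOMP_RET_ALLOW (0x7fff0000), so the most restrictive action across all
 * attached filters is the one that takes effect.
 */
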
#endif /* CONFIG_SECCOMP_FILTER */

static inline bool seccomp_may_assign_mode(unsigned long seccomp_mode)
{
	if (current->seccomp.mode && current->seccomp.mode != seccomp_mode)
		return false;

	return true;
}

void __weak arch_seccomp_spec_mitigate(struct task_struct *task) { }

static inline void seccomp_assign_mode(unsigned long seccomp_mode,
				       unsigned long flags)
{
	current->seccomp.mode = seccomp_mode;
	/* Assume default seccomp processes want spec flaw mitigation. */
	if ((flags & SECCOMP_FILTER_FLAG_SPEC_ALLOW) == 0)
		arch_seccomp_spec_mitigate(current);
	set_tsk_thread_flag(current, TIF_SECCOMP);
}

#ifdef CONFIG_SECCOMP_FILTER
/**
 * seccomp_attach_filter: Attaches a seccomp filter to current.
 * @fprog: BPF program to install
 *
 * Returns 0 on success or an errno on failure.
 */
static long seccomp_attach_filter(struct sock_fprog *fprog)
{
	struct seccomp_filter *filter;
	unsigned long fp_size = fprog->len * sizeof(struct sock_filter);
	unsigned long total_insns = fprog->len;
	struct sock_filter *fp;
	int new_len;
	long ret;

	if (fprog->len == 0 || fprog->len > BPF_MAXINSNS)
		return -EINVAL;

	for (filter = current->seccomp.filter; filter; filter = filter->prev)
		total_insns += filter->prog->len + 4;  /* include a 4 instr penalty */
	if (total_insns > MAX_INSNS_PER_PATH)
		return -ENOMEM;

	/*
	 * Installing a seccomp filter requires that the task has
	 * CAP_SYS_ADMIN in its namespace or be running with no_new_privs.
	 * This avoids scenarios where unprivileged tasks can affect the
	 * behavior of privileged children.
	 */
	if (!task_no_new_privs(current) &&
	    security_capable_noaudit(current_cred(), current_user_ns(),
				     CAP_SYS_ADMIN) != 0)
		return -EACCES;

	fp = kzalloc(fp_size, GFP_KERNEL|__GFP_NOWARN);
	if (!fp)
		return -ENOMEM;

	/* Copy the instructions from fprog. */
	ret = -EFAULT;
	if (copy_from_user(fp, fprog->filter, fp_size))
		goto free_prog;

	/* Check and rewrite the fprog via the skb checker */
	ret = sk_chk_filter(fp, fprog->len);
	if (ret)
		goto free_prog;

	/* Check and rewrite the fprog for seccomp use */
	ret = seccomp_check_filter(fp, fprog->len);
	if (ret)
		goto free_prog;

	/* Convert 'sock_filter' insns to 'sock_filter_int' insns */
	ret = sk_convert_filter(fp, fprog->len, NULL, &new_len);
	if (ret)
		goto free_prog;

	/* Allocate a new seccomp_filter */
	ret = -ENOMEM;
	filter = kzalloc(sizeof(struct seccomp_filter),
			 GFP_KERNEL|__GFP_NOWARN);
	if (!filter)
		goto free_prog;

	filter->prog = kzalloc(sk_filter_size(new_len),
			       GFP_KERNEL|__GFP_NOWARN);
	if (!filter->prog)
		goto free_filter;

	ret = sk_convert_filter(fp, fprog->len, filter->prog->insnsi, &new_len);
	if (ret)
		goto free_filter_prog;
	kfree(fp);

	atomic_set(&filter->usage, 1);
	filter->prog->len = new_len;

	sk_filter_select_runtime(filter->prog);

	/*
	 * If there is an existing filter, make it the prev and don't drop its
	 * task reference.
	 */
	filter->prev = current->seccomp.filter;
	current->seccomp.filter = filter;
	return 0;

free_filter_prog:
	kfree(filter->prog);
free_filter:
	kfree(filter);
free_prog:
	kfree(fp);
	return ret;
}

/**
 * seccomp_attach_user_filter - attaches a user-supplied sock_fprog
 * @user_filter: pointer to the user data containing a sock_fprog.
 *
 * Returns 0 on success and non-zero otherwise.
 */
static long seccomp_attach_user_filter(const char __user *user_filter)
{
	struct sock_fprog fprog;
	long ret = -EFAULT;

#ifdef CONFIG_COMPAT
	if (is_compat_task()) {
		struct compat_sock_fprog fprog32;
		if (copy_from_user(&fprog32, user_filter, sizeof(fprog32)))
			goto out;
		fprog.len = fprog32.len;
		fprog.filter = compat_ptr(fprog32.filter);
	} else /* falls through to the if below. */
#endif
	if (copy_from_user(&fprog, user_filter, sizeof(fprog)))
		goto out;
	ret = seccomp_attach_filter(&fprog);
out:
	return ret;
}

/* get_seccomp_filter - increments the reference count of the filter on @tsk */
void get_seccomp_filter(struct task_struct *tsk)
{
	struct seccomp_filter *orig = tsk->seccomp.filter;
	if (!orig)
		return;
	/* Reference count is bounded by the number of total processes. */
	atomic_inc(&orig->usage);
}

/* put_seccomp_filter - decrements the ref count of tsk->seccomp.filter */
void put_seccomp_filter(struct task_struct *tsk)
{
	struct seccomp_filter *orig = tsk->seccomp.filter;
	/* Clean up single-reference branches iteratively. */
	while (orig && atomic_dec_and_test(&orig->usage)) {
		struct seccomp_filter *freeme = orig;
		orig = orig->prev;
		sk_filter_free(freeme->prog);
		kfree(freeme);
	}
}

/**
 * seccomp_send_sigsys - signals the task to allow in-process syscall emulation
 * @syscall: syscall number to send to userland
 * @reason: filter-supplied reason code to send to userland (via si_errno)
 *
 * Forces a SIGSYS with a code of SYS_SECCOMP and related sigsys info.
 */
static void seccomp_send_sigsys(int syscall, int reason)
{
	struct siginfo info;

	memset(&info, 0, sizeof(info));
	info.si_signo = SIGSYS;
	info.si_code = SYS_SECCOMP;
	info.si_call_addr = (void __user *)KSTK_EIP(current);
	info.si_errno = reason;
	info.si_arch = syscall_get_arch();
	info.si_syscall = syscall;
	force_sig_info(SIGSYS, &info, current);
}

#endif	/* CONFIG_SECCOMP_FILTER */

/*
 * Secure computing mode 1 allows only read/write/exit/sigreturn.
 * To be fully secure this must be combined with rlimit
 * to limit the stack allocations too.
 */
static int mode1_syscalls[] = {
	__NR_seccomp_read, __NR_seccomp_write, __NR_seccomp_exit, __NR_seccomp_sigreturn,
	0, /* null terminated */
};

#ifdef CONFIG_COMPAT
static int mode1_syscalls_32[] = {
	__NR_seccomp_read_32, __NR_seccomp_write_32, __NR_seccomp_exit_32, __NR_seccomp_sigreturn_32,
	0, /* null terminated */
};
#endif

int __secure_computing(int this_syscall)
{
	int mode = current->seccomp.mode;
	int exit_sig = 0;
	int *syscall;
	u32 ret;

	switch (mode) {
	case SECCOMP_MODE_STRICT:
		syscall = mode1_syscalls;
#ifdef CONFIG_COMPAT
		if (is_compat_task())
			syscall = mode1_syscalls_32;
#endif
		do {
			if (*syscall == this_syscall)
				return 0;
		} while (*++syscall);
		exit_sig = SIGKILL;
		ret = SECCOMP_RET_KILL;
		break;
#ifdef CONFIG_SECCOMP_FILTER
	case SECCOMP_MODE_FILTER: {
		int data;
		struct pt_regs *regs = task_pt_regs(current);

		ret = seccomp_run_filters(this_syscall);
		data = ret & SECCOMP_RET_DATA;
		ret &= SECCOMP_RET_ACTION;
		switch (ret) {
		case SECCOMP_RET_ERRNO:
			/* Set the low-order 16-bits as an errno. */
			syscall_set_return_value(current, regs,
						 -data, 0);
			goto skip;
		case SECCOMP_RET_TRAP:
			/* Show the handler the original registers. */
			syscall_rollback(current, regs);
			/* Let the filter pass back 16 bits of data. */
			seccomp_send_sigsys(this_syscall, data);
			goto skip;
		case SECCOMP_RET_TRACE:
			/* Skip these calls if there is no tracer. */
			if (!ptrace_event_enabled(current, PTRACE_EVENT_SECCOMP)) {
				syscall_set_return_value(current, regs,
							 -ENOSYS, 0);
				goto skip;
			}
			/* Allow the BPF to provide the event message */
			ptrace_event(PTRACE_EVENT_SECCOMP, data);
			/*
			 * The delivery of a fatal signal during event
			 * notification may silently skip tracer notification.
			 * Terminating the task now avoids executing a system
			 * call that may not be intended.
			 */
			if (fatal_signal_pending(current))
				break;
			if (syscall_get_nr(current, regs) < 0)
				goto skip;  /* Explicit request to skip. */

			return 0;
		case SECCOMP_RET_ALLOW:
			return 0;
		case SECCOMP_RET_KILL:
		default:
			break;
		}
		exit_sig = SIGSYS;
		break;
	}
#endif
	default:
		BUG();
	}

#ifdef SECCOMP_DEBUG
	dump_stack();
#endif
	audit_seccomp(this_syscall, exit_sig, ret);
	do_exit(exit_sig);
#ifdef CONFIG_SECCOMP_FILTER
skip:
	audit_seccomp(this_syscall, exit_sig, ret);
#endif
	return -1;
}

long prctl_get_seccomp(void)
{
	return current->seccomp.mode;
}

/**
 * seccomp_set_mode_strict: internal function for setting strict seccomp
 *
 * Once current->seccomp.mode is non-zero, it may not be changed.
 *
 * Returns 0 on success or -EINVAL on failure.
 */
static long seccomp_set_mode_strict(void)
{
	const unsigned long seccomp_mode = SECCOMP_MODE_STRICT;
	long ret = -EINVAL;

	if (!seccomp_may_assign_mode(seccomp_mode))
		goto out;

#ifdef TIF_NOTSC
	disable_TSC();
#endif
	seccomp_assign_mode(seccomp_mode, 0);
	ret = 0;

out:
	return ret;
}

#ifdef CONFIG_SECCOMP_FILTER
/**
 * seccomp_set_mode_filter: internal function for setting seccomp filter
 * @flags:  flags to change filter behavior
 * @filter: struct sock_fprog containing filter
 *
 * This function may be called repeatedly to install additional filters.
 * Every filter successfully installed will be evaluated (in reverse order)
 * for each system call the task makes.
 *
 * Once current->seccomp.mode is non-zero, it may not be changed.
 *
 * Returns 0 on success or -EINVAL on failure.
 */
static long seccomp_set_mode_filter(unsigned int flags,
				    const char __user *filter)
{
	const unsigned long seccomp_mode = SECCOMP_MODE_FILTER;
	long ret = -EINVAL;

	/* Validate flags. */
	if (flags & ~SECCOMP_FILTER_FLAG_MASK)
		goto out;

	if (!seccomp_may_assign_mode(seccomp_mode))
		goto out;

	ret = seccomp_attach_user_filter(filter);
	if (ret)
		goto out;

	seccomp_assign_mode(seccomp_mode, flags);
out:
	return ret;
}
#else
static inline long seccomp_set_mode_filter(unsigned int flags,
					   const char __user *filter)
{
	return -EINVAL;
}
#endif

/* Common entry point for both prctl and syscall. */
static long do_seccomp(unsigned int op, unsigned int flags,
		       const char __user *uargs)
{
	switch (op) {
	case SECCOMP_SET_MODE_STRICT:
		if (flags != 0 || uargs != NULL)
			return -EINVAL;
		return seccomp_set_mode_strict();
	case SECCOMP_SET_MODE_FILTER:
		return seccomp_set_mode_filter(flags, uargs);
	default:
		return -EINVAL;
	}
}

SYSCALL_DEFINE3(seccomp, unsigned int, op, unsigned int, flags,
			 const char __user *, uargs)
{
	return do_seccomp(op, flags, uargs);
}

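/*
 * For illustration, not part of this file: from userspace the two entry
 * points are equivalent for installing a filter, roughly
 *
 *	syscall(__NR_seccomp, SECCOMP_SET_MODE_FILTER, 0, &prog);
 *	prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &prog);
 *
 * except that the prctl() path cannot pass flags (see prctl_set_seccomp()
 * below, which always forwards flags as zero).
 */
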
/**
 * prctl_set_seccomp: configures current->seccomp.mode
 * @seccomp_mode: requested mode to use
 * @filter: optional struct sock_fprog for use with SECCOMP_MODE_FILTER
 *
 * Returns 0 on success or -EINVAL on failure.
 */
long prctl_set_seccomp(unsigned long seccomp_mode, char __user *filter)
{
	unsigned int op;
	char __user *uargs;

	switch (seccomp_mode) {
	case SECCOMP_MODE_STRICT:
		op = SECCOMP_SET_MODE_STRICT;
		/*
		 * Setting strict mode through prctl always ignored filter,
		 * so make sure it is always NULL here to pass the internal
		 * check in do_seccomp().
		 */
		uargs = NULL;
		break;
	case SECCOMP_MODE_FILTER:
		op = SECCOMP_SET_MODE_FILTER;
		uargs = filter;
		break;
	default:
		return -EINVAL;
	}

	/* prctl interface doesn't have flags, so they are always zero. */
	return do_seccomp(op, 0, uargs);
}