#endif /* CONFIG_AUDIT */
#ifdef CONFIG_BPF_SYSCALL
-LSM_HOOK(int, 0, bpf, int cmd, union bpf_attr *attr, unsigned int size)
+LSM_HOOK(int, 0, bpf, int cmd, union bpf_attr *attr, unsigned int size, bool kernel)
LSM_HOOK(int, 0, bpf_map, struct bpf_map *map, fmode_t fmode)
LSM_HOOK(int, 0, bpf_prog, struct bpf_prog *prog)
LSM_HOOK(int, 0, bpf_map_create, struct bpf_map *map, union bpf_attr *attr,
- struct bpf_token *token)
+ struct bpf_token *token, bool kernel)
LSM_HOOK(void, LSM_RET_VOID, bpf_map_free, struct bpf_map *map)
LSM_HOOK(int, 0, bpf_prog_load, struct bpf_prog *prog, union bpf_attr *attr,
- struct bpf_token *token)
+ struct bpf_token *token, bool kernel)
LSM_HOOK(void, LSM_RET_VOID, bpf_prog_free, struct bpf_prog *prog)
LSM_HOOK(int, 0, bpf_token_create, struct bpf_token *token, union bpf_attr *attr,
const struct path *path)
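For illustration only (not part of the patch), an in-kernel LSM implementing one of these hooks would adopt the widened prototype as sketched below; the "example_*" names are hypothetical, the policy is purely illustrative, and registration via security_add_hooks()/DEFINE_LSM is omitted.

#include <linux/bpf.h>
#include <linux/errno.h>
#include <linux/lsm_hooks.h>

/* Hypothetical hook matching the new bpf_prog_load prototype above. */
static int example_bpf_prog_load(struct bpf_prog *prog, union bpf_attr *attr,
				 struct bpf_token *token, bool kernel)
{
	/* Illustrative policy: require a BPF token for user-space loads,
	 * but trust loads that originate inside the kernel.
	 */
	if (!kernel && !token)
		return -EPERM;
	return 0;
}

static struct security_hook_list example_hooks[] __ro_after_init = {
	LSM_HOOK_INIT(bpf_prog_load, example_bpf_prog_load),
};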
struct bpf_prog;
struct bpf_token;
#ifdef CONFIG_SECURITY
-extern int security_bpf(int cmd, union bpf_attr *attr, unsigned int size);
+extern int security_bpf(int cmd, union bpf_attr *attr, unsigned int size, bool kernel);
extern int security_bpf_map(struct bpf_map *map, fmode_t fmode);
extern int security_bpf_prog(struct bpf_prog *prog);
extern int security_bpf_map_create(struct bpf_map *map, union bpf_attr *attr,
- struct bpf_token *token);
+ struct bpf_token *token, bool kernel);
extern void security_bpf_map_free(struct bpf_map *map);
extern int security_bpf_prog_load(struct bpf_prog *prog, union bpf_attr *attr,
- struct bpf_token *token);
+ struct bpf_token *token, bool kernel);
extern void security_bpf_prog_free(struct bpf_prog *prog);
extern int security_bpf_token_create(struct bpf_token *token, union bpf_attr *attr,
const struct path *path);
extern int security_bpf_token_capable(const struct bpf_token *token, int cap);
#else
static inline int security_bpf(int cmd, union bpf_attr *attr,
- unsigned int size)
+ unsigned int size, bool kernel)
{
return 0;
}
}
static inline int security_bpf_map_create(struct bpf_map *map, union bpf_attr *attr,
- struct bpf_token *token)
+ struct bpf_token *token, bool kernel)
{
return 0;
}
{ }
static inline int security_bpf_prog_load(struct bpf_prog *prog, union bpf_attr *attr,
- struct bpf_token *token)
+ struct bpf_token *token, bool kernel)
{
return 0;
}
#define BPF_MAP_CREATE_LAST_FIELD map_token_fd
/* called via syscall */
-static int map_create(union bpf_attr *attr)
+static int map_create(union bpf_attr *attr, bool kernel)
{
const struct bpf_map_ops *ops;
struct bpf_token *token = NULL;
attr->btf_vmlinux_value_type_id;
}
- err = security_bpf_map_create(map, attr, token);
+ err = security_bpf_map_create(map, attr, token, kernel);
if (err)
goto free_map_sec;
if (err < 0)
goto free_prog;
- err = security_bpf_prog_load(prog, attr, token);
+ err = security_bpf_prog_load(prog, attr, token, uattr.is_kernel);
if (err)
goto free_prog_sec;
if (copy_from_bpfptr(&attr, uattr, size) != 0)
return -EFAULT;
- err = security_bpf(cmd, &attr, size);
+ err = security_bpf(cmd, &attr, size, uattr.is_kernel);
if (err < 0)
return err;
switch (cmd) {
case BPF_MAP_CREATE:
- err = map_create(&attr);
+ err = map_create(&attr, uattr.is_kernel);
break;
case BPF_MAP_LOOKUP_ELEM:
err = map_lookup_elem(&attr);
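For context (not part of the diff), the new kernel argument forwards the is_kernel bit of the bpfptr_t (a sockptr_t) that wraps the attribute pointer: the bpf(2) syscall path builds it with USER_BPFPTR(), while in-kernel callers such as kern_sys_bpf() use KERNEL_BPFPTR(). A minimal sketch, assuming only include/linux/bpfptr.h:

#include <linux/bpfptr.h>

/* Simplified illustration of where uattr.is_kernel comes from:
 *   - SYSCALL_DEFINE3(bpf, ...) wraps the user pointer:  USER_BPFPTR(uattr)
 *   - kern_sys_bpf() wraps an in-kernel attribute:       KERNEL_BPFPTR(attr)
 * __sys_bpf() then forwards this bit to security_bpf() and friends.
 */
static bool example_attr_from_kernel(bpfptr_t uattr)
{
	return bpfptr_is_kernel(uattr);
}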
* @cmd: command
* @attr: bpf attribute
* @size: size
+ * @kernel: whether or not the call originated from the kernel
*
* Do an initial check for all bpf syscalls after the attribute is copied into
* the kernel. Each security module can implement its own rules to check the
* specific cmd it needs.
*
* Return: Returns 0 if permission is granted.
*/
-int security_bpf(int cmd, union bpf_attr *attr, unsigned int size)
+int security_bpf(int cmd, union bpf_attr *attr, unsigned int size, bool kernel)
{
- return call_int_hook(bpf, cmd, attr, size);
+ return call_int_hook(bpf, cmd, attr, size, kernel);
}
/**
* @map: BPF map object
* @attr: BPF syscall attributes used to create BPF map
* @token: BPF token used to grant user access
+ * @kernel: whether or not the call originated from the kernel
*
* Do a check when the kernel creates a new BPF map. This is also the
* point where the LSM blob is allocated for LSMs that need one.
* Return: Returns 0 on success, error on failure.
*/
int security_bpf_map_create(struct bpf_map *map, union bpf_attr *attr,
- struct bpf_token *token)
+ struct bpf_token *token, bool kernel)
{
- return call_int_hook(bpf_map_create, map, attr, token);
+ return call_int_hook(bpf_map_create, map, attr, token, kernel);
}
/**
* @prog: BPF program object
* @attr: BPF syscall attributes used to create BPF program
* @token: BPF token used to grant user access to BPF subsystem
+ * @kernel: whether or not the call originated from the kernel
*
* Perform an access control check when the kernel loads a BPF program and
* allocates the associated BPF program object. This hook is also responsible
* for allocating any required LSM state for the BPF program.
* Return: Returns 0 on success, error on failure.
*/
int security_bpf_prog_load(struct bpf_prog *prog, union bpf_attr *attr,
- struct bpf_token *token)
+ struct bpf_token *token, bool kernel)
{
- return call_int_hook(bpf_prog_load, prog, attr, token);
+ return call_int_hook(bpf_prog_load, prog, attr, token, kernel);
}
/**
#ifdef CONFIG_BPF_SYSCALL
static int selinux_bpf(int cmd, union bpf_attr *attr,
- unsigned int size)
+ unsigned int size, bool kernel)
{
u32 sid = current_sid();
int ret;
}
static int selinux_bpf_map_create(struct bpf_map *map, union bpf_attr *attr,
- struct bpf_token *token)
+ struct bpf_token *token, bool kernel)
{
struct bpf_security_struct *bpfsec;
}
static int selinux_bpf_prog_load(struct bpf_prog *prog, union bpf_attr *attr,
- struct bpf_token *token)
+ struct bpf_token *token, bool kernel)
{
struct bpf_security_struct *bpfsec;
}
SEC("?lsm.s/bpf")
-int BPF_PROG(inproper_sleepable_kfunc, int cmd, union bpf_attr *attr, unsigned int size)
+int BPF_PROG(inproper_sleepable_kfunc, int cmd, union bpf_attr *attr, unsigned int size,
+ bool kernel)
{
struct bpf_key *bkey;
}
SEC("lsm/bpf")
-int BPF_PROG(lsm_run, int cmd, union bpf_attr *attr, unsigned int size)
+int BPF_PROG(lsm_run, int cmd, union bpf_attr *attr, unsigned int size, bool kernel)
{
return bpf_link_create_verify(cmd);
}
SEC("lsm.s/bpf")
-int BPF_PROG(lsm_s_run, int cmd, union bpf_attr *attr, unsigned int size)
+int BPF_PROG(lsm_s_run, int cmd, union bpf_attr *attr, unsigned int size, bool kernel)
{
return bpf_link_create_verify(cmd);
}
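As a usage sketch (not part of the patch), a BPF LSM program attached to the same hook can now branch on the caller's origin; the program name below is hypothetical and the policy is purely illustrative, following the selftest conventions above.

#include "vmlinux.h"
#include <errno.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>

char _license[] SEC("license") = "GPL";

SEC("lsm.s/bpf")
int BPF_PROG(deny_user_prog_load, int cmd, union bpf_attr *attr,
	     unsigned int size, bool kernel)
{
	/* Illustrative policy: refuse BPF_PROG_LOAD issued from user space,
	 * while permitting kernel-originated (light skeleton) loads.
	 */
	if (!kernel && cmd == BPF_PROG_LOAD)
		return -EPERM;
	return 0;
}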
SEC("?lsm.s/bpf")
__failure __msg("cannot pass in dynptr at an offset=-8")
-int BPF_PROG(not_valid_dynptr, int cmd, union bpf_attr *attr, unsigned int size)
+int BPF_PROG(not_valid_dynptr, int cmd, union bpf_attr *attr, unsigned int size, bool kernel)
{
unsigned long val;
SEC("?lsm.s/bpf")
__failure __msg("arg#0 expected pointer to stack or const struct bpf_dynptr")
-int BPF_PROG(not_ptr_to_stack, int cmd, union bpf_attr *attr, unsigned int size)
+int BPF_PROG(not_ptr_to_stack, int cmd, union bpf_attr *attr, unsigned int size, bool kernel)
{
unsigned long val = 0;
}
SEC("lsm.s/bpf")
-int BPF_PROG(dynptr_data_null, int cmd, union bpf_attr *attr, unsigned int size)
+int BPF_PROG(dynptr_data_null, int cmd, union bpf_attr *attr, unsigned int size, bool kernel)
{
struct bpf_key *trusted_keyring;
struct bpf_dynptr ptr;
extern void bpf_key_put(struct bpf_key *key) __ksym;
SEC("lsm.s/bpf")
-int BPF_PROG(bpf, int cmd, union bpf_attr *attr, unsigned int size)
+int BPF_PROG(bpf, int cmd, union bpf_attr *attr, unsigned int size, bool kernel)
{
struct bpf_key *bkey;
__u32 pid;
char tp_name[128];
SEC("lsm.s/bpf")
-int BPF_PROG(lsm_run, int cmd, union bpf_attr *attr, unsigned int size)
+int BPF_PROG(lsm_run, int cmd, union bpf_attr *attr, unsigned int size, bool kernel)
{
switch (cmd) {
case BPF_RAW_TRACEPOINT_OPEN:
}
SEC("lsm.s/bpf")
-int BPF_PROG(lsm_run, int cmd, union bpf_attr *attr, unsigned int size)
+int BPF_PROG(lsm_run, int cmd, union bpf_attr *attr, unsigned int size, bool kernel)
{
struct cgroup *cgrp = NULL;
struct task_struct *task;
char _license[] SEC("license") = "GPL";
SEC("lsm.s/bpf")
-int BPF_PROG(bpf, int cmd, union bpf_attr *attr, unsigned int size)
+int BPF_PROG(bpf, int cmd, union bpf_attr *attr, unsigned int size, bool kernel)
{
struct bpf_dynptr data_ptr, sig_ptr;
struct data *data_val;