int lbs_xattr_count; /* number of xattr slots in new_xattrs array */
int lbs_tun_dev;
int lbs_bdev;
+ int lbs_bpf_map;
+ int lbs_bpf_prog;
+ int lbs_bpf_token;
};
/*
lsm_set_blob_size(&needed->lbs_xattr_count,
&blob_sizes.lbs_xattr_count);
lsm_set_blob_size(&needed->lbs_bdev, &blob_sizes.lbs_bdev);
+ lsm_set_blob_size(&needed->lbs_bpf_map, &blob_sizes.lbs_bpf_map);
+ lsm_set_blob_size(&needed->lbs_bpf_prog, &blob_sizes.lbs_bpf_prog);
+ lsm_set_blob_size(&needed->lbs_bpf_token, &blob_sizes.lbs_bpf_token);
}
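Each lsm_set_blob_size() call turns a module's size request into that module's offset inside the shared blob while growing the running total, so the new lbs_bpf_* fields end up holding per-LSM offsets; the SELinux accessors added to objsec.h further down rely on exactly that. A minimal sketch of the helper (simplified; pointer-alignment details in the in-tree version are omitted):

static void __init lsm_set_blob_size(int *need, int *lbs)
{
	int offset;

	if (*need <= 0)
		return;

	offset = *lbs;	/* this module's data starts at the current total */
	*lbs += *need;	/* grow the composite blob by the requested size */
	*need = offset;	/* the per-LSM field now records an offset, not a size */
}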
/* Prepare LSM for initialization. */
init_debug("tun device blob size = %d\n", blob_sizes.lbs_tun_dev);
init_debug("xattr slots = %d\n", blob_sizes.lbs_xattr_count);
init_debug("bdev blob size = %d\n", blob_sizes.lbs_bdev);
+ init_debug("bpf map blob size = %d\n", blob_sizes.lbs_bpf_map);
+ init_debug("bpf prog blob size = %d\n", blob_sizes.lbs_bpf_prog);
+ init_debug("bpf token blob size = %d\n", blob_sizes.lbs_bpf_token);
/*
* Create any kmem_caches needed for blobs
GFP_KERNEL);
}
+#ifdef CONFIG_BPF_SYSCALL
+/**
+ * lsm_bpf_map_alloc - allocate a composite bpf_map blob
+ * @map: the bpf_map that needs a blob
+ *
+ * Allocate the bpf_map blob for all the modules
+ *
+ * Returns 0, or -ENOMEM if memory can't be allocated.
+ */
+static int lsm_bpf_map_alloc(struct bpf_map *map)
+{
+ return lsm_blob_alloc(&map->security, blob_sizes.lbs_bpf_map, GFP_KERNEL);
+}
+
+/**
+ * lsm_bpf_prog_alloc - allocate a composite bpf_prog blob
+ * @prog: the bpf_prog that needs a blob
+ *
+ * Allocate the bpf_prog blob for all the modules
+ *
+ * Returns 0, or -ENOMEM if memory can't be allocated.
+ */
+static int lsm_bpf_prog_alloc(struct bpf_prog *prog)
+{
+ return lsm_blob_alloc(&prog->aux->security, blob_sizes.lbs_bpf_prog, GFP_KERNEL);
+}
+
+/**
+ * lsm_bpf_token_alloc - allocate a composite bpf_token blob
+ * @token: the bpf_token that needs a blob
+ *
+ * Allocate the bpf_token blob for all the modules
+ *
+ * Returns 0, or -ENOMEM if memory can't be allocated.
+ */
+static int lsm_bpf_token_alloc(struct bpf_token *token)
+{
+ return lsm_blob_alloc(&token->security, blob_sizes.lbs_bpf_token, GFP_KERNEL);
+}
+#endif /* CONFIG_BPF_SYSCALL */
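The three helpers above delegate to lsm_blob_alloc(), the framework's generic blob allocator. Roughly (a sketch consistent with the call sites above, not the verbatim in-tree code), a zero-size request leaves the pointer NULL and anything else gets a single zeroed allocation large enough for every module's slice:

static int lsm_blob_alloc(void **dest, size_t size, gfp_t gfp)
{
	if (size == 0) {
		*dest = NULL;		/* no module asked for blob space */
		return 0;
	}

	*dest = kzalloc(size, gfp);	/* one zeroed blob shared by all LSMs */
	if (*dest == NULL)
		return -ENOMEM;

	return 0;
}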
+
/**
* lsm_early_task - during initialization allocate a composite task blob
* @task: the task that needs a blob
int security_bpf_map_create(struct bpf_map *map, union bpf_attr *attr,
struct bpf_token *token, bool kernel)
{
- return call_int_hook(bpf_map_create, map, attr, token, kernel);
+ int rc;
+
+ rc = lsm_bpf_map_alloc(map);
+ if (unlikely(rc))
+ return rc;
+
+ rc = call_int_hook(bpf_map_create, map, attr, token, kernel);
+ if (unlikely(rc))
+ security_bpf_map_free(map);
+ return rc;
}
/**
int security_bpf_prog_load(struct bpf_prog *prog, union bpf_attr *attr,
struct bpf_token *token, bool kernel)
{
- return call_int_hook(bpf_prog_load, prog, attr, token, kernel);
+ int rc;
+
+ rc = lsm_bpf_prog_alloc(prog);
+ if (unlikely(rc))
+ return rc;
+
+ rc = call_int_hook(bpf_prog_load, prog, attr, token, kernel);
+ if (unlikely(rc))
+ security_bpf_prog_free(prog);
+ return rc;
}
/**
int security_bpf_token_create(struct bpf_token *token, union bpf_attr *attr,
const struct path *path)
{
- return call_int_hook(bpf_token_create, token, attr, path);
+ int rc;
+
+ rc = lsm_bpf_token_alloc(token);
+ if (unlikely(rc))
+ return rc;
+
+ rc = call_int_hook(bpf_token_create, token, attr, path);
+ if (unlikely(rc))
+ security_bpf_token_free(token);
+ return rc;
}
/**
void security_bpf_map_free(struct bpf_map *map)
{
call_void_hook(bpf_map_free, map);
+ kfree(map->security);
+ map->security = NULL;
}
/**
void security_bpf_prog_free(struct bpf_prog *prog)
{
call_void_hook(bpf_prog_free, prog);
+ kfree(prog->aux->security);
+ prog->aux->security = NULL;
}
/**
void security_bpf_token_free(struct bpf_token *token)
{
call_void_hook(bpf_token_free, token);
+ kfree(token->security);
+ token->security = NULL;
}
#endif /* CONFIG_BPF_SYSCALL */
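With the framework owning blob allocation and release, an LSM's bpf_map_create / bpf_prog_load / bpf_token_create hooks only initialize their own slice of the blob and no longer need an -ENOMEM error path. A hypothetical module (names and fields are illustrative, not part of this patch) would look like:

/* Hypothetical LSM: "example_blob_sizes.lbs_bpf_map" is assumed to hold this
 * module's offset into the shared blob, as set up by lsm_set_blob_size().
 */
struct example_bpf_map_sec {
	u32 flags;
};

static int example_bpf_map_create(struct bpf_map *map, union bpf_attr *attr,
				  struct bpf_token *token, bool kernel)
{
	struct example_bpf_map_sec *sec =
		map->security + example_blob_sizes.lbs_bpf_map;

	sec->flags = kernel ? 1 : 0;	/* blob is already zeroed by the framework */
	return 0;			/* nothing to allocate, nothing to undo */
}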
if (file->f_op == &bpf_map_fops) {
map = file->private_data;
- bpfsec = map->security;
+ bpfsec = selinux_bpf_map_security(map);
ret = avc_has_perm(sid, bpfsec->sid, SECCLASS_BPF,
bpf_map_fmode_to_av(file->f_mode), NULL);
if (ret)
return ret;
} else if (file->f_op == &bpf_prog_fops) {
prog = file->private_data;
- bpfsec = prog->aux->security;
+ bpfsec = selinux_bpf_prog_security(prog);
ret = avc_has_perm(sid, bpfsec->sid, SECCLASS_BPF,
BPF__PROG_RUN, NULL);
if (ret)
u32 sid = current_sid();
struct bpf_security_struct *bpfsec;
- bpfsec = map->security;
+ bpfsec = selinux_bpf_map_security(map);
return avc_has_perm(sid, bpfsec->sid, SECCLASS_BPF,
bpf_map_fmode_to_av(fmode), NULL);
}
u32 sid = current_sid();
struct bpf_security_struct *bpfsec;
- bpfsec = prog->aux->security;
+ bpfsec = selinux_bpf_prog_security(prog);
return avc_has_perm(sid, bpfsec->sid, SECCLASS_BPF,
BPF__PROG_RUN, NULL);
}
{
struct bpf_security_struct *bpfsec;
- bpfsec = kzalloc(sizeof(*bpfsec), GFP_KERNEL);
- if (!bpfsec)
- return -ENOMEM;
-
+ bpfsec = selinux_bpf_map_security(map);
bpfsec->sid = current_sid();
- map->security = bpfsec;
return 0;
}
-static void selinux_bpf_map_free(struct bpf_map *map)
-{
- struct bpf_security_struct *bpfsec = map->security;
-
- map->security = NULL;
- kfree(bpfsec);
-}
-
static int selinux_bpf_prog_load(struct bpf_prog *prog, union bpf_attr *attr,
struct bpf_token *token, bool kernel)
{
struct bpf_security_struct *bpfsec;
- bpfsec = kzalloc(sizeof(*bpfsec), GFP_KERNEL);
- if (!bpfsec)
- return -ENOMEM;
-
+ bpfsec = selinux_bpf_prog_security(prog);
bpfsec->sid = current_sid();
- prog->aux->security = bpfsec;
return 0;
}
-static void selinux_bpf_prog_free(struct bpf_prog *prog)
-{
- struct bpf_security_struct *bpfsec = prog->aux->security;
-
- prog->aux->security = NULL;
- kfree(bpfsec);
-}
-
static int selinux_bpf_token_create(struct bpf_token *token, union bpf_attr *attr,
const struct path *path)
{
struct bpf_security_struct *bpfsec;
- bpfsec = kzalloc(sizeof(*bpfsec), GFP_KERNEL);
- if (!bpfsec)
- return -ENOMEM;
-
+ bpfsec = selinux_bpf_token_security(token);
bpfsec->sid = current_sid();
- token->security = bpfsec;
return 0;
}
-
-static void selinux_bpf_token_free(struct bpf_token *token)
-{
- struct bpf_security_struct *bpfsec = token->security;
-
- token->security = NULL;
- kfree(bpfsec);
-}
#endif
struct lsm_blob_sizes selinux_blob_sizes __ro_after_init = {
.lbs_xattr_count = SELINUX_INODE_INIT_XATTRS,
.lbs_tun_dev = sizeof(struct tun_security_struct),
.lbs_ib = sizeof(struct ib_security_struct),
+ .lbs_bpf_map = sizeof(struct bpf_security_struct),
+ .lbs_bpf_prog = sizeof(struct bpf_security_struct),
+ .lbs_bpf_token = sizeof(struct bpf_security_struct),
};
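All three new slots are sized for the same structure: SELinux's per-BPF-object state is just the SID of the creating task, defined in objsec.h along the lines of:

struct bpf_security_struct {
	u32 sid;	/* SID of the task that created the map/prog/token */
};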
#ifdef CONFIG_PERF_EVENTS
LSM_HOOK_INIT(bpf, selinux_bpf),
LSM_HOOK_INIT(bpf_map, selinux_bpf_map),
LSM_HOOK_INIT(bpf_prog, selinux_bpf_prog),
- LSM_HOOK_INIT(bpf_map_free, selinux_bpf_map_free),
- LSM_HOOK_INIT(bpf_prog_free, selinux_bpf_prog_free),
- LSM_HOOK_INIT(bpf_token_free, selinux_bpf_token_free),
#endif
#ifdef CONFIG_PERF_EVENTS
#include <linux/lsm_hooks.h>
#include <linux/msg.h>
+#include <linux/bpf.h>
#include <net/net_namespace.h>
#include "flask.h"
#include "avc.h"
return perf_event + selinux_blob_sizes.lbs_perf_event;
}
+#ifdef CONFIG_BPF_SYSCALL
+static inline struct bpf_security_struct *
+selinux_bpf_map_security(struct bpf_map *map)
+{
+ return map->security + selinux_blob_sizes.lbs_bpf_map;
+}
+
+static inline struct bpf_security_struct *
+selinux_bpf_prog_security(struct bpf_prog *prog)
+{
+ return prog->aux->security + selinux_blob_sizes.lbs_bpf_prog;
+}
+
+static inline struct bpf_security_struct *
+selinux_bpf_token_security(struct bpf_token *token)
+{
+ return token->security + selinux_blob_sizes.lbs_bpf_token;
+}
+#endif /* CONFIG_BPF_SYSCALL */
#endif /* _SELINUX_OBJSEC_H_ */