}
/* LOCKING
- * 1. c->privlock protects the struct lxc_container from multiple threads.
- * 2. c->slock protects the on-disk container data
- * 3. thread_mutex protects process data (ex: fd table) from multiple threads
- * slock is an flock, which does not exclude threads. Therefore slock should
- * always be wrapped inside privlock.
+ * 1. container_mem_lock(c) protects the struct lxc_container from multiple threads.
+ * 2. container_disk_lock(c) protects the on-disk container data - in particular the
+ * container configuration file.
+ * The container_disk_lock also takes the container_mem_lock.
+ * 3. thread_mutex protects process data (ex: fd table) from multiple threads.
* NOTHING mutexes two independent programs with their own struct
* lxc_container for the same c->name, between API calls. For instance,
* c->config_read(); c->start(); Between those calls, data on disk
* due to hung callers. So I prefer to keep the locks only within our own
* functions, not across functions.
*
- * If you're going to fork while holding a lxccontainer, increment
+ * If you're going to clone while holding a lxccontainer, increment
* c->numthreads (under privlock) before forking. When deleting,
* decrement numthreads under privlock, then if it hits 0 you can delete.
* Do not ever use a lxccontainer whose numthreads you did not bump.
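To make the rules above concrete, here is a brief illustrative sketch, not part of the patch: a hypothetical API entry point following the lock-wrapping discipline, and a hypothetical helper that bumps c->numthreads under the memory lock before the container is handed to a new thread or cloned child. Only the lock helpers and c->numthreads come from the source; the function names and bodies are invented for illustration.

/* Hypothetical entry point: take the disk lock (which also takes the
 * memory lock), do the protected work, then release both locks. */
static bool lxcapi_example_op(struct lxc_container *c)
{
	bool ret = false;

	if (!c)
		return false;

	if (container_disk_lock(c))
		return false;

	/* ... operate on c->lxc_conf and/or the on-disk config here ... */
	ret = true;

	container_disk_unlock(c);
	return ret;
}

/* Hypothetical reference bump, per the comment above: increment
 * c->numthreads under the memory lock before forking/cloning. */
static bool example_container_ref(struct lxc_container *c)
{
	if (container_mem_lock(c))
		return false;
	c->numthreads++;
	container_mem_unlock(c);
	return true;
}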
{
if (!c)
return;
+ if (container_mem_lock(c)) {
+ ERROR("Error getting mem lock");
+ return;
+ }
c->daemonize = 1;
+ container_mem_unlock(c);
}
static bool lxcapi_wait(struct lxc_container *c, const char *state, int timeout)
if (!c || !lxcapi_is_defined(c))
return false;
- if (lxclock(c->privlock, 0))
- return false;
- if (lxclock(c->slock, 0)) {
- lxcunlock(c->privlock);
+ if (container_disk_lock(c))
return false;
- }
if (!is_stopped(c)) {
// we should queue some sort of error - in c->error_string?
goto out;
}
- if (c->lxc_conf->rootfs.path && c->lxc_conf->rootfs.mount)
+ if (c->lxc_conf && c->lxc_conf->rootfs.path && c->lxc_conf->rootfs.mount)
r = bdev_init(c->lxc_conf->rootfs.path, c->lxc_conf->rootfs.mount, NULL);
if (r) {
if (r->ops->destroy(r) < 0) {
ret = true;
out:
- lxcunlock(c->privlock);
- lxcunlock(c->slock);
+ container_disk_unlock(c);
return ret;
}
static bool lxcapi_set_cgroup_item(struct lxc_container *c, const char *subsys, const char *value)
{
int ret;
- bool b = false;
if (!c)
return false;
- if (container_mem_lock(c))
+ if (is_stopped(c))
return false;
- if (is_stopped(c))
- goto err;
+ if (container_disk_lock(c))
+ return false;
- ret = lxc_cgroup_set(c->name, subsys, value, c->config_path);
- if (!ret)
- b = true;
-err:
- container_mem_unlock(c);
- return b;
+ ret = lxc_cgroup_set(c->name, subsys, value, c->config_path);
+
+ container_disk_unlock(c);
+ return ret == 0;
}
static int lxcapi_get_cgroup_item(struct lxc_container *c, const char *subsys, char *retv, int inlen)
{
- int ret = -1;
+ int ret;
if (!c || !c->lxc_conf)
return -1;
- if (container_mem_lock(c))
+ if (is_stopped(c))
return -1;
- if (is_stopped(c))
- goto out;
+ if (container_disk_lock(c))
+ return -1;
ret = lxc_cgroup_get(c->name, subsys, retv, inlen, c->config_path);
-out:
- container_mem_unlock(c);
+ container_disk_unlock(c);
return ret;
}
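For reference, a minimal caller-side sketch of how these two entry points are reached through the public liblxc API. The container name "foo" is an assumption; the container must already be defined and running, since both functions bail out when is_stopped() reports the container as stopped.

#include <stdio.h>
#include <lxc/lxccontainer.h>

int main(void)
{
	char buf[64];
	struct lxc_container *c = lxc_container_new("foo", NULL);

	if (!c)
		return 1;

	/* Dispatches to lxcapi_set_cgroup_item(); returns false on failure. */
	if (!c->set_cgroup_item(c, "memory.limit_in_bytes", "268435456"))
		fprintf(stderr, "failed to set cgroup item\n");

	/* Dispatches to lxcapi_get_cgroup_item(); returns < 0 on failure. */
	if (c->get_cgroup_item(c, "memory.limit_in_bytes", buf, sizeof(buf)) < 0)
		fprintf(stderr, "failed to read cgroup item\n");
	else
		printf("memory.limit_in_bytes = %s\n", buf);

	lxc_container_put(c);
	return 0;
}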