static struct binder_thread *binder_get_txn_from(struct binder_transaction *t)
{
struct binder_thread *from;
- spin_lock(&t->lock);
+ guard(spinlock)(&t->lock);
from = t->from;
if (from)
atomic_inc(&from->tmp_ref);
- spin_unlock(&t->lock);
return from;
}
static int binder_ioctl_set_ctx_mgr(struct binder_proc *proc, struct flat_binder_object *fbo)
{
int ret = 0;
struct binder_context *context = proc->context;
struct binder_node *new_node;
kuid_t curr_euid = current_euid();
- mutex_lock(&context->context_mgr_node_lock);
+ guard(mutex)(&context->context_mgr_node_lock);
if (context->binder_context_mgr_node) {
pr_err("BINDER_SET_CONTEXT_MGR already set\n");
- ret = -EBUSY;
- goto out;
+ return -EBUSY;
}
ret = security_binder_set_context_mgr(proc->cred);
if (ret < 0)
- goto out;
+ return ret;
if (uid_valid(context->binder_context_mgr_uid)) {
if (!uid_eq(context->binder_context_mgr_uid, curr_euid)) {
pr_err("BINDER_SET_CONTEXT_MGR bad uid %d != %d\n",
from_kuid(&init_user_ns, curr_euid),
from_kuid(&init_user_ns,
context->binder_context_mgr_uid));
- ret = -EPERM;
- goto out;
+ return -EPERM;
}
} else {
context->binder_context_mgr_uid = curr_euid;
}
new_node = binder_new_node(proc, fbo);
- if (!new_node) {
- ret = -ENOMEM;
- goto out;
- }
+ if (!new_node)
+ return -ENOMEM;
binder_node_lock(new_node);
new_node->local_weak_refs++;
new_node->local_strong_refs++;
context->binder_context_mgr_node = new_node;
binder_node_unlock(new_node);
binder_put_node(new_node);
-out:
- mutex_unlock(&context->context_mgr_node_lock);
return ret;
}
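The payoff in binder_ioctl_set_ctx_mgr() is that every early return now releases context_mgr_node_lock on its way out, which is what lets the out: label and its paired mutex_unlock() go. For reference, the mutex guard class instantiated by guard(mutex)(...) is defined in <linux/mutex.h> roughly as:

	DEFINE_GUARD(mutex, struct mutex *,
		     mutex_lock(_T), mutex_unlock(_T))

DEFINE_GUARD() expands to a constructor that takes the lock and a destructor, run at scope exit, that releases it; _T names the stored lock pointer.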
static void
binder_defer_work(struct binder_proc *proc, enum binder_deferred_state defer)
{
- mutex_lock(&binder_deferred_lock);
+ guard(mutex)(&binder_deferred_lock);
proc->deferred_work |= defer;
if (hlist_unhashed(&proc->deferred_work_node)) {
hlist_add_head(&proc->deferred_work_node,
&binder_deferred_list);
schedule_work(&binder_deferred_work);
}
- mutex_unlock(&binder_deferred_lock);
}
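binder_defer_work() holds binder_deferred_lock across its whole body, so the function-scope guard is a like-for-like conversion. Where only part of a function needs the lock, <linux/cleanup.h> also provides scoped_guard(), which confines the critical section to a braced block; a sketch of that shape (queueing body abbreviated):

	static void binder_defer_work(struct binder_proc *proc,
				      enum binder_deferred_state defer)
	{
		scoped_guard(mutex, &binder_deferred_lock) {
			proc->deferred_work |= defer;
			/* ... hlist_add_head() and schedule_work(), as above ... */
		}
		/* binder_deferred_lock is already released here */
	}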
static int proc_show(struct seq_file *m, void *unused)
{
struct binder_proc *itr;
int pid = (unsigned long)m->private;
- mutex_lock(&binder_procs_lock);
+ guard(mutex)(&binder_procs_lock);
hlist_for_each_entry(itr, &binder_procs, proc_node) {
if (itr->pid == pid) {
seq_puts(m, "binder proc state:\n");
print_binder_proc(m, itr, true, false);
}
}
- mutex_unlock(&binder_procs_lock);
return 0;
}
void binder_add_device(struct binder_device *device)
{
- spin_lock(&binder_devices_lock);
+ guard(spinlock)(&binder_devices_lock);
hlist_add_head(&device->hlist, &binder_devices);
- spin_unlock(&binder_devices_lock);
}
void binder_remove_device(struct binder_device *device)
{
- spin_lock(&binder_devices_lock);
+ guard(spinlock)(&binder_devices_lock);
hlist_del_init(&device->hlist);
- spin_unlock(&binder_devices_lock);
}
static int __init init_binder_device(const char *name)
struct binder_buffer *binder_alloc_prepare_to_free(struct binder_alloc *alloc,
unsigned long user_ptr)
{
- struct binder_buffer *buffer;
-
- mutex_lock(&alloc->mutex);
- buffer = binder_alloc_prepare_to_free_locked(alloc, user_ptr);
- mutex_unlock(&alloc->mutex);
- return buffer;
+ guard(mutex)(&alloc->mutex);
+ return binder_alloc_prepare_to_free_locked(alloc, user_ptr);
}
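The binder_alloc_prepare_to_free() change (binder_alloc.c) leans on a guaranteed ordering: cleanup handlers run only after the return expression has been evaluated, so binder_alloc_prepare_to_free_locked() is still called under alloc->mutex even though its result is returned directly, and the temporary buffer variable becomes dead weight. A self-contained userspace check of that ordering (hypothetical names):

	#include <stdio.h>

	static void scope_cleanup(int *token)
	{
		printf("2: cleanup fires after the return value exists (token=%d)\n",
		       *token);
	}

	static int compute(void)
	{
		printf("1: return expression evaluated first\n");
		return 42;
	}

	static int demo(void)
	{
		int token __attribute__((cleanup(scope_cleanup))) = 7;
		return compute();	/* still under the "guard" */
	}

	int main(void)
	{
		printf("3: demo() == %d\n", demo());
		return 0;
	}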
void binder_alloc_print_allocated(struct seq_file *m, struct binder_alloc *alloc)
{
struct binder_buffer *buffer;
struct rb_node *n;
- mutex_lock(&alloc->mutex);
+ guard(mutex)(&alloc->mutex);
for (n = rb_first(&alloc->allocated_buffers); n; n = rb_next(n)) {
buffer = rb_entry(n, struct binder_buffer, rb_node);
seq_printf(m, "  buffer %d: %lx size %zd:%zd:%zd %s\n",
buffer->debug_id,
buffer->user_data - alloc->buffer,
buffer->data_size, buffer->offsets_size,
buffer->extra_buffers_size,
buffer->transaction ? "active" : "delivered");
}
- mutex_unlock(&alloc->mutex);
}
/**
 * binder_alloc_get_allocated_count() - return count of buffers
 * @alloc: binder_alloc for this proc
 *
 * Return: count of allocated buffers
 */
int binder_alloc_get_allocated_count(struct binder_alloc *alloc)
{
struct rb_node *n;
int count = 0;
- mutex_lock(&alloc->mutex);
+ guard(mutex)(&alloc->mutex);
for (n = rb_first(&alloc->allocated_buffers); n != NULL; n = rb_next(n))
count++;
- mutex_unlock(&alloc->mutex);
return count;
}
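One caveat for anyone extending this conversion: guard() pins the unlock to the end of the enclosing scope, so it cannot express "drop the lock early", for example before a blocking wait. Such sites should keep explicit lock/unlock calls or use scoped_guard() around just the narrow region. A hedged sketch with a hypothetical context structure:

	struct my_ctx {
		struct mutex lock;
		struct list_head entry;
		struct list_head pending;
		struct completion done;
	};

	static void queue_and_wait(struct my_ctx *ctx)
	{
		/* Only the list update needs the mutex; a whole-function
		 * guard() would wrongly hold it across the blocking wait. */
		scoped_guard(mutex, &ctx->lock)
			list_add(&ctx->entry, &ctx->pending);

		wait_for_completion(&ctx->done);	/* lock already dropped */
	}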