binder: use guards for plain mutex- and spinlock-protected sections
author     Dmitry Antipov <dmantipov@yandex.ru>
           Thu, 26 Jun 2025 07:30:54 +0000 (10:30 +0300)
committer  Greg Kroah-Hartman <gregkh@linuxfoundation.org>
           Wed, 16 Jul 2025 12:11:20 +0000 (14:11 +0200)
Use 'guard(mutex)' and 'guard(spinlock)' for plain (i.e. non-scoped)
mutex- and spinlock-protected sections, respectively, thus making
locking a bit simpler. Briefly tested with 'stress-ng --binderfs'.
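
For context, a minimal, hypothetical sketch of the pattern this patch applies
(not code from the driver): guard() comes from <linux/cleanup.h>, and the
names example_lock, example_count and example_add are invented for
illustration. guard(spinlock) is used the same way for spinlock-protected
sections.

        /*
         * Illustrative sketch only: guard() acquires the lock and releases it
         * automatically when the guard goes out of scope, so early returns no
         * longer need explicit unlock calls or a 'goto out' label.
         */
        #include <linux/cleanup.h>
        #include <linux/mutex.h>
        #include <linux/errno.h>

        static DEFINE_MUTEX(example_lock);      /* protects example_count */
        static int example_count;

        static int example_add(int limit)
        {
                guard(mutex)(&example_lock);    /* mutex_lock() here, mutex_unlock() on every return */

                if (example_count >= limit)
                        return -EBUSY;          /* early return: the guard still unlocks */

                example_count++;
                return 0;                       /* normal return: unlocked the same way */
        }

The cleanup runs at every exit from the scope, which is what lets the hunks
below drop the 'out:' labels and the explicit unlock calls.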

Signed-off-by: Dmitry Antipov <dmantipov@yandex.ru>
Reviewed-by: Alice Ryhl <aliceryhl@google.com>
Acked-by: Carlos Llamas <cmllamas@google.com>
Link: https://lore.kernel.org/r/20250626073054.7706-2-dmantipov@yandex.ru
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
drivers/android/binder.c
drivers/android/binder_alloc.c
drivers/android/binder_alloc.h

diff --git a/drivers/android/binder.c b/drivers/android/binder.c
index 2bd8ac943171b4367388c96825d18d81409600d3..fb527a06c54b9588178d059eebb70f159d3c4bc8 100644
--- a/drivers/android/binder.c
+++ b/drivers/android/binder.c
@@ -1585,11 +1585,10 @@ static struct binder_thread *binder_get_txn_from(
 {
        struct binder_thread *from;
 
-       spin_lock(&t->lock);
+       guard(spinlock)(&t->lock);
        from = t->from;
        if (from)
                atomic_inc(&from->tmp_ref);
-       spin_unlock(&t->lock);
        return from;
 }
 
@@ -5443,32 +5442,28 @@ static int binder_ioctl_set_ctx_mgr(struct file *filp,
        struct binder_node *new_node;
        kuid_t curr_euid = current_euid();
 
-       mutex_lock(&context->context_mgr_node_lock);
+       guard(mutex)(&context->context_mgr_node_lock);
        if (context->binder_context_mgr_node) {
                pr_err("BINDER_SET_CONTEXT_MGR already set\n");
-               ret = -EBUSY;
-               goto out;
+               return -EBUSY;
        }
        ret = security_binder_set_context_mgr(proc->cred);
        if (ret < 0)
-               goto out;
+               return ret;
        if (uid_valid(context->binder_context_mgr_uid)) {
                if (!uid_eq(context->binder_context_mgr_uid, curr_euid)) {
                        pr_err("BINDER_SET_CONTEXT_MGR bad uid %d != %d\n",
                               from_kuid(&init_user_ns, curr_euid),
                               from_kuid(&init_user_ns,
                                         context->binder_context_mgr_uid));
-                       ret = -EPERM;
-                       goto out;
+                       return -EPERM;
                }
        } else {
                context->binder_context_mgr_uid = curr_euid;
        }
        new_node = binder_new_node(proc, fbo);
-       if (!new_node) {
-               ret = -ENOMEM;
-               goto out;
-       }
+       if (!new_node)
+               return -ENOMEM;
        binder_node_lock(new_node);
        new_node->local_weak_refs++;
        new_node->local_strong_refs++;
@@ -5477,8 +5472,6 @@ static int binder_ioctl_set_ctx_mgr(struct file *filp,
        context->binder_context_mgr_node = new_node;
        binder_node_unlock(new_node);
        binder_put_node(new_node);
-out:
-       mutex_unlock(&context->context_mgr_node_lock);
        return ret;
 }
 
@@ -6320,14 +6313,13 @@ static DECLARE_WORK(binder_deferred_work, binder_deferred_func);
 static void
 binder_defer_work(struct binder_proc *proc, enum binder_deferred_state defer)
 {
-       mutex_lock(&binder_deferred_lock);
+       guard(mutex)(&binder_deferred_lock);
        proc->deferred_work |= defer;
        if (hlist_unhashed(&proc->deferred_work_node)) {
                hlist_add_head(&proc->deferred_work_node,
                                &binder_deferred_list);
                schedule_work(&binder_deferred_work);
        }
-       mutex_unlock(&binder_deferred_lock);
 }
 
 static void print_binder_transaction_ilocked(struct seq_file *m,
@@ -6869,14 +6861,13 @@ static int proc_show(struct seq_file *m, void *unused)
        struct binder_proc *itr;
        int pid = (unsigned long)m->private;
 
-       mutex_lock(&binder_procs_lock);
+       guard(mutex)(&binder_procs_lock);
        hlist_for_each_entry(itr, &binder_procs, proc_node) {
                if (itr->pid == pid) {
                        seq_puts(m, "binder proc state:\n");
                        print_binder_proc(m, itr, true, false);
                }
        }
-       mutex_unlock(&binder_procs_lock);
 
        return 0;
 }
@@ -6994,16 +6985,14 @@ const struct binder_debugfs_entry binder_debugfs_entries[] = {
 
 void binder_add_device(struct binder_device *device)
 {
-       spin_lock(&binder_devices_lock);
+       guard(spinlock)(&binder_devices_lock);
        hlist_add_head(&device->hlist, &binder_devices);
-       spin_unlock(&binder_devices_lock);
 }
 
 void binder_remove_device(struct binder_device *device)
 {
-       spin_lock(&binder_devices_lock);
+       guard(spinlock)(&binder_devices_lock);
        hlist_del_init(&device->hlist);
-       spin_unlock(&binder_devices_lock);
 }
 
 static int __init init_binder_device(const char *name)
diff --git a/drivers/android/binder_alloc.c b/drivers/android/binder_alloc.c
index fcfaf1b899c8fba0697ff104d9de3555b4a9a89b..a0a7cb58fc05da43e0486bc991e27d71b9856f35 100644
--- a/drivers/android/binder_alloc.c
+++ b/drivers/android/binder_alloc.c
@@ -167,12 +167,8 @@ static struct binder_buffer *binder_alloc_prepare_to_free_locked(
 struct binder_buffer *binder_alloc_prepare_to_free(struct binder_alloc *alloc,
                                                   unsigned long user_ptr)
 {
-       struct binder_buffer *buffer;
-
-       mutex_lock(&alloc->mutex);
-       buffer = binder_alloc_prepare_to_free_locked(alloc, user_ptr);
-       mutex_unlock(&alloc->mutex);
-       return buffer;
+       guard(mutex)(&alloc->mutex);
+       return binder_alloc_prepare_to_free_locked(alloc, user_ptr);
 }
 
 static inline void
@@ -1043,7 +1039,7 @@ void binder_alloc_print_allocated(struct seq_file *m,
        struct binder_buffer *buffer;
        struct rb_node *n;
 
-       mutex_lock(&alloc->mutex);
+       guard(mutex)(&alloc->mutex);
        for (n = rb_first(&alloc->allocated_buffers); n; n = rb_next(n)) {
                buffer = rb_entry(n, struct binder_buffer, rb_node);
                seq_printf(m, "  buffer %d: %lx size %zd:%zd:%zd %s\n",
@@ -1053,7 +1049,6 @@ void binder_alloc_print_allocated(struct seq_file *m,
                           buffer->extra_buffers_size,
                           buffer->transaction ? "active" : "delivered");
        }
-       mutex_unlock(&alloc->mutex);
 }
 
 /**
@@ -1102,10 +1097,9 @@ int binder_alloc_get_allocated_count(struct binder_alloc *alloc)
        struct rb_node *n;
        int count = 0;
 
-       mutex_lock(&alloc->mutex);
+       guard(mutex)(&alloc->mutex);
        for (n = rb_first(&alloc->allocated_buffers); n != NULL; n = rb_next(n))
                count++;
-       mutex_unlock(&alloc->mutex);
        return count;
 }
 
diff --git a/drivers/android/binder_alloc.h b/drivers/android/binder_alloc.h
index feecd741424101df3f8901d473c2c03146923a0a..a9d5f3169e126013e8932a4d448c6f1150326789 100644
--- a/drivers/android/binder_alloc.h
+++ b/drivers/android/binder_alloc.h
@@ -160,12 +160,8 @@ void binder_alloc_print_pages(struct seq_file *m,
 static inline size_t
 binder_alloc_get_free_async_space(struct binder_alloc *alloc)
 {
-       size_t free_async_space;
-
-       mutex_lock(&alloc->mutex);
-       free_async_space = alloc->free_async_space;
-       mutex_unlock(&alloc->mutex);
-       return free_async_space;
+       guard(mutex)(&alloc->mutex);
+       return alloc->free_async_space;
 }
 
 unsigned long