/*
 * Android IPC Subsystem
 *
 * Copyright (C) 2007-2008 Google, Inc.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */
/*
 * Locking overview
 *
 * There are 3 main spinlocks which must be acquired in the
 * order shown:
 *
 * 1) proc->outer_lock : protects binder_ref
 *    binder_proc_lock() and binder_proc_unlock() are
 *    used to acq/rel.
 * 2) node->lock : protects most fields of binder_node.
 *    binder_node_lock() and binder_node_unlock() are
 *    used to acq/rel.
 * 3) proc->inner_lock : protects the thread and node lists
 *    (proc->threads, proc->waiting_threads, proc->nodes)
 *    and all todo lists associated with the binder_proc
 *    (proc->todo, thread->todo, proc->delivered_death and
 *    node->async_todo), as well as thread->transaction_stack.
 *    binder_inner_proc_lock() and binder_inner_proc_unlock()
 *    are used to acq/rel.
 *
 * Any lock under procA must never be nested under any lock at the same
 * level or below on procB.
 *
 * Functions that require a lock held on entry indicate which lock
 * in the suffix of the function name:
 *
 * foo_olocked() : requires proc->outer_lock
 * foo_nlocked() : requires node->lock
 * foo_ilocked() : requires proc->inner_lock
 * foo_oilocked(): requires proc->outer_lock and proc->inner_lock
 * foo_nilocked(): requires node->lock and proc->inner_lock
 */
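/*
 * Illustrative sketch (not part of the driver logic): given the
 * ordering above, a path that needs all three locks for one proc
 * would acquire and release them with the helpers defined later in
 * this file, in this order:
 *
 *	binder_proc_lock(proc);          (1: proc->outer_lock)
 *	binder_node_lock(node);          (2: node->lock)
 *	binder_inner_proc_lock(proc);    (3: proc->inner_lock)
 *	... critical section ...
 *	binder_inner_proc_unlock(proc);
 *	binder_node_unlock(node);
 *	binder_proc_unlock(proc);
 */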
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/fdtable.h>
#include <linux/file.h>
#include <linux/freezer.h>
#include <linux/list.h>
#include <linux/miscdevice.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/nsproxy.h>
#include <linux/poll.h>
#include <linux/debugfs.h>
#include <linux/rbtree.h>
#include <linux/sched/signal.h>
#include <linux/sched/mm.h>
#include <linux/seq_file.h>
#include <linux/uaccess.h>
#include <linux/pid_namespace.h>
#include <linux/security.h>
#include <linux/spinlock.h>
#include <linux/ratelimit.h>
#include <linux/syscalls.h>
#include <linux/task_work.h>

#include <uapi/linux/android/binder.h>

#include <asm/cacheflush.h>

#include "binder_alloc.h"
#include "binder_internal.h"
#include "binder_trace.h"
static HLIST_HEAD(binder_deferred_list);
static DEFINE_MUTEX(binder_deferred_lock);

static HLIST_HEAD(binder_devices);
static HLIST_HEAD(binder_procs);
static DEFINE_MUTEX(binder_procs_lock);

static HLIST_HEAD(binder_dead_nodes);
static DEFINE_SPINLOCK(binder_dead_nodes_lock);

static struct dentry *binder_debugfs_dir_entry_root;
static struct dentry *binder_debugfs_dir_entry_proc;
static atomic_t binder_last_id;

static int proc_show(struct seq_file *m, void *unused);
DEFINE_SHOW_ATTRIBUTE(proc);
/* This is only defined in include/asm-arm/sizes.h */
#ifndef SZ_4M
#define SZ_4M 0x400000
#endif

#define FORBIDDEN_MMAP_FLAGS (VM_WRITE)
enum {
	BINDER_DEBUG_USER_ERROR             = 1U << 0,
	BINDER_DEBUG_FAILED_TRANSACTION     = 1U << 1,
	BINDER_DEBUG_DEAD_TRANSACTION       = 1U << 2,
	BINDER_DEBUG_OPEN_CLOSE             = 1U << 3,
	BINDER_DEBUG_DEAD_BINDER            = 1U << 4,
	BINDER_DEBUG_DEATH_NOTIFICATION     = 1U << 5,
	BINDER_DEBUG_READ_WRITE             = 1U << 6,
	BINDER_DEBUG_USER_REFS              = 1U << 7,
	BINDER_DEBUG_THREADS                = 1U << 8,
	BINDER_DEBUG_TRANSACTION            = 1U << 9,
	BINDER_DEBUG_TRANSACTION_COMPLETE   = 1U << 10,
	BINDER_DEBUG_FREE_BUFFER            = 1U << 11,
	BINDER_DEBUG_INTERNAL_REFS          = 1U << 12,
	BINDER_DEBUG_PRIORITY_CAP           = 1U << 13,
	BINDER_DEBUG_SPINLOCKS              = 1U << 14,
};
static uint32_t binder_debug_mask = BINDER_DEBUG_USER_ERROR |
	BINDER_DEBUG_FAILED_TRANSACTION | BINDER_DEBUG_DEAD_TRANSACTION;
module_param_named(debug_mask, binder_debug_mask, uint, 0644);

static char *binder_devices_param = CONFIG_ANDROID_BINDER_DEVICES;
module_param_named(devices, binder_devices_param, charp, 0444);

static DECLARE_WAIT_QUEUE_HEAD(binder_user_error_wait);
static int binder_stop_on_user_error;
static int binder_set_stop_on_user_error(const char *val,
					 const struct kernel_param *kp)
{
	int ret;

	ret = param_set_int(val, kp);
	if (binder_stop_on_user_error < 2)
		wake_up(&binder_user_error_wait);

	return ret;
}
module_param_call(stop_on_user_error, binder_set_stop_on_user_error,
		  param_get_int, &binder_stop_on_user_error, 0644);
#define binder_debug(mask, x...) \
	do { \
		if (binder_debug_mask & mask) \
			pr_info_ratelimited(x); \
	} while (0)

#define binder_user_error(x...) \
	do { \
		if (binder_debug_mask & BINDER_DEBUG_USER_ERROR) \
			pr_info_ratelimited(x); \
		if (binder_stop_on_user_error) \
			binder_stop_on_user_error = 2; \
	} while (0)
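/*
 * Illustrative usage (sketch; assumes a proc/thread pair in scope, as
 * in the command handlers later in this file):
 *
 *	binder_debug(BINDER_DEBUG_THREADS,
 *		     "%d:%d exit\n", proc->pid, thread->pid);
 *	binder_user_error("%d:%d sent bad handle\n",
 *			  proc->pid, thread->pid);
 *
 * Both expand to ratelimited pr_info() calls gated by debug_mask;
 * binder_user_error() additionally latches binder_stop_on_user_error
 * to 2 when the stop_on_user_error module parameter is set, so a
 * waiter on binder_user_error_wait can observe the fault.
 */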
#define to_flat_binder_object(hdr) \
	container_of(hdr, struct flat_binder_object, hdr)

#define to_binder_fd_object(hdr) container_of(hdr, struct binder_fd_object, hdr)

#define to_binder_buffer_object(hdr) \
	container_of(hdr, struct binder_buffer_object, hdr)

#define to_binder_fd_array_object(hdr) \
	container_of(hdr, struct binder_fd_array_object, hdr)
enum binder_stat_types {
	BINDER_STAT_PROC,
	BINDER_STAT_THREAD,
	BINDER_STAT_NODE,
	BINDER_STAT_REF,
	BINDER_STAT_DEATH,
	BINDER_STAT_TRANSACTION,
	BINDER_STAT_TRANSACTION_COMPLETE,
	BINDER_STAT_COUNT
};
struct binder_stats {
	atomic_t br[_IOC_NR(BR_FAILED_REPLY) + 1];
	atomic_t bc[_IOC_NR(BC_REPLY_SG) + 1];
	atomic_t obj_created[BINDER_STAT_COUNT];
	atomic_t obj_deleted[BINDER_STAT_COUNT];
};

static struct binder_stats binder_stats;
static inline void binder_stats_deleted(enum binder_stat_types type)
{
	atomic_inc(&binder_stats.obj_deleted[type]);
}

static inline void binder_stats_created(enum binder_stat_types type)
{
	atomic_inc(&binder_stats.obj_created[type]);
}
struct binder_transaction_log_entry {
	int debug_id;
	int debug_id_done;
	int call_type;
	int from_proc;
	int from_thread;
	int target_handle;
	int to_proc;
	int to_thread;
	int to_node;
	int data_size;
	int offsets_size;
	int return_error_line;
	uint32_t return_error;
	uint32_t return_error_param;
	const char *context_name;
};

struct binder_transaction_log {
	atomic_t cur;
	bool full;
	struct binder_transaction_log_entry entry[32];
};

static struct binder_transaction_log binder_transaction_log;
static struct binder_transaction_log binder_transaction_log_failed;
static struct binder_transaction_log_entry *binder_transaction_log_add(
	struct binder_transaction_log *log)
{
	struct binder_transaction_log_entry *e;
	unsigned int cur = atomic_inc_return(&log->cur);

	if (cur >= ARRAY_SIZE(log->entry))
		log->full = true;
	e = &log->entry[cur % ARRAY_SIZE(log->entry)];
	WRITE_ONCE(e->debug_id_done, 0);
	/*
	 * write-barrier to synchronize access to e->debug_id_done.
	 * We make sure the initialized 0 value is seen before
	 * the other fields are zeroed by memset().
	 */
	smp_wmb();
	memset(e, 0, sizeof(*e));
	return e;
}
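/*
 * Illustrative reader-side sketch: the smp_wmb() above pairs with a
 * read barrier on the dump side, which snapshots debug_id_done around
 * the copy and treats the slot as torn if it was recycled in between:
 *
 *	int done = READ_ONCE(e->debug_id_done);
 *
 *	smp_rmb();
 *	... print the entry fields ...
 *	if (!done || done != READ_ONCE(e->debug_id_done))
 *		... mark the entry as incomplete ...
 */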
/**
 * struct binder_work - work enqueued on a worklist
 * @entry:             node enqueued on list
 * @type:              type of work to be performed
 *
 * There are separate work lists for proc, thread, and node (async).
 */
struct binder_work {
	struct list_head entry;

	enum binder_work_type {
		BINDER_WORK_TRANSACTION = 1,
		BINDER_WORK_TRANSACTION_COMPLETE,
		BINDER_WORK_RETURN_ERROR,
		BINDER_WORK_NODE,
		BINDER_WORK_DEAD_BINDER,
		BINDER_WORK_DEAD_BINDER_AND_CLEAR,
		BINDER_WORK_CLEAR_DEATH_NOTIFICATION,
	} type;
};

struct binder_error {
	struct binder_work work;
	uint32_t cmd;
};
/**
 * struct binder_node - binder node bookkeeping
 * @debug_id:             unique ID for debugging
 *                        (invariant after initialized)
 * @lock:                 lock for node fields
 * @work:                 worklist element for node work
 *                        (protected by @proc->inner_lock)
 * @rb_node:              element for proc->nodes tree
 *                        (protected by @proc->inner_lock)
 * @dead_node:            element for binder_dead_nodes list
 *                        (protected by binder_dead_nodes_lock)
 * @proc:                 binder_proc that owns this node
 *                        (invariant after initialized)
 * @refs:                 list of references on this node
 *                        (protected by @lock)
 * @internal_strong_refs: used to take strong references when
 *                        initiating a transaction
 *                        (protected by @proc->inner_lock if @proc
 *                        is valid, and by @lock)
 * @local_weak_refs:      weak user refs from local process
 *                        (protected by @proc->inner_lock if @proc
 *                        is valid, and by @lock)
 * @local_strong_refs:    strong user refs from local process
 *                        (protected by @proc->inner_lock if @proc
 *                        is valid, and by @lock)
 * @tmp_refs:             temporary kernel refs
 *                        (protected by @proc->inner_lock while @proc
 *                        is valid, and by binder_dead_nodes_lock
 *                        if @proc is NULL. During inc/dec and node release
 *                        it is also protected by @lock to provide safety
 *                        as the node dies and @proc becomes NULL)
 * @ptr:                  userspace pointer for node
 *                        (invariant, no lock needed)
 * @cookie:               userspace cookie for node
 *                        (invariant, no lock needed)
 * @has_strong_ref:       userspace notified of strong ref
 *                        (protected by @proc->inner_lock if @proc
 *                        is valid, and by @lock)
 * @pending_strong_ref:   userspace has acked notification of strong ref
 *                        (protected by @proc->inner_lock if @proc
 *                        is valid, and by @lock)
 * @has_weak_ref:         userspace notified of weak ref
 *                        (protected by @proc->inner_lock if @proc
 *                        is valid, and by @lock)
 * @pending_weak_ref:     userspace has acked notification of weak ref
 *                        (protected by @proc->inner_lock if @proc
 *                        is valid, and by @lock)
 * @has_async_transaction: async transaction to node in progress
 *                        (protected by @lock)
 * @accept_fds:           file descriptor operations supported for node
 *                        (invariant after initialized)
 * @min_priority:         minimum scheduling priority
 *                        (invariant after initialized)
 * @async_todo:           list of async work items
 *                        (protected by @proc->inner_lock)
 *
 * Bookkeeping structure for binder nodes.
 */
struct binder_node {
	int debug_id;
	spinlock_t lock;
	struct binder_work work;
	union {
		struct rb_node rb_node;
		struct hlist_node dead_node;
	};
	struct binder_proc *proc;
	struct hlist_head refs;
	int internal_strong_refs;
	int local_weak_refs;
	int local_strong_refs;
	int tmp_refs;
	binder_uintptr_t ptr;
	binder_uintptr_t cookie;
	struct {
		/*
		 * bitfield elements protected by
		 * proc inner_lock
		 */
		u8 has_strong_ref:1;
		u8 pending_strong_ref:1;
		u8 has_weak_ref:1;
		u8 pending_weak_ref:1;
	};
	struct {
		/*
		 * invariant after initialization
		 */
		u8 accept_fds:1;
		u8 min_priority;
	};
	bool has_async_transaction;
	struct list_head async_todo;
};
struct binder_ref_death {
	/**
	 * @work: worklist element for death notifications
	 *        (protected by inner_lock of the proc that
	 *        this ref belongs to)
	 */
	struct binder_work work;
	binder_uintptr_t cookie;
};
/**
 * struct binder_ref_data - binder_ref counts and id
 * @debug_id:        unique ID for the ref
 * @desc:            unique userspace handle for ref
 * @strong:          strong ref count (debugging only if not locked)
 * @weak:            weak ref count (debugging only if not locked)
 *
 * Structure to hold ref count and ref id information. Since
 * the actual ref can only be accessed with a lock, this structure
 * is used to return information about the ref to callers of
 * ref inc/dec functions.
 */
struct binder_ref_data {
	int debug_id;
	uint32_t desc;
	int strong;
	int weak;
};
/**
 * struct binder_ref - struct to track references on nodes
 * @data:        binder_ref_data containing id, handle, and current refcounts
 * @rb_node_desc: node for lookup by @data.desc in proc's rb_tree
 * @rb_node_node: node for lookup by @node in proc's rb_tree
 * @node_entry:  list entry for node->refs list in target node
 *               (protected by @node->lock)
 * @proc:        binder_proc containing ref
 * @node:        binder_node of target node. When cleaning up a
 *               ref for deletion in binder_cleanup_ref, a non-NULL
 *               @node indicates the node must be freed
 * @death:       pointer to death notification (ref_death) if requested
 *               (protected by @node->lock)
 *
 * Structure to track references from procA to target node (on procB). This
 * structure is unsafe to access without holding @proc->outer_lock.
 */
struct binder_ref {
	/* Lookups needed: */
	/*   node + proc => ref (transaction) */
	/*   desc + proc => ref (transaction, inc/dec ref) */
	/*   node => refs + procs (proc exit) */
	struct binder_ref_data data;
	struct rb_node rb_node_desc;
	struct rb_node rb_node_node;
	struct hlist_node node_entry;
	struct binder_proc *proc;
	struct binder_node *node;
	struct binder_ref_death *death;
};
enum binder_deferred_state {
	BINDER_DEFERRED_FLUSH = 0x01,
	BINDER_DEFERRED_RELEASE = 0x02,
};
/**
 * struct binder_proc - binder process bookkeeping
 * @proc_node:            element for binder_procs list
 * @threads:              rbtree of binder_threads in this proc
 *                        (protected by @inner_lock)
 * @nodes:                rbtree of binder nodes associated with
 *                        this proc ordered by node->ptr
 *                        (protected by @inner_lock)
 * @refs_by_desc:         rbtree of refs ordered by ref->desc
 *                        (protected by @outer_lock)
 * @refs_by_node:         rbtree of refs ordered by ref->node
 *                        (protected by @outer_lock)
 * @waiting_threads:      threads currently waiting for proc work
 *                        (protected by @inner_lock)
 * @pid:                  PID of group_leader of process
 *                        (invariant after initialized)
 * @tsk:                  task_struct for group_leader of process
 *                        (invariant after initialized)
 * @deferred_work_node:   element for binder_deferred_list
 *                        (protected by binder_deferred_lock)
 * @deferred_work:        bitmap of deferred work to perform
 *                        (protected by binder_deferred_lock)
 * @is_dead:              process is dead and awaiting free
 *                        when outstanding transactions are cleaned up
 *                        (protected by @inner_lock)
 * @todo:                 list of work for this process
 *                        (protected by @inner_lock)
 * @stats:                per-process binder statistics
 *                        (atomics, no lock needed)
 * @delivered_death:      list of delivered death notifications
 *                        (protected by @inner_lock)
 * @max_threads:          cap on number of binder threads
 *                        (protected by @inner_lock)
 * @requested_threads:    number of binder threads requested but not
 *                        yet started. In current implementation, can
 *                        only be 0 or 1.
 *                        (protected by @inner_lock)
 * @requested_threads_started: number of binder threads started
 *                        (protected by @inner_lock)
 * @tmp_ref:              temporary reference to indicate proc is in use
 *                        (protected by @inner_lock)
 * @default_priority:     default scheduler priority
 *                        (invariant after initialized)
 * @debugfs_entry:        debugfs node
 * @alloc:                binder allocator bookkeeping
 * @context:              binder_context for this proc
 *                        (invariant after initialized)
 * @inner_lock:           can nest under outer_lock and/or node lock
 * @outer_lock:           no nesting under inner or node lock
 *                        Lock order: 1) outer, 2) node, 3) inner
 *
 * Bookkeeping structure for binder processes
 */
struct binder_proc {
	struct hlist_node proc_node;
	struct rb_root threads;
	struct rb_root nodes;
	struct rb_root refs_by_desc;
	struct rb_root refs_by_node;
	struct list_head waiting_threads;
	int pid;
	struct task_struct *tsk;
	struct hlist_node deferred_work_node;
	int deferred_work;
	bool is_dead;

	struct list_head todo;
	struct binder_stats stats;
	struct list_head delivered_death;
	int max_threads;
	int requested_threads;
	int requested_threads_started;
	int tmp_ref;
	long default_priority;
	struct dentry *debugfs_entry;
	struct binder_alloc alloc;
	struct binder_context *context;
	spinlock_t inner_lock;
	spinlock_t outer_lock;
};
enum {
	BINDER_LOOPER_STATE_REGISTERED  = 0x01,
	BINDER_LOOPER_STATE_ENTERED     = 0x02,
	BINDER_LOOPER_STATE_EXITED      = 0x04,
	BINDER_LOOPER_STATE_INVALID     = 0x08,
	BINDER_LOOPER_STATE_WAITING     = 0x10,
	BINDER_LOOPER_STATE_POLL        = 0x20,
};
/**
 * struct binder_thread - binder thread bookkeeping
 * @proc:                 binder process for this thread
 *                        (invariant after initialization)
 * @rb_node:              element for proc->threads rbtree
 *                        (protected by @proc->inner_lock)
 * @waiting_thread_node:  element for @proc->waiting_threads list
 *                        (protected by @proc->inner_lock)
 * @pid:                  PID for this thread
 *                        (invariant after initialization)
 * @looper:               bitmap of looping state
 *                        (only accessed by this thread)
 * @looper_need_return:   looping thread needs to exit driver
 *                        (no lock needed)
 * @transaction_stack:    stack of in-progress transactions for this thread
 *                        (protected by @proc->inner_lock)
 * @todo:                 list of work to do for this thread
 *                        (protected by @proc->inner_lock)
 * @process_todo:         whether work in @todo should be processed
 *                        (protected by @proc->inner_lock)
 * @return_error:         transaction errors reported by this thread
 *                        (only accessed by this thread)
 * @reply_error:          transaction errors reported by target thread
 *                        (protected by @proc->inner_lock)
 * @wait:                 wait queue for thread work
 * @stats:                per-thread statistics
 *                        (atomics, no lock needed)
 * @tmp_ref:              temporary reference to indicate thread is in use
 *                        (atomic since @proc->inner_lock cannot
 *                        always be acquired)
 * @is_dead:              thread is dead and awaiting free
 *                        when outstanding transactions are cleaned up
 *                        (protected by @proc->inner_lock)
 *
 * Bookkeeping structure for binder threads.
 */
struct binder_thread {
	struct binder_proc *proc;
	struct rb_node rb_node;
	struct list_head waiting_thread_node;
	int pid;
	int looper;              /* only modified by this thread */
	bool looper_need_return; /* can be written by other thread */
	struct binder_transaction *transaction_stack;
	struct list_head todo;
	bool process_todo;
	struct binder_error return_error;
	struct binder_error reply_error;
	wait_queue_head_t wait;
	struct binder_stats stats;
	atomic_t tmp_ref;
	bool is_dead;
};
/**
 * struct binder_txn_fd_fixup - transaction fd fixup list element
 * @fixup_entry:          list entry
 * @file:                 struct file to be associated with new fd
 * @offset:               offset in buffer data to this fixup
 *
 * List element for fd fixups in a transaction. Since file
 * descriptors need to be allocated in the context of the
 * target process, we pass each fd to be processed in this
 * struct.
 */
struct binder_txn_fd_fixup {
	struct list_head fixup_entry;
	struct file *file;
	size_t offset;
};
struct binder_transaction {
	int debug_id;
	struct binder_work work;
	struct binder_thread *from;
	struct binder_transaction *from_parent;
	struct binder_proc *to_proc;
	struct binder_thread *to_thread;
	struct binder_transaction *to_parent;
	unsigned need_reply:1;
	/* unsigned is_dead:1; */	/* not used at the moment */

	struct binder_buffer *buffer;
	unsigned int code;
	unsigned int flags;
	long priority;
	long saved_priority;
	kuid_t sender_euid;
	struct list_head fd_fixups;
	/**
	 * @lock:  protects @from, @to_proc, and @to_thread
	 *
	 * @from, @to_proc, and @to_thread can be set to NULL
	 * during thread teardown
	 */
	spinlock_t lock;
};
/**
 * binder_proc_lock() - Acquire outer lock for given binder_proc
 * @proc:         struct binder_proc to acquire
 *
 * Acquires proc->outer_lock. Used to protect binder_ref
 * structures associated with the given proc.
 */
#define binder_proc_lock(proc) _binder_proc_lock(proc, __LINE__)
static void
_binder_proc_lock(struct binder_proc *proc, int line)
	__acquires(&proc->outer_lock)
{
	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	spin_lock(&proc->outer_lock);
}
/**
 * binder_proc_unlock() - Release spinlock for given binder_proc
 * @proc:         struct binder_proc to acquire
 *
 * Release lock acquired via binder_proc_lock()
 */
#define binder_proc_unlock(_proc) _binder_proc_unlock(_proc, __LINE__)
static void
_binder_proc_unlock(struct binder_proc *proc, int line)
	__releases(&proc->outer_lock)
{
	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	spin_unlock(&proc->outer_lock);
}
/**
 * binder_inner_proc_lock() - Acquire inner lock for given binder_proc
 * @proc:         struct binder_proc to acquire
 *
 * Acquires proc->inner_lock. Used to protect todo lists
 */
#define binder_inner_proc_lock(proc) _binder_inner_proc_lock(proc, __LINE__)
static void
_binder_inner_proc_lock(struct binder_proc *proc, int line)
	__acquires(&proc->inner_lock)
{
	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	spin_lock(&proc->inner_lock);
}
/**
 * binder_inner_proc_unlock() - Release inner lock for given binder_proc
 * @proc:         struct binder_proc to acquire
 *
 * Release lock acquired via binder_inner_proc_lock()
 */
#define binder_inner_proc_unlock(proc) _binder_inner_proc_unlock(proc, __LINE__)
static void
_binder_inner_proc_unlock(struct binder_proc *proc, int line)
	__releases(&proc->inner_lock)
{
	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	spin_unlock(&proc->inner_lock);
}
/**
 * binder_node_lock() - Acquire spinlock for given binder_node
 * @node:         struct binder_node to acquire
 *
 * Acquires node->lock. Used to protect binder_node fields
 */
#define binder_node_lock(node) _binder_node_lock(node, __LINE__)
static void
_binder_node_lock(struct binder_node *node, int line)
	__acquires(&node->lock)
{
	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	spin_lock(&node->lock);
}
/**
 * binder_node_unlock() - Release spinlock for given binder_node
 * @node:         struct binder_node to acquire
 *
 * Release lock acquired via binder_node_lock()
 */
#define binder_node_unlock(node) _binder_node_unlock(node, __LINE__)
static void
_binder_node_unlock(struct binder_node *node, int line)
	__releases(&node->lock)
{
	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	spin_unlock(&node->lock);
}
/**
 * binder_node_inner_lock() - Acquire node and inner locks
 * @node:         struct binder_node to acquire
 *
 * Acquires node->lock. If node->proc is non-NULL, also acquires
 * proc->inner_lock. Used to protect binder_node fields
 */
#define binder_node_inner_lock(node) _binder_node_inner_lock(node, __LINE__)
static void
_binder_node_inner_lock(struct binder_node *node, int line)
	__acquires(&node->lock) __acquires(&node->proc->inner_lock)
{
	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	spin_lock(&node->lock);
	if (node->proc)
		binder_inner_proc_lock(node->proc);
	else
		/* annotation for sparse */
		__acquire(&node->proc->inner_lock);
}
/**
 * binder_node_inner_unlock() - Release node and inner locks
 * @node:         struct binder_node to acquire
 *
 * Release lock acquired via binder_node_inner_lock()
 */
#define binder_node_inner_unlock(node) _binder_node_inner_unlock(node, __LINE__)
static void
_binder_node_inner_unlock(struct binder_node *node, int line)
	__releases(&node->lock) __releases(&node->proc->inner_lock)
{
	struct binder_proc *proc = node->proc;

	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	if (proc)
		binder_inner_proc_unlock(proc);
	else
		/* annotation for sparse */
		__release(&node->proc->inner_lock);
	spin_unlock(&node->lock);
}
static bool binder_worklist_empty_ilocked(struct list_head *list)
{
	return list_empty(list);
}

/**
 * binder_worklist_empty() - Check if no items on the work list
 * @proc:       binder_proc associated with list
 * @list:       list to check
 *
 * Return: true if there are no items on list, else false
 */
static bool binder_worklist_empty(struct binder_proc *proc,
				  struct list_head *list)
{
	bool ret;

	binder_inner_proc_lock(proc);
	ret = binder_worklist_empty_ilocked(list);
	binder_inner_proc_unlock(proc);
	return ret;
}
/**
 * binder_enqueue_work_ilocked() - Add an item to the work list
 * @work:         struct binder_work to add to list
 * @target_list:  list to add work to
 *
 * Adds the work to the specified list. Asserts that work
 * is not already on a list.
 *
 * Requires the proc->inner_lock to be held.
 */
static void
binder_enqueue_work_ilocked(struct binder_work *work,
			    struct list_head *target_list)
{
	BUG_ON(target_list == NULL);
	BUG_ON(work->entry.next && !list_empty(&work->entry));
	list_add_tail(&work->entry, target_list);
}
/**
 * binder_enqueue_deferred_thread_work_ilocked() - Add deferred thread work
 * @thread:       thread to queue work to
 * @work:         struct binder_work to add to list
 *
 * Adds the work to the todo list of the thread. Doesn't set the process_todo
 * flag, which means that (if it wasn't already set) the thread will go to
 * sleep without handling this work when it calls read.
 *
 * Requires the proc->inner_lock to be held.
 */
static void
binder_enqueue_deferred_thread_work_ilocked(struct binder_thread *thread,
					    struct binder_work *work)
{
	WARN_ON(!list_empty(&thread->waiting_thread_node));
	binder_enqueue_work_ilocked(work, &thread->todo);
}
/**
 * binder_enqueue_thread_work_ilocked() - Add an item to the thread work list
 * @thread:       thread to queue work to
 * @work:         struct binder_work to add to list
 *
 * Adds the work to the todo list of the thread, and enables processing
 * of the todo queue.
 *
 * Requires the proc->inner_lock to be held.
 */
static void
binder_enqueue_thread_work_ilocked(struct binder_thread *thread,
				   struct binder_work *work)
{
	WARN_ON(!list_empty(&thread->waiting_thread_node));
	binder_enqueue_work_ilocked(work, &thread->todo);
	thread->process_todo = true;
}
/**
 * binder_enqueue_thread_work() - Add an item to the thread work list
 * @thread:       thread to queue work to
 * @work:         struct binder_work to add to list
 *
 * Adds the work to the todo list of the thread, and enables processing
 * of the todo queue.
 */
static void
binder_enqueue_thread_work(struct binder_thread *thread,
			   struct binder_work *work)
{
	binder_inner_proc_lock(thread->proc);
	binder_enqueue_thread_work_ilocked(thread, work);
	binder_inner_proc_unlock(thread->proc);
}
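/*
 * Illustrative note: a caller that already holds the inner lock uses
 * the _ilocked variant directly; the wrapper above is shorthand for:
 *
 *	binder_inner_proc_lock(thread->proc);
 *	binder_enqueue_thread_work_ilocked(thread, work);
 *	binder_inner_proc_unlock(thread->proc);
 */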
static void
binder_dequeue_work_ilocked(struct binder_work *work)
{
	list_del_init(&work->entry);
}

/**
 * binder_dequeue_work() - Removes an item from the work list
 * @proc:         binder_proc associated with list
 * @work:         struct binder_work to remove from list
 *
 * Removes the specified work item from whatever list it is on.
 * Can safely be called if work is not on any list.
 */
static void
binder_dequeue_work(struct binder_proc *proc, struct binder_work *work)
{
	binder_inner_proc_lock(proc);
	binder_dequeue_work_ilocked(work);
	binder_inner_proc_unlock(proc);
}
static struct binder_work *binder_dequeue_work_head_ilocked(
					struct list_head *list)
{
	struct binder_work *w;

	w = list_first_entry_or_null(list, struct binder_work, entry);
	if (w)
		list_del_init(&w->entry);
	return w;
}

/**
 * binder_dequeue_work_head() - Dequeues the item at head of list
 * @proc:         binder_proc associated with list
 * @list:         list to dequeue head
 *
 * Removes the head of the list if there are items on the list
 *
 * Return: pointer to dequeued binder_work, NULL if list was empty
 */
static struct binder_work *binder_dequeue_work_head(
					struct binder_proc *proc,
					struct list_head *list)
{
	struct binder_work *w;

	binder_inner_proc_lock(proc);
	w = binder_dequeue_work_head_ilocked(list);
	binder_inner_proc_unlock(proc);
	return w;
}
static void
binder_defer_work(struct binder_proc *proc, enum binder_deferred_state defer);
static void binder_free_thread(struct binder_thread *thread);
static void binder_free_proc(struct binder_proc *proc);
static void binder_inc_node_tmpref_ilocked(struct binder_node *node);
static bool binder_has_work_ilocked(struct binder_thread *thread,
				    bool do_proc_work)
{
	return thread->process_todo ||
		thread->looper_need_return ||
		(do_proc_work &&
		 !binder_worklist_empty_ilocked(&thread->proc->todo));
}

static bool binder_has_work(struct binder_thread *thread, bool do_proc_work)
{
	bool has_work;

	binder_inner_proc_lock(thread->proc);
	has_work = binder_has_work_ilocked(thread, do_proc_work);
	binder_inner_proc_unlock(thread->proc);
	return has_work;
}
static bool binder_available_for_proc_work_ilocked(struct binder_thread *thread)
{
	return !thread->transaction_stack &&
		binder_worklist_empty_ilocked(&thread->todo) &&
		(thread->looper & (BINDER_LOOPER_STATE_ENTERED |
				   BINDER_LOOPER_STATE_REGISTERED));
}
static void binder_wakeup_poll_threads_ilocked(struct binder_proc *proc,
					       bool sync)
{
	struct rb_node *n;
	struct binder_thread *thread;

	for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n)) {
		thread = rb_entry(n, struct binder_thread, rb_node);
		if (thread->looper & BINDER_LOOPER_STATE_POLL &&
		    binder_available_for_proc_work_ilocked(thread)) {
			if (sync)
				wake_up_interruptible_sync(&thread->wait);
			else
				wake_up_interruptible(&thread->wait);
		}
	}
}
/**
 * binder_select_thread_ilocked() - selects a thread for doing proc work.
 * @proc:	process to select a thread from
 *
 * Note that calling this function moves the thread off the waiting_threads
 * list, so it can only be woken up by the caller of this function, or a
 * signal. Therefore, callers *should* always wake up the thread this function
 * returns.
 *
 * Return:	If there's a thread currently waiting for process work,
 *		returns that thread. Otherwise returns NULL.
 */
static struct binder_thread *
binder_select_thread_ilocked(struct binder_proc *proc)
{
	struct binder_thread *thread;

	assert_spin_locked(&proc->inner_lock);
	thread = list_first_entry_or_null(&proc->waiting_threads,
					  struct binder_thread,
					  waiting_thread_node);
	if (thread)
		list_del_init(&thread->waiting_thread_node);

	return thread;
}
/**
 * binder_wakeup_thread_ilocked() - wakes up a thread for doing proc work.
 * @proc:	process to wake up a thread in
 * @thread:	specific thread to wake-up (may be NULL)
 * @sync:	whether to do a synchronous wake-up
 *
 * This function wakes up a thread in the @proc process.
 * The caller may provide a specific thread to wake-up in
 * the @thread parameter. If @thread is NULL, this function
 * will wake up threads that have called poll().
 *
 * Note that for this function to work as expected, callers
 * should first call binder_select_thread() to find a thread
 * to handle the work (if they don't have a thread already),
 * and pass the result into the @thread parameter.
 */
static void binder_wakeup_thread_ilocked(struct binder_proc *proc,
					 struct binder_thread *thread,
					 bool sync)
{
	assert_spin_locked(&proc->inner_lock);

	if (thread) {
		if (sync)
			wake_up_interruptible_sync(&thread->wait);
		else
			wake_up_interruptible(&thread->wait);
		return;
	}

	/* Didn't find a thread waiting for proc work; this can happen
	 * in two scenarios:
	 * 1. All threads are busy handling transactions
	 *    In that case, one of those threads should call back into
	 *    the kernel driver soon and pick up this work.
	 * 2. Threads are using the (e)poll interface, in which case
	 *    they may be blocked on the waitqueue without having been
	 *    added to waiting_threads. For this case, we just iterate
	 *    over all threads not handling transaction work, and
	 *    wake them all up. We wake all because we don't know whether
	 *    a thread that called into (e)poll is handling non-binder
	 *    work currently.
	 */
	binder_wakeup_poll_threads_ilocked(proc, sync);
}
static void binder_wakeup_proc_ilocked(struct binder_proc *proc)
{
	struct binder_thread *thread = binder_select_thread_ilocked(proc);

	binder_wakeup_thread_ilocked(proc, thread, /* sync = */false);
}
static void binder_set_nice(long nice)
{
	long min_nice;

	if (can_nice(current, nice)) {
		set_user_nice(current, nice);
		return;
	}
	min_nice = rlimit_to_nice(rlimit(RLIMIT_NICE));
	binder_debug(BINDER_DEBUG_PRIORITY_CAP,
		     "%d: nice value %ld not allowed use %ld instead\n",
		     current->pid, nice, min_nice);
	set_user_nice(current, min_nice);
	if (min_nice <= MAX_NICE)
		return;
	binder_user_error("%d RLIMIT_NICE not set\n", current->pid);
}
static struct binder_node *binder_get_node_ilocked(struct binder_proc *proc,
						   binder_uintptr_t ptr)
{
	struct rb_node *n = proc->nodes.rb_node;
	struct binder_node *node;

	assert_spin_locked(&proc->inner_lock);

	while (n) {
		node = rb_entry(n, struct binder_node, rb_node);

		if (ptr < node->ptr)
			n = n->rb_left;
		else if (ptr > node->ptr)
			n = n->rb_right;
		else {
			/*
			 * take an implicit weak reference
			 * to ensure node stays alive until
			 * call to binder_put_node()
			 */
			binder_inc_node_tmpref_ilocked(node);
			return node;
		}
	}
	return NULL;
}
static struct binder_node *binder_get_node(struct binder_proc *proc,
					   binder_uintptr_t ptr)
{
	struct binder_node *node;

	binder_inner_proc_lock(proc);
	node = binder_get_node_ilocked(proc, ptr);
	binder_inner_proc_unlock(proc);
	return node;
}
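/*
 * Illustrative usage: lookups return the node with a temporary
 * reference held (see binder_inc_node_tmpref_ilocked() above), which
 * the caller must drop with binder_put_node() when done:
 *
 *	node = binder_get_node(proc, fp->binder);
 *	if (node) {
 *		... use node ...
 *		binder_put_node(node);
 *	}
 */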
static struct binder_node *binder_init_node_ilocked(
						struct binder_proc *proc,
						struct binder_node *new_node,
						struct flat_binder_object *fp)
{
	struct rb_node **p = &proc->nodes.rb_node;
	struct rb_node *parent = NULL;
	struct binder_node *node;
	binder_uintptr_t ptr = fp ? fp->binder : 0;
	binder_uintptr_t cookie = fp ? fp->cookie : 0;
	__u32 flags = fp ? fp->flags : 0;

	assert_spin_locked(&proc->inner_lock);

	while (*p) {
		parent = *p;
		node = rb_entry(parent, struct binder_node, rb_node);

		if (ptr < node->ptr)
			p = &(*p)->rb_left;
		else if (ptr > node->ptr)
			p = &(*p)->rb_right;
		else {
			/*
			 * A matching node is already in
			 * the rb tree. Abandon the init
			 * and return it.
			 */
			binder_inc_node_tmpref_ilocked(node);
			return node;
		}
	}
	node = new_node;
	binder_stats_created(BINDER_STAT_NODE);
	node->tmp_refs++;
	rb_link_node(&node->rb_node, parent, p);
	rb_insert_color(&node->rb_node, &proc->nodes);
	node->debug_id = atomic_inc_return(&binder_last_id);
	node->proc = proc;
	node->ptr = ptr;
	node->cookie = cookie;
	node->work.type = BINDER_WORK_NODE;
	node->min_priority = flags & FLAT_BINDER_FLAG_PRIORITY_MASK;
	node->accept_fds = !!(flags & FLAT_BINDER_FLAG_ACCEPTS_FDS);
	spin_lock_init(&node->lock);
	INIT_LIST_HEAD(&node->work.entry);
	INIT_LIST_HEAD(&node->async_todo);
	binder_debug(BINDER_DEBUG_INTERNAL_REFS,
		     "%d:%d node %d u%016llx c%016llx created\n",
		     proc->pid, current->pid, node->debug_id,
		     (u64)node->ptr, (u64)node->cookie);

	return node;
}
static struct binder_node *binder_new_node(struct binder_proc *proc,
					   struct flat_binder_object *fp)
{
	struct binder_node *node;
	struct binder_node *new_node = kzalloc(sizeof(*node), GFP_KERNEL);

	if (!new_node)
		return NULL;
	binder_inner_proc_lock(proc);
	node = binder_init_node_ilocked(proc, new_node, fp);
	binder_inner_proc_unlock(proc);
	if (node != new_node)
		/*
		 * The node was already added by another thread
		 */
		kfree(new_node);

	return node;
}
static void binder_free_node(struct binder_node *node)
{
	kfree(node);
	binder_stats_deleted(BINDER_STAT_NODE);
}
static int binder_inc_node_nilocked(struct binder_node *node, int strong,
				    int internal,
				    struct list_head *target_list)
{
	struct binder_proc *proc = node->proc;

	assert_spin_locked(&node->lock);
	if (proc)
		assert_spin_locked(&proc->inner_lock);
	if (strong) {
		if (internal) {
			if (target_list == NULL &&
			    node->internal_strong_refs == 0 &&
			    !(node->proc &&
			      node == node->proc->context->binder_context_mgr_node &&
			      node->has_strong_ref)) {
				pr_err("invalid inc strong node for %d\n",
					node->debug_id);
				return -EINVAL;
			}
			node->internal_strong_refs++;
		} else
			node->local_strong_refs++;
		if (!node->has_strong_ref && target_list) {
			struct binder_thread *thread = container_of(target_list,
						struct binder_thread, todo);
			binder_dequeue_work_ilocked(&node->work);
			BUG_ON(&thread->todo != target_list);
			binder_enqueue_deferred_thread_work_ilocked(thread,
								    &node->work);
		}
	} else {
		if (!internal)
			node->local_weak_refs++;
		if (!node->has_weak_ref && list_empty(&node->work.entry)) {
			if (target_list == NULL) {
				pr_err("invalid inc weak node for %d\n",
					node->debug_id);
				return -EINVAL;
			}
			binder_enqueue_work_ilocked(&node->work, target_list);
		}
	}
	return 0;
}
static int binder_inc_node(struct binder_node *node, int strong, int internal,
			   struct list_head *target_list)
{
	int ret;

	binder_node_inner_lock(node);
	ret = binder_inc_node_nilocked(node, strong, internal, target_list);
	binder_node_inner_unlock(node);

	return ret;
}
static bool binder_dec_node_nilocked(struct binder_node *node,
				     int strong, int internal)
{
	struct binder_proc *proc = node->proc;

	assert_spin_locked(&node->lock);
	if (proc)
		assert_spin_locked(&proc->inner_lock);
	if (strong) {
		if (internal)
			node->internal_strong_refs--;
		else
			node->local_strong_refs--;
		if (node->local_strong_refs || node->internal_strong_refs)
			return false;
	} else {
		if (!internal)
			node->local_weak_refs--;
		if (node->local_weak_refs || node->tmp_refs ||
				!hlist_empty(&node->refs))
			return false;
	}

	if (proc && (node->has_strong_ref || node->has_weak_ref)) {
		if (list_empty(&node->work.entry)) {
			binder_enqueue_work_ilocked(&node->work, &proc->todo);
			binder_wakeup_proc_ilocked(proc);
		}
	} else {
		if (hlist_empty(&node->refs) && !node->local_strong_refs &&
		    !node->local_weak_refs && !node->tmp_refs) {
			if (proc) {
				binder_dequeue_work_ilocked(&node->work);
				rb_erase(&node->rb_node, &proc->nodes);
				binder_debug(BINDER_DEBUG_INTERNAL_REFS,
					     "refless node %d deleted\n",
					     node->debug_id);
			} else {
				BUG_ON(!list_empty(&node->work.entry));
				spin_lock(&binder_dead_nodes_lock);
				/*
				 * tmp_refs could have changed so
				 * check it again
				 */
				if (node->tmp_refs) {
					spin_unlock(&binder_dead_nodes_lock);
					return false;
				}
				hlist_del(&node->dead_node);
				spin_unlock(&binder_dead_nodes_lock);
				binder_debug(BINDER_DEBUG_INTERNAL_REFS,
					     "dead node %d deleted\n",
					     node->debug_id);
			}
			return true;
		}
	}
	return false;
}
static void binder_dec_node(struct binder_node *node, int strong, int internal)
{
	bool free_node;

	binder_node_inner_lock(node);
	free_node = binder_dec_node_nilocked(node, strong, internal);
	binder_node_inner_unlock(node);
	if (free_node)
		binder_free_node(node);
}
static void binder_inc_node_tmpref_ilocked(struct binder_node *node)
{
	/*
	 * No call to binder_inc_node() is needed since we
	 * don't need to inform userspace of any changes to
	 * tmp_refs
	 */
	node->tmp_refs++;
}
/**
 * binder_inc_node_tmpref() - take a temporary reference on node
 * @node:	node to reference
 *
 * Take reference on node to prevent the node from being freed
 * while referenced only by a local variable. The inner lock is
 * needed to serialize with the node work on the queue (which
 * isn't needed after the node is dead). If the node is dead
 * (node->proc is NULL), use binder_dead_nodes_lock to protect
 * node->tmp_refs against dead-node-only cases where the node
 * lock cannot be acquired (eg traversing the dead node list to
 * print nodes)
 */
static void binder_inc_node_tmpref(struct binder_node *node)
{
	binder_node_lock(node);
	if (node->proc)
		binder_inner_proc_lock(node->proc);
	else
		spin_lock(&binder_dead_nodes_lock);
	binder_inc_node_tmpref_ilocked(node);
	if (node->proc)
		binder_inner_proc_unlock(node->proc);
	else
		spin_unlock(&binder_dead_nodes_lock);
	binder_node_unlock(node);
}
/**
 * binder_dec_node_tmpref() - remove a temporary reference on node
 * @node:	node to reference
 *
 * Release temporary reference on node taken via binder_inc_node_tmpref()
 */
static void binder_dec_node_tmpref(struct binder_node *node)
{
	bool free_node;

	binder_node_inner_lock(node);
	if (!node->proc)
		spin_lock(&binder_dead_nodes_lock);
	else
		__acquire(&binder_dead_nodes_lock);
	node->tmp_refs--;
	BUG_ON(node->tmp_refs < 0);
	if (!node->proc)
		spin_unlock(&binder_dead_nodes_lock);
	else
		__release(&binder_dead_nodes_lock);
	/*
	 * Call binder_dec_node() to check if all refcounts are 0
	 * and cleanup is needed. Calling with strong=0 and internal=1
	 * causes no actual reference to be released in binder_dec_node().
	 * If that changes, a change is needed here too.
	 */
	free_node = binder_dec_node_nilocked(node, 0, 1);
	binder_node_inner_unlock(node);
	if (free_node)
		binder_free_node(node);
}

static void binder_put_node(struct binder_node *node)
{
	binder_dec_node_tmpref(node);
}
static struct binder_ref *binder_get_ref_olocked(struct binder_proc *proc,
						 u32 desc, bool need_strong_ref)
{
	struct rb_node *n = proc->refs_by_desc.rb_node;
	struct binder_ref *ref;

	while (n) {
		ref = rb_entry(n, struct binder_ref, rb_node_desc);

		if (desc < ref->data.desc) {
			n = n->rb_left;
		} else if (desc > ref->data.desc) {
			n = n->rb_right;
		} else if (need_strong_ref && !ref->data.strong) {
			binder_user_error("tried to use weak ref as strong ref\n");
			return NULL;
		} else {
			return ref;
		}
	}
	return NULL;
}
/**
 * binder_get_ref_for_node_olocked() - get the ref associated with given node
 * @proc:	binder_proc that owns the ref
 * @node:	binder_node of target
 * @new_ref:	newly allocated binder_ref to be initialized or %NULL
 *
 * Look up the ref for the given node and return it if it exists
 *
 * If it doesn't exist and the caller provides a newly allocated
 * ref, initialize the fields of the newly allocated ref and insert
 * into the given proc rb_trees and node refs list.
 *
 * Return:	the ref for node. It is possible that another thread
 *		allocated/initialized the ref first in which case the
 *		returned ref would be different than the passed-in
 *		new_ref. new_ref must be kfree'd by the caller in
 *		this case.
 */
static struct binder_ref *binder_get_ref_for_node_olocked(
					struct binder_proc *proc,
					struct binder_node *node,
					struct binder_ref *new_ref)
{
	struct binder_context *context = proc->context;
	struct rb_node **p = &proc->refs_by_node.rb_node;
	struct rb_node *parent = NULL;
	struct binder_ref *ref;
	struct rb_node *n;

	while (*p) {
		parent = *p;
		ref = rb_entry(parent, struct binder_ref, rb_node_node);

		if (node < ref->node)
			p = &(*p)->rb_left;
		else if (node > ref->node)
			p = &(*p)->rb_right;
		else
			return ref;
	}
	if (!new_ref)
		return NULL;

	binder_stats_created(BINDER_STAT_REF);
	new_ref->data.debug_id = atomic_inc_return(&binder_last_id);
	new_ref->proc = proc;
	new_ref->node = node;
	rb_link_node(&new_ref->rb_node_node, parent, p);
	rb_insert_color(&new_ref->rb_node_node, &proc->refs_by_node);

	new_ref->data.desc = (node == context->binder_context_mgr_node) ? 0 : 1;
	for (n = rb_first(&proc->refs_by_desc); n != NULL; n = rb_next(n)) {
		ref = rb_entry(n, struct binder_ref, rb_node_desc);
		if (ref->data.desc > new_ref->data.desc)
			break;
		new_ref->data.desc = ref->data.desc + 1;
	}

	p = &proc->refs_by_desc.rb_node;
	while (*p) {
		parent = *p;
		ref = rb_entry(parent, struct binder_ref, rb_node_desc);

		if (new_ref->data.desc < ref->data.desc)
			p = &(*p)->rb_left;
		else if (new_ref->data.desc > ref->data.desc)
			p = &(*p)->rb_right;
		else
			BUG();
	}
	rb_link_node(&new_ref->rb_node_desc, parent, p);
	rb_insert_color(&new_ref->rb_node_desc, &proc->refs_by_desc);

	binder_node_lock(node);
	hlist_add_head(&new_ref->node_entry, &node->refs);

	binder_debug(BINDER_DEBUG_INTERNAL_REFS,
		     "%d new ref %d desc %d for node %d\n",
		     proc->pid, new_ref->data.debug_id, new_ref->data.desc,
		     node->debug_id);
	binder_node_unlock(node);
	return new_ref;
}
static void binder_cleanup_ref_olocked(struct binder_ref *ref)
{
	bool delete_node = false;

	binder_debug(BINDER_DEBUG_INTERNAL_REFS,
		     "%d delete ref %d desc %d for node %d\n",
		     ref->proc->pid, ref->data.debug_id, ref->data.desc,
		     ref->node->debug_id);

	rb_erase(&ref->rb_node_desc, &ref->proc->refs_by_desc);
	rb_erase(&ref->rb_node_node, &ref->proc->refs_by_node);

	binder_node_inner_lock(ref->node);
	if (ref->data.strong)
		binder_dec_node_nilocked(ref->node, 1, 1);

	hlist_del(&ref->node_entry);
	delete_node = binder_dec_node_nilocked(ref->node, 0, 1);
	binder_node_inner_unlock(ref->node);
	/*
	 * Clear ref->node unless we want the caller to free the node
	 */
	if (!delete_node) {
		/*
		 * The caller uses ref->node to determine
		 * whether the node needs to be freed. Clear
		 * it since the node is still alive.
		 */
		ref->node = NULL;
	}

	if (ref->death) {
		binder_debug(BINDER_DEBUG_DEAD_BINDER,
			     "%d delete ref %d desc %d has death notification\n",
			     ref->proc->pid, ref->data.debug_id,
			     ref->data.desc);
		binder_dequeue_work(ref->proc, &ref->death->work);
		binder_stats_deleted(BINDER_STAT_DEATH);
	}
	binder_stats_deleted(BINDER_STAT_REF);
}
/**
 * binder_inc_ref_olocked() - increment the ref for given handle
 * @ref:         ref to be incremented
 * @strong:      if true, strong increment, else weak
 * @target_list: list to queue node work on
 *
 * Increment the ref. @ref->proc->outer_lock must be held on entry
 *
 * Return: 0, if successful, else errno
 */
static int binder_inc_ref_olocked(struct binder_ref *ref, int strong,
				  struct list_head *target_list)
{
	int ret;

	if (strong) {
		if (ref->data.strong == 0) {
			ret = binder_inc_node(ref->node, 1, 1, target_list);
			if (ret)
				return ret;
		}
		ref->data.strong++;
	} else {
		if (ref->data.weak == 0) {
			ret = binder_inc_node(ref->node, 0, 1, target_list);
			if (ret)
				return ret;
		}
		ref->data.weak++;
	}
	return 0;
}
/**
 * binder_dec_ref_olocked() - dec the ref for given handle
 * @ref:	ref to be decremented
 * @strong:	if true, strong decrement, else weak
 *
 * Decrement the ref.
 *
 * Return: true if ref is cleaned up and ready to be freed
 */
static bool binder_dec_ref_olocked(struct binder_ref *ref, int strong)
{
	if (strong) {
		if (ref->data.strong == 0) {
			binder_user_error("%d invalid dec strong, ref %d desc %d s %d w %d\n",
					  ref->proc->pid, ref->data.debug_id,
					  ref->data.desc, ref->data.strong,
					  ref->data.weak);
			return false;
		}
		ref->data.strong--;
		if (ref->data.strong == 0)
			binder_dec_node(ref->node, strong, 1);
	} else {
		if (ref->data.weak == 0) {
			binder_user_error("%d invalid dec weak, ref %d desc %d s %d w %d\n",
					  ref->proc->pid, ref->data.debug_id,
					  ref->data.desc, ref->data.strong,
					  ref->data.weak);
			return false;
		}
		ref->data.weak--;
	}
	if (ref->data.strong == 0 && ref->data.weak == 0) {
		binder_cleanup_ref_olocked(ref);
		return true;
	}
	return false;
}
/**
 * binder_get_node_from_ref() - get the node from the given proc/desc
 * @proc:	proc containing the ref
 * @desc:	the handle associated with the ref
 * @need_strong_ref: if true, only return node if ref is strong
 * @rdata:	the id/refcount data for the ref
 *
 * Given a proc and ref handle, return the associated binder_node
 *
 * Return: a binder_node or NULL if not found or not strong when strong required
 */
static struct binder_node *binder_get_node_from_ref(
		struct binder_proc *proc,
		u32 desc, bool need_strong_ref,
		struct binder_ref_data *rdata)
{
	struct binder_node *node;
	struct binder_ref *ref;

	binder_proc_lock(proc);
	ref = binder_get_ref_olocked(proc, desc, need_strong_ref);
	if (ref) {
		node = ref->node;
		/*
		 * Take an implicit reference on the node to ensure
		 * it stays alive until the call to binder_put_node()
		 */
		binder_inc_node_tmpref(node);
		if (rdata)
			*rdata = ref->data;
		binder_proc_unlock(proc);

		return node;
	}

	binder_proc_unlock(proc);
	return NULL;
}
/**
 * binder_free_ref() - free the binder_ref
 * @ref:	ref to free
 *
 * Free the binder_ref. Free the binder_node indicated by ref->node
 * (if non-NULL) and the binder_ref_death indicated by ref->death.
 */
static void binder_free_ref(struct binder_ref *ref)
{
	if (ref->node)
		binder_free_node(ref->node);
	kfree(ref->death);
	kfree(ref);
}
/**
 * binder_update_ref_for_handle() - inc/dec the ref for given handle
 * @proc:	proc containing the ref
 * @desc:	the handle associated with the ref
 * @increment:	true=inc reference, false=dec reference
 * @strong:	true=strong reference, false=weak reference
 * @rdata:	the id/refcount data for the ref
 *
 * Given a proc and ref handle, increment or decrement the ref
 * according to "increment" arg.
 *
 * Return: 0 if successful, else errno
 */
static int binder_update_ref_for_handle(struct binder_proc *proc,
		uint32_t desc, bool increment, bool strong,
		struct binder_ref_data *rdata)
{
	int ret = 0;
	struct binder_ref *ref;
	bool delete_ref = false;

	binder_proc_lock(proc);
	ref = binder_get_ref_olocked(proc, desc, strong);
	if (!ref) {
		ret = -EINVAL;
		goto err_no_ref;
	}
	if (increment)
		ret = binder_inc_ref_olocked(ref, strong, NULL);
	else
		delete_ref = binder_dec_ref_olocked(ref, strong);

	if (rdata)
		*rdata = ref->data;
	binder_proc_unlock(proc);

	if (delete_ref)
		binder_free_ref(ref);
	return ret;

err_no_ref:
	binder_proc_unlock(proc);
	return ret;
}
/**
 * binder_dec_ref_for_handle() - dec the ref for given handle
 * @proc:	proc containing the ref
 * @desc:	the handle associated with the ref
 * @strong:	true=strong reference, false=weak reference
 * @rdata:	the id/refcount data for the ref
 *
 * Just calls binder_update_ref_for_handle() to decrement the ref.
 *
 * Return: 0 if successful, else errno
 */
static int binder_dec_ref_for_handle(struct binder_proc *proc,
		uint32_t desc, bool strong, struct binder_ref_data *rdata)
{
	return binder_update_ref_for_handle(proc, desc, false, strong, rdata);
}
/**
 * binder_inc_ref_for_node() - increment the ref for given proc/node
 * @proc:	 proc containing the ref
 * @node:	 target node
 * @strong:	 true=strong reference, false=weak reference
 * @target_list: worklist to use if node is incremented
 * @rdata:	 the id/refcount data for the ref
 *
 * Given a proc and node, increment the ref. Create the ref if it
 * doesn't already exist
 *
 * Return: 0 if successful, else errno
 */
static int binder_inc_ref_for_node(struct binder_proc *proc,
				   struct binder_node *node,
				   bool strong,
				   struct list_head *target_list,
				   struct binder_ref_data *rdata)
{
	struct binder_ref *ref;
	struct binder_ref *new_ref = NULL;
	int ret = 0;

	binder_proc_lock(proc);
	ref = binder_get_ref_for_node_olocked(proc, node, NULL);
	if (!ref) {
		binder_proc_unlock(proc);
		new_ref = kzalloc(sizeof(*ref), GFP_KERNEL);
		if (!new_ref)
			return -ENOMEM;
		binder_proc_lock(proc);
		ref = binder_get_ref_for_node_olocked(proc, node, new_ref);
	}
	ret = binder_inc_ref_olocked(ref, strong, target_list);
	*rdata = ref->data;
	binder_proc_unlock(proc);
	if (new_ref && ref != new_ref)
		/*
		 * Another thread created the ref first so
		 * free the one we allocated
		 */
		kfree(new_ref);
	return ret;
}
static void binder_pop_transaction_ilocked(struct binder_thread *target_thread,
					   struct binder_transaction *t)
{
	BUG_ON(!target_thread);
	assert_spin_locked(&target_thread->proc->inner_lock);
	BUG_ON(target_thread->transaction_stack != t);
	BUG_ON(target_thread->transaction_stack->from != target_thread);
	target_thread->transaction_stack =
		target_thread->transaction_stack->from_parent;
	t->from = NULL;
}
/**
 * binder_thread_dec_tmpref() - decrement thread->tmp_ref
 * @thread:	thread to decrement
 *
 * A thread needs to be kept alive while being used to create or
 * handle a transaction. binder_get_txn_from() is used to safely
 * extract t->from from a binder_transaction and keep the thread
 * indicated by t->from from being freed. When done with that
 * binder_thread, this function is called to decrement the
 * tmp_ref and free if appropriate (thread has been released
 * and no transaction being processed by the driver)
 */
static void binder_thread_dec_tmpref(struct binder_thread *thread)
{
	/*
	 * atomic is used to protect the counter value while
	 * it cannot reach zero or thread->is_dead is false
	 */
	binder_inner_proc_lock(thread->proc);
	atomic_dec(&thread->tmp_ref);
	if (thread->is_dead && !atomic_read(&thread->tmp_ref)) {
		binder_inner_proc_unlock(thread->proc);
		binder_free_thread(thread);
		return;
	}
	binder_inner_proc_unlock(thread->proc);
}
/**
 * binder_proc_dec_tmpref() - decrement proc->tmp_ref
 * @proc:	proc to decrement
 *
 * A binder_proc needs to be kept alive while being used to create or
 * handle a transaction. proc->tmp_ref is incremented when
 * creating a new transaction or the binder_proc is currently in-use
 * by threads that are being released. When done with the binder_proc,
 * this function is called to decrement the counter and free the
 * proc if appropriate (proc has been released, all threads have
 * been released and not currently in-use to process a transaction).
 */
static void binder_proc_dec_tmpref(struct binder_proc *proc)
{
	binder_inner_proc_lock(proc);
	proc->tmp_ref--;
	if (proc->is_dead && RB_EMPTY_ROOT(&proc->threads) &&
			!proc->tmp_ref) {
		binder_inner_proc_unlock(proc);
		binder_free_proc(proc);
		return;
	}
	binder_inner_proc_unlock(proc);
}
/**
 * binder_get_txn_from() - safely extract the "from" thread in transaction
 * @t:	binder transaction for t->from
 *
 * Atomically return the "from" thread and increment the tmp_ref
 * count for the thread to ensure it stays alive until
 * binder_thread_dec_tmpref() is called.
 *
 * Return: the value of t->from
 */
static struct binder_thread *binder_get_txn_from(
		struct binder_transaction *t)
{
	struct binder_thread *from;

	spin_lock(&t->lock);
	from = t->from;
	if (from)
		atomic_inc(&from->tmp_ref);
	spin_unlock(&t->lock);
	return from;
}
/**
 * binder_get_txn_from_and_acq_inner() - get t->from and acquire inner lock
 * @t:	binder transaction for t->from
 *
 * Same as binder_get_txn_from() except it also acquires the proc->inner_lock
 * to guarantee that the thread cannot be released while operating on it.
 * The caller must call binder_inner_proc_unlock() to release the inner lock
 * as well as call binder_thread_dec_tmpref() to release the reference.
 *
 * Return: the value of t->from
 */
static struct binder_thread *binder_get_txn_from_and_acq_inner(
		struct binder_transaction *t)
	__acquires(&t->from->proc->inner_lock)
{
	struct binder_thread *from;

	from = binder_get_txn_from(t);
	if (!from) {
		__acquire(&from->proc->inner_lock);
		return NULL;
	}
	binder_inner_proc_lock(from->proc);
	if (t->from) {
		BUG_ON(from != t->from);
		return from;
	}
	binder_inner_proc_unlock(from->proc);
	__acquire(&from->proc->inner_lock);
	binder_thread_dec_tmpref(from);
	return NULL;
}
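/*
 * Illustrative caller pattern (binder_send_failed_reply() below
 * follows it): the returned thread holds both a tmp_ref and the
 * inner lock, and both must be released:
 *
 *	target_thread = binder_get_txn_from_and_acq_inner(t);
 *	if (target_thread) {
 *		... operate on target_thread ...
 *		binder_inner_proc_unlock(target_thread->proc);
 *		binder_thread_dec_tmpref(target_thread);
 *	}
 */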
/**
 * binder_free_txn_fixups() - free unprocessed fd fixups
 * @t:	binder transaction for t->from
 *
 * If the transaction is being torn down prior to being
 * processed by the target process, free all of the
 * fd fixups and fput the file structs. It is safe to
 * call this function after the fixups have been
 * processed -- in that case, the list will be empty.
 */
static void binder_free_txn_fixups(struct binder_transaction *t)
{
	struct binder_txn_fd_fixup *fixup, *tmp;

	list_for_each_entry_safe(fixup, tmp, &t->fd_fixups, fixup_entry) {
		fput(fixup->file);
		list_del(&fixup->fixup_entry);
		kfree(fixup);
	}
}
static void binder_free_transaction(struct binder_transaction *t)
{
	if (t->buffer)
		t->buffer->transaction = NULL;
	binder_free_txn_fixups(t);
	kfree(t);
	binder_stats_deleted(BINDER_STAT_TRANSACTION);
}
static void binder_send_failed_reply(struct binder_transaction *t,
				     uint32_t error_code)
{
	struct binder_thread *target_thread;
	struct binder_transaction *next;

	BUG_ON(t->flags & TF_ONE_WAY);
	while (1) {
		target_thread = binder_get_txn_from_and_acq_inner(t);
		if (target_thread) {
			binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
				     "send failed reply for transaction %d to %d:%d\n",
				     t->debug_id,
				     target_thread->proc->pid,
				     target_thread->pid);

			binder_pop_transaction_ilocked(target_thread, t);
			if (target_thread->reply_error.cmd == BR_OK) {
				target_thread->reply_error.cmd = error_code;
				binder_enqueue_thread_work_ilocked(
					target_thread,
					&target_thread->reply_error.work);
				wake_up_interruptible(&target_thread->wait);
			} else {
				/*
				 * Cannot get here for normal operation, but
				 * we can if multiple synchronous transactions
				 * are sent without blocking for responses.
				 * Just ignore the 2nd error in this case.
				 */
				pr_warn("Unexpected reply error: %u\n",
					target_thread->reply_error.cmd);
			}
			binder_inner_proc_unlock(target_thread->proc);
			binder_thread_dec_tmpref(target_thread);
			binder_free_transaction(t);
			return;
		}
		__release(&target_thread->proc->inner_lock);
		next = t->from_parent;

		binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
			     "send failed reply for transaction %d, target dead\n",
			     t->debug_id);

		binder_free_transaction(t);
		if (next == NULL) {
			binder_debug(BINDER_DEBUG_DEAD_BINDER,
				     "reply failed, no target thread at root\n");
			return;
		}
		t = next;
		binder_debug(BINDER_DEBUG_DEAD_BINDER,
			     "reply failed, no target thread -- retry %d\n",
			     t->debug_id);
	}
}
/**
 * binder_cleanup_transaction() - cleans up undelivered transaction
 * @t:		transaction that needs to be cleaned up
 * @reason:	reason the transaction wasn't delivered
 * @error_code:	error to return to caller (if synchronous call)
 */
static void binder_cleanup_transaction(struct binder_transaction *t,
				       const char *reason,
				       uint32_t error_code)
{
	if (t->buffer->target_node && !(t->flags & TF_ONE_WAY)) {
		binder_send_failed_reply(t, error_code);
	} else {
		binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
			"undelivered transaction %d, %s\n",
			t->debug_id, reason);
		binder_free_transaction(t);
	}
}
/**
 * binder_validate_object() - checks for a valid metadata object in a buffer.
 * @buffer:	binder_buffer that we're parsing.
 * @offset:	offset in the buffer at which to validate an object.
 *
 * Return:	If there's a valid metadata object at @offset in @buffer, the
 *		size of that object. Otherwise, it returns zero.
 */
static size_t binder_validate_object(struct binder_buffer *buffer, u64 offset)
{
	/* Check if we can read a header first */
	struct binder_object_header *hdr;
	size_t object_size = 0;

	if (buffer->data_size < sizeof(*hdr) ||
	    offset > buffer->data_size - sizeof(*hdr) ||
	    !IS_ALIGNED(offset, sizeof(u32)))
		return 0;

	/* Ok, now see if we can read a complete object. */
	hdr = (struct binder_object_header *)(buffer->data + offset);
	switch (hdr->type) {
	case BINDER_TYPE_BINDER:
	case BINDER_TYPE_WEAK_BINDER:
	case BINDER_TYPE_HANDLE:
	case BINDER_TYPE_WEAK_HANDLE:
		object_size = sizeof(struct flat_binder_object);
		break;
	case BINDER_TYPE_FD:
		object_size = sizeof(struct binder_fd_object);
		break;
	case BINDER_TYPE_PTR:
		object_size = sizeof(struct binder_buffer_object);
		break;
	case BINDER_TYPE_FDA:
		object_size = sizeof(struct binder_fd_array_object);
		break;
	default:
		return 0;
	}
	if (offset <= buffer->data_size - object_size &&
	    buffer->data_size >= object_size)
		return object_size;
	else
		return 0;
}
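/*
 * Illustrative caller sketch: transaction processing walks the
 * offset array and rejects the whole buffer as soon as one object
 * fails validation:
 *
 *	for (; offp < off_end; offp++) {
 *		size_t object_size = binder_validate_object(t->buffer, *offp);
 *
 *		if (object_size == 0)
 *			goto err_bad_offset;
 *		...
 *	}
 */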
/**
 * binder_validate_ptr() - validates binder_buffer_object in a binder_buffer.
 * @b:		binder_buffer containing the object
 * @index:	index in offset array at which the binder_buffer_object is
 *		located
 * @start:	points to the start of the offset array
 * @num_valid:	the number of valid offsets in the offset array
 *
 * Return:	If @index is within the valid range of the offset array
 *		described by @start and @num_valid, and if there's a valid
 *		binder_buffer_object at the offset found in index @index
 *		of the offset array, that object is returned. Otherwise,
 *		%NULL is returned.
 *		Note that the offset found in index @index itself is not
 *		verified; this function assumes that @num_valid elements
 *		from @start were previously verified to have valid offsets.
 */
static struct binder_buffer_object *binder_validate_ptr(struct binder_buffer *b,
							binder_size_t index,
							binder_size_t *start,
							binder_size_t num_valid)
{
	struct binder_buffer_object *buffer_obj;
	binder_size_t *offp;

	if (index >= num_valid)
		return NULL;

	offp = start + index;
	buffer_obj = (struct binder_buffer_object *)(b->data + *offp);
	if (buffer_obj->hdr.type != BINDER_TYPE_PTR)
		return NULL;

	return buffer_obj;
}
/**
 * binder_validate_fixup() - validates pointer/fd fixups happen in order.
 * @b:			transaction buffer
 * @objects_start:	start of objects buffer
 * @buffer:		binder_buffer_object in which to fix up
 * @offset:		start offset in @buffer to fix up
 * @last_obj:		last binder_buffer_object that we fixed up in
 * @last_min_offset:	minimum fixup offset in @last_obj
 *
 * Return:		%true if a fixup in buffer @buffer at offset @offset is
 *			allowed.
 *
 * For safety reasons, we only allow fixups inside a buffer to happen
 * at increasing offsets; additionally, we only allow fixup on the last
 * buffer object that was verified, or one of its parents.
 *
 * Example of what is allowed:
 *
 * A
 *   B (parent = A, offset = 0)
 *   C (parent = A, offset = 16)
 *     D (parent = C, offset = 0)
 *   E (parent = A, offset = 32) // min_offset is 16 (C.parent_offset)
 *
 * Examples of what is not allowed:
 *
 * Decreasing offsets within the same parent:
 * A
 *   C (parent = A, offset = 16)
 *   B (parent = A, offset = 0) // decreasing offset within A
 *
 * Referring to a parent that wasn't the last object or any of its parents:
 * A
 *   B (parent = A, offset = 0)
 *   C (parent = A, offset = 0)
 *   C (parent = A, offset = 16)
 *     D (parent = B, offset = 0) // B is not A or any of A's parents
 */
static bool binder_validate_fixup(struct binder_buffer *b,
				  binder_size_t *objects_start,
				  struct binder_buffer_object *buffer,
				  binder_size_t fixup_offset,
				  struct binder_buffer_object *last_obj,
				  binder_size_t last_min_offset)
{
	if (!last_obj) {
		/* Nothing to fix up in */
		return false;
	}

	while (last_obj != buffer) {
		/*
		 * Safe to retrieve the parent of last_obj, since it
		 * was already previously verified by the driver.
		 */
		if ((last_obj->flags & BINDER_BUFFER_FLAG_HAS_PARENT) == 0)
			return false;
		last_min_offset = last_obj->parent_offset + sizeof(uintptr_t);
		last_obj = (struct binder_buffer_object *)
			(b->data + *(objects_start + last_obj->parent));
	}
	return (fixup_offset >= last_min_offset);
}
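
/*
 * Editor's sketch (not built): a pseudocode trace of the parent-walk loop
 * above for fixup E from the "allowed" example (fixup in A at offset 32,
 * after D was verified last). Offsets assume sizeof(uintptr_t) == 8; the
 * object names are from the kerneldoc example, not real symbols.
 */
#if 0
	last_obj = D;	/* last verified object, last_min_offset = 0 */
	/* D != A: last_min_offset = D.parent_offset + 8 = 0 + 8 = 8; climb to C */
	/* C != A: last_min_offset = C.parent_offset + 8 = 16 + 8 = 24; climb to A */
	/* A == buffer: stop; 32 >= 24, so the fixup is allowed */
#endif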
/**
 * struct binder_task_work_cb - for deferred close
 *
 * @twork:	callback_head for task work
 * @file:	file to put once back in user context
 *
 * Structure to pass task work to be handled after
 * returning from binder_ioctl() via task_work_add().
 */
struct binder_task_work_cb {
	struct callback_head twork;
	struct file *file;
};

/**
 * binder_do_fd_close() - close list of file descriptors
 * @twork:	callback head for task work
 *
 * It is not safe to call ksys_close() during the binder_ioctl()
 * function if there is a chance that binder's own file descriptor
 * might be closed. This is to meet the requirements for using
 * fdget() (see comments for __fget_light()). Therefore use
 * task_work_add() to schedule the close operation once we have
 * returned from binder_ioctl(). This function is a callback
 * for that mechanism and does the actual ksys_close() on the
 * given file descriptor.
 */
static void binder_do_fd_close(struct callback_head *twork)
{
	struct binder_task_work_cb *twcb = container_of(twork,
			struct binder_task_work_cb, twork);

	fput(twcb->file);
	kfree(twcb);
}
/**
 * binder_deferred_fd_close() - schedule a close for the given file-descriptor
 * @fd:		file-descriptor to close
 *
 * See comments in binder_do_fd_close(). This function is used to schedule
 * a file-descriptor to be closed after returning from binder_ioctl().
 */
static void binder_deferred_fd_close(int fd)
{
	struct binder_task_work_cb *twcb;

	twcb = kzalloc(sizeof(*twcb), GFP_KERNEL);
	if (!twcb)
		return;
	init_task_work(&twcb->twork, binder_do_fd_close);
	__close_fd_get_file(fd, &twcb->file);
	task_work_add(current, &twcb->twork, true);
}
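
/*
 * Editor's sketch (not built): the general shape of the deferral used
 * above. A callback_head is embedded in a private struct, queued with
 * task_work_add(), and runs when the current task returns to user space,
 * i.e. after binder_ioctl() has fully unwound. All names here are
 * hypothetical.
 */
#if 0
struct my_deferred_cb {
	struct callback_head twork;
	/* ... per-item state ... */
};

static void my_deferred_fn(struct callback_head *twork)
{
	struct my_deferred_cb *cb =
		container_of(twork, struct my_deferred_cb, twork);

	/* do the work that was unsafe inside the ioctl */
	kfree(cb);
}

static void my_defer(void)
{
	struct my_deferred_cb *cb = kzalloc(sizeof(*cb), GFP_KERNEL);

	if (!cb)
		return;
	init_task_work(&cb->twork, my_deferred_fn);
	task_work_add(current, &cb->twork, true);
}
#endif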
static void binder_transaction_buffer_release(struct binder_proc *proc,
					      struct binder_buffer *buffer,
					      binder_size_t *failed_at)
{
	binder_size_t *offp, *off_start, *off_end;
	int debug_id = buffer->debug_id;

	binder_debug(BINDER_DEBUG_TRANSACTION,
		     "%d buffer release %d, size %zd-%zd, failed at %pK\n",
		     proc->pid, buffer->debug_id,
		     buffer->data_size, buffer->offsets_size, failed_at);

	if (buffer->target_node)
		binder_dec_node(buffer->target_node, 1, 0);

	off_start = (binder_size_t *)(buffer->data +
				      ALIGN(buffer->data_size, sizeof(void *)));
	if (failed_at)
		off_end = failed_at;
	else
		off_end = (void *)off_start + buffer->offsets_size;
	for (offp = off_start; offp < off_end; offp++) {
		struct binder_object_header *hdr;
		size_t object_size = binder_validate_object(buffer, *offp);

		if (object_size == 0) {
			pr_err("transaction release %d bad object at offset %lld, size %zd\n",
			       debug_id, (u64)*offp, buffer->data_size);
			continue;
		}
		hdr = (struct binder_object_header *)(buffer->data + *offp);
		switch (hdr->type) {
		case BINDER_TYPE_BINDER:
		case BINDER_TYPE_WEAK_BINDER: {
			struct flat_binder_object *fp;
			struct binder_node *node;

			fp = to_flat_binder_object(hdr);
			node = binder_get_node(proc, fp->binder);
			if (node == NULL) {
				pr_err("transaction release %d bad node %016llx\n",
				       debug_id, (u64)fp->binder);
				break;
			}
			binder_debug(BINDER_DEBUG_TRANSACTION,
				     "        node %d u%016llx\n",
				     node->debug_id, (u64)node->ptr);
			binder_dec_node(node, hdr->type == BINDER_TYPE_BINDER,
					0);
			binder_put_node(node);
		} break;
		case BINDER_TYPE_HANDLE:
		case BINDER_TYPE_WEAK_HANDLE: {
			struct flat_binder_object *fp;
			struct binder_ref_data rdata;
			int ret;

			fp = to_flat_binder_object(hdr);
			ret = binder_dec_ref_for_handle(proc, fp->handle,
				hdr->type == BINDER_TYPE_HANDLE, &rdata);

			if (ret) {
				pr_err("transaction release %d bad handle %d, ret = %d\n",
				       debug_id, fp->handle, ret);
				break;
			}
			binder_debug(BINDER_DEBUG_TRANSACTION,
				     "        ref %d desc %d\n",
				     rdata.debug_id, rdata.desc);
		} break;

		case BINDER_TYPE_FD: {
			/*
			 * No need to close the file here since user-space
			 * closes it for successfully delivered
			 * transactions. For transactions that weren't
			 * delivered, the new fd was never allocated so
			 * there is no need to close and the fput on the
			 * file is done when the transaction is torn
			 * down.
			 */
			WARN_ON(failed_at &&
				proc->tsk == current->group_leader);
		} break;
		case BINDER_TYPE_PTR:
			/*
			 * Nothing to do here, this will get cleaned up when the
			 * transaction buffer gets freed
			 */
			break;
		case BINDER_TYPE_FDA: {
			struct binder_fd_array_object *fda;
			struct binder_buffer_object *parent;
			uintptr_t parent_buffer;
			u32 *fd_array;
			size_t fd_index;
			binder_size_t fd_buf_size;

			if (proc->tsk != current->group_leader) {
				/*
				 * Nothing to do if running in sender context
				 * The fd fixups have not been applied so no
				 * fds need to be closed.
				 */
				continue;
			}

			fda = to_binder_fd_array_object(hdr);
			parent = binder_validate_ptr(buffer, fda->parent,
						     off_start,
						     offp - off_start);
			if (!parent) {
				pr_err("transaction release %d bad parent offset\n",
				       debug_id);
				continue;
			}
			/*
			 * Since the parent was already fixed up, convert it
			 * back to kernel address space to access it
			 */
			parent_buffer = parent->buffer -
				binder_alloc_get_user_buffer_offset(
						&proc->alloc);

			fd_buf_size = sizeof(u32) * fda->num_fds;
			if (fda->num_fds >= SIZE_MAX / sizeof(u32)) {
				pr_err("transaction release %d invalid number of fds (%lld)\n",
				       debug_id, (u64)fda->num_fds);
				continue;
			}
			if (fd_buf_size > parent->length ||
			    fda->parent_offset > parent->length - fd_buf_size) {
				/* No space for all file descriptors here. */
				pr_err("transaction release %d not enough space for %lld fds in buffer\n",
				       debug_id, (u64)fda->num_fds);
				continue;
			}
			fd_array = (u32 *)(parent_buffer + (uintptr_t)fda->parent_offset);
			for (fd_index = 0; fd_index < fda->num_fds; fd_index++)
				binder_deferred_fd_close(fd_array[fd_index]);
		} break;
		default:
			pr_err("transaction release %d bad object type %x\n",
				debug_id, hdr->type);
			break;
		}
	}
}
static int binder_translate_binder(struct flat_binder_object *fp,
				   struct binder_transaction *t,
				   struct binder_thread *thread)
{
	struct binder_node *node;
	struct binder_proc *proc = thread->proc;
	struct binder_proc *target_proc = t->to_proc;
	struct binder_ref_data rdata;
	int ret = 0;

	node = binder_get_node(proc, fp->binder);
	if (!node) {
		node = binder_new_node(proc, fp);
		if (!node)
			return -ENOMEM;
	}
	if (fp->cookie != node->cookie) {
		binder_user_error("%d:%d sending u%016llx node %d, cookie mismatch %016llx != %016llx\n",
				  proc->pid, thread->pid, (u64)fp->binder,
				  node->debug_id, (u64)fp->cookie,
				  (u64)node->cookie);
		ret = -EINVAL;
		goto done;
	}
	if (security_binder_transfer_binder(proc->tsk, target_proc->tsk)) {
		ret = -EPERM;
		goto done;
	}

	ret = binder_inc_ref_for_node(target_proc, node,
			fp->hdr.type == BINDER_TYPE_BINDER,
			&thread->todo, &rdata);
	if (ret)
		goto done;

	if (fp->hdr.type == BINDER_TYPE_BINDER)
		fp->hdr.type = BINDER_TYPE_HANDLE;
	else
		fp->hdr.type = BINDER_TYPE_WEAK_HANDLE;
	fp->binder = 0;
	fp->handle = rdata.desc;
	fp->cookie = 0;

	trace_binder_transaction_node_to_ref(t, node, &rdata);
	binder_debug(BINDER_DEBUG_TRANSACTION,
		     "        node %d u%016llx -> ref %d desc %d\n",
		     node->debug_id, (u64)node->ptr,
		     rdata.debug_id, rdata.desc);
done:
	binder_put_node(node);
	return ret;
}
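
/*
 * Editor's sketch (not built): what binder_translate_binder() means from
 * user space. The sender embeds a flat_binder_object naming a local
 * object; the receiver's copy arrives with the type rewritten to a handle.
 * The local_object_ptr/local_cookie values are hypothetical.
 */
#if 0
	struct flat_binder_object obj = {
		.hdr.type = BINDER_TYPE_BINDER,	/* sender-local object */
		.flags = FLAT_BINDER_FLAG_ACCEPTS_FDS,
		.binder = (binder_uintptr_t)local_object_ptr,
		.cookie = (binder_uintptr_t)local_cookie,
	};
	/*
	 * After translation, the receiver sees:
	 *   obj.hdr.type == BINDER_TYPE_HANDLE
	 *   obj.handle   == a descriptor valid only in the receiver
	 * and the driver has created/boosted a binder_ref in the target.
	 */
#endif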
static int binder_translate_handle(struct flat_binder_object *fp,
				   struct binder_transaction *t,
				   struct binder_thread *thread)
{
	struct binder_proc *proc = thread->proc;
	struct binder_proc *target_proc = t->to_proc;
	struct binder_node *node;
	struct binder_ref_data src_rdata;
	int ret = 0;

	node = binder_get_node_from_ref(proc, fp->handle,
			fp->hdr.type == BINDER_TYPE_HANDLE, &src_rdata);
	if (!node) {
		binder_user_error("%d:%d got transaction with invalid handle, %d\n",
				  proc->pid, thread->pid, fp->handle);
		return -EINVAL;
	}
	if (security_binder_transfer_binder(proc->tsk, target_proc->tsk)) {
		ret = -EPERM;
		goto done;
	}

	binder_node_lock(node);
	if (node->proc == target_proc) {
		if (fp->hdr.type == BINDER_TYPE_HANDLE)
			fp->hdr.type = BINDER_TYPE_BINDER;
		else
			fp->hdr.type = BINDER_TYPE_WEAK_BINDER;
		fp->binder = node->ptr;
		fp->cookie = node->cookie;
		if (node->proc)
			binder_inner_proc_lock(node->proc);
		else
			__acquire(&node->proc->inner_lock);
		binder_inc_node_nilocked(node,
					 fp->hdr.type == BINDER_TYPE_BINDER,
					 0, NULL);
		if (node->proc)
			binder_inner_proc_unlock(node->proc);
		else
			__release(&node->proc->inner_lock);
		trace_binder_transaction_ref_to_node(t, node, &src_rdata);
		binder_debug(BINDER_DEBUG_TRANSACTION,
			     "        ref %d desc %d -> node %d u%016llx\n",
			     src_rdata.debug_id, src_rdata.desc, node->debug_id,
			     (u64)node->ptr);
		binder_node_unlock(node);
	} else {
		struct binder_ref_data dest_rdata;

		binder_node_unlock(node);
		ret = binder_inc_ref_for_node(target_proc, node,
				fp->hdr.type == BINDER_TYPE_HANDLE,
				NULL, &dest_rdata);
		if (ret)
			goto done;

		fp->binder = 0;
		fp->handle = dest_rdata.desc;
		fp->cookie = 0;
		trace_binder_transaction_ref_to_ref(t, node, &src_rdata,
						    &dest_rdata);
		binder_debug(BINDER_DEBUG_TRANSACTION,
			     "        ref %d desc %d -> ref %d desc %d (node %d)\n",
			     src_rdata.debug_id, src_rdata.desc,
			     dest_rdata.debug_id, dest_rdata.desc,
			     node->debug_id);
	}
done:
	binder_put_node(node);
	return ret;
}
static int binder_translate_fd(u32 *fdp,
			       struct binder_transaction *t,
			       struct binder_thread *thread,
			       struct binder_transaction *in_reply_to)
{
	struct binder_proc *proc = thread->proc;
	struct binder_proc *target_proc = t->to_proc;
	struct binder_txn_fd_fixup *fixup;
	struct file *file;
	int ret = 0;
	bool target_allows_fd;
	int fd = *fdp;

	if (in_reply_to)
		target_allows_fd = !!(in_reply_to->flags & TF_ACCEPT_FDS);
	else
		target_allows_fd = t->buffer->target_node->accept_fds;
	if (!target_allows_fd) {
		binder_user_error("%d:%d got %s with fd, %d, but target does not allow fds\n",
				  proc->pid, thread->pid,
				  in_reply_to ? "reply" : "transaction",
				  fd);
		ret = -EPERM;
		goto err_fd_not_accepted;
	}

	file = fget(fd);
	if (!file) {
		binder_user_error("%d:%d got transaction with invalid fd, %d\n",
				  proc->pid, thread->pid, fd);
		ret = -EBADF;
		goto err_fget;
	}
	ret = security_binder_transfer_file(proc->tsk, target_proc->tsk, file);
	if (ret < 0) {
		ret = -EPERM;
		goto err_security;
	}

	/*
	 * Add fixup record for this transaction. The allocation
	 * of the fd in the target needs to be done from a
	 * target thread.
	 */
	fixup = kzalloc(sizeof(*fixup), GFP_KERNEL);
	if (!fixup) {
		ret = -ENOMEM;
		goto err_alloc;
	}
	fixup->file = file;
	fixup->offset = (uintptr_t)fdp - (uintptr_t)t->buffer->data;
	trace_binder_transaction_fd_send(t, fd, fixup->offset);
	list_add_tail(&fixup->fixup_entry, &t->fd_fixups);

	return ret;

err_alloc:
err_security:
	fput(file);
err_fget:
err_fd_not_accepted:
	return ret;
}
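
/*
 * Editor's sketch (not built): the sender-side view. A file descriptor
 * travels as a binder_fd_object in the transaction buffer; the fd number
 * itself is rewritten in the target by binder_apply_fd_fixups(), not
 * here. The pipefd value is hypothetical.
 */
#if 0
	struct binder_fd_object fdo = {
		.hdr.type = BINDER_TYPE_FD,
		.fd = pipefd[0],	/* any fd valid in the sender */
	};
	/* placed in the data buffer, with its offset in the offsets array */
#endif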
static int binder_translate_fd_array(struct binder_fd_array_object *fda,
				     struct binder_buffer_object *parent,
				     struct binder_transaction *t,
				     struct binder_thread *thread,
				     struct binder_transaction *in_reply_to)
{
	binder_size_t fdi, fd_buf_size;
	uintptr_t parent_buffer;
	u32 *fd_array;
	struct binder_proc *proc = thread->proc;
	struct binder_proc *target_proc = t->to_proc;

	fd_buf_size = sizeof(u32) * fda->num_fds;
	if (fda->num_fds >= SIZE_MAX / sizeof(u32)) {
		binder_user_error("%d:%d got transaction with invalid number of fds (%lld)\n",
				  proc->pid, thread->pid, (u64)fda->num_fds);
		return -EINVAL;
	}
	if (fd_buf_size > parent->length ||
	    fda->parent_offset > parent->length - fd_buf_size) {
		/* No space for all file descriptors here. */
		binder_user_error("%d:%d not enough space to store %lld fds in buffer\n",
				  proc->pid, thread->pid, (u64)fda->num_fds);
		return -EINVAL;
	}
	/*
	 * Since the parent was already fixed up, convert it
	 * back to the kernel address space to access it
	 */
	parent_buffer = parent->buffer -
		binder_alloc_get_user_buffer_offset(&target_proc->alloc);
	fd_array = (u32 *)(parent_buffer + (uintptr_t)fda->parent_offset);
	if (!IS_ALIGNED((unsigned long)fd_array, sizeof(u32))) {
		binder_user_error("%d:%d parent offset not aligned correctly.\n",
				  proc->pid, thread->pid);
		return -EINVAL;
	}
	for (fdi = 0; fdi < fda->num_fds; fdi++) {
		int ret = binder_translate_fd(&fd_array[fdi], t, thread,
					      in_reply_to);
		if (ret < 0)
			return ret;
	}
	return 0;
}
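
/*
 * Editor's sketch (not built): the two guards above, with numbers.
 * num_fds is untrusted, so sizeof(u32) * num_fds may wrap; checking
 * num_fds >= SIZE_MAX / sizeof(u32) rejects that before the product is
 * trusted, and the range check is written as
 * parent_offset > length - fd_buf_size (after proving
 * fd_buf_size <= length) so that it, too, cannot underflow.
 */
#if 0
	/*
	 * 64-bit example: num_fds = 0x4000000000000000 (2^62).
	 * sizeof(u32) * num_fds wraps to 0, which would "fit" anywhere,
	 * but 2^62 >= SIZE_MAX / 4 == 0x3fffffffffffffff, so the request
	 * is rejected up front.
	 */
#endif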
static int binder_fixup_parent(struct binder_transaction *t,
			       struct binder_thread *thread,
			       struct binder_buffer_object *bp,
			       binder_size_t *off_start,
			       binder_size_t num_valid,
			       struct binder_buffer_object *last_fixup_obj,
			       binder_size_t last_fixup_min_off)
{
	struct binder_buffer_object *parent;
	u8 *parent_buffer;
	struct binder_buffer *b = t->buffer;
	struct binder_proc *proc = thread->proc;
	struct binder_proc *target_proc = t->to_proc;

	if (!(bp->flags & BINDER_BUFFER_FLAG_HAS_PARENT))
		return 0;

	parent = binder_validate_ptr(b, bp->parent, off_start, num_valid);
	if (!parent) {
		binder_user_error("%d:%d got transaction with invalid parent offset or type\n",
				  proc->pid, thread->pid);
		return -EINVAL;
	}

	if (!binder_validate_fixup(b, off_start,
				   parent, bp->parent_offset,
				   last_fixup_obj,
				   last_fixup_min_off)) {
		binder_user_error("%d:%d got transaction with out-of-order buffer fixup\n",
				  proc->pid, thread->pid);
		return -EINVAL;
	}

	if (parent->length < sizeof(binder_uintptr_t) ||
	    bp->parent_offset > parent->length - sizeof(binder_uintptr_t)) {
		/* No space for a pointer here! */
		binder_user_error("%d:%d got transaction with invalid parent offset\n",
				  proc->pid, thread->pid);
		return -EINVAL;
	}
	parent_buffer = (u8 *)((uintptr_t)parent->buffer -
			binder_alloc_get_user_buffer_offset(
				&target_proc->alloc));
	*(binder_uintptr_t *)(parent_buffer + bp->parent_offset) = bp->buffer;

	return 0;
}
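
/*
 * Editor's sketch (not built): the user-space layout that
 * binder_fixup_parent() services. A child binder_buffer_object names its
 * parent by index into the offsets array; the driver then writes the
 * child's target-side address into the parent's payload at parent_offset.
 * The field values and child_payload are hypothetical.
 */
#if 0
	struct binder_buffer_object child = {
		.hdr.type = BINDER_TYPE_PTR,
		.flags = BINDER_BUFFER_FLAG_HAS_PARENT,
		.buffer = (binder_uintptr_t)child_payload,
		.length = sizeof(*child_payload),
		.parent = 0,		/* index of the parent object */
		.parent_offset = 16,	/* where the pointer lives in it */
	};
#endif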
/**
 * binder_proc_transaction() - sends a transaction to a process and wakes it up
 * @t:		transaction to send
 * @proc:	process to send the transaction to
 * @thread:	thread in @proc to send the transaction to (may be NULL)
 *
 * This function queues a transaction to the specified process. It will try
 * to find a thread in the target process to handle the transaction and
 * wake it up. If no thread is found, the work is queued to the proc
 * waitqueue.
 *
 * If the @thread parameter is not NULL, the transaction is always queued
 * to the waitlist of that specific thread.
 *
 * Return:	true if the transaction was successfully queued
 *		false if the target process or thread is dead
 */
static bool binder_proc_transaction(struct binder_transaction *t,
				    struct binder_proc *proc,
				    struct binder_thread *thread)
{
	struct binder_node *node = t->buffer->target_node;
	bool oneway = !!(t->flags & TF_ONE_WAY);
	bool pending_async = false;

	BUG_ON(!node);
	binder_node_lock(node);
	if (oneway) {
		BUG_ON(thread);
		if (node->has_async_transaction) {
			pending_async = true;
		} else {
			node->has_async_transaction = true;
		}
	}

	binder_inner_proc_lock(proc);

	if (proc->is_dead || (thread && thread->is_dead)) {
		binder_inner_proc_unlock(proc);
		binder_node_unlock(node);
		return false;
	}

	if (!thread && !pending_async)
		thread = binder_select_thread_ilocked(proc);

	if (thread)
		binder_enqueue_thread_work_ilocked(thread, &t->work);
	else if (!pending_async)
		binder_enqueue_work_ilocked(&t->work, &proc->todo);
	else
		binder_enqueue_work_ilocked(&t->work, &node->async_todo);

	if (!pending_async)
		binder_wakeup_thread_ilocked(proc, thread, !oneway /* sync */);

	binder_inner_proc_unlock(proc);
	binder_node_unlock(node);

	return true;
}
/**
 * binder_get_node_refs_for_txn() - Get required refs on node for txn
 * @node:	struct binder_node for which to get refs
 * @procp:	returns @node->proc if valid
 * @error:	if no @procp then returns BR_DEAD_REPLY
 *
 * User-space normally keeps the node alive when creating a transaction
 * since it has a reference to the target. The local strong ref keeps it
 * alive if the sending process dies before the target process processes
 * the transaction. If the source process is malicious or has a reference
 * counting bug, relying on the local strong ref can fail.
 *
 * Since user-space can cause the local strong ref to go away, we also take
 * a tmpref on the node to ensure it survives while we are constructing
 * the transaction. We also need a tmpref on the proc while we are
 * constructing the transaction, so we take that here as well.
 *
 * Return: The target_node with refs taken, or NULL if @node->proc is NULL.
 * Also sets @procp if valid. If @node->proc is NULL, indicating that the
 * target proc has died, @error is set to BR_DEAD_REPLY.
 */
static struct binder_node *binder_get_node_refs_for_txn(
		struct binder_node *node,
		struct binder_proc **procp,
		uint32_t *error)
{
	struct binder_node *target_node = NULL;

	binder_node_inner_lock(node);
	if (node->proc) {
		target_node = node;
		binder_inc_node_nilocked(node, 1, 0, NULL);
		binder_inc_node_tmpref_ilocked(node);
		node->proc->tmp_ref++;
		*procp = node->proc;
	} else
		*error = BR_DEAD_REPLY;
	binder_node_inner_unlock(node);

	return target_node;
}
static void binder_transaction(struct binder_proc *proc,
			       struct binder_thread *thread,
			       struct binder_transaction_data *tr, int reply,
			       binder_size_t extra_buffers_size)
{
	int ret;
	struct binder_transaction *t;
	struct binder_work *w;
	struct binder_work *tcomplete;
	binder_size_t *offp, *off_end, *off_start;
	binder_size_t off_min;
	u8 *sg_bufp, *sg_buf_end;
	struct binder_proc *target_proc = NULL;
	struct binder_thread *target_thread = NULL;
	struct binder_node *target_node = NULL;
	struct binder_transaction *in_reply_to = NULL;
	struct binder_transaction_log_entry *e;
	uint32_t return_error = 0;
	uint32_t return_error_param = 0;
	uint32_t return_error_line = 0;
	struct binder_buffer_object *last_fixup_obj = NULL;
	binder_size_t last_fixup_min_off = 0;
	struct binder_context *context = proc->context;
	int t_debug_id = atomic_inc_return(&binder_last_id);

	e = binder_transaction_log_add(&binder_transaction_log);
	e->debug_id = t_debug_id;
	e->call_type = reply ? 2 : !!(tr->flags & TF_ONE_WAY);
	e->from_proc = proc->pid;
	e->from_thread = thread->pid;
	e->target_handle = tr->target.handle;
	e->data_size = tr->data_size;
	e->offsets_size = tr->offsets_size;
	e->context_name = proc->context->name;

	if (reply) {
		binder_inner_proc_lock(proc);
		in_reply_to = thread->transaction_stack;
		if (in_reply_to == NULL) {
			binder_inner_proc_unlock(proc);
			binder_user_error("%d:%d got reply transaction with no transaction stack\n",
					  proc->pid, thread->pid);
			return_error = BR_FAILED_REPLY;
			return_error_param = -EPROTO;
			return_error_line = __LINE__;
			goto err_empty_call_stack;
		}
		if (in_reply_to->to_thread != thread) {
			spin_lock(&in_reply_to->lock);
			binder_user_error("%d:%d got reply transaction with bad transaction stack, transaction %d has target %d:%d\n",
				proc->pid, thread->pid, in_reply_to->debug_id,
				in_reply_to->to_proc ?
				in_reply_to->to_proc->pid : 0,
				in_reply_to->to_thread ?
				in_reply_to->to_thread->pid : 0);
			spin_unlock(&in_reply_to->lock);
			binder_inner_proc_unlock(proc);
			return_error = BR_FAILED_REPLY;
			return_error_param = -EPROTO;
			return_error_line = __LINE__;
			in_reply_to = NULL;
			goto err_bad_call_stack;
		}
		thread->transaction_stack = in_reply_to->to_parent;
		binder_inner_proc_unlock(proc);
		binder_set_nice(in_reply_to->saved_priority);
		target_thread = binder_get_txn_from_and_acq_inner(in_reply_to);
		if (target_thread == NULL) {
			/* annotation for sparse */
			__release(&target_thread->proc->inner_lock);
			return_error = BR_DEAD_REPLY;
			return_error_line = __LINE__;
			goto err_dead_binder;
		}
		if (target_thread->transaction_stack != in_reply_to) {
			binder_user_error("%d:%d got reply transaction with bad target transaction stack %d, expected %d\n",
				proc->pid, thread->pid,
				target_thread->transaction_stack ?
				target_thread->transaction_stack->debug_id : 0,
				in_reply_to->debug_id);
			binder_inner_proc_unlock(target_thread->proc);
			return_error = BR_FAILED_REPLY;
			return_error_param = -EPROTO;
			return_error_line = __LINE__;
			in_reply_to = NULL;
			target_thread = NULL;
			goto err_dead_binder;
		}
		target_proc = target_thread->proc;
		target_proc->tmp_ref++;
		binder_inner_proc_unlock(target_thread->proc);
	} else {
		if (tr->target.handle) {
			struct binder_ref *ref;

			/*
			 * There must already be a strong ref
			 * on this node. If so, do a strong
			 * increment on the node to ensure it
			 * stays alive until the transaction is
			 * done.
			 */
			binder_proc_lock(proc);
			ref = binder_get_ref_olocked(proc, tr->target.handle,
						     true);
			if (ref) {
				target_node = binder_get_node_refs_for_txn(
						ref->node, &target_proc,
						&return_error);
			} else {
				binder_user_error("%d:%d got transaction to invalid handle\n",
						  proc->pid, thread->pid);
				return_error = BR_FAILED_REPLY;
			}
			binder_proc_unlock(proc);
		} else {
			mutex_lock(&context->context_mgr_node_lock);
			target_node = context->binder_context_mgr_node;
			if (target_node)
				target_node = binder_get_node_refs_for_txn(
						target_node, &target_proc,
						&return_error);
			else
				return_error = BR_DEAD_REPLY;
			mutex_unlock(&context->context_mgr_node_lock);
			if (target_node && target_proc == proc) {
				binder_user_error("%d:%d got transaction to context manager from process owning it\n",
						  proc->pid, thread->pid);
				return_error = BR_FAILED_REPLY;
				return_error_param = -EINVAL;
				return_error_line = __LINE__;
				goto err_invalid_target_handle;
			}
		}
		if (!target_node) {
			/*
			 * return_error is set above
			 */
			return_error_param = -EINVAL;
			return_error_line = __LINE__;
			goto err_dead_binder;
		}
		e->to_node = target_node->debug_id;
		if (security_binder_transaction(proc->tsk,
						target_proc->tsk) < 0) {
			return_error = BR_FAILED_REPLY;
			return_error_param = -EPERM;
			return_error_line = __LINE__;
			goto err_invalid_target_handle;
		}
		binder_inner_proc_lock(proc);

		w = list_first_entry_or_null(&thread->todo,
					     struct binder_work, entry);
		if (!(tr->flags & TF_ONE_WAY) && w &&
		    w->type == BINDER_WORK_TRANSACTION) {
			/*
			 * Do not allow new outgoing transaction from a
			 * thread that has a transaction at the head of
			 * its todo list. Only need to check the head
			 * because binder_select_thread_ilocked picks a
			 * thread from proc->waiting_threads to enqueue
			 * the transaction, and nothing is queued to the
			 * todo list while the thread is on waiting_threads.
			 */
			binder_user_error("%d:%d new transaction not allowed when there is a transaction on thread todo\n",
					  proc->pid, thread->pid);
			binder_inner_proc_unlock(proc);
			return_error = BR_FAILED_REPLY;
			return_error_param = -EPROTO;
			return_error_line = __LINE__;
			goto err_bad_todo_list;
		}

		if (!(tr->flags & TF_ONE_WAY) && thread->transaction_stack) {
			struct binder_transaction *tmp;

			tmp = thread->transaction_stack;
			if (tmp->to_thread != thread) {
				spin_lock(&tmp->lock);
				binder_user_error("%d:%d got new transaction with bad transaction stack, transaction %d has target %d:%d\n",
					proc->pid, thread->pid, tmp->debug_id,
					tmp->to_proc ? tmp->to_proc->pid : 0,
					tmp->to_thread ?
					tmp->to_thread->pid : 0);
				spin_unlock(&tmp->lock);
				binder_inner_proc_unlock(proc);
				return_error = BR_FAILED_REPLY;
				return_error_param = -EPROTO;
				return_error_line = __LINE__;
				goto err_bad_call_stack;
			}
			while (tmp) {
				struct binder_thread *from;

				spin_lock(&tmp->lock);
				from = tmp->from;
				if (from && from->proc == target_proc) {
					atomic_inc(&from->tmp_ref);
					target_thread = from;
					spin_unlock(&tmp->lock);
					break;
				}
				spin_unlock(&tmp->lock);
				tmp = tmp->from_parent;
			}
		}
		binder_inner_proc_unlock(proc);
	}
	if (target_thread)
		e->to_thread = target_thread->pid;
	e->to_proc = target_proc->pid;

	/* TODO: reuse incoming transaction for reply */
	t = kzalloc(sizeof(*t), GFP_KERNEL);
	if (t == NULL) {
		return_error = BR_FAILED_REPLY;
		return_error_param = -ENOMEM;
		return_error_line = __LINE__;
		goto err_alloc_t_failed;
	}
	INIT_LIST_HEAD(&t->fd_fixups);
	binder_stats_created(BINDER_STAT_TRANSACTION);
	spin_lock_init(&t->lock);

	tcomplete = kzalloc(sizeof(*tcomplete), GFP_KERNEL);
	if (tcomplete == NULL) {
		return_error = BR_FAILED_REPLY;
		return_error_param = -ENOMEM;
		return_error_line = __LINE__;
		goto err_alloc_tcomplete_failed;
	}
	binder_stats_created(BINDER_STAT_TRANSACTION_COMPLETE);

	t->debug_id = t_debug_id;

	if (reply)
		binder_debug(BINDER_DEBUG_TRANSACTION,
			     "%d:%d BC_REPLY %d -> %d:%d, data %016llx-%016llx size %lld-%lld-%lld\n",
			     proc->pid, thread->pid, t->debug_id,
			     target_proc->pid, target_thread->pid,
			     (u64)tr->data.ptr.buffer,
			     (u64)tr->data.ptr.offsets,
			     (u64)tr->data_size, (u64)tr->offsets_size,
			     (u64)extra_buffers_size);
	else
		binder_debug(BINDER_DEBUG_TRANSACTION,
			     "%d:%d BC_TRANSACTION %d -> %d - node %d, data %016llx-%016llx size %lld-%lld-%lld\n",
			     proc->pid, thread->pid, t->debug_id,
			     target_proc->pid, target_node->debug_id,
			     (u64)tr->data.ptr.buffer,
			     (u64)tr->data.ptr.offsets,
			     (u64)tr->data_size, (u64)tr->offsets_size,
			     (u64)extra_buffers_size);

	if (!reply && !(tr->flags & TF_ONE_WAY))
		t->from = thread;
	else
		t->from = NULL;
	t->sender_euid = task_euid(proc->tsk);
	t->to_proc = target_proc;
	t->to_thread = target_thread;
	t->code = tr->code;
	t->flags = tr->flags;
	t->priority = task_nice(current);

	trace_binder_transaction(reply, t, target_node);

	t->buffer = binder_alloc_new_buf(&target_proc->alloc, tr->data_size,
		tr->offsets_size, extra_buffers_size,
		!reply && (t->flags & TF_ONE_WAY));
	if (IS_ERR(t->buffer)) {
		/*
		 * -ESRCH indicates VMA cleared. The target is dying.
		 */
		return_error_param = PTR_ERR(t->buffer);
		return_error = return_error_param == -ESRCH ?
			BR_DEAD_REPLY : BR_FAILED_REPLY;
		return_error_line = __LINE__;
		t->buffer = NULL;
		goto err_binder_alloc_buf_failed;
	}
	t->buffer->debug_id = t->debug_id;
	t->buffer->transaction = t;
	t->buffer->target_node = target_node;
	trace_binder_transaction_alloc_buf(t->buffer);
	off_start = (binder_size_t *)(t->buffer->data +
				      ALIGN(tr->data_size, sizeof(void *)));
	offp = off_start;

	if (copy_from_user(t->buffer->data, (const void __user *)(uintptr_t)
			   tr->data.ptr.buffer, tr->data_size)) {
		binder_user_error("%d:%d got transaction with invalid data ptr\n",
				proc->pid, thread->pid);
		return_error = BR_FAILED_REPLY;
		return_error_param = -EFAULT;
		return_error_line = __LINE__;
		goto err_copy_data_failed;
	}
	if (copy_from_user(offp, (const void __user *)(uintptr_t)
			   tr->data.ptr.offsets, tr->offsets_size)) {
		binder_user_error("%d:%d got transaction with invalid offsets ptr\n",
				proc->pid, thread->pid);
		return_error = BR_FAILED_REPLY;
		return_error_param = -EFAULT;
		return_error_line = __LINE__;
		goto err_copy_data_failed;
	}
	if (!IS_ALIGNED(tr->offsets_size, sizeof(binder_size_t))) {
		binder_user_error("%d:%d got transaction with invalid offsets size, %lld\n",
				proc->pid, thread->pid, (u64)tr->offsets_size);
		return_error = BR_FAILED_REPLY;
		return_error_param = -EINVAL;
		return_error_line = __LINE__;
		goto err_bad_offset;
	}
	if (!IS_ALIGNED(extra_buffers_size, sizeof(u64))) {
		binder_user_error("%d:%d got transaction with unaligned buffers size, %lld\n",
				  proc->pid, thread->pid,
				  (u64)extra_buffers_size);
		return_error = BR_FAILED_REPLY;
		return_error_param = -EINVAL;
		return_error_line = __LINE__;
		goto err_bad_offset;
	}
	off_end = (void *)off_start + tr->offsets_size;
	sg_bufp = (u8 *)(PTR_ALIGN(off_end, sizeof(void *)));
	sg_buf_end = sg_bufp + extra_buffers_size;
	off_min = 0;
	for (; offp < off_end; offp++) {
		struct binder_object_header *hdr;
		size_t object_size = binder_validate_object(t->buffer, *offp);

		if (object_size == 0 || *offp < off_min) {
			binder_user_error("%d:%d got transaction with invalid offset (%lld, min %lld max %lld) or object.\n",
					  proc->pid, thread->pid, (u64)*offp,
					  (u64)off_min,
					  (u64)t->buffer->data_size);
			return_error = BR_FAILED_REPLY;
			return_error_param = -EINVAL;
			return_error_line = __LINE__;
			goto err_bad_offset;
		}

		hdr = (struct binder_object_header *)(t->buffer->data + *offp);
		off_min = *offp + object_size;
		switch (hdr->type) {
		case BINDER_TYPE_BINDER:
		case BINDER_TYPE_WEAK_BINDER: {
			struct flat_binder_object *fp;

			fp = to_flat_binder_object(hdr);
			ret = binder_translate_binder(fp, t, thread);
			if (ret < 0) {
				return_error = BR_FAILED_REPLY;
				return_error_param = ret;
				return_error_line = __LINE__;
				goto err_translate_failed;
			}
		} break;
		case BINDER_TYPE_HANDLE:
		case BINDER_TYPE_WEAK_HANDLE: {
			struct flat_binder_object *fp;

			fp = to_flat_binder_object(hdr);
			ret = binder_translate_handle(fp, t, thread);
			if (ret < 0) {
				return_error = BR_FAILED_REPLY;
				return_error_param = ret;
				return_error_line = __LINE__;
				goto err_translate_failed;
			}
		} break;

		case BINDER_TYPE_FD: {
			struct binder_fd_object *fp = to_binder_fd_object(hdr);
			int ret = binder_translate_fd(&fp->fd, t, thread,
						      in_reply_to);

			if (ret < 0) {
				return_error = BR_FAILED_REPLY;
				return_error_param = ret;
				return_error_line = __LINE__;
				goto err_translate_failed;
			}
			fp->pad_binder = 0;
		} break;
		case BINDER_TYPE_FDA: {
			struct binder_fd_array_object *fda =
				to_binder_fd_array_object(hdr);
			struct binder_buffer_object *parent =
				binder_validate_ptr(t->buffer, fda->parent,
						    off_start,
						    offp - off_start);
			if (!parent) {
				binder_user_error("%d:%d got transaction with invalid parent offset or type\n",
						  proc->pid, thread->pid);
				return_error = BR_FAILED_REPLY;
				return_error_param = -EINVAL;
				return_error_line = __LINE__;
				goto err_bad_parent;
			}
			if (!binder_validate_fixup(t->buffer, off_start,
						   parent, fda->parent_offset,
						   last_fixup_obj,
						   last_fixup_min_off)) {
				binder_user_error("%d:%d got transaction with out-of-order buffer fixup\n",
						  proc->pid, thread->pid);
				return_error = BR_FAILED_REPLY;
				return_error_param = -EINVAL;
				return_error_line = __LINE__;
				goto err_bad_parent;
			}
			ret = binder_translate_fd_array(fda, parent, t, thread,
							in_reply_to);
			if (ret < 0) {
				return_error = BR_FAILED_REPLY;
				return_error_param = ret;
				return_error_line = __LINE__;
				goto err_translate_failed;
			}
			last_fixup_obj = parent;
			last_fixup_min_off =
				fda->parent_offset + sizeof(u32) * fda->num_fds;
		} break;
		case BINDER_TYPE_PTR: {
			struct binder_buffer_object *bp =
				to_binder_buffer_object(hdr);
			size_t buf_left = sg_buf_end - sg_bufp;

			if (bp->length > buf_left) {
				binder_user_error("%d:%d got transaction with too large buffer\n",
						  proc->pid, thread->pid);
				return_error = BR_FAILED_REPLY;
				return_error_param = -EINVAL;
				return_error_line = __LINE__;
				goto err_bad_offset;
			}
			if (copy_from_user(sg_bufp,
					   (const void __user *)(uintptr_t)
					   bp->buffer, bp->length)) {
				binder_user_error("%d:%d got transaction with invalid offsets ptr\n",
						  proc->pid, thread->pid);
				return_error_param = -EFAULT;
				return_error = BR_FAILED_REPLY;
				return_error_line = __LINE__;
				goto err_copy_data_failed;
			}
			/* Fixup buffer pointer to target proc address space */
			bp->buffer = (uintptr_t)sg_bufp +
				binder_alloc_get_user_buffer_offset(
						&target_proc->alloc);
			sg_bufp += ALIGN(bp->length, sizeof(u64));

			ret = binder_fixup_parent(t, thread, bp, off_start,
						  offp - off_start,
						  last_fixup_obj,
						  last_fixup_min_off);
			if (ret < 0) {
				return_error = BR_FAILED_REPLY;
				return_error_param = ret;
				return_error_line = __LINE__;
				goto err_translate_failed;
			}
			last_fixup_obj = bp;
			last_fixup_min_off = 0;
		} break;
		default:
			binder_user_error("%d:%d got transaction with invalid object type, %x\n",
				proc->pid, thread->pid, hdr->type);
			return_error = BR_FAILED_REPLY;
			return_error_param = -EINVAL;
			return_error_line = __LINE__;
			goto err_bad_object_type;
		}
	}
	tcomplete->type = BINDER_WORK_TRANSACTION_COMPLETE;
	t->work.type = BINDER_WORK_TRANSACTION;

	if (reply) {
		binder_enqueue_thread_work(thread, tcomplete);
		binder_inner_proc_lock(target_proc);
		if (target_thread->is_dead) {
			binder_inner_proc_unlock(target_proc);
			goto err_dead_proc_or_thread;
		}
		BUG_ON(t->buffer->async_transaction != 0);
		binder_pop_transaction_ilocked(target_thread, in_reply_to);
		binder_enqueue_thread_work_ilocked(target_thread, &t->work);
		binder_inner_proc_unlock(target_proc);
		wake_up_interruptible_sync(&target_thread->wait);
		binder_free_transaction(in_reply_to);
	} else if (!(t->flags & TF_ONE_WAY)) {
		BUG_ON(t->buffer->async_transaction != 0);
		binder_inner_proc_lock(proc);
		/*
		 * Defer the TRANSACTION_COMPLETE, so we don't return to
		 * userspace immediately; this allows the target process to
		 * immediately start processing this transaction, reducing
		 * latency. We will then return the TRANSACTION_COMPLETE when
		 * the target replies (or there is an error).
		 */
		binder_enqueue_deferred_thread_work_ilocked(thread, tcomplete);
		t->need_reply = 1;
		t->from_parent = thread->transaction_stack;
		thread->transaction_stack = t;
		binder_inner_proc_unlock(proc);
		if (!binder_proc_transaction(t, target_proc, target_thread)) {
			binder_inner_proc_lock(proc);
			binder_pop_transaction_ilocked(thread, t);
			binder_inner_proc_unlock(proc);
			goto err_dead_proc_or_thread;
		}
	} else {
		BUG_ON(target_node == NULL);
		BUG_ON(t->buffer->async_transaction != 1);
		binder_enqueue_thread_work(thread, tcomplete);
		if (!binder_proc_transaction(t, target_proc, NULL))
			goto err_dead_proc_or_thread;
	}
	if (target_thread)
		binder_thread_dec_tmpref(target_thread);
	binder_proc_dec_tmpref(target_proc);
	if (target_node)
		binder_dec_node_tmpref(target_node);
	/*
	 * write barrier to synchronize with initialization
	 * of log entry
	 */
	smp_wmb();
	WRITE_ONCE(e->debug_id_done, t_debug_id);
	return;

err_dead_proc_or_thread:
	return_error = BR_DEAD_REPLY;
	return_error_line = __LINE__;
	binder_dequeue_work(proc, tcomplete);
err_translate_failed:
err_bad_object_type:
err_bad_offset:
err_bad_parent:
err_copy_data_failed:
	binder_free_txn_fixups(t);
	trace_binder_transaction_failed_buffer_release(t->buffer);
	binder_transaction_buffer_release(target_proc, t->buffer, offp);
	if (target_node)
		binder_dec_node_tmpref(target_node);
	target_node = NULL;
	t->buffer->transaction = NULL;
	binder_alloc_free_buf(&target_proc->alloc, t->buffer);
err_binder_alloc_buf_failed:
	kfree(tcomplete);
	binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
err_alloc_tcomplete_failed:
	kfree(t);
	binder_stats_deleted(BINDER_STAT_TRANSACTION);
err_alloc_t_failed:
err_bad_todo_list:
err_bad_call_stack:
err_empty_call_stack:
err_dead_binder:
err_invalid_target_handle:
	if (target_thread)
		binder_thread_dec_tmpref(target_thread);
	if (target_proc)
		binder_proc_dec_tmpref(target_proc);
	if (target_node) {
		binder_dec_node(target_node, 1, 0);
		binder_dec_node_tmpref(target_node);
	}

	binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
		     "%d:%d transaction failed %d/%d, size %lld-%lld line %d\n",
		     proc->pid, thread->pid, return_error, return_error_param,
		     (u64)tr->data_size, (u64)tr->offsets_size,
		     return_error_line);

	{
		struct binder_transaction_log_entry *fe;

		e->return_error = return_error;
		e->return_error_param = return_error_param;
		e->return_error_line = return_error_line;
		fe = binder_transaction_log_add(&binder_transaction_log_failed);
		*fe = *e;
		/*
		 * write barrier to synchronize with initialization
		 * of log entry
		 */
		smp_wmb();
		WRITE_ONCE(e->debug_id_done, t_debug_id);
		WRITE_ONCE(fe->debug_id_done, t_debug_id);
	}

	BUG_ON(thread->return_error.cmd != BR_OK);
	if (in_reply_to) {
		thread->return_error.cmd = BR_TRANSACTION_COMPLETE;
		binder_enqueue_thread_work(thread, &thread->return_error.work);
		binder_send_failed_reply(in_reply_to, return_error);
	} else {
		thread->return_error.cmd = return_error;
		binder_enqueue_thread_work(thread, &thread->return_error.work);
	}
}
/**
 * binder_free_buf() - free the specified buffer
 * @proc:	binder proc that owns buffer
 * @buffer:	buffer to be freed
 *
 * If buffer for an async transaction, enqueue the next async
 * transaction from the node.
 *
 * Cleanup buffer and free it.
 */
static void
binder_free_buf(struct binder_proc *proc, struct binder_buffer *buffer)
{
	if (buffer->transaction) {
		buffer->transaction->buffer = NULL;
		buffer->transaction = NULL;
	}
	if (buffer->async_transaction && buffer->target_node) {
		struct binder_node *buf_node;
		struct binder_work *w;

		buf_node = buffer->target_node;
		binder_node_inner_lock(buf_node);
		BUG_ON(!buf_node->has_async_transaction);
		BUG_ON(buf_node->proc != proc);
		w = binder_dequeue_work_head_ilocked(
				&buf_node->async_todo);
		if (!w) {
			buf_node->has_async_transaction = false;
		} else {
			binder_enqueue_work_ilocked(
					w, &proc->todo);
			binder_wakeup_proc_ilocked(proc);
		}
		binder_node_inner_unlock(buf_node);
	}
	trace_binder_transaction_buffer_release(buffer);
	binder_transaction_buffer_release(proc, buffer, NULL);
	binder_alloc_free_buf(&proc->alloc, buffer);
}
static int binder_thread_write(struct binder_proc *proc,
			struct binder_thread *thread,
			binder_uintptr_t binder_buffer, size_t size,
			binder_size_t *consumed)
{
	uint32_t cmd;
	struct binder_context *context = proc->context;
	void __user *buffer = (void __user *)(uintptr_t)binder_buffer;
	void __user *ptr = buffer + *consumed;
	void __user *end = buffer + size;

	while (ptr < end && thread->return_error.cmd == BR_OK) {
		int ret;

		if (get_user(cmd, (uint32_t __user *)ptr))
			return -EFAULT;
		ptr += sizeof(uint32_t);
		trace_binder_command(cmd);
		if (_IOC_NR(cmd) < ARRAY_SIZE(binder_stats.bc)) {
			atomic_inc(&binder_stats.bc[_IOC_NR(cmd)]);
			atomic_inc(&proc->stats.bc[_IOC_NR(cmd)]);
			atomic_inc(&thread->stats.bc[_IOC_NR(cmd)]);
		}
		switch (cmd) {
		case BC_INCREFS:
		case BC_ACQUIRE:
		case BC_RELEASE:
		case BC_DECREFS: {
			uint32_t target;
			const char *debug_string;
			bool strong = cmd == BC_ACQUIRE || cmd == BC_RELEASE;
			bool increment = cmd == BC_INCREFS || cmd == BC_ACQUIRE;
			struct binder_ref_data rdata;

			if (get_user(target, (uint32_t __user *)ptr))
				return -EFAULT;

			ptr += sizeof(uint32_t);
			ret = -1;
			if (increment && !target) {
				struct binder_node *ctx_mgr_node;
				mutex_lock(&context->context_mgr_node_lock);
				ctx_mgr_node = context->binder_context_mgr_node;
				if (ctx_mgr_node)
					ret = binder_inc_ref_for_node(
							proc, ctx_mgr_node,
							strong, NULL, &rdata);
				mutex_unlock(&context->context_mgr_node_lock);
			}
			if (ret)
				ret = binder_update_ref_for_handle(
						proc, target, increment, strong,
						&rdata);
			if (!ret && rdata.desc != target) {
				binder_user_error("%d:%d tried to acquire reference to desc %d, got %d instead\n",
					proc->pid, thread->pid,
					target, rdata.desc);
			}
			switch (cmd) {
			case BC_INCREFS:
				debug_string = "IncRefs";
				break;
			case BC_ACQUIRE:
				debug_string = "Acquire";
				break;
			case BC_RELEASE:
				debug_string = "Release";
				break;
			case BC_DECREFS:
			default:
				debug_string = "DecRefs";
				break;
			}
			if (ret) {
				binder_user_error("%d:%d %s %d refcount change on invalid ref %d ret %d\n",
					proc->pid, thread->pid, debug_string,
					strong, target, ret);
				break;
			}
			binder_debug(BINDER_DEBUG_USER_REFS,
				     "%d:%d %s ref %d desc %d s %d w %d\n",
				     proc->pid, thread->pid, debug_string,
				     rdata.debug_id, rdata.desc, rdata.strong,
				     rdata.weak);
			break;
		}
		case BC_INCREFS_DONE:
		case BC_ACQUIRE_DONE: {
			binder_uintptr_t node_ptr;
			binder_uintptr_t cookie;
			struct binder_node *node;
			bool free_node;

			if (get_user(node_ptr, (binder_uintptr_t __user *)ptr))
				return -EFAULT;
			ptr += sizeof(binder_uintptr_t);
			if (get_user(cookie, (binder_uintptr_t __user *)ptr))
				return -EFAULT;
			ptr += sizeof(binder_uintptr_t);
			node = binder_get_node(proc, node_ptr);
			if (node == NULL) {
				binder_user_error("%d:%d %s u%016llx no match\n",
					proc->pid, thread->pid,
					cmd == BC_INCREFS_DONE ?
					"BC_INCREFS_DONE" :
					"BC_ACQUIRE_DONE",
					(u64)node_ptr);
				break;
			}
			if (cookie != node->cookie) {
				binder_user_error("%d:%d %s u%016llx node %d cookie mismatch %016llx != %016llx\n",
					proc->pid, thread->pid,
					cmd == BC_INCREFS_DONE ?
					"BC_INCREFS_DONE" : "BC_ACQUIRE_DONE",
					(u64)node_ptr, node->debug_id,
					(u64)cookie, (u64)node->cookie);
				binder_put_node(node);
				break;
			}
			binder_node_inner_lock(node);
			if (cmd == BC_ACQUIRE_DONE) {
				if (node->pending_strong_ref == 0) {
					binder_user_error("%d:%d BC_ACQUIRE_DONE node %d has no pending acquire request\n",
						proc->pid, thread->pid,
						node->debug_id);
					binder_node_inner_unlock(node);
					binder_put_node(node);
					break;
				}
				node->pending_strong_ref = 0;
			} else {
				if (node->pending_weak_ref == 0) {
					binder_user_error("%d:%d BC_INCREFS_DONE node %d has no pending increfs request\n",
						proc->pid, thread->pid,
						node->debug_id);
					binder_node_inner_unlock(node);
					binder_put_node(node);
					break;
				}
				node->pending_weak_ref = 0;
			}
			free_node = binder_dec_node_nilocked(node,
					cmd == BC_ACQUIRE_DONE, 0);
			WARN_ON(free_node);
			binder_debug(BINDER_DEBUG_USER_REFS,
				     "%d:%d %s node %d ls %d lw %d tr %d\n",
				     proc->pid, thread->pid,
				     cmd == BC_INCREFS_DONE ? "BC_INCREFS_DONE" : "BC_ACQUIRE_DONE",
				     node->debug_id, node->local_strong_refs,
				     node->local_weak_refs, node->tmp_refs);
			binder_node_inner_unlock(node);
			binder_put_node(node);
			break;
		}
		case BC_ATTEMPT_ACQUIRE:
			pr_err("BC_ATTEMPT_ACQUIRE not supported\n");
			return -EINVAL;
		case BC_ACQUIRE_RESULT:
			pr_err("BC_ACQUIRE_RESULT not supported\n");
			return -EINVAL;

		case BC_FREE_BUFFER: {
			binder_uintptr_t data_ptr;
			struct binder_buffer *buffer;

			if (get_user(data_ptr, (binder_uintptr_t __user *)ptr))
				return -EFAULT;
			ptr += sizeof(binder_uintptr_t);

			buffer = binder_alloc_prepare_to_free(&proc->alloc,
							      data_ptr);
			if (IS_ERR_OR_NULL(buffer)) {
				if (PTR_ERR(buffer) == -EPERM) {
					binder_user_error(
						"%d:%d BC_FREE_BUFFER u%016llx matched unreturned or currently freeing buffer\n",
						proc->pid, thread->pid,
						(u64)data_ptr);
				} else {
					binder_user_error(
						"%d:%d BC_FREE_BUFFER u%016llx no match\n",
						proc->pid, thread->pid,
						(u64)data_ptr);
				}
				break;
			}
			binder_debug(BINDER_DEBUG_FREE_BUFFER,
				     "%d:%d BC_FREE_BUFFER u%016llx found buffer %d for %s transaction\n",
				     proc->pid, thread->pid, (u64)data_ptr,
				     buffer->debug_id,
				     buffer->transaction ? "active" : "finished");
			binder_free_buf(proc, buffer);
			break;
		}

		case BC_TRANSACTION_SG:
		case BC_REPLY_SG: {
			struct binder_transaction_data_sg tr;

			if (copy_from_user(&tr, ptr, sizeof(tr)))
				return -EFAULT;
			ptr += sizeof(tr);
			binder_transaction(proc, thread, &tr.transaction_data,
					   cmd == BC_REPLY_SG, tr.buffers_size);
			break;
		}
		case BC_TRANSACTION:
		case BC_REPLY: {
			struct binder_transaction_data tr;

			if (copy_from_user(&tr, ptr, sizeof(tr)))
				return -EFAULT;
			ptr += sizeof(tr);
			binder_transaction(proc, thread, &tr,
					   cmd == BC_REPLY, 0);
			break;
		}

		case BC_REGISTER_LOOPER:
			binder_debug(BINDER_DEBUG_THREADS,
				     "%d:%d BC_REGISTER_LOOPER\n",
				     proc->pid, thread->pid);
			binder_inner_proc_lock(proc);
			if (thread->looper & BINDER_LOOPER_STATE_ENTERED) {
				thread->looper |= BINDER_LOOPER_STATE_INVALID;
				binder_user_error("%d:%d ERROR: BC_REGISTER_LOOPER called after BC_ENTER_LOOPER\n",
					proc->pid, thread->pid);
			} else if (proc->requested_threads == 0) {
				thread->looper |= BINDER_LOOPER_STATE_INVALID;
				binder_user_error("%d:%d ERROR: BC_REGISTER_LOOPER called without request\n",
					proc->pid, thread->pid);
			} else {
				proc->requested_threads--;
				proc->requested_threads_started++;
			}
			thread->looper |= BINDER_LOOPER_STATE_REGISTERED;
			binder_inner_proc_unlock(proc);
			break;
		case BC_ENTER_LOOPER:
			binder_debug(BINDER_DEBUG_THREADS,
				     "%d:%d BC_ENTER_LOOPER\n",
				     proc->pid, thread->pid);
			if (thread->looper & BINDER_LOOPER_STATE_REGISTERED) {
				thread->looper |= BINDER_LOOPER_STATE_INVALID;
				binder_user_error("%d:%d ERROR: BC_ENTER_LOOPER called after BC_REGISTER_LOOPER\n",
					proc->pid, thread->pid);
			}
			thread->looper |= BINDER_LOOPER_STATE_ENTERED;
			break;
		case BC_EXIT_LOOPER:
			binder_debug(BINDER_DEBUG_THREADS,
				     "%d:%d BC_EXIT_LOOPER\n",
				     proc->pid, thread->pid);
			thread->looper |= BINDER_LOOPER_STATE_EXITED;
			break;

		case BC_REQUEST_DEATH_NOTIFICATION:
		case BC_CLEAR_DEATH_NOTIFICATION: {
			uint32_t target;
			binder_uintptr_t cookie;
			struct binder_ref *ref;
			struct binder_ref_death *death = NULL;

			if (get_user(target, (uint32_t __user *)ptr))
				return -EFAULT;
			ptr += sizeof(uint32_t);
			if (get_user(cookie, (binder_uintptr_t __user *)ptr))
				return -EFAULT;
			ptr += sizeof(binder_uintptr_t);
			if (cmd == BC_REQUEST_DEATH_NOTIFICATION) {
				/*
				 * Allocate memory for death notification
				 * before taking lock
				 */
				death = kzalloc(sizeof(*death), GFP_KERNEL);
				if (death == NULL) {
					WARN_ON(thread->return_error.cmd !=
						BR_OK);
					thread->return_error.cmd = BR_ERROR;
					binder_enqueue_thread_work(
						thread,
						&thread->return_error.work);
					binder_debug(
						BINDER_DEBUG_FAILED_TRANSACTION,
						"%d:%d BC_REQUEST_DEATH_NOTIFICATION failed\n",
						proc->pid, thread->pid);
					break;
				}
			}
			binder_proc_lock(proc);
			ref = binder_get_ref_olocked(proc, target, false);
			if (ref == NULL) {
				binder_user_error("%d:%d %s invalid ref %d\n",
					proc->pid, thread->pid,
					cmd == BC_REQUEST_DEATH_NOTIFICATION ?
					"BC_REQUEST_DEATH_NOTIFICATION" :
					"BC_CLEAR_DEATH_NOTIFICATION",
					target);
				binder_proc_unlock(proc);
				kfree(death);
				break;
			}

			binder_debug(BINDER_DEBUG_DEATH_NOTIFICATION,
				     "%d:%d %s %016llx ref %d desc %d s %d w %d for node %d\n",
				     proc->pid, thread->pid,
				     cmd == BC_REQUEST_DEATH_NOTIFICATION ?
				     "BC_REQUEST_DEATH_NOTIFICATION" :
				     "BC_CLEAR_DEATH_NOTIFICATION",
				     (u64)cookie, ref->data.debug_id,
				     ref->data.desc, ref->data.strong,
				     ref->data.weak, ref->node->debug_id);

			binder_node_lock(ref->node);
			if (cmd == BC_REQUEST_DEATH_NOTIFICATION) {
				if (ref->death) {
					binder_user_error("%d:%d BC_REQUEST_DEATH_NOTIFICATION death notification already set\n",
						proc->pid, thread->pid);
					binder_node_unlock(ref->node);
					binder_proc_unlock(proc);
					kfree(death);
					break;
				}
				binder_stats_created(BINDER_STAT_DEATH);
				INIT_LIST_HEAD(&death->work.entry);
				death->cookie = cookie;
				ref->death = death;
				if (ref->node->proc == NULL) {
					ref->death->work.type = BINDER_WORK_DEAD_BINDER;

					binder_inner_proc_lock(proc);
					binder_enqueue_work_ilocked(
						&ref->death->work, &proc->todo);
					binder_wakeup_proc_ilocked(proc);
					binder_inner_proc_unlock(proc);
				}
			} else {
				if (ref->death == NULL) {
					binder_user_error("%d:%d BC_CLEAR_DEATH_NOTIFICATION death notification not active\n",
						proc->pid, thread->pid);
					binder_node_unlock(ref->node);
					binder_proc_unlock(proc);
					break;
				}
				death = ref->death;
				if (death->cookie != cookie) {
					binder_user_error("%d:%d BC_CLEAR_DEATH_NOTIFICATION death notification cookie mismatch %016llx != %016llx\n",
						proc->pid, thread->pid,
						(u64)death->cookie,
						(u64)cookie);
					binder_node_unlock(ref->node);
					binder_proc_unlock(proc);
					break;
				}
				ref->death = NULL;
				binder_inner_proc_lock(proc);
				if (list_empty(&death->work.entry)) {
					death->work.type = BINDER_WORK_CLEAR_DEATH_NOTIFICATION;
					if (thread->looper &
					    (BINDER_LOOPER_STATE_REGISTERED |
					     BINDER_LOOPER_STATE_ENTERED))
						binder_enqueue_thread_work_ilocked(
								thread,
								&death->work);
					else {
						binder_enqueue_work_ilocked(
								&death->work,
								&proc->todo);
						binder_wakeup_proc_ilocked(
								proc);
					}
				} else {
					BUG_ON(death->work.type != BINDER_WORK_DEAD_BINDER);
					death->work.type = BINDER_WORK_DEAD_BINDER_AND_CLEAR;
				}
				binder_inner_proc_unlock(proc);
			}
			binder_node_unlock(ref->node);
			binder_proc_unlock(proc);
		} break;
		case BC_DEAD_BINDER_DONE: {
			struct binder_work *w;
			binder_uintptr_t cookie;
			struct binder_ref_death *death = NULL;

			if (get_user(cookie, (binder_uintptr_t __user *)ptr))
				return -EFAULT;

			ptr += sizeof(cookie);
			binder_inner_proc_lock(proc);
			list_for_each_entry(w, &proc->delivered_death,
					    entry) {
				struct binder_ref_death *tmp_death =
					container_of(w,
						     struct binder_ref_death,
						     work);

				if (tmp_death->cookie == cookie) {
					death = tmp_death;
					break;
				}
			}
			binder_debug(BINDER_DEBUG_DEAD_BINDER,
				     "%d:%d BC_DEAD_BINDER_DONE %016llx found %pK\n",
				     proc->pid, thread->pid, (u64)cookie,
				     death);
			if (death == NULL) {
				binder_user_error("%d:%d BC_DEAD_BINDER_DONE %016llx not found\n",
					proc->pid, thread->pid, (u64)cookie);
				binder_inner_proc_unlock(proc);
				break;
			}
			binder_dequeue_work_ilocked(&death->work);
			if (death->work.type == BINDER_WORK_DEAD_BINDER_AND_CLEAR) {
				death->work.type = BINDER_WORK_CLEAR_DEATH_NOTIFICATION;
				if (thread->looper &
				    (BINDER_LOOPER_STATE_REGISTERED |
				     BINDER_LOOPER_STATE_ENTERED))
					binder_enqueue_thread_work_ilocked(
						thread, &death->work);
				else {
					binder_enqueue_work_ilocked(
							&death->work,
							&proc->todo);
					binder_wakeup_proc_ilocked(proc);
				}
			}
			binder_inner_proc_unlock(proc);
		} break;

		default:
			pr_err("%d:%d unknown command %d\n",
			       proc->pid, thread->pid, cmd);
			return -EINVAL;
		}
		*consumed = ptr - buffer;
	}
	return 0;
}
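
/*
 * Editor's sketch (not built): a minimal user-space looper against the
 * commands handled above. The thread announces itself with
 * BC_ENTER_LOOPER, then blocks in BINDER_WRITE_READ for work. binder_fd
 * is hypothetical.
 */
#if 0
	uint32_t enter = BC_ENTER_LOOPER;
	struct binder_write_read bwr = {
		.write_size = sizeof(enter),
		.write_buffer = (binder_uintptr_t)&enter,
	};

	ioctl(binder_fd, BINDER_WRITE_READ, &bwr);	/* register */

	for (;;) {
		uint32_t rbuf[128];

		memset(&bwr, 0, sizeof(bwr));
		bwr.read_size = sizeof(rbuf);
		bwr.read_buffer = (binder_uintptr_t)rbuf;
		if (ioctl(binder_fd, BINDER_WRITE_READ, &bwr) < 0)
			break;
		/* parse BR_* commands in rbuf[0 .. bwr.read_consumed) */
	}
#endif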
static void binder_stat_br(struct binder_proc *proc,
			   struct binder_thread *thread, uint32_t cmd)
{
	trace_binder_return(cmd);
	if (_IOC_NR(cmd) < ARRAY_SIZE(binder_stats.br)) {
		atomic_inc(&binder_stats.br[_IOC_NR(cmd)]);
		atomic_inc(&proc->stats.br[_IOC_NR(cmd)]);
		atomic_inc(&thread->stats.br[_IOC_NR(cmd)]);
	}
}
*proc
,
3854 struct binder_thread
*thread
,
3856 binder_uintptr_t node_ptr
,
3857 binder_uintptr_t node_cookie
,
3859 uint32_t cmd
, const char *cmd_name
)
3861 void __user
*ptr
= *ptrp
;
3863 if (put_user(cmd
, (uint32_t __user
*)ptr
))
3865 ptr
+= sizeof(uint32_t);
3867 if (put_user(node_ptr
, (binder_uintptr_t __user
*)ptr
))
3869 ptr
+= sizeof(binder_uintptr_t
);
3871 if (put_user(node_cookie
, (binder_uintptr_t __user
*)ptr
))
3873 ptr
+= sizeof(binder_uintptr_t
);
3875 binder_stat_br(proc
, thread
, cmd
);
3876 binder_debug(BINDER_DEBUG_USER_REFS
, "%d:%d %s %d u%016llx c%016llx\n",
3877 proc
->pid
, thread
->pid
, cmd_name
, node_debug_id
,
3878 (u64
)node_ptr
, (u64
)node_cookie
);
static int binder_wait_for_work(struct binder_thread *thread,
				bool do_proc_work)
{
	DEFINE_WAIT(wait);
	struct binder_proc *proc = thread->proc;
	int ret = 0;

	freezer_do_not_count();
	binder_inner_proc_lock(proc);
	for (;;) {
		prepare_to_wait(&thread->wait, &wait, TASK_INTERRUPTIBLE);
		if (binder_has_work_ilocked(thread, do_proc_work))
			break;
		if (do_proc_work)
			list_add(&thread->waiting_thread_node,
				 &proc->waiting_threads);
		binder_inner_proc_unlock(proc);
		schedule();
		binder_inner_proc_lock(proc);
		list_del_init(&thread->waiting_thread_node);
		if (signal_pending(current)) {
			ret = -ERESTARTSYS;
			break;
		}
	}
	finish_wait(&thread->wait, &wait);
	binder_inner_proc_unlock(proc);
	freezer_count();

	return ret;
}
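
/*
 * Editor's sketch (not built): the classic prepare_to_wait() idiom that
 * binder_wait_for_work() follows, minus the binder-specific locking.
 * Re-checking the condition after prepare_to_wait() closes the race with
 * a concurrent wake_up(); wq/condition/ret are hypothetical.
 */
#if 0
	DEFINE_WAIT(wait);

	for (;;) {
		prepare_to_wait(&wq, &wait, TASK_INTERRUPTIBLE);
		if (condition)
			break;
		if (signal_pending(current)) {
			ret = -ERESTARTSYS;
			break;
		}
		schedule();
	}
	finish_wait(&wq, &wait);
#endif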
/**
 * binder_apply_fd_fixups() - finish fd translation
 * @t:	binder transaction with list of fd fixups
 *
 * Now that we are in the context of the transaction target
 * process, we can allocate and install fds. Process the
 * list of fds to translate and fixup the buffer with the
 * new fds.
 *
 * If we fail to allocate an fd, then free the resources by
 * fput'ing files that have not been processed and ksys_close'ing
 * any fds that have already been allocated.
 */
static int binder_apply_fd_fixups(struct binder_transaction *t)
{
	struct binder_txn_fd_fixup *fixup, *tmp;
	int ret = 0;

	list_for_each_entry(fixup, &t->fd_fixups, fixup_entry) {
		int fd = get_unused_fd_flags(O_CLOEXEC);
		u32 *fdp;

		if (fd < 0) {
			binder_debug(BINDER_DEBUG_TRANSACTION,
				     "failed fd fixup txn %d fd %d\n",
				     t->debug_id, fd);
			ret = -ENOMEM;
			break;
		}
		binder_debug(BINDER_DEBUG_TRANSACTION,
			     "fd fixup txn %d fd %d\n",
			     t->debug_id, fd);
		trace_binder_transaction_fd_recv(t, fd, fixup->offset);
		fd_install(fd, fixup->file);
		fixup->file = NULL;
		fdp = (u32 *)(t->buffer->data + fixup->offset);
		/*
		 * This store can cause problems for CPUs with a
		 * VIVT cache (eg ARMv5) since the cache cannot
		 * detect virtual aliases to the same physical cacheline.
		 * To support VIVT, this address and the user-space VA
		 * would both need to be flushed. Since this kernel
		 * VA is not constructed via page_to_virt(), we can't
		 * use flush_dcache_page() on it, so we'd have to use
		 * an internal function. If devices with VIVT ever
		 * need to run Android, we'll either need to go back
		 * to patching the translated fd from the sender side
		 * (using the non-standard kernel functions), or rework
		 * how the kernel uses the buffer to use page_to_virt()
		 * addresses instead of allocating in our own vm area.
		 *
		 * For now, we disable compilation if CONFIG_CPU_CACHE_VIVT.
		 */
		*fdp = fd;
	}
	list_for_each_entry_safe(fixup, tmp, &t->fd_fixups, fixup_entry) {
		if (fixup->file) {
			fput(fixup->file);
		} else if (ret) {
			u32 *fdp = (u32 *)(t->buffer->data + fixup->offset);

			binder_deferred_fd_close(*fdp);
		}
		list_del(&fixup->fixup_entry);
		kfree(fixup);
	}

	return ret;
}
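
/*
 * Editor's sketch (not built): the receiver-side effect. After
 * binder_apply_fd_fixups() runs in the target's context, a
 * BINDER_TYPE_FD object in the delivered buffer carries an fd that is
 * already installed (O_CLOEXEC) in the receiving process. Assume tr
 * points at the binder_transaction_data delivered with BR_TRANSACTION
 * and the fd object is the first entry in the offsets array; both
 * assumptions are illustrative only.
 */
#if 0
	binder_size_t off =
		((binder_size_t *)(uintptr_t)tr->data.ptr.offsets)[0];
	struct binder_fd_object *fdo = (struct binder_fd_object *)
		((uintptr_t)tr->data.ptr.buffer + off);
	int fd = fdo->fd;	/* usable directly; close() when done */
#endif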
static int binder_thread_read(struct binder_proc *proc,
			      struct binder_thread *thread,
			      binder_uintptr_t binder_buffer, size_t size,
			      binder_size_t *consumed, int non_block)
{
	void __user *buffer = (void __user *)(uintptr_t)binder_buffer;
	void __user *ptr = buffer + *consumed;
	void __user *end = buffer + size;

	int ret = 0;
	int wait_for_proc_work;

	if (*consumed == 0) {
		if (put_user(BR_NOOP, (uint32_t __user *)ptr))
			return -EFAULT;
		ptr += sizeof(uint32_t);
	}

retry:
	binder_inner_proc_lock(proc);
	wait_for_proc_work = binder_available_for_proc_work_ilocked(thread);
	binder_inner_proc_unlock(proc);

	thread->looper |= BINDER_LOOPER_STATE_WAITING;

	trace_binder_wait_for_work(wait_for_proc_work,
				   !!thread->transaction_stack,
				   !binder_worklist_empty(proc, &thread->todo));
	if (wait_for_proc_work) {
		if (!(thread->looper & (BINDER_LOOPER_STATE_REGISTERED |
					BINDER_LOOPER_STATE_ENTERED))) {
			binder_user_error("%d:%d ERROR: Thread waiting for process work before calling BC_REGISTER_LOOPER or BC_ENTER_LOOPER (state %x)\n",
				proc->pid, thread->pid, thread->looper);
			wait_event_interruptible(binder_user_error_wait,
						 binder_stop_on_user_error < 2);
		}
		binder_set_nice(proc->default_priority);
	}

	if (non_block) {
		if (!binder_has_work(thread, wait_for_proc_work))
			ret = -EAGAIN;
	} else {
		ret = binder_wait_for_work(thread, wait_for_proc_work);
	}

	thread->looper &= ~BINDER_LOOPER_STATE_WAITING;

	if (ret)
		return ret;

	while (1) {
		uint32_t cmd;
		struct binder_transaction_data tr;
		struct binder_work *w = NULL;
		struct list_head *list = NULL;
		struct binder_transaction *t = NULL;
		struct binder_thread *t_from;

		binder_inner_proc_lock(proc);
		if (!binder_worklist_empty_ilocked(&thread->todo))
			list = &thread->todo;
		else if (!binder_worklist_empty_ilocked(&proc->todo) &&
			 wait_for_proc_work)
			list = &proc->todo;
		else {
			binder_inner_proc_unlock(proc);

			/* no data added */
			if (ptr - buffer == 4 && !thread->looper_need_return)
				goto retry;
			break;
		}

		if (end - ptr < sizeof(tr) + 4) {
			binder_inner_proc_unlock(proc);
			break;
		}
		w = binder_dequeue_work_head_ilocked(list);
		if (binder_worklist_empty_ilocked(&thread->todo))
			thread->process_todo = false;

		switch (w->type) {
		case BINDER_WORK_TRANSACTION: {
			binder_inner_proc_unlock(proc);
			t = container_of(w, struct binder_transaction, work);
		} break;
		case BINDER_WORK_RETURN_ERROR: {
			struct binder_error *e = container_of(
					w, struct binder_error, work);

			WARN_ON(e->cmd == BR_OK);
			binder_inner_proc_unlock(proc);
			if (put_user(e->cmd, (uint32_t __user *)ptr))
				return -EFAULT;
			cmd = e->cmd;
			e->cmd = BR_OK;
			ptr += sizeof(uint32_t);

			binder_stat_br(proc, thread, cmd);
		} break;
		case BINDER_WORK_TRANSACTION_COMPLETE: {
			binder_inner_proc_unlock(proc);
			cmd = BR_TRANSACTION_COMPLETE;
			if (put_user(cmd, (uint32_t __user *)ptr))
				return -EFAULT;
			ptr += sizeof(uint32_t);

			binder_stat_br(proc, thread, cmd);
			binder_debug(BINDER_DEBUG_TRANSACTION_COMPLETE,
				     "%d:%d BR_TRANSACTION_COMPLETE\n",
				     proc->pid, thread->pid);
			kfree(w);
			binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
		} break;
		case BINDER_WORK_NODE: {
			struct binder_node *node = container_of(w, struct binder_node, work);
			int strong, weak;
			binder_uintptr_t node_ptr = node->ptr;
			binder_uintptr_t node_cookie = node->cookie;
			int node_debug_id = node->debug_id;
			int has_weak_ref;
			int has_strong_ref;
			void __user *orig_ptr = ptr;

			BUG_ON(proc != node->proc);
			strong = node->internal_strong_refs ||
					node->local_strong_refs;
			weak = !hlist_empty(&node->refs) ||
					node->local_weak_refs ||
					node->tmp_refs || strong;
			has_strong_ref = node->has_strong_ref;
			has_weak_ref = node->has_weak_ref;

			if (weak && !has_weak_ref) {
				node->has_weak_ref = 1;
				node->pending_weak_ref = 1;
				node->local_weak_refs++;
			}
			if (strong && !has_strong_ref) {
				node->has_strong_ref = 1;
				node->pending_strong_ref = 1;
				node->local_strong_refs++;
			}
			if (!strong && has_strong_ref)
				node->has_strong_ref = 0;
			if (!weak && has_weak_ref)
				node->has_weak_ref = 0;
			if (!weak && !strong) {
				binder_debug(BINDER_DEBUG_INTERNAL_REFS,
					     "%d:%d node %d u%016llx c%016llx deleted\n",
					     proc->pid, thread->pid,
					     node_debug_id,
					     (u64)node_ptr,
					     (u64)node_cookie);
				rb_erase(&node->rb_node, &proc->nodes);
				binder_inner_proc_unlock(proc);
				binder_node_lock(node);
				/*
				 * Acquire the node lock before freeing the
				 * node to serialize with other threads that
				 * may have been holding the node lock while
				 * decrementing this node (avoids race where
				 * this thread frees while the other thread
				 * is unlocking the node after the final
				 * decrement)
				 */
				binder_node_unlock(node);
				binder_free_node(node);
			} else
				binder_inner_proc_unlock(proc);

			if (weak && !has_weak_ref)
				ret = binder_put_node_cmd(
						proc, thread, &ptr, node_ptr,
						node_cookie, node_debug_id,
						BR_INCREFS, "BR_INCREFS");
			if (!ret && strong && !has_strong_ref)
				ret = binder_put_node_cmd(
						proc, thread, &ptr, node_ptr,
						node_cookie, node_debug_id,
						BR_ACQUIRE, "BR_ACQUIRE");
			if (!ret && !strong && has_strong_ref)
				ret = binder_put_node_cmd(
						proc, thread, &ptr, node_ptr,
						node_cookie, node_debug_id,
						BR_RELEASE, "BR_RELEASE");
			if (!ret && !weak && has_weak_ref)
				ret = binder_put_node_cmd(
						proc, thread, &ptr, node_ptr,
						node_cookie, node_debug_id,
						BR_DECREFS, "BR_DECREFS");
			if (orig_ptr == ptr)
				binder_debug(BINDER_DEBUG_INTERNAL_REFS,
					     "%d:%d node %d u%016llx c%016llx state unchanged\n",
					     proc->pid, thread->pid,
					     node_debug_id,
					     (u64)node_ptr,
					     (u64)node_cookie);
			if (ret)
				return ret;
		} break;
		case BINDER_WORK_DEAD_BINDER:
		case BINDER_WORK_DEAD_BINDER_AND_CLEAR:
		case BINDER_WORK_CLEAR_DEATH_NOTIFICATION: {
			struct binder_ref_death *death;
			uint32_t cmd;
			binder_uintptr_t cookie;

			death = container_of(w, struct binder_ref_death, work);
			if (w->type == BINDER_WORK_CLEAR_DEATH_NOTIFICATION)
				cmd = BR_CLEAR_DEATH_NOTIFICATION_DONE;
			else
				cmd = BR_DEAD_BINDER;
			cookie = death->cookie;

			binder_debug(BINDER_DEBUG_DEATH_NOTIFICATION,
				     "%d:%d %s %016llx\n",
				     proc->pid, thread->pid,
				     cmd == BR_DEAD_BINDER ?
				     "BR_DEAD_BINDER" :
				     "BR_CLEAR_DEATH_NOTIFICATION_DONE",
				     (u64)cookie);
			if (w->type == BINDER_WORK_CLEAR_DEATH_NOTIFICATION) {
				binder_inner_proc_unlock(proc);
				kfree(death);
				binder_stats_deleted(BINDER_STAT_DEATH);
			} else {
				binder_enqueue_work_ilocked(
						w, &proc->delivered_death);
				binder_inner_proc_unlock(proc);
			}
			if (put_user(cmd, (uint32_t __user *)ptr))
				return -EFAULT;
			ptr += sizeof(uint32_t);
			if (put_user(cookie,
				     (binder_uintptr_t __user *)ptr))
				return -EFAULT;
			ptr += sizeof(binder_uintptr_t);
			binder_stat_br(proc, thread, cmd);
			if (cmd == BR_DEAD_BINDER)
				goto done; /* DEAD_BINDER notifications can cause transactions */
		} break;
		default:
			binder_inner_proc_unlock(proc);
			pr_err("%d:%d: bad work type %d\n",
			       proc->pid, thread->pid, w->type);
			break;
		}

		if (!t)
			continue;

		BUG_ON(t->buffer == NULL);
		if (t->buffer->target_node) {
			struct binder_node *target_node = t->buffer->target_node;

			tr.target.ptr = target_node->ptr;
			tr.cookie = target_node->cookie;
			t->saved_priority = task_nice(current);
			if (t->priority < target_node->min_priority &&
			    !(t->flags & TF_ONE_WAY))
				binder_set_nice(t->priority);
			else if (!(t->flags & TF_ONE_WAY) ||
				 t->saved_priority > target_node->min_priority)
				binder_set_nice(target_node->min_priority);
			cmd = BR_TRANSACTION;
		} else {
			tr.target.ptr = 0;
			tr.cookie = 0;
			cmd = BR_REPLY;
		}
		tr.code = t->code;
		tr.flags = t->flags;
		tr.sender_euid = from_kuid(current_user_ns(), t->sender_euid);

		t_from = binder_get_txn_from(t);
		if (t_from) {
			struct task_struct *sender = t_from->proc->tsk;

			tr.sender_pid = task_tgid_nr_ns(sender,
							task_active_pid_ns(current));
		} else {
			tr.sender_pid = 0;
		}

		ret = binder_apply_fd_fixups(t);
		if (ret) {
			struct binder_buffer *buffer = t->buffer;
			bool oneway = !!(t->flags & TF_ONE_WAY);
			int tid = t->debug_id;

			if (t_from)
				binder_thread_dec_tmpref(t_from);
			buffer->transaction = NULL;
			binder_cleanup_transaction(t, "fd fixups failed",
						   BR_FAILED_REPLY);
			binder_free_buf(proc, buffer);
			binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
				     "%d:%d %stransaction %d fd fixups failed %d/%d, line %d\n",
				     proc->pid, thread->pid,
				     oneway ? "async " :
					(cmd == BR_REPLY ? "reply " : ""),
				     tid, BR_FAILED_REPLY, ret, __LINE__);
			if (cmd == BR_REPLY) {
				cmd = BR_FAILED_REPLY;
				if (put_user(cmd, (uint32_t __user *)ptr))
					return -EFAULT;
				ptr += sizeof(uint32_t);
				binder_stat_br(proc, thread, cmd);
				break;
			}
			continue;
		}
		tr.data_size = t->buffer->data_size;
		tr.offsets_size = t->buffer->offsets_size;
		tr.data.ptr.buffer = (binder_uintptr_t)
			((uintptr_t)t->buffer->data +
			binder_alloc_get_user_buffer_offset(&proc->alloc));
		tr.data.ptr.offsets = tr.data.ptr.buffer +
					ALIGN(t->buffer->data_size,
					      sizeof(void *));

		if (put_user(cmd, (uint32_t __user *)ptr)) {
			if (t_from)
				binder_thread_dec_tmpref(t_from);

			binder_cleanup_transaction(t, "put_user failed",
						   BR_FAILED_REPLY);

			return -EFAULT;
		}
		ptr += sizeof(uint32_t);
		if (copy_to_user(ptr, &tr, sizeof(tr))) {
			if (t_from)
				binder_thread_dec_tmpref(t_from);

			binder_cleanup_transaction(t, "copy_to_user failed",
						   BR_FAILED_REPLY);

			return -EFAULT;
		}
		ptr += sizeof(tr);

		trace_binder_transaction_received(t);
		binder_stat_br(proc, thread, cmd);
		binder_debug(BINDER_DEBUG_TRANSACTION,
			     "%d:%d %s %d %d:%d, cmd %d size %zd-%zd ptr %016llx-%016llx\n",
			     proc->pid, thread->pid,
			     (cmd == BR_TRANSACTION) ? "BR_TRANSACTION" :
			     "BR_REPLY",
			     t->debug_id, t_from ? t_from->proc->pid : 0,
			     t_from ? t_from->pid : 0, cmd,
			     t->buffer->data_size, t->buffer->offsets_size,
			     (u64)tr.data.ptr.buffer, (u64)tr.data.ptr.offsets);

		if (t_from)
			binder_thread_dec_tmpref(t_from);
		t->buffer->allow_user_free = 1;
		if (cmd == BR_TRANSACTION && !(t->flags & TF_ONE_WAY)) {
			binder_inner_proc_lock(thread->proc);
			t->to_parent = thread->transaction_stack;
			t->to_thread = thread;
			thread->transaction_stack = t;
			binder_inner_proc_unlock(thread->proc);
		} else {
			binder_free_transaction(t);
		}
		break;
	}

done:

	*consumed = ptr - buffer;
	binder_inner_proc_lock(proc);
	if (proc->requested_threads == 0 &&
	    list_empty(&thread->proc->waiting_threads) &&
	    proc->requested_threads_started < proc->max_threads &&
	    (thread->looper & (BINDER_LOOPER_STATE_REGISTERED |
	     BINDER_LOOPER_STATE_ENTERED)) /* the user-space code fails to */
	     /* spawn a new thread if we leave this out */) {
		proc->requested_threads++;
		binder_inner_proc_unlock(proc);
		binder_debug(BINDER_DEBUG_THREADS,
			     "%d:%d BR_SPAWN_LOOPER\n",
			     proc->pid, thread->pid);
		if (put_user(BR_SPAWN_LOOPER, (uint32_t __user *)buffer))
			return -EFAULT;
		binder_stat_br(proc, thread, BR_SPAWN_LOOPER);
	} else
		binder_inner_proc_unlock(proc);
	return 0;
}
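/*
 * Illustrative sketch (user-space view, not driver code): the stream
 * binder_thread_read() writes is a sequence of 32-bit BR_* codes, each
 * optionally followed by a payload. A minimal reader loop, assuming a
 * completed BINDER_WRITE_READ ioctl filled "bwr":
 *
 *	uintptr_t p = bwr.read_buffer;
 *	uintptr_t end = p + bwr.read_consumed;
 *	while (p < end) {
 *		uint32_t cmd = *(uint32_t *)p;
 *		p += sizeof(uint32_t);
 *		switch (cmd) {
 *		case BR_NOOP:
 *			break;	// always first in a fresh read
 *		case BR_TRANSACTION:
 *		case BR_REPLY: {
 *			struct binder_transaction_data *tr = (void *)p;
 *			p += sizeof(*tr);
 *			// dispatch tr->code against tr->data.ptr.buffer
 *			break;
 *		}
 *		case BR_SPAWN_LOOPER:
 *			// start a thread that issues BC_REGISTER_LOOPER
 *			break;
 *		}
 *	}
 */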
static void binder_release_work(struct binder_proc *proc,
				struct list_head *list)
{
	struct binder_work *w;

	while (1) {
		w = binder_dequeue_work_head(proc, list);
		if (!w)
			return;

		switch (w->type) {
		case BINDER_WORK_TRANSACTION: {
			struct binder_transaction *t;

			t = container_of(w, struct binder_transaction, work);

			binder_cleanup_transaction(t, "process died.",
						   BR_DEAD_REPLY);
		} break;
		case BINDER_WORK_RETURN_ERROR: {
			struct binder_error *e = container_of(
					w, struct binder_error, work);

			binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
				"undelivered TRANSACTION_ERROR: %u\n",
				e->cmd);
		} break;
		case BINDER_WORK_TRANSACTION_COMPLETE: {
			binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
				"undelivered TRANSACTION_COMPLETE\n");
			kfree(w);
			binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
		} break;
		case BINDER_WORK_DEAD_BINDER_AND_CLEAR:
		case BINDER_WORK_CLEAR_DEATH_NOTIFICATION: {
			struct binder_ref_death *death;

			death = container_of(w, struct binder_ref_death, work);
			binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
				"undelivered death notification, %016llx\n",
				(u64)death->cookie);
			kfree(death);
			binder_stats_deleted(BINDER_STAT_DEATH);
		} break;
		default:
			pr_err("unexpected work type, %d, not freed\n",
			       w->type);
			break;
		}
	}
}
static struct binder_thread *binder_get_thread_ilocked(
		struct binder_proc *proc, struct binder_thread *new_thread)
{
	struct binder_thread *thread = NULL;
	struct rb_node *parent = NULL;
	struct rb_node **p = &proc->threads.rb_node;

	while (*p) {
		parent = *p;
		thread = rb_entry(parent, struct binder_thread, rb_node);

		if (current->pid < thread->pid)
			p = &(*p)->rb_left;
		else if (current->pid > thread->pid)
			p = &(*p)->rb_right;
		else
			return thread;
	}
	if (!new_thread)
		return NULL;
	thread = new_thread;
	binder_stats_created(BINDER_STAT_THREAD);
	thread->proc = proc;
	thread->pid = current->pid;
	atomic_set(&thread->tmp_ref, 0);
	init_waitqueue_head(&thread->wait);
	INIT_LIST_HEAD(&thread->todo);
	rb_link_node(&thread->rb_node, parent, p);
	rb_insert_color(&thread->rb_node, &proc->threads);
	thread->looper_need_return = true;
	thread->return_error.work.type = BINDER_WORK_RETURN_ERROR;
	thread->return_error.cmd = BR_OK;
	thread->reply_error.work.type = BINDER_WORK_RETURN_ERROR;
	thread->reply_error.cmd = BR_OK;
	INIT_LIST_HEAD(&new_thread->waiting_thread_node);
	return thread;
}
static struct binder_thread *binder_get_thread(struct binder_proc *proc)
{
	struct binder_thread *thread;
	struct binder_thread *new_thread;

	binder_inner_proc_lock(proc);
	thread = binder_get_thread_ilocked(proc, NULL);
	binder_inner_proc_unlock(proc);
	if (!thread) {
		new_thread = kzalloc(sizeof(*thread), GFP_KERNEL);
		if (new_thread == NULL)
			return NULL;
		binder_inner_proc_lock(proc);
		thread = binder_get_thread_ilocked(proc, new_thread);
		binder_inner_proc_unlock(proc);
		if (thread != new_thread)
			kfree(new_thread);
	}
	return thread;
}
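/*
 * Note on the lookup/insert dance above: kzalloc(GFP_KERNEL) may sleep,
 * so it cannot run under the inner spinlock. The table is probed once
 * without a candidate, the allocation happens unlocked, and the probe is
 * repeated with the candidate; if another thread won the race and
 * inserted first, the unused candidate is freed.
 */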
static void binder_free_proc(struct binder_proc *proc)
{
	BUG_ON(!list_empty(&proc->todo));
	BUG_ON(!list_empty(&proc->delivered_death));
	binder_alloc_deferred_release(&proc->alloc);
	put_task_struct(proc->tsk);
	binder_stats_deleted(BINDER_STAT_PROC);
	kfree(proc);
}
static void binder_free_thread(struct binder_thread *thread)
{
	BUG_ON(!list_empty(&thread->todo));
	binder_stats_deleted(BINDER_STAT_THREAD);
	binder_proc_dec_tmpref(thread->proc);
	kfree(thread);
}
static int binder_thread_release(struct binder_proc *proc,
				 struct binder_thread *thread)
{
	struct binder_transaction *t;
	struct binder_transaction *send_reply = NULL;
	int active_transactions = 0;
	struct binder_transaction *last_t = NULL;

	binder_inner_proc_lock(thread->proc);
	/*
	 * take a ref on the proc so it survives
	 * after we remove this thread from proc->threads.
	 * The corresponding dec is when we actually
	 * free the thread in binder_free_thread()
	 */
	proc->tmp_ref++;
	/*
	 * take a ref on this thread to ensure it
	 * survives while we are releasing it
	 */
	atomic_inc(&thread->tmp_ref);
	rb_erase(&thread->rb_node, &proc->threads);
	t = thread->transaction_stack;
	if (t) {
		spin_lock(&t->lock);
		if (t->to_thread == thread)
			send_reply = t;
	} else {
		__acquire(&t->lock);
	}
	thread->is_dead = true;

	while (t) {
		last_t = t;
		active_transactions++;
		binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
			     "release %d:%d transaction %d %s, still active\n",
			     proc->pid, thread->pid,
			     t->debug_id,
			     (t->to_thread == thread) ? "in" : "out");

		if (t->to_thread == thread) {
			t->to_proc = NULL;
			t->to_thread = NULL;
			if (t->buffer) {
				t->buffer->transaction = NULL;
				t->buffer = NULL;
			}
			t = t->to_parent;
		} else if (t->from == thread) {
			t->from = NULL;
			t = t->from_parent;
		} else
			BUG();
		spin_unlock(&last_t->lock);
		if (t)
			spin_lock(&t->lock);
		else
			__acquire(&t->lock);
	}
	/* annotation for sparse, lock not acquired in last iteration above */
	__release(&t->lock);

	/*
	 * If this thread used poll, make sure we remove the waitqueue
	 * from any epoll data structures holding it with POLLFREE.
	 * waitqueue_active() is safe to use here because we're holding
	 * the inner lock.
	 */
	if ((thread->looper & BINDER_LOOPER_STATE_POLL) &&
	    waitqueue_active(&thread->wait)) {
		wake_up_poll(&thread->wait, EPOLLHUP | POLLFREE);
	}

	binder_inner_proc_unlock(thread->proc);

	/*
	 * This is needed to avoid races between wake_up_poll() above and
	 * ep_remove_waitqueue() called for other reasons (eg the epoll file
	 * descriptor being closed); ep_remove_waitqueue() holds an RCU read
	 * lock, so we can be sure it's done after calling synchronize_rcu().
	 */
	if (thread->looper & BINDER_LOOPER_STATE_POLL)
		synchronize_rcu();

	if (send_reply)
		binder_send_failed_reply(send_reply, BR_DEAD_REPLY);
	binder_release_work(proc, &thread->todo);
	binder_thread_dec_tmpref(thread);
	return active_transactions;
}
static __poll_t binder_poll(struct file *filp,
			    struct poll_table_struct *wait)
{
	struct binder_proc *proc = filp->private_data;
	struct binder_thread *thread = NULL;
	bool wait_for_proc_work;

	thread = binder_get_thread(proc);
	if (!thread)
		return POLLERR;

	binder_inner_proc_lock(thread->proc);
	thread->looper |= BINDER_LOOPER_STATE_POLL;
	wait_for_proc_work = binder_available_for_proc_work_ilocked(thread);

	binder_inner_proc_unlock(thread->proc);

	poll_wait(filp, &thread->wait, wait);

	if (binder_has_work(thread, wait_for_proc_work))
		return EPOLLIN;

	return 0;
}
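/*
 * Illustrative sketch (user-space view, not driver code): binder_poll()
 * lets a looper thread multiplex binder work with other fds:
 *
 *	struct pollfd pfd = { .fd = binder_fd, .events = POLLIN };
 *	if (poll(&pfd, 1, -1) > 0 && (pfd.revents & POLLIN)) {
 *		// a BINDER_WRITE_READ with read_size > 0 will not block,
 *		// since thread or proc work is already queued
 *	}
 */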
static int binder_ioctl_write_read(struct file *filp,
				   unsigned int cmd, unsigned long arg,
				   struct binder_thread *thread)
{
	int ret = 0;
	struct binder_proc *proc = filp->private_data;
	unsigned int size = _IOC_SIZE(cmd);
	void __user *ubuf = (void __user *)arg;
	struct binder_write_read bwr;

	if (size != sizeof(struct binder_write_read)) {
		ret = -EINVAL;
		goto out;
	}
	if (copy_from_user(&bwr, ubuf, sizeof(bwr))) {
		ret = -EFAULT;
		goto out;
	}
	binder_debug(BINDER_DEBUG_READ_WRITE,
		     "%d:%d write %lld at %016llx, read %lld at %016llx\n",
		     proc->pid, thread->pid,
		     (u64)bwr.write_size, (u64)bwr.write_buffer,
		     (u64)bwr.read_size, (u64)bwr.read_buffer);

	if (bwr.write_size > 0) {
		ret = binder_thread_write(proc, thread,
					  bwr.write_buffer,
					  bwr.write_size,
					  &bwr.write_consumed);
		trace_binder_write_done(ret);
		if (ret < 0) {
			bwr.read_consumed = 0;
			if (copy_to_user(ubuf, &bwr, sizeof(bwr)))
				ret = -EFAULT;
			goto out;
		}
	}
	if (bwr.read_size > 0) {
		ret = binder_thread_read(proc, thread, bwr.read_buffer,
					 bwr.read_size,
					 &bwr.read_consumed,
					 filp->f_flags & O_NONBLOCK);
		trace_binder_read_done(ret);
		binder_inner_proc_lock(proc);
		if (!binder_worklist_empty_ilocked(&proc->todo))
			binder_wakeup_proc_ilocked(proc);
		binder_inner_proc_unlock(proc);
		if (ret < 0) {
			if (copy_to_user(ubuf, &bwr, sizeof(bwr)))
				ret = -EFAULT;
			goto out;
		}
	}
	binder_debug(BINDER_DEBUG_READ_WRITE,
		     "%d:%d wrote %lld of %lld, read return %lld of %lld\n",
		     proc->pid, thread->pid,
		     (u64)bwr.write_consumed, (u64)bwr.write_size,
		     (u64)bwr.read_consumed, (u64)bwr.read_size);
	if (copy_to_user(ubuf, &bwr, sizeof(bwr))) {
		ret = -EFAULT;
		goto out;
	}
out:
	return ret;
}
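/*
 * Illustrative sketch (user-space view, not driver code): one
 * BINDER_WRITE_READ round trip. The write buffer carries BC_* commands,
 * the read buffer receives BR_* commands; either size may be zero, and
 * the consumed fields report partial progress on error.
 *
 *	struct binder_write_read bwr = {
 *		.write_buffer = (binder_uintptr_t)write_buf,
 *		.write_size = write_len,
 *		.read_buffer = (binder_uintptr_t)read_buf,
 *		.read_size = sizeof(read_buf),
 *	};
 *	if (ioctl(binder_fd, BINDER_WRITE_READ, &bwr) < 0)
 *		perror("BINDER_WRITE_READ");
 */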
static int binder_ioctl_set_ctx_mgr(struct file *filp)
{
	int ret = 0;
	struct binder_proc *proc = filp->private_data;
	struct binder_context *context = proc->context;
	struct binder_node *new_node;
	kuid_t curr_euid = current_euid();

	mutex_lock(&context->context_mgr_node_lock);
	if (context->binder_context_mgr_node) {
		pr_err("BINDER_SET_CONTEXT_MGR already set\n");
		ret = -EBUSY;
		goto out;
	}
	ret = security_binder_set_context_mgr(proc->tsk);
	if (ret < 0)
		goto out;
	if (uid_valid(context->binder_context_mgr_uid)) {
		if (!uid_eq(context->binder_context_mgr_uid, curr_euid)) {
			pr_err("BINDER_SET_CONTEXT_MGR bad uid %d != %d\n",
			       from_kuid(&init_user_ns, curr_euid),
			       from_kuid(&init_user_ns,
					 context->binder_context_mgr_uid));
			ret = -EPERM;
			goto out;
		}
	} else {
		context->binder_context_mgr_uid = curr_euid;
	}
	new_node = binder_new_node(proc, NULL);
	if (!new_node) {
		ret = -ENOMEM;
		goto out;
	}
	binder_node_lock(new_node);
	new_node->local_weak_refs++;
	new_node->local_strong_refs++;
	new_node->has_strong_ref = 1;
	new_node->has_weak_ref = 1;
	context->binder_context_mgr_node = new_node;
	binder_node_unlock(new_node);
	binder_put_node(new_node);
out:
	mutex_unlock(&context->context_mgr_node_lock);
	return ret;
}
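/*
 * Illustrative sketch (user-space view, not driver code): the service
 * manager claims the context early at boot, once per binder device:
 *
 *	int fd = open("/dev/binder", O_RDWR | O_CLOEXEC);
 *	if (ioctl(fd, BINDER_SET_CONTEXT_MGR, 0) < 0)
 *		;	// fails with EBUSY if the context is already claimed
 *
 * Every other process then reaches this node via the reserved handle 0.
 */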
static int binder_ioctl_get_node_info_for_ref(struct binder_proc *proc,
		struct binder_node_info_for_ref *info)
{
	struct binder_node *node;
	struct binder_context *context = proc->context;
	__u32 handle = info->handle;

	if (info->strong_count || info->weak_count || info->reserved1 ||
	    info->reserved2 || info->reserved3) {
		binder_user_error("%d BINDER_GET_NODE_INFO_FOR_REF: only handle may be non-zero.",
				  proc->pid);
		return -EINVAL;
	}

	/* This ioctl may only be used by the context manager */
	mutex_lock(&context->context_mgr_node_lock);
	if (!context->binder_context_mgr_node ||
	    context->binder_context_mgr_node->proc != proc) {
		mutex_unlock(&context->context_mgr_node_lock);
		return -EPERM;
	}
	mutex_unlock(&context->context_mgr_node_lock);

	node = binder_get_node_from_ref(proc, handle, true, NULL);
	if (!node)
		return -EINVAL;

	info->strong_count = node->local_strong_refs +
		node->internal_strong_refs;
	info->weak_count = node->local_weak_refs;

	binder_put_node(node);

	return 0;
}
static int binder_ioctl_get_node_debug_info(struct binder_proc *proc,
				struct binder_node_debug_info *info)
{
	struct rb_node *n;
	binder_uintptr_t ptr = info->ptr;

	memset(info, 0, sizeof(*info));

	binder_inner_proc_lock(proc);
	for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n)) {
		struct binder_node *node = rb_entry(n, struct binder_node,
						    rb_node);
		if (node->ptr > ptr) {
			info->ptr = node->ptr;
			info->cookie = node->cookie;
			info->has_strong_ref = node->has_strong_ref;
			info->has_weak_ref = node->has_weak_ref;
			break;
		}
	}
	binder_inner_proc_unlock(proc);

	return 0;
}
static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
	int ret;
	struct binder_proc *proc = filp->private_data;
	struct binder_thread *thread;
	unsigned int size = _IOC_SIZE(cmd);
	void __user *ubuf = (void __user *)arg;

	/*pr_info("binder_ioctl: %d:%d %x %lx\n",
			proc->pid, current->pid, cmd, arg);*/

	binder_selftest_alloc(&proc->alloc);

	trace_binder_ioctl(cmd, arg);

	ret = wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2);
	if (ret)
		goto err_unlocked;

	thread = binder_get_thread(proc);
	if (thread == NULL) {
		ret = -ENOMEM;
		goto err;
	}

	switch (cmd) {
	case BINDER_WRITE_READ:
		ret = binder_ioctl_write_read(filp, cmd, arg, thread);
		if (ret)
			goto err;
		break;
	case BINDER_SET_MAX_THREADS: {
		int max_threads;

		if (copy_from_user(&max_threads, ubuf,
				   sizeof(max_threads))) {
			ret = -EINVAL;
			goto err;
		}
		binder_inner_proc_lock(proc);
		proc->max_threads = max_threads;
		binder_inner_proc_unlock(proc);
		break;
	}
	case BINDER_SET_CONTEXT_MGR:
		ret = binder_ioctl_set_ctx_mgr(filp);
		if (ret)
			goto err;
		break;
	case BINDER_THREAD_EXIT:
		binder_debug(BINDER_DEBUG_THREADS, "%d:%d exit\n",
			     proc->pid, thread->pid);
		binder_thread_release(proc, thread);
		thread = NULL;
		break;
	case BINDER_VERSION: {
		struct binder_version __user *ver = ubuf;

		if (size != sizeof(struct binder_version)) {
			ret = -EINVAL;
			goto err;
		}
		if (put_user(BINDER_CURRENT_PROTOCOL_VERSION,
			     &ver->protocol_version)) {
			ret = -EINVAL;
			goto err;
		}
		break;
	}
	case BINDER_GET_NODE_INFO_FOR_REF: {
		struct binder_node_info_for_ref info;

		if (copy_from_user(&info, ubuf, sizeof(info))) {
			ret = -EFAULT;
			goto err;
		}

		ret = binder_ioctl_get_node_info_for_ref(proc, &info);
		if (ret < 0)
			goto err;

		if (copy_to_user(ubuf, &info, sizeof(info))) {
			ret = -EFAULT;
			goto err;
		}
		break;
	}
	case BINDER_GET_NODE_DEBUG_INFO: {
		struct binder_node_debug_info info;

		if (copy_from_user(&info, ubuf, sizeof(info))) {
			ret = -EFAULT;
			goto err;
		}

		ret = binder_ioctl_get_node_debug_info(proc, &info);
		if (ret < 0)
			goto err;

		if (copy_to_user(ubuf, &info, sizeof(info))) {
			ret = -EFAULT;
			goto err;
		}
		break;
	}
	default:
		ret = -EINVAL;
		goto err;
	}
	ret = 0;
err:
	if (thread)
		thread->looper_need_return = false;
	wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2);
	if (ret && ret != -ERESTARTSYS)
		pr_info("%d:%d ioctl %x %lx returned %d\n", proc->pid, current->pid, cmd, arg, ret);
err_unlocked:
	trace_binder_ioctl_done(ret);
	return ret;
}
static void binder_vma_open(struct vm_area_struct *vma)
{
	struct binder_proc *proc = vma->vm_private_data;

	binder_debug(BINDER_DEBUG_OPEN_CLOSE,
		     "%d open vm area %lx-%lx (%ld K) vma %lx pagep %lx\n",
		     proc->pid, vma->vm_start, vma->vm_end,
		     (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
		     (unsigned long)pgprot_val(vma->vm_page_prot));
}

static void binder_vma_close(struct vm_area_struct *vma)
{
	struct binder_proc *proc = vma->vm_private_data;

	binder_debug(BINDER_DEBUG_OPEN_CLOSE,
		     "%d close vm area %lx-%lx (%ld K) vma %lx pagep %lx\n",
		     proc->pid, vma->vm_start, vma->vm_end,
		     (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
		     (unsigned long)pgprot_val(vma->vm_page_prot));
	binder_alloc_vma_close(&proc->alloc);
}

static vm_fault_t binder_vm_fault(struct vm_fault *vmf)
{
	return VM_FAULT_SIGBUS;
}

static const struct vm_operations_struct binder_vm_ops = {
	.open = binder_vma_open,
	.close = binder_vma_close,
	.fault = binder_vm_fault,
};
static int binder_mmap(struct file *filp, struct vm_area_struct *vma)
{
	int ret;
	struct binder_proc *proc = filp->private_data;
	const char *failure_string;

	if (proc->tsk != current->group_leader)
		return -EINVAL;

	if ((vma->vm_end - vma->vm_start) > SZ_4M)
		vma->vm_end = vma->vm_start + SZ_4M;

	binder_debug(BINDER_DEBUG_OPEN_CLOSE,
		     "%s: %d %lx-%lx (%ld K) vma %lx pagep %lx\n",
		     __func__, proc->pid, vma->vm_start, vma->vm_end,
		     (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
		     (unsigned long)pgprot_val(vma->vm_page_prot));

	if (vma->vm_flags & FORBIDDEN_MMAP_FLAGS) {
		ret = -EPERM;
		failure_string = "bad vm_flags";
		goto err_bad_arg;
	}
	vma->vm_flags |= VM_DONTCOPY | VM_MIXEDMAP;
	vma->vm_flags &= ~VM_MAYWRITE;

	vma->vm_ops = &binder_vm_ops;
	vma->vm_private_data = proc;

	ret = binder_alloc_mmap_handler(&proc->alloc, vma);
	if (ret)
		return ret;
	return 0;

err_bad_arg:
	pr_err("%s: %d %lx-%lx %s failed %d\n", __func__,
	       proc->pid, vma->vm_start, vma->vm_end, failure_string, ret);
	return ret;
}
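/*
 * Illustrative sketch (user-space view, not driver code): the receive
 * buffer is mapped read-only exactly once per process; the size below
 * is an arbitrary example:
 *
 *	void *map = mmap(NULL, 1024 * 1024, PROT_READ, MAP_PRIVATE,
 *			 binder_fd, 0);
 *
 * Mapping with PROT_WRITE is rejected via FORBIDDEN_MMAP_FLAGS, and
 * requests above 4 MB are silently clamped by binder_mmap() above.
 */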
static int binder_open(struct inode *nodp, struct file *filp)
{
	struct binder_proc *proc;
	struct binder_device *binder_dev;

	binder_debug(BINDER_DEBUG_OPEN_CLOSE, "%s: %d:%d\n", __func__,
		     current->group_leader->pid, current->pid);

	proc = kzalloc(sizeof(*proc), GFP_KERNEL);
	if (proc == NULL)
		return -ENOMEM;
	spin_lock_init(&proc->inner_lock);
	spin_lock_init(&proc->outer_lock);
	get_task_struct(current->group_leader);
	proc->tsk = current->group_leader;
	INIT_LIST_HEAD(&proc->todo);
	proc->default_priority = task_nice(current);
	/* binderfs stashes devices in i_private */
	if (is_binderfs_device(nodp))
		binder_dev = nodp->i_private;
	else
		binder_dev = container_of(filp->private_data,
					  struct binder_device, miscdev);
	proc->context = &binder_dev->context;
	binder_alloc_init(&proc->alloc);

	binder_stats_created(BINDER_STAT_PROC);
	proc->pid = current->group_leader->pid;
	INIT_LIST_HEAD(&proc->delivered_death);
	INIT_LIST_HEAD(&proc->waiting_threads);
	filp->private_data = proc;

	mutex_lock(&binder_procs_lock);
	hlist_add_head(&proc->proc_node, &binder_procs);
	mutex_unlock(&binder_procs_lock);

	if (binder_debugfs_dir_entry_proc) {
		char strbuf[11];

		snprintf(strbuf, sizeof(strbuf), "%u", proc->pid);
		/*
		 * proc debug entries are shared between contexts, so
		 * this will fail if the process tries to open the driver
		 * again with a different context. The printing code will
		 * anyway print all contexts that a given PID has, so this
		 * is not a problem.
		 */
		proc->debugfs_entry = debugfs_create_file(strbuf, 0444,
			binder_debugfs_dir_entry_proc,
			(void *)(unsigned long)proc->pid,
			&proc_fops);
	}

	return 0;
}
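/*
 * Illustrative sketch (user-space view, not driver code): the usual
 * startup sequence against a freshly opened device, MAP_SIZE being
 * whatever the process picked:
 *
 *	int fd = open("/dev/binder", O_RDWR | O_CLOEXEC);
 *	struct binder_version vers;
 *	ioctl(fd, BINDER_VERSION, &vers);	// verify protocol match
 *	mmap(NULL, MAP_SIZE, PROT_READ, MAP_PRIVATE, fd, 0);
 *	// then BC_ENTER_LOOPER via BINDER_WRITE_READ on the main thread
 */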
static int binder_flush(struct file *filp, fl_owner_t id)
{
	struct binder_proc *proc = filp->private_data;

	binder_defer_work(proc, BINDER_DEFERRED_FLUSH);

	return 0;
}

static void binder_deferred_flush(struct binder_proc *proc)
{
	struct rb_node *n;
	int wake_count = 0;

	binder_inner_proc_lock(proc);
	for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n)) {
		struct binder_thread *thread = rb_entry(n, struct binder_thread, rb_node);

		thread->looper_need_return = true;
		if (thread->looper & BINDER_LOOPER_STATE_WAITING) {
			wake_up_interruptible(&thread->wait);
			wake_count++;
		}
	}
	binder_inner_proc_unlock(proc);

	binder_debug(BINDER_DEBUG_OPEN_CLOSE,
		     "binder_flush: %d woke %d threads\n", proc->pid,
		     wake_count);
}
static int binder_release(struct inode *nodp, struct file *filp)
{
	struct binder_proc *proc = filp->private_data;

	debugfs_remove(proc->debugfs_entry);
	binder_defer_work(proc, BINDER_DEFERRED_RELEASE);

	return 0;
}
static int binder_node_release(struct binder_node *node, int refs)
{
	struct binder_ref *ref;
	int death = 0;
	struct binder_proc *proc = node->proc;

	binder_release_work(proc, &node->async_todo);

	binder_node_lock(node);
	binder_inner_proc_lock(proc);
	binder_dequeue_work_ilocked(&node->work);
	/*
	 * The caller must have taken a temporary ref on the node.
	 */
	BUG_ON(!node->tmp_refs);
	if (hlist_empty(&node->refs) && node->tmp_refs == 1) {
		binder_inner_proc_unlock(proc);
		binder_node_unlock(node);
		binder_free_node(node);

		return refs;
	}

	node->proc = NULL;
	node->local_strong_refs = 0;
	node->local_weak_refs = 0;
	binder_inner_proc_unlock(proc);

	spin_lock(&binder_dead_nodes_lock);
	hlist_add_head(&node->dead_node, &binder_dead_nodes);
	spin_unlock(&binder_dead_nodes_lock);

	hlist_for_each_entry(ref, &node->refs, node_entry) {
		refs++;
		/*
		 * Need the node lock to synchronize
		 * with new notification requests and the
		 * inner lock to synchronize with queued
		 * death notifications.
		 */
		binder_inner_proc_lock(ref->proc);
		if (!ref->death) {
			binder_inner_proc_unlock(ref->proc);
			continue;
		}

		death++;

		BUG_ON(!list_empty(&ref->death->work.entry));
		ref->death->work.type = BINDER_WORK_DEAD_BINDER;
		binder_enqueue_work_ilocked(&ref->death->work,
					    &ref->proc->todo);
		binder_wakeup_proc_ilocked(ref->proc);
		binder_inner_proc_unlock(ref->proc);
	}

	binder_debug(BINDER_DEBUG_DEAD_BINDER,
		     "node %d now dead, refs %d, death %d\n",
		     node->debug_id, refs, death);
	binder_node_unlock(node);
	binder_put_node(node);

	return refs;
}
static void binder_deferred_release(struct binder_proc *proc)
{
	struct binder_context *context = proc->context;
	struct rb_node *n;
	int threads, nodes, incoming_refs, outgoing_refs, active_transactions;

	mutex_lock(&binder_procs_lock);
	hlist_del(&proc->proc_node);
	mutex_unlock(&binder_procs_lock);

	mutex_lock(&context->context_mgr_node_lock);
	if (context->binder_context_mgr_node &&
	    context->binder_context_mgr_node->proc == proc) {
		binder_debug(BINDER_DEBUG_DEAD_BINDER,
			     "%s: %d context_mgr_node gone\n",
			     __func__, proc->pid);
		context->binder_context_mgr_node = NULL;
	}
	mutex_unlock(&context->context_mgr_node_lock);
	binder_inner_proc_lock(proc);
	/*
	 * Make sure proc stays alive after we
	 * remove all the threads
	 */
	proc->tmp_ref++;

	proc->is_dead = true;
	threads = 0;
	active_transactions = 0;
	while ((n = rb_first(&proc->threads))) {
		struct binder_thread *thread;

		thread = rb_entry(n, struct binder_thread, rb_node);
		binder_inner_proc_unlock(proc);
		threads++;
		active_transactions += binder_thread_release(proc, thread);
		binder_inner_proc_lock(proc);
	}

	nodes = 0;
	incoming_refs = 0;
	while ((n = rb_first(&proc->nodes))) {
		struct binder_node *node;

		node = rb_entry(n, struct binder_node, rb_node);
		nodes++;
		/*
		 * take a temporary ref on the node before
		 * calling binder_node_release() which will either
		 * kfree() the node or call binder_put_node()
		 */
		binder_inc_node_tmpref_ilocked(node);
		rb_erase(&node->rb_node, &proc->nodes);
		binder_inner_proc_unlock(proc);
		incoming_refs = binder_node_release(node, incoming_refs);
		binder_inner_proc_lock(proc);
	}
	binder_inner_proc_unlock(proc);

	outgoing_refs = 0;
	binder_proc_lock(proc);
	while ((n = rb_first(&proc->refs_by_desc))) {
		struct binder_ref *ref;

		ref = rb_entry(n, struct binder_ref, rb_node_desc);
		outgoing_refs++;
		binder_cleanup_ref_olocked(ref);
		binder_proc_unlock(proc);
		binder_free_ref(ref);
		binder_proc_lock(proc);
	}
	binder_proc_unlock(proc);

	binder_release_work(proc, &proc->todo);
	binder_release_work(proc, &proc->delivered_death);

	binder_debug(BINDER_DEBUG_OPEN_CLOSE,
		     "%s: %d threads %d, nodes %d (ref %d), refs %d, active transactions %d\n",
		     __func__, proc->pid, threads, nodes, incoming_refs,
		     outgoing_refs, active_transactions);

	binder_proc_dec_tmpref(proc);
}
static void binder_deferred_func(struct work_struct *work)
{
	struct binder_proc *proc;

	int defer;

	do {
		mutex_lock(&binder_deferred_lock);
		if (!hlist_empty(&binder_deferred_list)) {
			proc = hlist_entry(binder_deferred_list.first,
					struct binder_proc, deferred_work_node);
			hlist_del_init(&proc->deferred_work_node);
			defer = proc->deferred_work;
			proc->deferred_work = 0;
		} else {
			proc = NULL;
			defer = 0;
		}
		mutex_unlock(&binder_deferred_lock);

		if (defer & BINDER_DEFERRED_FLUSH)
			binder_deferred_flush(proc);

		if (defer & BINDER_DEFERRED_RELEASE)
			binder_deferred_release(proc); /* frees proc */
	} while (proc);
}
static DECLARE_WORK(binder_deferred_work, binder_deferred_func);

static void
binder_defer_work(struct binder_proc *proc, enum binder_deferred_state defer)
{
	mutex_lock(&binder_deferred_lock);
	proc->deferred_work |= defer;
	if (hlist_unhashed(&proc->deferred_work_node)) {
		hlist_add_head(&proc->deferred_work_node,
				&binder_deferred_list);
		schedule_work(&binder_deferred_work);
	}
	mutex_unlock(&binder_deferred_lock);
}
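/*
 * Note on the deferral pattern above: flush and release requests are
 * ORed into proc->deferred_work and the proc is queued at most once
 * (the hlist_unhashed() check). binder_deferred_func() then drains one
 * proc per iteration, so a flush and a release issued back to back are
 * handled in a single pass, with release running last since it frees
 * the proc.
 */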
static void print_binder_transaction_ilocked(struct seq_file *m,
					     struct binder_proc *proc,
					     const char *prefix,
					     struct binder_transaction *t)
{
	struct binder_proc *to_proc;
	struct binder_buffer *buffer = t->buffer;

	spin_lock(&t->lock);
	to_proc = t->to_proc;
	seq_printf(m,
		   "%s %d: %pK from %d:%d to %d:%d code %x flags %x pri %ld r%d",
		   prefix, t->debug_id, t,
		   t->from ? t->from->proc->pid : 0,
		   t->from ? t->from->pid : 0,
		   to_proc ? to_proc->pid : 0,
		   t->to_thread ? t->to_thread->pid : 0,
		   t->code, t->flags, t->priority, t->need_reply);
	spin_unlock(&t->lock);

	if (proc != to_proc) {
		/*
		 * Can only safely deref buffer if we are holding the
		 * correct proc inner lock for this node
		 */
		seq_puts(m, "\n");
		return;
	}

	if (buffer == NULL) {
		seq_puts(m, " buffer free\n");
		return;
	}
	if (buffer->target_node)
		seq_printf(m, " node %d", buffer->target_node->debug_id);
	seq_printf(m, " size %zd:%zd data %pK\n",
		   buffer->data_size, buffer->offsets_size,
		   buffer->data);
}
static void print_binder_work_ilocked(struct seq_file *m,
				      struct binder_proc *proc,
				      const char *prefix,
				      const char *transaction_prefix,
				      struct binder_work *w)
{
	struct binder_node *node;
	struct binder_transaction *t;

	switch (w->type) {
	case BINDER_WORK_TRANSACTION:
		t = container_of(w, struct binder_transaction, work);
		print_binder_transaction_ilocked(
				m, proc, transaction_prefix, t);
		break;
	case BINDER_WORK_RETURN_ERROR: {
		struct binder_error *e = container_of(
				w, struct binder_error, work);

		seq_printf(m, "%stransaction error: %u\n",
			   prefix, e->cmd);
	} break;
	case BINDER_WORK_TRANSACTION_COMPLETE:
		seq_printf(m, "%stransaction complete\n", prefix);
		break;
	case BINDER_WORK_NODE:
		node = container_of(w, struct binder_node, work);
		seq_printf(m, "%snode work %d: u%016llx c%016llx\n",
			   prefix, node->debug_id,
			   (u64)node->ptr, (u64)node->cookie);
		break;
	case BINDER_WORK_DEAD_BINDER:
		seq_printf(m, "%shas dead binder\n", prefix);
		break;
	case BINDER_WORK_DEAD_BINDER_AND_CLEAR:
		seq_printf(m, "%shas cleared dead binder\n", prefix);
		break;
	case BINDER_WORK_CLEAR_DEATH_NOTIFICATION:
		seq_printf(m, "%shas cleared death notification\n", prefix);
		break;
	default:
		seq_printf(m, "%sunknown work: type %d\n", prefix, w->type);
		break;
	}
}
static void print_binder_thread_ilocked(struct seq_file *m,
					struct binder_thread *thread,
					int print_always)
{
	struct binder_transaction *t;
	struct binder_work *w;
	size_t start_pos = m->count;
	size_t header_pos;

	seq_printf(m, "  thread %d: l %02x need_return %d tr %d\n",
		   thread->pid, thread->looper,
		   thread->looper_need_return,
		   atomic_read(&thread->tmp_ref));
	header_pos = m->count;
	t = thread->transaction_stack;
	while (t) {
		if (t->from == thread) {
			print_binder_transaction_ilocked(m, thread->proc,
					"    outgoing transaction", t);
			t = t->from_parent;
		} else if (t->to_thread == thread) {
			print_binder_transaction_ilocked(m, thread->proc,
					"    incoming transaction", t);
			t = t->to_parent;
		} else {
			print_binder_transaction_ilocked(m, thread->proc,
					"    bad transaction", t);
			t = NULL;
		}
	}
	list_for_each_entry(w, &thread->todo, entry) {
		print_binder_work_ilocked(m, thread->proc, "    ",
					  "    pending transaction", w);
	}
	if (!print_always && m->count == header_pos)
		m->count = start_pos;
}
static void print_binder_node_nilocked(struct seq_file *m,
				       struct binder_node *node)
{
	struct binder_ref *ref;
	struct binder_work *w;
	int count;

	count = 0;
	hlist_for_each_entry(ref, &node->refs, node_entry)
		count++;

	seq_printf(m, "  node %d: u%016llx c%016llx hs %d hw %d ls %d lw %d is %d iw %d tr %d",
		   node->debug_id, (u64)node->ptr, (u64)node->cookie,
		   node->has_strong_ref, node->has_weak_ref,
		   node->local_strong_refs, node->local_weak_refs,
		   node->internal_strong_refs, count, node->tmp_refs);
	if (count) {
		seq_puts(m, " proc");
		hlist_for_each_entry(ref, &node->refs, node_entry)
			seq_printf(m, " %d", ref->proc->pid);
	}
	seq_puts(m, "\n");
	if (node->proc) {
		list_for_each_entry(w, &node->async_todo, entry)
			print_binder_work_ilocked(m, node->proc, "    ",
					"    pending async transaction", w);
	}
}
static void print_binder_ref_olocked(struct seq_file *m,
				     struct binder_ref *ref)
{
	binder_node_lock(ref->node);
	seq_printf(m, "  ref %d: desc %d %snode %d s %d w %d d %pK\n",
		   ref->data.debug_id, ref->data.desc,
		   ref->node->proc ? "" : "dead ",
		   ref->node->debug_id, ref->data.strong,
		   ref->data.weak, ref->death);
	binder_node_unlock(ref->node);
}
static void print_binder_proc(struct seq_file *m,
			      struct binder_proc *proc, int print_all)
{
	struct binder_work *w;
	struct rb_node *n;
	size_t start_pos = m->count;
	size_t header_pos;
	struct binder_node *last_node = NULL;

	seq_printf(m, "proc %d\n", proc->pid);
	seq_printf(m, "context %s\n", proc->context->name);
	header_pos = m->count;

	binder_inner_proc_lock(proc);
	for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n))
		print_binder_thread_ilocked(m, rb_entry(n, struct binder_thread,
						rb_node), print_all);

	for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n)) {
		struct binder_node *node = rb_entry(n, struct binder_node,
						    rb_node);
		if (!print_all && !node->has_async_transaction)
			continue;

		/*
		 * take a temporary reference on the node so it
		 * survives and isn't removed from the tree
		 * while we print it.
		 */
		binder_inc_node_tmpref_ilocked(node);
		/* Need to drop inner lock to take node lock */
		binder_inner_proc_unlock(proc);
		if (last_node)
			binder_put_node(last_node);
		binder_node_inner_lock(node);
		print_binder_node_nilocked(m, node);
		binder_node_inner_unlock(node);
		last_node = node;
		binder_inner_proc_lock(proc);
	}
	binder_inner_proc_unlock(proc);
	if (last_node)
		binder_put_node(last_node);

	if (print_all) {
		binder_proc_lock(proc);
		for (n = rb_first(&proc->refs_by_desc);
		     n != NULL;
		     n = rb_next(n))
			print_binder_ref_olocked(m, rb_entry(n,
							struct binder_ref,
							rb_node_desc));
		binder_proc_unlock(proc);
	}
	binder_alloc_print_allocated(m, &proc->alloc);
	binder_inner_proc_lock(proc);
	list_for_each_entry(w, &proc->todo, entry)
		print_binder_work_ilocked(m, proc, "  ",
					  "  pending transaction", w);
	list_for_each_entry(w, &proc->delivered_death, entry) {
		seq_puts(m, "  has delivered dead binder\n");
		break;
	}
	binder_inner_proc_unlock(proc);
	if (!print_all && m->count == header_pos)
		m->count = start_pos;
}
static const char * const binder_return_strings[] = {
	"BR_ERROR",
	"BR_OK",
	"BR_TRANSACTION",
	"BR_REPLY",
	"BR_ACQUIRE_RESULT",
	"BR_DEAD_REPLY",
	"BR_TRANSACTION_COMPLETE",
	"BR_INCREFS",
	"BR_ACQUIRE",
	"BR_RELEASE",
	"BR_DECREFS",
	"BR_ATTEMPT_ACQUIRE",
	"BR_NOOP",
	"BR_SPAWN_LOOPER",
	"BR_FINISHED",
	"BR_DEAD_BINDER",
	"BR_CLEAR_DEATH_NOTIFICATION_DONE",
	"BR_FAILED_REPLY"
};

static const char * const binder_command_strings[] = {
	"BC_TRANSACTION",
	"BC_REPLY",
	"BC_ACQUIRE_RESULT",
	"BC_FREE_BUFFER",
	"BC_INCREFS",
	"BC_ACQUIRE",
	"BC_RELEASE",
	"BC_DECREFS",
	"BC_INCREFS_DONE",
	"BC_ACQUIRE_DONE",
	"BC_ATTEMPT_ACQUIRE",
	"BC_REGISTER_LOOPER",
	"BC_ENTER_LOOPER",
	"BC_EXIT_LOOPER",
	"BC_REQUEST_DEATH_NOTIFICATION",
	"BC_CLEAR_DEATH_NOTIFICATION",
	"BC_DEAD_BINDER_DONE",
	"BC_TRANSACTION_SG",
	"BC_REPLY_SG",
};

static const char * const binder_objstat_strings[] = {
	"proc",
	"thread",
	"node",
	"ref",
	"death",
	"transaction",
	"transaction_complete"
};
static void print_binder_stats(struct seq_file *m, const char *prefix,
			       struct binder_stats *stats)
{
	int i;

	BUILD_BUG_ON(ARRAY_SIZE(stats->bc) !=
		     ARRAY_SIZE(binder_command_strings));
	for (i = 0; i < ARRAY_SIZE(stats->bc); i++) {
		int temp = atomic_read(&stats->bc[i]);

		if (temp)
			seq_printf(m, "%s%s: %d\n", prefix,
				   binder_command_strings[i], temp);
	}

	BUILD_BUG_ON(ARRAY_SIZE(stats->br) !=
		     ARRAY_SIZE(binder_return_strings));
	for (i = 0; i < ARRAY_SIZE(stats->br); i++) {
		int temp = atomic_read(&stats->br[i]);

		if (temp)
			seq_printf(m, "%s%s: %d\n", prefix,
				   binder_return_strings[i], temp);
	}

	BUILD_BUG_ON(ARRAY_SIZE(stats->obj_created) !=
		     ARRAY_SIZE(binder_objstat_strings));
	BUILD_BUG_ON(ARRAY_SIZE(stats->obj_created) !=
		     ARRAY_SIZE(stats->obj_deleted));
	for (i = 0; i < ARRAY_SIZE(stats->obj_created); i++) {
		int created = atomic_read(&stats->obj_created[i]);
		int deleted = atomic_read(&stats->obj_deleted[i]);

		if (created || deleted)
			seq_printf(m, "%s%s: active %d total %d\n",
				   prefix,
				   binder_objstat_strings[i],
				   created - deleted,
				   created);
	}
}
static void print_binder_proc_stats(struct seq_file *m,
				    struct binder_proc *proc)
{
	struct binder_work *w;
	struct binder_thread *thread;
	struct rb_node *n;
	int count, strong, weak, ready_threads;
	size_t free_async_space =
		binder_alloc_get_free_async_space(&proc->alloc);

	seq_printf(m, "proc %d\n", proc->pid);
	seq_printf(m, "context %s\n", proc->context->name);
	count = 0;
	ready_threads = 0;
	binder_inner_proc_lock(proc);
	for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n))
		count++;

	list_for_each_entry(thread, &proc->waiting_threads, waiting_thread_node)
		ready_threads++;

	seq_printf(m, "  threads: %d\n", count);
	seq_printf(m, "  requested threads: %d+%d/%d\n"
			"  ready threads %d\n"
			"  free async space %zd\n", proc->requested_threads,
			proc->requested_threads_started, proc->max_threads,
			ready_threads,
			free_async_space);
	count = 0;
	for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n))
		count++;
	binder_inner_proc_unlock(proc);
	seq_printf(m, "  nodes: %d\n", count);
	count = 0;
	strong = 0;
	weak = 0;
	binder_proc_lock(proc);
	for (n = rb_first(&proc->refs_by_desc); n != NULL; n = rb_next(n)) {
		struct binder_ref *ref = rb_entry(n, struct binder_ref,
						  rb_node_desc);
		count++;
		strong += ref->data.strong;
		weak += ref->data.weak;
	}
	binder_proc_unlock(proc);
	seq_printf(m, "  refs: %d s %d w %d\n", count, strong, weak);

	count = binder_alloc_get_allocated_count(&proc->alloc);
	seq_printf(m, "  buffers: %d\n", count);

	binder_alloc_print_pages(m, &proc->alloc);

	count = 0;
	binder_inner_proc_lock(proc);
	list_for_each_entry(w, &proc->todo, entry) {
		if (w->type == BINDER_WORK_TRANSACTION)
			count++;
	}
	binder_inner_proc_unlock(proc);
	seq_printf(m, "  pending transactions: %d\n", count);

	print_binder_stats(m, "  ", &proc->stats);
}
static int state_show(struct seq_file *m, void *unused)
{
	struct binder_proc *proc;
	struct binder_node *node;
	struct binder_node *last_node = NULL;

	seq_puts(m, "binder state:\n");

	spin_lock(&binder_dead_nodes_lock);
	if (!hlist_empty(&binder_dead_nodes))
		seq_puts(m, "dead nodes:\n");
	hlist_for_each_entry(node, &binder_dead_nodes, dead_node) {
		/*
		 * take a temporary reference on the node so it
		 * survives and isn't removed from the list
		 * while we print it.
		 */
		node->tmp_refs++;
		spin_unlock(&binder_dead_nodes_lock);
		if (last_node)
			binder_put_node(last_node);
		binder_node_lock(node);
		print_binder_node_nilocked(m, node);
		binder_node_unlock(node);
		last_node = node;
		spin_lock(&binder_dead_nodes_lock);
	}
	spin_unlock(&binder_dead_nodes_lock);
	if (last_node)
		binder_put_node(last_node);

	mutex_lock(&binder_procs_lock);
	hlist_for_each_entry(proc, &binder_procs, proc_node)
		print_binder_proc(m, proc, 1);
	mutex_unlock(&binder_procs_lock);

	return 0;
}
static int stats_show(struct seq_file *m, void *unused)
{
	struct binder_proc *proc;

	seq_puts(m, "binder stats:\n");

	print_binder_stats(m, "", &binder_stats);

	mutex_lock(&binder_procs_lock);
	hlist_for_each_entry(proc, &binder_procs, proc_node)
		print_binder_proc_stats(m, proc);
	mutex_unlock(&binder_procs_lock);

	return 0;
}

static int transactions_show(struct seq_file *m, void *unused)
{
	struct binder_proc *proc;

	seq_puts(m, "binder transactions:\n");
	mutex_lock(&binder_procs_lock);
	hlist_for_each_entry(proc, &binder_procs, proc_node)
		print_binder_proc(m, proc, 0);
	mutex_unlock(&binder_procs_lock);

	return 0;
}

static int proc_show(struct seq_file *m, void *unused)
{
	struct binder_proc *itr;
	int pid = (unsigned long)m->private;

	mutex_lock(&binder_procs_lock);
	hlist_for_each_entry(itr, &binder_procs, proc_node) {
		if (itr->pid == pid) {
			seq_puts(m, "binder proc state:\n");
			print_binder_proc(m, itr, 1);
		}
	}
	mutex_unlock(&binder_procs_lock);

	return 0;
}
static void print_binder_transaction_log_entry(struct seq_file *m,
					struct binder_transaction_log_entry *e)
{
	int debug_id = READ_ONCE(e->debug_id_done);
	/*
	 * read barrier to guarantee debug_id_done read before
	 * we print the log values
	 */
	smp_rmb();
	seq_printf(m,
		   "%d: %s from %d:%d to %d:%d context %s node %d handle %d size %d:%d ret %d/%d l=%d",
		   e->debug_id, (e->call_type == 2) ? "reply" :
		   ((e->call_type == 1) ? "async" : "call "), e->from_proc,
		   e->from_thread, e->to_proc, e->to_thread, e->context_name,
		   e->to_node, e->target_handle, e->data_size, e->offsets_size,
		   e->return_error, e->return_error_param,
		   e->return_error_line);
	/*
	 * read-barrier to guarantee read of debug_id_done after
	 * done printing the fields of the entry
	 */
	smp_rmb();
	seq_printf(m, debug_id && debug_id == READ_ONCE(e->debug_id_done) ?
			"\n" : " (incomplete)\n");
}
static int transaction_log_show(struct seq_file *m, void *unused)
{
	struct binder_transaction_log *log = m->private;
	unsigned int log_cur = atomic_read(&log->cur);
	unsigned int count;
	unsigned int cur;
	int i;

	count = log_cur + 1;
	cur = count < ARRAY_SIZE(log->entry) && !log->full ?
		0 : count % ARRAY_SIZE(log->entry);
	if (count > ARRAY_SIZE(log->entry) || log->full)
		count = ARRAY_SIZE(log->entry);
	for (i = 0; i < count; i++) {
		unsigned int index = cur++ % ARRAY_SIZE(log->entry);

		print_binder_transaction_log_entry(m, &log->entry[index]);
	}
	return 0;
}
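/*
 * Note on the ring arithmetic above: log->cur is initialized to ~0U so
 * the first recorded entry lands in slot 0. Until the ring wraps
 * (log->full), printing starts at slot 0; afterwards it starts at the
 * oldest entry, (log_cur + 1) % ARRAY_SIZE(log->entry), and walks the
 * whole ring.
 */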
const struct file_operations binder_fops = {
	.owner = THIS_MODULE,
	.poll = binder_poll,
	.unlocked_ioctl = binder_ioctl,
	.compat_ioctl = binder_ioctl,
	.mmap = binder_mmap,
	.open = binder_open,
	.flush = binder_flush,
	.release = binder_release,
};

DEFINE_SHOW_ATTRIBUTE(state);
DEFINE_SHOW_ATTRIBUTE(stats);
DEFINE_SHOW_ATTRIBUTE(transactions);
DEFINE_SHOW_ATTRIBUTE(transaction_log);
static int __init init_binder_device(const char *name)
{
	int ret;
	struct binder_device *binder_device;

	binder_device = kzalloc(sizeof(*binder_device), GFP_KERNEL);
	if (!binder_device)
		return -ENOMEM;

	binder_device->miscdev.fops = &binder_fops;
	binder_device->miscdev.minor = MISC_DYNAMIC_MINOR;
	binder_device->miscdev.name = name;

	binder_device->context.binder_context_mgr_uid = INVALID_UID;
	binder_device->context.name = name;
	mutex_init(&binder_device->context.context_mgr_node_lock);

	ret = misc_register(&binder_device->miscdev);
	if (ret < 0) {
		kfree(binder_device);
		return ret;
	}

	hlist_add_head(&binder_device->hlist, &binder_devices);
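
	return ret;
}

/*
 * Illustrative note: binder_devices_param is fed from the
 * CONFIG_ANDROID_BINDER_DEVICES string (typically
 * "binder,hwbinder,vndbinder"), so a default build registers one misc
 * device per name here, each carrying its own independent context.
 */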
static int __init binder_init(void)
{
	int ret;
	char *device_name, *device_tmp;
	struct binder_device *device;
	struct hlist_node *tmp;
	char *device_names = NULL;

	ret = binder_alloc_shrinker_init();
	if (ret)
		return ret;

	atomic_set(&binder_transaction_log.cur, ~0U);
	atomic_set(&binder_transaction_log_failed.cur, ~0U);

	binder_debugfs_dir_entry_root = debugfs_create_dir("binder", NULL);
	if (binder_debugfs_dir_entry_root)
		binder_debugfs_dir_entry_proc = debugfs_create_dir("proc",
						 binder_debugfs_dir_entry_root);

	if (binder_debugfs_dir_entry_root) {
		debugfs_create_file("state",
				    0444,
				    binder_debugfs_dir_entry_root,
				    NULL,
				    &state_fops);
		debugfs_create_file("stats",
				    0444,
				    binder_debugfs_dir_entry_root,
				    NULL,
				    &stats_fops);
		debugfs_create_file("transactions",
				    0444,
				    binder_debugfs_dir_entry_root,
				    NULL,
				    &transactions_fops);
		debugfs_create_file("transaction_log",
				    0444,
				    binder_debugfs_dir_entry_root,
				    &binder_transaction_log,
				    &transaction_log_fops);
		debugfs_create_file("failed_transaction_log",
				    0444,
				    binder_debugfs_dir_entry_root,
				    &binder_transaction_log_failed,
				    &transaction_log_fops);
	}

	if (strcmp(binder_devices_param, "") != 0) {
		/*
		 * Copy the module_parameter string, because we don't want to
		 * tokenize it in-place.
		 */
		device_names = kstrdup(binder_devices_param, GFP_KERNEL);
		if (!device_names) {
			ret = -ENOMEM;
			goto err_alloc_device_names_failed;
		}

		device_tmp = device_names;
		while ((device_name = strsep(&device_tmp, ","))) {
			ret = init_binder_device(device_name);
			if (ret)
				goto err_init_binder_device_failed;
		}
	}

	ret = init_binderfs();
	if (ret)
		goto err_init_binder_device_failed;

	return ret;

err_init_binder_device_failed:
	hlist_for_each_entry_safe(device, tmp, &binder_devices, hlist) {
		misc_deregister(&device->miscdev);
		hlist_del(&device->hlist);
		kfree(device);
	}

	kfree(device_names);

err_alloc_device_names_failed:
	debugfs_remove_recursive(binder_debugfs_dir_entry_root);

	return ret;
}

device_initcall(binder_init);
#define CREATE_TRACE_POINTS
#include "binder_trace.h"

MODULE_LICENSE("GPL v2");