/* binder.c
 *
 * Android IPC Subsystem
 *
 * Copyright (C) 2007-2008 Google, Inc.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */

/*
 * Locking overview
 *
 * There are 3 main spinlocks which must be acquired in the
 * order shown:
 *
 * 1) proc->outer_lock : protects binder_ref
 *    binder_proc_lock() and binder_proc_unlock() are
 *    used to acq/rel.
 * 2) node->lock : protects most fields of binder_node.
 *    binder_node_lock() and binder_node_unlock() are
 *    used to acq/rel.
 * 3) proc->inner_lock : protects the thread and node lists
 *    (proc->threads, proc->nodes) and all todo lists associated
 *    with the binder_proc (proc->todo, thread->todo,
 *    proc->delivered_death and node->async_todo), as well as
 *    thread->transaction_stack
 *    binder_inner_proc_lock() and binder_inner_proc_unlock()
 *    are used to acq/rel.
 *
 * Any lock under procA must never be nested under any lock at the same
 * level or below on procB.
 *
 * Functions that require a lock held on entry indicate which lock
 * in the suffix of the function name:
 *
 * foo_olocked() : requires proc->outer_lock
 * foo_nlocked() : requires node->lock
 * foo_ilocked() : requires proc->inner_lock
 * foo_oilocked(): requires proc->outer_lock and proc->inner_lock
 * foo_nilocked(): requires node->lock and proc->inner_lock
 */
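/*
 * Illustrative sketch (not from the original source): a caller that needs
 * both the ref tables and a node's todo lists would take the locks in the
 * documented order, e.g.:
 *
 *	binder_proc_lock(proc);         // 1) proc->outer_lock
 *	binder_node_lock(node);         // 2) node->lock
 *	binder_inner_proc_lock(proc);   // 3) proc->inner_lock
 *	// ...
 *	binder_inner_proc_unlock(proc);
 *	binder_node_unlock(node);
 *	binder_proc_unlock(proc);
 *
 * The helpers are defined later in this file; the call sequence above is
 * only an example of the required nesting.
 */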
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <asm/cacheflush.h>
#include <linux/fdtable.h>
#include <linux/file.h>
#include <linux/freezer.h>
#include <linux/fs.h>
#include <linux/list.h>
#include <linux/miscdevice.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/nsproxy.h>
#include <linux/poll.h>
#include <linux/debugfs.h>
#include <linux/rbtree.h>
#include <linux/sched/signal.h>
#include <linux/sched/mm.h>
#include <linux/seq_file.h>
#include <linux/uaccess.h>
#include <linux/pid_namespace.h>
#include <linux/security.h>
#include <linux/spinlock.h>

#ifdef CONFIG_ANDROID_BINDER_IPC_32BIT
#define BINDER_IPC_32BIT 1
#endif

#include <uapi/linux/android/binder.h>
#include "binder_alloc.h"
#include "binder_trace.h"
static DEFINE_MUTEX(binder_main_lock);

static HLIST_HEAD(binder_deferred_list);
static DEFINE_MUTEX(binder_deferred_lock);

static HLIST_HEAD(binder_devices);
static HLIST_HEAD(binder_procs);
static DEFINE_MUTEX(binder_procs_lock);

static HLIST_HEAD(binder_dead_nodes);
static DEFINE_SPINLOCK(binder_dead_nodes_lock);

static struct dentry *binder_debugfs_dir_entry_root;
static struct dentry *binder_debugfs_dir_entry_proc;
static atomic_t binder_last_id;
#define BINDER_DEBUG_ENTRY(name) \
static int binder_##name##_open(struct inode *inode, struct file *file) \
{ \
	return single_open(file, binder_##name##_show, inode->i_private); \
} \
\
static const struct file_operations binder_##name##_fops = { \
	.owner = THIS_MODULE, \
	.open = binder_##name##_open, \
	.read = seq_read, \
	.llseek = seq_lseek, \
	.release = single_release, \
}

static int binder_proc_show(struct seq_file *m, void *unused);
BINDER_DEBUG_ENTRY(proc);
/* This is only defined in include/asm-arm/sizes.h */
#ifndef SZ_4M
#define SZ_4M                               0x400000
#endif

#define FORBIDDEN_MMAP_FLAGS                (VM_WRITE)

#define BINDER_SMALL_BUF_SIZE (PAGE_SIZE * 64)
enum {
	BINDER_DEBUG_USER_ERROR             = 1U << 0,
	BINDER_DEBUG_FAILED_TRANSACTION     = 1U << 1,
	BINDER_DEBUG_DEAD_TRANSACTION       = 1U << 2,
	BINDER_DEBUG_OPEN_CLOSE             = 1U << 3,
	BINDER_DEBUG_DEAD_BINDER            = 1U << 4,
	BINDER_DEBUG_DEATH_NOTIFICATION     = 1U << 5,
	BINDER_DEBUG_READ_WRITE             = 1U << 6,
	BINDER_DEBUG_USER_REFS              = 1U << 7,
	BINDER_DEBUG_THREADS                = 1U << 8,
	BINDER_DEBUG_TRANSACTION            = 1U << 9,
	BINDER_DEBUG_TRANSACTION_COMPLETE   = 1U << 10,
	BINDER_DEBUG_FREE_BUFFER            = 1U << 11,
	BINDER_DEBUG_INTERNAL_REFS          = 1U << 12,
	BINDER_DEBUG_PRIORITY_CAP           = 1U << 13,
	BINDER_DEBUG_SPINLOCKS              = 1U << 14,
};
static uint32_t binder_debug_mask = BINDER_DEBUG_USER_ERROR |
	BINDER_DEBUG_FAILED_TRANSACTION | BINDER_DEBUG_DEAD_TRANSACTION;
module_param_named(debug_mask, binder_debug_mask, uint, S_IWUSR | S_IRUGO);

static char *binder_devices_param = CONFIG_ANDROID_BINDER_DEVICES;
module_param_named(devices, binder_devices_param, charp, 0444);

static DECLARE_WAIT_QUEUE_HEAD(binder_user_error_wait);
static int binder_stop_on_user_error;

static int binder_set_stop_on_user_error(const char *val,
					 struct kernel_param *kp)
{
	int ret;

	ret = param_set_int(val, kp);
	if (binder_stop_on_user_error < 2)
		wake_up(&binder_user_error_wait);

	return ret;
}
module_param_call(stop_on_user_error, binder_set_stop_on_user_error,
		  param_get_int, &binder_stop_on_user_error, S_IWUSR | S_IRUGO);
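/*
 * Usage note (illustrative, not part of the original source): because the
 * knobs above are module parameters, they can be adjusted at runtime via
 * sysfs, typically under /sys/module/binder/parameters/. For example,
 * writing 0x3f to debug_mask would enable the first six debug categories
 * defined in the enum above; the exact path and permissions depend on how
 * the driver is built and named.
 */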
#define binder_debug(mask, x...) \
	do { \
		if (binder_debug_mask & mask) \
			pr_info(x); \
	} while (0)

#define binder_user_error(x...) \
	do { \
		if (binder_debug_mask & BINDER_DEBUG_USER_ERROR) \
			pr_info(x); \
		if (binder_stop_on_user_error) \
			binder_stop_on_user_error = 2; \
	} while (0)

#define to_flat_binder_object(hdr) \
	container_of(hdr, struct flat_binder_object, hdr)

#define to_binder_fd_object(hdr) container_of(hdr, struct binder_fd_object, hdr)

#define to_binder_buffer_object(hdr) \
	container_of(hdr, struct binder_buffer_object, hdr)

#define to_binder_fd_array_object(hdr) \
	container_of(hdr, struct binder_fd_array_object, hdr)
enum binder_stat_types {
	BINDER_STAT_PROC,
	BINDER_STAT_THREAD,
	BINDER_STAT_NODE,
	BINDER_STAT_REF,
	BINDER_STAT_DEATH,
	BINDER_STAT_TRANSACTION,
	BINDER_STAT_TRANSACTION_COMPLETE,
	BINDER_STAT_COUNT
};

struct binder_stats {
	atomic_t br[_IOC_NR(BR_FAILED_REPLY) + 1];
	atomic_t bc[_IOC_NR(BC_REPLY_SG) + 1];
	atomic_t obj_created[BINDER_STAT_COUNT];
	atomic_t obj_deleted[BINDER_STAT_COUNT];
};

static struct binder_stats binder_stats;

static inline void binder_stats_deleted(enum binder_stat_types type)
{
	atomic_inc(&binder_stats.obj_deleted[type]);
}

static inline void binder_stats_created(enum binder_stat_types type)
{
	atomic_inc(&binder_stats.obj_created[type]);
}
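/*
 * Illustrative sketch (not from the original source): object lifetimes are
 * tracked by pairing these counters, e.g. when a node is created and later
 * destroyed:
 *
 *	binder_stats_created(BINDER_STAT_NODE);	// on allocation
 *	// ...
 *	binder_stats_deleted(BINDER_STAT_NODE);	// on free
 *
 * The difference obj_created[type] - obj_deleted[type] is then the number
 * of objects of that type currently alive, which is what the debugfs stats
 * files are expected to report.
 */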
struct binder_transaction_log_entry {
	int debug_id;
	int debug_id_done;
	int call_type;
	int from_proc;
	int from_thread;
	int target_handle;
	int to_proc;
	int to_thread;
	int to_node;
	int data_size;
	int offsets_size;
	int return_error_line;
	uint32_t return_error;
	uint32_t return_error_param;
	const char *context_name;
};
struct binder_transaction_log {
	atomic_t cur;
	bool full;
	struct binder_transaction_log_entry entry[32];
};
static struct binder_transaction_log binder_transaction_log;
static struct binder_transaction_log binder_transaction_log_failed;

static struct binder_transaction_log_entry *binder_transaction_log_add(
	struct binder_transaction_log *log)
{
	struct binder_transaction_log_entry *e;
	unsigned int cur = atomic_inc_return(&log->cur);

	if (cur >= ARRAY_SIZE(log->entry))
		log->full = true;
	e = &log->entry[cur % ARRAY_SIZE(log->entry)];
	WRITE_ONCE(e->debug_id_done, 0);
	/*
	 * write-barrier to synchronize access to e->debug_id_done.
	 * We make sure the initialized 0 value is seen before
	 * the other fields are zeroed by memset().
	 */
	smp_wmb();
	memset(e, 0, sizeof(*e));
	return e;
}
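/*
 * Note (illustrative, not from the original source): the log is a fixed
 * 32-entry ring buffer. atomic_inc_return() hands out a monotonically
 * increasing slot number and "cur % ARRAY_SIZE(log->entry)" maps it back
 * into the array, so slot 32 silently overwrites slot 0, slot 33
 * overwrites slot 1, and so on; "full" only records that wrap-around has
 * happened at least once.
 */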
struct binder_context {
	struct binder_node *binder_context_mgr_node;
	struct mutex context_mgr_node_lock;

	kuid_t binder_context_mgr_uid;
	const char *name;
};

struct binder_device {
	struct hlist_node hlist;
	struct miscdevice miscdev;
	struct binder_context context;
};
/**
 * struct binder_work - work enqueued on a worklist
 * @entry:             node enqueued on list
 * @type:              type of work to be performed
 *
 * There are separate work lists for proc, thread, and node (async).
 */
struct binder_work {
	struct list_head entry;

	enum {
		BINDER_WORK_TRANSACTION = 1,
		BINDER_WORK_TRANSACTION_COMPLETE,
		BINDER_WORK_RETURN_ERROR,
		BINDER_WORK_NODE,
		BINDER_WORK_DEAD_BINDER,
		BINDER_WORK_DEAD_BINDER_AND_CLEAR,
		BINDER_WORK_CLEAR_DEATH_NOTIFICATION,
	} type;
};

struct binder_error {
	struct binder_work work;
	uint32_t cmd;
};
/**
 * struct binder_node - binder node bookkeeping
 * @debug_id:             unique ID for debugging
 *                        (invariant after initialized)
 * @lock:                 lock for node fields
 * @work:                 worklist element for node work
 *                        (protected by @proc->inner_lock)
 * @rb_node:              element for proc->nodes tree
 *                        (protected by @proc->inner_lock)
 * @dead_node:            element for binder_dead_nodes list
 *                        (protected by binder_dead_nodes_lock)
 * @proc:                 binder_proc that owns this node
 *                        (invariant after initialized)
 * @refs:                 list of references on this node
 *                        (protected by @lock)
 * @internal_strong_refs: used to take strong references when
 *                        initiating a transaction
 *                        (protected by @proc->inner_lock if @proc
 *                        and by @lock)
 * @local_weak_refs:      weak user refs from local process
 *                        (protected by @proc->inner_lock if @proc
 *                        and by @lock)
 * @local_strong_refs:    strong user refs from local process
 *                        (protected by @proc->inner_lock if @proc
 *                        and by @lock)
 * @tmp_refs:             temporary kernel refs
 *                        (protected by @proc->inner_lock while @proc
 *                        is valid, and by binder_dead_nodes_lock
 *                        if @proc is NULL. During inc/dec and node release
 *                        it is also protected by @lock to provide safety
 *                        as the node dies and @proc becomes NULL)
 * @ptr:                  userspace pointer for node
 *                        (invariant, no lock needed)
 * @cookie:               userspace cookie for node
 *                        (invariant, no lock needed)
 * @has_strong_ref:       userspace notified of strong ref
 *                        (protected by @proc->inner_lock if @proc
 *                        and by @lock)
 * @pending_strong_ref:   userspace has acked notification of strong ref
 *                        (protected by @proc->inner_lock if @proc
 *                        and by @lock)
 * @has_weak_ref:         userspace notified of weak ref
 *                        (protected by @proc->inner_lock if @proc
 *                        and by @lock)
 * @pending_weak_ref:     userspace has acked notification of weak ref
 *                        (protected by @proc->inner_lock if @proc
 *                        and by @lock)
 * @has_async_transaction: async transaction to node in progress
 *                        (protected by @lock)
 * @accept_fds:           file descriptor operations supported for node
 *                        (invariant after initialized)
 * @min_priority:         minimum scheduling priority
 *                        (invariant after initialized)
 * @async_todo:           list of async work items
 *                        (protected by @proc->inner_lock)
 *
 * Bookkeeping structure for binder nodes.
 */
struct binder_node {
	int debug_id;
	spinlock_t lock;
	struct binder_work work;
	union {
		struct rb_node rb_node;
		struct hlist_node dead_node;
	};
	struct binder_proc *proc;
	struct hlist_head refs;
	int internal_strong_refs;
	int local_weak_refs;
	int local_strong_refs;
	int tmp_refs;
	binder_uintptr_t ptr;
	binder_uintptr_t cookie;
	struct {
		/*
		 * bitfield elements protected by
		 * proc inner_lock
		 */
		u8 has_strong_ref:1;
		u8 pending_strong_ref:1;
		u8 has_weak_ref:1;
		u8 pending_weak_ref:1;
	};
	struct {
		/*
		 * invariant after initialization
		 */
		u8 accept_fds:1;
		u8 min_priority;
	};
	bool has_async_transaction;
	struct list_head async_todo;
};
struct binder_ref_death {
	/**
	 * @work: worklist element for death notifications
	 *        (protected by inner_lock of the proc that
	 *        this ref belongs to)
	 */
	struct binder_work work;
	binder_uintptr_t cookie;
};
/**
 * struct binder_ref_data - binder_ref counts and id
 * @debug_id:        unique ID for the ref
 * @desc:            unique userspace handle for ref
 * @strong:          strong ref count (debugging only if not locked)
 * @weak:            weak ref count (debugging only if not locked)
 *
 * Structure to hold ref count and ref id information. Since
 * the actual ref can only be accessed with a lock, this structure
 * is used to return information about the ref to callers of
 * ref inc/dec functions.
 */
struct binder_ref_data {
	int debug_id;
	uint32_t desc;
	int strong;
	int weak;
};
/**
 * struct binder_ref - struct to track references on nodes
 * @data:        binder_ref_data containing id, handle, and current refcounts
 * @rb_node_desc: node for lookup by @data.desc in proc's rb_tree
 * @rb_node_node: node for lookup by @node in proc's rb_tree
 * @node_entry:  list entry for node->refs list in target node
 *               (protected by @node->lock)
 * @proc:        binder_proc containing ref
 * @node:        binder_node of target node. When cleaning up a
 *               ref for deletion in binder_cleanup_ref, a non-NULL
 *               @node indicates the node must be freed
 * @death:       pointer to death notification (ref_death) if requested
 *               (protected by @node->lock)
 *
 * Structure to track references from procA to target node (on procB). This
 * structure is unsafe to access without holding @proc->outer_lock.
 */
struct binder_ref {
	/* Lookups needed: */
	/*   node + proc => ref (transaction) */
	/*   desc + proc => ref (transaction, inc/dec ref) */
	/*   node => refs + procs (proc exit) */
	struct binder_ref_data data;
	struct rb_node rb_node_desc;
	struct rb_node rb_node_node;
	struct hlist_node node_entry;
	struct binder_proc *proc;
	struct binder_node *node;
	struct binder_ref_death *death;
};
enum binder_deferred_state {
	BINDER_DEFERRED_PUT_FILES    = 0x01,
	BINDER_DEFERRED_FLUSH        = 0x02,
	BINDER_DEFERRED_RELEASE      = 0x04,
};
/**
 * struct binder_proc - binder process bookkeeping
 * @proc_node:            element for binder_procs list
 * @threads:              rbtree of binder_threads in this proc
 *                        (protected by @inner_lock)
 * @nodes:                rbtree of binder nodes associated with
 *                        this proc ordered by node->ptr
 *                        (protected by @inner_lock)
 * @refs_by_desc:         rbtree of refs ordered by ref->desc
 * @refs_by_node:         rbtree of refs ordered by ref->node
 * @pid:                  PID of group_leader of process
 *                        (invariant after initialized)
 * @tsk:                  task_struct for group_leader of process
 *                        (invariant after initialized)
 * @files:                files_struct for process
 *                        (invariant after initialized)
 * @deferred_work_node:   element for binder_deferred_list
 *                        (protected by binder_deferred_lock)
 * @deferred_work:        bitmap of deferred work to perform
 *                        (protected by binder_deferred_lock)
 * @is_dead:              process is dead and awaiting free
 *                        when outstanding transactions are cleaned up
 *                        (protected by @inner_lock)
 * @todo:                 list of work for this process
 *                        (protected by @inner_lock)
 * @wait:                 wait queue head to wait for proc work
 *                        (invariant after initialized)
 * @stats:                per-process binder statistics
 *                        (atomics, no lock needed)
 * @delivered_death:      list of delivered death notifications
 *                        (protected by @inner_lock)
 * @max_threads:          cap on number of binder threads
 * @requested_threads:    number of binder threads requested but not
 *                        yet started. In current implementation, can
 *                        only be 0 or 1.
 * @requested_threads_started: number of binder threads started
 * @ready_threads:        number of threads waiting for proc work
 * @tmp_ref:              temporary reference to indicate proc is in use
 *                        (protected by @inner_lock)
 * @default_priority:     default scheduler priority
 *                        (invariant after initialized)
 * @debugfs_entry:        debugfs node
 * @alloc:                binder allocator bookkeeping
 * @context:              binder_context for this proc
 *                        (invariant after initialized)
 * @inner_lock:           can nest under outer_lock and/or node lock
 * @outer_lock:           no nesting under inner or node lock
 *                        Lock order: 1) outer, 2) node, 3) inner
 *
 * Bookkeeping structure for binder processes
 */
struct binder_proc {
	struct hlist_node proc_node;
	struct rb_root threads;
	struct rb_root nodes;
	struct rb_root refs_by_desc;
	struct rb_root refs_by_node;
	int pid;
	struct task_struct *tsk;
	struct files_struct *files;
	struct hlist_node deferred_work_node;
	int deferred_work;
	bool is_dead;

	struct list_head todo;
	wait_queue_head_t wait;
	struct binder_stats stats;
	struct list_head delivered_death;
	int max_threads;
	int requested_threads;
	int requested_threads_started;
	int ready_threads;
	int tmp_ref;
	long default_priority;
	struct dentry *debugfs_entry;
	struct binder_alloc alloc;
	struct binder_context *context;
	spinlock_t inner_lock;
	spinlock_t outer_lock;
};
enum {
	BINDER_LOOPER_STATE_REGISTERED  = 0x01,
	BINDER_LOOPER_STATE_ENTERED     = 0x02,
	BINDER_LOOPER_STATE_EXITED      = 0x04,
	BINDER_LOOPER_STATE_INVALID     = 0x08,
	BINDER_LOOPER_STATE_WAITING     = 0x10,
};
/**
 * struct binder_thread - binder thread bookkeeping
 * @proc:                 binder process for this thread
 *                        (invariant after initialization)
 * @rb_node:              element for proc->threads rbtree
 *                        (protected by @proc->inner_lock)
 * @pid:                  PID for this thread
 *                        (invariant after initialization)
 * @looper:               bitmap of looping state
 *                        (only accessed by this thread)
 * @looper_need_return:   looping thread needs to exit driver
 *                        (no lock needed)
 * @transaction_stack:    stack of in-progress transactions for this thread
 *                        (protected by @proc->inner_lock)
 * @todo:                 list of work to do for this thread
 *                        (protected by @proc->inner_lock)
 * @return_error:         transaction errors reported by this thread
 *                        (only accessed by this thread)
 * @reply_error:          transaction errors reported by target thread
 *                        (protected by @proc->inner_lock)
 * @wait:                 wait queue for thread work
 * @stats:                per-thread statistics
 *                        (atomics, no lock needed)
 * @tmp_ref:              temporary reference to indicate thread is in use
 *                        (atomic since @proc->inner_lock cannot
 *                        always be acquired)
 * @is_dead:              thread is dead and awaiting free
 *                        when outstanding transactions are cleaned up
 *                        (protected by @proc->inner_lock)
 *
 * Bookkeeping structure for binder threads.
 */
struct binder_thread {
	struct binder_proc *proc;
	struct rb_node rb_node;
	int pid;
	int looper;              /* only modified by this thread */
	bool looper_need_return; /* can be written by other thread */
	struct binder_transaction *transaction_stack;
	struct list_head todo;
	struct binder_error return_error;
	struct binder_error reply_error;
	wait_queue_head_t wait;
	struct binder_stats stats;
	atomic_t tmp_ref;
	bool is_dead;
};
struct binder_transaction {
	int debug_id;
	struct binder_work work;
	struct binder_thread *from;
	struct binder_transaction *from_parent;
	struct binder_proc *to_proc;
	struct binder_thread *to_thread;
	struct binder_transaction *to_parent;
	unsigned need_reply:1;
	/* unsigned is_dead:1; */	/* not used at the moment */

	struct binder_buffer *buffer;
	unsigned int	code;
	unsigned int	flags;
	long	priority;
	long	saved_priority;
	kuid_t	sender_euid;
	/**
	 * @lock:  protects @from, @to_proc, and @to_thread
	 *
	 * @from, @to_proc, and @to_thread can be set to NULL
	 * during thread teardown
	 */
	spinlock_t lock;
};
/**
 * binder_proc_lock() - Acquire outer lock for given binder_proc
 * @proc:         struct binder_proc to acquire
 *
 * Acquires proc->outer_lock. Used to protect binder_ref
 * structures associated with the given proc.
 */
#define binder_proc_lock(proc) _binder_proc_lock(proc, __LINE__)
static void
_binder_proc_lock(struct binder_proc *proc, int line)
{
	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	spin_lock(&proc->outer_lock);
}
/**
 * binder_proc_unlock() - Release spinlock for given binder_proc
 * @proc:         struct binder_proc to release
 *
 * Release lock acquired via binder_proc_lock()
 */
#define binder_proc_unlock(_proc) _binder_proc_unlock(_proc, __LINE__)
static void
_binder_proc_unlock(struct binder_proc *proc, int line)
{
	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	spin_unlock(&proc->outer_lock);
}
/**
 * binder_inner_proc_lock() - Acquire inner lock for given binder_proc
 * @proc:         struct binder_proc to acquire
 *
 * Acquires proc->inner_lock. Used to protect todo lists
 */
#define binder_inner_proc_lock(proc) _binder_inner_proc_lock(proc, __LINE__)
static void
_binder_inner_proc_lock(struct binder_proc *proc, int line)
{
	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	spin_lock(&proc->inner_lock);
}
/**
 * binder_inner_proc_unlock() - Release inner lock for given binder_proc
 * @proc:         struct binder_proc to release
 *
 * Release lock acquired via binder_inner_proc_lock()
 */
#define binder_inner_proc_unlock(proc) _binder_inner_proc_unlock(proc, __LINE__)
static void
_binder_inner_proc_unlock(struct binder_proc *proc, int line)
{
	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	spin_unlock(&proc->inner_lock);
}
/**
 * binder_node_lock() - Acquire spinlock for given binder_node
 * @node:         struct binder_node to acquire
 *
 * Acquires node->lock. Used to protect binder_node fields
 */
#define binder_node_lock(node) _binder_node_lock(node, __LINE__)
static void
_binder_node_lock(struct binder_node *node, int line)
{
	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	spin_lock(&node->lock);
}
/**
 * binder_node_unlock() - Release spinlock for given binder_node
 * @node:         struct binder_node to release
 *
 * Release lock acquired via binder_node_lock()
 */
#define binder_node_unlock(node) _binder_node_unlock(node, __LINE__)
static void
_binder_node_unlock(struct binder_node *node, int line)
{
	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	spin_unlock(&node->lock);
}
/**
 * binder_node_inner_lock() - Acquire node and inner locks
 * @node:         struct binder_node to acquire
 *
 * Acquires node->lock. If node->proc also acquires
 * proc->inner_lock. Used to protect binder_node fields
 */
#define binder_node_inner_lock(node) _binder_node_inner_lock(node, __LINE__)
static void
_binder_node_inner_lock(struct binder_node *node, int line)
{
	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	spin_lock(&node->lock);
	if (node->proc)
		binder_inner_proc_lock(node->proc);
}
/**
 * binder_node_inner_unlock() - Release node and inner locks
 * @node:         struct binder_node to release
 *
 * Release lock acquired via binder_node_inner_lock()
 */
#define binder_node_inner_unlock(node) _binder_node_inner_unlock(node, __LINE__)
static void
_binder_node_inner_unlock(struct binder_node *node, int line)
{
	struct binder_proc *proc = node->proc;

	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	if (proc)
		binder_inner_proc_unlock(proc);
	spin_unlock(&node->lock);
}
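/*
 * Illustrative sketch (not from the original source): a typical pattern is
 * to take both node and inner locks for a node that may or may not still
 * have a live proc, then release them in reverse order:
 *
 *	binder_node_inner_lock(node);
 *	// ... touch node refcounts and node->work safely ...
 *	binder_node_inner_unlock(node);
 *
 * The helpers above hide the "if (node->proc)" check, so the same call
 * sequence works for live and dead nodes.
 */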
static bool binder_worklist_empty_ilocked(struct list_head *list)
{
	return list_empty(list);
}

/**
 * binder_worklist_empty() - Check if no items on the work list
 * @proc:       binder_proc associated with list
 * @list:       list to check
 *
 * Return: true if there are no items on list, else false
 */
static bool binder_worklist_empty(struct binder_proc *proc,
				  struct list_head *list)
{
	bool ret;

	binder_inner_proc_lock(proc);
	ret = binder_worklist_empty_ilocked(list);
	binder_inner_proc_unlock(proc);
	return ret;
}
static void
binder_enqueue_work_ilocked(struct binder_work *work,
			    struct list_head *target_list)
{
	BUG_ON(target_list == NULL);
	BUG_ON(work->entry.next && !list_empty(&work->entry));
	list_add_tail(&work->entry, target_list);
}
/**
 * binder_enqueue_work() - Add an item to the work list
 * @proc:         binder_proc associated with list
 * @work:         struct binder_work to add to list
 * @target_list:  list to add work to
 *
 * Adds the work to the specified list. Asserts that work
 * is not already on a list.
 */
static void
binder_enqueue_work(struct binder_proc *proc,
		    struct binder_work *work,
		    struct list_head *target_list)
{
	binder_inner_proc_lock(proc);
	binder_enqueue_work_ilocked(work, target_list);
	binder_inner_proc_unlock(proc);
}
static void
binder_dequeue_work_ilocked(struct binder_work *work)
{
	list_del_init(&work->entry);
}

/**
 * binder_dequeue_work() - Removes an item from the work list
 * @proc:         binder_proc associated with list
 * @work:         struct binder_work to remove from list
 *
 * Removes the specified work item from whatever list it is on.
 * Can safely be called if work is not on any list.
 */
static void
binder_dequeue_work(struct binder_proc *proc, struct binder_work *work)
{
	binder_inner_proc_lock(proc);
	binder_dequeue_work_ilocked(work);
	binder_inner_proc_unlock(proc);
}
static struct binder_work *binder_dequeue_work_head_ilocked(
					struct list_head *list)
{
	struct binder_work *w;

	w = list_first_entry_or_null(list, struct binder_work, entry);
	if (w)
		list_del_init(&w->entry);
	return w;
}

/**
 * binder_dequeue_work_head() - Dequeues the item at head of list
 * @proc:         binder_proc associated with list
 * @list:         list to dequeue head
 *
 * Removes the head of the list if there are items on the list
 *
 * Return: pointer to dequeued binder_work, NULL if list was empty
 */
static struct binder_work *binder_dequeue_work_head(
					struct binder_proc *proc,
					struct list_head *list)
{
	struct binder_work *w;

	binder_inner_proc_lock(proc);
	w = binder_dequeue_work_head_ilocked(list);
	binder_inner_proc_unlock(proc);
	return w;
}
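/*
 * Illustrative sketch (not from the original source): producers and
 * consumers of a todo list pair these helpers, e.g.:
 *
 *	binder_enqueue_work(proc, &t->work, &thread->todo);
 *	// ...
 *	w = binder_dequeue_work_head(proc, &thread->todo);
 *	if (w)
 *		// dispatch based on w->type
 *
 * Both sides serialize on proc->inner_lock, which is why the *_ilocked
 * variants exist for callers that already hold it.
 */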
static void
binder_defer_work(struct binder_proc *proc, enum binder_deferred_state defer);
static void binder_free_thread(struct binder_thread *thread);
static void binder_free_proc(struct binder_proc *proc);
static void binder_inc_node_tmpref_ilocked(struct binder_node *node);
static int task_get_unused_fd_flags(struct binder_proc *proc, int flags)
{
	struct files_struct *files = proc->files;
	unsigned long rlim_cur;
	unsigned long irqs;

	if (files == NULL)
		return -ESRCH;

	if (!lock_task_sighand(proc->tsk, &irqs))
		return -EMFILE;

	rlim_cur = task_rlimit(proc->tsk, RLIMIT_NOFILE);
	unlock_task_sighand(proc->tsk, &irqs);

	return __alloc_fd(files, 0, rlim_cur, flags);
}
/*
 * copied from fd_install
 */
static void task_fd_install(
	struct binder_proc *proc, unsigned int fd, struct file *file)
{
	if (proc->files)
		__fd_install(proc->files, fd, file);
}
/*
 * copied from sys_close
 */
static long task_close_fd(struct binder_proc *proc, unsigned int fd)
{
	int retval;

	if (proc->files == NULL)
		return -ESRCH;

	retval = __close_fd(proc->files, fd);
	/* can't restart close syscall because file table entry was cleared */
	if (unlikely(retval == -ERESTARTSYS ||
		     retval == -ERESTARTNOINTR ||
		     retval == -ERESTARTNOHAND ||
		     retval == -ERESTART_RESTARTBLOCK))
		retval = -EINTR;

	return retval;
}
static inline void binder_lock(const char *tag)
{
	trace_binder_lock(tag);
	mutex_lock(&binder_main_lock);
	trace_binder_locked(tag);
}

static inline void binder_unlock(const char *tag)
{
	trace_binder_unlock(tag);
	mutex_unlock(&binder_main_lock);
}
static void binder_set_nice(long nice)
{
	long min_nice;

	if (can_nice(current, nice)) {
		set_user_nice(current, nice);
		return;
	}
	min_nice = rlimit_to_nice(current->signal->rlim[RLIMIT_NICE].rlim_cur);
	binder_debug(BINDER_DEBUG_PRIORITY_CAP,
		     "%d: nice value %ld not allowed use %ld instead\n",
		     current->pid, nice, min_nice);
	set_user_nice(current, min_nice);
	if (min_nice <= MAX_NICE)
		return;
	binder_user_error("%d RLIMIT_NICE not set\n", current->pid);
}
static struct binder_node *binder_get_node_ilocked(struct binder_proc *proc,
						   binder_uintptr_t ptr)
{
	struct rb_node *n = proc->nodes.rb_node;
	struct binder_node *node;

	BUG_ON(!spin_is_locked(&proc->inner_lock));

	while (n) {
		node = rb_entry(n, struct binder_node, rb_node);

		if (ptr < node->ptr)
			n = n->rb_left;
		else if (ptr > node->ptr)
			n = n->rb_right;
		else {
			/*
			 * take an implicit weak reference
			 * to ensure node stays alive until
			 * call to binder_put_node()
			 */
			binder_inc_node_tmpref_ilocked(node);
			return node;
		}
	}
	return NULL;
}
static struct binder_node *binder_get_node(struct binder_proc *proc,
					   binder_uintptr_t ptr)
{
	struct binder_node *node;

	binder_inner_proc_lock(proc);
	node = binder_get_node_ilocked(proc, ptr);
	binder_inner_proc_unlock(proc);

	return node;
}
static struct binder_node *binder_init_node_ilocked(
						struct binder_proc *proc,
						struct binder_node *new_node,
						struct flat_binder_object *fp)
{
	struct rb_node **p = &proc->nodes.rb_node;
	struct rb_node *parent = NULL;
	struct binder_node *node;
	binder_uintptr_t ptr = fp ? fp->binder : 0;
	binder_uintptr_t cookie = fp ? fp->cookie : 0;
	__u32 flags = fp ? fp->flags : 0;

	BUG_ON(!spin_is_locked(&proc->inner_lock));

	while (*p) {
		parent = *p;
		node = rb_entry(parent, struct binder_node, rb_node);

		if (ptr < node->ptr)
			p = &(*p)->rb_left;
		else if (ptr > node->ptr)
			p = &(*p)->rb_right;
		else {
			/*
			 * A matching node is already in
			 * the rb tree. Abandon the init
			 * and return it.
			 */
			binder_inc_node_tmpref_ilocked(node);
			return node;
		}
	}
	node = new_node;
	binder_stats_created(BINDER_STAT_NODE);
	node->tmp_refs++;
	rb_link_node(&node->rb_node, parent, p);
	rb_insert_color(&node->rb_node, &proc->nodes);
	node->debug_id = atomic_inc_return(&binder_last_id);
	node->proc = proc;
	node->ptr = ptr;
	node->cookie = cookie;
	node->work.type = BINDER_WORK_NODE;
	node->min_priority = flags & FLAT_BINDER_FLAG_PRIORITY_MASK;
	node->accept_fds = !!(flags & FLAT_BINDER_FLAG_ACCEPTS_FDS);
	spin_lock_init(&node->lock);
	INIT_LIST_HEAD(&node->work.entry);
	INIT_LIST_HEAD(&node->async_todo);
	binder_debug(BINDER_DEBUG_INTERNAL_REFS,
		     "%d:%d node %d u%016llx c%016llx created\n",
		     proc->pid, current->pid, node->debug_id,
		     (u64)node->ptr, (u64)node->cookie);

	return node;
}
static struct binder_node *binder_new_node(struct binder_proc *proc,
					   struct flat_binder_object *fp)
{
	struct binder_node *node;
	struct binder_node *new_node = kzalloc(sizeof(*node), GFP_KERNEL);

	if (!new_node)
		return NULL;
	binder_inner_proc_lock(proc);
	node = binder_init_node_ilocked(proc, new_node, fp);
	binder_inner_proc_unlock(proc);
	if (node != new_node)
		/*
		 * The node was already added by another thread
		 */
		kfree(new_node);

	return node;
}
static void binder_free_node(struct binder_node *node)
{
	kfree(node);
	binder_stats_deleted(BINDER_STAT_NODE);
}
static int binder_inc_node_nilocked(struct binder_node *node, int strong,
				    int internal,
				    struct list_head *target_list)
{
	struct binder_proc *proc = node->proc;

	BUG_ON(!spin_is_locked(&node->lock));
	if (proc)
		BUG_ON(!spin_is_locked(&proc->inner_lock));
	if (strong) {
		if (internal) {
			if (target_list == NULL &&
			    node->internal_strong_refs == 0 &&
			    !(node->proc &&
			      node == node->proc->context->binder_context_mgr_node &&
			      node->has_strong_ref)) {
				pr_err("invalid inc strong node for %d\n",
				       node->debug_id);
				return -EINVAL;
			}
			node->internal_strong_refs++;
		} else
			node->local_strong_refs++;
		if (!node->has_strong_ref && target_list) {
			binder_dequeue_work_ilocked(&node->work);
			binder_enqueue_work_ilocked(&node->work, target_list);
		}
	} else {
		if (!internal)
			node->local_weak_refs++;
		if (!node->has_weak_ref && list_empty(&node->work.entry)) {
			if (target_list == NULL) {
				pr_err("invalid inc weak node for %d\n",
				       node->debug_id);
				return -EINVAL;
			}
			binder_enqueue_work_ilocked(&node->work, target_list);
		}
	}
	return 0;
}
static int binder_inc_node(struct binder_node *node, int strong, int internal,
			   struct list_head *target_list)
{
	int ret;

	binder_node_inner_lock(node);
	ret = binder_inc_node_nilocked(node, strong, internal, target_list);
	binder_node_inner_unlock(node);

	return ret;
}
static bool binder_dec_node_nilocked(struct binder_node *node,
				     int strong, int internal)
{
	struct binder_proc *proc = node->proc;

	BUG_ON(!spin_is_locked(&node->lock));
	if (proc)
		BUG_ON(!spin_is_locked(&proc->inner_lock));
	if (strong) {
		if (internal)
			node->internal_strong_refs--;
		else
			node->local_strong_refs--;
		if (node->local_strong_refs || node->internal_strong_refs)
			return false;
	} else {
		if (!internal)
			node->local_weak_refs--;
		if (node->local_weak_refs || node->tmp_refs ||
		    !hlist_empty(&node->refs))
			return false;
	}

	if (proc && (node->has_strong_ref || node->has_weak_ref)) {
		if (list_empty(&node->work.entry)) {
			binder_enqueue_work_ilocked(&node->work, &proc->todo);
			wake_up_interruptible(&node->proc->wait);
		}
	} else {
		if (hlist_empty(&node->refs) && !node->local_strong_refs &&
		    !node->local_weak_refs && !node->tmp_refs) {
			if (proc) {
				binder_dequeue_work_ilocked(&node->work);
				rb_erase(&node->rb_node, &proc->nodes);
				binder_debug(BINDER_DEBUG_INTERNAL_REFS,
					     "refless node %d deleted\n",
					     node->debug_id);
			} else {
				BUG_ON(!list_empty(&node->work.entry));
				spin_lock(&binder_dead_nodes_lock);
				/*
				 * tmp_refs could have changed so
				 * check it again
				 */
				if (node->tmp_refs) {
					spin_unlock(&binder_dead_nodes_lock);
					return false;
				}
				hlist_del(&node->dead_node);
				spin_unlock(&binder_dead_nodes_lock);
				binder_debug(BINDER_DEBUG_INTERNAL_REFS,
					     "dead node %d deleted\n",
					     node->debug_id);
			}
			return true;
		}
	}
	return false;
}
static void binder_dec_node(struct binder_node *node, int strong, int internal)
{
	bool free_node;

	binder_node_inner_lock(node);
	free_node = binder_dec_node_nilocked(node, strong, internal);
	binder_node_inner_unlock(node);
	if (free_node)
		binder_free_node(node);
}
static void binder_inc_node_tmpref_ilocked(struct binder_node *node)
{
	/*
	 * No call to binder_inc_node() is needed since we
	 * don't need to inform userspace of any changes to
	 * tmp_refs
	 */
	node->tmp_refs++;
}
/**
 * binder_inc_node_tmpref() - take a temporary reference on node
 * @node:	node to reference
 *
 * Take reference on node to prevent the node from being freed
 * while referenced only by a local variable. The inner lock is
 * needed to serialize with the node work on the queue (which
 * isn't needed after the node is dead). If the node is dead
 * (node->proc is NULL), use binder_dead_nodes_lock to protect
 * node->tmp_refs against dead-node-only cases where the node
 * lock cannot be acquired (eg traversing the dead node list to
 * print nodes)
 */
static void binder_inc_node_tmpref(struct binder_node *node)
{
	binder_node_lock(node);
	if (node->proc)
		binder_inner_proc_lock(node->proc);
	else
		spin_lock(&binder_dead_nodes_lock);
	binder_inc_node_tmpref_ilocked(node);
	if (node->proc)
		binder_inner_proc_unlock(node->proc);
	else
		spin_unlock(&binder_dead_nodes_lock);
	binder_node_unlock(node);
}
/**
 * binder_dec_node_tmpref() - remove a temporary reference on node
 * @node:	node to reference
 *
 * Release temporary reference on node taken via binder_inc_node_tmpref()
 */
static void binder_dec_node_tmpref(struct binder_node *node)
{
	bool free_node;

	binder_node_inner_lock(node);
	if (!node->proc)
		spin_lock(&binder_dead_nodes_lock);
	node->tmp_refs--;
	BUG_ON(node->tmp_refs < 0);
	if (!node->proc)
		spin_unlock(&binder_dead_nodes_lock);
	/*
	 * Call binder_dec_node() to check if all refcounts are 0
	 * and cleanup is needed. Calling with strong=0 and internal=1
	 * causes no actual reference to be released in binder_dec_node().
	 * If that changes, a change is needed here too.
	 */
	free_node = binder_dec_node_nilocked(node, 0, 1);
	binder_node_inner_unlock(node);
	if (free_node)
		binder_free_node(node);
}

static void binder_put_node(struct binder_node *node)
{
	binder_dec_node_tmpref(node);
}
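/*
 * Illustrative sketch (not from the original source): the tmpref pair is
 * the usual way a code path keeps a node alive while it only holds a local
 * pointer, e.g.:
 *
 *	node = binder_get_node(proc, ptr);	// takes an implicit tmpref
 *	if (node) {
 *		// ... safe to dereference node here ...
 *		binder_put_node(node);		// drops the tmpref
 *	}
 *
 * Forgetting the binder_put_node() would keep the node (and possibly its
 * dead-node bookkeeping) around forever.
 */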
static struct binder_ref *binder_get_ref(struct binder_proc *proc,
					 u32 desc, bool need_strong_ref)
{
	struct rb_node *n = proc->refs_by_desc.rb_node;
	struct binder_ref *ref;

	while (n) {
		ref = rb_entry(n, struct binder_ref, rb_node_desc);

		if (desc < ref->data.desc) {
			n = n->rb_left;
		} else if (desc > ref->data.desc) {
			n = n->rb_right;
		} else if (need_strong_ref && !ref->data.strong) {
			binder_user_error("tried to use weak ref as strong ref\n");
			return NULL;
		} else {
			return ref;
		}
	}
	return NULL;
}
/**
 * binder_get_ref_for_node() - get the ref associated with given node
 * @proc:	binder_proc that owns the ref
 * @node:	binder_node of target
 * @new_ref:	newly allocated binder_ref to be initialized or %NULL
 *
 * Look up the ref for the given node and return it if it exists
 *
 * If it doesn't exist and the caller provides a newly allocated
 * ref, initialize the fields of the newly allocated ref and insert
 * into the given proc rb_trees and node refs list.
 *
 * Return:	the ref for node. It is possible that another thread
 *		allocated/initialized the ref first in which case the
 *		returned ref would be different than the passed-in
 *		new_ref. new_ref must be kfree'd by the caller in
 *		this case.
 */
static struct binder_ref *binder_get_ref_for_node(struct binder_proc *proc,
						  struct binder_node *node,
						  struct binder_ref *new_ref)
{
	struct binder_context *context = proc->context;
	struct rb_node **p = &proc->refs_by_node.rb_node;
	struct rb_node *parent = NULL;
	struct binder_ref *ref;
	struct rb_node *n;

	while (*p) {
		parent = *p;
		ref = rb_entry(parent, struct binder_ref, rb_node_node);

		if (node < ref->node)
			p = &(*p)->rb_left;
		else if (node > ref->node)
			p = &(*p)->rb_right;
		else
			return ref;
	}
	if (!new_ref)
		return NULL;

	binder_stats_created(BINDER_STAT_REF);
	new_ref->data.debug_id = atomic_inc_return(&binder_last_id);
	new_ref->proc = proc;
	new_ref->node = node;
	rb_link_node(&new_ref->rb_node_node, parent, p);
	rb_insert_color(&new_ref->rb_node_node, &proc->refs_by_node);

	new_ref->data.desc = (node == context->binder_context_mgr_node) ? 0 : 1;
	for (n = rb_first(&proc->refs_by_desc); n != NULL; n = rb_next(n)) {
		ref = rb_entry(n, struct binder_ref, rb_node_desc);
		if (ref->data.desc > new_ref->data.desc)
			break;
		new_ref->data.desc = ref->data.desc + 1;
	}

	p = &proc->refs_by_desc.rb_node;
	while (*p) {
		parent = *p;
		ref = rb_entry(parent, struct binder_ref, rb_node_desc);

		if (new_ref->data.desc < ref->data.desc)
			p = &(*p)->rb_left;
		else if (new_ref->data.desc > ref->data.desc)
			p = &(*p)->rb_right;
		else
			BUG();
	}
	rb_link_node(&new_ref->rb_node_desc, parent, p);
	rb_insert_color(&new_ref->rb_node_desc, &proc->refs_by_desc);

	binder_node_lock(node);
	hlist_add_head(&new_ref->node_entry, &node->refs);

	binder_debug(BINDER_DEBUG_INTERNAL_REFS,
		     "%d new ref %d desc %d for node %d\n",
		     proc->pid, new_ref->data.debug_id, new_ref->data.desc,
		     node->debug_id);
	binder_node_unlock(node);
	return new_ref;
}
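/*
 * Worked example (illustrative, not from the original source): handle
 * (desc) values are allocated per process by walking refs_by_desc for the
 * lowest free value. A new ref on the context manager node starts the
 * search at 0, every other node starts at 1, so a process whose existing
 * refs hold descs {0, 1, 2, 5} would get desc 3 for its next ordinary ref.
 */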
static void binder_cleanup_ref(struct binder_ref *ref)
{
	bool delete_node = false;

	binder_debug(BINDER_DEBUG_INTERNAL_REFS,
		     "%d delete ref %d desc %d for node %d\n",
		     ref->proc->pid, ref->data.debug_id, ref->data.desc,
		     ref->node->debug_id);

	rb_erase(&ref->rb_node_desc, &ref->proc->refs_by_desc);
	rb_erase(&ref->rb_node_node, &ref->proc->refs_by_node);

	binder_node_inner_lock(ref->node);
	if (ref->data.strong)
		binder_dec_node_nilocked(ref->node, 1, 1);

	hlist_del(&ref->node_entry);
	delete_node = binder_dec_node_nilocked(ref->node, 0, 1);
	binder_node_inner_unlock(ref->node);
	/*
	 * Clear ref->node unless we want the caller to free the node
	 */
	if (!delete_node) {
		/*
		 * The caller uses ref->node to determine
		 * whether the node needs to be freed. Clear
		 * it since the node is still alive.
		 */
		ref->node = NULL;
	}

	if (ref->death) {
		binder_debug(BINDER_DEBUG_DEAD_BINDER,
			     "%d delete ref %d desc %d has death notification\n",
			     ref->proc->pid, ref->data.debug_id,
			     ref->data.desc);
		binder_dequeue_work(ref->proc, &ref->death->work);
		binder_stats_deleted(BINDER_STAT_DEATH);
	}
	binder_stats_deleted(BINDER_STAT_REF);
}
/**
 * binder_inc_ref() - increment the ref for given handle
 * @ref:         ref to be incremented
 * @strong:      if true, strong increment, else weak
 * @target_list: list to queue node work on
 *
 * Increment the ref.
 *
 * Return: 0, if successful, else errno
 */
static int binder_inc_ref(struct binder_ref *ref, int strong,
			  struct list_head *target_list)
{
	int ret;

	if (strong) {
		if (ref->data.strong == 0) {
			ret = binder_inc_node(ref->node, 1, 1, target_list);
			if (ret)
				return ret;
		}
		ref->data.strong++;
	} else {
		if (ref->data.weak == 0) {
			ret = binder_inc_node(ref->node, 0, 1, target_list);
			if (ret)
				return ret;
		}
		ref->data.weak++;
	}
	return 0;
}
/**
 * binder_dec_ref() - dec the ref for given handle
 * @ref:	ref to be decremented
 * @strong:	if true, strong decrement, else weak
 *
 * Decrement the ref.
 *
 * TODO: kfree is avoided here since an upcoming patch
 * will put this under a lock.
 *
 * Return: true if ref is cleaned up and ready to be freed
 */
static bool binder_dec_ref(struct binder_ref *ref, int strong)
{
	if (strong) {
		if (ref->data.strong == 0) {
			binder_user_error("%d invalid dec strong, ref %d desc %d s %d w %d\n",
					  ref->proc->pid, ref->data.debug_id,
					  ref->data.desc, ref->data.strong,
					  ref->data.weak);
			return false;
		}
		ref->data.strong--;
		if (ref->data.strong == 0)
			binder_dec_node(ref->node, strong, 1);
	} else {
		if (ref->data.weak == 0) {
			binder_user_error("%d invalid dec weak, ref %d desc %d s %d w %d\n",
					  ref->proc->pid, ref->data.debug_id,
					  ref->data.desc, ref->data.strong,
					  ref->data.weak);
			return false;
		}
		ref->data.weak--;
	}
	if (ref->data.strong == 0 && ref->data.weak == 0) {
		binder_cleanup_ref(ref);
		/*
		 * TODO: we could kfree(ref) here, but an upcoming
		 * patch will call this with a lock held, so we
		 * return an indication that the ref should be
		 * freed.
		 */
		return true;
	}
	return false;
}
1499 * @proc: proc containing the ref
1500 * @desc: the handle associated with the ref
1501 * @need_strong_ref: if true, only return node if ref is strong
1502 * @rdata: the id/refcount data for the ref
1504 * Given a proc and ref handle, return the associated binder_node
1506 * Return: a binder_node or NULL if not found or not strong when strong required
1508 static struct binder_node
*binder_get_node_from_ref(
1509 struct binder_proc
*proc
,
1510 u32 desc
, bool need_strong_ref
,
1511 struct binder_ref_data
*rdata
)
1513 struct binder_node
*node
;
1514 struct binder_ref
*ref
;
1516 ref
= binder_get_ref(proc
, desc
, need_strong_ref
);
1521 * Take an implicit reference on the node to ensure
1522 * it stays alive until the call to binder_put_node()
1524 binder_inc_node_tmpref(node
);
/**
 * binder_free_ref() - free the binder_ref
 * @ref:	ref to free
 *
 * Free the binder_ref. Free the binder_node indicated by ref->node
 * (if non-NULL) and the binder_ref_death indicated by ref->death.
 */
static void binder_free_ref(struct binder_ref *ref)
{
	if (ref->node)
		binder_free_node(ref->node);
	kfree(ref->death);
	kfree(ref);
}
/**
 * binder_update_ref_for_handle() - inc/dec the ref for given handle
 * @proc:	proc containing the ref
 * @desc:	the handle associated with the ref
 * @increment:	true=inc reference, false=dec reference
 * @strong:	true=strong reference, false=weak reference
 * @rdata:	the id/refcount data for the ref
 *
 * Given a proc and ref handle, increment or decrement the ref
 * according to "increment" arg.
 *
 * Return: 0 if successful, else errno
 */
static int binder_update_ref_for_handle(struct binder_proc *proc,
		uint32_t desc, bool increment, bool strong,
		struct binder_ref_data *rdata)
{
	int ret = 0;
	struct binder_ref *ref;
	bool delete_ref = false;

	binder_proc_lock(proc);
	ref = binder_get_ref(proc, desc, strong);
	if (!ref) {
		ret = -EINVAL;
		goto err_no_ref;
	}
	if (increment)
		ret = binder_inc_ref(ref, strong, NULL);
	else
		delete_ref = binder_dec_ref(ref, strong);

	if (rdata)
		*rdata = ref->data;
	binder_proc_unlock(proc);

	if (delete_ref)
		binder_free_ref(ref);
	return ret;

err_no_ref:
	binder_proc_unlock(proc);
	return ret;
}
/**
 * binder_dec_ref_for_handle() - dec the ref for given handle
 * @proc:	proc containing the ref
 * @desc:	the handle associated with the ref
 * @strong:	true=strong reference, false=weak reference
 * @rdata:	the id/refcount data for the ref
 *
 * Just calls binder_update_ref_for_handle() to decrement the ref.
 *
 * Return: 0 if successful, else errno
 */
static int binder_dec_ref_for_handle(struct binder_proc *proc,
		uint32_t desc, bool strong, struct binder_ref_data *rdata)
{
	return binder_update_ref_for_handle(proc, desc, false, strong, rdata);
}
/**
 * binder_inc_ref_for_node() - increment the ref for given proc/node
 * @proc:	 proc containing the ref
 * @node:	 target node
 * @strong:	 true=strong reference, false=weak reference
 * @target_list: worklist to use if node is incremented
 * @rdata:	 the id/refcount data for the ref
 *
 * Given a proc and node, increment the ref. Create the ref if it
 * doesn't already exist
 *
 * Return: 0 if successful, else errno
 */
static int binder_inc_ref_for_node(struct binder_proc *proc,
			struct binder_node *node,
			bool strong,
			struct list_head *target_list,
			struct binder_ref_data *rdata)
{
	struct binder_ref *ref;
	struct binder_ref *new_ref = NULL;
	int ret = 0;

	binder_proc_lock(proc);
	ref = binder_get_ref_for_node(proc, node, NULL);
	if (!ref) {
		binder_proc_unlock(proc);
		new_ref = kzalloc(sizeof(*ref), GFP_KERNEL);
		if (!new_ref)
			return -ENOMEM;
		binder_proc_lock(proc);
		ref = binder_get_ref_for_node(proc, node, new_ref);
	}
	ret = binder_inc_ref(ref, strong, target_list);
	*rdata = ref->data;
	binder_proc_unlock(proc);
	if (new_ref && ref != new_ref)
		/*
		 * Another thread created the ref first so
		 * free the one we allocated
		 */
		kfree(new_ref);
	return ret;
}
static void binder_pop_transaction_ilocked(struct binder_thread *target_thread,
					   struct binder_transaction *t)
{
	BUG_ON(!target_thread);
	BUG_ON(!spin_is_locked(&target_thread->proc->inner_lock));
	BUG_ON(target_thread->transaction_stack != t);
	BUG_ON(target_thread->transaction_stack->from != target_thread);
	target_thread->transaction_stack =
		target_thread->transaction_stack->from_parent;
	t->from = NULL;
}
/**
 * binder_thread_dec_tmpref() - decrement thread->tmp_ref
 * @thread:	thread to decrement
 *
 * A thread needs to be kept alive while being used to create or
 * handle a transaction. binder_get_txn_from() is used to safely
 * extract t->from from a binder_transaction and keep the thread
 * indicated by t->from from being freed. When done with that
 * binder_thread, this function is called to decrement the
 * tmp_ref and free if appropriate (thread has been released
 * and no transaction being processed by the driver)
 */
static void binder_thread_dec_tmpref(struct binder_thread *thread)
{
	/*
	 * atomic is used to protect the counter value while
	 * it cannot reach zero or thread->is_dead is false
	 */
	binder_inner_proc_lock(thread->proc);
	atomic_dec(&thread->tmp_ref);
	if (thread->is_dead && !atomic_read(&thread->tmp_ref)) {
		binder_inner_proc_unlock(thread->proc);
		binder_free_thread(thread);
		return;
	}
	binder_inner_proc_unlock(thread->proc);
}
/**
 * binder_proc_dec_tmpref() - decrement proc->tmp_ref
 * @proc:	proc to decrement
 *
 * A binder_proc needs to be kept alive while being used to create or
 * handle a transaction. proc->tmp_ref is incremented when
 * creating a new transaction or the binder_proc is currently in-use
 * by threads that are being released. When done with the binder_proc,
 * this function is called to decrement the counter and free the
 * proc if appropriate (proc has been released, all threads have
 * been released and not currently in-use to process a transaction).
 */
static void binder_proc_dec_tmpref(struct binder_proc *proc)
{
	binder_inner_proc_lock(proc);
	proc->tmp_ref--;
	if (proc->is_dead && RB_EMPTY_ROOT(&proc->threads) &&
			!proc->tmp_ref) {
		binder_inner_proc_unlock(proc);
		binder_free_proc(proc);
		return;
	}
	binder_inner_proc_unlock(proc);
}
/**
 * binder_get_txn_from() - safely extract the "from" thread in transaction
 * @t:	binder transaction for t->from
 *
 * Atomically return the "from" thread and increment the tmp_ref
 * count for the thread to ensure it stays alive until
 * binder_thread_dec_tmpref() is called.
 *
 * Return: the value of t->from
 */
static struct binder_thread *binder_get_txn_from(
		struct binder_transaction *t)
{
	struct binder_thread *from;

	spin_lock(&t->lock);
	from = t->from;
	if (from)
		atomic_inc(&from->tmp_ref);
	spin_unlock(&t->lock);
	return from;
}
/**
 * binder_get_txn_from_and_acq_inner() - get t->from and acquire inner lock
 * @t:	binder transaction for t->from
 *
 * Same as binder_get_txn_from() except it also acquires the proc->inner_lock
 * to guarantee that the thread cannot be released while operating on it.
 * The caller must call binder_inner_proc_unlock() to release the inner lock
 * as well as call binder_dec_thread_txn() to release the reference.
 *
 * Return: the value of t->from
 */
static struct binder_thread *binder_get_txn_from_and_acq_inner(
		struct binder_transaction *t)
{
	struct binder_thread *from;

	from = binder_get_txn_from(t);
	if (!from)
		return NULL;
	binder_inner_proc_lock(from->proc);
	if (t->from) {
		BUG_ON(from != t->from);
		return from;
	}
	binder_inner_proc_unlock(from->proc);
	binder_thread_dec_tmpref(from);
	return NULL;
}
static void binder_free_transaction(struct binder_transaction *t)
{
	if (t->buffer)
		t->buffer->transaction = NULL;
	kfree(t);
	binder_stats_deleted(BINDER_STAT_TRANSACTION);
}
static void binder_send_failed_reply(struct binder_transaction *t,
				     uint32_t error_code)
{
	struct binder_thread *target_thread;
	struct binder_transaction *next;

	BUG_ON(t->flags & TF_ONE_WAY);
	while (1) {
		target_thread = binder_get_txn_from_and_acq_inner(t);
		if (target_thread) {
			binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
				     "send failed reply for transaction %d to %d:%d\n",
				     t->debug_id,
				     target_thread->proc->pid,
				     target_thread->pid);

			binder_pop_transaction_ilocked(target_thread, t);
			if (target_thread->reply_error.cmd == BR_OK) {
				target_thread->reply_error.cmd = error_code;
				binder_enqueue_work_ilocked(
					&target_thread->reply_error.work,
					&target_thread->todo);
				wake_up_interruptible(&target_thread->wait);
			} else {
				WARN(1, "Unexpected reply error: %u\n",
				     target_thread->reply_error.cmd);
			}
			binder_inner_proc_unlock(target_thread->proc);
			binder_thread_dec_tmpref(target_thread);
			binder_free_transaction(t);
			return;
		}
		next = t->from_parent;

		binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
			     "send failed reply for transaction %d, target dead\n",
			     t->debug_id);

		binder_free_transaction(t);
		if (next == NULL) {
			binder_debug(BINDER_DEBUG_DEAD_BINDER,
				     "reply failed, no target thread at root\n");
			return;
		}
		t = next;
		binder_debug(BINDER_DEBUG_DEAD_BINDER,
			     "reply failed, no target thread -- retry %d\n",
			     t->debug_id);
	}
}
/**
 * binder_validate_object() - checks for a valid metadata object in a buffer.
 * @buffer:	binder_buffer that we're parsing.
 * @offset:	offset in the buffer at which to validate an object.
 *
 * Return:	If there's a valid metadata object at @offset in @buffer, the
 *		size of that object. Otherwise, it returns zero.
 */
static size_t binder_validate_object(struct binder_buffer *buffer, u64 offset)
{
	/* Check if we can read a header first */
	struct binder_object_header *hdr;
	size_t object_size = 0;

	if (offset > buffer->data_size - sizeof(*hdr) ||
	    buffer->data_size < sizeof(*hdr) ||
	    !IS_ALIGNED(offset, sizeof(u32)))
		return 0;

	/* Ok, now see if we can read a complete object. */
	hdr = (struct binder_object_header *)(buffer->data + offset);
	switch (hdr->type) {
	case BINDER_TYPE_BINDER:
	case BINDER_TYPE_WEAK_BINDER:
	case BINDER_TYPE_HANDLE:
	case BINDER_TYPE_WEAK_HANDLE:
		object_size = sizeof(struct flat_binder_object);
		break;
	case BINDER_TYPE_FD:
		object_size = sizeof(struct binder_fd_object);
		break;
	case BINDER_TYPE_PTR:
		object_size = sizeof(struct binder_buffer_object);
		break;
	case BINDER_TYPE_FDA:
		object_size = sizeof(struct binder_fd_array_object);
		break;
	default:
		return 0;
	}

	if (offset <= buffer->data_size - object_size &&
	    buffer->data_size >= object_size)
		return object_size;
	else
		return 0;
}
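/*
 * Illustrative sketch (not from the original source): callers typically use
 * this while walking the offsets array of a transaction buffer, e.g.:
 *
 *	for (offp = off_start; offp < off_end; offp++) {
 *		size_t sz = binder_validate_object(buffer, *offp);
 *
 *		if (sz == 0)
 *			break;	// reject or stop on a malformed object
 *		hdr = (struct binder_object_header *)(buffer->data + *offp);
 *		// dispatch on hdr->type, knowing sz bytes are in bounds
 *	}
 */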
/**
 * binder_validate_ptr() - validates binder_buffer_object in a binder_buffer.
 * @b:		binder_buffer containing the object
 * @index:	index in offset array at which the binder_buffer_object is
 *		located
 * @start:	points to the start of the offset array
 * @num_valid:	the number of valid offsets in the offset array
 *
 * Return:	If @index is within the valid range of the offset array
 *		described by @start and @num_valid, and if there's a valid
 *		binder_buffer_object at the offset found in index @index
 *		of the offset array, that object is returned. Otherwise,
 *		%NULL is returned.
 *		Note that the offset found in index @index itself is not
 *		verified; this function assumes that @num_valid elements
 *		from @start were previously verified to have valid offsets.
 */
static struct binder_buffer_object *binder_validate_ptr(struct binder_buffer *b,
							binder_size_t index,
							binder_size_t *start,
							binder_size_t num_valid)
{
	struct binder_buffer_object *buffer_obj;
	binder_size_t *offp;

	if (index >= num_valid)
		return NULL;

	offp = start + index;
	buffer_obj = (struct binder_buffer_object *)(b->data + *offp);
	if (buffer_obj->hdr.type != BINDER_TYPE_PTR)
		return NULL;

	return buffer_obj;
}
/**
 * binder_validate_fixup() - validates pointer/fd fixups happen in order.
 * @b:			transaction buffer
 * @objects_start:	start of objects buffer
 * @buffer:		binder_buffer_object in which to fix up
 * @offset:		start offset in @buffer to fix up
 * @last_obj:		last binder_buffer_object that we fixed up in
 * @last_min_offset:	minimum fixup offset in @last_obj
 *
 * Return:		%true if a fixup in buffer @buffer at offset @offset is
 *			allowed.
 *
 * For safety reasons, we only allow fixups inside a buffer to happen
 * at increasing offsets; additionally, we only allow fixup on the last
 * buffer object that was verified, or one of its parents.
 *
 * Example of what is allowed:
 *
 * A
 *   B (parent = A, offset = 0)
 *   C (parent = A, offset = 16)
 *     D (parent = C, offset = 0)
 *   E (parent = A, offset = 32) // min_offset is 16 (C.parent_offset)
 *
 * Examples of what is not allowed:
 *
 * Decreasing offsets within the same parent:
 * A
 *   C (parent = A, offset = 16)
 *   B (parent = A, offset = 0) // decreasing offset within A
 *
 * Referring to a parent that wasn't the last object or any of its parents:
 * A
 *   B (parent = A, offset = 0)
 *   C (parent = A, offset = 0)
 *   C (parent = A, offset = 16)
 *     D (parent = B, offset = 0) // B is not A or any of A's parents
 */
static bool binder_validate_fixup(struct binder_buffer *b,
				  binder_size_t *objects_start,
				  struct binder_buffer_object *buffer,
				  binder_size_t fixup_offset,
				  struct binder_buffer_object *last_obj,
				  binder_size_t last_min_offset)
{
	if (!last_obj) {
		/* Nothing to fix up in */
		return false;
	}

	while (last_obj != buffer) {
		/*
		 * Safe to retrieve the parent of last_obj, since it
		 * was already previously verified by the driver.
		 */
		if ((last_obj->flags & BINDER_BUFFER_FLAG_HAS_PARENT) == 0)
			return false;
		last_min_offset = last_obj->parent_offset + sizeof(uintptr_t);
		last_obj = (struct binder_buffer_object *)
			(b->data + *(objects_start + last_obj->parent));
	}
	return (fixup_offset >= last_min_offset);
}
1973 static void binder_transaction_buffer_release(struct binder_proc
*proc
,
1974 struct binder_buffer
*buffer
,
1975 binder_size_t
*failed_at
)
1977 binder_size_t
*offp
, *off_start, *off_end;
	int debug_id = buffer->debug_id;

	binder_debug(BINDER_DEBUG_TRANSACTION,
		     "%d buffer release %d, size %zd-%zd, failed at %p\n",
		     proc->pid, buffer->debug_id,
		     buffer->data_size, buffer->offsets_size, failed_at);

	if (buffer->target_node)
		binder_dec_node(buffer->target_node, 1, 0);

	off_start = (binder_size_t *)(buffer->data +
				      ALIGN(buffer->data_size, sizeof(void *)));
	if (failed_at)
		off_end = failed_at;
	else
		off_end = (void *)off_start + buffer->offsets_size;
	for (offp = off_start; offp < off_end; offp++) {
		struct binder_object_header *hdr;
		size_t object_size = binder_validate_object(buffer, *offp);

		if (object_size == 0) {
			pr_err("transaction release %d bad object at offset %lld, size %zd\n",
			       debug_id, (u64)*offp, buffer->data_size);
			continue;
		}
		hdr = (struct binder_object_header *)(buffer->data + *offp);
		switch (hdr->type) {
		case BINDER_TYPE_BINDER:
		case BINDER_TYPE_WEAK_BINDER: {
			struct flat_binder_object *fp;
			struct binder_node *node;

			fp = to_flat_binder_object(hdr);
			node = binder_get_node(proc, fp->binder);
			if (node == NULL) {
				pr_err("transaction release %d bad node %016llx\n",
				       debug_id, (u64)fp->binder);
				break;
			}
			binder_debug(BINDER_DEBUG_TRANSACTION,
				     "        node %d u%016llx\n",
				     node->debug_id, (u64)node->ptr);
			binder_dec_node(node, hdr->type == BINDER_TYPE_BINDER,
					0);
			binder_put_node(node);
		} break;
		case BINDER_TYPE_HANDLE:
		case BINDER_TYPE_WEAK_HANDLE: {
			struct flat_binder_object *fp;
			struct binder_ref_data rdata;
			int ret;

			fp = to_flat_binder_object(hdr);
			ret = binder_dec_ref_for_handle(proc, fp->handle,
				hdr->type == BINDER_TYPE_HANDLE, &rdata);

			if (ret) {
				pr_err("transaction release %d bad handle %d, ret = %d\n",
				       debug_id, fp->handle, ret);
				break;
			}
			binder_debug(BINDER_DEBUG_TRANSACTION,
				     "        ref %d desc %d\n",
				     rdata.debug_id, rdata.desc);
		} break;

		case BINDER_TYPE_FD: {
			struct binder_fd_object *fp = to_binder_fd_object(hdr);

			binder_debug(BINDER_DEBUG_TRANSACTION,
				     "        fd %d\n", fp->fd);
			task_close_fd(proc, fp->fd);
		} break;
		case BINDER_TYPE_PTR:
			/*
			 * Nothing to do here, this will get cleaned up when the
			 * transaction buffer gets freed
			 */
			break;
		case BINDER_TYPE_FDA: {
			struct binder_fd_array_object *fda;
			struct binder_buffer_object *parent;
			uintptr_t parent_buffer;
			u32 *fd_array;
			size_t fd_index;
			binder_size_t fd_buf_size;

			fda = to_binder_fd_array_object(hdr);
			parent = binder_validate_ptr(buffer, fda->parent,
						     off_start,
						     offp - off_start);
			if (!parent) {
				pr_err("transaction release %d bad parent offset",
				       debug_id);
				continue;
			}
			/*
			 * Since the parent was already fixed up, convert it
			 * back to kernel address space to access it
			 */
			parent_buffer = parent->buffer -
				binder_alloc_get_user_buffer_offset(
						&proc->alloc);

			fd_buf_size = sizeof(u32) * fda->num_fds;
			if (fda->num_fds >= SIZE_MAX / sizeof(u32)) {
				pr_err("transaction release %d invalid number of fds (%lld)\n",
				       debug_id, (u64)fda->num_fds);
				continue;
			}
			if (fd_buf_size > parent->length ||
			    fda->parent_offset > parent->length - fd_buf_size) {
				/* No space for all file descriptors here. */
				pr_err("transaction release %d not enough space for %lld fds in buffer\n",
				       debug_id, (u64)fda->num_fds);
				continue;
			}
			fd_array = (u32 *)(parent_buffer + fda->parent_offset);
			for (fd_index = 0; fd_index < fda->num_fds; fd_index++)
				task_close_fd(proc, fd_array[fd_index]);
		} break;
		default:
			pr_err("transaction release %d bad object type %x\n",
			       debug_id, hdr->type);
			break;
		}
	}
}

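/*
 * Object translation helpers. Each object embedded in a transaction
 * buffer (flat_binder_object, binder_fd_object, binder_fd_array_object,
 * binder_buffer_object) is rewritten by the helpers below so that it is
 * meaningful in the receiving process: local binders become handles,
 * handles that refer back to the target become binders again, and file
 * descriptors are re-installed in the target's fd table.
 */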
static int binder_translate_binder(struct flat_binder_object *fp,
				   struct binder_transaction *t,
				   struct binder_thread *thread)
{
	struct binder_node *node;
	struct binder_proc *proc = thread->proc;
	struct binder_proc *target_proc = t->to_proc;
	struct binder_ref_data rdata;
	int ret = 0;

	node = binder_get_node(proc, fp->binder);
	if (!node) {
		node = binder_new_node(proc, fp);
		if (!node)
			return -ENOMEM;
	}
	if (fp->cookie != node->cookie) {
		binder_user_error("%d:%d sending u%016llx node %d, cookie mismatch %016llx != %016llx\n",
				  proc->pid, thread->pid, (u64)fp->binder,
				  node->debug_id, (u64)fp->cookie,
				  (u64)node->cookie);
		ret = -EINVAL;
		goto done;
	}
	if (security_binder_transfer_binder(proc->tsk, target_proc->tsk)) {
		ret = -EPERM;
		goto done;
	}

	ret = binder_inc_ref_for_node(target_proc, node,
			fp->hdr.type == BINDER_TYPE_BINDER,
			&thread->todo, &rdata);
	if (ret)
		goto done;

	if (fp->hdr.type == BINDER_TYPE_BINDER)
		fp->hdr.type = BINDER_TYPE_HANDLE;
	else
		fp->hdr.type = BINDER_TYPE_WEAK_HANDLE;
	fp->binder = 0;
	fp->handle = rdata.desc;
	fp->cookie = 0;

	trace_binder_transaction_node_to_ref(t, node, &rdata);
	binder_debug(BINDER_DEBUG_TRANSACTION,
		     "        node %d u%016llx -> ref %d desc %d\n",
		     node->debug_id, (u64)node->ptr,
		     rdata.debug_id, rdata.desc);
done:
	binder_put_node(node);
	return ret;
}

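/*
 * binder_translate_handle() resolves the sender's handle to the
 * underlying node. If the node lives in the target process, the object
 * collapses back to a (WEAK_)BINDER carrying the node's ptr/cookie;
 * otherwise a reference is created in the target and the handle is
 * rewritten to the target's descriptor.
 */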
static int binder_translate_handle(struct flat_binder_object *fp,
				   struct binder_transaction *t,
				   struct binder_thread *thread)
{
	struct binder_proc *proc = thread->proc;
	struct binder_proc *target_proc = t->to_proc;
	struct binder_node *node;
	struct binder_ref_data src_rdata;
	int ret = 0;

	node = binder_get_node_from_ref(proc, fp->handle,
			fp->hdr.type == BINDER_TYPE_HANDLE, &src_rdata);
	if (!node) {
		binder_user_error("%d:%d got transaction with invalid handle, %d\n",
				  proc->pid, thread->pid, fp->handle);
		return -EINVAL;
	}
	if (security_binder_transfer_binder(proc->tsk, target_proc->tsk)) {
		ret = -EPERM;
		goto done;
	}

	binder_node_lock(node);
	if (node->proc == target_proc) {
		if (fp->hdr.type == BINDER_TYPE_HANDLE)
			fp->hdr.type = BINDER_TYPE_BINDER;
		else
			fp->hdr.type = BINDER_TYPE_WEAK_BINDER;
		fp->binder = node->ptr;
		fp->cookie = node->cookie;
		binder_inner_proc_lock(node->proc);
		binder_inc_node_nilocked(node,
					 fp->hdr.type == BINDER_TYPE_BINDER,
					 0, NULL);
		binder_inner_proc_unlock(node->proc);
		trace_binder_transaction_ref_to_node(t, node, &src_rdata);
		binder_debug(BINDER_DEBUG_TRANSACTION,
			     "        ref %d desc %d -> node %d u%016llx\n",
			     src_rdata.debug_id, src_rdata.desc, node->debug_id,
			     (u64)node->ptr);
		binder_node_unlock(node);
	} else {
		struct binder_ref_data dest_rdata;

		binder_node_unlock(node);
		ret = binder_inc_ref_for_node(target_proc, node,
				fp->hdr.type == BINDER_TYPE_HANDLE,
				NULL, &dest_rdata);
		if (ret)
			goto done;

		fp->binder = 0;
		fp->handle = dest_rdata.desc;
		fp->cookie = 0;
		trace_binder_transaction_ref_to_ref(t, node, &src_rdata,
						    &dest_rdata);
		binder_debug(BINDER_DEBUG_TRANSACTION,
			     "        ref %d desc %d -> ref %d desc %d (node %d)\n",
			     src_rdata.debug_id, src_rdata.desc,
			     dest_rdata.debug_id, dest_rdata.desc,
			     node->debug_id);
	}
done:
	binder_put_node(node);
	return ret;
}

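/*
 * A transferred fd is only accepted when the target allows fds
 * (TF_ACCEPT_FDS on the transaction being replied to, or accept_fds on
 * the target node). On success the sender's fd is duplicated into an
 * unused descriptor in the target process and the new value is returned
 * so the caller can patch it into the transaction buffer.
 */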
static int binder_translate_fd(int fd,
			       struct binder_transaction *t,
			       struct binder_thread *thread,
			       struct binder_transaction *in_reply_to)
{
	struct binder_proc *proc = thread->proc;
	struct binder_proc *target_proc = t->to_proc;
	int target_fd;
	struct file *file;
	int ret;
	bool target_allows_fd;

	if (in_reply_to)
		target_allows_fd = !!(in_reply_to->flags & TF_ACCEPT_FDS);
	else
		target_allows_fd = t->buffer->target_node->accept_fds;
	if (!target_allows_fd) {
		binder_user_error("%d:%d got %s with fd, %d, but target does not allow fds\n",
				  proc->pid, thread->pid,
				  in_reply_to ? "reply" : "transaction",
				  fd);
		ret = -EPERM;
		goto err_fd_not_accepted;
	}

	file = fget(fd);
	if (!file) {
		binder_user_error("%d:%d got transaction with invalid fd, %d\n",
				  proc->pid, thread->pid, fd);
		ret = -EBADF;
		goto err_fget;
	}
	ret = security_binder_transfer_file(proc->tsk, target_proc->tsk, file);
	if (ret < 0) {
		ret = -EPERM;
		goto err_security;
	}

	target_fd = task_get_unused_fd_flags(target_proc, O_CLOEXEC);
	if (target_fd < 0) {
		ret = -ENOMEM;
		goto err_get_unused_fd;
	}
	task_fd_install(target_proc, target_fd, file);
	trace_binder_transaction_fd(t, fd, target_fd);
	binder_debug(BINDER_DEBUG_TRANSACTION, "        fd %d -> %d\n",
		     fd, target_fd);

	return target_fd;

err_get_unused_fd:
err_security:
	fput(file);
err_fget:
err_fd_not_accepted:
	return ret;
}

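/*
 * BINDER_TYPE_FDA describes an array of fds stored inside a parent
 * BINDER_TYPE_PTR buffer. The parent has already been copied into the
 * kernel and fixed up to a target-space address, so it is converted
 * back to a kernel pointer here before each fd slot is translated in
 * place.
 */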
static int binder_translate_fd_array(struct binder_fd_array_object *fda,
				     struct binder_buffer_object *parent,
				     struct binder_transaction *t,
				     struct binder_thread *thread,
				     struct binder_transaction *in_reply_to)
{
	binder_size_t fdi, fd_buf_size, num_installed_fds;
	int target_fd;
	uintptr_t parent_buffer;
	u32 *fd_array;
	struct binder_proc *proc = thread->proc;
	struct binder_proc *target_proc = t->to_proc;

	fd_buf_size = sizeof(u32) * fda->num_fds;
	if (fda->num_fds >= SIZE_MAX / sizeof(u32)) {
		binder_user_error("%d:%d got transaction with invalid number of fds (%lld)\n",
				  proc->pid, thread->pid, (u64)fda->num_fds);
		return -EINVAL;
	}
	if (fd_buf_size > parent->length ||
	    fda->parent_offset > parent->length - fd_buf_size) {
		/* No space for all file descriptors here. */
		binder_user_error("%d:%d not enough space to store %lld fds in buffer\n",
				  proc->pid, thread->pid, (u64)fda->num_fds);
		return -EINVAL;
	}
	/*
	 * Since the parent was already fixed up, convert it
	 * back to the kernel address space to access it
	 */
	parent_buffer = parent->buffer -
		binder_alloc_get_user_buffer_offset(&target_proc->alloc);
	fd_array = (u32 *)(parent_buffer + fda->parent_offset);
	if (!IS_ALIGNED((unsigned long)fd_array, sizeof(u32))) {
		binder_user_error("%d:%d parent offset not aligned correctly.\n",
				  proc->pid, thread->pid);
		return -EINVAL;
	}
	for (fdi = 0; fdi < fda->num_fds; fdi++) {
		target_fd = binder_translate_fd(fd_array[fdi], t, thread,
						in_reply_to);
		if (target_fd < 0)
			goto err_translate_fd_failed;
		fd_array[fdi] = target_fd;
	}
	return 0;

err_translate_fd_failed:
	/*
	 * Failed to allocate fd or security error, free fds
	 * installed so far.
	 */
	num_installed_fds = fdi;
	for (fdi = 0; fdi < num_installed_fds; fdi++)
		task_close_fd(target_proc, fd_array[fdi]);
	return target_fd;
}

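/*
 * binder_fixup_parent() patches a parent buffer so that its pointer to
 * this child buffer refers to the child's location in the target
 * process. Fixups must be requested in strictly increasing offset
 * order, which binder_validate_fixup() enforces.
 */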
static int binder_fixup_parent(struct binder_transaction *t,
			       struct binder_thread *thread,
			       struct binder_buffer_object *bp,
			       binder_size_t *off_start,
			       binder_size_t num_valid,
			       struct binder_buffer_object *last_fixup_obj,
			       binder_size_t last_fixup_min_off)
{
	struct binder_buffer_object *parent;
	u8 *parent_buffer;
	struct binder_buffer *b = t->buffer;
	struct binder_proc *proc = thread->proc;
	struct binder_proc *target_proc = t->to_proc;

	if (!(bp->flags & BINDER_BUFFER_FLAG_HAS_PARENT))
		return 0;

	parent = binder_validate_ptr(b, bp->parent, off_start, num_valid);
	if (!parent) {
		binder_user_error("%d:%d got transaction with invalid parent offset or type\n",
				  proc->pid, thread->pid);
		return -EINVAL;
	}

	if (!binder_validate_fixup(b, off_start,
				   parent, bp->parent_offset,
				   last_fixup_obj,
				   last_fixup_min_off)) {
		binder_user_error("%d:%d got transaction with out-of-order buffer fixup\n",
				  proc->pid, thread->pid);
		return -EINVAL;
	}

	if (parent->length < sizeof(binder_uintptr_t) ||
	    bp->parent_offset > parent->length - sizeof(binder_uintptr_t)) {
		/* No space for a pointer here! */
		binder_user_error("%d:%d got transaction with invalid parent offset\n",
				  proc->pid, thread->pid);
		return -EINVAL;
	}
	parent_buffer = (u8 *)((uintptr_t)parent->buffer -
			binder_alloc_get_user_buffer_offset(
				&target_proc->alloc));
	*(binder_uintptr_t *)(parent_buffer + bp->parent_offset) = bp->buffer;

	return 0;
}

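/*
 * binder_transaction() is the heart of the driver: it resolves the
 * target (a handle, the context manager, or the thread being replied
 * to), allocates a buffer in the target's address space, copies the
 * data and offset arrays from the sender, translates every embedded
 * object, and finally queues the work on the target's todo list and
 * wakes it.
 *
 * For reference, a minimal and purely illustrative (untested) userspace
 * sketch of driving this path is shown below; "binder_fd" is assumed to
 * be an already-open /dev/binder descriptor, and the handle and code
 * values are placeholders:
 *
 *	struct binder_transaction_data tr = {0};
 *	struct binder_write_read bwr = {0};
 *	uint32_t cmd = BC_TRANSACTION;
 *	char wbuf[sizeof(cmd) + sizeof(tr)];
 *
 *	tr.target.handle = 0;	// 0 == context manager (placeholder)
 *	tr.code = 1;		// example method code (placeholder)
 *	memcpy(wbuf, &cmd, sizeof(cmd));
 *	memcpy(wbuf + sizeof(cmd), &tr, sizeof(tr));
 *	bwr.write_size = sizeof(wbuf);
 *	bwr.write_buffer = (uintptr_t)wbuf;
 *	ioctl(binder_fd, BINDER_WRITE_READ, &bwr);
 */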
static void binder_transaction(struct binder_proc *proc,
			       struct binder_thread *thread,
			       struct binder_transaction_data *tr, int reply,
			       binder_size_t extra_buffers_size)
{
	int ret;
	struct binder_transaction *t;
	struct binder_work *tcomplete;
	binder_size_t *offp, *off_end, *off_start;
	binder_size_t off_min;
	u8 *sg_bufp, *sg_buf_end;
	struct binder_proc *target_proc = NULL;
	struct binder_thread *target_thread = NULL;
	struct binder_node *target_node = NULL;
	struct list_head *target_list;
	wait_queue_head_t *target_wait;
	struct binder_transaction *in_reply_to = NULL;
	struct binder_transaction_log_entry *e;
	uint32_t return_error = 0;
	uint32_t return_error_param = 0;
	uint32_t return_error_line = 0;
	struct binder_buffer_object *last_fixup_obj = NULL;
	binder_size_t last_fixup_min_off = 0;
	struct binder_context *context = proc->context;
	int t_debug_id = atomic_inc_return(&binder_last_id);

	e = binder_transaction_log_add(&binder_transaction_log);
	e->debug_id = t_debug_id;
	e->call_type = reply ? 2 : !!(tr->flags & TF_ONE_WAY);
	e->from_proc = proc->pid;
	e->from_thread = thread->pid;
	e->target_handle = tr->target.handle;
	e->data_size = tr->data_size;
	e->offsets_size = tr->offsets_size;
	e->context_name = proc->context->name;

	if (reply) {
		binder_inner_proc_lock(proc);
		in_reply_to = thread->transaction_stack;
		if (in_reply_to == NULL) {
			binder_inner_proc_unlock(proc);
			binder_user_error("%d:%d got reply transaction with no transaction stack\n",
					  proc->pid, thread->pid);
			return_error = BR_FAILED_REPLY;
			return_error_param = -EPROTO;
			return_error_line = __LINE__;
			goto err_empty_call_stack;
		}
		if (in_reply_to->to_thread != thread) {
			spin_lock(&in_reply_to->lock);
			binder_user_error("%d:%d got reply transaction with bad transaction stack, transaction %d has target %d:%d\n",
				proc->pid, thread->pid, in_reply_to->debug_id,
				in_reply_to->to_proc ?
				in_reply_to->to_proc->pid : 0,
				in_reply_to->to_thread ?
				in_reply_to->to_thread->pid : 0);
			spin_unlock(&in_reply_to->lock);
			binder_inner_proc_unlock(proc);
			return_error = BR_FAILED_REPLY;
			return_error_param = -EPROTO;
			return_error_line = __LINE__;
			in_reply_to = NULL;
			goto err_bad_call_stack;
		}
		thread->transaction_stack = in_reply_to->to_parent;
		binder_inner_proc_unlock(proc);
		binder_set_nice(in_reply_to->saved_priority);
		target_thread = binder_get_txn_from_and_acq_inner(in_reply_to);
		if (target_thread == NULL) {
			return_error = BR_DEAD_REPLY;
			return_error_line = __LINE__;
			goto err_dead_binder;
		}
		if (target_thread->transaction_stack != in_reply_to) {
			binder_user_error("%d:%d got reply transaction with bad target transaction stack %d, expected %d\n",
				proc->pid, thread->pid,
				target_thread->transaction_stack ?
				target_thread->transaction_stack->debug_id : 0,
				in_reply_to->debug_id);
			binder_inner_proc_unlock(target_thread->proc);
			return_error = BR_FAILED_REPLY;
			return_error_param = -EPROTO;
			return_error_line = __LINE__;
			in_reply_to = NULL;
			target_thread = NULL;
			goto err_dead_binder;
		}
		target_proc = target_thread->proc;
		target_proc->tmp_ref++;
		binder_inner_proc_unlock(target_thread->proc);
	} else {
		if (tr->target.handle) {
			struct binder_ref *ref;

			/*
			 * There must already be a strong ref
			 * on this node. If so, do a strong
			 * increment on the node to ensure it
			 * stays alive until the transaction is
			 * done.
			 */
			ref = binder_get_ref(proc, tr->target.handle, true);
			if (ref) {
				binder_inc_node(ref->node, 1, 0, NULL);
				target_node = ref->node;
			}
			if (target_node == NULL) {
				binder_user_error("%d:%d got transaction to invalid handle\n",
					proc->pid, thread->pid);
				return_error = BR_FAILED_REPLY;
				return_error_param = -EINVAL;
				return_error_line = __LINE__;
				goto err_invalid_target_handle;
			}
		} else {
			mutex_lock(&context->context_mgr_node_lock);
			target_node = context->binder_context_mgr_node;
			if (target_node == NULL) {
				return_error = BR_DEAD_REPLY;
				mutex_unlock(&context->context_mgr_node_lock);
				return_error_line = __LINE__;
				goto err_no_context_mgr_node;
			}
			binder_inc_node(target_node, 1, 0, NULL);
			mutex_unlock(&context->context_mgr_node_lock);
		}
		e->to_node = target_node->debug_id;
		binder_node_lock(target_node);
		target_proc = target_node->proc;
		if (target_proc == NULL) {
			binder_node_unlock(target_node);
			return_error = BR_DEAD_REPLY;
			return_error_line = __LINE__;
			goto err_dead_binder;
		}
		binder_inner_proc_lock(target_proc);
		target_proc->tmp_ref++;
		binder_inner_proc_unlock(target_proc);
		binder_node_unlock(target_node);
		if (security_binder_transaction(proc->tsk,
						target_proc->tsk) < 0) {
			return_error = BR_FAILED_REPLY;
			return_error_param = -EPERM;
			return_error_line = __LINE__;
			goto err_invalid_target_handle;
		}
		binder_inner_proc_lock(proc);
		if (!(tr->flags & TF_ONE_WAY) && thread->transaction_stack) {
			struct binder_transaction *tmp;

			tmp = thread->transaction_stack;
			if (tmp->to_thread != thread) {
				spin_lock(&tmp->lock);
				binder_user_error("%d:%d got new transaction with bad transaction stack, transaction %d has target %d:%d\n",
					proc->pid, thread->pid, tmp->debug_id,
					tmp->to_proc ? tmp->to_proc->pid : 0,
					tmp->to_thread ?
					tmp->to_thread->pid : 0);
				spin_unlock(&tmp->lock);
				binder_inner_proc_unlock(proc);
				return_error = BR_FAILED_REPLY;
				return_error_param = -EPROTO;
				return_error_line = __LINE__;
				goto err_bad_call_stack;
			}
			while (tmp) {
				struct binder_thread *from;

				spin_lock(&tmp->lock);
				from = tmp->from;
				if (from && from->proc == target_proc) {
					atomic_inc(&from->tmp_ref);
					target_thread = from;
					spin_unlock(&tmp->lock);
					break;
				}
				spin_unlock(&tmp->lock);
				tmp = tmp->from_parent;
			}
		}
		binder_inner_proc_unlock(proc);
	}
	if (target_thread) {
		e->to_thread = target_thread->pid;
		target_list = &target_thread->todo;
		target_wait = &target_thread->wait;
	} else {
		target_list = &target_proc->todo;
		target_wait = &target_proc->wait;
	}
	e->to_proc = target_proc->pid;

	/* TODO: reuse incoming transaction for reply */
	t = kzalloc(sizeof(*t), GFP_KERNEL);
	if (t == NULL) {
		return_error = BR_FAILED_REPLY;
		return_error_param = -ENOMEM;
		return_error_line = __LINE__;
		goto err_alloc_t_failed;
	}
	binder_stats_created(BINDER_STAT_TRANSACTION);
	spin_lock_init(&t->lock);

	tcomplete = kzalloc(sizeof(*tcomplete), GFP_KERNEL);
	if (tcomplete == NULL) {
		return_error = BR_FAILED_REPLY;
		return_error_param = -ENOMEM;
		return_error_line = __LINE__;
		goto err_alloc_tcomplete_failed;
	}
	binder_stats_created(BINDER_STAT_TRANSACTION_COMPLETE);

	t->debug_id = t_debug_id;

	if (reply)
		binder_debug(BINDER_DEBUG_TRANSACTION,
			     "%d:%d BC_REPLY %d -> %d:%d, data %016llx-%016llx size %lld-%lld-%lld\n",
			     proc->pid, thread->pid, t->debug_id,
			     target_proc->pid, target_thread->pid,
			     (u64)tr->data.ptr.buffer,
			     (u64)tr->data.ptr.offsets,
			     (u64)tr->data_size, (u64)tr->offsets_size,
			     (u64)extra_buffers_size);
	else
		binder_debug(BINDER_DEBUG_TRANSACTION,
			     "%d:%d BC_TRANSACTION %d -> %d - node %d, data %016llx-%016llx size %lld-%lld-%lld\n",
			     proc->pid, thread->pid, t->debug_id,
			     target_proc->pid, target_node->debug_id,
			     (u64)tr->data.ptr.buffer,
			     (u64)tr->data.ptr.offsets,
			     (u64)tr->data_size, (u64)tr->offsets_size,
			     (u64)extra_buffers_size);

	if (!reply && !(tr->flags & TF_ONE_WAY))
		t->from = thread;
	else
		t->from = NULL;
	t->sender_euid = task_euid(proc->tsk);
	t->to_proc = target_proc;
	t->to_thread = target_thread;
	t->code = tr->code;
	t->flags = tr->flags;
	t->priority = task_nice(current);

	trace_binder_transaction(reply, t, target_node);

	t->buffer = binder_alloc_new_buf(&target_proc->alloc, tr->data_size,
		tr->offsets_size, extra_buffers_size,
		!reply && (t->flags & TF_ONE_WAY));
	if (IS_ERR(t->buffer)) {
		/*
		 * -ESRCH indicates VMA cleared. The target is dying.
		 */
		return_error_param = PTR_ERR(t->buffer);
		return_error = return_error_param == -ESRCH ?
			BR_DEAD_REPLY : BR_FAILED_REPLY;
		return_error_line = __LINE__;
		t->buffer = NULL;
		goto err_binder_alloc_buf_failed;
	}
	t->buffer->allow_user_free = 0;
	t->buffer->debug_id = t->debug_id;
	t->buffer->transaction = t;
	t->buffer->target_node = target_node;
	trace_binder_transaction_alloc_buf(t->buffer);
	off_start = (binder_size_t *)(t->buffer->data +
				      ALIGN(tr->data_size, sizeof(void *)));
	offp = off_start;

	if (copy_from_user(t->buffer->data, (const void __user *)(uintptr_t)
			   tr->data.ptr.buffer, tr->data_size)) {
		binder_user_error("%d:%d got transaction with invalid data ptr\n",
				proc->pid, thread->pid);
		return_error = BR_FAILED_REPLY;
		return_error_param = -EFAULT;
		return_error_line = __LINE__;
		goto err_copy_data_failed;
	}
	if (copy_from_user(offp, (const void __user *)(uintptr_t)
			   tr->data.ptr.offsets, tr->offsets_size)) {
		binder_user_error("%d:%d got transaction with invalid offsets ptr\n",
				proc->pid, thread->pid);
		return_error = BR_FAILED_REPLY;
		return_error_param = -EFAULT;
		return_error_line = __LINE__;
		goto err_copy_data_failed;
	}
	if (!IS_ALIGNED(tr->offsets_size, sizeof(binder_size_t))) {
		binder_user_error("%d:%d got transaction with invalid offsets size, %lld\n",
				proc->pid, thread->pid, (u64)tr->offsets_size);
		return_error = BR_FAILED_REPLY;
		return_error_param = -EINVAL;
		return_error_line = __LINE__;
		goto err_bad_offset;
	}
	if (!IS_ALIGNED(extra_buffers_size, sizeof(u64))) {
		binder_user_error("%d:%d got transaction with unaligned buffers size, %lld\n",
				  proc->pid, thread->pid,
				  (u64)extra_buffers_size);
		return_error = BR_FAILED_REPLY;
		return_error_param = -EINVAL;
		return_error_line = __LINE__;
		goto err_bad_offset;
	}
	off_end = (void *)off_start + tr->offsets_size;
	sg_bufp = (u8 *)(PTR_ALIGN(off_end, sizeof(void *)));
	sg_buf_end = sg_bufp + extra_buffers_size;
	off_min = 0;
	for (; offp < off_end; offp++) {
		struct binder_object_header *hdr;
		size_t object_size = binder_validate_object(t->buffer, *offp);

		if (object_size == 0 || *offp < off_min) {
			binder_user_error("%d:%d got transaction with invalid offset (%lld, min %lld max %lld) or object.\n",
					  proc->pid, thread->pid, (u64)*offp,
					  (u64)off_min,
					  (u64)t->buffer->data_size);
			return_error = BR_FAILED_REPLY;
			return_error_param = -EINVAL;
			return_error_line = __LINE__;
			goto err_bad_offset;
		}

		hdr = (struct binder_object_header *)(t->buffer->data + *offp);
		off_min = *offp + object_size;
		switch (hdr->type) {
		case BINDER_TYPE_BINDER:
		case BINDER_TYPE_WEAK_BINDER: {
			struct flat_binder_object *fp;

			fp = to_flat_binder_object(hdr);
			ret = binder_translate_binder(fp, t, thread);
			if (ret < 0) {
				return_error = BR_FAILED_REPLY;
				return_error_param = ret;
				return_error_line = __LINE__;
				goto err_translate_failed;
			}
		} break;
		case BINDER_TYPE_HANDLE:
		case BINDER_TYPE_WEAK_HANDLE: {
			struct flat_binder_object *fp;

			fp = to_flat_binder_object(hdr);
			ret = binder_translate_handle(fp, t, thread);
			if (ret < 0) {
				return_error = BR_FAILED_REPLY;
				return_error_param = ret;
				return_error_line = __LINE__;
				goto err_translate_failed;
			}
		} break;

		case BINDER_TYPE_FD: {
			struct binder_fd_object *fp = to_binder_fd_object(hdr);
			int target_fd = binder_translate_fd(fp->fd, t, thread,
							    in_reply_to);

			if (target_fd < 0) {
				return_error = BR_FAILED_REPLY;
				return_error_param = target_fd;
				return_error_line = __LINE__;
				goto err_translate_failed;
			}
			fp->fd = target_fd;
		} break;
		case BINDER_TYPE_FDA: {
			struct binder_fd_array_object *fda =
				to_binder_fd_array_object(hdr);
			struct binder_buffer_object *parent =
				binder_validate_ptr(t->buffer, fda->parent,
						    off_start,
						    offp - off_start);
			if (!parent) {
				binder_user_error("%d:%d got transaction with invalid parent offset or type\n",
						  proc->pid, thread->pid);
				return_error = BR_FAILED_REPLY;
				return_error_param = -EINVAL;
				return_error_line = __LINE__;
				goto err_bad_parent;
			}
			if (!binder_validate_fixup(t->buffer, off_start,
						   parent, fda->parent_offset,
						   last_fixup_obj,
						   last_fixup_min_off)) {
				binder_user_error("%d:%d got transaction with out-of-order buffer fixup\n",
						  proc->pid, thread->pid);
				return_error = BR_FAILED_REPLY;
				return_error_param = -EINVAL;
				return_error_line = __LINE__;
				goto err_bad_parent;
			}
			ret = binder_translate_fd_array(fda, parent, t, thread,
							in_reply_to);
			if (ret < 0) {
				return_error = BR_FAILED_REPLY;
				return_error_param = ret;
				return_error_line = __LINE__;
				goto err_translate_failed;
			}
			last_fixup_obj = parent;
			last_fixup_min_off =
				fda->parent_offset + sizeof(u32) * fda->num_fds;
		} break;
		case BINDER_TYPE_PTR: {
			struct binder_buffer_object *bp =
				to_binder_buffer_object(hdr);
			size_t buf_left = sg_buf_end - sg_bufp;

			if (bp->length > buf_left) {
				binder_user_error("%d:%d got transaction with too large buffer\n",
						  proc->pid, thread->pid);
				return_error = BR_FAILED_REPLY;
				return_error_param = -EINVAL;
				return_error_line = __LINE__;
				goto err_bad_offset;
			}
			if (copy_from_user(sg_bufp,
					   (const void __user *)(uintptr_t)
					   bp->buffer, bp->length)) {
				binder_user_error("%d:%d got transaction with invalid offsets ptr\n",
						  proc->pid, thread->pid);
				return_error_param = -EFAULT;
				return_error = BR_FAILED_REPLY;
				return_error_line = __LINE__;
				goto err_copy_data_failed;
			}
			/* Fixup buffer pointer to target proc address space */
			bp->buffer = (uintptr_t)sg_bufp +
				binder_alloc_get_user_buffer_offset(
						&target_proc->alloc);
			sg_bufp += ALIGN(bp->length, sizeof(u64));

			ret = binder_fixup_parent(t, thread, bp, off_start,
						  offp - off_start,
						  last_fixup_obj,
						  last_fixup_min_off);
			if (ret < 0) {
				return_error = BR_FAILED_REPLY;
				return_error_param = ret;
				return_error_line = __LINE__;
				goto err_translate_failed;
			}
			last_fixup_obj = bp;
			last_fixup_min_off = 0;
		} break;
		default:
			binder_user_error("%d:%d got transaction with invalid object type, %x\n",
				proc->pid, thread->pid, hdr->type);
			return_error = BR_FAILED_REPLY;
			return_error_param = -EINVAL;
			return_error_line = __LINE__;
			goto err_bad_object_type;
		}
	}
	tcomplete->type = BINDER_WORK_TRANSACTION_COMPLETE;
	binder_enqueue_work(proc, tcomplete, &thread->todo);
	t->work.type = BINDER_WORK_TRANSACTION;

	if (reply) {
		binder_inner_proc_lock(target_proc);
		if (target_thread->is_dead) {
			binder_inner_proc_unlock(target_proc);
			goto err_dead_proc_or_thread;
		}
		BUG_ON(t->buffer->async_transaction != 0);
		binder_pop_transaction_ilocked(target_thread, in_reply_to);
		binder_enqueue_work_ilocked(&t->work, target_list);
		binder_inner_proc_unlock(target_proc);
		binder_free_transaction(in_reply_to);
	} else if (!(t->flags & TF_ONE_WAY)) {
		BUG_ON(t->buffer->async_transaction != 0);
		binder_inner_proc_lock(proc);
		t->from_parent = thread->transaction_stack;
		thread->transaction_stack = t;
		binder_inner_proc_unlock(proc);
		binder_inner_proc_lock(target_proc);
		if (target_proc->is_dead ||
		    (target_thread && target_thread->is_dead)) {
			binder_inner_proc_unlock(target_proc);
			binder_inner_proc_lock(proc);
			binder_pop_transaction_ilocked(thread, t);
			binder_inner_proc_unlock(proc);
			goto err_dead_proc_or_thread;
		}
		binder_enqueue_work_ilocked(&t->work, target_list);
		binder_inner_proc_unlock(target_proc);
	} else {
		BUG_ON(target_node == NULL);
		BUG_ON(t->buffer->async_transaction != 1);
		binder_node_lock(target_node);
		if (target_node->has_async_transaction) {
			target_list = &target_node->async_todo;
			target_wait = NULL;
		} else
			target_node->has_async_transaction = 1;
		/*
		 * Test/set of has_async_transaction
		 * must be atomic with enqueue on
		 * async_todo
		 */
		binder_inner_proc_lock(target_proc);
		if (target_proc->is_dead ||
		    (target_thread && target_thread->is_dead)) {
			binder_inner_proc_unlock(target_proc);
			binder_node_unlock(target_node);
			goto err_dead_proc_or_thread;
		}
		binder_enqueue_work_ilocked(&t->work, target_list);
		binder_inner_proc_unlock(target_proc);
		binder_node_unlock(target_node);
	}
	if (target_wait) {
		if (reply || !(tr->flags & TF_ONE_WAY))
			wake_up_interruptible_sync(target_wait);
		else
			wake_up_interruptible(target_wait);
	}
	if (target_thread)
		binder_thread_dec_tmpref(target_thread);
	binder_proc_dec_tmpref(target_proc);
	/*
	 * write barrier to synchronize with initialization
	 * of log entry
	 */
	smp_wmb();
	WRITE_ONCE(e->debug_id_done, t_debug_id);
	return;

err_dead_proc_or_thread:
	return_error = BR_DEAD_REPLY;
	return_error_line = __LINE__;
err_translate_failed:
err_bad_object_type:
err_bad_offset:
err_bad_parent:
err_copy_data_failed:
	trace_binder_transaction_failed_buffer_release(t->buffer);
	binder_transaction_buffer_release(target_proc, t->buffer, offp);
	target_node = NULL;
	t->buffer->transaction = NULL;
	binder_alloc_free_buf(&target_proc->alloc, t->buffer);
err_binder_alloc_buf_failed:
	kfree(tcomplete);
	binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
err_alloc_tcomplete_failed:
	kfree(t);
	binder_stats_deleted(BINDER_STAT_TRANSACTION);
err_alloc_t_failed:
err_bad_call_stack:
err_empty_call_stack:
err_dead_binder:
err_invalid_target_handle:
err_no_context_mgr_node:
	if (target_thread)
		binder_thread_dec_tmpref(target_thread);
	if (target_proc)
		binder_proc_dec_tmpref(target_proc);
	if (target_node)
		binder_dec_node(target_node, 1, 0);

	binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
		     "%d:%d transaction failed %d/%d, size %lld-%lld line %d\n",
		     proc->pid, thread->pid, return_error, return_error_param,
		     (u64)tr->data_size, (u64)tr->offsets_size,
		     return_error_line);

	{
		struct binder_transaction_log_entry *fe;

		e->return_error = return_error;
		e->return_error_param = return_error_param;
		e->return_error_line = return_error_line;
		fe = binder_transaction_log_add(&binder_transaction_log_failed);
		*fe = *e;
		/*
		 * write barrier to synchronize with initialization
		 * of log entry
		 */
		smp_wmb();
		WRITE_ONCE(e->debug_id_done, t_debug_id);
		WRITE_ONCE(fe->debug_id_done, t_debug_id);
	}

	BUG_ON(thread->return_error.cmd != BR_OK);
	if (in_reply_to) {
		thread->return_error.cmd = BR_TRANSACTION_COMPLETE;
		binder_enqueue_work(thread->proc,
				    &thread->return_error.work,
				    &thread->todo);
		binder_send_failed_reply(in_reply_to, return_error);
	} else {
		thread->return_error.cmd = return_error;
		binder_enqueue_work(thread->proc,
				    &thread->return_error.work,
				    &thread->todo);
	}
}

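/*
 * binder_thread_write() consumes the BC_* command stream written by
 * userspace through BINDER_WRITE_READ. Each iteration decodes one
 * command, updates reference counts, frees buffers, starts transactions
 * or manages death notifications, and advances *consumed so userspace
 * can tell how much of its write buffer was processed.
 */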
static int binder_thread_write(struct binder_proc *proc,
			struct binder_thread *thread,
			binder_uintptr_t binder_buffer, size_t size,
			binder_size_t *consumed)
{
	uint32_t cmd;
	struct binder_context *context = proc->context;
	void __user *buffer = (void __user *)(uintptr_t)binder_buffer;
	void __user *ptr = buffer + *consumed;
	void __user *end = buffer + size;

	while (ptr < end && thread->return_error.cmd == BR_OK) {
		int ret;

		if (get_user(cmd, (uint32_t __user *)ptr))
			return -EFAULT;
		ptr += sizeof(uint32_t);
		trace_binder_command(cmd);
		if (_IOC_NR(cmd) < ARRAY_SIZE(binder_stats.bc)) {
			atomic_inc(&binder_stats.bc[_IOC_NR(cmd)]);
			atomic_inc(&proc->stats.bc[_IOC_NR(cmd)]);
			atomic_inc(&thread->stats.bc[_IOC_NR(cmd)]);
		}
		switch (cmd) {
		case BC_INCREFS:
		case BC_ACQUIRE:
		case BC_RELEASE:
		case BC_DECREFS: {
			uint32_t target;
			const char *debug_string;
			bool strong = cmd == BC_ACQUIRE || cmd == BC_RELEASE;
			bool increment = cmd == BC_INCREFS || cmd == BC_ACQUIRE;
			struct binder_ref_data rdata;

			if (get_user(target, (uint32_t __user *)ptr))
				return -EFAULT;

			ptr += sizeof(uint32_t);
			ret = -1;
			if (increment && !target) {
				struct binder_node *ctx_mgr_node;
				mutex_lock(&context->context_mgr_node_lock);
				ctx_mgr_node = context->binder_context_mgr_node;
				if (ctx_mgr_node)
					ret = binder_inc_ref_for_node(
							proc, ctx_mgr_node,
							strong, NULL, &rdata);
				mutex_unlock(&context->context_mgr_node_lock);
			}
			if (ret)
				ret = binder_update_ref_for_handle(
						proc, target, increment, strong,
						&rdata);
			if (!ret && rdata.desc != target) {
				binder_user_error("%d:%d tried to acquire reference to desc %d, got %d instead\n",
					proc->pid, thread->pid,
					target, rdata.desc);
			}
			switch (cmd) {
			case BC_INCREFS:
				debug_string = "IncRefs";
				break;
			case BC_ACQUIRE:
				debug_string = "Acquire";
				break;
			case BC_RELEASE:
				debug_string = "Release";
				break;
			case BC_DECREFS:
			default:
				debug_string = "DecRefs";
				break;
			}
			if (ret) {
				binder_user_error("%d:%d %s %d refcount change on invalid ref %d ret %d\n",
					proc->pid, thread->pid, debug_string,
					strong, target, ret);
				break;
			}
			binder_debug(BINDER_DEBUG_USER_REFS,
				     "%d:%d %s ref %d desc %d s %d w %d\n",
				     proc->pid, thread->pid, debug_string,
				     rdata.debug_id, rdata.desc, rdata.strong,
				     rdata.weak);
			break;
		}
		case BC_INCREFS_DONE:
		case BC_ACQUIRE_DONE: {
			binder_uintptr_t node_ptr;
			binder_uintptr_t cookie;
			struct binder_node *node;
			bool free_node;

			if (get_user(node_ptr, (binder_uintptr_t __user *)ptr))
				return -EFAULT;
			ptr += sizeof(binder_uintptr_t);
			if (get_user(cookie, (binder_uintptr_t __user *)ptr))
				return -EFAULT;
			ptr += sizeof(binder_uintptr_t);
			node = binder_get_node(proc, node_ptr);
			if (node == NULL) {
				binder_user_error("%d:%d %s u%016llx no match\n",
					proc->pid, thread->pid,
					cmd == BC_INCREFS_DONE ?
					"BC_INCREFS_DONE" :
					"BC_ACQUIRE_DONE",
					(u64)node_ptr);
				break;
			}
			if (cookie != node->cookie) {
				binder_user_error("%d:%d %s u%016llx node %d cookie mismatch %016llx != %016llx\n",
					proc->pid, thread->pid,
					cmd == BC_INCREFS_DONE ?
					"BC_INCREFS_DONE" : "BC_ACQUIRE_DONE",
					(u64)node_ptr, node->debug_id,
					(u64)cookie, (u64)node->cookie);
				binder_put_node(node);
				break;
			}
			binder_node_inner_lock(node);
			if (cmd == BC_ACQUIRE_DONE) {
				if (node->pending_strong_ref == 0) {
					binder_user_error("%d:%d BC_ACQUIRE_DONE node %d has no pending acquire request\n",
						proc->pid, thread->pid,
						node->debug_id);
					binder_node_inner_unlock(node);
					binder_put_node(node);
					break;
				}
				node->pending_strong_ref = 0;
			} else {
				if (node->pending_weak_ref == 0) {
					binder_user_error("%d:%d BC_INCREFS_DONE node %d has no pending increfs request\n",
						proc->pid, thread->pid,
						node->debug_id);
					binder_node_inner_unlock(node);
					binder_put_node(node);
					break;
				}
				node->pending_weak_ref = 0;
			}
			free_node = binder_dec_node_nilocked(node,
					cmd == BC_ACQUIRE_DONE, 0);
			WARN_ON(free_node);
			binder_debug(BINDER_DEBUG_USER_REFS,
				     "%d:%d %s node %d ls %d lw %d tr %d\n",
				     proc->pid, thread->pid,
				     cmd == BC_INCREFS_DONE ? "BC_INCREFS_DONE" : "BC_ACQUIRE_DONE",
				     node->debug_id, node->local_strong_refs,
				     node->local_weak_refs, node->tmp_refs);
			binder_node_inner_unlock(node);
			binder_put_node(node);
			break;
		}
		case BC_ATTEMPT_ACQUIRE:
			pr_err("BC_ATTEMPT_ACQUIRE not supported\n");
			return -EINVAL;
		case BC_ACQUIRE_RESULT:
			pr_err("BC_ACQUIRE_RESULT not supported\n");
			return -EINVAL;

		case BC_FREE_BUFFER: {
			binder_uintptr_t data_ptr;
			struct binder_buffer *buffer;

			if (get_user(data_ptr, (binder_uintptr_t __user *)ptr))
				return -EFAULT;
			ptr += sizeof(binder_uintptr_t);

			buffer = binder_alloc_prepare_to_free(&proc->alloc,
							      data_ptr);
			if (buffer == NULL) {
				binder_user_error("%d:%d BC_FREE_BUFFER u%016llx no match\n",
					proc->pid, thread->pid, (u64)data_ptr);
				break;
			}
			if (!buffer->allow_user_free) {
				binder_user_error("%d:%d BC_FREE_BUFFER u%016llx matched unreturned buffer\n",
					proc->pid, thread->pid, (u64)data_ptr);
				break;
			}
			binder_debug(BINDER_DEBUG_FREE_BUFFER,
				     "%d:%d BC_FREE_BUFFER u%016llx found buffer %d for %s transaction\n",
				     proc->pid, thread->pid, (u64)data_ptr,
				     buffer->debug_id,
				     buffer->transaction ? "active" : "finished");

			if (buffer->transaction) {
				buffer->transaction->buffer = NULL;
				buffer->transaction = NULL;
			}
			if (buffer->async_transaction && buffer->target_node) {
				struct binder_node *buf_node;
				struct binder_work *w;

				buf_node = buffer->target_node;
				binder_node_inner_lock(buf_node);
				BUG_ON(!buf_node->has_async_transaction);
				BUG_ON(buf_node->proc != proc);
				w = binder_dequeue_work_head_ilocked(
						&buf_node->async_todo);
				if (!w)
					buf_node->has_async_transaction = 0;
				else
					binder_enqueue_work_ilocked(
							w, &proc->todo);
				binder_node_inner_unlock(buf_node);
			}
			trace_binder_transaction_buffer_release(buffer);
			binder_transaction_buffer_release(proc, buffer, NULL);
			binder_alloc_free_buf(&proc->alloc, buffer);
			break;
		}

		case BC_TRANSACTION_SG:
		case BC_REPLY_SG: {
			struct binder_transaction_data_sg tr;

			if (copy_from_user(&tr, ptr, sizeof(tr)))
				return -EFAULT;
			ptr += sizeof(tr);
			binder_transaction(proc, thread, &tr.transaction_data,
					   cmd == BC_REPLY_SG, tr.buffers_size);
			break;
		}
		case BC_TRANSACTION:
		case BC_REPLY: {
			struct binder_transaction_data tr;

			if (copy_from_user(&tr, ptr, sizeof(tr)))
				return -EFAULT;
			ptr += sizeof(tr);
			binder_transaction(proc, thread, &tr,
					   cmd == BC_REPLY, 0);
			break;
		}

		case BC_REGISTER_LOOPER:
			binder_debug(BINDER_DEBUG_THREADS,
				     "%d:%d BC_REGISTER_LOOPER\n",
				     proc->pid, thread->pid);
			if (thread->looper & BINDER_LOOPER_STATE_ENTERED) {
				thread->looper |= BINDER_LOOPER_STATE_INVALID;
				binder_user_error("%d:%d ERROR: BC_REGISTER_LOOPER called after BC_ENTER_LOOPER\n",
					proc->pid, thread->pid);
			} else if (proc->requested_threads == 0) {
				thread->looper |= BINDER_LOOPER_STATE_INVALID;
				binder_user_error("%d:%d ERROR: BC_REGISTER_LOOPER called without request\n",
					proc->pid, thread->pid);
			} else {
				proc->requested_threads--;
				proc->requested_threads_started++;
			}
			thread->looper |= BINDER_LOOPER_STATE_REGISTERED;
			break;
		case BC_ENTER_LOOPER:
			binder_debug(BINDER_DEBUG_THREADS,
				     "%d:%d BC_ENTER_LOOPER\n",
				     proc->pid, thread->pid);
			if (thread->looper & BINDER_LOOPER_STATE_REGISTERED) {
				thread->looper |= BINDER_LOOPER_STATE_INVALID;
				binder_user_error("%d:%d ERROR: BC_ENTER_LOOPER called after BC_REGISTER_LOOPER\n",
					proc->pid, thread->pid);
			}
			thread->looper |= BINDER_LOOPER_STATE_ENTERED;
			break;
		case BC_EXIT_LOOPER:
			binder_debug(BINDER_DEBUG_THREADS,
				     "%d:%d BC_EXIT_LOOPER\n",
				     proc->pid, thread->pid);
			thread->looper |= BINDER_LOOPER_STATE_EXITED;
			break;

		case BC_REQUEST_DEATH_NOTIFICATION:
		case BC_CLEAR_DEATH_NOTIFICATION: {
			uint32_t target;
			binder_uintptr_t cookie;
			struct binder_ref *ref;
			struct binder_ref_death *death;

			if (get_user(target, (uint32_t __user *)ptr))
				return -EFAULT;
			ptr += sizeof(uint32_t);
			if (get_user(cookie, (binder_uintptr_t __user *)ptr))
				return -EFAULT;
			ptr += sizeof(binder_uintptr_t);
			ref = binder_get_ref(proc, target, false);
			if (ref == NULL) {
				binder_user_error("%d:%d %s invalid ref %d\n",
					proc->pid, thread->pid,
					cmd == BC_REQUEST_DEATH_NOTIFICATION ?
					"BC_REQUEST_DEATH_NOTIFICATION" :
					"BC_CLEAR_DEATH_NOTIFICATION",
					target);
				break;
			}

			binder_debug(BINDER_DEBUG_DEATH_NOTIFICATION,
				     "%d:%d %s %016llx ref %d desc %d s %d w %d for node %d\n",
				     proc->pid, thread->pid,
				     cmd == BC_REQUEST_DEATH_NOTIFICATION ?
				     "BC_REQUEST_DEATH_NOTIFICATION" :
				     "BC_CLEAR_DEATH_NOTIFICATION",
				     (u64)cookie, ref->data.debug_id,
				     ref->data.desc, ref->data.strong,
				     ref->data.weak, ref->node->debug_id);

			if (cmd == BC_REQUEST_DEATH_NOTIFICATION) {
				if (ref->death) {
					binder_user_error("%d:%d BC_REQUEST_DEATH_NOTIFICATION death notification already set\n",
						proc->pid, thread->pid);
					break;
				}
				death = kzalloc(sizeof(*death), GFP_KERNEL);
				if (death == NULL) {
					WARN_ON(thread->return_error.cmd !=
						BR_OK);
					thread->return_error.cmd = BR_ERROR;
					binder_enqueue_work(
						thread->proc,
						&thread->return_error.work,
						&thread->todo);
					binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
						     "%d:%d BC_REQUEST_DEATH_NOTIFICATION failed\n",
						     proc->pid, thread->pid);
					break;
				}
				binder_stats_created(BINDER_STAT_DEATH);
				INIT_LIST_HEAD(&death->work.entry);
				death->cookie = cookie;
				ref->death = death;
				binder_node_lock(ref->node);
				if (ref->node->proc == NULL) {
					ref->death->work.type = BINDER_WORK_DEAD_BINDER;
					if (thread->looper &
					    (BINDER_LOOPER_STATE_REGISTERED |
					     BINDER_LOOPER_STATE_ENTERED))
						binder_enqueue_work(
							proc,
							&ref->death->work,
							&thread->todo);
					else {
						binder_enqueue_work(
							proc,
							&ref->death->work,
							&proc->todo);
						wake_up_interruptible(
								&proc->wait);
					}
				}
				binder_node_unlock(ref->node);
			} else {
				binder_node_lock(ref->node);
				if (ref->death == NULL) {
					binder_user_error("%d:%d BC_CLEAR_DEATH_NOTIFICATION death notification not active\n",
						proc->pid, thread->pid);
					binder_node_unlock(ref->node);
					break;
				}
				death = ref->death;
				if (death->cookie != cookie) {
					binder_user_error("%d:%d BC_CLEAR_DEATH_NOTIFICATION death notification cookie mismatch %016llx != %016llx\n",
						proc->pid, thread->pid,
						(u64)death->cookie,
						(u64)cookie);
					binder_node_unlock(ref->node);
					break;
				}
				ref->death = NULL;
				binder_inner_proc_lock(proc);
				if (list_empty(&death->work.entry)) {
					death->work.type = BINDER_WORK_CLEAR_DEATH_NOTIFICATION;
					if (thread->looper &
					    (BINDER_LOOPER_STATE_REGISTERED |
					     BINDER_LOOPER_STATE_ENTERED))
						binder_enqueue_work_ilocked(
								&death->work,
								&thread->todo);
					else {
						binder_enqueue_work_ilocked(
								&death->work,
								&proc->todo);
						wake_up_interruptible(
								&proc->wait);
					}
				} else {
					BUG_ON(death->work.type != BINDER_WORK_DEAD_BINDER);
					death->work.type = BINDER_WORK_DEAD_BINDER_AND_CLEAR;
				}
				binder_inner_proc_unlock(proc);
				binder_node_unlock(ref->node);
			}
		} break;
		case BC_DEAD_BINDER_DONE: {
			struct binder_work *w;
			binder_uintptr_t cookie;
			struct binder_ref_death *death = NULL;

			if (get_user(cookie, (binder_uintptr_t __user *)ptr))
				return -EFAULT;

			ptr += sizeof(cookie);
			binder_inner_proc_lock(proc);
			list_for_each_entry(w, &proc->delivered_death,
					    entry) {
				struct binder_ref_death *tmp_death =
					container_of(w,
						     struct binder_ref_death,
						     work);

				if (tmp_death->cookie == cookie) {
					death = tmp_death;
					break;
				}
			}
			binder_debug(BINDER_DEBUG_DEAD_BINDER,
				     "%d:%d BC_DEAD_BINDER_DONE %016llx found %p\n",
				     proc->pid, thread->pid, (u64)cookie,
				     death);
			if (death == NULL) {
				binder_user_error("%d:%d BC_DEAD_BINDER_DONE %016llx not found\n",
					proc->pid, thread->pid, (u64)cookie);
				binder_inner_proc_unlock(proc);
				break;
			}
			binder_dequeue_work_ilocked(&death->work);
			if (death->work.type == BINDER_WORK_DEAD_BINDER_AND_CLEAR) {
				death->work.type = BINDER_WORK_CLEAR_DEATH_NOTIFICATION;
				if (thread->looper &
				    (BINDER_LOOPER_STATE_REGISTERED |
				     BINDER_LOOPER_STATE_ENTERED))
					binder_enqueue_work_ilocked(
						&death->work, &thread->todo);
				else {
					binder_enqueue_work_ilocked(
							&death->work,
							&proc->todo);
					wake_up_interruptible(&proc->wait);
				}
			}
			binder_inner_proc_unlock(proc);
		} break;

		default:
			pr_err("%d:%d unknown command %d\n",
			       proc->pid, thread->pid, cmd);
			return -EINVAL;
		}
		*consumed = ptr - buffer;
	}
	return 0;
}

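/*
 * binder_stat_br() mirrors the BC_* accounting above for BR_* return
 * commands: one global, one per-process and one per-thread counter,
 * indexed by the command number.
 */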
static void binder_stat_br(struct binder_proc *proc,
			   struct binder_thread *thread, uint32_t cmd)
{
	trace_binder_return(cmd);
	if (_IOC_NR(cmd) < ARRAY_SIZE(binder_stats.br)) {
		atomic_inc(&binder_stats.br[_IOC_NR(cmd)]);
		atomic_inc(&proc->stats.br[_IOC_NR(cmd)]);
		atomic_inc(&thread->stats.br[_IOC_NR(cmd)]);
	}
}

static int binder_has_proc_work(struct binder_proc *proc,
				struct binder_thread *thread)
{
	return !binder_worklist_empty(proc, &proc->todo) ||
		thread->looper_need_return;
}

static int binder_has_thread_work(struct binder_thread *thread)
{
	return !binder_worklist_empty(thread->proc, &thread->todo) ||
		thread->looper_need_return;
}

static int binder_put_node_cmd(struct binder_proc *proc,
			       struct binder_thread *thread,
			       void __user **ptrp,
			       binder_uintptr_t node_ptr,
			       binder_uintptr_t node_cookie,
			       int node_debug_id,
			       uint32_t cmd, const char *cmd_name)
{
	void __user *ptr = *ptrp;

	if (put_user(cmd, (uint32_t __user *)ptr))
		return -EFAULT;
	ptr += sizeof(uint32_t);

	if (put_user(node_ptr, (binder_uintptr_t __user *)ptr))
		return -EFAULT;
	ptr += sizeof(binder_uintptr_t);

	if (put_user(node_cookie, (binder_uintptr_t __user *)ptr))
		return -EFAULT;
	ptr += sizeof(binder_uintptr_t);

	binder_stat_br(proc, thread, cmd);
	binder_debug(BINDER_DEBUG_USER_REFS, "%d:%d %s %d u%016llx c%016llx\n",
		     proc->pid, thread->pid, cmd_name, node_debug_id,
		     (u64)node_ptr, (u64)node_cookie);

	*ptrp = ptr;
	return 0;
}

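/*
 * binder_thread_read() fills the userspace read buffer with BR_*
 * commands. The thread first waits (on its own todo list, or on the
 * process list if it has no transaction stack of its own), then drains
 * one work item at a time, and may append BR_SPAWN_LOOPER at the end to
 * ask userspace to start another looper thread.
 */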
static int binder_thread_read(struct binder_proc *proc,
			      struct binder_thread *thread,
			      binder_uintptr_t binder_buffer, size_t size,
			      binder_size_t *consumed, int non_block)
{
	void __user *buffer = (void __user *)(uintptr_t)binder_buffer;
	void __user *ptr = buffer + *consumed;
	void __user *end = buffer + size;

	int ret = 0;
	int wait_for_proc_work;

	if (*consumed == 0) {
		if (put_user(BR_NOOP, (uint32_t __user *)ptr))
			return -EFAULT;
		ptr += sizeof(uint32_t);
	}

retry:
	binder_inner_proc_lock(proc);
	wait_for_proc_work = thread->transaction_stack == NULL &&
		binder_worklist_empty_ilocked(&thread->todo);
	binder_inner_proc_unlock(proc);

	thread->looper |= BINDER_LOOPER_STATE_WAITING;
	if (wait_for_proc_work)
		proc->ready_threads++;

	binder_unlock(__func__);

	trace_binder_wait_for_work(wait_for_proc_work,
				   !!thread->transaction_stack,
				   !binder_worklist_empty(proc, &thread->todo));
	if (wait_for_proc_work) {
		if (!(thread->looper & (BINDER_LOOPER_STATE_REGISTERED |
					BINDER_LOOPER_STATE_ENTERED))) {
			binder_user_error("%d:%d ERROR: Thread waiting for process work before calling BC_REGISTER_LOOPER or BC_ENTER_LOOPER (state %x)\n",
				proc->pid, thread->pid, thread->looper);
			wait_event_interruptible(binder_user_error_wait,
						 binder_stop_on_user_error < 2);
		}
		binder_set_nice(proc->default_priority);
		if (non_block) {
			if (!binder_has_proc_work(proc, thread))
				ret = -EAGAIN;
		} else
			ret = wait_event_freezable_exclusive(proc->wait, binder_has_proc_work(proc, thread));
	} else {
		if (non_block) {
			if (!binder_has_thread_work(thread))
				ret = -EAGAIN;
		} else
			ret = wait_event_freezable(thread->wait, binder_has_thread_work(thread));
	}

	binder_lock(__func__);

	if (wait_for_proc_work)
		proc->ready_threads--;
	thread->looper &= ~BINDER_LOOPER_STATE_WAITING;

	if (ret)
		return ret;

	while (1) {
		uint32_t cmd;
		struct binder_transaction_data tr;
		struct binder_work *w = NULL;
		struct list_head *list = NULL;
		struct binder_transaction *t = NULL;
		struct binder_thread *t_from;

		binder_inner_proc_lock(proc);
		if (!binder_worklist_empty_ilocked(&thread->todo))
			list = &thread->todo;
		else if (!binder_worklist_empty_ilocked(&proc->todo) &&
			 wait_for_proc_work)
			list = &proc->todo;
		else {
			binder_inner_proc_unlock(proc);

			/* no data added */
			if (ptr - buffer == 4 && !thread->looper_need_return)
				goto retry;
			break;
		}

		if (end - ptr < sizeof(tr) + 4) {
			binder_inner_proc_unlock(proc);
			break;
		}
		w = binder_dequeue_work_head_ilocked(list);

		switch (w->type) {
		case BINDER_WORK_TRANSACTION: {
			binder_inner_proc_unlock(proc);
			t = container_of(w, struct binder_transaction, work);
		} break;
		case BINDER_WORK_RETURN_ERROR: {
			struct binder_error *e = container_of(
					w, struct binder_error, work);

			WARN_ON(e->cmd == BR_OK);
			binder_inner_proc_unlock(proc);
			if (put_user(e->cmd, (uint32_t __user *)ptr))
				return -EFAULT;
			ptr += sizeof(uint32_t);

			binder_stat_br(proc, thread, cmd);
		} break;
		case BINDER_WORK_TRANSACTION_COMPLETE: {
			binder_inner_proc_unlock(proc);
			cmd = BR_TRANSACTION_COMPLETE;
			if (put_user(cmd, (uint32_t __user *)ptr))
				return -EFAULT;
			ptr += sizeof(uint32_t);

			binder_stat_br(proc, thread, cmd);
			binder_debug(BINDER_DEBUG_TRANSACTION_COMPLETE,
				     "%d:%d BR_TRANSACTION_COMPLETE\n",
				     proc->pid, thread->pid);
			kfree(w);
			binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
		} break;
		case BINDER_WORK_NODE: {
			struct binder_node *node = container_of(w, struct binder_node, work);
			int strong, weak;
			binder_uintptr_t node_ptr = node->ptr;
			binder_uintptr_t node_cookie = node->cookie;
			int node_debug_id = node->debug_id;
			int has_weak_ref;
			int has_strong_ref;
			void __user *orig_ptr = ptr;

			BUG_ON(proc != node->proc);
			strong = node->internal_strong_refs ||
					node->local_strong_refs;
			weak = !hlist_empty(&node->refs) ||
					node->local_weak_refs ||
					node->tmp_refs || strong;
			has_strong_ref = node->has_strong_ref;
			has_weak_ref = node->has_weak_ref;

			if (weak && !has_weak_ref) {
				node->has_weak_ref = 1;
				node->pending_weak_ref = 1;
				node->local_weak_refs++;
			}
			if (strong && !has_strong_ref) {
				node->has_strong_ref = 1;
				node->pending_strong_ref = 1;
				node->local_strong_refs++;
			}
			if (!strong && has_strong_ref)
				node->has_strong_ref = 0;
			if (!weak && has_weak_ref)
				node->has_weak_ref = 0;
			if (!weak && !strong) {
				binder_debug(BINDER_DEBUG_INTERNAL_REFS,
					     "%d:%d node %d u%016llx c%016llx deleted\n",
					     proc->pid, thread->pid,
					     node_debug_id,
					     (u64)node_ptr,
					     (u64)node_cookie);
				rb_erase(&node->rb_node, &proc->nodes);
				binder_inner_proc_unlock(proc);
				binder_node_lock(node);
				/*
				 * Acquire the node lock before freeing the
				 * node to serialize with other threads that
				 * may have been holding the node lock while
				 * decrementing this node (avoids race where
				 * this thread frees while the other thread
				 * is unlocking the node after the final
				 * decrement)
				 */
				binder_node_unlock(node);
				binder_free_node(node);
			} else
				binder_inner_proc_unlock(proc);

			if (weak && !has_weak_ref)
				ret = binder_put_node_cmd(
						proc, thread, &ptr, node_ptr,
						node_cookie, node_debug_id,
						BR_INCREFS, "BR_INCREFS");
			if (!ret && strong && !has_strong_ref)
				ret = binder_put_node_cmd(
						proc, thread, &ptr, node_ptr,
						node_cookie, node_debug_id,
						BR_ACQUIRE, "BR_ACQUIRE");
			if (!ret && !strong && has_strong_ref)
				ret = binder_put_node_cmd(
						proc, thread, &ptr, node_ptr,
						node_cookie, node_debug_id,
						BR_RELEASE, "BR_RELEASE");
			if (!ret && !weak && has_weak_ref)
				ret = binder_put_node_cmd(
						proc, thread, &ptr, node_ptr,
						node_cookie, node_debug_id,
						BR_DECREFS, "BR_DECREFS");
			if (orig_ptr == ptr)
				binder_debug(BINDER_DEBUG_INTERNAL_REFS,
					     "%d:%d node %d u%016llx c%016llx state unchanged\n",
					     proc->pid, thread->pid,
					     node_debug_id,
					     (u64)node_ptr,
					     (u64)node_cookie);
			if (ret)
				return ret;
		} break;
		case BINDER_WORK_DEAD_BINDER:
		case BINDER_WORK_DEAD_BINDER_AND_CLEAR:
		case BINDER_WORK_CLEAR_DEATH_NOTIFICATION: {
			struct binder_ref_death *death;

			death = container_of(w, struct binder_ref_death, work);
			if (w->type == BINDER_WORK_CLEAR_DEATH_NOTIFICATION)
				cmd = BR_CLEAR_DEATH_NOTIFICATION_DONE;
			else
				cmd = BR_DEAD_BINDER;
			/*
			 * TODO: there is a race condition between
			 * death notification requests and delivery
			 * of the notifications. This will be handled
			 * in a later patch.
			 */
			binder_inner_proc_unlock(proc);
			if (put_user(cmd, (uint32_t __user *)ptr))
				return -EFAULT;
			ptr += sizeof(uint32_t);
			if (put_user(death->cookie,
				     (binder_uintptr_t __user *)ptr))
				return -EFAULT;
			ptr += sizeof(binder_uintptr_t);
			binder_stat_br(proc, thread, cmd);
			binder_debug(BINDER_DEBUG_DEATH_NOTIFICATION,
				     "%d:%d %s %016llx\n",
				      proc->pid, thread->pid,
				      cmd == BR_DEAD_BINDER ?
				      "BR_DEAD_BINDER" :
				      "BR_CLEAR_DEATH_NOTIFICATION_DONE",
				      (u64)death->cookie);

			if (w->type == BINDER_WORK_CLEAR_DEATH_NOTIFICATION) {
				kfree(death);
				binder_stats_deleted(BINDER_STAT_DEATH);
			} else {
				binder_inner_proc_lock(proc);
				binder_enqueue_work_ilocked(
						w, &proc->delivered_death);
				binder_inner_proc_unlock(proc);
			}
			if (cmd == BR_DEAD_BINDER)
				goto done; /* DEAD_BINDER notifications can cause transactions */
		} break;
		}

		if (!t)
			continue;

		BUG_ON(t->buffer == NULL);
		if (t->buffer->target_node) {
			struct binder_node *target_node = t->buffer->target_node;

			tr.target.ptr = target_node->ptr;
			tr.cookie = target_node->cookie;
			t->saved_priority = task_nice(current);
			if (t->priority < target_node->min_priority &&
			    !(t->flags & TF_ONE_WAY))
				binder_set_nice(t->priority);
			else if (!(t->flags & TF_ONE_WAY) ||
				 t->saved_priority > target_node->min_priority)
				binder_set_nice(target_node->min_priority);
			cmd = BR_TRANSACTION;
		} else {
			tr.target.ptr = 0;
			tr.cookie = 0;
			cmd = BR_REPLY;
		}
		tr.code = t->code;
		tr.flags = t->flags;
		tr.sender_euid = from_kuid(current_user_ns(), t->sender_euid);

		t_from = binder_get_txn_from(t);
		if (t_from) {
			struct task_struct *sender = t_from->proc->tsk;

			tr.sender_pid = task_tgid_nr_ns(sender,
							task_active_pid_ns(current));
		} else {
			tr.sender_pid = 0;
		}

		tr.data_size = t->buffer->data_size;
		tr.offsets_size = t->buffer->offsets_size;
		tr.data.ptr.buffer = (binder_uintptr_t)
			((uintptr_t)t->buffer->data +
			binder_alloc_get_user_buffer_offset(&proc->alloc));
		tr.data.ptr.offsets = tr.data.ptr.buffer +
					ALIGN(t->buffer->data_size,
					      sizeof(void *));

		if (put_user(cmd, (uint32_t __user *)ptr)) {
			if (t_from)
				binder_thread_dec_tmpref(t_from);
			return -EFAULT;
		}
		ptr += sizeof(uint32_t);
		if (copy_to_user(ptr, &tr, sizeof(tr))) {
			if (t_from)
				binder_thread_dec_tmpref(t_from);
			return -EFAULT;
		}
		ptr += sizeof(tr);

		trace_binder_transaction_received(t);
		binder_stat_br(proc, thread, cmd);
		binder_debug(BINDER_DEBUG_TRANSACTION,
			     "%d:%d %s %d %d:%d, cmd %d size %zd-%zd ptr %016llx-%016llx\n",
			     proc->pid, thread->pid,
			     (cmd == BR_TRANSACTION) ? "BR_TRANSACTION" :
			     "BR_REPLY",
			     t->debug_id, t_from ? t_from->proc->pid : 0,
			     t_from ? t_from->pid : 0, cmd,
			     t->buffer->data_size, t->buffer->offsets_size,
			     (u64)tr.data.ptr.buffer, (u64)tr.data.ptr.offsets);

		if (t_from)
			binder_thread_dec_tmpref(t_from);
		t->buffer->allow_user_free = 1;
		if (cmd == BR_TRANSACTION && !(t->flags & TF_ONE_WAY)) {
			binder_inner_proc_lock(thread->proc);
			t->to_parent = thread->transaction_stack;
			t->to_thread = thread;
			thread->transaction_stack = t;
			binder_inner_proc_unlock(thread->proc);
		} else {
			binder_free_transaction(t);
		}
		break;
	}

done:

	*consumed = ptr - buffer;
	if (proc->requested_threads + proc->ready_threads == 0 &&
	    proc->requested_threads_started < proc->max_threads &&
	    (thread->looper & (BINDER_LOOPER_STATE_REGISTERED |
	     BINDER_LOOPER_STATE_ENTERED)) /* the user-space code fails to */
	     /*spawn a new thread if we leave this out */) {
		proc->requested_threads++;
		binder_debug(BINDER_DEBUG_THREADS,
			     "%d:%d BR_SPAWN_LOOPER\n",
			     proc->pid, thread->pid);
		if (put_user(BR_SPAWN_LOOPER, (uint32_t __user *)buffer))
			return -EFAULT;
		binder_stat_br(proc, thread, BR_SPAWN_LOOPER);
	}
	return 0;
}

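/*
 * binder_release_work() drops whatever is still queued on a todo list
 * when a thread or process goes away: pending transactions get a
 * BR_DEAD_REPLY (or are freed), and unclaimed completion and death
 * work items are released.
 */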
static void binder_release_work(struct binder_proc *proc,
				struct list_head *list)
{
	struct binder_work *w;

	while (1) {
		w = binder_dequeue_work_head(proc, list);
		if (!w)
			return;

		switch (w->type) {
		case BINDER_WORK_TRANSACTION: {
			struct binder_transaction *t;

			t = container_of(w, struct binder_transaction, work);
			if (t->buffer->target_node &&
			    !(t->flags & TF_ONE_WAY)) {
				binder_send_failed_reply(t, BR_DEAD_REPLY);
			} else {
				binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
					"undelivered transaction %d\n",
					t->debug_id);
				binder_free_transaction(t);
			}
		} break;
		case BINDER_WORK_RETURN_ERROR: {
			struct binder_error *e = container_of(
					w, struct binder_error, work);

			binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
				"undelivered TRANSACTION_ERROR: %u\n",
				e->cmd);
		} break;
		case BINDER_WORK_TRANSACTION_COMPLETE: {
			binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
				"undelivered TRANSACTION_COMPLETE\n");
			kfree(w);
			binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
		} break;
		case BINDER_WORK_DEAD_BINDER_AND_CLEAR:
		case BINDER_WORK_CLEAR_DEATH_NOTIFICATION: {
			struct binder_ref_death *death;

			death = container_of(w, struct binder_ref_death, work);
			binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
				"undelivered death notification, %016llx\n",
				(u64)death->cookie);
			kfree(death);
			binder_stats_deleted(BINDER_STAT_DEATH);
		} break;
		default:
			pr_err("unexpected work type, %d, not freed\n",
			       w->type);
			break;
		}
	}
}

static struct binder_thread *binder_get_thread_ilocked(
		struct binder_proc *proc, struct binder_thread *new_thread)
{
	struct binder_thread *thread = NULL;
	struct rb_node *parent = NULL;
	struct rb_node **p = &proc->threads.rb_node;

	while (*p) {
		parent = *p;
		thread = rb_entry(parent, struct binder_thread, rb_node);

		if (current->pid < thread->pid)
			p = &(*p)->rb_left;
		else if (current->pid > thread->pid)
			p = &(*p)->rb_right;
		else
			return thread;
	}
	if (!new_thread)
		return NULL;
	thread = new_thread;
	binder_stats_created(BINDER_STAT_THREAD);
	thread->proc = proc;
	thread->pid = current->pid;
	atomic_set(&thread->tmp_ref, 0);
	init_waitqueue_head(&thread->wait);
	INIT_LIST_HEAD(&thread->todo);
	rb_link_node(&thread->rb_node, parent, p);
	rb_insert_color(&thread->rb_node, &proc->threads);
	thread->looper_need_return = true;
	thread->return_error.work.type = BINDER_WORK_RETURN_ERROR;
	thread->return_error.cmd = BR_OK;
	thread->reply_error.work.type = BINDER_WORK_RETURN_ERROR;
	thread->reply_error.cmd = BR_OK;

	return thread;
}

static struct binder_thread *binder_get_thread(struct binder_proc *proc)
{
	struct binder_thread *thread;
	struct binder_thread *new_thread;

	binder_inner_proc_lock(proc);
	thread = binder_get_thread_ilocked(proc, NULL);
	binder_inner_proc_unlock(proc);
	if (!thread) {
		new_thread = kzalloc(sizeof(*thread), GFP_KERNEL);
		if (new_thread == NULL)
			return NULL;
		binder_inner_proc_lock(proc);
		thread = binder_get_thread_ilocked(proc, new_thread);
		binder_inner_proc_unlock(proc);
		if (thread != new_thread)
			kfree(new_thread);
	}
	return thread;
}

static void binder_free_proc(struct binder_proc *proc)
{
	BUG_ON(!list_empty(&proc->todo));
	BUG_ON(!list_empty(&proc->delivered_death));
	binder_alloc_deferred_release(&proc->alloc);
	put_task_struct(proc->tsk);
	binder_stats_deleted(BINDER_STAT_PROC);
	kfree(proc);
}

static void binder_free_thread(struct binder_thread *thread)
{
	BUG_ON(!list_empty(&thread->todo));
	binder_stats_deleted(BINDER_STAT_THREAD);
	binder_proc_dec_tmpref(thread->proc);
	kfree(thread);
}

static int binder_thread_release(struct binder_proc *proc,
				 struct binder_thread *thread)
{
	struct binder_transaction *t;
	struct binder_transaction *send_reply = NULL;
	int active_transactions = 0;
	struct binder_transaction *last_t = NULL;

	binder_inner_proc_lock(thread->proc);
	/*
	 * take a ref on the proc so it survives
	 * after we remove this thread from proc->threads.
	 * The corresponding dec is when we actually
	 * free the thread in binder_free_thread()
	 */
	proc->tmp_ref++;
	/*
	 * take a ref on this thread to ensure it
	 * survives while we are releasing it
	 */
	atomic_inc(&thread->tmp_ref);
	rb_erase(&thread->rb_node, &proc->threads);
	t = thread->transaction_stack;
	if (t) {
		spin_lock(&t->lock);
		if (t->to_thread == thread)
			send_reply = t;
	}
	thread->is_dead = true;

	while (t) {
		last_t = t;
		active_transactions++;
		binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
			     "release %d:%d transaction %d %s, still active\n",
			     proc->pid, thread->pid,
			     t->debug_id,
			     (t->to_thread == thread) ? "in" : "out");

		if (t->to_thread == thread) {
			t->to_proc = NULL;
			t->to_thread = NULL;
			if (t->buffer) {
				t->buffer->transaction = NULL;
				t->buffer = NULL;
			}
			t = t->to_parent;
		} else if (t->from == thread) {
			t->from = NULL;
			t = t->from_parent;
		} else
			BUG();
		spin_unlock(&last_t->lock);
		if (t)
			spin_lock(&t->lock);
	}
	binder_inner_proc_unlock(thread->proc);

	if (send_reply)
		binder_send_failed_reply(send_reply, BR_DEAD_REPLY);
	binder_release_work(proc, &thread->todo);
	binder_thread_dec_tmpref(thread);
	return active_transactions;
}

static unsigned int binder_poll(struct file *filp,
				struct poll_table_struct *wait)
{
	struct binder_proc *proc = filp->private_data;
	struct binder_thread *thread = NULL;
	int wait_for_proc_work;

	binder_lock(__func__);

	thread = binder_get_thread(proc);

	binder_inner_proc_lock(thread->proc);
	wait_for_proc_work = thread->transaction_stack == NULL &&
		binder_worklist_empty_ilocked(&thread->todo);
	binder_inner_proc_unlock(thread->proc);

	binder_unlock(__func__);

	if (wait_for_proc_work) {
		if (binder_has_proc_work(proc, thread))
			return POLLIN;
		poll_wait(filp, &proc->wait, wait);
		if (binder_has_proc_work(proc, thread))
			return POLLIN;
	} else {
		if (binder_has_thread_work(thread))
			return POLLIN;
		poll_wait(filp, &thread->wait, wait);
		if (binder_has_thread_work(thread))
			return POLLIN;
	}
	return 0;
}

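/*
 * BINDER_WRITE_READ is the main ioctl: the caller passes a
 * binder_write_read descriptor and the kernel first processes the write
 * buffer, then fills the read buffer, copying the consumed counts back
 * even on failure so userspace can resubmit the remainder.
 */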
static int binder_ioctl_write_read(struct file *filp,
				   unsigned int cmd, unsigned long arg,
				   struct binder_thread *thread)
{
	int ret = 0;
	struct binder_proc *proc = filp->private_data;
	unsigned int size = _IOC_SIZE(cmd);
	void __user *ubuf = (void __user *)arg;
	struct binder_write_read bwr;

	if (size != sizeof(struct binder_write_read)) {
		ret = -EINVAL;
		goto out;
	}
	if (copy_from_user(&bwr, ubuf, sizeof(bwr))) {
		ret = -EFAULT;
		goto out;
	}
	binder_debug(BINDER_DEBUG_READ_WRITE,
		     "%d:%d write %lld at %016llx, read %lld at %016llx\n",
		     proc->pid, thread->pid,
		     (u64)bwr.write_size, (u64)bwr.write_buffer,
		     (u64)bwr.read_size, (u64)bwr.read_buffer);

	if (bwr.write_size > 0) {
		ret = binder_thread_write(proc, thread,
					  bwr.write_buffer,
					  bwr.write_size,
					  &bwr.write_consumed);
		trace_binder_write_done(ret);
		if (ret < 0) {
			bwr.read_consumed = 0;
			if (copy_to_user(ubuf, &bwr, sizeof(bwr)))
				ret = -EFAULT;
			goto out;
		}
	}
	if (bwr.read_size > 0) {
		ret = binder_thread_read(proc, thread, bwr.read_buffer,
					 bwr.read_size,
					 &bwr.read_consumed,
					 filp->f_flags & O_NONBLOCK);
		trace_binder_read_done(ret);
		if (!binder_worklist_empty(proc, &proc->todo))
			wake_up_interruptible(&proc->wait);
		if (ret < 0) {
			if (copy_to_user(ubuf, &bwr, sizeof(bwr)))
				ret = -EFAULT;
			goto out;
		}
	}
	binder_debug(BINDER_DEBUG_READ_WRITE,
		     "%d:%d wrote %lld of %lld, read return %lld of %lld\n",
		     proc->pid, thread->pid,
		     (u64)bwr.write_consumed, (u64)bwr.write_size,
		     (u64)bwr.read_consumed, (u64)bwr.read_size);
	if (copy_to_user(ubuf, &bwr, sizeof(bwr))) {
		ret = -EFAULT;
		goto out;
	}
out:
	return ret;
}
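/*
 * Illustrative userspace sketch (not part of this driver): the
 * BINDER_WRITE_READ call serviced by binder_ioctl_write_read() above.
 * Assumes the installed uapi header at <linux/android/binder.h>; the buffer
 * names and the helper's signature are assumptions for the example.
 */
#if 0	/* example only; never compiled into the driver */
#include <stddef.h>
#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/android/binder.h>

static int example_write_read(int binder_fd, void *out, size_t out_len,
			      void *in, size_t in_len)
{
	struct binder_write_read bwr = {0};

	bwr.write_buffer = (binder_uintptr_t)(uintptr_t)out;	/* BC_* commands */
	bwr.write_size = out_len;
	bwr.read_buffer = (binder_uintptr_t)(uintptr_t)in;	/* BR_* returns land here */
	bwr.read_size = in_len;

	/* On return the kernel has updated write_consumed and read_consumed */
	return ioctl(binder_fd, BINDER_WRITE_READ, &bwr);
}
#endif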
static int binder_ioctl_set_ctx_mgr(struct file *filp)
{
	int ret = 0;
	struct binder_proc *proc = filp->private_data;
	struct binder_context *context = proc->context;
	struct binder_node *new_node;
	kuid_t curr_euid = current_euid();

	mutex_lock(&context->context_mgr_node_lock);
	if (context->binder_context_mgr_node) {
		pr_err("BINDER_SET_CONTEXT_MGR already set\n");
		ret = -EBUSY;
		goto out;
	}
	ret = security_binder_set_context_mgr(proc->tsk);
	if (ret < 0)
		goto out;
	if (uid_valid(context->binder_context_mgr_uid)) {
		if (!uid_eq(context->binder_context_mgr_uid, curr_euid)) {
			pr_err("BINDER_SET_CONTEXT_MGR bad uid %d != %d\n",
			       from_kuid(&init_user_ns, curr_euid),
			       from_kuid(&init_user_ns,
					 context->binder_context_mgr_uid));
			ret = -EPERM;
			goto out;
		}
	} else {
		context->binder_context_mgr_uid = curr_euid;
	}
	new_node = binder_new_node(proc, NULL);
	if (!new_node) {
		ret = -ENOMEM;
		goto out;
	}
	binder_node_lock(new_node);
	new_node->local_weak_refs++;
	new_node->local_strong_refs++;
	new_node->has_strong_ref = 1;
	new_node->has_weak_ref = 1;
	context->binder_context_mgr_node = new_node;
	binder_node_unlock(new_node);
	binder_put_node(new_node);
out:
	mutex_unlock(&context->context_mgr_node_lock);
	return ret;
}
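/*
 * Illustrative userspace sketch (not part of this driver): how a service
 * manager claims the context-manager node handled by
 * binder_ioctl_set_ctx_mgr() above.  Passing 0 as the argument mirrors what
 * Android's servicemanager traditionally does; everything else here is an
 * assumption for the example.
 */
#if 0	/* example only; never compiled into the driver */
#include <sys/ioctl.h>
#include <linux/android/binder.h>

static int example_become_context_manager(int binder_fd)
{
	/* Only one process per context may succeed; a second caller gets -EBUSY */
	return ioctl(binder_fd, BINDER_SET_CONTEXT_MGR, 0);
}
#endif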
static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
	int ret;
	struct binder_proc *proc = filp->private_data;
	struct binder_thread *thread;
	unsigned int size = _IOC_SIZE(cmd);
	void __user *ubuf = (void __user *)arg;

	/*pr_info("binder_ioctl: %d:%d %x %lx\n",
			proc->pid, current->pid, cmd, arg);*/

	trace_binder_ioctl(cmd, arg);

	ret = wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2);
	if (ret)
		goto err_unlocked;

	binder_lock(__func__);
	thread = binder_get_thread(proc);
	if (thread == NULL) {
		ret = -ENOMEM;
		goto err;
	}

	switch (cmd) {
	case BINDER_WRITE_READ:
		ret = binder_ioctl_write_read(filp, cmd, arg, thread);
		if (ret)
			goto err;
		break;
	case BINDER_SET_MAX_THREADS:
		if (copy_from_user(&proc->max_threads, ubuf, sizeof(proc->max_threads))) {
			ret = -EINVAL;
			goto err;
		}
		break;
	case BINDER_SET_CONTEXT_MGR:
		ret = binder_ioctl_set_ctx_mgr(filp);
		if (ret)
			goto err;
		break;
	case BINDER_THREAD_EXIT:
		binder_debug(BINDER_DEBUG_THREADS, "%d:%d exit\n",
			     proc->pid, thread->pid);
		binder_thread_release(proc, thread);
		thread = NULL;
		break;
	case BINDER_VERSION: {
		struct binder_version __user *ver = ubuf;

		if (size != sizeof(struct binder_version)) {
			ret = -EINVAL;
			goto err;
		}
		if (put_user(BINDER_CURRENT_PROTOCOL_VERSION,
			     &ver->protocol_version)) {
			ret = -EINVAL;
			goto err;
		}
		break;
	}
	default:
		ret = -EINVAL;
		goto err;
	}
	ret = 0;
err:
	if (thread)
		thread->looper_need_return = false;
	binder_unlock(__func__);
	wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2);
	if (ret && ret != -ERESTARTSYS)
		pr_info("%d:%d ioctl %x %lx returned %d\n", proc->pid, current->pid, cmd, arg, ret);
err_unlocked:
	trace_binder_ioctl_done(ret);
	return ret;
}
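/*
 * Illustrative userspace sketch (not part of this driver): probing the
 * driver with BINDER_VERSION and sizing the thread pool, matching the
 * binder_ioctl() cases above.  The return-value handling is an assumption
 * for the example.
 */
#if 0	/* example only; never compiled into the driver */
#include <sys/ioctl.h>
#include <linux/android/binder.h>

static int example_check_version(int binder_fd, unsigned int max_threads)
{
	struct binder_version vers;

	if (ioctl(binder_fd, BINDER_VERSION, &vers) < 0)
		return -1;
	if (vers.protocol_version != BINDER_CURRENT_PROTOCOL_VERSION)
		return -1;	/* kernel and userspace headers disagree */

	/* Cap how many looper threads the driver may ask this process to spawn */
	return ioctl(binder_fd, BINDER_SET_MAX_THREADS, &max_threads);
}
#endif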
static void binder_vma_open(struct vm_area_struct *vma)
{
	struct binder_proc *proc = vma->vm_private_data;

	binder_debug(BINDER_DEBUG_OPEN_CLOSE,
		     "%d open vm area %lx-%lx (%ld K) vma %lx pagep %lx\n",
		     proc->pid, vma->vm_start, vma->vm_end,
		     (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
		     (unsigned long)pgprot_val(vma->vm_page_prot));
}
static void binder_vma_close(struct vm_area_struct *vma)
{
	struct binder_proc *proc = vma->vm_private_data;

	binder_debug(BINDER_DEBUG_OPEN_CLOSE,
		     "%d close vm area %lx-%lx (%ld K) vma %lx pagep %lx\n",
		     proc->pid, vma->vm_start, vma->vm_end,
		     (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
		     (unsigned long)pgprot_val(vma->vm_page_prot));
	binder_alloc_vma_close(&proc->alloc);
	binder_defer_work(proc, BINDER_DEFERRED_PUT_FILES);
}
static int binder_vm_fault(struct vm_fault *vmf)
{
	return VM_FAULT_SIGBUS;
}
static const struct vm_operations_struct binder_vm_ops = {
	.open = binder_vma_open,
	.close = binder_vma_close,
	.fault = binder_vm_fault,
};
static int binder_mmap(struct file *filp, struct vm_area_struct *vma)
{
	int ret;
	struct binder_proc *proc = filp->private_data;
	const char *failure_string;

	if (proc->tsk != current->group_leader)
		return -EINVAL;

	if ((vma->vm_end - vma->vm_start) > SZ_4M)
		vma->vm_end = vma->vm_start + SZ_4M;

	binder_debug(BINDER_DEBUG_OPEN_CLOSE,
		     "%s: %d %lx-%lx (%ld K) vma %lx pagep %lx\n",
		     __func__, proc->pid, vma->vm_start, vma->vm_end,
		     (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
		     (unsigned long)pgprot_val(vma->vm_page_prot));

	if (vma->vm_flags & FORBIDDEN_MMAP_FLAGS) {
		ret = -EPERM;
		failure_string = "bad vm_flags";
		goto err_bad_arg;
	}
	vma->vm_flags = (vma->vm_flags | VM_DONTCOPY) & ~VM_MAYWRITE;
	vma->vm_ops = &binder_vm_ops;
	vma->vm_private_data = proc;

	ret = binder_alloc_mmap_handler(&proc->alloc, vma);
	if (ret)
		return ret;
	proc->files = get_files_struct(current);
	return 0;

err_bad_arg:
	pr_err("binder_mmap: %d %lx-%lx %s failed %d\n",
	       proc->pid, vma->vm_start, vma->vm_end, failure_string, ret);
	return ret;
}
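/*
 * Illustrative userspace sketch (not part of this driver): the usual
 * open()+mmap() setup serviced by binder_open() and binder_mmap() above.
 * The mapping must be read-only (FORBIDDEN_MMAP_FLAGS rejects writable
 * mappings) and anything larger than 4 MB is clamped to SZ_4M by
 * binder_mmap().  The 1 MB size chosen here is an assumption for the example.
 */
#if 0	/* example only; never compiled into the driver */
#include <fcntl.h>
#include <sys/mman.h>
#include <unistd.h>

static void *example_open_and_map(int *out_fd)
{
	int fd = open("/dev/binder", O_RDWR | O_CLOEXEC);
	void *map;

	if (fd < 0)
		return NULL;

	/* Receive-buffer area: the driver fills it, userspace only reads it */
	map = mmap(NULL, 1024 * 1024, PROT_READ, MAP_PRIVATE, fd, 0);
	if (map == MAP_FAILED) {
		close(fd);
		return NULL;
	}
	*out_fd = fd;
	return map;
}
#endif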
static int binder_open(struct inode *nodp, struct file *filp)
{
	struct binder_proc *proc;
	struct binder_device *binder_dev;

	binder_debug(BINDER_DEBUG_OPEN_CLOSE, "binder_open: %d:%d\n",
		     current->group_leader->pid, current->pid);

	proc = kzalloc(sizeof(*proc), GFP_KERNEL);
	if (proc == NULL)
		return -ENOMEM;
	spin_lock_init(&proc->inner_lock);
	spin_lock_init(&proc->outer_lock);
	get_task_struct(current->group_leader);
	proc->tsk = current->group_leader;
	INIT_LIST_HEAD(&proc->todo);
	init_waitqueue_head(&proc->wait);
	proc->default_priority = task_nice(current);
	binder_dev = container_of(filp->private_data, struct binder_device,
				  miscdev);
	proc->context = &binder_dev->context;
	binder_alloc_init(&proc->alloc);

	binder_lock(__func__);

	binder_stats_created(BINDER_STAT_PROC);
	proc->pid = current->group_leader->pid;
	INIT_LIST_HEAD(&proc->delivered_death);
	filp->private_data = proc;

	binder_unlock(__func__);

	mutex_lock(&binder_procs_lock);
	hlist_add_head(&proc->proc_node, &binder_procs);
	mutex_unlock(&binder_procs_lock);

	if (binder_debugfs_dir_entry_proc) {
		char strbuf[11];

		snprintf(strbuf, sizeof(strbuf), "%u", proc->pid);
		/*
		 * proc debug entries are shared between contexts, so
		 * this will fail if the process tries to open the driver
		 * again with a different context. The printing code will
		 * anyway print all contexts that a given PID has, so this
		 * is not a problem.
		 */
		proc->debugfs_entry = debugfs_create_file(strbuf, S_IRUGO,
			binder_debugfs_dir_entry_proc,
			(void *)(unsigned long)proc->pid,
			&binder_proc_fops);
	}

	return 0;
}
static int binder_flush(struct file *filp, fl_owner_t id)
{
	struct binder_proc *proc = filp->private_data;

	binder_defer_work(proc, BINDER_DEFERRED_FLUSH);

	return 0;
}
static void binder_deferred_flush(struct binder_proc *proc)
{
	struct rb_node *n;
	int wake_count = 0;

	binder_inner_proc_lock(proc);
	for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n)) {
		struct binder_thread *thread = rb_entry(n, struct binder_thread, rb_node);

		thread->looper_need_return = true;
		if (thread->looper & BINDER_LOOPER_STATE_WAITING) {
			wake_up_interruptible(&thread->wait);
			wake_count++;
		}
	}
	binder_inner_proc_unlock(proc);
	wake_up_interruptible_all(&proc->wait);

	binder_debug(BINDER_DEBUG_OPEN_CLOSE,
		     "binder_flush: %d woke %d threads\n", proc->pid,
		     wake_count);
}
static int binder_release(struct inode *nodp, struct file *filp)
{
	struct binder_proc *proc = filp->private_data;

	debugfs_remove(proc->debugfs_entry);
	binder_defer_work(proc, BINDER_DEFERRED_RELEASE);

	return 0;
}
static int binder_node_release(struct binder_node *node, int refs)
{
	struct binder_ref *ref;
	int death = 0;
	struct binder_proc *proc = node->proc;

	binder_release_work(proc, &node->async_todo);

	binder_node_lock(node);
	binder_inner_proc_lock(proc);
	binder_dequeue_work_ilocked(&node->work);
	/*
	 * The caller must have taken a temporary ref on the node,
	 */
	BUG_ON(!node->tmp_refs);
	if (hlist_empty(&node->refs) && node->tmp_refs == 1) {
		binder_inner_proc_unlock(proc);
		binder_node_unlock(node);
		binder_free_node(node);

		return refs;
	}

	node->proc = NULL;
	node->local_strong_refs = 0;
	node->local_weak_refs = 0;
	binder_inner_proc_unlock(proc);

	spin_lock(&binder_dead_nodes_lock);
	hlist_add_head(&node->dead_node, &binder_dead_nodes);
	spin_unlock(&binder_dead_nodes_lock);

	hlist_for_each_entry(ref, &node->refs, node_entry) {
		refs++;

		if (!ref->death)
			continue;

		death++;

		binder_inner_proc_lock(ref->proc);
		if (list_empty(&ref->death->work.entry)) {
			ref->death->work.type = BINDER_WORK_DEAD_BINDER;
			binder_enqueue_work_ilocked(&ref->death->work,
						    &ref->proc->todo);
			wake_up_interruptible(&ref->proc->wait);
		} else
			BUG();
		binder_inner_proc_unlock(ref->proc);
	}

	binder_debug(BINDER_DEBUG_DEAD_BINDER,
		     "node %d now dead, refs %d, death %d\n",
		     node->debug_id, refs, death);
	binder_node_unlock(node);
	binder_put_node(node);

	return refs;
}
static void binder_deferred_release(struct binder_proc *proc)
{
	struct binder_context *context = proc->context;
	struct rb_node *n;
	int threads, nodes, incoming_refs, outgoing_refs, active_transactions;

	BUG_ON(proc->files);

	mutex_lock(&binder_procs_lock);
	hlist_del(&proc->proc_node);
	mutex_unlock(&binder_procs_lock);

	mutex_lock(&context->context_mgr_node_lock);
	if (context->binder_context_mgr_node &&
	    context->binder_context_mgr_node->proc == proc) {
		binder_debug(BINDER_DEBUG_DEAD_BINDER,
			     "%s: %d context_mgr_node gone\n",
			     __func__, proc->pid);
		context->binder_context_mgr_node = NULL;
	}
	mutex_unlock(&context->context_mgr_node_lock);
	binder_inner_proc_lock(proc);
	/*
	 * Make sure proc stays alive after we
	 * remove all the threads
	 */
	proc->tmp_ref++;

	proc->is_dead = true;
	threads = 0;
	active_transactions = 0;
	while ((n = rb_first(&proc->threads))) {
		struct binder_thread *thread;

		thread = rb_entry(n, struct binder_thread, rb_node);
		binder_inner_proc_unlock(proc);
		threads++;
		active_transactions += binder_thread_release(proc, thread);
		binder_inner_proc_lock(proc);
	}

	nodes = 0;
	incoming_refs = 0;
	while ((n = rb_first(&proc->nodes))) {
		struct binder_node *node;

		node = rb_entry(n, struct binder_node, rb_node);
		nodes++;
		/*
		 * take a temporary ref on the node before
		 * calling binder_node_release() which will either
		 * kfree() the node or call binder_put_node()
		 */
		binder_inc_node_tmpref_ilocked(node);
		rb_erase(&node->rb_node, &proc->nodes);
		binder_inner_proc_unlock(proc);
		incoming_refs = binder_node_release(node, incoming_refs);
		binder_inner_proc_lock(proc);
	}
	binder_inner_proc_unlock(proc);

	outgoing_refs = 0;
	while ((n = rb_first(&proc->refs_by_desc))) {
		struct binder_ref *ref;

		ref = rb_entry(n, struct binder_ref, rb_node_desc);
		outgoing_refs++;
		binder_cleanup_ref(ref);
		binder_free_ref(ref);
	}

	binder_release_work(proc, &proc->todo);
	binder_release_work(proc, &proc->delivered_death);

	binder_debug(BINDER_DEBUG_OPEN_CLOSE,
		     "%s: %d threads %d, nodes %d (ref %d), refs %d, active transactions %d\n",
		     __func__, proc->pid, threads, nodes, incoming_refs,
		     outgoing_refs, active_transactions);

	binder_proc_dec_tmpref(proc);
}
static void binder_deferred_func(struct work_struct *work)
{
	struct binder_proc *proc;
	struct files_struct *files;

	int defer;

	do {
		binder_lock(__func__);
		mutex_lock(&binder_deferred_lock);
		if (!hlist_empty(&binder_deferred_list)) {
			proc = hlist_entry(binder_deferred_list.first,
					   struct binder_proc, deferred_work_node);
			hlist_del_init(&proc->deferred_work_node);
			defer = proc->deferred_work;
			proc->deferred_work = 0;
		} else {
			proc = NULL;
			defer = 0;
		}
		mutex_unlock(&binder_deferred_lock);

		files = NULL;
		if (defer & BINDER_DEFERRED_PUT_FILES) {
			files = proc->files;
			if (files)
				proc->files = NULL;
		}

		if (defer & BINDER_DEFERRED_FLUSH)
			binder_deferred_flush(proc);

		if (defer & BINDER_DEFERRED_RELEASE)
			binder_deferred_release(proc); /* frees proc */

		binder_unlock(__func__);
		if (files)
			put_files_struct(files);
	} while (proc);
}
static DECLARE_WORK(binder_deferred_work, binder_deferred_func);
static void
binder_defer_work(struct binder_proc *proc, enum binder_deferred_state defer)
{
	mutex_lock(&binder_deferred_lock);
	proc->deferred_work |= defer;
	if (hlist_unhashed(&proc->deferred_work_node)) {
		hlist_add_head(&proc->deferred_work_node,
			       &binder_deferred_list);
		schedule_work(&binder_deferred_work);
	}
	mutex_unlock(&binder_deferred_lock);
}
static void print_binder_transaction(struct seq_file *m, const char *prefix,
				     struct binder_transaction *t)
{
	spin_lock(&t->lock);
	seq_printf(m,
		   "%s %d: %p from %d:%d to %d:%d code %x flags %x pri %ld r%d",
		   prefix, t->debug_id, t,
		   t->from ? t->from->proc->pid : 0,
		   t->from ? t->from->pid : 0,
		   t->to_proc ? t->to_proc->pid : 0,
		   t->to_thread ? t->to_thread->pid : 0,
		   t->code, t->flags, t->priority, t->need_reply);
	spin_unlock(&t->lock);

	if (t->buffer == NULL) {
		seq_puts(m, " buffer free\n");
		return;
	}
	if (t->buffer->target_node)
		seq_printf(m, " node %d",
			   t->buffer->target_node->debug_id);
	seq_printf(m, " size %zd:%zd data %p\n",
		   t->buffer->data_size, t->buffer->offsets_size,
		   t->buffer->data);
}
static void print_binder_work_ilocked(struct seq_file *m, const char *prefix,
				      const char *transaction_prefix,
				      struct binder_work *w)
{
	struct binder_node *node;
	struct binder_transaction *t;

	switch (w->type) {
	case BINDER_WORK_TRANSACTION:
		t = container_of(w, struct binder_transaction, work);
		print_binder_transaction(m, transaction_prefix, t);
		break;
	case BINDER_WORK_RETURN_ERROR: {
		struct binder_error *e = container_of(
				w, struct binder_error, work);

		seq_printf(m, "%stransaction error: %u\n",
			   prefix, e->cmd);
	} break;
	case BINDER_WORK_TRANSACTION_COMPLETE:
		seq_printf(m, "%stransaction complete\n", prefix);
		break;
	case BINDER_WORK_NODE:
		node = container_of(w, struct binder_node, work);
		seq_printf(m, "%snode work %d: u%016llx c%016llx\n",
			   prefix, node->debug_id,
			   (u64)node->ptr, (u64)node->cookie);
		break;
	case BINDER_WORK_DEAD_BINDER:
		seq_printf(m, "%shas dead binder\n", prefix);
		break;
	case BINDER_WORK_DEAD_BINDER_AND_CLEAR:
		seq_printf(m, "%shas cleared dead binder\n", prefix);
		break;
	case BINDER_WORK_CLEAR_DEATH_NOTIFICATION:
		seq_printf(m, "%shas cleared death notification\n", prefix);
		break;
	default:
		seq_printf(m, "%sunknown work: type %d\n", prefix, w->type);
		break;
	}
}
static void print_binder_thread_ilocked(struct seq_file *m,
					struct binder_thread *thread,
					int print_always)
{
	struct binder_transaction *t;
	struct binder_work *w;
	size_t start_pos = m->count;
	size_t header_pos;

	WARN_ON(!spin_is_locked(&thread->proc->inner_lock));
	seq_printf(m, " thread %d: l %02x need_return %d tr %d\n",
		   thread->pid, thread->looper,
		   thread->looper_need_return,
		   atomic_read(&thread->tmp_ref));
	header_pos = m->count;
	t = thread->transaction_stack;
	while (t) {
		if (t->from == thread) {
			print_binder_transaction(m,
						 " outgoing transaction", t);
			t = t->from_parent;
		} else if (t->to_thread == thread) {
			print_binder_transaction(m,
						 " incoming transaction", t);
			t = t->to_parent;
		} else {
			print_binder_transaction(m, " bad transaction", t);
			t = NULL;
		}
	}
	list_for_each_entry(w, &thread->todo, entry) {
		print_binder_work_ilocked(m, " ",
					  " pending transaction", w);
	}
	if (!print_always && m->count == header_pos)
		m->count = start_pos;
}
static void print_binder_node_nilocked(struct seq_file *m,
				       struct binder_node *node)
{
	struct binder_ref *ref;
	struct binder_work *w;
	int count;

	WARN_ON(!spin_is_locked(&node->lock));
	if (node->proc)
		WARN_ON(!spin_is_locked(&node->proc->inner_lock));

	count = 0;
	hlist_for_each_entry(ref, &node->refs, node_entry)
		count++;

	seq_printf(m, " node %d: u%016llx c%016llx hs %d hw %d ls %d lw %d is %d iw %d tr %d",
		   node->debug_id, (u64)node->ptr, (u64)node->cookie,
		   node->has_strong_ref, node->has_weak_ref,
		   node->local_strong_refs, node->local_weak_refs,
		   node->internal_strong_refs, count, node->tmp_refs);
	if (count) {
		seq_puts(m, " proc");
		hlist_for_each_entry(ref, &node->refs, node_entry)
			seq_printf(m, " %d", ref->proc->pid);
	}
	seq_puts(m, "\n");
	if (node->proc) {
		list_for_each_entry(w, &node->async_todo, entry)
			print_binder_work_ilocked(m, " ",
						  " pending async transaction", w);
	}
}
static void print_binder_ref(struct seq_file *m, struct binder_ref *ref)
{
	binder_node_lock(ref->node);
	seq_printf(m, " ref %d: desc %d %snode %d s %d w %d d %pK\n",
		   ref->data.debug_id, ref->data.desc,
		   ref->node->proc ? "" : "dead ",
		   ref->node->debug_id, ref->data.strong,
		   ref->data.weak, ref->death);
	binder_node_unlock(ref->node);
}
static void print_binder_proc(struct seq_file *m,
			      struct binder_proc *proc, int print_all)
{
	struct binder_work *w;
	struct rb_node *n;
	size_t start_pos = m->count;
	size_t header_pos;
	struct binder_node *last_node = NULL;

	seq_printf(m, "proc %d\n", proc->pid);
	seq_printf(m, "context %s\n", proc->context->name);
	header_pos = m->count;

	binder_inner_proc_lock(proc);
	for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n))
		print_binder_thread_ilocked(m, rb_entry(n, struct binder_thread,
						rb_node), print_all);

	for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n)) {
		struct binder_node *node = rb_entry(n, struct binder_node,
						    rb_node);
		/*
		 * take a temporary reference on the node so it
		 * survives and isn't removed from the tree
		 * while we print it.
		 */
		binder_inc_node_tmpref_ilocked(node);
		/* Need to drop inner lock to take node lock */
		binder_inner_proc_unlock(proc);
		if (last_node)
			binder_put_node(last_node);
		binder_node_inner_lock(node);
		print_binder_node_nilocked(m, node);
		binder_node_inner_unlock(node);
		last_node = node;
		binder_inner_proc_lock(proc);
	}
	binder_inner_proc_unlock(proc);
	if (last_node)
		binder_put_node(last_node);

	if (print_all) {
		for (n = rb_first(&proc->refs_by_desc);
		     n != NULL;
		     n = rb_next(n))
			print_binder_ref(m, rb_entry(n, struct binder_ref,
						     rb_node_desc));
	}
	binder_alloc_print_allocated(m, &proc->alloc);
	binder_inner_proc_lock(proc);
	list_for_each_entry(w, &proc->todo, entry)
		print_binder_work_ilocked(m, " ", " pending transaction", w);
	list_for_each_entry(w, &proc->delivered_death, entry) {
		seq_puts(m, " has delivered dead binder\n");
		break;
	}
	binder_inner_proc_unlock(proc);
	if (!print_all && m->count == header_pos)
		m->count = start_pos;
}
static const char * const binder_return_strings[] = {
	"BR_ERROR",
	"BR_OK",
	"BR_TRANSACTION",
	"BR_REPLY",
	"BR_ACQUIRE_RESULT",
	"BR_DEAD_REPLY",
	"BR_TRANSACTION_COMPLETE",
	"BR_INCREFS",
	"BR_ACQUIRE",
	"BR_RELEASE",
	"BR_DECREFS",
	"BR_ATTEMPT_ACQUIRE",
	"BR_NOOP",
	"BR_SPAWN_LOOPER",
	"BR_FINISHED",
	"BR_DEAD_BINDER",
	"BR_CLEAR_DEATH_NOTIFICATION_DONE",
	"BR_FAILED_REPLY"
};
static const char * const binder_command_strings[] = {
	"BC_TRANSACTION",
	"BC_REPLY",
	"BC_ACQUIRE_RESULT",
	"BC_FREE_BUFFER",
	"BC_INCREFS",
	"BC_ACQUIRE",
	"BC_RELEASE",
	"BC_DECREFS",
	"BC_INCREFS_DONE",
	"BC_ACQUIRE_DONE",
	"BC_ATTEMPT_ACQUIRE",
	"BC_REGISTER_LOOPER",
	"BC_ENTER_LOOPER",
	"BC_EXIT_LOOPER",
	"BC_REQUEST_DEATH_NOTIFICATION",
	"BC_CLEAR_DEATH_NOTIFICATION",
	"BC_DEAD_BINDER_DONE",
	"BC_TRANSACTION_SG",
	"BC_REPLY_SG",
};
static const char * const binder_objstat_strings[] = {
	"proc",
	"thread",
	"node",
	"ref",
	"death",
	"transaction",
	"transaction_complete"
};
static void print_binder_stats(struct seq_file *m, const char *prefix,
			       struct binder_stats *stats)
{
	int i;

	BUILD_BUG_ON(ARRAY_SIZE(stats->bc) !=
		     ARRAY_SIZE(binder_command_strings));
	for (i = 0; i < ARRAY_SIZE(stats->bc); i++) {
		int temp = atomic_read(&stats->bc[i]);

		if (temp)
			seq_printf(m, "%s%s: %d\n", prefix,
				   binder_command_strings[i], temp);
	}

	BUILD_BUG_ON(ARRAY_SIZE(stats->br) !=
		     ARRAY_SIZE(binder_return_strings));
	for (i = 0; i < ARRAY_SIZE(stats->br); i++) {
		int temp = atomic_read(&stats->br[i]);

		if (temp)
			seq_printf(m, "%s%s: %d\n", prefix,
				   binder_return_strings[i], temp);
	}

	BUILD_BUG_ON(ARRAY_SIZE(stats->obj_created) !=
		     ARRAY_SIZE(binder_objstat_strings));
	BUILD_BUG_ON(ARRAY_SIZE(stats->obj_created) !=
		     ARRAY_SIZE(stats->obj_deleted));
	for (i = 0; i < ARRAY_SIZE(stats->obj_created); i++) {
		int created = atomic_read(&stats->obj_created[i]);
		int deleted = atomic_read(&stats->obj_deleted[i]);

		if (created || deleted)
			seq_printf(m, "%s%s: active %d total %d\n",
				   prefix,
				   binder_objstat_strings[i],
				   created - deleted,
				   created);
	}
}
static void print_binder_proc_stats(struct seq_file *m,
				    struct binder_proc *proc)
{
	struct binder_work *w;
	struct rb_node *n;
	int count, strong, weak;
	size_t free_async_space =
		binder_alloc_get_free_async_space(&proc->alloc);

	seq_printf(m, "proc %d\n", proc->pid);
	seq_printf(m, "context %s\n", proc->context->name);
	count = 0;
	binder_inner_proc_lock(proc);
	for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n))
		count++;
	seq_printf(m, " threads: %d\n", count);
	seq_printf(m, " requested threads: %d+%d/%d\n"
		      " ready threads %d\n"
		      " free async space %zd\n", proc->requested_threads,
		   proc->requested_threads_started, proc->max_threads,
		   proc->ready_threads,
		   free_async_space);
	count = 0;
	for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n))
		count++;
	binder_inner_proc_unlock(proc);
	seq_printf(m, " nodes: %d\n", count);
	count = 0;
	strong = 0;
	weak = 0;
	for (n = rb_first(&proc->refs_by_desc); n != NULL; n = rb_next(n)) {
		struct binder_ref *ref = rb_entry(n, struct binder_ref,
						  rb_node_desc);
		count++;
		strong += ref->data.strong;
		weak += ref->data.weak;
	}
	seq_printf(m, " refs: %d s %d w %d\n", count, strong, weak);

	count = binder_alloc_get_allocated_count(&proc->alloc);
	seq_printf(m, " buffers: %d\n", count);

	count = 0;
	binder_inner_proc_lock(proc);
	list_for_each_entry(w, &proc->todo, entry) {
		if (w->type == BINDER_WORK_TRANSACTION)
			count++;
	}
	binder_inner_proc_unlock(proc);
	seq_printf(m, " pending transactions: %d\n", count);

	print_binder_stats(m, " ", &proc->stats);
}
static int binder_state_show(struct seq_file *m, void *unused)
{
	struct binder_proc *proc;
	struct binder_node *node;
	struct binder_node *last_node = NULL;

	binder_lock(__func__);

	seq_puts(m, "binder state:\n");

	spin_lock(&binder_dead_nodes_lock);
	if (!hlist_empty(&binder_dead_nodes))
		seq_puts(m, "dead nodes:\n");
	hlist_for_each_entry(node, &binder_dead_nodes, dead_node) {
		/*
		 * take a temporary reference on the node so it
		 * survives and isn't removed from the list
		 * while we print it.
		 */
		node->tmp_refs++;
		spin_unlock(&binder_dead_nodes_lock);
		if (last_node)
			binder_put_node(last_node);
		binder_node_lock(node);
		print_binder_node_nilocked(m, node);
		binder_node_unlock(node);
		last_node = node;
		spin_lock(&binder_dead_nodes_lock);
	}
	spin_unlock(&binder_dead_nodes_lock);
	if (last_node)
		binder_put_node(last_node);

	mutex_lock(&binder_procs_lock);
	hlist_for_each_entry(proc, &binder_procs, proc_node)
		print_binder_proc(m, proc, 1);
	mutex_unlock(&binder_procs_lock);
	binder_unlock(__func__);
	return 0;
}
static int binder_stats_show(struct seq_file *m, void *unused)
{
	struct binder_proc *proc;

	binder_lock(__func__);

	seq_puts(m, "binder stats:\n");

	print_binder_stats(m, "", &binder_stats);

	mutex_lock(&binder_procs_lock);
	hlist_for_each_entry(proc, &binder_procs, proc_node)
		print_binder_proc_stats(m, proc);
	mutex_unlock(&binder_procs_lock);
	binder_unlock(__func__);
	return 0;
}
static int binder_transactions_show(struct seq_file *m, void *unused)
{
	struct binder_proc *proc;

	binder_lock(__func__);

	seq_puts(m, "binder transactions:\n");
	mutex_lock(&binder_procs_lock);
	hlist_for_each_entry(proc, &binder_procs, proc_node)
		print_binder_proc(m, proc, 0);
	mutex_unlock(&binder_procs_lock);
	binder_unlock(__func__);
	return 0;
}
static int binder_proc_show(struct seq_file *m, void *unused)
{
	struct binder_proc *itr;
	int pid = (unsigned long)m->private;

	binder_lock(__func__);

	mutex_lock(&binder_procs_lock);
	hlist_for_each_entry(itr, &binder_procs, proc_node) {
		if (itr->pid == pid) {
			seq_puts(m, "binder proc state:\n");
			print_binder_proc(m, itr, 1);
		}
	}
	mutex_unlock(&binder_procs_lock);

	binder_unlock(__func__);
	return 0;
}
static void print_binder_transaction_log_entry(struct seq_file *m,
					struct binder_transaction_log_entry *e)
{
	int debug_id = READ_ONCE(e->debug_id_done);
	/*
	 * read barrier to guarantee debug_id_done read before
	 * we print the log values
	 */
	smp_rmb();
	seq_printf(m,
		   "%d: %s from %d:%d to %d:%d context %s node %d handle %d size %d:%d ret %d/%d l=%d",
		   e->debug_id, (e->call_type == 2) ? "reply" :
		   ((e->call_type == 1) ? "async" : "call "), e->from_proc,
		   e->from_thread, e->to_proc, e->to_thread, e->context_name,
		   e->to_node, e->target_handle, e->data_size, e->offsets_size,
		   e->return_error, e->return_error_param,
		   e->return_error_line);
	/*
	 * read-barrier to guarantee read of debug_id_done after
	 * done printing the fields of the entry
	 */
	smp_rmb();
	seq_printf(m, debug_id && debug_id == READ_ONCE(e->debug_id_done) ?
			"\n" : " (incomplete)\n");
}
static int binder_transaction_log_show(struct seq_file *m, void *unused)
{
	struct binder_transaction_log *log = m->private;
	unsigned int log_cur = atomic_read(&log->cur);
	unsigned int count;
	unsigned int cur;
	int i;

	count = log_cur + 1;
	cur = count < ARRAY_SIZE(log->entry) && !log->full ?
		0 : count % ARRAY_SIZE(log->entry);
	if (count > ARRAY_SIZE(log->entry) || log->full)
		count = ARRAY_SIZE(log->entry);
	for (i = 0; i < count; i++) {
		unsigned int index = cur++ % ARRAY_SIZE(log->entry);

		print_binder_transaction_log_entry(m, &log->entry[index]);
	}
	return 0;
}
static const struct file_operations binder_fops = {
	.owner = THIS_MODULE,
	.poll = binder_poll,
	.unlocked_ioctl = binder_ioctl,
	.compat_ioctl = binder_ioctl,
	.mmap = binder_mmap,
	.open = binder_open,
	.flush = binder_flush,
	.release = binder_release,
};
BINDER_DEBUG_ENTRY(state);
BINDER_DEBUG_ENTRY(stats);
BINDER_DEBUG_ENTRY(transactions);
BINDER_DEBUG_ENTRY(transaction_log);
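/*
 * Illustrative userspace sketch (not part of this driver): dumping the
 * debugfs "state" file exposed through BINDER_DEBUG_ENTRY()/binder_init().
 * The path assumes debugfs is mounted at /sys/kernel/debug, which is the
 * conventional location but still an assumption for the example.
 */
#if 0	/* example only; never compiled into the driver */
#include <stdio.h>

static void example_dump_binder_state(void)
{
	char line[256];
	FILE *f = fopen("/sys/kernel/debug/binder/state", "r");

	if (!f)
		return;
	while (fgets(line, sizeof(line), f))
		fputs(line, stdout);	/* per-proc nodes, refs, threads, todo lists */
	fclose(f);
}
#endif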
static int __init init_binder_device(const char *name)
{
	int ret;
	struct binder_device *binder_device;

	binder_device = kzalloc(sizeof(*binder_device), GFP_KERNEL);
	if (!binder_device)
		return -ENOMEM;

	binder_device->miscdev.fops = &binder_fops;
	binder_device->miscdev.minor = MISC_DYNAMIC_MINOR;
	binder_device->miscdev.name = name;

	binder_device->context.binder_context_mgr_uid = INVALID_UID;
	binder_device->context.name = name;
	mutex_init(&binder_device->context.context_mgr_node_lock);

	ret = misc_register(&binder_device->miscdev);
	if (ret < 0) {
		kfree(binder_device);
		return ret;
	}

	hlist_add_head(&binder_device->hlist, &binder_devices);

	return ret;
}
static int __init binder_init(void)
{
	int ret;
	char *device_name, *device_names;
	struct binder_device *device;
	struct hlist_node *tmp;

	atomic_set(&binder_transaction_log.cur, ~0U);
	atomic_set(&binder_transaction_log_failed.cur, ~0U);

	binder_debugfs_dir_entry_root = debugfs_create_dir("binder", NULL);
	if (binder_debugfs_dir_entry_root)
		binder_debugfs_dir_entry_proc = debugfs_create_dir("proc",
						binder_debugfs_dir_entry_root);

	if (binder_debugfs_dir_entry_root) {
		debugfs_create_file("state",
				    S_IRUGO,
				    binder_debugfs_dir_entry_root,
				    NULL,
				    &binder_state_fops);
		debugfs_create_file("stats",
				    S_IRUGO,
				    binder_debugfs_dir_entry_root,
				    NULL,
				    &binder_stats_fops);
		debugfs_create_file("transactions",
				    S_IRUGO,
				    binder_debugfs_dir_entry_root,
				    NULL,
				    &binder_transactions_fops);
		debugfs_create_file("transaction_log",
				    S_IRUGO,
				    binder_debugfs_dir_entry_root,
				    &binder_transaction_log,
				    &binder_transaction_log_fops);
		debugfs_create_file("failed_transaction_log",
				    S_IRUGO,
				    binder_debugfs_dir_entry_root,
				    &binder_transaction_log_failed,
				    &binder_transaction_log_fops);
	}

	/*
	 * Copy the module_parameter string, because we don't want to
	 * tokenize it in-place.
	 */
	device_names = kzalloc(strlen(binder_devices_param) + 1, GFP_KERNEL);
	if (!device_names) {
		ret = -ENOMEM;
		goto err_alloc_device_names_failed;
	}
	strcpy(device_names, binder_devices_param);

	while ((device_name = strsep(&device_names, ","))) {
		ret = init_binder_device(device_name);
		if (ret)
			goto err_init_binder_device_failed;
	}

	return ret;

err_init_binder_device_failed:
	hlist_for_each_entry_safe(device, tmp, &binder_devices, hlist) {
		misc_deregister(&device->miscdev);
		hlist_del(&device->hlist);
		kfree(device);
	}
err_alloc_device_names_failed:
	debugfs_remove_recursive(binder_debugfs_dir_entry_root);

	return ret;
}
device_initcall(binder_init);

#define CREATE_TRACE_POINTS
#include "binder_trace.h"

MODULE_LICENSE("GPL v2");