// SPDX-License-Identifier: GPL-2.0-only
/* binder.c
 *
 * Android IPC Subsystem
 *
 * Copyright (C) 2007-2008 Google, Inc.
 */

/*
 * Locking overview
 *
 * There are 3 main spinlocks which must be acquired in the
 * order shown:
 *
 * 1) proc->outer_lock : protects binder_ref
 *    binder_proc_lock() and binder_proc_unlock() are
 *    used to acq/rel.
 * 2) node->lock : protects most fields of binder_node.
 *    binder_node_lock() and binder_node_unlock() are
 *    used to acq/rel
 * 3) proc->inner_lock : protects the thread and node lists
 *    (proc->threads, proc->waiting_threads, proc->nodes)
 *    and all todo lists associated with the binder_proc
 *    (proc->todo, thread->todo, proc->delivered_death and
 *    node->async_todo), as well as thread->transaction_stack
 *    binder_inner_proc_lock() and binder_inner_proc_unlock()
 *    are used to acq/rel
 *
 * Any lock under procA must never be nested under any lock at the same
 * level or below on procB.
 *
 * Functions that require a lock held on entry indicate which lock
 * in the suffix of the function name:
 *
 * foo_olocked() : requires node->outer_lock
 * foo_nlocked() : requires node->lock
 * foo_ilocked() : requires proc->inner_lock
 * foo_oilocked(): requires proc->outer_lock and proc->inner_lock
 * foo_nilocked(): requires node->lock and proc->inner_lock
 */
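/*
 * Illustrative nesting sketch (editorial aid, not driver code): when
 * more than one of these locks is needed, they are acquired in the
 * documented order and released in reverse. Assuming a node whose
 * owning proc is still alive:
 *
 *	binder_proc_lock(proc);         <- 1) proc->outer_lock
 *	binder_node_lock(node);         <- 2) node->lock
 *	binder_inner_proc_lock(proc);   <- 3) proc->inner_lock
 *	... touch refs, node fields, todo lists ...
 *	binder_inner_proc_unlock(proc);
 *	binder_node_unlock(node);
 *	binder_proc_unlock(proc);
 */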
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/fdtable.h>
#include <linux/file.h>
#include <linux/freezer.h>
#include <linux/list.h>
#include <linux/miscdevice.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/nsproxy.h>
#include <linux/poll.h>
#include <linux/debugfs.h>
#include <linux/rbtree.h>
#include <linux/sched/signal.h>
#include <linux/sched/mm.h>
#include <linux/seq_file.h>
#include <linux/string.h>
#include <linux/uaccess.h>
#include <linux/pid_namespace.h>
#include <linux/security.h>
#include <linux/spinlock.h>
#include <linux/ratelimit.h>
#include <linux/syscalls.h>
#include <linux/task_work.h>
#include <linux/sizes.h>

#include <uapi/linux/android/binder.h>
#include <uapi/linux/android/binderfs.h>

#include <asm/cacheflush.h>

#include "binder_alloc.h"
#include "binder_internal.h"
#include "binder_trace.h"
static HLIST_HEAD(binder_deferred_list);
static DEFINE_MUTEX(binder_deferred_lock);

static HLIST_HEAD(binder_devices);
static HLIST_HEAD(binder_procs);
static DEFINE_MUTEX(binder_procs_lock);

static HLIST_HEAD(binder_dead_nodes);
static DEFINE_SPINLOCK(binder_dead_nodes_lock);

static struct dentry *binder_debugfs_dir_entry_root;
static struct dentry *binder_debugfs_dir_entry_proc;
static atomic_t binder_last_id;

static int proc_show(struct seq_file *m, void *unused);
DEFINE_SHOW_ATTRIBUTE(proc);
#define FORBIDDEN_MMAP_FLAGS (VM_WRITE)

enum {
	BINDER_DEBUG_USER_ERROR             = 1U << 0,
	BINDER_DEBUG_FAILED_TRANSACTION     = 1U << 1,
	BINDER_DEBUG_DEAD_TRANSACTION       = 1U << 2,
	BINDER_DEBUG_OPEN_CLOSE             = 1U << 3,
	BINDER_DEBUG_DEAD_BINDER            = 1U << 4,
	BINDER_DEBUG_DEATH_NOTIFICATION     = 1U << 5,
	BINDER_DEBUG_READ_WRITE             = 1U << 6,
	BINDER_DEBUG_USER_REFS              = 1U << 7,
	BINDER_DEBUG_THREADS                = 1U << 8,
	BINDER_DEBUG_TRANSACTION            = 1U << 9,
	BINDER_DEBUG_TRANSACTION_COMPLETE   = 1U << 10,
	BINDER_DEBUG_FREE_BUFFER            = 1U << 11,
	BINDER_DEBUG_INTERNAL_REFS          = 1U << 12,
	BINDER_DEBUG_PRIORITY_CAP           = 1U << 13,
	BINDER_DEBUG_SPINLOCKS              = 1U << 14,
};
static uint32_t binder_debug_mask = BINDER_DEBUG_USER_ERROR |
	BINDER_DEBUG_FAILED_TRANSACTION | BINDER_DEBUG_DEAD_TRANSACTION;
module_param_named(debug_mask, binder_debug_mask, uint, 0644);

char *binder_devices_param = CONFIG_ANDROID_BINDER_DEVICES;
module_param_named(devices, binder_devices_param, charp, 0444);

static DECLARE_WAIT_QUEUE_HEAD(binder_user_error_wait);
static int binder_stop_on_user_error;
static int binder_set_stop_on_user_error(const char *val,
					 const struct kernel_param *kp)
{
	int ret;

	ret = param_set_int(val, kp);
	if (binder_stop_on_user_error < 2)
		wake_up(&binder_user_error_wait);

	return ret;
}
module_param_call(stop_on_user_error, binder_set_stop_on_user_error,
		  param_get_int, &binder_stop_on_user_error, 0644);
#define binder_debug(mask, x...) \
	do { \
		if (binder_debug_mask & mask) \
			pr_info_ratelimited(x); \
	} while (0)

#define binder_user_error(x...) \
	do { \
		if (binder_debug_mask & BINDER_DEBUG_USER_ERROR) \
			pr_info_ratelimited(x); \
		if (binder_stop_on_user_error) \
			binder_stop_on_user_error = 2; \
	} while (0)
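/*
 * Usage sketch (editorial aid, not driver code): both macros take a
 * printk-style format string, e.g.
 *
 *	binder_debug(BINDER_DEBUG_SPINLOCKS, "%s: line=%d\n", __func__, line);
 *	binder_user_error("tried to use weak ref as strong ref\n");
 *
 * binder_user_error() additionally latches binder_stop_on_user_error
 * to 2 when stop-on-error is enabled, so the driver can park callers
 * on binder_user_error_wait (woken again by
 * binder_set_stop_on_user_error() above).
 */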
#define to_flat_binder_object(hdr) \
	container_of(hdr, struct flat_binder_object, hdr)

#define to_binder_fd_object(hdr) container_of(hdr, struct binder_fd_object, hdr)

#define to_binder_buffer_object(hdr) \
	container_of(hdr, struct binder_buffer_object, hdr)

#define to_binder_fd_array_object(hdr) \
	container_of(hdr, struct binder_fd_array_object, hdr)
enum binder_stat_types {
	BINDER_STAT_PROC,
	BINDER_STAT_THREAD,
	BINDER_STAT_NODE,
	BINDER_STAT_REF,
	BINDER_STAT_DEATH,
	BINDER_STAT_TRANSACTION,
	BINDER_STAT_TRANSACTION_COMPLETE,
	BINDER_STAT_COUNT
};

struct binder_stats {
	atomic_t br[_IOC_NR(BR_FAILED_REPLY) + 1];
	atomic_t bc[_IOC_NR(BC_REPLY_SG) + 1];
	atomic_t obj_created[BINDER_STAT_COUNT];
	atomic_t obj_deleted[BINDER_STAT_COUNT];
};

static struct binder_stats binder_stats;
static inline void binder_stats_deleted(enum binder_stat_types type)
{
	atomic_inc(&binder_stats.obj_deleted[type]);
}

static inline void binder_stats_created(enum binder_stat_types type)
{
	atomic_inc(&binder_stats.obj_created[type]);
}
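/*
 * Usage sketch (editorial aid): object lifetimes are bracketed by a
 * created/deleted pair, so the difference between the two counters is
 * the number of objects of that type currently alive. For example,
 * binder_init_node_ilocked() calls binder_stats_created(BINDER_STAT_NODE)
 * and binder_free_node() calls binder_stats_deleted(BINDER_STAT_NODE).
 */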
struct binder_transaction_log binder_transaction_log;
struct binder_transaction_log binder_transaction_log_failed;

static struct binder_transaction_log_entry *binder_transaction_log_add(
	struct binder_transaction_log *log)
{
	struct binder_transaction_log_entry *e;
	unsigned int cur = atomic_inc_return(&log->cur);

	if (cur >= ARRAY_SIZE(log->entry))
		log->full = true;
	e = &log->entry[cur % ARRAY_SIZE(log->entry)];
	WRITE_ONCE(e->debug_id_done, 0);
	/*
	 * write-barrier to synchronize access to e->debug_id_done.
	 * We make sure the initialized 0 value is seen before
	 * the remaining fields are zeroed by memset().
	 */
	smp_wmb();
	memset(e, 0, sizeof(*e));
	return e;
}
/**
 * struct binder_work - work enqueued on a worklist
 * @entry:             node enqueued on list
 * @type:              type of work to be performed
 *
 * There are separate work lists for proc, thread, and node (async).
 */
struct binder_work {
	struct list_head entry;

	enum {
		BINDER_WORK_TRANSACTION = 1,
		BINDER_WORK_TRANSACTION_COMPLETE,
		BINDER_WORK_RETURN_ERROR,
		BINDER_WORK_NODE,
		BINDER_WORK_DEAD_BINDER,
		BINDER_WORK_DEAD_BINDER_AND_CLEAR,
		BINDER_WORK_CLEAR_DEATH_NOTIFICATION,
	} type;
};

struct binder_error {
	struct binder_work work;
	uint32_t cmd;
};
/**
 * struct binder_node - binder node bookkeeping
 * @debug_id:             unique ID for debugging
 *                        (invariant after initialized)
 * @lock:                 lock for node fields
 * @work:                 worklist element for node work
 *                        (protected by @proc->inner_lock)
 * @rb_node:              element for proc->nodes tree
 *                        (protected by @proc->inner_lock)
 * @dead_node:            element for binder_dead_nodes list
 *                        (protected by binder_dead_nodes_lock)
 * @proc:                 binder_proc that owns this node
 *                        (invariant after initialized)
 * @refs:                 list of references on this node
 *                        (protected by @lock)
 * @internal_strong_refs: used to take strong references when
 *                        initiating a transaction
 *                        (protected by @proc->inner_lock if @proc
 *                        and by @lock)
 * @local_weak_refs:      weak user refs from local process
 *                        (protected by @proc->inner_lock if @proc
 *                        and by @lock)
 * @local_strong_refs:    strong user refs from local process
 *                        (protected by @proc->inner_lock if @proc
 *                        and by @lock)
 * @tmp_refs:             temporary kernel refs
 *                        (protected by @proc->inner_lock while @proc
 *                        is valid, and by binder_dead_nodes_lock
 *                        if @proc is NULL. During inc/dec and node release
 *                        it is also protected by @lock to provide safety
 *                        as the node dies and @proc becomes NULL)
 * @ptr:                  userspace pointer for node
 *                        (invariant, no lock needed)
 * @cookie:               userspace cookie for node
 *                        (invariant, no lock needed)
 * @has_strong_ref:       userspace notified of strong ref
 *                        (protected by @proc->inner_lock if @proc
 *                        and by @lock)
 * @pending_strong_ref:   userspace has acked notification of strong ref
 *                        (protected by @proc->inner_lock if @proc
 *                        and by @lock)
 * @has_weak_ref:         userspace notified of weak ref
 *                        (protected by @proc->inner_lock if @proc
 *                        and by @lock)
 * @pending_weak_ref:     userspace has acked notification of weak ref
 *                        (protected by @proc->inner_lock if @proc
 *                        and by @lock)
 * @has_async_transaction: async transaction to node in progress
 *                        (protected by @lock)
 * @accept_fds:           file descriptor operations supported for node
 *                        (invariant after initialized)
 * @min_priority:         minimum scheduling priority
 *                        (invariant after initialized)
 * @txn_security_ctx:     require sender's security context
 *                        (invariant after initialized)
 * @async_todo:           list of async work items
 *                        (protected by @proc->inner_lock)
 *
 * Bookkeeping structure for binder nodes.
 */
struct binder_node {
	int debug_id;
	spinlock_t lock;
	struct binder_work work;
	union {
		struct rb_node rb_node;
		struct hlist_node dead_node;
	};
	struct binder_proc *proc;
	struct hlist_head refs;
	int internal_strong_refs;
	int local_weak_refs;
	int local_strong_refs;
	int tmp_refs;
	binder_uintptr_t ptr;
	binder_uintptr_t cookie;
	struct {
		/*
		 * bitfield elements protected by
		 * proc inner_lock
		 */
		u8 has_strong_ref:1;
		u8 pending_strong_ref:1;
		u8 has_weak_ref:1;
		u8 pending_weak_ref:1;
	};
	struct {
		/*
		 * invariant after initialization
		 */
		u8 accept_fds:1;
		u8 txn_security_ctx:1;
		u8 min_priority;
	};
	bool has_async_transaction;
	struct list_head async_todo;
};
struct binder_ref_death {
	/**
	 * @work: worklist element for death notifications
	 *        (protected by inner_lock of the proc that
	 *        this ref belongs to)
	 */
	struct binder_work work;
	binder_uintptr_t cookie;
};
/**
 * struct binder_ref_data - binder_ref counts and id
 * @debug_id:        unique ID for the ref
 * @desc:            unique userspace handle for ref
 * @strong:          strong ref count (debugging only if not locked)
 * @weak:            weak ref count (debugging only if not locked)
 *
 * Structure to hold ref count and ref id information. Since
 * the actual ref can only be accessed with a lock, this structure
 * is used to return information about the ref to callers of
 * ref inc/dec functions.
 */
struct binder_ref_data {
	int debug_id;
	uint32_t desc;
	int strong;
	int weak;
};
/**
 * struct binder_ref - struct to track references on nodes
 * @data:        binder_ref_data containing id, handle, and current refcounts
 * @rb_node_desc: node for lookup by @data.desc in proc's rb_tree
 * @rb_node_node: node for lookup by @node in proc's rb_tree
 * @node_entry:  list entry for node->refs list in target node
 *               (protected by @node->lock)
 * @proc:        binder_proc containing ref
 * @node:        binder_node of target node. When cleaning up a
 *               ref for deletion in binder_cleanup_ref, a non-NULL
 *               @node indicates the node must be freed
 * @death:       pointer to death notification (ref_death) if requested
 *               (protected by @node->lock)
 *
 * Structure to track references from procA to target node (on procB). This
 * structure is unsafe to access without holding @proc->outer_lock.
 */
struct binder_ref {
	/* Lookups needed: */
	/*   node + proc => ref (transaction) */
	/*   desc + proc => ref (transaction, inc/dec ref) */
	/*   node => refs + procs (proc exit) */
	struct binder_ref_data data;
	struct rb_node rb_node_desc;
	struct rb_node rb_node_node;
	struct hlist_node node_entry;
	struct binder_proc *proc;
	struct binder_node *node;
	struct binder_ref_death *death;
};
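/*
 * Editorial note: the two rb_node members implement the first two
 * lookups listed above -- binder_get_ref_olocked() walks
 * proc->refs_by_desc via @rb_node_desc, while
 * binder_get_ref_for_node_olocked() walks proc->refs_by_node via
 * @rb_node_node. The third lookup (node => refs) is served by
 * @node_entry hanging off node->refs.
 */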
enum binder_deferred_state {
	BINDER_DEFERRED_FLUSH        = 0x01,
	BINDER_DEFERRED_RELEASE      = 0x02,
};
/**
 * struct binder_proc - binder process bookkeeping
 * @proc_node:            element for binder_procs list
 * @threads:              rbtree of binder_threads in this proc
 *                        (protected by @inner_lock)
 * @nodes:                rbtree of binder nodes associated with
 *                        this proc ordered by node->ptr
 *                        (protected by @inner_lock)
 * @refs_by_desc:         rbtree of refs ordered by ref->desc
 *                        (protected by @outer_lock)
 * @refs_by_node:         rbtree of refs ordered by ref->node
 *                        (protected by @outer_lock)
 * @waiting_threads:      threads currently waiting for proc work
 *                        (protected by @inner_lock)
 * @pid:                  PID of group_leader of process
 *                        (invariant after initialized)
 * @tsk:                  task_struct for group_leader of process
 *                        (invariant after initialized)
 * @deferred_work_node:   element for binder_deferred_list
 *                        (protected by binder_deferred_lock)
 * @deferred_work:        bitmap of deferred work to perform
 *                        (protected by binder_deferred_lock)
 * @is_dead:              process is dead and awaiting free
 *                        when outstanding transactions are cleaned up
 *                        (protected by @inner_lock)
 * @todo:                 list of work for this process
 *                        (protected by @inner_lock)
 * @stats:                per-process binder statistics
 *                        (atomics, no lock needed)
 * @delivered_death:      list of delivered death notifications
 *                        (protected by @inner_lock)
 * @max_threads:          cap on number of binder threads
 *                        (protected by @inner_lock)
 * @requested_threads:    number of binder threads requested but not
 *                        yet started. In current implementation, can
 *                        only be 0 or 1.
 *                        (protected by @inner_lock)
 * @requested_threads_started: number of binder threads started
 *                        (protected by @inner_lock)
 * @tmp_ref:              temporary reference to indicate proc is in use
 *                        (protected by @inner_lock)
 * @default_priority:     default scheduler priority
 *                        (invariant after initialized)
 * @debugfs_entry:        debugfs node
 * @alloc:                binder allocator bookkeeping
 * @context:              binder_context for this proc
 *                        (invariant after initialized)
 * @inner_lock:           can nest under outer_lock and/or node lock
 * @outer_lock:           no nesting under inner or node lock
 *                        Lock order: 1) outer, 2) node, 3) inner
 * @binderfs_entry:       process-specific binderfs log file
 *
 * Bookkeeping structure for binder processes
 */
struct binder_proc {
	struct hlist_node proc_node;
	struct rb_root threads;
	struct rb_root nodes;
	struct rb_root refs_by_desc;
	struct rb_root refs_by_node;
	struct list_head waiting_threads;
	int pid;
	struct task_struct *tsk;
	struct hlist_node deferred_work_node;
	int deferred_work;
	bool is_dead;

	struct list_head todo;
	struct binder_stats stats;
	struct list_head delivered_death;
	int max_threads;
	int requested_threads;
	int requested_threads_started;
	int tmp_ref;
	long default_priority;
	struct dentry *debugfs_entry;
	struct binder_alloc alloc;
	struct binder_context *context;
	spinlock_t inner_lock;
	spinlock_t outer_lock;
	struct dentry *binderfs_entry;
};
enum {
	BINDER_LOOPER_STATE_REGISTERED  = 0x01,
	BINDER_LOOPER_STATE_ENTERED     = 0x02,
	BINDER_LOOPER_STATE_EXITED      = 0x04,
	BINDER_LOOPER_STATE_INVALID     = 0x08,
	BINDER_LOOPER_STATE_WAITING     = 0x10,
	BINDER_LOOPER_STATE_POLL        = 0x20,
};
/**
 * struct binder_thread - binder thread bookkeeping
 * @proc:                 binder process for this thread
 *                        (invariant after initialization)
 * @rb_node:              element for proc->threads rbtree
 *                        (protected by @proc->inner_lock)
 * @waiting_thread_node:  element for @proc->waiting_threads list
 *                        (protected by @proc->inner_lock)
 * @pid:                  PID for this thread
 *                        (invariant after initialization)
 * @looper:               bitmap of looping state
 *                        (only accessed by this thread)
 * @looper_need_return:   looping thread needs to exit driver
 *                        (no lock needed)
 * @transaction_stack:    stack of in-progress transactions for this thread
 *                        (protected by @proc->inner_lock)
 * @todo:                 list of work to do for this thread
 *                        (protected by @proc->inner_lock)
 * @process_todo:         whether work in @todo should be processed
 *                        (protected by @proc->inner_lock)
 * @return_error:         transaction errors reported by this thread
 *                        (only accessed by this thread)
 * @reply_error:          transaction errors reported by target thread
 *                        (protected by @proc->inner_lock)
 * @wait:                 wait queue for thread work
 * @stats:                per-thread statistics
 *                        (atomics, no lock needed)
 * @tmp_ref:              temporary reference to indicate thread is in use
 *                        (atomic since @proc->inner_lock cannot
 *                        always be acquired)
 * @is_dead:              thread is dead and awaiting free
 *                        when outstanding transactions are cleaned up
 *                        (protected by @proc->inner_lock)
 *
 * Bookkeeping structure for binder threads.
 */
struct binder_thread {
	struct binder_proc *proc;
	struct rb_node rb_node;
	struct list_head waiting_thread_node;
	int pid;
	int looper;              /* only modified by this thread */
	bool looper_need_return; /* can be written by other thread */
	struct binder_transaction *transaction_stack;
	struct list_head todo;
	bool process_todo;
	struct binder_error return_error;
	struct binder_error reply_error;
	wait_queue_head_t wait;
	struct binder_stats stats;
	atomic_t tmp_ref;
	bool is_dead;
};
/**
 * struct binder_txn_fd_fixup - transaction fd fixup list element
 * @fixup_entry:          list entry
 * @file:                 struct file to be associated with new fd
 * @offset:               offset in buffer data to this fixup
 *
 * List element for fd fixups in a transaction. Since file
 * descriptors need to be allocated in the context of the
 * target process, we pass each fd to be processed in this
 * struct.
 */
struct binder_txn_fd_fixup {
	struct list_head fixup_entry;
	struct file *file;
	size_t offset;
};
struct binder_transaction {
	int debug_id;
	struct binder_work work;
	struct binder_thread *from;
	struct binder_transaction *from_parent;
	struct binder_proc *to_proc;
	struct binder_thread *to_thread;
	struct binder_transaction *to_parent;
	unsigned need_reply:1;
	/* unsigned is_dead:1; */	/* not used at the moment */

	struct binder_buffer *buffer;
	unsigned int code;
	unsigned int flags;
	long priority;
	long saved_priority;
	kuid_t sender_euid;
	struct list_head fd_fixups;
	binder_uintptr_t security_ctx;
	/**
	 * @lock:  protects @from, @to_proc, and @to_thread
	 *
	 * @from, @to_proc, and @to_thread can be set to NULL
	 * during thread teardown
	 */
	spinlock_t lock;
};
/**
 * struct binder_object - union of flat binder object types
 * @hdr:   generic object header
 * @fbo:   binder object (nodes and refs)
 * @fdo:   file descriptor object
 * @bbo:   binder buffer pointer
 * @fdao:  file descriptor array
 *
 * Used for type-independent object copies
 */
struct binder_object {
	union {
		struct binder_object_header hdr;
		struct flat_binder_object fbo;
		struct binder_fd_object fdo;
		struct binder_buffer_object bbo;
		struct binder_fd_array_object fdao;
	};
};
/**
 * binder_proc_lock() - Acquire outer lock for given binder_proc
 * @proc:         struct binder_proc to acquire
 *
 * Acquires proc->outer_lock. Used to protect binder_ref
 * structures associated with the given proc.
 */
#define binder_proc_lock(proc) _binder_proc_lock(proc, __LINE__)
static void
_binder_proc_lock(struct binder_proc *proc, int line)
	__acquires(&proc->outer_lock)
{
	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	spin_lock(&proc->outer_lock);
}
/**
 * binder_proc_unlock() - Release spinlock for given binder_proc
 * @proc:         struct binder_proc to release
 *
 * Release lock acquired via binder_proc_lock()
 */
#define binder_proc_unlock(_proc) _binder_proc_unlock(_proc, __LINE__)
static void
_binder_proc_unlock(struct binder_proc *proc, int line)
	__releases(&proc->outer_lock)
{
	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	spin_unlock(&proc->outer_lock);
}
/**
 * binder_inner_proc_lock() - Acquire inner lock for given binder_proc
 * @proc:         struct binder_proc to acquire
 *
 * Acquires proc->inner_lock. Used to protect todo lists
 */
#define binder_inner_proc_lock(proc) _binder_inner_proc_lock(proc, __LINE__)
static void
_binder_inner_proc_lock(struct binder_proc *proc, int line)
	__acquires(&proc->inner_lock)
{
	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	spin_lock(&proc->inner_lock);
}
/**
 * binder_inner_proc_unlock() - Release inner lock for given binder_proc
 * @proc:         struct binder_proc to release
 *
 * Release lock acquired via binder_inner_proc_lock()
 */
#define binder_inner_proc_unlock(proc) _binder_inner_proc_unlock(proc, __LINE__)
static void
_binder_inner_proc_unlock(struct binder_proc *proc, int line)
	__releases(&proc->inner_lock)
{
	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	spin_unlock(&proc->inner_lock);
}
/**
 * binder_node_lock() - Acquire spinlock for given binder_node
 * @node:         struct binder_node to acquire
 *
 * Acquires node->lock. Used to protect binder_node fields
 */
#define binder_node_lock(node) _binder_node_lock(node, __LINE__)
static void
_binder_node_lock(struct binder_node *node, int line)
	__acquires(&node->lock)
{
	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	spin_lock(&node->lock);
}
/**
 * binder_node_unlock() - Release spinlock for given binder_node
 * @node:         struct binder_node to release
 *
 * Release lock acquired via binder_node_lock()
 */
#define binder_node_unlock(node) _binder_node_unlock(node, __LINE__)
static void
_binder_node_unlock(struct binder_node *node, int line)
	__releases(&node->lock)
{
	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	spin_unlock(&node->lock);
}
/**
 * binder_node_inner_lock() - Acquire node and inner locks
 * @node:         struct binder_node to acquire
 *
 * Acquires node->lock. If node->proc is valid, also acquires
 * proc->inner_lock. Used to protect binder_node fields
 */
#define binder_node_inner_lock(node) _binder_node_inner_lock(node, __LINE__)
static void
_binder_node_inner_lock(struct binder_node *node, int line)
	__acquires(&node->lock) __acquires(&node->proc->inner_lock)
{
	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	spin_lock(&node->lock);
	if (node->proc)
		binder_inner_proc_lock(node->proc);
	else
		/* annotation for sparse */
		__acquire(&node->proc->inner_lock);
}
/**
 * binder_node_inner_unlock() - Release node and inner locks
 * @node:         struct binder_node to release
 *
 * Release locks acquired via binder_node_inner_lock()
 */
#define binder_node_inner_unlock(node) _binder_node_inner_unlock(node, __LINE__)
static void
_binder_node_inner_unlock(struct binder_node *node, int line)
	__releases(&node->lock) __releases(&node->proc->inner_lock)
{
	struct binder_proc *proc = node->proc;

	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	if (proc)
		binder_inner_proc_unlock(proc);
	else
		/* annotation for sparse */
		__release(&node->proc->inner_lock);
	spin_unlock(&node->lock);
}
static bool binder_worklist_empty_ilocked(struct list_head *list)
{
	return list_empty(list);
}
/**
 * binder_worklist_empty() - Check if no items on the work list
 * @proc:       binder_proc associated with list
 * @list:       list to check
 *
 * Return: true if there are no items on list, else false
 */
static bool binder_worklist_empty(struct binder_proc *proc,
				  struct list_head *list)
{
	bool ret;

	binder_inner_proc_lock(proc);
	ret = binder_worklist_empty_ilocked(list);
	binder_inner_proc_unlock(proc);
	return ret;
}
/**
 * binder_enqueue_work_ilocked() - Add an item to the work list
 * @work:         struct binder_work to add to list
 * @target_list:  list to add work to
 *
 * Adds the work to the specified list. Asserts that work
 * is not already on a list.
 *
 * Requires the proc->inner_lock to be held.
 */
static void
binder_enqueue_work_ilocked(struct binder_work *work,
			    struct list_head *target_list)
{
	BUG_ON(target_list == NULL);
	BUG_ON(work->entry.next && !list_empty(&work->entry));
	list_add_tail(&work->entry, target_list);
}
/**
 * binder_enqueue_deferred_thread_work_ilocked() - Add deferred thread work
 * @thread:       thread to queue work to
 * @work:         struct binder_work to add to list
 *
 * Adds the work to the todo list of the thread. Doesn't set the process_todo
 * flag, which means that (if it wasn't already set) the thread will go to
 * sleep without handling this work when it calls read.
 *
 * Requires the proc->inner_lock to be held.
 */
static void
binder_enqueue_deferred_thread_work_ilocked(struct binder_thread *thread,
					    struct binder_work *work)
{
	WARN_ON(!list_empty(&thread->waiting_thread_node));
	binder_enqueue_work_ilocked(work, &thread->todo);
}
/**
 * binder_enqueue_thread_work_ilocked() - Add an item to the thread work list
 * @thread:       thread to queue work to
 * @work:         struct binder_work to add to list
 *
 * Adds the work to the todo list of the thread, and enables processing
 * of the todo queue.
 *
 * Requires the proc->inner_lock to be held.
 */
static void
binder_enqueue_thread_work_ilocked(struct binder_thread *thread,
				   struct binder_work *work)
{
	WARN_ON(!list_empty(&thread->waiting_thread_node));
	binder_enqueue_work_ilocked(work, &thread->todo);
	thread->process_todo = true;
}
/**
 * binder_enqueue_thread_work() - Add an item to the thread work list
 * @thread:       thread to queue work to
 * @work:         struct binder_work to add to list
 *
 * Adds the work to the todo list of the thread, and enables processing
 * of the todo queue.
 */
static void
binder_enqueue_thread_work(struct binder_thread *thread,
			   struct binder_work *work)
{
	binder_inner_proc_lock(thread->proc);
	binder_enqueue_thread_work_ilocked(thread, work);
	binder_inner_proc_unlock(thread->proc);
}
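/*
 * Editorial note: the deferred/non-deferred split above is the only
 * difference in whether thread->process_todo gets set. Work queued via
 * binder_enqueue_deferred_thread_work_ilocked() (e.g. node work queued
 * by binder_inc_node_nilocked() below) sits on thread->todo without by
 * itself waking the thread; work queued via the
 * binder_enqueue_thread_work*() helpers marks the todo list as ready
 * to process.
 */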
static void
binder_dequeue_work_ilocked(struct binder_work *work)
{
	list_del_init(&work->entry);
}
/**
 * binder_dequeue_work() - Removes an item from the work list
 * @proc:         binder_proc associated with list
 * @work:         struct binder_work to remove from list
 *
 * Removes the specified work item from whatever list it is on.
 * Can safely be called if work is not on any list.
 */
static void
binder_dequeue_work(struct binder_proc *proc, struct binder_work *work)
{
	binder_inner_proc_lock(proc);
	binder_dequeue_work_ilocked(work);
	binder_inner_proc_unlock(proc);
}
static struct binder_work *binder_dequeue_work_head_ilocked(
					struct list_head *list)
{
	struct binder_work *w;

	w = list_first_entry_or_null(list, struct binder_work, entry);
	if (w)
		list_del_init(&w->entry);
	return w;
}
/**
 * binder_dequeue_work_head() - Dequeues the item at head of list
 * @proc:         binder_proc associated with list
 * @list:         list to dequeue head
 *
 * Removes the head of the list if there are items on the list
 *
 * Return: pointer to dequeued binder_work, NULL if list was empty
 */
static struct binder_work *binder_dequeue_work_head(
					struct binder_proc *proc,
					struct list_head *list)
{
	struct binder_work *w;

	binder_inner_proc_lock(proc);
	w = binder_dequeue_work_head_ilocked(list);
	binder_inner_proc_unlock(proc);
	return w;
}
static void
binder_defer_work(struct binder_proc *proc, enum binder_deferred_state defer);
static void binder_free_thread(struct binder_thread *thread);
static void binder_free_proc(struct binder_proc *proc);
static void binder_inc_node_tmpref_ilocked(struct binder_node *node);
static bool binder_has_work_ilocked(struct binder_thread *thread,
				    bool do_proc_work)
{
	return thread->process_todo ||
		thread->looper_need_return ||
		(do_proc_work &&
		 !binder_worklist_empty_ilocked(&thread->proc->todo));
}
static bool binder_has_work(struct binder_thread *thread, bool do_proc_work)
{
	bool has_work;

	binder_inner_proc_lock(thread->proc);
	has_work = binder_has_work_ilocked(thread, do_proc_work);
	binder_inner_proc_unlock(thread->proc);

	return has_work;
}
static bool binder_available_for_proc_work_ilocked(struct binder_thread *thread)
{
	return !thread->transaction_stack &&
		binder_worklist_empty_ilocked(&thread->todo) &&
		(thread->looper & (BINDER_LOOPER_STATE_ENTERED |
				   BINDER_LOOPER_STATE_REGISTERED));
}
static void binder_wakeup_poll_threads_ilocked(struct binder_proc *proc,
					       bool sync)
{
	struct rb_node *n;
	struct binder_thread *thread;

	for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n)) {
		thread = rb_entry(n, struct binder_thread, rb_node);
		if (thread->looper & BINDER_LOOPER_STATE_POLL &&
		    binder_available_for_proc_work_ilocked(thread)) {
			if (sync)
				wake_up_interruptible_sync(&thread->wait);
			else
				wake_up_interruptible(&thread->wait);
		}
	}
}
/**
 * binder_select_thread_ilocked() - selects a thread for doing proc work.
 * @proc:	process to select a thread from
 *
 * Note that calling this function moves the thread off the waiting_threads
 * list, so it can only be woken up by the caller of this function, or a
 * signal. Therefore, callers *should* always wake up the thread this function
 * returns.
 *
 * Return:	If there's a thread currently waiting for process work,
 *		returns that thread. Otherwise returns NULL.
 */
static struct binder_thread *
binder_select_thread_ilocked(struct binder_proc *proc)
{
	struct binder_thread *thread;

	assert_spin_locked(&proc->inner_lock);
	thread = list_first_entry_or_null(&proc->waiting_threads,
					  struct binder_thread,
					  waiting_thread_node);

	if (thread)
		list_del_init(&thread->waiting_thread_node);

	return thread;
}
/**
 * binder_wakeup_thread_ilocked() - wakes up a thread for doing proc work.
 * @proc:	process to wake up a thread in
 * @thread:	specific thread to wake-up (may be NULL)
 * @sync:	whether to do a synchronous wake-up
 *
 * This function wakes up a thread in the @proc process.
 * The caller may provide a specific thread to wake-up in
 * the @thread parameter. If @thread is NULL, this function
 * will wake up threads that have called poll().
 *
 * Note that for this function to work as expected, callers
 * should first call binder_select_thread() to find a thread
 * to handle the work (if they don't have a thread already),
 * and pass the result into the @thread parameter.
 */
static void binder_wakeup_thread_ilocked(struct binder_proc *proc,
					 struct binder_thread *thread,
					 bool sync)
{
	assert_spin_locked(&proc->inner_lock);

	if (thread) {
		if (sync)
			wake_up_interruptible_sync(&thread->wait);
		else
			wake_up_interruptible(&thread->wait);
		return;
	}

	/* Didn't find a thread waiting for proc work; this can happen
	 * in two scenarios:
	 * 1. All threads are busy handling transactions
	 *    In that case, one of those threads should call back into
	 *    the kernel driver soon and pick up this work.
	 * 2. Threads are using the (e)poll interface, in which case
	 *    they may be blocked on the waitqueue without having been
	 *    added to waiting_threads. For this case, we just iterate
	 *    over all threads not handling transaction work, and
	 *    wake them all up. We wake all because we don't know whether
	 *    a thread that called into (e)poll is handling non-binder
	 *    work currently.
	 */
	binder_wakeup_poll_threads_ilocked(proc, sync);
}
static void binder_wakeup_proc_ilocked(struct binder_proc *proc)
{
	struct binder_thread *thread = binder_select_thread_ilocked(proc);

	binder_wakeup_thread_ilocked(proc, thread, /* sync = */false);
}
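/*
 * Editorial note: binder_wakeup_proc_ilocked() above is the canonical
 * pairing demanded by the binder_select_thread_ilocked() kerneldoc:
 * the selected thread has been removed from waiting_threads, so it is
 * handed straight to binder_wakeup_thread_ilocked() rather than being
 * left asleep with work pending.
 */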
static void binder_set_nice(long nice)
{
	long min_nice;

	if (can_nice(current, nice)) {
		set_user_nice(current, nice);
		return;
	}
	min_nice = rlimit_to_nice(rlimit(RLIMIT_NICE));
	binder_debug(BINDER_DEBUG_PRIORITY_CAP,
		     "%d: nice value %ld not allowed use %ld instead\n",
		      current->pid, nice, min_nice);
	set_user_nice(current, min_nice);
	if (min_nice <= MAX_NICE)
		return;
	binder_user_error("%d RLIMIT_NICE not set\n", current->pid);
}
static struct binder_node *binder_get_node_ilocked(struct binder_proc *proc,
						   binder_uintptr_t ptr)
{
	struct rb_node *n = proc->nodes.rb_node;
	struct binder_node *node;

	assert_spin_locked(&proc->inner_lock);

	while (n) {
		node = rb_entry(n, struct binder_node, rb_node);

		if (ptr < node->ptr)
			n = n->rb_left;
		else if (ptr > node->ptr)
			n = n->rb_right;
		else {
			/*
			 * take an implicit weak reference
			 * to ensure node stays alive until
			 * call to binder_put_node()
			 */
			binder_inc_node_tmpref_ilocked(node);
			return node;
		}
	}
	return NULL;
}
static struct binder_node *binder_get_node(struct binder_proc *proc,
					   binder_uintptr_t ptr)
{
	struct binder_node *node;

	binder_inner_proc_lock(proc);
	node = binder_get_node_ilocked(proc, ptr);
	binder_inner_proc_unlock(proc);
	return node;
}
static struct binder_node *binder_init_node_ilocked(
						struct binder_proc *proc,
						struct binder_node *new_node,
						struct flat_binder_object *fp)
{
	struct rb_node **p = &proc->nodes.rb_node;
	struct rb_node *parent = NULL;
	struct binder_node *node;
	binder_uintptr_t ptr = fp ? fp->binder : 0;
	binder_uintptr_t cookie = fp ? fp->cookie : 0;
	__u32 flags = fp ? fp->flags : 0;

	assert_spin_locked(&proc->inner_lock);

	while (*p) {
		parent = *p;
		node = rb_entry(parent, struct binder_node, rb_node);

		if (ptr < node->ptr)
			p = &(*p)->rb_left;
		else if (ptr > node->ptr)
			p = &(*p)->rb_right;
		else {
			/*
			 * A matching node is already in
			 * the rb tree. Abandon the init
			 * and return it.
			 */
			binder_inc_node_tmpref_ilocked(node);
			return node;
		}
	}
	node = new_node;
	binder_stats_created(BINDER_STAT_NODE);
	node->tmp_refs++;
	rb_link_node(&node->rb_node, parent, p);
	rb_insert_color(&node->rb_node, &proc->nodes);
	node->debug_id = atomic_inc_return(&binder_last_id);
	node->proc = proc;
	node->ptr = ptr;
	node->cookie = cookie;
	node->work.type = BINDER_WORK_NODE;
	node->min_priority = flags & FLAT_BINDER_FLAG_PRIORITY_MASK;
	node->accept_fds = !!(flags & FLAT_BINDER_FLAG_ACCEPTS_FDS);
	node->txn_security_ctx = !!(flags & FLAT_BINDER_FLAG_TXN_SECURITY_CTX);
	spin_lock_init(&node->lock);
	INIT_LIST_HEAD(&node->work.entry);
	INIT_LIST_HEAD(&node->async_todo);
	binder_debug(BINDER_DEBUG_INTERNAL_REFS,
		     "%d:%d node %d u%016llx c%016llx created\n",
		     proc->pid, current->pid, node->debug_id,
		     (u64)node->ptr, (u64)node->cookie);

	return node;
}
static struct binder_node *binder_new_node(struct binder_proc *proc,
					   struct flat_binder_object *fp)
{
	struct binder_node *node;
	struct binder_node *new_node = kzalloc(sizeof(*node), GFP_KERNEL);

	if (!new_node)
		return NULL;
	binder_inner_proc_lock(proc);
	node = binder_init_node_ilocked(proc, new_node, fp);
	binder_inner_proc_unlock(proc);
	if (node != new_node)
		/*
		 * The node was already added by another thread
		 */
		kfree(new_node);

	return node;
}
static void binder_free_node(struct binder_node *node)
{
	kfree(node);
	binder_stats_deleted(BINDER_STAT_NODE);
}
static int binder_inc_node_nilocked(struct binder_node *node, int strong,
				    int internal,
				    struct list_head *target_list)
{
	struct binder_proc *proc = node->proc;

	assert_spin_locked(&node->lock);
	if (proc)
		assert_spin_locked(&proc->inner_lock);
	if (strong) {
		if (internal) {
			if (target_list == NULL &&
			    node->internal_strong_refs == 0 &&
			    !(node->proc &&
			      node == node->proc->context->binder_context_mgr_node &&
			      node->has_strong_ref)) {
				pr_err("invalid inc strong node for %d\n",
					node->debug_id);
				return -EINVAL;
			}
			node->internal_strong_refs++;
		} else
			node->local_strong_refs++;
		if (!node->has_strong_ref && target_list) {
			struct binder_thread *thread = container_of(target_list,
						    struct binder_thread, todo);
			binder_dequeue_work_ilocked(&node->work);
			BUG_ON(&thread->todo != target_list);
			binder_enqueue_deferred_thread_work_ilocked(thread,
								   &node->work);
		}
	} else {
		if (!internal)
			node->local_weak_refs++;
		if (!node->has_weak_ref && list_empty(&node->work.entry)) {
			if (target_list == NULL) {
				pr_err("invalid inc weak node for %d\n",
					node->debug_id);
				return -EINVAL;
			}
			binder_enqueue_work_ilocked(&node->work, target_list);
		}
	}
	return 0;
}
static int binder_inc_node(struct binder_node *node, int strong, int internal,
			   struct list_head *target_list)
{
	int ret;

	binder_node_inner_lock(node);
	ret = binder_inc_node_nilocked(node, strong, internal, target_list);
	binder_node_inner_unlock(node);

	return ret;
}
static bool binder_dec_node_nilocked(struct binder_node *node,
				     int strong, int internal)
{
	struct binder_proc *proc = node->proc;

	assert_spin_locked(&node->lock);
	if (proc)
		assert_spin_locked(&proc->inner_lock);
	if (strong) {
		if (internal)
			node->internal_strong_refs--;
		else
			node->local_strong_refs--;
		if (node->local_strong_refs || node->internal_strong_refs)
			return false;
	} else {
		if (!internal)
			node->local_weak_refs--;
		if (node->local_weak_refs || node->tmp_refs ||
				!hlist_empty(&node->refs))
			return false;
	}

	if (proc && (node->has_strong_ref || node->has_weak_ref)) {
		if (list_empty(&node->work.entry)) {
			binder_enqueue_work_ilocked(&node->work, &proc->todo);
			binder_wakeup_proc_ilocked(proc);
		}
	} else {
		if (hlist_empty(&node->refs) && !node->local_strong_refs &&
		    !node->local_weak_refs && !node->tmp_refs) {
			if (proc) {
				binder_dequeue_work_ilocked(&node->work);
				rb_erase(&node->rb_node, &proc->nodes);
				binder_debug(BINDER_DEBUG_INTERNAL_REFS,
					     "refless node %d deleted\n",
					     node->debug_id);
			} else {
				BUG_ON(!list_empty(&node->work.entry));
				spin_lock(&binder_dead_nodes_lock);
				/*
				 * tmp_refs could have changed so
				 * check it again
				 */
				if (node->tmp_refs) {
					spin_unlock(&binder_dead_nodes_lock);
					return false;
				}
				hlist_del(&node->dead_node);
				spin_unlock(&binder_dead_nodes_lock);
				binder_debug(BINDER_DEBUG_INTERNAL_REFS,
					     "dead node %d deleted\n",
					     node->debug_id);
			}
			return true;
		}
	}
	return false;
}
static void binder_dec_node(struct binder_node *node, int strong, int internal)
{
	bool free_node;

	binder_node_inner_lock(node);
	free_node = binder_dec_node_nilocked(node, strong, internal);
	binder_node_inner_unlock(node);
	if (free_node)
		binder_free_node(node);
}
static void binder_inc_node_tmpref_ilocked(struct binder_node *node)
{
	/*
	 * No call to binder_inc_node() is needed since we
	 * don't need to inform userspace of any changes to
	 * tmp_refs
	 */
	node->tmp_refs++;
}
/**
 * binder_inc_node_tmpref() - take a temporary reference on node
 * @node:	node to reference
 *
 * Take reference on node to prevent the node from being freed
 * while referenced only by a local variable. The inner lock is
 * needed to serialize with the node work on the queue (which
 * isn't needed after the node is dead). If the node is dead
 * (node->proc is NULL), use binder_dead_nodes_lock to protect
 * node->tmp_refs against dead-node-only cases where the node
 * lock cannot be acquired (eg traversing the dead node list to
 * print nodes)
 */
static void binder_inc_node_tmpref(struct binder_node *node)
{
	binder_node_lock(node);
	if (node->proc)
		binder_inner_proc_lock(node->proc);
	else
		spin_lock(&binder_dead_nodes_lock);
	binder_inc_node_tmpref_ilocked(node);
	if (node->proc)
		binder_inner_proc_unlock(node->proc);
	else
		spin_unlock(&binder_dead_nodes_lock);
	binder_node_unlock(node);
}
/**
 * binder_dec_node_tmpref() - remove a temporary reference on node
 * @node:	node to reference
 *
 * Release temporary reference on node taken via binder_inc_node_tmpref()
 */
static void binder_dec_node_tmpref(struct binder_node *node)
{
	bool free_node;

	binder_node_inner_lock(node);
	if (!node->proc)
		spin_lock(&binder_dead_nodes_lock);
	else
		__acquire(&binder_dead_nodes_lock);
	node->tmp_refs--;
	BUG_ON(node->tmp_refs < 0);
	if (!node->proc)
		spin_unlock(&binder_dead_nodes_lock);
	else
		__release(&binder_dead_nodes_lock);
	/*
	 * Call binder_dec_node() to check if all refcounts are 0
	 * and cleanup is needed. Calling with strong=0 and internal=1
	 * causes no actual reference to be released in binder_dec_node().
	 * If that changes, a change is needed here too.
	 */
	free_node = binder_dec_node_nilocked(node, 0, 1);
	binder_node_inner_unlock(node);
	if (free_node)
		binder_free_node(node);
}
static void binder_put_node(struct binder_node *node)
{
	binder_dec_node_tmpref(node);
}
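/*
 * Illustrative tmpref lifetime (editorial sketch, not driver code):
 * lookups such as binder_get_node() take an implicit temporary
 * reference, which the caller must drop once done with the node:
 *
 *	node = binder_get_node(proc, ptr);
 *	if (node) {
 *		... use node ...
 *		binder_put_node(node);
 *	}
 */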
static struct binder_ref *binder_get_ref_olocked(struct binder_proc *proc,
						 u32 desc, bool need_strong_ref)
{
	struct rb_node *n = proc->refs_by_desc.rb_node;
	struct binder_ref *ref;

	while (n) {
		ref = rb_entry(n, struct binder_ref, rb_node_desc);

		if (desc < ref->data.desc) {
			n = n->rb_left;
		} else if (desc > ref->data.desc) {
			n = n->rb_right;
		} else if (need_strong_ref && !ref->data.strong) {
			binder_user_error("tried to use weak ref as strong ref\n");
			return NULL;
		} else {
			return ref;
		}
	}
	return NULL;
}
/**
 * binder_get_ref_for_node_olocked() - get the ref associated with given node
 * @proc:	binder_proc that owns the ref
 * @node:	binder_node of target
 * @new_ref:	newly allocated binder_ref to be initialized or %NULL
 *
 * Look up the ref for the given node and return it if it exists
 *
 * If it doesn't exist and the caller provides a newly allocated
 * ref, initialize the fields of the newly allocated ref and insert
 * into the given proc rb_trees and node refs list.
 *
 * Return:	the ref for node. It is possible that another thread
 *		allocated/initialized the ref first in which case the
 *		returned ref would be different than the passed-in
 *		new_ref. new_ref must be kfree'd by the caller in
 *		this case.
 */
static struct binder_ref *binder_get_ref_for_node_olocked(
					struct binder_proc *proc,
					struct binder_node *node,
					struct binder_ref *new_ref)
{
	struct binder_context *context = proc->context;
	struct rb_node **p = &proc->refs_by_node.rb_node;
	struct rb_node *parent = NULL;
	struct binder_ref *ref;
	struct rb_node *n;

	while (*p) {
		parent = *p;
		ref = rb_entry(parent, struct binder_ref, rb_node_node);

		if (node < ref->node)
			p = &(*p)->rb_left;
		else if (node > ref->node)
			p = &(*p)->rb_right;
		else
			return ref;
	}
	if (!new_ref)
		return NULL;

	binder_stats_created(BINDER_STAT_REF);
	new_ref->data.debug_id = atomic_inc_return(&binder_last_id);
	new_ref->proc = proc;
	new_ref->node = node;
	rb_link_node(&new_ref->rb_node_node, parent, p);
	rb_insert_color(&new_ref->rb_node_node, &proc->refs_by_node);

	new_ref->data.desc = (node == context->binder_context_mgr_node) ? 0 : 1;
	for (n = rb_first(&proc->refs_by_desc); n != NULL; n = rb_next(n)) {
		ref = rb_entry(n, struct binder_ref, rb_node_desc);
		if (ref->data.desc > new_ref->data.desc)
			break;
		new_ref->data.desc = ref->data.desc + 1;
	}

	p = &proc->refs_by_desc.rb_node;
	while (*p) {
		parent = *p;
		ref = rb_entry(parent, struct binder_ref, rb_node_desc);

		if (new_ref->data.desc < ref->data.desc)
			p = &(*p)->rb_left;
		else if (new_ref->data.desc > ref->data.desc)
			p = &(*p)->rb_right;
		else
			BUG();
	}
	rb_link_node(&new_ref->rb_node_desc, parent, p);
	rb_insert_color(&new_ref->rb_node_desc, &proc->refs_by_desc);

	binder_node_lock(node);
	hlist_add_head(&new_ref->node_entry, &node->refs);

	binder_debug(BINDER_DEBUG_INTERNAL_REFS,
		     "%d new ref %d desc %d for node %d\n",
		      proc->pid, new_ref->data.debug_id, new_ref->data.desc,
		      node->debug_id);
	binder_node_unlock(node);
	return new_ref;
}
static void binder_cleanup_ref_olocked(struct binder_ref *ref)
{
	bool delete_node = false;

	binder_debug(BINDER_DEBUG_INTERNAL_REFS,
		     "%d delete ref %d desc %d for node %d\n",
		      ref->proc->pid, ref->data.debug_id, ref->data.desc,
		      ref->node->debug_id);

	rb_erase(&ref->rb_node_desc, &ref->proc->refs_by_desc);
	rb_erase(&ref->rb_node_node, &ref->proc->refs_by_node);

	binder_node_inner_lock(ref->node);
	if (ref->data.strong)
		binder_dec_node_nilocked(ref->node, 1, 1);

	hlist_del(&ref->node_entry);
	delete_node = binder_dec_node_nilocked(ref->node, 0, 1);
	binder_node_inner_unlock(ref->node);
	/*
	 * Clear ref->node unless we want the caller to free the node
	 */
	if (!delete_node) {
		/*
		 * The caller uses ref->node to determine
		 * whether the node needs to be freed. Clear
		 * it since the node is still alive.
		 */
		ref->node = NULL;
	}

	if (ref->death) {
		binder_debug(BINDER_DEBUG_DEAD_BINDER,
			     "%d delete ref %d desc %d has death notification\n",
			      ref->proc->pid, ref->data.debug_id,
			      ref->data.desc);
		binder_dequeue_work(ref->proc, &ref->death->work);
		binder_stats_deleted(BINDER_STAT_DEATH);
	}
	binder_stats_deleted(BINDER_STAT_REF);
}
/**
 * binder_inc_ref_olocked() - increment the ref for given handle
 * @ref:         ref to be incremented
 * @strong:      if true, strong increment, else weak
 * @target_list: list to queue node work on
 *
 * Increment the ref. @ref->proc->outer_lock must be held on entry
 *
 * Return: 0, if successful, else errno
 */
static int binder_inc_ref_olocked(struct binder_ref *ref, int strong,
				  struct list_head *target_list)
{
	int ret;

	if (strong) {
		if (ref->data.strong == 0) {
			ret = binder_inc_node(ref->node, 1, 1, target_list);
			if (ret)
				return ret;
		}
		ref->data.strong++;
	} else {
		if (ref->data.weak == 0) {
			ret = binder_inc_node(ref->node, 0, 1, target_list);
			if (ret)
				return ret;
		}
		ref->data.weak++;
	}
	return 0;
}
/**
 * binder_dec_ref_olocked() - dec the ref for given handle
 * @ref:	ref to be decremented
 * @strong:	if true, strong decrement, else weak
 *
 * Decrement the ref.
 *
 * Return: true if ref is cleaned up and ready to be freed
 */
static bool binder_dec_ref_olocked(struct binder_ref *ref, int strong)
{
	if (strong) {
		if (ref->data.strong == 0) {
			binder_user_error("%d invalid dec strong, ref %d desc %d s %d w %d\n",
					  ref->proc->pid, ref->data.debug_id,
					  ref->data.desc, ref->data.strong,
					  ref->data.weak);
			return false;
		}
		ref->data.strong--;
		if (ref->data.strong == 0)
			binder_dec_node(ref->node, strong, 1);
	} else {
		if (ref->data.weak == 0) {
			binder_user_error("%d invalid dec weak, ref %d desc %d s %d w %d\n",
					  ref->proc->pid, ref->data.debug_id,
					  ref->data.desc, ref->data.strong,
					  ref->data.weak);
			return false;
		}
		ref->data.weak--;
	}
	if (ref->data.strong == 0 && ref->data.weak == 0) {
		binder_cleanup_ref_olocked(ref);
		return true;
	}
	return false;
}
/**
 * binder_get_node_from_ref() - get the node from the given proc/desc
 * @proc:	proc containing the ref
 * @desc:	the handle associated with the ref
 * @need_strong_ref: if true, only return node if ref is strong
 * @rdata:	the id/refcount data for the ref
 *
 * Given a proc and ref handle, return the associated binder_node
 *
 * Return: a binder_node or NULL if not found or not strong when strong required
 */
static struct binder_node *binder_get_node_from_ref(
		struct binder_proc *proc,
		u32 desc, bool need_strong_ref,
		struct binder_ref_data *rdata)
{
	struct binder_node *node;
	struct binder_ref *ref;

	binder_proc_lock(proc);
	ref = binder_get_ref_olocked(proc, desc, need_strong_ref);
	if (!ref)
		goto err_no_ref;
	node = ref->node;
	/*
	 * Take an implicit reference on the node to ensure
	 * it stays alive until the call to binder_put_node()
	 */
	binder_inc_node_tmpref(node);
	if (rdata)
		*rdata = ref->data;
	binder_proc_unlock(proc);

	return node;

err_no_ref:
	binder_proc_unlock(proc);
	return NULL;
}
/**
 * binder_free_ref() - free the binder_ref
 * @ref:	ref to free
 *
 * Free the binder_ref. Free the binder_node indicated by ref->node
 * (if non-NULL) and the binder_ref_death indicated by ref->death.
 */
static void binder_free_ref(struct binder_ref *ref)
{
	if (ref->node)
		binder_free_node(ref->node);
	kfree(ref->death);
	kfree(ref);
}
/**
 * binder_update_ref_for_handle() - inc/dec the ref for given handle
 * @proc:	proc containing the ref
 * @desc:	the handle associated with the ref
 * @increment:	true=inc reference, false=dec reference
 * @strong:	true=strong reference, false=weak reference
 * @rdata:	the id/refcount data for the ref
 *
 * Given a proc and ref handle, increment or decrement the ref
 * according to "increment" arg.
 *
 * Return: 0 if successful, else errno
 */
static int binder_update_ref_for_handle(struct binder_proc *proc,
		uint32_t desc, bool increment, bool strong,
		struct binder_ref_data *rdata)
{
	int ret = 0;
	struct binder_ref *ref;
	bool delete_ref = false;

	binder_proc_lock(proc);
	ref = binder_get_ref_olocked(proc, desc, strong);
	if (!ref) {
		ret = -EINVAL;
		goto err_no_ref;
	}
	if (increment)
		ret = binder_inc_ref_olocked(ref, strong, NULL);
	else
		delete_ref = binder_dec_ref_olocked(ref, strong);

	if (rdata)
		*rdata = ref->data;
	binder_proc_unlock(proc);

	if (delete_ref)
		binder_free_ref(ref);
	return ret;

err_no_ref:
	binder_proc_unlock(proc);
	return ret;
}
/**
 * binder_dec_ref_for_handle() - dec the ref for given handle
 * @proc:	proc containing the ref
 * @desc:	the handle associated with the ref
 * @strong:	true=strong reference, false=weak reference
 * @rdata:	the id/refcount data for the ref
 *
 * Just calls binder_update_ref_for_handle() to decrement the ref.
 *
 * Return: 0 if successful, else errno
 */
static int binder_dec_ref_for_handle(struct binder_proc *proc,
		uint32_t desc, bool strong, struct binder_ref_data *rdata)
{
	return binder_update_ref_for_handle(proc, desc, false, strong, rdata);
}
/**
 * binder_inc_ref_for_node() - increment the ref for given proc/node
 * @proc:	 proc containing the ref
 * @node:	 target node
 * @strong:	 true=strong reference, false=weak reference
 * @target_list: worklist to use if node is incremented
 * @rdata:	 the id/refcount data for the ref
 *
 * Given a proc and node, increment the ref. Create the ref if it
 * doesn't already exist
 *
 * Return: 0 if successful, else errno
 */
static int binder_inc_ref_for_node(struct binder_proc *proc,
			struct binder_node *node,
			bool strong,
			struct list_head *target_list,
			struct binder_ref_data *rdata)
{
	struct binder_ref *ref;
	struct binder_ref *new_ref = NULL;
	int ret = 0;

	binder_proc_lock(proc);
	ref = binder_get_ref_for_node_olocked(proc, node, NULL);
	if (!ref) {
		binder_proc_unlock(proc);
		new_ref = kzalloc(sizeof(*ref), GFP_KERNEL);
		if (!new_ref)
			return -ENOMEM;
		binder_proc_lock(proc);
		ref = binder_get_ref_for_node_olocked(proc, node, new_ref);
	}
	ret = binder_inc_ref_olocked(ref, strong, target_list);
	*rdata = ref->data;
	binder_proc_unlock(proc);
	if (new_ref && ref != new_ref)
		/*
		 * Another thread created the ref first so
		 * free the one we allocated
		 */
		kfree(new_ref);
	return ret;
}
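/*
 * Editorial usage sketch (not driver code; the surrounding variable
 * names are hypothetical): minting a strong handle on @node for a
 * target process and later dropping it by descriptor looks roughly
 * like
 *
 *	struct binder_ref_data rdata;
 *	int ret;
 *
 *	ret = binder_inc_ref_for_node(target_proc, node, true,
 *				      NULL, &rdata);
 *	...	rdata.desc is the handle handed to userspace ...
 *	binder_dec_ref_for_handle(target_proc, rdata.desc, true, &rdata);
 */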
static void binder_pop_transaction_ilocked(struct binder_thread *target_thread,
					   struct binder_transaction *t)
{
	BUG_ON(!target_thread);
	assert_spin_locked(&target_thread->proc->inner_lock);
	BUG_ON(target_thread->transaction_stack != t);
	BUG_ON(target_thread->transaction_stack->from != target_thread);
	target_thread->transaction_stack =
		target_thread->transaction_stack->from_parent;
	t->from = NULL;
}
/**
 * binder_thread_dec_tmpref() - decrement thread->tmp_ref
 * @thread:	thread to decrement
 *
 * A thread needs to be kept alive while being used to create or
 * handle a transaction. binder_get_txn_from() is used to safely
 * extract t->from from a binder_transaction and keep the thread
 * indicated by t->from from being freed. When done with that
 * binder_thread, this function is called to decrement the
 * tmp_ref and free if appropriate (thread has been released
 * and no transaction being processed by the driver)
 */
static void binder_thread_dec_tmpref(struct binder_thread *thread)
{
	/*
	 * atomic is used to protect the counter value while
	 * it cannot reach zero or thread->is_dead is false
	 */
	binder_inner_proc_lock(thread->proc);
	atomic_dec(&thread->tmp_ref);
	if (thread->is_dead && !atomic_read(&thread->tmp_ref)) {
		binder_inner_proc_unlock(thread->proc);
		binder_free_thread(thread);
		return;
	}
	binder_inner_proc_unlock(thread->proc);
}
/**
 * binder_proc_dec_tmpref() - decrement proc->tmp_ref
 * @proc:	proc to decrement
 *
 * A binder_proc needs to be kept alive while being used to create or
 * handle a transaction. proc->tmp_ref is incremented when
 * creating a new transaction or the binder_proc is currently in-use
 * by threads that are being released. When done with the binder_proc,
 * this function is called to decrement the counter and free the
 * proc if appropriate (proc has been released, all threads have
 * been released and not currently in use to process a transaction).
 */
static void binder_proc_dec_tmpref(struct binder_proc *proc)
{
	binder_inner_proc_lock(proc);
	proc->tmp_ref--;
	if (proc->is_dead && RB_EMPTY_ROOT(&proc->threads) &&
			!proc->tmp_ref) {
		binder_inner_proc_unlock(proc);
		binder_free_proc(proc);
		return;
	}
	binder_inner_proc_unlock(proc);
}
/**
 * binder_get_txn_from() - safely extract the "from" thread in transaction
 * @t:	binder transaction for t->from
 *
 * Atomically return the "from" thread and increment the tmp_ref
 * count for the thread to ensure it stays alive until
 * binder_thread_dec_tmpref() is called.
 *
 * Return: the value of t->from
 */
static struct binder_thread *binder_get_txn_from(
		struct binder_transaction *t)
{
	struct binder_thread *from;

	spin_lock(&t->lock);
	from = t->from;
	if (from)
		atomic_inc(&from->tmp_ref);
	spin_unlock(&t->lock);
	return from;
}
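/*
 * Illustrative pairing (editorial sketch, not driver code): a caller
 * that only needs a stable t->from does
 *
 *	from = binder_get_txn_from(t);
 *	if (from) {
 *		...	from cannot be freed here ...
 *		binder_thread_dec_tmpref(from);
 *	}
 */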
/**
 * binder_get_txn_from_and_acq_inner() - get t->from and acquire inner lock
 * @t:	binder transaction for t->from
 *
 * Same as binder_get_txn_from() except it also acquires the proc->inner_lock
 * to guarantee that the thread cannot be released while operating on it.
 * The caller must call binder_inner_proc_unlock() to release the inner lock
 * as well as call binder_thread_dec_tmpref() to release the reference.
 *
 * Return: the value of t->from
 */
static struct binder_thread *binder_get_txn_from_and_acq_inner(
		struct binder_transaction *t)
	__acquires(&t->from->proc->inner_lock)
{
	struct binder_thread *from;

	from = binder_get_txn_from(t);
	if (!from) {
		__acquire(&from->proc->inner_lock);
		return NULL;
	}
	binder_inner_proc_lock(from->proc);
	if (t->from) {
		BUG_ON(from != t->from);
		return from;
	}
	binder_inner_proc_unlock(from->proc);
	__acquire(&from->proc->inner_lock);
	binder_thread_dec_tmpref(from);
	return NULL;
}
/**
 * binder_free_txn_fixups() - free unprocessed fd fixups
 * @t:	binder transaction for t->from
 *
 * If the transaction is being torn down prior to being
 * processed by the target process, free all of the
 * fd fixups and fput the file structs. It is safe to
 * call this function after the fixups have been
 * processed -- in that case, the list will be empty.
 */
static void binder_free_txn_fixups(struct binder_transaction *t)
{
	struct binder_txn_fd_fixup *fixup, *tmp;

	list_for_each_entry_safe(fixup, tmp, &t->fd_fixups, fixup_entry) {
		fput(fixup->file);
		list_del(&fixup->fixup_entry);
		kfree(fixup);
	}
}
static void binder_free_transaction(struct binder_transaction *t)
{
	struct binder_proc *target_proc = t->to_proc;

	if (target_proc) {
		binder_inner_proc_lock(target_proc);
		if (t->buffer)
			t->buffer->transaction = NULL;
		binder_inner_proc_unlock(target_proc);
	}
	/*
	 * If the transaction has no target_proc, then
	 * t->buffer->transaction has already been cleared.
	 */
	binder_free_txn_fixups(t);
	kfree(t);
	binder_stats_deleted(BINDER_STAT_TRANSACTION);
}
static void binder_send_failed_reply(struct binder_transaction *t,
				     uint32_t error_code)
{
	struct binder_thread *target_thread;
	struct binder_transaction *next;

	BUG_ON(t->flags & TF_ONE_WAY);
	while (1) {
		target_thread = binder_get_txn_from_and_acq_inner(t);
		if (target_thread) {
			binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
				     "send failed reply for transaction %d to %d:%d\n",
				      t->debug_id,
				      target_thread->proc->pid,
				      target_thread->pid);

			binder_pop_transaction_ilocked(target_thread, t);
			if (target_thread->reply_error.cmd == BR_OK) {
				target_thread->reply_error.cmd = error_code;
				binder_enqueue_thread_work_ilocked(
					target_thread,
					&target_thread->reply_error.work);
				wake_up_interruptible(&target_thread->wait);
			} else {
				/*
				 * Cannot get here for normal operation, but
				 * we can if multiple synchronous transactions
				 * are sent without blocking for responses.
				 * Just ignore the 2nd error in this case.
				 */
				pr_warn("Unexpected reply error: %u\n",
					target_thread->reply_error.cmd);
			}
			binder_inner_proc_unlock(target_thread->proc);
			binder_thread_dec_tmpref(target_thread);
			binder_free_transaction(t);
			return;
		}
		__release(&target_thread->proc->inner_lock);
		next = t->from_parent;

		binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
			     "send failed reply for transaction %d, target dead\n",
			     t->debug_id);

		binder_free_transaction(t);
		if (next == NULL) {
			binder_debug(BINDER_DEBUG_DEAD_BINDER,
				     "reply failed, no target thread at root\n");
			return;
		}
		t = next;
		binder_debug(BINDER_DEBUG_DEAD_BINDER,
			     "reply failed, no target thread -- retry %d\n",
			      t->debug_id);
	}
}
/**
 * binder_cleanup_transaction() - cleans up undelivered transaction
 * @t:		transaction that needs to be cleaned up
 * @reason:	reason the transaction wasn't delivered
 * @error_code:	error to return to caller (if synchronous call)
 */
static void binder_cleanup_transaction(struct binder_transaction *t,
				       const char *reason,
				       uint32_t error_code)
{
	if (t->buffer->target_node && !(t->flags & TF_ONE_WAY)) {
		binder_send_failed_reply(t, error_code);
	} else {
		binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
			"undelivered transaction %d, %s\n",
			t->debug_id, reason);
		binder_free_transaction(t);
	}
}
/**
 * binder_get_object() - gets object and checks for valid metadata
 * @proc:	binder_proc owning the buffer
 * @buffer:	binder_buffer that we're parsing.
 * @offset:	offset in the @buffer at which to validate an object.
 * @object:	struct binder_object to read into
 *
 * Return:	If there's a valid metadata object at @offset in @buffer, the
 *		size of that object. Otherwise, it returns zero. The object
 *		is read into the struct binder_object pointed to by @object.
 */
static size_t binder_get_object(struct binder_proc *proc,
				struct binder_buffer *buffer,
				unsigned long offset,
				struct binder_object *object)
{
	size_t read_size;
	struct binder_object_header *hdr;
	size_t object_size = 0;

	read_size = min_t(size_t, sizeof(*object), buffer->data_size - offset);
	if (offset > buffer->data_size || read_size < sizeof(*hdr) ||
	    binder_alloc_copy_from_buffer(&proc->alloc, object, buffer,
					  offset, read_size))
		return 0;

	/* Ok, now see if we read a complete object. */
	hdr = &object->hdr;
	switch (hdr->type) {
	case BINDER_TYPE_BINDER:
	case BINDER_TYPE_WEAK_BINDER:
	case BINDER_TYPE_HANDLE:
	case BINDER_TYPE_WEAK_HANDLE:
		object_size = sizeof(struct flat_binder_object);
		break;
	case BINDER_TYPE_FD:
		object_size = sizeof(struct binder_fd_object);
		break;
	case BINDER_TYPE_PTR:
		object_size = sizeof(struct binder_buffer_object);
		break;
	case BINDER_TYPE_FDA:
		object_size = sizeof(struct binder_fd_array_object);
		break;
	default:
		return 0;
	}
	if (offset <= buffer->data_size - object_size &&
	    buffer->data_size >= object_size)
		return object_size;
	else
		return 0;
}
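/*
 * The final pair of checks above guards two distinct overflow cases:
 * @offset may be within data_size while data_size itself is smaller
 * than object_size (making the unsigned subtraction wrap). Both
 * conditions must hold before the object is trusted; e.g. a
 * flat_binder_object placed at the very end of the buffer is only
 * accepted if all of its bytes fit inside data_size.
 */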
/**
 * binder_validate_ptr() - validates binder_buffer_object in a binder_buffer.
 * @proc:	binder_proc owning the buffer
 * @b:		binder_buffer containing the object
 * @object:	struct binder_object to read into
 * @index:	index in offset array at which the binder_buffer_object is
 *		located
 * @start_offset: points to the start of the offset array
 * @object_offsetp: offset of @object read from @b
 * @num_valid:	the number of valid offsets in the offset array
 *
 * Return:	If @index is within the valid range of the offset array
 *		described by @start_offset and @num_valid, and if there's a valid
 *		binder_buffer_object at the offset found in index @index
 *		of the offset array, that object is returned. Otherwise,
 *		%NULL is returned.
 *		Note that the offset found in index @index itself is not
 *		verified; this function assumes that @num_valid elements
 *		from @start_offset were previously verified to have valid offsets.
 *		If @object_offsetp is non-NULL, then the offset within
 *		@b is written to it.
 */
static struct binder_buffer_object *binder_validate_ptr(
						struct binder_proc *proc,
						struct binder_buffer *b,
						struct binder_object *object,
						binder_size_t index,
						binder_size_t start_offset,
						binder_size_t *object_offsetp,
						binder_size_t num_valid)
{
	size_t object_size;
	binder_size_t object_offset;
	unsigned long buffer_offset;

	if (index >= num_valid)
		return NULL;

	buffer_offset = start_offset + sizeof(binder_size_t) * index;
	if (binder_alloc_copy_from_buffer(&proc->alloc, &object_offset,
					  b, buffer_offset,
					  sizeof(object_offset)))
		return NULL;
	object_size = binder_get_object(proc, b, object_offset, object);
	if (!object_size || object->hdr.type != BINDER_TYPE_PTR)
		return NULL;
	if (object_offsetp)
		*object_offsetp = object_offset;

	return &object->bbo;
}
/**
 * binder_validate_fixup() - validates pointer/fd fixups happen in order.
 * @proc:		binder_proc owning the buffer
 * @b:			transaction buffer
 * @objects_start_offset: offset to start of objects buffer
 * @buffer_obj_offset:	offset to binder_buffer_object in which to fix up
 * @fixup_offset:	start offset in @b to fix up
 * @last_obj_offset:	offset to last binder_buffer_object that we fixed
 * @last_min_offset:	minimum fixup offset in object at @last_obj_offset
 *
 * Return:		%true if a fixup in buffer @b at offset @fixup_offset is
 *			allowed.
 *
 * For safety reasons, we only allow fixups inside a buffer to happen
 * at increasing offsets; additionally, we only allow fixup on the last
 * buffer object that was verified, or one of its parents.
 *
 * Example of what is allowed:
 *
 * A
 *   B (parent = A, offset = 0)
 *   C (parent = A, offset = 16)
 *     D (parent = C, offset = 0)
 *   E (parent = A, offset = 32) // min_offset is 16 (C.parent_offset)
 *
 * Examples of what is not allowed:
 *
 * Decreasing offsets within the same parent:
 * A
 *   C (parent = A, offset = 16)
 *   B (parent = A, offset = 0) // decreasing offset within A
 *
 * Referring to a parent that wasn't the last object or any of its parents:
 * A
 *   B (parent = A, offset = 0)
 *   C (parent = A, offset = 0)
 *   C (parent = A, offset = 16)
 *     D (parent = B, offset = 0) // B is not A or any of A's parents
 */
static bool binder_validate_fixup(struct binder_proc *proc,
				  struct binder_buffer *b,
				  binder_size_t objects_start_offset,
				  binder_size_t buffer_obj_offset,
				  binder_size_t fixup_offset,
				  binder_size_t last_obj_offset,
				  binder_size_t last_min_offset)
{
	if (!last_obj_offset) {
		/* Nothing to fix up in */
		return false;
	}

	while (last_obj_offset != buffer_obj_offset) {
		unsigned long buffer_offset;
		struct binder_object last_object;
		struct binder_buffer_object *last_bbo;
		size_t object_size = binder_get_object(proc, b, last_obj_offset,
						       &last_object);
		if (object_size != sizeof(*last_bbo))
			return false;

		last_bbo = &last_object.bbo;
		/*
		 * Safe to retrieve the parent of last_obj, since it
		 * was already previously verified by the driver.
		 */
		if ((last_bbo->flags & BINDER_BUFFER_FLAG_HAS_PARENT) == 0)
			return false;
		last_min_offset = last_bbo->parent_offset + sizeof(uintptr_t);
		buffer_offset = objects_start_offset +
			sizeof(binder_size_t) * last_bbo->parent;
		if (binder_alloc_copy_from_buffer(&proc->alloc,
						  &last_obj_offset,
						  b, buffer_offset,
						  sizeof(last_obj_offset)))
			return false;
	}
	return (fixup_offset >= last_min_offset);
}
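/*
 * Each iteration of the loop above re-reads the parent object via
 * binder_get_object(), so validation cost grows with the nesting depth
 * of buffer objects, not with the total number of objects in the
 * transaction.
 */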
/**
 * struct binder_task_work_cb - for deferred close
 *
 * @twork:                callback_head for task work
 * @file:                 file to close
 *
 * Structure to pass task work to be handled after
 * returning from binder_ioctl() via task_work_add().
 */
struct binder_task_work_cb {
	struct callback_head twork;
	struct file *file;
};

/**
 * binder_do_fd_close() - close list of file descriptors
 * @twork:	callback head for task work
 *
 * It is not safe to call ksys_close() during the binder_ioctl()
 * function if there is a chance that binder's own file descriptor
 * might be closed. This is to meet the requirements for using
 * fdget() (see comments for __fget_light()). Therefore use
 * task_work_add() to schedule the close operation once we have
 * returned from binder_ioctl(). This function is a callback
 * for that mechanism and does the actual ksys_close() on the
 * given file descriptor.
 */
static void binder_do_fd_close(struct callback_head *twork)
{
	struct binder_task_work_cb *twcb = container_of(twork,
			struct binder_task_work_cb, twork);

	fput(twcb->file);
	kfree(twcb);
}

/**
 * binder_deferred_fd_close() - schedule a close for the given file-descriptor
 * @fd:		file-descriptor to close
 *
 * See comments in binder_do_fd_close(). This function is used to schedule
 * a file-descriptor to be closed after returning from binder_ioctl().
 */
static void binder_deferred_fd_close(int fd)
{
	struct binder_task_work_cb *twcb;

	twcb = kzalloc(sizeof(*twcb), GFP_KERNEL);
	if (!twcb)
		return;
	init_task_work(&twcb->twork, binder_do_fd_close);
	__close_fd_get_file(fd, &twcb->file);
	if (twcb->file)
		task_work_add(current, &twcb->twork, true);
	else
		kfree(twcb);
}
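/*
 * Typical use: the buffer-release and fd-fixup paths below call
 * binder_deferred_fd_close(fd) instead of closing directly, so the
 * actual fput() in binder_do_fd_close() runs from task work once
 * binder_ioctl() has returned and the fdget() requirements no longer
 * apply.
 */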
static void binder_transaction_buffer_release(struct binder_proc *proc,
					      struct binder_buffer *buffer,
					      binder_size_t failed_at,
					      bool is_failure)
{
	int debug_id = buffer->debug_id;
	binder_size_t off_start_offset, buffer_offset, off_end_offset;

	binder_debug(BINDER_DEBUG_TRANSACTION,
		     "%d buffer release %d, size %zd-%zd, failed at %llx\n",
		     proc->pid, buffer->debug_id,
		     buffer->data_size, buffer->offsets_size,
		     (unsigned long long)failed_at);

	if (buffer->target_node)
		binder_dec_node(buffer->target_node, 1, 0);

	off_start_offset = ALIGN(buffer->data_size, sizeof(void *));
	off_end_offset = is_failure ? failed_at :
				off_start_offset + buffer->offsets_size;
	for (buffer_offset = off_start_offset; buffer_offset < off_end_offset;
	     buffer_offset += sizeof(binder_size_t)) {
		struct binder_object_header *hdr;
		size_t object_size = 0;
		struct binder_object object;
		binder_size_t object_offset;

		if (!binder_alloc_copy_from_buffer(&proc->alloc, &object_offset,
						   buffer, buffer_offset,
						   sizeof(object_offset)))
			object_size = binder_get_object(proc, buffer,
							object_offset, &object);
		if (object_size == 0) {
			pr_err("transaction release %d bad object at offset %lld, size %zd\n",
			       debug_id, (u64)object_offset, buffer->data_size);
			continue;
		}
		hdr = &object.hdr;
		switch (hdr->type) {
		case BINDER_TYPE_BINDER:
		case BINDER_TYPE_WEAK_BINDER: {
			struct flat_binder_object *fp;
			struct binder_node *node;

			fp = to_flat_binder_object(hdr);
			node = binder_get_node(proc, fp->binder);
			if (node == NULL) {
				pr_err("transaction release %d bad node %016llx\n",
				       debug_id, (u64)fp->binder);
				break;
			}
			binder_debug(BINDER_DEBUG_TRANSACTION,
				     "        node %d u%016llx\n",
				     node->debug_id, (u64)node->ptr);
			binder_dec_node(node, hdr->type == BINDER_TYPE_BINDER,
					0);
			binder_put_node(node);
		} break;
		case BINDER_TYPE_HANDLE:
		case BINDER_TYPE_WEAK_HANDLE: {
			struct flat_binder_object *fp;
			struct binder_ref_data rdata;
			int ret;

			fp = to_flat_binder_object(hdr);
			ret = binder_dec_ref_for_handle(proc, fp->handle,
				hdr->type == BINDER_TYPE_HANDLE, &rdata);

			if (ret) {
				pr_err("transaction release %d bad handle %d, ret = %d\n",
				 debug_id, fp->handle, ret);
				break;
			}
			binder_debug(BINDER_DEBUG_TRANSACTION,
				     "        ref %d desc %d\n",
				     rdata.debug_id, rdata.desc);
		} break;

		case BINDER_TYPE_FD: {
			/*
			 * No need to close the file here since user-space
			 * closes it for successfully delivered
			 * transactions. For transactions that weren't
			 * delivered, the new fd was never allocated so
			 * there is no need to close and the fput on the
			 * file is done when the transaction is torn
			 * down.
			 */
			WARN_ON(failed_at &&
				proc->tsk == current->group_leader);
		} break;
		case BINDER_TYPE_PTR:
			/*
			 * Nothing to do here, this will get cleaned up when the
			 * transaction buffer gets freed
			 */
			break;
		case BINDER_TYPE_FDA: {
			struct binder_fd_array_object *fda;
			struct binder_buffer_object *parent;
			struct binder_object ptr_object;
			binder_size_t fda_offset;
			size_t fd_index;
			binder_size_t fd_buf_size;
			binder_size_t num_valid;

			if (proc->tsk != current->group_leader) {
				/*
				 * Nothing to do if running in sender context
				 * The fd fixups have not been applied so no
				 * fds need to be closed.
				 */
				continue;
			}

			num_valid = (buffer_offset - off_start_offset) /
						sizeof(binder_size_t);
			fda = to_binder_fd_array_object(hdr);
			parent = binder_validate_ptr(proc, buffer, &ptr_object,
						     fda->parent,
						     off_start_offset,
						     NULL,
						     num_valid);
			if (!parent) {
				pr_err("transaction release %d bad parent offset\n",
				       debug_id);
				continue;
			}
			fd_buf_size = sizeof(u32) * fda->num_fds;
			if (fda->num_fds >= SIZE_MAX / sizeof(u32)) {
				pr_err("transaction release %d invalid number of fds (%lld)\n",
				       debug_id, (u64)fda->num_fds);
				continue;
			}
			if (fd_buf_size > parent->length ||
			    fda->parent_offset > parent->length - fd_buf_size) {
				/* No space for all file descriptors here. */
				pr_err("transaction release %d not enough space for %lld fds in buffer\n",
				       debug_id, (u64)fda->num_fds);
				continue;
			}
			/*
			 * the source data for binder_buffer_object is visible
			 * to user-space and the @buffer element is the user
			 * pointer to the buffer_object containing the fd_array.
			 * Convert the address to an offset relative to
			 * the base of the transaction buffer.
			 */
			fda_offset =
			    (parent->buffer - (uintptr_t)buffer->user_data) +
			    fda->parent_offset;
			for (fd_index = 0; fd_index < fda->num_fds;
			     fd_index++) {
				u32 fd;
				int err;
				binder_size_t offset = fda_offset +
					fd_index * sizeof(fd);

				err = binder_alloc_copy_from_buffer(
						&proc->alloc, &fd, buffer,
						offset, sizeof(fd));
				WARN_ON(err);
				if (!err)
					binder_deferred_fd_close(fd);
			}
		} break;
		default:
			pr_err("transaction release %d bad object type %x\n",
				debug_id, hdr->type);
			break;
		}
	}
}
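/*
 * When called from the binder_transaction() error paths, failed_at
 * limits the loop above to objects that were already processed, so only
 * references and fds that were actually taken get released.
 */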
static int binder_translate_binder(struct flat_binder_object *fp,
				   struct binder_transaction *t,
				   struct binder_thread *thread)
{
	struct binder_node *node;
	struct binder_proc *proc = thread->proc;
	struct binder_proc *target_proc = t->to_proc;
	struct binder_ref_data rdata;
	int ret = 0;

	node = binder_get_node(proc, fp->binder);
	if (!node) {
		node = binder_new_node(proc, fp);
		if (!node)
			return -ENOMEM;
	}
	if (fp->cookie != node->cookie) {
		binder_user_error("%d:%d sending u%016llx node %d, cookie mismatch %016llx != %016llx\n",
				  proc->pid, thread->pid, (u64)fp->binder,
				  node->debug_id, (u64)fp->cookie,
				  (u64)node->cookie);
		ret = -EINVAL;
		goto done;
	}
	if (security_binder_transfer_binder(proc->tsk, target_proc->tsk)) {
		ret = -EPERM;
		goto done;
	}

	ret = binder_inc_ref_for_node(target_proc, node,
			fp->hdr.type == BINDER_TYPE_BINDER,
			&thread->todo, &rdata);
	if (ret)
		goto done;

	if (fp->hdr.type == BINDER_TYPE_BINDER)
		fp->hdr.type = BINDER_TYPE_HANDLE;
	else
		fp->hdr.type = BINDER_TYPE_WEAK_HANDLE;
	fp->binder = 0;
	fp->handle = rdata.desc;
	fp->cookie = 0;

	trace_binder_transaction_node_to_ref(t, node, &rdata);
	binder_debug(BINDER_DEBUG_TRANSACTION,
		     "        node %d u%016llx -> ref %d desc %d\n",
		     node->debug_id, (u64)node->ptr,
		     rdata.debug_id, rdata.desc);
done:
	binder_put_node(node);
	return ret;
}
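/*
 * Example of the translation above: a process sending one of its own
 * nodes passes BINDER_TYPE_BINDER with its local pointer and cookie;
 * the receiver instead sees BINDER_TYPE_HANDLE with fp->handle set to
 * a descriptor (rdata.desc) valid in the target's ref table, and the
 * raw binder/cookie values zeroed so they never leak across processes.
 */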
static int binder_translate_handle(struct flat_binder_object *fp,
				   struct binder_transaction *t,
				   struct binder_thread *thread)
{
	struct binder_proc *proc = thread->proc;
	struct binder_proc *target_proc = t->to_proc;
	struct binder_node *node;
	struct binder_ref_data src_rdata;
	int ret = 0;

	node = binder_get_node_from_ref(proc, fp->handle,
			fp->hdr.type == BINDER_TYPE_HANDLE, &src_rdata);
	if (!node) {
		binder_user_error("%d:%d got transaction with invalid handle, %d\n",
				  proc->pid, thread->pid, fp->handle);
		return -EINVAL;
	}
	if (security_binder_transfer_binder(proc->tsk, target_proc->tsk)) {
		ret = -EPERM;
		goto done;
	}

	binder_node_lock(node);
	if (node->proc == target_proc) {
		if (fp->hdr.type == BINDER_TYPE_HANDLE)
			fp->hdr.type = BINDER_TYPE_BINDER;
		else
			fp->hdr.type = BINDER_TYPE_WEAK_BINDER;
		fp->binder = node->ptr;
		fp->cookie = node->cookie;
		if (node->proc)
			binder_inner_proc_lock(node->proc);
		else
			__acquire(&node->proc->inner_lock);
		binder_inc_node_nilocked(node,
					 fp->hdr.type == BINDER_TYPE_BINDER,
					 0, NULL);
		if (node->proc)
			binder_inner_proc_unlock(node->proc);
		else
			__release(&node->proc->inner_lock);
		trace_binder_transaction_ref_to_node(t, node, &src_rdata);
		binder_debug(BINDER_DEBUG_TRANSACTION,
			     "        ref %d desc %d -> node %d u%016llx\n",
			     src_rdata.debug_id, src_rdata.desc, node->debug_id,
			     (u64)node->ptr);
		binder_node_unlock(node);
	} else {
		struct binder_ref_data dest_rdata;

		binder_node_unlock(node);
		ret = binder_inc_ref_for_node(target_proc, node,
				fp->hdr.type == BINDER_TYPE_HANDLE,
				NULL, &dest_rdata);
		if (ret)
			goto done;

		fp->binder = 0;
		fp->handle = dest_rdata.desc;
		fp->cookie = 0;
		trace_binder_transaction_ref_to_ref(t, node, &src_rdata,
						    &dest_rdata);
		binder_debug(BINDER_DEBUG_TRANSACTION,
			     "        ref %d desc %d -> ref %d desc %d (node %d)\n",
			     src_rdata.debug_id, src_rdata.desc,
			     dest_rdata.debug_id, dest_rdata.desc,
			     node->debug_id);
	}
done:
	binder_put_node(node);
	return ret;
}
static int binder_translate_fd(u32 fd, binder_size_t fd_offset,
			       struct binder_transaction *t,
			       struct binder_thread *thread,
			       struct binder_transaction *in_reply_to)
{
	struct binder_proc *proc = thread->proc;
	struct binder_proc *target_proc = t->to_proc;
	struct binder_txn_fd_fixup *fixup;
	struct file *file;
	int ret = 0;
	bool target_allows_fd;

	if (in_reply_to)
		target_allows_fd = !!(in_reply_to->flags & TF_ACCEPT_FDS);
	else
		target_allows_fd = t->buffer->target_node->accept_fds;
	if (!target_allows_fd) {
		binder_user_error("%d:%d got %s with fd, %d, but target does not allow fds\n",
				  proc->pid, thread->pid,
				  in_reply_to ? "reply" : "transaction",
				  fd);
		ret = -EPERM;
		goto err_fd_not_accepted;
	}

	file = fget(fd);
	if (!file) {
		binder_user_error("%d:%d got transaction with invalid fd, %d\n",
				  proc->pid, thread->pid, fd);
		ret = -EBADF;
		goto err_fget;
	}
	ret = security_binder_transfer_file(proc->tsk, target_proc->tsk, file);
	if (ret < 0) {
		ret = -EPERM;
		goto err_security;
	}

	/*
	 * Add fixup record for this transaction. The allocation
	 * of the fd in the target needs to be done from a
	 * target thread.
	 */
	fixup = kzalloc(sizeof(*fixup), GFP_KERNEL);
	if (!fixup) {
		ret = -ENOMEM;
		goto err_alloc;
	}
	fixup->file = file;
	fixup->offset = fd_offset;
	trace_binder_transaction_fd_send(t, fd, fixup->offset);
	list_add_tail(&fixup->fixup_entry, &t->fd_fixups);

	return ret;

err_alloc:
err_security:
	fput(file);
err_fget:
err_fd_not_accepted:
	return ret;
}
static int binder_translate_fd_array(struct binder_fd_array_object *fda,
				     struct binder_buffer_object *parent,
				     struct binder_transaction *t,
				     struct binder_thread *thread,
				     struct binder_transaction *in_reply_to)
{
	binder_size_t fdi, fd_buf_size;
	binder_size_t fda_offset;
	struct binder_proc *proc = thread->proc;
	struct binder_proc *target_proc = t->to_proc;

	fd_buf_size = sizeof(u32) * fda->num_fds;
	if (fda->num_fds >= SIZE_MAX / sizeof(u32)) {
		binder_user_error("%d:%d got transaction with invalid number of fds (%lld)\n",
				  proc->pid, thread->pid, (u64)fda->num_fds);
		return -EINVAL;
	}
	if (fd_buf_size > parent->length ||
	    fda->parent_offset > parent->length - fd_buf_size) {
		/* No space for all file descriptors here. */
		binder_user_error("%d:%d not enough space to store %lld fds in buffer\n",
				  proc->pid, thread->pid, (u64)fda->num_fds);
		return -EINVAL;
	}
	/*
	 * the source data for binder_buffer_object is visible
	 * to user-space and the @buffer element is the user
	 * pointer to the buffer_object containing the fd_array.
	 * Convert the address to an offset relative to
	 * the base of the transaction buffer.
	 */
	fda_offset = (parent->buffer - (uintptr_t)t->buffer->user_data) +
		fda->parent_offset;
	if (!IS_ALIGNED((unsigned long)fda_offset, sizeof(u32))) {
		binder_user_error("%d:%d parent offset not aligned correctly.\n",
				  proc->pid, thread->pid);
		return -EINVAL;
	}
	for (fdi = 0; fdi < fda->num_fds; fdi++) {
		u32 fd;
		int ret;
		binder_size_t offset = fda_offset + fdi * sizeof(fd);

		ret = binder_alloc_copy_from_buffer(&target_proc->alloc,
						    &fd, t->buffer,
						    offset, sizeof(fd));
		if (!ret)
			ret = binder_translate_fd(fd, offset, t, thread,
						  in_reply_to);
		if (ret)
			return ret > 0 ? -EINVAL : ret;
	}
	return 0;
}
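/*
 * The fda_offset arithmetic above, by example: if the parent buffer
 * object was copied to user_data + 0x40 and fda->parent_offset is 8,
 * the fd array lives at transaction-buffer offset 0x48, and each u32
 * slot is then translated in place by binder_translate_fd().
 */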
static int binder_fixup_parent(struct binder_transaction *t,
			       struct binder_thread *thread,
			       struct binder_buffer_object *bp,
			       binder_size_t off_start_offset,
			       binder_size_t num_valid,
			       binder_size_t last_fixup_obj_off,
			       binder_size_t last_fixup_min_off)
{
	struct binder_buffer_object *parent;
	struct binder_buffer *b = t->buffer;
	struct binder_proc *proc = thread->proc;
	struct binder_proc *target_proc = t->to_proc;
	struct binder_object object;
	binder_size_t buffer_offset;
	binder_size_t parent_offset;

	if (!(bp->flags & BINDER_BUFFER_FLAG_HAS_PARENT))
		return 0;

	parent = binder_validate_ptr(target_proc, b, &object, bp->parent,
				     off_start_offset, &parent_offset,
				     num_valid);
	if (!parent) {
		binder_user_error("%d:%d got transaction with invalid parent offset or type\n",
				  proc->pid, thread->pid);
		return -EINVAL;
	}

	if (!binder_validate_fixup(target_proc, b, off_start_offset,
				   parent_offset, bp->parent_offset,
				   last_fixup_obj_off,
				   last_fixup_min_off)) {
		binder_user_error("%d:%d got transaction with out-of-order buffer fixup\n",
				  proc->pid, thread->pid);
		return -EINVAL;
	}

	if (parent->length < sizeof(binder_uintptr_t) ||
	    bp->parent_offset > parent->length - sizeof(binder_uintptr_t)) {
		/* No space for a pointer here! */
		binder_user_error("%d:%d got transaction with invalid parent offset\n",
				  proc->pid, thread->pid);
		return -EINVAL;
	}
	buffer_offset = bp->parent_offset +
			(uintptr_t)parent->buffer - (uintptr_t)b->user_data;
	if (binder_alloc_copy_to_buffer(&target_proc->alloc, b, buffer_offset,
					&bp->buffer, sizeof(bp->buffer))) {
		binder_user_error("%d:%d got transaction with invalid parent offset\n",
				  proc->pid, thread->pid);
		return -EINVAL;
	}

	return 0;
}
/**
 * binder_proc_transaction() - sends a transaction to a process and wakes it up
 * @t:		transaction to send
 * @proc:	process to send the transaction to
 * @thread:	thread in @proc to send the transaction to (may be NULL)
 *
 * This function queues a transaction to the specified process. It will try
 * to find a thread in the target process to handle the transaction and
 * wake it up. If no thread is found, the work is queued to the proc
 * waitqueue.
 *
 * If the @thread parameter is not NULL, the transaction is always queued
 * to the waitlist of that specific thread.
 *
 * Return:	true if the transaction was successfully queued
 *		false if the target process or thread is dead
 */
static bool binder_proc_transaction(struct binder_transaction *t,
				    struct binder_proc *proc,
				    struct binder_thread *thread)
{
	struct binder_node *node = t->buffer->target_node;
	bool oneway = !!(t->flags & TF_ONE_WAY);
	bool pending_async = false;

	BUG_ON(!node);
	binder_node_lock(node);
	if (oneway) {
		BUG_ON(thread);
		if (node->has_async_transaction) {
			pending_async = true;
		} else {
			node->has_async_transaction = true;
		}
	}

	binder_inner_proc_lock(proc);

	if (proc->is_dead || (thread && thread->is_dead)) {
		binder_inner_proc_unlock(proc);
		binder_node_unlock(node);
		return false;
	}

	if (!thread && !pending_async)
		thread = binder_select_thread_ilocked(proc);

	if (thread)
		binder_enqueue_thread_work_ilocked(thread, &t->work);
	else if (!pending_async)
		binder_enqueue_work_ilocked(&t->work, &proc->todo);
	else
		binder_enqueue_work_ilocked(&t->work, &node->async_todo);

	if (!pending_async)
		binder_wakeup_thread_ilocked(proc, thread, !oneway /* sync */);

	binder_inner_proc_unlock(proc);
	binder_node_unlock(node);

	return true;
}
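/*
 * The pending_async handling above serializes oneway transactions per
 * node: while one async transaction is in flight, later ones sit on
 * node->async_todo and are only moved to the proc todo list when the
 * in-flight buffer is freed (see binder_free_buf() below).
 */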
/**
 * binder_get_node_refs_for_txn() - Get required refs on node for txn
 * @node:	struct binder_node for which to get refs
 * @procp:	returns @node->proc if valid
 * @error:	if no @procp then returns BR_DEAD_REPLY
 *
 * User-space normally keeps the node alive when creating a transaction
 * since it has a reference to the target. The local strong ref keeps it
 * alive if the sending process dies before the target process processes
 * the transaction. If the source process is malicious or has a reference
 * counting bug, relying on the local strong ref can fail.
 *
 * Since user-space can cause the local strong ref to go away, we also take
 * a tmpref on the node to ensure it survives while we are constructing
 * the transaction. We also need a tmpref on the proc while we are
 * constructing the transaction, so we take that here as well.
 *
 * Return: The target_node with refs taken or NULL if @node->proc is NULL.
 * Also sets @procp if valid. If @node->proc is NULL, indicating that the
 * target proc has died, @error is set to BR_DEAD_REPLY.
 */
static struct binder_node *binder_get_node_refs_for_txn(
		struct binder_node *node,
		struct binder_proc **procp,
		uint32_t *error)
{
	struct binder_node *target_node = NULL;

	binder_node_inner_lock(node);
	if (node->proc) {
		target_node = node;
		binder_inc_node_nilocked(node, 1, 0, NULL);
		binder_inc_node_tmpref_ilocked(node);
		node->proc->tmp_ref++;
		*procp = node->proc;
	} else
		*error = BR_DEAD_REPLY;
	binder_node_inner_unlock(node);

	return target_node;
}
static void binder_transaction(struct binder_proc *proc,
			       struct binder_thread *thread,
			       struct binder_transaction_data *tr, int reply,
			       binder_size_t extra_buffers_size)
{
	int ret;
	struct binder_transaction *t;
	struct binder_work *w;
	struct binder_work *tcomplete;
	binder_size_t buffer_offset = 0;
	binder_size_t off_start_offset, off_end_offset;
	binder_size_t off_min;
	binder_size_t sg_buf_offset, sg_buf_end_offset;
	struct binder_proc *target_proc = NULL;
	struct binder_thread *target_thread = NULL;
	struct binder_node *target_node = NULL;
	struct binder_transaction *in_reply_to = NULL;
	struct binder_transaction_log_entry *e;
	uint32_t return_error = 0;
	uint32_t return_error_param = 0;
	uint32_t return_error_line = 0;
	binder_size_t last_fixup_obj_off = 0;
	binder_size_t last_fixup_min_off = 0;
	struct binder_context *context = proc->context;
	int t_debug_id = atomic_inc_return(&binder_last_id);
	char *secctx = NULL;
	u32 secctx_sz = 0;

	e = binder_transaction_log_add(&binder_transaction_log);
	e->debug_id = t_debug_id;
	e->call_type = reply ? 2 : !!(tr->flags & TF_ONE_WAY);
	e->from_proc = proc->pid;
	e->from_thread = thread->pid;
	e->target_handle = tr->target.handle;
	e->data_size = tr->data_size;
	e->offsets_size = tr->offsets_size;
	strscpy(e->context_name, proc->context->name, BINDERFS_MAX_NAME);

	if (reply) {
		binder_inner_proc_lock(proc);
		in_reply_to = thread->transaction_stack;
		if (in_reply_to == NULL) {
			binder_inner_proc_unlock(proc);
			binder_user_error("%d:%d got reply transaction with no transaction stack\n",
					  proc->pid, thread->pid);
			return_error = BR_FAILED_REPLY;
			return_error_param = -EPROTO;
			return_error_line = __LINE__;
			goto err_empty_call_stack;
		}
		if (in_reply_to->to_thread != thread) {
			spin_lock(&in_reply_to->lock);
			binder_user_error("%d:%d got reply transaction with bad transaction stack, transaction %d has target %d:%d\n",
				proc->pid, thread->pid, in_reply_to->debug_id,
				in_reply_to->to_proc ?
				in_reply_to->to_proc->pid : 0,
				in_reply_to->to_thread ?
				in_reply_to->to_thread->pid : 0);
			spin_unlock(&in_reply_to->lock);
			binder_inner_proc_unlock(proc);
			return_error = BR_FAILED_REPLY;
			return_error_param = -EPROTO;
			return_error_line = __LINE__;
			in_reply_to = NULL;
			goto err_bad_call_stack;
		}
		thread->transaction_stack = in_reply_to->to_parent;
		binder_inner_proc_unlock(proc);
		binder_set_nice(in_reply_to->saved_priority);
		target_thread = binder_get_txn_from_and_acq_inner(in_reply_to);
		if (target_thread == NULL) {
			/* annotation for sparse */
			__release(&target_thread->proc->inner_lock);
			return_error = BR_DEAD_REPLY;
			return_error_line = __LINE__;
			goto err_dead_binder;
		}
		if (target_thread->transaction_stack != in_reply_to) {
			binder_user_error("%d:%d got reply transaction with bad target transaction stack %d, expected %d\n",
				proc->pid, thread->pid,
				target_thread->transaction_stack ?
				target_thread->transaction_stack->debug_id : 0,
				in_reply_to->debug_id);
			binder_inner_proc_unlock(target_thread->proc);
			return_error = BR_FAILED_REPLY;
			return_error_param = -EPROTO;
			return_error_line = __LINE__;
			binder_thread_dec_tmpref(target_thread);
			target_thread = NULL;
			goto err_dead_binder;
		}
		target_proc = target_thread->proc;
		target_proc->tmp_ref++;
		binder_inner_proc_unlock(target_thread->proc);
	} else {
		if (tr->target.handle) {
			struct binder_ref *ref;

			/*
			 * There must already be a strong ref
			 * on this node. If so, do a strong
			 * increment on the node to ensure it
			 * stays alive until the transaction is
			 * done.
			 */
			binder_proc_lock(proc);
			ref = binder_get_ref_olocked(proc, tr->target.handle,
						     true);
			if (ref) {
				target_node = binder_get_node_refs_for_txn(
						ref->node, &target_proc,
						&return_error);
			} else {
				binder_user_error("%d:%d got transaction to invalid handle\n",
						  proc->pid, thread->pid);
				return_error = BR_FAILED_REPLY;
			}
			binder_proc_unlock(proc);
		} else {
			mutex_lock(&context->context_mgr_node_lock);
			target_node = context->binder_context_mgr_node;
			if (target_node)
				target_node = binder_get_node_refs_for_txn(
						target_node, &target_proc,
						&return_error);
			else
				return_error = BR_DEAD_REPLY;
			mutex_unlock(&context->context_mgr_node_lock);
			if (target_node && target_proc->pid == proc->pid) {
				binder_user_error("%d:%d got transaction to context manager from process owning it\n",
						  proc->pid, thread->pid);
				return_error = BR_FAILED_REPLY;
				return_error_param = -EINVAL;
				return_error_line = __LINE__;
				goto err_invalid_target_handle;
			}
		}
		if (!target_node) {
			/*
			 * return_error is set above
			 */
			return_error_param = -EINVAL;
			return_error_line = __LINE__;
			goto err_dead_binder;
		}
		e->to_node = target_node->debug_id;
		if (security_binder_transaction(proc->tsk,
						target_proc->tsk) < 0) {
			return_error = BR_FAILED_REPLY;
			return_error_param = -EPERM;
			return_error_line = __LINE__;
			goto err_invalid_target_handle;
		}
		binder_inner_proc_lock(proc);

		w = list_first_entry_or_null(&thread->todo,
					     struct binder_work, entry);
		if (!(tr->flags & TF_ONE_WAY) && w &&
		    w->type == BINDER_WORK_TRANSACTION) {
			/*
			 * Do not allow new outgoing transaction from a
			 * thread that has a transaction at the head of
			 * its todo list. Only need to check the head
			 * because binder_select_thread_ilocked picks a
			 * thread from proc->waiting_threads to enqueue
			 * the transaction, and nothing is queued to the
			 * todo list while the thread is on waiting_threads.
			 */
			binder_user_error("%d:%d new transaction not allowed when there is a transaction on thread todo\n",
					  proc->pid, thread->pid);
			binder_inner_proc_unlock(proc);
			return_error = BR_FAILED_REPLY;
			return_error_param = -EPROTO;
			return_error_line = __LINE__;
			goto err_bad_todo_list;
		}

		if (!(tr->flags & TF_ONE_WAY) && thread->transaction_stack) {
			struct binder_transaction *tmp;

			tmp = thread->transaction_stack;
			if (tmp->to_thread != thread) {
				spin_lock(&tmp->lock);
				binder_user_error("%d:%d got new transaction with bad transaction stack, transaction %d has target %d:%d\n",
					proc->pid, thread->pid, tmp->debug_id,
					tmp->to_proc ? tmp->to_proc->pid : 0,
					tmp->to_thread ?
					tmp->to_thread->pid : 0);
				spin_unlock(&tmp->lock);
				binder_inner_proc_unlock(proc);
				return_error = BR_FAILED_REPLY;
				return_error_param = -EPROTO;
				return_error_line = __LINE__;
				goto err_bad_call_stack;
			}
			while (tmp) {
				struct binder_thread *from;

				spin_lock(&tmp->lock);
				from = tmp->from;
				if (from && from->proc == target_proc) {
					atomic_inc(&from->tmp_ref);
					target_thread = from;
					spin_unlock(&tmp->lock);
					break;
				}
				spin_unlock(&tmp->lock);
				tmp = tmp->from_parent;
			}
		}
		binder_inner_proc_unlock(proc);
	}
	if (target_thread)
		e->to_thread = target_thread->pid;
	e->to_proc = target_proc->pid;

	/* TODO: reuse incoming transaction for reply */
	t = kzalloc(sizeof(*t), GFP_KERNEL);
	if (t == NULL) {
		return_error = BR_FAILED_REPLY;
		return_error_param = -ENOMEM;
		return_error_line = __LINE__;
		goto err_alloc_t_failed;
	}
	INIT_LIST_HEAD(&t->fd_fixups);
	binder_stats_created(BINDER_STAT_TRANSACTION);
	spin_lock_init(&t->lock);

	tcomplete = kzalloc(sizeof(*tcomplete), GFP_KERNEL);
	if (tcomplete == NULL) {
		return_error = BR_FAILED_REPLY;
		return_error_param = -ENOMEM;
		return_error_line = __LINE__;
		goto err_alloc_tcomplete_failed;
	}
	binder_stats_created(BINDER_STAT_TRANSACTION_COMPLETE);

	t->debug_id = t_debug_id;

	if (reply)
		binder_debug(BINDER_DEBUG_TRANSACTION,
			     "%d:%d BC_REPLY %d -> %d:%d, data %016llx-%016llx size %lld-%lld-%lld\n",
			     proc->pid, thread->pid, t->debug_id,
			     target_proc->pid, target_thread->pid,
			     (u64)tr->data.ptr.buffer,
			     (u64)tr->data.ptr.offsets,
			     (u64)tr->data_size, (u64)tr->offsets_size,
			     (u64)extra_buffers_size);
	else
		binder_debug(BINDER_DEBUG_TRANSACTION,
			     "%d:%d BC_TRANSACTION %d -> %d - node %d, data %016llx-%016llx size %lld-%lld-%lld\n",
			     proc->pid, thread->pid, t->debug_id,
			     target_proc->pid, target_node->debug_id,
			     (u64)tr->data.ptr.buffer,
			     (u64)tr->data.ptr.offsets,
			     (u64)tr->data_size, (u64)tr->offsets_size,
			     (u64)extra_buffers_size);

	if (!reply && !(tr->flags & TF_ONE_WAY))
		t->from = thread;
	else
		t->from = NULL;
	t->sender_euid = task_euid(proc->tsk);
	t->to_proc = target_proc;
	t->to_thread = target_thread;
	t->code = tr->code;
	t->flags = tr->flags;
	t->priority = task_nice(current);

	if (target_node && target_node->txn_security_ctx) {
		u32 secid;
		size_t added_size;

		security_task_getsecid(proc->tsk, &secid);
		ret = security_secid_to_secctx(secid, &secctx, &secctx_sz);
		if (ret) {
			return_error = BR_FAILED_REPLY;
			return_error_param = ret;
			return_error_line = __LINE__;
			goto err_get_secctx_failed;
		}
		added_size = ALIGN(secctx_sz, sizeof(u64));
		extra_buffers_size += added_size;
		if (extra_buffers_size < added_size) {
			/* integer overflow of extra_buffers_size */
			return_error = BR_FAILED_REPLY;
			return_error_param = EINVAL;
			return_error_line = __LINE__;
			goto err_bad_extra_size;
		}
	}

	trace_binder_transaction(reply, t, target_node);

	t->buffer = binder_alloc_new_buf(&target_proc->alloc, tr->data_size,
		tr->offsets_size, extra_buffers_size,
		!reply && (t->flags & TF_ONE_WAY));
	if (IS_ERR(t->buffer)) {
		/*
		 * -ESRCH indicates VMA cleared. The target is dying.
		 */
		return_error_param = PTR_ERR(t->buffer);
		return_error = return_error_param == -ESRCH ?
			BR_DEAD_REPLY : BR_FAILED_REPLY;
		return_error_line = __LINE__;
		t->buffer = NULL;
		goto err_binder_alloc_buf_failed;
	}
	if (secctx) {
		int err;
		size_t buf_offset = ALIGN(tr->data_size, sizeof(void *)) +
				    ALIGN(tr->offsets_size, sizeof(void *)) +
				    ALIGN(extra_buffers_size, sizeof(void *)) -
				    ALIGN(secctx_sz, sizeof(u64));

		t->security_ctx = (uintptr_t)t->buffer->user_data + buf_offset;
		err = binder_alloc_copy_to_buffer(&target_proc->alloc,
						  t->buffer, buf_offset,
						  secctx, secctx_sz);
		if (err) {
			t->security_ctx = 0;
			WARN_ON(1);
		}
		security_release_secctx(secctx, secctx_sz);
		secctx = NULL;
	}
	t->buffer->debug_id = t->debug_id;
	t->buffer->transaction = t;
	t->buffer->target_node = target_node;
	trace_binder_transaction_alloc_buf(t->buffer);

	if (binder_alloc_copy_user_to_buffer(
				&target_proc->alloc,
				t->buffer, 0,
				(const void __user *)
					(uintptr_t)tr->data.ptr.buffer,
				tr->data_size)) {
		binder_user_error("%d:%d got transaction with invalid data ptr\n",
				proc->pid, thread->pid);
		return_error = BR_FAILED_REPLY;
		return_error_param = -EFAULT;
		return_error_line = __LINE__;
		goto err_copy_data_failed;
	}
	if (binder_alloc_copy_user_to_buffer(
				&target_proc->alloc,
				t->buffer,
				ALIGN(tr->data_size, sizeof(void *)),
				(const void __user *)
					(uintptr_t)tr->data.ptr.offsets,
				tr->offsets_size)) {
		binder_user_error("%d:%d got transaction with invalid offsets ptr\n",
				proc->pid, thread->pid);
		return_error = BR_FAILED_REPLY;
		return_error_param = -EFAULT;
		return_error_line = __LINE__;
		goto err_copy_data_failed;
	}
	if (!IS_ALIGNED(tr->offsets_size, sizeof(binder_size_t))) {
		binder_user_error("%d:%d got transaction with invalid offsets size, %lld\n",
				proc->pid, thread->pid, (u64)tr->offsets_size);
		return_error = BR_FAILED_REPLY;
		return_error_param = -EINVAL;
		return_error_line = __LINE__;
		goto err_bad_offset;
	}
	if (!IS_ALIGNED(extra_buffers_size, sizeof(u64))) {
		binder_user_error("%d:%d got transaction with unaligned buffers size, %lld\n",
				  proc->pid, thread->pid,
				  (u64)extra_buffers_size);
		return_error = BR_FAILED_REPLY;
		return_error_param = -EINVAL;
		return_error_line = __LINE__;
		goto err_bad_offset;
	}
	off_start_offset = ALIGN(tr->data_size, sizeof(void *));
	buffer_offset = off_start_offset;
	off_end_offset = off_start_offset + tr->offsets_size;
	sg_buf_offset = ALIGN(off_end_offset, sizeof(void *));
	sg_buf_end_offset = sg_buf_offset + extra_buffers_size -
		ALIGN(secctx_sz, sizeof(u64));
	off_min = 0;
	for (buffer_offset = off_start_offset; buffer_offset < off_end_offset;
	     buffer_offset += sizeof(binder_size_t)) {
		struct binder_object_header *hdr;
		size_t object_size;
		struct binder_object object;
		binder_size_t object_offset;

		if (binder_alloc_copy_from_buffer(&target_proc->alloc,
						  &object_offset,
						  t->buffer,
						  buffer_offset,
						  sizeof(object_offset))) {
			return_error = BR_FAILED_REPLY;
			return_error_param = -EINVAL;
			return_error_line = __LINE__;
			goto err_bad_offset;
		}
		object_size = binder_get_object(target_proc, t->buffer,
						object_offset, &object);
		if (object_size == 0 || object_offset < off_min) {
			binder_user_error("%d:%d got transaction with invalid offset (%lld, min %lld max %lld) or object.\n",
					  proc->pid, thread->pid,
					  (u64)object_offset,
					  (u64)off_min,
					  (u64)t->buffer->data_size);
			return_error = BR_FAILED_REPLY;
			return_error_param = -EINVAL;
			return_error_line = __LINE__;
			goto err_bad_offset;
		}

		hdr = &object.hdr;
		off_min = object_offset + object_size;
		switch (hdr->type) {
		case BINDER_TYPE_BINDER:
		case BINDER_TYPE_WEAK_BINDER: {
			struct flat_binder_object *fp;

			fp = to_flat_binder_object(hdr);
			ret = binder_translate_binder(fp, t, thread);

			if (ret < 0 ||
			    binder_alloc_copy_to_buffer(&target_proc->alloc,
							t->buffer,
							object_offset,
							fp, sizeof(*fp))) {
				return_error = BR_FAILED_REPLY;
				return_error_param = ret;
				return_error_line = __LINE__;
				goto err_translate_failed;
			}
		} break;
		case BINDER_TYPE_HANDLE:
		case BINDER_TYPE_WEAK_HANDLE: {
			struct flat_binder_object *fp;

			fp = to_flat_binder_object(hdr);
			ret = binder_translate_handle(fp, t, thread);
			if (ret < 0 ||
			    binder_alloc_copy_to_buffer(&target_proc->alloc,
							t->buffer,
							object_offset,
							fp, sizeof(*fp))) {
				return_error = BR_FAILED_REPLY;
				return_error_param = ret;
				return_error_line = __LINE__;
				goto err_translate_failed;
			}
		} break;

		case BINDER_TYPE_FD: {
			struct binder_fd_object *fp = to_binder_fd_object(hdr);
			binder_size_t fd_offset = object_offset +
				(uintptr_t)&fp->fd - (uintptr_t)fp;
			int ret = binder_translate_fd(fp->fd, fd_offset, t,
						      thread, in_reply_to);

			fp->pad_binder = 0;
			if (ret < 0 ||
			    binder_alloc_copy_to_buffer(&target_proc->alloc,
							t->buffer,
							object_offset,
							fp, sizeof(*fp))) {
				return_error = BR_FAILED_REPLY;
				return_error_param = ret;
				return_error_line = __LINE__;
				goto err_translate_failed;
			}
		} break;
		case BINDER_TYPE_FDA: {
			struct binder_object ptr_object;
			binder_size_t parent_offset;
			struct binder_fd_array_object *fda =
				to_binder_fd_array_object(hdr);
			size_t num_valid = (buffer_offset - off_start_offset) /
						sizeof(binder_size_t);
			struct binder_buffer_object *parent =
				binder_validate_ptr(target_proc, t->buffer,
						    &ptr_object, fda->parent,
						    off_start_offset,
						    &parent_offset,
						    num_valid);
			if (!parent) {
				binder_user_error("%d:%d got transaction with invalid parent offset or type\n",
						  proc->pid, thread->pid);
				return_error = BR_FAILED_REPLY;
				return_error_param = -EINVAL;
				return_error_line = __LINE__;
				goto err_bad_parent;
			}
			if (!binder_validate_fixup(target_proc, t->buffer,
						   off_start_offset,
						   parent_offset,
						   fda->parent_offset,
						   last_fixup_obj_off,
						   last_fixup_min_off)) {
				binder_user_error("%d:%d got transaction with out-of-order buffer fixup\n",
						  proc->pid, thread->pid);
				return_error = BR_FAILED_REPLY;
				return_error_param = -EINVAL;
				return_error_line = __LINE__;
				goto err_bad_parent;
			}
			ret = binder_translate_fd_array(fda, parent, t, thread,
							in_reply_to);
			if (ret < 0) {
				return_error = BR_FAILED_REPLY;
				return_error_param = ret;
				return_error_line = __LINE__;
				goto err_translate_failed;
			}
			last_fixup_obj_off = parent_offset;
			last_fixup_min_off =
				fda->parent_offset + sizeof(u32) * fda->num_fds;
		} break;
		case BINDER_TYPE_PTR: {
			struct binder_buffer_object *bp =
				to_binder_buffer_object(hdr);
			size_t buf_left = sg_buf_end_offset - sg_buf_offset;
			size_t num_valid;

			if (bp->length > buf_left) {
				binder_user_error("%d:%d got transaction with too large buffer\n",
						  proc->pid, thread->pid);
				return_error = BR_FAILED_REPLY;
				return_error_param = -EINVAL;
				return_error_line = __LINE__;
				goto err_bad_offset;
			}
			if (binder_alloc_copy_user_to_buffer(
						&target_proc->alloc,
						t->buffer,
						sg_buf_offset,
						(const void __user *)
							(uintptr_t)bp->buffer,
						bp->length)) {
				binder_user_error("%d:%d got transaction with invalid offsets ptr\n",
						  proc->pid, thread->pid);
				return_error_param = -EFAULT;
				return_error = BR_FAILED_REPLY;
				return_error_line = __LINE__;
				goto err_copy_data_failed;
			}
			/* Fixup buffer pointer to target proc address space */
			bp->buffer = (uintptr_t)
				t->buffer->user_data + sg_buf_offset;
			sg_buf_offset += ALIGN(bp->length, sizeof(u64));

			num_valid = (buffer_offset - off_start_offset) /
					sizeof(binder_size_t);
			ret = binder_fixup_parent(t, thread, bp,
						  off_start_offset,
						  num_valid,
						  last_fixup_obj_off,
						  last_fixup_min_off);
			if (ret < 0 ||
			    binder_alloc_copy_to_buffer(&target_proc->alloc,
							t->buffer,
							object_offset,
							bp, sizeof(*bp))) {
				return_error = BR_FAILED_REPLY;
				return_error_param = ret;
				return_error_line = __LINE__;
				goto err_translate_failed;
			}
			last_fixup_obj_off = object_offset;
			last_fixup_min_off = 0;
		} break;
		default:
			binder_user_error("%d:%d got transaction with invalid object type, %x\n",
				proc->pid, thread->pid, hdr->type);
			return_error = BR_FAILED_REPLY;
			return_error_param = -EINVAL;
			return_error_line = __LINE__;
			goto err_bad_object_type;
		}
	}
	tcomplete->type = BINDER_WORK_TRANSACTION_COMPLETE;
	t->work.type = BINDER_WORK_TRANSACTION;

	if (reply) {
		binder_enqueue_thread_work(thread, tcomplete);
		binder_inner_proc_lock(target_proc);
		if (target_thread->is_dead) {
			binder_inner_proc_unlock(target_proc);
			goto err_dead_proc_or_thread;
		}
		BUG_ON(t->buffer->async_transaction != 0);
		binder_pop_transaction_ilocked(target_thread, in_reply_to);
		binder_enqueue_thread_work_ilocked(target_thread, &t->work);
		binder_inner_proc_unlock(target_proc);
		wake_up_interruptible_sync(&target_thread->wait);
		binder_free_transaction(in_reply_to);
	} else if (!(t->flags & TF_ONE_WAY)) {
		BUG_ON(t->buffer->async_transaction != 0);
		binder_inner_proc_lock(proc);
		/*
		 * Defer the TRANSACTION_COMPLETE, so we don't return to
		 * userspace immediately; this allows the target process to
		 * immediately start processing this transaction, reducing
		 * latency. We will then return the TRANSACTION_COMPLETE when
		 * the target replies (or there is an error).
		 */
		binder_enqueue_deferred_thread_work_ilocked(thread, tcomplete);
		t->need_reply = 1;
		t->from_parent = thread->transaction_stack;
		thread->transaction_stack = t;
		binder_inner_proc_unlock(proc);
		if (!binder_proc_transaction(t, target_proc, target_thread)) {
			binder_inner_proc_lock(proc);
			binder_pop_transaction_ilocked(thread, t);
			binder_inner_proc_unlock(proc);
			goto err_dead_proc_or_thread;
		}
	} else {
		BUG_ON(target_node == NULL);
		BUG_ON(t->buffer->async_transaction != 1);
		binder_enqueue_thread_work(thread, tcomplete);
		if (!binder_proc_transaction(t, target_proc, NULL))
			goto err_dead_proc_or_thread;
	}
	if (target_thread)
		binder_thread_dec_tmpref(target_thread);
	binder_proc_dec_tmpref(target_proc);
	if (target_node)
		binder_dec_node_tmpref(target_node);
	/*
	 * write barrier to synchronize with initialization
	 * of log entry
	 */
	smp_wmb();
	WRITE_ONCE(e->debug_id_done, t_debug_id);
	return;

err_dead_proc_or_thread:
	return_error = BR_DEAD_REPLY;
	return_error_line = __LINE__;
	binder_dequeue_work(proc, tcomplete);
err_translate_failed:
err_bad_object_type:
err_bad_offset:
err_bad_parent:
err_copy_data_failed:
	binder_free_txn_fixups(t);
	trace_binder_transaction_failed_buffer_release(t->buffer);
	binder_transaction_buffer_release(target_proc, t->buffer,
					  buffer_offset, true);
	if (target_node)
		binder_dec_node_tmpref(target_node);
	target_node = NULL;
	t->buffer->transaction = NULL;
	binder_alloc_free_buf(&target_proc->alloc, t->buffer);
err_binder_alloc_buf_failed:
err_bad_extra_size:
	if (secctx)
		security_release_secctx(secctx, secctx_sz);
err_get_secctx_failed:
	kfree(tcomplete);
	binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
err_alloc_tcomplete_failed:
	kfree(t);
	binder_stats_deleted(BINDER_STAT_TRANSACTION);
err_alloc_t_failed:
err_bad_todo_list:
err_bad_call_stack:
err_empty_call_stack:
err_dead_binder:
err_invalid_target_handle:
	if (target_thread)
		binder_thread_dec_tmpref(target_thread);
	if (target_proc)
		binder_proc_dec_tmpref(target_proc);
	if (target_node) {
		binder_dec_node(target_node, 1, 0);
		binder_dec_node_tmpref(target_node);
	}

	binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
		     "%d:%d transaction failed %d/%d, size %lld-%lld line %d\n",
		     proc->pid, thread->pid, return_error, return_error_param,
		     (u64)tr->data_size, (u64)tr->offsets_size,
		     return_error_line);

	{
		struct binder_transaction_log_entry *fe;

		e->return_error = return_error;
		e->return_error_param = return_error_param;
		e->return_error_line = return_error_line;
		fe = binder_transaction_log_add(&binder_transaction_log_failed);
		*fe = *e;
		/*
		 * write barrier to synchronize with initialization
		 * of log entry
		 */
		smp_wmb();
		WRITE_ONCE(e->debug_id_done, t_debug_id);
		WRITE_ONCE(fe->debug_id_done, t_debug_id);
	}

	BUG_ON(thread->return_error.cmd != BR_OK);
	if (in_reply_to) {
		thread->return_error.cmd = BR_TRANSACTION_COMPLETE;
		binder_enqueue_thread_work(thread, &thread->return_error.work);
		binder_send_failed_reply(in_reply_to, return_error);
	} else {
		thread->return_error.cmd = return_error;
		binder_enqueue_thread_work(thread, &thread->return_error.work);
	}
}
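/*
 * The error labels above unwind in reverse order of setup: buffer
 * contents are released before the buffer itself, the tcomplete and
 * transaction allocations are freed next, and the temporary
 * thread/proc/node references are dropped last, mirroring how they
 * were acquired.
 */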
/**
 * binder_free_buf() - free the specified buffer
 * @proc:	binder proc that owns buffer
 * @buffer:	buffer to be freed
 *
 * If buffer for an async transaction, enqueue the next async
 * transaction from the node.
 *
 * Cleanup buffer and free it.
 */
static void
binder_free_buf(struct binder_proc *proc, struct binder_buffer *buffer)
{
	binder_inner_proc_lock(proc);
	if (buffer->transaction) {
		buffer->transaction->buffer = NULL;
		buffer->transaction = NULL;
	}
	binder_inner_proc_unlock(proc);
	if (buffer->async_transaction && buffer->target_node) {
		struct binder_node *buf_node;
		struct binder_work *w;

		buf_node = buffer->target_node;
		binder_node_inner_lock(buf_node);
		BUG_ON(!buf_node->has_async_transaction);
		BUG_ON(buf_node->proc != proc);
		w = binder_dequeue_work_head_ilocked(
				&buf_node->async_todo);
		if (!w) {
			buf_node->has_async_transaction = false;
		} else {
			binder_enqueue_work_ilocked(
					w, &proc->todo);
			binder_wakeup_proc_ilocked(proc);
		}
		binder_node_inner_unlock(buf_node);
	}
	trace_binder_transaction_buffer_release(buffer);
	binder_transaction_buffer_release(proc, buffer, 0, false);
	binder_alloc_free_buf(&proc->alloc, buffer);
}
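/*
 * This is the release half of the async throttling in
 * binder_proc_transaction(): freeing the buffer of a completed oneway
 * transaction is what dequeues the next entry on node->async_todo and
 * schedules it on the proc todo list.
 */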
static int binder_thread_write(struct binder_proc *proc,
			struct binder_thread *thread,
			binder_uintptr_t binder_buffer, size_t size,
			binder_size_t *consumed)
{
	uint32_t cmd;
	struct binder_context *context = proc->context;
	void __user *buffer = (void __user *)(uintptr_t)binder_buffer;
	void __user *ptr = buffer + *consumed;
	void __user *end = buffer + size;

	while (ptr < end && thread->return_error.cmd == BR_OK) {
		int ret;

		if (get_user(cmd, (uint32_t __user *)ptr))
			return -EFAULT;
		ptr += sizeof(uint32_t);
		trace_binder_command(cmd);
		if (_IOC_NR(cmd) < ARRAY_SIZE(binder_stats.bc)) {
			atomic_inc(&binder_stats.bc[_IOC_NR(cmd)]);
			atomic_inc(&proc->stats.bc[_IOC_NR(cmd)]);
			atomic_inc(&thread->stats.bc[_IOC_NR(cmd)]);
		}
		switch (cmd) {
		case BC_INCREFS:
		case BC_ACQUIRE:
		case BC_RELEASE:
		case BC_DECREFS: {
			uint32_t target;
			const char *debug_string;
			bool strong = cmd == BC_ACQUIRE || cmd == BC_RELEASE;
			bool increment = cmd == BC_INCREFS || cmd == BC_ACQUIRE;
			struct binder_ref_data rdata;

			if (get_user(target, (uint32_t __user *)ptr))
				return -EFAULT;

			ptr += sizeof(uint32_t);
			ret = -1;
			if (increment && !target) {
				struct binder_node *ctx_mgr_node;
				mutex_lock(&context->context_mgr_node_lock);
				ctx_mgr_node = context->binder_context_mgr_node;
				if (ctx_mgr_node)
					ret = binder_inc_ref_for_node(
							proc, ctx_mgr_node,
							strong, NULL, &rdata);
				mutex_unlock(&context->context_mgr_node_lock);
			}
			if (ret)
				ret = binder_update_ref_for_handle(
						proc, target, increment, strong,
						&rdata);
			if (!ret && rdata.desc != target) {
				binder_user_error("%d:%d tried to acquire reference to desc %d, got %d instead\n",
					proc->pid, thread->pid,
					target, rdata.desc);
			}
			switch (cmd) {
			case BC_INCREFS:
				debug_string = "IncRefs";
				break;
			case BC_ACQUIRE:
				debug_string = "Acquire";
				break;
			case BC_RELEASE:
				debug_string = "Release";
				break;
			case BC_DECREFS:
			default:
				debug_string = "DecRefs";
				break;
			}
			if (ret) {
				binder_user_error("%d:%d %s %d refcount change on invalid ref %d ret %d\n",
					proc->pid, thread->pid, debug_string,
					strong, target, ret);
				break;
			}
			binder_debug(BINDER_DEBUG_USER_REFS,
				     "%d:%d %s ref %d desc %d s %d w %d\n",
				     proc->pid, thread->pid, debug_string,
				     rdata.debug_id, rdata.desc, rdata.strong,
				     rdata.weak);
			break;
		}
		case BC_INCREFS_DONE:
		case BC_ACQUIRE_DONE: {
			binder_uintptr_t node_ptr;
			binder_uintptr_t cookie;
			struct binder_node *node;
			bool free_node;

			if (get_user(node_ptr, (binder_uintptr_t __user *)ptr))
				return -EFAULT;
			ptr += sizeof(binder_uintptr_t);
			if (get_user(cookie, (binder_uintptr_t __user *)ptr))
				return -EFAULT;
			ptr += sizeof(binder_uintptr_t);
			node = binder_get_node(proc, node_ptr);
			if (node == NULL) {
				binder_user_error("%d:%d %s u%016llx no match\n",
					proc->pid, thread->pid,
					cmd == BC_INCREFS_DONE ?
					"BC_INCREFS_DONE" :
					"BC_ACQUIRE_DONE",
					(u64)node_ptr);
				break;
			}
			if (cookie != node->cookie) {
				binder_user_error("%d:%d %s u%016llx node %d cookie mismatch %016llx != %016llx\n",
					proc->pid, thread->pid,
					cmd == BC_INCREFS_DONE ?
					"BC_INCREFS_DONE" : "BC_ACQUIRE_DONE",
					(u64)node_ptr, node->debug_id,
					(u64)cookie, (u64)node->cookie);
				binder_put_node(node);
				break;
			}
			binder_node_inner_lock(node);
			if (cmd == BC_ACQUIRE_DONE) {
				if (node->pending_strong_ref == 0) {
					binder_user_error("%d:%d BC_ACQUIRE_DONE node %d has no pending acquire request\n",
						proc->pid, thread->pid,
						node->debug_id);
					binder_node_inner_unlock(node);
					binder_put_node(node);
					break;
				}
				node->pending_strong_ref = 0;
			} else {
				if (node->pending_weak_ref == 0) {
					binder_user_error("%d:%d BC_INCREFS_DONE node %d has no pending increfs request\n",
						proc->pid, thread->pid,
						node->debug_id);
					binder_node_inner_unlock(node);
					binder_put_node(node);
					break;
				}
				node->pending_weak_ref = 0;
			}
			free_node = binder_dec_node_nilocked(node,
					cmd == BC_ACQUIRE_DONE, 0);
			WARN_ON(free_node);
			binder_debug(BINDER_DEBUG_USER_REFS,
				     "%d:%d %s node %d ls %d lw %d tr %d\n",
				     proc->pid, thread->pid,
				     cmd == BC_INCREFS_DONE ? "BC_INCREFS_DONE" : "BC_ACQUIRE_DONE",
				     node->debug_id, node->local_strong_refs,
				     node->local_weak_refs, node->tmp_refs);
			binder_node_inner_unlock(node);
			binder_put_node(node);
			break;
		}
		case BC_ATTEMPT_ACQUIRE:
			pr_err("BC_ATTEMPT_ACQUIRE not supported\n");
			return -EINVAL;
		case BC_ACQUIRE_RESULT:
			pr_err("BC_ACQUIRE_RESULT not supported\n");
			return -EINVAL;

		case BC_FREE_BUFFER: {
			binder_uintptr_t data_ptr;
			struct binder_buffer *buffer;

			if (get_user(data_ptr, (binder_uintptr_t __user *)ptr))
				return -EFAULT;
			ptr += sizeof(binder_uintptr_t);

			buffer = binder_alloc_prepare_to_free(&proc->alloc,
							      data_ptr);
			if (IS_ERR_OR_NULL(buffer)) {
				if (PTR_ERR(buffer) == -EPERM) {
					binder_user_error(
						"%d:%d BC_FREE_BUFFER u%016llx matched unreturned or currently freeing buffer\n",
						proc->pid, thread->pid,
						(u64)data_ptr);
				} else {
					binder_user_error(
						"%d:%d BC_FREE_BUFFER u%016llx no match\n",
						proc->pid, thread->pid,
						(u64)data_ptr);
				}
				break;
			}
			binder_debug(BINDER_DEBUG_FREE_BUFFER,
				     "%d:%d BC_FREE_BUFFER u%016llx found buffer %d for %s transaction\n",
				     proc->pid, thread->pid, (u64)data_ptr,
				     buffer->debug_id,
				     buffer->transaction ? "active" : "finished");
			binder_free_buf(proc, buffer);
			break;
		}

		case BC_TRANSACTION_SG:
		case BC_REPLY_SG: {
			struct binder_transaction_data_sg tr;

			if (copy_from_user(&tr, ptr, sizeof(tr)))
				return -EFAULT;
			ptr += sizeof(tr);
			binder_transaction(proc, thread, &tr.transaction_data,
					   cmd == BC_REPLY_SG, tr.buffers_size);
			break;
		}
		case BC_TRANSACTION:
		case BC_REPLY: {
			struct binder_transaction_data tr;

			if (copy_from_user(&tr, ptr, sizeof(tr)))
				return -EFAULT;
			ptr += sizeof(tr);
			binder_transaction(proc, thread, &tr,
					   cmd == BC_REPLY, 0);
			break;
		}

		case BC_REGISTER_LOOPER:
			binder_debug(BINDER_DEBUG_THREADS,
				     "%d:%d BC_REGISTER_LOOPER\n",
				     proc->pid, thread->pid);
			binder_inner_proc_lock(proc);
			if (thread->looper & BINDER_LOOPER_STATE_ENTERED) {
				thread->looper |= BINDER_LOOPER_STATE_INVALID;
				binder_user_error("%d:%d ERROR: BC_REGISTER_LOOPER called after BC_ENTER_LOOPER\n",
					proc->pid, thread->pid);
			} else if (proc->requested_threads == 0) {
				thread->looper |= BINDER_LOOPER_STATE_INVALID;
				binder_user_error("%d:%d ERROR: BC_REGISTER_LOOPER called without request\n",
					proc->pid, thread->pid);
			} else {
				proc->requested_threads--;
				proc->requested_threads_started++;
			}
			thread->looper |= BINDER_LOOPER_STATE_REGISTERED;
			binder_inner_proc_unlock(proc);
			break;
		case BC_ENTER_LOOPER:
			binder_debug(BINDER_DEBUG_THREADS,
				     "%d:%d BC_ENTER_LOOPER\n",
				     proc->pid, thread->pid);
			if (thread->looper & BINDER_LOOPER_STATE_REGISTERED) {
				thread->looper |= BINDER_LOOPER_STATE_INVALID;
				binder_user_error("%d:%d ERROR: BC_ENTER_LOOPER called after BC_REGISTER_LOOPER\n",
					proc->pid, thread->pid);
			}
			thread->looper |= BINDER_LOOPER_STATE_ENTERED;
			break;
		case BC_EXIT_LOOPER:
			binder_debug(BINDER_DEBUG_THREADS,
				     "%d:%d BC_EXIT_LOOPER\n",
				     proc->pid, thread->pid);
			thread->looper |= BINDER_LOOPER_STATE_EXITED;
			break;

		case BC_REQUEST_DEATH_NOTIFICATION:
		case BC_CLEAR_DEATH_NOTIFICATION: {
			uint32_t target;
			binder_uintptr_t cookie;
			struct binder_ref *ref;
			struct binder_ref_death *death = NULL;

			if (get_user(target, (uint32_t __user *)ptr))
				return -EFAULT;
			ptr += sizeof(uint32_t);
			if (get_user(cookie, (binder_uintptr_t __user *)ptr))
				return -EFAULT;
			ptr += sizeof(binder_uintptr_t);
			if (cmd == BC_REQUEST_DEATH_NOTIFICATION) {
				/*
				 * Allocate memory for death notification
				 * before taking lock
				 */
				death = kzalloc(sizeof(*death), GFP_KERNEL);
				if (death == NULL) {
					WARN_ON(thread->return_error.cmd !=
						BR_OK);
					thread->return_error.cmd = BR_ERROR;
					binder_enqueue_thread_work(
						thread,
						&thread->return_error.work);
					binder_debug(
						BINDER_DEBUG_FAILED_TRANSACTION,
						"%d:%d BC_REQUEST_DEATH_NOTIFICATION failed\n",
						proc->pid, thread->pid);
					break;
				}
			}
			binder_proc_lock(proc);
			ref = binder_get_ref_olocked(proc, target, false);
			if (ref == NULL) {
				binder_user_error("%d:%d %s invalid ref %d\n",
					proc->pid, thread->pid,
					cmd == BC_REQUEST_DEATH_NOTIFICATION ?
					"BC_REQUEST_DEATH_NOTIFICATION" :
					"BC_CLEAR_DEATH_NOTIFICATION",
					target);
				binder_proc_unlock(proc);
				kfree(death);
				break;
			}

			binder_debug(BINDER_DEBUG_DEATH_NOTIFICATION,
				     "%d:%d %s %016llx ref %d desc %d s %d w %d for node %d\n",
				     proc->pid, thread->pid,
				     cmd == BC_REQUEST_DEATH_NOTIFICATION ?
				     "BC_REQUEST_DEATH_NOTIFICATION" :
				     "BC_CLEAR_DEATH_NOTIFICATION",
				     (u64)cookie, ref->data.debug_id,
				     ref->data.desc, ref->data.strong,
				     ref->data.weak, ref->node->debug_id);

			binder_node_lock(ref->node);
			if (cmd == BC_REQUEST_DEATH_NOTIFICATION) {
				if (ref->death) {
					binder_user_error("%d:%d BC_REQUEST_DEATH_NOTIFICATION death notification already set\n",
						proc->pid, thread->pid);
					binder_node_unlock(ref->node);
					binder_proc_unlock(proc);
					kfree(death);
					break;
				}
				binder_stats_created(BINDER_STAT_DEATH);
				INIT_LIST_HEAD(&death->work.entry);
				death->cookie = cookie;
				ref->death = death;
				if (ref->node->proc == NULL) {
					ref->death->work.type = BINDER_WORK_DEAD_BINDER;

					binder_inner_proc_lock(proc);
					binder_enqueue_work_ilocked(
						&ref->death->work, &proc->todo);
					binder_wakeup_proc_ilocked(proc);
					binder_inner_proc_unlock(proc);
				}
			} else {
				if (ref->death == NULL) {
					binder_user_error("%d:%d BC_CLEAR_DEATH_NOTIFICATION death notification not active\n",
						proc->pid, thread->pid);
					binder_node_unlock(ref->node);
					binder_proc_unlock(proc);
					break;
				}
				death = ref->death;
				if (death->cookie != cookie) {
					binder_user_error("%d:%d BC_CLEAR_DEATH_NOTIFICATION death notification cookie mismatch %016llx != %016llx\n",
						proc->pid, thread->pid,
						(u64)death->cookie,
						(u64)cookie);
					binder_node_unlock(ref->node);
					binder_proc_unlock(proc);
					break;
				}
				ref->death = NULL;
				binder_inner_proc_lock(proc);
				if (list_empty(&death->work.entry)) {
					death->work.type = BINDER_WORK_CLEAR_DEATH_NOTIFICATION;
					if (thread->looper &
					    (BINDER_LOOPER_STATE_REGISTERED |
					     BINDER_LOOPER_STATE_ENTERED))
						binder_enqueue_thread_work_ilocked(
								thread,
								&death->work);
					else {
						binder_enqueue_work_ilocked(
								&death->work,
								&proc->todo);
						binder_wakeup_proc_ilocked(
								proc);
					}
				} else {
					BUG_ON(death->work.type != BINDER_WORK_DEAD_BINDER);
					death->work.type = BINDER_WORK_DEAD_BINDER_AND_CLEAR;
				}
				binder_inner_proc_unlock(proc);
			}
			binder_node_unlock(ref->node);
			binder_proc_unlock(proc);
		} break;
		case BC_DEAD_BINDER_DONE: {
			struct binder_work *w;
			binder_uintptr_t cookie;
			struct binder_ref_death *death = NULL;

			if (get_user(cookie, (binder_uintptr_t __user *)ptr))
				return -EFAULT;

			ptr += sizeof(cookie);
			binder_inner_proc_lock(proc);
			list_for_each_entry(w, &proc->delivered_death,
					    entry) {
				struct binder_ref_death *tmp_death =
					container_of(w,
						     struct binder_ref_death,
						     work);

				if (tmp_death->cookie == cookie) {
					death = tmp_death;
					break;
				}
			}
			binder_debug(BINDER_DEBUG_DEAD_BINDER,
				     "%d:%d BC_DEAD_BINDER_DONE %016llx found %pK\n",
				     proc->pid, thread->pid, (u64)cookie,
				     death);
			if (death == NULL) {
				binder_user_error("%d:%d BC_DEAD_BINDER_DONE %016llx not found\n",
					proc->pid, thread->pid, (u64)cookie);
				binder_inner_proc_unlock(proc);
				break;
			}
			binder_dequeue_work_ilocked(&death->work);
			if (death->work.type == BINDER_WORK_DEAD_BINDER_AND_CLEAR) {
				death->work.type = BINDER_WORK_CLEAR_DEATH_NOTIFICATION;
				if (thread->looper &
				    (BINDER_LOOPER_STATE_REGISTERED |
				     BINDER_LOOPER_STATE_ENTERED))
					binder_enqueue_thread_work_ilocked(
						thread, &death->work);
				else {
					binder_enqueue_work_ilocked(
							&death->work,
							&proc->todo);
					binder_wakeup_proc_ilocked(proc);
				}
			}
			binder_inner_proc_unlock(proc);
		} break;

		default:
			pr_err("%d:%d unknown command %d\n",
			       proc->pid, thread->pid, cmd);
			return -EINVAL;
		}
		*consumed = ptr - buffer;
	}
	return 0;
}
static void binder_stat_br(struct binder_proc *proc,
			   struct binder_thread *thread, uint32_t cmd)
{
	trace_binder_return(cmd);
	if (_IOC_NR(cmd) < ARRAY_SIZE(binder_stats.br)) {
		atomic_inc(&binder_stats.br[_IOC_NR(cmd)]);
		atomic_inc(&proc->stats.br[_IOC_NR(cmd)]);
		atomic_inc(&thread->stats.br[_IOC_NR(cmd)]);
	}
}
static int binder_put_node_cmd(struct binder_proc *proc,
			       struct binder_thread *thread,
			       void __user **ptrp,
			       binder_uintptr_t node_ptr,
			       binder_uintptr_t node_cookie,
			       int node_debug_id,
			       uint32_t cmd, const char *cmd_name)
{
	void __user *ptr = *ptrp;

	if (put_user(cmd, (uint32_t __user *)ptr))
		return -EFAULT;
	ptr += sizeof(uint32_t);

	if (put_user(node_ptr, (binder_uintptr_t __user *)ptr))
		return -EFAULT;
	ptr += sizeof(binder_uintptr_t);

	if (put_user(node_cookie, (binder_uintptr_t __user *)ptr))
		return -EFAULT;
	ptr += sizeof(binder_uintptr_t);

	binder_stat_br(proc, thread, cmd);
	binder_debug(BINDER_DEBUG_USER_REFS, "%d:%d %s %d u%016llx c%016llx\n",
		     proc->pid, thread->pid, cmd_name, node_debug_id,
		     (u64)node_ptr, (u64)node_cookie);

	*ptrp = ptr;
	return 0;
}
static int binder_wait_for_work(struct binder_thread *thread,
				bool do_proc_work)
{
	DEFINE_WAIT(wait);
	struct binder_proc *proc = thread->proc;
	int ret = 0;

	freezer_do_not_count();
	binder_inner_proc_lock(proc);
	for (;;) {
		prepare_to_wait(&thread->wait, &wait, TASK_INTERRUPTIBLE);
		if (binder_has_work_ilocked(thread, do_proc_work))
			break;
		if (do_proc_work)
			list_add(&thread->waiting_thread_node,
				 &proc->waiting_threads);
		binder_inner_proc_unlock(proc);
		schedule();
		binder_inner_proc_lock(proc);
		list_del_init(&thread->waiting_thread_node);
		if (signal_pending(current)) {
			ret = -ERESTARTSYS;
			break;
		}
	}
	finish_wait(&thread->wait, &wait);
	binder_inner_proc_unlock(proc);
	freezer_count();

	return ret;
}
/**
 * binder_apply_fd_fixups() - finish fd translation
 * @proc:	binder_proc associated @t->buffer
 * @t:	binder transaction with list of fd fixups
 *
 * Now that we are in the context of the transaction target
 * process, we can allocate and install fds. Process the
 * list of fds to translate and fixup the buffer with the
 * new fds.
 *
 * If we fail to allocate an fd, then free the resources by
 * fput'ing files that have not been processed and ksys_close'ing
 * any fds that have already been allocated.
 */
static int binder_apply_fd_fixups(struct binder_proc *proc,
				  struct binder_transaction *t)
{
	struct binder_txn_fd_fixup *fixup, *tmp;
	int ret = 0;

	list_for_each_entry(fixup, &t->fd_fixups, fixup_entry) {
		int fd = get_unused_fd_flags(O_CLOEXEC);

		if (fd < 0) {
			binder_debug(BINDER_DEBUG_TRANSACTION,
				     "failed fd fixup txn %d fd %d\n",
				     t->debug_id, fd);
			ret = -ENOMEM;
			break;
		}
		binder_debug(BINDER_DEBUG_TRANSACTION,
			     "fd fixup txn %d fd %d\n",
			     t->debug_id, fd);
		trace_binder_transaction_fd_recv(t, fd, fixup->offset);
		fd_install(fd, fixup->file);
		fixup->file = NULL;
		if (binder_alloc_copy_to_buffer(&proc->alloc, t->buffer,
						fixup->offset, &fd,
						sizeof(fd))) {
			ret = -EINVAL;
			break;
		}
	}
	list_for_each_entry_safe(fixup, tmp, &t->fd_fixups, fixup_entry) {
		if (fixup->file) {
			fput(fixup->file);
		} else if (ret) {
			u32 fd;
			int err;

			err = binder_alloc_copy_from_buffer(&proc->alloc, &fd,
							    t->buffer,
							    fixup->offset,
							    sizeof(fd));
			WARN_ON(err);
			if (!err)
				binder_deferred_fd_close(fd);
		}
		list_del(&fixup->fixup_entry);
		kfree(fixup);
	}

	return ret;
}
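
/*
 * Illustrative sketch (not part of the driver): how a userspace sender
 * would describe a file descriptor so that the fixup path above installs
 * it in the receiver. The offsets array of the transaction must point at
 * the binder_fd_object embedded in the data buffer; field names follow
 * <uapi/linux/android/binder.h>.
 *
 *	struct binder_fd_object fdo = {
 *		.hdr.type = BINDER_TYPE_FD,
 *		.fd = sender_fd,	// translated to a fresh fd on delivery
 *		.cookie = 0,
 *	};
 *
 * On the receive side, the value read back out of the buffer at the same
 * offset is already a valid fd in the receiving process.
 */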
static int binder_thread_read(struct binder_proc *proc,
			      struct binder_thread *thread,
			      binder_uintptr_t binder_buffer, size_t size,
			      binder_size_t *consumed, int non_block)
{
	void __user *buffer = (void __user *)(uintptr_t)binder_buffer;
	void __user *ptr = buffer + *consumed;
	void __user *end = buffer + size;

	int ret = 0;
	int wait_for_proc_work;

	if (*consumed == 0) {
		if (put_user(BR_NOOP, (uint32_t __user *)ptr))
			return -EFAULT;
		ptr += sizeof(uint32_t);
	}

retry:
	binder_inner_proc_lock(proc);
	wait_for_proc_work = binder_available_for_proc_work_ilocked(thread);
	binder_inner_proc_unlock(proc);

	thread->looper |= BINDER_LOOPER_STATE_WAITING;

	trace_binder_wait_for_work(wait_for_proc_work,
				   !!thread->transaction_stack,
				   !binder_worklist_empty(proc, &thread->todo));
	if (wait_for_proc_work) {
		if (!(thread->looper & (BINDER_LOOPER_STATE_REGISTERED |
					BINDER_LOOPER_STATE_ENTERED))) {
			binder_user_error("%d:%d ERROR: Thread waiting for process work before calling BC_REGISTER_LOOPER or BC_ENTER_LOOPER (state %x)\n",
				proc->pid, thread->pid, thread->looper);
			wait_event_interruptible(binder_user_error_wait,
						 binder_stop_on_user_error < 2);
		}
		binder_set_nice(proc->default_priority);
	}

	if (non_block) {
		if (!binder_has_work(thread, wait_for_proc_work))
			ret = -EAGAIN;
	} else {
		ret = binder_wait_for_work(thread, wait_for_proc_work);
	}

	thread->looper &= ~BINDER_LOOPER_STATE_WAITING;

	if (ret)
		return ret;
	while (1) {
		uint32_t cmd;
		struct binder_transaction_data_secctx tr;
		struct binder_transaction_data *trd = &tr.transaction_data;
		struct binder_work *w = NULL;
		struct list_head *list = NULL;
		struct binder_transaction *t = NULL;
		struct binder_thread *t_from;
		size_t trsize = sizeof(*trd);

		binder_inner_proc_lock(proc);
		if (!binder_worklist_empty_ilocked(&thread->todo))
			list = &thread->todo;
		else if (!binder_worklist_empty_ilocked(&proc->todo) &&
			   wait_for_proc_work)
			list = &proc->todo;
		else {
			binder_inner_proc_unlock(proc);

			/* no data added */
			if (ptr - buffer == 4 && !thread->looper_need_return)
				goto retry;
			break;
		}

		if (end - ptr < sizeof(tr) + 4) {
			binder_inner_proc_unlock(proc);
			break;
		}
		w = binder_dequeue_work_head_ilocked(list);
		if (binder_worklist_empty_ilocked(&thread->todo))
			thread->process_todo = false;

		switch (w->type) {
		case BINDER_WORK_TRANSACTION: {
			binder_inner_proc_unlock(proc);
			t = container_of(w, struct binder_transaction, work);
		} break;
		case BINDER_WORK_RETURN_ERROR: {
			struct binder_error *e = container_of(
					w, struct binder_error, work);

			WARN_ON(e->cmd == BR_OK);
			binder_inner_proc_unlock(proc);
			if (put_user(e->cmd, (uint32_t __user *)ptr))
				return -EFAULT;
			cmd = e->cmd;
			e->cmd = BR_OK;
			ptr += sizeof(uint32_t);

			binder_stat_br(proc, thread, cmd);
		} break;
		case BINDER_WORK_TRANSACTION_COMPLETE: {
			binder_inner_proc_unlock(proc);
			cmd = BR_TRANSACTION_COMPLETE;
			kfree(w);
			binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
			if (put_user(cmd, (uint32_t __user *)ptr))
				return -EFAULT;
			ptr += sizeof(uint32_t);

			binder_stat_br(proc, thread, cmd);
			binder_debug(BINDER_DEBUG_TRANSACTION_COMPLETE,
				     "%d:%d BR_TRANSACTION_COMPLETE\n",
				     proc->pid, thread->pid);
		} break;
		case BINDER_WORK_NODE: {
			struct binder_node *node = container_of(w, struct binder_node, work);
			int strong, weak;
			binder_uintptr_t node_ptr = node->ptr;
			binder_uintptr_t node_cookie = node->cookie;
			int node_debug_id = node->debug_id;
			int has_weak_ref;
			int has_strong_ref;
			void __user *orig_ptr = ptr;

			BUG_ON(proc != node->proc);
			strong = node->internal_strong_refs ||
					node->local_strong_refs;
			weak = !hlist_empty(&node->refs) ||
					node->local_weak_refs ||
					node->tmp_refs || strong;
			has_strong_ref = node->has_strong_ref;
			has_weak_ref = node->has_weak_ref;

			if (weak && !has_weak_ref) {
				node->has_weak_ref = 1;
				node->pending_weak_ref = 1;
				node->local_weak_refs++;
			}
			if (strong && !has_strong_ref) {
				node->has_strong_ref = 1;
				node->pending_strong_ref = 1;
				node->local_strong_refs++;
			}
			if (!strong && has_strong_ref)
				node->has_strong_ref = 0;
			if (!weak && has_weak_ref)
				node->has_weak_ref = 0;
			if (!weak && !strong) {
				binder_debug(BINDER_DEBUG_INTERNAL_REFS,
					     "%d:%d node %d u%016llx c%016llx deleted\n",
					     proc->pid, thread->pid,
					     node_debug_id,
					     (u64)node_ptr,
					     (u64)node_cookie);
				rb_erase(&node->rb_node, &proc->nodes);
				binder_inner_proc_unlock(proc);
				binder_node_lock(node);
				/*
				 * Acquire the node lock before freeing the
				 * node to serialize with other threads that
				 * may have been holding the node lock while
				 * decrementing this node (avoids race where
				 * this thread frees while the other thread
				 * is unlocking the node after the final
				 * decrement)
				 */
				binder_node_unlock(node);
				binder_free_node(node);
			} else
				binder_inner_proc_unlock(proc);

			if (weak && !has_weak_ref)
				ret = binder_put_node_cmd(
						proc, thread, &ptr, node_ptr,
						node_cookie, node_debug_id,
						BR_INCREFS, "BR_INCREFS");
			if (!ret && strong && !has_strong_ref)
				ret = binder_put_node_cmd(
						proc, thread, &ptr, node_ptr,
						node_cookie, node_debug_id,
						BR_ACQUIRE, "BR_ACQUIRE");
			if (!ret && !strong && has_strong_ref)
				ret = binder_put_node_cmd(
						proc, thread, &ptr, node_ptr,
						node_cookie, node_debug_id,
						BR_RELEASE, "BR_RELEASE");
			if (!ret && !weak && has_weak_ref)
				ret = binder_put_node_cmd(
						proc, thread, &ptr, node_ptr,
						node_cookie, node_debug_id,
						BR_DECREFS, "BR_DECREFS");
			if (orig_ptr == ptr)
				binder_debug(BINDER_DEBUG_INTERNAL_REFS,
					     "%d:%d node %d u%016llx c%016llx state unchanged\n",
					     proc->pid, thread->pid,
					     node_debug_id,
					     (u64)node_ptr,
					     (u64)node_cookie);
			if (ret)
				return ret;
		} break;
		case BINDER_WORK_DEAD_BINDER:
		case BINDER_WORK_DEAD_BINDER_AND_CLEAR:
		case BINDER_WORK_CLEAR_DEATH_NOTIFICATION: {
			struct binder_ref_death *death;
			uint32_t cmd;
			binder_uintptr_t cookie;

			death = container_of(w, struct binder_ref_death, work);
			if (w->type == BINDER_WORK_CLEAR_DEATH_NOTIFICATION)
				cmd = BR_CLEAR_DEATH_NOTIFICATION_DONE;
			else
				cmd = BR_DEAD_BINDER;
			cookie = death->cookie;

			binder_debug(BINDER_DEBUG_DEATH_NOTIFICATION,
				     "%d:%d %s %016llx\n",
				      proc->pid, thread->pid,
				      cmd == BR_DEAD_BINDER ?
				      "BR_DEAD_BINDER" :
				      "BR_CLEAR_DEATH_NOTIFICATION_DONE",
				      (u64)cookie);
			if (w->type == BINDER_WORK_CLEAR_DEATH_NOTIFICATION) {
				binder_inner_proc_unlock(proc);
				kfree(death);
				binder_stats_deleted(BINDER_STAT_DEATH);
			} else {
				binder_enqueue_work_ilocked(
						w, &proc->delivered_death);
				binder_inner_proc_unlock(proc);
			}
			if (put_user(cmd, (uint32_t __user *)ptr))
				return -EFAULT;
			ptr += sizeof(uint32_t);
			if (put_user(cookie,
				     (binder_uintptr_t __user *)ptr))
				return -EFAULT;
			ptr += sizeof(binder_uintptr_t);
			binder_stat_br(proc, thread, cmd);
			if (cmd == BR_DEAD_BINDER)
				goto done; /* DEAD_BINDER notifications can cause transactions */
		} break;
		default:
			binder_inner_proc_unlock(proc);
			pr_err("%d:%d: bad work type %d\n",
			       proc->pid, thread->pid, w->type);
			break;
		}
		if (!t)
			continue;

		BUG_ON(t->buffer == NULL);
		if (t->buffer->target_node) {
			struct binder_node *target_node = t->buffer->target_node;

			trd->target.ptr = target_node->ptr;
			trd->cookie = target_node->cookie;
			t->saved_priority = task_nice(current);
			if (t->priority < target_node->min_priority &&
			    !(t->flags & TF_ONE_WAY))
				binder_set_nice(t->priority);
			else if (!(t->flags & TF_ONE_WAY) ||
				 t->saved_priority > target_node->min_priority)
				binder_set_nice(target_node->min_priority);
			cmd = BR_TRANSACTION;
		} else {
			trd->target.ptr = 0;
			trd->cookie = 0;
			cmd = BR_REPLY;
		}
		trd->code = t->code;
		trd->flags = t->flags;
		trd->sender_euid = from_kuid(current_user_ns(), t->sender_euid);

		t_from = binder_get_txn_from(t);
		if (t_from) {
			struct task_struct *sender = t_from->proc->tsk;

			trd->sender_pid =
				task_tgid_nr_ns(sender,
						task_active_pid_ns(current));
		} else {
			trd->sender_pid = 0;
		}
		ret = binder_apply_fd_fixups(proc, t);
		if (ret) {
			struct binder_buffer *buffer = t->buffer;
			bool oneway = !!(t->flags & TF_ONE_WAY);
			int tid = t->debug_id;

			if (t_from)
				binder_thread_dec_tmpref(t_from);
			buffer->transaction = NULL;
			binder_cleanup_transaction(t, "fd fixups failed",
						   BR_FAILED_REPLY);
			binder_free_buf(proc, buffer);
			binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
				     "%d:%d %stransaction %d fd fixups failed %d/%d, line %d\n",
				     proc->pid, thread->pid,
				     oneway ? "async " :
					(cmd == BR_REPLY ? "reply " : ""),
				     tid, BR_FAILED_REPLY, ret, __LINE__);
			if (cmd == BR_REPLY) {
				cmd = BR_FAILED_REPLY;
				if (put_user(cmd, (uint32_t __user *)ptr))
					return -EFAULT;
				ptr += sizeof(uint32_t);
				binder_stat_br(proc, thread, cmd);
				break;
			}
			continue;
		}
		trd->data_size = t->buffer->data_size;
		trd->offsets_size = t->buffer->offsets_size;
		trd->data.ptr.buffer = (uintptr_t)t->buffer->user_data;
		trd->data.ptr.offsets = trd->data.ptr.buffer +
					ALIGN(t->buffer->data_size,
					    sizeof(void *));

		tr.secctx = t->security_ctx;
		if (t->security_ctx) {
			cmd = BR_TRANSACTION_SEC_CTX;
			trsize = sizeof(tr);
		}
		if (put_user(cmd, (uint32_t __user *)ptr)) {
			if (t_from)
				binder_thread_dec_tmpref(t_from);

			binder_cleanup_transaction(t, "put_user failed",
						   BR_FAILED_REPLY);

			return -EFAULT;
		}
		ptr += sizeof(uint32_t);
		if (copy_to_user(ptr, &tr, trsize)) {
			if (t_from)
				binder_thread_dec_tmpref(t_from);

			binder_cleanup_transaction(t, "copy_to_user failed",
						   BR_FAILED_REPLY);

			return -EFAULT;
		}
		ptr += trsize;

		trace_binder_transaction_received(t);
		binder_stat_br(proc, thread, cmd);
		binder_debug(BINDER_DEBUG_TRANSACTION,
			     "%d:%d %s %d %d:%d, cmd %d size %zd-%zd ptr %016llx-%016llx\n",
			     proc->pid, thread->pid,
			     (cmd == BR_TRANSACTION) ? "BR_TRANSACTION" :
			     (cmd == BR_TRANSACTION_SEC_CTX) ?
				"BR_TRANSACTION_SEC_CTX" : "BR_REPLY",
			     t->debug_id, t_from ? t_from->proc->pid : 0,
			     t_from ? t_from->pid : 0, cmd,
			     t->buffer->data_size, t->buffer->offsets_size,
			     (u64)trd->data.ptr.buffer,
			     (u64)trd->data.ptr.offsets);
		if (t_from)
			binder_thread_dec_tmpref(t_from);
		t->buffer->allow_user_free = 1;
		if (cmd != BR_REPLY && !(t->flags & TF_ONE_WAY)) {
			binder_inner_proc_lock(thread->proc);
			t->to_parent = thread->transaction_stack;
			t->to_thread = thread;
			thread->transaction_stack = t;
			binder_inner_proc_unlock(thread->proc);
		} else {
			binder_free_transaction(t);
		}
		break;
	}

done:

	*consumed = ptr - buffer;
	binder_inner_proc_lock(proc);
	if (proc->requested_threads == 0 &&
	    list_empty(&thread->proc->waiting_threads) &&
	    proc->requested_threads_started < proc->max_threads &&
	    (thread->looper & (BINDER_LOOPER_STATE_REGISTERED |
	     BINDER_LOOPER_STATE_ENTERED)) /* the user-space code fails to */
	     /*spawn a new thread if we leave this out */) {
		proc->requested_threads++;
		binder_inner_proc_unlock(proc);
		binder_debug(BINDER_DEBUG_THREADS,
			     "%d:%d BR_SPAWN_LOOPER\n",
			     proc->pid, thread->pid);
		if (put_user(BR_SPAWN_LOOPER, (uint32_t __user *)buffer))
			return -EFAULT;
		binder_stat_br(proc, thread, BR_SPAWN_LOOPER);
	} else
		binder_inner_proc_unlock(proc);
	return 0;
}
static void binder_release_work(struct binder_proc *proc,
				struct list_head *list)
{
	struct binder_work *w;

	while (1) {
		w = binder_dequeue_work_head(proc, list);
		if (!w)
			return;

		switch (w->type) {
		case BINDER_WORK_TRANSACTION: {
			struct binder_transaction *t;

			t = container_of(w, struct binder_transaction, work);

			binder_cleanup_transaction(t, "process died.",
						   BR_DEAD_REPLY);
		} break;
		case BINDER_WORK_RETURN_ERROR: {
			struct binder_error *e = container_of(
					w, struct binder_error, work);

			binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
				"undelivered TRANSACTION_ERROR: %u\n",
				e->cmd);
		} break;
		case BINDER_WORK_TRANSACTION_COMPLETE: {
			binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
				"undelivered TRANSACTION_COMPLETE\n");
			kfree(w);
			binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
		} break;
		case BINDER_WORK_DEAD_BINDER_AND_CLEAR:
		case BINDER_WORK_CLEAR_DEATH_NOTIFICATION: {
			struct binder_ref_death *death;

			death = container_of(w, struct binder_ref_death, work);
			binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
				"undelivered death notification, %016llx\n",
				(u64)death->cookie);
			kfree(death);
			binder_stats_deleted(BINDER_STAT_DEATH);
		} break;
		default:
			pr_err("unexpected work type, %d, not freed\n",
			       w->type);
			break;
		}
	}
}
static struct binder_thread *binder_get_thread_ilocked(
		struct binder_proc *proc, struct binder_thread *new_thread)
{
	struct binder_thread *thread = NULL;
	struct rb_node *parent = NULL;
	struct rb_node **p = &proc->threads.rb_node;

	while (*p) {
		parent = *p;
		thread = rb_entry(parent, struct binder_thread, rb_node);

		if (current->pid < thread->pid)
			p = &(*p)->rb_left;
		else if (current->pid > thread->pid)
			p = &(*p)->rb_right;
		else
			return thread;
	}
	if (!new_thread)
		return NULL;
	thread = new_thread;
	binder_stats_created(BINDER_STAT_THREAD);
	thread->proc = proc;
	thread->pid = current->pid;
	atomic_set(&thread->tmp_ref, 0);
	init_waitqueue_head(&thread->wait);
	INIT_LIST_HEAD(&thread->todo);
	rb_link_node(&thread->rb_node, parent, p);
	rb_insert_color(&thread->rb_node, &proc->threads);
	thread->looper_need_return = true;
	thread->return_error.work.type = BINDER_WORK_RETURN_ERROR;
	thread->return_error.cmd = BR_OK;
	thread->reply_error.work.type = BINDER_WORK_RETURN_ERROR;
	thread->reply_error.cmd = BR_OK;
	INIT_LIST_HEAD(&new_thread->waiting_thread_node);
	return thread;
}
static struct binder_thread *binder_get_thread(struct binder_proc *proc)
{
	struct binder_thread *thread;
	struct binder_thread *new_thread;

	binder_inner_proc_lock(proc);
	thread = binder_get_thread_ilocked(proc, NULL);
	binder_inner_proc_unlock(proc);
	if (!thread) {
		new_thread = kzalloc(sizeof(*thread), GFP_KERNEL);
		if (new_thread == NULL)
			return NULL;
		binder_inner_proc_lock(proc);
		thread = binder_get_thread_ilocked(proc, new_thread);
		binder_inner_proc_unlock(proc);
		if (thread != new_thread)
			kfree(new_thread);
	}
	return thread;
}
static void binder_free_proc(struct binder_proc *proc)
{
	BUG_ON(!list_empty(&proc->todo));
	BUG_ON(!list_empty(&proc->delivered_death));
	binder_alloc_deferred_release(&proc->alloc);
	put_task_struct(proc->tsk);
	binder_stats_deleted(BINDER_STAT_PROC);
	kfree(proc);
}
static void binder_free_thread(struct binder_thread *thread)
{
	BUG_ON(!list_empty(&thread->todo));
	binder_stats_deleted(BINDER_STAT_THREAD);
	binder_proc_dec_tmpref(thread->proc);
	kfree(thread);
}
static int binder_thread_release(struct binder_proc *proc,
				 struct binder_thread *thread)
{
	struct binder_transaction *t;
	struct binder_transaction *send_reply = NULL;
	int active_transactions = 0;
	struct binder_transaction *last_t = NULL;

	binder_inner_proc_lock(thread->proc);
	/*
	 * take a ref on the proc so it survives
	 * after we remove this thread from proc->threads.
	 * The corresponding dec is when we actually
	 * free the thread in binder_free_thread()
	 */
	proc->tmp_ref++;
	/*
	 * take a ref on this thread to ensure it
	 * survives while we are releasing it
	 */
	atomic_inc(&thread->tmp_ref);
	rb_erase(&thread->rb_node, &proc->threads);
	t = thread->transaction_stack;
	if (t) {
		spin_lock(&t->lock);
		if (t->to_thread == thread)
			send_reply = t;
	} else {
		__acquire(&t->lock);
	}
	thread->is_dead = true;

	while (t) {
		last_t = t;
		active_transactions++;
		binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
			     "release %d:%d transaction %d %s, still active\n",
			      proc->pid, thread->pid,
			     t->debug_id,
			     (t->to_thread == thread) ? "in" : "out");

		if (t->to_thread == thread) {
			t->to_proc = NULL;
			t->to_thread = NULL;
			if (t->buffer) {
				t->buffer->transaction = NULL;
				t->buffer = NULL;
			}
			t = t->to_parent;
		} else if (t->from == thread) {
			t->from = NULL;
			t = t->from_parent;
		} else
			BUG();
		spin_unlock(&last_t->lock);
		if (t)
			spin_lock(&t->lock);
		else
			__acquire(&t->lock);
	}
	/* annotation for sparse, lock not acquired in last iteration above */
	__release(&t->lock);

	/*
	 * If this thread used poll, make sure we remove the waitqueue
	 * from any epoll data structures holding it with POLLFREE.
	 * waitqueue_active() is safe to use here because we're holding
	 * the inner lock.
	 */
	if ((thread->looper & BINDER_LOOPER_STATE_POLL) &&
	    waitqueue_active(&thread->wait)) {
		wake_up_poll(&thread->wait, EPOLLHUP | POLLFREE);
	}

	binder_inner_proc_unlock(thread->proc);

	/*
	 * This is needed to avoid races between wake_up_poll() above and
	 * ep_remove_waitqueue() called for other reasons (eg the epoll file
	 * descriptor being closed); ep_remove_waitqueue() holds an RCU read
	 * lock, so we can be sure it's done after calling synchronize_rcu().
	 */
	if (thread->looper & BINDER_LOOPER_STATE_POLL)
		synchronize_rcu();

	if (send_reply)
		binder_send_failed_reply(send_reply, BR_DEAD_REPLY);
	binder_release_work(proc, &thread->todo);
	binder_thread_dec_tmpref(thread);
	return active_transactions;
}
static __poll_t binder_poll(struct file *filp,
				struct poll_table_struct *wait)
{
	struct binder_proc *proc = filp->private_data;
	struct binder_thread *thread = NULL;
	bool wait_for_proc_work;

	thread = binder_get_thread(proc);
	if (!thread)
		return POLLERR;

	binder_inner_proc_lock(thread->proc);
	thread->looper |= BINDER_LOOPER_STATE_POLL;
	wait_for_proc_work = binder_available_for_proc_work_ilocked(thread);

	binder_inner_proc_unlock(thread->proc);

	poll_wait(filp, &thread->wait, wait);

	if (binder_has_work(thread, wait_for_proc_work))
		return EPOLLIN;

	return 0;
}
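
/*
 * Illustrative sketch (not part of the driver): polling a binder fd from
 * userspace. binder_poll() reports EPOLLIN once the calling thread has
 * work, so a thread typically registers the fd with epoll and only then
 * issues a BINDER_WRITE_READ to drain the returned commands.
 *
 *	int bfd = open("/dev/binder", O_RDWR | O_CLOEXEC);
 *	int epfd = epoll_create1(EPOLL_CLOEXEC);
 *	struct epoll_event ev = { .events = EPOLLIN };
 *	epoll_ctl(epfd, EPOLL_CTL_ADD, bfd, &ev);
 *	if (epoll_wait(epfd, &ev, 1, -1) == 1) {
 *		// work pending: read it with BINDER_WRITE_READ
 *	}
 */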
static int binder_ioctl_write_read(struct file *filp,
				unsigned int cmd, unsigned long arg,
				struct binder_thread *thread)
{
	int ret = 0;
	struct binder_proc *proc = filp->private_data;
	unsigned int size = _IOC_SIZE(cmd);
	void __user *ubuf = (void __user *)arg;
	struct binder_write_read bwr;

	if (size != sizeof(struct binder_write_read)) {
		ret = -EINVAL;
		goto out;
	}
	if (copy_from_user(&bwr, ubuf, sizeof(bwr))) {
		ret = -EFAULT;
		goto out;
	}
	binder_debug(BINDER_DEBUG_READ_WRITE,
		     "%d:%d write %lld at %016llx, read %lld at %016llx\n",
		     proc->pid, thread->pid,
		     (u64)bwr.write_size, (u64)bwr.write_buffer,
		     (u64)bwr.read_size, (u64)bwr.read_buffer);

	if (bwr.write_size > 0) {
		ret = binder_thread_write(proc, thread,
					  bwr.write_buffer,
					  bwr.write_size,
					  &bwr.write_consumed);
		trace_binder_write_done(ret);
		if (ret < 0) {
			bwr.read_consumed = 0;
			if (copy_to_user(ubuf, &bwr, sizeof(bwr)))
				ret = -EFAULT;
			goto out;
		}
	}
	if (bwr.read_size > 0) {
		ret = binder_thread_read(proc, thread, bwr.read_buffer,
					 bwr.read_size,
					 &bwr.read_consumed,
					 filp->f_flags & O_NONBLOCK);
		trace_binder_read_done(ret);
		binder_inner_proc_lock(proc);
		if (!binder_worklist_empty_ilocked(&proc->todo))
			binder_wakeup_proc_ilocked(proc);
		binder_inner_proc_unlock(proc);
		if (ret < 0) {
			if (copy_to_user(ubuf, &bwr, sizeof(bwr)))
				ret = -EFAULT;
			goto out;
		}
	}
	binder_debug(BINDER_DEBUG_READ_WRITE,
		     "%d:%d wrote %lld of %lld, read return %lld of %lld\n",
		     proc->pid, thread->pid,
		     (u64)bwr.write_consumed, (u64)bwr.write_size,
		     (u64)bwr.read_consumed, (u64)bwr.read_size);
	if (copy_to_user(ubuf, &bwr, sizeof(bwr))) {
		ret = -EFAULT;
		goto out;
	}
out:
	return ret;
}
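
/*
 * Illustrative sketch (not part of the driver): the userspace side of
 * binder_ioctl_write_read(). A looper thread queues BC_ENTER_LOOPER in
 * the write buffer and hands the driver a read buffer for BR_* returns;
 * both halves travel in a single struct binder_write_read.
 *
 *	uint32_t wbuf[] = { BC_ENTER_LOOPER };
 *	char rbuf[256];
 *	struct binder_write_read bwr = {
 *		.write_size = sizeof(wbuf),
 *		.write_buffer = (binder_uintptr_t)wbuf,
 *		.read_size = sizeof(rbuf),
 *		.read_buffer = (binder_uintptr_t)rbuf,
 *	};
 *	if (ioctl(bfd, BINDER_WRITE_READ, &bwr) == 0) {
 *		// bwr.read_consumed bytes of BR_* commands are now in rbuf
 *	}
 */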
static int binder_ioctl_set_ctx_mgr(struct file *filp,
				    struct flat_binder_object *fbo)
{
	int ret = 0;
	struct binder_proc *proc = filp->private_data;
	struct binder_context *context = proc->context;
	struct binder_node *new_node;
	kuid_t curr_euid = current_euid();

	mutex_lock(&context->context_mgr_node_lock);
	if (context->binder_context_mgr_node) {
		pr_err("BINDER_SET_CONTEXT_MGR already set\n");
		ret = -EBUSY;
		goto out;
	}
	ret = security_binder_set_context_mgr(proc->tsk);
	if (ret < 0)
		goto out;
	if (uid_valid(context->binder_context_mgr_uid)) {
		if (!uid_eq(context->binder_context_mgr_uid, curr_euid)) {
			pr_err("BINDER_SET_CONTEXT_MGR bad uid %d != %d\n",
			       from_kuid(&init_user_ns, curr_euid),
			       from_kuid(&init_user_ns,
					 context->binder_context_mgr_uid));
			ret = -EPERM;
			goto out;
		}
	} else {
		context->binder_context_mgr_uid = curr_euid;
	}
	new_node = binder_new_node(proc, fbo);
	if (!new_node) {
		ret = -ENOMEM;
		goto out;
	}
	binder_node_lock(new_node);
	new_node->local_weak_refs++;
	new_node->local_strong_refs++;
	new_node->has_strong_ref = 1;
	new_node->has_weak_ref = 1;
	context->binder_context_mgr_node = new_node;
	binder_node_unlock(new_node);
	binder_put_node(new_node);
out:
	mutex_unlock(&context->context_mgr_node_lock);
	return ret;
}
static int binder_ioctl_get_node_info_for_ref(struct binder_proc *proc,
		struct binder_node_info_for_ref *info)
{
	struct binder_node *node;
	struct binder_context *context = proc->context;
	__u32 handle = info->handle;

	if (info->strong_count || info->weak_count || info->reserved1 ||
	    info->reserved2 || info->reserved3) {
		binder_user_error("%d BINDER_GET_NODE_INFO_FOR_REF: only handle may be non-zero.",
				  proc->pid);
		return -EINVAL;
	}

	/* This ioctl may only be used by the context manager */
	mutex_lock(&context->context_mgr_node_lock);
	if (!context->binder_context_mgr_node ||
	    context->binder_context_mgr_node->proc != proc) {
		mutex_unlock(&context->context_mgr_node_lock);
		return -EPERM;
	}
	mutex_unlock(&context->context_mgr_node_lock);

	node = binder_get_node_from_ref(proc, handle, true, NULL);
	if (!node)
		return -EINVAL;

	info->strong_count = node->local_strong_refs +
		node->internal_strong_refs;
	info->weak_count = node->local_weak_refs;

	binder_put_node(node);

	return 0;
}
static int binder_ioctl_get_node_debug_info(struct binder_proc *proc,
				struct binder_node_debug_info *info)
{
	struct rb_node *n;
	binder_uintptr_t ptr = info->ptr;

	memset(info, 0, sizeof(*info));

	binder_inner_proc_lock(proc);
	for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n)) {
		struct binder_node *node = rb_entry(n, struct binder_node,
						    rb_node);
		if (node->ptr > ptr) {
			info->ptr = node->ptr;
			info->cookie = node->cookie;
			info->has_strong_ref = node->has_strong_ref;
			info->has_weak_ref = node->has_weak_ref;
			break;
		}
	}
	binder_inner_proc_unlock(proc);

	return 0;
}
static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
	int ret;
	struct binder_proc *proc = filp->private_data;
	struct binder_thread *thread;
	unsigned int size = _IOC_SIZE(cmd);
	void __user *ubuf = (void __user *)arg;

	/*pr_info("binder_ioctl: %d:%d %x %lx\n",
			proc->pid, current->pid, cmd, arg);*/

	binder_selftest_alloc(&proc->alloc);

	trace_binder_ioctl(cmd, arg);

	ret = wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2);
	if (ret)
		goto err_unlocked;

	thread = binder_get_thread(proc);
	if (thread == NULL) {
		ret = -ENOMEM;
		goto err;
	}

	switch (cmd) {
	case BINDER_WRITE_READ:
		ret = binder_ioctl_write_read(filp, cmd, arg, thread);
		if (ret)
			goto err;
		break;
	case BINDER_SET_MAX_THREADS: {
		int max_threads;

		if (copy_from_user(&max_threads, ubuf,
				   sizeof(max_threads))) {
			ret = -EINVAL;
			goto err;
		}
		binder_inner_proc_lock(proc);
		proc->max_threads = max_threads;
		binder_inner_proc_unlock(proc);
		break;
	}
	case BINDER_SET_CONTEXT_MGR_EXT: {
		struct flat_binder_object fbo;

		if (copy_from_user(&fbo, ubuf, sizeof(fbo))) {
			ret = -EINVAL;
			goto err;
		}
		ret = binder_ioctl_set_ctx_mgr(filp, &fbo);
		if (ret)
			goto err;
		break;
	}
	case BINDER_SET_CONTEXT_MGR:
		ret = binder_ioctl_set_ctx_mgr(filp, NULL);
		if (ret)
			goto err;
		break;
	case BINDER_THREAD_EXIT:
		binder_debug(BINDER_DEBUG_THREADS, "%d:%d exit\n",
			     proc->pid, thread->pid);
		binder_thread_release(proc, thread);
		thread = NULL;
		break;
	case BINDER_VERSION: {
		struct binder_version __user *ver = ubuf;

		if (size != sizeof(struct binder_version)) {
			ret = -EINVAL;
			goto err;
		}
		if (put_user(BINDER_CURRENT_PROTOCOL_VERSION,
			     &ver->protocol_version)) {
			ret = -EINVAL;
			goto err;
		}
		break;
	}
	case BINDER_GET_NODE_INFO_FOR_REF: {
		struct binder_node_info_for_ref info;

		if (copy_from_user(&info, ubuf, sizeof(info))) {
			ret = -EFAULT;
			goto err;
		}

		ret = binder_ioctl_get_node_info_for_ref(proc, &info);
		if (ret < 0)
			goto err;

		if (copy_to_user(ubuf, &info, sizeof(info))) {
			ret = -EFAULT;
			goto err;
		}

		break;
	}
	case BINDER_GET_NODE_DEBUG_INFO: {
		struct binder_node_debug_info info;

		if (copy_from_user(&info, ubuf, sizeof(info))) {
			ret = -EFAULT;
			goto err;
		}

		ret = binder_ioctl_get_node_debug_info(proc, &info);
		if (ret < 0)
			goto err;

		if (copy_to_user(ubuf, &info, sizeof(info))) {
			ret = -EFAULT;
			goto err;
		}
		break;
	}
	default:
		ret = -EINVAL;
		goto err;
	}
	ret = 0;
err:
	if (thread)
		thread->looper_need_return = false;
	wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2);
	if (ret && ret != -ERESTARTSYS)
		pr_info("%d:%d ioctl %x %lx returned %d\n", proc->pid, current->pid, cmd, arg, ret);
err_unlocked:
	trace_binder_ioctl_done(ret);
	return ret;
}
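
/*
 * Illustrative sketch (not part of the driver): the simplest ioctl
 * round-trip, typically done by userspace at startup to reject a
 * mismatched kernel before anything else touches the fd.
 *
 *	struct binder_version vers;
 *	if (ioctl(bfd, BINDER_VERSION, &vers) < 0 ||
 *	    vers.protocol_version != BINDER_CURRENT_PROTOCOL_VERSION) {
 *		// bail out: kernel and userspace disagree on the protocol
 *	}
 */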
static void binder_vma_open(struct vm_area_struct *vma)
{
	struct binder_proc *proc = vma->vm_private_data;

	binder_debug(BINDER_DEBUG_OPEN_CLOSE,
		     "%d open vm area %lx-%lx (%ld K) vma %lx pagep %lx\n",
		     proc->pid, vma->vm_start, vma->vm_end,
		     (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
		     (unsigned long)pgprot_val(vma->vm_page_prot));
}
static void binder_vma_close(struct vm_area_struct *vma)
{
	struct binder_proc *proc = vma->vm_private_data;

	binder_debug(BINDER_DEBUG_OPEN_CLOSE,
		     "%d close vm area %lx-%lx (%ld K) vma %lx pagep %lx\n",
		     proc->pid, vma->vm_start, vma->vm_end,
		     (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
		     (unsigned long)pgprot_val(vma->vm_page_prot));
	binder_alloc_vma_close(&proc->alloc);
}
static vm_fault_t binder_vm_fault(struct vm_fault *vmf)
{
	return VM_FAULT_SIGBUS;
}

static const struct vm_operations_struct binder_vm_ops = {
	.open = binder_vma_open,
	.close = binder_vma_close,
	.fault = binder_vm_fault,
};
static int binder_mmap(struct file *filp, struct vm_area_struct *vma)
{
	int ret;
	struct binder_proc *proc = filp->private_data;
	const char *failure_string;

	if (proc->tsk != current->group_leader)
		return -EINVAL;

	binder_debug(BINDER_DEBUG_OPEN_CLOSE,
		     "%s: %d %lx-%lx (%ld K) vma %lx pagep %lx\n",
		     __func__, proc->pid, vma->vm_start, vma->vm_end,
		     (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
		     (unsigned long)pgprot_val(vma->vm_page_prot));

	if (vma->vm_flags & FORBIDDEN_MMAP_FLAGS) {
		ret = -EPERM;
		failure_string = "bad vm_flags";
		goto err_bad_arg;
	}
	vma->vm_flags |= VM_DONTCOPY | VM_MIXEDMAP;
	vma->vm_flags &= ~VM_MAYWRITE;

	vma->vm_ops = &binder_vm_ops;
	vma->vm_private_data = proc;

	ret = binder_alloc_mmap_handler(&proc->alloc, vma);
	if (ret)
		return ret;
	return 0;

err_bad_arg:
	pr_err("%s: %d %lx-%lx %s failed %d\n", __func__,
	       proc->pid, vma->vm_start, vma->vm_end, failure_string, ret);
	return ret;
}
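
/*
 * Illustrative sketch (not part of the driver): mapping the binder buffer
 * space from userspace. The mapping must be read-only, since
 * FORBIDDEN_MMAP_FLAGS rejects VM_WRITE; only the driver writes into it
 * when delivering transaction buffers. The size shown is an arbitrary
 * example, chosen by the process.
 *
 *	size_t map_size = 1024 * 1024;
 *	void *base = mmap(NULL, map_size, PROT_READ,
 *			  MAP_PRIVATE | MAP_NORESERVE, bfd, 0);
 */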
static int binder_open(struct inode *nodp, struct file *filp)
{
	struct binder_proc *proc;
	struct binder_device *binder_dev;
	struct binderfs_info *info;
	struct dentry *binder_binderfs_dir_entry_proc = NULL;

	binder_debug(BINDER_DEBUG_OPEN_CLOSE, "%s: %d:%d\n", __func__,
		     current->group_leader->pid, current->pid);

	proc = kzalloc(sizeof(*proc), GFP_KERNEL);
	if (proc == NULL)
		return -ENOMEM;
	spin_lock_init(&proc->inner_lock);
	spin_lock_init(&proc->outer_lock);
	get_task_struct(current->group_leader);
	proc->tsk = current->group_leader;
	INIT_LIST_HEAD(&proc->todo);
	proc->default_priority = task_nice(current);
	/* binderfs stashes devices in i_private */
	if (is_binderfs_device(nodp)) {
		binder_dev = nodp->i_private;
		info = nodp->i_sb->s_fs_info;
		binder_binderfs_dir_entry_proc = info->proc_log_dir;
	} else {
		binder_dev = container_of(filp->private_data,
					  struct binder_device, miscdev);
	}
	proc->context = &binder_dev->context;
	binder_alloc_init(&proc->alloc);

	binder_stats_created(BINDER_STAT_PROC);
	proc->pid = current->group_leader->pid;
	INIT_LIST_HEAD(&proc->delivered_death);
	INIT_LIST_HEAD(&proc->waiting_threads);
	filp->private_data = proc;

	mutex_lock(&binder_procs_lock);
	hlist_add_head(&proc->proc_node, &binder_procs);
	mutex_unlock(&binder_procs_lock);

	if (binder_debugfs_dir_entry_proc) {
		char strbuf[11];

		snprintf(strbuf, sizeof(strbuf), "%u", proc->pid);
		/*
		 * proc debug entries are shared between contexts, so
		 * this will fail if the process tries to open the driver
		 * again with a different context. The printing code will
		 * anyway print all contexts that a given PID has, so this
		 * is not a problem.
		 */
		proc->debugfs_entry = debugfs_create_file(strbuf, 0444,
			binder_debugfs_dir_entry_proc,
			(void *)(unsigned long)proc->pid,
			&proc_fops);
	}

	if (binder_binderfs_dir_entry_proc) {
		char strbuf[11];
		struct dentry *binderfs_entry;

		snprintf(strbuf, sizeof(strbuf), "%u", proc->pid);
		/*
		 * Similar to debugfs, the process specific log file is shared
		 * between contexts. If the file has already been created for a
		 * process, the following binderfs_create_file() call will
		 * fail with error code EEXIST if another context of the same
		 * process invoked binder_open(). This is ok since same as
		 * debugfs, the log file will contain information on all
		 * contexts of a given PID.
		 */
		binderfs_entry = binderfs_create_file(binder_binderfs_dir_entry_proc,
			strbuf, &proc_fops, (void *)(unsigned long)proc->pid);
		if (!IS_ERR(binderfs_entry)) {
			proc->binderfs_entry = binderfs_entry;
		} else {
			int error;

			error = PTR_ERR(binderfs_entry);
			if (error != -EEXIST) {
				pr_warn("Unable to create file %s in binderfs (error %d)\n",
					strbuf, error);
			}
		}
	}

	return 0;
}
static int binder_flush(struct file *filp, fl_owner_t id)
{
	struct binder_proc *proc = filp->private_data;

	binder_defer_work(proc, BINDER_DEFERRED_FLUSH);

	return 0;
}
static void binder_deferred_flush(struct binder_proc *proc)
{
	struct rb_node *n;
	int wake_count = 0;

	binder_inner_proc_lock(proc);
	for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n)) {
		struct binder_thread *thread = rb_entry(n, struct binder_thread, rb_node);

		thread->looper_need_return = true;
		if (thread->looper & BINDER_LOOPER_STATE_WAITING) {
			wake_up_interruptible(&thread->wait);
			wake_count++;
		}
	}
	binder_inner_proc_unlock(proc);

	binder_debug(BINDER_DEBUG_OPEN_CLOSE,
		     "binder_flush: %d woke %d threads\n", proc->pid,
		     wake_count);
}
static int binder_release(struct inode *nodp, struct file *filp)
{
	struct binder_proc *proc = filp->private_data;

	debugfs_remove(proc->debugfs_entry);

	if (proc->binderfs_entry) {
		binderfs_remove_file(proc->binderfs_entry);
		proc->binderfs_entry = NULL;
	}

	binder_defer_work(proc, BINDER_DEFERRED_RELEASE);

	return 0;
}
static int binder_node_release(struct binder_node *node, int refs)
{
	struct binder_ref *ref;
	int death = 0;
	struct binder_proc *proc = node->proc;

	binder_release_work(proc, &node->async_todo);

	binder_node_lock(node);
	binder_inner_proc_lock(proc);
	binder_dequeue_work_ilocked(&node->work);
	/*
	 * The caller must have taken a temporary ref on the node,
	 */
	BUG_ON(!node->tmp_refs);
	if (hlist_empty(&node->refs) && node->tmp_refs == 1) {
		binder_inner_proc_unlock(proc);
		binder_node_unlock(node);
		binder_free_node(node);

		return refs;
	}

	node->proc = NULL;
	node->local_strong_refs = 0;
	node->local_weak_refs = 0;
	binder_inner_proc_unlock(proc);

	spin_lock(&binder_dead_nodes_lock);
	hlist_add_head(&node->dead_node, &binder_dead_nodes);
	spin_unlock(&binder_dead_nodes_lock);

	hlist_for_each_entry(ref, &node->refs, node_entry) {
		refs++;
		/*
		 * Need the node lock to synchronize
		 * with new notification requests and the
		 * inner lock to synchronize with queued
		 * death notifications.
		 */
		binder_inner_proc_lock(ref->proc);
		if (!ref->death) {
			binder_inner_proc_unlock(ref->proc);
			continue;
		}

		death++;

		BUG_ON(!list_empty(&ref->death->work.entry));
		ref->death->work.type = BINDER_WORK_DEAD_BINDER;
		binder_enqueue_work_ilocked(&ref->death->work,
					    &ref->proc->todo);
		binder_wakeup_proc_ilocked(ref->proc);
		binder_inner_proc_unlock(ref->proc);
	}

	binder_debug(BINDER_DEBUG_DEAD_BINDER,
		     "node %d now dead, refs %d, death %d\n",
		     node->debug_id, refs, death);
	binder_node_unlock(node);
	binder_put_node(node);

	return refs;
}
static void binder_deferred_release(struct binder_proc *proc)
{
	struct binder_context *context = proc->context;
	struct rb_node *n;
	int threads, nodes, incoming_refs, outgoing_refs, active_transactions;

	mutex_lock(&binder_procs_lock);
	hlist_del(&proc->proc_node);
	mutex_unlock(&binder_procs_lock);

	mutex_lock(&context->context_mgr_node_lock);
	if (context->binder_context_mgr_node &&
	    context->binder_context_mgr_node->proc == proc) {
		binder_debug(BINDER_DEBUG_DEAD_BINDER,
			     "%s: %d context_mgr_node gone\n",
			     __func__, proc->pid);
		context->binder_context_mgr_node = NULL;
	}
	mutex_unlock(&context->context_mgr_node_lock);
	binder_inner_proc_lock(proc);
	/*
	 * Make sure proc stays alive after we
	 * remove all the threads
	 */
	proc->tmp_ref++;

	proc->is_dead = true;
	threads = 0;
	active_transactions = 0;
	while ((n = rb_first(&proc->threads))) {
		struct binder_thread *thread;

		thread = rb_entry(n, struct binder_thread, rb_node);
		binder_inner_proc_unlock(proc);
		threads++;
		active_transactions += binder_thread_release(proc, thread);
		binder_inner_proc_lock(proc);
	}

	nodes = 0;
	incoming_refs = 0;
	while ((n = rb_first(&proc->nodes))) {
		struct binder_node *node;

		node = rb_entry(n, struct binder_node, rb_node);
		nodes++;
		/*
		 * take a temporary ref on the node before
		 * calling binder_node_release() which will either
		 * kfree() the node or call binder_put_node()
		 */
		binder_inc_node_tmpref_ilocked(node);
		rb_erase(&node->rb_node, &proc->nodes);
		binder_inner_proc_unlock(proc);
		incoming_refs = binder_node_release(node, incoming_refs);
		binder_inner_proc_lock(proc);
	}
	binder_inner_proc_unlock(proc);

	outgoing_refs = 0;
	binder_proc_lock(proc);
	while ((n = rb_first(&proc->refs_by_desc))) {
		struct binder_ref *ref;

		ref = rb_entry(n, struct binder_ref, rb_node_desc);
		outgoing_refs++;
		binder_cleanup_ref_olocked(ref);
		binder_proc_unlock(proc);
		binder_free_ref(ref);
		binder_proc_lock(proc);
	}
	binder_proc_unlock(proc);

	binder_release_work(proc, &proc->todo);
	binder_release_work(proc, &proc->delivered_death);

	binder_debug(BINDER_DEBUG_OPEN_CLOSE,
		     "%s: %d threads %d, nodes %d (ref %d), refs %d, active transactions %d\n",
		     __func__, proc->pid, threads, nodes, incoming_refs,
		     outgoing_refs, active_transactions);

	binder_proc_dec_tmpref(proc);
}
static void binder_deferred_func(struct work_struct *work)
{
	struct binder_proc *proc;

	int defer;

	do {
		mutex_lock(&binder_deferred_lock);
		if (!hlist_empty(&binder_deferred_list)) {
			proc = hlist_entry(binder_deferred_list.first,
					struct binder_proc, deferred_work_node);
			hlist_del_init(&proc->deferred_work_node);
			defer = proc->deferred_work;
			proc->deferred_work = 0;
		} else {
			proc = NULL;
			defer = 0;
		}
		mutex_unlock(&binder_deferred_lock);

		if (defer & BINDER_DEFERRED_FLUSH)
			binder_deferred_flush(proc);

		if (defer & BINDER_DEFERRED_RELEASE)
			binder_deferred_release(proc); /* frees proc */
	} while (proc);
}
static DECLARE_WORK(binder_deferred_work, binder_deferred_func);

static void
binder_defer_work(struct binder_proc *proc, enum binder_deferred_state defer)
{
	mutex_lock(&binder_deferred_lock);
	proc->deferred_work |= defer;
	if (hlist_unhashed(&proc->deferred_work_node)) {
		hlist_add_head(&proc->deferred_work_node,
				&binder_deferred_list);
		schedule_work(&binder_deferred_work);
	}
	mutex_unlock(&binder_deferred_lock);
}
static void print_binder_transaction_ilocked(struct seq_file *m,
					     struct binder_proc *proc,
					     const char *prefix,
					     struct binder_transaction *t)
{
	struct binder_proc *to_proc;
	struct binder_buffer *buffer = t->buffer;

	spin_lock(&t->lock);
	to_proc = t->to_proc;
	seq_printf(m,
		   "%s %d: %pK from %d:%d to %d:%d code %x flags %x pri %ld r%d",
		   prefix, t->debug_id, t,
		   t->from ? t->from->proc->pid : 0,
		   t->from ? t->from->pid : 0,
		   to_proc ? to_proc->pid : 0,
		   t->to_thread ? t->to_thread->pid : 0,
		   t->code, t->flags, t->priority, t->need_reply);
	spin_unlock(&t->lock);

	if (proc != to_proc) {
		/*
		 * Can only safely deref buffer if we are holding the
		 * correct proc inner lock for this node
		 */
		seq_puts(m, "\n");
		return;
	}

	if (buffer == NULL) {
		seq_puts(m, " buffer free\n");
		return;
	}
	if (buffer->target_node)
		seq_printf(m, " node %d", buffer->target_node->debug_id);
	seq_printf(m, " size %zd:%zd data %pK\n",
		   buffer->data_size, buffer->offsets_size,
		   buffer->user_data);
}
static void print_binder_work_ilocked(struct seq_file *m,
				     struct binder_proc *proc,
				     const char *prefix,
				     const char *transaction_prefix,
				     struct binder_work *w)
{
	struct binder_node *node;
	struct binder_transaction *t;

	switch (w->type) {
	case BINDER_WORK_TRANSACTION:
		t = container_of(w, struct binder_transaction, work);
		print_binder_transaction_ilocked(
				m, proc, transaction_prefix, t);
		break;
	case BINDER_WORK_RETURN_ERROR: {
		struct binder_error *e = container_of(
				w, struct binder_error, work);

		seq_printf(m, "%stransaction error: %u\n",
			   prefix, e->cmd);
	} break;
	case BINDER_WORK_TRANSACTION_COMPLETE:
		seq_printf(m, "%stransaction complete\n", prefix);
		break;
	case BINDER_WORK_NODE:
		node = container_of(w, struct binder_node, work);
		seq_printf(m, "%snode work %d: u%016llx c%016llx\n",
			   prefix, node->debug_id,
			   (u64)node->ptr, (u64)node->cookie);
		break;
	case BINDER_WORK_DEAD_BINDER:
		seq_printf(m, "%shas dead binder\n", prefix);
		break;
	case BINDER_WORK_DEAD_BINDER_AND_CLEAR:
		seq_printf(m, "%shas cleared dead binder\n", prefix);
		break;
	case BINDER_WORK_CLEAR_DEATH_NOTIFICATION:
		seq_printf(m, "%shas cleared death notification\n", prefix);
		break;
	default:
		seq_printf(m, "%sunknown work: type %d\n", prefix, w->type);
		break;
	}
}
static void print_binder_thread_ilocked(struct seq_file *m,
					struct binder_thread *thread,
					int print_always)
{
	struct binder_transaction *t;
	struct binder_work *w;
	size_t start_pos = m->count;
	size_t header_pos;

	seq_printf(m, "  thread %d: l %02x need_return %d tr %d\n",
			thread->pid, thread->looper,
			thread->looper_need_return,
			atomic_read(&thread->tmp_ref));
	header_pos = m->count;
	t = thread->transaction_stack;
	while (t) {
		if (t->from == thread) {
			print_binder_transaction_ilocked(m, thread->proc,
					"    outgoing transaction", t);
			t = t->from_parent;
		} else if (t->to_thread == thread) {
			print_binder_transaction_ilocked(m, thread->proc,
					"    incoming transaction", t);
			t = t->to_parent;
		} else {
			print_binder_transaction_ilocked(m, thread->proc,
					"    bad transaction", t);
			t = NULL;
		}
	}
	list_for_each_entry(w, &thread->todo, entry) {
		print_binder_work_ilocked(m, thread->proc, "    ",
					  "    pending transaction", w);
	}
	if (!print_always && m->count == header_pos)
		m->count = start_pos;
}
static void print_binder_node_nilocked(struct seq_file *m,
				       struct binder_node *node)
{
	struct binder_ref *ref;
	struct binder_work *w;
	int count;

	count = 0;
	hlist_for_each_entry(ref, &node->refs, node_entry)
		count++;

	seq_printf(m, "  node %d: u%016llx c%016llx hs %d hw %d ls %d lw %d is %d iw %d tr %d",
		   node->debug_id, (u64)node->ptr, (u64)node->cookie,
		   node->has_strong_ref, node->has_weak_ref,
		   node->local_strong_refs, node->local_weak_refs,
		   node->internal_strong_refs, count, node->tmp_refs);
	if (count) {
		seq_puts(m, " proc");
		hlist_for_each_entry(ref, &node->refs, node_entry)
			seq_printf(m, " %d", ref->proc->pid);
	}
	seq_puts(m, "\n");
	if (node->proc) {
		list_for_each_entry(w, &node->async_todo, entry)
			print_binder_work_ilocked(m, node->proc, "    ",
					  "    pending async transaction", w);
	}
}
static void print_binder_ref_olocked(struct seq_file *m,
				     struct binder_ref *ref)
{
	binder_node_lock(ref->node);
	seq_printf(m, "  ref %d: desc %d %snode %d s %d w %d d %pK\n",
		   ref->data.debug_id, ref->data.desc,
		   ref->node->proc ? "" : "dead ",
		   ref->node->debug_id, ref->data.strong,
		   ref->data.weak, ref->death);
	binder_node_unlock(ref->node);
}
static void print_binder_proc(struct seq_file *m,
			      struct binder_proc *proc, int print_all)
{
	struct binder_work *w;
	struct rb_node *n;
	size_t start_pos = m->count;
	size_t header_pos;
	struct binder_node *last_node = NULL;

	seq_printf(m, "proc %d\n", proc->pid);
	seq_printf(m, "context %s\n", proc->context->name);
	header_pos = m->count;

	binder_inner_proc_lock(proc);
	for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n))
		print_binder_thread_ilocked(m, rb_entry(n, struct binder_thread,
						rb_node), print_all);

	for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n)) {
		struct binder_node *node = rb_entry(n, struct binder_node,
						    rb_node);
		if (!print_all && !node->has_async_transaction)
			continue;

		/*
		 * take a temporary reference on the node so it
		 * survives and isn't removed from the tree
		 * while we print it.
		 */
		binder_inc_node_tmpref_ilocked(node);
		/* Need to drop inner lock to take node lock */
		binder_inner_proc_unlock(proc);
		if (last_node)
			binder_put_node(last_node);
		binder_node_inner_lock(node);
		print_binder_node_nilocked(m, node);
		binder_node_inner_unlock(node);
		last_node = node;
		binder_inner_proc_lock(proc);
	}
	binder_inner_proc_unlock(proc);
	if (last_node)
		binder_put_node(last_node);

	if (print_all) {
		binder_proc_lock(proc);
		for (n = rb_first(&proc->refs_by_desc);
		     n != NULL;
		     n = rb_next(n))
			print_binder_ref_olocked(m, rb_entry(n,
							    struct binder_ref,
							    rb_node_desc));
		binder_proc_unlock(proc);
	}
	binder_alloc_print_allocated(m, &proc->alloc);
	binder_inner_proc_lock(proc);
	list_for_each_entry(w, &proc->todo, entry)
		print_binder_work_ilocked(m, proc, "  ",
					  "  pending transaction", w);
	list_for_each_entry(w, &proc->delivered_death, entry) {
		seq_puts(m, "  has delivered dead binder\n");
		break;
	}
	binder_inner_proc_unlock(proc);
	if (!print_all && m->count == header_pos)
		m->count = start_pos;
}
static const char * const binder_return_strings[] = {
	"BR_ERROR",
	"BR_OK",
	"BR_TRANSACTION",
	"BR_REPLY",
	"BR_ACQUIRE_RESULT",
	"BR_DEAD_REPLY",
	"BR_TRANSACTION_COMPLETE",
	"BR_INCREFS",
	"BR_ACQUIRE",
	"BR_RELEASE",
	"BR_DECREFS",
	"BR_ATTEMPT_ACQUIRE",
	"BR_NOOP",
	"BR_SPAWN_LOOPER",
	"BR_FINISHED",
	"BR_DEAD_BINDER",
	"BR_CLEAR_DEATH_NOTIFICATION_DONE",
	"BR_FAILED_REPLY"
};

static const char * const binder_command_strings[] = {
	"BC_TRANSACTION",
	"BC_REPLY",
	"BC_ACQUIRE_RESULT",
	"BC_FREE_BUFFER",
	"BC_INCREFS",
	"BC_ACQUIRE",
	"BC_RELEASE",
	"BC_DECREFS",
	"BC_INCREFS_DONE",
	"BC_ACQUIRE_DONE",
	"BC_ATTEMPT_ACQUIRE",
	"BC_REGISTER_LOOPER",
	"BC_ENTER_LOOPER",
	"BC_EXIT_LOOPER",
	"BC_REQUEST_DEATH_NOTIFICATION",
	"BC_CLEAR_DEATH_NOTIFICATION",
	"BC_DEAD_BINDER_DONE",
	"BC_TRANSACTION_SG",
	"BC_REPLY_SG",
};

static const char * const binder_objstat_strings[] = {
	"proc",
	"thread",
	"node",
	"ref",
	"death",
	"transaction",
	"transaction_complete"
};
static void print_binder_stats(struct seq_file *m, const char *prefix,
			       struct binder_stats *stats)
{
	int i;

	BUILD_BUG_ON(ARRAY_SIZE(stats->bc) !=
		     ARRAY_SIZE(binder_command_strings));
	for (i = 0; i < ARRAY_SIZE(stats->bc); i++) {
		int temp = atomic_read(&stats->bc[i]);

		if (temp)
			seq_printf(m, "%s%s: %d\n", prefix,
				   binder_command_strings[i], temp);
	}

	BUILD_BUG_ON(ARRAY_SIZE(stats->br) !=
		     ARRAY_SIZE(binder_return_strings));
	for (i = 0; i < ARRAY_SIZE(stats->br); i++) {
		int temp = atomic_read(&stats->br[i]);

		if (temp)
			seq_printf(m, "%s%s: %d\n", prefix,
				   binder_return_strings[i], temp);
	}

	BUILD_BUG_ON(ARRAY_SIZE(stats->obj_created) !=
		     ARRAY_SIZE(binder_objstat_strings));
	BUILD_BUG_ON(ARRAY_SIZE(stats->obj_created) !=
		     ARRAY_SIZE(stats->obj_deleted));
	for (i = 0; i < ARRAY_SIZE(stats->obj_created); i++) {
		int created = atomic_read(&stats->obj_created[i]);
		int deleted = atomic_read(&stats->obj_deleted[i]);

		if (created || deleted)
			seq_printf(m, "%s%s: active %d total %d\n",
				   prefix,
				   binder_objstat_strings[i],
				   created - deleted,
				   created);
	}
}
static void print_binder_proc_stats(struct seq_file *m,
				    struct binder_proc *proc)
{
	struct binder_work *w;
	struct binder_thread *thread;
	struct rb_node *n;
	int count, strong, weak, ready_threads;
	size_t free_async_space =
		binder_alloc_get_free_async_space(&proc->alloc);

	seq_printf(m, "proc %d\n", proc->pid);
	seq_printf(m, "context %s\n", proc->context->name);
	count = 0;
	ready_threads = 0;
	binder_inner_proc_lock(proc);
	for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n))
		count++;

	list_for_each_entry(thread, &proc->waiting_threads, waiting_thread_node)
		ready_threads++;

	seq_printf(m, "  threads: %d\n", count);
	seq_printf(m, "  requested threads: %d+%d/%d\n"
			"  ready threads %d\n"
			"  free async space %zd\n", proc->requested_threads,
			proc->requested_threads_started, proc->max_threads,
			ready_threads,
			free_async_space);
	count = 0;
	for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n))
		count++;
	binder_inner_proc_unlock(proc);
	seq_printf(m, "  nodes: %d\n", count);
	count = 0;
	strong = 0;
	weak = 0;
	binder_proc_lock(proc);
	for (n = rb_first(&proc->refs_by_desc); n != NULL; n = rb_next(n)) {
		struct binder_ref *ref = rb_entry(n, struct binder_ref,
						  rb_node_desc);
		count++;
		strong += ref->data.strong;
		weak += ref->data.weak;
	}
	binder_proc_unlock(proc);
	seq_printf(m, "  refs: %d s %d w %d\n", count, strong, weak);

	count = binder_alloc_get_allocated_count(&proc->alloc);
	seq_printf(m, "  buffers: %d\n", count);

	binder_alloc_print_pages(m, &proc->alloc);

	count = 0;
	binder_inner_proc_lock(proc);
	list_for_each_entry(w, &proc->todo, entry) {
		if (w->type == BINDER_WORK_TRANSACTION)
			count++;
	}
	binder_inner_proc_unlock(proc);
	seq_printf(m, "  pending transactions: %d\n", count);

	print_binder_stats(m, "  ", &proc->stats);
}
int binder_state_show(struct seq_file *m, void *unused)
{
	struct binder_proc *proc;
	struct binder_node *node;
	struct binder_node *last_node = NULL;

	seq_puts(m, "binder state:\n");

	spin_lock(&binder_dead_nodes_lock);
	if (!hlist_empty(&binder_dead_nodes))
		seq_puts(m, "dead nodes:\n");
	hlist_for_each_entry(node, &binder_dead_nodes, dead_node) {
		/*
		 * take a temporary reference on the node so it
		 * survives and isn't removed from the list
		 * while we print it.
		 */
		node->tmp_refs++;
		spin_unlock(&binder_dead_nodes_lock);
		if (last_node)
			binder_put_node(last_node);
		binder_node_lock(node);
		print_binder_node_nilocked(m, node);
		binder_node_unlock(node);
		last_node = node;
		spin_lock(&binder_dead_nodes_lock);
	}
	spin_unlock(&binder_dead_nodes_lock);
	if (last_node)
		binder_put_node(last_node);

	mutex_lock(&binder_procs_lock);
	hlist_for_each_entry(proc, &binder_procs, proc_node)
		print_binder_proc(m, proc, 1);
	mutex_unlock(&binder_procs_lock);

	return 0;
}
int binder_stats_show(struct seq_file *m, void *unused)
{
	struct binder_proc *proc;

	seq_puts(m, "binder stats:\n");

	print_binder_stats(m, "", &binder_stats);

	mutex_lock(&binder_procs_lock);
	hlist_for_each_entry(proc, &binder_procs, proc_node)
		print_binder_proc_stats(m, proc);
	mutex_unlock(&binder_procs_lock);

	return 0;
}
int binder_transactions_show(struct seq_file *m, void *unused)
{
	struct binder_proc *proc;

	seq_puts(m, "binder transactions:\n");
	mutex_lock(&binder_procs_lock);
	hlist_for_each_entry(proc, &binder_procs, proc_node)
		print_binder_proc(m, proc, 0);
	mutex_unlock(&binder_procs_lock);

	return 0;
}
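
/*
 * Illustrative sketch (not part of the driver): the show functions above
 * back plain debugfs files, so the driver state can be dumped with an
 * ordinary read loop (assuming debugfs is mounted at /sys/kernel/debug;
 * "state" can be swapped for "stats" or "transactions"):
 *
 *	char buf[4096];
 *	ssize_t n;
 *	int fd = open("/sys/kernel/debug/binder/state", O_RDONLY);
 *	while ((n = read(fd, buf, sizeof(buf))) > 0)
 *		fwrite(buf, 1, n, stdout);
 */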
static int proc_show(struct seq_file *m, void *unused)
{
	struct binder_proc *itr;
	int pid = (unsigned long)m->private;

	mutex_lock(&binder_procs_lock);
	hlist_for_each_entry(itr, &binder_procs, proc_node) {
		if (itr->pid == pid) {
			seq_puts(m, "binder proc state:\n");
			print_binder_proc(m, itr, 1);
		}
	}
	mutex_unlock(&binder_procs_lock);

	return 0;
}
static void print_binder_transaction_log_entry(struct seq_file *m,
					struct binder_transaction_log_entry *e)
{
	int debug_id = READ_ONCE(e->debug_id_done);
	/*
	 * read barrier to guarantee debug_id_done read before
	 * we print the log values
	 */
	smp_rmb();
	seq_printf(m,
		   "%d: %s from %d:%d to %d:%d context %s node %d handle %d size %d:%d ret %d/%d l=%d",
		   e->debug_id, (e->call_type == 2) ? "reply" :
		   ((e->call_type == 1) ? "async" : "call "), e->from_proc,
		   e->from_thread, e->to_proc, e->to_thread, e->context_name,
		   e->to_node, e->target_handle, e->data_size, e->offsets_size,
		   e->return_error, e->return_error_param,
		   e->return_error_line);
	/*
	 * read-barrier to guarantee read of debug_id_done after
	 * done printing the fields of the entry
	 */
	smp_rmb();
	seq_printf(m, debug_id && debug_id == READ_ONCE(e->debug_id_done) ?
			"\n" : " (incomplete)\n");
}
int binder_transaction_log_show(struct seq_file *m, void *unused)
{
	struct binder_transaction_log *log = m->private;
	unsigned int log_cur = atomic_read(&log->cur);
	unsigned int count;
	unsigned int cur;
	int i;

	count = log_cur + 1;
	cur = count < ARRAY_SIZE(log->entry) && !log->full ?
		0 : count % ARRAY_SIZE(log->entry);
	if (count > ARRAY_SIZE(log->entry) || log->full)
		count = ARRAY_SIZE(log->entry);
	for (i = 0; i < count; i++) {
		unsigned int index = cur++ % ARRAY_SIZE(log->entry);

		print_binder_transaction_log_entry(m, &log->entry[index]);
	}
	return 0;
}
= {
6054 .owner
= THIS_MODULE
,
6055 .poll
= binder_poll
,
6056 .unlocked_ioctl
= binder_ioctl
,
6057 .compat_ioctl
= compat_ptr_ioctl
,
6058 .mmap
= binder_mmap
,
6059 .open
= binder_open
,
6060 .flush
= binder_flush
,
6061 .release
= binder_release
,
6064 static int __init
init_binder_device(const char *name
)
6067 struct binder_device
*binder_device
;
6069 binder_device
= kzalloc(sizeof(*binder_device
), GFP_KERNEL
);
6073 binder_device
->miscdev
.fops
= &binder_fops
;
6074 binder_device
->miscdev
.minor
= MISC_DYNAMIC_MINOR
;
6075 binder_device
->miscdev
.name
= name
;
6077 binder_device
->context
.binder_context_mgr_uid
= INVALID_UID
;
6078 binder_device
->context
.name
= name
;
6079 mutex_init(&binder_device
->context
.context_mgr_node_lock
);
6081 ret
= misc_register(&binder_device
->miscdev
);
6083 kfree(binder_device
);
6087 hlist_add_head(&binder_device
->hlist
, &binder_devices
);
6092 static int __init
binder_init(void)
6095 char *device_name
, *device_tmp
;
6096 struct binder_device
*device
;
6097 struct hlist_node
*tmp
;
6098 char *device_names
= NULL
;
6100 ret
= binder_alloc_shrinker_init();
6104 atomic_set(&binder_transaction_log
.cur
, ~0U);
6105 atomic_set(&binder_transaction_log_failed
.cur
, ~0U);
6107 binder_debugfs_dir_entry_root
= debugfs_create_dir("binder", NULL
);
6108 if (binder_debugfs_dir_entry_root
)
6109 binder_debugfs_dir_entry_proc
= debugfs_create_dir("proc",
6110 binder_debugfs_dir_entry_root
);
6112 if (binder_debugfs_dir_entry_root
) {
6113 debugfs_create_file("state",
6115 binder_debugfs_dir_entry_root
,
6117 &binder_state_fops
);
6118 debugfs_create_file("stats",
6120 binder_debugfs_dir_entry_root
,
6122 &binder_stats_fops
);
6123 debugfs_create_file("transactions",
6125 binder_debugfs_dir_entry_root
,
6127 &binder_transactions_fops
);
6128 debugfs_create_file("transaction_log",
6130 binder_debugfs_dir_entry_root
,
6131 &binder_transaction_log
,
6132 &binder_transaction_log_fops
);
6133 debugfs_create_file("failed_transaction_log",
6135 binder_debugfs_dir_entry_root
,
6136 &binder_transaction_log_failed
,
6137 &binder_transaction_log_fops
);
6140 if (!IS_ENABLED(CONFIG_ANDROID_BINDERFS
) &&
6141 strcmp(binder_devices_param
, "") != 0) {
6143 * Copy the module_parameter string, because we don't want to
6144 * tokenize it in-place.
6146 device_names
= kstrdup(binder_devices_param
, GFP_KERNEL
);
6147 if (!device_names
) {
6149 goto err_alloc_device_names_failed
;
6152 device_tmp
= device_names
;
6153 while ((device_name
= strsep(&device_tmp
, ","))) {
6154 ret
= init_binder_device(device_name
);
6156 goto err_init_binder_device_failed
;
6160 ret
= init_binderfs();
6162 goto err_init_binder_device_failed
;
6166 err_init_binder_device_failed
:
6167 hlist_for_each_entry_safe(device
, tmp
, &binder_devices
, hlist
) {
6168 misc_deregister(&device
->miscdev
);
6169 hlist_del(&device
->hlist
);
6173 kfree(device_names
);
6175 err_alloc_device_names_failed
:
6176 debugfs_remove_recursive(binder_debugfs_dir_entry_root
);
6181 device_initcall(binder_init
);
#define CREATE_TRACE_POINTS
#include "binder_trace.h"

MODULE_LICENSE("GPL v2");