/* binder.c
 *
 * Android IPC Subsystem
 *
 * Copyright (C) 2007-2008 Google, Inc.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */
/*
 * Locking overview
 *
 * There are 3 main spinlocks which must be acquired in the
 * order shown:
 *
 * 1) proc->outer_lock : protects binder_ref
 *    binder_proc_lock() and binder_proc_unlock() are
 *    used to acq/rel.
 * 2) node->lock : protects most fields of binder_node.
 *    binder_node_lock() and binder_node_unlock() are
 *    used to acq/rel
 * 3) proc->inner_lock : protects the thread and node lists
 *    (proc->threads, proc->waiting_threads, proc->nodes)
 *    and all todo lists associated with the binder_proc
 *    (proc->todo, thread->todo, proc->delivered_death and
 *    node->async_todo), as well as thread->transaction_stack
 *    binder_inner_proc_lock() and binder_inner_proc_unlock()
 *    are used to acq/rel
 *
 * Any lock under procA must never be nested under any lock at the same
 * level or below on procB.
 *
 * Functions that require a lock held on entry indicate which lock
 * in the suffix of the function name:
 *
 * foo_olocked() : requires proc->outer_lock
 * foo_nlocked() : requires node->lock
 * foo_ilocked() : requires proc->inner_lock
 * foo_oilocked(): requires proc->outer_lock and proc->inner_lock
 * foo_nilocked(): requires node->lock and proc->inner_lock
 * ...
 */
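/*
 * Illustrative nesting only (not driver code): a path that needed all
 * three locks for one proc would take and release them in the order
 * given above, using the helpers defined later in this file:
 *
 *	binder_proc_lock(proc);			// 1) proc->outer_lock
 *	binder_node_lock(node);			// 2) node->lock
 *	binder_inner_proc_lock(proc);		// 3) proc->inner_lock
 *	...
 *	binder_inner_proc_unlock(proc);
 *	binder_node_unlock(node);
 *	binder_proc_unlock(proc);
 */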
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <asm/cacheflush.h>
#include <linux/fdtable.h>
#include <linux/file.h>
#include <linux/freezer.h>
#include <linux/fs.h>
#include <linux/list.h>
#include <linux/miscdevice.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/nsproxy.h>
#include <linux/poll.h>
#include <linux/debugfs.h>
#include <linux/rbtree.h>
#include <linux/sched/signal.h>
#include <linux/sched/mm.h>
#include <linux/seq_file.h>
#include <linux/uaccess.h>
#include <linux/pid_namespace.h>
#include <linux/security.h>
#include <linux/spinlock.h>

#include <uapi/linux/android/binder.h>
#include "binder_alloc.h"
#include "binder_trace.h"
static HLIST_HEAD(binder_deferred_list);
static DEFINE_MUTEX(binder_deferred_lock);

static HLIST_HEAD(binder_devices);
static HLIST_HEAD(binder_procs);
static DEFINE_MUTEX(binder_procs_lock);

static HLIST_HEAD(binder_dead_nodes);
static DEFINE_SPINLOCK(binder_dead_nodes_lock);

static struct dentry *binder_debugfs_dir_entry_root;
static struct dentry *binder_debugfs_dir_entry_proc;
static atomic_t binder_last_id;
#define BINDER_DEBUG_ENTRY(name) \
static int binder_##name##_open(struct inode *inode, struct file *file) \
{ \
        return single_open(file, binder_##name##_show, inode->i_private); \
} \
\
static const struct file_operations binder_##name##_fops = { \
        .owner = THIS_MODULE, \
        .open = binder_##name##_open, \
        .read = seq_read, \
        .llseek = seq_lseek, \
        .release = single_release, \
}

static int binder_proc_show(struct seq_file *m, void *unused);
BINDER_DEBUG_ENTRY(proc);
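/*
 * For reference, BINDER_DEBUG_ENTRY(proc) above expands to roughly:
 *
 *	static int binder_proc_open(struct inode *inode, struct file *file)
 *	{
 *		return single_open(file, binder_proc_show, inode->i_private);
 *	}
 *
 *	static const struct file_operations binder_proc_fops = { ... };
 *
 * i.e. a seq_file open helper plus the file_operations used when the
 * corresponding debugfs entry is created.
 */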
/* This is only defined in include/asm-arm/sizes.h */
#ifndef SZ_1K
#define SZ_1K 0x400
#endif

#ifndef SZ_4M
#define SZ_4M 0x400000
#endif

#define FORBIDDEN_MMAP_FLAGS (VM_WRITE)
enum {
        BINDER_DEBUG_USER_ERROR = 1U << 0,
        BINDER_DEBUG_FAILED_TRANSACTION = 1U << 1,
        BINDER_DEBUG_DEAD_TRANSACTION = 1U << 2,
        BINDER_DEBUG_OPEN_CLOSE = 1U << 3,
        BINDER_DEBUG_DEAD_BINDER = 1U << 4,
        BINDER_DEBUG_DEATH_NOTIFICATION = 1U << 5,
        BINDER_DEBUG_READ_WRITE = 1U << 6,
        BINDER_DEBUG_USER_REFS = 1U << 7,
        BINDER_DEBUG_THREADS = 1U << 8,
        BINDER_DEBUG_TRANSACTION = 1U << 9,
        BINDER_DEBUG_TRANSACTION_COMPLETE = 1U << 10,
        BINDER_DEBUG_FREE_BUFFER = 1U << 11,
        BINDER_DEBUG_INTERNAL_REFS = 1U << 12,
        BINDER_DEBUG_PRIORITY_CAP = 1U << 13,
        BINDER_DEBUG_SPINLOCKS = 1U << 14,
};
static uint32_t binder_debug_mask = BINDER_DEBUG_USER_ERROR |
        BINDER_DEBUG_FAILED_TRANSACTION | BINDER_DEBUG_DEAD_TRANSACTION;
module_param_named(debug_mask, binder_debug_mask, uint, 0644);

static char *binder_devices_param = CONFIG_ANDROID_BINDER_DEVICES;
module_param_named(devices, binder_devices_param, charp, 0444);
static DECLARE_WAIT_QUEUE_HEAD(binder_user_error_wait);
static int binder_stop_on_user_error;

static int binder_set_stop_on_user_error(const char *val,
                                         const struct kernel_param *kp)
{
        int ret;

        ret = param_set_int(val, kp);
        if (binder_stop_on_user_error < 2)
                wake_up(&binder_user_error_wait);
        return ret;
}
module_param_call(stop_on_user_error, binder_set_stop_on_user_error,
                  param_get_int, &binder_stop_on_user_error, 0644);
#define binder_debug(mask, x...) \
        do { \
                if (binder_debug_mask & mask) \
                        pr_info(x); \
        } while (0)

#define binder_user_error(x...) \
        do { \
                if (binder_debug_mask & BINDER_DEBUG_USER_ERROR) \
                        pr_info(x); \
                if (binder_stop_on_user_error) \
                        binder_stop_on_user_error = 2; \
        } while (0)
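/*
 * Typical use (illustrative): a message is only emitted when the
 * corresponding bit is set in the debug_mask module parameter, e.g.
 *
 *	binder_debug(BINDER_DEBUG_THREADS, "%d: thread exited\n", pid);
 *	binder_user_error("%d: bad handle %u\n", pid, handle);
 *
 * binder_user_error() additionally sets binder_stop_on_user_error to 2
 * when the stop_on_user_error parameter is enabled; elsewhere in the
 * driver, callers then wait on binder_user_error_wait until the flag
 * is lowered again.
 */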
#define to_flat_binder_object(hdr) \
        container_of(hdr, struct flat_binder_object, hdr)

#define to_binder_fd_object(hdr) container_of(hdr, struct binder_fd_object, hdr)

#define to_binder_buffer_object(hdr) \
        container_of(hdr, struct binder_buffer_object, hdr)

#define to_binder_fd_array_object(hdr) \
        container_of(hdr, struct binder_fd_array_object, hdr)
enum binder_stat_types {
        BINDER_STAT_PROC,
        BINDER_STAT_THREAD,
        BINDER_STAT_NODE,
        BINDER_STAT_REF,
        BINDER_STAT_DEATH,
        BINDER_STAT_TRANSACTION,
        BINDER_STAT_TRANSACTION_COMPLETE,
        BINDER_STAT_COUNT
};

struct binder_stats {
        atomic_t br[_IOC_NR(BR_FAILED_REPLY) + 1];
        atomic_t bc[_IOC_NR(BC_REPLY_SG) + 1];
        atomic_t obj_created[BINDER_STAT_COUNT];
        atomic_t obj_deleted[BINDER_STAT_COUNT];
};

static struct binder_stats binder_stats;

static inline void binder_stats_deleted(enum binder_stat_types type)
{
        atomic_inc(&binder_stats.obj_deleted[type]);
}

static inline void binder_stats_created(enum binder_stat_types type)
{
        atomic_inc(&binder_stats.obj_created[type]);
}
struct binder_transaction_log_entry {
        int debug_id;
        int debug_id_done;
        int call_type;
        int from_proc;
        int from_thread;
        int target_handle;
        int to_proc;
        int to_thread;
        int to_node;
        int data_size;
        int offsets_size;
        int return_error_line;
        uint32_t return_error;
        uint32_t return_error_param;
        const char *context_name;
};
struct binder_transaction_log {
        atomic_t cur;
        bool full;
        struct binder_transaction_log_entry entry[32];
};
static struct binder_transaction_log binder_transaction_log;
static struct binder_transaction_log binder_transaction_log_failed;
static struct binder_transaction_log_entry *binder_transaction_log_add(
        struct binder_transaction_log *log)
{
        struct binder_transaction_log_entry *e;
        unsigned int cur = atomic_inc_return(&log->cur);

        if (cur >= ARRAY_SIZE(log->entry))
                log->full = true;
        e = &log->entry[cur % ARRAY_SIZE(log->entry)];
        WRITE_ONCE(e->debug_id_done, 0);
        /*
         * write-barrier to synchronize access to e->debug_id_done.
         * We make sure the initialized 0 value is seen before
         * the other fields are zeroed by memset().
         */
        smp_wmb();
        memset(e, 0, sizeof(*e));
        return e;
}
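/*
 * Illustrative note, not driver logic: writers fill the returned entry
 * and store a non-zero debug_id_done as their final step; readers of
 * the log are expected to check debug_id_done (with a paired smp_rmb())
 * to detect entries that are still being written.
 */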
struct binder_context {
        struct binder_node *binder_context_mgr_node;
        struct mutex context_mgr_node_lock;

        kuid_t binder_context_mgr_uid;
        const char *name;
};

struct binder_device {
        struct hlist_node hlist;
        struct miscdevice miscdev;
        struct binder_context context;
};
/**
 * struct binder_work - work enqueued on a worklist
 * @entry:             node enqueued on list
 * @type:              type of work to be performed
 *
 * There are separate work lists for proc, thread, and node (async).
 */
struct binder_work {
        struct list_head entry;

        enum {
                BINDER_WORK_TRANSACTION = 1,
                BINDER_WORK_TRANSACTION_COMPLETE,
                BINDER_WORK_RETURN_ERROR,
                BINDER_WORK_NODE,
                BINDER_WORK_DEAD_BINDER,
                BINDER_WORK_DEAD_BINDER_AND_CLEAR,
                BINDER_WORK_CLEAR_DEATH_NOTIFICATION,
        } type;
};
struct binder_error {
        struct binder_work work;
        uint32_t cmd;
};
/**
 * struct binder_node - binder node bookkeeping
 * @debug_id:             unique ID for debugging
 *                        (invariant after initialized)
 * @lock:                 lock for node fields
 * @work:                 worklist element for node work
 *                        (protected by @proc->inner_lock)
 * @rb_node:              element for proc->nodes tree
 *                        (protected by @proc->inner_lock)
 * @dead_node:            element for binder_dead_nodes list
 *                        (protected by binder_dead_nodes_lock)
 * @proc:                 binder_proc that owns this node
 *                        (invariant after initialized)
 * @refs:                 list of references on this node
 *                        (protected by @lock)
 * @internal_strong_refs: used to take strong references when
 *                        initiating a transaction
 *                        (protected by @proc->inner_lock if @proc
 *                        and by @lock)
 * @local_weak_refs:      weak user refs from local process
 *                        (protected by @proc->inner_lock if @proc
 *                        and by @lock)
 * @local_strong_refs:    strong user refs from local process
 *                        (protected by @proc->inner_lock if @proc
 *                        and by @lock)
 * @tmp_refs:             temporary kernel refs
 *                        (protected by @proc->inner_lock while @proc
 *                        is valid, and by binder_dead_nodes_lock
 *                        if @proc is NULL. During inc/dec and node release
 *                        it is also protected by @lock to provide safety
 *                        as the node dies and @proc becomes NULL)
 * @ptr:                  userspace pointer for node
 *                        (invariant, no lock needed)
 * @cookie:               userspace cookie for node
 *                        (invariant, no lock needed)
 * @has_strong_ref:       userspace notified of strong ref
 *                        (protected by @proc->inner_lock if @proc
 *                        and by @lock)
 * @pending_strong_ref:   userspace has acked notification of strong ref
 *                        (protected by @proc->inner_lock if @proc
 *                        and by @lock)
 * @has_weak_ref:         userspace notified of weak ref
 *                        (protected by @proc->inner_lock if @proc
 *                        and by @lock)
 * @pending_weak_ref:     userspace has acked notification of weak ref
 *                        (protected by @proc->inner_lock if @proc
 *                        and by @lock)
 * @has_async_transaction: async transaction to node in progress
 *                        (protected by @lock)
 * @accept_fds:           file descriptor operations supported for node
 *                        (invariant after initialized)
 * @min_priority:         minimum scheduling priority
 *                        (invariant after initialized)
 * @async_todo:           list of async work items
 *                        (protected by @proc->inner_lock)
 *
 * Bookkeeping structure for binder nodes.
 */
struct binder_node {
        int debug_id;
        spinlock_t lock;
        struct binder_work work;
        union {
                struct rb_node rb_node;
                struct hlist_node dead_node;
        };
        struct binder_proc *proc;
        struct hlist_head refs;
        int internal_strong_refs;
        int local_weak_refs;
        int local_strong_refs;
        int tmp_refs;
        binder_uintptr_t ptr;
        binder_uintptr_t cookie;
        struct {
                /*
                 * bitfield elements protected by
                 * proc inner_lock
                 */
                u8 has_strong_ref:1;
                u8 pending_strong_ref:1;
                u8 has_weak_ref:1;
                u8 pending_weak_ref:1;
        };
        struct {
                /*
                 * invariant after initialization
                 */
                u8 accept_fds:1;
                u8 min_priority;
        };
        bool has_async_transaction;
        struct list_head async_todo;
};
struct binder_ref_death {
        /**
         * @work: worklist element for death notifications
         *        (protected by inner_lock of the proc that
         *        this ref belongs to)
         */
        struct binder_work work;
        binder_uintptr_t cookie;
};
/**
 * struct binder_ref_data - binder_ref counts and id
 * @debug_id:        unique ID for the ref
 * @desc:            unique userspace handle for ref
 * @strong:          strong ref count (debugging only if not locked)
 * @weak:            weak ref count (debugging only if not locked)
 *
 * Structure to hold ref count and ref id information. Since
 * the actual ref can only be accessed with a lock, this structure
 * is used to return information about the ref to callers of
 * ref inc/dec functions.
 */
struct binder_ref_data {
        int debug_id;
        uint32_t desc;
        int strong;
        int weak;
};
/**
 * struct binder_ref - struct to track references on nodes
 * @data:        binder_ref_data containing id, handle, and current refcounts
 * @rb_node_desc: node for lookup by @data.desc in proc's rb_tree
 * @rb_node_node: node for lookup by @node in proc's rb_tree
 * @node_entry:  list entry for node->refs list in target node
 *               (protected by @node->lock)
 * @proc:        binder_proc containing ref
 * @node:        binder_node of target node. When cleaning up a
 *               ref for deletion in binder_cleanup_ref, a non-NULL
 *               @node indicates the node must be freed
 * @death:       pointer to death notification (ref_death) if requested
 *               (protected by @node->lock)
 *
 * Structure to track references from procA to target node (on procB). This
 * structure is unsafe to access without holding @proc->outer_lock.
 */
struct binder_ref {
        /* Lookups needed: */
        /*   node + proc => ref (transaction) */
        /*   desc + proc => ref (transaction, inc/dec ref) */
        /*   node => refs + procs (proc exit) */
        struct binder_ref_data data;
        struct rb_node rb_node_desc;
        struct rb_node rb_node_node;
        struct hlist_node node_entry;
        struct binder_proc *proc;
        struct binder_node *node;
        struct binder_ref_death *death;
};
enum binder_deferred_state {
        BINDER_DEFERRED_PUT_FILES = 0x01,
        BINDER_DEFERRED_FLUSH = 0x02,
        BINDER_DEFERRED_RELEASE = 0x04,
};
/**
 * struct binder_proc - binder process bookkeeping
 * @proc_node:            element for binder_procs list
 * @threads:              rbtree of binder_threads in this proc
 *                        (protected by @inner_lock)
 * @nodes:                rbtree of binder nodes associated with
 *                        this proc ordered by node->ptr
 *                        (protected by @inner_lock)
 * @refs_by_desc:         rbtree of refs ordered by ref->desc
 *                        (protected by @outer_lock)
 * @refs_by_node:         rbtree of refs ordered by ref->node
 *                        (protected by @outer_lock)
 * @waiting_threads:      threads currently waiting for proc work
 *                        (protected by @inner_lock)
 * @pid:                  PID of group_leader of process
 *                        (invariant after initialized)
 * @tsk:                  task_struct for group_leader of process
 *                        (invariant after initialized)
 * @files:                files_struct for process
 *                        (protected by @files_lock)
 * @files_lock:           mutex to protect @files
 * @deferred_work_node:   element for binder_deferred_list
 *                        (protected by binder_deferred_lock)
 * @deferred_work:        bitmap of deferred work to perform
 *                        (protected by binder_deferred_lock)
 * @is_dead:              process is dead and awaiting free
 *                        when outstanding transactions are cleaned up
 *                        (protected by @inner_lock)
 * @todo:                 list of work for this process
 *                        (protected by @inner_lock)
 * @stats:                per-process binder statistics
 *                        (atomics, no lock needed)
 * @delivered_death:      list of delivered death notifications
 *                        (protected by @inner_lock)
 * @max_threads:          cap on number of binder threads
 *                        (protected by @inner_lock)
 * @requested_threads:    number of binder threads requested but not
 *                        yet started. In current implementation, can
 *                        only be 0 or 1.
 *                        (protected by @inner_lock)
 * @requested_threads_started: number of binder threads started
 *                        (protected by @inner_lock)
 * @tmp_ref:              temporary reference to indicate proc is in use
 *                        (protected by @inner_lock)
 * @default_priority:     default scheduler priority
 *                        (invariant after initialized)
 * @debugfs_entry:        debugfs node
 * @alloc:                binder allocator bookkeeping
 * @context:              binder_context for this proc
 *                        (invariant after initialized)
 * @inner_lock:           can nest under outer_lock and/or node lock
 * @outer_lock:           no nesting under inner or node lock
 *                        Lock order: 1) outer, 2) node, 3) inner
 *
 * Bookkeeping structure for binder processes
 */
struct binder_proc {
        struct hlist_node proc_node;
        struct rb_root threads;
        struct rb_root nodes;
        struct rb_root refs_by_desc;
        struct rb_root refs_by_node;
        struct list_head waiting_threads;
        int pid;
        struct task_struct *tsk;
        struct files_struct *files;
        struct mutex files_lock;
        struct hlist_node deferred_work_node;
        int deferred_work;
        bool is_dead;

        struct list_head todo;
        struct binder_stats stats;
        struct list_head delivered_death;
        int max_threads;
        int requested_threads;
        int requested_threads_started;
        int tmp_ref;
        long default_priority;
        struct dentry *debugfs_entry;
        struct binder_alloc alloc;
        struct binder_context *context;
        spinlock_t inner_lock;
        spinlock_t outer_lock;
};
enum {
        BINDER_LOOPER_STATE_REGISTERED = 0x01,
        BINDER_LOOPER_STATE_ENTERED = 0x02,
        BINDER_LOOPER_STATE_EXITED = 0x04,
        BINDER_LOOPER_STATE_INVALID = 0x08,
        BINDER_LOOPER_STATE_WAITING = 0x10,
        BINDER_LOOPER_STATE_POLL = 0x20,
};
/**
 * struct binder_thread - binder thread bookkeeping
 * @proc:                 binder process for this thread
 *                        (invariant after initialization)
 * @rb_node:              element for proc->threads rbtree
 *                        (protected by @proc->inner_lock)
 * @waiting_thread_node:  element for @proc->waiting_threads list
 *                        (protected by @proc->inner_lock)
 * @pid:                  PID for this thread
 *                        (invariant after initialization)
 * @looper:               bitmap of looping state
 *                        (only accessed by this thread)
 * @looper_need_return:   looping thread needs to exit driver
 *                        (no lock needed)
 * @transaction_stack:    stack of in-progress transactions for this thread
 *                        (protected by @proc->inner_lock)
 * @todo:                 list of work to do for this thread
 *                        (protected by @proc->inner_lock)
 * @process_todo:         whether work in @todo should be processed
 *                        (protected by @proc->inner_lock)
 * @return_error:         transaction errors reported by this thread
 *                        (only accessed by this thread)
 * @reply_error:          transaction errors reported by target thread
 *                        (protected by @proc->inner_lock)
 * @wait:                 wait queue for thread work
 * @stats:                per-thread statistics
 *                        (atomics, no lock needed)
 * @tmp_ref:              temporary reference to indicate thread is in use
 *                        (atomic since @proc->inner_lock cannot
 *                        always be acquired)
 * @is_dead:              thread is dead and awaiting free
 *                        when outstanding transactions are cleaned up
 *                        (protected by @proc->inner_lock)
 *
 * Bookkeeping structure for binder threads.
 */
struct binder_thread {
        struct binder_proc *proc;
        struct rb_node rb_node;
        struct list_head waiting_thread_node;
        int pid;
        int looper;              /* only modified by this thread */
        bool looper_need_return; /* can be written by other thread */
        struct binder_transaction *transaction_stack;
        struct list_head todo;
        bool process_todo;
        struct binder_error return_error;
        struct binder_error reply_error;
        wait_queue_head_t wait;
        struct binder_stats stats;
        atomic_t tmp_ref;
        bool is_dead;
};
struct binder_transaction {
        int debug_id;
        struct binder_work work;
        struct binder_thread *from;
        struct binder_transaction *from_parent;
        struct binder_proc *to_proc;
        struct binder_thread *to_thread;
        struct binder_transaction *to_parent;
        unsigned need_reply:1;
        /* unsigned is_dead:1; */ /* not used at the moment */

        struct binder_buffer *buffer;
        unsigned int code;
        unsigned int flags;
        long priority;
        long saved_priority;
        kuid_t sender_euid;
        /**
         * @lock:  protects @from, @to_proc, and @to_thread
         *
         * @from, @to_proc, and @to_thread can be set to NULL
         * during thread teardown
         */
        spinlock_t lock;
};
/**
 * binder_proc_lock() - Acquire outer lock for given binder_proc
 * @proc:         struct binder_proc to acquire
 *
 * Acquires proc->outer_lock. Used to protect binder_ref
 * structures associated with the given proc.
 */
#define binder_proc_lock(proc) _binder_proc_lock(proc, __LINE__)
static void
_binder_proc_lock(struct binder_proc *proc, int line)
{
        binder_debug(BINDER_DEBUG_SPINLOCKS,
                     "%s: line=%d\n", __func__, line);
        spin_lock(&proc->outer_lock);
}
/**
 * binder_proc_unlock() - Release outer lock for given binder_proc
 * @proc:         struct binder_proc to release
 *
 * Release lock acquired via binder_proc_lock()
 */
#define binder_proc_unlock(_proc) _binder_proc_unlock(_proc, __LINE__)
static void
_binder_proc_unlock(struct binder_proc *proc, int line)
{
        binder_debug(BINDER_DEBUG_SPINLOCKS,
                     "%s: line=%d\n", __func__, line);
        spin_unlock(&proc->outer_lock);
}
/**
 * binder_inner_proc_lock() - Acquire inner lock for given binder_proc
 * @proc:         struct binder_proc to acquire
 *
 * Acquires proc->inner_lock. Used to protect todo lists
 */
#define binder_inner_proc_lock(proc) _binder_inner_proc_lock(proc, __LINE__)
static void
_binder_inner_proc_lock(struct binder_proc *proc, int line)
{
        binder_debug(BINDER_DEBUG_SPINLOCKS,
                     "%s: line=%d\n", __func__, line);
        spin_lock(&proc->inner_lock);
}

/**
 * binder_inner_proc_unlock() - Release inner lock for given binder_proc
 * @proc:         struct binder_proc to release
 *
 * Release lock acquired via binder_inner_proc_lock()
 */
#define binder_inner_proc_unlock(proc) _binder_inner_proc_unlock(proc, __LINE__)
static void
_binder_inner_proc_unlock(struct binder_proc *proc, int line)
{
        binder_debug(BINDER_DEBUG_SPINLOCKS,
                     "%s: line=%d\n", __func__, line);
        spin_unlock(&proc->inner_lock);
}
/**
 * binder_node_lock() - Acquire spinlock for given binder_node
 * @node:         struct binder_node to acquire
 *
 * Acquires node->lock. Used to protect binder_node fields
 */
#define binder_node_lock(node) _binder_node_lock(node, __LINE__)
static void
_binder_node_lock(struct binder_node *node, int line)
{
        binder_debug(BINDER_DEBUG_SPINLOCKS,
                     "%s: line=%d\n", __func__, line);
        spin_lock(&node->lock);
}

/**
 * binder_node_unlock() - Release spinlock for given binder_node
 * @node:         struct binder_node to release
 *
 * Release lock acquired via binder_node_lock()
 */
#define binder_node_unlock(node) _binder_node_unlock(node, __LINE__)
static void
_binder_node_unlock(struct binder_node *node, int line)
{
        binder_debug(BINDER_DEBUG_SPINLOCKS,
                     "%s: line=%d\n", __func__, line);
        spin_unlock(&node->lock);
}
/**
 * binder_node_inner_lock() - Acquire node and inner locks
 * @node:         struct binder_node to acquire
 *
 * Acquires node->lock. If node->proc is non-NULL, also acquires
 * proc->inner_lock. Used to protect binder_node fields
 */
#define binder_node_inner_lock(node) _binder_node_inner_lock(node, __LINE__)
static void
_binder_node_inner_lock(struct binder_node *node, int line)
{
        binder_debug(BINDER_DEBUG_SPINLOCKS,
                     "%s: line=%d\n", __func__, line);
        spin_lock(&node->lock);
        if (node->proc)
                binder_inner_proc_lock(node->proc);
}

/**
 * binder_node_inner_unlock() - Release node and inner locks
 * @node:         struct binder_node to release
 *
 * Release locks acquired via binder_node_inner_lock()
 */
#define binder_node_inner_unlock(node) _binder_node_inner_unlock(node, __LINE__)
static void
_binder_node_inner_unlock(struct binder_node *node, int line)
{
        struct binder_proc *proc = node->proc;

        binder_debug(BINDER_DEBUG_SPINLOCKS,
                     "%s: line=%d\n", __func__, line);
        if (proc)
                binder_inner_proc_unlock(proc);
        spin_unlock(&node->lock);
}
static bool binder_worklist_empty_ilocked(struct list_head *list)
{
        return list_empty(list);
}

/**
 * binder_worklist_empty() - Check if no items on the work list
 * @proc:       binder_proc associated with list
 * @list:       list to check
 *
 * Return: true if there are no items on list, else false
 */
static bool binder_worklist_empty(struct binder_proc *proc,
                                  struct list_head *list)
{
        bool ret;

        binder_inner_proc_lock(proc);
        ret = binder_worklist_empty_ilocked(list);
        binder_inner_proc_unlock(proc);
        return ret;
}
/**
 * binder_enqueue_work_ilocked() - Add an item to the work list
 * @work:         struct binder_work to add to list
 * @target_list:  list to add work to
 *
 * Adds the work to the specified list. Asserts that work
 * is not already on a list.
 *
 * Requires the proc->inner_lock to be held.
 */
static void
binder_enqueue_work_ilocked(struct binder_work *work,
                            struct list_head *target_list)
{
        BUG_ON(target_list == NULL);
        BUG_ON(work->entry.next && !list_empty(&work->entry));
        list_add_tail(&work->entry, target_list);
}

/**
 * binder_enqueue_deferred_thread_work_ilocked() - Add deferred thread work
 * @thread:       thread to queue work to
 * @work:         struct binder_work to add to list
 *
 * Adds the work to the todo list of the thread. Doesn't set the process_todo
 * flag, which means that (if it wasn't already set) the thread will go to
 * sleep without handling this work when it calls read.
 *
 * Requires the proc->inner_lock to be held.
 */
static void
binder_enqueue_deferred_thread_work_ilocked(struct binder_thread *thread,
                                            struct binder_work *work)
{
        binder_enqueue_work_ilocked(work, &thread->todo);
}

/**
 * binder_enqueue_thread_work_ilocked() - Add an item to the thread work list
 * @thread:       thread to queue work to
 * @work:         struct binder_work to add to list
 *
 * Adds the work to the todo list of the thread, and enables processing
 * of the todo queue.
 *
 * Requires the proc->inner_lock to be held.
 */
static void
binder_enqueue_thread_work_ilocked(struct binder_thread *thread,
                                   struct binder_work *work)
{
        binder_enqueue_work_ilocked(work, &thread->todo);
        thread->process_todo = true;
}

/**
 * binder_enqueue_thread_work() - Add an item to the thread work list
 * @thread:       thread to queue work to
 * @work:         struct binder_work to add to list
 *
 * Adds the work to the todo list of the thread, and enables processing
 * of the todo queue.
 */
static void
binder_enqueue_thread_work(struct binder_thread *thread,
                           struct binder_work *work)
{
        binder_inner_proc_lock(thread->proc);
        binder_enqueue_thread_work_ilocked(thread, work);
        binder_inner_proc_unlock(thread->proc);
}
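/*
 * Illustrative usage, following the kerneldoc above: most work is
 * queued with the process_todo flag set so the recipient wakes up and
 * handles it, e.g.
 *
 *	binder_enqueue_thread_work(thread, &t->work);
 *
 * The deferred variant is the exception: it queues work that should
 * only be processed alongside a later wake-up, not cause one itself.
 */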
static void
binder_dequeue_work_ilocked(struct binder_work *work)
{
        list_del_init(&work->entry);
}

/**
 * binder_dequeue_work() - Removes an item from the work list
 * @proc:         binder_proc associated with list
 * @work:         struct binder_work to remove from list
 *
 * Removes the specified work item from whatever list it is on.
 * Can safely be called if work is not on any list.
 */
static void
binder_dequeue_work(struct binder_proc *proc, struct binder_work *work)
{
        binder_inner_proc_lock(proc);
        binder_dequeue_work_ilocked(work);
        binder_inner_proc_unlock(proc);
}

static struct binder_work *binder_dequeue_work_head_ilocked(
                                        struct list_head *list)
{
        struct binder_work *w;

        w = list_first_entry_or_null(list, struct binder_work, entry);
        if (w)
                list_del_init(&w->entry);
        return w;
}

/**
 * binder_dequeue_work_head() - Dequeues the item at head of list
 * @proc:         binder_proc associated with list
 * @list:         list to dequeue head
 *
 * Removes the head of the list if there are items on the list
 *
 * Return: pointer to dequeued binder_work, NULL if list was empty
 */
static struct binder_work *binder_dequeue_work_head(
                                        struct binder_proc *proc,
                                        struct list_head *list)
{
        struct binder_work *w;

        binder_inner_proc_lock(proc);
        w = binder_dequeue_work_head_ilocked(list);
        binder_inner_proc_unlock(proc);
        return w;
}
static void
binder_defer_work(struct binder_proc *proc, enum binder_deferred_state defer);
static void binder_free_thread(struct binder_thread *thread);
static void binder_free_proc(struct binder_proc *proc);
static void binder_inc_node_tmpref_ilocked(struct binder_node *node);
static int task_get_unused_fd_flags(struct binder_proc *proc, int flags)
{
        unsigned long rlim_cur;
        unsigned long irqs;
        int ret;

        mutex_lock(&proc->files_lock);
        if (proc->files == NULL) {
                ret = -ESRCH;
                goto err;
        }
        if (!lock_task_sighand(proc->tsk, &irqs)) {
                ret = -EMFILE;
                goto err;
        }
        rlim_cur = task_rlimit(proc->tsk, RLIMIT_NOFILE);
        unlock_task_sighand(proc->tsk, &irqs);

        ret = __alloc_fd(proc->files, 0, rlim_cur, flags);
err:
        mutex_unlock(&proc->files_lock);
        return ret;
}
/*
 * copied from fd_install
 */
static void task_fd_install(
        struct binder_proc *proc, unsigned int fd, struct file *file)
{
        mutex_lock(&proc->files_lock);
        if (proc->files)
                __fd_install(proc->files, fd, file);
        mutex_unlock(&proc->files_lock);
}

/*
 * copied from sys_close
 */
static long task_close_fd(struct binder_proc *proc, unsigned int fd)
{
        int retval;

        mutex_lock(&proc->files_lock);
        if (proc->files == NULL) {
                retval = -ESRCH;
                goto err;
        }
        retval = __close_fd(proc->files, fd);
        /* can't restart close syscall because file table entry was cleared */
        if (unlikely(retval == -ERESTARTSYS ||
                     retval == -ERESTARTNOINTR ||
                     retval == -ERESTARTNOHAND ||
                     retval == -ERESTART_RESTARTBLOCK))
                retval = -EINTR;
err:
        mutex_unlock(&proc->files_lock);
        return retval;
}
static bool binder_has_work_ilocked(struct binder_thread *thread,
                                    bool do_proc_work)
{
        return thread->process_todo ||
                thread->looper_need_return ||
                (do_proc_work &&
                 !binder_worklist_empty_ilocked(&thread->proc->todo));
}

static bool binder_has_work(struct binder_thread *thread, bool do_proc_work)
{
        bool has_work;

        binder_inner_proc_lock(thread->proc);
        has_work = binder_has_work_ilocked(thread, do_proc_work);
        binder_inner_proc_unlock(thread->proc);

        return has_work;
}
static bool binder_available_for_proc_work_ilocked(struct binder_thread *thread)
{
        return !thread->transaction_stack &&
                binder_worklist_empty_ilocked(&thread->todo) &&
                (thread->looper & (BINDER_LOOPER_STATE_ENTERED |
                                   BINDER_LOOPER_STATE_REGISTERED));
}

static void binder_wakeup_poll_threads_ilocked(struct binder_proc *proc,
                                               bool sync)
{
        struct rb_node *n;
        struct binder_thread *thread;

        for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n)) {
                thread = rb_entry(n, struct binder_thread, rb_node);
                if (thread->looper & BINDER_LOOPER_STATE_POLL &&
                    binder_available_for_proc_work_ilocked(thread)) {
                        if (sync)
                                wake_up_interruptible_sync(&thread->wait);
                        else
                                wake_up_interruptible(&thread->wait);
                }
        }
}
/**
 * binder_select_thread_ilocked() - selects a thread for doing proc work.
 * @proc:       process to select a thread from
 *
 * Note that calling this function moves the thread off the waiting_threads
 * list, so it can only be woken up by the caller of this function, or a
 * signal. Therefore, callers *should* always wake up the thread this function
 * returns.
 *
 * Return:      If there's a thread currently waiting for process work,
 *              returns that thread. Otherwise returns NULL.
 */
static struct binder_thread *
binder_select_thread_ilocked(struct binder_proc *proc)
{
        struct binder_thread *thread;

        assert_spin_locked(&proc->inner_lock);
        thread = list_first_entry_or_null(&proc->waiting_threads,
                                          struct binder_thread,
                                          waiting_thread_node);

        if (thread)
                list_del_init(&thread->waiting_thread_node);

        return thread;
}
/**
 * binder_wakeup_thread_ilocked() - wakes up a thread for doing proc work.
 * @proc:       process to wake up a thread in
 * @thread:     specific thread to wake-up (may be NULL)
 * @sync:       whether to do a synchronous wake-up
 *
 * This function wakes up a thread in the @proc process.
 * The caller may provide a specific thread to wake-up in
 * the @thread parameter. If @thread is NULL, this function
 * will wake up threads that have called poll().
 *
 * Note that for this function to work as expected, callers
 * should first call binder_select_thread() to find a thread
 * to handle the work (if they don't have a thread already),
 * and pass the result into the @thread parameter.
 */
static void binder_wakeup_thread_ilocked(struct binder_proc *proc,
                                         struct binder_thread *thread,
                                         bool sync)
{
        assert_spin_locked(&proc->inner_lock);

        if (thread) {
                if (sync)
                        wake_up_interruptible_sync(&thread->wait);
                else
                        wake_up_interruptible(&thread->wait);
                return;
        }

        /* Didn't find a thread waiting for proc work; this can happen
         * in two scenarios:
         * 1. All threads are busy handling transactions
         *    In that case, one of those threads should call back into
         *    the kernel driver soon and pick up this work.
         * 2. Threads are using the (e)poll interface, in which case
         *    they may be blocked on the waitqueue without having been
         *    added to waiting_threads. For this case, we just iterate
         *    over all threads not handling transaction work, and
         *    wake them all up. We wake all because we don't know whether
         *    a thread that called into (e)poll is handling non-binder
         *    work currently.
         */
        binder_wakeup_poll_threads_ilocked(proc, sync);
}

static void binder_wakeup_proc_ilocked(struct binder_proc *proc)
{
        struct binder_thread *thread = binder_select_thread_ilocked(proc);

        binder_wakeup_thread_ilocked(proc, thread, /* sync = */false);
}
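/*
 * Illustrative pattern (restating the binder_wakeup_thread_ilocked()
 * kerneldoc above): callers that are about to queue proc work first
 * pick a waiting thread, then wake it:
 *
 *	binder_inner_proc_lock(proc);
 *	thread = binder_select_thread_ilocked(proc);
 *	... queue the work ...
 *	binder_wakeup_thread_ilocked(proc, thread, sync);
 *	binder_inner_proc_unlock(proc);
 *
 * binder_wakeup_proc_ilocked() is the shorthand for the simple
 * asynchronous case, combining select and wake with sync=false.
 */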
static void binder_set_nice(long nice)
{
        long min_nice;

        if (can_nice(current, nice)) {
                set_user_nice(current, nice);
                return;
        }
        min_nice = rlimit_to_nice(rlimit(RLIMIT_NICE));
        binder_debug(BINDER_DEBUG_PRIORITY_CAP,
                     "%d: nice value %ld not allowed use %ld instead\n",
                     current->pid, nice, min_nice);
        set_user_nice(current, min_nice);
        if (min_nice <= MAX_NICE)
                return;
        binder_user_error("%d RLIMIT_NICE not set\n", current->pid);
}
static struct binder_node *binder_get_node_ilocked(struct binder_proc *proc,
                                                   binder_uintptr_t ptr)
{
        struct rb_node *n = proc->nodes.rb_node;
        struct binder_node *node;

        assert_spin_locked(&proc->inner_lock);

        while (n) {
                node = rb_entry(n, struct binder_node, rb_node);

                if (ptr < node->ptr)
                        n = n->rb_left;
                else if (ptr > node->ptr)
                        n = n->rb_right;
                else {
                        /*
                         * take an implicit weak reference
                         * to ensure node stays alive until
                         * call to binder_put_node()
                         */
                        binder_inc_node_tmpref_ilocked(node);
                        return node;
                }
        }
        return NULL;
}

static struct binder_node *binder_get_node(struct binder_proc *proc,
                                           binder_uintptr_t ptr)
{
        struct binder_node *node;

        binder_inner_proc_lock(proc);
        node = binder_get_node_ilocked(proc, ptr);
        binder_inner_proc_unlock(proc);
        return node;
}
static struct binder_node *binder_init_node_ilocked(
                                struct binder_proc *proc,
                                struct binder_node *new_node,
                                struct flat_binder_object *fp)
{
        struct rb_node **p = &proc->nodes.rb_node;
        struct rb_node *parent = NULL;
        struct binder_node *node;
        binder_uintptr_t ptr = fp ? fp->binder : 0;
        binder_uintptr_t cookie = fp ? fp->cookie : 0;
        __u32 flags = fp ? fp->flags : 0;

        assert_spin_locked(&proc->inner_lock);

        while (*p) {
                parent = *p;
                node = rb_entry(parent, struct binder_node, rb_node);

                if (ptr < node->ptr)
                        p = &(*p)->rb_left;
                else if (ptr > node->ptr)
                        p = &(*p)->rb_right;
                else {
                        /*
                         * A matching node is already in
                         * the rb tree. Abandon the init
                         * and return it.
                         */
                        binder_inc_node_tmpref_ilocked(node);
                        return node;
                }
        }
        node = new_node;
        binder_stats_created(BINDER_STAT_NODE);
        node->tmp_refs++;
        rb_link_node(&node->rb_node, parent, p);
        rb_insert_color(&node->rb_node, &proc->nodes);
        node->debug_id = atomic_inc_return(&binder_last_id);
        node->proc = proc;
        node->ptr = ptr;
        node->cookie = cookie;
        node->work.type = BINDER_WORK_NODE;
        node->min_priority = flags & FLAT_BINDER_FLAG_PRIORITY_MASK;
        node->accept_fds = !!(flags & FLAT_BINDER_FLAG_ACCEPTS_FDS);
        spin_lock_init(&node->lock);
        INIT_LIST_HEAD(&node->work.entry);
        INIT_LIST_HEAD(&node->async_todo);
        binder_debug(BINDER_DEBUG_INTERNAL_REFS,
                     "%d:%d node %d u%016llx c%016llx created\n",
                     proc->pid, current->pid, node->debug_id,
                     (u64)node->ptr, (u64)node->cookie);

        return node;
}

static struct binder_node *binder_new_node(struct binder_proc *proc,
                                           struct flat_binder_object *fp)
{
        struct binder_node *node;
        struct binder_node *new_node = kzalloc(sizeof(*node), GFP_KERNEL);

        if (!new_node)
                return NULL;
        binder_inner_proc_lock(proc);
        node = binder_init_node_ilocked(proc, new_node, fp);
        binder_inner_proc_unlock(proc);
        if (node != new_node)
                /*
                 * The node was already added by another thread
                 */
                kfree(new_node);

        return node;
}
static void binder_free_node(struct binder_node *node)
{
        kfree(node);
        binder_stats_deleted(BINDER_STAT_NODE);
}
static int binder_inc_node_nilocked(struct binder_node *node, int strong,
                                    int internal,
                                    struct list_head *target_list)
{
        struct binder_proc *proc = node->proc;

        assert_spin_locked(&node->lock);
        if (proc)
                assert_spin_locked(&proc->inner_lock);
        if (strong) {
                if (internal) {
                        if (target_list == NULL &&
                            node->internal_strong_refs == 0 &&
                            !(node->proc &&
                              node == node->proc->context->binder_context_mgr_node &&
                              node->has_strong_ref)) {
                                pr_err("invalid inc strong node for %d\n",
                                       node->debug_id);
                                return -EINVAL;
                        }
                        node->internal_strong_refs++;
                } else
                        node->local_strong_refs++;
                if (!node->has_strong_ref && target_list) {
                        binder_dequeue_work_ilocked(&node->work);
                        /*
                         * Note: this function is the only place where we queue
                         * directly to a thread->todo without using the
                         * corresponding binder_enqueue_thread_work() helper
                         * functions; in this case it's ok to not set the
                         * process_todo flag, since we know this node work will
                         * always be followed by other work that starts queue
                         * processing: in case of synchronous transactions, a
                         * BR_REPLY or BR_ERROR; in case of oneway
                         * transactions, a BR_TRANSACTION_COMPLETE.
                         */
                        binder_enqueue_work_ilocked(&node->work, target_list);
                }
        } else {
                if (!internal)
                        node->local_weak_refs++;
                if (!node->has_weak_ref && list_empty(&node->work.entry)) {
                        if (target_list == NULL) {
                                pr_err("invalid inc weak node for %d\n",
                                       node->debug_id);
                                return -EINVAL;
                        }
                        /*
                         * See comment above
                         */
                        binder_enqueue_work_ilocked(&node->work, target_list);
                }
        }
        return 0;
}

static int binder_inc_node(struct binder_node *node, int strong, int internal,
                           struct list_head *target_list)
{
        int ret;

        binder_node_inner_lock(node);
        ret = binder_inc_node_nilocked(node, strong, internal, target_list);
        binder_node_inner_unlock(node);

        return ret;
}
static bool binder_dec_node_nilocked(struct binder_node *node,
                                     int strong, int internal)
{
        struct binder_proc *proc = node->proc;

        assert_spin_locked(&node->lock);
        if (proc)
                assert_spin_locked(&proc->inner_lock);
        if (strong) {
                if (internal)
                        node->internal_strong_refs--;
                else
                        node->local_strong_refs--;
                if (node->local_strong_refs || node->internal_strong_refs)
                        return false;
        } else {
                if (!internal)
                        node->local_weak_refs--;
                if (node->local_weak_refs || node->tmp_refs ||
                    !hlist_empty(&node->refs))
                        return false;
        }

        if (proc && (node->has_strong_ref || node->has_weak_ref)) {
                if (list_empty(&node->work.entry)) {
                        binder_enqueue_work_ilocked(&node->work, &proc->todo);
                        binder_wakeup_proc_ilocked(proc);
                }
        } else {
                if (hlist_empty(&node->refs) && !node->local_strong_refs &&
                    !node->local_weak_refs && !node->tmp_refs) {
                        if (proc) {
                                binder_dequeue_work_ilocked(&node->work);
                                rb_erase(&node->rb_node, &proc->nodes);
                                binder_debug(BINDER_DEBUG_INTERNAL_REFS,
                                             "refless node %d deleted\n",
                                             node->debug_id);
                        } else {
                                BUG_ON(!list_empty(&node->work.entry));
                                spin_lock(&binder_dead_nodes_lock);
                                /*
                                 * tmp_refs could have changed so
                                 * check it again
                                 */
                                if (node->tmp_refs) {
                                        spin_unlock(&binder_dead_nodes_lock);
                                        return false;
                                }
                                hlist_del(&node->dead_node);
                                spin_unlock(&binder_dead_nodes_lock);
                                binder_debug(BINDER_DEBUG_INTERNAL_REFS,
                                             "dead node %d deleted\n",
                                             node->debug_id);
                        }
                        return true;
                }
        }
        return false;
}

static void binder_dec_node(struct binder_node *node, int strong, int internal)
{
        bool free_node;

        binder_node_inner_lock(node);
        free_node = binder_dec_node_nilocked(node, strong, internal);
        binder_node_inner_unlock(node);
        if (free_node)
                binder_free_node(node);
}
static void binder_inc_node_tmpref_ilocked(struct binder_node *node)
{
        /*
         * No call to binder_inc_node() is needed since we
         * don't need to inform userspace of any changes to
         * tmp_refs
         */
        node->tmp_refs++;
}

/**
 * binder_inc_node_tmpref() - take a temporary reference on node
 * @node:       node to reference
 *
 * Take reference on node to prevent the node from being freed
 * while referenced only by a local variable. The inner lock is
 * needed to serialize with the node work on the queue (which
 * isn't needed after the node is dead). If the node is dead
 * (node->proc is NULL), use binder_dead_nodes_lock to protect
 * node->tmp_refs against dead-node-only cases where the node
 * lock cannot be acquired (eg traversing the dead node list to
 * print nodes)
 */
static void binder_inc_node_tmpref(struct binder_node *node)
{
        binder_node_lock(node);
        if (node->proc)
                binder_inner_proc_lock(node->proc);
        else
                spin_lock(&binder_dead_nodes_lock);
        binder_inc_node_tmpref_ilocked(node);
        if (node->proc)
                binder_inner_proc_unlock(node->proc);
        else
                spin_unlock(&binder_dead_nodes_lock);
        binder_node_unlock(node);
}
/**
 * binder_dec_node_tmpref() - remove a temporary reference on node
 * @node:       node to reference
 *
 * Release temporary reference on node taken via binder_inc_node_tmpref()
 */
static void binder_dec_node_tmpref(struct binder_node *node)
{
        bool free_node;

        binder_node_inner_lock(node);
        if (!node->proc)
                spin_lock(&binder_dead_nodes_lock);
        node->tmp_refs--;
        BUG_ON(node->tmp_refs < 0);
        if (!node->proc)
                spin_unlock(&binder_dead_nodes_lock);
        /*
         * Call binder_dec_node() to check if all refcounts are 0
         * and cleanup is needed. Calling with strong=0 and internal=1
         * causes no actual reference to be released in binder_dec_node().
         * If that changes, a change is needed here too.
         */
        free_node = binder_dec_node_nilocked(node, 0, 1);
        binder_node_inner_unlock(node);
        if (free_node)
                binder_free_node(node);
}

static void binder_put_node(struct binder_node *node)
{
        binder_dec_node_tmpref(node);
}
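/*
 * Illustrative lifetime pattern for temporary node references: lookup
 * helpers such as binder_get_node() take a tmp_ref on the node they
 * return, so callers bracket their use of the pointer like
 *
 *	node = binder_get_node(proc, ptr);
 *	if (node) {
 *		... use node ...
 *		binder_put_node(node);
 *	}
 *
 * keeping the node alive even if it is removed from proc->nodes in
 * the meantime.
 */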
static struct binder_ref *binder_get_ref_olocked(struct binder_proc *proc,
                                                 u32 desc, bool need_strong_ref)
{
        struct rb_node *n = proc->refs_by_desc.rb_node;
        struct binder_ref *ref;

        while (n) {
                ref = rb_entry(n, struct binder_ref, rb_node_desc);

                if (desc < ref->data.desc) {
                        n = n->rb_left;
                } else if (desc > ref->data.desc) {
                        n = n->rb_right;
                } else if (need_strong_ref && !ref->data.strong) {
                        binder_user_error("tried to use weak ref as strong ref\n");
                        return NULL;
                } else {
                        return ref;
                }
        }
        return NULL;
}
/**
 * binder_get_ref_for_node_olocked() - get the ref associated with given node
 * @proc:       binder_proc that owns the ref
 * @node:       binder_node of target
 * @new_ref:    newly allocated binder_ref to be initialized or %NULL
 *
 * Look up the ref for the given node and return it if it exists
 *
 * If it doesn't exist and the caller provides a newly allocated
 * ref, initialize the fields of the newly allocated ref and insert
 * into the given proc rb_trees and node refs list.
 *
 * Return:      the ref for node. It is possible that another thread
 *              allocated/initialized the ref first in which case the
 *              returned ref would be different than the passed-in
 *              new_ref. new_ref must be kfree'd by the caller in
 *              this case.
 */
static struct binder_ref *binder_get_ref_for_node_olocked(
                                        struct binder_proc *proc,
                                        struct binder_node *node,
                                        struct binder_ref *new_ref)
{
        struct binder_context *context = proc->context;
        struct rb_node **p = &proc->refs_by_node.rb_node;
        struct rb_node *parent = NULL;
        struct binder_ref *ref;
        struct rb_node *n;

        while (*p) {
                parent = *p;
                ref = rb_entry(parent, struct binder_ref, rb_node_node);

                if (node < ref->node)
                        p = &(*p)->rb_left;
                else if (node > ref->node)
                        p = &(*p)->rb_right;
                else
                        return ref;
        }
        if (!new_ref)
                return NULL;

        binder_stats_created(BINDER_STAT_REF);
        new_ref->data.debug_id = atomic_inc_return(&binder_last_id);
        new_ref->proc = proc;
        new_ref->node = node;
        rb_link_node(&new_ref->rb_node_node, parent, p);
        rb_insert_color(&new_ref->rb_node_node, &proc->refs_by_node);

        new_ref->data.desc = (node == context->binder_context_mgr_node) ? 0 : 1;
        for (n = rb_first(&proc->refs_by_desc); n != NULL; n = rb_next(n)) {
                ref = rb_entry(n, struct binder_ref, rb_node_desc);
                if (ref->data.desc > new_ref->data.desc)
                        break;
                new_ref->data.desc = ref->data.desc + 1;
        }

        p = &proc->refs_by_desc.rb_node;
        while (*p) {
                parent = *p;
                ref = rb_entry(parent, struct binder_ref, rb_node_desc);

                if (new_ref->data.desc < ref->data.desc)
                        p = &(*p)->rb_left;
                else if (new_ref->data.desc > ref->data.desc)
                        p = &(*p)->rb_right;
                else
                        BUG();
        }
        rb_link_node(&new_ref->rb_node_desc, parent, p);
        rb_insert_color(&new_ref->rb_node_desc, &proc->refs_by_desc);

        binder_node_lock(node);
        hlist_add_head(&new_ref->node_entry, &node->refs);

        binder_debug(BINDER_DEBUG_INTERNAL_REFS,
                     "%d new ref %d desc %d for node %d\n",
                     proc->pid, new_ref->data.debug_id, new_ref->data.desc,
                     node->debug_id);
        binder_node_unlock(node);
        return new_ref;
}
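/*
 * Note on the descriptor assignment above: handle 0 is reserved for
 * refs to the context manager node; every other ref gets the smallest
 * free descriptor >= 1, found by walking refs_by_desc in ascending
 * order before the new ref is inserted into that tree.
 */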
static void binder_cleanup_ref_olocked(struct binder_ref *ref)
{
        bool delete_node = false;

        binder_debug(BINDER_DEBUG_INTERNAL_REFS,
                     "%d delete ref %d desc %d for node %d\n",
                     ref->proc->pid, ref->data.debug_id, ref->data.desc,
                     ref->node->debug_id);

        rb_erase(&ref->rb_node_desc, &ref->proc->refs_by_desc);
        rb_erase(&ref->rb_node_node, &ref->proc->refs_by_node);

        binder_node_inner_lock(ref->node);
        if (ref->data.strong)
                binder_dec_node_nilocked(ref->node, 1, 1);

        hlist_del(&ref->node_entry);
        delete_node = binder_dec_node_nilocked(ref->node, 0, 1);
        binder_node_inner_unlock(ref->node);
        /*
         * Clear ref->node unless we want the caller to free the node
         */
        if (!delete_node) {
                /*
                 * The caller uses ref->node to determine
                 * whether the node needs to be freed. Clear
                 * it since the node is still alive.
                 */
                ref->node = NULL;
        }

        if (ref->death) {
                binder_debug(BINDER_DEBUG_DEAD_BINDER,
                             "%d delete ref %d desc %d has death notification\n",
                             ref->proc->pid, ref->data.debug_id,
                             ref->data.desc);
                binder_dequeue_work(ref->proc, &ref->death->work);
                binder_stats_deleted(BINDER_STAT_DEATH);
        }
        binder_stats_deleted(BINDER_STAT_REF);
}
/**
 * binder_inc_ref_olocked() - increment the ref for given handle
 * @ref:         ref to be incremented
 * @strong:      if true, strong increment, else weak
 * @target_list: list to queue node work on
 *
 * Increment the ref. @ref->proc->outer_lock must be held on entry
 *
 * Return: 0, if successful, else errno
 */
static int binder_inc_ref_olocked(struct binder_ref *ref, int strong,
                                  struct list_head *target_list)
{
        int ret;

        if (strong) {
                if (ref->data.strong == 0) {
                        ret = binder_inc_node(ref->node, 1, 1, target_list);
                        if (ret)
                                return ret;
                }
                ref->data.strong++;
        } else {
                if (ref->data.weak == 0) {
                        ret = binder_inc_node(ref->node, 0, 1, target_list);
                        if (ret)
                                return ret;
                }
                ref->data.weak++;
        }
        return 0;
}
/**
 * binder_dec_ref_olocked() - dec the ref for given handle
 * @ref:        ref to be decremented
 * @strong:     if true, strong decrement, else weak
 *
 * Decrement the ref.
 *
 * Return: true if ref is cleaned up and ready to be freed
 */
static bool binder_dec_ref_olocked(struct binder_ref *ref, int strong)
{
        if (strong) {
                if (ref->data.strong == 0) {
                        binder_user_error("%d invalid dec strong, ref %d desc %d s %d w %d\n",
                                          ref->proc->pid, ref->data.debug_id,
                                          ref->data.desc, ref->data.strong,
                                          ref->data.weak);
                        return false;
                }
                ref->data.strong--;
                if (ref->data.strong == 0)
                        binder_dec_node(ref->node, strong, 1);
        } else {
                if (ref->data.weak == 0) {
                        binder_user_error("%d invalid dec weak, ref %d desc %d s %d w %d\n",
                                          ref->proc->pid, ref->data.debug_id,
                                          ref->data.desc, ref->data.strong,
                                          ref->data.weak);
                        return false;
                }
                ref->data.weak--;
        }
        if (ref->data.strong == 0 && ref->data.weak == 0) {
                binder_cleanup_ref_olocked(ref);
                return true;
        }
        return false;
}
/**
 * binder_get_node_from_ref() - get the node from the given proc/desc
 * @proc:       proc containing the ref
 * @desc:       the handle associated with the ref
 * @need_strong_ref: if true, only return node if ref is strong
 * @rdata:      the id/refcount data for the ref
 *
 * Given a proc and ref handle, return the associated binder_node
 *
 * Return: a binder_node, or NULL if the ref was not found or was
 * weak when a strong reference was required
 */
static struct binder_node *binder_get_node_from_ref(
                struct binder_proc *proc,
                u32 desc, bool need_strong_ref,
                struct binder_ref_data *rdata)
{
        struct binder_node *node;
        struct binder_ref *ref;

        binder_proc_lock(proc);
        ref = binder_get_ref_olocked(proc, desc, need_strong_ref);
        if (!ref)
                goto err_no_ref;
        node = ref->node;
        /*
         * Take an implicit reference on the node to ensure
         * it stays alive until the call to binder_put_node()
         */
        binder_inc_node_tmpref(node);
        if (rdata)
                *rdata = ref->data;
        binder_proc_unlock(proc);

        return node;

err_no_ref:
        binder_proc_unlock(proc);
        return NULL;
}
/**
 * binder_free_ref() - free the binder_ref
 * @ref:        ref to free
 *
 * Free the binder_ref. Free the binder_node indicated by ref->node
 * (if non-NULL) and the binder_ref_death indicated by ref->death.
 */
static void binder_free_ref(struct binder_ref *ref)
{
        if (ref->node)
                binder_free_node(ref->node);
        kfree(ref->death);
        kfree(ref);
}
/**
 * binder_update_ref_for_handle() - inc/dec the ref for given handle
 * @proc:       proc containing the ref
 * @desc:       the handle associated with the ref
 * @increment:  true=inc reference, false=dec reference
 * @strong:     true=strong reference, false=weak reference
 * @rdata:      the id/refcount data for the ref
 *
 * Given a proc and ref handle, increment or decrement the ref
 * according to "increment" arg.
 *
 * Return: 0 if successful, else errno
 */
static int binder_update_ref_for_handle(struct binder_proc *proc,
                uint32_t desc, bool increment, bool strong,
                struct binder_ref_data *rdata)
{
        int ret = 0;
        struct binder_ref *ref;
        bool delete_ref = false;

        binder_proc_lock(proc);
        ref = binder_get_ref_olocked(proc, desc, strong);
        if (!ref) {
                ret = -EINVAL;
                goto err_no_ref;
        }
        if (increment)
                ret = binder_inc_ref_olocked(ref, strong, NULL);
        else
                delete_ref = binder_dec_ref_olocked(ref, strong);

        if (rdata)
                *rdata = ref->data;
        binder_proc_unlock(proc);

        if (delete_ref)
                binder_free_ref(ref);
        return ret;

err_no_ref:
        binder_proc_unlock(proc);
        return ret;
}
/**
 * binder_dec_ref_for_handle() - dec the ref for given handle
 * @proc:       proc containing the ref
 * @desc:       the handle associated with the ref
 * @strong:     true=strong reference, false=weak reference
 * @rdata:      the id/refcount data for the ref
 *
 * Just calls binder_update_ref_for_handle() to decrement the ref.
 *
 * Return: 0 if successful, else errno
 */
static int binder_dec_ref_for_handle(struct binder_proc *proc,
                uint32_t desc, bool strong, struct binder_ref_data *rdata)
{
        return binder_update_ref_for_handle(proc, desc, false, strong, rdata);
}
/**
 * binder_inc_ref_for_node() - increment the ref for given proc/node
 * @proc:        proc containing the ref
 * @node:        target node
 * @strong:      true=strong reference, false=weak reference
 * @target_list: worklist to use if node is incremented
 * @rdata:       the id/refcount data for the ref
 *
 * Given a proc and node, increment the ref. Create the ref if it
 * doesn't already exist
 *
 * Return: 0 if successful, else errno
 */
static int binder_inc_ref_for_node(struct binder_proc *proc,
                        struct binder_node *node,
                        bool strong,
                        struct list_head *target_list,
                        struct binder_ref_data *rdata)
{
        struct binder_ref *ref;
        struct binder_ref *new_ref = NULL;
        int ret = 0;

        binder_proc_lock(proc);
        ref = binder_get_ref_for_node_olocked(proc, node, NULL);
        if (!ref) {
                binder_proc_unlock(proc);
                new_ref = kzalloc(sizeof(*ref), GFP_KERNEL);
                if (!new_ref)
                        return -ENOMEM;
                binder_proc_lock(proc);
                ref = binder_get_ref_for_node_olocked(proc, node, new_ref);
        }
        ret = binder_inc_ref_olocked(ref, strong, target_list);
        *rdata = ref->data;
        binder_proc_unlock(proc);
        if (new_ref && ref != new_ref)
                /*
                 * Another thread created the ref first so
                 * free the one we allocated
                 */
                kfree(new_ref);
        return ret;
}
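/*
 * The unlock/alloc/relock dance above is deliberate: new_ref is
 * allocated with GFP_KERNEL, which may sleep, so the allocation cannot
 * happen while holding the proc->outer_lock spinlock. The second
 * binder_get_ref_for_node_olocked() call handles the race where
 * another thread created the ref while the lock was dropped.
 */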
static void binder_pop_transaction_ilocked(struct binder_thread *target_thread,
                                           struct binder_transaction *t)
{
        BUG_ON(!target_thread);
        assert_spin_locked(&target_thread->proc->inner_lock);
        BUG_ON(target_thread->transaction_stack != t);
        BUG_ON(target_thread->transaction_stack->from != target_thread);
        target_thread->transaction_stack =
                target_thread->transaction_stack->from_parent;
        t->from = NULL;
}
/**
 * binder_thread_dec_tmpref() - decrement thread->tmp_ref
 * @thread:     thread to decrement
 *
 * A thread needs to be kept alive while being used to create or
 * handle a transaction. binder_get_txn_from() is used to safely
 * extract t->from from a binder_transaction and keep the thread
 * indicated by t->from from being freed. When done with that
 * binder_thread, this function is called to decrement the
 * tmp_ref and free if appropriate (thread has been released
 * and no transaction being processed by the driver)
 */
static void binder_thread_dec_tmpref(struct binder_thread *thread)
{
        /*
         * atomic is used to protect the counter value while
         * it cannot reach zero or thread->is_dead is false
         */
        binder_inner_proc_lock(thread->proc);
        atomic_dec(&thread->tmp_ref);
        if (thread->is_dead && !atomic_read(&thread->tmp_ref)) {
                binder_inner_proc_unlock(thread->proc);
                binder_free_thread(thread);
                return;
        }
        binder_inner_proc_unlock(thread->proc);
}
/**
 * binder_proc_dec_tmpref() - decrement proc->tmp_ref
 * @proc:       proc to decrement
 *
 * A binder_proc needs to be kept alive while being used to create or
 * handle a transaction. proc->tmp_ref is incremented when
 * creating a new transaction or the binder_proc is currently in-use
 * by threads that are being released. When done with the binder_proc,
 * this function is called to decrement the counter and free the
 * proc if appropriate (proc has been released, all threads have
 * been released and not currently in use to process a transaction).
 */
static void binder_proc_dec_tmpref(struct binder_proc *proc)
{
        binder_inner_proc_lock(proc);
        proc->tmp_ref--;
        if (proc->is_dead && RB_EMPTY_ROOT(&proc->threads) &&
            !proc->tmp_ref) {
                binder_inner_proc_unlock(proc);
                binder_free_proc(proc);
                return;
        }
        binder_inner_proc_unlock(proc);
}
/**
 * binder_get_txn_from() - safely extract the "from" thread in transaction
 * @t:  binder transaction for t->from
 *
 * Atomically return the "from" thread and increment the tmp_ref
 * count for the thread to ensure it stays alive until
 * binder_thread_dec_tmpref() is called.
 *
 * Return: the value of t->from
 */
static struct binder_thread *binder_get_txn_from(
                struct binder_transaction *t)
{
        struct binder_thread *from;

        spin_lock(&t->lock);
        from = t->from;
        if (from)
                atomic_inc(&from->tmp_ref);
        spin_unlock(&t->lock);
        return from;
}
1930 * binder_get_txn_from_and_acq_inner() - get t->from and acquire inner lock
1931 * @t: binder transaction for t->from
1933 * Same as binder_get_txn_from() except it also acquires the proc->inner_lock
1934 * to guarantee that the thread cannot be released while operating on it.
1935 * The caller must call binder_inner_proc_unlock() to release the inner lock
1936 * as well as call binder_dec_thread_txn() to release the reference.
1938 * Return: the value of t->from
1940 static struct binder_thread *binder_get_txn_from_and_acq_inner(
1941 struct binder_transaction *t)
1943 struct binder_thread *from;
1945 from = binder_get_txn_from(t);
1946 if (!from)
1947 return NULL;
1948 binder_inner_proc_lock(from->proc);
1949 if (t->from) {
1950 BUG_ON(from != t->from);
1951 return from;
1953 binder_inner_proc_unlock(from->proc);
1954 binder_thread_dec_tmpref(from);
1955 	return NULL;
1956 }
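/*
 * Editor's note: the helper above layers two checks: binder_get_txn_from()
 * pins t->from with a tmp_ref under t->lock, then the inner lock is taken
 * and t->from is re-read; if it was cleared in the window, the pin is
 * dropped and NULL is returned. A hedged sketch of that re-check shape,
 * with hypothetical stand-in types and helpers:
 */
#if 0 /* illustrative sketch, not driver code */
#include <pthread.h>
#include <stddef.h>

struct thr { pthread_mutex_t proc_lock; };
struct txn { struct thr *from; };

extern struct thr *txn_get_from(struct txn *t);	/* pins with a tmp_ref */
extern void thr_dec_tmpref(struct thr *thr);

static struct thr *txn_get_from_and_lock(struct txn *t)
{
	struct thr *from = txn_get_from(t);

	if (!from)
		return NULL;
	pthread_mutex_lock(&from->proc_lock);
	if (t->from == from)
		return from;	/* caller unlocks and unpins */
	/* t->from was cleared in the window: undo and report failure */
	pthread_mutex_unlock(&from->proc_lock);
	thr_dec_tmpref(from);
	return NULL;
}
#endif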
1958 static void binder_free_transaction(struct binder_transaction *t)
1960 if (t->buffer)
1961 t->buffer->transaction = NULL;
1962 kfree(t);
1963 binder_stats_deleted(BINDER_STAT_TRANSACTION);
1966 static void binder_send_failed_reply(struct binder_transaction *t,
1967 uint32_t error_code)
1969 struct binder_thread *target_thread;
1970 struct binder_transaction *next;
1972 BUG_ON(t->flags & TF_ONE_WAY);
1973 while (1) {
1974 target_thread = binder_get_txn_from_and_acq_inner(t);
1975 if (target_thread) {
1976 binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
1977 "send failed reply for transaction %d to %d:%d\n",
1978 t->debug_id,
1979 target_thread->proc->pid,
1980 target_thread->pid);
1982 binder_pop_transaction_ilocked(target_thread, t);
1983 if (target_thread->reply_error.cmd == BR_OK) {
1984 target_thread->reply_error.cmd = error_code;
1985 binder_enqueue_thread_work_ilocked(
1986 target_thread,
1987 &target_thread->reply_error.work);
1988 wake_up_interruptible(&target_thread->wait);
1989 } else {
1990 			/*
1991 			 * Cannot get here for normal operation, but
1992 			 * we can if multiple synchronous transactions
1993 			 * are sent without blocking for responses.
1994 			 * Just ignore the 2nd error in this case.
1995 			 */
1996 			pr_warn("Unexpected reply error: %u\n",
1997 target_thread->reply_error.cmd);
1999 binder_inner_proc_unlock(target_thread->proc);
2000 binder_thread_dec_tmpref(target_thread);
2001 binder_free_transaction(t);
2002 return;
2004 next = t->from_parent;
2006 binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
2007 "send failed reply for transaction %d, target dead\n",
2008 t->debug_id);
2010 binder_free_transaction(t);
2011 if (next == NULL) {
2012 binder_debug(BINDER_DEBUG_DEAD_BINDER,
2013 "reply failed, no target thread at root\n");
2014 return;
2016 t = next;
2017 binder_debug(BINDER_DEBUG_DEAD_BINDER,
2018 "reply failed, no target thread -- retry %d\n",
2019 t->debug_id);
2024 * binder_cleanup_transaction() - cleans up undelivered transaction
2025 * @t: transaction that needs to be cleaned up
2026 * @reason: reason the transaction wasn't delivered
2027 * @error_code: error to return to caller (if synchronous call)
2029 static void binder_cleanup_transaction(struct binder_transaction *t,
2030 const char *reason,
2031 uint32_t error_code)
2033 if (t->buffer->target_node && !(t->flags & TF_ONE_WAY)) {
2034 binder_send_failed_reply(t, error_code);
2035 } else {
2036 binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
2037 "undelivered transaction %d, %s\n",
2038 t->debug_id, reason);
2039 binder_free_transaction(t);
2044 * binder_validate_object() - checks for a valid metadata object in a buffer.
2045 * @buffer: binder_buffer that we're parsing.
2046 * @offset: offset in the buffer at which to validate an object.
2048 * Return: If there's a valid metadata object at @offset in @buffer, the
2049 * size of that object. Otherwise, it returns zero.
2051 static size_t binder_validate_object(struct binder_buffer *buffer, u64 offset)
2053 /* Check if we can read a header first */
2054 struct binder_object_header *hdr;
2055 size_t object_size = 0;
2057 if (buffer->data_size < sizeof(*hdr) ||
2058 offset > buffer->data_size - sizeof(*hdr) ||
2059 !IS_ALIGNED(offset, sizeof(u32)))
2060 return 0;
2062 /* Ok, now see if we can read a complete object. */
2063 hdr = (struct binder_object_header *)(buffer->data + offset);
2064 switch (hdr->type) {
2065 case BINDER_TYPE_BINDER:
2066 case BINDER_TYPE_WEAK_BINDER:
2067 case BINDER_TYPE_HANDLE:
2068 case BINDER_TYPE_WEAK_HANDLE:
2069 object_size = sizeof(struct flat_binder_object);
2070 break;
2071 case BINDER_TYPE_FD:
2072 object_size = sizeof(struct binder_fd_object);
2073 break;
2074 case BINDER_TYPE_PTR:
2075 object_size = sizeof(struct binder_buffer_object);
2076 break;
2077 case BINDER_TYPE_FDA:
2078 object_size = sizeof(struct binder_fd_array_object);
2079 break;
2080 default:
2081 return 0;
2083 if (offset <= buffer->data_size - object_size &&
2084 buffer->data_size >= object_size)
2085 return object_size;
2086 else
2087 		return 0;
2088 }
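/*
 * Editor's note: the validation above boils down to three checks: the
 * object header must fit in the buffer, the offset must be u32-aligned,
 * and the full object (sized by its type) must also fit. A standalone
 * sketch of the same checks over a plain byte buffer, with the header
 * and object sizes passed in rather than derived from a type switch:
 */
#if 0 /* illustrative sketch, not driver code */
#include <stddef.h>
#include <stdint.h>

static size_t validate_object(const uint8_t *buf, size_t buf_size,
			      size_t offset, size_t hdr_size,
			      size_t obj_size)
{
	if (buf_size < hdr_size ||
	    offset > buf_size - hdr_size ||
	    (offset & (sizeof(uint32_t) - 1)))
		return 0;	/* header would not fit, or misaligned */
	if (obj_size <= buf_size && offset <= buf_size - obj_size)
		return obj_size;	/* whole object fits */
	return 0;
}
#endif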
2091 * binder_validate_ptr() - validates binder_buffer_object in a binder_buffer.
2092 * @b: binder_buffer containing the object
2093 * @index: index in offset array at which the binder_buffer_object is
2094 * located
2095 * @start: points to the start of the offset array
2096 * @num_valid: the number of valid offsets in the offset array
2098 * Return: If @index is within the valid range of the offset array
2099 * described by @start and @num_valid, and if there's a valid
2100 * binder_buffer_object at the offset found in index @index
2101 * of the offset array, that object is returned. Otherwise,
2102 * %NULL is returned.
2103 * Note that the offset found in index @index itself is not
2104 * verified; this function assumes that @num_valid elements
2105 * from @start were previously verified to have valid offsets.
2107 static struct binder_buffer_object *binder_validate_ptr(struct binder_buffer *b,
2108 binder_size_t index,
2109 binder_size_t *start,
2110 binder_size_t num_valid)
2112 struct binder_buffer_object *buffer_obj;
2113 binder_size_t *offp;
2115 if (index >= num_valid)
2116 return NULL;
2118 offp = start + index;
2119 buffer_obj = (struct binder_buffer_object *)(b->data + *offp);
2120 if (buffer_obj->hdr.type != BINDER_TYPE_PTR)
2121 return NULL;
2123 return buffer_obj;
2127 * binder_validate_fixup() - validates pointer/fd fixups happen in order.
2128 * @b: transaction buffer
2129  * @objects_start: start of objects buffer
2130 * @buffer: binder_buffer_object in which to fix up
2131 * @offset: start offset in @buffer to fix up
2132 * @last_obj: last binder_buffer_object that we fixed up in
2133 * @last_min_offset: minimum fixup offset in @last_obj
2135 * Return: %true if a fixup in buffer @buffer at offset @offset is
2136 * allowed.
2138 * For safety reasons, we only allow fixups inside a buffer to happen
2139 * at increasing offsets; additionally, we only allow fixup on the last
2140 * buffer object that was verified, or one of its parents.
2142 * Example of what is allowed:
2145 * B (parent = A, offset = 0)
2146 * C (parent = A, offset = 16)
2147 * D (parent = C, offset = 0)
2148 * E (parent = A, offset = 32) // min_offset is 16 (C.parent_offset)
2150 * Examples of what is not allowed:
2152 * Decreasing offsets within the same parent:
2154 * C (parent = A, offset = 16)
2155 * B (parent = A, offset = 0) // decreasing offset within A
2157 * Referring to a parent that wasn't the last object or any of its parents:
2159 * B (parent = A, offset = 0)
2160 * C (parent = A, offset = 0)
2161 * C (parent = A, offset = 16)
2162 * D (parent = B, offset = 0) // B is not A or any of A's parents
2164 static bool binder_validate_fixup(struct binder_buffer *b,
2165 binder_size_t *objects_start,
2166 struct binder_buffer_object *buffer,
2167 binder_size_t fixup_offset,
2168 struct binder_buffer_object *last_obj,
2169 binder_size_t last_min_offset)
2171 if (!last_obj) {
2172 /* Nothing to fix up in */
2173 return false;
2176 while (last_obj != buffer) {
2177 		/*
2178 		 * Safe to retrieve the parent of last_obj, since it
2179 		 * was already previously verified by the driver.
2180 		 */
2181 if ((last_obj->flags & BINDER_BUFFER_FLAG_HAS_PARENT) == 0)
2182 return false;
2183 last_min_offset = last_obj->parent_offset + sizeof(uintptr_t);
2184 last_obj = (struct binder_buffer_object *)
2185 (b->data + *(objects_start + last_obj->parent));
2187 	return (fixup_offset >= last_min_offset);
2188 }
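/*
 * Editor's note: relating the code to the A/B/C/D/E example in the
 * comment above: walking last_obj's parent chain recomputes the minimum
 * legal offset, so a fixup is rejected either when it targets a buffer
 * that is not on that chain, or when its offset moves backwards. A
 * hedged sketch of the increasing-offset rule in isolation:
 */
#if 0 /* illustrative sketch, not driver code */
#include <stdbool.h>
#include <stddef.h>

/* Offsets fixed up within one parent buffer must only grow. */
static bool fixups_in_order(const size_t *offsets, size_t n)
{
	size_t i, min_off = 0;

	for (i = 0; i < n; i++) {
		if (offsets[i] < min_off)
			return false;	/* decreasing offset: rejected */
		min_off = offsets[i] + sizeof(void *);
	}
	return true;
}
#endif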
2190 static void binder_transaction_buffer_release(struct binder_proc *proc,
2191 struct binder_buffer *buffer,
2192 binder_size_t *failed_at)
2194 binder_size_t *offp, *off_start, *off_end;
2195 int debug_id = buffer->debug_id;
2197 binder_debug(BINDER_DEBUG_TRANSACTION,
2198 "%d buffer release %d, size %zd-%zd, failed at %pK\n",
2199 proc->pid, buffer->debug_id,
2200 buffer->data_size, buffer->offsets_size, failed_at);
2202 if (buffer->target_node)
2203 binder_dec_node(buffer->target_node, 1, 0);
2205 off_start = (binder_size_t *)(buffer->data +
2206 ALIGN(buffer->data_size, sizeof(void *)));
2207 if (failed_at)
2208 off_end = failed_at;
2209 else
2210 off_end = (void *)off_start + buffer->offsets_size;
2211 for (offp = off_start; offp < off_end; offp++) {
2212 struct binder_object_header *hdr;
2213 size_t object_size = binder_validate_object(buffer, *offp);
2215 if (object_size == 0) {
2216 pr_err("transaction release %d bad object at offset %lld, size %zd\n",
2217 debug_id, (u64)*offp, buffer->data_size);
2218 continue;
2220 hdr = (struct binder_object_header *)(buffer->data + *offp);
2221 switch (hdr->type) {
2222 case BINDER_TYPE_BINDER:
2223 case BINDER_TYPE_WEAK_BINDER: {
2224 struct flat_binder_object *fp;
2225 struct binder_node *node;
2227 fp = to_flat_binder_object(hdr);
2228 node = binder_get_node(proc, fp->binder);
2229 if (node == NULL) {
2230 pr_err("transaction release %d bad node %016llx\n",
2231 debug_id, (u64)fp->binder);
2232 break;
2234 binder_debug(BINDER_DEBUG_TRANSACTION,
2235 " node %d u%016llx\n",
2236 node->debug_id, (u64)node->ptr);
2237 binder_dec_node(node, hdr->type == BINDER_TYPE_BINDER,
2239 binder_put_node(node);
2240 } break;
2241 case BINDER_TYPE_HANDLE:
2242 case BINDER_TYPE_WEAK_HANDLE: {
2243 struct flat_binder_object *fp;
2244 struct binder_ref_data rdata;
2245 int ret;
2247 fp = to_flat_binder_object(hdr);
2248 ret = binder_dec_ref_for_handle(proc, fp->handle,
2249 hdr->type == BINDER_TYPE_HANDLE, &rdata);
2251 if (ret) {
2252 pr_err("transaction release %d bad handle %d, ret = %d\n",
2253 debug_id, fp->handle, ret);
2254 break;
2256 binder_debug(BINDER_DEBUG_TRANSACTION,
2257 " ref %d desc %d\n",
2258 rdata.debug_id, rdata.desc);
2259 } break;
2261 case BINDER_TYPE_FD: {
2262 struct binder_fd_object *fp = to_binder_fd_object(hdr);
2264 binder_debug(BINDER_DEBUG_TRANSACTION,
2265 " fd %d\n", fp->fd);
2266 if (failed_at)
2267 task_close_fd(proc, fp->fd);
2268 } break;
2269 case BINDER_TYPE_PTR:
2270 			/*
2271 			 * Nothing to do here, this will get cleaned up when the
2272 			 * transaction buffer gets freed
2273 			 */
2274 			break;
2275 case BINDER_TYPE_FDA: {
2276 struct binder_fd_array_object *fda;
2277 struct binder_buffer_object *parent;
2278 uintptr_t parent_buffer;
2279 u32 *fd_array;
2280 size_t fd_index;
2281 binder_size_t fd_buf_size;
2283 fda = to_binder_fd_array_object(hdr);
2284 parent = binder_validate_ptr(buffer, fda->parent,
2285 off_start,
2286 offp - off_start);
2287 if (!parent) {
2288 pr_err("transaction release %d bad parent offset\n",
2289 debug_id);
2290 continue;
2291 			}
2292 			/*
2293 			 * Since the parent was already fixed up, convert it
2294 			 * back to kernel address space to access it
2295 			 */
2296 parent_buffer = parent->buffer -
2297 binder_alloc_get_user_buffer_offset(
2298 &proc->alloc);
2300 fd_buf_size = sizeof(u32) * fda->num_fds;
2301 if (fda->num_fds >= SIZE_MAX / sizeof(u32)) {
2302 pr_err("transaction release %d invalid number of fds (%lld)\n",
2303 debug_id, (u64)fda->num_fds);
2304 continue;
2306 if (fd_buf_size > parent->length ||
2307 fda->parent_offset > parent->length - fd_buf_size) {
2308 /* No space for all file descriptors here. */
2309 pr_err("transaction release %d not enough space for %lld fds in buffer\n",
2310 debug_id, (u64)fda->num_fds);
2311 continue;
2313 fd_array = (u32 *)(parent_buffer + (uintptr_t)fda->parent_offset);
2314 for (fd_index = 0; fd_index < fda->num_fds; fd_index++)
2315 task_close_fd(proc, fd_array[fd_index]);
2316 } break;
2317 default:
2318 pr_err("transaction release %d bad object type %x\n",
2319 debug_id, hdr->type);
2320 break;
2325 static int binder_translate_binder(struct flat_binder_object *fp,
2326 struct binder_transaction *t,
2327 struct binder_thread *thread)
2329 struct binder_node *node;
2330 struct binder_proc *proc = thread->proc;
2331 struct binder_proc *target_proc = t->to_proc;
2332 struct binder_ref_data rdata;
2333 int ret = 0;
2335 node = binder_get_node(proc, fp->binder);
2336 if (!node) {
2337 node = binder_new_node(proc, fp);
2338 if (!node)
2339 return -ENOMEM;
2341 if (fp->cookie != node->cookie) {
2342 binder_user_error("%d:%d sending u%016llx node %d, cookie mismatch %016llx != %016llx\n",
2343 proc->pid, thread->pid, (u64)fp->binder,
2344 node->debug_id, (u64)fp->cookie,
2345 (u64)node->cookie);
2346 ret = -EINVAL;
2347 goto done;
2349 if (security_binder_transfer_binder(proc->tsk, target_proc->tsk)) {
2350 ret = -EPERM;
2351 goto done;
2354 ret = binder_inc_ref_for_node(target_proc, node,
2355 fp->hdr.type == BINDER_TYPE_BINDER,
2356 &thread->todo, &rdata);
2357 if (ret)
2358 goto done;
2360 if (fp->hdr.type == BINDER_TYPE_BINDER)
2361 fp->hdr.type = BINDER_TYPE_HANDLE;
2362 else
2363 fp->hdr.type = BINDER_TYPE_WEAK_HANDLE;
2364 fp->binder = 0;
2365 fp->handle = rdata.desc;
2366 fp->cookie = 0;
2368 trace_binder_transaction_node_to_ref(t, node, &rdata);
2369 binder_debug(BINDER_DEBUG_TRANSACTION,
2370 " node %d u%016llx -> ref %d desc %d\n",
2371 node->debug_id, (u64)node->ptr,
2372 rdata.debug_id, rdata.desc);
2373 done:
2374 binder_put_node(node);
2375 return ret;
2378 static int binder_translate_handle(struct flat_binder_object *fp,
2379 struct binder_transaction *t,
2380 struct binder_thread *thread)
2382 struct binder_proc *proc = thread->proc;
2383 struct binder_proc *target_proc = t->to_proc;
2384 struct binder_node *node;
2385 struct binder_ref_data src_rdata;
2386 int ret = 0;
2388 node = binder_get_node_from_ref(proc, fp->handle,
2389 fp->hdr.type == BINDER_TYPE_HANDLE, &src_rdata);
2390 if (!node) {
2391 binder_user_error("%d:%d got transaction with invalid handle, %d\n",
2392 proc->pid, thread->pid, fp->handle);
2393 return -EINVAL;
2395 if (security_binder_transfer_binder(proc->tsk, target_proc->tsk)) {
2396 ret = -EPERM;
2397 goto done;
2400 binder_node_lock(node);
2401 if (node->proc == target_proc) {
2402 if (fp->hdr.type == BINDER_TYPE_HANDLE)
2403 fp->hdr.type = BINDER_TYPE_BINDER;
2404 else
2405 fp->hdr.type = BINDER_TYPE_WEAK_BINDER;
2406 fp->binder = node->ptr;
2407 fp->cookie = node->cookie;
2408 if (node->proc)
2409 binder_inner_proc_lock(node->proc);
2410 binder_inc_node_nilocked(node,
2411 fp->hdr.type == BINDER_TYPE_BINDER,
2412 0, NULL);
2413 if (node->proc)
2414 binder_inner_proc_unlock(node->proc);
2415 trace_binder_transaction_ref_to_node(t, node, &src_rdata);
2416 binder_debug(BINDER_DEBUG_TRANSACTION,
2417 " ref %d desc %d -> node %d u%016llx\n",
2418 src_rdata.debug_id, src_rdata.desc, node->debug_id,
2419 (u64)node->ptr);
2420 binder_node_unlock(node);
2421 } else {
2422 struct binder_ref_data dest_rdata;
2424 binder_node_unlock(node);
2425 ret = binder_inc_ref_for_node(target_proc, node,
2426 fp->hdr.type == BINDER_TYPE_HANDLE,
2427 NULL, &dest_rdata);
2428 if (ret)
2429 goto done;
2431 fp->binder = 0;
2432 fp->handle = dest_rdata.desc;
2433 fp->cookie = 0;
2434 trace_binder_transaction_ref_to_ref(t, node, &src_rdata,
2435 &dest_rdata);
2436 binder_debug(BINDER_DEBUG_TRANSACTION,
2437 " ref %d desc %d -> ref %d desc %d (node %d)\n",
2438 src_rdata.debug_id, src_rdata.desc,
2439 dest_rdata.debug_id, dest_rdata.desc,
2440 node->debug_id);
2442 done:
2443 binder_put_node(node);
2444 return ret;
2447 static int binder_translate_fd(int fd,
2448 struct binder_transaction *t,
2449 struct binder_thread *thread,
2450 struct binder_transaction *in_reply_to)
2452 struct binder_proc *proc = thread->proc;
2453 struct binder_proc *target_proc = t->to_proc;
2454 int target_fd;
2455 struct file *file;
2456 int ret;
2457 bool target_allows_fd;
2459 if (in_reply_to)
2460 target_allows_fd = !!(in_reply_to->flags & TF_ACCEPT_FDS);
2461 else
2462 target_allows_fd = t->buffer->target_node->accept_fds;
2463 if (!target_allows_fd) {
2464 binder_user_error("%d:%d got %s with fd, %d, but target does not allow fds\n",
2465 proc->pid, thread->pid,
2466 in_reply_to ? "reply" : "transaction",
2467 fd);
2468 ret = -EPERM;
2469 goto err_fd_not_accepted;
2472 file = fget(fd);
2473 if (!file) {
2474 binder_user_error("%d:%d got transaction with invalid fd, %d\n",
2475 proc->pid, thread->pid, fd);
2476 ret = -EBADF;
2477 goto err_fget;
2479 ret = security_binder_transfer_file(proc->tsk, target_proc->tsk, file);
2480 if (ret < 0) {
2481 ret = -EPERM;
2482 goto err_security;
2485 target_fd = task_get_unused_fd_flags(target_proc, O_CLOEXEC);
2486 if (target_fd < 0) {
2487 ret = -ENOMEM;
2488 goto err_get_unused_fd;
2490 task_fd_install(target_proc, target_fd, file);
2491 trace_binder_transaction_fd(t, fd, target_fd);
2492 binder_debug(BINDER_DEBUG_TRANSACTION, " fd %d -> %d\n",
2493 fd, target_fd);
2495 return target_fd;
2497 err_get_unused_fd:
2498 err_security:
2499 fput(file);
2500 err_fget:
2501 err_fd_not_accepted:
2502 	return ret;
2503 }
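/*
 * Editor's note: binder_translate_fd() is the receive side of the
 * BINDER_TYPE_FD object that the sender embeds in its transaction data.
 * A hedged sketch of what the sending process puts in its buffer, using
 * the uapi struct; the matching entry in the transaction's offsets array
 * is noted but elided:
 */
#if 0 /* illustrative sketch, not driver code */
#include <stddef.h>
#include <string.h>
#include <linux/android/binder.h>

static void pack_fd_object(void *data_buf, size_t obj_off, int fd)
{
	struct binder_fd_object fdo;

	memset(&fdo, 0, sizeof(fdo));
	fdo.hdr.type = BINDER_TYPE_FD;
	fdo.fd = fd;	/* the kernel replaces this with the target's fd */
	memcpy((char *)data_buf + obj_off, &fdo, sizeof(fdo));
	/* obj_off must also be recorded in the transaction's offsets array */
}
#endif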
2505 static int binder_translate_fd_array(struct binder_fd_array_object *fda,
2506 struct binder_buffer_object *parent,
2507 struct binder_transaction *t,
2508 struct binder_thread *thread,
2509 struct binder_transaction *in_reply_to)
2511 binder_size_t fdi, fd_buf_size, num_installed_fds;
2512 int target_fd;
2513 uintptr_t parent_buffer;
2514 u32 *fd_array;
2515 struct binder_proc *proc = thread->proc;
2516 struct binder_proc *target_proc = t->to_proc;
2518 fd_buf_size = sizeof(u32) * fda->num_fds;
2519 if (fda->num_fds >= SIZE_MAX / sizeof(u32)) {
2520 binder_user_error("%d:%d got transaction with invalid number of fds (%lld)\n",
2521 proc->pid, thread->pid, (u64)fda->num_fds);
2522 return -EINVAL;
2524 if (fd_buf_size > parent->length ||
2525 fda->parent_offset > parent->length - fd_buf_size) {
2526 /* No space for all file descriptors here. */
2527 binder_user_error("%d:%d not enough space to store %lld fds in buffer\n",
2528 proc->pid, thread->pid, (u64)fda->num_fds);
2529 return -EINVAL;
2530 	}
2531 	/*
2532 	 * Since the parent was already fixed up, convert it
2533 	 * back to the kernel address space to access it
2534 	 */
2535 parent_buffer = parent->buffer -
2536 binder_alloc_get_user_buffer_offset(&target_proc->alloc);
2537 fd_array = (u32 *)(parent_buffer + (uintptr_t)fda->parent_offset);
2538 if (!IS_ALIGNED((unsigned long)fd_array, sizeof(u32))) {
2539 binder_user_error("%d:%d parent offset not aligned correctly.\n",
2540 proc->pid, thread->pid);
2541 return -EINVAL;
2543 for (fdi = 0; fdi < fda->num_fds; fdi++) {
2544 target_fd = binder_translate_fd(fd_array[fdi], t, thread,
2545 in_reply_to);
2546 if (target_fd < 0)
2547 goto err_translate_fd_failed;
2548 fd_array[fdi] = target_fd;
2550 return 0;
2552 err_translate_fd_failed:
2553 	/*
2554 	 * Failed to allocate fd or security error, free fds
2555 	 * installed so far.
2556 	 */
2557 num_installed_fds = fdi;
2558 for (fdi = 0; fdi < num_installed_fds; fdi++)
2559 task_close_fd(target_proc, fd_array[fdi]);
2560 return target_fd;
2563 static int binder_fixup_parent(struct binder_transaction *t,
2564 struct binder_thread *thread,
2565 struct binder_buffer_object *bp,
2566 binder_size_t *off_start,
2567 binder_size_t num_valid,
2568 struct binder_buffer_object *last_fixup_obj,
2569 binder_size_t last_fixup_min_off)
2571 struct binder_buffer_object *parent;
2572 u8 *parent_buffer;
2573 struct binder_buffer *b = t->buffer;
2574 struct binder_proc *proc = thread->proc;
2575 struct binder_proc *target_proc = t->to_proc;
2577 if (!(bp->flags & BINDER_BUFFER_FLAG_HAS_PARENT))
2578 return 0;
2580 parent = binder_validate_ptr(b, bp->parent, off_start, num_valid);
2581 if (!parent) {
2582 binder_user_error("%d:%d got transaction with invalid parent offset or type\n",
2583 proc->pid, thread->pid);
2584 return -EINVAL;
2587 if (!binder_validate_fixup(b, off_start,
2588 parent, bp->parent_offset,
2589 last_fixup_obj,
2590 last_fixup_min_off)) {
2591 binder_user_error("%d:%d got transaction with out-of-order buffer fixup\n",
2592 proc->pid, thread->pid);
2593 return -EINVAL;
2596 if (parent->length < sizeof(binder_uintptr_t) ||
2597 bp->parent_offset > parent->length - sizeof(binder_uintptr_t)) {
2598 /* No space for a pointer here! */
2599 binder_user_error("%d:%d got transaction with invalid parent offset\n",
2600 proc->pid, thread->pid);
2601 return -EINVAL;
2603 parent_buffer = (u8 *)((uintptr_t)parent->buffer -
2604 binder_alloc_get_user_buffer_offset(
2605 &target_proc->alloc));
2606 *(binder_uintptr_t *)(parent_buffer + bp->parent_offset) = bp->buffer;
2608 return 0;
2612 * binder_proc_transaction() - sends a transaction to a process and wakes it up
2613 * @t: transaction to send
2614 * @proc: process to send the transaction to
2615 * @thread: thread in @proc to send the transaction to (may be NULL)
2617 * This function queues a transaction to the specified process. It will try
2618 * to find a thread in the target process to handle the transaction and
2619 * wake it up. If no thread is found, the work is queued to the proc
2620 * waitqueue.
2622 * If the @thread parameter is not NULL, the transaction is always queued
2623 * to the waitlist of that specific thread.
2625  * Return: true if the transaction was successfully queued
2626 * false if the target process or thread is dead
2628 static bool binder_proc_transaction(struct binder_transaction *t,
2629 struct binder_proc *proc,
2630 struct binder_thread *thread)
2632 struct binder_node *node = t->buffer->target_node;
2633 bool oneway = !!(t->flags & TF_ONE_WAY);
2634 bool pending_async = false;
2636 BUG_ON(!node);
2637 binder_node_lock(node);
2638 if (oneway) {
2639 BUG_ON(thread);
2640 if (node->has_async_transaction) {
2641 pending_async = true;
2642 } else {
2643 node->has_async_transaction = true;
2647 binder_inner_proc_lock(proc);
2649 if (proc->is_dead || (thread && thread->is_dead)) {
2650 binder_inner_proc_unlock(proc);
2651 binder_node_unlock(node);
2652 return false;
2655 if (!thread && !pending_async)
2656 thread = binder_select_thread_ilocked(proc);
2658 if (thread)
2659 binder_enqueue_thread_work_ilocked(thread, &t->work);
2660 else if (!pending_async)
2661 binder_enqueue_work_ilocked(&t->work, &proc->todo);
2662 else
2663 binder_enqueue_work_ilocked(&t->work, &node->async_todo);
2665 if (!pending_async)
2666 binder_wakeup_thread_ilocked(proc, thread, !oneway /* sync */);
2668 binder_inner_proc_unlock(proc);
2669 binder_node_unlock(node);
2671 	return true;
2672 }
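/*
 * Editor's note: the queueing policy above reduces to a small decision
 * table: an explicit (or selected idle) thread gets the work directly,
 * otherwise it lands on the proc todo list, unless an async transaction
 * is already in flight on the node. A hedged sketch of just that routing
 * choice, with a hypothetical queue enum:
 */
#if 0 /* illustrative sketch, not driver code */
#include <stdbool.h>

enum work_queue { THREAD_TODO, PROC_TODO, NODE_ASYNC_TODO };

static enum work_queue pick_queue(bool have_thread, bool pending_async)
{
	if (have_thread)		/* explicit or selected idle thread */
		return THREAD_TODO;
	if (!pending_async)		/* no async transaction in flight */
		return PROC_TODO;
	return NODE_ASYNC_TODO;		/* serialize behind current async */
}
#endif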
2675 * binder_get_node_refs_for_txn() - Get required refs on node for txn
2676 * @node: struct binder_node for which to get refs
2677  * @procp: returns @node->proc if valid
2678  * @error: set to BR_DEAD_REPLY if there is no @node->proc
2680 * User-space normally keeps the node alive when creating a transaction
2681 * since it has a reference to the target. The local strong ref keeps it
2682 * alive if the sending process dies before the target process processes
2683 * the transaction. If the source process is malicious or has a reference
2684 * counting bug, relying on the local strong ref can fail.
2686 * Since user-space can cause the local strong ref to go away, we also take
2687 * a tmpref on the node to ensure it survives while we are constructing
2688 * the transaction. We also need a tmpref on the proc while we are
2689 * constructing the transaction, so we take that here as well.
2691  * Return: the target_node with refs taken, or NULL if @node->proc is NULL
2692  * (i.e. the target proc has died). Also sets @procp if valid; when the
2693  * target proc has died, @error is set to BR_DEAD_REPLY.
2695 static struct binder_node *binder_get_node_refs_for_txn(
2696 struct binder_node *node,
2697 struct binder_proc **procp,
2698 uint32_t *error)
2700 struct binder_node *target_node = NULL;
2702 binder_node_inner_lock(node);
2703 if (node->proc) {
2704 target_node = node;
2705 binder_inc_node_nilocked(node, 1, 0, NULL);
2706 binder_inc_node_tmpref_ilocked(node);
2707 node->proc->tmp_ref++;
2708 *procp = node->proc;
2709 } else
2710 *error = BR_DEAD_REPLY;
2711 binder_node_inner_unlock(node);
2713 return target_node;
2716 static void binder_transaction(struct binder_proc *proc,
2717 struct binder_thread *thread,
2718 struct binder_transaction_data *tr, int reply,
2719 binder_size_t extra_buffers_size)
2721 int ret;
2722 struct binder_transaction *t;
2723 struct binder_work *tcomplete;
2724 binder_size_t *offp, *off_end, *off_start;
2725 binder_size_t off_min;
2726 u8 *sg_bufp, *sg_buf_end;
2727 struct binder_proc *target_proc = NULL;
2728 struct binder_thread *target_thread = NULL;
2729 struct binder_node *target_node = NULL;
2730 struct binder_transaction *in_reply_to = NULL;
2731 struct binder_transaction_log_entry *e;
2732 uint32_t return_error = 0;
2733 uint32_t return_error_param = 0;
2734 uint32_t return_error_line = 0;
2735 struct binder_buffer_object *last_fixup_obj = NULL;
2736 binder_size_t last_fixup_min_off = 0;
2737 struct binder_context *context = proc->context;
2738 int t_debug_id = atomic_inc_return(&binder_last_id);
2740 e = binder_transaction_log_add(&binder_transaction_log);
2741 e->debug_id = t_debug_id;
2742 e->call_type = reply ? 2 : !!(tr->flags & TF_ONE_WAY);
2743 e->from_proc = proc->pid;
2744 e->from_thread = thread->pid;
2745 e->target_handle = tr->target.handle;
2746 e->data_size = tr->data_size;
2747 e->offsets_size = tr->offsets_size;
2748 e->context_name = proc->context->name;
2750 if (reply) {
2751 binder_inner_proc_lock(proc);
2752 in_reply_to = thread->transaction_stack;
2753 if (in_reply_to == NULL) {
2754 binder_inner_proc_unlock(proc);
2755 binder_user_error("%d:%d got reply transaction with no transaction stack\n",
2756 proc->pid, thread->pid);
2757 return_error = BR_FAILED_REPLY;
2758 return_error_param = -EPROTO;
2759 return_error_line = __LINE__;
2760 goto err_empty_call_stack;
2762 if (in_reply_to->to_thread != thread) {
2763 spin_lock(&in_reply_to->lock);
2764 binder_user_error("%d:%d got reply transaction with bad transaction stack, transaction %d has target %d:%d\n",
2765 proc->pid, thread->pid, in_reply_to->debug_id,
2766 in_reply_to->to_proc ?
2767 in_reply_to->to_proc->pid : 0,
2768 in_reply_to->to_thread ?
2769 in_reply_to->to_thread->pid : 0);
2770 spin_unlock(&in_reply_to->lock);
2771 binder_inner_proc_unlock(proc);
2772 return_error = BR_FAILED_REPLY;
2773 return_error_param = -EPROTO;
2774 return_error_line = __LINE__;
2775 in_reply_to = NULL;
2776 goto err_bad_call_stack;
2778 thread->transaction_stack = in_reply_to->to_parent;
2779 binder_inner_proc_unlock(proc);
2780 binder_set_nice(in_reply_to->saved_priority);
2781 target_thread = binder_get_txn_from_and_acq_inner(in_reply_to);
2782 if (target_thread == NULL) {
2783 return_error = BR_DEAD_REPLY;
2784 return_error_line = __LINE__;
2785 goto err_dead_binder;
2787 if (target_thread->transaction_stack != in_reply_to) {
2788 binder_user_error("%d:%d got reply transaction with bad target transaction stack %d, expected %d\n",
2789 proc->pid, thread->pid,
2790 target_thread->transaction_stack ?
2791 target_thread->transaction_stack->debug_id : 0,
2792 in_reply_to->debug_id);
2793 binder_inner_proc_unlock(target_thread->proc);
2794 return_error = BR_FAILED_REPLY;
2795 return_error_param = -EPROTO;
2796 return_error_line = __LINE__;
2797 in_reply_to = NULL;
2798 target_thread = NULL;
2799 goto err_dead_binder;
2801 target_proc = target_thread->proc;
2802 target_proc->tmp_ref++;
2803 binder_inner_proc_unlock(target_thread->proc);
2804 } else {
2805 if (tr->target.handle) {
2806 struct binder_ref *ref;
2808 			/*
2809 			 * There must already be a strong ref
2810 			 * on this node. If so, do a strong
2811 			 * increment on the node to ensure it
2812 			 * stays alive until the transaction is
2813 			 * done.
2814 			 */
2815 binder_proc_lock(proc);
2816 ref = binder_get_ref_olocked(proc, tr->target.handle,
2817 true);
2818 if (ref) {
2819 target_node = binder_get_node_refs_for_txn(
2820 ref->node, &target_proc,
2821 &return_error);
2822 } else {
2823 binder_user_error("%d:%d got transaction to invalid handle\n",
2824 proc->pid, thread->pid);
2825 return_error = BR_FAILED_REPLY;
2827 binder_proc_unlock(proc);
2828 } else {
2829 mutex_lock(&context->context_mgr_node_lock);
2830 target_node = context->binder_context_mgr_node;
2831 if (target_node)
2832 target_node = binder_get_node_refs_for_txn(
2833 target_node, &target_proc,
2834 &return_error);
2835 else
2836 return_error = BR_DEAD_REPLY;
2837 mutex_unlock(&context->context_mgr_node_lock);
2838 if (target_node && target_proc == proc) {
2839 binder_user_error("%d:%d got transaction to context manager from process owning it\n",
2840 proc->pid, thread->pid);
2841 return_error = BR_FAILED_REPLY;
2842 return_error_param = -EINVAL;
2843 return_error_line = __LINE__;
2844 goto err_invalid_target_handle;
2847 if (!target_node) {
2848 		/*
2849 		 * return_error is set above
2850 		 */
2851 return_error_param = -EINVAL;
2852 return_error_line = __LINE__;
2853 goto err_dead_binder;
2855 e->to_node = target_node->debug_id;
2856 if (security_binder_transaction(proc->tsk,
2857 target_proc->tsk) < 0) {
2858 return_error = BR_FAILED_REPLY;
2859 return_error_param = -EPERM;
2860 return_error_line = __LINE__;
2861 goto err_invalid_target_handle;
2863 binder_inner_proc_lock(proc);
2864 if (!(tr->flags & TF_ONE_WAY) && thread->transaction_stack) {
2865 struct binder_transaction *tmp;
2867 tmp = thread->transaction_stack;
2868 if (tmp->to_thread != thread) {
2869 spin_lock(&tmp->lock);
2870 binder_user_error("%d:%d got new transaction with bad transaction stack, transaction %d has target %d:%d\n",
2871 proc->pid, thread->pid, tmp->debug_id,
2872 tmp->to_proc ? tmp->to_proc->pid : 0,
2873 tmp->to_thread ?
2874 tmp->to_thread->pid : 0);
2875 spin_unlock(&tmp->lock);
2876 binder_inner_proc_unlock(proc);
2877 return_error = BR_FAILED_REPLY;
2878 return_error_param = -EPROTO;
2879 return_error_line = __LINE__;
2880 goto err_bad_call_stack;
2882 while (tmp) {
2883 struct binder_thread *from;
2885 spin_lock(&tmp->lock);
2886 from = tmp->from;
2887 if (from && from->proc == target_proc) {
2888 atomic_inc(&from->tmp_ref);
2889 target_thread = from;
2890 spin_unlock(&tmp->lock);
2891 break;
2893 spin_unlock(&tmp->lock);
2894 tmp = tmp->from_parent;
2897 binder_inner_proc_unlock(proc);
2899 if (target_thread)
2900 e->to_thread = target_thread->pid;
2901 e->to_proc = target_proc->pid;
2903 /* TODO: reuse incoming transaction for reply */
2904 t = kzalloc(sizeof(*t), GFP_KERNEL);
2905 if (t == NULL) {
2906 return_error = BR_FAILED_REPLY;
2907 return_error_param = -ENOMEM;
2908 return_error_line = __LINE__;
2909 goto err_alloc_t_failed;
2911 binder_stats_created(BINDER_STAT_TRANSACTION);
2912 spin_lock_init(&t->lock);
2914 tcomplete = kzalloc(sizeof(*tcomplete), GFP_KERNEL);
2915 if (tcomplete == NULL) {
2916 return_error = BR_FAILED_REPLY;
2917 return_error_param = -ENOMEM;
2918 return_error_line = __LINE__;
2919 goto err_alloc_tcomplete_failed;
2921 binder_stats_created(BINDER_STAT_TRANSACTION_COMPLETE);
2923 t->debug_id = t_debug_id;
2925 if (reply)
2926 binder_debug(BINDER_DEBUG_TRANSACTION,
2927 "%d:%d BC_REPLY %d -> %d:%d, data %016llx-%016llx size %lld-%lld-%lld\n",
2928 proc->pid, thread->pid, t->debug_id,
2929 target_proc->pid, target_thread->pid,
2930 (u64)tr->data.ptr.buffer,
2931 (u64)tr->data.ptr.offsets,
2932 (u64)tr->data_size, (u64)tr->offsets_size,
2933 (u64)extra_buffers_size);
2934 else
2935 binder_debug(BINDER_DEBUG_TRANSACTION,
2936 "%d:%d BC_TRANSACTION %d -> %d - node %d, data %016llx-%016llx size %lld-%lld-%lld\n",
2937 proc->pid, thread->pid, t->debug_id,
2938 target_proc->pid, target_node->debug_id,
2939 (u64)tr->data.ptr.buffer,
2940 (u64)tr->data.ptr.offsets,
2941 (u64)tr->data_size, (u64)tr->offsets_size,
2942 (u64)extra_buffers_size);
2944 if (!reply && !(tr->flags & TF_ONE_WAY))
2945 t->from = thread;
2946 else
2947 t->from = NULL;
2948 t->sender_euid = task_euid(proc->tsk);
2949 t->to_proc = target_proc;
2950 t->to_thread = target_thread;
2951 t->code = tr->code;
2952 t->flags = tr->flags;
2953 t->priority = task_nice(current);
2955 trace_binder_transaction(reply, t, target_node);
2957 t->buffer = binder_alloc_new_buf(&target_proc->alloc, tr->data_size,
2958 tr->offsets_size, extra_buffers_size,
2959 !reply && (t->flags & TF_ONE_WAY));
2960 if (IS_ERR(t->buffer)) {
2961 		/*
2962 		 * -ESRCH indicates VMA cleared. The target is dying.
2963 		 */
2964 return_error_param = PTR_ERR(t->buffer);
2965 return_error = return_error_param == -ESRCH ?
2966 BR_DEAD_REPLY : BR_FAILED_REPLY;
2967 return_error_line = __LINE__;
2968 t->buffer = NULL;
2969 goto err_binder_alloc_buf_failed;
2971 t->buffer->allow_user_free = 0;
2972 t->buffer->debug_id = t->debug_id;
2973 t->buffer->transaction = t;
2974 t->buffer->target_node = target_node;
2975 trace_binder_transaction_alloc_buf(t->buffer);
2976 off_start = (binder_size_t *)(t->buffer->data +
2977 ALIGN(tr->data_size, sizeof(void *)));
2978 offp = off_start;
2980 if (copy_from_user(t->buffer->data, (const void __user *)(uintptr_t)
2981 tr->data.ptr.buffer, tr->data_size)) {
2982 binder_user_error("%d:%d got transaction with invalid data ptr\n",
2983 proc->pid, thread->pid);
2984 return_error = BR_FAILED_REPLY;
2985 return_error_param = -EFAULT;
2986 return_error_line = __LINE__;
2987 goto err_copy_data_failed;
2989 if (copy_from_user(offp, (const void __user *)(uintptr_t)
2990 tr->data.ptr.offsets, tr->offsets_size)) {
2991 binder_user_error("%d:%d got transaction with invalid offsets ptr\n",
2992 proc->pid, thread->pid);
2993 return_error = BR_FAILED_REPLY;
2994 return_error_param = -EFAULT;
2995 return_error_line = __LINE__;
2996 goto err_copy_data_failed;
2998 if (!IS_ALIGNED(tr->offsets_size, sizeof(binder_size_t))) {
2999 binder_user_error("%d:%d got transaction with invalid offsets size, %lld\n",
3000 proc->pid, thread->pid, (u64)tr->offsets_size);
3001 return_error = BR_FAILED_REPLY;
3002 return_error_param = -EINVAL;
3003 return_error_line = __LINE__;
3004 goto err_bad_offset;
3006 if (!IS_ALIGNED(extra_buffers_size, sizeof(u64))) {
3007 binder_user_error("%d:%d got transaction with unaligned buffers size, %lld\n",
3008 proc->pid, thread->pid,
3009 (u64)extra_buffers_size);
3010 return_error = BR_FAILED_REPLY;
3011 return_error_param = -EINVAL;
3012 return_error_line = __LINE__;
3013 goto err_bad_offset;
3015 off_end = (void *)off_start + tr->offsets_size;
3016 sg_bufp = (u8 *)(PTR_ALIGN(off_end, sizeof(void *)));
3017 sg_buf_end = sg_bufp + extra_buffers_size;
3018 off_min = 0;
3019 for (; offp < off_end; offp++) {
3020 struct binder_object_header *hdr;
3021 size_t object_size = binder_validate_object(t->buffer, *offp);
3023 if (object_size == 0 || *offp < off_min) {
3024 binder_user_error("%d:%d got transaction with invalid offset (%lld, min %lld max %lld) or object.\n",
3025 proc->pid, thread->pid, (u64)*offp,
3026 (u64)off_min,
3027 (u64)t->buffer->data_size);
3028 return_error = BR_FAILED_REPLY;
3029 return_error_param = -EINVAL;
3030 return_error_line = __LINE__;
3031 goto err_bad_offset;
3034 hdr = (struct binder_object_header *)(t->buffer->data + *offp);
3035 off_min = *offp + object_size;
3036 switch (hdr->type) {
3037 case BINDER_TYPE_BINDER:
3038 case BINDER_TYPE_WEAK_BINDER: {
3039 struct flat_binder_object *fp;
3041 fp = to_flat_binder_object(hdr);
3042 ret = binder_translate_binder(fp, t, thread);
3043 if (ret < 0) {
3044 return_error = BR_FAILED_REPLY;
3045 return_error_param = ret;
3046 return_error_line = __LINE__;
3047 goto err_translate_failed;
3049 } break;
3050 case BINDER_TYPE_HANDLE:
3051 case BINDER_TYPE_WEAK_HANDLE: {
3052 struct flat_binder_object *fp;
3054 fp = to_flat_binder_object(hdr);
3055 ret = binder_translate_handle(fp, t, thread);
3056 if (ret < 0) {
3057 return_error = BR_FAILED_REPLY;
3058 return_error_param = ret;
3059 return_error_line = __LINE__;
3060 goto err_translate_failed;
3062 } break;
3064 case BINDER_TYPE_FD: {
3065 struct binder_fd_object *fp = to_binder_fd_object(hdr);
3066 int target_fd = binder_translate_fd(fp->fd, t, thread,
3067 in_reply_to);
3069 if (target_fd < 0) {
3070 return_error = BR_FAILED_REPLY;
3071 return_error_param = target_fd;
3072 return_error_line = __LINE__;
3073 goto err_translate_failed;
3075 fp->pad_binder = 0;
3076 fp->fd = target_fd;
3077 } break;
3078 case BINDER_TYPE_FDA: {
3079 struct binder_fd_array_object *fda =
3080 to_binder_fd_array_object(hdr);
3081 struct binder_buffer_object *parent =
3082 binder_validate_ptr(t->buffer, fda->parent,
3083 off_start,
3084 offp - off_start);
3085 if (!parent) {
3086 binder_user_error("%d:%d got transaction with invalid parent offset or type\n",
3087 proc->pid, thread->pid);
3088 return_error = BR_FAILED_REPLY;
3089 return_error_param = -EINVAL;
3090 return_error_line = __LINE__;
3091 goto err_bad_parent;
3093 if (!binder_validate_fixup(t->buffer, off_start,
3094 parent, fda->parent_offset,
3095 last_fixup_obj,
3096 last_fixup_min_off)) {
3097 binder_user_error("%d:%d got transaction with out-of-order buffer fixup\n",
3098 proc->pid, thread->pid);
3099 return_error = BR_FAILED_REPLY;
3100 return_error_param = -EINVAL;
3101 return_error_line = __LINE__;
3102 goto err_bad_parent;
3104 ret = binder_translate_fd_array(fda, parent, t, thread,
3105 in_reply_to);
3106 if (ret < 0) {
3107 return_error = BR_FAILED_REPLY;
3108 return_error_param = ret;
3109 return_error_line = __LINE__;
3110 goto err_translate_failed;
3112 last_fixup_obj = parent;
3113 last_fixup_min_off =
3114 fda->parent_offset + sizeof(u32) * fda->num_fds;
3115 } break;
3116 case BINDER_TYPE_PTR: {
3117 struct binder_buffer_object *bp =
3118 to_binder_buffer_object(hdr);
3119 size_t buf_left = sg_buf_end - sg_bufp;
3121 if (bp->length > buf_left) {
3122 binder_user_error("%d:%d got transaction with too large buffer\n",
3123 proc->pid, thread->pid);
3124 return_error = BR_FAILED_REPLY;
3125 return_error_param = -EINVAL;
3126 return_error_line = __LINE__;
3127 goto err_bad_offset;
3129 if (copy_from_user(sg_bufp,
3130 (const void __user *)(uintptr_t)
3131 bp->buffer, bp->length)) {
3132 binder_user_error("%d:%d got transaction with invalid offsets ptr\n",
3133 proc->pid, thread->pid);
3134 return_error_param = -EFAULT;
3135 return_error = BR_FAILED_REPLY;
3136 return_error_line = __LINE__;
3137 goto err_copy_data_failed;
3139 /* Fixup buffer pointer to target proc address space */
3140 bp->buffer = (uintptr_t)sg_bufp +
3141 binder_alloc_get_user_buffer_offset(
3142 &target_proc->alloc);
3143 sg_bufp += ALIGN(bp->length, sizeof(u64));
3145 ret = binder_fixup_parent(t, thread, bp, off_start,
3146 offp - off_start,
3147 last_fixup_obj,
3148 last_fixup_min_off);
3149 if (ret < 0) {
3150 return_error = BR_FAILED_REPLY;
3151 return_error_param = ret;
3152 return_error_line = __LINE__;
3153 goto err_translate_failed;
3155 last_fixup_obj = bp;
3156 last_fixup_min_off = 0;
3157 } break;
3158 default:
3159 binder_user_error("%d:%d got transaction with invalid object type, %x\n",
3160 proc->pid, thread->pid, hdr->type);
3161 return_error = BR_FAILED_REPLY;
3162 return_error_param = -EINVAL;
3163 return_error_line = __LINE__;
3164 goto err_bad_object_type;
3167 tcomplete->type = BINDER_WORK_TRANSACTION_COMPLETE;
3168 t->work.type = BINDER_WORK_TRANSACTION;
3170 if (reply) {
3171 binder_enqueue_thread_work(thread, tcomplete);
3172 binder_inner_proc_lock(target_proc);
3173 if (target_thread->is_dead) {
3174 binder_inner_proc_unlock(target_proc);
3175 goto err_dead_proc_or_thread;
3177 BUG_ON(t->buffer->async_transaction != 0);
3178 binder_pop_transaction_ilocked(target_thread, in_reply_to);
3179 binder_enqueue_thread_work_ilocked(target_thread, &t->work);
3180 binder_inner_proc_unlock(target_proc);
3181 wake_up_interruptible_sync(&target_thread->wait);
3182 binder_free_transaction(in_reply_to);
3183 } else if (!(t->flags & TF_ONE_WAY)) {
3184 BUG_ON(t->buffer->async_transaction != 0);
3185 binder_inner_proc_lock(proc);
3186 		/*
3187 		 * Defer the TRANSACTION_COMPLETE, so we don't return to
3188 		 * userspace immediately; this allows the target process to
3189 		 * immediately start processing this transaction, reducing
3190 		 * latency. We will then return the TRANSACTION_COMPLETE when
3191 		 * the target replies (or there is an error).
3192 		 */
3193 binder_enqueue_deferred_thread_work_ilocked(thread, tcomplete);
3194 t->need_reply = 1;
3195 t->from_parent = thread->transaction_stack;
3196 thread->transaction_stack = t;
3197 binder_inner_proc_unlock(proc);
3198 if (!binder_proc_transaction(t, target_proc, target_thread)) {
3199 binder_inner_proc_lock(proc);
3200 binder_pop_transaction_ilocked(thread, t);
3201 binder_inner_proc_unlock(proc);
3202 goto err_dead_proc_or_thread;
3204 } else {
3205 BUG_ON(target_node == NULL);
3206 BUG_ON(t->buffer->async_transaction != 1);
3207 binder_enqueue_thread_work(thread, tcomplete);
3208 if (!binder_proc_transaction(t, target_proc, NULL))
3209 goto err_dead_proc_or_thread;
3211 if (target_thread)
3212 binder_thread_dec_tmpref(target_thread);
3213 binder_proc_dec_tmpref(target_proc);
3214 if (target_node)
3215 binder_dec_node_tmpref(target_node);
3216 	/*
3217 	 * write barrier to synchronize with initialization
3218 	 * of log entry
3219 	 */
3220 	smp_wmb();
3221 WRITE_ONCE(e->debug_id_done, t_debug_id);
3222 return;
3224 err_dead_proc_or_thread:
3225 return_error = BR_DEAD_REPLY;
3226 return_error_line = __LINE__;
3227 binder_dequeue_work(proc, tcomplete);
3228 err_translate_failed:
3229 err_bad_object_type:
3230 err_bad_offset:
3231 err_bad_parent:
3232 err_copy_data_failed:
3233 trace_binder_transaction_failed_buffer_release(t->buffer);
3234 binder_transaction_buffer_release(target_proc, t->buffer, offp);
3235 if (target_node)
3236 binder_dec_node_tmpref(target_node);
3237 target_node = NULL;
3238 t->buffer->transaction = NULL;
3239 binder_alloc_free_buf(&target_proc->alloc, t->buffer);
3240 err_binder_alloc_buf_failed:
3241 kfree(tcomplete);
3242 binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
3243 err_alloc_tcomplete_failed:
3244 kfree(t);
3245 binder_stats_deleted(BINDER_STAT_TRANSACTION);
3246 err_alloc_t_failed:
3247 err_bad_call_stack:
3248 err_empty_call_stack:
3249 err_dead_binder:
3250 err_invalid_target_handle:
3251 if (target_thread)
3252 binder_thread_dec_tmpref(target_thread);
3253 if (target_proc)
3254 binder_proc_dec_tmpref(target_proc);
3255 if (target_node) {
3256 binder_dec_node(target_node, 1, 0);
3257 binder_dec_node_tmpref(target_node);
3260 binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
3261 "%d:%d transaction failed %d/%d, size %lld-%lld line %d\n",
3262 proc->pid, thread->pid, return_error, return_error_param,
3263 (u64)tr->data_size, (u64)tr->offsets_size,
3264 return_error_line);
3266 	{
3267 		struct binder_transaction_log_entry *fe;
3269 e->return_error = return_error;
3270 e->return_error_param = return_error_param;
3271 e->return_error_line = return_error_line;
3272 fe = binder_transaction_log_add(&binder_transaction_log_failed);
3273 *fe = *e;
3274 		/*
3275 		 * write barrier to synchronize with initialization
3276 		 * of log entry
3277 		 */
3278 		smp_wmb();
3279 		WRITE_ONCE(e->debug_id_done, t_debug_id);
3280 		WRITE_ONCE(fe->debug_id_done, t_debug_id);
3281 	}
3283 BUG_ON(thread->return_error.cmd != BR_OK);
3284 if (in_reply_to) {
3285 thread->return_error.cmd = BR_TRANSACTION_COMPLETE;
3286 binder_enqueue_thread_work(thread, &thread->return_error.work);
3287 binder_send_failed_reply(in_reply_to, return_error);
3288 } else {
3289 thread->return_error.cmd = return_error;
3290 		binder_enqueue_thread_work(thread, &thread->return_error.work);
3291 	}
3292 }
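/*
 * Editor's note: binder_transaction() is reached from binder_thread_write()
 * (below) when userspace issues BC_TRANSACTION or BC_REPLY. A hedged sketch
 * of the userspace side: a 32-bit command word followed by a
 * binder_transaction_data is packed into a write buffer and handed to the
 * BINDER_WRITE_READ ioctl. Error handling is elided and the handle/code
 * values are hypothetical:
 */
#if 0 /* illustrative sketch, not driver code */
#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/android/binder.h>

static int send_transaction(int binder_fd, uint32_t handle,
			    const void *data, size_t len)
{
	struct binder_transaction_data tr;
	struct binder_write_read bwr;
	uint32_t cmd = BC_TRANSACTION;
	char wbuf[sizeof(cmd) + sizeof(tr)];

	memset(&tr, 0, sizeof(tr));
	tr.target.handle = handle;
	tr.code = 1;				/* hypothetical method code */
	tr.data_size = len;
	tr.data.ptr.buffer = (binder_uintptr_t)(uintptr_t)data;
	/* no object offsets in this minimal example */

	memcpy(wbuf, &cmd, sizeof(cmd));
	memcpy(wbuf + sizeof(cmd), &tr, sizeof(tr));

	memset(&bwr, 0, sizeof(bwr));
	bwr.write_size = sizeof(wbuf);
	bwr.write_buffer = (binder_uintptr_t)(uintptr_t)wbuf;
	return ioctl(binder_fd, BINDER_WRITE_READ, &bwr);
}
#endif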
3294 static int binder_thread_write(struct binder_proc *proc,
3295 struct binder_thread *thread,
3296 binder_uintptr_t binder_buffer, size_t size,
3297 binder_size_t *consumed)
3299 uint32_t cmd;
3300 struct binder_context *context = proc->context;
3301 void __user *buffer = (void __user *)(uintptr_t)binder_buffer;
3302 void __user *ptr = buffer + *consumed;
3303 void __user *end = buffer + size;
3305 while (ptr < end && thread->return_error.cmd == BR_OK) {
3306 int ret;
3308 if (get_user(cmd, (uint32_t __user *)ptr))
3309 return -EFAULT;
3310 ptr += sizeof(uint32_t);
3311 trace_binder_command(cmd);
3312 if (_IOC_NR(cmd) < ARRAY_SIZE(binder_stats.bc)) {
3313 atomic_inc(&binder_stats.bc[_IOC_NR(cmd)]);
3314 atomic_inc(&proc->stats.bc[_IOC_NR(cmd)]);
3315 atomic_inc(&thread->stats.bc[_IOC_NR(cmd)]);
3317 switch (cmd) {
3318 case BC_INCREFS:
3319 case BC_ACQUIRE:
3320 case BC_RELEASE:
3321 case BC_DECREFS: {
3322 uint32_t target;
3323 const char *debug_string;
3324 bool strong = cmd == BC_ACQUIRE || cmd == BC_RELEASE;
3325 bool increment = cmd == BC_INCREFS || cmd == BC_ACQUIRE;
3326 struct binder_ref_data rdata;
3328 if (get_user(target, (uint32_t __user *)ptr))
3329 return -EFAULT;
3331 ptr += sizeof(uint32_t);
3332 ret = -1;
3333 if (increment && !target) {
3334 struct binder_node *ctx_mgr_node;
3335 mutex_lock(&context->context_mgr_node_lock);
3336 ctx_mgr_node = context->binder_context_mgr_node;
3337 if (ctx_mgr_node)
3338 ret = binder_inc_ref_for_node(
3339 proc, ctx_mgr_node,
3340 strong, NULL, &rdata);
3341 mutex_unlock(&context->context_mgr_node_lock);
3343 if (ret)
3344 ret = binder_update_ref_for_handle(
3345 proc, target, increment, strong,
3346 &rdata);
3347 if (!ret && rdata.desc != target) {
3348 binder_user_error("%d:%d tried to acquire reference to desc %d, got %d instead\n",
3349 proc->pid, thread->pid,
3350 target, rdata.desc);
3352 switch (cmd) {
3353 case BC_INCREFS:
3354 debug_string = "IncRefs";
3355 break;
3356 case BC_ACQUIRE:
3357 debug_string = "Acquire";
3358 break;
3359 case BC_RELEASE:
3360 debug_string = "Release";
3361 break;
3362 case BC_DECREFS:
3363 default:
3364 debug_string = "DecRefs";
3365 break;
3367 if (ret) {
3368 binder_user_error("%d:%d %s %d refcount change on invalid ref %d ret %d\n",
3369 proc->pid, thread->pid, debug_string,
3370 strong, target, ret);
3371 break;
3373 binder_debug(BINDER_DEBUG_USER_REFS,
3374 "%d:%d %s ref %d desc %d s %d w %d\n",
3375 proc->pid, thread->pid, debug_string,
3376 rdata.debug_id, rdata.desc, rdata.strong,
3377 rdata.weak);
3378 break;
3380 case BC_INCREFS_DONE:
3381 case BC_ACQUIRE_DONE: {
3382 binder_uintptr_t node_ptr;
3383 binder_uintptr_t cookie;
3384 struct binder_node *node;
3385 bool free_node;
3387 if (get_user(node_ptr, (binder_uintptr_t __user *)ptr))
3388 return -EFAULT;
3389 ptr += sizeof(binder_uintptr_t);
3390 if (get_user(cookie, (binder_uintptr_t __user *)ptr))
3391 return -EFAULT;
3392 ptr += sizeof(binder_uintptr_t);
3393 node = binder_get_node(proc, node_ptr);
3394 if (node == NULL) {
3395 binder_user_error("%d:%d %s u%016llx no match\n",
3396 proc->pid, thread->pid,
3397 cmd == BC_INCREFS_DONE ?
3398 "BC_INCREFS_DONE" :
3399 "BC_ACQUIRE_DONE",
3400 (u64)node_ptr);
3401 break;
3403 if (cookie != node->cookie) {
3404 binder_user_error("%d:%d %s u%016llx node %d cookie mismatch %016llx != %016llx\n",
3405 proc->pid, thread->pid,
3406 cmd == BC_INCREFS_DONE ?
3407 "BC_INCREFS_DONE" : "BC_ACQUIRE_DONE",
3408 (u64)node_ptr, node->debug_id,
3409 (u64)cookie, (u64)node->cookie);
3410 binder_put_node(node);
3411 break;
3413 binder_node_inner_lock(node);
3414 if (cmd == BC_ACQUIRE_DONE) {
3415 if (node->pending_strong_ref == 0) {
3416 binder_user_error("%d:%d BC_ACQUIRE_DONE node %d has no pending acquire request\n",
3417 proc->pid, thread->pid,
3418 node->debug_id);
3419 binder_node_inner_unlock(node);
3420 binder_put_node(node);
3421 break;
3423 node->pending_strong_ref = 0;
3424 } else {
3425 if (node->pending_weak_ref == 0) {
3426 binder_user_error("%d:%d BC_INCREFS_DONE node %d has no pending increfs request\n",
3427 proc->pid, thread->pid,
3428 node->debug_id);
3429 binder_node_inner_unlock(node);
3430 binder_put_node(node);
3431 break;
3433 node->pending_weak_ref = 0;
3435 free_node = binder_dec_node_nilocked(node,
3436 cmd == BC_ACQUIRE_DONE, 0);
3437 WARN_ON(free_node);
3438 binder_debug(BINDER_DEBUG_USER_REFS,
3439 "%d:%d %s node %d ls %d lw %d tr %d\n",
3440 proc->pid, thread->pid,
3441 cmd == BC_INCREFS_DONE ? "BC_INCREFS_DONE" : "BC_ACQUIRE_DONE",
3442 node->debug_id, node->local_strong_refs,
3443 node->local_weak_refs, node->tmp_refs);
3444 binder_node_inner_unlock(node);
3445 binder_put_node(node);
3446 break;
3448 case BC_ATTEMPT_ACQUIRE:
3449 pr_err("BC_ATTEMPT_ACQUIRE not supported\n");
3450 return -EINVAL;
3451 case BC_ACQUIRE_RESULT:
3452 pr_err("BC_ACQUIRE_RESULT not supported\n");
3453 return -EINVAL;
3455 case BC_FREE_BUFFER: {
3456 binder_uintptr_t data_ptr;
3457 struct binder_buffer *buffer;
3459 if (get_user(data_ptr, (binder_uintptr_t __user *)ptr))
3460 return -EFAULT;
3461 ptr += sizeof(binder_uintptr_t);
3463 buffer = binder_alloc_prepare_to_free(&proc->alloc,
3464 data_ptr);
3465 if (buffer == NULL) {
3466 binder_user_error("%d:%d BC_FREE_BUFFER u%016llx no match\n",
3467 proc->pid, thread->pid, (u64)data_ptr);
3468 break;
3470 if (!buffer->allow_user_free) {
3471 binder_user_error("%d:%d BC_FREE_BUFFER u%016llx matched unreturned buffer\n",
3472 proc->pid, thread->pid, (u64)data_ptr);
3473 break;
3475 binder_debug(BINDER_DEBUG_FREE_BUFFER,
3476 "%d:%d BC_FREE_BUFFER u%016llx found buffer %d for %s transaction\n",
3477 proc->pid, thread->pid, (u64)data_ptr,
3478 buffer->debug_id,
3479 buffer->transaction ? "active" : "finished");
3481 if (buffer->transaction) {
3482 buffer->transaction->buffer = NULL;
3483 buffer->transaction = NULL;
3485 if (buffer->async_transaction && buffer->target_node) {
3486 struct binder_node *buf_node;
3487 struct binder_work *w;
3489 buf_node = buffer->target_node;
3490 binder_node_inner_lock(buf_node);
3491 BUG_ON(!buf_node->has_async_transaction);
3492 BUG_ON(buf_node->proc != proc);
3493 w = binder_dequeue_work_head_ilocked(
3494 &buf_node->async_todo);
3495 if (!w) {
3496 buf_node->has_async_transaction = false;
3497 } else {
3498 binder_enqueue_work_ilocked(
3499 w, &proc->todo);
3500 binder_wakeup_proc_ilocked(proc);
3502 binder_node_inner_unlock(buf_node);
3504 trace_binder_transaction_buffer_release(buffer);
3505 binder_transaction_buffer_release(proc, buffer, NULL);
3506 binder_alloc_free_buf(&proc->alloc, buffer);
3507 break;
3510 case BC_TRANSACTION_SG:
3511 case BC_REPLY_SG: {
3512 struct binder_transaction_data_sg tr;
3514 if (copy_from_user(&tr, ptr, sizeof(tr)))
3515 return -EFAULT;
3516 ptr += sizeof(tr);
3517 binder_transaction(proc, thread, &tr.transaction_data,
3518 cmd == BC_REPLY_SG, tr.buffers_size);
3519 break;
3521 case BC_TRANSACTION:
3522 case BC_REPLY: {
3523 struct binder_transaction_data tr;
3525 if (copy_from_user(&tr, ptr, sizeof(tr)))
3526 return -EFAULT;
3527 ptr += sizeof(tr);
3528 binder_transaction(proc, thread, &tr,
3529 cmd == BC_REPLY, 0);
3530 break;
3533 case BC_REGISTER_LOOPER:
3534 binder_debug(BINDER_DEBUG_THREADS,
3535 "%d:%d BC_REGISTER_LOOPER\n",
3536 proc->pid, thread->pid);
3537 binder_inner_proc_lock(proc);
3538 if (thread->looper & BINDER_LOOPER_STATE_ENTERED) {
3539 thread->looper |= BINDER_LOOPER_STATE_INVALID;
3540 binder_user_error("%d:%d ERROR: BC_REGISTER_LOOPER called after BC_ENTER_LOOPER\n",
3541 proc->pid, thread->pid);
3542 } else if (proc->requested_threads == 0) {
3543 thread->looper |= BINDER_LOOPER_STATE_INVALID;
3544 binder_user_error("%d:%d ERROR: BC_REGISTER_LOOPER called without request\n",
3545 proc->pid, thread->pid);
3546 } else {
3547 proc->requested_threads--;
3548 proc->requested_threads_started++;
3550 thread->looper |= BINDER_LOOPER_STATE_REGISTERED;
3551 binder_inner_proc_unlock(proc);
3552 break;
3553 case BC_ENTER_LOOPER:
3554 binder_debug(BINDER_DEBUG_THREADS,
3555 "%d:%d BC_ENTER_LOOPER\n",
3556 proc->pid, thread->pid);
3557 if (thread->looper & BINDER_LOOPER_STATE_REGISTERED) {
3558 thread->looper |= BINDER_LOOPER_STATE_INVALID;
3559 binder_user_error("%d:%d ERROR: BC_ENTER_LOOPER called after BC_REGISTER_LOOPER\n",
3560 proc->pid, thread->pid);
3562 thread->looper |= BINDER_LOOPER_STATE_ENTERED;
3563 break;
3564 case BC_EXIT_LOOPER:
3565 binder_debug(BINDER_DEBUG_THREADS,
3566 "%d:%d BC_EXIT_LOOPER\n",
3567 proc->pid, thread->pid);
3568 thread->looper |= BINDER_LOOPER_STATE_EXITED;
3569 break;
3571 case BC_REQUEST_DEATH_NOTIFICATION:
3572 case BC_CLEAR_DEATH_NOTIFICATION: {
3573 uint32_t target;
3574 binder_uintptr_t cookie;
3575 struct binder_ref *ref;
3576 struct binder_ref_death *death = NULL;
3578 if (get_user(target, (uint32_t __user *)ptr))
3579 return -EFAULT;
3580 ptr += sizeof(uint32_t);
3581 if (get_user(cookie, (binder_uintptr_t __user *)ptr))
3582 return -EFAULT;
3583 ptr += sizeof(binder_uintptr_t);
3584 if (cmd == BC_REQUEST_DEATH_NOTIFICATION) {
3585 				/*
3586 				 * Allocate memory for death notification
3587 				 * before taking lock
3588 				 */
3589 death = kzalloc(sizeof(*death), GFP_KERNEL);
3590 if (death == NULL) {
3591 WARN_ON(thread->return_error.cmd !=
3592 BR_OK);
3593 thread->return_error.cmd = BR_ERROR;
3594 binder_enqueue_thread_work(
3595 thread,
3596 &thread->return_error.work);
3597 binder_debug(
3598 BINDER_DEBUG_FAILED_TRANSACTION,
3599 "%d:%d BC_REQUEST_DEATH_NOTIFICATION failed\n",
3600 proc->pid, thread->pid);
3601 break;
3604 binder_proc_lock(proc);
3605 ref = binder_get_ref_olocked(proc, target, false);
3606 if (ref == NULL) {
3607 binder_user_error("%d:%d %s invalid ref %d\n",
3608 proc->pid, thread->pid,
3609 cmd == BC_REQUEST_DEATH_NOTIFICATION ?
3610 "BC_REQUEST_DEATH_NOTIFICATION" :
3611 "BC_CLEAR_DEATH_NOTIFICATION",
3612 target);
3613 binder_proc_unlock(proc);
3614 kfree(death);
3615 break;
3618 binder_debug(BINDER_DEBUG_DEATH_NOTIFICATION,
3619 "%d:%d %s %016llx ref %d desc %d s %d w %d for node %d\n",
3620 proc->pid, thread->pid,
3621 cmd == BC_REQUEST_DEATH_NOTIFICATION ?
3622 "BC_REQUEST_DEATH_NOTIFICATION" :
3623 "BC_CLEAR_DEATH_NOTIFICATION",
3624 (u64)cookie, ref->data.debug_id,
3625 ref->data.desc, ref->data.strong,
3626 ref->data.weak, ref->node->debug_id);
3628 binder_node_lock(ref->node);
3629 if (cmd == BC_REQUEST_DEATH_NOTIFICATION) {
3630 if (ref->death) {
3631 binder_user_error("%d:%d BC_REQUEST_DEATH_NOTIFICATION death notification already set\n",
3632 proc->pid, thread->pid);
3633 binder_node_unlock(ref->node);
3634 binder_proc_unlock(proc);
3635 kfree(death);
3636 break;
3638 binder_stats_created(BINDER_STAT_DEATH);
3639 INIT_LIST_HEAD(&death->work.entry);
3640 death->cookie = cookie;
3641 ref->death = death;
3642 if (ref->node->proc == NULL) {
3643 ref->death->work.type = BINDER_WORK_DEAD_BINDER;
3645 binder_inner_proc_lock(proc);
3646 binder_enqueue_work_ilocked(
3647 &ref->death->work, &proc->todo);
3648 binder_wakeup_proc_ilocked(proc);
3649 binder_inner_proc_unlock(proc);
3651 } else {
3652 if (ref->death == NULL) {
3653 binder_user_error("%d:%d BC_CLEAR_DEATH_NOTIFICATION death notification not active\n",
3654 proc->pid, thread->pid);
3655 binder_node_unlock(ref->node);
3656 binder_proc_unlock(proc);
3657 break;
3659 death = ref->death;
3660 if (death->cookie != cookie) {
3661 binder_user_error("%d:%d BC_CLEAR_DEATH_NOTIFICATION death notification cookie mismatch %016llx != %016llx\n",
3662 proc->pid, thread->pid,
3663 (u64)death->cookie,
3664 (u64)cookie);
3665 binder_node_unlock(ref->node);
3666 binder_proc_unlock(proc);
3667 break;
3669 ref->death = NULL;
3670 binder_inner_proc_lock(proc);
3671 if (list_empty(&death->work.entry)) {
3672 death->work.type = BINDER_WORK_CLEAR_DEATH_NOTIFICATION;
3673 if (thread->looper &
3674 (BINDER_LOOPER_STATE_REGISTERED |
3675 BINDER_LOOPER_STATE_ENTERED))
3676 binder_enqueue_thread_work_ilocked(
3677 thread,
3678 &death->work);
3679 else {
3680 binder_enqueue_work_ilocked(
3681 &death->work,
3682 &proc->todo);
3683 binder_wakeup_proc_ilocked(
3684 proc);
3686 } else {
3687 BUG_ON(death->work.type != BINDER_WORK_DEAD_BINDER);
3688 death->work.type = BINDER_WORK_DEAD_BINDER_AND_CLEAR;
3690 binder_inner_proc_unlock(proc);
3692 binder_node_unlock(ref->node);
3693 binder_proc_unlock(proc);
3694 } break;
3695 case BC_DEAD_BINDER_DONE: {
3696 struct binder_work *w;
3697 binder_uintptr_t cookie;
3698 struct binder_ref_death *death = NULL;
3700 if (get_user(cookie, (binder_uintptr_t __user *)ptr))
3701 return -EFAULT;
3703 ptr += sizeof(cookie);
3704 binder_inner_proc_lock(proc);
3705 list_for_each_entry(w, &proc->delivered_death,
3706 entry) {
3707 struct binder_ref_death *tmp_death =
3708 container_of(w,
3709 struct binder_ref_death,
3710 work);
3712 if (tmp_death->cookie == cookie) {
3713 death = tmp_death;
3714 break;
3717 binder_debug(BINDER_DEBUG_DEAD_BINDER,
3718 "%d:%d BC_DEAD_BINDER_DONE %016llx found %pK\n",
3719 proc->pid, thread->pid, (u64)cookie,
3720 death);
3721 if (death == NULL) {
3722 binder_user_error("%d:%d BC_DEAD_BINDER_DONE %016llx not found\n",
3723 proc->pid, thread->pid, (u64)cookie);
3724 binder_inner_proc_unlock(proc);
3725 break;
3727 binder_dequeue_work_ilocked(&death->work);
3728 if (death->work.type == BINDER_WORK_DEAD_BINDER_AND_CLEAR) {
3729 death->work.type = BINDER_WORK_CLEAR_DEATH_NOTIFICATION;
3730 if (thread->looper &
3731 (BINDER_LOOPER_STATE_REGISTERED |
3732 BINDER_LOOPER_STATE_ENTERED))
3733 binder_enqueue_thread_work_ilocked(
3734 thread, &death->work);
3735 else {
3736 binder_enqueue_work_ilocked(
3737 &death->work,
3738 &proc->todo);
3739 binder_wakeup_proc_ilocked(proc);
3742 binder_inner_proc_unlock(proc);
3743 } break;
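/*
 * A minimal userspace sketch (not driver code) of the
 * death-notification handshake handled above; fd, handle and cookie
 * are illustrative values owned by the client:
 *
 *   struct {
 *           uint32_t cmd;
 *           struct binder_handle_cookie hc;
 *   } __attribute__((packed)) req = {
 *           .cmd = BC_REQUEST_DEATH_NOTIFICATION,
 *           .hc = { .handle = handle, .cookie = cookie },
 *   };
 *   struct binder_write_read bwr = {
 *           .write_size = sizeof(req),
 *           .write_buffer = (binder_uintptr_t)&req,
 *   };
 *   ioctl(fd, BINDER_WRITE_READ, &bwr);
 *
 * When a later read returns BR_DEAD_BINDER carrying this cookie, the
 * client acknowledges by writing BC_DEAD_BINDER_DONE followed by the
 * same cookie, which is what the BC_DEAD_BINDER_DONE case above
 * consumes from proc->delivered_death.
 */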
3745 default:
3746 pr_err("%d:%d unknown command %d\n",
3747 proc->pid, thread->pid, cmd);
3748 return -EINVAL;
3750 *consumed = ptr - buffer;
3752 return 0;
3755 static void binder_stat_br(struct binder_proc *proc,
3756 struct binder_thread *thread, uint32_t cmd)
3758 trace_binder_return(cmd);
3759 if (_IOC_NR(cmd) < ARRAY_SIZE(binder_stats.br)) {
3760 atomic_inc(&binder_stats.br[_IOC_NR(cmd)]);
3761 atomic_inc(&proc->stats.br[_IOC_NR(cmd)]);
3762 atomic_inc(&thread->stats.br[_IOC_NR(cmd)]);
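/*
 * The BR_* return codes are _IO()-encoded in the binder UAPI header,
 * so _IOC_NR() recovers their sequential ordinal and can index the
 * fixed-size stats arrays; e.g. BR_TRANSACTION is
 * _IOR('r', 2, struct binder_transaction_data), so its count lives
 * in stats.br[2].
 */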
3766 static int binder_put_node_cmd(struct binder_proc *proc,
3767 struct binder_thread *thread,
3768 void __user **ptrp,
3769 binder_uintptr_t node_ptr,
3770 binder_uintptr_t node_cookie,
3771 int node_debug_id,
3772 uint32_t cmd, const char *cmd_name)
3774 void __user *ptr = *ptrp;
3776 if (put_user(cmd, (uint32_t __user *)ptr))
3777 return -EFAULT;
3778 ptr += sizeof(uint32_t);
3780 if (put_user(node_ptr, (binder_uintptr_t __user *)ptr))
3781 return -EFAULT;
3782 ptr += sizeof(binder_uintptr_t);
3784 if (put_user(node_cookie, (binder_uintptr_t __user *)ptr))
3785 return -EFAULT;
3786 ptr += sizeof(binder_uintptr_t);
3788 binder_stat_br(proc, thread, cmd);
3789 binder_debug(BINDER_DEBUG_USER_REFS, "%d:%d %s %d u%016llx c%016llx\n",
3790 proc->pid, thread->pid, cmd_name, node_debug_id,
3791 (u64)node_ptr, (u64)node_cookie);
3793 *ptrp = ptr;
3794 return 0;
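/*
 * Layout sketch of what binder_put_node_cmd() appends to the read
 * buffer, 20 bytes per command on all ABIs since binder_uintptr_t is
 * always 64 bits:
 *
 *   offset  0: uint32_t          cmd (BR_INCREFS and friends)
 *   offset  4: binder_uintptr_t  node ptr
 *   offset 12: binder_uintptr_t  node cookie
 *
 * Userspace is expected to answer BR_INCREFS/BR_ACQUIRE with
 * BC_INCREFS_DONE/BC_ACQUIRE_DONE carrying the same ptr and cookie.
 */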
3797 static int binder_wait_for_work(struct binder_thread *thread,
3798 bool do_proc_work)
3800 DEFINE_WAIT(wait);
3801 struct binder_proc *proc = thread->proc;
3802 int ret = 0;
3804 freezer_do_not_count();
3805 binder_inner_proc_lock(proc);
3806 for (;;) {
3807 prepare_to_wait(&thread->wait, &wait, TASK_INTERRUPTIBLE);
3808 if (binder_has_work_ilocked(thread, do_proc_work))
3809 break;
3810 if (do_proc_work)
3811 list_add(&thread->waiting_thread_node,
3812 &proc->waiting_threads);
3813 binder_inner_proc_unlock(proc);
3814 schedule();
3815 binder_inner_proc_lock(proc);
3816 list_del_init(&thread->waiting_thread_node);
3817 if (signal_pending(current)) {
3818 ret = -ERESTARTSYS;
3819 break;
3822 finish_wait(&thread->wait, &wait);
3823 binder_inner_proc_unlock(proc);
3824 freezer_count();
3826 return ret;
3829 static int binder_thread_read(struct binder_proc *proc,
3830 struct binder_thread *thread,
3831 binder_uintptr_t binder_buffer, size_t size,
3832 binder_size_t *consumed, int non_block)
3834 void __user *buffer = (void __user *)(uintptr_t)binder_buffer;
3835 void __user *ptr = buffer + *consumed;
3836 void __user *end = buffer + size;
3838 int ret = 0;
3839 int wait_for_proc_work;
3841 if (*consumed == 0) {
3842 if (put_user(BR_NOOP, (uint32_t __user *)ptr))
3843 return -EFAULT;
3844 ptr += sizeof(uint32_t);
3847 retry:
3848 binder_inner_proc_lock(proc);
3849 wait_for_proc_work = binder_available_for_proc_work_ilocked(thread);
3850 binder_inner_proc_unlock(proc);
3852 thread->looper |= BINDER_LOOPER_STATE_WAITING;
3854 trace_binder_wait_for_work(wait_for_proc_work,
3855 !!thread->transaction_stack,
3856 !binder_worklist_empty(proc, &thread->todo));
3857 if (wait_for_proc_work) {
3858 if (!(thread->looper & (BINDER_LOOPER_STATE_REGISTERED |
3859 BINDER_LOOPER_STATE_ENTERED))) {
3860 binder_user_error("%d:%d ERROR: Thread waiting for process work before calling BC_REGISTER_LOOPER or BC_ENTER_LOOPER (state %x)\n",
3861 proc->pid, thread->pid, thread->looper);
3862 wait_event_interruptible(binder_user_error_wait,
3863 binder_stop_on_user_error < 2);
3865 binder_set_nice(proc->default_priority);
3868 if (non_block) {
3869 if (!binder_has_work(thread, wait_for_proc_work))
3870 ret = -EAGAIN;
3871 } else {
3872 ret = binder_wait_for_work(thread, wait_for_proc_work);
3875 thread->looper &= ~BINDER_LOOPER_STATE_WAITING;
3877 if (ret)
3878 return ret;
3880 while (1) {
3881 uint32_t cmd;
3882 struct binder_transaction_data tr;
3883 struct binder_work *w = NULL;
3884 struct list_head *list = NULL;
3885 struct binder_transaction *t = NULL;
3886 struct binder_thread *t_from;
3888 binder_inner_proc_lock(proc);
3889 if (!binder_worklist_empty_ilocked(&thread->todo))
3890 list = &thread->todo;
3891 else if (!binder_worklist_empty_ilocked(&proc->todo) &&
3892 wait_for_proc_work)
3893 list = &proc->todo;
3894 else {
3895 binder_inner_proc_unlock(proc);
3897 /* no data added */
3898 if (ptr - buffer == 4 && !thread->looper_need_return)
3899 goto retry;
3900 break;
3903 if (end - ptr < sizeof(tr) + 4) {
3904 binder_inner_proc_unlock(proc);
3905 break;
3907 w = binder_dequeue_work_head_ilocked(list);
3908 if (binder_worklist_empty_ilocked(&thread->todo))
3909 thread->process_todo = false;
3911 switch (w->type) {
3912 case BINDER_WORK_TRANSACTION: {
3913 binder_inner_proc_unlock(proc);
3914 t = container_of(w, struct binder_transaction, work);
3915 } break;
3916 case BINDER_WORK_RETURN_ERROR: {
3917 struct binder_error *e = container_of(
3918 w, struct binder_error, work);
3920 WARN_ON(e->cmd == BR_OK);
3921 binder_inner_proc_unlock(proc);
3922 if (put_user(e->cmd, (uint32_t __user *)ptr))
3923 return -EFAULT;
3924 cmd = e->cmd;
3925 e->cmd = BR_OK;
3926 ptr += sizeof(uint32_t);
3928 binder_stat_br(proc, thread, cmd);
3929 } break;
3930 case BINDER_WORK_TRANSACTION_COMPLETE: {
3931 binder_inner_proc_unlock(proc);
3932 cmd = BR_TRANSACTION_COMPLETE;
3933 if (put_user(cmd, (uint32_t __user *)ptr))
3934 return -EFAULT;
3935 ptr += sizeof(uint32_t);
3937 binder_stat_br(proc, thread, cmd);
3938 binder_debug(BINDER_DEBUG_TRANSACTION_COMPLETE,
3939 "%d:%d BR_TRANSACTION_COMPLETE\n",
3940 proc->pid, thread->pid);
3941 kfree(w);
3942 binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
3943 } break;
3944 case BINDER_WORK_NODE: {
3945 struct binder_node *node = container_of(w, struct binder_node, work);
3946 int strong, weak;
3947 binder_uintptr_t node_ptr = node->ptr;
3948 binder_uintptr_t node_cookie = node->cookie;
3949 int node_debug_id = node->debug_id;
3950 int has_weak_ref;
3951 int has_strong_ref;
3952 void __user *orig_ptr = ptr;
3954 BUG_ON(proc != node->proc);
3955 strong = node->internal_strong_refs ||
3956 node->local_strong_refs;
3957 weak = !hlist_empty(&node->refs) ||
3958 node->local_weak_refs ||
3959 node->tmp_refs || strong;
3960 has_strong_ref = node->has_strong_ref;
3961 has_weak_ref = node->has_weak_ref;
3963 if (weak && !has_weak_ref) {
3964 node->has_weak_ref = 1;
3965 node->pending_weak_ref = 1;
3966 node->local_weak_refs++;
3968 if (strong && !has_strong_ref) {
3969 node->has_strong_ref = 1;
3970 node->pending_strong_ref = 1;
3971 node->local_strong_refs++;
3973 if (!strong && has_strong_ref)
3974 node->has_strong_ref = 0;
3975 if (!weak && has_weak_ref)
3976 node->has_weak_ref = 0;
3977 if (!weak && !strong) {
3978 binder_debug(BINDER_DEBUG_INTERNAL_REFS,
3979 "%d:%d node %d u%016llx c%016llx deleted\n",
3980 proc->pid, thread->pid,
3981 node_debug_id,
3982 (u64)node_ptr,
3983 (u64)node_cookie);
3984 rb_erase(&node->rb_node, &proc->nodes);
3985 binder_inner_proc_unlock(proc);
3986 binder_node_lock(node);
3987 /*
3988 * Acquire the node lock before freeing the
3989 * node to serialize with other threads that
3990 * may have been holding the node lock while
3991 * decrementing this node (avoids race where
3992 * this thread frees while the other thread
3993 * is unlocking the node after the final
3994 * decrement)
3995 */
3996 binder_node_unlock(node);
3997 binder_free_node(node);
3998 } else
3999 binder_inner_proc_unlock(proc);
4001 if (weak && !has_weak_ref)
4002 ret = binder_put_node_cmd(
4003 proc, thread, &ptr, node_ptr,
4004 node_cookie, node_debug_id,
4005 BR_INCREFS, "BR_INCREFS");
4006 if (!ret && strong && !has_strong_ref)
4007 ret = binder_put_node_cmd(
4008 proc, thread, &ptr, node_ptr,
4009 node_cookie, node_debug_id,
4010 BR_ACQUIRE, "BR_ACQUIRE");
4011 if (!ret && !strong && has_strong_ref)
4012 ret = binder_put_node_cmd(
4013 proc, thread, &ptr, node_ptr,
4014 node_cookie, node_debug_id,
4015 BR_RELEASE, "BR_RELEASE");
4016 if (!ret && !weak && has_weak_ref)
4017 ret = binder_put_node_cmd(
4018 proc, thread, &ptr, node_ptr,
4019 node_cookie, node_debug_id,
4020 BR_DECREFS, "BR_DECREFS");
4021 if (orig_ptr == ptr)
4022 binder_debug(BINDER_DEBUG_INTERNAL_REFS,
4023 "%d:%d node %d u%016llx c%016llx state unchanged\n",
4024 proc->pid, thread->pid,
4025 node_debug_id,
4026 (u64)node_ptr,
4027 (u64)node_cookie);
4028 if (ret)
4029 return ret;
4030 } break;
4031 case BINDER_WORK_DEAD_BINDER:
4032 case BINDER_WORK_DEAD_BINDER_AND_CLEAR:
4033 case BINDER_WORK_CLEAR_DEATH_NOTIFICATION: {
4034 struct binder_ref_death *death;
4035 uint32_t cmd;
4036 binder_uintptr_t cookie;
4038 death = container_of(w, struct binder_ref_death, work);
4039 if (w->type == BINDER_WORK_CLEAR_DEATH_NOTIFICATION)
4040 cmd = BR_CLEAR_DEATH_NOTIFICATION_DONE;
4041 else
4042 cmd = BR_DEAD_BINDER;
4043 cookie = death->cookie;
4045 binder_debug(BINDER_DEBUG_DEATH_NOTIFICATION,
4046 "%d:%d %s %016llx\n",
4047 proc->pid, thread->pid,
4048 cmd == BR_DEAD_BINDER ?
4049 "BR_DEAD_BINDER" :
4050 "BR_CLEAR_DEATH_NOTIFICATION_DONE",
4051 (u64)cookie);
4052 if (w->type == BINDER_WORK_CLEAR_DEATH_NOTIFICATION) {
4053 binder_inner_proc_unlock(proc);
4054 kfree(death);
4055 binder_stats_deleted(BINDER_STAT_DEATH);
4056 } else {
4057 binder_enqueue_work_ilocked(
4058 w, &proc->delivered_death);
4059 binder_inner_proc_unlock(proc);
4061 if (put_user(cmd, (uint32_t __user *)ptr))
4062 return -EFAULT;
4063 ptr += sizeof(uint32_t);
4064 if (put_user(cookie,
4065 (binder_uintptr_t __user *)ptr))
4066 return -EFAULT;
4067 ptr += sizeof(binder_uintptr_t);
4068 binder_stat_br(proc, thread, cmd);
4069 if (cmd == BR_DEAD_BINDER)
4070 goto done; /* DEAD_BINDER notifications can cause transactions */
4071 } break;
4074 if (!t)
4075 continue;
4077 BUG_ON(t->buffer == NULL);
4078 if (t->buffer->target_node) {
4079 struct binder_node *target_node = t->buffer->target_node;
4081 tr.target.ptr = target_node->ptr;
4082 tr.cookie = target_node->cookie;
4083 t->saved_priority = task_nice(current);
4084 if (t->priority < target_node->min_priority &&
4085 !(t->flags & TF_ONE_WAY))
4086 binder_set_nice(t->priority);
4087 else if (!(t->flags & TF_ONE_WAY) ||
4088 t->saved_priority > target_node->min_priority)
4089 binder_set_nice(target_node->min_priority);
4090 cmd = BR_TRANSACTION;
4091 } else {
4092 tr.target.ptr = 0;
4093 tr.cookie = 0;
4094 cmd = BR_REPLY;
4096 tr.code = t->code;
4097 tr.flags = t->flags;
4098 tr.sender_euid = from_kuid(current_user_ns(), t->sender_euid);
4100 t_from = binder_get_txn_from(t);
4101 if (t_from) {
4102 struct task_struct *sender = t_from->proc->tsk;
4104 tr.sender_pid = task_tgid_nr_ns(sender,
4105 task_active_pid_ns(current));
4106 } else {
4107 tr.sender_pid = 0;
4110 tr.data_size = t->buffer->data_size;
4111 tr.offsets_size = t->buffer->offsets_size;
4112 tr.data.ptr.buffer = (binder_uintptr_t)
4113 ((uintptr_t)t->buffer->data +
4114 binder_alloc_get_user_buffer_offset(&proc->alloc));
4115 tr.data.ptr.offsets = tr.data.ptr.buffer +
4116 ALIGN(t->buffer->data_size,
4117 sizeof(void *));
4119 if (put_user(cmd, (uint32_t __user *)ptr)) {
4120 if (t_from)
4121 binder_thread_dec_tmpref(t_from);
4123 binder_cleanup_transaction(t, "put_user failed",
4124 BR_FAILED_REPLY);
4126 return -EFAULT;
4128 ptr += sizeof(uint32_t);
4129 if (copy_to_user(ptr, &tr, sizeof(tr))) {
4130 if (t_from)
4131 binder_thread_dec_tmpref(t_from);
4133 binder_cleanup_transaction(t, "copy_to_user failed",
4134 BR_FAILED_REPLY);
4136 return -EFAULT;
4138 ptr += sizeof(tr);
4140 trace_binder_transaction_received(t);
4141 binder_stat_br(proc, thread, cmd);
4142 binder_debug(BINDER_DEBUG_TRANSACTION,
4143 "%d:%d %s %d %d:%d, cmd %d size %zd-%zd ptr %016llx-%016llx\n",
4144 proc->pid, thread->pid,
4145 (cmd == BR_TRANSACTION) ? "BR_TRANSACTION" :
4146 "BR_REPLY",
4147 t->debug_id, t_from ? t_from->proc->pid : 0,
4148 t_from ? t_from->pid : 0, cmd,
4149 t->buffer->data_size, t->buffer->offsets_size,
4150 (u64)tr.data.ptr.buffer, (u64)tr.data.ptr.offsets);
4152 if (t_from)
4153 binder_thread_dec_tmpref(t_from);
4154 t->buffer->allow_user_free = 1;
4155 if (cmd == BR_TRANSACTION && !(t->flags & TF_ONE_WAY)) {
4156 binder_inner_proc_lock(thread->proc);
4157 t->to_parent = thread->transaction_stack;
4158 t->to_thread = thread;
4159 thread->transaction_stack = t;
4160 binder_inner_proc_unlock(thread->proc);
4161 } else {
4162 binder_free_transaction(t);
4164 break;
4167 done:
4169 *consumed = ptr - buffer;
4170 binder_inner_proc_lock(proc);
4171 if (proc->requested_threads == 0 &&
4172 list_empty(&thread->proc->waiting_threads) &&
4173 proc->requested_threads_started < proc->max_threads &&
4174 (thread->looper & (BINDER_LOOPER_STATE_REGISTERED |
4175 BINDER_LOOPER_STATE_ENTERED))
4176 /* the user-space code fails to spawn a new thread if we leave this out; see the looper sketch below */) {
4177 proc->requested_threads++;
4178 binder_inner_proc_unlock(proc);
4179 binder_debug(BINDER_DEBUG_THREADS,
4180 "%d:%d BR_SPAWN_LOOPER\n",
4181 proc->pid, thread->pid);
4182 if (put_user(BR_SPAWN_LOOPER, (uint32_t __user *)buffer))
4183 return -EFAULT;
4184 binder_stat_br(proc, thread, BR_SPAWN_LOOPER);
4185 } else
4186 binder_inner_proc_unlock(proc);
4187 return 0;
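/*
 * A sketch of the userspace looper that drives binder_thread_read();
 * handle_commands() is illustrative and error handling is elided:
 *
 *   uint32_t enter = BC_ENTER_LOOPER;
 *   struct binder_write_read bwr = {
 *           .write_size = sizeof(enter),
 *           .write_buffer = (binder_uintptr_t)&enter,
 *   };
 *   ioctl(fd, BINDER_WRITE_READ, &bwr);
 *   for (;;) {
 *           uint32_t buf[128];
 *           struct binder_write_read rd = {
 *                   .read_size = sizeof(buf),
 *                   .read_buffer = (binder_uintptr_t)buf,
 *           };
 *           if (ioctl(fd, BINDER_WRITE_READ, &rd) < 0)
 *                   break;
 *           handle_commands(buf, rd.read_consumed);
 *   }
 *
 * On BR_SPAWN_LOOPER the process should start a new thread that
 * writes BC_REGISTER_LOOPER and runs the same loop, completing the
 * requested_threads accounting above.
 */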
4190 static void binder_release_work(struct binder_proc *proc,
4191 struct list_head *list)
4193 struct binder_work *w;
4195 while (1) {
4196 w = binder_dequeue_work_head(proc, list);
4197 if (!w)
4198 return;
4200 switch (w->type) {
4201 case BINDER_WORK_TRANSACTION: {
4202 struct binder_transaction *t;
4204 t = container_of(w, struct binder_transaction, work);
4206 binder_cleanup_transaction(t, "process died.",
4207 BR_DEAD_REPLY);
4208 } break;
4209 case BINDER_WORK_RETURN_ERROR: {
4210 struct binder_error *e = container_of(
4211 w, struct binder_error, work);
4213 binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
4214 "undelivered TRANSACTION_ERROR: %u\n",
4215 e->cmd);
4216 } break;
4217 case BINDER_WORK_TRANSACTION_COMPLETE: {
4218 binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
4219 "undelivered TRANSACTION_COMPLETE\n");
4220 kfree(w);
4221 binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
4222 } break;
4223 case BINDER_WORK_DEAD_BINDER_AND_CLEAR:
4224 case BINDER_WORK_CLEAR_DEATH_NOTIFICATION: {
4225 struct binder_ref_death *death;
4227 death = container_of(w, struct binder_ref_death, work);
4228 binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
4229 "undelivered death notification, %016llx\n",
4230 (u64)death->cookie);
4231 kfree(death);
4232 binder_stats_deleted(BINDER_STAT_DEATH);
4233 } break;
4234 default:
4235 pr_err("unexpected work type, %d, not freed\n",
4236 w->type);
4237 break;
4243 static struct binder_thread *binder_get_thread_ilocked(
4244 struct binder_proc *proc, struct binder_thread *new_thread)
4246 struct binder_thread *thread = NULL;
4247 struct rb_node *parent = NULL;
4248 struct rb_node **p = &proc->threads.rb_node;
4250 while (*p) {
4251 parent = *p;
4252 thread = rb_entry(parent, struct binder_thread, rb_node);
4254 if (current->pid < thread->pid)
4255 p = &(*p)->rb_left;
4256 else if (current->pid > thread->pid)
4257 p = &(*p)->rb_right;
4258 else
4259 return thread;
4261 if (!new_thread)
4262 return NULL;
4263 thread = new_thread;
4264 binder_stats_created(BINDER_STAT_THREAD);
4265 thread->proc = proc;
4266 thread->pid = current->pid;
4267 atomic_set(&thread->tmp_ref, 0);
4268 init_waitqueue_head(&thread->wait);
4269 INIT_LIST_HEAD(&thread->todo);
4270 rb_link_node(&thread->rb_node, parent, p);
4271 rb_insert_color(&thread->rb_node, &proc->threads);
4272 thread->looper_need_return = true;
4273 thread->return_error.work.type = BINDER_WORK_RETURN_ERROR;
4274 thread->return_error.cmd = BR_OK;
4275 thread->reply_error.work.type = BINDER_WORK_RETURN_ERROR;
4276 thread->reply_error.cmd = BR_OK;
4277 INIT_LIST_HEAD(&new_thread->waiting_thread_node);
4278 return thread;
4281 static struct binder_thread *binder_get_thread(struct binder_proc *proc)
4283 struct binder_thread *thread;
4284 struct binder_thread *new_thread;
4286 binder_inner_proc_lock(proc);
4287 thread = binder_get_thread_ilocked(proc, NULL);
4288 binder_inner_proc_unlock(proc);
4289 if (!thread) {
4290 new_thread = kzalloc(sizeof(*thread), GFP_KERNEL);
4291 if (new_thread == NULL)
4292 return NULL;
4293 binder_inner_proc_lock(proc);
4294 thread = binder_get_thread_ilocked(proc, new_thread);
4295 binder_inner_proc_unlock(proc);
4296 if (thread != new_thread)
4297 kfree(new_thread);
4299 return thread;
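/*
 * Note the allocate-outside-lock pattern above: kzalloc() runs with
 * no spinlock held, the thread tree is then re-checked under the
 * inner lock, and the allocation is freed if an entry for this pid
 * already appeared in the meantime.
 */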
4302 static void binder_free_proc(struct binder_proc *proc)
4304 BUG_ON(!list_empty(&proc->todo));
4305 BUG_ON(!list_empty(&proc->delivered_death));
4306 binder_alloc_deferred_release(&proc->alloc);
4307 put_task_struct(proc->tsk);
4308 binder_stats_deleted(BINDER_STAT_PROC);
4309 kfree(proc);
4312 static void binder_free_thread(struct binder_thread *thread)
4314 BUG_ON(!list_empty(&thread->todo));
4315 binder_stats_deleted(BINDER_STAT_THREAD);
4316 binder_proc_dec_tmpref(thread->proc);
4317 kfree(thread);
4320 static int binder_thread_release(struct binder_proc *proc,
4321 struct binder_thread *thread)
4323 struct binder_transaction *t;
4324 struct binder_transaction *send_reply = NULL;
4325 int active_transactions = 0;
4326 struct binder_transaction *last_t = NULL;
4328 binder_inner_proc_lock(thread->proc);
4329 /*
4330 * take a ref on the proc so it survives
4331 * after we remove this thread from proc->threads.
4332 * The corresponding dec is when we actually
4333 * free the thread in binder_free_thread()
4334 */
4335 proc->tmp_ref++;
4336 /*
4337 * take a ref on this thread to ensure it
4338 * survives while we are releasing it
4339 */
4340 atomic_inc(&thread->tmp_ref);
4341 rb_erase(&thread->rb_node, &proc->threads);
4342 t = thread->transaction_stack;
4343 if (t) {
4344 spin_lock(&t->lock);
4345 if (t->to_thread == thread)
4346 send_reply = t;
4348 thread->is_dead = true;
4350 while (t) {
4351 last_t = t;
4352 active_transactions++;
4353 binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
4354 "release %d:%d transaction %d %s, still active\n",
4355 proc->pid, thread->pid,
4356 t->debug_id,
4357 (t->to_thread == thread) ? "in" : "out");
4359 if (t->to_thread == thread) {
4360 t->to_proc = NULL;
4361 t->to_thread = NULL;
4362 if (t->buffer) {
4363 t->buffer->transaction = NULL;
4364 t->buffer = NULL;
4366 t = t->to_parent;
4367 } else if (t->from == thread) {
4368 t->from = NULL;
4369 t = t->from_parent;
4370 } else
4371 BUG();
4372 spin_unlock(&last_t->lock);
4373 if (t)
4374 spin_lock(&t->lock);
4377 /*
4378 * If this thread used poll, make sure we remove the waitqueue
4379 * from any epoll data structures holding it with POLLFREE.
4380 * waitqueue_active() is safe to use here because we're holding
4381 * the inner lock.
4382 */
4383 if ((thread->looper & BINDER_LOOPER_STATE_POLL) &&
4384 waitqueue_active(&thread->wait)) {
4385 wake_up_poll(&thread->wait, EPOLLHUP | POLLFREE);
4388 binder_inner_proc_unlock(thread->proc);
4390 /*
4391 * This is needed to avoid races between wake_up_poll() above and
4392 * ep_remove_waitqueue() called for other reasons (e.g. the epoll file
4393 * descriptor being closed); ep_remove_waitqueue() holds an RCU read
4394 * lock, so we can be sure it's done after calling synchronize_rcu().
4395 */
4396 if (thread->looper & BINDER_LOOPER_STATE_POLL)
4397 synchronize_rcu();
4399 if (send_reply)
4400 binder_send_failed_reply(send_reply, BR_DEAD_REPLY);
4401 binder_release_work(proc, &thread->todo);
4402 binder_thread_dec_tmpref(thread);
4403 return active_transactions;
4406 static __poll_t binder_poll(struct file *filp,
4407 struct poll_table_struct *wait)
4409 struct binder_proc *proc = filp->private_data;
4410 struct binder_thread *thread = NULL;
4411 bool wait_for_proc_work;
4413 thread = binder_get_thread(proc);
4414 if (!thread)
4415 return EPOLLERR;
4417 binder_inner_proc_lock(thread->proc);
4418 thread->looper |= BINDER_LOOPER_STATE_POLL;
4419 wait_for_proc_work = binder_available_for_proc_work_ilocked(thread);
4421 binder_inner_proc_unlock(thread->proc);
4423 poll_wait(filp, &thread->wait, wait);
4425 if (binder_has_work(thread, wait_for_proc_work))
4426 return EPOLLIN;
4428 return 0;
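/*
 * Sketch of event-driven use with poll(2); binder_fd, timeout_ms and
 * read_work_nonblocking() are illustrative:
 *
 *   struct pollfd pfd = { .fd = binder_fd, .events = POLLIN };
 *   if (poll(&pfd, 1, timeout_ms) > 0 && (pfd.revents & POLLIN))
 *           read_work_nonblocking(binder_fd);
 *
 * binder_poll() reports EPOLLIN whenever thread- or process-level
 * work is queued for the calling thread.
 */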
4431 static int binder_ioctl_write_read(struct file *filp,
4432 unsigned int cmd, unsigned long arg,
4433 struct binder_thread *thread)
4435 int ret = 0;
4436 struct binder_proc *proc = filp->private_data;
4437 unsigned int size = _IOC_SIZE(cmd);
4438 void __user *ubuf = (void __user *)arg;
4439 struct binder_write_read bwr;
4441 if (size != sizeof(struct binder_write_read)) {
4442 ret = -EINVAL;
4443 goto out;
4445 if (copy_from_user(&bwr, ubuf, sizeof(bwr))) {
4446 ret = -EFAULT;
4447 goto out;
4449 binder_debug(BINDER_DEBUG_READ_WRITE,
4450 "%d:%d write %lld at %016llx, read %lld at %016llx\n",
4451 proc->pid, thread->pid,
4452 (u64)bwr.write_size, (u64)bwr.write_buffer,
4453 (u64)bwr.read_size, (u64)bwr.read_buffer);
4455 if (bwr.write_size > 0) {
4456 ret = binder_thread_write(proc, thread,
4457 bwr.write_buffer,
4458 bwr.write_size,
4459 &bwr.write_consumed);
4460 trace_binder_write_done(ret);
4461 if (ret < 0) {
4462 bwr.read_consumed = 0;
4463 if (copy_to_user(ubuf, &bwr, sizeof(bwr)))
4464 ret = -EFAULT;
4465 goto out;
4468 if (bwr.read_size > 0) {
4469 ret = binder_thread_read(proc, thread, bwr.read_buffer,
4470 bwr.read_size,
4471 &bwr.read_consumed,
4472 filp->f_flags & O_NONBLOCK);
4473 trace_binder_read_done(ret);
4474 binder_inner_proc_lock(proc);
4475 if (!binder_worklist_empty_ilocked(&proc->todo))
4476 binder_wakeup_proc_ilocked(proc);
4477 binder_inner_proc_unlock(proc);
4478 if (ret < 0) {
4479 if (copy_to_user(ubuf, &bwr, sizeof(bwr)))
4480 ret = -EFAULT;
4481 goto out;
4484 binder_debug(BINDER_DEBUG_READ_WRITE,
4485 "%d:%d wrote %lld of %lld, read return %lld of %lld\n",
4486 proc->pid, thread->pid,
4487 (u64)bwr.write_consumed, (u64)bwr.write_size,
4488 (u64)bwr.read_consumed, (u64)bwr.read_size);
4489 if (copy_to_user(ubuf, &bwr, sizeof(bwr))) {
4490 ret = -EFAULT;
4491 goto out;
4493 out:
4494 return ret;
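/*
 * A minimal call into binder_ioctl_write_read() from userspace;
 * write_buf, write_len and read_buf are illustrative client buffers:
 *
 *   struct binder_write_read bwr = {
 *           .write_size = write_len,
 *           .write_buffer = (binder_uintptr_t)write_buf,
 *           .read_size = sizeof(read_buf),
 *           .read_buffer = (binder_uintptr_t)read_buf,
 *   };
 *   if (ioctl(fd, BINDER_WRITE_READ, &bwr) < 0)
 *           err(1, "BINDER_WRITE_READ");
 *
 * On return bwr.write_consumed and bwr.read_consumed report how much
 * of each buffer the kernel processed and filled, per the final
 * copy_to_user() above.
 */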
4497 static int binder_ioctl_set_ctx_mgr(struct file *filp)
4499 int ret = 0;
4500 struct binder_proc *proc = filp->private_data;
4501 struct binder_context *context = proc->context;
4502 struct binder_node *new_node;
4503 kuid_t curr_euid = current_euid();
4505 mutex_lock(&context->context_mgr_node_lock);
4506 if (context->binder_context_mgr_node) {
4507 pr_err("BINDER_SET_CONTEXT_MGR already set\n");
4508 ret = -EBUSY;
4509 goto out;
4511 ret = security_binder_set_context_mgr(proc->tsk);
4512 if (ret < 0)
4513 goto out;
4514 if (uid_valid(context->binder_context_mgr_uid)) {
4515 if (!uid_eq(context->binder_context_mgr_uid, curr_euid)) {
4516 pr_err("BINDER_SET_CONTEXT_MGR bad uid %d != %d\n",
4517 from_kuid(&init_user_ns, curr_euid),
4518 from_kuid(&init_user_ns,
4519 context->binder_context_mgr_uid));
4520 ret = -EPERM;
4521 goto out;
4523 } else {
4524 context->binder_context_mgr_uid = curr_euid;
4526 new_node = binder_new_node(proc, NULL);
4527 if (!new_node) {
4528 ret = -ENOMEM;
4529 goto out;
4531 binder_node_lock(new_node);
4532 new_node->local_weak_refs++;
4533 new_node->local_strong_refs++;
4534 new_node->has_strong_ref = 1;
4535 new_node->has_weak_ref = 1;
4536 context->binder_context_mgr_node = new_node;
4537 binder_node_unlock(new_node);
4538 binder_put_node(new_node);
4539 out:
4540 mutex_unlock(&context->context_mgr_node_lock);
4541 return ret;
4544 static int binder_ioctl_get_node_debug_info(struct binder_proc *proc,
4545 struct binder_node_debug_info *info)
4547 struct rb_node *n;
4548 binder_uintptr_t ptr = info->ptr;
4550 memset(info, 0, sizeof(*info));
4552 binder_inner_proc_lock(proc);
4553 for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n)) {
4554 struct binder_node *node = rb_entry(n, struct binder_node,
4555 rb_node);
4556 if (node->ptr > ptr) {
4557 info->ptr = node->ptr;
4558 info->cookie = node->cookie;
4559 info->has_strong_ref = node->has_strong_ref;
4560 info->has_weak_ref = node->has_weak_ref;
4561 break;
4564 binder_inner_proc_unlock(proc);
4566 return 0;
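/*
 * Because each call returns the first node with ptr strictly greater
 * than the ptr passed in, userspace can enumerate all nodes of a
 * process; a sketch (error handling elided):
 *
 *   struct binder_node_debug_info info = { .ptr = 0 };
 *   do {
 *           if (ioctl(fd, BINDER_GET_NODE_DEBUG_INFO, &info) < 0)
 *                   break;
 *           if (info.ptr)
 *                   printf("node u%016llx c%016llx\n",
 *                          (unsigned long long)info.ptr,
 *                          (unsigned long long)info.cookie);
 *   } while (info.ptr);
 */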
4569 static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
4571 int ret;
4572 struct binder_proc *proc = filp->private_data;
4573 struct binder_thread *thread;
4574 unsigned int size = _IOC_SIZE(cmd);
4575 void __user *ubuf = (void __user *)arg;
4577 /*pr_info("binder_ioctl: %d:%d %x %lx\n",
4578 proc->pid, current->pid, cmd, arg);*/
4580 binder_selftest_alloc(&proc->alloc);
4582 trace_binder_ioctl(cmd, arg);
4584 ret = wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2);
4585 if (ret)
4586 goto err_unlocked;
4588 thread = binder_get_thread(proc);
4589 if (thread == NULL) {
4590 ret = -ENOMEM;
4591 goto err;
4594 switch (cmd) {
4595 case BINDER_WRITE_READ:
4596 ret = binder_ioctl_write_read(filp, cmd, arg, thread);
4597 if (ret)
4598 goto err;
4599 break;
4600 case BINDER_SET_MAX_THREADS: {
4601 int max_threads;
4603 if (copy_from_user(&max_threads, ubuf,
4604 sizeof(max_threads))) {
4605 ret = -EINVAL;
4606 goto err;
4608 binder_inner_proc_lock(proc);
4609 proc->max_threads = max_threads;
4610 binder_inner_proc_unlock(proc);
4611 break;
4613 case BINDER_SET_CONTEXT_MGR:
4614 ret = binder_ioctl_set_ctx_mgr(filp);
4615 if (ret)
4616 goto err;
4617 break;
4618 case BINDER_THREAD_EXIT:
4619 binder_debug(BINDER_DEBUG_THREADS, "%d:%d exit\n",
4620 proc->pid, thread->pid);
4621 binder_thread_release(proc, thread);
4622 thread = NULL;
4623 break;
4624 case BINDER_VERSION: {
4625 struct binder_version __user *ver = ubuf;
4627 if (size != sizeof(struct binder_version)) {
4628 ret = -EINVAL;
4629 goto err;
4631 if (put_user(BINDER_CURRENT_PROTOCOL_VERSION,
4632 &ver->protocol_version)) {
4633 ret = -EINVAL;
4634 goto err;
4636 break;
4638 case BINDER_GET_NODE_DEBUG_INFO: {
4639 struct binder_node_debug_info info;
4641 if (copy_from_user(&info, ubuf, sizeof(info))) {
4642 ret = -EFAULT;
4643 goto err;
4646 ret = binder_ioctl_get_node_debug_info(proc, &info);
4647 if (ret < 0)
4648 goto err;
4650 if (copy_to_user(ubuf, &info, sizeof(info))) {
4651 ret = -EFAULT;
4652 goto err;
4654 break;
4656 default:
4657 ret = -EINVAL;
4658 goto err;
4660 ret = 0;
4661 err:
4662 if (thread)
4663 thread->looper_need_return = false;
4664 wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2);
4665 if (ret && ret != -ERESTARTSYS)
4666 pr_info("%d:%d ioctl %x %lx returned %d\n", proc->pid, current->pid, cmd, arg, ret);
4667 err_unlocked:
4668 trace_binder_ioctl_done(ret);
4669 return ret;
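/*
 * Typical session setup against binder_ioctl(); the thread count is
 * illustrative:
 *
 *   struct binder_version vers;
 *   if (ioctl(fd, BINDER_VERSION, &vers) < 0 ||
 *       vers.protocol_version != BINDER_CURRENT_PROTOCOL_VERSION)
 *           errx(1, "binder version mismatch");
 *
 *   uint32_t max_threads = 15;
 *   ioctl(fd, BINDER_SET_MAX_THREADS, &max_threads);
 */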
4672 static void binder_vma_open(struct vm_area_struct *vma)
4674 struct binder_proc *proc = vma->vm_private_data;
4676 binder_debug(BINDER_DEBUG_OPEN_CLOSE,
4677 "%d open vm area %lx-%lx (%ld K) vma %lx pagep %lx\n",
4678 proc->pid, vma->vm_start, vma->vm_end,
4679 (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
4680 (unsigned long)pgprot_val(vma->vm_page_prot));
4683 static void binder_vma_close(struct vm_area_struct *vma)
4685 struct binder_proc *proc = vma->vm_private_data;
4687 binder_debug(BINDER_DEBUG_OPEN_CLOSE,
4688 "%d close vm area %lx-%lx (%ld K) vma %lx pagep %lx\n",
4689 proc->pid, vma->vm_start, vma->vm_end,
4690 (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
4691 (unsigned long)pgprot_val(vma->vm_page_prot));
4692 binder_alloc_vma_close(&proc->alloc);
4693 binder_defer_work(proc, BINDER_DEFERRED_PUT_FILES);
4696 static vm_fault_t binder_vm_fault(struct vm_fault *vmf)
4698 return VM_FAULT_SIGBUS;
4701 static const struct vm_operations_struct binder_vm_ops = {
4702 .open = binder_vma_open,
4703 .close = binder_vma_close,
4704 .fault = binder_vm_fault,
4707 static int binder_mmap(struct file *filp, struct vm_area_struct *vma)
4709 int ret;
4710 struct binder_proc *proc = filp->private_data;
4711 const char *failure_string;
4713 if (proc->tsk != current->group_leader)
4714 return -EINVAL;
4716 if ((vma->vm_end - vma->vm_start) > SZ_4M)
4717 vma->vm_end = vma->vm_start + SZ_4M;
4719 binder_debug(BINDER_DEBUG_OPEN_CLOSE,
4720 "%s: %d %lx-%lx (%ld K) vma %lx pagep %lx\n",
4721 __func__, proc->pid, vma->vm_start, vma->vm_end,
4722 (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
4723 (unsigned long)pgprot_val(vma->vm_page_prot));
4725 if (vma->vm_flags & FORBIDDEN_MMAP_FLAGS) {
4726 ret = -EPERM;
4727 failure_string = "bad vm_flags";
4728 goto err_bad_arg;
4730 vma->vm_flags |= VM_DONTCOPY | VM_MIXEDMAP;
4731 vma->vm_flags &= ~VM_MAYWRITE;
4733 vma->vm_ops = &binder_vm_ops;
4734 vma->vm_private_data = proc;
4736 ret = binder_alloc_mmap_handler(&proc->alloc, vma);
4737 if (ret)
4738 return ret;
4739 mutex_lock(&proc->files_lock);
4740 proc->files = get_files_struct(current);
4741 mutex_unlock(&proc->files_lock);
4742 return 0;
4744 err_bad_arg:
4745 pr_err("%s: %d %lx-%lx %s failed %d\n", __func__,
4746 proc->pid, vma->vm_start, vma->vm_end, failure_string, ret);
4747 return ret;
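/*
 * Userspace establishes the mapping once, right after open(); the
 * size below is illustrative, and the driver caps the area at SZ_4M
 * and strips write permission (and VM_MAYWRITE) above:
 *
 *   int fd = open("/dev/binder", O_RDWR | O_CLOEXEC);
 *   void *map = mmap(NULL, 1024 * 1024, PROT_READ,
 *                    MAP_PRIVATE | MAP_NORESERVE, fd, 0);
 *
 * Transaction payloads are then delivered through this read-only
 * mapping.
 */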
4750 static int binder_open(struct inode *nodp, struct file *filp)
4752 struct binder_proc *proc;
4753 struct binder_device *binder_dev;
4755 binder_debug(BINDER_DEBUG_OPEN_CLOSE, "%s: %d:%d\n", __func__,
4756 current->group_leader->pid, current->pid);
4758 proc = kzalloc(sizeof(*proc), GFP_KERNEL);
4759 if (proc == NULL)
4760 return -ENOMEM;
4761 spin_lock_init(&proc->inner_lock);
4762 spin_lock_init(&proc->outer_lock);
4763 get_task_struct(current->group_leader);
4764 proc->tsk = current->group_leader;
4765 mutex_init(&proc->files_lock);
4766 INIT_LIST_HEAD(&proc->todo);
4767 proc->default_priority = task_nice(current);
4768 binder_dev = container_of(filp->private_data, struct binder_device,
4769 miscdev);
4770 proc->context = &binder_dev->context;
4771 binder_alloc_init(&proc->alloc);
4773 binder_stats_created(BINDER_STAT_PROC);
4774 proc->pid = current->group_leader->pid;
4775 INIT_LIST_HEAD(&proc->delivered_death);
4776 INIT_LIST_HEAD(&proc->waiting_threads);
4777 filp->private_data = proc;
4779 mutex_lock(&binder_procs_lock);
4780 hlist_add_head(&proc->proc_node, &binder_procs);
4781 mutex_unlock(&binder_procs_lock);
4783 if (binder_debugfs_dir_entry_proc) {
4784 char strbuf[11];
4786 snprintf(strbuf, sizeof(strbuf), "%u", proc->pid);
4787 /*
4788 * proc debug entries are shared between contexts, so
4789 * this will fail if the process tries to open the driver
4790 * again with a different context. The printing code will
4791 * anyway print all contexts that a given PID has, so this
4792 * is not a problem.
4793 */
4794 proc->debugfs_entry = debugfs_create_file(strbuf, 0444,
4795 binder_debugfs_dir_entry_proc,
4796 (void *)(unsigned long)proc->pid,
4797 &binder_proc_fops);
4800 return 0;
4803 static int binder_flush(struct file *filp, fl_owner_t id)
4805 struct binder_proc *proc = filp->private_data;
4807 binder_defer_work(proc, BINDER_DEFERRED_FLUSH);
4809 return 0;
4812 static void binder_deferred_flush(struct binder_proc *proc)
4814 struct rb_node *n;
4815 int wake_count = 0;
4817 binder_inner_proc_lock(proc);
4818 for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n)) {
4819 struct binder_thread *thread = rb_entry(n, struct binder_thread, rb_node);
4821 thread->looper_need_return = true;
4822 if (thread->looper & BINDER_LOOPER_STATE_WAITING) {
4823 wake_up_interruptible(&thread->wait);
4824 wake_count++;
4827 binder_inner_proc_unlock(proc);
4829 binder_debug(BINDER_DEBUG_OPEN_CLOSE,
4830 "binder_flush: %d woke %d threads\n", proc->pid,
4831 wake_count);
4834 static int binder_release(struct inode *nodp, struct file *filp)
4836 struct binder_proc *proc = filp->private_data;
4838 debugfs_remove(proc->debugfs_entry);
4839 binder_defer_work(proc, BINDER_DEFERRED_RELEASE);
4841 return 0;
4844 static int binder_node_release(struct binder_node *node, int refs)
4846 struct binder_ref *ref;
4847 int death = 0;
4848 struct binder_proc *proc = node->proc;
4850 binder_release_work(proc, &node->async_todo);
4852 binder_node_lock(node);
4853 binder_inner_proc_lock(proc);
4854 binder_dequeue_work_ilocked(&node->work);
4855 /*
4856 * The caller must have taken a temporary ref on the node.
4857 */
4858 BUG_ON(!node->tmp_refs);
4859 if (hlist_empty(&node->refs) && node->tmp_refs == 1) {
4860 binder_inner_proc_unlock(proc);
4861 binder_node_unlock(node);
4862 binder_free_node(node);
4864 return refs;
4867 node->proc = NULL;
4868 node->local_strong_refs = 0;
4869 node->local_weak_refs = 0;
4870 binder_inner_proc_unlock(proc);
4872 spin_lock(&binder_dead_nodes_lock);
4873 hlist_add_head(&node->dead_node, &binder_dead_nodes);
4874 spin_unlock(&binder_dead_nodes_lock);
4876 hlist_for_each_entry(ref, &node->refs, node_entry) {
4877 refs++;
4878 /*
4879 * Need the node lock to synchronize
4880 * with new notification requests and the
4881 * inner lock to synchronize with queued
4882 * death notifications.
4883 */
4884 binder_inner_proc_lock(ref->proc);
4885 if (!ref->death) {
4886 binder_inner_proc_unlock(ref->proc);
4887 continue;
4890 death++;
4892 BUG_ON(!list_empty(&ref->death->work.entry));
4893 ref->death->work.type = BINDER_WORK_DEAD_BINDER;
4894 binder_enqueue_work_ilocked(&ref->death->work,
4895 &ref->proc->todo);
4896 binder_wakeup_proc_ilocked(ref->proc);
4897 binder_inner_proc_unlock(ref->proc);
4900 binder_debug(BINDER_DEBUG_DEAD_BINDER,
4901 "node %d now dead, refs %d, death %d\n",
4902 node->debug_id, refs, death);
4903 binder_node_unlock(node);
4904 binder_put_node(node);
4906 return refs;
4909 static void binder_deferred_release(struct binder_proc *proc)
4911 struct binder_context *context = proc->context;
4912 struct rb_node *n;
4913 int threads, nodes, incoming_refs, outgoing_refs, active_transactions;
4915 BUG_ON(proc->files);
4917 mutex_lock(&binder_procs_lock);
4918 hlist_del(&proc->proc_node);
4919 mutex_unlock(&binder_procs_lock);
4921 mutex_lock(&context->context_mgr_node_lock);
4922 if (context->binder_context_mgr_node &&
4923 context->binder_context_mgr_node->proc == proc) {
4924 binder_debug(BINDER_DEBUG_DEAD_BINDER,
4925 "%s: %d context_mgr_node gone\n",
4926 __func__, proc->pid);
4927 context->binder_context_mgr_node = NULL;
4929 mutex_unlock(&context->context_mgr_node_lock);
4930 binder_inner_proc_lock(proc);
4931 /*
4932 * Make sure proc stays alive after we
4933 * remove all the threads
4934 */
4935 proc->tmp_ref++;
4937 proc->is_dead = true;
4938 threads = 0;
4939 active_transactions = 0;
4940 while ((n = rb_first(&proc->threads))) {
4941 struct binder_thread *thread;
4943 thread = rb_entry(n, struct binder_thread, rb_node);
4944 binder_inner_proc_unlock(proc);
4945 threads++;
4946 active_transactions += binder_thread_release(proc, thread);
4947 binder_inner_proc_lock(proc);
4950 nodes = 0;
4951 incoming_refs = 0;
4952 while ((n = rb_first(&proc->nodes))) {
4953 struct binder_node *node;
4955 node = rb_entry(n, struct binder_node, rb_node);
4956 nodes++;
4957 /*
4958 * take a temporary ref on the node before
4959 * calling binder_node_release() which will either
4960 * kfree() the node or call binder_put_node()
4961 */
4962 binder_inc_node_tmpref_ilocked(node);
4963 rb_erase(&node->rb_node, &proc->nodes);
4964 binder_inner_proc_unlock(proc);
4965 incoming_refs = binder_node_release(node, incoming_refs);
4966 binder_inner_proc_lock(proc);
4968 binder_inner_proc_unlock(proc);
4970 outgoing_refs = 0;
4971 binder_proc_lock(proc);
4972 while ((n = rb_first(&proc->refs_by_desc))) {
4973 struct binder_ref *ref;
4975 ref = rb_entry(n, struct binder_ref, rb_node_desc);
4976 outgoing_refs++;
4977 binder_cleanup_ref_olocked(ref);
4978 binder_proc_unlock(proc);
4979 binder_free_ref(ref);
4980 binder_proc_lock(proc);
4982 binder_proc_unlock(proc);
4984 binder_release_work(proc, &proc->todo);
4985 binder_release_work(proc, &proc->delivered_death);
4987 binder_debug(BINDER_DEBUG_OPEN_CLOSE,
4988 "%s: %d threads %d, nodes %d (ref %d), refs %d, active transactions %d\n",
4989 __func__, proc->pid, threads, nodes, incoming_refs,
4990 outgoing_refs, active_transactions);
4992 binder_proc_dec_tmpref(proc);
4995 static void binder_deferred_func(struct work_struct *work)
4997 struct binder_proc *proc;
4998 struct files_struct *files;
5000 int defer;
5002 do {
5003 mutex_lock(&binder_deferred_lock);
5004 if (!hlist_empty(&binder_deferred_list)) {
5005 proc = hlist_entry(binder_deferred_list.first,
5006 struct binder_proc, deferred_work_node);
5007 hlist_del_init(&proc->deferred_work_node);
5008 defer = proc->deferred_work;
5009 proc->deferred_work = 0;
5010 } else {
5011 proc = NULL;
5012 defer = 0;
5014 mutex_unlock(&binder_deferred_lock);
5016 files = NULL;
5017 if (defer & BINDER_DEFERRED_PUT_FILES) {
5018 mutex_lock(&proc->files_lock);
5019 files = proc->files;
5020 if (files)
5021 proc->files = NULL;
5022 mutex_unlock(&proc->files_lock);
5025 if (defer & BINDER_DEFERRED_FLUSH)
5026 binder_deferred_flush(proc);
5028 if (defer & BINDER_DEFERRED_RELEASE)
5029 binder_deferred_release(proc); /* frees proc */
5031 if (files)
5032 put_files_struct(files);
5033 } while (proc);
5035 static DECLARE_WORK(binder_deferred_work, binder_deferred_func);
5037 static void
5038 binder_defer_work(struct binder_proc *proc, enum binder_deferred_state defer)
5040 mutex_lock(&binder_deferred_lock);
5041 proc->deferred_work |= defer;
5042 if (hlist_unhashed(&proc->deferred_work_node)) {
5043 hlist_add_head(&proc->deferred_work_node,
5044 &binder_deferred_list);
5045 schedule_work(&binder_deferred_work);
5047 mutex_unlock(&binder_deferred_lock);
5050 static void print_binder_transaction_ilocked(struct seq_file *m,
5051 struct binder_proc *proc,
5052 const char *prefix,
5053 struct binder_transaction *t)
5055 struct binder_proc *to_proc;
5056 struct binder_buffer *buffer = t->buffer;
5058 spin_lock(&t->lock);
5059 to_proc = t->to_proc;
5060 seq_printf(m,
5061 "%s %d: %pK from %d:%d to %d:%d code %x flags %x pri %ld r%d",
5062 prefix, t->debug_id, t,
5063 t->from ? t->from->proc->pid : 0,
5064 t->from ? t->from->pid : 0,
5065 to_proc ? to_proc->pid : 0,
5066 t->to_thread ? t->to_thread->pid : 0,
5067 t->code, t->flags, t->priority, t->need_reply);
5068 spin_unlock(&t->lock);
5070 if (proc != to_proc) {
5071 /*
5072 * Can only safely deref buffer if we are holding the
5073 * correct proc inner lock for this node
5074 */
5075 seq_puts(m, "\n");
5076 return;
5079 if (buffer == NULL) {
5080 seq_puts(m, " buffer free\n");
5081 return;
5083 if (buffer->target_node)
5084 seq_printf(m, " node %d", buffer->target_node->debug_id);
5085 seq_printf(m, " size %zd:%zd data %pK\n",
5086 buffer->data_size, buffer->offsets_size,
5087 buffer->data);
5090 static void print_binder_work_ilocked(struct seq_file *m,
5091 struct binder_proc *proc,
5092 const char *prefix,
5093 const char *transaction_prefix,
5094 struct binder_work *w)
5096 struct binder_node *node;
5097 struct binder_transaction *t;
5099 switch (w->type) {
5100 case BINDER_WORK_TRANSACTION:
5101 t = container_of(w, struct binder_transaction, work);
5102 print_binder_transaction_ilocked(
5103 m, proc, transaction_prefix, t);
5104 break;
5105 case BINDER_WORK_RETURN_ERROR: {
5106 struct binder_error *e = container_of(
5107 w, struct binder_error, work);
5109 seq_printf(m, "%stransaction error: %u\n",
5110 prefix, e->cmd);
5111 } break;
5112 case BINDER_WORK_TRANSACTION_COMPLETE:
5113 seq_printf(m, "%stransaction complete\n", prefix);
5114 break;
5115 case BINDER_WORK_NODE:
5116 node = container_of(w, struct binder_node, work);
5117 seq_printf(m, "%snode work %d: u%016llx c%016llx\n",
5118 prefix, node->debug_id,
5119 (u64)node->ptr, (u64)node->cookie);
5120 break;
5121 case BINDER_WORK_DEAD_BINDER:
5122 seq_printf(m, "%shas dead binder\n", prefix);
5123 break;
5124 case BINDER_WORK_DEAD_BINDER_AND_CLEAR:
5125 seq_printf(m, "%shas cleared dead binder\n", prefix);
5126 break;
5127 case BINDER_WORK_CLEAR_DEATH_NOTIFICATION:
5128 seq_printf(m, "%shas cleared death notification\n", prefix);
5129 break;
5130 default:
5131 seq_printf(m, "%sunknown work: type %d\n", prefix, w->type);
5132 break;
5136 static void print_binder_thread_ilocked(struct seq_file *m,
5137 struct binder_thread *thread,
5138 int print_always)
5140 struct binder_transaction *t;
5141 struct binder_work *w;
5142 size_t start_pos = m->count;
5143 size_t header_pos;
5145 seq_printf(m, " thread %d: l %02x need_return %d tr %d\n",
5146 thread->pid, thread->looper,
5147 thread->looper_need_return,
5148 atomic_read(&thread->tmp_ref));
5149 header_pos = m->count;
5150 t = thread->transaction_stack;
5151 while (t) {
5152 if (t->from == thread) {
5153 print_binder_transaction_ilocked(m, thread->proc,
5154 " outgoing transaction", t);
5155 t = t->from_parent;
5156 } else if (t->to_thread == thread) {
5157 print_binder_transaction_ilocked(m, thread->proc,
5158 " incoming transaction", t);
5159 t = t->to_parent;
5160 } else {
5161 print_binder_transaction_ilocked(m, thread->proc,
5162 " bad transaction", t);
5163 t = NULL;
5166 list_for_each_entry(w, &thread->todo, entry) {
5167 print_binder_work_ilocked(m, thread->proc, " ",
5168 " pending transaction", w);
5170 if (!print_always && m->count == header_pos)
5171 m->count = start_pos;
5174 static void print_binder_node_nilocked(struct seq_file *m,
5175 struct binder_node *node)
5177 struct binder_ref *ref;
5178 struct binder_work *w;
5179 int count;
5181 count = 0;
5182 hlist_for_each_entry(ref, &node->refs, node_entry)
5183 count++;
5185 seq_printf(m, " node %d: u%016llx c%016llx hs %d hw %d ls %d lw %d is %d iw %d tr %d",
5186 node->debug_id, (u64)node->ptr, (u64)node->cookie,
5187 node->has_strong_ref, node->has_weak_ref,
5188 node->local_strong_refs, node->local_weak_refs,
5189 node->internal_strong_refs, count, node->tmp_refs);
5190 if (count) {
5191 seq_puts(m, " proc");
5192 hlist_for_each_entry(ref, &node->refs, node_entry)
5193 seq_printf(m, " %d", ref->proc->pid);
5195 seq_puts(m, "\n");
5196 if (node->proc) {
5197 list_for_each_entry(w, &node->async_todo, entry)
5198 print_binder_work_ilocked(m, node->proc, " ",
5199 " pending async transaction", w);
5203 static void print_binder_ref_olocked(struct seq_file *m,
5204 struct binder_ref *ref)
5206 binder_node_lock(ref->node);
5207 seq_printf(m, " ref %d: desc %d %snode %d s %d w %d d %pK\n",
5208 ref->data.debug_id, ref->data.desc,
5209 ref->node->proc ? "" : "dead ",
5210 ref->node->debug_id, ref->data.strong,
5211 ref->data.weak, ref->death);
5212 binder_node_unlock(ref->node);
5215 static void print_binder_proc(struct seq_file *m,
5216 struct binder_proc *proc, int print_all)
5218 struct binder_work *w;
5219 struct rb_node *n;
5220 size_t start_pos = m->count;
5221 size_t header_pos;
5222 struct binder_node *last_node = NULL;
5224 seq_printf(m, "proc %d\n", proc->pid);
5225 seq_printf(m, "context %s\n", proc->context->name);
5226 header_pos = m->count;
5228 binder_inner_proc_lock(proc);
5229 for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n))
5230 print_binder_thread_ilocked(m, rb_entry(n, struct binder_thread,
5231 rb_node), print_all);
5233 for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n)) {
5234 struct binder_node *node = rb_entry(n, struct binder_node,
5235 rb_node);
5236 /*
5237 * take a temporary reference on the node so it
5238 * survives and isn't removed from the tree
5239 * while we print it.
5240 */
5241 binder_inc_node_tmpref_ilocked(node);
5242 /* Need to drop inner lock to take node lock */
5243 binder_inner_proc_unlock(proc);
5244 if (last_node)
5245 binder_put_node(last_node);
5246 binder_node_inner_lock(node);
5247 print_binder_node_nilocked(m, node);
5248 binder_node_inner_unlock(node);
5249 last_node = node;
5250 binder_inner_proc_lock(proc);
5252 binder_inner_proc_unlock(proc);
5253 if (last_node)
5254 binder_put_node(last_node);
5256 if (print_all) {
5257 binder_proc_lock(proc);
5258 for (n = rb_first(&proc->refs_by_desc);
5259 n != NULL;
5260 n = rb_next(n))
5261 print_binder_ref_olocked(m, rb_entry(n,
5262 struct binder_ref,
5263 rb_node_desc));
5264 binder_proc_unlock(proc);
5266 binder_alloc_print_allocated(m, &proc->alloc);
5267 binder_inner_proc_lock(proc);
5268 list_for_each_entry(w, &proc->todo, entry)
5269 print_binder_work_ilocked(m, proc, " ",
5270 " pending transaction", w);
5271 list_for_each_entry(w, &proc->delivered_death, entry) {
5272 seq_puts(m, " has delivered dead binder\n");
5273 break;
5275 binder_inner_proc_unlock(proc);
5276 if (!print_all && m->count == header_pos)
5277 m->count = start_pos;
5280 static const char * const binder_return_strings[] = {
5281 "BR_ERROR",
5282 "BR_OK",
5283 "BR_TRANSACTION",
5284 "BR_REPLY",
5285 "BR_ACQUIRE_RESULT",
5286 "BR_DEAD_REPLY",
5287 "BR_TRANSACTION_COMPLETE",
5288 "BR_INCREFS",
5289 "BR_ACQUIRE",
5290 "BR_RELEASE",
5291 "BR_DECREFS",
5292 "BR_ATTEMPT_ACQUIRE",
5293 "BR_NOOP",
5294 "BR_SPAWN_LOOPER",
5295 "BR_FINISHED",
5296 "BR_DEAD_BINDER",
5297 "BR_CLEAR_DEATH_NOTIFICATION_DONE",
5298 "BR_FAILED_REPLY"
5301 static const char * const binder_command_strings[] = {
5302 "BC_TRANSACTION",
5303 "BC_REPLY",
5304 "BC_ACQUIRE_RESULT",
5305 "BC_FREE_BUFFER",
5306 "BC_INCREFS",
5307 "BC_ACQUIRE",
5308 "BC_RELEASE",
5309 "BC_DECREFS",
5310 "BC_INCREFS_DONE",
5311 "BC_ACQUIRE_DONE",
5312 "BC_ATTEMPT_ACQUIRE",
5313 "BC_REGISTER_LOOPER",
5314 "BC_ENTER_LOOPER",
5315 "BC_EXIT_LOOPER",
5316 "BC_REQUEST_DEATH_NOTIFICATION",
5317 "BC_CLEAR_DEATH_NOTIFICATION",
5318 "BC_DEAD_BINDER_DONE",
5319 "BC_TRANSACTION_SG",
5320 "BC_REPLY_SG",
5323 static const char * const binder_objstat_strings[] = {
5324 "proc",
5325 "thread",
5326 "node",
5327 "ref",
5328 "death",
5329 "transaction",
5330 "transaction_complete"
5333 static void print_binder_stats(struct seq_file *m, const char *prefix,
5334 struct binder_stats *stats)
5336 int i;
5338 BUILD_BUG_ON(ARRAY_SIZE(stats->bc) !=
5339 ARRAY_SIZE(binder_command_strings));
5340 for (i = 0; i < ARRAY_SIZE(stats->bc); i++) {
5341 int temp = atomic_read(&stats->bc[i]);
5343 if (temp)
5344 seq_printf(m, "%s%s: %d\n", prefix,
5345 binder_command_strings[i], temp);
5348 BUILD_BUG_ON(ARRAY_SIZE(stats->br) !=
5349 ARRAY_SIZE(binder_return_strings));
5350 for (i = 0; i < ARRAY_SIZE(stats->br); i++) {
5351 int temp = atomic_read(&stats->br[i]);
5353 if (temp)
5354 seq_printf(m, "%s%s: %d\n", prefix,
5355 binder_return_strings[i], temp);
5358 BUILD_BUG_ON(ARRAY_SIZE(stats->obj_created) !=
5359 ARRAY_SIZE(binder_objstat_strings));
5360 BUILD_BUG_ON(ARRAY_SIZE(stats->obj_created) !=
5361 ARRAY_SIZE(stats->obj_deleted));
5362 for (i = 0; i < ARRAY_SIZE(stats->obj_created); i++) {
5363 int created = atomic_read(&stats->obj_created[i]);
5364 int deleted = atomic_read(&stats->obj_deleted[i]);
5366 if (created || deleted)
5367 seq_printf(m, "%s%s: active %d total %d\n",
5368 prefix,
5369 binder_objstat_strings[i],
5370 created - deleted,
5371 created);
5375 static void print_binder_proc_stats(struct seq_file *m,
5376 struct binder_proc *proc)
5378 struct binder_work *w;
5379 struct binder_thread *thread;
5380 struct rb_node *n;
5381 int count, strong, weak, ready_threads;
5382 size_t free_async_space =
5383 binder_alloc_get_free_async_space(&proc->alloc);
5385 seq_printf(m, "proc %d\n", proc->pid);
5386 seq_printf(m, "context %s\n", proc->context->name);
5387 count = 0;
5388 ready_threads = 0;
5389 binder_inner_proc_lock(proc);
5390 for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n))
5391 count++;
5393 list_for_each_entry(thread, &proc->waiting_threads, waiting_thread_node)
5394 ready_threads++;
5396 seq_printf(m, " threads: %d\n", count);
5397 seq_printf(m, " requested threads: %d+%d/%d\n"
5398 " ready threads %d\n"
5399 " free async space %zd\n", proc->requested_threads,
5400 proc->requested_threads_started, proc->max_threads,
5401 ready_threads,
5402 free_async_space);
5403 count = 0;
5404 for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n))
5405 count++;
5406 binder_inner_proc_unlock(proc);
5407 seq_printf(m, " nodes: %d\n", count);
5408 count = 0;
5409 strong = 0;
5410 weak = 0;
5411 binder_proc_lock(proc);
5412 for (n = rb_first(&proc->refs_by_desc); n != NULL; n = rb_next(n)) {
5413 struct binder_ref *ref = rb_entry(n, struct binder_ref,
5414 rb_node_desc);
5415 count++;
5416 strong += ref->data.strong;
5417 weak += ref->data.weak;
5419 binder_proc_unlock(proc);
5420 seq_printf(m, " refs: %d s %d w %d\n", count, strong, weak);
5422 count = binder_alloc_get_allocated_count(&proc->alloc);
5423 seq_printf(m, " buffers: %d\n", count);
5425 binder_alloc_print_pages(m, &proc->alloc);
5427 count = 0;
5428 binder_inner_proc_lock(proc);
5429 list_for_each_entry(w, &proc->todo, entry) {
5430 if (w->type == BINDER_WORK_TRANSACTION)
5431 count++;
5433 binder_inner_proc_unlock(proc);
5434 seq_printf(m, " pending transactions: %d\n", count);
5436 print_binder_stats(m, " ", &proc->stats);
5440 static int binder_state_show(struct seq_file *m, void *unused)
5442 struct binder_proc *proc;
5443 struct binder_node *node;
5444 struct binder_node *last_node = NULL;
5446 seq_puts(m, "binder state:\n");
5448 spin_lock(&binder_dead_nodes_lock);
5449 if (!hlist_empty(&binder_dead_nodes))
5450 seq_puts(m, "dead nodes:\n");
5451 hlist_for_each_entry(node, &binder_dead_nodes, dead_node) {
5452 /*
5453 * take a temporary reference on the node so it
5454 * survives and isn't removed from the list
5455 * while we print it.
5456 */
5457 node->tmp_refs++;
5458 spin_unlock(&binder_dead_nodes_lock);
5459 if (last_node)
5460 binder_put_node(last_node);
5461 binder_node_lock(node);
5462 print_binder_node_nilocked(m, node);
5463 binder_node_unlock(node);
5464 last_node = node;
5465 spin_lock(&binder_dead_nodes_lock);
5467 spin_unlock(&binder_dead_nodes_lock);
5468 if (last_node)
5469 binder_put_node(last_node);
5471 mutex_lock(&binder_procs_lock);
5472 hlist_for_each_entry(proc, &binder_procs, proc_node)
5473 print_binder_proc(m, proc, 1);
5474 mutex_unlock(&binder_procs_lock);
5476 return 0;
5479 static int binder_stats_show(struct seq_file *m, void *unused)
5481 struct binder_proc *proc;
5483 seq_puts(m, "binder stats:\n");
5485 print_binder_stats(m, "", &binder_stats);
5487 mutex_lock(&binder_procs_lock);
5488 hlist_for_each_entry(proc, &binder_procs, proc_node)
5489 print_binder_proc_stats(m, proc);
5490 mutex_unlock(&binder_procs_lock);
5492 return 0;
5495 static int binder_transactions_show(struct seq_file *m, void *unused)
5497 struct binder_proc *proc;
5499 seq_puts(m, "binder transactions:\n");
5500 mutex_lock(&binder_procs_lock);
5501 hlist_for_each_entry(proc, &binder_procs, proc_node)
5502 print_binder_proc(m, proc, 0);
5503 mutex_unlock(&binder_procs_lock);
5505 return 0;
5508 static int binder_proc_show(struct seq_file *m, void *unused)
5510 struct binder_proc *itr;
5511 int pid = (unsigned long)m->private;
5513 mutex_lock(&binder_procs_lock);
5514 hlist_for_each_entry(itr, &binder_procs, proc_node) {
5515 if (itr->pid == pid) {
5516 seq_puts(m, "binder proc state:\n");
5517 print_binder_proc(m, itr, 1);
5520 mutex_unlock(&binder_procs_lock);
5522 return 0;
5525 static void print_binder_transaction_log_entry(struct seq_file *m,
5526 struct binder_transaction_log_entry *e)
5528 int debug_id = READ_ONCE(e->debug_id_done);
5529 /*
5530 * read barrier to guarantee debug_id_done read before
5531 * we print the log values
5532 */
5533 smp_rmb();
5534 seq_printf(m,
5535 "%d: %s from %d:%d to %d:%d context %s node %d handle %d size %d:%d ret %d/%d l=%d",
5536 e->debug_id, (e->call_type == 2) ? "reply" :
5537 ((e->call_type == 1) ? "async" : "call "), e->from_proc,
5538 e->from_thread, e->to_proc, e->to_thread, e->context_name,
5539 e->to_node, e->target_handle, e->data_size, e->offsets_size,
5540 e->return_error, e->return_error_param,
5541 e->return_error_line);
5542 /*
5543 * read barrier to guarantee the read of debug_id_done happens
5544 * after we are done printing the fields of the entry
5545 */
5546 smp_rmb();
5547 seq_printf(m, debug_id && debug_id == READ_ONCE(e->debug_id_done) ?
5548 "\n" : " (incomplete)\n");
5551 static int binder_transaction_log_show(struct seq_file *m, void *unused)
5553 struct binder_transaction_log *log = m->private;
5554 unsigned int log_cur = atomic_read(&log->cur);
5555 unsigned int count;
5556 unsigned int cur;
5557 int i;
5559 count = log_cur + 1;
5560 cur = count < ARRAY_SIZE(log->entry) && !log->full ?
5561 0 : count % ARRAY_SIZE(log->entry);
5562 if (count > ARRAY_SIZE(log->entry) || log->full)
5563 count = ARRAY_SIZE(log->entry);
5564 for (i = 0; i < count; i++) {
5565 unsigned int index = cur++ % ARRAY_SIZE(log->entry);
5567 print_binder_transaction_log_entry(m, &log->entry[index]);
5569 return 0;
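/*
 * Worked example of the index math above for the 32-entry transaction
 * log: if log_cur == 35 the log has wrapped, so count starts at 36,
 * cur = 36 % 32 = 4, and count is clamped to 32; the loop prints
 * entries 4, 5, ..., 31, 0, ..., 3, oldest first. Before the first
 * wrap (say log_cur == 9 and !log->full), cur is 0 and count is 10,
 * printing entries 0..9 in insertion order.
 */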
5572 static const struct file_operations binder_fops = {
5573 .owner = THIS_MODULE,
5574 .poll = binder_poll,
5575 .unlocked_ioctl = binder_ioctl,
5576 .compat_ioctl = binder_ioctl,
5577 .mmap = binder_mmap,
5578 .open = binder_open,
5579 .flush = binder_flush,
5580 .release = binder_release,
5583 BINDER_DEBUG_ENTRY(state);
5584 BINDER_DEBUG_ENTRY(stats);
5585 BINDER_DEBUG_ENTRY(transactions);
5586 BINDER_DEBUG_ENTRY(transaction_log);
5588 static int __init init_binder_device(const char *name)
5590 int ret;
5591 struct binder_device *binder_device;
5593 binder_device = kzalloc(sizeof(*binder_device), GFP_KERNEL);
5594 if (!binder_device)
5595 return -ENOMEM;
5597 binder_device->miscdev.fops = &binder_fops;
5598 binder_device->miscdev.minor = MISC_DYNAMIC_MINOR;
5599 binder_device->miscdev.name = name;
5601 binder_device->context.binder_context_mgr_uid = INVALID_UID;
5602 binder_device->context.name = name;
5603 mutex_init(&binder_device->context.context_mgr_node_lock);
5605 ret = misc_register(&binder_device->miscdev);
5606 if (ret < 0) {
5607 kfree(binder_device);
5608 return ret;
5611 hlist_add_head(&binder_device->hlist, &binder_devices);
5613 return ret;
5616 static int __init binder_init(void)
5618 int ret;
5619 char *device_name, *device_names, *device_tmp;
5620 struct binder_device *device;
5621 struct hlist_node *tmp;
5623 ret = binder_alloc_shrinker_init();
5624 if (ret)
5625 return ret;
5627 atomic_set(&binder_transaction_log.cur, ~0U);
5628 atomic_set(&binder_transaction_log_failed.cur, ~0U);
5630 binder_debugfs_dir_entry_root = debugfs_create_dir("binder", NULL);
5631 if (binder_debugfs_dir_entry_root)
5632 binder_debugfs_dir_entry_proc = debugfs_create_dir("proc",
5633 binder_debugfs_dir_entry_root);
5635 if (binder_debugfs_dir_entry_root) {
5636 debugfs_create_file("state",
5637 0444,
5638 binder_debugfs_dir_entry_root,
5639 NULL,
5640 &binder_state_fops);
5641 debugfs_create_file("stats",
5642 0444,
5643 binder_debugfs_dir_entry_root,
5644 NULL,
5645 &binder_stats_fops);
5646 debugfs_create_file("transactions",
5647 0444,
5648 binder_debugfs_dir_entry_root,
5649 NULL,
5650 &binder_transactions_fops);
5651 debugfs_create_file("transaction_log",
5652 0444,
5653 binder_debugfs_dir_entry_root,
5654 &binder_transaction_log,
5655 &binder_transaction_log_fops);
5656 debugfs_create_file("failed_transaction_log",
5657 0444,
5658 binder_debugfs_dir_entry_root,
5659 &binder_transaction_log_failed,
5660 &binder_transaction_log_fops);
5663 /*
5664 * Copy the module parameter string, because we don't want to
5665 * tokenize it in-place.
5666 */
5667 device_names = kzalloc(strlen(binder_devices_param) + 1, GFP_KERNEL);
5668 if (!device_names) {
5669 ret = -ENOMEM;
5670 goto err_alloc_device_names_failed;
5672 strcpy(device_names, binder_devices_param);
5674 device_tmp = device_names;
5675 while ((device_name = strsep(&device_tmp, ","))) {
5676 ret = init_binder_device(device_name);
5677 if (ret)
5678 goto err_init_binder_device_failed;
5681 return ret;
5683 err_init_binder_device_failed:
5684 hlist_for_each_entry_safe(device, tmp, &binder_devices, hlist) {
5685 misc_deregister(&device->miscdev);
5686 hlist_del(&device->hlist);
5687 kfree(device);
5690 kfree(device_names);
5692 err_alloc_device_names_failed:
5693 debugfs_remove_recursive(binder_debugfs_dir_entry_root);
5695 return ret;
5698 device_initcall(binder_init);
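/*
 * The comma-separated list parsed in binder_init() comes from the
 * binder.devices module parameter; for example, booting with
 * binder.devices=binder,hwbinder,vndbinder (the usual Android
 * configuration) registers three misc devices that share this driver
 * but keep separate binder_context instances.
 */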
5700 #define CREATE_TRACE_POINTS
5701 #include "binder_trace.h"
5703 MODULE_LICENSE("GPL v2");