// SPDX-License-Identifier: GPL-2.0-only
/* binder.c
 *
 * Android IPC Subsystem
 *
 * Copyright (C) 2007-2008 Google, Inc.
 */

/*
 * Locking overview
 *
 * There are 3 main spinlocks which must be acquired in the
 * order shown:
 *
 * 1) proc->outer_lock : protects binder_ref
 *    binder_proc_lock() and binder_proc_unlock() are
 *    used to acq/rel.
 * 2) node->lock : protects most fields of binder_node.
 *    binder_node_lock() and binder_node_unlock() are
 *    used to acq/rel
 * 3) proc->inner_lock : protects the thread and node lists
 *    (proc->threads, proc->waiting_threads, proc->nodes)
 *    and all todo lists associated with the binder_proc
 *    (proc->todo, thread->todo, proc->delivered_death and
 *    node->async_todo), as well as thread->transaction_stack
 *    binder_inner_proc_lock() and binder_inner_proc_unlock()
 *    are used to acq/rel
 *
 * Any lock under procA must never be nested under any lock at the same
 * level or below on procB.
 *
 * Functions that require a lock held on entry indicate which lock
 * in the suffix of the function name:
 *
 * foo_olocked() : requires proc->outer_lock
 * foo_nlocked() : requires node->lock
 * foo_ilocked() : requires proc->inner_lock
 * foo_oilocked(): requires proc->outer_lock and proc->inner_lock
 * foo_nilocked(): requires node->lock and proc->inner_lock
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/fdtable.h>
#include <linux/file.h>
#include <linux/freezer.h>
#include <linux/fs.h>
#include <linux/list.h>
#include <linux/miscdevice.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/nsproxy.h>
#include <linux/poll.h>
#include <linux/debugfs.h>
#include <linux/rbtree.h>
#include <linux/sched/signal.h>
#include <linux/sched/mm.h>
#include <linux/seq_file.h>
#include <linux/string.h>
#include <linux/uaccess.h>
#include <linux/pid_namespace.h>
#include <linux/security.h>
#include <linux/spinlock.h>
#include <linux/ratelimit.h>
#include <linux/syscalls.h>
#include <linux/task_work.h>
#include <linux/sizes.h>
#include <linux/ktime.h>

#include <uapi/linux/android/binder.h>

#include <linux/cacheflush.h>

#include "binder_internal.h"
#include "binder_trace.h"

static HLIST_HEAD(binder_deferred_list);
static DEFINE_MUTEX(binder_deferred_lock);

static HLIST_HEAD(binder_devices);
static HLIST_HEAD(binder_procs);
static DEFINE_MUTEX(binder_procs_lock);

static HLIST_HEAD(binder_dead_nodes);
static DEFINE_SPINLOCK(binder_dead_nodes_lock);

static struct dentry *binder_debugfs_dir_entry_root;
static struct dentry *binder_debugfs_dir_entry_proc;
static atomic_t binder_last_id;

static int proc_show(struct seq_file *m, void *unused);
DEFINE_SHOW_ATTRIBUTE(proc);
#define FORBIDDEN_MMAP_FLAGS (VM_WRITE)

enum {
	BINDER_DEBUG_USER_ERROR             = 1U << 0,
	BINDER_DEBUG_FAILED_TRANSACTION     = 1U << 1,
	BINDER_DEBUG_DEAD_TRANSACTION       = 1U << 2,
	BINDER_DEBUG_OPEN_CLOSE             = 1U << 3,
	BINDER_DEBUG_DEAD_BINDER            = 1U << 4,
	BINDER_DEBUG_DEATH_NOTIFICATION     = 1U << 5,
	BINDER_DEBUG_READ_WRITE             = 1U << 6,
	BINDER_DEBUG_USER_REFS              = 1U << 7,
	BINDER_DEBUG_THREADS                = 1U << 8,
	BINDER_DEBUG_TRANSACTION            = 1U << 9,
	BINDER_DEBUG_TRANSACTION_COMPLETE   = 1U << 10,
	BINDER_DEBUG_FREE_BUFFER            = 1U << 11,
	BINDER_DEBUG_INTERNAL_REFS          = 1U << 12,
	BINDER_DEBUG_PRIORITY_CAP           = 1U << 13,
	BINDER_DEBUG_SPINLOCKS              = 1U << 14,
};
static uint32_t binder_debug_mask = BINDER_DEBUG_USER_ERROR |
	BINDER_DEBUG_FAILED_TRANSACTION | BINDER_DEBUG_DEAD_TRANSACTION;
module_param_named(debug_mask, binder_debug_mask, uint, 0644);
char *binder_devices_param = CONFIG_ANDROID_BINDER_DEVICES;
module_param_named(devices, binder_devices_param, charp, 0444);

static DECLARE_WAIT_QUEUE_HEAD(binder_user_error_wait);
static int binder_stop_on_user_error;

static int binder_set_stop_on_user_error(const char *val,
					 const struct kernel_param *kp)
{
	int ret;

	ret = param_set_int(val, kp);
	if (binder_stop_on_user_error < 2)
		wake_up(&binder_user_error_wait);

	return ret;
}
module_param_call(stop_on_user_error, binder_set_stop_on_user_error,
		  param_get_int, &binder_stop_on_user_error, 0644);
static __printf(2, 3) void binder_debug(int mask, const char *format, ...)
{
	struct va_format vaf;
	va_list args;

	if (binder_debug_mask & mask) {
		va_start(args, format);
		vaf.va = &args;
		vaf.fmt = format;
		pr_info_ratelimited("%pV", &vaf);
		va_end(args);
	}
}

#define binder_txn_error(x...) \
	binder_debug(BINDER_DEBUG_FAILED_TRANSACTION, x)

static __printf(1, 2) void binder_user_error(const char *format, ...)
{
	struct va_format vaf;
	va_list args;

	if (binder_debug_mask & BINDER_DEBUG_USER_ERROR) {
		va_start(args, format);
		vaf.va = &args;
		vaf.fmt = format;
		pr_info_ratelimited("%pV", &vaf);
		va_end(args);
	}

	if (binder_stop_on_user_error)
		binder_stop_on_user_error = 2;
}
#define binder_set_extended_error(ee, _id, _command, _param) \
	do { \
		(ee)->id = _id; \
		(ee)->command = _command; \
		(ee)->param = _param; \
	} while (0)

#define to_flat_binder_object(hdr) \
	container_of(hdr, struct flat_binder_object, hdr)

#define to_binder_fd_object(hdr) container_of(hdr, struct binder_fd_object, hdr)

#define to_binder_buffer_object(hdr) \
	container_of(hdr, struct binder_buffer_object, hdr)

#define to_binder_fd_array_object(hdr) \
	container_of(hdr, struct binder_fd_array_object, hdr)

static struct binder_stats binder_stats;

static inline void binder_stats_deleted(enum binder_stat_types type)
{
	atomic_inc(&binder_stats.obj_deleted[type]);
}

static inline void binder_stats_created(enum binder_stat_types type)
{
	atomic_inc(&binder_stats.obj_created[type]);
}
struct binder_transaction_log_entry {
	int debug_id;
	int debug_id_done;
	int call_type;
	int from_proc;
	int from_thread;
	int target_handle;
	int to_proc;
	int to_thread;
	int to_node;
	int data_size;
	int offsets_size;
	int return_error_line;
	uint32_t return_error;
	uint32_t return_error_param;
	char context_name[BINDERFS_MAX_NAME + 1];
};

struct binder_transaction_log {
	atomic_t cur;
	bool full;
	struct binder_transaction_log_entry entry[32];
};

static struct binder_transaction_log binder_transaction_log;
static struct binder_transaction_log binder_transaction_log_failed;

static struct binder_transaction_log_entry *binder_transaction_log_add(
	struct binder_transaction_log *log)
{
	struct binder_transaction_log_entry *e;
	unsigned int cur = atomic_inc_return(&log->cur);

	if (cur >= ARRAY_SIZE(log->entry))
		log->full = true;
	e = &log->entry[cur % ARRAY_SIZE(log->entry)];
	WRITE_ONCE(e->debug_id_done, 0);
	/*
	 * write-barrier to synchronize access to e->debug_id_done.
	 * We make sure the initialized 0 value is seen before
	 * the other fields are zeroed by the memset() below.
	 */
	smp_wmb();
	memset(e, 0, sizeof(*e));

	return e;
}
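/*
 * Worked example (illustrative only): with ARRAY_SIZE(log->entry) == 32,
 * the call that receives cur == 33 from atomic_inc_return() marks the log
 * full (cur >= 32) and recycles entry[33 % 32], i.e. entry[1]. The
 * WRITE_ONCE()/smp_wmb() pair above lets a reader detect a recycled entry
 * whose fields are still being rewritten.
 */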
enum binder_deferred_state {
	BINDER_DEFERRED_FLUSH        = 0x01,
	BINDER_DEFERRED_RELEASE      = 0x02,
};

enum {
	BINDER_LOOPER_STATE_REGISTERED  = 0x01,
	BINDER_LOOPER_STATE_ENTERED     = 0x02,
	BINDER_LOOPER_STATE_EXITED      = 0x04,
	BINDER_LOOPER_STATE_INVALID     = 0x08,
	BINDER_LOOPER_STATE_WAITING     = 0x10,
	BINDER_LOOPER_STATE_POLL        = 0x20,
};
/**
 * binder_proc_lock() - Acquire outer lock for given binder_proc
 * @proc:         struct binder_proc to acquire
 *
 * Acquires proc->outer_lock. Used to protect binder_ref
 * structures associated with the given proc.
 */
#define binder_proc_lock(proc) _binder_proc_lock(proc, __LINE__)
static void
_binder_proc_lock(struct binder_proc *proc, int line)
	__acquires(&proc->outer_lock)
{
	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	spin_lock(&proc->outer_lock);
}

/**
 * binder_proc_unlock() - Release outer lock for given binder_proc
 * @proc:         struct binder_proc to release
 *
 * Release lock acquired via binder_proc_lock()
 */
#define binder_proc_unlock(proc) _binder_proc_unlock(proc, __LINE__)
static void
_binder_proc_unlock(struct binder_proc *proc, int line)
	__releases(&proc->outer_lock)
{
	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	spin_unlock(&proc->outer_lock);
}

/**
 * binder_inner_proc_lock() - Acquire inner lock for given binder_proc
 * @proc:         struct binder_proc to acquire
 *
 * Acquires proc->inner_lock. Used to protect todo lists
 */
#define binder_inner_proc_lock(proc) _binder_inner_proc_lock(proc, __LINE__)
static void
_binder_inner_proc_lock(struct binder_proc *proc, int line)
	__acquires(&proc->inner_lock)
{
	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	spin_lock(&proc->inner_lock);
}

/**
 * binder_inner_proc_unlock() - Release inner lock for given binder_proc
 * @proc:         struct binder_proc to release
 *
 * Release lock acquired via binder_inner_proc_lock()
 */
#define binder_inner_proc_unlock(proc) _binder_inner_proc_unlock(proc, __LINE__)
static void
_binder_inner_proc_unlock(struct binder_proc *proc, int line)
	__releases(&proc->inner_lock)
{
	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	spin_unlock(&proc->inner_lock);
}
/**
 * binder_node_lock() - Acquire spinlock for given binder_node
 * @node:         struct binder_node to acquire
 *
 * Acquires node->lock. Used to protect binder_node fields
 */
#define binder_node_lock(node) _binder_node_lock(node, __LINE__)
static void
_binder_node_lock(struct binder_node *node, int line)
	__acquires(&node->lock)
{
	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	spin_lock(&node->lock);
}

/**
 * binder_node_unlock() - Release spinlock for given binder_node
 * @node:         struct binder_node to release
 *
 * Release lock acquired via binder_node_lock()
 */
#define binder_node_unlock(node) _binder_node_unlock(node, __LINE__)
static void
_binder_node_unlock(struct binder_node *node, int line)
	__releases(&node->lock)
{
	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	spin_unlock(&node->lock);
}

/**
 * binder_node_inner_lock() - Acquire node and inner locks
 * @node:         struct binder_node to acquire
 *
 * Acquires node->lock. If node->proc is non-NULL, also acquires
 * node->proc->inner_lock. Used to protect binder_node fields
 */
#define binder_node_inner_lock(node) _binder_node_inner_lock(node, __LINE__)
static void
_binder_node_inner_lock(struct binder_node *node, int line)
	__acquires(&node->lock) __acquires(&node->proc->inner_lock)
{
	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	spin_lock(&node->lock);
	if (node->proc)
		binder_inner_proc_lock(node->proc);
	else
		/* annotation for sparse */
		__acquire(&node->proc->inner_lock);
}

/**
 * binder_node_inner_unlock() - Release node and inner locks
 * @node:         struct binder_node to release
 *
 * Release locks acquired via binder_node_inner_lock()
 */
#define binder_node_inner_unlock(node) _binder_node_inner_unlock(node, __LINE__)
static void
_binder_node_inner_unlock(struct binder_node *node, int line)
	__releases(&node->lock) __releases(&node->proc->inner_lock)
{
	struct binder_proc *proc = node->proc;

	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	if (proc)
		binder_inner_proc_unlock(proc);
	else
		/* annotation for sparse */
		__release(&node->proc->inner_lock);
	spin_unlock(&node->lock);
}
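/*
 * Illustrative sketch, not part of the driver: demonstrates the lock
 * ordering documented at the top of this file using the helpers defined
 * above. The function itself is hypothetical and never called.
 */
static void __maybe_unused binder_lock_order_sketch(struct binder_proc *proc,
						    struct binder_node *node)
{
	binder_proc_lock(proc);		/* 1) proc->outer_lock */
	binder_node_lock(node);		/* 2) node->lock */
	binder_inner_proc_lock(proc);	/* 3) proc->inner_lock */

	/* ... access refs, node fields and todo lists here ... */

	binder_inner_proc_unlock(proc);	/* release in reverse order */
	binder_node_unlock(node);
	binder_proc_unlock(proc);
}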
static bool binder_worklist_empty_ilocked(struct list_head *list)
{
	return list_empty(list);
}

/**
 * binder_worklist_empty() - Check if no items on the work list
 * @proc:       binder_proc associated with list
 * @list:       list to check
 *
 * Return: true if there are no items on list, else false
 */
static bool binder_worklist_empty(struct binder_proc *proc,
				  struct list_head *list)
{
	bool ret;

	binder_inner_proc_lock(proc);
	ret = binder_worklist_empty_ilocked(list);
	binder_inner_proc_unlock(proc);

	return ret;
}

/**
 * binder_enqueue_work_ilocked() - Add an item to the work list
 * @work:         struct binder_work to add to list
 * @target_list:  list to add work to
 *
 * Adds the work to the specified list. Asserts that work
 * is not already on a list.
 *
 * Requires the proc->inner_lock to be held.
 */
static void
binder_enqueue_work_ilocked(struct binder_work *work,
			    struct list_head *target_list)
{
	BUG_ON(target_list == NULL);
	BUG_ON(work->entry.next && !list_empty(&work->entry));
	list_add_tail(&work->entry, target_list);
}

/**
 * binder_enqueue_deferred_thread_work_ilocked() - Add deferred thread work
 * @thread:       thread to queue work to
 * @work:         struct binder_work to add to list
 *
 * Adds the work to the todo list of the thread. Doesn't set the process_todo
 * flag, which means that (if it wasn't already set) the thread will go to
 * sleep without handling this work when it calls read.
 *
 * Requires the proc->inner_lock to be held.
 */
static void
binder_enqueue_deferred_thread_work_ilocked(struct binder_thread *thread,
					    struct binder_work *work)
{
	WARN_ON(!list_empty(&thread->waiting_thread_node));
	binder_enqueue_work_ilocked(work, &thread->todo);
}
/**
 * binder_enqueue_thread_work_ilocked() - Add an item to the thread work list
 * @thread:       thread to queue work to
 * @work:         struct binder_work to add to list
 *
 * Adds the work to the todo list of the thread, and enables processing
 * of the todo queue.
 *
 * Requires the proc->inner_lock to be held.
 */
static void
binder_enqueue_thread_work_ilocked(struct binder_thread *thread,
				   struct binder_work *work)
{
	WARN_ON(!list_empty(&thread->waiting_thread_node));
	binder_enqueue_work_ilocked(work, &thread->todo);

	/* (e)poll-based threads require an explicit wakeup signal when
	 * queuing their own work; they rely on these events to consume
	 * messages without blocking on I/O. Without this wakeup, such
	 * threads risk waiting indefinitely without handling the work.
	 */
	if (thread->looper & BINDER_LOOPER_STATE_POLL &&
	    thread->pid == current->pid && !thread->process_todo)
		wake_up_interruptible_sync(&thread->wait);

	thread->process_todo = true;
}

/**
 * binder_enqueue_thread_work() - Add an item to the thread work list
 * @thread:       thread to queue work to
 * @work:         struct binder_work to add to list
 *
 * Adds the work to the todo list of the thread, and enables processing
 * of the todo queue.
 */
static void
binder_enqueue_thread_work(struct binder_thread *thread,
			   struct binder_work *work)
{
	binder_inner_proc_lock(thread->proc);
	binder_enqueue_thread_work_ilocked(thread, work);
	binder_inner_proc_unlock(thread->proc);
}
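/*
 * Illustrative sketch, not part of the driver: contrasts the two enqueue
 * variants above. The helper is hypothetical and never called.
 */
static void __maybe_unused binder_enqueue_contrast_sketch(
		struct binder_thread *thread,
		struct binder_work *w1, struct binder_work *w2)
{
	binder_inner_proc_lock(thread->proc);

	/* marks the thread as having work: a reader will return with w1 */
	binder_enqueue_thread_work_ilocked(thread, w1);

	/* queued, but does not by itself set process_todo */
	binder_enqueue_deferred_thread_work_ilocked(thread, w2);

	binder_inner_proc_unlock(thread->proc);
}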
static void
binder_dequeue_work_ilocked(struct binder_work *work)
{
	list_del_init(&work->entry);
}

/**
 * binder_dequeue_work() - Removes an item from the work list
 * @proc:         binder_proc associated with list
 * @work:         struct binder_work to remove from list
 *
 * Removes the specified work item from whatever list it is on.
 * Can safely be called if work is not on any list.
 */
static void
binder_dequeue_work(struct binder_proc *proc, struct binder_work *work)
{
	binder_inner_proc_lock(proc);
	binder_dequeue_work_ilocked(work);
	binder_inner_proc_unlock(proc);
}

static struct binder_work *binder_dequeue_work_head_ilocked(
					struct list_head *list)
{
	struct binder_work *w;

	w = list_first_entry_or_null(list, struct binder_work, entry);
	if (w)
		list_del_init(&w->entry);

	return w;
}

static void
binder_defer_work(struct binder_proc *proc, enum binder_deferred_state defer);
static void binder_free_thread(struct binder_thread *thread);
static void binder_free_proc(struct binder_proc *proc);
static void binder_inc_node_tmpref_ilocked(struct binder_node *node);
static bool binder_has_work_ilocked(struct binder_thread *thread,
				    bool do_proc_work)
{
	return thread->process_todo ||
		thread->looper_need_return ||
		(do_proc_work &&
		 !binder_worklist_empty_ilocked(&thread->proc->todo));
}

static bool binder_has_work(struct binder_thread *thread, bool do_proc_work)
{
	bool has_work;

	binder_inner_proc_lock(thread->proc);
	has_work = binder_has_work_ilocked(thread, do_proc_work);
	binder_inner_proc_unlock(thread->proc);

	return has_work;
}

static bool binder_available_for_proc_work_ilocked(struct binder_thread *thread)
{
	return !thread->transaction_stack &&
		binder_worklist_empty_ilocked(&thread->todo) &&
		(thread->looper & (BINDER_LOOPER_STATE_ENTERED |
				   BINDER_LOOPER_STATE_REGISTERED));
}

static void binder_wakeup_poll_threads_ilocked(struct binder_proc *proc,
					       bool sync)
{
	struct rb_node *n;
	struct binder_thread *thread;

	for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n)) {
		thread = rb_entry(n, struct binder_thread, rb_node);
		if (thread->looper & BINDER_LOOPER_STATE_POLL &&
		    binder_available_for_proc_work_ilocked(thread)) {
			if (sync)
				wake_up_interruptible_sync(&thread->wait);
			else
				wake_up_interruptible(&thread->wait);
		}
	}
}
/**
 * binder_select_thread_ilocked() - selects a thread for doing proc work.
 * @proc:	process to select a thread from
 *
 * Note that calling this function moves the thread off the waiting_threads
 * list, so it can only be woken up by the caller of this function, or a
 * signal. Therefore, callers *should* always wake up the thread this function
 * returns.
 *
 * Return:	If there's a thread currently waiting for process work,
 *		returns that thread. Otherwise returns NULL.
 */
static struct binder_thread *
binder_select_thread_ilocked(struct binder_proc *proc)
{
	struct binder_thread *thread;

	assert_spin_locked(&proc->inner_lock);
	thread = list_first_entry_or_null(&proc->waiting_threads,
					  struct binder_thread,
					  waiting_thread_node);

	if (thread)
		list_del_init(&thread->waiting_thread_node);

	return thread;
}

/**
 * binder_wakeup_thread_ilocked() - wakes up a thread for doing proc work.
 * @proc:	process to wake up a thread in
 * @thread:	specific thread to wake-up (may be NULL)
 * @sync:	whether to do a synchronous wake-up
 *
 * This function wakes up a thread in the @proc process.
 * The caller may provide a specific thread to wake-up in
 * the @thread parameter. If @thread is NULL, this function
 * will wake up threads that have called poll().
 *
 * Note that for this function to work as expected, callers
 * should first call binder_select_thread() to find a thread
 * to handle the work (if they don't have a thread already),
 * and pass the result into the @thread parameter.
 */
static void binder_wakeup_thread_ilocked(struct binder_proc *proc,
					 struct binder_thread *thread,
					 bool sync)
{
	assert_spin_locked(&proc->inner_lock);

	if (thread) {
		if (sync)
			wake_up_interruptible_sync(&thread->wait);
		else
			wake_up_interruptible(&thread->wait);
		return;
	}

	/* Didn't find a thread waiting for proc work; this can happen
	 * in two scenarios:
	 * 1. All threads are busy handling transactions
	 *    In that case, one of those threads should call back into
	 *    the kernel driver soon and pick up this work.
	 * 2. Threads are using the (e)poll interface, in which case
	 *    they may be blocked on the waitqueue without having been
	 *    added to waiting_threads. For this case, we just iterate
	 *    over all threads not handling transaction work, and
	 *    wake them all up. We wake all because we don't know whether
	 *    a thread that called into (e)poll is handling non-binder
	 *    work currently.
	 */
	binder_wakeup_poll_threads_ilocked(proc, sync);
}

static void binder_wakeup_proc_ilocked(struct binder_proc *proc)
{
	struct binder_thread *thread = binder_select_thread_ilocked(proc);

	binder_wakeup_thread_ilocked(proc, thread, /* sync = */false);
}
static void binder_set_nice(long nice)
{
	long min_nice;

	if (can_nice(current, nice)) {
		set_user_nice(current, nice);
		return;
	}
	min_nice = rlimit_to_nice(rlimit(RLIMIT_NICE));
	binder_debug(BINDER_DEBUG_PRIORITY_CAP,
		     "%d: nice value %ld not allowed use %ld instead\n",
		      current->pid, nice, min_nice);
	set_user_nice(current, min_nice);
	if (min_nice <= MAX_NICE)
		return;
	binder_user_error("%d RLIMIT_NICE not set\n", current->pid);
}

static struct binder_node *binder_get_node_ilocked(struct binder_proc *proc,
						   binder_uintptr_t ptr)
{
	struct rb_node *n = proc->nodes.rb_node;
	struct binder_node *node;

	assert_spin_locked(&proc->inner_lock);

	while (n) {
		node = rb_entry(n, struct binder_node, rb_node);

		if (ptr < node->ptr)
			n = n->rb_left;
		else if (ptr > node->ptr)
			n = n->rb_right;
		else {
			/*
			 * take an implicit weak reference
			 * to ensure node stays alive until
			 * call to binder_put_node()
			 */
			binder_inc_node_tmpref_ilocked(node);
			return node;
		}
	}
	return NULL;
}

static struct binder_node *binder_get_node(struct binder_proc *proc,
					   binder_uintptr_t ptr)
{
	struct binder_node *node;

	binder_inner_proc_lock(proc);
	node = binder_get_node_ilocked(proc, ptr);
	binder_inner_proc_unlock(proc);

	return node;
}
static struct binder_node *binder_init_node_ilocked(
						struct binder_proc *proc,
						struct binder_node *new_node,
						struct flat_binder_object *fp)
{
	struct rb_node **p = &proc->nodes.rb_node;
	struct rb_node *parent = NULL;
	struct binder_node *node;
	binder_uintptr_t ptr = fp ? fp->binder : 0;
	binder_uintptr_t cookie = fp ? fp->cookie : 0;
	__u32 flags = fp ? fp->flags : 0;

	assert_spin_locked(&proc->inner_lock);

	while (*p) {
		parent = *p;
		node = rb_entry(parent, struct binder_node, rb_node);

		if (ptr < node->ptr)
			p = &(*p)->rb_left;
		else if (ptr > node->ptr)
			p = &(*p)->rb_right;
		else {
			/*
			 * A matching node is already in
			 * the rb tree. Abandon the init
			 * and return it.
			 */
			binder_inc_node_tmpref_ilocked(node);
			return node;
		}
	}
	node = new_node;
	binder_stats_created(BINDER_STAT_NODE);
	node->tmp_refs++;
	rb_link_node(&node->rb_node, parent, p);
	rb_insert_color(&node->rb_node, &proc->nodes);
	node->debug_id = atomic_inc_return(&binder_last_id);
	node->proc = proc;
	node->ptr = ptr;
	node->cookie = cookie;
	node->work.type = BINDER_WORK_NODE;
	node->min_priority = flags & FLAT_BINDER_FLAG_PRIORITY_MASK;
	node->accept_fds = !!(flags & FLAT_BINDER_FLAG_ACCEPTS_FDS);
	node->txn_security_ctx = !!(flags & FLAT_BINDER_FLAG_TXN_SECURITY_CTX);
	spin_lock_init(&node->lock);
	INIT_LIST_HEAD(&node->work.entry);
	INIT_LIST_HEAD(&node->async_todo);
	binder_debug(BINDER_DEBUG_INTERNAL_REFS,
		     "%d:%d node %d u%016llx c%016llx created\n",
		     proc->pid, current->pid, node->debug_id,
		     (u64)node->ptr, (u64)node->cookie);

	return node;
}

static struct binder_node *binder_new_node(struct binder_proc *proc,
					   struct flat_binder_object *fp)
{
	struct binder_node *node;
	struct binder_node *new_node = kzalloc(sizeof(*node), GFP_KERNEL);

	if (!new_node)
		return NULL;
	binder_inner_proc_lock(proc);
	node = binder_init_node_ilocked(proc, new_node, fp);
	binder_inner_proc_unlock(proc);
	if (node != new_node)
		/*
		 * The node was already added by another thread
		 */
		kfree(new_node);

	return node;
}

static void binder_free_node(struct binder_node *node)
{
	kfree(node);
	binder_stats_deleted(BINDER_STAT_NODE);
}
static int binder_inc_node_nilocked(struct binder_node *node, int strong,
				    int internal,
				    struct list_head *target_list)
{
	struct binder_proc *proc = node->proc;

	assert_spin_locked(&node->lock);
	if (proc)
		assert_spin_locked(&proc->inner_lock);
	if (strong) {
		if (internal) {
			if (target_list == NULL &&
			    node->internal_strong_refs == 0 &&
			    !(node->proc &&
			      node == node->proc->context->binder_context_mgr_node &&
			      node->has_strong_ref)) {
				pr_err("invalid inc strong node for %d\n",
					node->debug_id);
				return -EINVAL;
			}
			node->internal_strong_refs++;
		} else
			node->local_strong_refs++;
		if (!node->has_strong_ref && target_list) {
			struct binder_thread *thread = container_of(target_list,
						struct binder_thread, todo);
			binder_dequeue_work_ilocked(&node->work);
			BUG_ON(&thread->todo != target_list);
			binder_enqueue_deferred_thread_work_ilocked(thread,
								   &node->work);
		}
	} else {
		if (!internal)
			node->local_weak_refs++;
		if (!node->has_weak_ref && list_empty(&node->work.entry)) {
			if (target_list == NULL) {
				pr_err("invalid inc weak node for %d\n",
					node->debug_id);
				return -EINVAL;
			}
			/*
			 * See comment above
			 */
			binder_enqueue_work_ilocked(&node->work, target_list);
		}
	}
	return 0;
}

static int binder_inc_node(struct binder_node *node, int strong, int internal,
			   struct list_head *target_list)
{
	int ret;

	binder_node_inner_lock(node);
	ret = binder_inc_node_nilocked(node, strong, internal, target_list);
	binder_node_inner_unlock(node);

	return ret;
}
static bool binder_dec_node_nilocked(struct binder_node *node,
				     int strong, int internal)
{
	struct binder_proc *proc = node->proc;

	assert_spin_locked(&node->lock);
	if (proc)
		assert_spin_locked(&proc->inner_lock);
	if (strong) {
		if (internal)
			node->internal_strong_refs--;
		else
			node->local_strong_refs--;
		if (node->local_strong_refs || node->internal_strong_refs)
			return false;
	} else {
		if (!internal)
			node->local_weak_refs--;
		if (node->local_weak_refs || node->tmp_refs ||
		    !hlist_empty(&node->refs))
			return false;
	}

	if (proc && (node->has_strong_ref || node->has_weak_ref)) {
		if (list_empty(&node->work.entry)) {
			binder_enqueue_work_ilocked(&node->work, &proc->todo);
			binder_wakeup_proc_ilocked(proc);
		}
	} else {
		if (hlist_empty(&node->refs) && !node->local_strong_refs &&
		    !node->local_weak_refs && !node->tmp_refs) {
			if (proc) {
				binder_dequeue_work_ilocked(&node->work);
				rb_erase(&node->rb_node, &proc->nodes);
				binder_debug(BINDER_DEBUG_INTERNAL_REFS,
					     "refless node %d deleted\n",
					     node->debug_id);
			} else {
				BUG_ON(!list_empty(&node->work.entry));
				spin_lock(&binder_dead_nodes_lock);
				/*
				 * tmp_refs could have changed so
				 * check it again
				 */
				if (node->tmp_refs) {
					spin_unlock(&binder_dead_nodes_lock);
					return false;
				}
				hlist_del(&node->dead_node);
				spin_unlock(&binder_dead_nodes_lock);
				binder_debug(BINDER_DEBUG_INTERNAL_REFS,
					     "dead node %d deleted\n",
					     node->debug_id);
			}
			return true;
		}
	}
	return false;
}

static void binder_dec_node(struct binder_node *node, int strong, int internal)
{
	bool free_node;

	binder_node_inner_lock(node);
	free_node = binder_dec_node_nilocked(node, strong, internal);
	binder_node_inner_unlock(node);
	if (free_node)
		binder_free_node(node);
}
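/*
 * Illustrative sketch, not part of the driver: a local strong reference
 * taken with binder_inc_node() must be paired with binder_dec_node()
 * using the same strong/internal arguments. Hypothetical helper, never
 * called.
 */
static void __maybe_unused binder_node_ref_sketch(struct binder_node *node)
{
	/* strong=1, internal=0: take a local strong reference */
	if (binder_inc_node(node, 1, 0, NULL))
		return;

	/* ... the node holds a strong reference here ... */

	binder_dec_node(node, 1, 0);
}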
static void binder_inc_node_tmpref_ilocked(struct binder_node *node)
{
	/*
	 * No call to binder_inc_node() is needed since we
	 * don't need to inform userspace of any changes to
	 * tmp_refs
	 */
	node->tmp_refs++;
}

/**
 * binder_inc_node_tmpref() - take a temporary reference on node
 * @node:	node to reference
 *
 * Take reference on node to prevent the node from being freed
 * while referenced only by a local variable. The inner lock is
 * needed to serialize with the node work on the queue (which
 * isn't needed after the node is dead). If the node is dead
 * (node->proc is NULL), use binder_dead_nodes_lock to protect
 * node->tmp_refs against dead-node-only cases where the node
 * lock cannot be acquired (eg traversing the dead node list to
 * print nodes)
 */
static void binder_inc_node_tmpref(struct binder_node *node)
{
	binder_node_lock(node);
	if (node->proc)
		binder_inner_proc_lock(node->proc);
	else
		spin_lock(&binder_dead_nodes_lock);
	binder_inc_node_tmpref_ilocked(node);
	if (node->proc)
		binder_inner_proc_unlock(node->proc);
	else
		spin_unlock(&binder_dead_nodes_lock);
	binder_node_unlock(node);
}

/**
 * binder_dec_node_tmpref() - remove a temporary reference on node
 * @node:	node to reference
 *
 * Release temporary reference on node taken via binder_inc_node_tmpref()
 */
static void binder_dec_node_tmpref(struct binder_node *node)
{
	bool free_node;

	binder_node_inner_lock(node);
	if (!node->proc)
		spin_lock(&binder_dead_nodes_lock);
	else
		__acquire(&binder_dead_nodes_lock);
	node->tmp_refs--;
	BUG_ON(node->tmp_refs < 0);
	if (!node->proc)
		spin_unlock(&binder_dead_nodes_lock);
	else
		__release(&binder_dead_nodes_lock);
	/*
	 * Call binder_dec_node() to check if all refcounts are 0
	 * and cleanup is needed. Calling with strong=0 and internal=1
	 * causes no actual reference to be released in binder_dec_node().
	 * If that changes, a change is needed here too.
	 */
	free_node = binder_dec_node_nilocked(node, 0, 1);
	binder_node_inner_unlock(node);
	if (free_node)
		binder_free_node(node);
}

static void binder_put_node(struct binder_node *node)
{
	binder_dec_node_tmpref(node);
}
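/*
 * Illustrative sketch, not part of the driver: binder_get_node() returns
 * the node with a temporary reference held, which the caller must drop
 * with binder_put_node(). Hypothetical helper, never called.
 */
static void __maybe_unused binder_node_lookup_sketch(struct binder_proc *proc,
						     binder_uintptr_t ptr)
{
	struct binder_node *node = binder_get_node(proc, ptr);

	if (!node)
		return;

	/* ... node cannot be freed while the tmp ref is held ... */

	binder_put_node(node);
}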
static struct binder_ref *binder_get_ref_olocked(struct binder_proc *proc,
						 u32 desc, bool need_strong_ref)
{
	struct rb_node *n = proc->refs_by_desc.rb_node;
	struct binder_ref *ref;

	while (n) {
		ref = rb_entry(n, struct binder_ref, rb_node_desc);

		if (desc < ref->data.desc) {
			n = n->rb_left;
		} else if (desc > ref->data.desc) {
			n = n->rb_right;
		} else if (need_strong_ref && !ref->data.strong) {
			binder_user_error("tried to use weak ref as strong ref\n");
			return NULL;
		} else {
			return ref;
		}
	}
	return NULL;
}

/* Find the smallest unused descriptor the "slow way" */
static u32 slow_desc_lookup_olocked(struct binder_proc *proc, u32 offset)
{
	struct binder_ref *ref;
	struct rb_node *n;
	u32 desc;

	desc = offset;
	for (n = rb_first(&proc->refs_by_desc); n; n = rb_next(n)) {
		ref = rb_entry(n, struct binder_ref, rb_node_desc);
		if (ref->data.desc > desc)
			break;
		desc = ref->data.desc + 1;
	}

	return desc;
}
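/*
 * Worked example (illustrative only): with offset == 1 and existing
 * descriptors {1, 2, 5}, the loop above advances desc to 3, breaks when
 * it reaches the ref with desc 5, and returns 3 -- the smallest unused
 * descriptor at or above @offset.
 */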
/*
 * Find an available reference descriptor ID. The proc->outer_lock might
 * be released in the process, in which case -EAGAIN is returned and the
 * @desc should be considered invalid.
 */
static int get_ref_desc_olocked(struct binder_proc *proc,
				struct binder_node *node,
				u32 *desc)
{
	struct dbitmap *dmap = &proc->dmap;
	unsigned int nbits, offset;
	unsigned long *new, bit;

	/* 0 is reserved for the context manager */
	offset = (node == proc->context->binder_context_mgr_node) ? 0 : 1;

	if (!dbitmap_enabled(dmap)) {
		*desc = slow_desc_lookup_olocked(proc, offset);
		return 0;
	}

	if (dbitmap_acquire_next_zero_bit(dmap, offset, &bit) == 0) {
		*desc = bit;
		return 0;
	}

	/*
	 * The dbitmap is full and needs to grow. The proc->outer_lock
	 * is briefly released to allocate the new bitmap safely.
	 */
	nbits = dbitmap_grow_nbits(dmap);
	binder_proc_unlock(proc);
	new = bitmap_zalloc(nbits, GFP_KERNEL);
	binder_proc_lock(proc);
	dbitmap_grow(dmap, new, nbits);

	return -EAGAIN;
}
/**
 * binder_get_ref_for_node_olocked() - get the ref associated with given node
 * @proc:	binder_proc that owns the ref
 * @node:	binder_node of target
 * @new_ref:	newly allocated binder_ref to be initialized or %NULL
 *
 * Look up the ref for the given node and return it if it exists
 *
 * If it doesn't exist and the caller provides a newly allocated
 * ref, initialize the fields of the newly allocated ref and insert
 * into the given proc rb_trees and node refs list.
 *
 * Return:	the ref for node. It is possible that another thread
 *		allocated/initialized the ref first in which case the
 *		returned ref would be different than the passed-in
 *		new_ref. new_ref must be kfree'd by the caller in
 *		this case.
 */
static struct binder_ref *binder_get_ref_for_node_olocked(
					struct binder_proc *proc,
					struct binder_node *node,
					struct binder_ref *new_ref)
{
	struct binder_ref *ref;
	struct rb_node *parent;
	struct rb_node **p;
	u32 desc;

retry:
	p = &proc->refs_by_node.rb_node;
	parent = NULL;
	while (*p) {
		parent = *p;
		ref = rb_entry(parent, struct binder_ref, rb_node_node);

		if (node < ref->node)
			p = &(*p)->rb_left;
		else if (node > ref->node)
			p = &(*p)->rb_right;
		else
			return ref;
	}
	if (!new_ref)
		return NULL;

	/* might release the proc->outer_lock */
	if (get_ref_desc_olocked(proc, node, &desc) == -EAGAIN)
		goto retry;

	binder_stats_created(BINDER_STAT_REF);
	new_ref->data.debug_id = atomic_inc_return(&binder_last_id);
	new_ref->proc = proc;
	new_ref->node = node;
	rb_link_node(&new_ref->rb_node_node, parent, p);
	rb_insert_color(&new_ref->rb_node_node, &proc->refs_by_node);

	new_ref->data.desc = desc;
	p = &proc->refs_by_desc.rb_node;
	while (*p) {
		parent = *p;
		ref = rb_entry(parent, struct binder_ref, rb_node_desc);

		if (new_ref->data.desc < ref->data.desc)
			p = &(*p)->rb_left;
		else if (new_ref->data.desc > ref->data.desc)
			p = &(*p)->rb_right;
		else
			BUG();
	}
	rb_link_node(&new_ref->rb_node_desc, parent, p);
	rb_insert_color(&new_ref->rb_node_desc, &proc->refs_by_desc);

	binder_node_lock(node);
	hlist_add_head(&new_ref->node_entry, &node->refs);

	binder_debug(BINDER_DEBUG_INTERNAL_REFS,
		     "%d new ref %d desc %d for node %d\n",
		      proc->pid, new_ref->data.debug_id, new_ref->data.desc,
		      node->debug_id);
	binder_node_unlock(node);
	return new_ref;
}
static void binder_cleanup_ref_olocked(struct binder_ref *ref)
{
	struct dbitmap *dmap = &ref->proc->dmap;
	bool delete_node = false;

	binder_debug(BINDER_DEBUG_INTERNAL_REFS,
		     "%d delete ref %d desc %d for node %d\n",
		      ref->proc->pid, ref->data.debug_id, ref->data.desc,
		      ref->node->debug_id);

	if (dbitmap_enabled(dmap))
		dbitmap_clear_bit(dmap, ref->data.desc);
	rb_erase(&ref->rb_node_desc, &ref->proc->refs_by_desc);
	rb_erase(&ref->rb_node_node, &ref->proc->refs_by_node);

	binder_node_inner_lock(ref->node);
	if (ref->data.strong)
		binder_dec_node_nilocked(ref->node, 1, 1);

	hlist_del(&ref->node_entry);
	delete_node = binder_dec_node_nilocked(ref->node, 0, 1);
	binder_node_inner_unlock(ref->node);
	/*
	 * Clear ref->node unless we want the caller to free the node
	 */
	if (!delete_node) {
		/*
		 * The caller uses ref->node to determine
		 * whether the node needs to be freed. Clear
		 * it since the node is still alive.
		 */
		ref->node = NULL;
	}

	if (ref->death) {
		binder_debug(BINDER_DEBUG_DEAD_BINDER,
			     "%d delete ref %d desc %d has death notification\n",
			      ref->proc->pid, ref->data.debug_id,
			      ref->data.desc);
		binder_dequeue_work(ref->proc, &ref->death->work);
		binder_stats_deleted(BINDER_STAT_DEATH);
	}

	if (ref->freeze) {
		binder_dequeue_work(ref->proc, &ref->freeze->work);
		binder_stats_deleted(BINDER_STAT_FREEZE);
	}

	binder_stats_deleted(BINDER_STAT_REF);
}
/**
 * binder_inc_ref_olocked() - increment the ref for given handle
 * @ref:         ref to be incremented
 * @strong:      if true, strong increment, else weak
 * @target_list: list to queue node work on
 *
 * Increment the ref. @ref->proc->outer_lock must be held on entry
 *
 * Return: 0, if successful, else errno
 */
static int binder_inc_ref_olocked(struct binder_ref *ref, int strong,
				  struct list_head *target_list)
{
	int ret;

	if (strong) {
		if (ref->data.strong == 0) {
			ret = binder_inc_node(ref->node, 1, 1, target_list);
			if (ret)
				return ret;
		}
		ref->data.strong++;
	} else {
		if (ref->data.weak == 0) {
			ret = binder_inc_node(ref->node, 0, 1, target_list);
			if (ret)
				return ret;
		}
		ref->data.weak++;
	}
	return 0;
}

/**
 * binder_dec_ref_olocked() - dec the ref for given handle
 * @ref:	ref to be decremented
 * @strong:	if true, strong decrement, else weak
 *
 * Decrement the ref.
 *
 * Return: %true if ref is cleaned up and ready to be freed.
 */
static bool binder_dec_ref_olocked(struct binder_ref *ref, int strong)
{
	if (strong) {
		if (ref->data.strong == 0) {
			binder_user_error("%d invalid dec strong, ref %d desc %d s %d w %d\n",
					  ref->proc->pid, ref->data.debug_id,
					  ref->data.desc, ref->data.strong,
					  ref->data.weak);
			return false;
		}
		ref->data.strong--;
		if (ref->data.strong == 0)
			binder_dec_node(ref->node, strong, 1);
	} else {
		if (ref->data.weak == 0) {
			binder_user_error("%d invalid dec weak, ref %d desc %d s %d w %d\n",
					  ref->proc->pid, ref->data.debug_id,
					  ref->data.desc, ref->data.strong,
					  ref->data.weak);
			return false;
		}
		ref->data.weak--;
	}
	if (ref->data.strong == 0 && ref->data.weak == 0) {
		binder_cleanup_ref_olocked(ref);
		return true;
	}
	return false;
}
/**
 * binder_get_node_from_ref() - get the node from the given proc/desc
 * @proc:	proc containing the ref
 * @desc:	the handle associated with the ref
 * @need_strong_ref: if true, only return node if ref is strong
 * @rdata:	the id/refcount data for the ref
 *
 * Given a proc and ref handle, return the associated binder_node
 *
 * Return: a binder_node or NULL if not found or not strong when strong required
 */
static struct binder_node *binder_get_node_from_ref(
		struct binder_proc *proc,
		u32 desc, bool need_strong_ref,
		struct binder_ref_data *rdata)
{
	struct binder_node *node;
	struct binder_ref *ref;

	binder_proc_lock(proc);
	ref = binder_get_ref_olocked(proc, desc, need_strong_ref);
	if (!ref)
		goto err_no_ref;
	node = ref->node;
	/*
	 * Take an implicit reference on the node to ensure
	 * it stays alive until the call to binder_put_node()
	 */
	binder_inc_node_tmpref(node);
	if (rdata)
		*rdata = ref->data;
	binder_proc_unlock(proc);

	return node;

err_no_ref:
	binder_proc_unlock(proc);
	return NULL;
}

/**
 * binder_free_ref() - free the binder_ref
 * @ref:	ref to free
 *
 * Free the binder_ref. Free the binder_node indicated by ref->node
 * (if non-NULL) and the binder_ref_death indicated by ref->death.
 */
static void binder_free_ref(struct binder_ref *ref)
{
	if (ref->node)
		binder_free_node(ref->node);
	kfree(ref->death);
	kfree(ref->freeze);
	kfree(ref);
}

/* shrink descriptor bitmap if needed */
static void try_shrink_dmap(struct binder_proc *proc)
{
	unsigned long *new;
	int nbits;

	binder_proc_lock(proc);
	nbits = dbitmap_shrink_nbits(&proc->dmap);
	binder_proc_unlock(proc);

	if (!nbits)
		return;

	new = bitmap_zalloc(nbits, GFP_KERNEL);
	binder_proc_lock(proc);
	dbitmap_shrink(&proc->dmap, new, nbits);
	binder_proc_unlock(proc);
}
/**
 * binder_update_ref_for_handle() - inc/dec the ref for given handle
 * @proc:	proc containing the ref
 * @desc:	the handle associated with the ref
 * @increment:	true=inc reference, false=dec reference
 * @strong:	true=strong reference, false=weak reference
 * @rdata:	the id/refcount data for the ref
 *
 * Given a proc and ref handle, increment or decrement the ref
 * according to "increment" arg.
 *
 * Return: 0 if successful, else errno
 */
static int binder_update_ref_for_handle(struct binder_proc *proc,
		uint32_t desc, bool increment, bool strong,
		struct binder_ref_data *rdata)
{
	int ret = 0;
	struct binder_ref *ref;
	bool delete_ref = false;

	binder_proc_lock(proc);
	ref = binder_get_ref_olocked(proc, desc, strong);
	if (!ref) {
		ret = -EINVAL;
		goto err_no_ref;
	}
	if (increment)
		ret = binder_inc_ref_olocked(ref, strong, NULL);
	else
		delete_ref = binder_dec_ref_olocked(ref, strong);

	if (rdata)
		*rdata = ref->data;
	binder_proc_unlock(proc);

	if (delete_ref) {
		binder_free_ref(ref);
		try_shrink_dmap(proc);
	}
	return ret;

err_no_ref:
	binder_proc_unlock(proc);
	return ret;
}

/**
 * binder_dec_ref_for_handle() - dec the ref for given handle
 * @proc:	proc containing the ref
 * @desc:	the handle associated with the ref
 * @strong:	true=strong reference, false=weak reference
 * @rdata:	the id/refcount data for the ref
 *
 * Just calls binder_update_ref_for_handle() to decrement the ref.
 *
 * Return: 0 if successful, else errno
 */
static int binder_dec_ref_for_handle(struct binder_proc *proc,
		uint32_t desc, bool strong, struct binder_ref_data *rdata)
{
	return binder_update_ref_for_handle(proc, desc, false, strong, rdata);
}
/**
 * binder_inc_ref_for_node() - increment the ref for given proc/node
 * @proc:	 proc containing the ref
 * @node:	 target node
 * @strong:	 true=strong reference, false=weak reference
 * @target_list: worklist to use if node is incremented
 * @rdata:	 the id/refcount data for the ref
 *
 * Given a proc and node, increment the ref. Create the ref if it
 * doesn't already exist
 *
 * Return: 0 if successful, else errno
 */
static int binder_inc_ref_for_node(struct binder_proc *proc,
			struct binder_node *node,
			bool strong,
			struct list_head *target_list,
			struct binder_ref_data *rdata)
{
	struct binder_ref *ref;
	struct binder_ref *new_ref = NULL;
	int ret = 0;

	binder_proc_lock(proc);
	ref = binder_get_ref_for_node_olocked(proc, node, NULL);
	if (!ref) {
		binder_proc_unlock(proc);
		new_ref = kzalloc(sizeof(*ref), GFP_KERNEL);
		if (!new_ref)
			return -ENOMEM;
		binder_proc_lock(proc);
		ref = binder_get_ref_for_node_olocked(proc, node, new_ref);
	}
	ret = binder_inc_ref_olocked(ref, strong, target_list);
	*rdata = ref->data;
	if (ret && ref == new_ref) {
		/*
		 * Cleanup the failed reference here as the target
		 * could now be dead and have already released its
		 * references by now. Calling on the new reference
		 * with strong=0 and a tmp_refs will not decrement
		 * the node. The new_ref gets kfree'd below.
		 */
		binder_cleanup_ref_olocked(new_ref);
		ref = NULL;
	}

	binder_proc_unlock(proc);
	if (new_ref && ref != new_ref)
		/*
		 * Another thread created the ref first so
		 * free the one we allocated
		 */
		kfree(new_ref);

	return ret;
}
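/*
 * Illustrative sketch, not part of the driver: taking a strong reference
 * by node and dropping it again via the handle reported back in @rdata.
 * Hypothetical helper, never called.
 */
static void __maybe_unused binder_ref_cycle_sketch(struct binder_proc *proc,
						   struct binder_node *node)
{
	struct binder_ref_data rdata;

	/* creates the ref if this proc does not have one yet */
	if (binder_inc_ref_for_node(proc, node, true, NULL, &rdata))
		return;

	/* rdata.desc is the handle userspace would use */
	binder_dec_ref_for_handle(proc, rdata.desc, true, &rdata);
}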
static void binder_pop_transaction_ilocked(struct binder_thread *target_thread,
					   struct binder_transaction *t)
{
	BUG_ON(!target_thread);
	assert_spin_locked(&target_thread->proc->inner_lock);
	BUG_ON(target_thread->transaction_stack != t);
	BUG_ON(target_thread->transaction_stack->from != target_thread);
	target_thread->transaction_stack =
		target_thread->transaction_stack->from_parent;
	t->from = NULL;
}

/**
 * binder_thread_dec_tmpref() - decrement thread->tmp_ref
 * @thread:	thread to decrement
 *
 * A thread needs to be kept alive while being used to create or
 * handle a transaction. binder_get_txn_from() is used to safely
 * extract t->from from a binder_transaction and keep the thread
 * indicated by t->from from being freed. When done with that
 * binder_thread, this function is called to decrement the
 * tmp_ref and free if appropriate (thread has been released
 * and no transaction being processed by the driver)
 */
static void binder_thread_dec_tmpref(struct binder_thread *thread)
{
	/*
	 * atomic is used to protect the counter value while
	 * it cannot reach zero or thread->is_dead is false
	 */
	binder_inner_proc_lock(thread->proc);
	atomic_dec(&thread->tmp_ref);
	if (thread->is_dead && !atomic_read(&thread->tmp_ref)) {
		binder_inner_proc_unlock(thread->proc);
		binder_free_thread(thread);
		return;
	}
	binder_inner_proc_unlock(thread->proc);
}

/**
 * binder_proc_dec_tmpref() - decrement proc->tmp_ref
 * @proc:	proc to decrement
 *
 * A binder_proc needs to be kept alive while being used to create or
 * handle a transaction. proc->tmp_ref is incremented when
 * creating a new transaction or the binder_proc is currently in-use
 * by threads that are being released. When done with the binder_proc,
 * this function is called to decrement the counter and free the
 * proc if appropriate (proc has been released, all threads have
 * been released and not currently in-use to process a transaction).
 */
static void binder_proc_dec_tmpref(struct binder_proc *proc)
{
	binder_inner_proc_lock(proc);
	proc->tmp_ref--;
	if (proc->is_dead && RB_EMPTY_ROOT(&proc->threads) &&
			!proc->tmp_ref) {
		binder_inner_proc_unlock(proc);
		binder_free_proc(proc);
		return;
	}
	binder_inner_proc_unlock(proc);
}
/**
 * binder_get_txn_from() - safely extract the "from" thread in transaction
 * @t:	binder transaction for t->from
 *
 * Atomically return the "from" thread and increment the tmp_ref
 * count for the thread to ensure it stays alive until
 * binder_thread_dec_tmpref() is called.
 *
 * Return: the value of t->from
 */
static struct binder_thread *binder_get_txn_from(
		struct binder_transaction *t)
{
	struct binder_thread *from;

	spin_lock(&t->lock);
	from = t->from;
	if (from)
		atomic_inc(&from->tmp_ref);
	spin_unlock(&t->lock);

	return from;
}
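/*
 * Illustrative sketch, not part of the driver: the thread returned by
 * binder_get_txn_from() is kept alive by a tmp_ref that the caller must
 * drop with binder_thread_dec_tmpref(). Hypothetical helper, never called.
 */
static void __maybe_unused binder_txn_from_sketch(struct binder_transaction *t)
{
	struct binder_thread *from = binder_get_txn_from(t);

	if (!from)
		return;

	/* ... from cannot be freed here ... */

	binder_thread_dec_tmpref(from);
}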
/**
 * binder_get_txn_from_and_acq_inner() - get t->from and acquire inner lock
 * @t:	binder transaction for t->from
 *
 * Same as binder_get_txn_from() except it also acquires the proc->inner_lock
 * to guarantee that the thread cannot be released while operating on it.
 * The caller must call binder_inner_proc_unlock() to release the inner lock
 * as well as call binder_dec_thread_txn() to release the reference.
 *
 * Return: the value of t->from
 */
static struct binder_thread *binder_get_txn_from_and_acq_inner(
		struct binder_transaction *t)
	__acquires(&t->from->proc->inner_lock)
{
	struct binder_thread *from;

	from = binder_get_txn_from(t);
	if (!from) {
		__acquire(&from->proc->inner_lock);
		return NULL;
	}
	binder_inner_proc_lock(from->proc);
	if (t->from) {
		BUG_ON(from != t->from);
		return from;
	}
	binder_inner_proc_unlock(from->proc);
	__acquire(&from->proc->inner_lock);
	binder_thread_dec_tmpref(from);
	return NULL;
}
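/*
 * Illustrative sketch, not part of the driver: callers of
 * binder_get_txn_from_and_acq_inner() must release both the inner lock
 * and the temporary thread reference, mirroring the pattern in
 * binder_send_failed_reply() below. Hypothetical helper, never called.
 */
static void __maybe_unused binder_txn_from_locked_sketch(
		struct binder_transaction *t)
{
	struct binder_thread *from = binder_get_txn_from_and_acq_inner(t);

	if (!from) {
		/* annotation for sparse; no lock is actually held */
		__release(&t->from->proc->inner_lock);
		return;
	}

	/* ... from->proc->inner_lock is held here ... */

	binder_inner_proc_unlock(from->proc);
	binder_thread_dec_tmpref(from);
}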
/**
 * binder_free_txn_fixups() - free unprocessed fd fixups
 * @t:	binder transaction to free fixups from
 *
 * If the transaction is being torn down prior to being
 * processed by the target process, free all of the
 * fd fixups and fput the file structs. It is safe to
 * call this function after the fixups have been
 * processed -- in that case, the list will be empty.
 */
static void binder_free_txn_fixups(struct binder_transaction *t)
{
	struct binder_txn_fd_fixup *fixup, *tmp;

	list_for_each_entry_safe(fixup, tmp, &t->fd_fixups, fixup_entry) {
		fput(fixup->file);
		if (fixup->target_fd >= 0)
			put_unused_fd(fixup->target_fd);
		list_del(&fixup->fixup_entry);
		kfree(fixup);
	}
}

static void binder_txn_latency_free(struct binder_transaction *t)
{
	int from_proc, from_thread, to_proc, to_thread;

	spin_lock(&t->lock);
	from_proc = t->from ? t->from->proc->pid : 0;
	from_thread = t->from ? t->from->pid : 0;
	to_proc = t->to_proc ? t->to_proc->pid : 0;
	to_thread = t->to_thread ? t->to_thread->pid : 0;
	spin_unlock(&t->lock);

	trace_binder_txn_latency_free(t, from_proc, from_thread, to_proc, to_thread);
}
static void binder_free_transaction(struct binder_transaction *t)
{
	struct binder_proc *target_proc = t->to_proc;

	if (target_proc) {
		binder_inner_proc_lock(target_proc);
		target_proc->outstanding_txns--;
		if (target_proc->outstanding_txns < 0)
			pr_warn("%s: Unexpected outstanding_txns %d\n",
				__func__, target_proc->outstanding_txns);
		if (!target_proc->outstanding_txns && target_proc->is_frozen)
			wake_up_interruptible_all(&target_proc->freeze_wait);
		if (t->buffer)
			t->buffer->transaction = NULL;
		binder_inner_proc_unlock(target_proc);
	}
	if (trace_binder_txn_latency_free_enabled())
		binder_txn_latency_free(t);
	/*
	 * If the transaction has no target_proc, then
	 * t->buffer->transaction has already been cleared.
	 */
	binder_free_txn_fixups(t);
	kfree(t);
	binder_stats_deleted(BINDER_STAT_TRANSACTION);
}

static void binder_send_failed_reply(struct binder_transaction *t,
				     uint32_t error_code)
{
	struct binder_thread *target_thread;
	struct binder_transaction *next;

	BUG_ON(t->flags & TF_ONE_WAY);
	while (1) {
		target_thread = binder_get_txn_from_and_acq_inner(t);
		if (target_thread) {
			binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
				     "send failed reply for transaction %d to %d:%d\n",
				      t->debug_id,
				      target_thread->proc->pid,
				      target_thread->pid);

			binder_pop_transaction_ilocked(target_thread, t);
			if (target_thread->reply_error.cmd == BR_OK) {
				target_thread->reply_error.cmd = error_code;
				binder_enqueue_thread_work_ilocked(
					target_thread,
					&target_thread->reply_error.work);
				wake_up_interruptible(&target_thread->wait);
			} else {
				/*
				 * Cannot get here for normal operation, but
				 * we can if multiple synchronous transactions
				 * are sent without blocking for responses.
				 * Just ignore the 2nd error in this case.
				 */
				pr_warn("Unexpected reply error: %u\n",
					target_thread->reply_error.cmd);
			}
			binder_inner_proc_unlock(target_thread->proc);
			binder_thread_dec_tmpref(target_thread);
			binder_free_transaction(t);
			return;
		}
		__release(&target_thread->proc->inner_lock);
		next = t->from_parent;

		binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
			     "send failed reply for transaction %d, target dead\n",
			     t->debug_id);

		binder_free_transaction(t);
		if (next == NULL) {
			binder_debug(BINDER_DEBUG_DEAD_BINDER,
				     "reply failed, no target thread at root\n");
			return;
		}
		t = next;
		binder_debug(BINDER_DEBUG_DEAD_BINDER,
			     "reply failed, no target thread -- retry %d\n",
			      t->debug_id);
	}
}

/**
 * binder_cleanup_transaction() - cleans up undelivered transaction
 * @t:		transaction that needs to be cleaned up
 * @reason:	reason the transaction wasn't delivered
 * @error_code:	error to return to caller (if synchronous call)
 */
static void binder_cleanup_transaction(struct binder_transaction *t,
				       const char *reason,
				       uint32_t error_code)
{
	if (t->buffer->target_node && !(t->flags & TF_ONE_WAY)) {
		binder_send_failed_reply(t, error_code);
	} else {
		binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
			"undelivered transaction %d, %s\n",
			t->debug_id, reason);
		binder_free_transaction(t);
	}
}
/**
 * binder_get_object() - gets object and checks for valid metadata
 * @proc:	binder_proc owning the buffer
 * @u:		sender's user pointer to base of buffer
 * @buffer:	binder_buffer that we're parsing.
 * @offset:	offset in the @buffer at which to validate an object.
 * @object:	struct binder_object to read into
 *
 * Copy the binder object at the given offset into @object. If @u is
 * provided then the copy is from the sender's buffer. If not, then
 * it is copied from the target's @buffer.
 *
 * Return:	If there's a valid metadata object at @offset, the
 *		size of that object. Otherwise, it returns zero. The object
 *		is read into the struct binder_object pointed to by @object.
 */
static size_t binder_get_object(struct binder_proc *proc,
				const void __user *u,
				struct binder_buffer *buffer,
				unsigned long offset,
				struct binder_object *object)
{
	size_t read_size;
	struct binder_object_header *hdr;
	size_t object_size = 0;

	read_size = min_t(size_t, sizeof(*object), buffer->data_size - offset);
	if (offset > buffer->data_size || read_size < sizeof(*hdr) ||
	    !IS_ALIGNED(offset, sizeof(u32)))
		return 0;
	if (u) {
		if (copy_from_user(object, u + offset, read_size))
			return 0;
	} else {
		if (binder_alloc_copy_from_buffer(&proc->alloc, object, buffer,
						  offset, read_size))
			return 0;
	}

	/* Ok, now see if we read a complete object. */
	hdr = &object->hdr;
	switch (hdr->type) {
	case BINDER_TYPE_BINDER:
	case BINDER_TYPE_WEAK_BINDER:
	case BINDER_TYPE_HANDLE:
	case BINDER_TYPE_WEAK_HANDLE:
		object_size = sizeof(struct flat_binder_object);
		break;
	case BINDER_TYPE_FD:
		object_size = sizeof(struct binder_fd_object);
		break;
	case BINDER_TYPE_PTR:
		object_size = sizeof(struct binder_buffer_object);
		break;
	case BINDER_TYPE_FDA:
		object_size = sizeof(struct binder_fd_array_object);
		break;
	default:
		return 0;
	}
	if (offset <= buffer->data_size - object_size &&
	    buffer->data_size >= object_size)
		return object_size;
	else
		return 0;
}
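/*
 * Illustrative sketch, not part of the driver: reading an object out of
 * a target buffer and checking its type before use. Hypothetical helper,
 * never called.
 */
static void __maybe_unused binder_object_read_sketch(struct binder_proc *proc,
						     struct binder_buffer *buffer,
						     unsigned long offset)
{
	struct binder_object object;
	size_t object_size;

	/* u == NULL: copy from the target's buffer, not the sender's */
	object_size = binder_get_object(proc, NULL, buffer, offset, &object);
	if (object_size == 0)
		return;	/* no valid object at @offset */

	if (object.hdr.type == BINDER_TYPE_BINDER) {
		/* to_flat_binder_object(&object.hdr) is safe to use here */
	}
}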
/**
 * binder_validate_ptr() - validates binder_buffer_object in a binder_buffer.
 * @proc:	binder_proc owning the buffer
 * @b:		binder_buffer containing the object
 * @object:	struct binder_object to read into
 * @index:	index in offset array at which the binder_buffer_object is
 *		located
 * @start_offset: points to the start of the offset array
 * @object_offsetp: offset of @object read from @b
 * @num_valid:	the number of valid offsets in the offset array
 *
 * Return:	If @index is within the valid range of the offset array
 *		described by @start and @num_valid, and if there's a valid
 *		binder_buffer_object at the offset found in index @index
 *		of the offset array, that object is returned. Otherwise,
 *		%NULL is returned.
 *		Note that the offset found in index @index itself is not
 *		verified; this function assumes that @num_valid elements
 *		from @start were previously verified to have valid offsets.
 *		If @object_offsetp is non-NULL, then the offset within
 *		@b is written to it.
 */
static struct binder_buffer_object *binder_validate_ptr(
						struct binder_proc *proc,
						struct binder_buffer *b,
						struct binder_object *object,
						binder_size_t index,
						binder_size_t start_offset,
						binder_size_t *object_offsetp,
						binder_size_t num_valid)
{
	size_t object_size;
	binder_size_t object_offset;
	unsigned long buffer_offset;

	if (index >= num_valid)
		return NULL;

	buffer_offset = start_offset + sizeof(binder_size_t) * index;
	if (binder_alloc_copy_from_buffer(&proc->alloc, &object_offset,
					  b, buffer_offset,
					  sizeof(object_offset)))
		return NULL;
	object_size = binder_get_object(proc, NULL, b, object_offset, object);
	if (!object_size || object->hdr.type != BINDER_TYPE_PTR)
		return NULL;
	if (object_offsetp)
		*object_offsetp = object_offset;

	return &object->bbo;
}
/**
 * binder_validate_fixup() - validates pointer/fd fixups happen in order.
 * @proc:		binder_proc owning the buffer
 * @b:			transaction buffer
 * @objects_start_offset: offset to start of objects buffer
 * @buffer_obj_offset:	offset to binder_buffer_object in which to fix up
 * @fixup_offset:	start offset in @buffer to fix up
 * @last_obj_offset:	offset to last binder_buffer_object that we fixed
 * @last_min_offset:	minimum fixup offset in object at @last_obj_offset
 *
 * Return:		%true if a fixup in buffer @buffer at offset @offset is
 *			allowed.
 *
 * For safety reasons, we only allow fixups inside a buffer to happen
 * at increasing offsets; additionally, we only allow fixup on the last
 * buffer object that was verified, or one of its parents.
 *
 * Example of what is allowed:
 *
 * A
 *   B (parent = A, offset = 0)
 *   C (parent = A, offset = 16)
 *     D (parent = C, offset = 0)
 *   E (parent = A, offset = 32) // min_offset is 16 (C.parent_offset)
 *
 * Examples of what is not allowed:
 *
 * Decreasing offsets within the same parent:
 * A
 *   C (parent = A, offset = 16)
 *   B (parent = A, offset = 0) // decreasing offset within A
 *
 * Referring to a parent that wasn't the last object or any of its parents:
 * A
 *   B (parent = A, offset = 0)
 *   C (parent = A, offset = 0)
 *   C (parent = A, offset = 16)
 *     D (parent = B, offset = 0) // B is not A or any of A's parents
 */
static bool binder_validate_fixup(struct binder_proc *proc,
				  struct binder_buffer *b,
				  binder_size_t objects_start_offset,
				  binder_size_t buffer_obj_offset,
				  binder_size_t fixup_offset,
				  binder_size_t last_obj_offset,
				  binder_size_t last_min_offset)
{
	if (!last_obj_offset) {
		/* Nothing to fix up in */
		return false;
	}

	while (last_obj_offset != buffer_obj_offset) {
		unsigned long buffer_offset;
		struct binder_object last_object;
		struct binder_buffer_object *last_bbo;
		size_t object_size = binder_get_object(proc, NULL, b,
						       last_obj_offset,
						       &last_object);
		if (object_size != sizeof(*last_bbo))
			return false;

		last_bbo = &last_object.bbo;
		/*
		 * Safe to retrieve the parent of last_obj, since it
		 * was already previously verified by the driver.
		 */
		if ((last_bbo->flags & BINDER_BUFFER_FLAG_HAS_PARENT) == 0)
			return false;
		last_min_offset = last_bbo->parent_offset + sizeof(uintptr_t);
		buffer_offset = objects_start_offset +
			sizeof(binder_size_t) * last_bbo->parent;
		if (binder_alloc_copy_from_buffer(&proc->alloc,
						  &last_obj_offset,
						  b, buffer_offset,
						  sizeof(last_obj_offset)))
			return false;
	}
	return (fixup_offset >= last_min_offset);
}
/**
 * struct binder_task_work_cb - for deferred close
 *
 * @twork:                callback_head for task work
 * @file:                 file to close
 *
 * Structure to pass task work to be handled after
 * returning from binder_ioctl() via task_work_add().
 */
struct binder_task_work_cb {
	struct callback_head twork;
	struct file *file;
};

/**
 * binder_do_fd_close() - close list of file descriptors
 * @twork:	callback head for task work
 *
 * It is not safe to call ksys_close() during the binder_ioctl()
 * function if there is a chance that binder's own file descriptor
 * might be closed. This is to meet the requirements for using
 * fdget() (see comments for __fget_light()). Therefore use
 * task_work_add() to schedule the close operation once we have
 * returned from binder_ioctl(). This function is a callback
 * for that mechanism and does the actual ksys_close() on the
 * given file descriptor.
 */
static void binder_do_fd_close(struct callback_head *twork)
{
	struct binder_task_work_cb *twcb = container_of(twork,
			struct binder_task_work_cb, twork);

	fput(twcb->file);
	kfree(twcb);
}

/**
 * binder_deferred_fd_close() - schedule a close for the given file-descriptor
 * @fd:		file-descriptor to close
 *
 * See comments in binder_do_fd_close(). This function is used to schedule
 * a file-descriptor to be closed after returning from binder_ioctl().
 */
static void binder_deferred_fd_close(int fd)
{
	struct binder_task_work_cb *twcb;

	twcb = kzalloc(sizeof(*twcb), GFP_KERNEL);
	if (!twcb)
		return;
	init_task_work(&twcb->twork, binder_do_fd_close);
	twcb->file = file_close_fd(fd);
	if (twcb->file) {
		// pin it until binder_do_fd_close(); see comments there
		get_file(twcb->file);
		filp_close(twcb->file, current->files);
		task_work_add(current, &twcb->twork, TWA_RESUME);
	} else {
		kfree(twcb);
	}
}
static void binder_transaction_buffer_release(struct binder_proc *proc,
					      struct binder_thread *thread,
					      struct binder_buffer *buffer,
					      binder_size_t off_end_offset,
					      bool is_failure)
{
	int debug_id = buffer->debug_id;
	binder_size_t off_start_offset, buffer_offset;

	binder_debug(BINDER_DEBUG_TRANSACTION,
		     "%d buffer release %d, size %zd-%zd, failed at %llx\n",
		     proc->pid, buffer->debug_id,
		     buffer->data_size, buffer->offsets_size,
		     (unsigned long long)off_end_offset);

	if (buffer->target_node)
		binder_dec_node(buffer->target_node, 1, 0);

	off_start_offset = ALIGN(buffer->data_size, sizeof(void *));

	for (buffer_offset = off_start_offset; buffer_offset < off_end_offset;
	     buffer_offset += sizeof(binder_size_t)) {
		struct binder_object_header *hdr;
		size_t object_size = 0;
		struct binder_object object;
		binder_size_t object_offset;

		if (!binder_alloc_copy_from_buffer(&proc->alloc, &object_offset,
						   buffer, buffer_offset,
						   sizeof(object_offset)))
			object_size = binder_get_object(proc, NULL, buffer,
							object_offset, &object);
		if (object_size == 0) {
			pr_err("transaction release %d bad object at offset %lld, size %zd\n",
			       debug_id, (u64)object_offset, buffer->data_size);
			continue;
		}
		hdr = &object.hdr;
		switch (hdr->type) {
		case BINDER_TYPE_BINDER:
		case BINDER_TYPE_WEAK_BINDER: {
			struct flat_binder_object *fp;
			struct binder_node *node;

			fp = to_flat_binder_object(hdr);
			node = binder_get_node(proc, fp->binder);
			if (node == NULL) {
				pr_err("transaction release %d bad node %016llx\n",
				       debug_id, (u64)fp->binder);
				break;
			}
			binder_debug(BINDER_DEBUG_TRANSACTION,
				     "        node %d u%016llx\n",
				     node->debug_id, (u64)node->ptr);
			binder_dec_node(node, hdr->type == BINDER_TYPE_BINDER,
					0);
			binder_put_node(node);
		} break;
		case BINDER_TYPE_HANDLE:
		case BINDER_TYPE_WEAK_HANDLE: {
			struct flat_binder_object *fp;
			struct binder_ref_data rdata;
			int ret;

			fp = to_flat_binder_object(hdr);
			ret = binder_dec_ref_for_handle(proc, fp->handle,
				hdr->type == BINDER_TYPE_HANDLE, &rdata);

			if (ret) {
				pr_err("transaction release %d bad handle %d, ret = %d\n",
				       debug_id, fp->handle, ret);
				break;
			}
			binder_debug(BINDER_DEBUG_TRANSACTION,
				     "        ref %d desc %d\n",
				     rdata.debug_id, rdata.desc);
		} break;

		case BINDER_TYPE_FD: {
			/*
			 * No need to close the file here since user-space
			 * closes it for successfully delivered
			 * transactions. For transactions that weren't
			 * delivered, the new fd was never allocated so
			 * there is no need to close and the fput on the
			 * file is done when the transaction is torn
			 * down.
			 */
		} break;
		case BINDER_TYPE_PTR:
			/*
			 * Nothing to do here, this will get cleaned up when the
			 * transaction buffer gets freed
			 */
			break;
		case BINDER_TYPE_FDA: {
			struct binder_fd_array_object *fda;
			struct binder_buffer_object *parent;
			struct binder_object ptr_object;
			binder_size_t fda_offset;
			size_t fd_index;
			binder_size_t fd_buf_size;
			binder_size_t num_valid;

			if (is_failure) {
				/*
				 * The fd fixups have not been applied so no
				 * fds need to be closed.
				 */
				continue;
			}

			num_valid = (buffer_offset - off_start_offset) /
						sizeof(binder_size_t);
			fda = to_binder_fd_array_object(hdr);
			parent = binder_validate_ptr(proc, buffer, &ptr_object,
						     fda->parent,
						     off_start_offset,
						     NULL,
						     num_valid);
			if (!parent) {
				pr_err("transaction release %d bad parent offset\n",
				       debug_id);
				continue;
			}
			fd_buf_size = sizeof(u32) * fda->num_fds;
			if (fda->num_fds >= SIZE_MAX / sizeof(u32)) {
				pr_err("transaction release %d invalid number of fds (%lld)\n",
				       debug_id, (u64)fda->num_fds);
				continue;
			}
			if (fd_buf_size > parent->length ||
			    fda->parent_offset > parent->length - fd_buf_size) {
				/* No space for all file descriptors here. */
				pr_err("transaction release %d not enough space for %lld fds in buffer\n",
				       debug_id, (u64)fda->num_fds);
				continue;
			}
			/*
			 * the source data for binder_buffer_object is visible
			 * to user-space and the @buffer element is the user
			 * pointer to the buffer_object containing the fd_array.
			 * Convert the address to an offset relative to
			 * the base of the transaction buffer.
			 */
			fda_offset = parent->buffer - buffer->user_data +
				fda->parent_offset;
			for (fd_index = 0; fd_index < fda->num_fds;
			     fd_index++) {
				u32 fd;
				int err;
				binder_size_t offset = fda_offset +
					fd_index * sizeof(fd);

				err = binder_alloc_copy_from_buffer(
						&proc->alloc, &fd, buffer,
						offset, sizeof(fd));
				WARN_ON(err);
				if (!err) {
					binder_deferred_fd_close(fd);
					/*
					 * Need to make sure the thread goes
					 * back to userspace to complete the
					 * deferred close
					 */
					if (thread)
						thread->looper_need_return = true;
				}
			}
		} break;
		default:
			pr_err("transaction release %d bad object type %x\n",
				debug_id, hdr->type);
			break;
		}
	}
}
/* Clean up all the objects in the buffer */
static inline void binder_release_entire_buffer(struct binder_proc *proc,
						struct binder_thread *thread,
						struct binder_buffer *buffer,
						bool is_failure)
{
	binder_size_t off_end_offset;

	off_end_offset = ALIGN(buffer->data_size, sizeof(void *));
	off_end_offset += buffer->offsets_size;

	binder_transaction_buffer_release(proc, thread, buffer,
					  off_end_offset, is_failure);
}
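/*
 * Worked example of the offset arithmetic above: with data_size = 100 and
 * offsets_size = 24 on a 64-bit kernel, ALIGN(100, sizeof(void *)) = 104,
 * so the offsets array occupies buffer offsets [104, 128) and
 * binder_transaction_buffer_release() walks three binder_size_t entries
 * (24 / 8).
 */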
static int binder_translate_binder(struct flat_binder_object *fp,
				   struct binder_transaction *t,
				   struct binder_thread *thread)
{
	struct binder_node *node;
	struct binder_proc *proc = thread->proc;
	struct binder_proc *target_proc = t->to_proc;
	struct binder_ref_data rdata;
	int ret = 0;

	node = binder_get_node(proc, fp->binder);
	if (!node) {
		node = binder_new_node(proc, fp);
		if (!node)
			return -ENOMEM;
	}
	if (fp->cookie != node->cookie) {
		binder_user_error("%d:%d sending u%016llx node %d, cookie mismatch %016llx != %016llx\n",
				  proc->pid, thread->pid, (u64)fp->binder,
				  node->debug_id, (u64)fp->cookie,
				  (u64)node->cookie);
		ret = -EINVAL;
		goto done;
	}
	if (security_binder_transfer_binder(proc->cred, target_proc->cred)) {
		ret = -EPERM;
		goto done;
	}

	ret = binder_inc_ref_for_node(target_proc, node,
			fp->hdr.type == BINDER_TYPE_BINDER,
			&thread->todo, &rdata);
	if (ret)
		goto done;

	if (fp->hdr.type == BINDER_TYPE_BINDER)
		fp->hdr.type = BINDER_TYPE_HANDLE;
	else
		fp->hdr.type = BINDER_TYPE_WEAK_HANDLE;
	fp->binder = 0;
	fp->handle = rdata.desc;
	fp->cookie = 0;

	trace_binder_transaction_node_to_ref(t, node, &rdata);
	binder_debug(BINDER_DEBUG_TRANSACTION,
		     "        node %d u%016llx -> ref %d desc %d\n",
		     node->debug_id, (u64)node->ptr,
		     rdata.debug_id, rdata.desc);
done:
	binder_put_node(node);
	return ret;
}
static int binder_translate_handle(struct flat_binder_object *fp,
				   struct binder_transaction *t,
				   struct binder_thread *thread)
{
	struct binder_proc *proc = thread->proc;
	struct binder_proc *target_proc = t->to_proc;
	struct binder_node *node;
	struct binder_ref_data src_rdata;
	int ret = 0;

	node = binder_get_node_from_ref(proc, fp->handle,
			fp->hdr.type == BINDER_TYPE_HANDLE, &src_rdata);
	if (!node) {
		binder_user_error("%d:%d got transaction with invalid handle, %d\n",
				  proc->pid, thread->pid, fp->handle);
		return -EINVAL;
	}
	if (security_binder_transfer_binder(proc->cred, target_proc->cred)) {
		ret = -EPERM;
		goto done;
	}

	binder_node_lock(node);
	if (node->proc == target_proc) {
		if (fp->hdr.type == BINDER_TYPE_HANDLE)
			fp->hdr.type = BINDER_TYPE_BINDER;
		else
			fp->hdr.type = BINDER_TYPE_WEAK_BINDER;
		fp->binder = node->ptr;
		fp->cookie = node->cookie;
		if (node->proc)
			binder_inner_proc_lock(node->proc);
		else
			__acquire(&node->proc->inner_lock);
		binder_inc_node_nilocked(node,
					 fp->hdr.type == BINDER_TYPE_BINDER,
					 0, NULL);
		if (node->proc)
			binder_inner_proc_unlock(node->proc);
		else
			__release(&node->proc->inner_lock);
		trace_binder_transaction_ref_to_node(t, node, &src_rdata);
		binder_debug(BINDER_DEBUG_TRANSACTION,
			     "        ref %d desc %d -> node %d u%016llx\n",
			     src_rdata.debug_id, src_rdata.desc, node->debug_id,
			     (u64)node->ptr);
		binder_node_unlock(node);
	} else {
		struct binder_ref_data dest_rdata;

		binder_node_unlock(node);
		ret = binder_inc_ref_for_node(target_proc, node,
				fp->hdr.type == BINDER_TYPE_HANDLE,
				NULL, &dest_rdata);
		if (ret)
			goto done;

		fp->binder = 0;
		fp->handle = dest_rdata.desc;
		fp->cookie = 0;
		trace_binder_transaction_ref_to_ref(t, node, &src_rdata,
						    &dest_rdata);
		binder_debug(BINDER_DEBUG_TRANSACTION,
			     "        ref %d desc %d -> ref %d desc %d (node %d)\n",
			     src_rdata.debug_id, src_rdata.desc,
			     dest_rdata.debug_id, dest_rdata.desc,
			     node->debug_id);
	}
done:
	binder_put_node(node);
	return ret;
}
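/*
 * Example of the two cases above: if process A sends a handle that refers to
 * a node owned by process B, and the target of the transaction is B itself,
 * the object is rewritten as a BINDER_TYPE_BINDER carrying B's own ptr/cookie
 * (B receives its local object back). If the target is some third process C,
 * the object stays a handle and fp->handle is rewritten to a descriptor valid
 * in C, taking a new or existing ref there.
 */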
static int binder_translate_fd(u32 fd, binder_size_t fd_offset,
			       struct binder_transaction *t,
			       struct binder_thread *thread,
			       struct binder_transaction *in_reply_to)
{
	struct binder_proc *proc = thread->proc;
	struct binder_proc *target_proc = t->to_proc;
	struct binder_txn_fd_fixup *fixup;
	struct file *file;
	int ret = 0;
	bool target_allows_fd;

	if (in_reply_to)
		target_allows_fd = !!(in_reply_to->flags & TF_ACCEPT_FDS);
	else
		target_allows_fd = t->buffer->target_node->accept_fds;
	if (!target_allows_fd) {
		binder_user_error("%d:%d got %s with fd, %d, but target does not allow fds\n",
				  proc->pid, thread->pid,
				  in_reply_to ? "reply" : "transaction",
				  fd);
		ret = -EPERM;
		goto err_fd_not_accepted;
	}

	file = fget(fd);
	if (!file) {
		binder_user_error("%d:%d got transaction with invalid fd, %d\n",
				  proc->pid, thread->pid, fd);
		ret = -EBADF;
		goto err_fget;
	}
	ret = security_binder_transfer_file(proc->cred, target_proc->cred, file);
	if (ret < 0) {
		ret = -EPERM;
		goto err_security;
	}

	/*
	 * Add fixup record for this transaction. The allocation
	 * of the fd in the target needs to be done from a
	 * target thread.
	 */
	fixup = kzalloc(sizeof(*fixup), GFP_KERNEL);
	if (!fixup) {
		ret = -ENOMEM;
		goto err_alloc;
	}
	fixup->file = file;
	fixup->offset = fd_offset;
	fixup->target_fd = -1;
	trace_binder_transaction_fd_send(t, fd, fixup->offset);
	list_add_tail(&fixup->fixup_entry, &t->fd_fixups);

	return ret;

err_alloc:
err_security:
	fput(file);
err_fget:
err_fd_not_accepted:
	return ret;
}
/**
 * struct binder_ptr_fixup - data to be fixed-up in target buffer
 * @offset:	offset in target buffer to fixup
 * @skip_size:	bytes to skip in copy (fixup will be written later)
 * @fixup_data:	data to write at fixup offset
 * @node:	list node
 *
 * This is used for the pointer fixup list (pf) which is created and consumed
 * during binder_transaction() and is only accessed locally. No
 * locking is necessary.
 *
 * The list is ordered by @offset.
 */
struct binder_ptr_fixup {
	binder_size_t offset;
	size_t skip_size;
	binder_uintptr_t fixup_data;
	struct list_head node;
};
/**
 * struct binder_sg_copy - scatter-gather data to be copied
 * @offset:		offset in target buffer
 * @sender_uaddr:	user address in source buffer
 * @length:		bytes to copy
 * @node:		list node
 *
 * This is used for the sg copy list (sgc) which is created and consumed
 * during binder_transaction() and is only accessed locally. No
 * locking is necessary.
 *
 * The list is ordered by @offset.
 */
struct binder_sg_copy {
	binder_size_t offset;
	const void __user *sender_uaddr;
	size_t length;
	struct list_head node;
};
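/*
 * Worked example of the sgc/pf interplay, assuming a single sg block at
 * target offset 128 with length 64 and one pointer fixup at offset 136:
 * binder_do_deferred_txn_copies() copies bytes [128, 136) from the sender,
 * writes the 8-byte fixup_data at [136, 144) in place of the sender's
 * untranslated pointer, then resumes copying at 144 until the block ends at
 * 192. With skip_size != 0 (the BINDER_TYPE_FDA case) the span is skipped
 * instead and the fd values are patched later in target context.
 */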
/**
 * binder_do_deferred_txn_copies() - copy and fixup scatter-gather data
 * @alloc:	binder_alloc associated with @buffer
 * @buffer:	binder buffer in target process
 * @sgc_head:	list_head of scatter-gather copy list
 * @pf_head:	list_head of pointer fixup list
 *
 * Processes all elements of @sgc_head, applying fixups from @pf_head
 * and copying the scatter-gather data from the source process' user
 * buffer to the target's buffer. It is expected that the list creation
 * and processing all occurs during binder_transaction() so these lists
 * are only accessed in local context.
 *
 * Return: 0=success, else -errno
 */
static int binder_do_deferred_txn_copies(struct binder_alloc *alloc,
					 struct binder_buffer *buffer,
					 struct list_head *sgc_head,
					 struct list_head *pf_head)
{
	int ret = 0;
	struct binder_sg_copy *sgc, *tmpsgc;
	struct binder_ptr_fixup *tmppf;
	struct binder_ptr_fixup *pf =
		list_first_entry_or_null(pf_head, struct binder_ptr_fixup,
					 node);

	list_for_each_entry_safe(sgc, tmpsgc, sgc_head, node) {
		size_t bytes_copied = 0;

		while (bytes_copied < sgc->length) {
			size_t copy_size;
			size_t bytes_left = sgc->length - bytes_copied;
			size_t offset = sgc->offset + bytes_copied;

			/*
			 * We copy up to the fixup (pointed to by pf)
			 */
			copy_size = pf ? min(bytes_left, (size_t)pf->offset - offset)
				       : bytes_left;
			if (!ret && copy_size)
				ret = binder_alloc_copy_user_to_buffer(
						alloc, buffer,
						offset,
						sgc->sender_uaddr + bytes_copied,
						copy_size);
			bytes_copied += copy_size;
			if (copy_size != bytes_left) {
				BUG_ON(!pf);
				/* we stopped at a fixup offset */
				if (pf->skip_size) {
					/*
					 * we are just skipping. This is for
					 * BINDER_TYPE_FDA where the translated
					 * fds will be fixed up when we get
					 * to target context.
					 */
					bytes_copied += pf->skip_size;
				} else {
					/* apply the fixup indicated by pf */
					if (!ret)
						ret = binder_alloc_copy_to_buffer(
							alloc, buffer,
							pf->offset,
							&pf->fixup_data,
							sizeof(pf->fixup_data));
					bytes_copied += sizeof(pf->fixup_data);
				}
				list_del(&pf->node);
				kfree(pf);
				pf = list_first_entry_or_null(pf_head,
						struct binder_ptr_fixup, node);
			}
		}
		list_del(&sgc->node);
		kfree(sgc);
	}
	list_for_each_entry_safe(pf, tmppf, pf_head, node) {
		BUG_ON(pf->skip_size == 0);
		list_del(&pf->node);
		kfree(pf);
	}
	BUG_ON(!list_empty(sgc_head));

	return ret > 0 ? -EINVAL : ret;
}
/**
 * binder_cleanup_deferred_txn_lists() - free specified lists
 * @sgc_head:	list_head of scatter-gather copy list
 * @pf_head:	list_head of pointer fixup list
 *
 * Called to clean up @sgc_head and @pf_head if there is an
 * error.
 */
static void binder_cleanup_deferred_txn_lists(struct list_head *sgc_head,
					      struct list_head *pf_head)
{
	struct binder_sg_copy *sgc, *tmpsgc;
	struct binder_ptr_fixup *pf, *tmppf;

	list_for_each_entry_safe(sgc, tmpsgc, sgc_head, node) {
		list_del(&sgc->node);
		kfree(sgc);
	}
	list_for_each_entry_safe(pf, tmppf, pf_head, node) {
		list_del(&pf->node);
		kfree(pf);
	}
}
/**
 * binder_defer_copy() - queue a scatter-gather buffer for copy
 * @sgc_head:		list_head of scatter-gather copy list
 * @offset:		binder buffer offset in target process
 * @sender_uaddr:	user address in source process
 * @length:		bytes to copy
 *
 * Specify a scatter-gather block to be copied. The actual copy must
 * be deferred until all the needed fixups are identified and queued.
 * Then the copy and fixups are done together so un-translated values
 * from the source are never visible in the target buffer.
 *
 * We are guaranteed that repeated calls to this function will have
 * monotonically increasing @offset values so the list will naturally
 * be ordered.
 *
 * Return: 0=success, else -errno
 */
static int binder_defer_copy(struct list_head *sgc_head, binder_size_t offset,
			     const void __user *sender_uaddr, size_t length)
{
	struct binder_sg_copy *bc = kzalloc(sizeof(*bc), GFP_KERNEL);

	if (!bc)
		return -ENOMEM;

	bc->offset = offset;
	bc->sender_uaddr = sender_uaddr;
	bc->length = length;
	INIT_LIST_HEAD(&bc->node);

	/*
	 * We are guaranteed that the deferred copies are in-order
	 * so just add to the tail.
	 */
	list_add_tail(&bc->node, sgc_head);

	return 0;
}
/**
 * binder_add_fixup() - queue a fixup to be applied to sg copy
 * @pf_head:	list_head of binder ptr fixup list
 * @offset:	binder buffer offset in target process
 * @fixup:	bytes to be copied for fixup
 * @skip_size:	bytes to skip when copying (fixup will be applied later)
 *
 * Add the specified fixup to a list ordered by @offset. When copying
 * the scatter-gather buffers, the fixup will be copied instead of
 * data from the source buffer. For BINDER_TYPE_FDA fixups, the fixup
 * will be applied later (in target process context), so we just skip
 * the bytes specified by @skip_size. If @skip_size is 0, we copy the
 * fixup data specified by @fixup.
 *
 * This function is called *mostly* in @offset order, but there are
 * exceptions. Since out-of-order inserts are relatively uncommon,
 * we insert the new element by searching backward from the tail of
 * the list.
 *
 * Return: 0=success, else -errno
 */
static int binder_add_fixup(struct list_head *pf_head, binder_size_t offset,
			    binder_uintptr_t fixup, size_t skip_size)
{
	struct binder_ptr_fixup *pf = kzalloc(sizeof(*pf), GFP_KERNEL);
	struct binder_ptr_fixup *tmppf;

	if (!pf)
		return -ENOMEM;

	pf->offset = offset;
	pf->fixup_data = fixup;
	pf->skip_size = skip_size;
	INIT_LIST_HEAD(&pf->node);

	/* Fixups are *mostly* added in-order, but there are some
	 * exceptions. Look backwards through list for insertion point.
	 */
	list_for_each_entry_reverse(tmppf, pf_head, node) {
		if (tmppf->offset < pf->offset) {
			list_add(&pf->node, &tmppf->node);
			return 0;
		}
	}
	/*
	 * if we get here, then the new offset is the lowest so
	 * insert at the head
	 */
	list_add(&pf->node, pf_head);
	return 0;
}
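/*
 * Worked example of the backward-search insert above: for successive
 * binder_add_fixup() calls with offsets 16, 48, 32, the reverse scan for
 * offset 32 skips past 48, finds 16 < 32, and inserts after it, yielding
 * the ordered list 16, 32, 48 that binder_do_deferred_txn_copies() relies
 * on.
 */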
static int binder_translate_fd_array(struct list_head *pf_head,
				     struct binder_fd_array_object *fda,
				     const void __user *sender_ubuffer,
				     struct binder_buffer_object *parent,
				     struct binder_buffer_object *sender_uparent,
				     struct binder_transaction *t,
				     struct binder_thread *thread,
				     struct binder_transaction *in_reply_to)
{
	binder_size_t fdi, fd_buf_size;
	binder_size_t fda_offset;
	const void __user *sender_ufda_base;
	struct binder_proc *proc = thread->proc;
	int ret;

	if (fda->num_fds == 0)
		return 0;

	fd_buf_size = sizeof(u32) * fda->num_fds;
	if (fda->num_fds >= SIZE_MAX / sizeof(u32)) {
		binder_user_error("%d:%d got transaction with invalid number of fds (%lld)\n",
				  proc->pid, thread->pid, (u64)fda->num_fds);
		return -EINVAL;
	}
	if (fd_buf_size > parent->length ||
	    fda->parent_offset > parent->length - fd_buf_size) {
		/* No space for all file descriptors here. */
		binder_user_error("%d:%d not enough space to store %lld fds in buffer\n",
				  proc->pid, thread->pid, (u64)fda->num_fds);
		return -EINVAL;
	}
	/*
	 * the source data for binder_buffer_object is visible
	 * to user-space and the @buffer element is the user
	 * pointer to the buffer_object containing the fd_array.
	 * Convert the address to an offset relative to
	 * the base of the transaction buffer.
	 */
	fda_offset = parent->buffer - t->buffer->user_data +
		fda->parent_offset;
	sender_ufda_base = (void __user *)(uintptr_t)sender_uparent->buffer +
		fda->parent_offset;

	if (!IS_ALIGNED((unsigned long)fda_offset, sizeof(u32)) ||
	    !IS_ALIGNED((unsigned long)sender_ufda_base, sizeof(u32))) {
		binder_user_error("%d:%d parent offset not aligned correctly.\n",
				  proc->pid, thread->pid);
		return -EINVAL;
	}
	ret = binder_add_fixup(pf_head, fda_offset, 0, fda->num_fds * sizeof(u32));
	if (ret)
		return ret;

	for (fdi = 0; fdi < fda->num_fds; fdi++) {
		u32 fd;
		binder_size_t offset = fda_offset + fdi * sizeof(fd);
		binder_size_t sender_uoffset = fdi * sizeof(fd);

		ret = copy_from_user(&fd, sender_ufda_base + sender_uoffset, sizeof(fd));
		if (!ret)
			ret = binder_translate_fd(fd, offset, t, thread,
						  in_reply_to);
		if (ret)
			return ret > 0 ? -EINVAL : ret;
	}
	return 0;
}
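/*
 * Layout example for BINDER_TYPE_FDA: the parent BINDER_TYPE_PTR object
 * describes a buffer of parent->length bytes, and the fd array occupies the
 * u32 slots at [parent_offset, parent_offset + num_fds * 4) inside it. The
 * checks above reject num_fds * 4 overflowing size_t, the array extending
 * past parent->length, and misaligned array bases on either side.
 */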
static int binder_fixup_parent(struct list_head *pf_head,
			       struct binder_transaction *t,
			       struct binder_thread *thread,
			       struct binder_buffer_object *bp,
			       binder_size_t off_start_offset,
			       binder_size_t num_valid,
			       binder_size_t last_fixup_obj_off,
			       binder_size_t last_fixup_min_off)
{
	struct binder_buffer_object *parent;
	struct binder_buffer *b = t->buffer;
	struct binder_proc *proc = thread->proc;
	struct binder_proc *target_proc = t->to_proc;
	struct binder_object object;
	binder_size_t buffer_offset;
	binder_size_t parent_offset;

	if (!(bp->flags & BINDER_BUFFER_FLAG_HAS_PARENT))
		return 0;

	parent = binder_validate_ptr(target_proc, b, &object, bp->parent,
				     off_start_offset, &parent_offset,
				     num_valid);
	if (!parent) {
		binder_user_error("%d:%d got transaction with invalid parent offset or type\n",
				  proc->pid, thread->pid);
		return -EINVAL;
	}

	if (!binder_validate_fixup(target_proc, b, off_start_offset,
				   parent_offset, bp->parent_offset,
				   last_fixup_obj_off,
				   last_fixup_min_off)) {
		binder_user_error("%d:%d got transaction with out-of-order buffer fixup\n",
				  proc->pid, thread->pid);
		return -EINVAL;
	}

	if (parent->length < sizeof(binder_uintptr_t) ||
	    bp->parent_offset > parent->length - sizeof(binder_uintptr_t)) {
		/* No space for a pointer here! */
		binder_user_error("%d:%d got transaction with invalid parent offset\n",
				  proc->pid, thread->pid);
		return -EINVAL;
	}

	buffer_offset = bp->parent_offset + parent->buffer - b->user_data;

	return binder_add_fixup(pf_head, buffer_offset, bp->buffer, 0);
}
/**
 * binder_can_update_transaction() - Can a txn be superseded by an updated one?
 * @t1: the pending async txn in the frozen process
 * @t2: the new async txn to supersede the outdated pending one
 *
 * Return: true if t2 can supersede t1
 *         false if t2 can not supersede t1
 */
static bool binder_can_update_transaction(struct binder_transaction *t1,
					  struct binder_transaction *t2)
{
	if ((t1->flags & t2->flags & (TF_ONE_WAY | TF_UPDATE_TXN)) !=
	    (TF_ONE_WAY | TF_UPDATE_TXN) || !t1->to_proc || !t2->to_proc)
		return false;
	if (t1->to_proc->tsk == t2->to_proc->tsk && t1->code == t2->code &&
	    t1->flags == t2->flags && t1->buffer->pid == t2->buffer->pid &&
	    t1->buffer->target_node->ptr == t2->buffer->target_node->ptr &&
	    t1->buffer->target_node->cookie == t2->buffer->target_node->cookie)
		return true;
	return false;
}
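/*
 * Example: two one-way transactions sent with TF_UPDATE_TXN to the same
 * frozen process, with the same code and the same target node, differing
 * only in payload. The second may supersede the first while it still sits
 * in async_todo, so the frozen target wakes up to only the most recent
 * update instead of replaying a stale one.
 */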
/**
 * binder_find_outdated_transaction_ilocked() - Find the outdated transaction
 * @t:		 new async transaction
 * @target_list: list to find outdated transaction
 *
 * Return: the outdated transaction if found
 *         NULL if no outdated transaction can be found
 *
 * Requires the proc->inner_lock to be held.
 */
static struct binder_transaction *
binder_find_outdated_transaction_ilocked(struct binder_transaction *t,
					 struct list_head *target_list)
{
	struct binder_work *w;

	list_for_each_entry(w, target_list, entry) {
		struct binder_transaction *t_queued;

		if (w->type != BINDER_WORK_TRANSACTION)
			continue;
		t_queued = container_of(w, struct binder_transaction, work);
		if (binder_can_update_transaction(t_queued, t))
			return t_queued;
	}
	return NULL;
}
/**
 * binder_proc_transaction() - sends a transaction to a process and wakes it up
 * @t:		transaction to send
 * @proc:	process to send the transaction to
 * @thread:	thread in @proc to send the transaction to (may be NULL)
 *
 * This function queues a transaction to the specified process. It will try
 * to find a thread in the target process to handle the transaction and
 * wake it up. If no thread is found, the work is queued to the proc
 * waitqueue.
 *
 * If the @thread parameter is not NULL, the transaction is always queued
 * to the waitlist of that specific thread.
 *
 * Return:	0 if the transaction was successfully queued
 *		BR_DEAD_REPLY if the target process or thread is dead
 *		BR_FROZEN_REPLY if the target process or thread is frozen and
 *			the sync transaction was rejected
 *		BR_TRANSACTION_PENDING_FROZEN if the target process is frozen
 *		and the async transaction was successfully queued
 */
static int binder_proc_transaction(struct binder_transaction *t,
				   struct binder_proc *proc,
				   struct binder_thread *thread)
{
	struct binder_node *node = t->buffer->target_node;
	bool oneway = !!(t->flags & TF_ONE_WAY);
	bool pending_async = false;
	struct binder_transaction *t_outdated = NULL;
	bool frozen = false;

	BUG_ON(!node);
	binder_node_lock(node);
	if (oneway) {
		BUG_ON(thread);
		if (node->has_async_transaction)
			pending_async = true;
		else
			node->has_async_transaction = true;
	}

	binder_inner_proc_lock(proc);
	if (proc->is_frozen) {
		frozen = true;
		proc->sync_recv |= !oneway;
		proc->async_recv |= oneway;
	}

	if ((frozen && !oneway) || proc->is_dead ||
			(thread && thread->is_dead)) {
		binder_inner_proc_unlock(proc);
		binder_node_unlock(node);
		return frozen ? BR_FROZEN_REPLY : BR_DEAD_REPLY;
	}

	if (!thread && !pending_async)
		thread = binder_select_thread_ilocked(proc);

	if (thread) {
		binder_enqueue_thread_work_ilocked(thread, &t->work);
	} else if (!pending_async) {
		binder_enqueue_work_ilocked(&t->work, &proc->todo);
	} else {
		if ((t->flags & TF_UPDATE_TXN) && frozen) {
			t_outdated = binder_find_outdated_transaction_ilocked(t,
									      &node->async_todo);
			if (t_outdated) {
				binder_debug(BINDER_DEBUG_TRANSACTION,
					     "txn %d supersedes %d\n",
					     t->debug_id, t_outdated->debug_id);
				list_del_init(&t_outdated->work.entry);
				proc->outstanding_txns--;
			}
		}
		binder_enqueue_work_ilocked(&t->work, &node->async_todo);
	}

	if (!pending_async)
		binder_wakeup_thread_ilocked(proc, thread, !oneway /* sync */);

	proc->outstanding_txns++;
	binder_inner_proc_unlock(proc);
	binder_node_unlock(node);

	/*
	 * To reduce potential contention, free the outdated transaction and
	 * buffer after releasing the locks.
	 */
	if (t_outdated) {
		struct binder_buffer *buffer = t_outdated->buffer;

		t_outdated->buffer = NULL;
		buffer->transaction = NULL;
		trace_binder_transaction_update_buffer_release(buffer);
		binder_release_entire_buffer(proc, NULL, buffer, false);
		binder_alloc_free_buf(&proc->alloc, buffer);
		kfree(t_outdated);
		binder_stats_deleted(BINDER_STAT_TRANSACTION);
	}

	if (oneway && frozen)
		return BR_TRANSACTION_PENDING_FROZEN;

	return 0;
}
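/*
 * The caller maps these return codes onto the sender-visible protocol:
 * BR_FROZEN_REPLY and BR_DEAD_REPLY fail the transaction, while
 * BR_TRANSACTION_PENDING_FROZEN is not an error; binder_transaction()
 * instead downgrades the sender's TRANSACTION_COMPLETE work item to
 * BINDER_WORK_TRANSACTION_PENDING, as seen further below.
 */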
/**
 * binder_get_node_refs_for_txn() - Get required refs on node for txn
 * @node:	struct binder_node for which to get refs
 * @procp:	returns @node->proc if valid
 * @error:	if no @procp then returns BR_DEAD_REPLY
 *
 * User-space normally keeps the node alive when creating a transaction
 * since it has a reference to the target. The local strong ref keeps it
 * alive if the sending process dies before the target process processes
 * the transaction. If the source process is malicious or has a reference
 * counting bug, relying on the local strong ref can fail.
 *
 * Since user-space can cause the local strong ref to go away, we also take
 * a tmpref on the node to ensure it survives while we are constructing
 * the transaction. We also need a tmpref on the proc while we are
 * constructing the transaction, so we take that here as well.
 *
 * Return: The target_node with refs taken, or NULL if @node->proc is NULL.
 * Also sets @procp if valid. If @node->proc is NULL, indicating that the
 * target proc has died, @error is set to BR_DEAD_REPLY.
 */
static struct binder_node *binder_get_node_refs_for_txn(
		struct binder_node *node,
		struct binder_proc **procp,
		uint32_t *error)
{
	struct binder_node *target_node = NULL;

	binder_node_inner_lock(node);
	if (node->proc) {
		target_node = node;
		binder_inc_node_nilocked(node, 1, 0, NULL);
		binder_inc_node_tmpref_ilocked(node);
		node->proc->tmp_ref++;
		*procp = node->proc;
	} else
		*error = BR_DEAD_REPLY;
	binder_node_inner_unlock(node);

	return target_node;
}
static void binder_set_txn_from_error(struct binder_transaction *t, int id,
				      uint32_t command, int32_t param)
{
	struct binder_thread *from = binder_get_txn_from_and_acq_inner(t);

	if (!from) {
		/* annotation for sparse */
		__release(&from->proc->inner_lock);
		return;
	}

	/* don't override existing errors */
	if (from->ee.command == BR_OK)
		binder_set_extended_error(&from->ee, id, command, param);
	binder_inner_proc_unlock(from->proc);
	binder_thread_dec_tmpref(from);
}
static void binder_transaction(struct binder_proc *proc,
			       struct binder_thread *thread,
			       struct binder_transaction_data *tr, int reply,
			       binder_size_t extra_buffers_size)
{
	int ret;
	struct binder_transaction *t;
	struct binder_work *w;
	struct binder_work *tcomplete;
	binder_size_t buffer_offset = 0;
	binder_size_t off_start_offset, off_end_offset;
	binder_size_t off_min;
	binder_size_t sg_buf_offset, sg_buf_end_offset;
	binder_size_t user_offset = 0;
	struct binder_proc *target_proc = NULL;
	struct binder_thread *target_thread = NULL;
	struct binder_node *target_node = NULL;
	struct binder_transaction *in_reply_to = NULL;
	struct binder_transaction_log_entry *e;
	uint32_t return_error = 0;
	uint32_t return_error_param = 0;
	uint32_t return_error_line = 0;
	binder_size_t last_fixup_obj_off = 0;
	binder_size_t last_fixup_min_off = 0;
	struct binder_context *context = proc->context;
	int t_debug_id = atomic_inc_return(&binder_last_id);
	ktime_t t_start_time = ktime_get();
	char *secctx = NULL;
	u32 secctx_sz = 0;
	struct list_head sgc_head;
	struct list_head pf_head;
	const void __user *user_buffer = (const void __user *)
				(uintptr_t)tr->data.ptr.buffer;
	INIT_LIST_HEAD(&sgc_head);
	INIT_LIST_HEAD(&pf_head);

	e = binder_transaction_log_add(&binder_transaction_log);
	e->debug_id = t_debug_id;
	e->call_type = reply ? 2 : !!(tr->flags & TF_ONE_WAY);
	e->from_proc = proc->pid;
	e->from_thread = thread->pid;
	e->target_handle = tr->target.handle;
	e->data_size = tr->data_size;
	e->offsets_size = tr->offsets_size;
	strscpy(e->context_name, proc->context->name, BINDERFS_MAX_NAME);

	binder_inner_proc_lock(proc);
	binder_set_extended_error(&thread->ee, t_debug_id, BR_OK, 0);
	binder_inner_proc_unlock(proc);

	if (reply) {
		binder_inner_proc_lock(proc);
		in_reply_to = thread->transaction_stack;
		if (in_reply_to == NULL) {
			binder_inner_proc_unlock(proc);
			binder_user_error("%d:%d got reply transaction with no transaction stack\n",
					  proc->pid, thread->pid);
			return_error = BR_FAILED_REPLY;
			return_error_param = -EPROTO;
			return_error_line = __LINE__;
			goto err_empty_call_stack;
		}
		if (in_reply_to->to_thread != thread) {
			spin_lock(&in_reply_to->lock);
			binder_user_error("%d:%d got reply transaction with bad transaction stack, transaction %d has target %d:%d\n",
				proc->pid, thread->pid, in_reply_to->debug_id,
				in_reply_to->to_proc ?
				in_reply_to->to_proc->pid : 0,
				in_reply_to->to_thread ?
				in_reply_to->to_thread->pid : 0);
			spin_unlock(&in_reply_to->lock);
			binder_inner_proc_unlock(proc);
			return_error = BR_FAILED_REPLY;
			return_error_param = -EPROTO;
			return_error_line = __LINE__;
			in_reply_to = NULL;
			goto err_bad_call_stack;
		}
		thread->transaction_stack = in_reply_to->to_parent;
		binder_inner_proc_unlock(proc);
		binder_set_nice(in_reply_to->saved_priority);
		target_thread = binder_get_txn_from_and_acq_inner(in_reply_to);
		if (target_thread == NULL) {
			/* annotation for sparse */
			__release(&target_thread->proc->inner_lock);
			binder_txn_error("%d:%d reply target not found\n",
				thread->pid, proc->pid);
			return_error = BR_DEAD_REPLY;
			return_error_line = __LINE__;
			goto err_dead_binder;
		}
		if (target_thread->transaction_stack != in_reply_to) {
			binder_user_error("%d:%d got reply transaction with bad target transaction stack %d, expected %d\n",
				proc->pid, thread->pid,
				target_thread->transaction_stack ?
				target_thread->transaction_stack->debug_id : 0,
				in_reply_to->debug_id);
			binder_inner_proc_unlock(target_thread->proc);
			return_error = BR_FAILED_REPLY;
			return_error_param = -EPROTO;
			return_error_line = __LINE__;
			binder_thread_dec_tmpref(target_thread);
			target_thread = NULL;
			goto err_dead_binder;
		}
		target_proc = target_thread->proc;
		target_proc->tmp_ref++;
		binder_inner_proc_unlock(target_thread->proc);
	} else {
		if (tr->target.handle) {
			struct binder_ref *ref;

			/*
			 * There must already be a strong ref
			 * on this node. If so, do a strong
			 * increment on the node to ensure it
			 * stays alive until the transaction is
			 * done.
			 */
			binder_proc_lock(proc);
			ref = binder_get_ref_olocked(proc, tr->target.handle,
						     true);
			if (ref) {
				target_node = binder_get_node_refs_for_txn(
						ref->node, &target_proc,
						&return_error);
			} else {
				binder_user_error("%d:%d got transaction to invalid handle, %u\n",
						  proc->pid, thread->pid, tr->target.handle);
				return_error = BR_FAILED_REPLY;
			}
			binder_proc_unlock(proc);
		} else {
			mutex_lock(&context->context_mgr_node_lock);
			target_node = context->binder_context_mgr_node;
			if (target_node)
				target_node = binder_get_node_refs_for_txn(
						target_node, &target_proc,
						&return_error);
			else
				return_error = BR_DEAD_REPLY;
			mutex_unlock(&context->context_mgr_node_lock);
			if (target_node && target_proc->pid == proc->pid) {
				binder_user_error("%d:%d got transaction to context manager from process owning it\n",
						  proc->pid, thread->pid);
				return_error = BR_FAILED_REPLY;
				return_error_param = -EINVAL;
				return_error_line = __LINE__;
				goto err_invalid_target_handle;
			}
		}
		if (!target_node) {
			binder_txn_error("%d:%d cannot find target node\n",
				thread->pid, proc->pid);
			/*
			 * return_error is set above
			 */
			return_error_param = -EINVAL;
			return_error_line = __LINE__;
			goto err_dead_binder;
		}
		e->to_node = target_node->debug_id;
		if (WARN_ON(proc == target_proc)) {
			binder_txn_error("%d:%d self transactions not allowed\n",
				thread->pid, proc->pid);
			return_error = BR_FAILED_REPLY;
			return_error_param = -EINVAL;
			return_error_line = __LINE__;
			goto err_invalid_target_handle;
		}
		if (security_binder_transaction(proc->cred,
						target_proc->cred) < 0) {
			binder_txn_error("%d:%d transaction credentials failed\n",
				thread->pid, proc->pid);
			return_error = BR_FAILED_REPLY;
			return_error_param = -EPERM;
			return_error_line = __LINE__;
			goto err_invalid_target_handle;
		}
		binder_inner_proc_lock(proc);

		w = list_first_entry_or_null(&thread->todo,
					     struct binder_work, entry);
		if (!(tr->flags & TF_ONE_WAY) && w &&
		    w->type == BINDER_WORK_TRANSACTION) {
			/*
			 * Do not allow new outgoing transaction from a
			 * thread that has a transaction at the head of
			 * its todo list. Only need to check the head
			 * because binder_select_thread_ilocked picks a
			 * thread from proc->waiting_threads to enqueue
			 * the transaction, and nothing is queued to the
			 * todo list while the thread is on waiting_threads.
			 */
			binder_user_error("%d:%d new transaction not allowed when there is a transaction on thread todo\n",
					  proc->pid, thread->pid);
			binder_inner_proc_unlock(proc);
			return_error = BR_FAILED_REPLY;
			return_error_param = -EPROTO;
			return_error_line = __LINE__;
			goto err_bad_todo_list;
		}

		if (!(tr->flags & TF_ONE_WAY) && thread->transaction_stack) {
			struct binder_transaction *tmp;

			tmp = thread->transaction_stack;
			if (tmp->to_thread != thread) {
				spin_lock(&tmp->lock);
				binder_user_error("%d:%d got new transaction with bad transaction stack, transaction %d has target %d:%d\n",
					proc->pid, thread->pid, tmp->debug_id,
					tmp->to_proc ? tmp->to_proc->pid : 0,
					tmp->to_thread ?
					tmp->to_thread->pid : 0);
				spin_unlock(&tmp->lock);
				binder_inner_proc_unlock(proc);
				return_error = BR_FAILED_REPLY;
				return_error_param = -EPROTO;
				return_error_line = __LINE__;
				goto err_bad_call_stack;
			}
			while (tmp) {
				struct binder_thread *from;

				spin_lock(&tmp->lock);
				from = tmp->from;
				if (from && from->proc == target_proc) {
					atomic_inc(&from->tmp_ref);
					target_thread = from;
					spin_unlock(&tmp->lock);
					break;
				}
				spin_unlock(&tmp->lock);
				tmp = tmp->from_parent;
			}
		}
		binder_inner_proc_unlock(proc);
	}
	if (target_thread)
		e->to_thread = target_thread->pid;
	e->to_proc = target_proc->pid;

	/* TODO: reuse incoming transaction for reply */
	t = kzalloc(sizeof(*t), GFP_KERNEL);
	if (t == NULL) {
		binder_txn_error("%d:%d cannot allocate transaction\n",
			thread->pid, proc->pid);
		return_error = BR_FAILED_REPLY;
		return_error_param = -ENOMEM;
		return_error_line = __LINE__;
		goto err_alloc_t_failed;
	}
	INIT_LIST_HEAD(&t->fd_fixups);
	binder_stats_created(BINDER_STAT_TRANSACTION);
	spin_lock_init(&t->lock);

	tcomplete = kzalloc(sizeof(*tcomplete), GFP_KERNEL);
	if (tcomplete == NULL) {
		binder_txn_error("%d:%d cannot allocate work for transaction\n",
			thread->pid, proc->pid);
		return_error = BR_FAILED_REPLY;
		return_error_param = -ENOMEM;
		return_error_line = __LINE__;
		goto err_alloc_tcomplete_failed;
	}
	binder_stats_created(BINDER_STAT_TRANSACTION_COMPLETE);

	t->debug_id = t_debug_id;
	t->start_time = t_start_time;

	if (reply)
		binder_debug(BINDER_DEBUG_TRANSACTION,
			     "%d:%d BC_REPLY %d -> %d:%d, data %016llx-%016llx size %lld-%lld-%lld\n",
			     proc->pid, thread->pid, t->debug_id,
			     target_proc->pid, target_thread->pid,
			     (u64)tr->data.ptr.buffer,
			     (u64)tr->data.ptr.offsets,
			     (u64)tr->data_size, (u64)tr->offsets_size,
			     (u64)extra_buffers_size);
	else
		binder_debug(BINDER_DEBUG_TRANSACTION,
			     "%d:%d BC_TRANSACTION %d -> %d - node %d, data %016llx-%016llx size %lld-%lld-%lld\n",
			     proc->pid, thread->pid, t->debug_id,
			     target_proc->pid, target_node->debug_id,
			     (u64)tr->data.ptr.buffer,
			     (u64)tr->data.ptr.offsets,
			     (u64)tr->data_size, (u64)tr->offsets_size,
			     (u64)extra_buffers_size);

	if (!reply && !(tr->flags & TF_ONE_WAY))
		t->from = thread;
	else
		t->from = NULL;
	t->from_pid = proc->pid;
	t->from_tid = thread->pid;
	t->sender_euid = task_euid(proc->tsk);
	t->to_proc = target_proc;
	t->to_thread = target_thread;
	t->code = tr->code;
	t->flags = tr->flags;
	t->priority = task_nice(current);

	if (target_node && target_node->txn_security_ctx) {
		u32 secid;
		size_t added_size;

		security_cred_getsecid(proc->cred, &secid);
		ret = security_secid_to_secctx(secid, &secctx, &secctx_sz);
		if (ret) {
			binder_txn_error("%d:%d failed to get security context\n",
				thread->pid, proc->pid);
			return_error = BR_FAILED_REPLY;
			return_error_param = ret;
			return_error_line = __LINE__;
			goto err_get_secctx_failed;
		}
		added_size = ALIGN(secctx_sz, sizeof(u64));
		extra_buffers_size += added_size;
		if (extra_buffers_size < added_size) {
			binder_txn_error("%d:%d integer overflow of extra_buffers_size\n",
				thread->pid, proc->pid);
			return_error = BR_FAILED_REPLY;
			return_error_param = -EINVAL;
			return_error_line = __LINE__;
			goto err_bad_extra_size;
		}
	}

	trace_binder_transaction(reply, t, target_node);

	t->buffer = binder_alloc_new_buf(&target_proc->alloc, tr->data_size,
		tr->offsets_size, extra_buffers_size,
		!reply && (t->flags & TF_ONE_WAY));
	if (IS_ERR(t->buffer)) {
		char *s;

		ret = PTR_ERR(t->buffer);
		s = (ret == -ESRCH) ? ": vma cleared, target dead or dying"
			: (ret == -ENOSPC) ? ": no space left"
			: (ret == -ENOMEM) ? ": memory allocation failed"
			: "";
		binder_txn_error("cannot allocate buffer%s", s);

		return_error_param = PTR_ERR(t->buffer);
		return_error = return_error_param == -ESRCH ?
			BR_DEAD_REPLY : BR_FAILED_REPLY;
		return_error_line = __LINE__;
		t->buffer = NULL;
		goto err_binder_alloc_buf_failed;
	}
	if (secctx) {
		int err;
		size_t buf_offset = ALIGN(tr->data_size, sizeof(void *)) +
				    ALIGN(tr->offsets_size, sizeof(void *)) +
				    ALIGN(extra_buffers_size, sizeof(void *)) -
				    ALIGN(secctx_sz, sizeof(u64));

		t->security_ctx = t->buffer->user_data + buf_offset;
		err = binder_alloc_copy_to_buffer(&target_proc->alloc,
						  t->buffer, buf_offset,
						  secctx, secctx_sz);
		if (err) {
			t->security_ctx = 0;
			WARN_ON(1);
		}
		security_release_secctx(secctx, secctx_sz);
		secctx = NULL;
	}
	t->buffer->debug_id = t->debug_id;
	t->buffer->transaction = t;
	t->buffer->target_node = target_node;
	t->buffer->clear_on_free = !!(t->flags & TF_CLEAR_BUF);
	trace_binder_transaction_alloc_buf(t->buffer);
	if (binder_alloc_copy_user_to_buffer(
				&target_proc->alloc,
				t->buffer,
				ALIGN(tr->data_size, sizeof(void *)),
				(const void __user *)
					(uintptr_t)tr->data.ptr.offsets,
				tr->offsets_size)) {
		binder_user_error("%d:%d got transaction with invalid offsets ptr\n",
				  proc->pid, thread->pid);
		return_error = BR_FAILED_REPLY;
		return_error_param = -EFAULT;
		return_error_line = __LINE__;
		goto err_copy_data_failed;
	}
	if (!IS_ALIGNED(tr->offsets_size, sizeof(binder_size_t))) {
		binder_user_error("%d:%d got transaction with invalid offsets size, %lld\n",
				  proc->pid, thread->pid, (u64)tr->offsets_size);
		return_error = BR_FAILED_REPLY;
		return_error_param = -EINVAL;
		return_error_line = __LINE__;
		goto err_bad_offset;
	}
	if (!IS_ALIGNED(extra_buffers_size, sizeof(u64))) {
		binder_user_error("%d:%d got transaction with unaligned buffers size, %lld\n",
				  proc->pid, thread->pid,
				  (u64)extra_buffers_size);
		return_error = BR_FAILED_REPLY;
		return_error_param = -EINVAL;
		return_error_line = __LINE__;
		goto err_bad_offset;
	}
	off_start_offset = ALIGN(tr->data_size, sizeof(void *));
	buffer_offset = off_start_offset;
	off_end_offset = off_start_offset + tr->offsets_size;
	sg_buf_offset = ALIGN(off_end_offset, sizeof(void *));
	sg_buf_end_offset = sg_buf_offset + extra_buffers_size -
		ALIGN(secctx_sz, sizeof(u64));
	off_min = 0;
	for (buffer_offset = off_start_offset; buffer_offset < off_end_offset;
	     buffer_offset += sizeof(binder_size_t)) {
		struct binder_object_header *hdr;
		size_t object_size;
		struct binder_object object;
		binder_size_t object_offset;
		binder_size_t copy_size;

		if (binder_alloc_copy_from_buffer(&target_proc->alloc,
						  &object_offset,
						  t->buffer,
						  buffer_offset,
						  sizeof(object_offset))) {
			binder_txn_error("%d:%d copy offset from buffer failed\n",
				thread->pid, proc->pid);
			return_error = BR_FAILED_REPLY;
			return_error_param = -EINVAL;
			return_error_line = __LINE__;
			goto err_bad_offset;
		}

		/*
		 * Copy the source user buffer up to the next object
		 * that will be processed.
		 */
		copy_size = object_offset - user_offset;
		if (copy_size && (user_offset > object_offset ||
				object_offset > tr->data_size ||
				binder_alloc_copy_user_to_buffer(
					&target_proc->alloc,
					t->buffer, user_offset,
					user_buffer + user_offset,
					copy_size))) {
			binder_user_error("%d:%d got transaction with invalid data ptr\n",
					  proc->pid, thread->pid);
			return_error = BR_FAILED_REPLY;
			return_error_param = -EFAULT;
			return_error_line = __LINE__;
			goto err_copy_data_failed;
		}
		object_size = binder_get_object(target_proc, user_buffer,
						t->buffer, object_offset, &object);
		if (object_size == 0 || object_offset < off_min) {
			binder_user_error("%d:%d got transaction with invalid offset (%lld, min %lld max %lld) or object.\n",
					  proc->pid, thread->pid,
					  (u64)object_offset,
					  (u64)off_min,
					  (u64)t->buffer->data_size);
			return_error = BR_FAILED_REPLY;
			return_error_param = -EINVAL;
			return_error_line = __LINE__;
			goto err_bad_offset;
		}
		/*
		 * Set offset to the next buffer fragment to be
		 * copied
		 */
		user_offset = object_offset + object_size;

		hdr = &object.hdr;
		off_min = object_offset + object_size;
		switch (hdr->type) {
		case BINDER_TYPE_BINDER:
		case BINDER_TYPE_WEAK_BINDER: {
			struct flat_binder_object *fp;

			fp = to_flat_binder_object(hdr);
			ret = binder_translate_binder(fp, t, thread);

			if (ret < 0 ||
			    binder_alloc_copy_to_buffer(&target_proc->alloc,
							t->buffer,
							object_offset,
							fp, sizeof(*fp))) {
				binder_txn_error("%d:%d translate binder failed\n",
					thread->pid, proc->pid);
				return_error = BR_FAILED_REPLY;
				return_error_param = ret;
				return_error_line = __LINE__;
				goto err_translate_failed;
			}
		} break;
		case BINDER_TYPE_HANDLE:
		case BINDER_TYPE_WEAK_HANDLE: {
			struct flat_binder_object *fp;

			fp = to_flat_binder_object(hdr);
			ret = binder_translate_handle(fp, t, thread);
			if (ret < 0 ||
			    binder_alloc_copy_to_buffer(&target_proc->alloc,
							t->buffer,
							object_offset,
							fp, sizeof(*fp))) {
				binder_txn_error("%d:%d translate handle failed\n",
					thread->pid, proc->pid);
				return_error = BR_FAILED_REPLY;
				return_error_param = ret;
				return_error_line = __LINE__;
				goto err_translate_failed;
			}
		} break;

		case BINDER_TYPE_FD: {
			struct binder_fd_object *fp = to_binder_fd_object(hdr);
			binder_size_t fd_offset = object_offset +
				(uintptr_t)&fp->fd - (uintptr_t)fp;
			int ret = binder_translate_fd(fp->fd, fd_offset, t,
						      thread, in_reply_to);

			fp->pad_binder = 0;
			if (ret < 0 ||
			    binder_alloc_copy_to_buffer(&target_proc->alloc,
							t->buffer,
							object_offset,
							fp, sizeof(*fp))) {
				binder_txn_error("%d:%d translate fd failed\n",
					thread->pid, proc->pid);
				return_error = BR_FAILED_REPLY;
				return_error_param = ret;
				return_error_line = __LINE__;
				goto err_translate_failed;
			}
		} break;
		case BINDER_TYPE_FDA: {
			struct binder_object ptr_object;
			binder_size_t parent_offset;
			struct binder_object user_object;
			size_t user_parent_size;
			struct binder_fd_array_object *fda =
				to_binder_fd_array_object(hdr);
			size_t num_valid = (buffer_offset - off_start_offset) /
						sizeof(binder_size_t);
			struct binder_buffer_object *parent =
				binder_validate_ptr(target_proc, t->buffer,
						    &ptr_object, fda->parent,
						    off_start_offset,
						    &parent_offset,
						    num_valid);
			if (!parent) {
				binder_user_error("%d:%d got transaction with invalid parent offset or type\n",
						  proc->pid, thread->pid);
				return_error = BR_FAILED_REPLY;
				return_error_param = -EINVAL;
				return_error_line = __LINE__;
				goto err_bad_parent;
			}
			if (!binder_validate_fixup(target_proc, t->buffer,
						   off_start_offset,
						   parent_offset,
						   fda->parent_offset,
						   last_fixup_obj_off,
						   last_fixup_min_off)) {
				binder_user_error("%d:%d got transaction with out-of-order buffer fixup\n",
						  proc->pid, thread->pid);
				return_error = BR_FAILED_REPLY;
				return_error_param = -EINVAL;
				return_error_line = __LINE__;
				goto err_bad_parent;
			}
			/*
			 * We need to read the user version of the parent
			 * object to get the original user offset
			 */
			user_parent_size =
				binder_get_object(proc, user_buffer, t->buffer,
						  parent_offset, &user_object);
			if (user_parent_size != sizeof(user_object.bbo)) {
				binder_user_error("%d:%d invalid ptr object size: %zd vs %zd\n",
						  proc->pid, thread->pid,
						  user_parent_size,
						  sizeof(user_object.bbo));
				return_error = BR_FAILED_REPLY;
				return_error_param = -EINVAL;
				return_error_line = __LINE__;
				goto err_bad_parent;
			}
			ret = binder_translate_fd_array(&pf_head, fda,
							user_buffer, parent,
							&user_object.bbo, t,
							thread, in_reply_to);
			if (!ret)
				ret = binder_alloc_copy_to_buffer(&target_proc->alloc,
								  t->buffer,
								  object_offset,
								  fda, sizeof(*fda));
			if (ret) {
				binder_txn_error("%d:%d translate fd array failed\n",
					thread->pid, proc->pid);
				return_error = BR_FAILED_REPLY;
				return_error_param = ret > 0 ? -EINVAL : ret;
				return_error_line = __LINE__;
				goto err_translate_failed;
			}
			last_fixup_obj_off = parent_offset;
			last_fixup_min_off =
				fda->parent_offset + sizeof(u32) * fda->num_fds;
		} break;
		case BINDER_TYPE_PTR: {
			struct binder_buffer_object *bp =
				to_binder_buffer_object(hdr);
			size_t buf_left = sg_buf_end_offset - sg_buf_offset;
			size_t num_valid;

			if (bp->length > buf_left) {
				binder_user_error("%d:%d got transaction with too large buffer\n",
						  proc->pid, thread->pid);
				return_error = BR_FAILED_REPLY;
				return_error_param = -EINVAL;
				return_error_line = __LINE__;
				goto err_bad_offset;
			}
			ret = binder_defer_copy(&sgc_head, sg_buf_offset,
				(const void __user *)(uintptr_t)bp->buffer,
				bp->length);
			if (ret) {
				binder_txn_error("%d:%d deferred copy failed\n",
					thread->pid, proc->pid);
				return_error = BR_FAILED_REPLY;
				return_error_param = ret;
				return_error_line = __LINE__;
				goto err_translate_failed;
			}
			/* Fixup buffer pointer to target proc address space */
			bp->buffer = t->buffer->user_data + sg_buf_offset;
			sg_buf_offset += ALIGN(bp->length, sizeof(u64));

			num_valid = (buffer_offset - off_start_offset) /
					sizeof(binder_size_t);
			ret = binder_fixup_parent(&pf_head, t,
						  thread, bp,
						  off_start_offset,
						  num_valid,
						  last_fixup_obj_off,
						  last_fixup_min_off);
			if (ret < 0 ||
			    binder_alloc_copy_to_buffer(&target_proc->alloc,
							t->buffer,
							object_offset,
							bp, sizeof(*bp))) {
				binder_txn_error("%d:%d failed to fixup parent\n",
					thread->pid, proc->pid);
				return_error = BR_FAILED_REPLY;
				return_error_param = ret;
				return_error_line = __LINE__;
				goto err_translate_failed;
			}
			last_fixup_obj_off = object_offset;
			last_fixup_min_off = 0;
		} break;
		default:
			binder_user_error("%d:%d got transaction with invalid object type, %x\n",
					  proc->pid, thread->pid, hdr->type);
			return_error = BR_FAILED_REPLY;
			return_error_param = -EINVAL;
			return_error_line = __LINE__;
			goto err_bad_object_type;
		}
	}
	/* Done processing objects, copy the rest of the buffer */
	if (binder_alloc_copy_user_to_buffer(
				&target_proc->alloc,
				t->buffer, user_offset,
				user_buffer + user_offset,
				tr->data_size - user_offset)) {
		binder_user_error("%d:%d got transaction with invalid data ptr\n",
				  proc->pid, thread->pid);
		return_error = BR_FAILED_REPLY;
		return_error_param = -EFAULT;
		return_error_line = __LINE__;
		goto err_copy_data_failed;
	}

	ret = binder_do_deferred_txn_copies(&target_proc->alloc, t->buffer,
					    &sgc_head, &pf_head);
	if (ret) {
		binder_user_error("%d:%d got transaction with invalid offsets ptr\n",
				  proc->pid, thread->pid);
		return_error = BR_FAILED_REPLY;
		return_error_param = ret;
		return_error_line = __LINE__;
		goto err_copy_data_failed;
	}
	if (t->buffer->oneway_spam_suspect)
		tcomplete->type = BINDER_WORK_TRANSACTION_ONEWAY_SPAM_SUSPECT;
	else
		tcomplete->type = BINDER_WORK_TRANSACTION_COMPLETE;
	t->work.type = BINDER_WORK_TRANSACTION;

	if (reply) {
		binder_enqueue_thread_work(thread, tcomplete);
		binder_inner_proc_lock(target_proc);
		if (target_thread->is_dead) {
			return_error = BR_DEAD_REPLY;
			binder_inner_proc_unlock(target_proc);
			goto err_dead_proc_or_thread;
		}
		BUG_ON(t->buffer->async_transaction != 0);
		binder_pop_transaction_ilocked(target_thread, in_reply_to);
		binder_enqueue_thread_work_ilocked(target_thread, &t->work);
		target_proc->outstanding_txns++;
		binder_inner_proc_unlock(target_proc);
		wake_up_interruptible_sync(&target_thread->wait);
		binder_free_transaction(in_reply_to);
	} else if (!(t->flags & TF_ONE_WAY)) {
		BUG_ON(t->buffer->async_transaction != 0);
		binder_inner_proc_lock(proc);
		/*
		 * Defer the TRANSACTION_COMPLETE, so we don't return to
		 * userspace immediately; this allows the target process to
		 * immediately start processing this transaction, reducing
		 * latency. We will then return the TRANSACTION_COMPLETE when
		 * the target replies (or there is an error).
		 */
		binder_enqueue_deferred_thread_work_ilocked(thread, tcomplete);
		t->need_reply = 1;
		t->from_parent = thread->transaction_stack;
		thread->transaction_stack = t;
		binder_inner_proc_unlock(proc);
		return_error = binder_proc_transaction(t,
				target_proc, target_thread);
		if (return_error) {
			binder_inner_proc_lock(proc);
			binder_pop_transaction_ilocked(thread, t);
			binder_inner_proc_unlock(proc);
			goto err_dead_proc_or_thread;
		}
	} else {
		BUG_ON(target_node == NULL);
		BUG_ON(t->buffer->async_transaction != 1);
		return_error = binder_proc_transaction(t, target_proc, NULL);
		/*
		 * Let the caller know when async transaction reaches a frozen
		 * process and is put in a pending queue, waiting for the target
		 * process to be unfrozen.
		 */
		if (return_error == BR_TRANSACTION_PENDING_FROZEN)
			tcomplete->type = BINDER_WORK_TRANSACTION_PENDING;
		binder_enqueue_thread_work(thread, tcomplete);
		if (return_error &&
		    return_error != BR_TRANSACTION_PENDING_FROZEN)
			goto err_dead_proc_or_thread;
	}
	if (target_thread)
		binder_thread_dec_tmpref(target_thread);
	binder_proc_dec_tmpref(target_proc);
	if (target_node)
		binder_dec_node_tmpref(target_node);
	/*
	 * write barrier to synchronize with initialization
	 * of log entry
	 */
	smp_wmb();
	WRITE_ONCE(e->debug_id_done, t_debug_id);
	return;
err_dead_proc_or_thread:
	binder_txn_error("%d:%d dead process or thread\n",
		thread->pid, proc->pid);
	return_error_line = __LINE__;
	binder_dequeue_work(proc, tcomplete);
err_translate_failed:
err_bad_object_type:
err_bad_offset:
err_bad_parent:
err_copy_data_failed:
	binder_cleanup_deferred_txn_lists(&sgc_head, &pf_head);
	binder_free_txn_fixups(t);
	trace_binder_transaction_failed_buffer_release(t->buffer);
	binder_transaction_buffer_release(target_proc, NULL, t->buffer,
					  buffer_offset, true);
	if (target_node)
		binder_dec_node_tmpref(target_node);
	target_node = NULL;
	t->buffer->transaction = NULL;
	binder_alloc_free_buf(&target_proc->alloc, t->buffer);
err_binder_alloc_buf_failed:
err_bad_extra_size:
	if (secctx)
		security_release_secctx(secctx, secctx_sz);
err_get_secctx_failed:
	kfree(tcomplete);
	binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
err_alloc_tcomplete_failed:
	if (trace_binder_txn_latency_free_enabled())
		binder_txn_latency_free(t);
	kfree(t);
	binder_stats_deleted(BINDER_STAT_TRANSACTION);
err_alloc_t_failed:
err_bad_todo_list:
err_bad_call_stack:
err_empty_call_stack:
err_dead_binder:
err_invalid_target_handle:
	if (target_node) {
		binder_dec_node(target_node, 1, 0);
		binder_dec_node_tmpref(target_node);
	}

	binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
		     "%d:%d transaction %s to %d:%d failed %d/%d/%d, size %lld-%lld line %d\n",
		     proc->pid, thread->pid, reply ? "reply" :
		     (tr->flags & TF_ONE_WAY ? "async" : "call"),
		     target_proc ? target_proc->pid : 0,
		     target_thread ? target_thread->pid : 0,
		     t_debug_id, return_error, return_error_param,
		     (u64)tr->data_size, (u64)tr->offsets_size,
		     return_error_line);

	if (target_thread)
		binder_thread_dec_tmpref(target_thread);
	if (target_proc)
		binder_proc_dec_tmpref(target_proc);

	{
		struct binder_transaction_log_entry *fe;

		e->return_error = return_error;
		e->return_error_param = return_error_param;
		e->return_error_line = return_error_line;
		fe = binder_transaction_log_add(&binder_transaction_log_failed);
		*fe = *e;
		/*
		 * write barrier to synchronize with initialization
		 * of log entry
		 */
		smp_wmb();
		WRITE_ONCE(e->debug_id_done, t_debug_id);
		WRITE_ONCE(fe->debug_id_done, t_debug_id);
	}

	BUG_ON(thread->return_error.cmd != BR_OK);
	if (in_reply_to) {
		binder_set_txn_from_error(in_reply_to, t_debug_id,
				return_error, return_error_param);
		thread->return_error.cmd = BR_TRANSACTION_COMPLETE;
		binder_enqueue_thread_work(thread, &thread->return_error.work);
		binder_send_failed_reply(in_reply_to, return_error);
	} else {
		binder_inner_proc_lock(proc);
		binder_set_extended_error(&thread->ee, t_debug_id,
				return_error, return_error_param);
		binder_inner_proc_unlock(proc);
		thread->return_error.cmd = return_error;
		binder_enqueue_thread_work(thread, &thread->return_error.work);
	}
}
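/*
 * Illustrative user-space sketch (not compiled): how a sender lays out the
 * data/offsets pair that binder_transaction() walks above. One
 * flat_binder_object is embedded in the payload and its byte offset is
 * recorded in the offsets array. "binder_fd" is assumed to be an
 * already-open binder device descriptor and the target handle is assumed
 * valid; headers, setup, and error handling are omitted.
 */
#if 0
struct example_payload {
	__u32 value;
	struct flat_binder_object obj;	/* padded to 8-byte alignment */
};

static void send_example_txn(int binder_fd, __u32 target_handle)
{
	struct example_payload data = {
		.value = 42,
		.obj = {
			.hdr.type = BINDER_TYPE_BINDER,
			.binder = (binder_uintptr_t)&data,	/* local node */
		},
	};
	binder_size_t offsets[] = { offsetof(struct example_payload, obj) };
	struct {
		__u32 cmd;
		struct binder_transaction_data tr;
	} __attribute__((packed)) writebuf = {
		.cmd = BC_TRANSACTION,
		.tr = {
			.target.handle = target_handle,
			.code = 1,
			.data_size = sizeof(data),
			.offsets_size = sizeof(offsets),
			.data.ptr.buffer = (binder_uintptr_t)&data,
			.data.ptr.offsets = (binder_uintptr_t)offsets,
		},
	};
	struct binder_write_read bwr = {
		.write_size = sizeof(writebuf),
		.write_buffer = (binder_uintptr_t)&writebuf,
	};

	ioctl(binder_fd, BINDER_WRITE_READ, &bwr);
}
#endif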
static int
binder_request_freeze_notification(struct binder_proc *proc,
				   struct binder_thread *thread,
				   struct binder_handle_cookie *handle_cookie)
{
	struct binder_ref_freeze *freeze;
	struct binder_ref *ref;

	freeze = kzalloc(sizeof(*freeze), GFP_KERNEL);
	if (!freeze)
		return -ENOMEM;
	binder_proc_lock(proc);
	ref = binder_get_ref_olocked(proc, handle_cookie->handle, false);
	if (!ref) {
		binder_user_error("%d:%d BC_REQUEST_FREEZE_NOTIFICATION invalid ref %d\n",
				  proc->pid, thread->pid, handle_cookie->handle);
		binder_proc_unlock(proc);
		kfree(freeze);
		return -EINVAL;
	}

	binder_node_lock(ref->node);
	if (ref->freeze) {
		binder_user_error("%d:%d BC_REQUEST_FREEZE_NOTIFICATION already set\n",
				  proc->pid, thread->pid);
		binder_node_unlock(ref->node);
		binder_proc_unlock(proc);
		kfree(freeze);
		return -EINVAL;
	}

	binder_stats_created(BINDER_STAT_FREEZE);
	INIT_LIST_HEAD(&freeze->work.entry);
	freeze->cookie = handle_cookie->cookie;
	freeze->work.type = BINDER_WORK_FROZEN_BINDER;
	ref->freeze = freeze;

	if (ref->node->proc) {
		binder_inner_proc_lock(ref->node->proc);
		freeze->is_frozen = ref->node->proc->is_frozen;
		binder_inner_proc_unlock(ref->node->proc);

		binder_inner_proc_lock(proc);
		binder_enqueue_work_ilocked(&freeze->work, &proc->todo);
		binder_wakeup_proc_ilocked(proc);
		binder_inner_proc_unlock(proc);
	}

	binder_node_unlock(ref->node);
	binder_proc_unlock(proc);
	return 0;
}
static int
binder_clear_freeze_notification(struct binder_proc *proc,
				 struct binder_thread *thread,
				 struct binder_handle_cookie *handle_cookie)
{
	struct binder_ref_freeze *freeze;
	struct binder_ref *ref;

	binder_proc_lock(proc);
	ref = binder_get_ref_olocked(proc, handle_cookie->handle, false);
	if (!ref) {
		binder_user_error("%d:%d BC_CLEAR_FREEZE_NOTIFICATION invalid ref %d\n",
				  proc->pid, thread->pid, handle_cookie->handle);
		binder_proc_unlock(proc);
		return -EINVAL;
	}

	binder_node_lock(ref->node);

	if (!ref->freeze) {
		binder_user_error("%d:%d BC_CLEAR_FREEZE_NOTIFICATION freeze notification not active\n",
				  proc->pid, thread->pid);
		binder_node_unlock(ref->node);
		binder_proc_unlock(proc);
		return -EINVAL;
	}
	freeze = ref->freeze;
	binder_inner_proc_lock(proc);
	if (freeze->cookie != handle_cookie->cookie) {
		binder_user_error("%d:%d BC_CLEAR_FREEZE_NOTIFICATION freeze notification cookie mismatch %016llx != %016llx\n",
				  proc->pid, thread->pid, (u64)freeze->cookie,
				  (u64)handle_cookie->cookie);
		binder_inner_proc_unlock(proc);
		binder_node_unlock(ref->node);
		binder_proc_unlock(proc);
		return -EINVAL;
	}
	ref->freeze = NULL;
	/*
	 * Take the existing freeze object and overwrite its work type. There are three cases here:
	 * 1. No pending notification. In this case just add the work to the queue.
	 * 2. A notification was sent and is pending an ack from userspace. Once an ack arrives, we
	 *    should resend with the new work type.
	 * 3. A notification is pending to be sent. Since the work is already in the queue, nothing
	 *    needs to be done here.
	 */
	freeze->work.type = BINDER_WORK_CLEAR_FREEZE_NOTIFICATION;
	if (list_empty(&freeze->work.entry)) {
		binder_enqueue_work_ilocked(&freeze->work, &proc->todo);
		binder_wakeup_proc_ilocked(proc);
	} else if (freeze->sent) {
		freeze->resend = true;
	}
	binder_inner_proc_unlock(proc);
	binder_node_unlock(ref->node);
	binder_proc_unlock(proc);
	return 0;
}

static int
binder_freeze_notification_done(struct binder_proc *proc,
				struct binder_thread *thread,
				binder_uintptr_t cookie)
{
	struct binder_ref_freeze *freeze = NULL;
	struct binder_work *w;

	binder_inner_proc_lock(proc);
	list_for_each_entry(w, &proc->delivered_freeze, entry) {
		struct binder_ref_freeze *tmp_freeze =
			container_of(w, struct binder_ref_freeze, work);

		if (tmp_freeze->cookie == cookie) {
			freeze = tmp_freeze;
			break;
		}
	}
	if (!freeze) {
		binder_user_error("%d:%d BC_FREEZE_NOTIFICATION_DONE %016llx not found\n",
				  proc->pid, thread->pid, (u64)cookie);
		binder_inner_proc_unlock(proc);
		return -EINVAL;
	}
	binder_dequeue_work_ilocked(&freeze->work);
	freeze->sent = false;
	if (freeze->resend) {
		freeze->resend = false;
		binder_enqueue_work_ilocked(&freeze->work, &proc->todo);
		binder_wakeup_proc_ilocked(proc);
	}
	binder_inner_proc_unlock(proc);
	return 0;
}
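
/*
 * Illustrative sketch of the ack handshake this function completes (assumed
 * userspace code, not part of the driver): after the read loop delivers
 * BR_FROZEN_BINDER with a struct binder_frozen_state_info, the client echoes
 * the cookie back so the driver can move the work item off
 * proc->delivered_freeze (and resend if the state flipped meanwhile):
 *
 *	struct {
 *		uint32_t bc;			// BC_FREEZE_NOTIFICATION_DONE
 *		binder_uintptr_t cookie;	// info.cookie from BR_FROZEN_BINDER
 *	} __attribute__((packed)) done = {
 *		BC_FREEZE_NOTIFICATION_DONE, info.cookie
 *	};
 *	struct binder_write_read bwr = {0};
 *
 *	bwr.write_buffer = (binder_uintptr_t)&done;
 *	bwr.write_size = sizeof(done);
 *	ioctl(fd, BINDER_WRITE_READ, &bwr);
 */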

/**
 * binder_free_buf() - free the specified buffer
 * @proc:	binder proc that owns buffer
 * @thread:	binder thread performing the buffer release
 * @buffer:	buffer to be freed
 * @is_failure:	failed to send transaction
 *
 * If the buffer is for an async transaction, enqueue the next async
 * transaction from the node.
 *
 * Cleanup buffer and free it.
 */
static void
binder_free_buf(struct binder_proc *proc,
		struct binder_thread *thread,
		struct binder_buffer *buffer, bool is_failure)
{
	binder_inner_proc_lock(proc);
	if (buffer->transaction) {
		buffer->transaction->buffer = NULL;
		buffer->transaction = NULL;
	}
	binder_inner_proc_unlock(proc);
	if (buffer->async_transaction && buffer->target_node) {
		struct binder_node *buf_node;
		struct binder_work *w;

		buf_node = buffer->target_node;
		binder_node_inner_lock(buf_node);
		BUG_ON(!buf_node->has_async_transaction);
		BUG_ON(buf_node->proc != proc);
		w = binder_dequeue_work_head_ilocked(
				&buf_node->async_todo);
		if (!w) {
			buf_node->has_async_transaction = false;
		} else {
			binder_enqueue_work_ilocked(
					w, &proc->todo);
			binder_wakeup_proc_ilocked(proc);
		}
		binder_node_inner_unlock(buf_node);
	}
	trace_binder_transaction_buffer_release(buffer);
	binder_release_entire_buffer(proc, thread, buffer, is_failure);
	binder_alloc_free_buf(&proc->alloc, buffer);
}
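
/*
 * Illustrative sketch (assumed userspace code, not part of the driver): once
 * a recipient is done with the payload of a BR_TRANSACTION/BR_REPLY, it
 * returns the buffer with BC_FREE_BUFFER, which lands in the BC_FREE_BUFFER
 * case below and ends up here. `td` is the binder_transaction_data received
 * earlier:
 *
 *	struct {
 *		uint32_t bc;		// BC_FREE_BUFFER
 *		binder_uintptr_t ptr;	// td.data.ptr.buffer
 *	} __attribute__((packed)) free_cmd = {
 *		BC_FREE_BUFFER, td.data.ptr.buffer
 *	};
 *	struct binder_write_read bwr = {0};
 *
 *	bwr.write_buffer = (binder_uintptr_t)&free_cmd;
 *	bwr.write_size = sizeof(free_cmd);
 *	ioctl(fd, BINDER_WRITE_READ, &bwr);
 */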

static int binder_thread_write(struct binder_proc *proc,
			struct binder_thread *thread,
			binder_uintptr_t binder_buffer, size_t size,
			binder_size_t *consumed)
{
	uint32_t cmd;
	struct binder_context *context = proc->context;
	void __user *buffer = (void __user *)(uintptr_t)binder_buffer;
	void __user *ptr = buffer + *consumed;
	void __user *end = buffer + size;

	while (ptr < end && thread->return_error.cmd == BR_OK) {
		int ret;

		if (get_user(cmd, (uint32_t __user *)ptr))
			return -EFAULT;
		ptr += sizeof(uint32_t);
		trace_binder_command(cmd);
		if (_IOC_NR(cmd) < ARRAY_SIZE(binder_stats.bc)) {
			atomic_inc(&binder_stats.bc[_IOC_NR(cmd)]);
			atomic_inc(&proc->stats.bc[_IOC_NR(cmd)]);
			atomic_inc(&thread->stats.bc[_IOC_NR(cmd)]);
		}
		switch (cmd) {
		case BC_INCREFS:
		case BC_ACQUIRE:
		case BC_RELEASE:
		case BC_DECREFS: {
			uint32_t target;
			const char *debug_string;
			bool strong = cmd == BC_ACQUIRE || cmd == BC_RELEASE;
			bool increment = cmd == BC_INCREFS || cmd == BC_ACQUIRE;
			struct binder_ref_data rdata;

			if (get_user(target, (uint32_t __user *)ptr))
				return -EFAULT;

			ptr += sizeof(uint32_t);
			ret = -1;
			if (increment && !target) {
				struct binder_node *ctx_mgr_node;

				mutex_lock(&context->context_mgr_node_lock);
				ctx_mgr_node = context->binder_context_mgr_node;
				if (ctx_mgr_node) {
					if (ctx_mgr_node->proc == proc) {
						binder_user_error("%d:%d context manager tried to acquire desc 0\n",
								  proc->pid, thread->pid);
						mutex_unlock(&context->context_mgr_node_lock);
						return -EINVAL;
					}
					ret = binder_inc_ref_for_node(
							proc, ctx_mgr_node,
							strong, NULL, &rdata);
				}
				mutex_unlock(&context->context_mgr_node_lock);
			}
			if (ret)
				ret = binder_update_ref_for_handle(
						proc, target, increment, strong,
						&rdata);
			if (!ret && rdata.desc != target) {
				binder_user_error("%d:%d tried to acquire reference to desc %d, got %d instead\n",
						  proc->pid, thread->pid,
						  target, rdata.desc);
			}
			switch (cmd) {
			case BC_INCREFS:
				debug_string = "IncRefs";
				break;
			case BC_ACQUIRE:
				debug_string = "Acquire";
				break;
			case BC_RELEASE:
				debug_string = "Release";
				break;
			case BC_DECREFS:
			default:
				debug_string = "DecRefs";
				break;
			}
			if (ret) {
				binder_user_error("%d:%d %s %d refcount change on invalid ref %d ret %d\n",
						  proc->pid, thread->pid, debug_string,
						  strong, target, ret);
				break;
			}
			binder_debug(BINDER_DEBUG_USER_REFS,
				     "%d:%d %s ref %d desc %d s %d w %d\n",
				     proc->pid, thread->pid, debug_string,
				     rdata.debug_id, rdata.desc, rdata.strong,
				     rdata.weak);
			break;
		}
		case BC_INCREFS_DONE:
		case BC_ACQUIRE_DONE: {
			binder_uintptr_t node_ptr;
			binder_uintptr_t cookie;
			struct binder_node *node;
			bool free_node;

			if (get_user(node_ptr, (binder_uintptr_t __user *)ptr))
				return -EFAULT;
			ptr += sizeof(binder_uintptr_t);
			if (get_user(cookie, (binder_uintptr_t __user *)ptr))
				return -EFAULT;
			ptr += sizeof(binder_uintptr_t);
			node = binder_get_node(proc, node_ptr);
			if (!node) {
				binder_user_error("%d:%d %s u%016llx no match\n",
						  proc->pid, thread->pid,
						  cmd == BC_INCREFS_DONE ?
						  "BC_INCREFS_DONE" :
						  "BC_ACQUIRE_DONE",
						  (u64)node_ptr);
				break;
			}
			if (cookie != node->cookie) {
				binder_user_error("%d:%d %s u%016llx node %d cookie mismatch %016llx != %016llx\n",
						  proc->pid, thread->pid,
						  cmd == BC_INCREFS_DONE ?
						  "BC_INCREFS_DONE" : "BC_ACQUIRE_DONE",
						  (u64)node_ptr, node->debug_id,
						  (u64)cookie, (u64)node->cookie);
				binder_put_node(node);
				break;
			}
			binder_node_inner_lock(node);
			if (cmd == BC_ACQUIRE_DONE) {
				if (node->pending_strong_ref == 0) {
					binder_user_error("%d:%d BC_ACQUIRE_DONE node %d has no pending acquire request\n",
							  proc->pid, thread->pid,
							  node->debug_id);
					binder_node_inner_unlock(node);
					binder_put_node(node);
					break;
				}
				node->pending_strong_ref = 0;
			} else {
				if (node->pending_weak_ref == 0) {
					binder_user_error("%d:%d BC_INCREFS_DONE node %d has no pending increfs request\n",
							  proc->pid, thread->pid,
							  node->debug_id);
					binder_node_inner_unlock(node);
					binder_put_node(node);
					break;
				}
				node->pending_weak_ref = 0;
			}
			free_node = binder_dec_node_nilocked(node,
					cmd == BC_ACQUIRE_DONE, 0);
			WARN_ON(free_node);
			binder_debug(BINDER_DEBUG_USER_REFS,
				     "%d:%d %s node %d ls %d lw %d tr %d\n",
				     proc->pid, thread->pid,
				     cmd == BC_INCREFS_DONE ? "BC_INCREFS_DONE" : "BC_ACQUIRE_DONE",
				     node->debug_id, node->local_strong_refs,
				     node->local_weak_refs, node->tmp_refs);
			binder_node_inner_unlock(node);
			binder_put_node(node);
			break;
		}
		case BC_ATTEMPT_ACQUIRE:
			pr_err("BC_ATTEMPT_ACQUIRE not supported\n");
			return -EINVAL;
		case BC_ACQUIRE_RESULT:
			pr_err("BC_ACQUIRE_RESULT not supported\n");
			return -EINVAL;

		case BC_FREE_BUFFER: {
			binder_uintptr_t data_ptr;
			struct binder_buffer *buffer;

			if (get_user(data_ptr, (binder_uintptr_t __user *)ptr))
				return -EFAULT;
			ptr += sizeof(binder_uintptr_t);

			buffer = binder_alloc_prepare_to_free(&proc->alloc,
							      data_ptr);
			if (IS_ERR_OR_NULL(buffer)) {
				if (PTR_ERR(buffer) == -EPERM) {
					binder_user_error(
						"%d:%d BC_FREE_BUFFER u%016llx matched unreturned or currently freeing buffer\n",
						proc->pid, thread->pid,
						(u64)data_ptr);
				} else {
					binder_user_error(
						"%d:%d BC_FREE_BUFFER u%016llx no match\n",
						proc->pid, thread->pid,
						(u64)data_ptr);
				}
				break;
			}
			binder_debug(BINDER_DEBUG_FREE_BUFFER,
				     "%d:%d BC_FREE_BUFFER u%016llx found buffer %d for %s transaction\n",
				     proc->pid, thread->pid, (u64)data_ptr,
				     buffer->debug_id,
				     buffer->transaction ? "active" : "finished");
			binder_free_buf(proc, thread, buffer, false);
			break;
		}

		case BC_TRANSACTION_SG:
		case BC_REPLY_SG: {
			struct binder_transaction_data_sg tr;

			if (copy_from_user(&tr, ptr, sizeof(tr)))
				return -EFAULT;
			ptr += sizeof(tr);
			binder_transaction(proc, thread, &tr.transaction_data,
					   cmd == BC_REPLY_SG, tr.buffers_size);
			break;
		}
		case BC_TRANSACTION:
		case BC_REPLY: {
			struct binder_transaction_data tr;

			if (copy_from_user(&tr, ptr, sizeof(tr)))
				return -EFAULT;
			ptr += sizeof(tr);
			binder_transaction(proc, thread, &tr,
					   cmd == BC_REPLY, 0);
			break;
		}

		case BC_REGISTER_LOOPER:
			binder_debug(BINDER_DEBUG_THREADS,
				     "%d:%d BC_REGISTER_LOOPER\n",
				     proc->pid, thread->pid);
			binder_inner_proc_lock(proc);
			if (thread->looper & BINDER_LOOPER_STATE_ENTERED) {
				thread->looper |= BINDER_LOOPER_STATE_INVALID;
				binder_user_error("%d:%d ERROR: BC_REGISTER_LOOPER called after BC_ENTER_LOOPER\n",
					proc->pid, thread->pid);
			} else if (proc->requested_threads == 0) {
				thread->looper |= BINDER_LOOPER_STATE_INVALID;
				binder_user_error("%d:%d ERROR: BC_REGISTER_LOOPER called without request\n",
					proc->pid, thread->pid);
			} else {
				proc->requested_threads--;
				proc->requested_threads_started++;
			}
			thread->looper |= BINDER_LOOPER_STATE_REGISTERED;
			binder_inner_proc_unlock(proc);
			break;
		case BC_ENTER_LOOPER:
			binder_debug(BINDER_DEBUG_THREADS,
				     "%d:%d BC_ENTER_LOOPER\n",
				     proc->pid, thread->pid);
			if (thread->looper & BINDER_LOOPER_STATE_REGISTERED) {
				thread->looper |= BINDER_LOOPER_STATE_INVALID;
				binder_user_error("%d:%d ERROR: BC_ENTER_LOOPER called after BC_REGISTER_LOOPER\n",
					proc->pid, thread->pid);
			}
			thread->looper |= BINDER_LOOPER_STATE_ENTERED;
			break;
		case BC_EXIT_LOOPER:
			binder_debug(BINDER_DEBUG_THREADS,
				     "%d:%d BC_EXIT_LOOPER\n",
				     proc->pid, thread->pid);
			thread->looper |= BINDER_LOOPER_STATE_EXITED;
			break;

		case BC_REQUEST_DEATH_NOTIFICATION:
		case BC_CLEAR_DEATH_NOTIFICATION: {
			uint32_t target;
			binder_uintptr_t cookie;
			struct binder_ref *ref;
			struct binder_ref_death *death = NULL;

			if (get_user(target, (uint32_t __user *)ptr))
				return -EFAULT;
			ptr += sizeof(uint32_t);
			if (get_user(cookie, (binder_uintptr_t __user *)ptr))
				return -EFAULT;
			ptr += sizeof(binder_uintptr_t);
			if (cmd == BC_REQUEST_DEATH_NOTIFICATION) {
				/*
				 * Allocate memory for death notification
				 * before taking lock
				 */
				death = kzalloc(sizeof(*death), GFP_KERNEL);
				if (death == NULL) {
					WARN_ON(thread->return_error.cmd !=
						BR_OK);
					thread->return_error.cmd = BR_ERROR;
					binder_enqueue_thread_work(
						thread,
						&thread->return_error.work);
					binder_debug(
						BINDER_DEBUG_FAILED_TRANSACTION,
						"%d:%d BC_REQUEST_DEATH_NOTIFICATION failed\n",
						proc->pid, thread->pid);
					break;
				}
			}
			binder_proc_lock(proc);
			ref = binder_get_ref_olocked(proc, target, false);
			if (ref == NULL) {
				binder_user_error("%d:%d %s invalid ref %d\n",
					proc->pid, thread->pid,
					cmd == BC_REQUEST_DEATH_NOTIFICATION ?
					"BC_REQUEST_DEATH_NOTIFICATION" :
					"BC_CLEAR_DEATH_NOTIFICATION",
					target);
				binder_proc_unlock(proc);
				kfree(death);
				break;
			}

			binder_debug(BINDER_DEBUG_DEATH_NOTIFICATION,
				     "%d:%d %s %016llx ref %d desc %d s %d w %d for node %d\n",
				     proc->pid, thread->pid,
				     cmd == BC_REQUEST_DEATH_NOTIFICATION ?
				     "BC_REQUEST_DEATH_NOTIFICATION" :
				     "BC_CLEAR_DEATH_NOTIFICATION",
				     (u64)cookie, ref->data.debug_id,
				     ref->data.desc, ref->data.strong,
				     ref->data.weak, ref->node->debug_id);

			binder_node_lock(ref->node);
			if (cmd == BC_REQUEST_DEATH_NOTIFICATION) {
				if (ref->death) {
					binder_user_error("%d:%d BC_REQUEST_DEATH_NOTIFICATION death notification already set\n",
						proc->pid, thread->pid);
					binder_node_unlock(ref->node);
					binder_proc_unlock(proc);
					kfree(death);
					break;
				}
				binder_stats_created(BINDER_STAT_DEATH);
				INIT_LIST_HEAD(&death->work.entry);
				death->cookie = cookie;
				ref->death = death;
				if (ref->node->proc == NULL) {
					ref->death->work.type = BINDER_WORK_DEAD_BINDER;

					binder_inner_proc_lock(proc);
					binder_enqueue_work_ilocked(
						&ref->death->work, &proc->todo);
					binder_wakeup_proc_ilocked(proc);
					binder_inner_proc_unlock(proc);
				}
			} else {
				if (ref->death == NULL) {
					binder_user_error("%d:%d BC_CLEAR_DEATH_NOTIFICATION death notification not active\n",
						proc->pid, thread->pid);
					binder_node_unlock(ref->node);
					binder_proc_unlock(proc);
					break;
				}
				death = ref->death;
				if (death->cookie != cookie) {
					binder_user_error("%d:%d BC_CLEAR_DEATH_NOTIFICATION death notification cookie mismatch %016llx != %016llx\n",
						proc->pid, thread->pid,
						(u64)death->cookie,
						(u64)cookie);
					binder_node_unlock(ref->node);
					binder_proc_unlock(proc);
					break;
				}
				ref->death = NULL;
				binder_inner_proc_lock(proc);
				if (list_empty(&death->work.entry)) {
					death->work.type = BINDER_WORK_CLEAR_DEATH_NOTIFICATION;
					if (thread->looper &
					    (BINDER_LOOPER_STATE_REGISTERED |
					     BINDER_LOOPER_STATE_ENTERED))
						binder_enqueue_thread_work_ilocked(
								thread,
								&death->work);
					else {
						binder_enqueue_work_ilocked(
								&death->work,
								&proc->todo);
						binder_wakeup_proc_ilocked(
								proc);
					}
				} else {
					BUG_ON(death->work.type != BINDER_WORK_DEAD_BINDER);
					death->work.type = BINDER_WORK_DEAD_BINDER_AND_CLEAR;
				}
				binder_inner_proc_unlock(proc);
			}
			binder_node_unlock(ref->node);
			binder_proc_unlock(proc);
		} break;
		case BC_DEAD_BINDER_DONE: {
			struct binder_work *w;
			binder_uintptr_t cookie;
			struct binder_ref_death *death = NULL;

			if (get_user(cookie, (binder_uintptr_t __user *)ptr))
				return -EFAULT;

			ptr += sizeof(cookie);
			binder_inner_proc_lock(proc);
			list_for_each_entry(w, &proc->delivered_death,
					    entry) {
				struct binder_ref_death *tmp_death =
					container_of(w,
						     struct binder_ref_death,
						     work);

				if (tmp_death->cookie == cookie) {
					death = tmp_death;
					break;
				}
			}
			binder_debug(BINDER_DEBUG_DEAD_BINDER,
				     "%d:%d BC_DEAD_BINDER_DONE %016llx found %pK\n",
				     proc->pid, thread->pid, (u64)cookie,
				     death);
			if (death == NULL) {
				binder_user_error("%d:%d BC_DEAD_BINDER_DONE %016llx not found\n",
					proc->pid, thread->pid, (u64)cookie);
				binder_inner_proc_unlock(proc);
				break;
			}
			binder_dequeue_work_ilocked(&death->work);
			if (death->work.type == BINDER_WORK_DEAD_BINDER_AND_CLEAR) {
				death->work.type = BINDER_WORK_CLEAR_DEATH_NOTIFICATION;
				if (thread->looper &
				    (BINDER_LOOPER_STATE_REGISTERED |
				     BINDER_LOOPER_STATE_ENTERED))
					binder_enqueue_thread_work_ilocked(
						thread, &death->work);
				else {
					binder_enqueue_work_ilocked(
							&death->work,
							&proc->todo);
					binder_wakeup_proc_ilocked(proc);
				}
			}
			binder_inner_proc_unlock(proc);
		} break;

		case BC_REQUEST_FREEZE_NOTIFICATION: {
			struct binder_handle_cookie handle_cookie;
			int error;

			if (copy_from_user(&handle_cookie, ptr, sizeof(handle_cookie)))
				return -EFAULT;
			ptr += sizeof(handle_cookie);
			error = binder_request_freeze_notification(proc, thread,
								   &handle_cookie);
			if (error)
				return error;
		} break;

		case BC_CLEAR_FREEZE_NOTIFICATION: {
			struct binder_handle_cookie handle_cookie;
			int error;

			if (copy_from_user(&handle_cookie, ptr, sizeof(handle_cookie)))
				return -EFAULT;
			ptr += sizeof(handle_cookie);
			error = binder_clear_freeze_notification(proc, thread, &handle_cookie);
			if (error)
				return error;
		} break;

		case BC_FREEZE_NOTIFICATION_DONE: {
			binder_uintptr_t cookie;
			int error;

			if (get_user(cookie, (binder_uintptr_t __user *)ptr))
				return -EFAULT;

			ptr += sizeof(cookie);
			error = binder_freeze_notification_done(proc, thread, cookie);
			if (error)
				return error;
		} break;

		default:
			pr_err("%d:%d unknown command %u\n",
			       proc->pid, thread->pid, cmd);
			return -EINVAL;
		}
		*consumed = ptr - buffer;
	}
	return 0;
}
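
/*
 * Illustrative looper setup (assumed userspace code, not part of the
 * driver): the protocol parsed above expects a thread to announce itself
 * before blocking for work. A process's main thread writes BC_ENTER_LOOPER
 * on its own initiative; threads spawned in response to BR_SPAWN_LOOPER
 * write BC_REGISTER_LOOPER instead (see the checks in the two cases above):
 *
 *	uint32_t bc = BC_ENTER_LOOPER;	// or BC_REGISTER_LOOPER
 *	struct binder_write_read bwr = {0};
 *
 *	bwr.write_buffer = (binder_uintptr_t)&bc;
 *	bwr.write_size = sizeof(bc);
 *	ioctl(fd, BINDER_WRITE_READ, &bwr);
 */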

static void binder_stat_br(struct binder_proc *proc,
			   struct binder_thread *thread, uint32_t cmd)
{
	trace_binder_return(cmd);
	if (_IOC_NR(cmd) < ARRAY_SIZE(binder_stats.br)) {
		atomic_inc(&binder_stats.br[_IOC_NR(cmd)]);
		atomic_inc(&proc->stats.br[_IOC_NR(cmd)]);
		atomic_inc(&thread->stats.br[_IOC_NR(cmd)]);
	}
}

static int binder_put_node_cmd(struct binder_proc *proc,
			       struct binder_thread *thread,
			       void __user **ptrp,
			       binder_uintptr_t node_ptr,
			       binder_uintptr_t node_cookie,
			       int node_debug_id,
			       uint32_t cmd, const char *cmd_name)
{
	void __user *ptr = *ptrp;

	if (put_user(cmd, (uint32_t __user *)ptr))
		return -EFAULT;
	ptr += sizeof(uint32_t);

	if (put_user(node_ptr, (binder_uintptr_t __user *)ptr))
		return -EFAULT;
	ptr += sizeof(binder_uintptr_t);

	if (put_user(node_cookie, (binder_uintptr_t __user *)ptr))
		return -EFAULT;
	ptr += sizeof(binder_uintptr_t);

	binder_stat_br(proc, thread, cmd);
	binder_debug(BINDER_DEBUG_USER_REFS, "%d:%d %s %d u%016llx c%016llx\n",
		     proc->pid, thread->pid, cmd_name, node_debug_id,
		     (u64)node_ptr, (u64)node_cookie);

	*ptrp = ptr;
	return 0;
}

static int binder_wait_for_work(struct binder_thread *thread,
				bool do_proc_work)
{
	DEFINE_WAIT(wait);
	struct binder_proc *proc = thread->proc;
	int ret = 0;

	binder_inner_proc_lock(proc);
	for (;;) {
		prepare_to_wait(&thread->wait, &wait, TASK_INTERRUPTIBLE|TASK_FREEZABLE);
		if (binder_has_work_ilocked(thread, do_proc_work))
			break;
		if (do_proc_work)
			list_add(&thread->waiting_thread_node,
				 &proc->waiting_threads);
		binder_inner_proc_unlock(proc);
		schedule();
		binder_inner_proc_lock(proc);
		list_del_init(&thread->waiting_thread_node);
		if (signal_pending(current)) {
			ret = -EINTR;
			break;
		}
	}
	finish_wait(&thread->wait, &wait);
	binder_inner_proc_unlock(proc);

	return ret;
}

/**
 * binder_apply_fd_fixups() - finish fd translation
 * @proc:	binder_proc associated with @t->buffer
 * @t:		binder transaction with list of fd fixups
 *
 * Now that we are in the context of the transaction target
 * process, we can allocate and install fds. Process the
 * list of fds to translate and fixup the buffer with the
 * new fds first and only then install the files.
 *
 * If we fail to allocate an fd, skip the install and release
 * any fds that have already been allocated.
 */
static int binder_apply_fd_fixups(struct binder_proc *proc,
				  struct binder_transaction *t)
{
	struct binder_txn_fd_fixup *fixup, *tmp;
	int ret = 0;

	list_for_each_entry(fixup, &t->fd_fixups, fixup_entry) {
		int fd = get_unused_fd_flags(O_CLOEXEC);

		if (fd < 0) {
			binder_debug(BINDER_DEBUG_TRANSACTION,
				     "failed fd fixup txn %d fd %d\n",
				     t->debug_id, fd);
			ret = -ENOMEM;
			goto err;
		}
		binder_debug(BINDER_DEBUG_TRANSACTION,
			     "fd fixup txn %d fd %d\n",
			     t->debug_id, fd);
		trace_binder_transaction_fd_recv(t, fd, fixup->offset);
		fixup->target_fd = fd;
		if (binder_alloc_copy_to_buffer(&proc->alloc, t->buffer,
						fixup->offset, &fd,
						sizeof(u32))) {
			ret = -EINVAL;
			goto err;
		}
	}
	list_for_each_entry_safe(fixup, tmp, &t->fd_fixups, fixup_entry) {
		fd_install(fixup->target_fd, fixup->file);
		list_del(&fixup->fixup_entry);
		kfree(fixup);
	}

	return ret;

err:
	binder_free_txn_fixups(t);
	return ret;
}

static int binder_thread_read(struct binder_proc *proc,
			      struct binder_thread *thread,
			      binder_uintptr_t binder_buffer, size_t size,
			      binder_size_t *consumed, int non_block)
{
	void __user *buffer = (void __user *)(uintptr_t)binder_buffer;
	void __user *ptr = buffer + *consumed;
	void __user *end = buffer + size;

	int ret = 0;
	int wait_for_proc_work;

	if (*consumed == 0) {
		if (put_user(BR_NOOP, (uint32_t __user *)ptr))
			return -EFAULT;
		ptr += sizeof(uint32_t);
	}

retry:
	binder_inner_proc_lock(proc);
	wait_for_proc_work = binder_available_for_proc_work_ilocked(thread);
	binder_inner_proc_unlock(proc);

	thread->looper |= BINDER_LOOPER_STATE_WAITING;

	trace_binder_wait_for_work(wait_for_proc_work,
				   !!thread->transaction_stack,
				   !binder_worklist_empty(proc, &thread->todo));
	if (wait_for_proc_work) {
		if (!(thread->looper & (BINDER_LOOPER_STATE_REGISTERED |
					BINDER_LOOPER_STATE_ENTERED))) {
			binder_user_error("%d:%d ERROR: Thread waiting for process work before calling BC_REGISTER_LOOPER or BC_ENTER_LOOPER (state %x)\n",
				proc->pid, thread->pid, thread->looper);
			wait_event_interruptible(binder_user_error_wait,
						 binder_stop_on_user_error < 2);
		}
		binder_set_nice(proc->default_priority);
	}

	if (non_block) {
		if (!binder_has_work(thread, wait_for_proc_work))
			ret = -EAGAIN;
	} else {
		ret = binder_wait_for_work(thread, wait_for_proc_work);
	}

	thread->looper &= ~BINDER_LOOPER_STATE_WAITING;

	if (ret)
		return ret;

	while (1) {
		uint32_t cmd;
		struct binder_transaction_data_secctx tr;
		struct binder_transaction_data *trd = &tr.transaction_data;
		struct binder_work *w = NULL;
		struct list_head *list = NULL;
		struct binder_transaction *t = NULL;
		struct binder_thread *t_from;
		size_t trsize = sizeof(*trd);

		binder_inner_proc_lock(proc);
		if (!binder_worklist_empty_ilocked(&thread->todo))
			list = &thread->todo;
		else if (!binder_worklist_empty_ilocked(&proc->todo) &&
			   wait_for_proc_work)
			list = &proc->todo;
		else {
			binder_inner_proc_unlock(proc);

			/* no data added */
			if (ptr - buffer == 4 && !thread->looper_need_return)
				goto retry;
			break;
		}

		if (end - ptr < sizeof(tr) + 4) {
			binder_inner_proc_unlock(proc);
			break;
		}
		w = binder_dequeue_work_head_ilocked(list);
		if (binder_worklist_empty_ilocked(&thread->todo))
			thread->process_todo = false;

		switch (w->type) {
		case BINDER_WORK_TRANSACTION: {
			binder_inner_proc_unlock(proc);
			t = container_of(w, struct binder_transaction, work);
		} break;
		case BINDER_WORK_RETURN_ERROR: {
			struct binder_error *e = container_of(
					w, struct binder_error, work);

			WARN_ON(e->cmd == BR_OK);
			binder_inner_proc_unlock(proc);
			if (put_user(e->cmd, (uint32_t __user *)ptr))
				return -EFAULT;
			cmd = e->cmd;
			e->cmd = BR_OK;
			ptr += sizeof(uint32_t);

			binder_stat_br(proc, thread, cmd);
		} break;
		case BINDER_WORK_TRANSACTION_COMPLETE:
		case BINDER_WORK_TRANSACTION_PENDING:
		case BINDER_WORK_TRANSACTION_ONEWAY_SPAM_SUSPECT: {
			if (proc->oneway_spam_detection_enabled &&
			    w->type == BINDER_WORK_TRANSACTION_ONEWAY_SPAM_SUSPECT)
				cmd = BR_ONEWAY_SPAM_SUSPECT;
			else if (w->type == BINDER_WORK_TRANSACTION_PENDING)
				cmd = BR_TRANSACTION_PENDING_FROZEN;
			else
				cmd = BR_TRANSACTION_COMPLETE;
			binder_inner_proc_unlock(proc);
			kfree(w);
			binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
			if (put_user(cmd, (uint32_t __user *)ptr))
				return -EFAULT;
			ptr += sizeof(uint32_t);

			binder_stat_br(proc, thread, cmd);
			binder_debug(BINDER_DEBUG_TRANSACTION_COMPLETE,
				     "%d:%d BR_TRANSACTION_COMPLETE\n",
				     proc->pid, thread->pid);
		} break;
		case BINDER_WORK_NODE: {
			struct binder_node *node = container_of(w, struct binder_node, work);
			int strong, weak;
			binder_uintptr_t node_ptr = node->ptr;
			binder_uintptr_t node_cookie = node->cookie;
			int node_debug_id = node->debug_id;
			int has_weak_ref;
			int has_strong_ref;
			void __user *orig_ptr = ptr;

			BUG_ON(proc != node->proc);
			strong = node->internal_strong_refs ||
					node->local_strong_refs;
			weak = !hlist_empty(&node->refs) ||
					node->local_weak_refs ||
					node->tmp_refs || strong;
			has_strong_ref = node->has_strong_ref;
			has_weak_ref = node->has_weak_ref;

			if (weak && !has_weak_ref) {
				node->has_weak_ref = 1;
				node->pending_weak_ref = 1;
				node->local_weak_refs++;
			}
			if (strong && !has_strong_ref) {
				node->has_strong_ref = 1;
				node->pending_strong_ref = 1;
				node->local_strong_refs++;
			}
			if (!strong && has_strong_ref)
				node->has_strong_ref = 0;
			if (!weak && has_weak_ref)
				node->has_weak_ref = 0;
			if (!weak && !strong) {
				binder_debug(BINDER_DEBUG_INTERNAL_REFS,
					     "%d:%d node %d u%016llx c%016llx deleted\n",
					     proc->pid, thread->pid,
					     node_debug_id,
					     (u64)node_ptr,
					     (u64)node_cookie);
				rb_erase(&node->rb_node, &proc->nodes);
				binder_inner_proc_unlock(proc);
				binder_node_lock(node);
				/*
				 * Acquire the node lock before freeing the
				 * node to serialize with other threads that
				 * may have been holding the node lock while
				 * decrementing this node (avoids race where
				 * this thread frees while the other thread
				 * is unlocking the node after the final
				 * decrement)
				 */
				binder_node_unlock(node);
				binder_free_node(node);
			} else
				binder_inner_proc_unlock(proc);

			if (weak && !has_weak_ref)
				ret = binder_put_node_cmd(
						proc, thread, &ptr, node_ptr,
						node_cookie, node_debug_id,
						BR_INCREFS, "BR_INCREFS");
			if (!ret && strong && !has_strong_ref)
				ret = binder_put_node_cmd(
						proc, thread, &ptr, node_ptr,
						node_cookie, node_debug_id,
						BR_ACQUIRE, "BR_ACQUIRE");
			if (!ret && !strong && has_strong_ref)
				ret = binder_put_node_cmd(
						proc, thread, &ptr, node_ptr,
						node_cookie, node_debug_id,
						BR_RELEASE, "BR_RELEASE");
			if (!ret && !weak && has_weak_ref)
				ret = binder_put_node_cmd(
						proc, thread, &ptr, node_ptr,
						node_cookie, node_debug_id,
						BR_DECREFS, "BR_DECREFS");
			if (orig_ptr == ptr)
				binder_debug(BINDER_DEBUG_INTERNAL_REFS,
					     "%d:%d node %d u%016llx c%016llx state unchanged\n",
					     proc->pid, thread->pid,
					     node_debug_id,
					     (u64)node_ptr,
					     (u64)node_cookie);
			if (ret)
				return ret;
		} break;
		case BINDER_WORK_DEAD_BINDER:
		case BINDER_WORK_DEAD_BINDER_AND_CLEAR:
		case BINDER_WORK_CLEAR_DEATH_NOTIFICATION: {
			struct binder_ref_death *death;
			uint32_t cmd;
			binder_uintptr_t cookie;

			death = container_of(w, struct binder_ref_death, work);
			if (w->type == BINDER_WORK_CLEAR_DEATH_NOTIFICATION)
				cmd = BR_CLEAR_DEATH_NOTIFICATION_DONE;
			else
				cmd = BR_DEAD_BINDER;
			cookie = death->cookie;

			binder_debug(BINDER_DEBUG_DEATH_NOTIFICATION,
				     "%d:%d %s %016llx\n",
				      proc->pid, thread->pid,
				      cmd == BR_DEAD_BINDER ?
				      "BR_DEAD_BINDER" :
				      "BR_CLEAR_DEATH_NOTIFICATION_DONE",
				      (u64)cookie);
			if (w->type == BINDER_WORK_CLEAR_DEATH_NOTIFICATION) {
				binder_inner_proc_unlock(proc);
				kfree(death);
				binder_stats_deleted(BINDER_STAT_DEATH);
			} else {
				binder_enqueue_work_ilocked(
						w, &proc->delivered_death);
				binder_inner_proc_unlock(proc);
			}
			if (put_user(cmd, (uint32_t __user *)ptr))
				return -EFAULT;
			ptr += sizeof(uint32_t);
			if (put_user(cookie,
				     (binder_uintptr_t __user *)ptr))
				return -EFAULT;
			ptr += sizeof(binder_uintptr_t);
			binder_stat_br(proc, thread, cmd);
			if (cmd == BR_DEAD_BINDER)
				goto done; /* DEAD_BINDER notifications can cause transactions */
		} break;

		case BINDER_WORK_FROZEN_BINDER: {
			struct binder_ref_freeze *freeze;
			struct binder_frozen_state_info info;

			memset(&info, 0, sizeof(info));
			freeze = container_of(w, struct binder_ref_freeze, work);
			info.is_frozen = freeze->is_frozen;
			info.cookie = freeze->cookie;
			freeze->sent = true;
			binder_enqueue_work_ilocked(w, &proc->delivered_freeze);
			binder_inner_proc_unlock(proc);

			if (put_user(BR_FROZEN_BINDER, (uint32_t __user *)ptr))
				return -EFAULT;
			ptr += sizeof(uint32_t);
			if (copy_to_user(ptr, &info, sizeof(info)))
				return -EFAULT;
			ptr += sizeof(info);
			binder_stat_br(proc, thread, BR_FROZEN_BINDER);
			goto done; /* BR_FROZEN_BINDER notifications can cause transactions */
		} break;

		case BINDER_WORK_CLEAR_FREEZE_NOTIFICATION: {
			struct binder_ref_freeze *freeze =
			    container_of(w, struct binder_ref_freeze, work);
			binder_uintptr_t cookie = freeze->cookie;

			binder_inner_proc_unlock(proc);
			kfree(freeze);
			binder_stats_deleted(BINDER_STAT_FREEZE);
			if (put_user(BR_CLEAR_FREEZE_NOTIFICATION_DONE, (uint32_t __user *)ptr))
				return -EFAULT;
			ptr += sizeof(uint32_t);
			if (put_user(cookie, (binder_uintptr_t __user *)ptr))
				return -EFAULT;
			ptr += sizeof(binder_uintptr_t);
			binder_stat_br(proc, thread, BR_CLEAR_FREEZE_NOTIFICATION_DONE);
		} break;

		default:
			binder_inner_proc_unlock(proc);
			pr_err("%d:%d: bad work type %d\n",
			       proc->pid, thread->pid, w->type);
			break;
		}

		if (!t)
			continue;

		BUG_ON(t->buffer == NULL);
		if (t->buffer->target_node) {
			struct binder_node *target_node = t->buffer->target_node;

			trd->target.ptr = target_node->ptr;
			trd->cookie = target_node->cookie;
			t->saved_priority = task_nice(current);
			if (t->priority < target_node->min_priority &&
			    !(t->flags & TF_ONE_WAY))
				binder_set_nice(t->priority);
			else if (!(t->flags & TF_ONE_WAY) ||
				 t->saved_priority > target_node->min_priority)
				binder_set_nice(target_node->min_priority);
			cmd = BR_TRANSACTION;
		} else {
			trd->target.ptr = 0;
			trd->cookie = 0;
			cmd = BR_REPLY;
		}
		trd->code = t->code;
		trd->flags = t->flags;
		trd->sender_euid = from_kuid(current_user_ns(), t->sender_euid);

		t_from = binder_get_txn_from(t);
		if (t_from) {
			struct task_struct *sender = t_from->proc->tsk;

			trd->sender_pid =
				task_tgid_nr_ns(sender,
						task_active_pid_ns(current));
		} else {
			trd->sender_pid = 0;
		}

		ret = binder_apply_fd_fixups(proc, t);
		if (ret) {
			struct binder_buffer *buffer = t->buffer;
			bool oneway = !!(t->flags & TF_ONE_WAY);
			int tid = t->debug_id;

			if (t_from)
				binder_thread_dec_tmpref(t_from);
			buffer->transaction = NULL;
			binder_cleanup_transaction(t, "fd fixups failed",
						   BR_FAILED_REPLY);
			binder_free_buf(proc, thread, buffer, true);
			binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
				     "%d:%d %stransaction %d fd fixups failed %d/%d, line %d\n",
				     proc->pid, thread->pid,
				     oneway ? "async " :
					(cmd == BR_REPLY ? "reply " : ""),
				     tid, BR_FAILED_REPLY, ret, __LINE__);
			if (cmd == BR_REPLY) {
				cmd = BR_FAILED_REPLY;
				if (put_user(cmd, (uint32_t __user *)ptr))
					return -EFAULT;
				ptr += sizeof(uint32_t);
				binder_stat_br(proc, thread, cmd);
				break;
			}
			continue;
		}
		trd->data_size = t->buffer->data_size;
		trd->offsets_size = t->buffer->offsets_size;
		trd->data.ptr.buffer = t->buffer->user_data;
		trd->data.ptr.offsets = trd->data.ptr.buffer +
					ALIGN(t->buffer->data_size,
					      sizeof(void *));

		tr.secctx = t->security_ctx;
		if (t->security_ctx) {
			cmd = BR_TRANSACTION_SEC_CTX;
			trsize = sizeof(tr);
		}
		if (put_user(cmd, (uint32_t __user *)ptr)) {
			if (t_from)
				binder_thread_dec_tmpref(t_from);

			binder_cleanup_transaction(t, "put_user failed",
						   BR_FAILED_REPLY);

			return -EFAULT;
		}
		ptr += sizeof(uint32_t);
		if (copy_to_user(ptr, &tr, trsize)) {
			if (t_from)
				binder_thread_dec_tmpref(t_from);

			binder_cleanup_transaction(t, "copy_to_user failed",
						   BR_FAILED_REPLY);

			return -EFAULT;
		}
		ptr += trsize;

		trace_binder_transaction_received(t);
		binder_stat_br(proc, thread, cmd);
		binder_debug(BINDER_DEBUG_TRANSACTION,
			     "%d:%d %s %d %d:%d, cmd %u size %zd-%zd ptr %016llx-%016llx\n",
			     proc->pid, thread->pid,
			     (cmd == BR_TRANSACTION) ? "BR_TRANSACTION" :
				(cmd == BR_TRANSACTION_SEC_CTX) ?
				     "BR_TRANSACTION_SEC_CTX" : "BR_REPLY",
			     t->debug_id, t_from ? t_from->proc->pid : 0,
			     t_from ? t_from->pid : 0, cmd,
			     t->buffer->data_size, t->buffer->offsets_size,
			     (u64)trd->data.ptr.buffer,
			     (u64)trd->data.ptr.offsets);

		if (t_from)
			binder_thread_dec_tmpref(t_from);
		t->buffer->allow_user_free = 1;
		if (cmd != BR_REPLY && !(t->flags & TF_ONE_WAY)) {
			binder_inner_proc_lock(thread->proc);
			t->to_parent = thread->transaction_stack;
			t->to_thread = thread;
			thread->transaction_stack = t;
			binder_inner_proc_unlock(thread->proc);
		} else {
			binder_free_transaction(t);
		}
		break;
	}

done:

	*consumed = ptr - buffer;
	binder_inner_proc_lock(proc);
	if (proc->requested_threads == 0 &&
	    list_empty(&thread->proc->waiting_threads) &&
	    proc->requested_threads_started < proc->max_threads &&
	    (thread->looper & (BINDER_LOOPER_STATE_REGISTERED |
	     BINDER_LOOPER_STATE_ENTERED)) /* the user-space code fails to
					    * spawn a new thread if we leave
					    * this out */) {
		proc->requested_threads++;
		binder_inner_proc_unlock(proc);
		binder_debug(BINDER_DEBUG_THREADS,
			     "%d:%d BR_SPAWN_LOOPER\n",
			     proc->pid, thread->pid);
		if (put_user(BR_SPAWN_LOOPER, (uint32_t __user *)buffer))
			return -EFAULT;
		binder_stat_br(proc, thread, BR_SPAWN_LOOPER);
	} else
		binder_inner_proc_unlock(proc);
	return 0;
}
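
/*
 * Illustrative consumer sketch (assumed userspace code, not part of the
 * driver): the buffer filled above is a stream of u32 BR_* codes, each
 * followed by its payload. Since the BR_* values are built with _IOR(),
 * _IOC_SIZE() recovers the payload size, so a minimal parser can skip
 * commands it does not handle:
 *
 *	unsigned char *p = rbuf, *end = rbuf + bwr.read_consumed;
 *
 *	while (p < end) {
 *		uint32_t br;
 *
 *		memcpy(&br, p, sizeof(br));
 *		p += sizeof(br);
 *		switch (br) {
 *		case BR_TRANSACTION:	// binder_transaction_data follows
 *		default:
 *			p += _IOC_SIZE(br);	// skip the payload
 *		}
 *	}
 */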

static void binder_release_work(struct binder_proc *proc,
				struct list_head *list)
{
	struct binder_work *w;
	enum binder_work_type wtype;

	while (1) {
		binder_inner_proc_lock(proc);
		w = binder_dequeue_work_head_ilocked(list);
		wtype = w ? w->type : 0;
		binder_inner_proc_unlock(proc);
		if (!w)
			return;

		switch (wtype) {
		case BINDER_WORK_TRANSACTION: {
			struct binder_transaction *t;

			t = container_of(w, struct binder_transaction, work);

			binder_cleanup_transaction(t, "process died.",
						   BR_DEAD_REPLY);
		} break;
		case BINDER_WORK_RETURN_ERROR: {
			struct binder_error *e = container_of(
					w, struct binder_error, work);

			binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
				"undelivered TRANSACTION_ERROR: %u\n",
				e->cmd);
		} break;
		case BINDER_WORK_TRANSACTION_PENDING:
		case BINDER_WORK_TRANSACTION_ONEWAY_SPAM_SUSPECT:
		case BINDER_WORK_TRANSACTION_COMPLETE: {
			binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
				"undelivered TRANSACTION_COMPLETE\n");
			kfree(w);
			binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
		} break;
		case BINDER_WORK_DEAD_BINDER_AND_CLEAR:
		case BINDER_WORK_CLEAR_DEATH_NOTIFICATION: {
			struct binder_ref_death *death;

			death = container_of(w, struct binder_ref_death, work);
			binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
				"undelivered death notification, %016llx\n",
				(u64)death->cookie);
			kfree(death);
			binder_stats_deleted(BINDER_STAT_DEATH);
		} break;
		case BINDER_WORK_NODE:
			break;
		case BINDER_WORK_CLEAR_FREEZE_NOTIFICATION: {
			struct binder_ref_freeze *freeze;

			freeze = container_of(w, struct binder_ref_freeze, work);
			binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
				     "undelivered freeze notification, %016llx\n",
				     (u64)freeze->cookie);
			kfree(freeze);
			binder_stats_deleted(BINDER_STAT_FREEZE);
		} break;
		default:
			pr_err("unexpected work type, %d, not freed\n",
			       wtype);
			break;
		}
	}
}

static struct binder_thread *binder_get_thread_ilocked(
		struct binder_proc *proc, struct binder_thread *new_thread)
{
	struct binder_thread *thread = NULL;
	struct rb_node *parent = NULL;
	struct rb_node **p = &proc->threads.rb_node;

	while (*p) {
		parent = *p;
		thread = rb_entry(parent, struct binder_thread, rb_node);

		if (current->pid < thread->pid)
			p = &(*p)->rb_left;
		else if (current->pid > thread->pid)
			p = &(*p)->rb_right;
		else
			return thread;
	}
	if (!new_thread)
		return NULL;
	thread = new_thread;
	binder_stats_created(BINDER_STAT_THREAD);
	thread->proc = proc;
	thread->pid = current->pid;
	atomic_set(&thread->tmp_ref, 0);
	init_waitqueue_head(&thread->wait);
	INIT_LIST_HEAD(&thread->todo);
	rb_link_node(&thread->rb_node, parent, p);
	rb_insert_color(&thread->rb_node, &proc->threads);
	thread->looper_need_return = true;
	thread->return_error.work.type = BINDER_WORK_RETURN_ERROR;
	thread->return_error.cmd = BR_OK;
	thread->reply_error.work.type = BINDER_WORK_RETURN_ERROR;
	thread->reply_error.cmd = BR_OK;
	thread->ee.command = BR_OK;
	INIT_LIST_HEAD(&new_thread->waiting_thread_node);
	return thread;
}

static struct binder_thread *binder_get_thread(struct binder_proc *proc)
{
	struct binder_thread *thread;
	struct binder_thread *new_thread;

	binder_inner_proc_lock(proc);
	thread = binder_get_thread_ilocked(proc, NULL);
	binder_inner_proc_unlock(proc);
	if (!thread) {
		new_thread = kzalloc(sizeof(*thread), GFP_KERNEL);
		if (new_thread == NULL)
			return NULL;
		binder_inner_proc_lock(proc);
		thread = binder_get_thread_ilocked(proc, new_thread);
		binder_inner_proc_unlock(proc);
		if (thread != new_thread)
			kfree(new_thread);
	}
	return thread;
}

static void binder_free_proc(struct binder_proc *proc)
{
	struct binder_device *device;

	BUG_ON(!list_empty(&proc->todo));
	BUG_ON(!list_empty(&proc->delivered_death));
	if (proc->outstanding_txns)
		pr_warn("%s: Unexpected outstanding_txns %d\n",
			__func__, proc->outstanding_txns);
	device = container_of(proc->context, struct binder_device, context);
	if (refcount_dec_and_test(&device->ref)) {
		kfree(proc->context->name);
		kfree(device);
	}
	binder_alloc_deferred_release(&proc->alloc);
	put_task_struct(proc->tsk);
	put_cred(proc->cred);
	binder_stats_deleted(BINDER_STAT_PROC);
	dbitmap_free(&proc->dmap);
	kfree(proc);
}

static void binder_free_thread(struct binder_thread *thread)
{
	BUG_ON(!list_empty(&thread->todo));
	binder_stats_deleted(BINDER_STAT_THREAD);
	binder_proc_dec_tmpref(thread->proc);
	kfree(thread);
}

static int binder_thread_release(struct binder_proc *proc,
				 struct binder_thread *thread)
{
	struct binder_transaction *t;
	struct binder_transaction *send_reply = NULL;
	int active_transactions = 0;
	struct binder_transaction *last_t = NULL;

	binder_inner_proc_lock(thread->proc);
	/*
	 * take a ref on the proc so it survives
	 * after we remove this thread from proc->threads.
	 * The corresponding dec is when we actually
	 * free the thread in binder_free_thread()
	 */
	proc->tmp_ref++;
	/*
	 * take a ref on this thread to ensure it
	 * survives while we are releasing it
	 */
	atomic_inc(&thread->tmp_ref);
	rb_erase(&thread->rb_node, &proc->threads);
	t = thread->transaction_stack;
	if (t) {
		spin_lock(&t->lock);
		if (t->to_thread == thread)
			send_reply = t;
	} else {
		__acquire(&t->lock);
	}
	thread->is_dead = true;

	while (t) {
		last_t = t;
		active_transactions++;
		binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
			     "release %d:%d transaction %d %s, still active\n",
			      proc->pid, thread->pid,
			     t->debug_id,
			     (t->to_thread == thread) ? "in" : "out");

		if (t->to_thread == thread) {
			thread->proc->outstanding_txns--;
			t->to_proc = NULL;
			t->to_thread = NULL;
			if (t->buffer) {
				t->buffer->transaction = NULL;
				t->buffer = NULL;
			}
			t = t->to_parent;
		} else if (t->from == thread) {
			t->from = NULL;
			t = t->from_parent;
		} else
			BUG();
		spin_unlock(&last_t->lock);
		if (t)
			spin_lock(&t->lock);
		else
			__acquire(&t->lock);
	}
	/* annotation for sparse, lock not acquired in last iteration above */
	__release(&t->lock);

	/*
	 * If this thread used poll, make sure we remove the waitqueue from any
	 * poll data structures holding it.
	 */
	if (thread->looper & BINDER_LOOPER_STATE_POLL)
		wake_up_pollfree(&thread->wait);

	binder_inner_proc_unlock(thread->proc);

	/*
	 * This is needed to avoid races between wake_up_pollfree() above and
	 * someone else removing the last entry from the queue for other reasons
	 * (e.g. ep_remove_wait_queue() being called due to an epoll file
	 * descriptor being closed). Such other users hold an RCU read lock, so
	 * we can be sure they're done after we call synchronize_rcu().
	 */
	if (thread->looper & BINDER_LOOPER_STATE_POLL)
		synchronize_rcu();

	if (send_reply)
		binder_send_failed_reply(send_reply, BR_DEAD_REPLY);
	binder_release_work(proc, &thread->todo);
	binder_thread_dec_tmpref(thread);
	return active_transactions;
}

static __poll_t binder_poll(struct file *filp,
				struct poll_table_struct *wait)
{
	struct binder_proc *proc = filp->private_data;
	struct binder_thread *thread = NULL;
	bool wait_for_proc_work;

	thread = binder_get_thread(proc);
	if (!thread)
		return EPOLLERR;

	binder_inner_proc_lock(thread->proc);
	thread->looper |= BINDER_LOOPER_STATE_POLL;
	wait_for_proc_work = binder_available_for_proc_work_ilocked(thread);

	binder_inner_proc_unlock(thread->proc);

	poll_wait(filp, &thread->wait, wait);

	if (binder_has_work(thread, wait_for_proc_work))
		return EPOLLIN;

	return 0;
}
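
/*
 * Illustrative sketch (assumed userspace code, not part of the driver): a
 * thread can multiplex the binder fd with poll(2) and only issue a
 * read-only BINDER_WRITE_READ once readiness is signalled by the function
 * above:
 *
 *	struct pollfd pfd = { .fd = fd, .events = POLLIN };
 *
 *	if (poll(&pfd, 1, -1) > 0 && (pfd.revents & POLLIN)) {
 *		// fetch work with bwr.read_size > 0 and write_size == 0
 *	}
 */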

static int binder_ioctl_write_read(struct file *filp, unsigned long arg,
				struct binder_thread *thread)
{
	int ret = 0;
	struct binder_proc *proc = filp->private_data;
	void __user *ubuf = (void __user *)arg;
	struct binder_write_read bwr;

	if (copy_from_user(&bwr, ubuf, sizeof(bwr))) {
		ret = -EFAULT;
		goto out;
	}
	binder_debug(BINDER_DEBUG_READ_WRITE,
		     "%d:%d write %lld at %016llx, read %lld at %016llx\n",
		     proc->pid, thread->pid,
		     (u64)bwr.write_size, (u64)bwr.write_buffer,
		     (u64)bwr.read_size, (u64)bwr.read_buffer);

	if (bwr.write_size > 0) {
		ret = binder_thread_write(proc, thread,
					  bwr.write_buffer,
					  bwr.write_size,
					  &bwr.write_consumed);
		trace_binder_write_done(ret);
		if (ret < 0) {
			bwr.read_consumed = 0;
			if (copy_to_user(ubuf, &bwr, sizeof(bwr)))
				ret = -EFAULT;
			goto out;
		}
	}
	if (bwr.read_size > 0) {
		ret = binder_thread_read(proc, thread, bwr.read_buffer,
					 bwr.read_size,
					 &bwr.read_consumed,
					 filp->f_flags & O_NONBLOCK);
		trace_binder_read_done(ret);
		binder_inner_proc_lock(proc);
		if (!binder_worklist_empty_ilocked(&proc->todo))
			binder_wakeup_proc_ilocked(proc);
		binder_inner_proc_unlock(proc);
		if (ret < 0) {
			if (copy_to_user(ubuf, &bwr, sizeof(bwr)))
				ret = -EFAULT;
			goto out;
		}
	}
	binder_debug(BINDER_DEBUG_READ_WRITE,
		     "%d:%d wrote %lld of %lld, read return %lld of %lld\n",
		     proc->pid, thread->pid,
		     (u64)bwr.write_consumed, (u64)bwr.write_size,
		     (u64)bwr.read_consumed, (u64)bwr.read_size);
	if (copy_to_user(ubuf, &bwr, sizeof(bwr))) {
		ret = -EFAULT;
		goto out;
	}
out:
	return ret;
}
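
/*
 * Illustrative round trip (assumed userspace code, not part of the driver)
 * through the function above: one ioctl can both flush outgoing BC_*
 * commands and pull incoming BR_* work; the *_consumed fields report how
 * much of each buffer was used. `wbuf`/`wlen` and `parse()` are assumed:
 *
 *	unsigned char rbuf[256];
 *	struct binder_write_read bwr = {0};
 *
 *	bwr.write_buffer = (binder_uintptr_t)wbuf;	// BC_* stream
 *	bwr.write_size = wlen;
 *	bwr.read_buffer = (binder_uintptr_t)rbuf;	// receives BR_* stream
 *	bwr.read_size = sizeof(rbuf);
 *	if (ioctl(fd, BINDER_WRITE_READ, &bwr) == 0)
 *		parse(rbuf, bwr.read_consumed);		// hypothetical helper
 */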

static int binder_ioctl_set_ctx_mgr(struct file *filp,
				    struct flat_binder_object *fbo)
{
	int ret = 0;
	struct binder_proc *proc = filp->private_data;
	struct binder_context *context = proc->context;
	struct binder_node *new_node;
	kuid_t curr_euid = current_euid();

	mutex_lock(&context->context_mgr_node_lock);
	if (context->binder_context_mgr_node) {
		pr_err("BINDER_SET_CONTEXT_MGR already set\n");
		ret = -EBUSY;
		goto out;
	}
	ret = security_binder_set_context_mgr(proc->cred);
	if (ret < 0)
		goto out;
	if (uid_valid(context->binder_context_mgr_uid)) {
		if (!uid_eq(context->binder_context_mgr_uid, curr_euid)) {
			pr_err("BINDER_SET_CONTEXT_MGR bad uid %d != %d\n",
			       from_kuid(&init_user_ns, curr_euid),
			       from_kuid(&init_user_ns,
					 context->binder_context_mgr_uid));
			ret = -EPERM;
			goto out;
		}
	} else {
		context->binder_context_mgr_uid = curr_euid;
	}
	new_node = binder_new_node(proc, fbo);
	if (!new_node) {
		ret = -ENOMEM;
		goto out;
	}
	binder_node_lock(new_node);
	new_node->local_weak_refs++;
	new_node->local_strong_refs++;
	new_node->has_strong_ref = 1;
	new_node->has_weak_ref = 1;
	context->binder_context_mgr_node = new_node;
	binder_node_unlock(new_node);
	binder_put_node(new_node);
out:
	mutex_unlock(&context->context_mgr_node_lock);
	return ret;
}
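
/*
 * Illustrative sketch (assumed userspace code, not part of the driver): a
 * service manager claims descriptor 0 right after opening its binder
 * device; flat_binder_object properties can be passed via the _EXT variant:
 *
 *	ioctl(fd, BINDER_SET_CONTEXT_MGR, 0);
 *
 *	// or, with extended attributes:
 *	struct flat_binder_object fbo = { .flags = 0 };
 *	ioctl(fd, BINDER_SET_CONTEXT_MGR_EXT, &fbo);
 */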

static int binder_ioctl_get_node_info_for_ref(struct binder_proc *proc,
		struct binder_node_info_for_ref *info)
{
	struct binder_node *node;
	struct binder_context *context = proc->context;
	__u32 handle = info->handle;

	if (info->strong_count || info->weak_count || info->reserved1 ||
	    info->reserved2 || info->reserved3) {
		binder_user_error("%d BINDER_GET_NODE_INFO_FOR_REF: only handle may be non-zero.",
				  proc->pid);
		return -EINVAL;
	}

	/* This ioctl may only be used by the context manager */
	mutex_lock(&context->context_mgr_node_lock);
	if (!context->binder_context_mgr_node ||
	    context->binder_context_mgr_node->proc != proc) {
		mutex_unlock(&context->context_mgr_node_lock);
		return -EPERM;
	}
	mutex_unlock(&context->context_mgr_node_lock);

	node = binder_get_node_from_ref(proc, handle, true, NULL);
	if (!node)
		return -EINVAL;

	info->strong_count = node->local_strong_refs +
		node->internal_strong_refs;
	info->weak_count = node->local_weak_refs;

	binder_put_node(node);

	return 0;
}

static int binder_ioctl_get_node_debug_info(struct binder_proc *proc,
				struct binder_node_debug_info *info)
{
	struct rb_node *n;
	binder_uintptr_t ptr = info->ptr;

	memset(info, 0, sizeof(*info));

	binder_inner_proc_lock(proc);
	for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n)) {
		struct binder_node *node = rb_entry(n, struct binder_node,
						    rb_node);
		if (node->ptr > ptr) {
			info->ptr = node->ptr;
			info->cookie = node->cookie;
			info->has_strong_ref = node->has_strong_ref;
			info->has_weak_ref = node->has_weak_ref;
			break;
		}
	}
	binder_inner_proc_unlock(proc);

	return 0;
}

static bool binder_txns_pending_ilocked(struct binder_proc *proc)
{
	struct rb_node *n;
	struct binder_thread *thread;

	if (proc->outstanding_txns > 0)
		return true;

	for (n = rb_first(&proc->threads); n; n = rb_next(n)) {
		thread = rb_entry(n, struct binder_thread, rb_node);
		if (thread->transaction_stack)
			return true;
	}
	return false;
}

static void binder_add_freeze_work(struct binder_proc *proc, bool is_frozen)
{
	struct binder_node *prev = NULL;
	struct rb_node *n;
	struct binder_ref *ref;

	binder_inner_proc_lock(proc);
	for (n = rb_first(&proc->nodes); n; n = rb_next(n)) {
		struct binder_node *node;

		node = rb_entry(n, struct binder_node, rb_node);
		binder_inc_node_tmpref_ilocked(node);
		binder_inner_proc_unlock(proc);
		if (prev)
			binder_put_node(prev);
		binder_node_lock(node);
		hlist_for_each_entry(ref, &node->refs, node_entry) {
			/*
			 * Need the node lock to synchronize
			 * with new notification requests and the
			 * inner lock to synchronize with queued
			 * freeze notifications.
			 */
			binder_inner_proc_lock(ref->proc);
			if (!ref->freeze) {
				binder_inner_proc_unlock(ref->proc);
				continue;
			}
			ref->freeze->work.type = BINDER_WORK_FROZEN_BINDER;
			if (list_empty(&ref->freeze->work.entry)) {
				ref->freeze->is_frozen = is_frozen;
				binder_enqueue_work_ilocked(&ref->freeze->work, &ref->proc->todo);
				binder_wakeup_proc_ilocked(ref->proc);
			} else {
				if (ref->freeze->sent && ref->freeze->is_frozen != is_frozen)
					ref->freeze->resend = true;
				ref->freeze->is_frozen = is_frozen;
			}
			binder_inner_proc_unlock(ref->proc);
		}
		prev = node;
		binder_node_unlock(node);
		binder_inner_proc_lock(proc);
		if (proc->is_dead)
			break;
	}
	binder_inner_proc_unlock(proc);
	if (prev)
		binder_put_node(prev);
}

static int binder_ioctl_freeze(struct binder_freeze_info *info,
			       struct binder_proc *target_proc)
{
	int ret = 0;

	if (!info->enable) {
		binder_inner_proc_lock(target_proc);
		target_proc->sync_recv = false;
		target_proc->async_recv = false;
		target_proc->is_frozen = false;
		binder_inner_proc_unlock(target_proc);
		binder_add_freeze_work(target_proc, false);
		return 0;
	}

	/*
	 * Freezing the target. Prevent new transactions by
	 * setting frozen state. If timeout specified, wait
	 * for transactions to drain.
	 */
	binder_inner_proc_lock(target_proc);
	target_proc->sync_recv = false;
	target_proc->async_recv = false;
	target_proc->is_frozen = true;
	binder_inner_proc_unlock(target_proc);

	if (info->timeout_ms > 0)
		ret = wait_event_interruptible_timeout(
			target_proc->freeze_wait,
			(!target_proc->outstanding_txns),
			msecs_to_jiffies(info->timeout_ms));

	/* Check pending transactions that wait for reply */
	if (ret >= 0) {
		binder_inner_proc_lock(target_proc);
		if (binder_txns_pending_ilocked(target_proc))
			ret = -EAGAIN;
		binder_inner_proc_unlock(target_proc);
	}

	if (ret < 0) {
		binder_inner_proc_lock(target_proc);
		target_proc->is_frozen = false;
		binder_inner_proc_unlock(target_proc);
	} else {
		binder_add_freeze_work(target_proc, true);
	}

	return ret < 0 ? ret : 0;
}
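
/*
 * Illustrative sketch (assumed userspace code, not part of the driver) of a
 * BINDER_FREEZE caller this function serves: freeze all contexts of a pid,
 * give outstanding transactions 100ms to drain, and handle -EAGAIN when
 * sync transactions are still pending. `binder_fd` and `target_pid` are
 * assumptions:
 *
 *	struct binder_freeze_info info = {
 *		.pid = target_pid,
 *		.enable = 1,
 *		.timeout_ms = 100,
 *	};
 *
 *	if (ioctl(binder_fd, BINDER_FREEZE, &info) < 0 && errno == EAGAIN)
 *		;	// pending sync transactions; retry later or thaw
 */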

static int binder_ioctl_get_freezer_info(
				struct binder_frozen_status_info *info)
{
	struct binder_proc *target_proc;
	bool found = false;
	__u32 txns_pending;

	info->sync_recv = 0;
	info->async_recv = 0;

	mutex_lock(&binder_procs_lock);
	hlist_for_each_entry(target_proc, &binder_procs, proc_node) {
		if (target_proc->pid == info->pid) {
			found = true;
			binder_inner_proc_lock(target_proc);
			txns_pending = binder_txns_pending_ilocked(target_proc);
			info->sync_recv |= target_proc->sync_recv |
					(txns_pending << 1);
			info->async_recv |= target_proc->async_recv;
			binder_inner_proc_unlock(target_proc);
		}
	}
	mutex_unlock(&binder_procs_lock);

	if (!found)
		return -EINVAL;

	return 0;
}
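
/*
 * Illustrative sketch (assumed userspace code, not part of the driver): a
 * freeze manager can ask whether a frozen pid was sent transactions. Bit 0
 * of sync_recv means a sync transaction was received; bit 1 (set from
 * txns_pending above) means outstanding transactions remain:
 *
 *	struct binder_frozen_status_info info = { .pid = target_pid };
 *
 *	if (ioctl(binder_fd, BINDER_GET_FROZEN_INFO, &info) == 0) {
 *		bool got_sync = info.sync_recv & 1;
 *		bool txns_pending = info.sync_recv & 2;
 *		bool got_async = info.async_recv & 1;
 *	}
 */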

static int binder_ioctl_get_extended_error(struct binder_thread *thread,
					   void __user *ubuf)
{
	struct binder_extended_error ee;

	binder_inner_proc_lock(thread->proc);
	ee = thread->ee;
	binder_set_extended_error(&thread->ee, 0, BR_OK, 0);
	binder_inner_proc_unlock(thread->proc);

	if (copy_to_user(ubuf, &ee, sizeof(ee)))
		return -EFAULT;

	return 0;
}
*filp
, unsigned int cmd
, unsigned long arg
)
5718 struct binder_proc
*proc
= filp
->private_data
;
5719 struct binder_thread
*thread
;
5720 void __user
*ubuf
= (void __user
*)arg
;
5722 /*pr_info("binder_ioctl: %d:%d %x %lx\n",
5723 proc->pid, current->pid, cmd, arg);*/
5725 binder_selftest_alloc(&proc
->alloc
);
5727 trace_binder_ioctl(cmd
, arg
);
5729 ret
= wait_event_interruptible(binder_user_error_wait
, binder_stop_on_user_error
< 2);
5733 thread
= binder_get_thread(proc
);
5734 if (thread
== NULL
) {
5740 case BINDER_WRITE_READ
:
5741 ret
= binder_ioctl_write_read(filp
, arg
, thread
);
5745 case BINDER_SET_MAX_THREADS
: {
5748 if (copy_from_user(&max_threads
, ubuf
,
5749 sizeof(max_threads
))) {
5753 binder_inner_proc_lock(proc
);
5754 proc
->max_threads
= max_threads
;
5755 binder_inner_proc_unlock(proc
);
5758 case BINDER_SET_CONTEXT_MGR_EXT
: {
5759 struct flat_binder_object fbo
;
5761 if (copy_from_user(&fbo
, ubuf
, sizeof(fbo
))) {
5765 ret
= binder_ioctl_set_ctx_mgr(filp
, &fbo
);
5770 case BINDER_SET_CONTEXT_MGR
:
5771 ret
= binder_ioctl_set_ctx_mgr(filp
, NULL
);
5775 case BINDER_THREAD_EXIT
:
5776 binder_debug(BINDER_DEBUG_THREADS
, "%d:%d exit\n",
5777 proc
->pid
, thread
->pid
);
5778 binder_thread_release(proc
, thread
);
5781 case BINDER_VERSION
: {
5782 struct binder_version __user
*ver
= ubuf
;
5784 if (put_user(BINDER_CURRENT_PROTOCOL_VERSION
,
5785 &ver
->protocol_version
)) {
5791 case BINDER_GET_NODE_INFO_FOR_REF
: {
5792 struct binder_node_info_for_ref info
;
5794 if (copy_from_user(&info
, ubuf
, sizeof(info
))) {
5799 ret
= binder_ioctl_get_node_info_for_ref(proc
, &info
);
5803 if (copy_to_user(ubuf
, &info
, sizeof(info
))) {
5810 case BINDER_GET_NODE_DEBUG_INFO
: {
5811 struct binder_node_debug_info info
;
5813 if (copy_from_user(&info
, ubuf
, sizeof(info
))) {
5818 ret
= binder_ioctl_get_node_debug_info(proc
, &info
);
5822 if (copy_to_user(ubuf
, &info
, sizeof(info
))) {
5828 case BINDER_FREEZE
: {
5829 struct binder_freeze_info info
;
5830 struct binder_proc
**target_procs
= NULL
, *target_proc
;
5831 int target_procs_count
= 0, i
= 0;
5835 if (copy_from_user(&info
, ubuf
, sizeof(info
))) {
5840 mutex_lock(&binder_procs_lock
);
5841 hlist_for_each_entry(target_proc
, &binder_procs
, proc_node
) {
5842 if (target_proc
->pid
== info
.pid
)
5843 target_procs_count
++;
5846 if (target_procs_count
== 0) {
5847 mutex_unlock(&binder_procs_lock
);
5852 target_procs
= kcalloc(target_procs_count
,
5853 sizeof(struct binder_proc
*),
5856 if (!target_procs
) {
5857 mutex_unlock(&binder_procs_lock
);
5862 hlist_for_each_entry(target_proc
, &binder_procs
, proc_node
) {
5863 if (target_proc
->pid
!= info
.pid
)
5866 binder_inner_proc_lock(target_proc
);
5867 target_proc
->tmp_ref
++;
5868 binder_inner_proc_unlock(target_proc
);
5870 target_procs
[i
++] = target_proc
;
5872 mutex_unlock(&binder_procs_lock
);
5874 for (i
= 0; i
< target_procs_count
; i
++) {
5876 ret
= binder_ioctl_freeze(&info
,
5879 binder_proc_dec_tmpref(target_procs
[i
]);
5882 kfree(target_procs
);
5888 case BINDER_GET_FROZEN_INFO
: {
5889 struct binder_frozen_status_info info
;
5891 if (copy_from_user(&info
, ubuf
, sizeof(info
))) {
5896 ret
= binder_ioctl_get_freezer_info(&info
);
5900 if (copy_to_user(ubuf
, &info
, sizeof(info
))) {
5906 case BINDER_ENABLE_ONEWAY_SPAM_DETECTION
: {
5909 if (copy_from_user(&enable
, ubuf
, sizeof(enable
))) {
5913 binder_inner_proc_lock(proc
);
5914 proc
->oneway_spam_detection_enabled
= (bool)enable
;
5915 binder_inner_proc_unlock(proc
);
5918 case BINDER_GET_EXTENDED_ERROR
:
5919 ret
= binder_ioctl_get_extended_error(thread
, ubuf
);
5930 thread
->looper_need_return
= false;
5931 wait_event_interruptible(binder_user_error_wait
, binder_stop_on_user_error
< 2);
5932 if (ret
&& ret
!= -EINTR
)
5933 pr_info("%d:%d ioctl %x %lx returned %d\n", proc
->pid
, current
->pid
, cmd
, arg
, ret
);
5935 trace_binder_ioctl_done(ret
);

static void binder_vma_open(struct vm_area_struct *vma)
{
	struct binder_proc *proc = vma->vm_private_data;

	binder_debug(BINDER_DEBUG_OPEN_CLOSE,
		     "%d open vm area %lx-%lx (%ld K) vma %lx pagep %lx\n",
		     proc->pid, vma->vm_start, vma->vm_end,
		     (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
		     (unsigned long)pgprot_val(vma->vm_page_prot));
}

static void binder_vma_close(struct vm_area_struct *vma)
{
	struct binder_proc *proc = vma->vm_private_data;

	binder_debug(BINDER_DEBUG_OPEN_CLOSE,
		     "%d close vm area %lx-%lx (%ld K) vma %lx pagep %lx\n",
		     proc->pid, vma->vm_start, vma->vm_end,
		     (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
		     (unsigned long)pgprot_val(vma->vm_page_prot));
	binder_alloc_vma_close(&proc->alloc);
}

static vm_fault_t binder_vm_fault(struct vm_fault *vmf)
{
	return VM_FAULT_SIGBUS;
}

static const struct vm_operations_struct binder_vm_ops = {
	.open = binder_vma_open,
	.close = binder_vma_close,
	.fault = binder_vm_fault,
};

static int binder_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct binder_proc *proc = filp->private_data;

	if (proc->tsk != current->group_leader)
		return -EINVAL;

	binder_debug(BINDER_DEBUG_OPEN_CLOSE,
		     "%s: %d %lx-%lx (%ld K) vma %lx pagep %lx\n",
		     __func__, proc->pid, vma->vm_start, vma->vm_end,
		     (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
		     (unsigned long)pgprot_val(vma->vm_page_prot));

	if (vma->vm_flags & FORBIDDEN_MMAP_FLAGS) {
		pr_err("%s: %d %lx-%lx %s failed %d\n", __func__,
		       proc->pid, vma->vm_start, vma->vm_end, "bad vm_flags", -EPERM);
		return -EPERM;
	}
	vm_flags_mod(vma, VM_DONTCOPY | VM_MIXEDMAP, VM_MAYWRITE);

	vma->vm_ops = &binder_vm_ops;
	vma->vm_private_data = proc;

	return binder_alloc_mmap_handler(&proc->alloc, vma);
}
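
/*
 * Illustrative sketch (assumed userspace code, not part of the driver):
 * clients map the buffer space read-only (VM_WRITE is in
 * FORBIDDEN_MMAP_FLAGS above, and the mapping cannot become writable later
 * because VM_MAYWRITE is cleared); the driver copies transaction payloads
 * into this area and userspace only reads them:
 *
 *	int fd = open("/dev/binder", O_RDWR | O_CLOEXEC);
 *	void *map = mmap(NULL, 128 * 1024, PROT_READ, MAP_PRIVATE, fd, 0);
 */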

static int binder_open(struct inode *nodp, struct file *filp)
{
	struct binder_proc *proc, *itr;
	struct binder_device *binder_dev;
	struct binderfs_info *info;
	struct dentry *binder_binderfs_dir_entry_proc = NULL;
	bool existing_pid = false;

	binder_debug(BINDER_DEBUG_OPEN_CLOSE, "%s: %d:%d\n", __func__,
		     current->group_leader->pid, current->pid);

	proc = kzalloc(sizeof(*proc), GFP_KERNEL);
	if (proc == NULL)
		return -ENOMEM;

	dbitmap_init(&proc->dmap);
	spin_lock_init(&proc->inner_lock);
	spin_lock_init(&proc->outer_lock);
	get_task_struct(current->group_leader);
	proc->tsk = current->group_leader;
	proc->cred = get_cred(filp->f_cred);
	INIT_LIST_HEAD(&proc->todo);
	init_waitqueue_head(&proc->freeze_wait);
	proc->default_priority = task_nice(current);
	/* binderfs stashes devices in i_private */
	if (is_binderfs_device(nodp)) {
		binder_dev = nodp->i_private;
		info = nodp->i_sb->s_fs_info;
		binder_binderfs_dir_entry_proc = info->proc_log_dir;
	} else {
		binder_dev = container_of(filp->private_data,
					  struct binder_device, miscdev);
	}
	refcount_inc(&binder_dev->ref);
	proc->context = &binder_dev->context;
	binder_alloc_init(&proc->alloc);

	binder_stats_created(BINDER_STAT_PROC);
	proc->pid = current->group_leader->pid;
	INIT_LIST_HEAD(&proc->delivered_death);
	INIT_LIST_HEAD(&proc->delivered_freeze);
	INIT_LIST_HEAD(&proc->waiting_threads);
	filp->private_data = proc;

	mutex_lock(&binder_procs_lock);
	hlist_for_each_entry(itr, &binder_procs, proc_node) {
		if (itr->pid == proc->pid) {
			existing_pid = true;
			break;
		}
	}
	hlist_add_head(&proc->proc_node, &binder_procs);
	mutex_unlock(&binder_procs_lock);

	if (binder_debugfs_dir_entry_proc && !existing_pid) {
		char strbuf[11];

		snprintf(strbuf, sizeof(strbuf), "%u", proc->pid);
		/*
		 * proc debug entries are shared between contexts.
		 * Only create for the first PID to avoid debugfs log spamming
		 * The printing code will anyway print all contexts for a given
		 * PID so this is not a problem.
		 */
		proc->debugfs_entry = debugfs_create_file(strbuf, 0444,
			binder_debugfs_dir_entry_proc,
			(void *)(unsigned long)proc->pid,
			&proc_fops);
	}

	if (binder_binderfs_dir_entry_proc && !existing_pid) {
		char strbuf[11];
		struct dentry *binderfs_entry;

		snprintf(strbuf, sizeof(strbuf), "%u", proc->pid);
		/*
		 * Similar to debugfs, the process specific log file is shared
		 * between contexts. Only create for the first PID.
		 * This is ok since same as debugfs, the log file will contain
		 * information on all contexts of a given PID.
		 */
		binderfs_entry = binderfs_create_file(binder_binderfs_dir_entry_proc,
			strbuf, &proc_fops, (void *)(unsigned long)proc->pid);
		if (!IS_ERR(binderfs_entry)) {
			proc->binderfs_entry = binderfs_entry;
		} else {
			int error;

			error = PTR_ERR(binderfs_entry);
			pr_warn("Unable to create file %s in binderfs (error %d)\n",
				strbuf, error);
		}
	}

	return 0;
}

static int binder_flush(struct file *filp, fl_owner_t id)
{
	struct binder_proc *proc = filp->private_data;

	binder_defer_work(proc, BINDER_DEFERRED_FLUSH);

	return 0;
}

static void binder_deferred_flush(struct binder_proc *proc)
{
	struct rb_node *n;
	int wake_count = 0;

	binder_inner_proc_lock(proc);
	for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n)) {
		struct binder_thread *thread = rb_entry(n, struct binder_thread, rb_node);

		thread->looper_need_return = true;
		if (thread->looper & BINDER_LOOPER_STATE_WAITING) {
			wake_up_interruptible(&thread->wait);
			wake_count++;
		}
	}
	binder_inner_proc_unlock(proc);

	binder_debug(BINDER_DEBUG_OPEN_CLOSE,
		     "binder_flush: %d woke %d threads\n", proc->pid,
		     wake_count);
}

static int binder_release(struct inode *nodp, struct file *filp)
{
	struct binder_proc *proc = filp->private_data;

	debugfs_remove(proc->debugfs_entry);

	if (proc->binderfs_entry) {
		binderfs_remove_file(proc->binderfs_entry);
		proc->binderfs_entry = NULL;
	}

	binder_defer_work(proc, BINDER_DEFERRED_RELEASE);

	return 0;
}
static int binder_node_release(struct binder_node *node, int refs)
{
	struct binder_ref *ref;
	int death = 0;
	struct binder_proc *proc = node->proc;

	binder_release_work(proc, &node->async_todo);

	binder_node_lock(node);
	binder_inner_proc_lock(proc);
	binder_dequeue_work_ilocked(&node->work);
	/*
	 * The caller must have taken a temporary ref on the node.
	 */
	BUG_ON(!node->tmp_refs);
	if (hlist_empty(&node->refs) && node->tmp_refs == 1) {
		binder_inner_proc_unlock(proc);
		binder_node_unlock(node);
		binder_free_node(node);

		return refs;
	}

	node->proc = NULL;
	node->local_strong_refs = 0;
	node->local_weak_refs = 0;
	binder_inner_proc_unlock(proc);

	spin_lock(&binder_dead_nodes_lock);
	hlist_add_head(&node->dead_node, &binder_dead_nodes);
	spin_unlock(&binder_dead_nodes_lock);

	hlist_for_each_entry(ref, &node->refs, node_entry) {
		refs++;
		/*
		 * Need the node lock to synchronize
		 * with new notification requests and the
		 * inner lock to synchronize with queued
		 * death notifications.
		 */
		binder_inner_proc_lock(ref->proc);
		if (!ref->death) {
			binder_inner_proc_unlock(ref->proc);
			continue;
		}

		death++;

		BUG_ON(!list_empty(&ref->death->work.entry));
		ref->death->work.type = BINDER_WORK_DEAD_BINDER;
		binder_enqueue_work_ilocked(&ref->death->work,
					    &ref->proc->todo);
		binder_wakeup_proc_ilocked(ref->proc);
		binder_inner_proc_unlock(ref->proc);
	}

	binder_debug(BINDER_DEBUG_DEAD_BINDER,
		     "node %d now dead, refs %d, death %d\n",
		     node->debug_id, refs, death);

	binder_node_unlock(node);
	binder_put_node(node);

	return refs;
}
static void binder_deferred_release(struct binder_proc *proc)
{
	struct binder_context *context = proc->context;
	struct rb_node *n;
	int threads, nodes, incoming_refs, outgoing_refs, active_transactions;

	mutex_lock(&binder_procs_lock);
	hlist_del(&proc->proc_node);
	mutex_unlock(&binder_procs_lock);

	mutex_lock(&context->context_mgr_node_lock);
	if (context->binder_context_mgr_node &&
	    context->binder_context_mgr_node->proc == proc) {
		binder_debug(BINDER_DEBUG_DEAD_BINDER,
			     "%s: %d context_mgr_node gone\n",
			     __func__, proc->pid);
		context->binder_context_mgr_node = NULL;
	}
	mutex_unlock(&context->context_mgr_node_lock);
	binder_inner_proc_lock(proc);
	/*
	 * Make sure proc stays alive after we
	 * remove all the threads
	 */
	proc->tmp_ref++;

	proc->is_dead = true;
	proc->is_frozen = false;
	proc->sync_recv = false;
	proc->async_recv = false;
	threads = 0;
	active_transactions = 0;
	while ((n = rb_first(&proc->threads))) {
		struct binder_thread *thread;

		thread = rb_entry(n, struct binder_thread, rb_node);
		binder_inner_proc_unlock(proc);
		threads++;
		active_transactions += binder_thread_release(proc, thread);
		binder_inner_proc_lock(proc);
	}

	nodes = 0;
	incoming_refs = 0;
	while ((n = rb_first(&proc->nodes))) {
		struct binder_node *node;

		node = rb_entry(n, struct binder_node, rb_node);
		nodes++;
		/*
		 * take a temporary ref on the node before
		 * calling binder_node_release() which will either
		 * kfree() the node or call binder_put_node()
		 */
		binder_inc_node_tmpref_ilocked(node);
		rb_erase(&node->rb_node, &proc->nodes);
		binder_inner_proc_unlock(proc);
		incoming_refs = binder_node_release(node, incoming_refs);
		binder_inner_proc_lock(proc);
	}
	binder_inner_proc_unlock(proc);

	outgoing_refs = 0;
	binder_proc_lock(proc);
	while ((n = rb_first(&proc->refs_by_desc))) {
		struct binder_ref *ref;

		ref = rb_entry(n, struct binder_ref, rb_node_desc);
		outgoing_refs++;
		binder_cleanup_ref_olocked(ref);
		binder_proc_unlock(proc);
		binder_free_ref(ref);
		binder_proc_lock(proc);
	}
	binder_proc_unlock(proc);

	binder_release_work(proc, &proc->todo);
	binder_release_work(proc, &proc->delivered_death);
	binder_release_work(proc, &proc->delivered_freeze);

	binder_debug(BINDER_DEBUG_OPEN_CLOSE,
		     "%s: %d threads %d, nodes %d (ref %d), refs %d, active transactions %d\n",
		     __func__, proc->pid, threads, nodes, incoming_refs,
		     outgoing_refs, active_transactions);

	binder_proc_dec_tmpref(proc);
}
static void binder_deferred_func(struct work_struct *work)
{
	struct binder_proc *proc;

	int defer;

	do {
		mutex_lock(&binder_deferred_lock);
		if (!hlist_empty(&binder_deferred_list)) {
			proc = hlist_entry(binder_deferred_list.first,
					   struct binder_proc,
					   deferred_work_node);
			hlist_del_init(&proc->deferred_work_node);
			defer = proc->deferred_work;
			proc->deferred_work = 0;
		} else {
			proc = NULL;
			defer = 0;
		}
		mutex_unlock(&binder_deferred_lock);

		if (defer & BINDER_DEFERRED_FLUSH)
			binder_deferred_flush(proc);

		if (defer & BINDER_DEFERRED_RELEASE)
			binder_deferred_release(proc); /* frees proc */
	} while (proc);
}

static DECLARE_WORK(binder_deferred_work, binder_deferred_func);

static void
binder_defer_work(struct binder_proc *proc, enum binder_deferred_state defer)
{
	mutex_lock(&binder_deferred_lock);
	proc->deferred_work |= defer;
	if (hlist_unhashed(&proc->deferred_work_node)) {
		hlist_add_head(&proc->deferred_work_node,
			       &binder_deferred_list);
		schedule_work(&binder_deferred_work);
	}
	mutex_unlock(&binder_deferred_lock);
}
static void print_binder_transaction_ilocked(struct seq_file *m,
					     struct binder_proc *proc,
					     const char *prefix,
					     struct binder_transaction *t)
{
	struct binder_proc *to_proc;
	struct binder_buffer *buffer = t->buffer;
	ktime_t current_time = ktime_get();

	spin_lock(&t->lock);
	to_proc = t->to_proc;
	seq_printf(m,
		   "%s %d: %pK from %d:%d to %d:%d code %x flags %x pri %ld r%d elapsed %lldms",
		   prefix, t->debug_id, t,
		   t->from ? t->from->proc->pid : 0,
		   t->from ? t->from->pid : 0,
		   to_proc ? to_proc->pid : 0,
		   t->to_thread ? t->to_thread->pid : 0,
		   t->code, t->flags, t->priority, t->need_reply,
		   ktime_ms_delta(current_time, t->start_time));
	spin_unlock(&t->lock);

	if (proc != to_proc) {
		/*
		 * Can only safely deref buffer if we are holding the
		 * correct proc inner lock for this node
		 */
		seq_puts(m, "\n");
		return;
	}

	if (buffer == NULL) {
		seq_puts(m, " buffer free\n");
		return;
	}
	if (buffer->target_node)
		seq_printf(m, " node %d", buffer->target_node->debug_id);
	seq_printf(m, " size %zd:%zd offset %lx\n",
		   buffer->data_size, buffer->offsets_size,
		   proc->alloc.buffer - buffer->user_data);
}
static void print_binder_work_ilocked(struct seq_file *m,
				      struct binder_proc *proc,
				      const char *prefix,
				      const char *transaction_prefix,
				      struct binder_work *w)
{
	struct binder_node *node;
	struct binder_transaction *t;

	switch (w->type) {
	case BINDER_WORK_TRANSACTION:
		t = container_of(w, struct binder_transaction, work);
		print_binder_transaction_ilocked(
				m, proc, transaction_prefix, t);
		break;
	case BINDER_WORK_RETURN_ERROR: {
		struct binder_error *e = container_of(
				w, struct binder_error, work);

		seq_printf(m, "%stransaction error: %u\n",
			   prefix, e->cmd);
	} break;
	case BINDER_WORK_TRANSACTION_COMPLETE:
		seq_printf(m, "%stransaction complete\n", prefix);
		break;
	case BINDER_WORK_NODE:
		node = container_of(w, struct binder_node, work);
		seq_printf(m, "%snode work %d: u%016llx c%016llx\n",
			   prefix, node->debug_id,
			   (u64)node->ptr, (u64)node->cookie);
		break;
	case BINDER_WORK_DEAD_BINDER:
		seq_printf(m, "%shas dead binder\n", prefix);
		break;
	case BINDER_WORK_DEAD_BINDER_AND_CLEAR:
		seq_printf(m, "%shas cleared dead binder\n", prefix);
		break;
	case BINDER_WORK_CLEAR_DEATH_NOTIFICATION:
		seq_printf(m, "%shas cleared death notification\n", prefix);
		break;
	case BINDER_WORK_FROZEN_BINDER:
		seq_printf(m, "%shas frozen binder\n", prefix);
		break;
	case BINDER_WORK_CLEAR_FREEZE_NOTIFICATION:
		seq_printf(m, "%shas cleared freeze notification\n", prefix);
		break;
	default:
		seq_printf(m, "%sunknown work: type %d\n", prefix, w->type);
		break;
	}
}
static void print_binder_thread_ilocked(struct seq_file *m,
					struct binder_thread *thread,
					int print_always)
{
	struct binder_transaction *t;
	struct binder_work *w;
	size_t start_pos = m->count;
	size_t header_pos;

	seq_printf(m, "  thread %d: l %02x need_return %d tr %d\n",
		   thread->pid, thread->looper,
		   thread->looper_need_return,
		   atomic_read(&thread->tmp_ref));
	header_pos = m->count;
	t = thread->transaction_stack;
	while (t) {
		if (t->from == thread) {
			print_binder_transaction_ilocked(m, thread->proc,
					"    outgoing transaction", t);
			t = t->from_parent;
		} else if (t->to_thread == thread) {
			print_binder_transaction_ilocked(m, thread->proc,
					"    incoming transaction", t);
			t = t->to_parent;
		} else {
			print_binder_transaction_ilocked(m, thread->proc,
					"    bad transaction", t);
			t = NULL;
		}
	}
	list_for_each_entry(w, &thread->todo, entry) {
		print_binder_work_ilocked(m, thread->proc, "    ",
					  "    pending transaction", w);
	}
	if (!print_always && m->count == header_pos)
		m->count = start_pos;
}
static void print_binder_node_nilocked(struct seq_file *m,
				       struct binder_node *node)
{
	struct binder_ref *ref;
	struct binder_work *w;
	int count;

	count = hlist_count_nodes(&node->refs);

	seq_printf(m, "  node %d: u%016llx c%016llx hs %d hw %d ls %d lw %d is %d iw %d tr %d",
		   node->debug_id, (u64)node->ptr, (u64)node->cookie,
		   node->has_strong_ref, node->has_weak_ref,
		   node->local_strong_refs, node->local_weak_refs,
		   node->internal_strong_refs, count, node->tmp_refs);
	if (count) {
		seq_puts(m, " proc");
		hlist_for_each_entry(ref, &node->refs, node_entry)
			seq_printf(m, " %d", ref->proc->pid);
	}
	seq_puts(m, "\n");
	if (node->proc) {
		list_for_each_entry(w, &node->async_todo, entry)
			print_binder_work_ilocked(m, node->proc, "    ",
					"    pending async transaction", w);
	}
}
static void print_binder_ref_olocked(struct seq_file *m,
				     struct binder_ref *ref)
{
	binder_node_lock(ref->node);
	seq_printf(m, "  ref %d: desc %d %snode %d s %d w %d d %pK\n",
		   ref->data.debug_id, ref->data.desc,
		   ref->node->proc ? "" : "dead ",
		   ref->node->debug_id, ref->data.strong,
		   ref->data.weak, ref->death);
	binder_node_unlock(ref->node);
}
static void print_binder_proc(struct seq_file *m,
			      struct binder_proc *proc, int print_all)
{
	struct binder_work *w;
	struct rb_node *n;
	size_t start_pos = m->count;
	size_t header_pos;
	struct binder_node *last_node = NULL;

	seq_printf(m, "proc %d\n", proc->pid);
	seq_printf(m, "context %s\n", proc->context->name);
	header_pos = m->count;

	binder_inner_proc_lock(proc);
	for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n))
		print_binder_thread_ilocked(m, rb_entry(n, struct binder_thread,
						rb_node), print_all);

	for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n)) {
		struct binder_node *node = rb_entry(n, struct binder_node,
						    rb_node);
		if (!print_all && !node->has_async_transaction)
			continue;

		/*
		 * take a temporary reference on the node so it
		 * survives and isn't removed from the tree
		 * while we print it.
		 */
		binder_inc_node_tmpref_ilocked(node);
		/* Need to drop inner lock to take node lock */
		binder_inner_proc_unlock(proc);
		if (last_node)
			binder_put_node(last_node);
		binder_node_inner_lock(node);
		print_binder_node_nilocked(m, node);
		binder_node_inner_unlock(node);
		last_node = node;
		binder_inner_proc_lock(proc);
	}
	binder_inner_proc_unlock(proc);
	if (last_node)
		binder_put_node(last_node);

	if (print_all) {
		binder_proc_lock(proc);
		for (n = rb_first(&proc->refs_by_desc);
		     n != NULL;
		     n = rb_next(n))
			print_binder_ref_olocked(m, rb_entry(n,
							     struct binder_ref,
							     rb_node_desc));
		binder_proc_unlock(proc);
	}
	binder_alloc_print_allocated(m, &proc->alloc);
	binder_inner_proc_lock(proc);
	list_for_each_entry(w, &proc->todo, entry)
		print_binder_work_ilocked(m, proc, "  ",
					  "  pending transaction", w);
	list_for_each_entry(w, &proc->delivered_death, entry) {
		seq_puts(m, "  has delivered dead binder\n");
		break;
	}
	list_for_each_entry(w, &proc->delivered_freeze, entry) {
		seq_puts(m, "  has delivered freeze binder\n");
		break;
	}
	binder_inner_proc_unlock(proc);
	if (!print_all && m->count == header_pos)
		m->count = start_pos;
}
static const char * const binder_return_strings[] = {
	"BR_ERROR",
	"BR_OK",
	"BR_TRANSACTION",
	"BR_REPLY",
	"BR_ACQUIRE_RESULT",
	"BR_DEAD_REPLY",
	"BR_TRANSACTION_COMPLETE",
	"BR_INCREFS",
	"BR_ACQUIRE",
	"BR_RELEASE",
	"BR_DECREFS",
	"BR_ATTEMPT_ACQUIRE",
	"BR_NOOP",
	"BR_SPAWN_LOOPER",
	"BR_FINISHED",
	"BR_DEAD_BINDER",
	"BR_CLEAR_DEATH_NOTIFICATION_DONE",
	"BR_FAILED_REPLY",
	"BR_FROZEN_REPLY",
	"BR_ONEWAY_SPAM_SUSPECT",
	"BR_TRANSACTION_PENDING_FROZEN",
	"BR_FROZEN_BINDER",
	"BR_CLEAR_FREEZE_NOTIFICATION_DONE",
};

static const char * const binder_command_strings[] = {
	"BC_TRANSACTION",
	"BC_REPLY",
	"BC_ACQUIRE_RESULT",
	"BC_FREE_BUFFER",
	"BC_INCREFS",
	"BC_ACQUIRE",
	"BC_RELEASE",
	"BC_DECREFS",
	"BC_INCREFS_DONE",
	"BC_ACQUIRE_DONE",
	"BC_ATTEMPT_ACQUIRE",
	"BC_REGISTER_LOOPER",
	"BC_ENTER_LOOPER",
	"BC_EXIT_LOOPER",
	"BC_REQUEST_DEATH_NOTIFICATION",
	"BC_CLEAR_DEATH_NOTIFICATION",
	"BC_DEAD_BINDER_DONE",
	"BC_TRANSACTION_SG",
	"BC_REPLY_SG",
	"BC_REQUEST_FREEZE_NOTIFICATION",
	"BC_CLEAR_FREEZE_NOTIFICATION",
	"BC_FREEZE_NOTIFICATION_DONE",
};

static const char * const binder_objstat_strings[] = {
	"proc",
	"thread",
	"node",
	"ref",
	"death",
	"transaction",
	"transaction_complete",
	"freeze",
};
static void print_binder_stats(struct seq_file *m, const char *prefix,
			       struct binder_stats *stats)
{
	int i;

	BUILD_BUG_ON(ARRAY_SIZE(stats->bc) !=
		     ARRAY_SIZE(binder_command_strings));
	for (i = 0; i < ARRAY_SIZE(stats->bc); i++) {
		int temp = atomic_read(&stats->bc[i]);

		if (temp)
			seq_printf(m, "%s%s: %d\n", prefix,
				   binder_command_strings[i], temp);
	}

	BUILD_BUG_ON(ARRAY_SIZE(stats->br) !=
		     ARRAY_SIZE(binder_return_strings));
	for (i = 0; i < ARRAY_SIZE(stats->br); i++) {
		int temp = atomic_read(&stats->br[i]);

		if (temp)
			seq_printf(m, "%s%s: %d\n", prefix,
				   binder_return_strings[i], temp);
	}

	BUILD_BUG_ON(ARRAY_SIZE(stats->obj_created) !=
		     ARRAY_SIZE(binder_objstat_strings));
	BUILD_BUG_ON(ARRAY_SIZE(stats->obj_created) !=
		     ARRAY_SIZE(stats->obj_deleted));
	for (i = 0; i < ARRAY_SIZE(stats->obj_created); i++) {
		int created = atomic_read(&stats->obj_created[i]);
		int deleted = atomic_read(&stats->obj_deleted[i]);

		if (created || deleted)
			seq_printf(m, "%s%s: active %d total %d\n",
				   prefix,
				   binder_objstat_strings[i],
				   created - deleted,
				   created);
	}
}
static void print_binder_proc_stats(struct seq_file *m,
				    struct binder_proc *proc)
{
	struct binder_work *w;
	struct binder_thread *thread;
	struct rb_node *n;
	int count, strong, weak, ready_threads;
	size_t free_async_space =
		binder_alloc_get_free_async_space(&proc->alloc);

	seq_printf(m, "proc %d\n", proc->pid);
	seq_printf(m, "context %s\n", proc->context->name);
	count = 0;
	ready_threads = 0;
	binder_inner_proc_lock(proc);
	for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n))
		count++;

	list_for_each_entry(thread, &proc->waiting_threads, waiting_thread_node)
		ready_threads++;

	seq_printf(m, "  threads: %d\n", count);
	seq_printf(m, "  requested threads: %d+%d/%d\n"
			"  ready threads %d\n"
			"  free async space %zd\n", proc->requested_threads,
			proc->requested_threads_started, proc->max_threads,
			ready_threads,
			free_async_space);
	count = 0;
	for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n))
		count++;
	binder_inner_proc_unlock(proc);
	seq_printf(m, "  nodes: %d\n", count);
	count = 0;
	strong = 0;
	weak = 0;
	binder_proc_lock(proc);
	for (n = rb_first(&proc->refs_by_desc); n != NULL; n = rb_next(n)) {
		struct binder_ref *ref = rb_entry(n, struct binder_ref,
						  rb_node_desc);
		count++;
		strong += ref->data.strong;
		weak += ref->data.weak;
	}
	binder_proc_unlock(proc);
	seq_printf(m, "  refs: %d s %d w %d\n", count, strong, weak);

	count = binder_alloc_get_allocated_count(&proc->alloc);
	seq_printf(m, "  buffers: %d\n", count);

	binder_alloc_print_pages(m, &proc->alloc);

	count = 0;
	binder_inner_proc_lock(proc);
	list_for_each_entry(w, &proc->todo, entry) {
		if (w->type == BINDER_WORK_TRANSACTION)
			count++;
	}
	binder_inner_proc_unlock(proc);
	seq_printf(m, "  pending transactions: %d\n", count);

	print_binder_stats(m, "  ", &proc->stats);
}
static int state_show(struct seq_file *m, void *unused)
{
	struct binder_proc *proc;
	struct binder_node *node;
	struct binder_node *last_node = NULL;

	seq_puts(m, "binder state:\n");

	spin_lock(&binder_dead_nodes_lock);
	if (!hlist_empty(&binder_dead_nodes))
		seq_puts(m, "dead nodes:\n");
	hlist_for_each_entry(node, &binder_dead_nodes, dead_node) {
		/*
		 * take a temporary reference on the node so it
		 * survives and isn't removed from the list
		 * while we print it.
		 */
		node->tmp_refs++;
		spin_unlock(&binder_dead_nodes_lock);
		if (last_node)
			binder_put_node(last_node);
		binder_node_lock(node);
		print_binder_node_nilocked(m, node);
		binder_node_unlock(node);
		last_node = node;
		spin_lock(&binder_dead_nodes_lock);
	}
	spin_unlock(&binder_dead_nodes_lock);
	if (last_node)
		binder_put_node(last_node);

	mutex_lock(&binder_procs_lock);
	hlist_for_each_entry(proc, &binder_procs, proc_node)
		print_binder_proc(m, proc, 1);
	mutex_unlock(&binder_procs_lock);

	return 0;
}
static int stats_show(struct seq_file *m, void *unused)
{
	struct binder_proc *proc;

	seq_puts(m, "binder stats:\n");

	print_binder_stats(m, "", &binder_stats);

	mutex_lock(&binder_procs_lock);
	hlist_for_each_entry(proc, &binder_procs, proc_node)
		print_binder_proc_stats(m, proc);
	mutex_unlock(&binder_procs_lock);

	return 0;
}
static int transactions_show(struct seq_file *m, void *unused)
{
	struct binder_proc *proc;

	seq_puts(m, "binder transactions:\n");
	mutex_lock(&binder_procs_lock);
	hlist_for_each_entry(proc, &binder_procs, proc_node)
		print_binder_proc(m, proc, 0);
	mutex_unlock(&binder_procs_lock);

	return 0;
}
static int proc_show(struct seq_file *m, void *unused)
{
	struct binder_proc *itr;
	int pid = (unsigned long)m->private;

	mutex_lock(&binder_procs_lock);
	hlist_for_each_entry(itr, &binder_procs, proc_node) {
		if (itr->pid == pid) {
			seq_puts(m, "binder proc state:\n");
			print_binder_proc(m, itr, 1);
		}
	}
	mutex_unlock(&binder_procs_lock);

	return 0;
}
static void print_binder_transaction_log_entry(struct seq_file *m,
					struct binder_transaction_log_entry *e)
{
	int debug_id = READ_ONCE(e->debug_id_done);
	/*
	 * read barrier to guarantee debug_id_done read before
	 * we print the log values
	 */
	smp_rmb();
	seq_printf(m,
		   "%d: %s from %d:%d to %d:%d context %s node %d handle %d size %d:%d ret %d/%d l=%d",
		   e->debug_id, (e->call_type == 2) ? "reply" :
		   ((e->call_type == 1) ? "async" : "call "), e->from_proc,
		   e->from_thread, e->to_proc, e->to_thread, e->context_name,
		   e->to_node, e->target_handle, e->data_size, e->offsets_size,
		   e->return_error, e->return_error_param,
		   e->return_error_line);
	/*
	 * read-barrier to guarantee read of debug_id_done after
	 * done printing the fields of the entry
	 */
	smp_rmb();
	seq_printf(m, debug_id && (debug_id == READ_ONCE(e->debug_id_done)) ?
			"\n" : " (incomplete)\n");
}
static int transaction_log_show(struct seq_file *m, void *unused)
{
	struct binder_transaction_log *log = m->private;
	unsigned int log_cur = atomic_read(&log->cur);
	unsigned int count;
	unsigned int cur;
	int i;

	count = log_cur + 1;
	cur = count < ARRAY_SIZE(log->entry) && !log->full ?
		0 : count % ARRAY_SIZE(log->entry);
	if (count > ARRAY_SIZE(log->entry) || log->full)
		count = ARRAY_SIZE(log->entry);
	for (i = 0; i < count; i++) {
		unsigned int index = cur++ % ARRAY_SIZE(log->entry);

		print_binder_transaction_log_entry(m, &log->entry[index]);
	}
	return 0;
}
const struct file_operations binder_fops = {
	.owner = THIS_MODULE,
	.poll = binder_poll,
	.unlocked_ioctl = binder_ioctl,
	.compat_ioctl = compat_ptr_ioctl,
	.mmap = binder_mmap,
	.open = binder_open,
	.flush = binder_flush,
	.release = binder_release,
};

DEFINE_SHOW_ATTRIBUTE(state);
DEFINE_SHOW_ATTRIBUTE(stats);
DEFINE_SHOW_ATTRIBUTE(transactions);
DEFINE_SHOW_ATTRIBUTE(transaction_log);
const struct binder_debugfs_entry binder_debugfs_entries[] = {
	{
		.name = "state",
		.mode = 0444,
		.fops = &state_fops,
		.data = NULL,
	},
	{
		.name = "stats",
		.mode = 0444,
		.fops = &stats_fops,
		.data = NULL,
	},
	{
		.name = "transactions",
		.mode = 0444,
		.fops = &transactions_fops,
		.data = NULL,
	},
	{
		.name = "transaction_log",
		.mode = 0444,
		.fops = &transaction_log_fops,
		.data = &binder_transaction_log,
	},
	{
		.name = "failed_transaction_log",
		.mode = 0444,
		.fops = &transaction_log_fops,
		.data = &binder_transaction_log_failed,
	},
	{} /* terminator */
};
static int __init init_binder_device(const char *name)
{
	int ret;
	struct binder_device *binder_device;

	binder_device = kzalloc(sizeof(*binder_device), GFP_KERNEL);
	if (!binder_device)
		return -ENOMEM;

	binder_device->miscdev.fops = &binder_fops;
	binder_device->miscdev.minor = MISC_DYNAMIC_MINOR;
	binder_device->miscdev.name = name;

	refcount_set(&binder_device->ref, 1);
	binder_device->context.binder_context_mgr_uid = INVALID_UID;
	binder_device->context.name = name;
	mutex_init(&binder_device->context.context_mgr_node_lock);

	ret = misc_register(&binder_device->miscdev);
	if (ret < 0) {
		kfree(binder_device);
		return ret;
	}

	hlist_add_head(&binder_device->hlist, &binder_devices);

	return ret;
}
static int __init binder_init(void)
{
	int ret;
	char *device_name, *device_tmp;
	struct binder_device *device;
	struct hlist_node *tmp;
	char *device_names = NULL;
	const struct binder_debugfs_entry *db_entry;

	ret = binder_alloc_shrinker_init();
	if (ret)
		return ret;

	atomic_set(&binder_transaction_log.cur, ~0U);
	atomic_set(&binder_transaction_log_failed.cur, ~0U);

	binder_debugfs_dir_entry_root = debugfs_create_dir("binder", NULL);

	binder_for_each_debugfs_entry(db_entry)
		debugfs_create_file(db_entry->name,
				    db_entry->mode,
				    binder_debugfs_dir_entry_root,
				    db_entry->data,
				    db_entry->fops);

	binder_debugfs_dir_entry_proc = debugfs_create_dir("proc",
						binder_debugfs_dir_entry_root);

	if (!IS_ENABLED(CONFIG_ANDROID_BINDERFS) &&
	    strcmp(binder_devices_param, "") != 0) {
		/*
		 * Copy the module_parameter string, because we don't want to
		 * tokenize it in-place.
		 */
		device_names = kstrdup(binder_devices_param, GFP_KERNEL);
		if (!device_names) {
			ret = -ENOMEM;
			goto err_alloc_device_names_failed;
		}

		device_tmp = device_names;
		while ((device_name = strsep(&device_tmp, ","))) {
			ret = init_binder_device(device_name);
			if (ret)
				goto err_init_binder_device_failed;
		}
	}

	ret = init_binderfs();
	if (ret)
		goto err_init_binder_device_failed;

	return ret;

err_init_binder_device_failed:
	hlist_for_each_entry_safe(device, tmp, &binder_devices, hlist) {
		misc_deregister(&device->miscdev);
		hlist_del(&device->hlist);
		kfree(device);
	}

	kfree(device_names);

err_alloc_device_names_failed:
	debugfs_remove_recursive(binder_debugfs_dir_entry_root);
	binder_alloc_shrinker_exit();

	return ret;
}

device_initcall(binder_init);
#define CREATE_TRACE_POINTS
#include "binder_trace.h"

MODULE_LICENSE("GPL v2");