/* binder.c
 *
 * Android IPC Subsystem
 *
 * Copyright (C) 2007-2008 Google, Inc.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <asm/cacheflush.h>
#include <linux/fdtable.h>
#include <linux/file.h>
#include <linux/freezer.h>
#include <linux/fs.h>
#include <linux/list.h>
#include <linux/miscdevice.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/nsproxy.h>
#include <linux/poll.h>
#include <linux/debugfs.h>
#include <linux/rbtree.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/uaccess.h>
#include <linux/vmalloc.h>
#include <linux/slab.h>
#include <linux/pid_namespace.h>
#include <linux/security.h>

#ifdef CONFIG_ANDROID_BINDER_IPC_32BIT
#define BINDER_IPC_32BIT 1
#endif

#include <uapi/linux/android/binder.h>
#include "binder_trace.h"

static DEFINE_MUTEX(binder_main_lock);
static DEFINE_MUTEX(binder_deferred_lock);
static DEFINE_MUTEX(binder_mmap_lock);

static HLIST_HEAD(binder_procs);
static HLIST_HEAD(binder_deferred_list);
static HLIST_HEAD(binder_dead_nodes);

static struct dentry *binder_debugfs_dir_entry_root;
static struct dentry *binder_debugfs_dir_entry_proc;
static struct binder_node *binder_context_mgr_node;
static kuid_t binder_context_mgr_uid = INVALID_UID;
static int binder_last_id;
static struct workqueue_struct *binder_deferred_workqueue;

#define BINDER_DEBUG_ENTRY(name) \
static int binder_##name##_open(struct inode *inode, struct file *file) \
{ \
	return single_open(file, binder_##name##_show, inode->i_private); \
} \
\
static const struct file_operations binder_##name##_fops = { \
	.owner = THIS_MODULE, \
	.open = binder_##name##_open, \
	.read = seq_read, \
	.llseek = seq_lseek, \
	.release = single_release, \
}

static int binder_proc_show(struct seq_file *m, void *unused);
BINDER_DEBUG_ENTRY(proc);

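/*
 * BINDER_DEBUG_ENTRY(name) pairs a hand-written binder_<name>_show()
 * seq_file callback with generated binder_<name>_open() and
 * binder_<name>_fops definitions, so each debugfs file needs only its
 * _show function plus one macro invocation.  BINDER_DEBUG_ENTRY(proc)
 * above, for instance, produces binder_proc_open() and binder_proc_fops
 * for the per-process debugfs entries created elsewhere in this file.
 */
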
/* This is only defined in include/asm-arm/sizes.h */
#ifndef SZ_1K
#define SZ_1K 0x400
#endif

#ifndef SZ_4M
#define SZ_4M 0x400000
#endif

#define FORBIDDEN_MMAP_FLAGS (VM_WRITE)

#define BINDER_SMALL_BUF_SIZE (PAGE_SIZE * 64)

enum {
	BINDER_DEBUG_USER_ERROR             = 1U << 0,
	BINDER_DEBUG_FAILED_TRANSACTION     = 1U << 1,
	BINDER_DEBUG_DEAD_TRANSACTION       = 1U << 2,
	BINDER_DEBUG_OPEN_CLOSE             = 1U << 3,
	BINDER_DEBUG_DEAD_BINDER            = 1U << 4,
	BINDER_DEBUG_DEATH_NOTIFICATION     = 1U << 5,
	BINDER_DEBUG_READ_WRITE             = 1U << 6,
	BINDER_DEBUG_USER_REFS              = 1U << 7,
	BINDER_DEBUG_THREADS                = 1U << 8,
	BINDER_DEBUG_TRANSACTION            = 1U << 9,
	BINDER_DEBUG_TRANSACTION_COMPLETE   = 1U << 10,
	BINDER_DEBUG_FREE_BUFFER            = 1U << 11,
	BINDER_DEBUG_INTERNAL_REFS          = 1U << 12,
	BINDER_DEBUG_BUFFER_ALLOC           = 1U << 13,
	BINDER_DEBUG_PRIORITY_CAP           = 1U << 14,
	BINDER_DEBUG_BUFFER_ALLOC_ASYNC     = 1U << 15,
};
static uint32_t binder_debug_mask = BINDER_DEBUG_USER_ERROR |
	BINDER_DEBUG_FAILED_TRANSACTION | BINDER_DEBUG_DEAD_TRANSACTION;
module_param_named(debug_mask, binder_debug_mask, uint, S_IWUSR | S_IRUGO);

static bool binder_debug_no_lock;
module_param_named(proc_no_lock, binder_debug_no_lock, bool, S_IWUSR | S_IRUGO);

static DECLARE_WAIT_QUEUE_HEAD(binder_user_error_wait);
static int binder_stop_on_user_error;

static int binder_set_stop_on_user_error(const char *val,
					 struct kernel_param *kp)
{
	int ret;

	ret = param_set_int(val, kp);
	if (binder_stop_on_user_error < 2)
		wake_up(&binder_user_error_wait);
	return ret;
}
module_param_call(stop_on_user_error, binder_set_stop_on_user_error,
		  param_get_int, &binder_stop_on_user_error, S_IWUSR | S_IRUGO);

#define binder_debug(mask, x...) \
	do { \
		if (binder_debug_mask & mask) \
			pr_info(x); \
	} while (0)

#define binder_user_error(x...) \
	do { \
		if (binder_debug_mask & BINDER_DEBUG_USER_ERROR) \
			pr_info(x); \
		if (binder_stop_on_user_error) \
			binder_stop_on_user_error = 2; \
	} while (0)

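/*
 * All debug output is gated by binder_debug_mask, settable at runtime via
 * the debug_mask module parameter.  binder_user_error() additionally bumps
 * binder_stop_on_user_error from 1 to 2 when the stop_on_user_error
 * parameter is armed; binder threads then block on binder_user_error_wait
 * until the parameter is written back below 2 (see the setter above, which
 * wakes the waiters), letting the offending state be inspected live.
 */
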
enum binder_stat_types {
	BINDER_STAT_PROC,
	BINDER_STAT_THREAD,
	BINDER_STAT_NODE,
	BINDER_STAT_REF,
	BINDER_STAT_DEATH,
	BINDER_STAT_TRANSACTION,
	BINDER_STAT_TRANSACTION_COMPLETE,
	BINDER_STAT_COUNT
};

struct binder_stats {
	int br[_IOC_NR(BR_FAILED_REPLY) + 1];
	int bc[_IOC_NR(BC_DEAD_BINDER_DONE) + 1];
	int obj_created[BINDER_STAT_COUNT];
	int obj_deleted[BINDER_STAT_COUNT];
};

static struct binder_stats binder_stats;

static inline void binder_stats_deleted(enum binder_stat_types type)
{
	binder_stats.obj_deleted[type]++;
}

static inline void binder_stats_created(enum binder_stat_types type)
{
	binder_stats.obj_created[type]++;
}

struct binder_transaction_log_entry {
	int debug_id;
	int call_type;
	int from_proc;
	int from_thread;
	int target_handle;
	int to_proc;
	int to_thread;
	int to_node;
	int data_size;
	int offsets_size;
};

struct binder_transaction_log {
	int next;
	int full;
	struct binder_transaction_log_entry entry[32];
};

static struct binder_transaction_log binder_transaction_log;
static struct binder_transaction_log binder_transaction_log_failed;

static struct binder_transaction_log_entry *binder_transaction_log_add(
	struct binder_transaction_log *log)
{
	struct binder_transaction_log_entry *e;

	e = &log->entry[log->next];
	memset(e, 0, sizeof(*e));
	log->next++;
	if (log->next == ARRAY_SIZE(log->entry)) {
		log->next = 0;
		log->full = 1;
	}
	return e;
}

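/*
 * The transaction logs are fixed 32-entry ring buffers:
 * binder_transaction_log records the most recent transactions,
 * binder_transaction_log_failed only those that failed.
 * binder_transaction_log_add() hands out the next slot, zeroed, and sets
 * ->full once the index has wrapped, so the debugfs dumper knows whether
 * all 32 entries are valid or only those before ->next.
 */
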
struct binder_work {
	struct list_head entry;
	enum {
		BINDER_WORK_TRANSACTION = 1,
		BINDER_WORK_TRANSACTION_COMPLETE,
		BINDER_WORK_NODE,
		BINDER_WORK_DEAD_BINDER,
		BINDER_WORK_DEAD_BINDER_AND_CLEAR,
		BINDER_WORK_CLEAR_DEATH_NOTIFICATION,
	} type;
};

struct binder_node {
	int debug_id;
	struct binder_work work;
	union {
		struct rb_node rb_node;
		struct hlist_node dead_node;
	};
	struct binder_proc *proc;
	struct hlist_head refs;
	int internal_strong_refs;
	int local_weak_refs;
	int local_strong_refs;
	binder_uintptr_t ptr;
	binder_uintptr_t cookie;
	unsigned has_strong_ref:1;
	unsigned pending_strong_ref:1;
	unsigned has_weak_ref:1;
	unsigned pending_weak_ref:1;
	unsigned has_async_transaction:1;
	unsigned accept_fds:1;
	unsigned min_priority:8;
	struct list_head async_todo;
};

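/*
 * A binder_node is the kernel-side identity of one local binder object.
 * ->refs chains every binder_ref that remote processes hold on it;
 * internal_strong_refs counts strong references taken on behalf of those
 * remote refs, while local_strong_refs/local_weak_refs count references
 * taken within the owning process (e.g. for buffers still in flight).
 * The has_*/pending_* bits track which refcount transitions have been
 * reported to, or are awaiting acknowledgment from, the owning userspace
 * process via BR_INCREFS/BR_ACQUIRE and BC_INCREFS_DONE/BC_ACQUIRE_DONE.
 */
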
struct binder_ref_death {
	struct binder_work work;
	binder_uintptr_t cookie;
};

struct binder_ref {
	/* Lookups needed: */
	/*   node + proc => ref (transaction) */
	/*   desc + proc => ref (transaction, inc/dec ref) */
	/*   node => refs + procs (proc exit) */
	int debug_id;
	struct rb_node rb_node_desc;
	struct rb_node rb_node_node;
	struct hlist_node node_entry;
	struct binder_proc *proc;
	struct binder_node *node;
	uint32_t desc;
	int strong;
	int weak;
	struct binder_ref_death *death;
};

struct binder_buffer {
	struct list_head entry; /* free and allocated entries by address */
	struct rb_node rb_node; /* free entry by size or allocated entry */
				/* by address */
	unsigned free:1;
	unsigned allow_user_free:1;
	unsigned async_transaction:1;
	unsigned debug_id:29;

	struct binder_transaction *transaction;

	struct binder_node *target_node;
	size_t data_size;
	size_t offsets_size;
	uint8_t data[0];
};

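/*
 * binder_buffer is a header placed directly in front of its payload:
 * data[] is a flexible array holding data_size bytes of transaction data
 * followed by offsets_size bytes of object offsets, each chunk
 * pointer-aligned.  The lookup structures reflect that layout: ->entry
 * keeps all buffers, free or not, ordered by address, while ->rb_node
 * files a buffer either in the free tree (keyed by size, for best-fit
 * allocation) or in the allocated tree (keyed by address).
 */
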
enum binder_deferred_state {
	BINDER_DEFERRED_PUT_FILES    = 0x01,
	BINDER_DEFERRED_FLUSH        = 0x02,
	BINDER_DEFERRED_RELEASE      = 0x04,
};

struct binder_proc {
	struct hlist_node proc_node;
	struct rb_root threads;
	struct rb_root nodes;
	struct rb_root refs_by_desc;
	struct rb_root refs_by_node;
	int pid;
	struct vm_area_struct *vma;
	struct mm_struct *vma_vm_mm;
	struct task_struct *tsk;
	struct files_struct *files;
	struct hlist_node deferred_work_node;
	int deferred_work;
	void *buffer;
	ptrdiff_t user_buffer_offset;

	struct list_head buffers;
	struct rb_root free_buffers;
	struct rb_root allocated_buffers;
	size_t free_async_space;

	struct page **pages;
	size_t buffer_size;
	uint32_t buffer_free;
	struct list_head todo;
	wait_queue_head_t wait;
	struct binder_stats stats;
	struct list_head delivered_death;
	int max_threads;
	int requested_threads;
	int requested_threads_started;
	int ready_threads;
	long default_priority;
	struct dentry *debugfs_entry;
};

enum {
	BINDER_LOOPER_STATE_REGISTERED  = 0x01,
	BINDER_LOOPER_STATE_ENTERED     = 0x02,
	BINDER_LOOPER_STATE_EXITED      = 0x04,
	BINDER_LOOPER_STATE_INVALID     = 0x08,
	BINDER_LOOPER_STATE_WAITING     = 0x10,
	BINDER_LOOPER_STATE_NEED_RETURN = 0x20
};

struct binder_thread {
	struct binder_proc *proc;
	struct rb_node rb_node;
	int pid;
	int looper;
	struct binder_transaction *transaction_stack;
	struct list_head todo;
	uint32_t return_error; /* Write failed, return error code in read buf */
	uint32_t return_error2; /* Write failed, return error code in read */
		/* buffer. Used when sending a reply to a dead process that */
		/* we are also waiting on */
	wait_queue_head_t wait;
	struct binder_stats stats;
};

struct binder_transaction {
	int debug_id;
	struct binder_work work;
	struct binder_thread *from;
	struct binder_transaction *from_parent;
	struct binder_proc *to_proc;
	struct binder_thread *to_thread;
	struct binder_transaction *to_parent;
	unsigned need_reply:1;
	/* unsigned is_dead:1; */	/* not used at the moment */

	struct binder_buffer *buffer;
	unsigned int code;
	unsigned int flags;
	long priority;
	long saved_priority;
	kuid_t sender_euid;
};

static void
binder_defer_work(struct binder_proc *proc, enum binder_deferred_state defer);

static int task_get_unused_fd_flags(struct binder_proc *proc, int flags)
{
	struct files_struct *files = proc->files;
	unsigned long rlim_cur;
	unsigned long irqs;

	if (files == NULL)
		return -ESRCH;

	if (!lock_task_sighand(proc->tsk, &irqs))
		return -EMFILE;

	rlim_cur = task_rlimit(proc->tsk, RLIMIT_NOFILE);
	unlock_task_sighand(proc->tsk, &irqs);

	return __alloc_fd(files, 0, rlim_cur, flags);
}

/*
 * copied from fd_install
 */
static void task_fd_install(
	struct binder_proc *proc, unsigned int fd, struct file *file)
{
	if (proc->files)
		__fd_install(proc->files, fd, file);
}

/*
 * copied from sys_close
 */
static long task_close_fd(struct binder_proc *proc, unsigned int fd)
{
	int retval;

	if (proc->files == NULL)
		return -ESRCH;

	retval = __close_fd(proc->files, fd);
	/* can't restart close syscall because file table entry was cleared */
	if (unlikely(retval == -ERESTARTSYS ||
		     retval == -ERESTARTNOINTR ||
		     retval == -ERESTARTNOHAND ||
		     retval == -ERESTART_RESTARTBLOCK))
		retval = -EINTR;

	return retval;
}

static inline void binder_lock(const char *tag)
{
	trace_binder_lock(tag);
	mutex_lock(&binder_main_lock);
	trace_binder_locked(tag);
}

static inline void binder_unlock(const char *tag)
{
	trace_binder_unlock(tag);
	mutex_unlock(&binder_main_lock);
}

static void binder_set_nice(long nice)
{
	long min_nice;

	if (can_nice(current, nice)) {
		set_user_nice(current, nice);
		return;
	}
	min_nice = rlimit_to_nice(current->signal->rlim[RLIMIT_NICE].rlim_cur);
	binder_debug(BINDER_DEBUG_PRIORITY_CAP,
		     "%d: nice value %ld not allowed use %ld instead\n",
		     current->pid, nice, min_nice);
	set_user_nice(current, min_nice);
	if (min_nice <= MAX_NICE)
		return;
	binder_user_error("%d RLIMIT_NICE not set\n", current->pid);
}

static size_t binder_buffer_size(struct binder_proc *proc,
				 struct binder_buffer *buffer)
{
	if (list_is_last(&buffer->entry, &proc->buffers))
		return proc->buffer + proc->buffer_size - (void *)buffer->data;
	return (size_t)list_entry(buffer->entry.next,
			  struct binder_buffer, entry) - (size_t)buffer->data;
}

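/*
 * A buffer's usable size is implicit in the address-ordered ->buffers
 * list: it is the gap between this buffer's data[] and the start of the
 * next buffer's header (or the end of the whole mapping for the last
 * entry).  For a buffer at kernel address b whose successor starts at n,
 * the size is n - (b + sizeof(struct binder_buffer)), so no per-buffer
 * length field is needed for free-space accounting.
 */
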
static void binder_insert_free_buffer(struct binder_proc *proc,
				      struct binder_buffer *new_buffer)
{
	struct rb_node **p = &proc->free_buffers.rb_node;
	struct rb_node *parent = NULL;
	struct binder_buffer *buffer;
	size_t buffer_size;
	size_t new_buffer_size;

	BUG_ON(!new_buffer->free);

	new_buffer_size = binder_buffer_size(proc, new_buffer);

	binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
		     "%d: add free buffer, size %zd, at %p\n",
		     proc->pid, new_buffer_size, new_buffer);

	while (*p) {
		parent = *p;
		buffer = rb_entry(parent, struct binder_buffer, rb_node);
		BUG_ON(!buffer->free);

		buffer_size = binder_buffer_size(proc, buffer);

		if (new_buffer_size < buffer_size)
			p = &parent->rb_left;
		else
			p = &parent->rb_right;
	}
	rb_link_node(&new_buffer->rb_node, parent, p);
	rb_insert_color(&new_buffer->rb_node, &proc->free_buffers);
}

static void binder_insert_allocated_buffer(struct binder_proc *proc,
					   struct binder_buffer *new_buffer)
{
	struct rb_node **p = &proc->allocated_buffers.rb_node;
	struct rb_node *parent = NULL;
	struct binder_buffer *buffer;

	BUG_ON(new_buffer->free);

	while (*p) {
		parent = *p;
		buffer = rb_entry(parent, struct binder_buffer, rb_node);
		BUG_ON(buffer->free);

		if (new_buffer < buffer)
			p = &parent->rb_left;
		else if (new_buffer > buffer)
			p = &parent->rb_right;
		else
			BUG();
	}
	rb_link_node(&new_buffer->rb_node, parent, p);
	rb_insert_color(&new_buffer->rb_node, &proc->allocated_buffers);
}

static struct binder_buffer *binder_buffer_lookup(struct binder_proc *proc,
						  uintptr_t user_ptr)
{
	struct rb_node *n = proc->allocated_buffers.rb_node;
	struct binder_buffer *buffer;
	struct binder_buffer *kern_ptr;

	kern_ptr = (struct binder_buffer *)(user_ptr - proc->user_buffer_offset
		- offsetof(struct binder_buffer, data));

	while (n) {
		buffer = rb_entry(n, struct binder_buffer, rb_node);
		BUG_ON(buffer->free);

		if (kern_ptr < buffer)
			n = n->rb_left;
		else if (kern_ptr > buffer)
			n = n->rb_right;
		else
			return buffer;
	}
	return NULL;
}

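/*
 * Userspace only ever sees the address of a buffer's data[] within its own
 * mmap of the binder area.  Since the kernel and user mappings cover the
 * same pages at a constant displacement (proc->user_buffer_offset),
 * subtracting that offset and then offsetof(struct binder_buffer, data)
 * recovers the kernel address of the buffer header, which is then looked
 * up in the allocated tree so pointers we never handed out are rejected.
 */
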
static int binder_update_page_range(struct binder_proc *proc, int allocate,
				    void *start, void *end,
				    struct vm_area_struct *vma)
{
	void *page_addr;
	unsigned long user_page_addr;
	struct page **page;
	struct mm_struct *mm;

	binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
		     "%d: %s pages %p-%p\n", proc->pid,
		     allocate ? "allocate" : "free", start, end);

	if (end <= start)
		return 0;

	trace_binder_update_page_range(proc, allocate, start, end);

	if (vma)
		mm = NULL;
	else
		mm = get_task_mm(proc->tsk);

	if (mm) {
		down_write(&mm->mmap_sem);
		vma = proc->vma;
		if (vma && mm != proc->vma_vm_mm) {
			pr_err("%d: vma mm and task mm mismatch\n",
				proc->pid);
			vma = NULL;
		}
	}

	if (allocate == 0)
		goto free_range;

	if (vma == NULL) {
		pr_err("%d: binder_alloc_buf failed to map pages in userspace, no vma\n",
			proc->pid);
		goto err_no_vma;
	}

	for (page_addr = start; page_addr < end; page_addr += PAGE_SIZE) {
		int ret;

		page = &proc->pages[(page_addr - proc->buffer) / PAGE_SIZE];

		BUG_ON(*page);
		*page = alloc_page(GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO);
		if (*page == NULL) {
			pr_err("%d: binder_alloc_buf failed for page at %p\n",
				proc->pid, page_addr);
			goto err_alloc_page_failed;
		}
		ret = map_kernel_range_noflush((unsigned long)page_addr,
					PAGE_SIZE, PAGE_KERNEL, page);
		flush_cache_vmap((unsigned long)page_addr,
				(unsigned long)page_addr + PAGE_SIZE);
		if (ret != 1) {
			pr_err("%d: binder_alloc_buf failed to map page at %p in kernel\n",
			       proc->pid, page_addr);
			goto err_map_kernel_failed;
		}
		user_page_addr =
			(uintptr_t)page_addr + proc->user_buffer_offset;
		ret = vm_insert_page(vma, user_page_addr, page[0]);
		if (ret) {
			pr_err("%d: binder_alloc_buf failed to map page at %lx in userspace\n",
			       proc->pid, user_page_addr);
			goto err_vm_insert_page_failed;
		}
		/* vm_insert_page does not seem to increment the refcount */
	}
	if (mm) {
		up_write(&mm->mmap_sem);
		mmput(mm);
	}
	return 0;

free_range:
	for (page_addr = end - PAGE_SIZE; page_addr >= start;
	     page_addr -= PAGE_SIZE) {
		page = &proc->pages[(page_addr - proc->buffer) / PAGE_SIZE];
		if (vma)
			zap_page_range(vma, (uintptr_t)page_addr +
				proc->user_buffer_offset, PAGE_SIZE, NULL);
err_vm_insert_page_failed:
		unmap_kernel_range((unsigned long)page_addr, PAGE_SIZE);
err_map_kernel_failed:
		__free_page(*page);
		*page = NULL;
err_alloc_page_failed:
		;
	}
err_no_vma:
	if (mm) {
		up_write(&mm->mmap_sem);
		mmput(mm);
	}
	return -ENOMEM;
}

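/*
 * binder_update_page_range() is used both ways: allocate != 0 backs the
 * range [start, end) with fresh pages, mapping each one into the kernel
 * side (map_kernel_range_noflush) and into the target's vma
 * (vm_insert_page); allocate == 0 tears both mappings down and frees the
 * pages.  The error labels sit inside the free loop on purpose: a failure
 * partway through allocation falls through into exactly the unwind steps
 * that the already-completed pages need, walking backwards from the
 * failure point.
 */
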
static struct binder_buffer *binder_alloc_buf(struct binder_proc *proc,
					      size_t data_size,
					      size_t offsets_size, int is_async)
{
	struct rb_node *n = proc->free_buffers.rb_node;
	struct binder_buffer *buffer;
	size_t buffer_size;
	struct rb_node *best_fit = NULL;
	void *has_page_addr;
	void *end_page_addr;
	size_t size;

	if (proc->vma == NULL) {
		pr_err("%d: binder_alloc_buf, no vma\n",
		       proc->pid);
		return NULL;
	}

	size = ALIGN(data_size, sizeof(void *)) +
		ALIGN(offsets_size, sizeof(void *));

	if (size < data_size || size < offsets_size) {
		binder_user_error("%d: got transaction with invalid size %zd-%zd\n",
				proc->pid, data_size, offsets_size);
		return NULL;
	}

	if (is_async &&
	    proc->free_async_space < size + sizeof(struct binder_buffer)) {
		binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
			     "%d: binder_alloc_buf size %zd failed, no async space left\n",
			      proc->pid, size);
		return NULL;
	}

	while (n) {
		buffer = rb_entry(n, struct binder_buffer, rb_node);
		BUG_ON(!buffer->free);
		buffer_size = binder_buffer_size(proc, buffer);

		if (size < buffer_size) {
			best_fit = n;
			n = n->rb_left;
		} else if (size > buffer_size)
			n = n->rb_right;
		else {
			best_fit = n;
			break;
		}
	}
	if (best_fit == NULL) {
		pr_err("%d: binder_alloc_buf size %zd failed, no address space\n",
			proc->pid, size);
		return NULL;
	}
	if (n == NULL) {
		buffer = rb_entry(best_fit, struct binder_buffer, rb_node);
		buffer_size = binder_buffer_size(proc, buffer);
	}

	binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
		     "%d: binder_alloc_buf size %zd got buffer %p size %zd\n",
		      proc->pid, size, buffer, buffer_size);

	has_page_addr =
		(void *)(((uintptr_t)buffer->data + buffer_size) & PAGE_MASK);
	if (n == NULL) {
		if (size + sizeof(struct binder_buffer) + 4 >= buffer_size)
			buffer_size = size; /* no room for other buffers */
		else
			buffer_size = size + sizeof(struct binder_buffer);
	}
	end_page_addr =
		(void *)PAGE_ALIGN((uintptr_t)buffer->data + buffer_size);
	if (end_page_addr > has_page_addr)
		end_page_addr = has_page_addr;
	if (binder_update_page_range(proc, 1,
	    (void *)PAGE_ALIGN((uintptr_t)buffer->data), end_page_addr, NULL))
		return NULL;

	rb_erase(best_fit, &proc->free_buffers);
	buffer->free = 0;
	binder_insert_allocated_buffer(proc, buffer);
	if (buffer_size != size) {
		struct binder_buffer *new_buffer = (void *)buffer->data + size;

		list_add(&new_buffer->entry, &buffer->entry);
		new_buffer->free = 1;
		binder_insert_free_buffer(proc, new_buffer);
	}
	binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
		     "%d: binder_alloc_buf size %zd got %p\n",
		      proc->pid, size, buffer);
	buffer->data_size = data_size;
	buffer->offsets_size = offsets_size;
	buffer->async_transaction = is_async;
	if (is_async) {
		proc->free_async_space -= size + sizeof(struct binder_buffer);
		binder_debug(BINDER_DEBUG_BUFFER_ALLOC_ASYNC,
			     "%d: binder_alloc_buf size %zd async free %zd\n",
			      proc->pid, size, proc->free_async_space);
	}

	return buffer;
}

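/*
 * Allocation is best-fit over the size-ordered free tree: the walk keeps
 * the smallest free buffer that still fits (best_fit) and stops early on
 * an exact match (leaving n non-NULL).  The request itself is the
 * pointer-aligned data size plus the pointer-aligned offsets size, checked
 * for overflow first.  If the chosen buffer is larger than needed and the
 * remainder can hold at least a header plus four bytes, it is split and
 * the tail is reinserted as a new free buffer.
 */
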
static void *buffer_start_page(struct binder_buffer *buffer)
{
	return (void *)((uintptr_t)buffer & PAGE_MASK);
}

static void *buffer_end_page(struct binder_buffer *buffer)
{
	return (void *)(((uintptr_t)(buffer + 1) - 1) & PAGE_MASK);
}

static void binder_delete_free_buffer(struct binder_proc *proc,
				      struct binder_buffer *buffer)
{
	struct binder_buffer *prev, *next = NULL;
	int free_page_end = 1;
	int free_page_start = 1;

	BUG_ON(proc->buffers.next == &buffer->entry);
	prev = list_entry(buffer->entry.prev, struct binder_buffer, entry);
	BUG_ON(!prev->free);
	if (buffer_end_page(prev) == buffer_start_page(buffer)) {
		free_page_start = 0;
		if (buffer_end_page(prev) == buffer_end_page(buffer))
			free_page_end = 0;
		binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
			     "%d: merge free, buffer %p share page with %p\n",
			     proc->pid, buffer, prev);
	}

	if (!list_is_last(&buffer->entry, &proc->buffers)) {
		next = list_entry(buffer->entry.next,
				  struct binder_buffer, entry);
		if (buffer_start_page(next) == buffer_end_page(buffer)) {
			free_page_end = 0;
			if (buffer_start_page(next) ==
			    buffer_start_page(buffer))
				free_page_start = 0;
			binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
				     "%d: merge free, buffer %p share page with %p\n",
				     proc->pid, buffer, next);
		}
	}
	list_del(&buffer->entry);
	if (free_page_start || free_page_end) {
		binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
			     "%d: merge free, buffer %p do not share page%s%s with %p or %p\n",
			     proc->pid, buffer, free_page_start ? "" : " end",
			     free_page_end ? "" : " start", prev, next);
		binder_update_page_range(proc, 0, free_page_start ?
			buffer_start_page(buffer) : buffer_end_page(buffer),
			(free_page_end ? buffer_end_page(buffer) :
			buffer_start_page(buffer)) + PAGE_SIZE, NULL);
	}
}

static void binder_free_buf(struct binder_proc *proc,
			    struct binder_buffer *buffer)
{
	size_t size, buffer_size;

	buffer_size = binder_buffer_size(proc, buffer);

	size = ALIGN(buffer->data_size, sizeof(void *)) +
		ALIGN(buffer->offsets_size, sizeof(void *));

	binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
		     "%d: binder_free_buf %p size %zd buffer_size %zd\n",
		      proc->pid, buffer, size, buffer_size);

	BUG_ON(buffer->free);
	BUG_ON(size > buffer_size);
	BUG_ON(buffer->transaction != NULL);
	BUG_ON((void *)buffer < proc->buffer);
	BUG_ON((void *)buffer > proc->buffer + proc->buffer_size);

	if (buffer->async_transaction) {
		proc->free_async_space += size + sizeof(struct binder_buffer);

		binder_debug(BINDER_DEBUG_BUFFER_ALLOC_ASYNC,
			     "%d: binder_free_buf size %zd async free %zd\n",
			      proc->pid, size, proc->free_async_space);
	}

	binder_update_page_range(proc, 0,
		(void *)PAGE_ALIGN((uintptr_t)buffer->data),
		(void *)(((uintptr_t)buffer->data + buffer_size) & PAGE_MASK),
		NULL);
	rb_erase(&buffer->rb_node, &proc->allocated_buffers);
	buffer->free = 1;
	if (!list_is_last(&buffer->entry, &proc->buffers)) {
		struct binder_buffer *next = list_entry(buffer->entry.next,
						struct binder_buffer, entry);

		if (next->free) {
			rb_erase(&next->rb_node, &proc->free_buffers);
			binder_delete_free_buffer(proc, next);
		}
	}
	if (proc->buffers.next != &buffer->entry) {
		struct binder_buffer *prev = list_entry(buffer->entry.prev,
						struct binder_buffer, entry);

		if (prev->free) {
			binder_delete_free_buffer(proc, buffer);
			rb_erase(&prev->rb_node, &proc->free_buffers);
			buffer = prev;
		}
	}
	binder_insert_free_buffer(proc, buffer);
}

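/*
 * Freeing releases the fully-covered pages immediately and then coalesces
 * with both neighbours in the address-ordered list: a free successor is
 * deleted so this buffer absorbs its range, and if the predecessor is
 * free, this buffer's own header is deleted and the predecessor grows
 * instead.  The surviving buffer is reinserted into the free tree under
 * its new, possibly larger, size.
 */
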
static struct binder_node *binder_get_node(struct binder_proc *proc,
					   binder_uintptr_t ptr)
{
	struct rb_node *n = proc->nodes.rb_node;
	struct binder_node *node;

	while (n) {
		node = rb_entry(n, struct binder_node, rb_node);

		if (ptr < node->ptr)
			n = n->rb_left;
		else if (ptr > node->ptr)
			n = n->rb_right;
		else
			return node;
	}
	return NULL;
}

static struct binder_node *binder_new_node(struct binder_proc *proc,
					   binder_uintptr_t ptr,
					   binder_uintptr_t cookie)
{
	struct rb_node **p = &proc->nodes.rb_node;
	struct rb_node *parent = NULL;
	struct binder_node *node;

	while (*p) {
		parent = *p;
		node = rb_entry(parent, struct binder_node, rb_node);

		if (ptr < node->ptr)
			p = &(*p)->rb_left;
		else if (ptr > node->ptr)
			p = &(*p)->rb_right;
		else
			return NULL;
	}

	node = kzalloc(sizeof(*node), GFP_KERNEL);
	if (node == NULL)
		return NULL;
	binder_stats_created(BINDER_STAT_NODE);
	rb_link_node(&node->rb_node, parent, p);
	rb_insert_color(&node->rb_node, &proc->nodes);
	node->debug_id = ++binder_last_id;
	node->proc = proc;
	node->ptr = ptr;
	node->cookie = cookie;
	node->work.type = BINDER_WORK_NODE;
	INIT_LIST_HEAD(&node->work.entry);
	INIT_LIST_HEAD(&node->async_todo);
	binder_debug(BINDER_DEBUG_INTERNAL_REFS,
		     "%d:%d node %d u%016llx c%016llx created\n",
		     proc->pid, current->pid, node->debug_id,
		     (u64)node->ptr, (u64)node->cookie);
	return node;
}

static int binder_inc_node(struct binder_node *node, int strong, int internal,
			   struct list_head *target_list)
{
	if (strong) {
		if (internal) {
			if (target_list == NULL &&
			    node->internal_strong_refs == 0 &&
			    !(node == binder_context_mgr_node &&
			    node->has_strong_ref)) {
				pr_err("invalid inc strong node for %d\n",
					node->debug_id);
				return -EINVAL;
			}
			node->internal_strong_refs++;
		} else
			node->local_strong_refs++;
		if (!node->has_strong_ref && target_list) {
			list_del_init(&node->work.entry);
			list_add_tail(&node->work.entry, target_list);
		}
	} else {
		if (!internal)
			node->local_weak_refs++;
		if (!node->has_weak_ref && list_empty(&node->work.entry)) {
			if (target_list == NULL) {
				pr_err("invalid inc weak node for %d\n",
					node->debug_id);
				return -EINVAL;
			}
			list_add_tail(&node->work.entry, target_list);
		}
	}
	return 0;
}

static int binder_dec_node(struct binder_node *node, int strong, int internal)
{
	if (strong) {
		if (internal)
			node->internal_strong_refs--;
		else
			node->local_strong_refs--;
		if (node->local_strong_refs || node->internal_strong_refs)
			return 0;
	} else {
		if (!internal)
			node->local_weak_refs--;
		if (node->local_weak_refs || !hlist_empty(&node->refs))
			return 0;
	}
	if (node->proc && (node->has_strong_ref || node->has_weak_ref)) {
		if (list_empty(&node->work.entry)) {
			list_add_tail(&node->work.entry, &node->proc->todo);
			wake_up_interruptible(&node->proc->wait);
		}
	} else {
		if (hlist_empty(&node->refs) && !node->local_strong_refs &&
		    !node->local_weak_refs) {
			list_del_init(&node->work.entry);
			if (node->proc) {
				rb_erase(&node->rb_node, &node->proc->nodes);
				binder_debug(BINDER_DEBUG_INTERNAL_REFS,
					     "refless node %d deleted\n",
					     node->debug_id);
			} else {
				hlist_del(&node->dead_node);
				binder_debug(BINDER_DEBUG_INTERNAL_REFS,
					     "dead node %d deleted\n",
					     node->debug_id);
			}
			kfree(node);
			binder_stats_deleted(BINDER_STAT_NODE);
		}
	}

	return 0;
}

static struct binder_ref *binder_get_ref(struct binder_proc *proc,
					 u32 desc, bool need_strong_ref)
{
	struct rb_node *n = proc->refs_by_desc.rb_node;
	struct binder_ref *ref;

	while (n) {
		ref = rb_entry(n, struct binder_ref, rb_node_desc);

		if (desc < ref->desc) {
			n = n->rb_left;
		} else if (desc > ref->desc) {
			n = n->rb_right;
		} else if (need_strong_ref && !ref->strong) {
			binder_user_error("tried to use weak ref as strong ref\n");
			return NULL;
		} else {
			return ref;
		}
	}
	return NULL;
}

static struct binder_ref *binder_get_ref_for_node(struct binder_proc *proc,
						  struct binder_node *node)
{
	struct rb_node *n;
	struct rb_node **p = &proc->refs_by_node.rb_node;
	struct rb_node *parent = NULL;
	struct binder_ref *ref, *new_ref;

	while (*p) {
		parent = *p;
		ref = rb_entry(parent, struct binder_ref, rb_node_node);

		if (node < ref->node)
			p = &(*p)->rb_left;
		else if (node > ref->node)
			p = &(*p)->rb_right;
		else
			return ref;
	}
	new_ref = kzalloc(sizeof(*ref), GFP_KERNEL);
	if (new_ref == NULL)
		return NULL;
	binder_stats_created(BINDER_STAT_REF);
	new_ref->debug_id = ++binder_last_id;
	new_ref->proc = proc;
	new_ref->node = node;
	rb_link_node(&new_ref->rb_node_node, parent, p);
	rb_insert_color(&new_ref->rb_node_node, &proc->refs_by_node);

	new_ref->desc = (node == binder_context_mgr_node) ? 0 : 1;
	for (n = rb_first(&proc->refs_by_desc); n != NULL; n = rb_next(n)) {
		ref = rb_entry(n, struct binder_ref, rb_node_desc);
		if (ref->desc > new_ref->desc)
			break;
		new_ref->desc = ref->desc + 1;
	}

	p = &proc->refs_by_desc.rb_node;
	while (*p) {
		parent = *p;
		ref = rb_entry(parent, struct binder_ref, rb_node_desc);

		if (new_ref->desc < ref->desc)
			p = &(*p)->rb_left;
		else if (new_ref->desc > ref->desc)
			p = &(*p)->rb_right;
		else
			BUG();
	}
	rb_link_node(&new_ref->rb_node_desc, parent, p);
	rb_insert_color(&new_ref->rb_node_desc, &proc->refs_by_desc);
	if (node) {
		hlist_add_head(&new_ref->node_entry, &node->refs);

		binder_debug(BINDER_DEBUG_INTERNAL_REFS,
			     "%d new ref %d desc %d for node %d\n",
			      proc->pid, new_ref->debug_id, new_ref->desc,
			      node->debug_id);
	} else {
		binder_debug(BINDER_DEBUG_INTERNAL_REFS,
			     "%d new ref %d desc %d for dead node\n",
			      proc->pid, new_ref->debug_id, new_ref->desc);
	}
	return new_ref;
}

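/*
 * Descriptor (handle) assignment: desc 0 is reserved for the context
 * manager, so a new ref starts its search at 0 only when it points at
 * binder_context_mgr_node, and at 1 otherwise.  The in-order walk of
 * refs_by_desc then advances the candidate past every taken value,
 * yielding the lowest unused descriptor, before the ref is inserted into
 * both trees (by node for transactions, by desc for userspace commands).
 */
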
static void binder_delete_ref(struct binder_ref *ref)
{
	binder_debug(BINDER_DEBUG_INTERNAL_REFS,
		     "%d delete ref %d desc %d for node %d\n",
		      ref->proc->pid, ref->debug_id, ref->desc,
		      ref->node->debug_id);

	rb_erase(&ref->rb_node_desc, &ref->proc->refs_by_desc);
	rb_erase(&ref->rb_node_node, &ref->proc->refs_by_node);
	if (ref->strong)
		binder_dec_node(ref->node, 1, 1);
	hlist_del(&ref->node_entry);
	binder_dec_node(ref->node, 0, 1);
	if (ref->death) {
		binder_debug(BINDER_DEBUG_DEAD_BINDER,
			     "%d delete ref %d desc %d has death notification\n",
			      ref->proc->pid, ref->debug_id, ref->desc);
		list_del(&ref->death->work.entry);
		kfree(ref->death);
		binder_stats_deleted(BINDER_STAT_DEATH);
	}
	kfree(ref);
	binder_stats_deleted(BINDER_STAT_REF);
}

static int binder_inc_ref(struct binder_ref *ref, int strong,
			  struct list_head *target_list)
{
	int ret;

	if (strong) {
		if (ref->strong == 0) {
			ret = binder_inc_node(ref->node, 1, 1, target_list);
			if (ret)
				return ret;
		}
		ref->strong++;
	} else {
		if (ref->weak == 0) {
			ret = binder_inc_node(ref->node, 0, 1, target_list);
			if (ret)
				return ret;
		}
		ref->weak++;
	}
	return 0;
}

static int binder_dec_ref(struct binder_ref *ref, int strong)
{
	if (strong) {
		if (ref->strong == 0) {
			binder_user_error("%d invalid dec strong, ref %d desc %d s %d w %d\n",
					  ref->proc->pid, ref->debug_id,
					  ref->desc, ref->strong, ref->weak);
			return -EINVAL;
		}
		ref->strong--;
		if (ref->strong == 0) {
			int ret;

			ret = binder_dec_node(ref->node, strong, 1);
			if (ret)
				return ret;
		}
	} else {
		if (ref->weak == 0) {
			binder_user_error("%d invalid dec weak, ref %d desc %d s %d w %d\n",
					  ref->proc->pid, ref->debug_id,
					  ref->desc, ref->strong, ref->weak);
			return -EINVAL;
		}
		ref->weak--;
	}
	if (ref->strong == 0 && ref->weak == 0)
		binder_delete_ref(ref);
	return 0;
}

static void binder_pop_transaction(struct binder_thread *target_thread,
				   struct binder_transaction *t)
{
	if (target_thread) {
		BUG_ON(target_thread->transaction_stack != t);
		BUG_ON(target_thread->transaction_stack->from != target_thread);
		target_thread->transaction_stack =
			target_thread->transaction_stack->from_parent;
		t->from = NULL;
	}
	t->need_reply = 0;
	if (t->buffer)
		t->buffer->transaction = NULL;
	kfree(t);
	binder_stats_deleted(BINDER_STAT_TRANSACTION);
}

static void binder_send_failed_reply(struct binder_transaction *t,
				     uint32_t error_code)
{
	struct binder_thread *target_thread;
	struct binder_transaction *next;

	BUG_ON(t->flags & TF_ONE_WAY);
	while (1) {
		target_thread = t->from;
		if (target_thread) {
			if (target_thread->return_error != BR_OK &&
			    target_thread->return_error2 == BR_OK) {
				target_thread->return_error2 =
					target_thread->return_error;
				target_thread->return_error = BR_OK;
			}
			if (target_thread->return_error == BR_OK) {
				binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
					     "send failed reply for transaction %d to %d:%d\n",
					      t->debug_id,
					      target_thread->proc->pid,
					      target_thread->pid);

				binder_pop_transaction(target_thread, t);
				target_thread->return_error = error_code;
				wake_up_interruptible(&target_thread->wait);
			} else {
				pr_err("reply failed, target thread, %d:%d, has error code %d already\n",
					target_thread->proc->pid,
					target_thread->pid,
					target_thread->return_error);
			}
			return;
		}
		next = t->from_parent;

		binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
			     "send failed reply for transaction %d, target dead\n",
			     t->debug_id);

		binder_pop_transaction(target_thread, t);
		if (next == NULL) {
			binder_debug(BINDER_DEBUG_DEAD_BINDER,
				     "reply failed, no target thread at root\n");
			return;
		}
		t = next;
		binder_debug(BINDER_DEBUG_DEAD_BINDER,
			     "reply failed, no target thread -- retry %d\n",
			      t->debug_id);
	}
}

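/*
 * When a transaction cannot be completed, the failure has to reach the
 * original caller.  If t->from is still alive, the error code is posted
 * to that thread's return_error (return_error2 parks an earlier pending
 * error) and the thread is woken.  If the sending thread is gone, the
 * loop pops this frame and retries one level up the from_parent chain,
 * so the failure propagates to the nearest surviving caller in the stack.
 */
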
static void binder_transaction_buffer_release(struct binder_proc *proc,
					      struct binder_buffer *buffer,
					      binder_size_t *failed_at)
{
	binder_size_t *offp, *off_end;
	int debug_id = buffer->debug_id;

	binder_debug(BINDER_DEBUG_TRANSACTION,
		     "%d buffer release %d, size %zd-%zd, failed at %p\n",
		     proc->pid, buffer->debug_id,
		     buffer->data_size, buffer->offsets_size, failed_at);

	if (buffer->target_node)
		binder_dec_node(buffer->target_node, 1, 0);

	offp = (binder_size_t *)(buffer->data +
				 ALIGN(buffer->data_size, sizeof(void *)));
	if (failed_at)
		off_end = failed_at;
	else
		off_end = (void *)offp + buffer->offsets_size;
	for (; offp < off_end; offp++) {
		struct flat_binder_object *fp;

		if (*offp > buffer->data_size - sizeof(*fp) ||
		    buffer->data_size < sizeof(*fp) ||
		    !IS_ALIGNED(*offp, sizeof(u32))) {
			pr_err("transaction release %d bad offset %lld, size %zd\n",
			       debug_id, (u64)*offp, buffer->data_size);
			continue;
		}
		fp = (struct flat_binder_object *)(buffer->data + *offp);
		switch (fp->type) {
		case BINDER_TYPE_BINDER:
		case BINDER_TYPE_WEAK_BINDER: {
			struct binder_node *node = binder_get_node(proc, fp->binder);

			if (node == NULL) {
				pr_err("transaction release %d bad node %016llx\n",
				       debug_id, (u64)fp->binder);
				break;
			}
			binder_debug(BINDER_DEBUG_TRANSACTION,
				     "        node %d u%016llx\n",
				     node->debug_id, (u64)node->ptr);
			binder_dec_node(node, fp->type == BINDER_TYPE_BINDER, 0);
		} break;
		case BINDER_TYPE_HANDLE:
		case BINDER_TYPE_WEAK_HANDLE: {
			struct binder_ref *ref;

			ref = binder_get_ref(proc, fp->handle,
					     fp->type == BINDER_TYPE_HANDLE);

			if (ref == NULL) {
				pr_err("transaction release %d bad handle %d\n",
				       debug_id, fp->handle);
				break;
			}
			binder_debug(BINDER_DEBUG_TRANSACTION,
				     "        ref %d desc %d (node %d)\n",
				     ref->debug_id, ref->desc, ref->node->debug_id);
			binder_dec_ref(ref, fp->type == BINDER_TYPE_HANDLE);
		} break;

		case BINDER_TYPE_FD:
			binder_debug(BINDER_DEBUG_TRANSACTION,
				     "        fd %d\n", fp->handle);
			if (failed_at)
				task_close_fd(proc, fp->handle);
			break;

		default:
			pr_err("transaction release %d bad object type %x\n",
				debug_id, fp->type);
			break;
		}
	}
}

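/*
 * Every object that binder_transaction() translated into the buffer holds
 * a reference (node, ref) or a resource (installed fd) that must be
 * released when the buffer dies.  failed_at supports partial cleanup: when
 * a transaction aborts mid-translation, only the offsets processed so far
 * are walked back, and fds are closed only on that failure path, since on
 * a normal release the receiver owns them by then.
 */
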
static void binder_transaction(struct binder_proc *proc,
			       struct binder_thread *thread,
			       struct binder_transaction_data *tr, int reply)
{
	struct binder_transaction *t;
	struct binder_work *tcomplete;
	binder_size_t *offp, *off_end;
	struct binder_proc *target_proc;
	struct binder_thread *target_thread = NULL;
	struct binder_node *target_node = NULL;
	struct list_head *target_list;
	wait_queue_head_t *target_wait;
	struct binder_transaction *in_reply_to = NULL;
	struct binder_transaction_log_entry *e;
	uint32_t return_error;

	e = binder_transaction_log_add(&binder_transaction_log);
	e->call_type = reply ? 2 : !!(tr->flags & TF_ONE_WAY);
	e->from_proc = proc->pid;
	e->from_thread = thread->pid;
	e->target_handle = tr->target.handle;
	e->data_size = tr->data_size;
	e->offsets_size = tr->offsets_size;

	if (reply) {
		in_reply_to = thread->transaction_stack;
		if (in_reply_to == NULL) {
			binder_user_error("%d:%d got reply transaction with no transaction stack\n",
					  proc->pid, thread->pid);
			return_error = BR_FAILED_REPLY;
			goto err_empty_call_stack;
		}
		binder_set_nice(in_reply_to->saved_priority);
		if (in_reply_to->to_thread != thread) {
			binder_user_error("%d:%d got reply transaction with bad transaction stack, transaction %d has target %d:%d\n",
				proc->pid, thread->pid, in_reply_to->debug_id,
				in_reply_to->to_proc ?
				in_reply_to->to_proc->pid : 0,
				in_reply_to->to_thread ?
				in_reply_to->to_thread->pid : 0);
			return_error = BR_FAILED_REPLY;
			in_reply_to = NULL;
			goto err_bad_call_stack;
		}
		thread->transaction_stack = in_reply_to->to_parent;
		target_thread = in_reply_to->from;
		if (target_thread == NULL) {
			return_error = BR_DEAD_REPLY;
			goto err_dead_binder;
		}
		if (target_thread->transaction_stack != in_reply_to) {
			binder_user_error("%d:%d got reply transaction with bad target transaction stack %d, expected %d\n",
				proc->pid, thread->pid,
				target_thread->transaction_stack ?
				target_thread->transaction_stack->debug_id : 0,
				in_reply_to->debug_id);
			return_error = BR_FAILED_REPLY;
			in_reply_to = NULL;
			target_thread = NULL;
			goto err_dead_binder;
		}
		target_proc = target_thread->proc;
	} else {
		if (tr->target.handle) {
			struct binder_ref *ref;

			ref = binder_get_ref(proc, tr->target.handle, true);
			if (ref == NULL) {
				binder_user_error("%d:%d got transaction to invalid handle\n",
					proc->pid, thread->pid);
				return_error = BR_FAILED_REPLY;
				goto err_invalid_target_handle;
			}
			target_node = ref->node;
		} else {
			target_node = binder_context_mgr_node;
			if (target_node == NULL) {
				return_error = BR_DEAD_REPLY;
				goto err_no_context_mgr_node;
			}
		}
		e->to_node = target_node->debug_id;
		target_proc = target_node->proc;
		if (target_proc == NULL) {
			return_error = BR_DEAD_REPLY;
			goto err_dead_binder;
		}
		if (security_binder_transaction(proc->tsk,
						target_proc->tsk) < 0) {
			return_error = BR_FAILED_REPLY;
			goto err_invalid_target_handle;
		}
		if (!(tr->flags & TF_ONE_WAY) && thread->transaction_stack) {
			struct binder_transaction *tmp;

			tmp = thread->transaction_stack;
			if (tmp->to_thread != thread) {
				binder_user_error("%d:%d got new transaction with bad transaction stack, transaction %d has target %d:%d\n",
					proc->pid, thread->pid, tmp->debug_id,
					tmp->to_proc ? tmp->to_proc->pid : 0,
					tmp->to_thread ?
					tmp->to_thread->pid : 0);
				return_error = BR_FAILED_REPLY;
				goto err_bad_call_stack;
			}
			while (tmp) {
				if (tmp->from && tmp->from->proc == target_proc)
					target_thread = tmp->from;
				tmp = tmp->from_parent;
			}
		}
	}
	if (target_thread) {
		e->to_thread = target_thread->pid;
		target_list = &target_thread->todo;
		target_wait = &target_thread->wait;
	} else {
		target_list = &target_proc->todo;
		target_wait = &target_proc->wait;
	}
	e->to_proc = target_proc->pid;

	/* TODO: reuse incoming transaction for reply */
	t = kzalloc(sizeof(*t), GFP_KERNEL);
	if (t == NULL) {
		return_error = BR_FAILED_REPLY;
		goto err_alloc_t_failed;
	}
	binder_stats_created(BINDER_STAT_TRANSACTION);

	tcomplete = kzalloc(sizeof(*tcomplete), GFP_KERNEL);
	if (tcomplete == NULL) {
		return_error = BR_FAILED_REPLY;
		goto err_alloc_tcomplete_failed;
	}
	binder_stats_created(BINDER_STAT_TRANSACTION_COMPLETE);

	t->debug_id = ++binder_last_id;
	e->debug_id = t->debug_id;

	if (reply)
		binder_debug(BINDER_DEBUG_TRANSACTION,
			     "%d:%d BC_REPLY %d -> %d:%d, data %016llx-%016llx size %lld-%lld\n",
			     proc->pid, thread->pid, t->debug_id,
			     target_proc->pid, target_thread->pid,
			     (u64)tr->data.ptr.buffer,
			     (u64)tr->data.ptr.offsets,
			     (u64)tr->data_size, (u64)tr->offsets_size);
	else
		binder_debug(BINDER_DEBUG_TRANSACTION,
			     "%d:%d BC_TRANSACTION %d -> %d - node %d, data %016llx-%016llx size %lld-%lld\n",
			     proc->pid, thread->pid, t->debug_id,
			     target_proc->pid, target_node->debug_id,
			     (u64)tr->data.ptr.buffer,
			     (u64)tr->data.ptr.offsets,
			     (u64)tr->data_size, (u64)tr->offsets_size);

	if (!reply && !(tr->flags & TF_ONE_WAY))
		t->from = thread;
	else
		t->from = NULL;
	t->sender_euid = task_euid(proc->tsk);
	t->to_proc = target_proc;
	t->to_thread = target_thread;
	t->code = tr->code;
	t->flags = tr->flags;
	t->priority = task_nice(current);

	trace_binder_transaction(reply, t, target_node);

	t->buffer = binder_alloc_buf(target_proc, tr->data_size,
		tr->offsets_size, !reply && (t->flags & TF_ONE_WAY));
	if (t->buffer == NULL) {
		return_error = BR_FAILED_REPLY;
		goto err_binder_alloc_buf_failed;
	}
	t->buffer->allow_user_free = 0;
	t->buffer->debug_id = t->debug_id;
	t->buffer->transaction = t;
	t->buffer->target_node = target_node;
	trace_binder_transaction_alloc_buf(t->buffer);
	if (target_node)
		binder_inc_node(target_node, 1, 0, NULL);

	offp = (binder_size_t *)(t->buffer->data +
				 ALIGN(tr->data_size, sizeof(void *)));

	if (copy_from_user(t->buffer->data, (const void __user *)(uintptr_t)
			   tr->data.ptr.buffer, tr->data_size)) {
		binder_user_error("%d:%d got transaction with invalid data ptr\n",
				proc->pid, thread->pid);
		return_error = BR_FAILED_REPLY;
		goto err_copy_data_failed;
	}
	if (copy_from_user(offp, (const void __user *)(uintptr_t)
			   tr->data.ptr.offsets, tr->offsets_size)) {
		binder_user_error("%d:%d got transaction with invalid offsets ptr\n",
				proc->pid, thread->pid);
		return_error = BR_FAILED_REPLY;
		goto err_copy_data_failed;
	}
	if (!IS_ALIGNED(tr->offsets_size, sizeof(binder_size_t))) {
		binder_user_error("%d:%d got transaction with invalid offsets size, %lld\n",
				proc->pid, thread->pid, (u64)tr->offsets_size);
		return_error = BR_FAILED_REPLY;
		goto err_bad_offset;
	}
1532 for (; offp < off_end; offp++) {
1533 struct flat_binder_object *fp;
1535 if (*offp > t->buffer->data_size - sizeof(*fp) ||
1536 t->buffer->data_size < sizeof(*fp) ||
1537 !IS_ALIGNED(*offp, sizeof(u32))) {
1538 binder_user_error("%d:%d got transaction with invalid offset, %lld\n",
1539 proc->pid, thread->pid, (u64)*offp);
1540 return_error = BR_FAILED_REPLY;
1541 goto err_bad_offset;
1543 fp = (struct flat_binder_object *)(t->buffer->data + *offp);
1544 switch (fp->type) {
1545 case BINDER_TYPE_BINDER:
1546 case BINDER_TYPE_WEAK_BINDER: {
1547 struct binder_ref *ref;
1548 struct binder_node *node = binder_get_node(proc, fp->binder);
1550 if (node == NULL) {
1551 node = binder_new_node(proc, fp->binder, fp->cookie);
1552 if (node == NULL) {
1553 return_error = BR_FAILED_REPLY;
1554 goto err_binder_new_node_failed;
1556 node->min_priority = fp->flags & FLAT_BINDER_FLAG_PRIORITY_MASK;
1557 node->accept_fds = !!(fp->flags & FLAT_BINDER_FLAG_ACCEPTS_FDS);
1559 if (fp->cookie != node->cookie) {
1560 binder_user_error("%d:%d sending u%016llx node %d, cookie mismatch %016llx != %016llx\n",
1561 proc->pid, thread->pid,
1562 (u64)fp->binder, node->debug_id,
1563 (u64)fp->cookie, (u64)node->cookie);
1564 return_error = BR_FAILED_REPLY;
1565 goto err_binder_get_ref_for_node_failed;
1567 if (security_binder_transfer_binder(proc->tsk,
1568 target_proc->tsk)) {
1569 return_error = BR_FAILED_REPLY;
1570 goto err_binder_get_ref_for_node_failed;
1572 ref = binder_get_ref_for_node(target_proc, node);
1573 if (ref == NULL) {
1574 return_error = BR_FAILED_REPLY;
1575 goto err_binder_get_ref_for_node_failed;
1577 if (fp->type == BINDER_TYPE_BINDER)
1578 fp->type = BINDER_TYPE_HANDLE;
1579 else
1580 fp->type = BINDER_TYPE_WEAK_HANDLE;
1581 fp->binder = 0;
1582 fp->handle = ref->desc;
1583 fp->cookie = 0;
1584 binder_inc_ref(ref, fp->type == BINDER_TYPE_HANDLE,
1585 &thread->todo);
1587 trace_binder_transaction_node_to_ref(t, node, ref);
1588 binder_debug(BINDER_DEBUG_TRANSACTION,
1589 " node %d u%016llx -> ref %d desc %d\n",
1590 node->debug_id, (u64)node->ptr,
1591 ref->debug_id, ref->desc);
1592 } break;
1593 case BINDER_TYPE_HANDLE:
1594 case BINDER_TYPE_WEAK_HANDLE: {
1595 struct binder_ref *ref;
1597 ref = binder_get_ref(proc, fp->handle,
1598 fp->type == BINDER_TYPE_HANDLE);
1600 if (ref == NULL) {
1601 binder_user_error("%d:%d got transaction with invalid handle, %d\n",
1602 proc->pid,
1603 thread->pid, fp->handle);
1604 return_error = BR_FAILED_REPLY;
1605 goto err_binder_get_ref_failed;
1607 if (security_binder_transfer_binder(proc->tsk,
1608 target_proc->tsk)) {
1609 return_error = BR_FAILED_REPLY;
1610 goto err_binder_get_ref_failed;
1612 if (ref->node->proc == target_proc) {
1613 if (fp->type == BINDER_TYPE_HANDLE)
1614 fp->type = BINDER_TYPE_BINDER;
1615 else
1616 fp->type = BINDER_TYPE_WEAK_BINDER;
1617 fp->binder = ref->node->ptr;
1618 fp->cookie = ref->node->cookie;
1619 binder_inc_node(ref->node, fp->type == BINDER_TYPE_BINDER, 0, NULL);
1620 trace_binder_transaction_ref_to_node(t, ref);
1621 binder_debug(BINDER_DEBUG_TRANSACTION,
1622 " ref %d desc %d -> node %d u%016llx\n",
1623 ref->debug_id, ref->desc, ref->node->debug_id,
1624 (u64)ref->node->ptr);
1625 } else {
1626 struct binder_ref *new_ref;
1628 new_ref = binder_get_ref_for_node(target_proc, ref->node);
1629 if (new_ref == NULL) {
1630 return_error = BR_FAILED_REPLY;
1631 goto err_binder_get_ref_for_node_failed;
1633 fp->binder = 0;
1634 fp->handle = new_ref->desc;
1635 fp->cookie = 0;
1636 binder_inc_ref(new_ref, fp->type == BINDER_TYPE_HANDLE, NULL);
1637 trace_binder_transaction_ref_to_ref(t, ref,
1638 new_ref);
1639 binder_debug(BINDER_DEBUG_TRANSACTION,
1640 " ref %d desc %d -> ref %d desc %d (node %d)\n",
1641 ref->debug_id, ref->desc, new_ref->debug_id,
1642 new_ref->desc, ref->node->debug_id);
1644 } break;
1646 case BINDER_TYPE_FD: {
1647 int target_fd;
1648 struct file *file;
1650 if (reply) {
1651 if (!(in_reply_to->flags & TF_ACCEPT_FDS)) {
1652 binder_user_error("%d:%d got reply with fd, %d, but target does not allow fds\n",
1653 proc->pid, thread->pid, fp->handle);
1654 return_error = BR_FAILED_REPLY;
1655 goto err_fd_not_allowed;
1657 } else if (!target_node->accept_fds) {
1658 binder_user_error("%d:%d got transaction with fd, %d, but target does not allow fds\n",
1659 proc->pid, thread->pid, fp->handle);
1660 return_error = BR_FAILED_REPLY;
1661 goto err_fd_not_allowed;
1664 file = fget(fp->handle);
1665 if (file == NULL) {
1666 binder_user_error("%d:%d got transaction with invalid fd, %d\n",
1667 proc->pid, thread->pid, fp->handle);
1668 return_error = BR_FAILED_REPLY;
1669 goto err_fget_failed;
1671 if (security_binder_transfer_file(proc->tsk,
1672 target_proc->tsk,
1673 file) < 0) {
1674 fput(file);
1675 return_error = BR_FAILED_REPLY;
1676 goto err_get_unused_fd_failed;
1678 target_fd = task_get_unused_fd_flags(target_proc, O_CLOEXEC);
1679 if (target_fd < 0) {
1680 fput(file);
1681 return_error = BR_FAILED_REPLY;
1682 goto err_get_unused_fd_failed;
1684 task_fd_install(target_proc, target_fd, file);
1685 trace_binder_transaction_fd(t, fp->handle, target_fd);
1686 binder_debug(BINDER_DEBUG_TRANSACTION,
1687 " fd %d -> %d\n", fp->handle, target_fd);
1688 /* TODO: fput? */
1689 fp->binder = 0;
1690 fp->handle = target_fd;
1691 } break;
1693 default:
1694 binder_user_error("%d:%d got transaction with invalid object type, %x\n",
1695 proc->pid, thread->pid, fp->type);
1696 return_error = BR_FAILED_REPLY;
1697 goto err_bad_object_type;
	if (reply) {
		BUG_ON(t->buffer->async_transaction != 0);
		binder_pop_transaction(target_thread, in_reply_to);
	} else if (!(t->flags & TF_ONE_WAY)) {
		BUG_ON(t->buffer->async_transaction != 0);
		t->need_reply = 1;
		t->from_parent = thread->transaction_stack;
		thread->transaction_stack = t;
	} else {
		BUG_ON(target_node == NULL);
		BUG_ON(t->buffer->async_transaction != 1);
		if (target_node->has_async_transaction) {
			target_list = &target_node->async_todo;
			target_wait = NULL;
		} else
			target_node->has_async_transaction = 1;
	}
	t->work.type = BINDER_WORK_TRANSACTION;
	list_add_tail(&t->work.entry, target_list);
	tcomplete->type = BINDER_WORK_TRANSACTION_COMPLETE;
	list_add_tail(&tcomplete->entry, &thread->todo);
	if (target_wait) {
		if (reply || !(t->flags & TF_ONE_WAY))
			wake_up_interruptible_sync(target_wait);
		else
			wake_up_interruptible(target_wait);
	}
	return;

err_get_unused_fd_failed:
err_fget_failed:
err_fd_not_allowed:
err_binder_get_ref_for_node_failed:
err_binder_get_ref_failed:
err_binder_new_node_failed:
err_bad_object_type:
err_bad_offset:
err_copy_data_failed:
	trace_binder_transaction_failed_buffer_release(t->buffer);
	binder_transaction_buffer_release(target_proc, t->buffer, offp);
	t->buffer->transaction = NULL;
	binder_free_buf(target_proc, t->buffer);
err_binder_alloc_buf_failed:
	kfree(tcomplete);
	binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
err_alloc_tcomplete_failed:
	kfree(t);
	binder_stats_deleted(BINDER_STAT_TRANSACTION);
err_alloc_t_failed:
err_bad_call_stack:
err_empty_call_stack:
err_dead_binder:
err_invalid_target_handle:
err_no_context_mgr_node:
	binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
		     "%d:%d transaction failed %d, size %lld-%lld\n",
		     proc->pid, thread->pid, return_error,
		     (u64)tr->data_size, (u64)tr->offsets_size);

	{
		struct binder_transaction_log_entry *fe;

		fe = binder_transaction_log_add(&binder_transaction_log_failed);
		*fe = *e;
	}

	BUG_ON(thread->return_error != BR_OK);
	if (in_reply_to) {
		thread->return_error = BR_TRANSACTION_COMPLETE;
		binder_send_failed_reply(in_reply_to, return_error);
	} else
		thread->return_error = return_error;
}

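/*
 * The error labels form one unwind sequence ordered by how far the
 * transaction got: entering after the object loop releases the translated
 * objects up to offp and frees the buffer, while earlier entry points skip
 * straight to freeing tcomplete and t.  Whatever failed, the outcome is
 * recorded in the failed-transaction ring and either reported to the
 * calling thread via return_error or, for a broken reply, pushed back
 * down the stack with binder_send_failed_reply().
 */
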
static int binder_thread_write(struct binder_proc *proc,
			       struct binder_thread *thread,
			       binder_uintptr_t binder_buffer, size_t size,
			       binder_size_t *consumed)
{
	uint32_t cmd;
	void __user *buffer = (void __user *)(uintptr_t)binder_buffer;
	void __user *ptr = buffer + *consumed;
	void __user *end = buffer + size;

	while (ptr < end && thread->return_error == BR_OK) {
		if (get_user(cmd, (uint32_t __user *)ptr))
			return -EFAULT;
		ptr += sizeof(uint32_t);
		trace_binder_command(cmd);
		if (_IOC_NR(cmd) < ARRAY_SIZE(binder_stats.bc)) {
			binder_stats.bc[_IOC_NR(cmd)]++;
			proc->stats.bc[_IOC_NR(cmd)]++;
			thread->stats.bc[_IOC_NR(cmd)]++;
		}
		switch (cmd) {
		case BC_INCREFS:
		case BC_ACQUIRE:
		case BC_RELEASE:
		case BC_DECREFS: {
			uint32_t target;
			struct binder_ref *ref;
			const char *debug_string;

			if (get_user(target, (uint32_t __user *)ptr))
				return -EFAULT;
			ptr += sizeof(uint32_t);
			if (target == 0 && binder_context_mgr_node &&
			    (cmd == BC_INCREFS || cmd == BC_ACQUIRE)) {
				ref = binder_get_ref_for_node(proc,
					       binder_context_mgr_node);
				if (ref->desc != target) {
					binder_user_error("%d:%d tried to acquire reference to desc 0, got %d instead\n",
						proc->pid, thread->pid,
						ref->desc);
				}
			} else
				ref = binder_get_ref(proc, target,
						     cmd == BC_ACQUIRE ||
						     cmd == BC_RELEASE);
			if (ref == NULL) {
				binder_user_error("%d:%d refcount change on invalid ref %d\n",
					proc->pid, thread->pid, target);
				break;
			}
			switch (cmd) {
			case BC_INCREFS:
				debug_string = "IncRefs";
				binder_inc_ref(ref, 0, NULL);
				break;
			case BC_ACQUIRE:
				debug_string = "Acquire";
				binder_inc_ref(ref, 1, NULL);
				break;
			case BC_RELEASE:
				debug_string = "Release";
				binder_dec_ref(ref, 1);
				break;
			case BC_DECREFS:
			default:
				debug_string = "DecRefs";
				binder_dec_ref(ref, 0);
				break;
			}
			binder_debug(BINDER_DEBUG_USER_REFS,
				     "%d:%d %s ref %d desc %d s %d w %d for node %d\n",
				     proc->pid, thread->pid, debug_string, ref->debug_id,
				     ref->desc, ref->strong, ref->weak, ref->node->debug_id);
			break;
		}
		case BC_INCREFS_DONE:
		case BC_ACQUIRE_DONE: {
			binder_uintptr_t node_ptr;
			binder_uintptr_t cookie;
			struct binder_node *node;

			if (get_user(node_ptr, (binder_uintptr_t __user *)ptr))
				return -EFAULT;
			ptr += sizeof(binder_uintptr_t);
			if (get_user(cookie, (binder_uintptr_t __user *)ptr))
				return -EFAULT;
			ptr += sizeof(binder_uintptr_t);
			node = binder_get_node(proc, node_ptr);
			if (node == NULL) {
				binder_user_error("%d:%d %s u%016llx no match\n",
					proc->pid, thread->pid,
					cmd == BC_INCREFS_DONE ?
					"BC_INCREFS_DONE" :
					"BC_ACQUIRE_DONE",
					(u64)node_ptr);
				break;
			}
			if (cookie != node->cookie) {
				binder_user_error("%d:%d %s u%016llx node %d cookie mismatch %016llx != %016llx\n",
					proc->pid, thread->pid,
					cmd == BC_INCREFS_DONE ?
					"BC_INCREFS_DONE" : "BC_ACQUIRE_DONE",
					(u64)node_ptr, node->debug_id,
					(u64)cookie, (u64)node->cookie);
				break;
			}
			if (cmd == BC_ACQUIRE_DONE) {
				if (node->pending_strong_ref == 0) {
					binder_user_error("%d:%d BC_ACQUIRE_DONE node %d has no pending acquire request\n",
						proc->pid, thread->pid,
						node->debug_id);
					break;
				}
				node->pending_strong_ref = 0;
			} else {
				if (node->pending_weak_ref == 0) {
					binder_user_error("%d:%d BC_INCREFS_DONE node %d has no pending increfs request\n",
						proc->pid, thread->pid,
						node->debug_id);
					break;
				}
				node->pending_weak_ref = 0;
			}
			binder_dec_node(node, cmd == BC_ACQUIRE_DONE, 0);
			binder_debug(BINDER_DEBUG_USER_REFS,
				     "%d:%d %s node %d ls %d lw %d\n",
				     proc->pid, thread->pid,
				     cmd == BC_INCREFS_DONE ? "BC_INCREFS_DONE" : "BC_ACQUIRE_DONE",
				     node->debug_id, node->local_strong_refs, node->local_weak_refs);
			break;
		}
		case BC_ATTEMPT_ACQUIRE:
			pr_err("BC_ATTEMPT_ACQUIRE not supported\n");
			return -EINVAL;
		case BC_ACQUIRE_RESULT:
			pr_err("BC_ACQUIRE_RESULT not supported\n");
			return -EINVAL;

		case BC_FREE_BUFFER: {
			binder_uintptr_t data_ptr;
			struct binder_buffer *buffer;

			if (get_user(data_ptr, (binder_uintptr_t __user *)ptr))
				return -EFAULT;
			ptr += sizeof(binder_uintptr_t);

			buffer = binder_buffer_lookup(proc, data_ptr);
			if (buffer == NULL) {
				binder_user_error("%d:%d BC_FREE_BUFFER u%016llx no match\n",
					proc->pid, thread->pid, (u64)data_ptr);
				break;
			}
			if (!buffer->allow_user_free) {
				binder_user_error("%d:%d BC_FREE_BUFFER u%016llx matched unreturned buffer\n",
					proc->pid, thread->pid, (u64)data_ptr);
				break;
			}
			binder_debug(BINDER_DEBUG_FREE_BUFFER,
				     "%d:%d BC_FREE_BUFFER u%016llx found buffer %d for %s transaction\n",
				     proc->pid, thread->pid, (u64)data_ptr,
				     buffer->debug_id,
				     buffer->transaction ? "active" : "finished");

			if (buffer->transaction) {
				buffer->transaction->buffer = NULL;
				buffer->transaction = NULL;
			}
			if (buffer->async_transaction && buffer->target_node) {
				BUG_ON(!buffer->target_node->has_async_transaction);
				if (list_empty(&buffer->target_node->async_todo))
					buffer->target_node->has_async_transaction = 0;
				else
					list_move_tail(buffer->target_node->async_todo.next, &thread->todo);
			}
			trace_binder_transaction_buffer_release(buffer);
			binder_transaction_buffer_release(proc, buffer, NULL);
			binder_free_buf(proc, buffer);
			break;
		}

1954 case BC_TRANSACTION:
1955 case BC_REPLY: {
1956 struct binder_transaction_data tr;
1958 if (copy_from_user(&tr, ptr, sizeof(tr)))
1959 return -EFAULT;
1960 ptr += sizeof(tr);
1961 binder_transaction(proc, thread, &tr, cmd == BC_REPLY);
1962 break;
1965 case BC_REGISTER_LOOPER:
1966 binder_debug(BINDER_DEBUG_THREADS,
1967 "%d:%d BC_REGISTER_LOOPER\n",
1968 proc->pid, thread->pid);
1969 if (thread->looper & BINDER_LOOPER_STATE_ENTERED) {
1970 thread->looper |= BINDER_LOOPER_STATE_INVALID;
1971 binder_user_error("%d:%d ERROR: BC_REGISTER_LOOPER called after BC_ENTER_LOOPER\n",
1972 proc->pid, thread->pid);
1973 } else if (proc->requested_threads == 0) {
1974 thread->looper |= BINDER_LOOPER_STATE_INVALID;
1975 binder_user_error("%d:%d ERROR: BC_REGISTER_LOOPER called without request\n",
1976 proc->pid, thread->pid);
1977 } else {
1978 proc->requested_threads--;
1979 proc->requested_threads_started++;
1981 thread->looper |= BINDER_LOOPER_STATE_REGISTERED;
1982 break;
1983 case BC_ENTER_LOOPER:
1984 binder_debug(BINDER_DEBUG_THREADS,
1985 "%d:%d BC_ENTER_LOOPER\n",
1986 proc->pid, thread->pid);
1987 if (thread->looper & BINDER_LOOPER_STATE_REGISTERED) {
1988 thread->looper |= BINDER_LOOPER_STATE_INVALID;
1989 binder_user_error("%d:%d ERROR: BC_ENTER_LOOPER called after BC_REGISTER_LOOPER\n",
1990 proc->pid, thread->pid);
1992 thread->looper |= BINDER_LOOPER_STATE_ENTERED;
1993 break;
1994 case BC_EXIT_LOOPER:
1995 binder_debug(BINDER_DEBUG_THREADS,
1996 "%d:%d BC_EXIT_LOOPER\n",
1997 proc->pid, thread->pid);
1998 thread->looper |= BINDER_LOOPER_STATE_EXITED;
1999 break;
2001 case BC_REQUEST_DEATH_NOTIFICATION:
2002 case BC_CLEAR_DEATH_NOTIFICATION: {
2003 uint32_t target;
2004 binder_uintptr_t cookie;
2005 struct binder_ref *ref;
2006 struct binder_ref_death *death;
2008 if (get_user(target, (uint32_t __user *)ptr))
2009 return -EFAULT;
2010 ptr += sizeof(uint32_t);
2011 if (get_user(cookie, (binder_uintptr_t __user *)ptr))
2012 return -EFAULT;
2013 ptr += sizeof(binder_uintptr_t);
2014 ref = binder_get_ref(proc, target, false);
2015 if (ref == NULL) {
2016 binder_user_error("%d:%d %s invalid ref %d\n",
2017 proc->pid, thread->pid,
2018 cmd == BC_REQUEST_DEATH_NOTIFICATION ?
2019 "BC_REQUEST_DEATH_NOTIFICATION" :
2020 "BC_CLEAR_DEATH_NOTIFICATION",
2021 target);
2022 break;
2025 binder_debug(BINDER_DEBUG_DEATH_NOTIFICATION,
2026 "%d:%d %s %016llx ref %d desc %d s %d w %d for node %d\n",
2027 proc->pid, thread->pid,
2028 cmd == BC_REQUEST_DEATH_NOTIFICATION ?
2029 "BC_REQUEST_DEATH_NOTIFICATION" :
2030 "BC_CLEAR_DEATH_NOTIFICATION",
2031 (u64)cookie, ref->debug_id, ref->desc,
2032 ref->strong, ref->weak, ref->node->debug_id);
2034 if (cmd == BC_REQUEST_DEATH_NOTIFICATION) {
2035 if (ref->death) {
2036 binder_user_error("%d:%d BC_REQUEST_DEATH_NOTIFICATION death notification already set\n",
2037 proc->pid, thread->pid);
2038 break;
2040 death = kzalloc(sizeof(*death), GFP_KERNEL);
2041 if (death == NULL) {
2042 thread->return_error = BR_ERROR;
2043 binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
2044 "%d:%d BC_REQUEST_DEATH_NOTIFICATION failed\n",
2045 proc->pid, thread->pid);
2046 break;
2048 binder_stats_created(BINDER_STAT_DEATH);
2049 INIT_LIST_HEAD(&death->work.entry);
2050 death->cookie = cookie;
2051 ref->death = death;
2052 if (ref->node->proc == NULL) {
2053 ref->death->work.type = BINDER_WORK_DEAD_BINDER;
2054 if (thread->looper & (BINDER_LOOPER_STATE_REGISTERED | BINDER_LOOPER_STATE_ENTERED)) {
2055 list_add_tail(&ref->death->work.entry, &thread->todo);
2056 } else {
2057 list_add_tail(&ref->death->work.entry, &proc->todo);
2058 wake_up_interruptible(&proc->wait);
2061 } else {
2062 if (ref->death == NULL) {
2063 binder_user_error("%d:%d BC_CLEAR_DEATH_NOTIFICATION death notification not active\n",
2064 proc->pid, thread->pid);
2065 break;
2067 death = ref->death;
2068 if (death->cookie != cookie) {
2069 binder_user_error("%d:%d BC_CLEAR_DEATH_NOTIFICATION death notification cookie mismatch %016llx != %016llx\n",
2070 proc->pid, thread->pid,
2071 (u64)death->cookie,
2072 (u64)cookie);
2073 break;
2075 ref->death = NULL;
2076 if (list_empty(&death->work.entry)) {
2077 death->work.type = BINDER_WORK_CLEAR_DEATH_NOTIFICATION;
2078 if (thread->looper & (BINDER_LOOPER_STATE_REGISTERED | BINDER_LOOPER_STATE_ENTERED)) {
2079 list_add_tail(&death->work.entry, &thread->todo);
2080 } else {
2081 list_add_tail(&death->work.entry, &proc->todo);
2082 wake_up_interruptible(&proc->wait);
2084 } else {
2085 BUG_ON(death->work.type != BINDER_WORK_DEAD_BINDER);
2086 death->work.type = BINDER_WORK_DEAD_BINDER_AND_CLEAR;
2089 } break;
2090 case BC_DEAD_BINDER_DONE: {
2091 struct binder_work *w;
2092 binder_uintptr_t cookie;
2093 struct binder_ref_death *death = NULL;
2095 if (get_user(cookie, (binder_uintptr_t __user *)ptr))
2096 return -EFAULT;
2098 ptr += sizeof(cookie);
2099 list_for_each_entry(w, &proc->delivered_death, entry) {
2100 struct binder_ref_death *tmp_death = container_of(w, struct binder_ref_death, work);
2102 if (tmp_death->cookie == cookie) {
2103 death = tmp_death;
2104 break;
2107 binder_debug(BINDER_DEBUG_DEAD_BINDER,
2108 "%d:%d BC_DEAD_BINDER_DONE %016llx found %p\n",
2109 proc->pid, thread->pid, (u64)cookie,
2110 death);
2111 if (death == NULL) {
2112 binder_user_error("%d:%d BC_DEAD_BINDER_DONE %016llx not found\n",
2113 proc->pid, thread->pid, (u64)cookie);
2114 break;
2117 list_del_init(&death->work.entry);
2118 if (death->work.type == BINDER_WORK_DEAD_BINDER_AND_CLEAR) {
2119 death->work.type = BINDER_WORK_CLEAR_DEATH_NOTIFICATION;
2120 if (thread->looper & (BINDER_LOOPER_STATE_REGISTERED | BINDER_LOOPER_STATE_ENTERED)) {
2121 list_add_tail(&death->work.entry, &thread->todo);
2122 } else {
2123 list_add_tail(&death->work.entry, &proc->todo);
2124 wake_up_interruptible(&proc->wait);
2127 } break;
2129 default:
2130 pr_err("%d:%d unknown command %d\n",
2131 proc->pid, thread->pid, cmd);
2132 return -EINVAL;
2134 *consumed = ptr - buffer;
2136 return 0;
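/*
 * Hedged illustration (not part of the driver): binder_thread_write()
 * above parses a packed stream of u32 command codes, each followed
 * immediately by that command's payload.  A minimal user-space sketch
 * of appending a BC_FREE_BUFFER command, assuming "wbuf"/"wpos" are
 * caller-owned staging variables and "data_ptr" came from the
 * tr.data.ptr.buffer of a previously received transaction:
 *
 *	uint32_t cmd = BC_FREE_BUFFER;
 *	memcpy(wbuf + wpos, &cmd, sizeof(cmd));
 *	wpos += sizeof(cmd);
 *	memcpy(wbuf + wpos, &data_ptr, sizeof(binder_uintptr_t));
 *	wpos += sizeof(binder_uintptr_t);
 *	// wbuf/wpos later become bwr.write_buffer/bwr.write_size
 */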
2139 static void binder_stat_br(struct binder_proc *proc,
2140 struct binder_thread *thread, uint32_t cmd)
2142 trace_binder_return(cmd);
2143 if (_IOC_NR(cmd) < ARRAY_SIZE(binder_stats.br)) {
2144 binder_stats.br[_IOC_NR(cmd)]++;
2145 proc->stats.br[_IOC_NR(cmd)]++;
2146 thread->stats.br[_IOC_NR(cmd)]++;
2150 static int binder_has_proc_work(struct binder_proc *proc,
2151 struct binder_thread *thread)
2153 return !list_empty(&proc->todo) ||
2154 (thread->looper & BINDER_LOOPER_STATE_NEED_RETURN);
2157 static int binder_has_thread_work(struct binder_thread *thread)
2159 return !list_empty(&thread->todo) || thread->return_error != BR_OK ||
2160 (thread->looper & BINDER_LOOPER_STATE_NEED_RETURN);
2163 static int binder_thread_read(struct binder_proc *proc,
2164 struct binder_thread *thread,
2165 binder_uintptr_t binder_buffer, size_t size,
2166 binder_size_t *consumed, int non_block)
2168 void __user *buffer = (void __user *)(uintptr_t)binder_buffer;
2169 void __user *ptr = buffer + *consumed;
2170 void __user *end = buffer + size;
2172 int ret = 0;
2173 int wait_for_proc_work;
2175 if (*consumed == 0) {
2176 if (put_user(BR_NOOP, (uint32_t __user *)ptr))
2177 return -EFAULT;
2178 ptr += sizeof(uint32_t);
2181 retry:
2182 wait_for_proc_work = thread->transaction_stack == NULL &&
2183 list_empty(&thread->todo);
2185 if (thread->return_error != BR_OK && ptr < end) {
2186 if (thread->return_error2 != BR_OK) {
2187 if (put_user(thread->return_error2, (uint32_t __user *)ptr))
2188 return -EFAULT;
2189 ptr += sizeof(uint32_t);
2190 binder_stat_br(proc, thread, thread->return_error2);
2191 if (ptr == end)
2192 goto done;
2193 thread->return_error2 = BR_OK;
2195 if (put_user(thread->return_error, (uint32_t __user *)ptr))
2196 return -EFAULT;
2197 ptr += sizeof(uint32_t);
2198 binder_stat_br(proc, thread, thread->return_error);
2199 thread->return_error = BR_OK;
2200 goto done;
2204 thread->looper |= BINDER_LOOPER_STATE_WAITING;
2205 if (wait_for_proc_work)
2206 proc->ready_threads++;
2208 binder_unlock(__func__);
2210 trace_binder_wait_for_work(wait_for_proc_work,
2211 !!thread->transaction_stack,
2212 !list_empty(&thread->todo));
2213 if (wait_for_proc_work) {
2214 if (!(thread->looper & (BINDER_LOOPER_STATE_REGISTERED |
2215 BINDER_LOOPER_STATE_ENTERED))) {
2216 binder_user_error("%d:%d ERROR: Thread waiting for process work before calling BC_REGISTER_LOOPER or BC_ENTER_LOOPER (state %x)\n",
2217 proc->pid, thread->pid, thread->looper);
2218 wait_event_interruptible(binder_user_error_wait,
2219 binder_stop_on_user_error < 2);
2221 binder_set_nice(proc->default_priority);
2222 if (non_block) {
2223 if (!binder_has_proc_work(proc, thread))
2224 ret = -EAGAIN;
2225 } else
2226 ret = wait_event_freezable_exclusive(proc->wait, binder_has_proc_work(proc, thread));
2227 } else {
2228 if (non_block) {
2229 if (!binder_has_thread_work(thread))
2230 ret = -EAGAIN;
2231 } else
2232 ret = wait_event_freezable(thread->wait, binder_has_thread_work(thread));
2235 binder_lock(__func__);
2237 if (wait_for_proc_work)
2238 proc->ready_threads--;
2239 thread->looper &= ~BINDER_LOOPER_STATE_WAITING;
2241 if (ret)
2242 return ret;
2244 while (1) {
2245 uint32_t cmd;
2246 struct binder_transaction_data tr;
2247 struct binder_work *w;
2248 struct binder_transaction *t = NULL;
2250 if (!list_empty(&thread->todo)) {
2251 w = list_first_entry(&thread->todo, struct binder_work,
2252 entry);
2253 } else if (!list_empty(&proc->todo) && wait_for_proc_work) {
2254 w = list_first_entry(&proc->todo, struct binder_work,
2255 entry);
2256 } else {
2257 /* no data added */
2258 if (ptr - buffer == 4 &&
2259 !(thread->looper & BINDER_LOOPER_STATE_NEED_RETURN))
2260 goto retry;
2261 break;
2264 if (end - ptr < sizeof(tr) + 4)
2265 break;
2267 switch (w->type) {
2268 case BINDER_WORK_TRANSACTION: {
2269 t = container_of(w, struct binder_transaction, work);
2270 } break;
2271 case BINDER_WORK_TRANSACTION_COMPLETE: {
2272 cmd = BR_TRANSACTION_COMPLETE;
2273 if (put_user(cmd, (uint32_t __user *)ptr))
2274 return -EFAULT;
2275 ptr += sizeof(uint32_t);
2277 binder_stat_br(proc, thread, cmd);
2278 binder_debug(BINDER_DEBUG_TRANSACTION_COMPLETE,
2279 "%d:%d BR_TRANSACTION_COMPLETE\n",
2280 proc->pid, thread->pid);
2282 list_del(&w->entry);
2283 kfree(w);
2284 binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
2285 } break;
2286 case BINDER_WORK_NODE: {
2287 struct binder_node *node = container_of(w, struct binder_node, work);
2288 uint32_t cmd = BR_NOOP;
2289 const char *cmd_name;
2290 int strong = node->internal_strong_refs || node->local_strong_refs;
2291 int weak = !hlist_empty(&node->refs) || node->local_weak_refs || strong;
2293 if (weak && !node->has_weak_ref) {
2294 cmd = BR_INCREFS;
2295 cmd_name = "BR_INCREFS";
2296 node->has_weak_ref = 1;
2297 node->pending_weak_ref = 1;
2298 node->local_weak_refs++;
2299 } else if (strong && !node->has_strong_ref) {
2300 cmd = BR_ACQUIRE;
2301 cmd_name = "BR_ACQUIRE";
2302 node->has_strong_ref = 1;
2303 node->pending_strong_ref = 1;
2304 node->local_strong_refs++;
2305 } else if (!strong && node->has_strong_ref) {
2306 cmd = BR_RELEASE;
2307 cmd_name = "BR_RELEASE";
2308 node->has_strong_ref = 0;
2309 } else if (!weak && node->has_weak_ref) {
2310 cmd = BR_DECREFS;
2311 cmd_name = "BR_DECREFS";
2312 node->has_weak_ref = 0;
2314 if (cmd != BR_NOOP) {
2315 if (put_user(cmd, (uint32_t __user *)ptr))
2316 return -EFAULT;
2317 ptr += sizeof(uint32_t);
2318 if (put_user(node->ptr,
2319 (binder_uintptr_t __user *)ptr))
2320 return -EFAULT;
2321 ptr += sizeof(binder_uintptr_t);
2322 if (put_user(node->cookie,
2323 (binder_uintptr_t __user *)ptr))
2324 return -EFAULT;
2325 ptr += sizeof(binder_uintptr_t);
2327 binder_stat_br(proc, thread, cmd);
2328 binder_debug(BINDER_DEBUG_USER_REFS,
2329 "%d:%d %s %d u%016llx c%016llx\n",
2330 proc->pid, thread->pid, cmd_name,
2331 node->debug_id,
2332 (u64)node->ptr, (u64)node->cookie);
2333 } else {
2334 list_del_init(&w->entry);
2335 if (!weak && !strong) {
2336 binder_debug(BINDER_DEBUG_INTERNAL_REFS,
2337 "%d:%d node %d u%016llx c%016llx deleted\n",
2338 proc->pid, thread->pid,
2339 node->debug_id,
2340 (u64)node->ptr,
2341 (u64)node->cookie);
2342 rb_erase(&node->rb_node, &proc->nodes);
2343 kfree(node);
2344 binder_stats_deleted(BINDER_STAT_NODE);
2345 } else {
2346 binder_debug(BINDER_DEBUG_INTERNAL_REFS,
2347 "%d:%d node %d u%016llx c%016llx state unchanged\n",
2348 proc->pid, thread->pid,
2349 node->debug_id,
2350 (u64)node->ptr,
2351 (u64)node->cookie);
2354 } break;
2355 case BINDER_WORK_DEAD_BINDER:
2356 case BINDER_WORK_DEAD_BINDER_AND_CLEAR:
2357 case BINDER_WORK_CLEAR_DEATH_NOTIFICATION: {
2358 struct binder_ref_death *death;
2359 uint32_t cmd;
2361 death = container_of(w, struct binder_ref_death, work);
2362 if (w->type == BINDER_WORK_CLEAR_DEATH_NOTIFICATION)
2363 cmd = BR_CLEAR_DEATH_NOTIFICATION_DONE;
2364 else
2365 cmd = BR_DEAD_BINDER;
2366 if (put_user(cmd, (uint32_t __user *)ptr))
2367 return -EFAULT;
2368 ptr += sizeof(uint32_t);
2369 if (put_user(death->cookie,
2370 (binder_uintptr_t __user *)ptr))
2371 return -EFAULT;
2372 ptr += sizeof(binder_uintptr_t);
2373 binder_stat_br(proc, thread, cmd);
2374 binder_debug(BINDER_DEBUG_DEATH_NOTIFICATION,
2375 "%d:%d %s %016llx\n",
2376 proc->pid, thread->pid,
2377 cmd == BR_DEAD_BINDER ?
2378 "BR_DEAD_BINDER" :
2379 "BR_CLEAR_DEATH_NOTIFICATION_DONE",
2380 (u64)death->cookie);
2382 if (w->type == BINDER_WORK_CLEAR_DEATH_NOTIFICATION) {
2383 list_del(&w->entry);
2384 kfree(death);
2385 binder_stats_deleted(BINDER_STAT_DEATH);
2386 } else
2387 list_move(&w->entry, &proc->delivered_death);
2388 if (cmd == BR_DEAD_BINDER)
2389 goto done; /* DEAD_BINDER notifications can cause transactions */
2390 } break;
2393 if (!t)
2394 continue;
2396 BUG_ON(t->buffer == NULL);
2397 if (t->buffer->target_node) {
2398 struct binder_node *target_node = t->buffer->target_node;
2400 tr.target.ptr = target_node->ptr;
2401 tr.cookie = target_node->cookie;
2402 t->saved_priority = task_nice(current);
2403 if (t->priority < target_node->min_priority &&
2404 !(t->flags & TF_ONE_WAY))
2405 binder_set_nice(t->priority);
2406 else if (!(t->flags & TF_ONE_WAY) ||
2407 t->saved_priority > target_node->min_priority)
2408 binder_set_nice(target_node->min_priority);
2409 cmd = BR_TRANSACTION;
2410 } else {
2411 tr.target.ptr = 0;
2412 tr.cookie = 0;
2413 cmd = BR_REPLY;
2415 tr.code = t->code;
2416 tr.flags = t->flags;
2417 tr.sender_euid = from_kuid(current_user_ns(), t->sender_euid);
2419 if (t->from) {
2420 struct task_struct *sender = t->from->proc->tsk;
2422 tr.sender_pid = task_tgid_nr_ns(sender,
2423 task_active_pid_ns(current));
2424 } else {
2425 tr.sender_pid = 0;
2428 tr.data_size = t->buffer->data_size;
2429 tr.offsets_size = t->buffer->offsets_size;
2430 tr.data.ptr.buffer = (binder_uintptr_t)(
2431 (uintptr_t)t->buffer->data +
2432 proc->user_buffer_offset);
2433 tr.data.ptr.offsets = tr.data.ptr.buffer +
2434 ALIGN(t->buffer->data_size,
2435 sizeof(void *));
2437 if (put_user(cmd, (uint32_t __user *)ptr))
2438 return -EFAULT;
2439 ptr += sizeof(uint32_t);
2440 if (copy_to_user(ptr, &tr, sizeof(tr)))
2441 return -EFAULT;
2442 ptr += sizeof(tr);
2444 trace_binder_transaction_received(t);
2445 binder_stat_br(proc, thread, cmd);
2446 binder_debug(BINDER_DEBUG_TRANSACTION,
2447 "%d:%d %s %d %d:%d, cmd %d size %zd-%zd ptr %016llx-%016llx\n",
2448 proc->pid, thread->pid,
2449 (cmd == BR_TRANSACTION) ? "BR_TRANSACTION" :
2450 "BR_REPLY",
2451 t->debug_id, t->from ? t->from->proc->pid : 0,
2452 t->from ? t->from->pid : 0, cmd,
2453 t->buffer->data_size, t->buffer->offsets_size,
2454 (u64)tr.data.ptr.buffer, (u64)tr.data.ptr.offsets);
2456 list_del(&t->work.entry);
2457 t->buffer->allow_user_free = 1;
2458 if (cmd == BR_TRANSACTION && !(t->flags & TF_ONE_WAY)) {
2459 t->to_parent = thread->transaction_stack;
2460 t->to_thread = thread;
2461 thread->transaction_stack = t;
2462 } else {
2463 t->buffer->transaction = NULL;
2464 kfree(t);
2465 binder_stats_deleted(BINDER_STAT_TRANSACTION);
2467 break;
2470 done:
2472 *consumed = ptr - buffer;
2473 if (proc->requested_threads + proc->ready_threads == 0 &&
2474 proc->requested_threads_started < proc->max_threads &&
2475 (thread->looper & (BINDER_LOOPER_STATE_REGISTERED |
2476 BINDER_LOOPER_STATE_ENTERED))
2477 /* the user-space code fails to spawn a new thread if we leave this out */) {
2478 proc->requested_threads++;
2479 binder_debug(BINDER_DEBUG_THREADS,
2480 "%d:%d BR_SPAWN_LOOPER\n",
2481 proc->pid, thread->pid);
2482 if (put_user(BR_SPAWN_LOOPER, (uint32_t __user *)buffer))
2483 return -EFAULT;
2484 binder_stat_br(proc, thread, BR_SPAWN_LOOPER);
2486 return 0;
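/*
 * Hedged illustration (not part of the driver): the stream that
 * binder_thread_read() produces is consumed by user space as a
 * sequence of u32 return codes, each followed by a code-specific
 * payload.  A minimal consumer sketch, assuming "rbuf" holds
 * bwr.read_consumed bytes from a prior BINDER_WRITE_READ and
 * "handle_txn" is a hypothetical helper:
 *
 *	char *p = rbuf, *end = rbuf + read_consumed;
 *	while (p < end) {
 *		uint32_t cmd;
 *		memcpy(&cmd, p, sizeof(cmd));
 *		p += sizeof(cmd);
 *		switch (cmd) {
 *		case BR_NOOP:		// no payload
 *			break;
 *		case BR_TRANSACTION:
 *		case BR_REPLY:
 *			handle_txn((struct binder_transaction_data *)p);
 *			p += sizeof(struct binder_transaction_data);
 *			break;
 *		case BR_DEAD_BINDER:	// payload is the death cookie
 *			p += sizeof(binder_uintptr_t);
 *			break;
 *		default:
 *			p = end;	// a real parser must size every BR_ code
 *			break;
 *		}
 *	}
 */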
2489 static void binder_release_work(struct list_head *list)
2491 struct binder_work *w;
2493 while (!list_empty(list)) {
2494 w = list_first_entry(list, struct binder_work, entry);
2495 list_del_init(&w->entry);
2496 switch (w->type) {
2497 case BINDER_WORK_TRANSACTION: {
2498 struct binder_transaction *t;
2500 t = container_of(w, struct binder_transaction, work);
2501 if (t->buffer->target_node &&
2502 !(t->flags & TF_ONE_WAY)) {
2503 binder_send_failed_reply(t, BR_DEAD_REPLY);
2504 } else {
2505 binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
2506 "undelivered transaction %d\n",
2507 t->debug_id);
2508 t->buffer->transaction = NULL;
2509 kfree(t);
2510 binder_stats_deleted(BINDER_STAT_TRANSACTION);
2512 } break;
2513 case BINDER_WORK_TRANSACTION_COMPLETE: {
2514 binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
2515 "undelivered TRANSACTION_COMPLETE\n");
2516 kfree(w);
2517 binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
2518 } break;
2519 case BINDER_WORK_DEAD_BINDER_AND_CLEAR:
2520 case BINDER_WORK_CLEAR_DEATH_NOTIFICATION: {
2521 struct binder_ref_death *death;
2523 death = container_of(w, struct binder_ref_death, work);
2524 binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
2525 "undelivered death notification, %016llx\n",
2526 (u64)death->cookie);
2527 kfree(death);
2528 binder_stats_deleted(BINDER_STAT_DEATH);
2529 } break;
2530 default:
2531 pr_err("unexpected work type, %d, not freed\n",
2532 w->type);
2533 break;
2539 static struct binder_thread *binder_get_thread(struct binder_proc *proc)
2541 struct binder_thread *thread = NULL;
2542 struct rb_node *parent = NULL;
2543 struct rb_node **p = &proc->threads.rb_node;
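/*
 * Per-process threads live in an rb-tree keyed by pid; the walk below
 * remembers the parent and link slot so a new binder_thread can be
 * linked in place on a miss without re-searching.
 */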
2545 while (*p) {
2546 parent = *p;
2547 thread = rb_entry(parent, struct binder_thread, rb_node);
2549 if (current->pid < thread->pid)
2550 p = &(*p)->rb_left;
2551 else if (current->pid > thread->pid)
2552 p = &(*p)->rb_right;
2553 else
2554 break;
2556 if (*p == NULL) {
2557 thread = kzalloc(sizeof(*thread), GFP_KERNEL);
2558 if (thread == NULL)
2559 return NULL;
2560 binder_stats_created(BINDER_STAT_THREAD);
2561 thread->proc = proc;
2562 thread->pid = current->pid;
2563 init_waitqueue_head(&thread->wait);
2564 INIT_LIST_HEAD(&thread->todo);
2565 rb_link_node(&thread->rb_node, parent, p);
2566 rb_insert_color(&thread->rb_node, &proc->threads);
2567 thread->looper |= BINDER_LOOPER_STATE_NEED_RETURN;
2568 thread->return_error = BR_OK;
2569 thread->return_error2 = BR_OK;
2571 return thread;
2574 static int binder_free_thread(struct binder_proc *proc,
2575 struct binder_thread *thread)
2577 struct binder_transaction *t;
2578 struct binder_transaction *send_reply = NULL;
2579 int active_transactions = 0;
2581 rb_erase(&thread->rb_node, &proc->threads);
2582 t = thread->transaction_stack;
2583 if (t && t->to_thread == thread)
2584 send_reply = t;
2585 while (t) {
2586 active_transactions++;
2587 binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
2588 "release %d:%d transaction %d %s, still active\n",
2589 proc->pid, thread->pid,
2590 t->debug_id,
2591 (t->to_thread == thread) ? "in" : "out");
2593 if (t->to_thread == thread) {
2594 t->to_proc = NULL;
2595 t->to_thread = NULL;
2596 if (t->buffer) {
2597 t->buffer->transaction = NULL;
2598 t->buffer = NULL;
2600 t = t->to_parent;
2601 } else if (t->from == thread) {
2602 t->from = NULL;
2603 t = t->from_parent;
2604 } else
2605 BUG();
2607 if (send_reply)
2608 binder_send_failed_reply(send_reply, BR_DEAD_REPLY);
2609 binder_release_work(&thread->todo);
2610 kfree(thread);
2611 binder_stats_deleted(BINDER_STAT_THREAD);
2612 return active_transactions;
2615 static unsigned int binder_poll(struct file *filp,
2616 struct poll_table_struct *wait)
2618 struct binder_proc *proc = filp->private_data;
2619 struct binder_thread *thread = NULL;
2620 int wait_for_proc_work;
2622 binder_lock(__func__);
2624 thread = binder_get_thread(proc);
2625 if (!thread) {
2626 binder_unlock(__func__);
2627 return POLLERR;
2630 wait_for_proc_work = thread->transaction_stack == NULL &&
2631 list_empty(&thread->todo) && thread->return_error == BR_OK;
2633 binder_unlock(__func__);
2635 if (wait_for_proc_work) {
2636 if (binder_has_proc_work(proc, thread))
2637 return POLLIN;
2638 poll_wait(filp, &proc->wait, wait);
2639 if (binder_has_proc_work(proc, thread))
2640 return POLLIN;
2641 } else {
2642 if (binder_has_thread_work(thread))
2643 return POLLIN;
2644 poll_wait(filp, &thread->wait, wait);
2645 if (binder_has_thread_work(thread))
2646 return POLLIN;
2648 return 0;
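/*
 * Hedged illustration (not part of the driver): a user-space sketch of
 * blocking for binder work with poll(2), assuming "binder_fd" is an
 * open /dev/binder descriptor:
 *
 *	struct pollfd pfd = { .fd = binder_fd, .events = POLLIN };
 *	if (poll(&pfd, 1, -1) > 0 && (pfd.revents & POLLIN)) {
 *		// drain work with a BINDER_WRITE_READ whose read_size > 0
 *	}
 */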
2651 static int binder_ioctl_write_read(struct file *filp,
2652 unsigned int cmd, unsigned long arg,
2653 struct binder_thread *thread)
2655 int ret = 0;
2656 struct binder_proc *proc = filp->private_data;
2657 unsigned int size = _IOC_SIZE(cmd);
2658 void __user *ubuf = (void __user *)arg;
2659 struct binder_write_read bwr;
2661 if (size != sizeof(struct binder_write_read)) {
2662 ret = -EINVAL;
2663 goto out;
2665 if (copy_from_user(&bwr, ubuf, sizeof(bwr))) {
2666 ret = -EFAULT;
2667 goto out;
2669 binder_debug(BINDER_DEBUG_READ_WRITE,
2670 "%d:%d write %lld at %016llx, read %lld at %016llx\n",
2671 proc->pid, thread->pid,
2672 (u64)bwr.write_size, (u64)bwr.write_buffer,
2673 (u64)bwr.read_size, (u64)bwr.read_buffer);
2675 if (bwr.write_size > 0) {
2676 ret = binder_thread_write(proc, thread,
2677 bwr.write_buffer,
2678 bwr.write_size,
2679 &bwr.write_consumed);
2680 trace_binder_write_done(ret);
2681 if (ret < 0) {
2682 bwr.read_consumed = 0;
2683 if (copy_to_user(ubuf, &bwr, sizeof(bwr)))
2684 ret = -EFAULT;
2685 goto out;
2688 if (bwr.read_size > 0) {
2689 ret = binder_thread_read(proc, thread, bwr.read_buffer,
2690 bwr.read_size,
2691 &bwr.read_consumed,
2692 filp->f_flags & O_NONBLOCK);
2693 trace_binder_read_done(ret);
2694 if (!list_empty(&proc->todo))
2695 wake_up_interruptible(&proc->wait);
2696 if (ret < 0) {
2697 if (copy_to_user(ubuf, &bwr, sizeof(bwr)))
2698 ret = -EFAULT;
2699 goto out;
2702 binder_debug(BINDER_DEBUG_READ_WRITE,
2703 "%d:%d wrote %lld of %lld, read return %lld of %lld\n",
2704 proc->pid, thread->pid,
2705 (u64)bwr.write_consumed, (u64)bwr.write_size,
2706 (u64)bwr.read_consumed, (u64)bwr.read_size);
2707 if (copy_to_user(ubuf, &bwr, sizeof(bwr))) {
2708 ret = -EFAULT;
2709 goto out;
2711 out:
2712 return ret;
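/*
 * Hedged illustration (not part of the driver): the matching user-space
 * call fills a struct binder_write_read and issues BINDER_WRITE_READ;
 * on return, write_consumed/read_consumed report how much of each
 * buffer the kernel processed.  "binder_fd", "wbuf"/"wpos" and "rbuf"
 * are assumed names:
 *
 *	struct binder_write_read bwr = {
 *		.write_buffer = (binder_uintptr_t)(uintptr_t)wbuf,
 *		.write_size   = wpos,
 *		.read_buffer  = (binder_uintptr_t)(uintptr_t)rbuf,
 *		.read_size    = sizeof(rbuf),
 *	};
 *	if (ioctl(binder_fd, BINDER_WRITE_READ, &bwr) < 0)
 *		perror("BINDER_WRITE_READ");
 */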
2715 static int binder_ioctl_set_ctx_mgr(struct file *filp)
2717 int ret = 0;
2718 struct binder_proc *proc = filp->private_data;
2719 kuid_t curr_euid = current_euid();
2721 if (binder_context_mgr_node != NULL) {
2722 pr_err("BINDER_SET_CONTEXT_MGR already set\n");
2723 ret = -EBUSY;
2724 goto out;
2726 ret = security_binder_set_context_mgr(proc->tsk);
2727 if (ret < 0)
2728 goto out;
2729 if (uid_valid(binder_context_mgr_uid)) {
2730 if (!uid_eq(binder_context_mgr_uid, curr_euid)) {
2731 pr_err("BINDER_SET_CONTEXT_MGR bad uid %d != %d\n",
2732 from_kuid(&init_user_ns, curr_euid),
2733 from_kuid(&init_user_ns,
2734 binder_context_mgr_uid));
2735 ret = -EPERM;
2736 goto out;
2738 } else {
2739 binder_context_mgr_uid = curr_euid;
2741 binder_context_mgr_node = binder_new_node(proc, 0, 0);
2742 if (binder_context_mgr_node == NULL) {
2743 ret = -ENOMEM;
2744 goto out;
2746 binder_context_mgr_node->local_weak_refs++;
2747 binder_context_mgr_node->local_strong_refs++;
2748 binder_context_mgr_node->has_strong_ref = 1;
2749 binder_context_mgr_node->has_weak_ref = 1;
2750 out:
2751 return ret;
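/*
 * Hedged illustration (not part of the driver): the context manager
 * (Android's servicemanager) claims the reserved handle once at
 * startup; a second claimant gets -EBUSY from the check above:
 *
 *	if (ioctl(binder_fd, BINDER_SET_CONTEXT_MGR, 0) < 0)
 *		perror("BINDER_SET_CONTEXT_MGR");
 */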
2754 static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
2756 int ret;
2757 struct binder_proc *proc = filp->private_data;
2758 struct binder_thread *thread;
2759 unsigned int size = _IOC_SIZE(cmd);
2760 void __user *ubuf = (void __user *)arg;
2762 /*pr_info("binder_ioctl: %d:%d %x %lx\n",
2763 proc->pid, current->pid, cmd, arg);*/
2765 trace_binder_ioctl(cmd, arg);
2767 ret = wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2);
2768 if (ret)
2769 goto err_unlocked;
2771 binder_lock(__func__);
2772 thread = binder_get_thread(proc);
2773 if (thread == NULL) {
2774 ret = -ENOMEM;
2775 goto err;
2778 switch (cmd) {
2779 case BINDER_WRITE_READ:
2780 ret = binder_ioctl_write_read(filp, cmd, arg, thread);
2781 if (ret)
2782 goto err;
2783 break;
2784 case BINDER_SET_MAX_THREADS:
2785 if (copy_from_user(&proc->max_threads, ubuf, sizeof(proc->max_threads))) {
2786 ret = -EINVAL;
2787 goto err;
2789 break;
2790 case BINDER_SET_CONTEXT_MGR:
2791 ret = binder_ioctl_set_ctx_mgr(filp);
2792 if (ret)
2793 goto err;
2794 break;
2795 case BINDER_THREAD_EXIT:
2796 binder_debug(BINDER_DEBUG_THREADS, "%d:%d exit\n",
2797 proc->pid, thread->pid);
2798 binder_free_thread(proc, thread);
2799 thread = NULL;
2800 break;
2801 case BINDER_VERSION: {
2802 struct binder_version __user *ver = ubuf;
2804 if (size != sizeof(struct binder_version)) {
2805 ret = -EINVAL;
2806 goto err;
2808 if (put_user(BINDER_CURRENT_PROTOCOL_VERSION,
2809 &ver->protocol_version)) {
2810 ret = -EINVAL;
2811 goto err;
2813 break;
2815 default:
2816 ret = -EINVAL;
2817 goto err;
2819 ret = 0;
2820 err:
2821 if (thread)
2822 thread->looper &= ~BINDER_LOOPER_STATE_NEED_RETURN;
2823 binder_unlock(__func__);
2824 wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2);
2825 if (ret && ret != -ERESTARTSYS)
2826 pr_info("%d:%d ioctl %x %lx returned %d\n", proc->pid, current->pid, cmd, arg, ret);
2827 err_unlocked:
2828 trace_binder_ioctl_done(ret);
2829 return ret;
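/*
 * Hedged illustration (not part of the driver): user space typically
 * validates the protocol version before doing anything else:
 *
 *	struct binder_version vers;
 *	if (ioctl(binder_fd, BINDER_VERSION, &vers) < 0 ||
 *	    vers.protocol_version != BINDER_CURRENT_PROTOCOL_VERSION) {
 *		// refuse to talk to a mismatched driver
 *	}
 */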
2832 static void binder_vma_open(struct vm_area_struct *vma)
2834 struct binder_proc *proc = vma->vm_private_data;
2836 binder_debug(BINDER_DEBUG_OPEN_CLOSE,
2837 "%d open vm area %lx-%lx (%ld K) vma %lx pagep %lx\n",
2838 proc->pid, vma->vm_start, vma->vm_end,
2839 (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
2840 (unsigned long)pgprot_val(vma->vm_page_prot));
2843 static void binder_vma_close(struct vm_area_struct *vma)
2845 struct binder_proc *proc = vma->vm_private_data;
2847 binder_debug(BINDER_DEBUG_OPEN_CLOSE,
2848 "%d close vm area %lx-%lx (%ld K) vma %lx pagep %lx\n",
2849 proc->pid, vma->vm_start, vma->vm_end,
2850 (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
2851 (unsigned long)pgprot_val(vma->vm_page_prot));
2852 proc->vma = NULL;
2853 proc->vma_vm_mm = NULL;
2854 binder_defer_work(proc, BINDER_DEFERRED_PUT_FILES);
2857 static int binder_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
2859 return VM_FAULT_SIGBUS;
2862 static const struct vm_operations_struct binder_vm_ops = {
2863 .open = binder_vma_open,
2864 .close = binder_vma_close,
2865 .fault = binder_vm_fault,
2868 static int binder_mmap(struct file *filp, struct vm_area_struct *vma)
2870 int ret;
2871 struct vm_struct *area;
2872 struct binder_proc *proc = filp->private_data;
2873 const char *failure_string;
2874 struct binder_buffer *buffer;
2876 if (proc->tsk != current->group_leader)
2877 return -EINVAL;
2879 if ((vma->vm_end - vma->vm_start) > SZ_4M)
2880 vma->vm_end = vma->vm_start + SZ_4M;
2882 binder_debug(BINDER_DEBUG_OPEN_CLOSE,
2883 "binder_mmap: %d %lx-%lx (%ld K) vma %lx pagep %lx\n",
2884 proc->pid, vma->vm_start, vma->vm_end,
2885 (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
2886 (unsigned long)pgprot_val(vma->vm_page_prot));
2888 if (vma->vm_flags & FORBIDDEN_MMAP_FLAGS) {
2889 ret = -EPERM;
2890 failure_string = "bad vm_flags";
2891 goto err_bad_arg;
2893 vma->vm_flags = (vma->vm_flags | VM_DONTCOPY) & ~VM_MAYWRITE;
2895 mutex_lock(&binder_mmap_lock);
2896 if (proc->buffer) {
2897 ret = -EBUSY;
2898 failure_string = "already mapped";
2899 goto err_already_mapped;
2902 area = get_vm_area(vma->vm_end - vma->vm_start, VM_IOREMAP);
2903 if (area == NULL) {
2904 ret = -ENOMEM;
2905 failure_string = "get_vm_area";
2906 goto err_get_vm_area_failed;
2908 proc->buffer = area->addr;
2909 proc->user_buffer_offset = vma->vm_start - (uintptr_t)proc->buffer;
2910 mutex_unlock(&binder_mmap_lock);
2912 #ifdef CONFIG_CPU_CACHE_VIPT
2913 if (cache_is_vipt_aliasing()) {
2914 while (CACHE_COLOUR((vma->vm_start ^ (uint32_t)proc->buffer))) {
2915 pr_info("binder_mmap: %d %lx-%lx maps %p bad alignment\n", proc->pid, vma->vm_start, vma->vm_end, proc->buffer);
2916 vma->vm_start += PAGE_SIZE;
2919 #endif
2920 proc->pages = kzalloc(sizeof(proc->pages[0]) * ((vma->vm_end - vma->vm_start) / PAGE_SIZE), GFP_KERNEL);
2921 if (proc->pages == NULL) {
2922 ret = -ENOMEM;
2923 failure_string = "alloc page array";
2924 goto err_alloc_pages_failed;
2926 proc->buffer_size = vma->vm_end - vma->vm_start;
2928 vma->vm_ops = &binder_vm_ops;
2929 vma->vm_private_data = proc;
2931 if (binder_update_page_range(proc, 1, proc->buffer, proc->buffer + PAGE_SIZE, vma)) {
2932 ret = -ENOMEM;
2933 failure_string = "alloc small buf";
2934 goto err_alloc_small_buf_failed;
2936 buffer = proc->buffer;
2937 INIT_LIST_HEAD(&proc->buffers);
2938 list_add(&buffer->entry, &proc->buffers);
2939 buffer->free = 1;
2940 binder_insert_free_buffer(proc, buffer);
2941 proc->free_async_space = proc->buffer_size / 2;
2942 barrier();
2943 proc->files = get_files_struct(current);
2944 proc->vma = vma;
2945 proc->vma_vm_mm = vma->vm_mm;
2947 /*pr_info("binder_mmap: %d %lx-%lx maps %p\n",
2948 proc->pid, vma->vm_start, vma->vm_end, proc->buffer);*/
2949 return 0;
2951 err_alloc_small_buf_failed:
2952 kfree(proc->pages);
2953 proc->pages = NULL;
2954 err_alloc_pages_failed:
2955 mutex_lock(&binder_mmap_lock);
2956 vfree(proc->buffer);
2957 proc->buffer = NULL;
2958 err_get_vm_area_failed:
2959 err_already_mapped:
2960 mutex_unlock(&binder_mmap_lock);
2961 err_bad_arg:
2962 pr_err("binder_mmap: %d %lx-%lx %s failed %d\n",
2963 proc->pid, vma->vm_start, vma->vm_end, failure_string, ret);
2964 return ret;
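/*
 * Hedged illustration (not part of the driver): the read-only mapping
 * that binder_mmap() services is typically set up like this (the 1 MB
 * size is an assumption; requests above SZ_4M are clamped above):
 *
 *	int binder_fd = open("/dev/binder", O_RDWR | O_CLOEXEC);
 *	void *map = mmap(NULL, 1024 * 1024, PROT_READ, MAP_PRIVATE,
 *			 binder_fd, 0);
 *	// PROT_WRITE would be refused via FORBIDDEN_MMAP_FLAGS (VM_WRITE)
 */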
2967 static int binder_open(struct inode *nodp, struct file *filp)
2969 struct binder_proc *proc;
2971 binder_debug(BINDER_DEBUG_OPEN_CLOSE, "binder_open: %d:%d\n",
2972 current->group_leader->pid, current->pid);
2974 proc = kzalloc(sizeof(*proc), GFP_KERNEL);
2975 if (proc == NULL)
2976 return -ENOMEM;
2977 get_task_struct(current->group_leader);
2978 proc->tsk = current->group_leader;
2979 INIT_LIST_HEAD(&proc->todo);
2980 init_waitqueue_head(&proc->wait);
2981 proc->default_priority = task_nice(current);
2983 binder_lock(__func__);
2985 binder_stats_created(BINDER_STAT_PROC);
2986 hlist_add_head(&proc->proc_node, &binder_procs);
2987 proc->pid = current->group_leader->pid;
2988 INIT_LIST_HEAD(&proc->delivered_death);
2989 filp->private_data = proc;
2991 binder_unlock(__func__);
2993 if (binder_debugfs_dir_entry_proc) {
2994 char strbuf[11];
2996 snprintf(strbuf, sizeof(strbuf), "%u", proc->pid);
2997 proc->debugfs_entry = debugfs_create_file(strbuf, S_IRUGO,
2998 binder_debugfs_dir_entry_proc, proc, &binder_proc_fops);
3001 return 0;
3004 static int binder_flush(struct file *filp, fl_owner_t id)
3006 struct binder_proc *proc = filp->private_data;
3008 binder_defer_work(proc, BINDER_DEFERRED_FLUSH);
3010 return 0;
3013 static void binder_deferred_flush(struct binder_proc *proc)
3015 struct rb_node *n;
3016 int wake_count = 0;
3018 for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n)) {
3019 struct binder_thread *thread = rb_entry(n, struct binder_thread, rb_node);
3021 thread->looper |= BINDER_LOOPER_STATE_NEED_RETURN;
3022 if (thread->looper & BINDER_LOOPER_STATE_WAITING) {
3023 wake_up_interruptible(&thread->wait);
3024 wake_count++;
3027 wake_up_interruptible_all(&proc->wait);
3029 binder_debug(BINDER_DEBUG_OPEN_CLOSE,
3030 "binder_flush: %d woke %d threads\n", proc->pid,
3031 wake_count);
3034 static int binder_release(struct inode *nodp, struct file *filp)
3036 struct binder_proc *proc = filp->private_data;
3038 debugfs_remove(proc->debugfs_entry);
3039 binder_defer_work(proc, BINDER_DEFERRED_RELEASE);
3041 return 0;
3044 static int binder_node_release(struct binder_node *node, int refs)
3046 struct binder_ref *ref;
3047 int death = 0;
3049 list_del_init(&node->work.entry);
3050 binder_release_work(&node->async_todo);
3052 if (hlist_empty(&node->refs)) {
3053 kfree(node);
3054 binder_stats_deleted(BINDER_STAT_NODE);
3056 return refs;
3059 node->proc = NULL;
3060 node->local_strong_refs = 0;
3061 node->local_weak_refs = 0;
3062 hlist_add_head(&node->dead_node, &binder_dead_nodes);
3064 hlist_for_each_entry(ref, &node->refs, node_entry) {
3065 refs++;
3067 if (!ref->death)
3068 continue;
3070 death++;
3072 if (list_empty(&ref->death->work.entry)) {
3073 ref->death->work.type = BINDER_WORK_DEAD_BINDER;
3074 list_add_tail(&ref->death->work.entry,
3075 &ref->proc->todo);
3076 wake_up_interruptible(&ref->proc->wait);
3077 } else
3078 BUG();
3081 binder_debug(BINDER_DEBUG_DEAD_BINDER,
3082 "node %d now dead, refs %d, death %d\n",
3083 node->debug_id, refs, death);
3085 return refs;
3088 static void binder_deferred_release(struct binder_proc *proc)
3090 struct binder_transaction *t;
3091 struct rb_node *n;
3092 int threads, nodes, incoming_refs, outgoing_refs, buffers,
3093 active_transactions, page_count;
3095 BUG_ON(proc->vma);
3096 BUG_ON(proc->files);
3098 hlist_del(&proc->proc_node);
3100 if (binder_context_mgr_node && binder_context_mgr_node->proc == proc) {
3101 binder_debug(BINDER_DEBUG_DEAD_BINDER,
3102 "%s: %d context_mgr_node gone\n",
3103 __func__, proc->pid);
3104 binder_context_mgr_node = NULL;
3107 threads = 0;
3108 active_transactions = 0;
3109 while ((n = rb_first(&proc->threads))) {
3110 struct binder_thread *thread;
3112 thread = rb_entry(n, struct binder_thread, rb_node);
3113 threads++;
3114 active_transactions += binder_free_thread(proc, thread);
3117 nodes = 0;
3118 incoming_refs = 0;
3119 while ((n = rb_first(&proc->nodes))) {
3120 struct binder_node *node;
3122 node = rb_entry(n, struct binder_node, rb_node);
3123 nodes++;
3124 rb_erase(&node->rb_node, &proc->nodes);
3125 incoming_refs = binder_node_release(node, incoming_refs);
3128 outgoing_refs = 0;
3129 while ((n = rb_first(&proc->refs_by_desc))) {
3130 struct binder_ref *ref;
3132 ref = rb_entry(n, struct binder_ref, rb_node_desc);
3133 outgoing_refs++;
3134 binder_delete_ref(ref);
3137 binder_release_work(&proc->todo);
3138 binder_release_work(&proc->delivered_death);
3140 buffers = 0;
3141 while ((n = rb_first(&proc->allocated_buffers))) {
3142 struct binder_buffer *buffer;
3144 buffer = rb_entry(n, struct binder_buffer, rb_node);
3146 t = buffer->transaction;
3147 if (t) {
3148 t->buffer = NULL;
3149 buffer->transaction = NULL;
3150 pr_err("release proc %d, transaction %d, not freed\n",
3151 proc->pid, t->debug_id);
3152 /*BUG();*/
3155 binder_free_buf(proc, buffer);
3156 buffers++;
3159 binder_stats_deleted(BINDER_STAT_PROC);
3161 page_count = 0;
3162 if (proc->pages) {
3163 int i;
3165 for (i = 0; i < proc->buffer_size / PAGE_SIZE; i++) {
3166 void *page_addr;
3168 if (!proc->pages[i])
3169 continue;
3171 page_addr = proc->buffer + i * PAGE_SIZE;
3172 binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
3173 "%s: %d: page %d at %p not freed\n",
3174 __func__, proc->pid, i, page_addr);
3175 unmap_kernel_range((unsigned long)page_addr, PAGE_SIZE);
3176 __free_page(proc->pages[i]);
3177 page_count++;
3179 kfree(proc->pages);
3180 vfree(proc->buffer);
3183 put_task_struct(proc->tsk);
3185 binder_debug(BINDER_DEBUG_OPEN_CLOSE,
3186 "%s: %d threads %d, nodes %d (ref %d), refs %d, active transactions %d, buffers %d, pages %d\n",
3187 __func__, proc->pid, threads, nodes, incoming_refs,
3188 outgoing_refs, active_transactions, buffers, page_count);
3190 kfree(proc);
3193 static void binder_deferred_func(struct work_struct *work)
3195 struct binder_proc *proc;
3196 struct files_struct *files;
3198 int defer;
3200 do {
3201 binder_lock(__func__);
3202 mutex_lock(&binder_deferred_lock);
3203 if (!hlist_empty(&binder_deferred_list)) {
3204 proc = hlist_entry(binder_deferred_list.first,
3205 struct binder_proc, deferred_work_node);
3206 hlist_del_init(&proc->deferred_work_node);
3207 defer = proc->deferred_work;
3208 proc->deferred_work = 0;
3209 } else {
3210 proc = NULL;
3211 defer = 0;
3213 mutex_unlock(&binder_deferred_lock);
3215 files = NULL;
3216 if (defer & BINDER_DEFERRED_PUT_FILES) {
3217 files = proc->files;
3218 if (files)
3219 proc->files = NULL;
3222 if (defer & BINDER_DEFERRED_FLUSH)
3223 binder_deferred_flush(proc);
3225 if (defer & BINDER_DEFERRED_RELEASE)
3226 binder_deferred_release(proc); /* frees proc */
3228 binder_unlock(__func__);
3229 if (files)
3230 put_files_struct(files);
3231 } while (proc);
3233 static DECLARE_WORK(binder_deferred_work, binder_deferred_func);
3235 static void
3236 binder_defer_work(struct binder_proc *proc, enum binder_deferred_state defer)
3238 mutex_lock(&binder_deferred_lock);
3239 proc->deferred_work |= defer;
3240 if (hlist_unhashed(&proc->deferred_work_node)) {
3241 hlist_add_head(&proc->deferred_work_node,
3242 &binder_deferred_list);
3243 queue_work(binder_deferred_workqueue, &binder_deferred_work);
3245 mutex_unlock(&binder_deferred_lock);
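/*
 * The pattern above (OR a state bit under binder_deferred_lock, hook
 * the proc onto the global list only if it is not already queued, then
 * queue a single work item that drains the list) coalesces repeated
 * flush/release requests into one deferred pass per process.
 */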
3248 static void print_binder_transaction(struct seq_file *m, const char *prefix,
3249 struct binder_transaction *t)
3251 seq_printf(m,
3252 "%s %d: %p from %d:%d to %d:%d code %x flags %x pri %ld r%d",
3253 prefix, t->debug_id, t,
3254 t->from ? t->from->proc->pid : 0,
3255 t->from ? t->from->pid : 0,
3256 t->to_proc ? t->to_proc->pid : 0,
3257 t->to_thread ? t->to_thread->pid : 0,
3258 t->code, t->flags, t->priority, t->need_reply);
3259 if (t->buffer == NULL) {
3260 seq_puts(m, " buffer free\n");
3261 return;
3263 if (t->buffer->target_node)
3264 seq_printf(m, " node %d",
3265 t->buffer->target_node->debug_id);
3266 seq_printf(m, " size %zd:%zd data %p\n",
3267 t->buffer->data_size, t->buffer->offsets_size,
3268 t->buffer->data);
3271 static void print_binder_buffer(struct seq_file *m, const char *prefix,
3272 struct binder_buffer *buffer)
3274 seq_printf(m, "%s %d: %p size %zd:%zd %s\n",
3275 prefix, buffer->debug_id, buffer->data,
3276 buffer->data_size, buffer->offsets_size,
3277 buffer->transaction ? "active" : "delivered");
3280 static void print_binder_work(struct seq_file *m, const char *prefix,
3281 const char *transaction_prefix,
3282 struct binder_work *w)
3284 struct binder_node *node;
3285 struct binder_transaction *t;
3287 switch (w->type) {
3288 case BINDER_WORK_TRANSACTION:
3289 t = container_of(w, struct binder_transaction, work);
3290 print_binder_transaction(m, transaction_prefix, t);
3291 break;
3292 case BINDER_WORK_TRANSACTION_COMPLETE:
3293 seq_printf(m, "%stransaction complete\n", prefix);
3294 break;
3295 case BINDER_WORK_NODE:
3296 node = container_of(w, struct binder_node, work);
3297 seq_printf(m, "%snode work %d: u%016llx c%016llx\n",
3298 prefix, node->debug_id,
3299 (u64)node->ptr, (u64)node->cookie);
3300 break;
3301 case BINDER_WORK_DEAD_BINDER:
3302 seq_printf(m, "%shas dead binder\n", prefix);
3303 break;
3304 case BINDER_WORK_DEAD_BINDER_AND_CLEAR:
3305 seq_printf(m, "%shas cleared dead binder\n", prefix);
3306 break;
3307 case BINDER_WORK_CLEAR_DEATH_NOTIFICATION:
3308 seq_printf(m, "%shas cleared death notification\n", prefix);
3309 break;
3310 default:
3311 seq_printf(m, "%sunknown work: type %d\n", prefix, w->type);
3312 break;
3316 static void print_binder_thread(struct seq_file *m,
3317 struct binder_thread *thread,
3318 int print_always)
3320 struct binder_transaction *t;
3321 struct binder_work *w;
3322 size_t start_pos = m->count;
3323 size_t header_pos;
3325 seq_printf(m, " thread %d: l %02x\n", thread->pid, thread->looper);
3326 header_pos = m->count;
3327 t = thread->transaction_stack;
3328 while (t) {
3329 if (t->from == thread) {
3330 print_binder_transaction(m,
3331 " outgoing transaction", t);
3332 t = t->from_parent;
3333 } else if (t->to_thread == thread) {
3334 print_binder_transaction(m,
3335 " incoming transaction", t);
3336 t = t->to_parent;
3337 } else {
3338 print_binder_transaction(m, " bad transaction", t);
3339 t = NULL;
3342 list_for_each_entry(w, &thread->todo, entry) {
3343 print_binder_work(m, " ", " pending transaction", w);
3345 if (!print_always && m->count == header_pos)
3346 m->count = start_pos;
3349 static void print_binder_node(struct seq_file *m, struct binder_node *node)
3351 struct binder_ref *ref;
3352 struct binder_work *w;
3353 int count;
3355 count = 0;
3356 hlist_for_each_entry(ref, &node->refs, node_entry)
3357 count++;
3359 seq_printf(m, " node %d: u%016llx c%016llx hs %d hw %d ls %d lw %d is %d iw %d",
3360 node->debug_id, (u64)node->ptr, (u64)node->cookie,
3361 node->has_strong_ref, node->has_weak_ref,
3362 node->local_strong_refs, node->local_weak_refs,
3363 node->internal_strong_refs, count);
3364 if (count) {
3365 seq_puts(m, " proc");
3366 hlist_for_each_entry(ref, &node->refs, node_entry)
3367 seq_printf(m, " %d", ref->proc->pid);
3369 seq_puts(m, "\n");
3370 list_for_each_entry(w, &node->async_todo, entry)
3371 print_binder_work(m, " ",
3372 " pending async transaction", w);
3375 static void print_binder_ref(struct seq_file *m, struct binder_ref *ref)
3377 seq_printf(m, " ref %d: desc %d %snode %d s %d w %d d %p\n",
3378 ref->debug_id, ref->desc, ref->node->proc ? "" : "dead ",
3379 ref->node->debug_id, ref->strong, ref->weak, ref->death);
3382 static void print_binder_proc(struct seq_file *m,
3383 struct binder_proc *proc, int print_all)
3385 struct binder_work *w;
3386 struct rb_node *n;
3387 size_t start_pos = m->count;
3388 size_t header_pos;
3390 seq_printf(m, "proc %d\n", proc->pid);
3391 header_pos = m->count;
3393 for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n))
3394 print_binder_thread(m, rb_entry(n, struct binder_thread,
3395 rb_node), print_all);
3396 for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n)) {
3397 struct binder_node *node = rb_entry(n, struct binder_node,
3398 rb_node);
3399 if (print_all || node->has_async_transaction)
3400 print_binder_node(m, node);
3402 if (print_all) {
3403 for (n = rb_first(&proc->refs_by_desc);
3404 n != NULL;
3405 n = rb_next(n))
3406 print_binder_ref(m, rb_entry(n, struct binder_ref,
3407 rb_node_desc));
3409 for (n = rb_first(&proc->allocated_buffers); n != NULL; n = rb_next(n))
3410 print_binder_buffer(m, " buffer",
3411 rb_entry(n, struct binder_buffer, rb_node));
3412 list_for_each_entry(w, &proc->todo, entry)
3413 print_binder_work(m, " ", " pending transaction", w);
3414 list_for_each_entry(w, &proc->delivered_death, entry) {
3415 seq_puts(m, " has delivered dead binder\n");
3416 break;
3418 if (!print_all && m->count == header_pos)
3419 m->count = start_pos;
3422 static const char * const binder_return_strings[] = {
3423 "BR_ERROR",
3424 "BR_OK",
3425 "BR_TRANSACTION",
3426 "BR_REPLY",
3427 "BR_ACQUIRE_RESULT",
3428 "BR_DEAD_REPLY",
3429 "BR_TRANSACTION_COMPLETE",
3430 "BR_INCREFS",
3431 "BR_ACQUIRE",
3432 "BR_RELEASE",
3433 "BR_DECREFS",
3434 "BR_ATTEMPT_ACQUIRE",
3435 "BR_NOOP",
3436 "BR_SPAWN_LOOPER",
3437 "BR_FINISHED",
3438 "BR_DEAD_BINDER",
3439 "BR_CLEAR_DEATH_NOTIFICATION_DONE",
3440 "BR_FAILED_REPLY"
3443 static const char * const binder_command_strings[] = {
3444 "BC_TRANSACTION",
3445 "BC_REPLY",
3446 "BC_ACQUIRE_RESULT",
3447 "BC_FREE_BUFFER",
3448 "BC_INCREFS",
3449 "BC_ACQUIRE",
3450 "BC_RELEASE",
3451 "BC_DECREFS",
3452 "BC_INCREFS_DONE",
3453 "BC_ACQUIRE_DONE",
3454 "BC_ATTEMPT_ACQUIRE",
3455 "BC_REGISTER_LOOPER",
3456 "BC_ENTER_LOOPER",
3457 "BC_EXIT_LOOPER",
3458 "BC_REQUEST_DEATH_NOTIFICATION",
3459 "BC_CLEAR_DEATH_NOTIFICATION",
3460 "BC_DEAD_BINDER_DONE"
3463 static const char * const binder_objstat_strings[] = {
3464 "proc",
3465 "thread",
3466 "node",
3467 "ref",
3468 "death",
3469 "transaction",
3470 "transaction_complete"
3473 static void print_binder_stats(struct seq_file *m, const char *prefix,
3474 struct binder_stats *stats)
3476 int i;
3478 BUILD_BUG_ON(ARRAY_SIZE(stats->bc) !=
3479 ARRAY_SIZE(binder_command_strings));
3480 for (i = 0; i < ARRAY_SIZE(stats->bc); i++) {
3481 if (stats->bc[i])
3482 seq_printf(m, "%s%s: %d\n", prefix,
3483 binder_command_strings[i], stats->bc[i]);
3486 BUILD_BUG_ON(ARRAY_SIZE(stats->br) !=
3487 ARRAY_SIZE(binder_return_strings));
3488 for (i = 0; i < ARRAY_SIZE(stats->br); i++) {
3489 if (stats->br[i])
3490 seq_printf(m, "%s%s: %d\n", prefix,
3491 binder_return_strings[i], stats->br[i]);
3494 BUILD_BUG_ON(ARRAY_SIZE(stats->obj_created) !=
3495 ARRAY_SIZE(binder_objstat_strings));
3496 BUILD_BUG_ON(ARRAY_SIZE(stats->obj_created) !=
3497 ARRAY_SIZE(stats->obj_deleted));
3498 for (i = 0; i < ARRAY_SIZE(stats->obj_created); i++) {
3499 if (stats->obj_created[i] || stats->obj_deleted[i])
3500 seq_printf(m, "%s%s: active %d total %d\n", prefix,
3501 binder_objstat_strings[i],
3502 stats->obj_created[i] - stats->obj_deleted[i],
3503 stats->obj_created[i]);
3507 static void print_binder_proc_stats(struct seq_file *m,
3508 struct binder_proc *proc)
3510 struct binder_work *w;
3511 struct rb_node *n;
3512 int count, strong, weak;
3514 seq_printf(m, "proc %d\n", proc->pid);
3515 count = 0;
3516 for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n))
3517 count++;
3518 seq_printf(m, " threads: %d\n", count);
3519 seq_printf(m, " requested threads: %d+%d/%d\n"
3520 " ready threads %d\n"
3521 " free async space %zd\n", proc->requested_threads,
3522 proc->requested_threads_started, proc->max_threads,
3523 proc->ready_threads, proc->free_async_space);
3524 count = 0;
3525 for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n))
3526 count++;
3527 seq_printf(m, " nodes: %d\n", count);
3528 count = 0;
3529 strong = 0;
3530 weak = 0;
3531 for (n = rb_first(&proc->refs_by_desc); n != NULL; n = rb_next(n)) {
3532 struct binder_ref *ref = rb_entry(n, struct binder_ref,
3533 rb_node_desc);
3534 count++;
3535 strong += ref->strong;
3536 weak += ref->weak;
3538 seq_printf(m, " refs: %d s %d w %d\n", count, strong, weak);
3540 count = 0;
3541 for (n = rb_first(&proc->allocated_buffers); n != NULL; n = rb_next(n))
3542 count++;
3543 seq_printf(m, " buffers: %d\n", count);
3545 count = 0;
3546 list_for_each_entry(w, &proc->todo, entry) {
3547 switch (w->type) {
3548 case BINDER_WORK_TRANSACTION:
3549 count++;
3550 break;
3551 default:
3552 break;
3555 seq_printf(m, " pending transactions: %d\n", count);
3557 print_binder_stats(m, " ", &proc->stats);
3561 static int binder_state_show(struct seq_file *m, void *unused)
3563 struct binder_proc *proc;
3564 struct binder_node *node;
3565 int do_lock = !binder_debug_no_lock;
3567 if (do_lock)
3568 binder_lock(__func__);
3570 seq_puts(m, "binder state:\n");
3572 if (!hlist_empty(&binder_dead_nodes))
3573 seq_puts(m, "dead nodes:\n");
3574 hlist_for_each_entry(node, &binder_dead_nodes, dead_node)
3575 print_binder_node(m, node);
3577 hlist_for_each_entry(proc, &binder_procs, proc_node)
3578 print_binder_proc(m, proc, 1);
3579 if (do_lock)
3580 binder_unlock(__func__);
3581 return 0;
3584 static int binder_stats_show(struct seq_file *m, void *unused)
3586 struct binder_proc *proc;
3587 int do_lock = !binder_debug_no_lock;
3589 if (do_lock)
3590 binder_lock(__func__);
3592 seq_puts(m, "binder stats:\n");
3594 print_binder_stats(m, "", &binder_stats);
3596 hlist_for_each_entry(proc, &binder_procs, proc_node)
3597 print_binder_proc_stats(m, proc);
3598 if (do_lock)
3599 binder_unlock(__func__);
3600 return 0;
3603 static int binder_transactions_show(struct seq_file *m, void *unused)
3605 struct binder_proc *proc;
3606 int do_lock = !binder_debug_no_lock;
3608 if (do_lock)
3609 binder_lock(__func__);
3611 seq_puts(m, "binder transactions:\n");
3612 hlist_for_each_entry(proc, &binder_procs, proc_node)
3613 print_binder_proc(m, proc, 0);
3614 if (do_lock)
3615 binder_unlock(__func__);
3616 return 0;
3619 static int binder_proc_show(struct seq_file *m, void *unused)
3621 struct binder_proc *proc = m->private;
3622 int do_lock = !binder_debug_no_lock;
3624 if (do_lock)
3625 binder_lock(__func__);
3626 seq_puts(m, "binder proc state:\n");
3627 print_binder_proc(m, proc, 1);
3628 if (do_lock)
3629 binder_unlock(__func__);
3630 return 0;
3633 static void print_binder_transaction_log_entry(struct seq_file *m,
3634 struct binder_transaction_log_entry *e)
3636 seq_printf(m,
3637 "%d: %s from %d:%d to %d:%d node %d handle %d size %d:%d\n",
3638 e->debug_id, (e->call_type == 2) ? "reply" :
3639 ((e->call_type == 1) ? "async" : "call "), e->from_proc,
3640 e->from_thread, e->to_proc, e->to_thread, e->to_node,
3641 e->target_handle, e->data_size, e->offsets_size);
3644 static int binder_transaction_log_show(struct seq_file *m, void *unused)
3646 struct binder_transaction_log *log = m->private;
3647 int i;
3649 if (log->full) {
3650 for (i = log->next; i < ARRAY_SIZE(log->entry); i++)
3651 print_binder_transaction_log_entry(m, &log->entry[i]);
3653 for (i = 0; i < log->next; i++)
3654 print_binder_transaction_log_entry(m, &log->entry[i]);
3655 return 0;
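/*
 * Note on ordering: once the log has wrapped (log->full), the two loops
 * above print oldest-first: entries next..end-1 precede 0..next-1.
 */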
3658 static const struct file_operations binder_fops = {
3659 .owner = THIS_MODULE,
3660 .poll = binder_poll,
3661 .unlocked_ioctl = binder_ioctl,
3662 .compat_ioctl = binder_ioctl,
3663 .mmap = binder_mmap,
3664 .open = binder_open,
3665 .flush = binder_flush,
3666 .release = binder_release,
3669 static struct miscdevice binder_miscdev = {
3670 .minor = MISC_DYNAMIC_MINOR,
3671 .name = "binder",
3672 .fops = &binder_fops
3675 BINDER_DEBUG_ENTRY(state);
3676 BINDER_DEBUG_ENTRY(stats);
3677 BINDER_DEBUG_ENTRY(transactions);
3678 BINDER_DEBUG_ENTRY(transaction_log);
3680 static int __init binder_init(void)
3682 int ret;
3684 binder_deferred_workqueue = create_singlethread_workqueue("binder");
3685 if (!binder_deferred_workqueue)
3686 return -ENOMEM;
3688 binder_debugfs_dir_entry_root = debugfs_create_dir("binder", NULL);
3689 if (binder_debugfs_dir_entry_root)
3690 binder_debugfs_dir_entry_proc = debugfs_create_dir("proc",
3691 binder_debugfs_dir_entry_root);
3692 ret = misc_register(&binder_miscdev);
3693 if (binder_debugfs_dir_entry_root) {
3694 debugfs_create_file("state",
3695 S_IRUGO,
3696 binder_debugfs_dir_entry_root,
3697 NULL,
3698 &binder_state_fops);
3699 debugfs_create_file("stats",
3700 S_IRUGO,
3701 binder_debugfs_dir_entry_root,
3702 NULL,
3703 &binder_stats_fops);
3704 debugfs_create_file("transactions",
3705 S_IRUGO,
3706 binder_debugfs_dir_entry_root,
3707 NULL,
3708 &binder_transactions_fops);
3709 debugfs_create_file("transaction_log",
3710 S_IRUGO,
3711 binder_debugfs_dir_entry_root,
3712 &binder_transaction_log,
3713 &binder_transaction_log_fops);
3714 debugfs_create_file("failed_transaction_log",
3715 S_IRUGO,
3716 binder_debugfs_dir_entry_root,
3717 &binder_transaction_log_failed,
3718 &binder_transaction_log_fops);
3720 return ret;
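/*
 * Hedged illustration (not part of the driver): with debugfs mounted at
 * /sys/kernel/debug, the files registered above can be read like any
 * other file, e.g.:
 *
 *	FILE *f = fopen("/sys/kernel/debug/binder/stats", "r");
 *	char line[256];
 *	while (f && fgets(line, sizeof(line), f))
 *		fputs(line, stdout);
 *	if (f)
 *		fclose(f);
 */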
3723 device_initcall(binder_init);
3725 #define CREATE_TRACE_POINTS
3726 #include "binder_trace.h"
3728 MODULE_LICENSE("GPL v2");