drivers/staging/android/binder.c
/* binder.c
 *
 * Android IPC Subsystem
 *
 * Copyright (C) 2007-2008 Google, Inc.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <asm/cacheflush.h>
#include <linux/fdtable.h>
#include <linux/file.h>
#include <linux/freezer.h>
#include <linux/fs.h>
#include <linux/list.h>
#include <linux/miscdevice.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/nsproxy.h>
#include <linux/poll.h>
#include <linux/debugfs.h>
#include <linux/rbtree.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/uaccess.h>
#include <linux/vmalloc.h>
#include <linux/slab.h>
#include <linux/pid_namespace.h>

#include "binder.h"
#include "binder_trace.h"

static DEFINE_MUTEX(binder_main_lock);
static DEFINE_MUTEX(binder_deferred_lock);
static DEFINE_MUTEX(binder_mmap_lock);

static HLIST_HEAD(binder_procs);
static HLIST_HEAD(binder_deferred_list);
static HLIST_HEAD(binder_dead_nodes);

static struct dentry *binder_debugfs_dir_entry_root;
static struct dentry *binder_debugfs_dir_entry_proc;
static struct binder_node *binder_context_mgr_node;
static kuid_t binder_context_mgr_uid = INVALID_UID;
static int binder_last_id;
static struct workqueue_struct *binder_deferred_workqueue;
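
/*
 * Everything above is global, driver-wide state.  This version of the
 * driver serializes nearly all work under the single coarse-grained
 * binder_main_lock (taken via binder_lock()/binder_unlock() below);
 * binder_deferred_lock and binder_mmap_lock only cover the deferred
 * work list and mmap setup respectively.
 */
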
#define BINDER_DEBUG_ENTRY(name) \
static int binder_##name##_open(struct inode *inode, struct file *file) \
{ \
	return single_open(file, binder_##name##_show, inode->i_private); \
} \
\
static const struct file_operations binder_##name##_fops = { \
	.owner = THIS_MODULE, \
	.open = binder_##name##_open, \
	.read = seq_read, \
	.llseek = seq_lseek, \
	.release = single_release, \
}

static int binder_proc_show(struct seq_file *m, void *unused);
BINDER_DEBUG_ENTRY(proc);

/* This is only defined in include/asm-arm/sizes.h */
#ifndef SZ_1K
#define SZ_1K 0x400
#endif

#ifndef SZ_4M
#define SZ_4M 0x400000
#endif

#define FORBIDDEN_MMAP_FLAGS (VM_WRITE)

#define BINDER_SMALL_BUF_SIZE (PAGE_SIZE * 64)

enum {
	BINDER_DEBUG_USER_ERROR = 1U << 0,
	BINDER_DEBUG_FAILED_TRANSACTION = 1U << 1,
	BINDER_DEBUG_DEAD_TRANSACTION = 1U << 2,
	BINDER_DEBUG_OPEN_CLOSE = 1U << 3,
	BINDER_DEBUG_DEAD_BINDER = 1U << 4,
	BINDER_DEBUG_DEATH_NOTIFICATION = 1U << 5,
	BINDER_DEBUG_READ_WRITE = 1U << 6,
	BINDER_DEBUG_USER_REFS = 1U << 7,
	BINDER_DEBUG_THREADS = 1U << 8,
	BINDER_DEBUG_TRANSACTION = 1U << 9,
	BINDER_DEBUG_TRANSACTION_COMPLETE = 1U << 10,
	BINDER_DEBUG_FREE_BUFFER = 1U << 11,
	BINDER_DEBUG_INTERNAL_REFS = 1U << 12,
	BINDER_DEBUG_BUFFER_ALLOC = 1U << 13,
	BINDER_DEBUG_PRIORITY_CAP = 1U << 14,
	BINDER_DEBUG_BUFFER_ALLOC_ASYNC = 1U << 15,
};
static uint32_t binder_debug_mask = BINDER_DEBUG_USER_ERROR |
	BINDER_DEBUG_FAILED_TRANSACTION | BINDER_DEBUG_DEAD_TRANSACTION;
module_param_named(debug_mask, binder_debug_mask, uint, S_IWUSR | S_IRUGO);

static bool binder_debug_no_lock;
module_param_named(proc_no_lock, binder_debug_no_lock, bool, S_IWUSR | S_IRUGO);

static DECLARE_WAIT_QUEUE_HEAD(binder_user_error_wait);
static int binder_stop_on_user_error;

static int binder_set_stop_on_user_error(const char *val,
					 struct kernel_param *kp)
{
	int ret;

	ret = param_set_int(val, kp);
	if (binder_stop_on_user_error < 2)
		wake_up(&binder_user_error_wait);
	return ret;
}
module_param_call(stop_on_user_error, binder_set_stop_on_user_error,
	param_get_int, &binder_stop_on_user_error, S_IWUSR | S_IRUGO);

#define binder_debug(mask, x...) \
	do { \
		if (binder_debug_mask & mask) \
			pr_info(x); \
	} while (0)

#define binder_user_error(x...) \
	do { \
		if (binder_debug_mask & BINDER_DEBUG_USER_ERROR) \
			pr_info(x); \
		if (binder_stop_on_user_error) \
			binder_stop_on_user_error = 2; \
	} while (0)
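
/*
 * These knobs are runtime-tunable.  A usage sketch (the parameter files
 * follow from the module_param* declarations above; paths assume the
 * module is named "binder"):
 *
 *   echo 0x3ff > /sys/module/binder/parameters/debug_mask
 *
 * turns on every binder_debug() class up to BINDER_DEBUG_TRANSACTION.
 * Writing 1 to .../parameters/stop_on_user_error arms the trap in
 * binder_user_error(): the next user error bumps the value to 2, and
 * binder ioctls then wait on binder_user_error_wait until the value is
 * lowered again.
 */
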
enum binder_stat_types {
	BINDER_STAT_PROC,
	BINDER_STAT_THREAD,
	BINDER_STAT_NODE,
	BINDER_STAT_REF,
	BINDER_STAT_DEATH,
	BINDER_STAT_TRANSACTION,
	BINDER_STAT_TRANSACTION_COMPLETE,
	BINDER_STAT_COUNT
};

struct binder_stats {
	int br[_IOC_NR(BR_FAILED_REPLY) + 1];
	int bc[_IOC_NR(BC_DEAD_BINDER_DONE) + 1];
	int obj_created[BINDER_STAT_COUNT];
	int obj_deleted[BINDER_STAT_COUNT];
};

static struct binder_stats binder_stats;

static inline void binder_stats_deleted(enum binder_stat_types type)
{
	binder_stats.obj_deleted[type]++;
}

static inline void binder_stats_created(enum binder_stat_types type)
{
	binder_stats.obj_created[type]++;
}

struct binder_transaction_log_entry {
	int debug_id;
	int call_type;
	int from_proc;
	int from_thread;
	int target_handle;
	int to_proc;
	int to_thread;
	int to_node;
	int data_size;
	int offsets_size;
};
struct binder_transaction_log {
	int next;
	int full;
	struct binder_transaction_log_entry entry[32];
};
static struct binder_transaction_log binder_transaction_log;
static struct binder_transaction_log binder_transaction_log_failed;

static struct binder_transaction_log_entry *binder_transaction_log_add(
	struct binder_transaction_log *log)
{
	struct binder_transaction_log_entry *e;

	e = &log->entry[log->next];
	memset(e, 0, sizeof(*e));
	log->next++;
	if (log->next == ARRAY_SIZE(log->entry)) {
		log->next = 0;
		log->full = 1;
	}
	return e;
}
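
/*
 * The transaction logs are fixed 32-entry ring buffers: once "next"
 * wraps, "full" stays set and each new entry overwrites the oldest one.
 * One log records every transaction, the other only the failed ones
 * (see the tail of binder_transaction()); both are dumped through the
 * driver's debugfs entries.
 */
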
struct binder_work {
	struct list_head entry;
	enum {
		BINDER_WORK_TRANSACTION = 1,
		BINDER_WORK_TRANSACTION_COMPLETE,
		BINDER_WORK_NODE,
		BINDER_WORK_DEAD_BINDER,
		BINDER_WORK_DEAD_BINDER_AND_CLEAR,
		BINDER_WORK_CLEAR_DEATH_NOTIFICATION,
	} type;
};

struct binder_node {
	int debug_id;
	struct binder_work work;
	union {
		struct rb_node rb_node;
		struct hlist_node dead_node;
	};
	struct binder_proc *proc;
	struct hlist_head refs;
	int internal_strong_refs;
	int local_weak_refs;
	int local_strong_refs;
	binder_uintptr_t ptr;
	binder_uintptr_t cookie;
	unsigned has_strong_ref:1;
	unsigned pending_strong_ref:1;
	unsigned has_weak_ref:1;
	unsigned pending_weak_ref:1;
	unsigned has_async_transaction:1;
	unsigned accept_fds:1;
	unsigned min_priority:8;
	struct list_head async_todo;
};

struct binder_ref_death {
	struct binder_work work;
	binder_uintptr_t cookie;
};

struct binder_ref {
	/* Lookups needed: */
	/*   node + proc => ref (transaction) */
	/*   desc + proc => ref (transaction, inc/dec ref) */
	/*   node => refs + procs (proc exit) */
	int debug_id;
	struct rb_node rb_node_desc;
	struct rb_node rb_node_node;
	struct hlist_node node_entry;
	struct binder_proc *proc;
	struct binder_node *node;
	uint32_t desc;
	int strong;
	int weak;
	struct binder_ref_death *death;
};

struct binder_buffer {
	struct list_head entry; /* free and allocated entries by address */
	struct rb_node rb_node; /* free entry by size or allocated entry */
				/* by address */
	unsigned free:1;
	unsigned allow_user_free:1;
	unsigned async_transaction:1;
	unsigned debug_id:29;

	struct binder_transaction *transaction;

	struct binder_node *target_node;
	size_t data_size;
	size_t offsets_size;
	uint8_t data[0];
};
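
/*
 * A binder_buffer header sits directly in the mmap'ed area and is
 * immediately followed by its payload in data[]: data_size bytes of
 * transaction data, then offsets_size bytes of object offsets, each
 * aligned to sizeof(void *).  Free chunks are indexed by size in
 * proc->free_buffers; in-use chunks by address in
 * proc->allocated_buffers.
 */
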
enum binder_deferred_state {
	BINDER_DEFERRED_PUT_FILES    = 0x01,
	BINDER_DEFERRED_FLUSH        = 0x02,
	BINDER_DEFERRED_RELEASE      = 0x04,
};

struct binder_proc {
	struct hlist_node proc_node;
	struct rb_root threads;
	struct rb_root nodes;
	struct rb_root refs_by_desc;
	struct rb_root refs_by_node;
	int pid;
	struct vm_area_struct *vma;
	struct mm_struct *vma_vm_mm;
	struct task_struct *tsk;
	struct files_struct *files;
	struct hlist_node deferred_work_node;
	int deferred_work;
	void *buffer;
	ptrdiff_t user_buffer_offset;

	struct list_head buffers;
	struct rb_root free_buffers;
	struct rb_root allocated_buffers;
	size_t free_async_space;

	struct page **pages;
	size_t buffer_size;
	uint32_t buffer_free;
	struct list_head todo;
	wait_queue_head_t wait;
	struct binder_stats stats;
	struct list_head delivered_death;
	int max_threads;
	int requested_threads;
	int requested_threads_started;
	int ready_threads;
	long default_priority;
	struct dentry *debugfs_entry;
};

enum {
	BINDER_LOOPER_STATE_REGISTERED  = 0x01,
	BINDER_LOOPER_STATE_ENTERED     = 0x02,
	BINDER_LOOPER_STATE_EXITED      = 0x04,
	BINDER_LOOPER_STATE_INVALID     = 0x08,
	BINDER_LOOPER_STATE_WAITING     = 0x10,
	BINDER_LOOPER_STATE_NEED_RETURN = 0x20
};

struct binder_thread {
	struct binder_proc *proc;
	struct rb_node rb_node;
	int pid;
	int looper;
	struct binder_transaction *transaction_stack;
	struct list_head todo;
	uint32_t return_error; /* Write failed, return error code in read buf */
	uint32_t return_error2; /* Write failed, return error code in read */
		/* buffer. Used when sending a reply to a dead process that */
		/* we are also waiting on */
	wait_queue_head_t wait;
	struct binder_stats stats;
};

struct binder_transaction {
	int debug_id;
	struct binder_work work;
	struct binder_thread *from;
	struct binder_transaction *from_parent;
	struct binder_proc *to_proc;
	struct binder_thread *to_thread;
	struct binder_transaction *to_parent;
	unsigned need_reply:1;
	/* unsigned is_dead:1; */	/* not used at the moment */

	struct binder_buffer *buffer;
	unsigned int code;
	unsigned int flags;
	long priority;
	long saved_priority;
	kuid_t sender_euid;
};

static void
binder_defer_work(struct binder_proc *proc, enum binder_deferred_state defer);

static int task_get_unused_fd_flags(struct binder_proc *proc, int flags)
{
	struct files_struct *files = proc->files;
	unsigned long rlim_cur;
	unsigned long irqs;

	if (files == NULL)
		return -ESRCH;

	if (!lock_task_sighand(proc->tsk, &irqs))
		return -EMFILE;

	rlim_cur = task_rlimit(proc->tsk, RLIMIT_NOFILE);
	unlock_task_sighand(proc->tsk, &irqs);

	return __alloc_fd(files, 0, rlim_cur, flags);
}

/*
 * copied from fd_install
 */
static void task_fd_install(
	struct binder_proc *proc, unsigned int fd, struct file *file)
{
	if (proc->files)
		__fd_install(proc->files, fd, file);
}

/*
 * copied from sys_close
 */
static long task_close_fd(struct binder_proc *proc, unsigned int fd)
{
	int retval;

	if (proc->files == NULL)
		return -ESRCH;

	retval = __close_fd(proc->files, fd);
	/* can't restart close syscall because file table entry was cleared */
	if (unlikely(retval == -ERESTARTSYS ||
		     retval == -ERESTARTNOINTR ||
		     retval == -ERESTARTNOHAND ||
		     retval == -ERESTART_RESTARTBLOCK))
		retval = -EINTR;

	return retval;
}

static inline void binder_lock(const char *tag)
{
	trace_binder_lock(tag);
	mutex_lock(&binder_main_lock);
	trace_binder_locked(tag);
}

static inline void binder_unlock(const char *tag)
{
	trace_binder_unlock(tag);
	mutex_unlock(&binder_main_lock);
}

static void binder_set_nice(long nice)
{
	long min_nice;

	if (can_nice(current, nice)) {
		set_user_nice(current, nice);
		return;
	}
	min_nice = 20 - current->signal->rlim[RLIMIT_NICE].rlim_cur;
	binder_debug(BINDER_DEBUG_PRIORITY_CAP,
		     "%d: nice value %ld not allowed use %ld instead\n",
		      current->pid, nice, min_nice);
	set_user_nice(current, min_nice);
	if (min_nice < 20)
		return;
	binder_user_error("%d RLIMIT_NICE not set\n", current->pid);
}
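
/*
 * Buffer management.  Each proc's mmap'ed region is carved into
 * binder_buffer chunks linked on proc->buffers in address order, so a
 * chunk's size is simply the gap to its successor (binder_buffer_size()).
 * binder_alloc_buf() runs a best-fit search over the free tree, splits
 * off any usable remainder, and only then backs the required pages.
 */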
static size_t binder_buffer_size(struct binder_proc *proc,
				 struct binder_buffer *buffer)
{
	if (list_is_last(&buffer->entry, &proc->buffers))
		return proc->buffer + proc->buffer_size - (void *)buffer->data;
	else
		return (size_t)list_entry(buffer->entry.next,
			struct binder_buffer, entry) - (size_t)buffer->data;
}

static void binder_insert_free_buffer(struct binder_proc *proc,
				      struct binder_buffer *new_buffer)
{
	struct rb_node **p = &proc->free_buffers.rb_node;
	struct rb_node *parent = NULL;
	struct binder_buffer *buffer;
	size_t buffer_size;
	size_t new_buffer_size;

	BUG_ON(!new_buffer->free);

	new_buffer_size = binder_buffer_size(proc, new_buffer);

	binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
		     "%d: add free buffer, size %zd, at %p\n",
		      proc->pid, new_buffer_size, new_buffer);

	while (*p) {
		parent = *p;
		buffer = rb_entry(parent, struct binder_buffer, rb_node);
		BUG_ON(!buffer->free);

		buffer_size = binder_buffer_size(proc, buffer);

		if (new_buffer_size < buffer_size)
			p = &parent->rb_left;
		else
			p = &parent->rb_right;
	}
	rb_link_node(&new_buffer->rb_node, parent, p);
	rb_insert_color(&new_buffer->rb_node, &proc->free_buffers);
}

static void binder_insert_allocated_buffer(struct binder_proc *proc,
					   struct binder_buffer *new_buffer)
{
	struct rb_node **p = &proc->allocated_buffers.rb_node;
	struct rb_node *parent = NULL;
	struct binder_buffer *buffer;

	BUG_ON(new_buffer->free);

	while (*p) {
		parent = *p;
		buffer = rb_entry(parent, struct binder_buffer, rb_node);
		BUG_ON(buffer->free);

		if (new_buffer < buffer)
			p = &parent->rb_left;
		else if (new_buffer > buffer)
			p = &parent->rb_right;
		else
			BUG();
	}
	rb_link_node(&new_buffer->rb_node, parent, p);
	rb_insert_color(&new_buffer->rb_node, &proc->allocated_buffers);
}

static struct binder_buffer *binder_buffer_lookup(struct binder_proc *proc,
						  uintptr_t user_ptr)
{
	struct rb_node *n = proc->allocated_buffers.rb_node;
	struct binder_buffer *buffer;
	struct binder_buffer *kern_ptr;

	kern_ptr = (struct binder_buffer *)(user_ptr - proc->user_buffer_offset
		- offsetof(struct binder_buffer, data));

	while (n) {
		buffer = rb_entry(n, struct binder_buffer, rb_node);
		BUG_ON(buffer->free);

		if (kern_ptr < buffer)
			n = n->rb_left;
		else if (kern_ptr > buffer)
			n = n->rb_right;
		else
			return buffer;
	}
	return NULL;
}
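
/*
 * binder_update_page_range() allocates or frees the physical pages
 * backing [start, end) and maps each page twice: into the kernel with
 * map_vm_area() and into the receiver's mmap'ed region with
 * vm_insert_page().  This double mapping is what lets binder copy
 * transaction data once, straight into memory the target process can
 * already read.
 */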
static int binder_update_page_range(struct binder_proc *proc, int allocate,
				    void *start, void *end,
				    struct vm_area_struct *vma)
{
	void *page_addr;
	unsigned long user_page_addr;
	struct vm_struct tmp_area;
	struct page **page;
	struct mm_struct *mm;

	binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
		     "%d: %s pages %p-%p\n", proc->pid,
		     allocate ? "allocate" : "free", start, end);

	if (end <= start)
		return 0;

	trace_binder_update_page_range(proc, allocate, start, end);

	if (vma)
		mm = NULL;
	else
		mm = get_task_mm(proc->tsk);

	if (mm) {
		down_write(&mm->mmap_sem);
		vma = proc->vma;
		if (vma && mm != proc->vma_vm_mm) {
			pr_err("%d: vma mm and task mm mismatch\n",
				proc->pid);
			vma = NULL;
		}
	}

	if (allocate == 0)
		goto free_range;

	if (vma == NULL) {
		pr_err("%d: binder_alloc_buf failed to map pages in userspace, no vma\n",
			proc->pid);
		goto err_no_vma;
	}

	for (page_addr = start; page_addr < end; page_addr += PAGE_SIZE) {
		int ret;
		struct page **page_array_ptr;

		page = &proc->pages[(page_addr - proc->buffer) / PAGE_SIZE];

		BUG_ON(*page);
		*page = alloc_page(GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO);
		if (*page == NULL) {
			pr_err("%d: binder_alloc_buf failed for page at %p\n",
				proc->pid, page_addr);
			goto err_alloc_page_failed;
		}
		tmp_area.addr = page_addr;
		tmp_area.size = PAGE_SIZE + PAGE_SIZE /* guard page? */;
		page_array_ptr = page;
		ret = map_vm_area(&tmp_area, PAGE_KERNEL, &page_array_ptr);
		if (ret) {
			pr_err("%d: binder_alloc_buf failed to map page at %p in kernel\n",
			       proc->pid, page_addr);
			goto err_map_kernel_failed;
		}
		user_page_addr =
			(uintptr_t)page_addr + proc->user_buffer_offset;
		ret = vm_insert_page(vma, user_page_addr, page[0]);
		if (ret) {
			pr_err("%d: binder_alloc_buf failed to map page at %lx in userspace\n",
			       proc->pid, user_page_addr);
			goto err_vm_insert_page_failed;
		}
		/* vm_insert_page does not seem to increment the refcount */
	}
	if (mm) {
		up_write(&mm->mmap_sem);
		mmput(mm);
	}
	return 0;

free_range:
	for (page_addr = end - PAGE_SIZE; page_addr >= start;
	     page_addr -= PAGE_SIZE) {
		page = &proc->pages[(page_addr - proc->buffer) / PAGE_SIZE];
		if (vma)
			zap_page_range(vma, (uintptr_t)page_addr +
				proc->user_buffer_offset, PAGE_SIZE, NULL);
err_vm_insert_page_failed:
		unmap_kernel_range((unsigned long)page_addr, PAGE_SIZE);
err_map_kernel_failed:
		__free_page(*page);
		*page = NULL;
err_alloc_page_failed:
		;
	}
err_no_vma:
	if (mm) {
		up_write(&mm->mmap_sem);
		mmput(mm);
	}
	return -ENOMEM;
}

static struct binder_buffer *binder_alloc_buf(struct binder_proc *proc,
					      size_t data_size,
					      size_t offsets_size, int is_async)
{
	struct rb_node *n = proc->free_buffers.rb_node;
	struct binder_buffer *buffer;
	size_t buffer_size;
	struct rb_node *best_fit = NULL;
	void *has_page_addr;
	void *end_page_addr;
	size_t size;

	if (proc->vma == NULL) {
		pr_err("%d: binder_alloc_buf, no vma\n",
		       proc->pid);
		return NULL;
	}

	size = ALIGN(data_size, sizeof(void *)) +
		ALIGN(offsets_size, sizeof(void *));

	if (size < data_size || size < offsets_size) {
		binder_user_error("%d: got transaction with invalid size %zd-%zd\n",
				proc->pid, data_size, offsets_size);
		return NULL;
	}

	if (is_async &&
	    proc->free_async_space < size + sizeof(struct binder_buffer)) {
		binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
			     "%d: binder_alloc_buf size %zd failed, no async space left\n",
			      proc->pid, size);
		return NULL;
	}

	while (n) {
		buffer = rb_entry(n, struct binder_buffer, rb_node);
		BUG_ON(!buffer->free);
		buffer_size = binder_buffer_size(proc, buffer);

		if (size < buffer_size) {
			best_fit = n;
			n = n->rb_left;
		} else if (size > buffer_size)
			n = n->rb_right;
		else {
			best_fit = n;
			break;
		}
	}
	if (best_fit == NULL) {
		pr_err("%d: binder_alloc_buf size %zd failed, no address space\n",
			proc->pid, size);
		return NULL;
	}
	if (n == NULL) {
		buffer = rb_entry(best_fit, struct binder_buffer, rb_node);
		buffer_size = binder_buffer_size(proc, buffer);
	}

	binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
		     "%d: binder_alloc_buf size %zd got buffer %p size %zd\n",
		      proc->pid, size, buffer, buffer_size);

	has_page_addr =
		(void *)(((uintptr_t)buffer->data + buffer_size) & PAGE_MASK);
	if (n == NULL) {
		if (size + sizeof(struct binder_buffer) + 4 >= buffer_size)
			buffer_size = size; /* no room for other buffers */
		else
			buffer_size = size + sizeof(struct binder_buffer);
	}
	end_page_addr =
		(void *)PAGE_ALIGN((uintptr_t)buffer->data + buffer_size);
	if (end_page_addr > has_page_addr)
		end_page_addr = has_page_addr;
	if (binder_update_page_range(proc, 1,
	    (void *)PAGE_ALIGN((uintptr_t)buffer->data), end_page_addr, NULL))
		return NULL;

	rb_erase(best_fit, &proc->free_buffers);
	buffer->free = 0;
	binder_insert_allocated_buffer(proc, buffer);
	if (buffer_size != size) {
		struct binder_buffer *new_buffer = (void *)buffer->data + size;

		list_add(&new_buffer->entry, &buffer->entry);
		new_buffer->free = 1;
		binder_insert_free_buffer(proc, new_buffer);
	}
	binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
		     "%d: binder_alloc_buf size %zd got %p\n",
		      proc->pid, size, buffer);
	buffer->data_size = data_size;
	buffer->offsets_size = offsets_size;
	buffer->async_transaction = is_async;
	if (is_async) {
		proc->free_async_space -= size + sizeof(struct binder_buffer);
		binder_debug(BINDER_DEBUG_BUFFER_ALLOC_ASYNC,
			     "%d: binder_alloc_buf size %zd async free %zd\n",
			      proc->pid, size, proc->free_async_space);
	}

	return buffer;
}
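
/*
 * A size sketch for the allocator above: on a 64-bit kernel, a
 * transaction with 20 bytes of data and one binder_size_t offset needs
 * ALIGN(20, 8) + ALIGN(8, 8) = 32 payload bytes.  The chosen free chunk
 * is split only when the remainder exceeds
 * sizeof(struct binder_buffer) + 4 bytes; otherwise the whole chunk is
 * consumed rather than leaving an unusably small fragment.
 */
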
static void *buffer_start_page(struct binder_buffer *buffer)
{
	return (void *)((uintptr_t)buffer & PAGE_MASK);
}

static void *buffer_end_page(struct binder_buffer *buffer)
{
	return (void *)(((uintptr_t)(buffer + 1) - 1) & PAGE_MASK);
}

static void binder_delete_free_buffer(struct binder_proc *proc,
				      struct binder_buffer *buffer)
{
	struct binder_buffer *prev, *next = NULL;
	int free_page_end = 1;
	int free_page_start = 1;

	BUG_ON(proc->buffers.next == &buffer->entry);
	prev = list_entry(buffer->entry.prev, struct binder_buffer, entry);
	BUG_ON(!prev->free);
	if (buffer_end_page(prev) == buffer_start_page(buffer)) {
		free_page_start = 0;
		if (buffer_end_page(prev) == buffer_end_page(buffer))
			free_page_end = 0;
		binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
			     "%d: merge free, buffer %p share page with %p\n",
			     proc->pid, buffer, prev);
	}

	if (!list_is_last(&buffer->entry, &proc->buffers)) {
		next = list_entry(buffer->entry.next,
				  struct binder_buffer, entry);
		if (buffer_start_page(next) == buffer_end_page(buffer)) {
			free_page_end = 0;
			if (buffer_start_page(next) ==
			    buffer_start_page(buffer))
				free_page_start = 0;
			binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
				     "%d: merge free, buffer %p share page with %p\n",
				     proc->pid, buffer, next);
		}
	}
	list_del(&buffer->entry);
	if (free_page_start || free_page_end) {
		binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
			     "%d: merge free, buffer %p do not share page%s%s with %p or %p\n",
			     proc->pid, buffer, free_page_start ? "" : " end",
			     free_page_end ? "" : " start", prev, next);
		binder_update_page_range(proc, 0, free_page_start ?
			buffer_start_page(buffer) : buffer_end_page(buffer),
			(free_page_end ? buffer_end_page(buffer) :
			buffer_start_page(buffer)) + PAGE_SIZE, NULL);
	}
}

static void binder_free_buf(struct binder_proc *proc,
			    struct binder_buffer *buffer)
{
	size_t size, buffer_size;

	buffer_size = binder_buffer_size(proc, buffer);

	size = ALIGN(buffer->data_size, sizeof(void *)) +
		ALIGN(buffer->offsets_size, sizeof(void *));

	binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
		     "%d: binder_free_buf %p size %zd buffer_size %zd\n",
		      proc->pid, buffer, size, buffer_size);

	BUG_ON(buffer->free);
	BUG_ON(size > buffer_size);
	BUG_ON(buffer->transaction != NULL);
	BUG_ON((void *)buffer < proc->buffer);
	BUG_ON((void *)buffer > proc->buffer + proc->buffer_size);

	if (buffer->async_transaction) {
		proc->free_async_space += size + sizeof(struct binder_buffer);

		binder_debug(BINDER_DEBUG_BUFFER_ALLOC_ASYNC,
			     "%d: binder_free_buf size %zd async free %zd\n",
			      proc->pid, size, proc->free_async_space);
	}

	binder_update_page_range(proc, 0,
		(void *)PAGE_ALIGN((uintptr_t)buffer->data),
		(void *)(((uintptr_t)buffer->data + buffer_size) & PAGE_MASK),
		NULL);
	rb_erase(&buffer->rb_node, &proc->allocated_buffers);
	buffer->free = 1;
	if (!list_is_last(&buffer->entry, &proc->buffers)) {
		struct binder_buffer *next = list_entry(buffer->entry.next,
						struct binder_buffer, entry);

		if (next->free) {
			rb_erase(&next->rb_node, &proc->free_buffers);
			binder_delete_free_buffer(proc, next);
		}
	}
	if (proc->buffers.next != &buffer->entry) {
		struct binder_buffer *prev = list_entry(buffer->entry.prev,
						struct binder_buffer, entry);

		if (prev->free) {
			binder_delete_free_buffer(proc, buffer);
			rb_erase(&prev->rb_node, &proc->free_buffers);
			buffer = prev;
		}
	}
	binder_insert_free_buffer(proc, buffer);
}

static struct binder_node *binder_get_node(struct binder_proc *proc,
					   binder_uintptr_t ptr)
{
	struct rb_node *n = proc->nodes.rb_node;
	struct binder_node *node;

	while (n) {
		node = rb_entry(n, struct binder_node, rb_node);

		if (ptr < node->ptr)
			n = n->rb_left;
		else if (ptr > node->ptr)
			n = n->rb_right;
		else
			return node;
	}
	return NULL;
}

static struct binder_node *binder_new_node(struct binder_proc *proc,
					   binder_uintptr_t ptr,
					   binder_uintptr_t cookie)
{
	struct rb_node **p = &proc->nodes.rb_node;
	struct rb_node *parent = NULL;
	struct binder_node *node;

	while (*p) {
		parent = *p;
		node = rb_entry(parent, struct binder_node, rb_node);

		if (ptr < node->ptr)
			p = &(*p)->rb_left;
		else if (ptr > node->ptr)
			p = &(*p)->rb_right;
		else
			return NULL;
	}

	node = kzalloc(sizeof(*node), GFP_KERNEL);
	if (node == NULL)
		return NULL;
	binder_stats_created(BINDER_STAT_NODE);
	rb_link_node(&node->rb_node, parent, p);
	rb_insert_color(&node->rb_node, &proc->nodes);
	node->debug_id = ++binder_last_id;
	node->proc = proc;
	node->ptr = ptr;
	node->cookie = cookie;
	node->work.type = BINDER_WORK_NODE;
	INIT_LIST_HEAD(&node->work.entry);
	INIT_LIST_HEAD(&node->async_todo);
	binder_debug(BINDER_DEBUG_INTERNAL_REFS,
		     "%d:%d node %d u%016llx c%016llx created\n",
		     proc->pid, current->pid, node->debug_id,
		     (u64)node->ptr, (u64)node->cookie);
	return node;
}
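
/*
 * Node reference counting.  A node owns four counts: local_strong_refs
 * and local_weak_refs track what the owning process's userspace holds,
 * while internal_strong_refs and the refs hlist track remote binder_ref
 * objects.  has_strong_ref/has_weak_ref mirror what userspace has been
 * told so far, and the pending_* bits cover the window between queueing
 * BR_INCREFS/BR_ACQUIRE and the matching BC_INCREFS_DONE/BC_ACQUIRE_DONE.
 */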
static int binder_inc_node(struct binder_node *node, int strong, int internal,
			   struct list_head *target_list)
{
	if (strong) {
		if (internal) {
			if (target_list == NULL &&
			    node->internal_strong_refs == 0 &&
			    !(node == binder_context_mgr_node &&
			    node->has_strong_ref)) {
				pr_err("invalid inc strong node for %d\n",
					node->debug_id);
				return -EINVAL;
			}
			node->internal_strong_refs++;
		} else
			node->local_strong_refs++;
		if (!node->has_strong_ref && target_list) {
			list_del_init(&node->work.entry);
			list_add_tail(&node->work.entry, target_list);
		}
	} else {
		if (!internal)
			node->local_weak_refs++;
		if (!node->has_weak_ref && list_empty(&node->work.entry)) {
			if (target_list == NULL) {
				pr_err("invalid inc weak node for %d\n",
					node->debug_id);
				return -EINVAL;
			}
			list_add_tail(&node->work.entry, target_list);
		}
	}
	return 0;
}

static int binder_dec_node(struct binder_node *node, int strong, int internal)
{
	if (strong) {
		if (internal)
			node->internal_strong_refs--;
		else
			node->local_strong_refs--;
		if (node->local_strong_refs || node->internal_strong_refs)
			return 0;
	} else {
		if (!internal)
			node->local_weak_refs--;
		if (node->local_weak_refs || !hlist_empty(&node->refs))
			return 0;
	}
	if (node->proc && (node->has_strong_ref || node->has_weak_ref)) {
		if (list_empty(&node->work.entry)) {
			list_add_tail(&node->work.entry, &node->proc->todo);
			wake_up_interruptible(&node->proc->wait);
		}
	} else {
		if (hlist_empty(&node->refs) && !node->local_strong_refs &&
		    !node->local_weak_refs) {
			list_del_init(&node->work.entry);
			if (node->proc) {
				rb_erase(&node->rb_node, &node->proc->nodes);
				binder_debug(BINDER_DEBUG_INTERNAL_REFS,
					     "refless node %d deleted\n",
					     node->debug_id);
			} else {
				hlist_del(&node->dead_node);
				binder_debug(BINDER_DEBUG_INTERNAL_REFS,
					     "dead node %d deleted\n",
					     node->debug_id);
			}
			kfree(node);
			binder_stats_deleted(BINDER_STAT_NODE);
		}
	}

	return 0;
}

static struct binder_ref *binder_get_ref(struct binder_proc *proc,
					 uint32_t desc)
{
	struct rb_node *n = proc->refs_by_desc.rb_node;
	struct binder_ref *ref;

	while (n) {
		ref = rb_entry(n, struct binder_ref, rb_node_desc);

		if (desc < ref->desc)
			n = n->rb_left;
		else if (desc > ref->desc)
			n = n->rb_right;
		else
			return ref;
	}
	return NULL;
}
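
/*
 * binder_get_ref_for_node() also assigns the handle ("desc") a process
 * uses to name the node: desc 0 is reserved for the context manager,
 * and every other ref gets the lowest unused descriptor, found by
 * scanning refs_by_desc in ascending order.
 */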
static struct binder_ref *binder_get_ref_for_node(struct binder_proc *proc,
						  struct binder_node *node)
{
	struct rb_node *n;
	struct rb_node **p = &proc->refs_by_node.rb_node;
	struct rb_node *parent = NULL;
	struct binder_ref *ref, *new_ref;

	while (*p) {
		parent = *p;
		ref = rb_entry(parent, struct binder_ref, rb_node_node);

		if (node < ref->node)
			p = &(*p)->rb_left;
		else if (node > ref->node)
			p = &(*p)->rb_right;
		else
			return ref;
	}
	new_ref = kzalloc(sizeof(*ref), GFP_KERNEL);
	if (new_ref == NULL)
		return NULL;
	binder_stats_created(BINDER_STAT_REF);
	new_ref->debug_id = ++binder_last_id;
	new_ref->proc = proc;
	new_ref->node = node;
	rb_link_node(&new_ref->rb_node_node, parent, p);
	rb_insert_color(&new_ref->rb_node_node, &proc->refs_by_node);

	new_ref->desc = (node == binder_context_mgr_node) ? 0 : 1;
	for (n = rb_first(&proc->refs_by_desc); n != NULL; n = rb_next(n)) {
		ref = rb_entry(n, struct binder_ref, rb_node_desc);
		if (ref->desc > new_ref->desc)
			break;
		new_ref->desc = ref->desc + 1;
	}

	p = &proc->refs_by_desc.rb_node;
	while (*p) {
		parent = *p;
		ref = rb_entry(parent, struct binder_ref, rb_node_desc);

		if (new_ref->desc < ref->desc)
			p = &(*p)->rb_left;
		else if (new_ref->desc > ref->desc)
			p = &(*p)->rb_right;
		else
			BUG();
	}
	rb_link_node(&new_ref->rb_node_desc, parent, p);
	rb_insert_color(&new_ref->rb_node_desc, &proc->refs_by_desc);
	if (node) {
		hlist_add_head(&new_ref->node_entry, &node->refs);

		binder_debug(BINDER_DEBUG_INTERNAL_REFS,
			     "%d new ref %d desc %d for node %d\n",
			      proc->pid, new_ref->debug_id, new_ref->desc,
			      node->debug_id);
	} else {
		binder_debug(BINDER_DEBUG_INTERNAL_REFS,
			     "%d new ref %d desc %d for dead node\n",
			      proc->pid, new_ref->debug_id, new_ref->desc);
	}
	return new_ref;
}

static void binder_delete_ref(struct binder_ref *ref)
{
	binder_debug(BINDER_DEBUG_INTERNAL_REFS,
		     "%d delete ref %d desc %d for node %d\n",
		      ref->proc->pid, ref->debug_id, ref->desc,
		      ref->node->debug_id);

	rb_erase(&ref->rb_node_desc, &ref->proc->refs_by_desc);
	rb_erase(&ref->rb_node_node, &ref->proc->refs_by_node);
	if (ref->strong)
		binder_dec_node(ref->node, 1, 1);
	hlist_del(&ref->node_entry);
	binder_dec_node(ref->node, 0, 1);
	if (ref->death) {
		binder_debug(BINDER_DEBUG_DEAD_BINDER,
			     "%d delete ref %d desc %d has death notification\n",
			      ref->proc->pid, ref->debug_id, ref->desc);
		list_del(&ref->death->work.entry);
		kfree(ref->death);
		binder_stats_deleted(BINDER_STAT_DEATH);
	}
	kfree(ref);
	binder_stats_deleted(BINDER_STAT_REF);
}

static int binder_inc_ref(struct binder_ref *ref, int strong,
			  struct list_head *target_list)
{
	int ret;

	if (strong) {
		if (ref->strong == 0) {
			ret = binder_inc_node(ref->node, 1, 1, target_list);
			if (ret)
				return ret;
		}
		ref->strong++;
	} else {
		if (ref->weak == 0) {
			ret = binder_inc_node(ref->node, 0, 1, target_list);
			if (ret)
				return ret;
		}
		ref->weak++;
	}
	return 0;
}

static int binder_dec_ref(struct binder_ref *ref, int strong)
{
	if (strong) {
		if (ref->strong == 0) {
			binder_user_error("%d invalid dec strong, ref %d desc %d s %d w %d\n",
					  ref->proc->pid, ref->debug_id,
					  ref->desc, ref->strong, ref->weak);
			return -EINVAL;
		}
		ref->strong--;
		if (ref->strong == 0) {
			int ret;

			ret = binder_dec_node(ref->node, strong, 1);
			if (ret)
				return ret;
		}
	} else {
		if (ref->weak == 0) {
			binder_user_error("%d invalid dec weak, ref %d desc %d s %d w %d\n",
					  ref->proc->pid, ref->debug_id,
					  ref->desc, ref->strong, ref->weak);
			return -EINVAL;
		}
		ref->weak--;
	}
	if (ref->strong == 0 && ref->weak == 0)
		binder_delete_ref(ref);
	return 0;
}

static void binder_pop_transaction(struct binder_thread *target_thread,
				   struct binder_transaction *t)
{
	if (target_thread) {
		BUG_ON(target_thread->transaction_stack != t);
		BUG_ON(target_thread->transaction_stack->from != target_thread);
		target_thread->transaction_stack =
			target_thread->transaction_stack->from_parent;
		t->from = NULL;
	}
	t->need_reply = 0;
	if (t->buffer)
		t->buffer->transaction = NULL;
	kfree(t);
	binder_stats_deleted(BINDER_STAT_TRANSACTION);
}
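
/*
 * binder_send_failed_reply() unwinds a transaction that can no longer
 * be delivered: it walks back up the from_parent chain, popping each
 * nested transaction, until it finds a still-alive caller thread to
 * hand the error code to (or runs out of stack).
 */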
static void binder_send_failed_reply(struct binder_transaction *t,
				     uint32_t error_code)
{
	struct binder_thread *target_thread;

	BUG_ON(t->flags & TF_ONE_WAY);
	while (1) {
		target_thread = t->from;
		if (target_thread) {
			if (target_thread->return_error != BR_OK &&
			    target_thread->return_error2 == BR_OK) {
				target_thread->return_error2 =
					target_thread->return_error;
				target_thread->return_error = BR_OK;
			}
			if (target_thread->return_error == BR_OK) {
				binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
					     "send failed reply for transaction %d to %d:%d\n",
					      t->debug_id, target_thread->proc->pid,
					      target_thread->pid);

				binder_pop_transaction(target_thread, t);
				target_thread->return_error = error_code;
				wake_up_interruptible(&target_thread->wait);
			} else {
				pr_err("reply failed, target thread, %d:%d, has error code %d already\n",
					target_thread->proc->pid,
					target_thread->pid,
					target_thread->return_error);
			}
			return;
		} else {
			struct binder_transaction *next = t->from_parent;

			binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
				     "send failed reply for transaction %d, target dead\n",
				     t->debug_id);

			binder_pop_transaction(target_thread, t);
			if (next == NULL) {
				binder_debug(BINDER_DEBUG_DEAD_BINDER,
					     "reply failed, no target thread at root\n");
				return;
			}
			t = next;
			binder_debug(BINDER_DEBUG_DEAD_BINDER,
				     "reply failed, no target thread -- retry %d\n",
				      t->debug_id);
		}
	}
}

static void binder_transaction_buffer_release(struct binder_proc *proc,
					      struct binder_buffer *buffer,
					      binder_size_t *failed_at)
{
	binder_size_t *offp, *off_end;
	int debug_id = buffer->debug_id;

	binder_debug(BINDER_DEBUG_TRANSACTION,
		     "%d buffer release %d, size %zd-%zd, failed at %p\n",
		     proc->pid, buffer->debug_id,
		     buffer->data_size, buffer->offsets_size, failed_at);

	if (buffer->target_node)
		binder_dec_node(buffer->target_node, 1, 0);

	offp = (binder_size_t *)(buffer->data +
				 ALIGN(buffer->data_size, sizeof(void *)));
	if (failed_at)
		off_end = failed_at;
	else
		off_end = (void *)offp + buffer->offsets_size;
	for (; offp < off_end; offp++) {
		struct flat_binder_object *fp;

		if (*offp > buffer->data_size - sizeof(*fp) ||
		    buffer->data_size < sizeof(*fp) ||
		    !IS_ALIGNED(*offp, sizeof(u32))) {
			pr_err("transaction release %d bad offset %lld, size %zd\n",
			       debug_id, (u64)*offp, buffer->data_size);
			continue;
		}
		fp = (struct flat_binder_object *)(buffer->data + *offp);
		switch (fp->type) {
		case BINDER_TYPE_BINDER:
		case BINDER_TYPE_WEAK_BINDER: {
			struct binder_node *node = binder_get_node(proc, fp->binder);

			if (node == NULL) {
				pr_err("transaction release %d bad node %016llx\n",
				       debug_id, (u64)fp->binder);
				break;
			}
			binder_debug(BINDER_DEBUG_TRANSACTION,
				     "        node %d u%016llx\n",
				     node->debug_id, (u64)node->ptr);
			binder_dec_node(node, fp->type == BINDER_TYPE_BINDER, 0);
		} break;
		case BINDER_TYPE_HANDLE:
		case BINDER_TYPE_WEAK_HANDLE: {
			struct binder_ref *ref = binder_get_ref(proc, fp->handle);

			if (ref == NULL) {
				pr_err("transaction release %d bad handle %d\n",
				       debug_id, fp->handle);
				break;
			}
			binder_debug(BINDER_DEBUG_TRANSACTION,
				     "        ref %d desc %d (node %d)\n",
				     ref->debug_id, ref->desc, ref->node->debug_id);
			binder_dec_ref(ref, fp->type == BINDER_TYPE_HANDLE);
		} break;

		case BINDER_TYPE_FD:
			binder_debug(BINDER_DEBUG_TRANSACTION,
				     "        fd %d\n", fp->handle);
			if (failed_at)
				task_close_fd(proc, fp->handle);
			break;

		default:
			pr_err("transaction release %d bad object type %x\n",
			       debug_id, fp->type);
			break;
		}
	}
}
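
/*
 * binder_transaction() is the core of the driver.  In rough order it:
 * resolves the target node/proc/thread (for a reply, by unwinding the
 * caller's transaction stack), allocates a binder_buffer in the target,
 * copies the data and offsets buffers in from userspace, translates
 * every flat_binder_object in place (nodes become handles in the
 * target, handles may turn back into nodes or become new handles, fds
 * are dup'ed into the target's file table), then queues the work on
 * the target's todo list and wakes it up.
 */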
static void binder_transaction(struct binder_proc *proc,
			       struct binder_thread *thread,
			       struct binder_transaction_data *tr, int reply)
{
	struct binder_transaction *t;
	struct binder_work *tcomplete;
	binder_size_t *offp, *off_end;
	struct binder_proc *target_proc;
	struct binder_thread *target_thread = NULL;
	struct binder_node *target_node = NULL;
	struct list_head *target_list;
	wait_queue_head_t *target_wait;
	struct binder_transaction *in_reply_to = NULL;
	struct binder_transaction_log_entry *e;
	uint32_t return_error;

	e = binder_transaction_log_add(&binder_transaction_log);
	e->call_type = reply ? 2 : !!(tr->flags & TF_ONE_WAY);
	e->from_proc = proc->pid;
	e->from_thread = thread->pid;
	e->target_handle = tr->target.handle;
	e->data_size = tr->data_size;
	e->offsets_size = tr->offsets_size;

	if (reply) {
		in_reply_to = thread->transaction_stack;
		if (in_reply_to == NULL) {
			binder_user_error("%d:%d got reply transaction with no transaction stack\n",
					  proc->pid, thread->pid);
			return_error = BR_FAILED_REPLY;
			goto err_empty_call_stack;
		}
		binder_set_nice(in_reply_to->saved_priority);
		if (in_reply_to->to_thread != thread) {
			binder_user_error("%d:%d got reply transaction with bad transaction stack, transaction %d has target %d:%d\n",
				proc->pid, thread->pid, in_reply_to->debug_id,
				in_reply_to->to_proc ?
				in_reply_to->to_proc->pid : 0,
				in_reply_to->to_thread ?
				in_reply_to->to_thread->pid : 0);
			return_error = BR_FAILED_REPLY;
			in_reply_to = NULL;
			goto err_bad_call_stack;
		}
		thread->transaction_stack = in_reply_to->to_parent;
		target_thread = in_reply_to->from;
		if (target_thread == NULL) {
			return_error = BR_DEAD_REPLY;
			goto err_dead_binder;
		}
		if (target_thread->transaction_stack != in_reply_to) {
			binder_user_error("%d:%d got reply transaction with bad target transaction stack %d, expected %d\n",
				proc->pid, thread->pid,
				target_thread->transaction_stack ?
				target_thread->transaction_stack->debug_id : 0,
				in_reply_to->debug_id);
			return_error = BR_FAILED_REPLY;
			in_reply_to = NULL;
			target_thread = NULL;
			goto err_dead_binder;
		}
		target_proc = target_thread->proc;
	} else {
		if (tr->target.handle) {
			struct binder_ref *ref;

			ref = binder_get_ref(proc, tr->target.handle);
			if (ref == NULL) {
				binder_user_error("%d:%d got transaction to invalid handle\n",
					proc->pid, thread->pid);
				return_error = BR_FAILED_REPLY;
				goto err_invalid_target_handle;
			}
			target_node = ref->node;
		} else {
			target_node = binder_context_mgr_node;
			if (target_node == NULL) {
				return_error = BR_DEAD_REPLY;
				goto err_no_context_mgr_node;
			}
		}
		e->to_node = target_node->debug_id;
		target_proc = target_node->proc;
		if (target_proc == NULL) {
			return_error = BR_DEAD_REPLY;
			goto err_dead_binder;
		}
		if (!(tr->flags & TF_ONE_WAY) && thread->transaction_stack) {
			struct binder_transaction *tmp;

			tmp = thread->transaction_stack;
			if (tmp->to_thread != thread) {
				binder_user_error("%d:%d got new transaction with bad transaction stack, transaction %d has target %d:%d\n",
					proc->pid, thread->pid, tmp->debug_id,
					tmp->to_proc ? tmp->to_proc->pid : 0,
					tmp->to_thread ?
					tmp->to_thread->pid : 0);
				return_error = BR_FAILED_REPLY;
				goto err_bad_call_stack;
			}
			while (tmp) {
				if (tmp->from && tmp->from->proc == target_proc)
					target_thread = tmp->from;
				tmp = tmp->from_parent;
			}
		}
	}
	if (target_thread) {
		e->to_thread = target_thread->pid;
		target_list = &target_thread->todo;
		target_wait = &target_thread->wait;
	} else {
		target_list = &target_proc->todo;
		target_wait = &target_proc->wait;
	}
	e->to_proc = target_proc->pid;

	/* TODO: reuse incoming transaction for reply */
	t = kzalloc(sizeof(*t), GFP_KERNEL);
	if (t == NULL) {
		return_error = BR_FAILED_REPLY;
		goto err_alloc_t_failed;
	}
	binder_stats_created(BINDER_STAT_TRANSACTION);

	tcomplete = kzalloc(sizeof(*tcomplete), GFP_KERNEL);
	if (tcomplete == NULL) {
		return_error = BR_FAILED_REPLY;
		goto err_alloc_tcomplete_failed;
	}
	binder_stats_created(BINDER_STAT_TRANSACTION_COMPLETE);

	t->debug_id = ++binder_last_id;
	e->debug_id = t->debug_id;

	if (reply)
		binder_debug(BINDER_DEBUG_TRANSACTION,
			     "%d:%d BC_REPLY %d -> %d:%d, data %016llx-%016llx size %lld-%lld\n",
			     proc->pid, thread->pid, t->debug_id,
			     target_proc->pid, target_thread->pid,
			     (u64)tr->data.ptr.buffer,
			     (u64)tr->data.ptr.offsets,
			     (u64)tr->data_size, (u64)tr->offsets_size);
	else
		binder_debug(BINDER_DEBUG_TRANSACTION,
			     "%d:%d BC_TRANSACTION %d -> %d - node %d, data %016llx-%016llx size %lld-%lld\n",
			     proc->pid, thread->pid, t->debug_id,
			     target_proc->pid, target_node->debug_id,
			     (u64)tr->data.ptr.buffer,
			     (u64)tr->data.ptr.offsets,
			     (u64)tr->data_size, (u64)tr->offsets_size);

	if (!reply && !(tr->flags & TF_ONE_WAY))
		t->from = thread;
	else
		t->from = NULL;
	t->sender_euid = proc->tsk->cred->euid;
	t->to_proc = target_proc;
	t->to_thread = target_thread;
	t->code = tr->code;
	t->flags = tr->flags;
	t->priority = task_nice(current);

	trace_binder_transaction(reply, t, target_node);

	t->buffer = binder_alloc_buf(target_proc, tr->data_size,
		tr->offsets_size, !reply && (t->flags & TF_ONE_WAY));
	if (t->buffer == NULL) {
		return_error = BR_FAILED_REPLY;
		goto err_binder_alloc_buf_failed;
	}
	t->buffer->allow_user_free = 0;
	t->buffer->debug_id = t->debug_id;
	t->buffer->transaction = t;
	t->buffer->target_node = target_node;
	trace_binder_transaction_alloc_buf(t->buffer);
	if (target_node)
		binder_inc_node(target_node, 1, 0, NULL);

	offp = (binder_size_t *)(t->buffer->data +
				 ALIGN(tr->data_size, sizeof(void *)));

	if (copy_from_user(t->buffer->data, (const void __user *)(uintptr_t)
			   tr->data.ptr.buffer, tr->data_size)) {
		binder_user_error("%d:%d got transaction with invalid data ptr\n",
				proc->pid, thread->pid);
		return_error = BR_FAILED_REPLY;
		goto err_copy_data_failed;
	}
	if (copy_from_user(offp, (const void __user *)(uintptr_t)
			   tr->data.ptr.offsets, tr->offsets_size)) {
		binder_user_error("%d:%d got transaction with invalid offsets ptr\n",
				proc->pid, thread->pid);
		return_error = BR_FAILED_REPLY;
		goto err_copy_data_failed;
	}
	if (!IS_ALIGNED(tr->offsets_size, sizeof(binder_size_t))) {
		binder_user_error("%d:%d got transaction with invalid offsets size, %lld\n",
				proc->pid, thread->pid, (u64)tr->offsets_size);
		return_error = BR_FAILED_REPLY;
		goto err_bad_offset;
	}
	off_end = (void *)offp + tr->offsets_size;
	for (; offp < off_end; offp++) {
		struct flat_binder_object *fp;

		if (*offp > t->buffer->data_size - sizeof(*fp) ||
		    t->buffer->data_size < sizeof(*fp) ||
		    !IS_ALIGNED(*offp, sizeof(u32))) {
			binder_user_error("%d:%d got transaction with invalid offset, %lld\n",
					  proc->pid, thread->pid, (u64)*offp);
			return_error = BR_FAILED_REPLY;
			goto err_bad_offset;
		}
		fp = (struct flat_binder_object *)(t->buffer->data + *offp);
		switch (fp->type) {
		case BINDER_TYPE_BINDER:
		case BINDER_TYPE_WEAK_BINDER: {
			struct binder_ref *ref;
			struct binder_node *node = binder_get_node(proc, fp->binder);

			if (node == NULL) {
				node = binder_new_node(proc, fp->binder, fp->cookie);
				if (node == NULL) {
					return_error = BR_FAILED_REPLY;
					goto err_binder_new_node_failed;
				}
				node->min_priority = fp->flags & FLAT_BINDER_FLAG_PRIORITY_MASK;
				node->accept_fds = !!(fp->flags & FLAT_BINDER_FLAG_ACCEPTS_FDS);
			}
			if (fp->cookie != node->cookie) {
				binder_user_error("%d:%d sending u%016llx node %d, cookie mismatch %016llx != %016llx\n",
					proc->pid, thread->pid,
					(u64)fp->binder, node->debug_id,
					(u64)fp->cookie, (u64)node->cookie);
				goto err_binder_get_ref_for_node_failed;
			}
			ref = binder_get_ref_for_node(target_proc, node);
			if (ref == NULL) {
				return_error = BR_FAILED_REPLY;
				goto err_binder_get_ref_for_node_failed;
			}
			if (fp->type == BINDER_TYPE_BINDER)
				fp->type = BINDER_TYPE_HANDLE;
			else
				fp->type = BINDER_TYPE_WEAK_HANDLE;
			fp->handle = ref->desc;
			binder_inc_ref(ref, fp->type == BINDER_TYPE_HANDLE,
				       &thread->todo);

			trace_binder_transaction_node_to_ref(t, node, ref);
			binder_debug(BINDER_DEBUG_TRANSACTION,
				     "        node %d u%016llx -> ref %d desc %d\n",
				     node->debug_id, (u64)node->ptr,
				     ref->debug_id, ref->desc);
		} break;
		case BINDER_TYPE_HANDLE:
		case BINDER_TYPE_WEAK_HANDLE: {
			struct binder_ref *ref = binder_get_ref(proc, fp->handle);

			if (ref == NULL) {
				binder_user_error("%d:%d got transaction with invalid handle, %d\n",
						proc->pid,
						thread->pid, fp->handle);
				return_error = BR_FAILED_REPLY;
				goto err_binder_get_ref_failed;
			}
			if (ref->node->proc == target_proc) {
				if (fp->type == BINDER_TYPE_HANDLE)
					fp->type = BINDER_TYPE_BINDER;
				else
					fp->type = BINDER_TYPE_WEAK_BINDER;
				fp->binder = ref->node->ptr;
				fp->cookie = ref->node->cookie;
				binder_inc_node(ref->node, fp->type == BINDER_TYPE_BINDER, 0, NULL);
				trace_binder_transaction_ref_to_node(t, ref);
				binder_debug(BINDER_DEBUG_TRANSACTION,
					     "        ref %d desc %d -> node %d u%016llx\n",
					     ref->debug_id, ref->desc, ref->node->debug_id,
					     (u64)ref->node->ptr);
			} else {
				struct binder_ref *new_ref;

				new_ref = binder_get_ref_for_node(target_proc, ref->node);
				if (new_ref == NULL) {
					return_error = BR_FAILED_REPLY;
					goto err_binder_get_ref_for_node_failed;
				}
				fp->handle = new_ref->desc;
				binder_inc_ref(new_ref, fp->type == BINDER_TYPE_HANDLE, NULL);
				trace_binder_transaction_ref_to_ref(t, ref,
								    new_ref);
				binder_debug(BINDER_DEBUG_TRANSACTION,
					     "        ref %d desc %d -> ref %d desc %d (node %d)\n",
					     ref->debug_id, ref->desc, new_ref->debug_id,
					     new_ref->desc, ref->node->debug_id);
			}
		} break;

		case BINDER_TYPE_FD: {
			int target_fd;
			struct file *file;

			if (reply) {
				if (!(in_reply_to->flags & TF_ACCEPT_FDS)) {
					binder_user_error("%d:%d got reply with fd, %d, but target does not allow fds\n",
						proc->pid, thread->pid, fp->handle);
					return_error = BR_FAILED_REPLY;
					goto err_fd_not_allowed;
				}
			} else if (!target_node->accept_fds) {
				binder_user_error("%d:%d got transaction with fd, %d, but target does not allow fds\n",
					proc->pid, thread->pid, fp->handle);
				return_error = BR_FAILED_REPLY;
				goto err_fd_not_allowed;
			}

			file = fget(fp->handle);
			if (file == NULL) {
				binder_user_error("%d:%d got transaction with invalid fd, %d\n",
					proc->pid, thread->pid, fp->handle);
				return_error = BR_FAILED_REPLY;
				goto err_fget_failed;
			}
			target_fd = task_get_unused_fd_flags(target_proc, O_CLOEXEC);
			if (target_fd < 0) {
				fput(file);
				return_error = BR_FAILED_REPLY;
				goto err_get_unused_fd_failed;
			}
			task_fd_install(target_proc, target_fd, file);
			trace_binder_transaction_fd(t, fp->handle, target_fd);
			binder_debug(BINDER_DEBUG_TRANSACTION,
				     "        fd %d -> %d\n", fp->handle, target_fd);
			/* TODO: fput? */
			fp->handle = target_fd;
		} break;

		default:
			binder_user_error("%d:%d got transaction with invalid object type, %x\n",
				proc->pid, thread->pid, fp->type);
			return_error = BR_FAILED_REPLY;
			goto err_bad_object_type;
		}
	}
	if (reply) {
		BUG_ON(t->buffer->async_transaction != 0);
		binder_pop_transaction(target_thread, in_reply_to);
	} else if (!(t->flags & TF_ONE_WAY)) {
		BUG_ON(t->buffer->async_transaction != 0);
		t->need_reply = 1;
		t->from_parent = thread->transaction_stack;
		thread->transaction_stack = t;
	} else {
		BUG_ON(target_node == NULL);
		BUG_ON(t->buffer->async_transaction != 1);
		if (target_node->has_async_transaction) {
			target_list = &target_node->async_todo;
			target_wait = NULL;
		} else
			target_node->has_async_transaction = 1;
	}
	t->work.type = BINDER_WORK_TRANSACTION;
	list_add_tail(&t->work.entry, target_list);
	tcomplete->type = BINDER_WORK_TRANSACTION_COMPLETE;
	list_add_tail(&tcomplete->entry, &thread->todo);
	if (target_wait)
		wake_up_interruptible(target_wait);
	return;

err_get_unused_fd_failed:
err_fget_failed:
err_fd_not_allowed:
err_binder_get_ref_for_node_failed:
err_binder_get_ref_failed:
err_binder_new_node_failed:
err_bad_object_type:
err_bad_offset:
err_copy_data_failed:
	trace_binder_transaction_failed_buffer_release(t->buffer);
	binder_transaction_buffer_release(target_proc, t->buffer, offp);
	t->buffer->transaction = NULL;
	binder_free_buf(target_proc, t->buffer);
err_binder_alloc_buf_failed:
	kfree(tcomplete);
	binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
err_alloc_tcomplete_failed:
	kfree(t);
	binder_stats_deleted(BINDER_STAT_TRANSACTION);
err_alloc_t_failed:
err_bad_call_stack:
err_empty_call_stack:
err_dead_binder:
err_invalid_target_handle:
err_no_context_mgr_node:
	binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
		     "%d:%d transaction failed %d, size %lld-%lld\n",
		     proc->pid, thread->pid, return_error,
		     (u64)tr->data_size, (u64)tr->offsets_size);

	{
		struct binder_transaction_log_entry *fe;

		fe = binder_transaction_log_add(&binder_transaction_log_failed);
		*fe = *e;
	}

	BUG_ON(thread->return_error != BR_OK);
	if (in_reply_to) {
		thread->return_error = BR_TRANSACTION_COMPLETE;
		binder_send_failed_reply(in_reply_to, return_error);
	} else
		thread->return_error = return_error;
}
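
/*
 * binder_thread_write() consumes a stream of BC_* commands from the
 * write buffer userspace handed to the BINDER_WRITE_READ ioctl: each
 * iteration reads one 32-bit command word, bumps the bc[] stats, and
 * dispatches on the command.  *consumed tracks how far the buffer has
 * been processed so a partially handled write can be resumed.
 */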
1709 static int binder_thread_write(struct binder_proc *proc,
1710 struct binder_thread *thread,
1711 binder_uintptr_t binder_buffer, size_t size,
1712 binder_size_t *consumed)
1714 uint32_t cmd;
1715 void __user *buffer = (void __user *)(uintptr_t)binder_buffer;
1716 void __user *ptr = buffer + *consumed;
1717 void __user *end = buffer + size;
1719 while (ptr < end && thread->return_error == BR_OK) {
1720 if (get_user(cmd, (uint32_t __user *)ptr))
1721 return -EFAULT;
1722 ptr += sizeof(uint32_t);
1723 trace_binder_command(cmd);
1724 if (_IOC_NR(cmd) < ARRAY_SIZE(binder_stats.bc)) {
1725 binder_stats.bc[_IOC_NR(cmd)]++;
1726 proc->stats.bc[_IOC_NR(cmd)]++;
1727 thread->stats.bc[_IOC_NR(cmd)]++;
1729 switch (cmd) {
1730 case BC_INCREFS:
1731 case BC_ACQUIRE:
1732 case BC_RELEASE:
1733 case BC_DECREFS: {
1734 uint32_t target;
1735 struct binder_ref *ref;
1736 const char *debug_string;
1738 if (get_user(target, (uint32_t __user *)ptr))
1739 return -EFAULT;
1740 ptr += sizeof(uint32_t);
1741 if (target == 0 && binder_context_mgr_node &&
1742 (cmd == BC_INCREFS || cmd == BC_ACQUIRE)) {
1743 ref = binder_get_ref_for_node(proc,
1744 binder_context_mgr_node);
1745 if (ref->desc != target) {
1746 binder_user_error("%d:%d tried to acquire reference to desc 0, got %d instead\n",
1747 proc->pid, thread->pid,
1748 ref->desc);
1750 } else
1751 ref = binder_get_ref(proc, target);
1752 if (ref == NULL) {
1753 binder_user_error("%d:%d refcount change on invalid ref %d\n",
1754 proc->pid, thread->pid, target);
1755 break;
1757 switch (cmd) {
1758 case BC_INCREFS:
1759 debug_string = "IncRefs";
1760 binder_inc_ref(ref, 0, NULL);
1761 break;
1762 case BC_ACQUIRE:
1763 debug_string = "Acquire";
1764 binder_inc_ref(ref, 1, NULL);
1765 break;
1766 case BC_RELEASE:
1767 debug_string = "Release";
1768 binder_dec_ref(ref, 1);
1769 break;
1770 case BC_DECREFS:
1771 default:
1772 debug_string = "DecRefs";
1773 binder_dec_ref(ref, 0);
1774 break;
1776 binder_debug(BINDER_DEBUG_USER_REFS,
1777 "%d:%d %s ref %d desc %d s %d w %d for node %d\n",
1778 proc->pid, thread->pid, debug_string, ref->debug_id,
1779 ref->desc, ref->strong, ref->weak, ref->node->debug_id);
1780 break;
1782 case BC_INCREFS_DONE:
1783 case BC_ACQUIRE_DONE: {
1784 binder_uintptr_t node_ptr;
1785 binder_uintptr_t cookie;
1786 struct binder_node *node;
1788 if (get_user(node_ptr, (binder_uintptr_t __user *)ptr))
1789 return -EFAULT;
1790 ptr += sizeof(binder_uintptr_t);
1791 if (get_user(cookie, (binder_uintptr_t __user *)ptr))
1792 return -EFAULT;
1793 ptr += sizeof(binder_uintptr_t);
1794 node = binder_get_node(proc, node_ptr);
1795 if (node == NULL) {
1796 binder_user_error("%d:%d %s u%016llx no match\n",
1797 proc->pid, thread->pid,
1798 cmd == BC_INCREFS_DONE ?
1799 "BC_INCREFS_DONE" :
1800 "BC_ACQUIRE_DONE",
1801 (u64)node_ptr);
1802 break;
1804 if (cookie != node->cookie) {
1805 binder_user_error("%d:%d %s u%016llx node %d cookie mismatch %016llx != %016llx\n",
1806 proc->pid, thread->pid,
1807 cmd == BC_INCREFS_DONE ?
1808 "BC_INCREFS_DONE" : "BC_ACQUIRE_DONE",
1809 (u64)node_ptr, node->debug_id,
1810 (u64)cookie, (u64)node->cookie);
1811 break;
1813 if (cmd == BC_ACQUIRE_DONE) {
1814 if (node->pending_strong_ref == 0) {
1815 binder_user_error("%d:%d BC_ACQUIRE_DONE node %d has no pending acquire request\n",
1816 proc->pid, thread->pid,
1817 node->debug_id);
1818 break;
1820 node->pending_strong_ref = 0;
1821 } else {
1822 if (node->pending_weak_ref == 0) {
1823 binder_user_error("%d:%d BC_INCREFS_DONE node %d has no pending increfs request\n",
1824 proc->pid, thread->pid,
1825 node->debug_id);
1826 break;
1828 node->pending_weak_ref = 0;
1830 binder_dec_node(node, cmd == BC_ACQUIRE_DONE, 0);
1831 binder_debug(BINDER_DEBUG_USER_REFS,
1832 "%d:%d %s node %d ls %d lw %d\n",
1833 proc->pid, thread->pid,
1834 cmd == BC_INCREFS_DONE ? "BC_INCREFS_DONE" : "BC_ACQUIRE_DONE",
1835 node->debug_id, node->local_strong_refs, node->local_weak_refs);
1836 break;
1838 case BC_ATTEMPT_ACQUIRE:
1839 pr_err("BC_ATTEMPT_ACQUIRE not supported\n");
1840 return -EINVAL;
1841 case BC_ACQUIRE_RESULT:
1842 pr_err("BC_ACQUIRE_RESULT not supported\n");
1843 return -EINVAL;
1845 case BC_FREE_BUFFER: {
1846 binder_uintptr_t data_ptr;
1847 struct binder_buffer *buffer;
1849 if (get_user(data_ptr, (binder_uintptr_t __user *)ptr))
1850 return -EFAULT;
1851 ptr += sizeof(binder_uintptr_t);
1853 buffer = binder_buffer_lookup(proc, data_ptr);
1854 if (buffer == NULL) {
1855 binder_user_error("%d:%d BC_FREE_BUFFER u%016llx no match\n",
1856 proc->pid, thread->pid, (u64)data_ptr);
1857 break;
1859 if (!buffer->allow_user_free) {
1860 binder_user_error("%d:%d BC_FREE_BUFFER u%016llx matched unreturned buffer\n",
1861 proc->pid, thread->pid, (u64)data_ptr);
1862 break;
1864 binder_debug(BINDER_DEBUG_FREE_BUFFER,
1865 "%d:%d BC_FREE_BUFFER u%016llx found buffer %d for %s transaction\n",
1866 proc->pid, thread->pid, (u64)data_ptr,
1867 buffer->debug_id,
1868 buffer->transaction ? "active" : "finished");
1870 if (buffer->transaction) {
1871 buffer->transaction->buffer = NULL;
1872 buffer->transaction = NULL;
1874 if (buffer->async_transaction && buffer->target_node) {
1875 BUG_ON(!buffer->target_node->has_async_transaction);
1876 if (list_empty(&buffer->target_node->async_todo))
1877 buffer->target_node->has_async_transaction = 0;
1878 else
1879 list_move_tail(buffer->target_node->async_todo.next, &thread->todo);
1881 trace_binder_transaction_buffer_release(buffer);
1882 binder_transaction_buffer_release(proc, buffer, NULL);
1883 binder_free_buf(proc, buffer);
1884 break;
1887 case BC_TRANSACTION:
1888 case BC_REPLY: {
1889 struct binder_transaction_data tr;
1891 if (copy_from_user(&tr, ptr, sizeof(tr)))
1892 return -EFAULT;
1893 ptr += sizeof(tr);
1894 binder_transaction(proc, thread, &tr, cmd == BC_REPLY);
1895 break;
1898 case BC_REGISTER_LOOPER:
1899 binder_debug(BINDER_DEBUG_THREADS,
1900 "%d:%d BC_REGISTER_LOOPER\n",
1901 proc->pid, thread->pid);
1902 if (thread->looper & BINDER_LOOPER_STATE_ENTERED) {
1903 thread->looper |= BINDER_LOOPER_STATE_INVALID;
1904 binder_user_error("%d:%d ERROR: BC_REGISTER_LOOPER called after BC_ENTER_LOOPER\n",
1905 proc->pid, thread->pid);
1906 } else if (proc->requested_threads == 0) {
1907 thread->looper |= BINDER_LOOPER_STATE_INVALID;
1908 binder_user_error("%d:%d ERROR: BC_REGISTER_LOOPER called without request\n",
1909 proc->pid, thread->pid);
1910 } else {
1911 proc->requested_threads--;
1912 proc->requested_threads_started++;
1914 thread->looper |= BINDER_LOOPER_STATE_REGISTERED;
1915 break;
1916 case BC_ENTER_LOOPER:
1917 binder_debug(BINDER_DEBUG_THREADS,
1918 "%d:%d BC_ENTER_LOOPER\n",
1919 proc->pid, thread->pid);
1920 if (thread->looper & BINDER_LOOPER_STATE_REGISTERED) {
1921 thread->looper |= BINDER_LOOPER_STATE_INVALID;
1922 binder_user_error("%d:%d ERROR: BC_ENTER_LOOPER called after BC_REGISTER_LOOPER\n",
1923 proc->pid, thread->pid);
1924 }
1925 thread->looper |= BINDER_LOOPER_STATE_ENTERED;
1926 break;
1927 case BC_EXIT_LOOPER:
1928 binder_debug(BINDER_DEBUG_THREADS,
1929 "%d:%d BC_EXIT_LOOPER\n",
1930 proc->pid, thread->pid);
1931 thread->looper |= BINDER_LOOPER_STATE_EXITED;
1932 break;
1934 case BC_REQUEST_DEATH_NOTIFICATION:
1935 case BC_CLEAR_DEATH_NOTIFICATION: {
1936 uint32_t target;
1937 binder_uintptr_t cookie;
1938 struct binder_ref *ref;
1939 struct binder_ref_death *death;
1941 if (get_user(target, (uint32_t __user *)ptr))
1942 return -EFAULT;
1943 ptr += sizeof(uint32_t);
1944 if (get_user(cookie, (binder_uintptr_t __user *)ptr))
1945 return -EFAULT;
1946 ptr += sizeof(binder_uintptr_t);
1947 ref = binder_get_ref(proc, target);
1948 if (ref == NULL) {
1949 binder_user_error("%d:%d %s invalid ref %d\n",
1950 proc->pid, thread->pid,
1951 cmd == BC_REQUEST_DEATH_NOTIFICATION ?
1952 "BC_REQUEST_DEATH_NOTIFICATION" :
1953 "BC_CLEAR_DEATH_NOTIFICATION",
1954 target);
1955 break;
1956 }
1958 binder_debug(BINDER_DEBUG_DEATH_NOTIFICATION,
1959 "%d:%d %s %016llx ref %d desc %d s %d w %d for node %d\n",
1960 proc->pid, thread->pid,
1961 cmd == BC_REQUEST_DEATH_NOTIFICATION ?
1962 "BC_REQUEST_DEATH_NOTIFICATION" :
1963 "BC_CLEAR_DEATH_NOTIFICATION",
1964 (u64)cookie, ref->debug_id, ref->desc,
1965 ref->strong, ref->weak, ref->node->debug_id);
1967 if (cmd == BC_REQUEST_DEATH_NOTIFICATION) {
1968 if (ref->death) {
1969 binder_user_error("%d:%d BC_REQUEST_DEATH_NOTIFICATION death notification already set\n",
1970 proc->pid, thread->pid);
1971 break;
1972 }
1973 death = kzalloc(sizeof(*death), GFP_KERNEL);
1974 if (death == NULL) {
1975 thread->return_error = BR_ERROR;
1976 binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
1977 "%d:%d BC_REQUEST_DEATH_NOTIFICATION failed\n",
1978 proc->pid, thread->pid);
1979 break;
1980 }
1981 binder_stats_created(BINDER_STAT_DEATH);
1982 INIT_LIST_HEAD(&death->work.entry);
1983 death->cookie = cookie;
1984 ref->death = death;
1985 if (ref->node->proc == NULL) {
1986 ref->death->work.type = BINDER_WORK_DEAD_BINDER;
1987 if (thread->looper & (BINDER_LOOPER_STATE_REGISTERED | BINDER_LOOPER_STATE_ENTERED)) {
1988 list_add_tail(&ref->death->work.entry, &thread->todo);
1989 } else {
1990 list_add_tail(&ref->death->work.entry, &proc->todo);
1991 wake_up_interruptible(&proc->wait);
1992 }
1993 }
1994 } else {
1995 if (ref->death == NULL) {
1996 binder_user_error("%d:%d BC_CLEAR_DEATH_NOTIFICATION death notification not active\n",
1997 proc->pid, thread->pid);
1998 break;
1999 }
2000 death = ref->death;
2001 if (death->cookie != cookie) {
2002 binder_user_error("%d:%d BC_CLEAR_DEATH_NOTIFICATION death notification cookie mismatch %016llx != %016llx\n",
2003 proc->pid, thread->pid,
2004 (u64)death->cookie,
2005 (u64)cookie);
2006 break;
2007 }
2008 ref->death = NULL;
2009 if (list_empty(&death->work.entry)) {
2010 death->work.type = BINDER_WORK_CLEAR_DEATH_NOTIFICATION;
2011 if (thread->looper & (BINDER_LOOPER_STATE_REGISTERED | BINDER_LOOPER_STATE_ENTERED)) {
2012 list_add_tail(&death->work.entry, &thread->todo);
2013 } else {
2014 list_add_tail(&death->work.entry, &proc->todo);
2015 wake_up_interruptible(&proc->wait);
2016 }
2017 } else {
2018 BUG_ON(death->work.type != BINDER_WORK_DEAD_BINDER);
2019 death->work.type = BINDER_WORK_DEAD_BINDER_AND_CLEAR;
2020 }
2021 }
2022 } break;
2023 case BC_DEAD_BINDER_DONE: {
2024 struct binder_work *w;
2025 binder_uintptr_t cookie;
2026 struct binder_ref_death *death = NULL;
2027 if (get_user(cookie, (binder_uintptr_t __user *)ptr))
2028 return -EFAULT;
2030 ptr += sizeof(void *);
2031 list_for_each_entry(w, &proc->delivered_death, entry) {
2032 struct binder_ref_death *tmp_death = container_of(w, struct binder_ref_death, work);
2033 if (tmp_death->cookie == cookie) {
2034 death = tmp_death;
2035 break;
2036 }
2037 }
2038 binder_debug(BINDER_DEBUG_DEAD_BINDER,
2039 "%d:%d BC_DEAD_BINDER_DONE %016llx found %p\n",
2040 proc->pid, thread->pid, (u64)cookie,
2041 death);
2042 if (death == NULL) {
2043 binder_user_error("%d:%d BC_DEAD_BINDER_DONE %016llx not found\n",
2044 proc->pid, thread->pid, (u64)cookie);
2045 break;
2046 }
2048 list_del_init(&death->work.entry);
2049 if (death->work.type == BINDER_WORK_DEAD_BINDER_AND_CLEAR) {
2050 death->work.type = BINDER_WORK_CLEAR_DEATH_NOTIFICATION;
2051 if (thread->looper & (BINDER_LOOPER_STATE_REGISTERED | BINDER_LOOPER_STATE_ENTERED)) {
2052 list_add_tail(&death->work.entry, &thread->todo);
2053 } else {
2054 list_add_tail(&death->work.entry, &proc->todo);
2055 wake_up_interruptible(&proc->wait);
2056 }
2057 }
2058 } break;
2060 default:
2061 pr_err("%d:%d unknown command %d\n",
2062 proc->pid, thread->pid, cmd);
2063 return -EINVAL;
2064 }
2065 *consumed = ptr - buffer;
2066 }
2067 return 0;
2068 }
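/*
 * binder_stat_br() - count a BR_* return code in the global,
 * per-process and per-thread statistics. The binder_return
 * tracepoint fires for every code, including ones whose _IOC_NR
 * falls outside the stats array and is therefore not counted.
 */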
2070 static void binder_stat_br(struct binder_proc *proc,
2071 struct binder_thread *thread, uint32_t cmd)
2072 {
2073 trace_binder_return(cmd);
2074 if (_IOC_NR(cmd) < ARRAY_SIZE(binder_stats.br)) {
2075 binder_stats.br[_IOC_NR(cmd)]++;
2076 proc->stats.br[_IOC_NR(cmd)]++;
2077 thread->stats.br[_IOC_NR(cmd)]++;
2078 }
2079 }
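/*
 * Wait predicates used by binder_thread_read() and binder_poll():
 * a looper has process-level work when the proc todo list is
 * non-empty, and thread-level work when its own todo list is
 * non-empty or a return error is pending; NEED_RETURN always
 * counts as work so the thread drops back to user space.
 */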
2081 static int binder_has_proc_work(struct binder_proc *proc,
2082 struct binder_thread *thread)
2083 {
2084 return !list_empty(&proc->todo) ||
2085 (thread->looper & BINDER_LOOPER_STATE_NEED_RETURN);
2086 }
2088 static int binder_has_thread_work(struct binder_thread *thread)
2089 {
2090 return !list_empty(&thread->todo) || thread->return_error != BR_OK ||
2091 (thread->looper & BINDER_LOOPER_STATE_NEED_RETURN);
2092 }
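/*
 * binder_thread_read() - fill the user read buffer with BR_*
 * commands. A read that starts with an empty buffer always begins
 * with BR_NOOP; pending return errors are delivered next, then
 * work items are drained from the thread's todo list (or the
 * proc's, when the thread is idling as a looper).
 */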
2094 static int binder_thread_read(struct binder_proc *proc,
2095 struct binder_thread *thread,
2096 binder_uintptr_t binder_buffer, size_t size,
2097 binder_size_t *consumed, int non_block)
2098 {
2099 void __user *buffer = (void __user *)(uintptr_t)binder_buffer;
2100 void __user *ptr = buffer + *consumed;
2101 void __user *end = buffer + size;
2103 int ret = 0;
2104 int wait_for_proc_work;
2106 if (*consumed == 0) {
2107 if (put_user(BR_NOOP, (uint32_t __user *)ptr))
2108 return -EFAULT;
2109 ptr += sizeof(uint32_t);
2110 }
2112 retry:
2113 wait_for_proc_work = thread->transaction_stack == NULL &&
2114 list_empty(&thread->todo);
2116 if (thread->return_error != BR_OK && ptr < end) {
2117 if (thread->return_error2 != BR_OK) {
2118 if (put_user(thread->return_error2, (uint32_t __user *)ptr))
2119 return -EFAULT;
2120 ptr += sizeof(uint32_t);
2121 binder_stat_br(proc, thread, thread->return_error2);
2122 if (ptr == end)
2123 goto done;
2124 thread->return_error2 = BR_OK;
2125 }
2126 if (put_user(thread->return_error, (uint32_t __user *)ptr))
2127 return -EFAULT;
2128 ptr += sizeof(uint32_t);
2129 binder_stat_br(proc, thread, thread->return_error);
2130 thread->return_error = BR_OK;
2131 goto done;
2132 }
2135 thread->looper |= BINDER_LOOPER_STATE_WAITING;
2136 if (wait_for_proc_work)
2137 proc->ready_threads++;
2139 binder_unlock(__func__);
2141 trace_binder_wait_for_work(wait_for_proc_work,
2142 !!thread->transaction_stack,
2143 !list_empty(&thread->todo));
2144 if (wait_for_proc_work) {
2145 if (!(thread->looper & (BINDER_LOOPER_STATE_REGISTERED |
2146 BINDER_LOOPER_STATE_ENTERED))) {
2147 binder_user_error("%d:%d ERROR: Thread waiting for process work before calling BC_REGISTER_LOOPER or BC_ENTER_LOOPER (state %x)\n",
2148 proc->pid, thread->pid, thread->looper);
2149 wait_event_interruptible(binder_user_error_wait,
2150 binder_stop_on_user_error < 2);
2151 }
2152 binder_set_nice(proc->default_priority);
2153 if (non_block) {
2154 if (!binder_has_proc_work(proc, thread))
2155 ret = -EAGAIN;
2156 } else
2157 ret = wait_event_freezable_exclusive(proc->wait, binder_has_proc_work(proc, thread));
2158 } else {
2159 if (non_block) {
2160 if (!binder_has_thread_work(thread))
2161 ret = -EAGAIN;
2162 } else
2163 ret = wait_event_freezable(thread->wait, binder_has_thread_work(thread));
2164 }
2166 binder_lock(__func__);
2168 if (wait_for_proc_work)
2169 proc->ready_threads--;
2170 thread->looper &= ~BINDER_LOOPER_STATE_WAITING;
2172 if (ret)
2173 return ret;
2175 while (1) {
2176 uint32_t cmd;
2177 struct binder_transaction_data tr;
2178 struct binder_work *w;
2179 struct binder_transaction *t = NULL;
2181 if (!list_empty(&thread->todo))
2182 w = list_first_entry(&thread->todo, struct binder_work, entry);
2183 else if (!list_empty(&proc->todo) && wait_for_proc_work)
2184 w = list_first_entry(&proc->todo, struct binder_work, entry);
2185 else {
2186 if (ptr - buffer == 4 && !(thread->looper & BINDER_LOOPER_STATE_NEED_RETURN)) /* no data added */
2187 goto retry;
2188 break;
2189 }
2191 if (end - ptr < sizeof(tr) + 4)
2192 break;
2194 switch (w->type) {
2195 case BINDER_WORK_TRANSACTION: {
2196 t = container_of(w, struct binder_transaction, work);
2197 } break;
2198 case BINDER_WORK_TRANSACTION_COMPLETE: {
2199 cmd = BR_TRANSACTION_COMPLETE;
2200 if (put_user(cmd, (uint32_t __user *)ptr))
2201 return -EFAULT;
2202 ptr += sizeof(uint32_t);
2204 binder_stat_br(proc, thread, cmd);
2205 binder_debug(BINDER_DEBUG_TRANSACTION_COMPLETE,
2206 "%d:%d BR_TRANSACTION_COMPLETE\n",
2207 proc->pid, thread->pid);
2209 list_del(&w->entry);
2210 kfree(w);
2211 binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
2212 } break;
2213 case BINDER_WORK_NODE: {
2214 struct binder_node *node = container_of(w, struct binder_node, work);
2215 uint32_t cmd = BR_NOOP;
2216 const char *cmd_name;
2217 int strong = node->internal_strong_refs || node->local_strong_refs;
2218 int weak = !hlist_empty(&node->refs) || node->local_weak_refs || strong;
2219 if (weak && !node->has_weak_ref) {
2220 cmd = BR_INCREFS;
2221 cmd_name = "BR_INCREFS";
2222 node->has_weak_ref = 1;
2223 node->pending_weak_ref = 1;
2224 node->local_weak_refs++;
2225 } else if (strong && !node->has_strong_ref) {
2226 cmd = BR_ACQUIRE;
2227 cmd_name = "BR_ACQUIRE";
2228 node->has_strong_ref = 1;
2229 node->pending_strong_ref = 1;
2230 node->local_strong_refs++;
2231 } else if (!strong && node->has_strong_ref) {
2232 cmd = BR_RELEASE;
2233 cmd_name = "BR_RELEASE";
2234 node->has_strong_ref = 0;
2235 } else if (!weak && node->has_weak_ref) {
2236 cmd = BR_DECREFS;
2237 cmd_name = "BR_DECREFS";
2238 node->has_weak_ref = 0;
2239 }
2240 if (cmd != BR_NOOP) {
2241 if (put_user(cmd, (uint32_t __user *)ptr))
2242 return -EFAULT;
2243 ptr += sizeof(uint32_t);
2244 if (put_user(node->ptr,
2245 (binder_uintptr_t __user *)ptr))
2246 return -EFAULT;
2247 ptr += sizeof(binder_uintptr_t);
2248 if (put_user(node->cookie,
2249 (binder_uintptr_t __user *)ptr))
2250 return -EFAULT;
2251 ptr += sizeof(binder_uintptr_t);
2253 binder_stat_br(proc, thread, cmd);
2254 binder_debug(BINDER_DEBUG_USER_REFS,
2255 "%d:%d %s %d u%016llx c%016llx\n",
2256 proc->pid, thread->pid, cmd_name,
2257 node->debug_id,
2258 (u64)node->ptr, (u64)node->cookie);
2259 } else {
2260 list_del_init(&w->entry);
2261 if (!weak && !strong) {
2262 binder_debug(BINDER_DEBUG_INTERNAL_REFS,
2263 "%d:%d node %d u%016llx c%016llx deleted\n",
2264 proc->pid, thread->pid,
2265 node->debug_id,
2266 (u64)node->ptr,
2267 (u64)node->cookie);
2268 rb_erase(&node->rb_node, &proc->nodes);
2269 kfree(node);
2270 binder_stats_deleted(BINDER_STAT_NODE);
2271 } else {
2272 binder_debug(BINDER_DEBUG_INTERNAL_REFS,
2273 "%d:%d node %d u%016llx c%016llx state unchanged\n",
2274 proc->pid, thread->pid,
2275 node->debug_id,
2276 (u64)node->ptr,
2277 (u64)node->cookie);
2278 }
2279 }
2280 } break;
2281 case BINDER_WORK_DEAD_BINDER:
2282 case BINDER_WORK_DEAD_BINDER_AND_CLEAR:
2283 case BINDER_WORK_CLEAR_DEATH_NOTIFICATION: {
2284 struct binder_ref_death *death;
2285 uint32_t cmd;
2287 death = container_of(w, struct binder_ref_death, work);
2288 if (w->type == BINDER_WORK_CLEAR_DEATH_NOTIFICATION)
2289 cmd = BR_CLEAR_DEATH_NOTIFICATION_DONE;
2290 else
2291 cmd = BR_DEAD_BINDER;
2292 if (put_user(cmd, (uint32_t __user *)ptr))
2293 return -EFAULT;
2294 ptr += sizeof(uint32_t);
2295 if (put_user(death->cookie,
2296 (binder_uintptr_t __user *)ptr))
2297 return -EFAULT;
2298 ptr += sizeof(binder_uintptr_t);
2299 binder_stat_br(proc, thread, cmd);
2300 binder_debug(BINDER_DEBUG_DEATH_NOTIFICATION,
2301 "%d:%d %s %016llx\n",
2302 proc->pid, thread->pid,
2303 cmd == BR_DEAD_BINDER ?
2304 "BR_DEAD_BINDER" :
2305 "BR_CLEAR_DEATH_NOTIFICATION_DONE",
2306 (u64)death->cookie);
2308 if (w->type == BINDER_WORK_CLEAR_DEATH_NOTIFICATION) {
2309 list_del(&w->entry);
2310 kfree(death);
2311 binder_stats_deleted(BINDER_STAT_DEATH);
2312 } else
2313 list_move(&w->entry, &proc->delivered_death);
2314 if (cmd == BR_DEAD_BINDER)
2315 goto done; /* DEAD_BINDER notifications can cause transactions */
2316 } break;
2317 }
2319 if (!t)
2320 continue;
2322 BUG_ON(t->buffer == NULL);
2323 if (t->buffer->target_node) {
2324 struct binder_node *target_node = t->buffer->target_node;
2325 tr.target.ptr = target_node->ptr;
2326 tr.cookie = target_node->cookie;
2327 t->saved_priority = task_nice(current);
2328 if (t->priority < target_node->min_priority &&
2329 !(t->flags & TF_ONE_WAY))
2330 binder_set_nice(t->priority);
2331 else if (!(t->flags & TF_ONE_WAY) ||
2332 t->saved_priority > target_node->min_priority)
2333 binder_set_nice(target_node->min_priority);
2334 cmd = BR_TRANSACTION;
2335 } else {
2336 tr.target.ptr = 0;
2337 tr.cookie = 0;
2338 cmd = BR_REPLY;
2339 }
2340 tr.code = t->code;
2341 tr.flags = t->flags;
2342 tr.sender_euid = from_kuid(current_user_ns(), t->sender_euid);
2344 if (t->from) {
2345 struct task_struct *sender = t->from->proc->tsk;
2346 tr.sender_pid = task_tgid_nr_ns(sender,
2347 task_active_pid_ns(current));
2348 } else {
2349 tr.sender_pid = 0;
2350 }
2352 tr.data_size = t->buffer->data_size;
2353 tr.offsets_size = t->buffer->offsets_size;
2354 tr.data.ptr.buffer = (binder_uintptr_t)(
2355 (uintptr_t)t->buffer->data +
2356 proc->user_buffer_offset);
2357 tr.data.ptr.offsets = tr.data.ptr.buffer +
2358 ALIGN(t->buffer->data_size,
2359 sizeof(void *));
2361 if (put_user(cmd, (uint32_t __user *)ptr))
2362 return -EFAULT;
2363 ptr += sizeof(uint32_t);
2364 if (copy_to_user(ptr, &tr, sizeof(tr)))
2365 return -EFAULT;
2366 ptr += sizeof(tr);
2368 trace_binder_transaction_received(t);
2369 binder_stat_br(proc, thread, cmd);
2370 binder_debug(BINDER_DEBUG_TRANSACTION,
2371 "%d:%d %s %d %d:%d, cmd %d size %zd-%zd ptr %016llx-%016llx\n",
2372 proc->pid, thread->pid,
2373 (cmd == BR_TRANSACTION) ? "BR_TRANSACTION" :
2374 "BR_REPLY",
2375 t->debug_id, t->from ? t->from->proc->pid : 0,
2376 t->from ? t->from->pid : 0, cmd,
2377 t->buffer->data_size, t->buffer->offsets_size,
2378 (u64)tr.data.ptr.buffer, (u64)tr.data.ptr.offsets);
2380 list_del(&t->work.entry);
2381 t->buffer->allow_user_free = 1;
2382 if (cmd == BR_TRANSACTION && !(t->flags & TF_ONE_WAY)) {
2383 t->to_parent = thread->transaction_stack;
2384 t->to_thread = thread;
2385 thread->transaction_stack = t;
2386 } else {
2387 t->buffer->transaction = NULL;
2388 kfree(t);
2389 binder_stats_deleted(BINDER_STAT_TRANSACTION);
2390 }
2391 break;
2392 }
2394 done:
2396 *consumed = ptr - buffer;
2397 if (proc->requested_threads + proc->ready_threads == 0 &&
2398 proc->requested_threads_started < proc->max_threads &&
2399 (thread->looper & (BINDER_LOOPER_STATE_REGISTERED |
2400 BINDER_LOOPER_STATE_ENTERED))
2401 /* user space fails to spawn a new thread if this check is left out */) {
2402 proc->requested_threads++;
2403 binder_debug(BINDER_DEBUG_THREADS,
2404 "%d:%d BR_SPAWN_LOOPER\n",
2405 proc->pid, thread->pid);
2406 if (put_user(BR_SPAWN_LOOPER, (uint32_t __user *)buffer))
2407 return -EFAULT;
2408 binder_stat_br(proc, thread, BR_SPAWN_LOOPER);
2409 }
2410 return 0;
2411 }
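/*
 * binder_release_work() - flush a todo list that can no longer be
 * delivered: two-way transactions get a BR_DEAD_REPLY, while
 * one-way transactions, completions and death notifications are
 * simply freed.
 */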
2413 static void binder_release_work(struct list_head *list)
2414 {
2415 struct binder_work *w;
2416 while (!list_empty(list)) {
2417 w = list_first_entry(list, struct binder_work, entry);
2418 list_del_init(&w->entry);
2419 switch (w->type) {
2420 case BINDER_WORK_TRANSACTION: {
2421 struct binder_transaction *t;
2423 t = container_of(w, struct binder_transaction, work);
2424 if (t->buffer->target_node &&
2425 !(t->flags & TF_ONE_WAY)) {
2426 binder_send_failed_reply(t, BR_DEAD_REPLY);
2427 } else {
2428 binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
2429 "undelivered transaction %d\n",
2430 t->debug_id);
2431 t->buffer->transaction = NULL;
2432 kfree(t);
2433 binder_stats_deleted(BINDER_STAT_TRANSACTION);
2434 }
2435 } break;
2436 case BINDER_WORK_TRANSACTION_COMPLETE: {
2437 binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
2438 "undelivered TRANSACTION_COMPLETE\n");
2439 kfree(w);
2440 binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
2441 } break;
2442 case BINDER_WORK_DEAD_BINDER_AND_CLEAR:
2443 case BINDER_WORK_CLEAR_DEATH_NOTIFICATION: {
2444 struct binder_ref_death *death;
2446 death = container_of(w, struct binder_ref_death, work);
2447 binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
2448 "undelivered death notification, %016llx\n",
2449 (u64)death->cookie);
2450 kfree(death);
2451 binder_stats_deleted(BINDER_STAT_DEATH);
2452 } break;
2453 default:
2454 pr_err("unexpected work type, %d, not freed\n",
2455 w->type);
2456 break;
2457 }
2458 }
2459 }
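/*
 * binder_get_thread() - look up the calling thread in the proc's
 * pid-keyed rbtree, allocating and inserting a new binder_thread
 * (with NEED_RETURN set) on first use; returns NULL only when the
 * allocation fails.
 */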
2462 static struct binder_thread *binder_get_thread(struct binder_proc *proc)
2463 {
2464 struct binder_thread *thread = NULL;
2465 struct rb_node *parent = NULL;
2466 struct rb_node **p = &proc->threads.rb_node;
2468 while (*p) {
2469 parent = *p;
2470 thread = rb_entry(parent, struct binder_thread, rb_node);
2472 if (current->pid < thread->pid)
2473 p = &(*p)->rb_left;
2474 else if (current->pid > thread->pid)
2475 p = &(*p)->rb_right;
2476 else
2477 break;
2478 }
2479 if (*p == NULL) {
2480 thread = kzalloc(sizeof(*thread), GFP_KERNEL);
2481 if (thread == NULL)
2482 return NULL;
2483 binder_stats_created(BINDER_STAT_THREAD);
2484 thread->proc = proc;
2485 thread->pid = current->pid;
2486 init_waitqueue_head(&thread->wait);
2487 INIT_LIST_HEAD(&thread->todo);
2488 rb_link_node(&thread->rb_node, parent, p);
2489 rb_insert_color(&thread->rb_node, &proc->threads);
2490 thread->looper |= BINDER_LOOPER_STATE_NEED_RETURN;
2491 thread->return_error = BR_OK;
2492 thread->return_error2 = BR_OK;
2493 }
2494 return thread;
2495 }
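/*
 * binder_free_thread() - unlink a thread and unwind its transaction
 * stack, failing a reply still owed to the other side with
 * BR_DEAD_REPLY; returns the number of transactions that were
 * still active.
 */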
2497 static int binder_free_thread(struct binder_proc *proc,
2498 struct binder_thread *thread)
2499 {
2500 struct binder_transaction *t;
2501 struct binder_transaction *send_reply = NULL;
2502 int active_transactions = 0;
2504 rb_erase(&thread->rb_node, &proc->threads);
2505 t = thread->transaction_stack;
2506 if (t && t->to_thread == thread)
2507 send_reply = t;
2508 while (t) {
2509 active_transactions++;
2510 binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
2511 "release %d:%d transaction %d %s, still active\n",
2512 proc->pid, thread->pid,
2513 t->debug_id,
2514 (t->to_thread == thread) ? "in" : "out");
2516 if (t->to_thread == thread) {
2517 t->to_proc = NULL;
2518 t->to_thread = NULL;
2519 if (t->buffer) {
2520 t->buffer->transaction = NULL;
2521 t->buffer = NULL;
2522 }
2523 t = t->to_parent;
2524 } else if (t->from == thread) {
2525 t->from = NULL;
2526 t = t->from_parent;
2527 } else
2528 BUG();
2529 }
2530 if (send_reply)
2531 binder_send_failed_reply(send_reply, BR_DEAD_REPLY);
2532 binder_release_work(&thread->todo);
2533 kfree(thread);
2534 binder_stats_deleted(BINDER_STAT_THREAD);
2535 return active_transactions;
2536 }
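/*
 * binder_poll() - report POLLIN when the thread (or the process,
 * if the thread has no stack or work of its own) has work queued,
 * otherwise register on the corresponding wait queue.
 */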
2538 static unsigned int binder_poll(struct file *filp,
2539 struct poll_table_struct *wait)
2540 {
2541 struct binder_proc *proc = filp->private_data;
2542 struct binder_thread *thread = NULL;
2543 int wait_for_proc_work;
2545 binder_lock(__func__);
2547 thread = binder_get_thread(proc);
2549 wait_for_proc_work = thread->transaction_stack == NULL &&
2550 list_empty(&thread->todo) && thread->return_error == BR_OK;
2552 binder_unlock(__func__);
2554 if (wait_for_proc_work) {
2555 if (binder_has_proc_work(proc, thread))
2556 return POLLIN;
2557 poll_wait(filp, &proc->wait, wait);
2558 if (binder_has_proc_work(proc, thread))
2559 return POLLIN;
2560 } else {
2561 if (binder_has_thread_work(thread))
2562 return POLLIN;
2563 poll_wait(filp, &thread->wait, wait);
2564 if (binder_has_thread_work(thread))
2565 return POLLIN;
2566 }
2567 return 0;
2568 }
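/*
 * binder_ioctl() - top-level command dispatch. For
 * BINDER_WRITE_READ the write half is processed before the read
 * half, and the partially updated binder_write_read struct is
 * copied back to user space even when one half fails.
 */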
2570 static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
2571 {
2572 int ret;
2573 struct binder_proc *proc = filp->private_data;
2574 struct binder_thread *thread;
2575 unsigned int size = _IOC_SIZE(cmd);
2576 void __user *ubuf = (void __user *)arg;
2578 /*pr_info("binder_ioctl: %d:%d %x %lx\n", proc->pid, current->pid, cmd, arg);*/
2580 trace_binder_ioctl(cmd, arg);
2582 ret = wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2);
2583 if (ret)
2584 goto err_unlocked;
2586 binder_lock(__func__);
2587 thread = binder_get_thread(proc);
2588 if (thread == NULL) {
2589 ret = -ENOMEM;
2590 goto err;
2591 }
2593 switch (cmd) {
2594 case BINDER_WRITE_READ: {
2595 struct binder_write_read bwr;
2596 if (size != sizeof(struct binder_write_read)) {
2597 ret = -EINVAL;
2598 goto err;
2599 }
2600 if (copy_from_user(&bwr, ubuf, sizeof(bwr))) {
2601 ret = -EFAULT;
2602 goto err;
2603 }
2604 binder_debug(BINDER_DEBUG_READ_WRITE,
2605 "%d:%d write %lld at %016llx, read %lld at %016llx\n",
2606 proc->pid, thread->pid,
2607 (u64)bwr.write_size, (u64)bwr.write_buffer,
2608 (u64)bwr.read_size, (u64)bwr.read_buffer);
2610 if (bwr.write_size > 0) {
2611 ret = binder_thread_write(proc, thread,
2612 bwr.write_buffer,
2613 bwr.write_size,
2614 &bwr.write_consumed);
2615 trace_binder_write_done(ret);
2616 if (ret < 0) {
2617 bwr.read_consumed = 0;
2618 if (copy_to_user(ubuf, &bwr, sizeof(bwr)))
2619 ret = -EFAULT;
2620 goto err;
2621 }
2622 }
2623 if (bwr.read_size > 0) {
2624 ret = binder_thread_read(proc, thread, bwr.read_buffer,
2625 bwr.read_size,
2626 &bwr.read_consumed,
2627 filp->f_flags & O_NONBLOCK);
2628 trace_binder_read_done(ret);
2629 if (!list_empty(&proc->todo))
2630 wake_up_interruptible(&proc->wait);
2631 if (ret < 0) {
2632 if (copy_to_user(ubuf, &bwr, sizeof(bwr)))
2633 ret = -EFAULT;
2634 goto err;
2635 }
2636 }
2637 binder_debug(BINDER_DEBUG_READ_WRITE,
2638 "%d:%d wrote %lld of %lld, read return %lld of %lld\n",
2639 proc->pid, thread->pid,
2640 (u64)bwr.write_consumed, (u64)bwr.write_size,
2641 (u64)bwr.read_consumed, (u64)bwr.read_size);
2642 if (copy_to_user(ubuf, &bwr, sizeof(bwr))) {
2643 ret = -EFAULT;
2644 goto err;
2645 }
2646 break;
2647 }
2648 case BINDER_SET_MAX_THREADS:
2649 if (copy_from_user(&proc->max_threads, ubuf, sizeof(proc->max_threads))) {
2650 ret = -EFAULT;
2651 goto err;
2652 }
2653 break;
2654 case BINDER_SET_CONTEXT_MGR:
2655 if (binder_context_mgr_node != NULL) {
2656 pr_err("BINDER_SET_CONTEXT_MGR already set\n");
2657 ret = -EBUSY;
2658 goto err;
2659 }
2660 if (uid_valid(binder_context_mgr_uid)) {
2661 if (!uid_eq(binder_context_mgr_uid, current->cred->euid)) {
2662 pr_err("BINDER_SET_CONTEXT_MGR bad uid %d != %d\n",
2663 from_kuid(&init_user_ns, current->cred->euid),
2664 from_kuid(&init_user_ns, binder_context_mgr_uid));
2665 ret = -EPERM;
2666 goto err;
2667 }
2668 } else
2669 binder_context_mgr_uid = current->cred->euid;
2670 binder_context_mgr_node = binder_new_node(proc, 0, 0);
2671 if (binder_context_mgr_node == NULL) {
2672 ret = -ENOMEM;
2673 goto err;
2674 }
2675 binder_context_mgr_node->local_weak_refs++;
2676 binder_context_mgr_node->local_strong_refs++;
2677 binder_context_mgr_node->has_strong_ref = 1;
2678 binder_context_mgr_node->has_weak_ref = 1;
2679 break;
2680 case BINDER_THREAD_EXIT:
2681 binder_debug(BINDER_DEBUG_THREADS, "%d:%d exit\n",
2682 proc->pid, thread->pid);
2683 binder_free_thread(proc, thread);
2684 thread = NULL;
2685 break;
2686 case BINDER_VERSION:
2687 if (size != sizeof(struct binder_version)) {
2688 ret = -EINVAL;
2689 goto err;
2690 }
2691 if (put_user(BINDER_CURRENT_PROTOCOL_VERSION, &((struct binder_version *)ubuf)->protocol_version)) {
2692 ret = -EFAULT;
2693 goto err;
2694 }
2695 break;
2696 default:
2697 ret = -EINVAL;
2698 goto err;
2699 }
2700 ret = 0;
2701 err:
2702 if (thread)
2703 thread->looper &= ~BINDER_LOOPER_STATE_NEED_RETURN;
2704 binder_unlock(__func__);
2705 wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2);
2706 if (ret && ret != -ERESTARTSYS)
2707 pr_info("%d:%d ioctl %x %lx returned %d\n", proc->pid, current->pid, cmd, arg, ret);
2708 err_unlocked:
2709 trace_binder_ioctl_done(ret);
2710 return ret;
2711 }
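/*
 * VMA open/close callbacks for the binder mapping; close drops
 * the cached vma pointers and defers putting the files struct to
 * the workqueue.
 */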
2713 static void binder_vma_open(struct vm_area_struct *vma)
2714 {
2715 struct binder_proc *proc = vma->vm_private_data;
2716 binder_debug(BINDER_DEBUG_OPEN_CLOSE,
2717 "%d open vm area %lx-%lx (%ld K) vma %lx pagep %lx\n",
2718 proc->pid, vma->vm_start, vma->vm_end,
2719 (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
2720 (unsigned long)pgprot_val(vma->vm_page_prot));
2721 }
2723 static void binder_vma_close(struct vm_area_struct *vma)
2724 {
2725 struct binder_proc *proc = vma->vm_private_data;
2726 binder_debug(BINDER_DEBUG_OPEN_CLOSE,
2727 "%d close vm area %lx-%lx (%ld K) vma %lx pagep %lx\n",
2728 proc->pid, vma->vm_start, vma->vm_end,
2729 (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
2730 (unsigned long)pgprot_val(vma->vm_page_prot));
2731 proc->vma = NULL;
2732 proc->vma_vm_mm = NULL;
2733 binder_defer_work(proc, BINDER_DEFERRED_PUT_FILES);
2734 }
2736 static struct vm_operations_struct binder_vm_ops = {
2737 .open = binder_vma_open,
2738 .close = binder_vma_close,
2739 };
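/*
 * binder_mmap() - set up the single buffer mapping for a proc:
 * at most 4MB, never writable from user space, backed by a
 * matching kernel vmalloc area whose constant offset from the
 * user address is kept in user_buffer_offset for later pointer
 * translation.
 */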
2741 static int binder_mmap(struct file *filp, struct vm_area_struct *vma)
2742 {
2743 int ret;
2744 struct vm_struct *area;
2745 struct binder_proc *proc = filp->private_data;
2746 const char *failure_string;
2747 struct binder_buffer *buffer;
2749 if (proc->tsk != current)
2750 return -EINVAL;
2752 if ((vma->vm_end - vma->vm_start) > SZ_4M)
2753 vma->vm_end = vma->vm_start + SZ_4M;
2755 binder_debug(BINDER_DEBUG_OPEN_CLOSE,
2756 "binder_mmap: %d %lx-%lx (%ld K) vma %lx pagep %lx\n",
2757 proc->pid, vma->vm_start, vma->vm_end,
2758 (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
2759 (unsigned long)pgprot_val(vma->vm_page_prot));
2761 if (vma->vm_flags & FORBIDDEN_MMAP_FLAGS) {
2762 ret = -EPERM;
2763 failure_string = "bad vm_flags";
2764 goto err_bad_arg;
2765 }
2766 vma->vm_flags = (vma->vm_flags | VM_DONTCOPY) & ~VM_MAYWRITE;
2768 mutex_lock(&binder_mmap_lock);
2769 if (proc->buffer) {
2770 ret = -EBUSY;
2771 failure_string = "already mapped";
2772 goto err_already_mapped;
2773 }
2775 area = get_vm_area(vma->vm_end - vma->vm_start, VM_IOREMAP);
2776 if (area == NULL) {
2777 ret = -ENOMEM;
2778 failure_string = "get_vm_area";
2779 goto err_get_vm_area_failed;
2780 }
2781 proc->buffer = area->addr;
2782 proc->user_buffer_offset = vma->vm_start - (uintptr_t)proc->buffer;
2783 mutex_unlock(&binder_mmap_lock);
2785 #ifdef CONFIG_CPU_CACHE_VIPT
2786 if (cache_is_vipt_aliasing()) {
2787 while (CACHE_COLOUR((vma->vm_start ^ (uint32_t)proc->buffer))) {
2788 pr_info("binder_mmap: %d %lx-%lx maps %p bad alignment\n", proc->pid, vma->vm_start, vma->vm_end, proc->buffer);
2789 vma->vm_start += PAGE_SIZE;
2790 }
2791 }
2792 #endif
2793 proc->pages = kzalloc(sizeof(proc->pages[0]) * ((vma->vm_end - vma->vm_start) / PAGE_SIZE), GFP_KERNEL);
2794 if (proc->pages == NULL) {
2795 ret = -ENOMEM;
2796 failure_string = "alloc page array";
2797 goto err_alloc_pages_failed;
2798 }
2799 proc->buffer_size = vma->vm_end - vma->vm_start;
2801 vma->vm_ops = &binder_vm_ops;
2802 vma->vm_private_data = proc;
2804 if (binder_update_page_range(proc, 1, proc->buffer, proc->buffer + PAGE_SIZE, vma)) {
2805 ret = -ENOMEM;
2806 failure_string = "alloc small buf";
2807 goto err_alloc_small_buf_failed;
2808 }
2809 buffer = proc->buffer;
2810 INIT_LIST_HEAD(&proc->buffers);
2811 list_add(&buffer->entry, &proc->buffers);
2812 buffer->free = 1;
2813 binder_insert_free_buffer(proc, buffer);
2814 proc->free_async_space = proc->buffer_size / 2;
2815 barrier();
2816 proc->files = get_files_struct(current);
2817 proc->vma = vma;
2818 proc->vma_vm_mm = vma->vm_mm;
2820 /*pr_info("binder_mmap: %d %lx-%lx maps %p\n",
2821 proc->pid, vma->vm_start, vma->vm_end, proc->buffer);*/
2822 return 0;
2824 err_alloc_small_buf_failed:
2825 kfree(proc->pages);
2826 proc->pages = NULL;
2827 err_alloc_pages_failed:
2828 mutex_lock(&binder_mmap_lock);
2829 vfree(proc->buffer);
2830 proc->buffer = NULL;
2831 err_get_vm_area_failed:
2832 err_already_mapped:
2833 mutex_unlock(&binder_mmap_lock);
2834 err_bad_arg:
2835 pr_err("binder_mmap: %d %lx-%lx %s failed %d\n",
2836 proc->pid, vma->vm_start, vma->vm_end, failure_string, ret);
2837 return ret;
2838 }
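/*
 * binder_open() - allocate a binder_proc for the opening task,
 * add it to the global proc list and create a debugfs entry named
 * after its pid.
 */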
2840 static int binder_open(struct inode *nodp, struct file *filp)
2841 {
2842 struct binder_proc *proc;
2844 binder_debug(BINDER_DEBUG_OPEN_CLOSE, "binder_open: %d:%d\n",
2845 current->group_leader->pid, current->pid);
2847 proc = kzalloc(sizeof(*proc), GFP_KERNEL);
2848 if (proc == NULL)
2849 return -ENOMEM;
2850 get_task_struct(current);
2851 proc->tsk = current;
2852 INIT_LIST_HEAD(&proc->todo);
2853 init_waitqueue_head(&proc->wait);
2854 proc->default_priority = task_nice(current);
2856 binder_lock(__func__);
2858 binder_stats_created(BINDER_STAT_PROC);
2859 hlist_add_head(&proc->proc_node, &binder_procs);
2860 proc->pid = current->group_leader->pid;
2861 INIT_LIST_HEAD(&proc->delivered_death);
2862 filp->private_data = proc;
2864 binder_unlock(__func__);
2866 if (binder_debugfs_dir_entry_proc) {
2867 char strbuf[11];
2868 snprintf(strbuf, sizeof(strbuf), "%u", proc->pid);
2869 proc->debugfs_entry = debugfs_create_file(strbuf, S_IRUGO,
2870 binder_debugfs_dir_entry_proc, proc, &binder_proc_fops);
2871 }
2873 return 0;
2874 }
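/*
 * Flush and release are funnelled through the deferred workqueue;
 * binder_deferred_flush() marks every thread with NEED_RETURN and
 * wakes any that are waiting so user space drops out of blocked
 * reads.
 */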
2876 static int binder_flush(struct file *filp, fl_owner_t id)
2877 {
2878 struct binder_proc *proc = filp->private_data;
2880 binder_defer_work(proc, BINDER_DEFERRED_FLUSH);
2882 return 0;
2883 }
2885 static void binder_deferred_flush(struct binder_proc *proc)
2886 {
2887 struct rb_node *n;
2888 int wake_count = 0;
2889 for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n)) {
2890 struct binder_thread *thread = rb_entry(n, struct binder_thread, rb_node);
2891 thread->looper |= BINDER_LOOPER_STATE_NEED_RETURN;
2892 if (thread->looper & BINDER_LOOPER_STATE_WAITING) {
2893 wake_up_interruptible(&thread->wait);
2894 wake_count++;
2895 }
2896 }
2897 wake_up_interruptible_all(&proc->wait);
2899 binder_debug(BINDER_DEBUG_OPEN_CLOSE,
2900 "binder_flush: %d woke %d threads\n", proc->pid,
2901 wake_count);
2902 }
2904 static int binder_release(struct inode *nodp, struct file *filp)
2905 {
2906 struct binder_proc *proc = filp->private_data;
2907 debugfs_remove(proc->debugfs_entry);
2908 binder_defer_work(proc, BINDER_DEFERRED_RELEASE);
2910 return 0;
2911 }
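/*
 * binder_node_release() - detach a node from its dying proc. A node
 * with no remaining refs is freed outright; otherwise it moves to
 * the global dead-nodes list and each ref holder with a registered
 * death notification is queued a BINDER_WORK_DEAD_BINDER. Returns
 * the updated incoming-ref tally.
 */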
2913 static int binder_node_release(struct binder_node *node, int refs)
2914 {
2915 struct binder_ref *ref;
2916 int death = 0;
2918 list_del_init(&node->work.entry);
2919 binder_release_work(&node->async_todo);
2921 if (hlist_empty(&node->refs)) {
2922 kfree(node);
2923 binder_stats_deleted(BINDER_STAT_NODE);
2925 return refs;
2926 }
2928 node->proc = NULL;
2929 node->local_strong_refs = 0;
2930 node->local_weak_refs = 0;
2931 hlist_add_head(&node->dead_node, &binder_dead_nodes);
2933 hlist_for_each_entry(ref, &node->refs, node_entry) {
2934 refs++;
2936 if (!ref->death)
2937 continue;
2939 death++;
2941 if (list_empty(&ref->death->work.entry)) {
2942 ref->death->work.type = BINDER_WORK_DEAD_BINDER;
2943 list_add_tail(&ref->death->work.entry,
2944 &ref->proc->todo);
2945 wake_up_interruptible(&ref->proc->wait);
2946 } else
2947 BUG();
2948 }
2950 binder_debug(BINDER_DEBUG_DEAD_BINDER,
2951 "node %d now dead, refs %d, death %d\n",
2952 node->debug_id, refs, death);
2954 return refs;
2955 }
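/*
 * binder_deferred_release() - final teardown of a closed proc:
 * release its threads, nodes, refs, pending work and buffers, then
 * unmap and free any remaining pages before freeing the proc.
 */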
2957 static void binder_deferred_release(struct binder_proc *proc)
2958 {
2959 struct binder_transaction *t;
2960 struct rb_node *n;
2961 int threads, nodes, incoming_refs, outgoing_refs, buffers,
2962 active_transactions, page_count;
2964 BUG_ON(proc->vma);
2965 BUG_ON(proc->files);
2967 hlist_del(&proc->proc_node);
2969 if (binder_context_mgr_node && binder_context_mgr_node->proc == proc) {
2970 binder_debug(BINDER_DEBUG_DEAD_BINDER,
2971 "%s: %d context_mgr_node gone\n",
2972 __func__, proc->pid);
2973 binder_context_mgr_node = NULL;
2974 }
2976 threads = 0;
2977 active_transactions = 0;
2978 while ((n = rb_first(&proc->threads))) {
2979 struct binder_thread *thread;
2981 thread = rb_entry(n, struct binder_thread, rb_node);
2982 threads++;
2983 active_transactions += binder_free_thread(proc, thread);
2984 }
2986 nodes = 0;
2987 incoming_refs = 0;
2988 while ((n = rb_first(&proc->nodes))) {
2989 struct binder_node *node;
2991 node = rb_entry(n, struct binder_node, rb_node);
2992 nodes++;
2993 rb_erase(&node->rb_node, &proc->nodes);
2994 incoming_refs = binder_node_release(node, incoming_refs);
2995 }
2997 outgoing_refs = 0;
2998 while ((n = rb_first(&proc->refs_by_desc))) {
2999 struct binder_ref *ref;
3001 ref = rb_entry(n, struct binder_ref, rb_node_desc);
3002 outgoing_refs++;
3003 binder_delete_ref(ref);
3004 }
3006 binder_release_work(&proc->todo);
3007 binder_release_work(&proc->delivered_death);
3009 buffers = 0;
3010 while ((n = rb_first(&proc->allocated_buffers))) {
3011 struct binder_buffer *buffer;
3013 buffer = rb_entry(n, struct binder_buffer, rb_node);
3015 t = buffer->transaction;
3016 if (t) {
3017 t->buffer = NULL;
3018 buffer->transaction = NULL;
3019 pr_err("release proc %d, transaction %d, not freed\n",
3020 proc->pid, t->debug_id);
3021 /*BUG();*/
3022 }
3024 binder_free_buf(proc, buffer);
3025 buffers++;
3026 }
3028 binder_stats_deleted(BINDER_STAT_PROC);
3030 page_count = 0;
3031 if (proc->pages) {
3032 int i;
3034 for (i = 0; i < proc->buffer_size / PAGE_SIZE; i++) {
3035 void *page_addr;
3037 if (!proc->pages[i])
3038 continue;
3040 page_addr = proc->buffer + i * PAGE_SIZE;
3041 binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
3042 "%s: %d: page %d at %p not freed\n",
3043 __func__, proc->pid, i, page_addr);
3044 unmap_kernel_range((unsigned long)page_addr, PAGE_SIZE);
3045 __free_page(proc->pages[i]);
3046 page_count++;
3047 }
3048 kfree(proc->pages);
3049 vfree(proc->buffer);
3050 }
3052 put_task_struct(proc->tsk);
3054 binder_debug(BINDER_DEBUG_OPEN_CLOSE,
3055 "%s: %d threads %d, nodes %d (ref %d), refs %d, active transactions %d, buffers %d, pages %d\n",
3056 __func__, proc->pid, threads, nodes, incoming_refs,
3057 outgoing_refs, active_transactions, buffers, page_count);
3059 kfree(proc);
3060 }
3062 static void binder_deferred_func(struct work_struct *work)
3063 {
3064 struct binder_proc *proc;
3065 struct files_struct *files;
3067 int defer;
3068 do {
3069 binder_lock(__func__);
3070 mutex_lock(&binder_deferred_lock);
3071 if (!hlist_empty(&binder_deferred_list)) {
3072 proc = hlist_entry(binder_deferred_list.first,
3073 struct binder_proc, deferred_work_node);
3074 hlist_del_init(&proc->deferred_work_node);
3075 defer = proc->deferred_work;
3076 proc->deferred_work = 0;
3077 } else {
3078 proc = NULL;
3079 defer = 0;
3080 }
3081 mutex_unlock(&binder_deferred_lock);
3083 files = NULL;
3084 if (defer & BINDER_DEFERRED_PUT_FILES) {
3085 files = proc->files;
3086 if (files)
3087 proc->files = NULL;
3088 }
3090 if (defer & BINDER_DEFERRED_FLUSH)
3091 binder_deferred_flush(proc);
3093 if (defer & BINDER_DEFERRED_RELEASE)
3094 binder_deferred_release(proc); /* frees proc */
3096 binder_unlock(__func__);
3097 if (files)
3098 put_files_struct(files);
3099 } while (proc);
3100 }
3101 static DECLARE_WORK(binder_deferred_work, binder_deferred_func);
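/*
 * binder_defer_work() - OR new deferred-work bits into the proc and
 * queue it (at most once) on the global list drained by
 * binder_deferred_func().
 */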
3103 static void
3104 binder_defer_work(struct binder_proc *proc, enum binder_deferred_state defer)
3105 {
3106 mutex_lock(&binder_deferred_lock);
3107 proc->deferred_work |= defer;
3108 if (hlist_unhashed(&proc->deferred_work_node)) {
3109 hlist_add_head(&proc->deferred_work_node,
3110 &binder_deferred_list);
3111 queue_work(binder_deferred_workqueue, &binder_deferred_work);
3113 mutex_unlock(&binder_deferred_lock);
3114 }
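/*
 * The printers below feed the debugfs files: per-transaction,
 * per-buffer, per-work, per-thread, per-node and per-ref dumps
 * that the *_show() functions aggregate.
 */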
3116 static void print_binder_transaction(struct seq_file *m, const char *prefix,
3117 struct binder_transaction *t)
3118 {
3119 seq_printf(m,
3120 "%s %d: %p from %d:%d to %d:%d code %x flags %x pri %ld r%d",
3121 prefix, t->debug_id, t,
3122 t->from ? t->from->proc->pid : 0,
3123 t->from ? t->from->pid : 0,
3124 t->to_proc ? t->to_proc->pid : 0,
3125 t->to_thread ? t->to_thread->pid : 0,
3126 t->code, t->flags, t->priority, t->need_reply);
3127 if (t->buffer == NULL) {
3128 seq_puts(m, " buffer free\n");
3129 return;
3131 if (t->buffer->target_node)
3132 seq_printf(m, " node %d",
3133 t->buffer->target_node->debug_id);
3134 seq_printf(m, " size %zd:%zd data %p\n",
3135 t->buffer->data_size, t->buffer->offsets_size,
3136 t->buffer->data);
3137 }
3139 static void print_binder_buffer(struct seq_file *m, const char *prefix,
3140 struct binder_buffer *buffer)
3141 {
3142 seq_printf(m, "%s %d: %p size %zd:%zd %s\n",
3143 prefix, buffer->debug_id, buffer->data,
3144 buffer->data_size, buffer->offsets_size,
3145 buffer->transaction ? "active" : "delivered");
3148 static void print_binder_work(struct seq_file *m, const char *prefix,
3149 const char *transaction_prefix,
3150 struct binder_work *w)
3152 struct binder_node *node;
3153 struct binder_transaction *t;
3155 switch (w->type) {
3156 case BINDER_WORK_TRANSACTION:
3157 t = container_of(w, struct binder_transaction, work);
3158 print_binder_transaction(m, transaction_prefix, t);
3159 break;
3160 case BINDER_WORK_TRANSACTION_COMPLETE:
3161 seq_printf(m, "%stransaction complete\n", prefix);
3162 break;
3163 case BINDER_WORK_NODE:
3164 node = container_of(w, struct binder_node, work);
3165 seq_printf(m, "%snode work %d: u%016llx c%016llx\n",
3166 prefix, node->debug_id,
3167 (u64)node->ptr, (u64)node->cookie);
3168 break;
3169 case BINDER_WORK_DEAD_BINDER:
3170 seq_printf(m, "%shas dead binder\n", prefix);
3171 break;
3172 case BINDER_WORK_DEAD_BINDER_AND_CLEAR:
3173 seq_printf(m, "%shas cleared dead binder\n", prefix);
3174 break;
3175 case BINDER_WORK_CLEAR_DEATH_NOTIFICATION:
3176 seq_printf(m, "%shas cleared death notification\n", prefix);
3177 break;
3178 default:
3179 seq_printf(m, "%sunknown work: type %d\n", prefix, w->type);
3180 break;
3181 }
3182 }
3184 static void print_binder_thread(struct seq_file *m,
3185 struct binder_thread *thread,
3186 int print_always)
3187 {
3188 struct binder_transaction *t;
3189 struct binder_work *w;
3190 size_t start_pos = m->count;
3191 size_t header_pos;
3193 seq_printf(m, " thread %d: l %02x\n", thread->pid, thread->looper);
3194 header_pos = m->count;
3195 t = thread->transaction_stack;
3196 while (t) {
3197 if (t->from == thread) {
3198 print_binder_transaction(m,
3199 " outgoing transaction", t);
3200 t = t->from_parent;
3201 } else if (t->to_thread == thread) {
3202 print_binder_transaction(m,
3203 " incoming transaction", t);
3204 t = t->to_parent;
3205 } else {
3206 print_binder_transaction(m, " bad transaction", t);
3207 t = NULL;
3208 }
3209 }
3210 list_for_each_entry(w, &thread->todo, entry) {
3211 print_binder_work(m, " ", " pending transaction", w);
3212 }
3213 if (!print_always && m->count == header_pos)
3214 m->count = start_pos;
3215 }
3217 static void print_binder_node(struct seq_file *m, struct binder_node *node)
3218 {
3219 struct binder_ref *ref;
3220 struct binder_work *w;
3221 int count;
3223 count = 0;
3224 hlist_for_each_entry(ref, &node->refs, node_entry)
3225 count++;
3227 seq_printf(m, " node %d: u%016llx c%016llx hs %d hw %d ls %d lw %d is %d iw %d",
3228 node->debug_id, (u64)node->ptr, (u64)node->cookie,
3229 node->has_strong_ref, node->has_weak_ref,
3230 node->local_strong_refs, node->local_weak_refs,
3231 node->internal_strong_refs, count);
3232 if (count) {
3233 seq_puts(m, " proc");
3234 hlist_for_each_entry(ref, &node->refs, node_entry)
3235 seq_printf(m, " %d", ref->proc->pid);
3236 }
3237 seq_puts(m, "\n");
3238 list_for_each_entry(w, &node->async_todo, entry)
3239 print_binder_work(m, " ",
3240 " pending async transaction", w);
3243 static void print_binder_ref(struct seq_file *m, struct binder_ref *ref)
3245 seq_printf(m, " ref %d: desc %d %snode %d s %d w %d d %p\n",
3246 ref->debug_id, ref->desc, ref->node->proc ? "" : "dead ",
3247 ref->node->debug_id, ref->strong, ref->weak, ref->death);
3248 }
3250 static void print_binder_proc(struct seq_file *m,
3251 struct binder_proc *proc, int print_all)
3252 {
3253 struct binder_work *w;
3254 struct rb_node *n;
3255 size_t start_pos = m->count;
3256 size_t header_pos;
3258 seq_printf(m, "proc %d\n", proc->pid);
3259 header_pos = m->count;
3261 for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n))
3262 print_binder_thread(m, rb_entry(n, struct binder_thread,
3263 rb_node), print_all);
3264 for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n)) {
3265 struct binder_node *node = rb_entry(n, struct binder_node,
3266 rb_node);
3267 if (print_all || node->has_async_transaction)
3268 print_binder_node(m, node);
3269 }
3270 if (print_all) {
3271 for (n = rb_first(&proc->refs_by_desc);
3272 n != NULL;
3273 n = rb_next(n))
3274 print_binder_ref(m, rb_entry(n, struct binder_ref,
3275 rb_node_desc));
3276 }
3277 for (n = rb_first(&proc->allocated_buffers); n != NULL; n = rb_next(n))
3278 print_binder_buffer(m, " buffer",
3279 rb_entry(n, struct binder_buffer, rb_node));
3280 list_for_each_entry(w, &proc->todo, entry)
3281 print_binder_work(m, " ", " pending transaction", w);
3282 list_for_each_entry(w, &proc->delivered_death, entry) {
3283 seq_puts(m, " has delivered dead binder\n");
3284 break;
3285 }
3286 if (!print_all && m->count == header_pos)
3287 m->count = start_pos;
3288 }
3290 static const char * const binder_return_strings[] = {
3291 "BR_ERROR",
3292 "BR_OK",
3293 "BR_TRANSACTION",
3294 "BR_REPLY",
3295 "BR_ACQUIRE_RESULT",
3296 "BR_DEAD_REPLY",
3297 "BR_TRANSACTION_COMPLETE",
3298 "BR_INCREFS",
3299 "BR_ACQUIRE",
3300 "BR_RELEASE",
3301 "BR_DECREFS",
3302 "BR_ATTEMPT_ACQUIRE",
3303 "BR_NOOP",
3304 "BR_SPAWN_LOOPER",
3305 "BR_FINISHED",
3306 "BR_DEAD_BINDER",
3307 "BR_CLEAR_DEATH_NOTIFICATION_DONE",
3308 "BR_FAILED_REPLY"
3311 static const char * const binder_command_strings[] = {
3312 "BC_TRANSACTION",
3313 "BC_REPLY",
3314 "BC_ACQUIRE_RESULT",
3315 "BC_FREE_BUFFER",
3316 "BC_INCREFS",
3317 "BC_ACQUIRE",
3318 "BC_RELEASE",
3319 "BC_DECREFS",
3320 "BC_INCREFS_DONE",
3321 "BC_ACQUIRE_DONE",
3322 "BC_ATTEMPT_ACQUIRE",
3323 "BC_REGISTER_LOOPER",
3324 "BC_ENTER_LOOPER",
3325 "BC_EXIT_LOOPER",
3326 "BC_REQUEST_DEATH_NOTIFICATION",
3327 "BC_CLEAR_DEATH_NOTIFICATION",
3328 "BC_DEAD_BINDER_DONE"
3331 static const char * const binder_objstat_strings[] = {
3332 "proc",
3333 "thread",
3334 "node",
3335 "ref",
3336 "death",
3337 "transaction",
3338 "transaction_complete"
3341 static void print_binder_stats(struct seq_file *m, const char *prefix,
3342 struct binder_stats *stats)
3344 int i;
3346 BUILD_BUG_ON(ARRAY_SIZE(stats->bc) !=
3347 ARRAY_SIZE(binder_command_strings));
3348 for (i = 0; i < ARRAY_SIZE(stats->bc); i++) {
3349 if (stats->bc[i])
3350 seq_printf(m, "%s%s: %d\n", prefix,
3351 binder_command_strings[i], stats->bc[i]);
3352 }
3354 BUILD_BUG_ON(ARRAY_SIZE(stats->br) !=
3355 ARRAY_SIZE(binder_return_strings));
3356 for (i = 0; i < ARRAY_SIZE(stats->br); i++) {
3357 if (stats->br[i])
3358 seq_printf(m, "%s%s: %d\n", prefix,
3359 binder_return_strings[i], stats->br[i]);
3360 }
3362 BUILD_BUG_ON(ARRAY_SIZE(stats->obj_created) !=
3363 ARRAY_SIZE(binder_objstat_strings));
3364 BUILD_BUG_ON(ARRAY_SIZE(stats->obj_created) !=
3365 ARRAY_SIZE(stats->obj_deleted));
3366 for (i = 0; i < ARRAY_SIZE(stats->obj_created); i++) {
3367 if (stats->obj_created[i] || stats->obj_deleted[i])
3368 seq_printf(m, "%s%s: active %d total %d\n", prefix,
3369 binder_objstat_strings[i],
3370 stats->obj_created[i] - stats->obj_deleted[i],
3371 stats->obj_created[i]);
3372 }
3373 }
3375 static void print_binder_proc_stats(struct seq_file *m,
3376 struct binder_proc *proc)
3377 {
3378 struct binder_work *w;
3379 struct rb_node *n;
3380 int count, strong, weak;
3382 seq_printf(m, "proc %d\n", proc->pid);
3383 count = 0;
3384 for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n))
3385 count++;
3386 seq_printf(m, " threads: %d\n", count);
3387 seq_printf(m, " requested threads: %d+%d/%d\n"
3388 " ready threads %d\n"
3389 " free async space %zd\n", proc->requested_threads,
3390 proc->requested_threads_started, proc->max_threads,
3391 proc->ready_threads, proc->free_async_space);
3392 count = 0;
3393 for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n))
3394 count++;
3395 seq_printf(m, " nodes: %d\n", count);
3396 count = 0;
3397 strong = 0;
3398 weak = 0;
3399 for (n = rb_first(&proc->refs_by_desc); n != NULL; n = rb_next(n)) {
3400 struct binder_ref *ref = rb_entry(n, struct binder_ref,
3401 rb_node_desc);
3402 count++;
3403 strong += ref->strong;
3404 weak += ref->weak;
3405 }
3406 seq_printf(m, " refs: %d s %d w %d\n", count, strong, weak);
3408 count = 0;
3409 for (n = rb_first(&proc->allocated_buffers); n != NULL; n = rb_next(n))
3410 count++;
3411 seq_printf(m, " buffers: %d\n", count);
3413 count = 0;
3414 list_for_each_entry(w, &proc->todo, entry) {
3415 switch (w->type) {
3416 case BINDER_WORK_TRANSACTION:
3417 count++;
3418 break;
3419 default:
3420 break;
3421 }
3422 }
3423 seq_printf(m, " pending transactions: %d\n", count);
3425 print_binder_stats(m, " ", &proc->stats);
3426 }
3429 static int binder_state_show(struct seq_file *m, void *unused)
3430 {
3431 struct binder_proc *proc;
3432 struct binder_node *node;
3433 int do_lock = !binder_debug_no_lock;
3435 if (do_lock)
3436 binder_lock(__func__);
3438 seq_puts(m, "binder state:\n");
3440 if (!hlist_empty(&binder_dead_nodes))
3441 seq_puts(m, "dead nodes:\n");
3442 hlist_for_each_entry(node, &binder_dead_nodes, dead_node)
3443 print_binder_node(m, node);
3445 hlist_for_each_entry(proc, &binder_procs, proc_node)
3446 print_binder_proc(m, proc, 1);
3447 if (do_lock)
3448 binder_unlock(__func__);
3449 return 0;
3450 }
3452 static int binder_stats_show(struct seq_file *m, void *unused)
3453 {
3454 struct binder_proc *proc;
3455 int do_lock = !binder_debug_no_lock;
3457 if (do_lock)
3458 binder_lock(__func__);
3460 seq_puts(m, "binder stats:\n");
3462 print_binder_stats(m, "", &binder_stats);
3464 hlist_for_each_entry(proc, &binder_procs, proc_node)
3465 print_binder_proc_stats(m, proc);
3466 if (do_lock)
3467 binder_unlock(__func__);
3468 return 0;
3469 }
3471 static int binder_transactions_show(struct seq_file *m, void *unused)
3472 {
3473 struct binder_proc *proc;
3474 int do_lock = !binder_debug_no_lock;
3476 if (do_lock)
3477 binder_lock(__func__);
3479 seq_puts(m, "binder transactions:\n");
3480 hlist_for_each_entry(proc, &binder_procs, proc_node)
3481 print_binder_proc(m, proc, 0);
3482 if (do_lock)
3483 binder_unlock(__func__);
3484 return 0;
3485 }
3487 static int binder_proc_show(struct seq_file *m, void *unused)
3488 {
3489 struct binder_proc *proc = m->private;
3490 int do_lock = !binder_debug_no_lock;
3492 if (do_lock)
3493 binder_lock(__func__);
3494 seq_puts(m, "binder proc state:\n");
3495 print_binder_proc(m, proc, 1);
3496 if (do_lock)
3497 binder_unlock(__func__);
3498 return 0;
3499 }
3501 static void print_binder_transaction_log_entry(struct seq_file *m,
3502 struct binder_transaction_log_entry *e)
3503 {
3504 seq_printf(m,
3505 "%d: %s from %d:%d to %d:%d node %d handle %d size %d:%d\n",
3506 e->debug_id, (e->call_type == 2) ? "reply" :
3507 ((e->call_type == 1) ? "async" : "call "), e->from_proc,
3508 e->from_thread, e->to_proc, e->to_thread, e->to_node,
3509 e->target_handle, e->data_size, e->offsets_size);
3510 }
3512 static int binder_transaction_log_show(struct seq_file *m, void *unused)
3513 {
3514 struct binder_transaction_log *log = m->private;
3515 int i;
3517 if (log->full) {
3518 for (i = log->next; i < ARRAY_SIZE(log->entry); i++)
3519 print_binder_transaction_log_entry(m, &log->entry[i]);
3520 }
3521 for (i = 0; i < log->next; i++)
3522 print_binder_transaction_log_entry(m, &log->entry[i]);
3523 return 0;
3524 }
3526 static const struct file_operations binder_fops = {
3527 .owner = THIS_MODULE,
3528 .poll = binder_poll,
3529 .unlocked_ioctl = binder_ioctl,
3530 .compat_ioctl = binder_ioctl,
3531 .mmap = binder_mmap,
3532 .open = binder_open,
3533 .flush = binder_flush,
3534 .release = binder_release,
3535 };
3537 static struct miscdevice binder_miscdev = {
3538 .minor = MISC_DYNAMIC_MINOR,
3539 .name = "binder",
3540 .fops = &binder_fops
3541 };
3543 BINDER_DEBUG_ENTRY(state);
3544 BINDER_DEBUG_ENTRY(stats);
3545 BINDER_DEBUG_ENTRY(transactions);
3546 BINDER_DEBUG_ENTRY(transaction_log);
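/*
 * binder_init() - create the deferred workqueue and the debugfs
 * tree, then register the /dev/binder misc device.
 */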
3548 static int __init binder_init(void)
3549 {
3550 int ret;
3552 binder_deferred_workqueue = create_singlethread_workqueue("binder");
3553 if (!binder_deferred_workqueue)
3554 return -ENOMEM;
3556 binder_debugfs_dir_entry_root = debugfs_create_dir("binder", NULL);
3557 if (binder_debugfs_dir_entry_root)
3558 binder_debugfs_dir_entry_proc = debugfs_create_dir("proc",
3559 binder_debugfs_dir_entry_root);
3560 ret = misc_register(&binder_miscdev);
3561 if (binder_debugfs_dir_entry_root) {
3562 debugfs_create_file("state",
3563 S_IRUGO,
3564 binder_debugfs_dir_entry_root,
3565 NULL,
3566 &binder_state_fops);
3567 debugfs_create_file("stats",
3568 S_IRUGO,
3569 binder_debugfs_dir_entry_root,
3570 NULL,
3571 &binder_stats_fops);
3572 debugfs_create_file("transactions",
3573 S_IRUGO,
3574 binder_debugfs_dir_entry_root,
3575 NULL,
3576 &binder_transactions_fops);
3577 debugfs_create_file("transaction_log",
3578 S_IRUGO,
3579 binder_debugfs_dir_entry_root,
3580 &binder_transaction_log,
3581 &binder_transaction_log_fops);
3582 debugfs_create_file("failed_transaction_log",
3583 S_IRUGO,
3584 binder_debugfs_dir_entry_root,
3585 &binder_transaction_log_failed,
3586 &binder_transaction_log_fops);
3587 }
3588 return ret;
3589 }
3591 device_initcall(binder_init);
3593 #define CREATE_TRACE_POINTS
3594 #include "binder_trace.h"
3596 MODULE_LICENSE("GPL v2");