/* binder.c
 *
 * Android IPC Subsystem
 *
 * Copyright (C) 2007-2008 Google, Inc.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <asm/cacheflush.h>
#include <linux/fdtable.h>
#include <linux/file.h>
#include <linux/freezer.h>
#include <linux/fs.h>
#include <linux/list.h>
#include <linux/miscdevice.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/nsproxy.h>
#include <linux/poll.h>
#include <linux/debugfs.h>
#include <linux/rbtree.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/uaccess.h>
#include <linux/vmalloc.h>
#include <linux/slab.h>
#include <linux/pid_namespace.h>

#include "binder.h"
#include "binder_trace.h"

static DEFINE_MUTEX(binder_main_lock);
static DEFINE_MUTEX(binder_deferred_lock);
static DEFINE_MUTEX(binder_mmap_lock);

static HLIST_HEAD(binder_procs);
static HLIST_HEAD(binder_deferred_list);
static HLIST_HEAD(binder_dead_nodes);

static struct dentry *binder_debugfs_dir_entry_root;
static struct dentry *binder_debugfs_dir_entry_proc;
static struct binder_node *binder_context_mgr_node;
static kuid_t binder_context_mgr_uid = INVALID_UID;
static int binder_last_id;
static struct workqueue_struct *binder_deferred_workqueue;

#define BINDER_DEBUG_ENTRY(name) \
static int binder_##name##_open(struct inode *inode, struct file *file) \
{ \
	return single_open(file, binder_##name##_show, inode->i_private); \
} \
\
static const struct file_operations binder_##name##_fops = { \
	.owner = THIS_MODULE, \
	.open = binder_##name##_open, \
	.read = seq_read, \
	.llseek = seq_lseek, \
	.release = single_release, \
}

static int binder_proc_show(struct seq_file *m, void *unused);
BINDER_DEBUG_ENTRY(proc);

/* This is only defined in include/asm-arm/sizes.h */
#ifndef SZ_1K
#define SZ_1K                               0x400
#endif

#ifndef SZ_4M
#define SZ_4M                               0x400000
#endif

#define FORBIDDEN_MMAP_FLAGS                (VM_WRITE)

#define BINDER_SMALL_BUF_SIZE (PAGE_SIZE * 64)

enum {
	BINDER_DEBUG_USER_ERROR             = 1U << 0,
	BINDER_DEBUG_FAILED_TRANSACTION     = 1U << 1,
	BINDER_DEBUG_DEAD_TRANSACTION       = 1U << 2,
	BINDER_DEBUG_OPEN_CLOSE             = 1U << 3,
	BINDER_DEBUG_DEAD_BINDER            = 1U << 4,
	BINDER_DEBUG_DEATH_NOTIFICATION     = 1U << 5,
	BINDER_DEBUG_READ_WRITE             = 1U << 6,
	BINDER_DEBUG_USER_REFS              = 1U << 7,
	BINDER_DEBUG_THREADS                = 1U << 8,
	BINDER_DEBUG_TRANSACTION            = 1U << 9,
	BINDER_DEBUG_TRANSACTION_COMPLETE   = 1U << 10,
	BINDER_DEBUG_FREE_BUFFER            = 1U << 11,
	BINDER_DEBUG_INTERNAL_REFS          = 1U << 12,
	BINDER_DEBUG_BUFFER_ALLOC           = 1U << 13,
	BINDER_DEBUG_PRIORITY_CAP           = 1U << 14,
	BINDER_DEBUG_BUFFER_ALLOC_ASYNC     = 1U << 15,
};
static uint32_t binder_debug_mask = BINDER_DEBUG_USER_ERROR |
	BINDER_DEBUG_FAILED_TRANSACTION | BINDER_DEBUG_DEAD_TRANSACTION;
module_param_named(debug_mask, binder_debug_mask, uint, S_IWUSR | S_IRUGO);

static bool binder_debug_no_lock;
module_param_named(proc_no_lock, binder_debug_no_lock, bool, S_IWUSR | S_IRUGO);

static DECLARE_WAIT_QUEUE_HEAD(binder_user_error_wait);
static int binder_stop_on_user_error;

static int binder_set_stop_on_user_error(const char *val,
					 struct kernel_param *kp)
{
	int ret;

	ret = param_set_int(val, kp);
	if (binder_stop_on_user_error < 2)
		wake_up(&binder_user_error_wait);
	return ret;
}
module_param_call(stop_on_user_error, binder_set_stop_on_user_error,
	param_get_int, &binder_stop_on_user_error, S_IWUSR | S_IRUGO);

#define binder_debug(mask, x...) \
	do { \
		if (binder_debug_mask & mask) \
			pr_info(x); \
	} while (0)

#define binder_user_error(x...) \
	do { \
		if (binder_debug_mask & BINDER_DEBUG_USER_ERROR) \
			pr_info(x); \
		if (binder_stop_on_user_error) \
			binder_stop_on_user_error = 2; \
	} while (0)

enum binder_stat_types {
	BINDER_STAT_PROC,
	BINDER_STAT_THREAD,
	BINDER_STAT_NODE,
	BINDER_STAT_REF,
	BINDER_STAT_DEATH,
	BINDER_STAT_TRANSACTION,
	BINDER_STAT_TRANSACTION_COMPLETE,
	BINDER_STAT_COUNT
};

struct binder_stats {
	int br[_IOC_NR(BR_FAILED_REPLY) + 1];
	int bc[_IOC_NR(BC_DEAD_BINDER_DONE) + 1];
	int obj_created[BINDER_STAT_COUNT];
	int obj_deleted[BINDER_STAT_COUNT];
};

static struct binder_stats binder_stats;

static inline void binder_stats_deleted(enum binder_stat_types type)
{
	binder_stats.obj_deleted[type]++;
}

static inline void binder_stats_created(enum binder_stat_types type)
{
	binder_stats.obj_created[type]++;
}

struct binder_transaction_log_entry {
	int debug_id;
	int call_type;
	int from_proc;
	int from_thread;
	int target_handle;
	int to_proc;
	int to_thread;
	int to_node;
	int data_size;
	int offsets_size;
};
struct binder_transaction_log {
	int next;
	int full;
	struct binder_transaction_log_entry entry[32];
};
static struct binder_transaction_log binder_transaction_log;
static struct binder_transaction_log binder_transaction_log_failed;

static struct binder_transaction_log_entry *binder_transaction_log_add(
	struct binder_transaction_log *log)
{
	struct binder_transaction_log_entry *e;

	e = &log->entry[log->next];
	memset(e, 0, sizeof(*e));
	log->next++;
	if (log->next == ARRAY_SIZE(log->entry)) {
		log->next = 0;
		log->full = 1;
	}
	return e;
}

struct binder_work {
	struct list_head entry;
	enum {
		BINDER_WORK_TRANSACTION = 1,
		BINDER_WORK_TRANSACTION_COMPLETE,
		BINDER_WORK_NODE,
		BINDER_WORK_DEAD_BINDER,
		BINDER_WORK_DEAD_BINDER_AND_CLEAR,
		BINDER_WORK_CLEAR_DEATH_NOTIFICATION,
	} type;
};

struct binder_node {
	int debug_id;
	struct binder_work work;
	union {
		struct rb_node rb_node;
		struct hlist_node dead_node;
	};
	struct binder_proc *proc;
	struct hlist_head refs;
	int internal_strong_refs;
	int local_weak_refs;
	int local_strong_refs;
	binder_uintptr_t ptr;
	binder_uintptr_t cookie;
	unsigned has_strong_ref:1;
	unsigned pending_strong_ref:1;
	unsigned has_weak_ref:1;
	unsigned pending_weak_ref:1;
	unsigned has_async_transaction:1;
	unsigned accept_fds:1;
	unsigned min_priority:8;
	struct list_head async_todo;
};

struct binder_ref_death {
	struct binder_work work;
	binder_uintptr_t cookie;
};

struct binder_ref {
	/* Lookups needed: */
	/*   node + proc => ref (transaction) */
	/*   desc + proc => ref (transaction, inc/dec ref) */
	/*   node => refs + procs (proc exit) */
	int debug_id;
	struct rb_node rb_node_desc;
	struct rb_node rb_node_node;
	struct hlist_node node_entry;
	struct binder_proc *proc;
	struct binder_node *node;
	uint32_t desc;
	int strong;
	int weak;
	struct binder_ref_death *death;
};

struct binder_buffer {
	struct list_head entry; /* free and allocated entries by address */
	struct rb_node rb_node; /* free entry by size or allocated entry */
				/* by address */
	unsigned free:1;
	unsigned allow_user_free:1;
	unsigned async_transaction:1;
	unsigned debug_id:29;

	struct binder_transaction *transaction;

	struct binder_node *target_node;
	size_t data_size;
	size_t offsets_size;
	uint8_t data[0];
};

enum binder_deferred_state {
	BINDER_DEFERRED_PUT_FILES    = 0x01,
	BINDER_DEFERRED_FLUSH        = 0x02,
	BINDER_DEFERRED_RELEASE      = 0x04,
};

struct binder_proc {
	struct hlist_node proc_node;
	struct rb_root threads;
	struct rb_root nodes;
	struct rb_root refs_by_desc;
	struct rb_root refs_by_node;
	int pid;
	struct vm_area_struct *vma;
	struct mm_struct *vma_vm_mm;
	struct task_struct *tsk;
	struct files_struct *files;
	struct hlist_node deferred_work_node;
	int deferred_work;
	void *buffer;
	ptrdiff_t user_buffer_offset;

	struct list_head buffers;
	struct rb_root free_buffers;
	struct rb_root allocated_buffers;
	size_t free_async_space;

	struct page **pages;
	size_t buffer_size;
	uint32_t buffer_free;
	struct list_head todo;
	wait_queue_head_t wait;
	struct binder_stats stats;
	struct list_head delivered_death;
	int max_threads;
	int requested_threads;
	int requested_threads_started;
	int ready_threads;
	long default_priority;
	struct dentry *debugfs_entry;
};

enum {
	BINDER_LOOPER_STATE_REGISTERED  = 0x01,
	BINDER_LOOPER_STATE_ENTERED     = 0x02,
	BINDER_LOOPER_STATE_EXITED      = 0x04,
	BINDER_LOOPER_STATE_INVALID     = 0x08,
	BINDER_LOOPER_STATE_WAITING     = 0x10,
	BINDER_LOOPER_STATE_NEED_RETURN = 0x20
};

struct binder_thread {
	struct binder_proc *proc;
	struct rb_node rb_node;
	int pid;
	int looper;
	struct binder_transaction *transaction_stack;
	struct list_head todo;
	uint32_t return_error; /* Write failed, return error code in read buf */
	uint32_t return_error2; /* Write failed, return error code in read */
		/* buffer. Used when sending a reply to a dead process that */
		/* we are also waiting on */
	wait_queue_head_t wait;
	struct binder_stats stats;
};

struct binder_transaction {
	int debug_id;
	struct binder_work work;
	struct binder_thread *from;
	struct binder_transaction *from_parent;
	struct binder_proc *to_proc;
	struct binder_thread *to_thread;
	struct binder_transaction *to_parent;
	unsigned need_reply:1;
	/* unsigned is_dead:1; */	/* not used at the moment */

	struct binder_buffer *buffer;
	unsigned int code;
	unsigned int flags;
	long priority;
	long saved_priority;
	kuid_t sender_euid;
};

static void
binder_defer_work(struct binder_proc *proc, enum binder_deferred_state defer);

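/*
 * The fd helpers below operate on the *target* proc's files_struct
 * rather than current's, so that BINDER_TYPE_FD objects carried in a
 * transaction can be installed into the receiving process.
 */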
static int task_get_unused_fd_flags(struct binder_proc *proc, int flags)
{
	struct files_struct *files = proc->files;
	unsigned long rlim_cur;
	unsigned long irqs;

	if (files == NULL)
		return -ESRCH;

	if (!lock_task_sighand(proc->tsk, &irqs))
		return -EMFILE;

	rlim_cur = task_rlimit(proc->tsk, RLIMIT_NOFILE);
	unlock_task_sighand(proc->tsk, &irqs);

	return __alloc_fd(files, 0, rlim_cur, flags);
}

/*
 * copied from fd_install
 */
static void task_fd_install(
	struct binder_proc *proc, unsigned int fd, struct file *file)
{
	if (proc->files)
		__fd_install(proc->files, fd, file);
}

/*
 * copied from sys_close
 */
static long task_close_fd(struct binder_proc *proc, unsigned int fd)
{
	int retval;

	if (proc->files == NULL)
		return -ESRCH;

	retval = __close_fd(proc->files, fd);
	/* can't restart close syscall because file table entry was cleared */
	if (unlikely(retval == -ERESTARTSYS ||
		     retval == -ERESTARTNOINTR ||
		     retval == -ERESTARTNOHAND ||
		     retval == -ERESTART_RESTARTBLOCK))
		retval = -EINTR;

	return retval;
}

static inline void binder_lock(const char *tag)
{
	trace_binder_lock(tag);
	mutex_lock(&binder_main_lock);
	trace_binder_locked(tag);
}

static inline void binder_unlock(const char *tag)
{
	trace_binder_unlock(tag);
	mutex_unlock(&binder_main_lock);
}

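/*
 * Apply the requested nice value if permitted; otherwise fall back to
 * the lowest priority allowed by RLIMIT_NICE and log the cap.
 */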
static void binder_set_nice(long nice)
{
	long min_nice;

	if (can_nice(current, nice)) {
		set_user_nice(current, nice);
		return;
	}
	min_nice = rlimit_to_nice(current->signal->rlim[RLIMIT_NICE].rlim_cur);
	binder_debug(BINDER_DEBUG_PRIORITY_CAP,
		     "%d: nice value %ld not allowed use %ld instead\n",
		     current->pid, nice, min_nice);
	set_user_nice(current, min_nice);
	if (min_nice <= MAX_NICE)
		return;
	binder_user_error("%d RLIMIT_NICE not set\n", current->pid);
}

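/*
 * Buffers are laid out back to back in the mmap'ed area, so a buffer's
 * usable size is the distance from its data[] to the next buffer (or to
 * the end of the area for the last buffer in the list).
 */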
static size_t binder_buffer_size(struct binder_proc *proc,
				 struct binder_buffer *buffer)
{
	if (list_is_last(&buffer->entry, &proc->buffers))
		return proc->buffer + proc->buffer_size - (void *)buffer->data;
	else
		return (size_t)list_entry(buffer->entry.next,
			struct binder_buffer, entry) - (size_t)buffer->data;
}

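/* Free buffers are kept in a size-ordered rbtree for best-fit lookup. */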
static void binder_insert_free_buffer(struct binder_proc *proc,
				      struct binder_buffer *new_buffer)
{
	struct rb_node **p = &proc->free_buffers.rb_node;
	struct rb_node *parent = NULL;
	struct binder_buffer *buffer;
	size_t buffer_size;
	size_t new_buffer_size;

	BUG_ON(!new_buffer->free);

	new_buffer_size = binder_buffer_size(proc, new_buffer);

	binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
		     "%d: add free buffer, size %zd, at %p\n",
		     proc->pid, new_buffer_size, new_buffer);

	while (*p) {
		parent = *p;
		buffer = rb_entry(parent, struct binder_buffer, rb_node);
		BUG_ON(!buffer->free);

		buffer_size = binder_buffer_size(proc, buffer);

		if (new_buffer_size < buffer_size)
			p = &parent->rb_left;
		else
			p = &parent->rb_right;
	}
	rb_link_node(&new_buffer->rb_node, parent, p);
	rb_insert_color(&new_buffer->rb_node, &proc->free_buffers);
}

static void binder_insert_allocated_buffer(struct binder_proc *proc,
					   struct binder_buffer *new_buffer)
{
	struct rb_node **p = &proc->allocated_buffers.rb_node;
	struct rb_node *parent = NULL;
	struct binder_buffer *buffer;

	BUG_ON(new_buffer->free);

	while (*p) {
		parent = *p;
		buffer = rb_entry(parent, struct binder_buffer, rb_node);
		BUG_ON(buffer->free);

		if (new_buffer < buffer)
			p = &parent->rb_left;
		else if (new_buffer > buffer)
			p = &parent->rb_right;
		else
			BUG();
	}
	rb_link_node(&new_buffer->rb_node, parent, p);
	rb_insert_color(&new_buffer->rb_node, &proc->allocated_buffers);
}

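/*
 * Translate a userspace data pointer back to its binder_buffer: undo
 * the per-proc user/kernel offset and the data[] header offset, then
 * find the result in the address-ordered allocated_buffers tree.
 */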
static struct binder_buffer *binder_buffer_lookup(struct binder_proc *proc,
						  uintptr_t user_ptr)
{
	struct rb_node *n = proc->allocated_buffers.rb_node;
	struct binder_buffer *buffer;
	struct binder_buffer *kern_ptr;

	kern_ptr = (struct binder_buffer *)(user_ptr - proc->user_buffer_offset
		- offsetof(struct binder_buffer, data));

	while (n) {
		buffer = rb_entry(n, struct binder_buffer, rb_node);
		BUG_ON(buffer->free);

		if (kern_ptr < buffer)
			n = n->rb_left;
		else if (kern_ptr > buffer)
			n = n->rb_right;
		else
			return buffer;
	}
	return NULL;
}

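/*
 * Allocate (or free) the physical pages backing [start, end) and map
 * them into both the kernel address space and the target process's vma.
 * On failure, the error labels inside the free loop unwind exactly the
 * pages that were already set up.
 */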
static int binder_update_page_range(struct binder_proc *proc, int allocate,
				    void *start, void *end,
				    struct vm_area_struct *vma)
{
	void *page_addr;
	unsigned long user_page_addr;
	struct vm_struct tmp_area;
	struct page **page;
	struct mm_struct *mm;

	binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
		     "%d: %s pages %p-%p\n", proc->pid,
		     allocate ? "allocate" : "free", start, end);

	if (end <= start)
		return 0;

	trace_binder_update_page_range(proc, allocate, start, end);

	if (vma)
		mm = NULL;
	else
		mm = get_task_mm(proc->tsk);

	if (mm) {
		down_write(&mm->mmap_sem);
		vma = proc->vma;
		if (vma && mm != proc->vma_vm_mm) {
			pr_err("%d: vma mm and task mm mismatch\n",
				proc->pid);
			vma = NULL;
		}
	}

	if (allocate == 0)
		goto free_range;

	if (vma == NULL) {
		pr_err("%d: binder_alloc_buf failed to map pages in userspace, no vma\n",
			proc->pid);
		goto err_no_vma;
	}

	for (page_addr = start; page_addr < end; page_addr += PAGE_SIZE) {
		int ret;
		struct page **page_array_ptr;

		page = &proc->pages[(page_addr - proc->buffer) / PAGE_SIZE];

		BUG_ON(*page);
		*page = alloc_page(GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO);
		if (*page == NULL) {
			pr_err("%d: binder_alloc_buf failed for page at %p\n",
				proc->pid, page_addr);
			goto err_alloc_page_failed;
		}
		tmp_area.addr = page_addr;
		tmp_area.size = PAGE_SIZE + PAGE_SIZE /* guard page? */;
		page_array_ptr = page;
		ret = map_vm_area(&tmp_area, PAGE_KERNEL, &page_array_ptr);
		if (ret) {
			pr_err("%d: binder_alloc_buf failed to map page at %p in kernel\n",
			       proc->pid, page_addr);
			goto err_map_kernel_failed;
		}
		user_page_addr =
			(uintptr_t)page_addr + proc->user_buffer_offset;
		ret = vm_insert_page(vma, user_page_addr, page[0]);
		if (ret) {
			pr_err("%d: binder_alloc_buf failed to map page at %lx in userspace\n",
			       proc->pid, user_page_addr);
			goto err_vm_insert_page_failed;
		}
		/* vm_insert_page does not seem to increment the refcount */
	}
	if (mm) {
		up_write(&mm->mmap_sem);
		mmput(mm);
	}
	return 0;

free_range:
	for (page_addr = end - PAGE_SIZE; page_addr >= start;
	     page_addr -= PAGE_SIZE) {
		page = &proc->pages[(page_addr - proc->buffer) / PAGE_SIZE];
		if (vma)
			zap_page_range(vma, (uintptr_t)page_addr +
				proc->user_buffer_offset, PAGE_SIZE, NULL);
err_vm_insert_page_failed:
		unmap_kernel_range((unsigned long)page_addr, PAGE_SIZE);
err_map_kernel_failed:
		__free_page(*page);
		*page = NULL;
err_alloc_page_failed:
		;
	}
err_no_vma:
	if (mm) {
		up_write(&mm->mmap_sem);
		mmput(mm);
	}
	return -ENOMEM;
}

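/*
 * Best-fit allocation: walk the size-ordered free_buffers tree for the
 * smallest buffer that fits, back the needed pages, and split off the
 * remainder as a new free buffer when there is room for one.
 */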
static struct binder_buffer *binder_alloc_buf(struct binder_proc *proc,
					      size_t data_size,
					      size_t offsets_size, int is_async)
{
	struct rb_node *n = proc->free_buffers.rb_node;
	struct binder_buffer *buffer;
	size_t buffer_size;
	struct rb_node *best_fit = NULL;
	void *has_page_addr;
	void *end_page_addr;
	size_t size;

	if (proc->vma == NULL) {
		pr_err("%d: binder_alloc_buf, no vma\n",
		       proc->pid);
		return NULL;
	}

	size = ALIGN(data_size, sizeof(void *)) +
		ALIGN(offsets_size, sizeof(void *));

	if (size < data_size || size < offsets_size) {
		binder_user_error("%d: got transaction with invalid size %zd-%zd\n",
				  proc->pid, data_size, offsets_size);
		return NULL;
	}

	if (is_async &&
	    proc->free_async_space < size + sizeof(struct binder_buffer)) {
		binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
			     "%d: binder_alloc_buf size %zd failed, no async space left\n",
			     proc->pid, size);
		return NULL;
	}

	while (n) {
		buffer = rb_entry(n, struct binder_buffer, rb_node);
		BUG_ON(!buffer->free);
		buffer_size = binder_buffer_size(proc, buffer);

		if (size < buffer_size) {
			best_fit = n;
			n = n->rb_left;
		} else if (size > buffer_size)
			n = n->rb_right;
		else {
			best_fit = n;
			break;
		}
	}
	if (best_fit == NULL) {
		pr_err("%d: binder_alloc_buf size %zd failed, no address space\n",
			proc->pid, size);
		return NULL;
	}
	if (n == NULL) {
		buffer = rb_entry(best_fit, struct binder_buffer, rb_node);
		buffer_size = binder_buffer_size(proc, buffer);
	}

	binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
		     "%d: binder_alloc_buf size %zd got buffer %p size %zd\n",
		     proc->pid, size, buffer, buffer_size);

	has_page_addr =
		(void *)(((uintptr_t)buffer->data + buffer_size) & PAGE_MASK);
	if (n == NULL) {
		if (size + sizeof(struct binder_buffer) + 4 >= buffer_size)
			buffer_size = size; /* no room for other buffers */
		else
			buffer_size = size + sizeof(struct binder_buffer);
	}
	end_page_addr =
		(void *)PAGE_ALIGN((uintptr_t)buffer->data + buffer_size);
	if (end_page_addr > has_page_addr)
		end_page_addr = has_page_addr;
	if (binder_update_page_range(proc, 1,
	    (void *)PAGE_ALIGN((uintptr_t)buffer->data), end_page_addr, NULL))
		return NULL;

	rb_erase(best_fit, &proc->free_buffers);
	buffer->free = 0;
	binder_insert_allocated_buffer(proc, buffer);
	if (buffer_size != size) {
		struct binder_buffer *new_buffer = (void *)buffer->data + size;

		list_add(&new_buffer->entry, &buffer->entry);
		new_buffer->free = 1;
		binder_insert_free_buffer(proc, new_buffer);
	}
	binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
		     "%d: binder_alloc_buf size %zd got %p\n",
		     proc->pid, size, buffer);
	buffer->data_size = data_size;
	buffer->offsets_size = offsets_size;
	buffer->async_transaction = is_async;
	if (is_async) {
		proc->free_async_space -= size + sizeof(struct binder_buffer);
		binder_debug(BINDER_DEBUG_BUFFER_ALLOC_ASYNC,
			     "%d: binder_alloc_buf size %zd async free %zd\n",
			     proc->pid, size, proc->free_async_space);
	}

	return buffer;
}

static void *buffer_start_page(struct binder_buffer *buffer)
{
	return (void *)((uintptr_t)buffer & PAGE_MASK);
}

static void *buffer_end_page(struct binder_buffer *buffer)
{
	return (void *)(((uintptr_t)(buffer + 1) - 1) & PAGE_MASK);
}

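/*
 * When unlinking a free buffer, only pages that it does not share with
 * its (also free) neighbours may be released back to the system.
 */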
static void binder_delete_free_buffer(struct binder_proc *proc,
				      struct binder_buffer *buffer)
{
	struct binder_buffer *prev, *next = NULL;
	int free_page_end = 1;
	int free_page_start = 1;

	BUG_ON(proc->buffers.next == &buffer->entry);
	prev = list_entry(buffer->entry.prev, struct binder_buffer, entry);
	BUG_ON(!prev->free);
	if (buffer_end_page(prev) == buffer_start_page(buffer)) {
		free_page_start = 0;
		if (buffer_end_page(prev) == buffer_end_page(buffer))
			free_page_end = 0;
		binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
			     "%d: merge free, buffer %p share page with %p\n",
			     proc->pid, buffer, prev);
	}

	if (!list_is_last(&buffer->entry, &proc->buffers)) {
		next = list_entry(buffer->entry.next,
				  struct binder_buffer, entry);
		if (buffer_start_page(next) == buffer_end_page(buffer)) {
			free_page_end = 0;
			if (buffer_start_page(next) ==
			    buffer_start_page(buffer))
				free_page_start = 0;
			binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
				     "%d: merge free, buffer %p share page with %p\n",
				     proc->pid, buffer, next);
		}
	}
	list_del(&buffer->entry);
	if (free_page_start || free_page_end) {
		binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
			     "%d: merge free, buffer %p do not share page%s%s with %p or %p\n",
			     proc->pid, buffer, free_page_start ? "" : " end",
			     free_page_end ? "" : " start", prev, next);
		binder_update_page_range(proc, 0, free_page_start ?
			buffer_start_page(buffer) : buffer_end_page(buffer),
			(free_page_end ? buffer_end_page(buffer) :
			buffer_start_page(buffer)) + PAGE_SIZE, NULL);
	}
}

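/*
 * Return a buffer to the free list, releasing its whole pages and
 * coalescing it with free neighbours so the space stays allocatable.
 */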
static void binder_free_buf(struct binder_proc *proc,
			    struct binder_buffer *buffer)
{
	size_t size, buffer_size;

	buffer_size = binder_buffer_size(proc, buffer);

	size = ALIGN(buffer->data_size, sizeof(void *)) +
		ALIGN(buffer->offsets_size, sizeof(void *));

	binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
		     "%d: binder_free_buf %p size %zd buffer_size %zd\n",
		     proc->pid, buffer, size, buffer_size);

	BUG_ON(buffer->free);
	BUG_ON(size > buffer_size);
	BUG_ON(buffer->transaction != NULL);
	BUG_ON((void *)buffer < proc->buffer);
	BUG_ON((void *)buffer > proc->buffer + proc->buffer_size);

	if (buffer->async_transaction) {
		proc->free_async_space += size + sizeof(struct binder_buffer);

		binder_debug(BINDER_DEBUG_BUFFER_ALLOC_ASYNC,
			     "%d: binder_free_buf size %zd async free %zd\n",
			     proc->pid, size, proc->free_async_space);
	}

	binder_update_page_range(proc, 0,
		(void *)PAGE_ALIGN((uintptr_t)buffer->data),
		(void *)(((uintptr_t)buffer->data + buffer_size) & PAGE_MASK),
		NULL);
	rb_erase(&buffer->rb_node, &proc->allocated_buffers);
	buffer->free = 1;
	if (!list_is_last(&buffer->entry, &proc->buffers)) {
		struct binder_buffer *next = list_entry(buffer->entry.next,
						struct binder_buffer, entry);

		if (next->free) {
			rb_erase(&next->rb_node, &proc->free_buffers);
			binder_delete_free_buffer(proc, next);
		}
	}
	if (proc->buffers.next != &buffer->entry) {
		struct binder_buffer *prev = list_entry(buffer->entry.prev,
						struct binder_buffer, entry);

		if (prev->free) {
			binder_delete_free_buffer(proc, buffer);
			rb_erase(&prev->rb_node, &proc->free_buffers);
			buffer = prev;
		}
	}
	binder_insert_free_buffer(proc, buffer);
}

static struct binder_node *binder_get_node(struct binder_proc *proc,
					   binder_uintptr_t ptr)
{
	struct rb_node *n = proc->nodes.rb_node;
	struct binder_node *node;

	while (n) {
		node = rb_entry(n, struct binder_node, rb_node);

		if (ptr < node->ptr)
			n = n->rb_left;
		else if (ptr > node->ptr)
			n = n->rb_right;
		else
			return node;
	}
	return NULL;
}

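/*
 * Nodes live in a per-proc rbtree keyed by the userspace object
 * pointer; asking to create a node that already exists returns NULL.
 */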
static struct binder_node *binder_new_node(struct binder_proc *proc,
					   binder_uintptr_t ptr,
					   binder_uintptr_t cookie)
{
	struct rb_node **p = &proc->nodes.rb_node;
	struct rb_node *parent = NULL;
	struct binder_node *node;

	while (*p) {
		parent = *p;
		node = rb_entry(parent, struct binder_node, rb_node);

		if (ptr < node->ptr)
			p = &(*p)->rb_left;
		else if (ptr > node->ptr)
			p = &(*p)->rb_right;
		else
			return NULL;
	}

	node = kzalloc(sizeof(*node), GFP_KERNEL);
	if (node == NULL)
		return NULL;
	binder_stats_created(BINDER_STAT_NODE);
	rb_link_node(&node->rb_node, parent, p);
	rb_insert_color(&node->rb_node, &proc->nodes);
	node->debug_id = ++binder_last_id;
	node->proc = proc;
	node->ptr = ptr;
	node->cookie = cookie;
	node->work.type = BINDER_WORK_NODE;
	INIT_LIST_HEAD(&node->work.entry);
	INIT_LIST_HEAD(&node->async_todo);
	binder_debug(BINDER_DEBUG_INTERNAL_REFS,
		     "%d:%d node %d u%016llx c%016llx created\n",
		     proc->pid, current->pid, node->debug_id,
		     (u64)node->ptr, (u64)node->cookie);
	return node;
}

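/*
 * Reference accounting on a node. Queueing the node's work entry on
 * target_list is what lets the owning process be told (via BR_INCREFS/
 * BR_ACQUIRE) that its first weak/strong reference has appeared.
 */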
static int binder_inc_node(struct binder_node *node, int strong, int internal,
			   struct list_head *target_list)
{
	if (strong) {
		if (internal) {
			if (target_list == NULL &&
			    node->internal_strong_refs == 0 &&
			    !(node == binder_context_mgr_node &&
			    node->has_strong_ref)) {
				pr_err("invalid inc strong node for %d\n",
					node->debug_id);
				return -EINVAL;
			}
			node->internal_strong_refs++;
		} else
			node->local_strong_refs++;
		if (!node->has_strong_ref && target_list) {
			list_del_init(&node->work.entry);
			list_add_tail(&node->work.entry, target_list);
		}
	} else {
		if (!internal)
			node->local_weak_refs++;
		if (!node->has_weak_ref && list_empty(&node->work.entry)) {
			if (target_list == NULL) {
				pr_err("invalid inc weak node for %d\n",
					node->debug_id);
				return -EINVAL;
			}
			list_add_tail(&node->work.entry, target_list);
		}
	}
	return 0;
}

static int binder_dec_node(struct binder_node *node, int strong, int internal)
{
	if (strong) {
		if (internal)
			node->internal_strong_refs--;
		else
			node->local_strong_refs--;
		if (node->local_strong_refs || node->internal_strong_refs)
			return 0;
	} else {
		if (!internal)
			node->local_weak_refs--;
		if (node->local_weak_refs || !hlist_empty(&node->refs))
			return 0;
	}
	if (node->proc && (node->has_strong_ref || node->has_weak_ref)) {
		if (list_empty(&node->work.entry)) {
			list_add_tail(&node->work.entry, &node->proc->todo);
			wake_up_interruptible(&node->proc->wait);
		}
	} else {
		if (hlist_empty(&node->refs) && !node->local_strong_refs &&
		    !node->local_weak_refs) {
			list_del_init(&node->work.entry);
			if (node->proc) {
				rb_erase(&node->rb_node, &node->proc->nodes);
				binder_debug(BINDER_DEBUG_INTERNAL_REFS,
					     "refless node %d deleted\n",
					     node->debug_id);
			} else {
				hlist_del(&node->dead_node);
				binder_debug(BINDER_DEBUG_INTERNAL_REFS,
					     "dead node %d deleted\n",
					     node->debug_id);
			}
			kfree(node);
			binder_stats_deleted(BINDER_STAT_NODE);
		}
	}

	return 0;
}

static struct binder_ref *binder_get_ref(struct binder_proc *proc,
					 uint32_t desc)
{
	struct rb_node *n = proc->refs_by_desc.rb_node;
	struct binder_ref *ref;

	while (n) {
		ref = rb_entry(n, struct binder_ref, rb_node_desc);

		if (desc < ref->desc)
			n = n->rb_left;
		else if (desc > ref->desc)
			n = n->rb_right;
		else
			return ref;
	}
	return NULL;
}

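/*
 * Look up, or create, this proc's ref to a node. A new ref gets the
 * lowest unused descriptor; desc 0 is reserved for the context manager.
 */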
static struct binder_ref *binder_get_ref_for_node(struct binder_proc *proc,
						  struct binder_node *node)
{
	struct rb_node *n;
	struct rb_node **p = &proc->refs_by_node.rb_node;
	struct rb_node *parent = NULL;
	struct binder_ref *ref, *new_ref;

	while (*p) {
		parent = *p;
		ref = rb_entry(parent, struct binder_ref, rb_node_node);

		if (node < ref->node)
			p = &(*p)->rb_left;
		else if (node > ref->node)
			p = &(*p)->rb_right;
		else
			return ref;
	}
	new_ref = kzalloc(sizeof(*ref), GFP_KERNEL);
	if (new_ref == NULL)
		return NULL;
	binder_stats_created(BINDER_STAT_REF);
	new_ref->debug_id = ++binder_last_id;
	new_ref->proc = proc;
	new_ref->node = node;
	rb_link_node(&new_ref->rb_node_node, parent, p);
	rb_insert_color(&new_ref->rb_node_node, &proc->refs_by_node);

	new_ref->desc = (node == binder_context_mgr_node) ? 0 : 1;
	for (n = rb_first(&proc->refs_by_desc); n != NULL; n = rb_next(n)) {
		ref = rb_entry(n, struct binder_ref, rb_node_desc);
		if (ref->desc > new_ref->desc)
			break;
		new_ref->desc = ref->desc + 1;
	}

	p = &proc->refs_by_desc.rb_node;
	while (*p) {
		parent = *p;
		ref = rb_entry(parent, struct binder_ref, rb_node_desc);

		if (new_ref->desc < ref->desc)
			p = &(*p)->rb_left;
		else if (new_ref->desc > ref->desc)
			p = &(*p)->rb_right;
		else
			BUG();
	}
	rb_link_node(&new_ref->rb_node_desc, parent, p);
	rb_insert_color(&new_ref->rb_node_desc, &proc->refs_by_desc);
	if (node) {
		hlist_add_head(&new_ref->node_entry, &node->refs);

		binder_debug(BINDER_DEBUG_INTERNAL_REFS,
			     "%d new ref %d desc %d for node %d\n",
			     proc->pid, new_ref->debug_id, new_ref->desc,
			     node->debug_id);
	} else {
		binder_debug(BINDER_DEBUG_INTERNAL_REFS,
			     "%d new ref %d desc %d for dead node\n",
			     proc->pid, new_ref->debug_id, new_ref->desc);
	}
	return new_ref;
}

static void binder_delete_ref(struct binder_ref *ref)
{
	binder_debug(BINDER_DEBUG_INTERNAL_REFS,
		     "%d delete ref %d desc %d for node %d\n",
		     ref->proc->pid, ref->debug_id, ref->desc,
		     ref->node->debug_id);

	rb_erase(&ref->rb_node_desc, &ref->proc->refs_by_desc);
	rb_erase(&ref->rb_node_node, &ref->proc->refs_by_node);
	if (ref->strong)
		binder_dec_node(ref->node, 1, 1);
	hlist_del(&ref->node_entry);
	binder_dec_node(ref->node, 0, 1);
	if (ref->death) {
		binder_debug(BINDER_DEBUG_DEAD_BINDER,
			     "%d delete ref %d desc %d has death notification\n",
			     ref->proc->pid, ref->debug_id, ref->desc);
		list_del(&ref->death->work.entry);
		kfree(ref->death);
		binder_stats_deleted(BINDER_STAT_DEATH);
	}
	kfree(ref);
	binder_stats_deleted(BINDER_STAT_REF);
}

static int binder_inc_ref(struct binder_ref *ref, int strong,
			  struct list_head *target_list)
{
	int ret;

	if (strong) {
		if (ref->strong == 0) {
			ret = binder_inc_node(ref->node, 1, 1, target_list);
			if (ret)
				return ret;
		}
		ref->strong++;
	} else {
		if (ref->weak == 0) {
			ret = binder_inc_node(ref->node, 0, 1, target_list);
			if (ret)
				return ret;
		}
		ref->weak++;
	}
	return 0;
}

static int binder_dec_ref(struct binder_ref *ref, int strong)
{
	if (strong) {
		if (ref->strong == 0) {
			binder_user_error("%d invalid dec strong, ref %d desc %d s %d w %d\n",
					  ref->proc->pid, ref->debug_id,
					  ref->desc, ref->strong, ref->weak);
			return -EINVAL;
		}
		ref->strong--;
		if (ref->strong == 0) {
			int ret;

			ret = binder_dec_node(ref->node, strong, 1);
			if (ret)
				return ret;
		}
	} else {
		if (ref->weak == 0) {
			binder_user_error("%d invalid dec weak, ref %d desc %d s %d w %d\n",
					  ref->proc->pid, ref->debug_id,
					  ref->desc, ref->strong, ref->weak);
			return -EINVAL;
		}
		ref->weak--;
	}
	if (ref->strong == 0 && ref->weak == 0)
		binder_delete_ref(ref);
	return 0;
}

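/*
 * Unwind one entry from the target thread's transaction stack and free
 * the transaction itself (its buffer, if any, is detached first).
 */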
static void binder_pop_transaction(struct binder_thread *target_thread,
				   struct binder_transaction *t)
{
	if (target_thread) {
		BUG_ON(target_thread->transaction_stack != t);
		BUG_ON(target_thread->transaction_stack->from != target_thread);
		target_thread->transaction_stack =
			target_thread->transaction_stack->from_parent;
		t->from = NULL;
	}
	t->need_reply = 0;
	if (t->buffer)
		t->buffer->transaction = NULL;
	kfree(t);
	binder_stats_deleted(BINDER_STAT_TRANSACTION);
}

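/*
 * Deliver error_code to whoever is waiting on transaction t; if the
 * immediate sender is dead, keep walking up the from_parent chain so
 * that some ancestor still gets woken with the failure.
 */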
static void binder_send_failed_reply(struct binder_transaction *t,
				     uint32_t error_code)
{
	struct binder_thread *target_thread;

	BUG_ON(t->flags & TF_ONE_WAY);
	while (1) {
		target_thread = t->from;
		if (target_thread) {
			if (target_thread->return_error != BR_OK &&
			    target_thread->return_error2 == BR_OK) {
				target_thread->return_error2 =
					target_thread->return_error;
				target_thread->return_error = BR_OK;
			}
			if (target_thread->return_error == BR_OK) {
				binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
					     "send failed reply for transaction %d to %d:%d\n",
					     t->debug_id, target_thread->proc->pid,
					     target_thread->pid);

				binder_pop_transaction(target_thread, t);
				target_thread->return_error = error_code;
				wake_up_interruptible(&target_thread->wait);
			} else {
				pr_err("reply failed, target thread, %d:%d, has error code %d already\n",
					target_thread->proc->pid,
					target_thread->pid,
					target_thread->return_error);
			}
			return;
		} else {
			struct binder_transaction *next = t->from_parent;

			binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
				     "send failed reply for transaction %d, target dead\n",
				     t->debug_id);

			binder_pop_transaction(target_thread, t);
			if (next == NULL) {
				binder_debug(BINDER_DEBUG_DEAD_BINDER,
					     "reply failed, no target thread at root\n");
				return;
			}
			t = next;
			binder_debug(BINDER_DEBUG_DEAD_BINDER,
				     "reply failed, no target thread -- retry %d\n",
				     t->debug_id);
		}
	}
}

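/*
 * Drop the references taken for every flat_binder_object in a buffer;
 * failed_at limits the walk when a transaction was only partly set up.
 */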
static void binder_transaction_buffer_release(struct binder_proc *proc,
					      struct binder_buffer *buffer,
					      binder_size_t *failed_at)
{
	binder_size_t *offp, *off_end;
	int debug_id = buffer->debug_id;

	binder_debug(BINDER_DEBUG_TRANSACTION,
		     "%d buffer release %d, size %zd-%zd, failed at %p\n",
		     proc->pid, buffer->debug_id,
		     buffer->data_size, buffer->offsets_size, failed_at);

	if (buffer->target_node)
		binder_dec_node(buffer->target_node, 1, 0);

	offp = (binder_size_t *)(buffer->data +
				 ALIGN(buffer->data_size, sizeof(void *)));
	if (failed_at)
		off_end = failed_at;
	else
		off_end = (void *)offp + buffer->offsets_size;
	for (; offp < off_end; offp++) {
		struct flat_binder_object *fp;

		if (*offp > buffer->data_size - sizeof(*fp) ||
		    buffer->data_size < sizeof(*fp) ||
		    !IS_ALIGNED(*offp, sizeof(u32))) {
			pr_err("transaction release %d bad offset %lld, size %zd\n",
			       debug_id, (u64)*offp, buffer->data_size);
			continue;
		}
		fp = (struct flat_binder_object *)(buffer->data + *offp);
		switch (fp->type) {
		case BINDER_TYPE_BINDER:
		case BINDER_TYPE_WEAK_BINDER: {
			struct binder_node *node = binder_get_node(proc, fp->binder);

			if (node == NULL) {
				pr_err("transaction release %d bad node %016llx\n",
				       debug_id, (u64)fp->binder);
				break;
			}
			binder_debug(BINDER_DEBUG_TRANSACTION,
				     "        node %d u%016llx\n",
				     node->debug_id, (u64)node->ptr);
			binder_dec_node(node, fp->type == BINDER_TYPE_BINDER, 0);
		} break;
		case BINDER_TYPE_HANDLE:
		case BINDER_TYPE_WEAK_HANDLE: {
			struct binder_ref *ref = binder_get_ref(proc, fp->handle);

			if (ref == NULL) {
				pr_err("transaction release %d bad handle %d\n",
				       debug_id, fp->handle);
				break;
			}
			binder_debug(BINDER_DEBUG_TRANSACTION,
				     "        ref %d desc %d (node %d)\n",
				     ref->debug_id, ref->desc, ref->node->debug_id);
			binder_dec_ref(ref, fp->type == BINDER_TYPE_HANDLE);
		} break;

		case BINDER_TYPE_FD:
			binder_debug(BINDER_DEBUG_TRANSACTION,
				     "        fd %d\n", fp->handle);
			if (failed_at)
				task_close_fd(proc, fp->handle);
			break;

		default:
			pr_err("transaction release %d bad object type %x\n",
			       debug_id, fp->type);
			break;
		}
	}
}

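/*
 * The core of the driver: resolve the target (reply stack, handle, or
 * context manager), copy the payload into a buffer allocated from the
 * target's space, translate each embedded object (node <-> handle, fd
 * duplication), then queue the work and wake the target.
 */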
static void binder_transaction(struct binder_proc *proc,
			       struct binder_thread *thread,
			       struct binder_transaction_data *tr, int reply)
{
	struct binder_transaction *t;
	struct binder_work *tcomplete;
	binder_size_t *offp, *off_end;
	struct binder_proc *target_proc;
	struct binder_thread *target_thread = NULL;
	struct binder_node *target_node = NULL;
	struct list_head *target_list;
	wait_queue_head_t *target_wait;
	struct binder_transaction *in_reply_to = NULL;
	struct binder_transaction_log_entry *e;
	uint32_t return_error;

	e = binder_transaction_log_add(&binder_transaction_log);
	e->call_type = reply ? 2 : !!(tr->flags & TF_ONE_WAY);
	e->from_proc = proc->pid;
	e->from_thread = thread->pid;
	e->target_handle = tr->target.handle;
	e->data_size = tr->data_size;
	e->offsets_size = tr->offsets_size;

	if (reply) {
		in_reply_to = thread->transaction_stack;
		if (in_reply_to == NULL) {
			binder_user_error("%d:%d got reply transaction with no transaction stack\n",
					  proc->pid, thread->pid);
			return_error = BR_FAILED_REPLY;
			goto err_empty_call_stack;
		}
		binder_set_nice(in_reply_to->saved_priority);
		if (in_reply_to->to_thread != thread) {
			binder_user_error("%d:%d got reply transaction with bad transaction stack, transaction %d has target %d:%d\n",
				proc->pid, thread->pid, in_reply_to->debug_id,
				in_reply_to->to_proc ?
				in_reply_to->to_proc->pid : 0,
				in_reply_to->to_thread ?
				in_reply_to->to_thread->pid : 0);
			return_error = BR_FAILED_REPLY;
			in_reply_to = NULL;
			goto err_bad_call_stack;
		}
		thread->transaction_stack = in_reply_to->to_parent;
		target_thread = in_reply_to->from;
		if (target_thread == NULL) {
			return_error = BR_DEAD_REPLY;
			goto err_dead_binder;
		}
		if (target_thread->transaction_stack != in_reply_to) {
			binder_user_error("%d:%d got reply transaction with bad target transaction stack %d, expected %d\n",
				proc->pid, thread->pid,
				target_thread->transaction_stack ?
				target_thread->transaction_stack->debug_id : 0,
				in_reply_to->debug_id);
			return_error = BR_FAILED_REPLY;
			in_reply_to = NULL;
			target_thread = NULL;
			goto err_dead_binder;
		}
		target_proc = target_thread->proc;
	} else {
		if (tr->target.handle) {
			struct binder_ref *ref;

			ref = binder_get_ref(proc, tr->target.handle);
			if (ref == NULL) {
				binder_user_error("%d:%d got transaction to invalid handle\n",
					proc->pid, thread->pid);
				return_error = BR_FAILED_REPLY;
				goto err_invalid_target_handle;
			}
			target_node = ref->node;
		} else {
			target_node = binder_context_mgr_node;
			if (target_node == NULL) {
				return_error = BR_DEAD_REPLY;
				goto err_no_context_mgr_node;
			}
		}
		e->to_node = target_node->debug_id;
		target_proc = target_node->proc;
		if (target_proc == NULL) {
			return_error = BR_DEAD_REPLY;
			goto err_dead_binder;
		}
		if (!(tr->flags & TF_ONE_WAY) && thread->transaction_stack) {
			struct binder_transaction *tmp;

			tmp = thread->transaction_stack;
			if (tmp->to_thread != thread) {
				binder_user_error("%d:%d got new transaction with bad transaction stack, transaction %d has target %d:%d\n",
					proc->pid, thread->pid, tmp->debug_id,
					tmp->to_proc ? tmp->to_proc->pid : 0,
					tmp->to_thread ?
					tmp->to_thread->pid : 0);
				return_error = BR_FAILED_REPLY;
				goto err_bad_call_stack;
			}
			while (tmp) {
				if (tmp->from && tmp->from->proc == target_proc)
					target_thread = tmp->from;
				tmp = tmp->from_parent;
			}
		}
	}
	if (target_thread) {
		e->to_thread = target_thread->pid;
		target_list = &target_thread->todo;
		target_wait = &target_thread->wait;
	} else {
		target_list = &target_proc->todo;
		target_wait = &target_proc->wait;
	}
	e->to_proc = target_proc->pid;

	/* TODO: reuse incoming transaction for reply */
	t = kzalloc(sizeof(*t), GFP_KERNEL);
	if (t == NULL) {
		return_error = BR_FAILED_REPLY;
		goto err_alloc_t_failed;
	}
	binder_stats_created(BINDER_STAT_TRANSACTION);

	tcomplete = kzalloc(sizeof(*tcomplete), GFP_KERNEL);
	if (tcomplete == NULL) {
		return_error = BR_FAILED_REPLY;
		goto err_alloc_tcomplete_failed;
	}
	binder_stats_created(BINDER_STAT_TRANSACTION_COMPLETE);

	t->debug_id = ++binder_last_id;
	e->debug_id = t->debug_id;

	if (reply)
		binder_debug(BINDER_DEBUG_TRANSACTION,
			     "%d:%d BC_REPLY %d -> %d:%d, data %016llx-%016llx size %lld-%lld\n",
			     proc->pid, thread->pid, t->debug_id,
			     target_proc->pid, target_thread->pid,
			     (u64)tr->data.ptr.buffer,
			     (u64)tr->data.ptr.offsets,
			     (u64)tr->data_size, (u64)tr->offsets_size);
	else
		binder_debug(BINDER_DEBUG_TRANSACTION,
			     "%d:%d BC_TRANSACTION %d -> %d - node %d, data %016llx-%016llx size %lld-%lld\n",
			     proc->pid, thread->pid, t->debug_id,
			     target_proc->pid, target_node->debug_id,
			     (u64)tr->data.ptr.buffer,
			     (u64)tr->data.ptr.offsets,
			     (u64)tr->data_size, (u64)tr->offsets_size);

	if (!reply && !(tr->flags & TF_ONE_WAY))
		t->from = thread;
	else
		t->from = NULL;
	t->sender_euid = task_euid(proc->tsk);
	t->to_proc = target_proc;
	t->to_thread = target_thread;
	t->code = tr->code;
	t->flags = tr->flags;
	t->priority = task_nice(current);

	trace_binder_transaction(reply, t, target_node);

	t->buffer = binder_alloc_buf(target_proc, tr->data_size,
		tr->offsets_size, !reply && (t->flags & TF_ONE_WAY));
	if (t->buffer == NULL) {
		return_error = BR_FAILED_REPLY;
		goto err_binder_alloc_buf_failed;
	}
	t->buffer->allow_user_free = 0;
	t->buffer->debug_id = t->debug_id;
	t->buffer->transaction = t;
	t->buffer->target_node = target_node;
	trace_binder_transaction_alloc_buf(t->buffer);
	if (target_node)
		binder_inc_node(target_node, 1, 0, NULL);

	offp = (binder_size_t *)(t->buffer->data +
				 ALIGN(tr->data_size, sizeof(void *)));

	if (copy_from_user(t->buffer->data, (const void __user *)(uintptr_t)
			   tr->data.ptr.buffer, tr->data_size)) {
		binder_user_error("%d:%d got transaction with invalid data ptr\n",
				proc->pid, thread->pid);
		return_error = BR_FAILED_REPLY;
		goto err_copy_data_failed;
	}
	if (copy_from_user(offp, (const void __user *)(uintptr_t)
			   tr->data.ptr.offsets, tr->offsets_size)) {
		binder_user_error("%d:%d got transaction with invalid offsets ptr\n",
				proc->pid, thread->pid);
		return_error = BR_FAILED_REPLY;
		goto err_copy_data_failed;
	}
	if (!IS_ALIGNED(tr->offsets_size, sizeof(binder_size_t))) {
		binder_user_error("%d:%d got transaction with invalid offsets size, %lld\n",
				proc->pid, thread->pid, (u64)tr->offsets_size);
		return_error = BR_FAILED_REPLY;
		goto err_bad_offset;
	}
	off_end = (void *)offp + tr->offsets_size;
	for (; offp < off_end; offp++) {
		struct flat_binder_object *fp;

		if (*offp > t->buffer->data_size - sizeof(*fp) ||
		    t->buffer->data_size < sizeof(*fp) ||
		    !IS_ALIGNED(*offp, sizeof(u32))) {
			binder_user_error("%d:%d got transaction with invalid offset, %lld\n",
					  proc->pid, thread->pid, (u64)*offp);
			return_error = BR_FAILED_REPLY;
			goto err_bad_offset;
		}
		fp = (struct flat_binder_object *)(t->buffer->data + *offp);
		switch (fp->type) {
		case BINDER_TYPE_BINDER:
		case BINDER_TYPE_WEAK_BINDER: {
			struct binder_ref *ref;
			struct binder_node *node = binder_get_node(proc, fp->binder);

			if (node == NULL) {
				node = binder_new_node(proc, fp->binder, fp->cookie);
				if (node == NULL) {
					return_error = BR_FAILED_REPLY;
					goto err_binder_new_node_failed;
				}
				node->min_priority = fp->flags & FLAT_BINDER_FLAG_PRIORITY_MASK;
				node->accept_fds = !!(fp->flags & FLAT_BINDER_FLAG_ACCEPTS_FDS);
			}
			if (fp->cookie != node->cookie) {
				binder_user_error("%d:%d sending u%016llx node %d, cookie mismatch %016llx != %016llx\n",
					proc->pid, thread->pid,
					(u64)fp->binder, node->debug_id,
					(u64)fp->cookie, (u64)node->cookie);
				return_error = BR_FAILED_REPLY;
				goto err_binder_get_ref_for_node_failed;
			}
			ref = binder_get_ref_for_node(target_proc, node);
			if (ref == NULL) {
				return_error = BR_FAILED_REPLY;
				goto err_binder_get_ref_for_node_failed;
			}
			if (fp->type == BINDER_TYPE_BINDER)
				fp->type = BINDER_TYPE_HANDLE;
			else
				fp->type = BINDER_TYPE_WEAK_HANDLE;
			fp->handle = ref->desc;
			binder_inc_ref(ref, fp->type == BINDER_TYPE_HANDLE,
				       &thread->todo);

			trace_binder_transaction_node_to_ref(t, node, ref);
			binder_debug(BINDER_DEBUG_TRANSACTION,
				     "        node %d u%016llx -> ref %d desc %d\n",
				     node->debug_id, (u64)node->ptr,
				     ref->debug_id, ref->desc);
		} break;
		case BINDER_TYPE_HANDLE:
		case BINDER_TYPE_WEAK_HANDLE: {
			struct binder_ref *ref = binder_get_ref(proc, fp->handle);

			if (ref == NULL) {
				binder_user_error("%d:%d got transaction with invalid handle, %d\n",
						proc->pid,
						thread->pid, fp->handle);
				return_error = BR_FAILED_REPLY;
				goto err_binder_get_ref_failed;
			}
			if (ref->node->proc == target_proc) {
				if (fp->type == BINDER_TYPE_HANDLE)
					fp->type = BINDER_TYPE_BINDER;
				else
					fp->type = BINDER_TYPE_WEAK_BINDER;
				fp->binder = ref->node->ptr;
				fp->cookie = ref->node->cookie;
				binder_inc_node(ref->node, fp->type == BINDER_TYPE_BINDER, 0, NULL);
				trace_binder_transaction_ref_to_node(t, ref);
				binder_debug(BINDER_DEBUG_TRANSACTION,
					     "        ref %d desc %d -> node %d u%016llx\n",
					     ref->debug_id, ref->desc, ref->node->debug_id,
					     (u64)ref->node->ptr);
			} else {
				struct binder_ref *new_ref;

				new_ref = binder_get_ref_for_node(target_proc, ref->node);
				if (new_ref == NULL) {
					return_error = BR_FAILED_REPLY;
					goto err_binder_get_ref_for_node_failed;
				}
				fp->handle = new_ref->desc;
				binder_inc_ref(new_ref, fp->type == BINDER_TYPE_HANDLE, NULL);
				trace_binder_transaction_ref_to_ref(t, ref,
								    new_ref);
				binder_debug(BINDER_DEBUG_TRANSACTION,
					     "        ref %d desc %d -> ref %d desc %d (node %d)\n",
					     ref->debug_id, ref->desc, new_ref->debug_id,
					     new_ref->desc, ref->node->debug_id);
			}
		} break;

		case BINDER_TYPE_FD: {
			int target_fd;
			struct file *file;

			if (reply) {
				if (!(in_reply_to->flags & TF_ACCEPT_FDS)) {
					binder_user_error("%d:%d got reply with fd, %d, but target does not allow fds\n",
						proc->pid, thread->pid, fp->handle);
					return_error = BR_FAILED_REPLY;
					goto err_fd_not_allowed;
				}
			} else if (!target_node->accept_fds) {
				binder_user_error("%d:%d got transaction with fd, %d, but target does not allow fds\n",
					proc->pid, thread->pid, fp->handle);
				return_error = BR_FAILED_REPLY;
				goto err_fd_not_allowed;
			}

			file = fget(fp->handle);
			if (file == NULL) {
				binder_user_error("%d:%d got transaction with invalid fd, %d\n",
					proc->pid, thread->pid, fp->handle);
				return_error = BR_FAILED_REPLY;
				goto err_fget_failed;
			}
			target_fd = task_get_unused_fd_flags(target_proc, O_CLOEXEC);
			if (target_fd < 0) {
				fput(file);
				return_error = BR_FAILED_REPLY;
				goto err_get_unused_fd_failed;
			}
			task_fd_install(target_proc, target_fd, file);
			trace_binder_transaction_fd(t, fp->handle, target_fd);
			binder_debug(BINDER_DEBUG_TRANSACTION,
				     "        fd %d -> %d\n", fp->handle, target_fd);
			/* TODO: fput? */
			fp->handle = target_fd;
		} break;

		default:
			binder_user_error("%d:%d got transaction with invalid object type, %x\n",
				proc->pid, thread->pid, fp->type);
			return_error = BR_FAILED_REPLY;
			goto err_bad_object_type;
		}
	}
	if (reply) {
		BUG_ON(t->buffer->async_transaction != 0);
		binder_pop_transaction(target_thread, in_reply_to);
	} else if (!(t->flags & TF_ONE_WAY)) {
		BUG_ON(t->buffer->async_transaction != 0);
		t->need_reply = 1;
		t->from_parent = thread->transaction_stack;
		thread->transaction_stack = t;
	} else {
		BUG_ON(target_node == NULL);
		BUG_ON(t->buffer->async_transaction != 1);
		if (target_node->has_async_transaction) {
			target_list = &target_node->async_todo;
			target_wait = NULL;
		} else
			target_node->has_async_transaction = 1;
	}
	t->work.type = BINDER_WORK_TRANSACTION;
	list_add_tail(&t->work.entry, target_list);
	tcomplete->type = BINDER_WORK_TRANSACTION_COMPLETE;
	list_add_tail(&tcomplete->entry, &thread->todo);
	if (target_wait)
		wake_up_interruptible(target_wait);
	return;

err_get_unused_fd_failed:
err_fget_failed:
err_fd_not_allowed:
err_binder_get_ref_for_node_failed:
err_binder_get_ref_failed:
err_binder_new_node_failed:
err_bad_object_type:
err_bad_offset:
err_copy_data_failed:
	trace_binder_transaction_failed_buffer_release(t->buffer);
	binder_transaction_buffer_release(target_proc, t->buffer, offp);
	t->buffer->transaction = NULL;
	binder_free_buf(target_proc, t->buffer);
err_binder_alloc_buf_failed:
	kfree(tcomplete);
	binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
err_alloc_tcomplete_failed:
	kfree(t);
	binder_stats_deleted(BINDER_STAT_TRANSACTION);
err_alloc_t_failed:
err_bad_call_stack:
err_empty_call_stack:
err_dead_binder:
err_invalid_target_handle:
err_no_context_mgr_node:
	binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
		     "%d:%d transaction failed %d, size %lld-%lld\n",
		     proc->pid, thread->pid, return_error,
		     (u64)tr->data_size, (u64)tr->offsets_size);

	{
		struct binder_transaction_log_entry *fe;

		fe = binder_transaction_log_add(&binder_transaction_log_failed);
		*fe = *e;
	}

	BUG_ON(thread->return_error != BR_OK);
	if (in_reply_to) {
		thread->return_error = BR_TRANSACTION_COMPLETE;
		binder_send_failed_reply(in_reply_to, return_error);
	} else
		thread->return_error = return_error;
}

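/*
 * Consume BC_* commands from the userspace write buffer, advancing
 * *consumed past each command and its payload as it is processed.
 */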
1730 static int binder_thread_write(struct binder_proc *proc,
1731 struct binder_thread *thread,
1732 binder_uintptr_t binder_buffer, size_t size,
1733 binder_size_t *consumed)
1735 uint32_t cmd;
1736 void __user *buffer = (void __user *)(uintptr_t)binder_buffer;
1737 void __user *ptr = buffer + *consumed;
1738 void __user *end = buffer + size;
1740 while (ptr < end && thread->return_error == BR_OK) {
1741 if (get_user(cmd, (uint32_t __user *)ptr))
1742 return -EFAULT;
1743 ptr += sizeof(uint32_t);
1744 trace_binder_command(cmd);
1745 if (_IOC_NR(cmd) < ARRAY_SIZE(binder_stats.bc)) {
1746 binder_stats.bc[_IOC_NR(cmd)]++;
1747 proc->stats.bc[_IOC_NR(cmd)]++;
1748 thread->stats.bc[_IOC_NR(cmd)]++;
1750 switch (cmd) {
1751 case BC_INCREFS:
1752 case BC_ACQUIRE:
1753 case BC_RELEASE:
1754 case BC_DECREFS: {
1755 uint32_t target;
1756 struct binder_ref *ref;
1757 const char *debug_string;
1759 if (get_user(target, (uint32_t __user *)ptr))
1760 return -EFAULT;
1761 ptr += sizeof(uint32_t);
1762 if (target == 0 && binder_context_mgr_node &&
1763 (cmd == BC_INCREFS || cmd == BC_ACQUIRE)) {
1764 ref = binder_get_ref_for_node(proc,
1765 binder_context_mgr_node);
1766 if (ref->desc != target) {
1767 binder_user_error("%d:%d tried to acquire reference to desc 0, got %d instead\n",
1768 proc->pid, thread->pid,
1769 ref->desc);
1771 } else
1772 ref = binder_get_ref(proc, target);
1773 if (ref == NULL) {
1774 binder_user_error("%d:%d refcount change on invalid ref %d\n",
1775 proc->pid, thread->pid, target);
1776 break;
1778 switch (cmd) {
1779 case BC_INCREFS:
1780 debug_string = "IncRefs";
1781 binder_inc_ref(ref, 0, NULL);
1782 break;
1783 case BC_ACQUIRE:
1784 debug_string = "Acquire";
1785 binder_inc_ref(ref, 1, NULL);
1786 break;
1787 case BC_RELEASE:
1788 debug_string = "Release";
1789 binder_dec_ref(ref, 1);
1790 break;
1791 case BC_DECREFS:
1792 default:
1793 debug_string = "DecRefs";
1794 binder_dec_ref(ref, 0);
1795 break;
1797 binder_debug(BINDER_DEBUG_USER_REFS,
1798 "%d:%d %s ref %d desc %d s %d w %d for node %d\n",
1799 proc->pid, thread->pid, debug_string, ref->debug_id,
1800 ref->desc, ref->strong, ref->weak, ref->node->debug_id);
1801 break;
1803 case BC_INCREFS_DONE:
1804 case BC_ACQUIRE_DONE: {
1805 binder_uintptr_t node_ptr;
1806 binder_uintptr_t cookie;
1807 struct binder_node *node;
1809 if (get_user(node_ptr, (binder_uintptr_t __user *)ptr))
1810 return -EFAULT;
1811 ptr += sizeof(binder_uintptr_t);
1812 if (get_user(cookie, (binder_uintptr_t __user *)ptr))
1813 return -EFAULT;
1814 ptr += sizeof(binder_uintptr_t);
1815 node = binder_get_node(proc, node_ptr);
1816 if (node == NULL) {
1817 binder_user_error("%d:%d %s u%016llx no match\n",
1818 proc->pid, thread->pid,
1819 cmd == BC_INCREFS_DONE ?
1820 "BC_INCREFS_DONE" :
1821 "BC_ACQUIRE_DONE",
1822 (u64)node_ptr);
1823 break;
1825 if (cookie != node->cookie) {
1826 binder_user_error("%d:%d %s u%016llx node %d cookie mismatch %016llx != %016llx\n",
1827 proc->pid, thread->pid,
1828 cmd == BC_INCREFS_DONE ?
1829 "BC_INCREFS_DONE" : "BC_ACQUIRE_DONE",
1830 (u64)node_ptr, node->debug_id,
1831 (u64)cookie, (u64)node->cookie);
1832 break;
1834 if (cmd == BC_ACQUIRE_DONE) {
1835 if (node->pending_strong_ref == 0) {
1836 binder_user_error("%d:%d BC_ACQUIRE_DONE node %d has no pending acquire request\n",
1837 proc->pid, thread->pid,
1838 node->debug_id);
1839 break;
1841 node->pending_strong_ref = 0;
1842 } else {
1843 if (node->pending_weak_ref == 0) {
1844 binder_user_error("%d:%d BC_INCREFS_DONE node %d has no pending increfs request\n",
1845 proc->pid, thread->pid,
1846 node->debug_id);
1847 break;
1849 node->pending_weak_ref = 0;
1851 binder_dec_node(node, cmd == BC_ACQUIRE_DONE, 0);
1852 binder_debug(BINDER_DEBUG_USER_REFS,
1853 "%d:%d %s node %d ls %d lw %d\n",
1854 proc->pid, thread->pid,
1855 cmd == BC_INCREFS_DONE ? "BC_INCREFS_DONE" : "BC_ACQUIRE_DONE",
1856 node->debug_id, node->local_strong_refs, node->local_weak_refs);
1857 break;
1859 case BC_ATTEMPT_ACQUIRE:
1860 pr_err("BC_ATTEMPT_ACQUIRE not supported\n");
1861 return -EINVAL;
1862 case BC_ACQUIRE_RESULT:
1863 pr_err("BC_ACQUIRE_RESULT not supported\n");
1864 return -EINVAL;
1866 case BC_FREE_BUFFER: {
1867 binder_uintptr_t data_ptr;
1868 struct binder_buffer *buffer;
1870 if (get_user(data_ptr, (binder_uintptr_t __user *)ptr))
1871 return -EFAULT;
1872 ptr += sizeof(binder_uintptr_t);
1874 buffer = binder_buffer_lookup(proc, data_ptr);
1875 if (buffer == NULL) {
1876 binder_user_error("%d:%d BC_FREE_BUFFER u%016llx no match\n",
1877 proc->pid, thread->pid, (u64)data_ptr);
1878 break;
1880 if (!buffer->allow_user_free) {
1881 binder_user_error("%d:%d BC_FREE_BUFFER u%016llx matched unreturned buffer\n",
1882 proc->pid, thread->pid, (u64)data_ptr);
1883 break;
1885 binder_debug(BINDER_DEBUG_FREE_BUFFER,
1886 "%d:%d BC_FREE_BUFFER u%016llx found buffer %d for %s transaction\n",
1887 proc->pid, thread->pid, (u64)data_ptr,
1888 buffer->debug_id,
1889 buffer->transaction ? "active" : "finished");
1891 if (buffer->transaction) {
1892 buffer->transaction->buffer = NULL;
1893 buffer->transaction = NULL;
1895 if (buffer->async_transaction && buffer->target_node) {
1896 BUG_ON(!buffer->target_node->has_async_transaction);
1897 if (list_empty(&buffer->target_node->async_todo))
1898 buffer->target_node->has_async_transaction = 0;
1899 else
1900 list_move_tail(buffer->target_node->async_todo.next, &thread->todo);
1902 trace_binder_transaction_buffer_release(buffer);
1903 binder_transaction_buffer_release(proc, buffer, NULL);
1904 binder_free_buf(proc, buffer);
1905 break;
1908 case BC_TRANSACTION:
1909 case BC_REPLY: {
1910 struct binder_transaction_data tr;
1912 if (copy_from_user(&tr, ptr, sizeof(tr)))
1913 return -EFAULT;
1914 ptr += sizeof(tr);
1915 binder_transaction(proc, thread, &tr, cmd == BC_REPLY);
1916 break;
1919 case BC_REGISTER_LOOPER:
1920 binder_debug(BINDER_DEBUG_THREADS,
1921 "%d:%d BC_REGISTER_LOOPER\n",
1922 proc->pid, thread->pid);
1923 if (thread->looper & BINDER_LOOPER_STATE_ENTERED) {
1924 thread->looper |= BINDER_LOOPER_STATE_INVALID;
1925 binder_user_error("%d:%d ERROR: BC_REGISTER_LOOPER called after BC_ENTER_LOOPER\n",
1926 proc->pid, thread->pid);
1927 } else if (proc->requested_threads == 0) {
1928 thread->looper |= BINDER_LOOPER_STATE_INVALID;
1929 binder_user_error("%d:%d ERROR: BC_REGISTER_LOOPER called without request\n",
1930 proc->pid, thread->pid);
1931 } else {
1932 proc->requested_threads--;
1933 proc->requested_threads_started++;
1934 }
1935 thread->looper |= BINDER_LOOPER_STATE_REGISTERED;
1936 break;
1937 case BC_ENTER_LOOPER:
1938 binder_debug(BINDER_DEBUG_THREADS,
1939 "%d:%d BC_ENTER_LOOPER\n",
1940 proc->pid, thread->pid);
1941 if (thread->looper & BINDER_LOOPER_STATE_REGISTERED) {
1942 thread->looper |= BINDER_LOOPER_STATE_INVALID;
1943 binder_user_error("%d:%d ERROR: BC_ENTER_LOOPER called after BC_REGISTER_LOOPER\n",
1944 proc->pid, thread->pid);
1945 }
1946 thread->looper |= BINDER_LOOPER_STATE_ENTERED;
1947 break;
1948 case BC_EXIT_LOOPER:
1949 binder_debug(BINDER_DEBUG_THREADS,
1950 "%d:%d BC_EXIT_LOOPER\n",
1951 proc->pid, thread->pid);
1952 thread->looper |= BINDER_LOOPER_STATE_EXITED;
1953 break;
1955 case BC_REQUEST_DEATH_NOTIFICATION:
1956 case BC_CLEAR_DEATH_NOTIFICATION: {
1957 uint32_t target;
1958 binder_uintptr_t cookie;
1959 struct binder_ref *ref;
1960 struct binder_ref_death *death;
1962 if (get_user(target, (uint32_t __user *)ptr))
1963 return -EFAULT;
1964 ptr += sizeof(uint32_t);
1965 if (get_user(cookie, (binder_uintptr_t __user *)ptr))
1966 return -EFAULT;
1967 ptr += sizeof(binder_uintptr_t);
1968 ref = binder_get_ref(proc, target);
1969 if (ref == NULL) {
1970 binder_user_error("%d:%d %s invalid ref %d\n",
1971 proc->pid, thread->pid,
1972 cmd == BC_REQUEST_DEATH_NOTIFICATION ?
1973 "BC_REQUEST_DEATH_NOTIFICATION" :
1974 "BC_CLEAR_DEATH_NOTIFICATION",
1975 target);
1976 break;
1977 }
1979 binder_debug(BINDER_DEBUG_DEATH_NOTIFICATION,
1980 "%d:%d %s %016llx ref %d desc %d s %d w %d for node %d\n",
1981 proc->pid, thread->pid,
1982 cmd == BC_REQUEST_DEATH_NOTIFICATION ?
1983 "BC_REQUEST_DEATH_NOTIFICATION" :
1984 "BC_CLEAR_DEATH_NOTIFICATION",
1985 (u64)cookie, ref->debug_id, ref->desc,
1986 ref->strong, ref->weak, ref->node->debug_id);
1988 if (cmd == BC_REQUEST_DEATH_NOTIFICATION) {
1989 if (ref->death) {
1990 binder_user_error("%d:%d BC_REQUEST_DEATH_NOTIFICATION death notification already set\n",
1991 proc->pid, thread->pid);
1992 break;
1993 }
1994 death = kzalloc(sizeof(*death), GFP_KERNEL);
1995 if (death == NULL) {
1996 thread->return_error = BR_ERROR;
1997 binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
1998 "%d:%d BC_REQUEST_DEATH_NOTIFICATION failed\n",
1999 proc->pid, thread->pid);
2000 break;
2001 }
2002 binder_stats_created(BINDER_STAT_DEATH);
2003 INIT_LIST_HEAD(&death->work.entry);
2004 death->cookie = cookie;
2005 ref->death = death;
2006 if (ref->node->proc == NULL) {
2007 ref->death->work.type = BINDER_WORK_DEAD_BINDER;
2008 if (thread->looper & (BINDER_LOOPER_STATE_REGISTERED | BINDER_LOOPER_STATE_ENTERED)) {
2009 list_add_tail(&ref->death->work.entry, &thread->todo);
2010 } else {
2011 list_add_tail(&ref->death->work.entry, &proc->todo);
2012 wake_up_interruptible(&proc->wait);
2013 }
2014 }
2015 } else {
2016 if (ref->death == NULL) {
2017 binder_user_error("%d:%d BC_CLEAR_DEATH_NOTIFICATION death notification not active\n",
2018 proc->pid, thread->pid);
2019 break;
2020 }
2021 death = ref->death;
2022 if (death->cookie != cookie) {
2023 binder_user_error("%d:%d BC_CLEAR_DEATH_NOTIFICATION death notification cookie mismatch %016llx != %016llx\n",
2024 proc->pid, thread->pid,
2025 (u64)death->cookie,
2026 (u64)cookie);
2027 break;
2028 }
2029 ref->death = NULL;
2030 if (list_empty(&death->work.entry)) {
2031 death->work.type = BINDER_WORK_CLEAR_DEATH_NOTIFICATION;
2032 if (thread->looper & (BINDER_LOOPER_STATE_REGISTERED | BINDER_LOOPER_STATE_ENTERED)) {
2033 list_add_tail(&death->work.entry, &thread->todo);
2034 } else {
2035 list_add_tail(&death->work.entry, &proc->todo);
2036 wake_up_interruptible(&proc->wait);
2037 }
2038 } else {
2039 BUG_ON(death->work.type != BINDER_WORK_DEAD_BINDER);
2040 death->work.type = BINDER_WORK_DEAD_BINDER_AND_CLEAR;
2041 }
2042 }
2043 } break;
2044 case BC_DEAD_BINDER_DONE: {
2045 struct binder_work *w;
2046 binder_uintptr_t cookie;
2047 struct binder_ref_death *death = NULL;
2049 if (get_user(cookie, (binder_uintptr_t __user *)ptr))
2050 return -EFAULT;
2052 ptr += sizeof(cookie);
2053 list_for_each_entry(w, &proc->delivered_death, entry) {
2054 struct binder_ref_death *tmp_death = container_of(w, struct binder_ref_death, work);
2056 if (tmp_death->cookie == cookie) {
2057 death = tmp_death;
2058 break;
2059 }
2060 }
2061 binder_debug(BINDER_DEBUG_DEAD_BINDER,
2062 "%d:%d BC_DEAD_BINDER_DONE %016llx found %p\n",
2063 proc->pid, thread->pid, (u64)cookie,
2064 death);
2065 if (death == NULL) {
2066 binder_user_error("%d:%d BC_DEAD_BINDER_DONE %016llx not found\n",
2067 proc->pid, thread->pid, (u64)cookie);
2068 break;
2069 }
2071 list_del_init(&death->work.entry);
2072 if (death->work.type == BINDER_WORK_DEAD_BINDER_AND_CLEAR) {
2073 death->work.type = BINDER_WORK_CLEAR_DEATH_NOTIFICATION;
2074 if (thread->looper & (BINDER_LOOPER_STATE_REGISTERED | BINDER_LOOPER_STATE_ENTERED)) {
2075 list_add_tail(&death->work.entry, &thread->todo);
2076 } else {
2077 list_add_tail(&death->work.entry, &proc->todo);
2078 wake_up_interruptible(&proc->wait);
2079 }
2080 }
2081 } break;
2083 default:
2084 pr_err("%d:%d unknown command %d\n",
2085 proc->pid, thread->pid, cmd);
2086 return -EINVAL;
2087 }
2088 *consumed = ptr - buffer;
2089 }
2090 return 0;
2091 }
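/*
 * binder_stat_br() - account a BR_* return code in the global,
 * per-process and per-thread counters that back the debugfs "stats"
 * file. _IOC_NR() extracts the command's index into the br[] arrays.
 */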
2093 static void binder_stat_br(struct binder_proc *proc,
2094 struct binder_thread *thread, uint32_t cmd)
2095 {
2096 trace_binder_return(cmd);
2097 if (_IOC_NR(cmd) < ARRAY_SIZE(binder_stats.br)) {
2098 binder_stats.br[_IOC_NR(cmd)]++;
2099 proc->stats.br[_IOC_NR(cmd)]++;
2100 thread->stats.br[_IOC_NR(cmd)]++;
2101 }
2102 }
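/*
 * Wakeup predicates for the two wait paths in binder_thread_read():
 * a looper thread sleeping on proc->wait versus a thread sleeping on
 * its private thread->wait. BINDER_LOOPER_STATE_NEED_RETURN forces a
 * wakeup so the thread can drop back to userspace.
 */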
2104 static int binder_has_proc_work(struct binder_proc *proc,
2105 struct binder_thread *thread)
2106 {
2107 return !list_empty(&proc->todo) ||
2108 (thread->looper & BINDER_LOOPER_STATE_NEED_RETURN);
2109 }
2111 static int binder_has_thread_work(struct binder_thread *thread)
2112 {
2113 return !list_empty(&thread->todo) || thread->return_error != BR_OK ||
2114 (thread->looper & BINDER_LOOPER_STATE_NEED_RETURN);
2115 }
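/*
 * binder_thread_read() - fill the user read buffer with BR_* commands.
 * Pending return errors are delivered first; a thread then drains its
 * own todo list, and only a looper thread with nothing private to do
 * consumes work from the per-process todo list. The global binder lock
 * is dropped around the freezable wait, and a single BR_SPAWN_LOOPER
 * is emitted at the end when every started looper thread is busy and
 * max_threads has not been reached.
 */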
2117 static int binder_thread_read(struct binder_proc *proc,
2118 struct binder_thread *thread,
2119 binder_uintptr_t binder_buffer, size_t size,
2120 binder_size_t *consumed, int non_block)
2121 {
2122 void __user *buffer = (void __user *)(uintptr_t)binder_buffer;
2123 void __user *ptr = buffer + *consumed;
2124 void __user *end = buffer + size;
2126 int ret = 0;
2127 int wait_for_proc_work;
2129 if (*consumed == 0) {
2130 if (put_user(BR_NOOP, (uint32_t __user *)ptr))
2131 return -EFAULT;
2132 ptr += sizeof(uint32_t);
2133 }
2135 retry:
2136 wait_for_proc_work = thread->transaction_stack == NULL &&
2137 list_empty(&thread->todo);
2139 if (thread->return_error != BR_OK && ptr < end) {
2140 if (thread->return_error2 != BR_OK) {
2141 if (put_user(thread->return_error2, (uint32_t __user *)ptr))
2142 return -EFAULT;
2143 ptr += sizeof(uint32_t);
2144 binder_stat_br(proc, thread, thread->return_error2);
2145 if (ptr == end)
2146 goto done;
2147 thread->return_error2 = BR_OK;
2148 }
2149 if (put_user(thread->return_error, (uint32_t __user *)ptr))
2150 return -EFAULT;
2151 ptr += sizeof(uint32_t);
2152 binder_stat_br(proc, thread, thread->return_error);
2153 thread->return_error = BR_OK;
2154 goto done;
2155 }
2158 thread->looper |= BINDER_LOOPER_STATE_WAITING;
2159 if (wait_for_proc_work)
2160 proc->ready_threads++;
2162 binder_unlock(__func__);
2164 trace_binder_wait_for_work(wait_for_proc_work,
2165 !!thread->transaction_stack,
2166 !list_empty(&thread->todo));
2167 if (wait_for_proc_work) {
2168 if (!(thread->looper & (BINDER_LOOPER_STATE_REGISTERED |
2169 BINDER_LOOPER_STATE_ENTERED))) {
2170 binder_user_error("%d:%d ERROR: Thread waiting for process work before calling BC_REGISTER_LOOPER or BC_ENTER_LOOPER (state %x)\n",
2171 proc->pid, thread->pid, thread->looper);
2172 wait_event_interruptible(binder_user_error_wait,
2173 binder_stop_on_user_error < 2);
2174 }
2175 binder_set_nice(proc->default_priority);
2176 if (non_block) {
2177 if (!binder_has_proc_work(proc, thread))
2178 ret = -EAGAIN;
2179 } else
2180 ret = wait_event_freezable_exclusive(proc->wait, binder_has_proc_work(proc, thread));
2181 } else {
2182 if (non_block) {
2183 if (!binder_has_thread_work(thread))
2184 ret = -EAGAIN;
2185 } else
2186 ret = wait_event_freezable(thread->wait, binder_has_thread_work(thread));
2187 }
2189 binder_lock(__func__);
2191 if (wait_for_proc_work)
2192 proc->ready_threads--;
2193 thread->looper &= ~BINDER_LOOPER_STATE_WAITING;
2195 if (ret)
2196 return ret;
2198 while (1) {
2199 uint32_t cmd;
2200 struct binder_transaction_data tr;
2201 struct binder_work *w;
2202 struct binder_transaction *t = NULL;
2204 if (!list_empty(&thread->todo))
2205 w = list_first_entry(&thread->todo, struct binder_work, entry);
2206 else if (!list_empty(&proc->todo) && wait_for_proc_work)
2207 w = list_first_entry(&proc->todo, struct binder_work, entry);
2208 else {
2209 if (ptr - buffer == 4 && !(thread->looper & BINDER_LOOPER_STATE_NEED_RETURN)) /* no data added */
2210 goto retry;
2211 break;
2212 }
2214 if (end - ptr < sizeof(tr) + 4)
2215 break;
2217 switch (w->type) {
2218 case BINDER_WORK_TRANSACTION: {
2219 t = container_of(w, struct binder_transaction, work);
2220 } break;
2221 case BINDER_WORK_TRANSACTION_COMPLETE: {
2222 cmd = BR_TRANSACTION_COMPLETE;
2223 if (put_user(cmd, (uint32_t __user *)ptr))
2224 return -EFAULT;
2225 ptr += sizeof(uint32_t);
2227 binder_stat_br(proc, thread, cmd);
2228 binder_debug(BINDER_DEBUG_TRANSACTION_COMPLETE,
2229 "%d:%d BR_TRANSACTION_COMPLETE\n",
2230 proc->pid, thread->pid);
2232 list_del(&w->entry);
2233 kfree(w);
2234 binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
2235 } break;
2236 case BINDER_WORK_NODE: {
2237 struct binder_node *node = container_of(w, struct binder_node, work);
2238 uint32_t cmd = BR_NOOP;
2239 const char *cmd_name;
2240 int strong = node->internal_strong_refs || node->local_strong_refs;
2241 int weak = !hlist_empty(&node->refs) || node->local_weak_refs || strong;
2243 if (weak && !node->has_weak_ref) {
2244 cmd = BR_INCREFS;
2245 cmd_name = "BR_INCREFS";
2246 node->has_weak_ref = 1;
2247 node->pending_weak_ref = 1;
2248 node->local_weak_refs++;
2249 } else if (strong && !node->has_strong_ref) {
2250 cmd = BR_ACQUIRE;
2251 cmd_name = "BR_ACQUIRE";
2252 node->has_strong_ref = 1;
2253 node->pending_strong_ref = 1;
2254 node->local_strong_refs++;
2255 } else if (!strong && node->has_strong_ref) {
2256 cmd = BR_RELEASE;
2257 cmd_name = "BR_RELEASE";
2258 node->has_strong_ref = 0;
2259 } else if (!weak && node->has_weak_ref) {
2260 cmd = BR_DECREFS;
2261 cmd_name = "BR_DECREFS";
2262 node->has_weak_ref = 0;
2263 }
2264 if (cmd != BR_NOOP) {
2265 if (put_user(cmd, (uint32_t __user *)ptr))
2266 return -EFAULT;
2267 ptr += sizeof(uint32_t);
2268 if (put_user(node->ptr,
2269 (binder_uintptr_t __user *)ptr))
2270 return -EFAULT;
2271 ptr += sizeof(binder_uintptr_t);
2272 if (put_user(node->cookie,
2273 (binder_uintptr_t __user *)ptr))
2274 return -EFAULT;
2275 ptr += sizeof(binder_uintptr_t);
2277 binder_stat_br(proc, thread, cmd);
2278 binder_debug(BINDER_DEBUG_USER_REFS,
2279 "%d:%d %s %d u%016llx c%016llx\n",
2280 proc->pid, thread->pid, cmd_name,
2281 node->debug_id,
2282 (u64)node->ptr, (u64)node->cookie);
2283 } else {
2284 list_del_init(&w->entry);
2285 if (!weak && !strong) {
2286 binder_debug(BINDER_DEBUG_INTERNAL_REFS,
2287 "%d:%d node %d u%016llx c%016llx deleted\n",
2288 proc->pid, thread->pid,
2289 node->debug_id,
2290 (u64)node->ptr,
2291 (u64)node->cookie);
2292 rb_erase(&node->rb_node, &proc->nodes);
2293 kfree(node);
2294 binder_stats_deleted(BINDER_STAT_NODE);
2295 } else {
2296 binder_debug(BINDER_DEBUG_INTERNAL_REFS,
2297 "%d:%d node %d u%016llx c%016llx state unchanged\n",
2298 proc->pid, thread->pid,
2299 node->debug_id,
2300 (u64)node->ptr,
2301 (u64)node->cookie);
2302 }
2303 }
2304 } break;
2305 case BINDER_WORK_DEAD_BINDER:
2306 case BINDER_WORK_DEAD_BINDER_AND_CLEAR:
2307 case BINDER_WORK_CLEAR_DEATH_NOTIFICATION: {
2308 struct binder_ref_death *death;
2309 uint32_t cmd;
2311 death = container_of(w, struct binder_ref_death, work);
2312 if (w->type == BINDER_WORK_CLEAR_DEATH_NOTIFICATION)
2313 cmd = BR_CLEAR_DEATH_NOTIFICATION_DONE;
2314 else
2315 cmd = BR_DEAD_BINDER;
2316 if (put_user(cmd, (uint32_t __user *)ptr))
2317 return -EFAULT;
2318 ptr += sizeof(uint32_t);
2319 if (put_user(death->cookie,
2320 (binder_uintptr_t __user *)ptr))
2321 return -EFAULT;
2322 ptr += sizeof(binder_uintptr_t);
2323 binder_stat_br(proc, thread, cmd);
2324 binder_debug(BINDER_DEBUG_DEATH_NOTIFICATION,
2325 "%d:%d %s %016llx\n",
2326 proc->pid, thread->pid,
2327 cmd == BR_DEAD_BINDER ?
2328 "BR_DEAD_BINDER" :
2329 "BR_CLEAR_DEATH_NOTIFICATION_DONE",
2330 (u64)death->cookie);
2332 if (w->type == BINDER_WORK_CLEAR_DEATH_NOTIFICATION) {
2333 list_del(&w->entry);
2334 kfree(death);
2335 binder_stats_deleted(BINDER_STAT_DEATH);
2336 } else
2337 list_move(&w->entry, &proc->delivered_death);
2338 if (cmd == BR_DEAD_BINDER)
2339 goto done; /* DEAD_BINDER notifications can cause transactions */
2340 } break;
2341 }
2343 if (!t)
2344 continue;
2346 BUG_ON(t->buffer == NULL);
2347 if (t->buffer->target_node) {
2348 struct binder_node *target_node = t->buffer->target_node;
2350 tr.target.ptr = target_node->ptr;
2351 tr.cookie = target_node->cookie;
2352 t->saved_priority = task_nice(current);
2353 if (t->priority < target_node->min_priority &&
2354 !(t->flags & TF_ONE_WAY))
2355 binder_set_nice(t->priority);
2356 else if (!(t->flags & TF_ONE_WAY) ||
2357 t->saved_priority > target_node->min_priority)
2358 binder_set_nice(target_node->min_priority);
2359 cmd = BR_TRANSACTION;
2360 } else {
2361 tr.target.ptr = 0;
2362 tr.cookie = 0;
2363 cmd = BR_REPLY;
2364 }
2365 tr.code = t->code;
2366 tr.flags = t->flags;
2367 tr.sender_euid = from_kuid(current_user_ns(), t->sender_euid);
2369 if (t->from) {
2370 struct task_struct *sender = t->from->proc->tsk;
2372 tr.sender_pid = task_tgid_nr_ns(sender,
2373 task_active_pid_ns(current));
2374 } else {
2375 tr.sender_pid = 0;
2376 }
2378 tr.data_size = t->buffer->data_size;
2379 tr.offsets_size = t->buffer->offsets_size;
2380 tr.data.ptr.buffer = (binder_uintptr_t)(
2381 (uintptr_t)t->buffer->data +
2382 proc->user_buffer_offset);
2383 tr.data.ptr.offsets = tr.data.ptr.buffer +
2384 ALIGN(t->buffer->data_size,
2385 sizeof(void *));
2387 if (put_user(cmd, (uint32_t __user *)ptr))
2388 return -EFAULT;
2389 ptr += sizeof(uint32_t);
2390 if (copy_to_user(ptr, &tr, sizeof(tr)))
2391 return -EFAULT;
2392 ptr += sizeof(tr);
2394 trace_binder_transaction_received(t);
2395 binder_stat_br(proc, thread, cmd);
2396 binder_debug(BINDER_DEBUG_TRANSACTION,
2397 "%d:%d %s %d %d:%d, cmd %d size %zd-%zd ptr %016llx-%016llx\n",
2398 proc->pid, thread->pid,
2399 (cmd == BR_TRANSACTION) ? "BR_TRANSACTION" :
2400 "BR_REPLY",
2401 t->debug_id, t->from ? t->from->proc->pid : 0,
2402 t->from ? t->from->pid : 0, cmd,
2403 t->buffer->data_size, t->buffer->offsets_size,
2404 (u64)tr.data.ptr.buffer, (u64)tr.data.ptr.offsets);
2406 list_del(&t->work.entry);
2407 t->buffer->allow_user_free = 1;
2408 if (cmd == BR_TRANSACTION && !(t->flags & TF_ONE_WAY)) {
2409 t->to_parent = thread->transaction_stack;
2410 t->to_thread = thread;
2411 thread->transaction_stack = t;
2412 } else {
2413 t->buffer->transaction = NULL;
2414 kfree(t);
2415 binder_stats_deleted(BINDER_STAT_TRANSACTION);
2416 }
2417 break;
2418 }
2420 done:
2422 *consumed = ptr - buffer;
2423 if (proc->requested_threads + proc->ready_threads == 0 &&
2424 proc->requested_threads_started < proc->max_threads &&
2425 (thread->looper & (BINDER_LOOPER_STATE_REGISTERED |
2426 BINDER_LOOPER_STATE_ENTERED))
2427 /* the user-space code fails to spawn a new thread if we leave this out */) {
2428 proc->requested_threads++;
2429 binder_debug(BINDER_DEBUG_THREADS,
2430 "%d:%d BR_SPAWN_LOOPER\n",
2431 proc->pid, thread->pid);
2432 if (put_user(BR_SPAWN_LOOPER, (uint32_t __user *)buffer))
2433 return -EFAULT;
2434 binder_stat_br(proc, thread, BR_SPAWN_LOOPER);
2435 }
2436 return 0;
2437 }
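/*
 * binder_release_work() - drain a todo list that will never be
 * serviced again (thread or process teardown). Two-way transactions
 * are answered with BR_DEAD_REPLY; everything else is just freed.
 */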
2439 static void binder_release_work(struct list_head *list)
2440 {
2441 struct binder_work *w;
2443 while (!list_empty(list)) {
2444 w = list_first_entry(list, struct binder_work, entry);
2445 list_del_init(&w->entry);
2446 switch (w->type) {
2447 case BINDER_WORK_TRANSACTION: {
2448 struct binder_transaction *t;
2450 t = container_of(w, struct binder_transaction, work);
2451 if (t->buffer->target_node &&
2452 !(t->flags & TF_ONE_WAY)) {
2453 binder_send_failed_reply(t, BR_DEAD_REPLY);
2454 } else {
2455 binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
2456 "undelivered transaction %d\n",
2457 t->debug_id);
2458 t->buffer->transaction = NULL;
2459 kfree(t);
2460 binder_stats_deleted(BINDER_STAT_TRANSACTION);
2461 }
2462 } break;
2463 case BINDER_WORK_TRANSACTION_COMPLETE: {
2464 binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
2465 "undelivered TRANSACTION_COMPLETE\n");
2466 kfree(w);
2467 binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
2468 } break;
2469 case BINDER_WORK_DEAD_BINDER_AND_CLEAR:
2470 case BINDER_WORK_CLEAR_DEATH_NOTIFICATION: {
2471 struct binder_ref_death *death;
2473 death = container_of(w, struct binder_ref_death, work);
2474 binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
2475 "undelivered death notification, %016llx\n",
2476 (u64)death->cookie);
2477 kfree(death);
2478 binder_stats_deleted(BINDER_STAT_DEATH);
2479 } break;
2480 default:
2481 pr_err("unexpected work type, %d, not freed\n",
2482 w->type);
2483 break;
2484 }
2485 }
2486 }
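/*
 * binder_get_thread() - look up the calling thread in the process's
 * pid-keyed rbtree, allocating and inserting a fresh binder_thread on
 * first use. Every caller in this file holds the global binder lock.
 */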
2489 static struct binder_thread *binder_get_thread(struct binder_proc *proc)
2490 {
2491 struct binder_thread *thread = NULL;
2492 struct rb_node *parent = NULL;
2493 struct rb_node **p = &proc->threads.rb_node;
2495 while (*p) {
2496 parent = *p;
2497 thread = rb_entry(parent, struct binder_thread, rb_node);
2499 if (current->pid < thread->pid)
2500 p = &(*p)->rb_left;
2501 else if (current->pid > thread->pid)
2502 p = &(*p)->rb_right;
2503 else
2504 break;
2505 }
2506 if (*p == NULL) {
2507 thread = kzalloc(sizeof(*thread), GFP_KERNEL);
2508 if (thread == NULL)
2509 return NULL;
2510 binder_stats_created(BINDER_STAT_THREAD);
2511 thread->proc = proc;
2512 thread->pid = current->pid;
2513 init_waitqueue_head(&thread->wait);
2514 INIT_LIST_HEAD(&thread->todo);
2515 rb_link_node(&thread->rb_node, parent, p);
2516 rb_insert_color(&thread->rb_node, &proc->threads);
2517 thread->looper |= BINDER_LOOPER_STATE_NEED_RETURN;
2518 thread->return_error = BR_OK;
2519 thread->return_error2 = BR_OK;
2520 }
2521 return thread;
2522 }
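/*
 * binder_free_thread() - unlink a thread and unwind its transaction
 * stack: an incoming transaction still expecting a reply gets
 * BR_DEAD_REPLY, other incoming ones are orphaned, and outgoing ones
 * continue with t->from cleared. Returns the number of transactions
 * that were still active.
 */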
2524 static int binder_free_thread(struct binder_proc *proc,
2525 struct binder_thread *thread)
2526 {
2527 struct binder_transaction *t;
2528 struct binder_transaction *send_reply = NULL;
2529 int active_transactions = 0;
2531 rb_erase(&thread->rb_node, &proc->threads);
2532 t = thread->transaction_stack;
2533 if (t && t->to_thread == thread)
2534 send_reply = t;
2535 while (t) {
2536 active_transactions++;
2537 binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
2538 "release %d:%d transaction %d %s, still active\n",
2539 proc->pid, thread->pid,
2540 t->debug_id,
2541 (t->to_thread == thread) ? "in" : "out");
2543 if (t->to_thread == thread) {
2544 t->to_proc = NULL;
2545 t->to_thread = NULL;
2546 if (t->buffer) {
2547 t->buffer->transaction = NULL;
2548 t->buffer = NULL;
2549 }
2550 t = t->to_parent;
2551 } else if (t->from == thread) {
2552 t->from = NULL;
2553 t = t->from_parent;
2554 } else
2555 BUG();
2556 }
2557 if (send_reply)
2558 binder_send_failed_reply(send_reply, BR_DEAD_REPLY);
2559 binder_release_work(&thread->todo);
2560 kfree(thread);
2561 binder_stats_deleted(BINDER_STAT_THREAD);
2562 return active_transactions;
2563 }
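/*
 * binder_poll() - report POLLIN when work is ready, arming either the
 * per-process or the per-thread waitqueue depending on whether the
 * thread currently has private work of its own.
 */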
2565 static unsigned int binder_poll(struct file *filp,
2566 struct poll_table_struct *wait)
2567 {
2568 struct binder_proc *proc = filp->private_data;
2569 struct binder_thread *thread = NULL;
2570 int wait_for_proc_work;
2572 binder_lock(__func__);
2574 thread = binder_get_thread(proc);
2576 wait_for_proc_work = thread->transaction_stack == NULL &&
2577 list_empty(&thread->todo) && thread->return_error == BR_OK;
2579 binder_unlock(__func__);
2581 if (wait_for_proc_work) {
2582 if (binder_has_proc_work(proc, thread))
2583 return POLLIN;
2584 poll_wait(filp, &proc->wait, wait);
2585 if (binder_has_proc_work(proc, thread))
2586 return POLLIN;
2587 } else {
2588 if (binder_has_thread_work(thread))
2589 return POLLIN;
2590 poll_wait(filp, &thread->wait, wait);
2591 if (binder_has_thread_work(thread))
2592 return POLLIN;
2593 }
2594 return 0;
2595 }
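/*
 * binder_ioctl() - top-level command dispatcher. BINDER_WRITE_READ is
 * the workhorse: userspace passes a struct binder_write_read whose
 * write buffer carries BC_* commands (consumed by binder_thread_write())
 * and whose read buffer receives BR_* commands (filled by
 * binder_thread_read()).
 *
 * Illustrative userspace sketch (not part of this driver; error
 * handling and read-buffer parsing are elided, and the descriptor is
 * assumed to have been opened and mmap()ed already):
 *
 *	uint32_t enter = BC_ENTER_LOOPER;
 *	uint32_t readbuf[32];
 *	struct binder_write_read bwr = {
 *		.write_buffer = (binder_uintptr_t)&enter,
 *		.write_size = sizeof(enter),
 *		.read_buffer = (binder_uintptr_t)readbuf,
 *		.read_size = sizeof(readbuf),
 *	};
 *	ioctl(fd, BINDER_WRITE_READ, &bwr);
 *	(on return, bwr.read_consumed bytes of BR_* commands are in readbuf)
 */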
2597 static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
2598 {
2599 int ret;
2600 struct binder_proc *proc = filp->private_data;
2601 struct binder_thread *thread;
2602 unsigned int size = _IOC_SIZE(cmd);
2603 void __user *ubuf = (void __user *)arg;
2604 kuid_t curr_euid = current_euid();
2606 /*pr_info("binder_ioctl: %d:%d %x %lx\n", proc->pid, current->pid, cmd, arg);*/
2608 trace_binder_ioctl(cmd, arg);
2610 ret = wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2);
2611 if (ret)
2612 goto err_unlocked;
2614 binder_lock(__func__);
2615 thread = binder_get_thread(proc);
2616 if (thread == NULL) {
2617 ret = -ENOMEM;
2618 goto err;
2619 }
2621 switch (cmd) {
2622 case BINDER_WRITE_READ: {
2623 struct binder_write_read bwr;
2625 if (size != sizeof(struct binder_write_read)) {
2626 ret = -EINVAL;
2627 goto err;
2628 }
2629 if (copy_from_user(&bwr, ubuf, sizeof(bwr))) {
2630 ret = -EFAULT;
2631 goto err;
2632 }
2633 binder_debug(BINDER_DEBUG_READ_WRITE,
2634 "%d:%d write %lld at %016llx, read %lld at %016llx\n",
2635 proc->pid, thread->pid,
2636 (u64)bwr.write_size, (u64)bwr.write_buffer,
2637 (u64)bwr.read_size, (u64)bwr.read_buffer);
2639 if (bwr.write_size > 0) {
2640 ret = binder_thread_write(proc, thread,
2641 bwr.write_buffer,
2642 bwr.write_size,
2643 &bwr.write_consumed);
2644 trace_binder_write_done(ret);
2645 if (ret < 0) {
2646 bwr.read_consumed = 0;
2647 if (copy_to_user(ubuf, &bwr, sizeof(bwr)))
2648 ret = -EFAULT;
2649 goto err;
2650 }
2651 }
2652 if (bwr.read_size > 0) {
2653 ret = binder_thread_read(proc, thread, bwr.read_buffer,
2654 bwr.read_size,
2655 &bwr.read_consumed,
2656 filp->f_flags & O_NONBLOCK);
2657 trace_binder_read_done(ret);
2658 if (!list_empty(&proc->todo))
2659 wake_up_interruptible(&proc->wait);
2660 if (ret < 0) {
2661 if (copy_to_user(ubuf, &bwr, sizeof(bwr)))
2662 ret = -EFAULT;
2663 goto err;
2664 }
2665 }
2666 binder_debug(BINDER_DEBUG_READ_WRITE,
2667 "%d:%d wrote %lld of %lld, read return %lld of %lld\n",
2668 proc->pid, thread->pid,
2669 (u64)bwr.write_consumed, (u64)bwr.write_size,
2670 (u64)bwr.read_consumed, (u64)bwr.read_size);
2671 if (copy_to_user(ubuf, &bwr, sizeof(bwr))) {
2672 ret = -EFAULT;
2673 goto err;
2674 }
2675 break;
2676 }
2677 case BINDER_SET_MAX_THREADS:
2678 if (copy_from_user(&proc->max_threads, ubuf, sizeof(proc->max_threads))) {
2679 ret = -EFAULT;
2680 goto err;
2681 }
2682 break;
2683 case BINDER_SET_CONTEXT_MGR:
2684 if (binder_context_mgr_node != NULL) {
2685 pr_err("BINDER_SET_CONTEXT_MGR already set\n");
2686 ret = -EBUSY;
2687 goto err;
2688 }
2689 if (uid_valid(binder_context_mgr_uid)) {
2690 if (!uid_eq(binder_context_mgr_uid, curr_euid)) {
2691 pr_err("BINDER_SET_CONTEXT_MGR bad uid %d != %d\n",
2692 from_kuid(&init_user_ns, curr_euid),
2693 from_kuid(&init_user_ns, binder_context_mgr_uid));
2694 ret = -EPERM;
2695 goto err;
2696 }
2697 } else {
2698 binder_context_mgr_uid = curr_euid;
2699 }
2700 binder_context_mgr_node = binder_new_node(proc, 0, 0);
2701 if (binder_context_mgr_node == NULL) {
2702 ret = -ENOMEM;
2703 goto err;
2704 }
2705 binder_context_mgr_node->local_weak_refs++;
2706 binder_context_mgr_node->local_strong_refs++;
2707 binder_context_mgr_node->has_strong_ref = 1;
2708 binder_context_mgr_node->has_weak_ref = 1;
2709 break;
2710 case BINDER_THREAD_EXIT:
2711 binder_debug(BINDER_DEBUG_THREADS, "%d:%d exit\n",
2712 proc->pid, thread->pid);
2713 binder_free_thread(proc, thread);
2714 thread = NULL;
2715 break;
2716 case BINDER_VERSION: {
2717 struct binder_version __user *ver = ubuf;
2719 if (size != sizeof(struct binder_version)) {
2720 ret = -EINVAL;
2721 goto err;
2722 }
2723 if (put_user(BINDER_CURRENT_PROTOCOL_VERSION,
2724 &ver->protocol_version)) {
2725 ret = -EFAULT;
2726 goto err;
2727 }
2728 break;
2729 }
2730 default:
2731 ret = -EINVAL;
2732 goto err;
2733 }
2734 ret = 0;
2735 err:
2736 if (thread)
2737 thread->looper &= ~BINDER_LOOPER_STATE_NEED_RETURN;
2738 binder_unlock(__func__);
2739 wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2);
2740 if (ret && ret != -ERESTARTSYS)
2741 pr_info("%d:%d ioctl %x %lx returned %d\n", proc->pid, current->pid, cmd, arg, ret);
2742 err_unlocked:
2743 trace_binder_ioctl_done(ret);
2744 return ret;
2745 }
2747 static void binder_vma_open(struct vm_area_struct *vma)
2748 {
2749 struct binder_proc *proc = vma->vm_private_data;
2751 binder_debug(BINDER_DEBUG_OPEN_CLOSE,
2752 "%d open vm area %lx-%lx (%ld K) vma %lx pagep %lx\n",
2753 proc->pid, vma->vm_start, vma->vm_end,
2754 (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
2755 (unsigned long)pgprot_val(vma->vm_page_prot));
2756 }
2758 static void binder_vma_close(struct vm_area_struct *vma)
2759 {
2760 struct binder_proc *proc = vma->vm_private_data;
2762 binder_debug(BINDER_DEBUG_OPEN_CLOSE,
2763 "%d close vm area %lx-%lx (%ld K) vma %lx pagep %lx\n",
2764 proc->pid, vma->vm_start, vma->vm_end,
2765 (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
2766 (unsigned long)pgprot_val(vma->vm_page_prot));
2767 proc->vma = NULL;
2768 proc->vma_vm_mm = NULL;
2769 binder_defer_work(proc, BINDER_DEFERRED_PUT_FILES);
2770 }
2772 static struct vm_operations_struct binder_vm_ops = {
2773 .open = binder_vma_open,
2774 .close = binder_vma_close,
2775 };
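/*
 * binder_mmap() - set up the single per-process transaction buffer
 * (capped at 4MB, never writable from userspace). The same pages back
 * both a kernel mapping obtained via get_vm_area() and the caller's
 * vma; user_buffer_offset records the constant delta between the two
 * views so buffer addresses can be translated without copying.
 */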
2777 static int binder_mmap(struct file *filp, struct vm_area_struct *vma)
2778 {
2779 int ret;
2780 struct vm_struct *area;
2781 struct binder_proc *proc = filp->private_data;
2782 const char *failure_string;
2783 struct binder_buffer *buffer;
2785 if (proc->tsk != current)
2786 return -EINVAL;
2788 if ((vma->vm_end - vma->vm_start) > SZ_4M)
2789 vma->vm_end = vma->vm_start + SZ_4M;
2791 binder_debug(BINDER_DEBUG_OPEN_CLOSE,
2792 "binder_mmap: %d %lx-%lx (%ld K) vma %lx pagep %lx\n",
2793 proc->pid, vma->vm_start, vma->vm_end,
2794 (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
2795 (unsigned long)pgprot_val(vma->vm_page_prot));
2797 if (vma->vm_flags & FORBIDDEN_MMAP_FLAGS) {
2798 ret = -EPERM;
2799 failure_string = "bad vm_flags";
2800 goto err_bad_arg;
2801 }
2802 vma->vm_flags = (vma->vm_flags | VM_DONTCOPY) & ~VM_MAYWRITE;
2804 mutex_lock(&binder_mmap_lock);
2805 if (proc->buffer) {
2806 ret = -EBUSY;
2807 failure_string = "already mapped";
2808 goto err_already_mapped;
2809 }
2811 area = get_vm_area(vma->vm_end - vma->vm_start, VM_IOREMAP);
2812 if (area == NULL) {
2813 ret = -ENOMEM;
2814 failure_string = "get_vm_area";
2815 goto err_get_vm_area_failed;
2816 }
2817 proc->buffer = area->addr;
2818 proc->user_buffer_offset = vma->vm_start - (uintptr_t)proc->buffer;
2819 mutex_unlock(&binder_mmap_lock);
2821 #ifdef CONFIG_CPU_CACHE_VIPT
2822 if (cache_is_vipt_aliasing()) {
2823 while (CACHE_COLOUR((vma->vm_start ^ (uint32_t)proc->buffer))) {
2824 pr_info("binder_mmap: %d %lx-%lx maps %p bad alignment\n", proc->pid, vma->vm_start, vma->vm_end, proc->buffer);
2825 vma->vm_start += PAGE_SIZE;
2826 }
2827 }
2828 #endif
2829 proc->pages = kzalloc(sizeof(proc->pages[0]) * ((vma->vm_end - vma->vm_start) / PAGE_SIZE), GFP_KERNEL);
2830 if (proc->pages == NULL) {
2831 ret = -ENOMEM;
2832 failure_string = "alloc page array";
2833 goto err_alloc_pages_failed;
2834 }
2835 proc->buffer_size = vma->vm_end - vma->vm_start;
2837 vma->vm_ops = &binder_vm_ops;
2838 vma->vm_private_data = proc;
2840 if (binder_update_page_range(proc, 1, proc->buffer, proc->buffer + PAGE_SIZE, vma)) {
2841 ret = -ENOMEM;
2842 failure_string = "alloc small buf";
2843 goto err_alloc_small_buf_failed;
2844 }
2845 buffer = proc->buffer;
2846 INIT_LIST_HEAD(&proc->buffers);
2847 list_add(&buffer->entry, &proc->buffers);
2848 buffer->free = 1;
2849 binder_insert_free_buffer(proc, buffer);
2850 proc->free_async_space = proc->buffer_size / 2;
2851 barrier();
2852 proc->files = get_files_struct(current);
2853 proc->vma = vma;
2854 proc->vma_vm_mm = vma->vm_mm;
2856 /*pr_info("binder_mmap: %d %lx-%lx maps %p\n",
2857 proc->pid, vma->vm_start, vma->vm_end, proc->buffer);*/
2858 return 0;
2860 err_alloc_small_buf_failed:
2861 kfree(proc->pages);
2862 proc->pages = NULL;
2863 err_alloc_pages_failed:
2864 mutex_lock(&binder_mmap_lock);
2865 vfree(proc->buffer);
2866 proc->buffer = NULL;
2867 err_get_vm_area_failed:
2868 err_already_mapped:
2869 mutex_unlock(&binder_mmap_lock);
2870 err_bad_arg:
2871 pr_err("binder_mmap: %d %lx-%lx %s failed %d\n",
2872 proc->pid, vma->vm_start, vma->vm_end, failure_string, ret);
2873 return ret;
2874 }
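/*
 * binder_open() - allocate the per-process binder_proc, add it to the
 * global binder_procs list and create a debugfs entry named after the
 * group leader's pid.
 */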
2876 static int binder_open(struct inode *nodp, struct file *filp)
2877 {
2878 struct binder_proc *proc;
2880 binder_debug(BINDER_DEBUG_OPEN_CLOSE, "binder_open: %d:%d\n",
2881 current->group_leader->pid, current->pid);
2883 proc = kzalloc(sizeof(*proc), GFP_KERNEL);
2884 if (proc == NULL)
2885 return -ENOMEM;
2886 get_task_struct(current);
2887 proc->tsk = current;
2888 INIT_LIST_HEAD(&proc->todo);
2889 init_waitqueue_head(&proc->wait);
2890 proc->default_priority = task_nice(current);
2892 binder_lock(__func__);
2894 binder_stats_created(BINDER_STAT_PROC);
2895 hlist_add_head(&proc->proc_node, &binder_procs);
2896 proc->pid = current->group_leader->pid;
2897 INIT_LIST_HEAD(&proc->delivered_death);
2898 filp->private_data = proc;
2900 binder_unlock(__func__);
2902 if (binder_debugfs_dir_entry_proc) {
2903 char strbuf[11];
2905 snprintf(strbuf, sizeof(strbuf), "%u", proc->pid);
2906 proc->debugfs_entry = debugfs_create_file(strbuf, S_IRUGO,
2907 binder_debugfs_dir_entry_proc, proc, &binder_proc_fops);
2908 }
2910 return 0;
2911 }
2913 static int binder_flush(struct file *filp, fl_owner_t id)
2914 {
2915 struct binder_proc *proc = filp->private_data;
2917 binder_defer_work(proc, BINDER_DEFERRED_FLUSH);
2919 return 0;
2920 }
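/*
 * binder_deferred_flush() - set BINDER_LOOPER_STATE_NEED_RETURN on
 * every thread and wake all waiters so ioctls blocked in the driver
 * return to userspace, e.g. when the file is flushed on close().
 */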
2922 static void binder_deferred_flush(struct binder_proc *proc)
2923 {
2924 struct rb_node *n;
2925 int wake_count = 0;
2927 for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n)) {
2928 struct binder_thread *thread = rb_entry(n, struct binder_thread, rb_node);
2930 thread->looper |= BINDER_LOOPER_STATE_NEED_RETURN;
2931 if (thread->looper & BINDER_LOOPER_STATE_WAITING) {
2932 wake_up_interruptible(&thread->wait);
2933 wake_count++;
2934 }
2935 }
2936 wake_up_interruptible_all(&proc->wait);
2938 binder_debug(BINDER_DEBUG_OPEN_CLOSE,
2939 "binder_flush: %d woke %d threads\n", proc->pid,
2940 wake_count);
2941 }
2943 static int binder_release(struct inode *nodp, struct file *filp)
2944 {
2945 struct binder_proc *proc = filp->private_data;
2947 debugfs_remove(proc->debugfs_entry);
2948 binder_defer_work(proc, BINDER_DEFERRED_RELEASE);
2950 return 0;
2951 }
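/*
 * binder_node_release() - called for every node of a dying process. A
 * node still referenced elsewhere is parked on binder_dead_nodes and
 * each registered death notification is queued as
 * BINDER_WORK_DEAD_BINDER on the referencing process's todo list.
 * Returns the updated incoming-reference count.
 */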
2953 static int binder_node_release(struct binder_node *node, int refs)
2954 {
2955 struct binder_ref *ref;
2956 int death = 0;
2958 list_del_init(&node->work.entry);
2959 binder_release_work(&node->async_todo);
2961 if (hlist_empty(&node->refs)) {
2962 kfree(node);
2963 binder_stats_deleted(BINDER_STAT_NODE);
2965 return refs;
2966 }
2968 node->proc = NULL;
2969 node->local_strong_refs = 0;
2970 node->local_weak_refs = 0;
2971 hlist_add_head(&node->dead_node, &binder_dead_nodes);
2973 hlist_for_each_entry(ref, &node->refs, node_entry) {
2974 refs++;
2976 if (!ref->death)
2977 continue;
2979 death++;
2981 if (list_empty(&ref->death->work.entry)) {
2982 ref->death->work.type = BINDER_WORK_DEAD_BINDER;
2983 list_add_tail(&ref->death->work.entry,
2984 &ref->proc->todo);
2985 wake_up_interruptible(&ref->proc->wait);
2986 } else
2987 BUG();
2988 }
2990 binder_debug(BINDER_DEBUG_DEAD_BINDER,
2991 "node %d now dead, refs %d, death %d\n",
2992 node->debug_id, refs, death);
2994 return refs;
2995 }
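/*
 * binder_deferred_release() - final teardown after the fd is released:
 * free all threads, nodes, refs and buffers, return any pages still
 * mapped and drop the task reference. Runs from the deferred
 * workqueue, not from binder_release() itself.
 */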
2997 static void binder_deferred_release(struct binder_proc *proc)
2998 {
2999 struct binder_transaction *t;
3000 struct rb_node *n;
3001 int threads, nodes, incoming_refs, outgoing_refs, buffers,
3002 active_transactions, page_count;
3004 BUG_ON(proc->vma);
3005 BUG_ON(proc->files);
3007 hlist_del(&proc->proc_node);
3009 if (binder_context_mgr_node && binder_context_mgr_node->proc == proc) {
3010 binder_debug(BINDER_DEBUG_DEAD_BINDER,
3011 "%s: %d context_mgr_node gone\n",
3012 __func__, proc->pid);
3013 binder_context_mgr_node = NULL;
3014 }
3016 threads = 0;
3017 active_transactions = 0;
3018 while ((n = rb_first(&proc->threads))) {
3019 struct binder_thread *thread;
3021 thread = rb_entry(n, struct binder_thread, rb_node);
3022 threads++;
3023 active_transactions += binder_free_thread(proc, thread);
3024 }
3026 nodes = 0;
3027 incoming_refs = 0;
3028 while ((n = rb_first(&proc->nodes))) {
3029 struct binder_node *node;
3031 node = rb_entry(n, struct binder_node, rb_node);
3032 nodes++;
3033 rb_erase(&node->rb_node, &proc->nodes);
3034 incoming_refs = binder_node_release(node, incoming_refs);
3035 }
3037 outgoing_refs = 0;
3038 while ((n = rb_first(&proc->refs_by_desc))) {
3039 struct binder_ref *ref;
3041 ref = rb_entry(n, struct binder_ref, rb_node_desc);
3042 outgoing_refs++;
3043 binder_delete_ref(ref);
3044 }
3046 binder_release_work(&proc->todo);
3047 binder_release_work(&proc->delivered_death);
3049 buffers = 0;
3050 while ((n = rb_first(&proc->allocated_buffers))) {
3051 struct binder_buffer *buffer;
3053 buffer = rb_entry(n, struct binder_buffer, rb_node);
3055 t = buffer->transaction;
3056 if (t) {
3057 t->buffer = NULL;
3058 buffer->transaction = NULL;
3059 pr_err("release proc %d, transaction %d, not freed\n",
3060 proc->pid, t->debug_id);
3061 /*BUG();*/
3062 }
3064 binder_free_buf(proc, buffer);
3065 buffers++;
3066 }
3068 binder_stats_deleted(BINDER_STAT_PROC);
3070 page_count = 0;
3071 if (proc->pages) {
3072 int i;
3074 for (i = 0; i < proc->buffer_size / PAGE_SIZE; i++) {
3075 void *page_addr;
3077 if (!proc->pages[i])
3078 continue;
3080 page_addr = proc->buffer + i * PAGE_SIZE;
3081 binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
3082 "%s: %d: page %d at %p not freed\n",
3083 __func__, proc->pid, i, page_addr);
3084 unmap_kernel_range((unsigned long)page_addr, PAGE_SIZE);
3085 __free_page(proc->pages[i]);
3086 page_count++;
3087 }
3088 kfree(proc->pages);
3089 vfree(proc->buffer);
3090 }
3092 put_task_struct(proc->tsk);
3094 binder_debug(BINDER_DEBUG_OPEN_CLOSE,
3095 "%s: %d threads %d, nodes %d (ref %d), refs %d, active transactions %d, buffers %d, pages %d\n",
3096 __func__, proc->pid, threads, nodes, incoming_refs,
3097 outgoing_refs, active_transactions, buffers, page_count);
3099 kfree(proc);
3100 }
3102 static void binder_deferred_func(struct work_struct *work)
3103 {
3104 struct binder_proc *proc;
3105 struct files_struct *files;
3107 int defer;
3109 do {
3110 binder_lock(__func__);
3111 mutex_lock(&binder_deferred_lock);
3112 if (!hlist_empty(&binder_deferred_list)) {
3113 proc = hlist_entry(binder_deferred_list.first,
3114 struct binder_proc, deferred_work_node);
3115 hlist_del_init(&proc->deferred_work_node);
3116 defer = proc->deferred_work;
3117 proc->deferred_work = 0;
3118 } else {
3119 proc = NULL;
3120 defer = 0;
3121 }
3122 mutex_unlock(&binder_deferred_lock);
3124 files = NULL;
3125 if (defer & BINDER_DEFERRED_PUT_FILES) {
3126 files = proc->files;
3127 if (files)
3128 proc->files = NULL;
3129 }
3131 if (defer & BINDER_DEFERRED_FLUSH)
3132 binder_deferred_flush(proc);
3134 if (defer & BINDER_DEFERRED_RELEASE)
3135 binder_deferred_release(proc); /* frees proc */
3137 binder_unlock(__func__);
3138 if (files)
3139 put_files_struct(files);
3140 } while (proc);
3141 }
3142 static DECLARE_WORK(binder_deferred_work, binder_deferred_func);
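/*
 * binder_defer_work() - record deferred-work bits for a process and
 * kick the single-threaded workqueue; binder_deferred_func() above
 * drains the list one process at a time under the binder lock.
 */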
3144 static void
3145 binder_defer_work(struct binder_proc *proc, enum binder_deferred_state defer)
3146 {
3147 mutex_lock(&binder_deferred_lock);
3148 proc->deferred_work |= defer;
3149 if (hlist_unhashed(&proc->deferred_work_node)) {
3150 hlist_add_head(&proc->deferred_work_node,
3151 &binder_deferred_list);
3152 queue_work(binder_deferred_workqueue, &binder_deferred_work);
3153 }
3154 mutex_unlock(&binder_deferred_lock);
3155 }
3157 static void print_binder_transaction(struct seq_file *m, const char *prefix,
3158 struct binder_transaction *t)
3159 {
3160 seq_printf(m,
3161 "%s %d: %p from %d:%d to %d:%d code %x flags %x pri %ld r%d",
3162 prefix, t->debug_id, t,
3163 t->from ? t->from->proc->pid : 0,
3164 t->from ? t->from->pid : 0,
3165 t->to_proc ? t->to_proc->pid : 0,
3166 t->to_thread ? t->to_thread->pid : 0,
3167 t->code, t->flags, t->priority, t->need_reply);
3168 if (t->buffer == NULL) {
3169 seq_puts(m, " buffer free\n");
3170 return;
3171 }
3172 if (t->buffer->target_node)
3173 seq_printf(m, " node %d",
3174 t->buffer->target_node->debug_id);
3175 seq_printf(m, " size %zd:%zd data %p\n",
3176 t->buffer->data_size, t->buffer->offsets_size,
3177 t->buffer->data);
3178 }
3180 static void print_binder_buffer(struct seq_file *m, const char *prefix,
3181 struct binder_buffer *buffer)
3182 {
3183 seq_printf(m, "%s %d: %p size %zd:%zd %s\n",
3184 prefix, buffer->debug_id, buffer->data,
3185 buffer->data_size, buffer->offsets_size,
3186 buffer->transaction ? "active" : "delivered");
3189 static void print_binder_work(struct seq_file *m, const char *prefix,
3190 const char *transaction_prefix,
3191 struct binder_work *w)
3192 {
3193 struct binder_node *node;
3194 struct binder_transaction *t;
3196 switch (w->type) {
3197 case BINDER_WORK_TRANSACTION:
3198 t = container_of(w, struct binder_transaction, work);
3199 print_binder_transaction(m, transaction_prefix, t);
3200 break;
3201 case BINDER_WORK_TRANSACTION_COMPLETE:
3202 seq_printf(m, "%stransaction complete\n", prefix);
3203 break;
3204 case BINDER_WORK_NODE:
3205 node = container_of(w, struct binder_node, work);
3206 seq_printf(m, "%snode work %d: u%016llx c%016llx\n",
3207 prefix, node->debug_id,
3208 (u64)node->ptr, (u64)node->cookie);
3209 break;
3210 case BINDER_WORK_DEAD_BINDER:
3211 seq_printf(m, "%shas dead binder\n", prefix);
3212 break;
3213 case BINDER_WORK_DEAD_BINDER_AND_CLEAR:
3214 seq_printf(m, "%shas cleared dead binder\n", prefix);
3215 break;
3216 case BINDER_WORK_CLEAR_DEATH_NOTIFICATION:
3217 seq_printf(m, "%shas cleared death notification\n", prefix);
3218 break;
3219 default:
3220 seq_printf(m, "%sunknown work: type %d\n", prefix, w->type);
3221 break;
3222 }
3223 }
3225 static void print_binder_thread(struct seq_file *m,
3226 struct binder_thread *thread,
3227 int print_always)
3228 {
3229 struct binder_transaction *t;
3230 struct binder_work *w;
3231 size_t start_pos = m->count;
3232 size_t header_pos;
3234 seq_printf(m, " thread %d: l %02x\n", thread->pid, thread->looper);
3235 header_pos = m->count;
3236 t = thread->transaction_stack;
3237 while (t) {
3238 if (t->from == thread) {
3239 print_binder_transaction(m,
3240 " outgoing transaction", t);
3241 t = t->from_parent;
3242 } else if (t->to_thread == thread) {
3243 print_binder_transaction(m,
3244 " incoming transaction", t);
3245 t = t->to_parent;
3246 } else {
3247 print_binder_transaction(m, " bad transaction", t);
3248 t = NULL;
3249 }
3250 }
3251 list_for_each_entry(w, &thread->todo, entry) {
3252 print_binder_work(m, " ", " pending transaction", w);
3253 }
3254 if (!print_always && m->count == header_pos)
3255 m->count = start_pos;
3256 }
3258 static void print_binder_node(struct seq_file *m, struct binder_node *node)
3259 {
3260 struct binder_ref *ref;
3261 struct binder_work *w;
3262 int count;
3264 count = 0;
3265 hlist_for_each_entry(ref, &node->refs, node_entry)
3266 count++;
3268 seq_printf(m, " node %d: u%016llx c%016llx hs %d hw %d ls %d lw %d is %d iw %d",
3269 node->debug_id, (u64)node->ptr, (u64)node->cookie,
3270 node->has_strong_ref, node->has_weak_ref,
3271 node->local_strong_refs, node->local_weak_refs,
3272 node->internal_strong_refs, count);
3273 if (count) {
3274 seq_puts(m, " proc");
3275 hlist_for_each_entry(ref, &node->refs, node_entry)
3276 seq_printf(m, " %d", ref->proc->pid);
3277 }
3278 seq_puts(m, "\n");
3279 list_for_each_entry(w, &node->async_todo, entry)
3280 print_binder_work(m, " ",
3281 " pending async transaction", w);
3284 static void print_binder_ref(struct seq_file *m, struct binder_ref *ref)
3285 {
3286 seq_printf(m, " ref %d: desc %d %snode %d s %d w %d d %p\n",
3287 ref->debug_id, ref->desc, ref->node->proc ? "" : "dead ",
3288 ref->node->debug_id, ref->strong, ref->weak, ref->death);
3289 }
3291 static void print_binder_proc(struct seq_file *m,
3292 struct binder_proc *proc, int print_all)
3293 {
3294 struct binder_work *w;
3295 struct rb_node *n;
3296 size_t start_pos = m->count;
3297 size_t header_pos;
3299 seq_printf(m, "proc %d\n", proc->pid);
3300 header_pos = m->count;
3302 for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n))
3303 print_binder_thread(m, rb_entry(n, struct binder_thread,
3304 rb_node), print_all);
3305 for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n)) {
3306 struct binder_node *node = rb_entry(n, struct binder_node,
3307 rb_node);
3308 if (print_all || node->has_async_transaction)
3309 print_binder_node(m, node);
3310 }
3311 if (print_all) {
3312 for (n = rb_first(&proc->refs_by_desc);
3313 n != NULL;
3314 n = rb_next(n))
3315 print_binder_ref(m, rb_entry(n, struct binder_ref,
3316 rb_node_desc));
3317 }
3318 for (n = rb_first(&proc->allocated_buffers); n != NULL; n = rb_next(n))
3319 print_binder_buffer(m, " buffer",
3320 rb_entry(n, struct binder_buffer, rb_node));
3321 list_for_each_entry(w, &proc->todo, entry)
3322 print_binder_work(m, " ", " pending transaction", w);
3323 list_for_each_entry(w, &proc->delivered_death, entry) {
3324 seq_puts(m, " has delivered dead binder\n");
3325 break;
3326 }
3327 if (!print_all && m->count == header_pos)
3328 m->count = start_pos;
3329 }
3331 static const char * const binder_return_strings[] = {
3332 "BR_ERROR",
3333 "BR_OK",
3334 "BR_TRANSACTION",
3335 "BR_REPLY",
3336 "BR_ACQUIRE_RESULT",
3337 "BR_DEAD_REPLY",
3338 "BR_TRANSACTION_COMPLETE",
3339 "BR_INCREFS",
3340 "BR_ACQUIRE",
3341 "BR_RELEASE",
3342 "BR_DECREFS",
3343 "BR_ATTEMPT_ACQUIRE",
3344 "BR_NOOP",
3345 "BR_SPAWN_LOOPER",
3346 "BR_FINISHED",
3347 "BR_DEAD_BINDER",
3348 "BR_CLEAR_DEATH_NOTIFICATION_DONE",
3349 "BR_FAILED_REPLY"
3352 static const char * const binder_command_strings[] = {
3353 "BC_TRANSACTION",
3354 "BC_REPLY",
3355 "BC_ACQUIRE_RESULT",
3356 "BC_FREE_BUFFER",
3357 "BC_INCREFS",
3358 "BC_ACQUIRE",
3359 "BC_RELEASE",
3360 "BC_DECREFS",
3361 "BC_INCREFS_DONE",
3362 "BC_ACQUIRE_DONE",
3363 "BC_ATTEMPT_ACQUIRE",
3364 "BC_REGISTER_LOOPER",
3365 "BC_ENTER_LOOPER",
3366 "BC_EXIT_LOOPER",
3367 "BC_REQUEST_DEATH_NOTIFICATION",
3368 "BC_CLEAR_DEATH_NOTIFICATION",
3369 "BC_DEAD_BINDER_DONE"
3372 static const char * const binder_objstat_strings[] = {
3373 "proc",
3374 "thread",
3375 "node",
3376 "ref",
3377 "death",
3378 "transaction",
3379 "transaction_complete"
3382 static void print_binder_stats(struct seq_file *m, const char *prefix,
3383 struct binder_stats *stats)
3384 {
3385 int i;
3387 BUILD_BUG_ON(ARRAY_SIZE(stats->bc) !=
3388 ARRAY_SIZE(binder_command_strings));
3389 for (i = 0; i < ARRAY_SIZE(stats->bc); i++) {
3390 if (stats->bc[i])
3391 seq_printf(m, "%s%s: %d\n", prefix,
3392 binder_command_strings[i], stats->bc[i]);
3393 }
3395 BUILD_BUG_ON(ARRAY_SIZE(stats->br) !=
3396 ARRAY_SIZE(binder_return_strings));
3397 for (i = 0; i < ARRAY_SIZE(stats->br); i++) {
3398 if (stats->br[i])
3399 seq_printf(m, "%s%s: %d\n", prefix,
3400 binder_return_strings[i], stats->br[i]);
3401 }
3403 BUILD_BUG_ON(ARRAY_SIZE(stats->obj_created) !=
3404 ARRAY_SIZE(binder_objstat_strings));
3405 BUILD_BUG_ON(ARRAY_SIZE(stats->obj_created) !=
3406 ARRAY_SIZE(stats->obj_deleted));
3407 for (i = 0; i < ARRAY_SIZE(stats->obj_created); i++) {
3408 if (stats->obj_created[i] || stats->obj_deleted[i])
3409 seq_printf(m, "%s%s: active %d total %d\n", prefix,
3410 binder_objstat_strings[i],
3411 stats->obj_created[i] - stats->obj_deleted[i],
3412 stats->obj_created[i]);
3413 }
3414 }
3416 static void print_binder_proc_stats(struct seq_file *m,
3417 struct binder_proc *proc)
3418 {
3419 struct binder_work *w;
3420 struct rb_node *n;
3421 int count, strong, weak;
3423 seq_printf(m, "proc %d\n", proc->pid);
3424 count = 0;
3425 for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n))
3426 count++;
3427 seq_printf(m, " threads: %d\n", count);
3428 seq_printf(m, " requested threads: %d+%d/%d\n"
3429 " ready threads %d\n"
3430 " free async space %zd\n", proc->requested_threads,
3431 proc->requested_threads_started, proc->max_threads,
3432 proc->ready_threads, proc->free_async_space);
3433 count = 0;
3434 for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n))
3435 count++;
3436 seq_printf(m, " nodes: %d\n", count);
3437 count = 0;
3438 strong = 0;
3439 weak = 0;
3440 for (n = rb_first(&proc->refs_by_desc); n != NULL; n = rb_next(n)) {
3441 struct binder_ref *ref = rb_entry(n, struct binder_ref,
3442 rb_node_desc);
3443 count++;
3444 strong += ref->strong;
3445 weak += ref->weak;
3446 }
3447 seq_printf(m, " refs: %d s %d w %d\n", count, strong, weak);
3449 count = 0;
3450 for (n = rb_first(&proc->allocated_buffers); n != NULL; n = rb_next(n))
3451 count++;
3452 seq_printf(m, " buffers: %d\n", count);
3454 count = 0;
3455 list_for_each_entry(w, &proc->todo, entry) {
3456 switch (w->type) {
3457 case BINDER_WORK_TRANSACTION:
3458 count++;
3459 break;
3460 default:
3461 break;
3462 }
3463 }
3464 seq_printf(m, " pending transactions: %d\n", count);
3466 print_binder_stats(m, " ", &proc->stats);
3467 }
3470 static int binder_state_show(struct seq_file *m, void *unused)
3471 {
3472 struct binder_proc *proc;
3473 struct binder_node *node;
3474 int do_lock = !binder_debug_no_lock;
3476 if (do_lock)
3477 binder_lock(__func__);
3479 seq_puts(m, "binder state:\n");
3481 if (!hlist_empty(&binder_dead_nodes))
3482 seq_puts(m, "dead nodes:\n");
3483 hlist_for_each_entry(node, &binder_dead_nodes, dead_node)
3484 print_binder_node(m, node);
3486 hlist_for_each_entry(proc, &binder_procs, proc_node)
3487 print_binder_proc(m, proc, 1);
3488 if (do_lock)
3489 binder_unlock(__func__);
3490 return 0;
3491 }
3493 static int binder_stats_show(struct seq_file *m, void *unused)
3494 {
3495 struct binder_proc *proc;
3496 int do_lock = !binder_debug_no_lock;
3498 if (do_lock)
3499 binder_lock(__func__);
3501 seq_puts(m, "binder stats:\n");
3503 print_binder_stats(m, "", &binder_stats);
3505 hlist_for_each_entry(proc, &binder_procs, proc_node)
3506 print_binder_proc_stats(m, proc);
3507 if (do_lock)
3508 binder_unlock(__func__);
3509 return 0;
3510 }
3512 static int binder_transactions_show(struct seq_file *m, void *unused)
3513 {
3514 struct binder_proc *proc;
3515 int do_lock = !binder_debug_no_lock;
3517 if (do_lock)
3518 binder_lock(__func__);
3520 seq_puts(m, "binder transactions:\n");
3521 hlist_for_each_entry(proc, &binder_procs, proc_node)
3522 print_binder_proc(m, proc, 0);
3523 if (do_lock)
3524 binder_unlock(__func__);
3525 return 0;
3526 }
3528 static int binder_proc_show(struct seq_file *m, void *unused)
3529 {
3530 struct binder_proc *proc = m->private;
3531 int do_lock = !binder_debug_no_lock;
3533 if (do_lock)
3534 binder_lock(__func__);
3535 seq_puts(m, "binder proc state:\n");
3536 print_binder_proc(m, proc, 1);
3537 if (do_lock)
3538 binder_unlock(__func__);
3539 return 0;
3540 }
3542 static void print_binder_transaction_log_entry(struct seq_file *m,
3543 struct binder_transaction_log_entry *e)
3544 {
3545 seq_printf(m,
3546 "%d: %s from %d:%d to %d:%d node %d handle %d size %d:%d\n",
3547 e->debug_id, (e->call_type == 2) ? "reply" :
3548 ((e->call_type == 1) ? "async" : "call "), e->from_proc,
3549 e->from_thread, e->to_proc, e->to_thread, e->to_node,
3550 e->target_handle, e->data_size, e->offsets_size);
3551 }
3553 static int binder_transaction_log_show(struct seq_file *m, void *unused)
3554 {
3555 struct binder_transaction_log *log = m->private;
3556 int i;
3558 if (log->full) {
3559 for (i = log->next; i < ARRAY_SIZE(log->entry); i++)
3560 print_binder_transaction_log_entry(m, &log->entry[i]);
3561 }
3562 for (i = 0; i < log->next; i++)
3563 print_binder_transaction_log_entry(m, &log->entry[i]);
3564 return 0;
3565 }
3567 static const struct file_operations binder_fops = {
3568 .owner = THIS_MODULE,
3569 .poll = binder_poll,
3570 .unlocked_ioctl = binder_ioctl,
3571 .compat_ioctl = binder_ioctl,
3572 .mmap = binder_mmap,
3573 .open = binder_open,
3574 .flush = binder_flush,
3575 .release = binder_release,
3576 };
3578 static struct miscdevice binder_miscdev = {
3579 .minor = MISC_DYNAMIC_MINOR,
3580 .name = "binder",
3581 .fops = &binder_fops
3582 };
3584 BINDER_DEBUG_ENTRY(state);
3585 BINDER_DEBUG_ENTRY(stats);
3586 BINDER_DEBUG_ENTRY(transactions);
3587 BINDER_DEBUG_ENTRY(transaction_log);
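/*
 * binder_init() - register the /dev/binder misc device, create the
 * deferred-work workqueue and populate debugfs with the state, stats,
 * transactions and transaction_log files.
 */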
3589 static int __init binder_init(void)
3590 {
3591 int ret;
3593 binder_deferred_workqueue = create_singlethread_workqueue("binder");
3594 if (!binder_deferred_workqueue)
3595 return -ENOMEM;
3597 binder_debugfs_dir_entry_root = debugfs_create_dir("binder", NULL);
3598 if (binder_debugfs_dir_entry_root)
3599 binder_debugfs_dir_entry_proc = debugfs_create_dir("proc",
3600 binder_debugfs_dir_entry_root);
3601 ret = misc_register(&binder_miscdev);
3602 if (binder_debugfs_dir_entry_root) {
3603 debugfs_create_file("state",
3604 S_IRUGO,
3605 binder_debugfs_dir_entry_root,
3606 NULL,
3607 &binder_state_fops);
3608 debugfs_create_file("stats",
3609 S_IRUGO,
3610 binder_debugfs_dir_entry_root,
3611 NULL,
3612 &binder_stats_fops);
3613 debugfs_create_file("transactions",
3614 S_IRUGO,
3615 binder_debugfs_dir_entry_root,
3616 NULL,
3617 &binder_transactions_fops);
3618 debugfs_create_file("transaction_log",
3619 S_IRUGO,
3620 binder_debugfs_dir_entry_root,
3621 &binder_transaction_log,
3622 &binder_transaction_log_fops);
3623 debugfs_create_file("failed_transaction_log",
3624 S_IRUGO,
3625 binder_debugfs_dir_entry_root,
3626 &binder_transaction_log_failed,
3627 &binder_transaction_log_fops);
3628 }
3629 return ret;
3630 }
3632 device_initcall(binder_init);
3634 #define CREATE_TRACE_POINTS
3635 #include "binder_trace.h"
3637 MODULE_LICENSE("GPL v2");