// SPDX-License-Identifier: GPL-2.0-only
/* binder_alloc.c
 *
 * Android IPC Subsystem
 *
 * Copyright (C) 2007-2017 Google, Inc.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/list.h>
#include <linux/sched/mm.h>
#include <linux/module.h>
#include <linux/rtmutex.h>
#include <linux/rbtree.h>
#include <linux/seq_file.h>
#include <linux/vmalloc.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/list_lru.h>
#include <linux/ratelimit.h>
#include <asm/cacheflush.h>
#include <linux/uaccess.h>
#include <linux/highmem.h>
#include "binder_alloc.h"
#include "binder_trace.h"
struct list_lru binder_alloc_lru;

static DEFINE_MUTEX(binder_alloc_mmap_lock);
enum {
	BINDER_DEBUG_USER_ERROR             = 1U << 0,
	BINDER_DEBUG_OPEN_CLOSE             = 1U << 1,
	BINDER_DEBUG_BUFFER_ALLOC           = 1U << 2,
	BINDER_DEBUG_BUFFER_ALLOC_ASYNC     = 1U << 3,
};
static uint32_t binder_alloc_debug_mask = BINDER_DEBUG_USER_ERROR;
module_param_named(debug_mask, binder_alloc_debug_mask,
		   uint, 0644);
#define binder_alloc_debug(mask, x...) \
	do { \
		if (binder_alloc_debug_mask & mask) \
			pr_info_ratelimited(x); \
	} while (0)
static struct binder_buffer *binder_buffer_next(struct binder_buffer *buffer)
{
	return list_entry(buffer->entry.next, struct binder_buffer, entry);
}
static struct binder_buffer *binder_buffer_prev(struct binder_buffer *buffer)
{
	return list_entry(buffer->entry.prev, struct binder_buffer, entry);
}
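/*
 * Buffers are laid out back to back in the mapped region, so a buffer's
 * size is implicit: it runs from its own start to the start of the next
 * buffer in the list, or to the end of the region for the last buffer.
 */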
static size_t binder_alloc_buffer_size(struct binder_alloc *alloc,
				       struct binder_buffer *buffer)
{
	if (list_is_last(&buffer->entry, &alloc->buffers))
		return alloc->buffer + alloc->buffer_size - buffer->user_data;
	return binder_buffer_next(buffer)->user_data - buffer->user_data;
}
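/*
 * The free_buffers rb-tree is keyed by buffer size so allocation can do a
 * best-fit search; equal sizes descend right, so duplicates are allowed.
 */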
static void binder_insert_free_buffer(struct binder_alloc *alloc,
				      struct binder_buffer *new_buffer)
{
	struct rb_node **p = &alloc->free_buffers.rb_node;
	struct rb_node *parent = NULL;
	struct binder_buffer *buffer;
	size_t buffer_size;
	size_t new_buffer_size;

	BUG_ON(!new_buffer->free);

	new_buffer_size = binder_alloc_buffer_size(alloc, new_buffer);

	binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
		     "%d: add free buffer, size %zd, at %pK\n",
		      alloc->pid, new_buffer_size, new_buffer);

	while (*p) {
		parent = *p;
		buffer = rb_entry(parent, struct binder_buffer, rb_node);
		BUG_ON(!buffer->free);

		buffer_size = binder_alloc_buffer_size(alloc, buffer);

		if (new_buffer_size < buffer_size)
			p = &parent->rb_left;
		else
			p = &parent->rb_right;
	}
	rb_link_node(&new_buffer->rb_node, parent, p);
	rb_insert_color(&new_buffer->rb_node, &alloc->free_buffers);
}
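/*
 * The allocated_buffers rb-tree is keyed by userspace address; it is what
 * binder_alloc_prepare_to_free_locked() walks to look a buffer up from a
 * user pointer, so two live buffers can never share an address.
 */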
static void binder_insert_allocated_buffer_locked(
		struct binder_alloc *alloc, struct binder_buffer *new_buffer)
{
	struct rb_node **p = &alloc->allocated_buffers.rb_node;
	struct rb_node *parent = NULL;
	struct binder_buffer *buffer;

	BUG_ON(new_buffer->free);

	while (*p) {
		parent = *p;
		buffer = rb_entry(parent, struct binder_buffer, rb_node);
		BUG_ON(buffer->free);

		if (new_buffer->user_data < buffer->user_data)
			p = &parent->rb_left;
		else if (new_buffer->user_data > buffer->user_data)
			p = &parent->rb_right;
		else
			BUG();
	}
	rb_link_node(&new_buffer->rb_node, parent, p);
	rb_insert_color(&new_buffer->rb_node, &alloc->allocated_buffers);
}
static struct binder_buffer *binder_alloc_prepare_to_free_locked(
		struct binder_alloc *alloc,
		uintptr_t user_ptr)
{
	struct rb_node *n = alloc->allocated_buffers.rb_node;
	struct binder_buffer *buffer;
	void __user *uptr;

	uptr = (void __user *)user_ptr;

	while (n) {
		buffer = rb_entry(n, struct binder_buffer, rb_node);
		BUG_ON(buffer->free);

		if (uptr < buffer->user_data)
			n = n->rb_left;
		else if (uptr > buffer->user_data)
			n = n->rb_right;
		else {
			/*
			 * Guard against user threads attempting to
			 * free the buffer when in use by kernel or
			 * after it's already been freed.
			 */
			if (!buffer->allow_user_free)
				return ERR_PTR(-EPERM);
			buffer->allow_user_free = 0;
			return buffer;
		}
	}
	return NULL;
}
/**
 * binder_alloc_prepare_to_free() - get buffer given user ptr
 * @alloc:	binder_alloc for this proc
 * @user_ptr:	User pointer to buffer data
 *
 * Validate userspace pointer to buffer data and return buffer corresponding to
 * that user pointer. Search the rb tree for a buffer that matches the user
 * data pointer.
 *
 * Return:	Pointer to buffer or NULL
 */
struct binder_buffer *binder_alloc_prepare_to_free(struct binder_alloc *alloc,
						   uintptr_t user_ptr)
{
	struct binder_buffer *buffer;

	mutex_lock(&alloc->mutex);
	buffer = binder_alloc_prepare_to_free_locked(alloc, user_ptr);
	mutex_unlock(&alloc->mutex);
	return buffer;
}
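/*
 * Physical pages for the mapping are managed lazily by
 * binder_update_page_range(). With allocate == 1 it makes every page in
 * [start, end) resident and mapped into userspace, reclaiming previously
 * freed pages from the global binder_alloc_lru where possible. With
 * allocate == 0 it does not return pages to the system; it parks them on
 * the lru so the shrinker can free them under memory pressure.
 */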
static int binder_update_page_range(struct binder_alloc *alloc, int allocate,
				    void __user *start, void __user *end)
{
	void __user *page_addr;
	unsigned long user_page_addr;
	struct binder_lru_page *page;
	struct vm_area_struct *vma = NULL;
	struct mm_struct *mm = NULL;
	bool need_mm = false;

	binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
		     "%d: %s pages %pK-%pK\n", alloc->pid,
		     allocate ? "allocate" : "free", start, end);

	if (end <= start)
		return 0;

	trace_binder_update_page_range(alloc, allocate, start, end);

	if (allocate == 0)
		goto free_range;

	for (page_addr = start; page_addr < end; page_addr += PAGE_SIZE) {
		page = &alloc->pages[(page_addr - alloc->buffer) / PAGE_SIZE];
		if (!page->page_ptr) {
			need_mm = true;
			break;
		}
	}

	if (need_mm && mmget_not_zero(alloc->vma_vm_mm))
		mm = alloc->vma_vm_mm;

	if (mm) {
		down_read(&mm->mmap_sem);
		vma = alloc->vma;
	}

	if (!vma && need_mm) {
		binder_alloc_debug(BINDER_DEBUG_USER_ERROR,
				   "%d: binder_alloc_buf failed to map pages in userspace, no vma\n",
				   alloc->pid);
		goto err_no_vma;
	}

	for (page_addr = start; page_addr < end; page_addr += PAGE_SIZE) {
		int ret;
		bool on_lru;
		size_t index;

		index = (page_addr - alloc->buffer) / PAGE_SIZE;
		page = &alloc->pages[index];

		if (page->page_ptr) {
			trace_binder_alloc_lru_start(alloc, index);

			on_lru = list_lru_del(&binder_alloc_lru, &page->lru);
			WARN_ON(!on_lru);

			trace_binder_alloc_lru_end(alloc, index);
			continue;
		}

		if (WARN_ON(!vma))
			goto err_page_ptr_cleared;

		trace_binder_alloc_page_start(alloc, index);
		page->page_ptr = alloc_page(GFP_KERNEL |
					    __GFP_HIGHMEM |
					    __GFP_ZERO);
		if (!page->page_ptr) {
			pr_err("%d: binder_alloc_buf failed for page at %pK\n",
			       alloc->pid, page_addr);
			goto err_alloc_page_failed;
		}
		page->alloc = alloc;
		INIT_LIST_HEAD(&page->lru);

		user_page_addr = (uintptr_t)page_addr;
		ret = vm_insert_page(vma, user_page_addr, page[0].page_ptr);
		if (ret) {
			pr_err("%d: binder_alloc_buf failed to map page at %lx in userspace\n",
			       alloc->pid, user_page_addr);
			goto err_vm_insert_page_failed;
		}

		if (index + 1 > alloc->pages_high)
			alloc->pages_high = index + 1;

		trace_binder_alloc_page_end(alloc, index);
		/* vm_insert_page does not seem to increment the refcount */
	}
	if (mm) {
		up_read(&mm->mmap_sem);
		mmput(mm);
	}
	return 0;
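	/*
	 * Pages freed below stay allocated and mapped; they are only added
	 * to binder_alloc_lru for the shrinker. The error labels sit inside
	 * the loop so a failed allocation above unwinds through this same
	 * walk, starting from the page that failed.
	 */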
free_range:
	for (page_addr = end - PAGE_SIZE; page_addr >= start;
	     page_addr -= PAGE_SIZE) {
		bool ret;
		size_t index;

		index = (page_addr - alloc->buffer) / PAGE_SIZE;
		page = &alloc->pages[index];

		trace_binder_free_lru_start(alloc, index);

		ret = list_lru_add(&binder_alloc_lru, &page->lru);
		WARN_ON(!ret);

		trace_binder_free_lru_end(alloc, index);
		continue;

err_vm_insert_page_failed:
		__free_page(page->page_ptr);
		page->page_ptr = NULL;
err_alloc_page_failed:
err_page_ptr_cleared:
		;
	}
err_no_vma:
	if (mm) {
		up_read(&mm->mmap_sem);
		mmput(mm);
	}
	return vma ? -ENOMEM : -ESRCH;
}
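/*
 * alloc->vma_vm_mm must be published before alloc->vma: readers issue a
 * paired smp_rmb() in binder_alloc_get_vma(), so once they observe a
 * non-NULL vma they are guaranteed to see the matching vma_vm_mm.
 */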
static inline void binder_alloc_set_vma(struct binder_alloc *alloc,
		struct vm_area_struct *vma)
{
	if (vma)
		alloc->vma_vm_mm = vma->vm_mm;
	/*
	 * If we see alloc->vma is not NULL, the buffer data structures are
	 * set up completely. Look at the smp_rmb() side in
	 * binder_alloc_get_vma(). We also want to guarantee the new
	 * alloc->vma_vm_mm is always visible if alloc->vma is set.
	 */
	smp_wmb();
	alloc->vma = vma;
}
static inline struct vm_area_struct *binder_alloc_get_vma(
		struct binder_alloc *alloc)
{
	struct vm_area_struct *vma = NULL;

	if (alloc->vma) {
		/* Look at description in binder_alloc_set_vma */
		smp_rmb();
		vma = alloc->vma;
	}
	return vma;
}
static struct binder_buffer *binder_alloc_new_buf_locked(
				struct binder_alloc *alloc,
				size_t data_size,
				size_t offsets_size,
				size_t extra_buffers_size,
				int is_async)
{
	struct rb_node *n = alloc->free_buffers.rb_node;
	struct binder_buffer *buffer;
	size_t buffer_size;
	struct rb_node *best_fit = NULL;
	void __user *has_page_addr;
	void __user *end_page_addr;
	size_t size, data_offsets_size;
	int ret;

	if (!binder_alloc_get_vma(alloc)) {
		binder_alloc_debug(BINDER_DEBUG_USER_ERROR,
				   "%d: binder_alloc_buf, no vma\n",
				   alloc->pid);
		return ERR_PTR(-ESRCH);
	}
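	/*
	 * The requested sizes are pointer-aligned and summed with explicit
	 * overflow checks: each sum below must not come out smaller than
	 * the operands that produced it.
	 */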
	data_offsets_size = ALIGN(data_size, sizeof(void *)) +
		ALIGN(offsets_size, sizeof(void *));

	if (data_offsets_size < data_size || data_offsets_size < offsets_size) {
		binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
				"%d: got transaction with invalid size %zd-%zd\n",
				alloc->pid, data_size, offsets_size);
		return ERR_PTR(-EINVAL);
	}
	size = data_offsets_size + ALIGN(extra_buffers_size, sizeof(void *));
	if (size < data_offsets_size || size < extra_buffers_size) {
		binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
				"%d: got transaction with invalid extra_buffers_size %zd\n",
				alloc->pid, extra_buffers_size);
		return ERR_PTR(-EINVAL);
	}
	if (is_async &&
	    alloc->free_async_space < size + sizeof(struct binder_buffer)) {
		binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
			     "%d: binder_alloc_buf size %zd failed, no async space left\n",
			      alloc->pid, size);
		return ERR_PTR(-ENOSPC);
	}

	/* Pad 0-size buffers so they get assigned unique addresses */
	size = max(size, sizeof(void *));
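	/*
	 * Best-fit search of the size-ordered free tree: any free buffer at
	 * least as large as @size is a candidate; keep descending left to
	 * find the smallest such buffer, and stop early on an exact match.
	 */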
	while (n) {
		buffer = rb_entry(n, struct binder_buffer, rb_node);
		BUG_ON(!buffer->free);
		buffer_size = binder_alloc_buffer_size(alloc, buffer);

		if (size < buffer_size) {
			best_fit = n;
			n = n->rb_left;
		} else if (size > buffer_size)
			n = n->rb_right;
		else {
			best_fit = n;
			break;
		}
	}
	if (best_fit == NULL) {
		size_t allocated_buffers = 0;
		size_t largest_alloc_size = 0;
		size_t total_alloc_size = 0;
		size_t free_buffers = 0;
		size_t largest_free_size = 0;
		size_t total_free_size = 0;

		for (n = rb_first(&alloc->allocated_buffers); n != NULL;
		     n = rb_next(n)) {
			buffer = rb_entry(n, struct binder_buffer, rb_node);
			buffer_size = binder_alloc_buffer_size(alloc, buffer);
			allocated_buffers++;
			total_alloc_size += buffer_size;
			if (buffer_size > largest_alloc_size)
				largest_alloc_size = buffer_size;
		}
		for (n = rb_first(&alloc->free_buffers); n != NULL;
		     n = rb_next(n)) {
			buffer = rb_entry(n, struct binder_buffer, rb_node);
			buffer_size = binder_alloc_buffer_size(alloc, buffer);
			free_buffers++;
			total_free_size += buffer_size;
			if (buffer_size > largest_free_size)
				largest_free_size = buffer_size;
		}
		binder_alloc_debug(BINDER_DEBUG_USER_ERROR,
				   "%d: binder_alloc_buf size %zd failed, no address space\n",
				   alloc->pid, size);
		binder_alloc_debug(BINDER_DEBUG_USER_ERROR,
				   "allocated: %zd (num: %zd largest: %zd), free: %zd (num: %zd largest: %zd)\n",
				   total_alloc_size, allocated_buffers,
				   largest_alloc_size, total_free_size,
				   free_buffers, largest_free_size);
		return ERR_PTR(-ENOSPC);
	}
	if (n == NULL) {
		buffer = rb_entry(best_fit, struct binder_buffer, rb_node);
		buffer_size = binder_alloc_buffer_size(alloc, buffer);
	}

	binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
		     "%d: binder_alloc_buf size %zd got buffer %pK size %zd\n",
		      alloc->pid, size, buffer, buffer_size);

	has_page_addr = (void __user *)
		(((uintptr_t)buffer->user_data + buffer_size) & PAGE_MASK);
	WARN_ON(n && buffer_size != size);
	end_page_addr =
		(void __user *)PAGE_ALIGN((uintptr_t)buffer->user_data + size);
	if (end_page_addr > has_page_addr)
		end_page_addr = has_page_addr;
	ret = binder_update_page_range(alloc, 1, (void __user *)
		PAGE_ALIGN((uintptr_t)buffer->user_data), end_page_addr);
	if (ret)
		return ERR_PTR(ret);
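	/*
	 * If the best fit is larger than requested, split it: carve off the
	 * tail as a new free buffer so only @size bytes leave the free tree.
	 */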
	if (buffer_size != size) {
		struct binder_buffer *new_buffer;

		new_buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
		if (!new_buffer) {
			pr_err("%s: %d failed to alloc new buffer struct\n",
			       __func__, alloc->pid);
			goto err_alloc_buf_struct_failed;
		}
		new_buffer->user_data = (u8 __user *)buffer->user_data + size;
		list_add(&new_buffer->entry, &buffer->entry);
		new_buffer->free = 1;
		binder_insert_free_buffer(alloc, new_buffer);
	}
	rb_erase(best_fit, &alloc->free_buffers);
	buffer->free = 0;
	buffer->allow_user_free = 0;
	binder_insert_allocated_buffer_locked(alloc, buffer);
	binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
		     "%d: binder_alloc_buf size %zd got %pK\n",
		      alloc->pid, size, buffer);
	buffer->data_size = data_size;
	buffer->offsets_size = offsets_size;
	buffer->async_transaction = is_async;
	buffer->extra_buffers_size = extra_buffers_size;
	if (is_async) {
		alloc->free_async_space -= size + sizeof(struct binder_buffer);
		binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC_ASYNC,
			     "%d: binder_alloc_buf size %zd async free %zd\n",
			      alloc->pid, size, alloc->free_async_space);
	}
	return buffer;
err_alloc_buf_struct_failed:
	binder_update_page_range(alloc, 0, (void __user *)
				 PAGE_ALIGN((uintptr_t)buffer->user_data),
				 end_page_addr);
	return ERR_PTR(-ENOMEM);
}
/**
 * binder_alloc_new_buf() - Allocate a new binder buffer
 * @alloc:              binder_alloc for this proc
 * @data_size:          size of user data buffer
 * @offsets_size:       user specified buffer offset
 * @extra_buffers_size: size of extra space for meta-data (eg, security context)
 * @is_async:           buffer for async transaction
 *
 * Allocate a new buffer given the requested sizes. Returns
 * the kernel version of the buffer pointer. The size allocated
 * is the sum of the three given sizes (each rounded up to a
 * pointer-sized boundary).
 *
 * Return: The allocated buffer or %ERR_PTR(-errno) on error
 */
struct binder_buffer *binder_alloc_new_buf(struct binder_alloc *alloc,
					   size_t data_size,
					   size_t offsets_size,
					   size_t extra_buffers_size,
					   int is_async)
{
	struct binder_buffer *buffer;

	mutex_lock(&alloc->mutex);
	buffer = binder_alloc_new_buf_locked(alloc, data_size, offsets_size,
					     extra_buffers_size, is_async);
	mutex_unlock(&alloc->mutex);
	return buffer;
}
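/*
 * Page-boundary helpers used when freeing: a free buffer's start page can
 * only be released if no neighboring buffer still lives on that page.
 */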
static void __user *buffer_start_page(struct binder_buffer *buffer)
{
	return (void __user *)((uintptr_t)buffer->user_data & PAGE_MASK);
}
static void __user *prev_buffer_end_page(struct binder_buffer *buffer)
{
	return (void __user *)
		(((uintptr_t)(buffer->user_data) - 1) & PAGE_MASK);
}
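/*
 * Remove a free buffer that is being merged away. Its start page is
 * released only when neither the previous nor the next buffer shares it
 * and the buffer does not begin exactly on a page boundary.
 */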
static void binder_delete_free_buffer(struct binder_alloc *alloc,
				      struct binder_buffer *buffer)
{
	struct binder_buffer *prev, *next = NULL;
	bool to_free = true;

	BUG_ON(alloc->buffers.next == &buffer->entry);
	prev = binder_buffer_prev(buffer);
	BUG_ON(!prev->free);
	if (prev_buffer_end_page(prev) == buffer_start_page(buffer)) {
		to_free = false;
		binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
				   "%d: merge free, buffer %pK share page with %pK\n",
				   alloc->pid, buffer->user_data,
				   prev->user_data);
	}

	if (!list_is_last(&buffer->entry, &alloc->buffers)) {
		next = binder_buffer_next(buffer);
		if (buffer_start_page(next) == buffer_start_page(buffer)) {
			to_free = false;
			binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
					   "%d: merge free, buffer %pK share page with %pK\n",
					   alloc->pid,
					   buffer->user_data,
					   next->user_data);
		}
	}

	if (PAGE_ALIGNED(buffer->user_data)) {
		binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
				   "%d: merge free, buffer start %pK is page aligned\n",
				   alloc->pid, buffer->user_data);
		to_free = false;
	}

	if (to_free) {
		binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
				   "%d: merge free, buffer %pK do not share page with %pK or %pK\n",
				   alloc->pid, buffer->user_data,
				   prev->user_data,
				   next ? next->user_data : NULL);
		binder_update_page_range(alloc, 0, buffer_start_page(buffer),
					 buffer_start_page(buffer) + PAGE_SIZE);
	}
	list_del(&buffer->entry);
	kfree(buffer);
}
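/*
 * Free a buffer: return its fully covered pages to the lru, then coalesce
 * with free neighbors so the buffer list never holds two adjacent free
 * entries.
 */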
static void binder_free_buf_locked(struct binder_alloc *alloc,
				   struct binder_buffer *buffer)
{
	size_t size, buffer_size;

	buffer_size = binder_alloc_buffer_size(alloc, buffer);

	size = ALIGN(buffer->data_size, sizeof(void *)) +
		ALIGN(buffer->offsets_size, sizeof(void *)) +
		ALIGN(buffer->extra_buffers_size, sizeof(void *));

	binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
		     "%d: binder_free_buf %pK size %zd buffer_size %zd\n",
		      alloc->pid, buffer, size, buffer_size);

	BUG_ON(buffer->free);
	BUG_ON(size > buffer_size);
	BUG_ON(buffer->transaction != NULL);
	BUG_ON(buffer->user_data < alloc->buffer);
	BUG_ON(buffer->user_data > alloc->buffer + alloc->buffer_size);

	if (buffer->async_transaction) {
		alloc->free_async_space += size + sizeof(struct binder_buffer);

		binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC_ASYNC,
			     "%d: binder_free_buf size %zd async free %zd\n",
			      alloc->pid, size, alloc->free_async_space);
	}

	binder_update_page_range(alloc, 0,
		(void __user *)PAGE_ALIGN((uintptr_t)buffer->user_data),
		(void __user *)(((uintptr_t)
			  buffer->user_data + buffer_size) & PAGE_MASK));

	rb_erase(&buffer->rb_node, &alloc->allocated_buffers);
	buffer->free = 1;
	if (!list_is_last(&buffer->entry, &alloc->buffers)) {
		struct binder_buffer *next = binder_buffer_next(buffer);

		if (next->free) {
			rb_erase(&next->rb_node, &alloc->free_buffers);
			binder_delete_free_buffer(alloc, next);
		}
	}
	if (alloc->buffers.next != &buffer->entry) {
		struct binder_buffer *prev = binder_buffer_prev(buffer);

		if (prev->free) {
			binder_delete_free_buffer(alloc, buffer);
			rb_erase(&prev->rb_node, &alloc->free_buffers);
			buffer = prev;
		}
	}
	binder_insert_free_buffer(alloc, buffer);
}
/**
 * binder_alloc_free_buf() - free a binder buffer
 * @alloc:	binder_alloc for this proc
 * @buffer:	kernel pointer to buffer
 *
 * Free the buffer allocated via binder_alloc_new_buf()
 */
void binder_alloc_free_buf(struct binder_alloc *alloc,
			   struct binder_buffer *buffer)
{
	mutex_lock(&alloc->mutex);
	binder_free_buf_locked(alloc, buffer);
	mutex_unlock(&alloc->mutex);
}
/**
 * binder_alloc_mmap_handler() - map virtual address space for proc
 * @alloc:	alloc structure for this proc
 * @vma:	vma passed to mmap()
 *
 * Called by binder_mmap() to initialize the space specified in
 * vma for allocating binder buffers
 *
 * Return:
 *      0 = success
 *      -EBUSY = address space already mapped
 *      -ENOMEM = failed to map memory to given address space
 */
int binder_alloc_mmap_handler(struct binder_alloc *alloc,
			      struct vm_area_struct *vma)
{
	int ret;
	const char *failure_string;
	struct binder_buffer *buffer;

	mutex_lock(&binder_alloc_mmap_lock);
	if (alloc->buffer) {
		ret = -EBUSY;
		failure_string = "already mapped";
		goto err_already_mapped;
	}

	alloc->buffer = (void __user *)vma->vm_start;
	mutex_unlock(&binder_alloc_mmap_lock);

	alloc->pages = kcalloc((vma->vm_end - vma->vm_start) / PAGE_SIZE,
			       sizeof(alloc->pages[0]),
			       GFP_KERNEL);
	if (alloc->pages == NULL) {
		ret = -ENOMEM;
		failure_string = "alloc page array";
		goto err_alloc_pages_failed;
	}
	alloc->buffer_size = vma->vm_end - vma->vm_start;

	buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
	if (!buffer) {
		ret = -ENOMEM;
		failure_string = "alloc buffer struct";
		goto err_alloc_buf_struct_failed;
	}

	buffer->user_data = alloc->buffer;
	list_add(&buffer->entry, &alloc->buffers);
	buffer->free = 1;
	binder_insert_free_buffer(alloc, buffer);
	alloc->free_async_space = alloc->buffer_size / 2;
	binder_alloc_set_vma(alloc, vma);
	mmgrab(alloc->vma_vm_mm);

	return 0;

err_alloc_buf_struct_failed:
	kfree(alloc->pages);
	alloc->pages = NULL;
err_alloc_pages_failed:
	mutex_lock(&binder_alloc_mmap_lock);
	alloc->buffer = NULL;
err_already_mapped:
	mutex_unlock(&binder_alloc_mmap_lock);
	binder_alloc_debug(BINDER_DEBUG_USER_ERROR,
			   "%s: %d %lx-%lx %s failed %d\n", __func__,
			   alloc->pid, vma->vm_start, vma->vm_end,
			   failure_string, ret);
	return ret;
}
void binder_alloc_deferred_release(struct binder_alloc *alloc)
{
	struct rb_node *n;
	int buffers, page_count;
	struct binder_buffer *buffer;

	buffers = 0;
	mutex_lock(&alloc->mutex);
	BUG_ON(alloc->vma);

	while ((n = rb_first(&alloc->allocated_buffers))) {
		buffer = rb_entry(n, struct binder_buffer, rb_node);

		/* Transaction should already have been freed */
		BUG_ON(buffer->transaction);

		binder_free_buf_locked(alloc, buffer);
		buffers++;
	}

	while (!list_empty(&alloc->buffers)) {
		buffer = list_first_entry(&alloc->buffers,
					  struct binder_buffer, entry);
		WARN_ON(!buffer->free);

		list_del(&buffer->entry);
		WARN_ON_ONCE(!list_empty(&alloc->buffers));
		kfree(buffer);
	}

	page_count = 0;
	if (alloc->pages) {
		int i;

		for (i = 0; i < alloc->buffer_size / PAGE_SIZE; i++) {
			void __user *page_addr;
			bool on_lru;

			if (!alloc->pages[i].page_ptr)
				continue;

			on_lru = list_lru_del(&binder_alloc_lru,
					      &alloc->pages[i].lru);
			page_addr = alloc->buffer + i * PAGE_SIZE;
			binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
				     "%s: %d: page %d at %pK %s\n",
				     __func__, alloc->pid, i, page_addr,
				     on_lru ? "on lru" : "active");
			__free_page(alloc->pages[i].page_ptr);
			page_count++;
		}
		kfree(alloc->pages);
	}
	mutex_unlock(&alloc->mutex);
	if (alloc->vma_vm_mm)
		mmdrop(alloc->vma_vm_mm);

	binder_alloc_debug(BINDER_DEBUG_OPEN_CLOSE,
		     "%s: %d buffers %d, pages %d\n",
		     __func__, alloc->pid, buffers, page_count);
}
static void print_binder_buffer(struct seq_file *m, const char *prefix,
				struct binder_buffer *buffer)
{
	seq_printf(m, "%s %d: %pK size %zd:%zd:%zd %s\n",
		   prefix, buffer->debug_id, buffer->user_data,
		   buffer->data_size, buffer->offsets_size,
		   buffer->extra_buffers_size,
		   buffer->transaction ? "active" : "delivered");
}
/**
 * binder_alloc_print_allocated() - print buffer info
 * @m:     seq_file for output via seq_printf()
 * @alloc: binder_alloc for this proc
 *
 * Prints information about every buffer associated with
 * the binder_alloc state to the given seq_file
 */
void binder_alloc_print_allocated(struct seq_file *m,
				  struct binder_alloc *alloc)
{
	struct rb_node *n;

	mutex_lock(&alloc->mutex);
	for (n = rb_first(&alloc->allocated_buffers); n != NULL; n = rb_next(n))
		print_binder_buffer(m, "  buffer",
				    rb_entry(n, struct binder_buffer, rb_node));
	mutex_unlock(&alloc->mutex);
}
/**
 * binder_alloc_print_pages() - print page usage
 * @m:     seq_file for output via seq_printf()
 * @alloc: binder_alloc for this proc
 */
void binder_alloc_print_pages(struct seq_file *m,
			      struct binder_alloc *alloc)
{
	struct binder_lru_page *page;
	int i;
	int active = 0;
	int lru = 0;
	int free = 0;

	mutex_lock(&alloc->mutex);
	for (i = 0; i < alloc->buffer_size / PAGE_SIZE; i++) {
		page = &alloc->pages[i];
		if (!page->page_ptr)
			free++;
		else if (list_empty(&page->lru))
			active++;
		else
			lru++;
	}
	mutex_unlock(&alloc->mutex);
	seq_printf(m, "  pages: %d:%d:%d\n", active, lru, free);
	seq_printf(m, "  pages high watermark: %zu\n", alloc->pages_high);
}
/**
 * binder_alloc_get_allocated_count() - return count of buffers
 * @alloc: binder_alloc for this proc
 *
 * Return: count of allocated buffers
 */
int binder_alloc_get_allocated_count(struct binder_alloc *alloc)
{
	struct rb_node *n;
	int count = 0;

	mutex_lock(&alloc->mutex);
	for (n = rb_first(&alloc->allocated_buffers); n != NULL; n = rb_next(n))
		count++;
	mutex_unlock(&alloc->mutex);
	return count;
}
/**
 * binder_alloc_vma_close() - invalidate address space
 * @alloc: binder_alloc for this proc
 *
 * Called from binder_vma_close() when releasing address space.
 * Clears alloc->vma to prevent new incoming transactions from
 * allocating more buffers.
 */
void binder_alloc_vma_close(struct binder_alloc *alloc)
{
	binder_alloc_set_vma(alloc, NULL);
}
/**
 * binder_alloc_free_page() - shrinker callback to free pages
 * @item:   item to free
 * @lru:    list_lru the item belongs to
 * @lock:   lock protecting the item
 * @cb_arg: callback argument
 *
 * Called from list_lru_walk() in binder_shrink_scan() to free
 * up pages when the system is under memory pressure.
 */
enum lru_status binder_alloc_free_page(struct list_head *item,
				       struct list_lru_one *lru,
				       spinlock_t *lock,
				       void *cb_arg)
	__must_hold(lock)
{
	struct mm_struct *mm = NULL;
	struct binder_lru_page *page = container_of(item,
						    struct binder_lru_page,
						    lru);
	struct binder_alloc *alloc;
	uintptr_t page_addr;
	size_t index;
	struct vm_area_struct *vma;

	alloc = page->alloc;
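	/*
	 * This runs from shrinker context under memory pressure, so every
	 * lock is taken with trylock; on any contention the page is simply
	 * skipped rather than risking a deadlock.
	 */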
	if (!mutex_trylock(&alloc->mutex))
		goto err_get_alloc_mutex_failed;

	if (!page->page_ptr)
		goto err_page_already_freed;

	index = page - alloc->pages;
	page_addr = (uintptr_t)alloc->buffer + index * PAGE_SIZE;

	mm = alloc->vma_vm_mm;
	if (!mmget_not_zero(mm))
		goto err_mmget;
	if (!down_read_trylock(&mm->mmap_sem))
		goto err_down_read_mmap_sem_failed;
	vma = binder_alloc_get_vma(alloc);

	list_lru_isolate(lru, item);
	spin_unlock(lock);

	if (vma) {
		trace_binder_unmap_user_start(alloc, index);

		zap_page_range(vma, page_addr, PAGE_SIZE);

		trace_binder_unmap_user_end(alloc, index);
	}
	up_read(&mm->mmap_sem);
	mmput(mm);

	trace_binder_unmap_kernel_start(alloc, index);

	__free_page(page->page_ptr);
	page->page_ptr = NULL;

	trace_binder_unmap_kernel_end(alloc, index);

	spin_lock(lock);
	mutex_unlock(&alloc->mutex);
	return LRU_REMOVED_RETRY;

err_down_read_mmap_sem_failed:
	mmput_async(mm);
err_mmget:
err_page_already_freed:
	mutex_unlock(&alloc->mutex);
err_get_alloc_mutex_failed:
	return LRU_SKIP;
}
static unsigned long
binder_shrink_count(struct shrinker *shrink, struct shrink_control *sc)
{
	unsigned long ret = list_lru_count(&binder_alloc_lru);

	return ret;
}
static unsigned long
binder_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
{
	unsigned long ret;

	ret = list_lru_walk(&binder_alloc_lru, binder_alloc_free_page,
			    NULL, sc->nr_to_scan);
	return ret;
}
static struct shrinker binder_shrinker = {
	.count_objects = binder_shrink_count,
	.scan_objects = binder_shrink_scan,
	.seeks = DEFAULT_SEEKS,
};
/**
 * binder_alloc_init() - called by binder_open() for per-proc initialization
 * @alloc: binder_alloc for this proc
 *
 * Called from binder_open() to initialize binder_alloc fields for
 * new binder proc
 */
void binder_alloc_init(struct binder_alloc *alloc)
{
	alloc->pid = current->group_leader->pid;
	mutex_init(&alloc->mutex);
	INIT_LIST_HEAD(&alloc->buffers);
}
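/*
 * One-time setup for the global shrinker; if registration fails the lru
 * is torn down again so init and teardown stay balanced.
 */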
int binder_alloc_shrinker_init(void)
{
	int ret = list_lru_init(&binder_alloc_lru);

	if (ret == 0) {
		ret = register_shrinker(&binder_shrinker);
		if (ret)
			list_lru_destroy(&binder_alloc_lru);
	}
	return ret;
}
/**
 * check_buffer() - verify that buffer/offset is safe to access
 * @alloc: binder_alloc for this proc
 * @buffer: binder buffer to be accessed
 * @offset: offset into @buffer data
 * @bytes: bytes to access from offset
 *
 * Check that the @offset/@bytes are within the size of the given
 * @buffer and that the buffer is currently active and not freeable.
 * Offsets must also be multiples of sizeof(u32). The kernel is
 * allowed to touch the buffer in two cases:
 *
 * 1) when the buffer is being created:
 *     (buffer->free == 0 && buffer->allow_user_free == 0)
 * 2) when the buffer is being torn down:
 *     (buffer->free == 0 && buffer->transaction == NULL).
 *
 * Return: true if the buffer is safe to access
 */
static inline bool check_buffer(struct binder_alloc *alloc,
				struct binder_buffer *buffer,
				binder_size_t offset, size_t bytes)
{
	size_t buffer_size = binder_alloc_buffer_size(alloc, buffer);

	return buffer_size >= bytes &&
		offset <= buffer_size - bytes &&
		IS_ALIGNED(offset, sizeof(u32)) &&
		!buffer->free &&
		(!buffer->allow_user_free || !buffer->transaction);
}
/**
 * binder_alloc_get_page() - get kernel pointer for given buffer offset
 * @alloc: binder_alloc for this proc
 * @buffer: binder buffer to be accessed
 * @buffer_offset: offset into @buffer data
 * @pgoffp: address to copy final page offset to
 *
 * Lookup the struct page corresponding to the address
 * at @buffer_offset into @buffer->user_data. If @pgoffp is not
 * NULL, the byte-offset into the page is written there.
 *
 * The caller is responsible to ensure that the offset points
 * to a valid address within the @buffer and that @buffer is
 * not freeable by the user. Since it can't be freed, we are
 * guaranteed that the corresponding elements of @alloc->pages[]
 * cannot change.
 *
 * Return: struct page
 */
static struct page *binder_alloc_get_page(struct binder_alloc *alloc,
					  struct binder_buffer *buffer,
					  binder_size_t buffer_offset,
					  pgoff_t *pgoffp)
{
	binder_size_t buffer_space_offset = buffer_offset +
		(buffer->user_data - alloc->buffer);
	pgoff_t pgoff = buffer_space_offset & ~PAGE_MASK;
	size_t index = buffer_space_offset >> PAGE_SHIFT;
	struct binder_lru_page *lru_page;

	lru_page = &alloc->pages[index];
	*pgoffp = pgoff;
	return lru_page->page_ptr;
}
/**
 * binder_alloc_copy_user_to_buffer() - copy src user to tgt user
 * @alloc: binder_alloc for this proc
 * @buffer: binder buffer to be accessed
 * @buffer_offset: offset into @buffer data
 * @from: userspace pointer to source buffer
 * @bytes: bytes to copy
 *
 * Copy bytes from source userspace to target buffer.
 *
 * Return: bytes remaining to be copied
 */
unsigned long
binder_alloc_copy_user_to_buffer(struct binder_alloc *alloc,
				 struct binder_buffer *buffer,
				 binder_size_t buffer_offset,
				 const void __user *from,
				 size_t bytes)
{
	if (!check_buffer(alloc, buffer, buffer_offset, bytes))
		return bytes;

	while (bytes) {
		unsigned long size;
		unsigned long ret;
		struct page *page;
		pgoff_t pgoff;
		void *kptr;

		page = binder_alloc_get_page(alloc, buffer,
					     buffer_offset, &pgoff);
		size = min_t(size_t, bytes, PAGE_SIZE - pgoff);
		kptr = kmap(page) + pgoff;
		ret = copy_from_user(kptr, from, size);
		kunmap(page);
		if (ret)
			return bytes - size + ret;
		bytes -= size;
		from += size;
		buffer_offset += size;
	}
	return 0;
}
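/*
 * Common engine for kernel<->buffer copies: walk the buffer one page at a
 * time, mapping each page with kmap_atomic() and copying at most up to
 * the next page boundary per iteration.
 */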
static void binder_alloc_do_buffer_copy(struct binder_alloc *alloc,
					bool to_buffer,
					struct binder_buffer *buffer,
					binder_size_t buffer_offset,
					void *ptr,
					size_t bytes)
{
	/* All copies must be 32-bit aligned and 32-bit size */
	BUG_ON(!check_buffer(alloc, buffer, buffer_offset, bytes));

	while (bytes) {
		unsigned long size;
		struct page *page;
		pgoff_t pgoff;
		void *tmpptr;
		void *base_ptr;

		page = binder_alloc_get_page(alloc, buffer,
					     buffer_offset, &pgoff);
		size = min_t(size_t, bytes, PAGE_SIZE - pgoff);
		base_ptr = kmap_atomic(page);
		tmpptr = base_ptr + pgoff;
		if (to_buffer)
			memcpy(tmpptr, ptr, size);
		else
			memcpy(ptr, tmpptr, size);
		/*
		 * kunmap_atomic() takes care of flushing the cache
		 * if this device has VIVT cache arch
		 */
		kunmap_atomic(base_ptr);
		bytes -= size;
		pgoff = 0;
		ptr = ptr + size;
		buffer_offset += size;
	}
}
void binder_alloc_copy_to_buffer(struct binder_alloc *alloc,
				 struct binder_buffer *buffer,
				 binder_size_t buffer_offset,
				 void *src,
				 size_t bytes)
{
	binder_alloc_do_buffer_copy(alloc, true, buffer, buffer_offset,
				    src, bytes);
}
void binder_alloc_copy_from_buffer(struct binder_alloc *alloc,
				   void *dest,
				   struct binder_buffer *buffer,
				   binder_size_t buffer_offset,
				   size_t bytes)
{
	binder_alloc_do_buffer_copy(alloc, false, buffer, buffer_offset,
				    dest, bytes);
}