/* binder_alloc.c
 *
 * Android IPC Subsystem
 *
 * Copyright (C) 2007-2017 Google, Inc.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <asm/cacheflush.h>
#include <linux/list.h>
#include <linux/sched/mm.h>
#include <linux/module.h>
#include <linux/rtmutex.h>
#include <linux/rbtree.h>
#include <linux/seq_file.h>
#include <linux/vmalloc.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/list_lru.h>
#include "binder_alloc.h"
#include "binder_trace.h"

struct list_lru binder_alloc_lru;

static DEFINE_MUTEX(binder_alloc_mmap_lock);

enum {
	BINDER_DEBUG_OPEN_CLOSE             = 1U << 1,
	BINDER_DEBUG_BUFFER_ALLOC           = 1U << 2,
	BINDER_DEBUG_BUFFER_ALLOC_ASYNC     = 1U << 3,
};
static uint32_t binder_alloc_debug_mask;

module_param_named(debug_mask, binder_alloc_debug_mask,
		   uint, 0644);

#define binder_alloc_debug(mask, x...) \
	do { \
		if (binder_alloc_debug_mask & mask) \
			pr_info(x); \
	} while (0)

static struct binder_buffer *binder_buffer_next(struct binder_buffer *buffer)
{
	return list_entry(buffer->entry.next, struct binder_buffer, entry);
}

static struct binder_buffer *binder_buffer_prev(struct binder_buffer *buffer)
{
	return list_entry(buffer->entry.prev, struct binder_buffer, entry);
}

static size_t binder_alloc_buffer_size(struct binder_alloc *alloc,
				       struct binder_buffer *buffer)
{
	if (list_is_last(&buffer->entry, &alloc->buffers))
		return (u8 *)alloc->buffer +
			alloc->buffer_size - (u8 *)buffer->data;
	return (u8 *)binder_buffer_next(buffer)->data - (u8 *)buffer->data;
}

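/* Insert a free buffer into alloc->free_buffers, an rb-tree ordered by size. */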
static void binder_insert_free_buffer(struct binder_alloc *alloc,
				      struct binder_buffer *new_buffer)
{
	struct rb_node **p = &alloc->free_buffers.rb_node;
	struct rb_node *parent = NULL;
	struct binder_buffer *buffer;
	size_t buffer_size;
	size_t new_buffer_size;

	BUG_ON(!new_buffer->free);

	new_buffer_size = binder_alloc_buffer_size(alloc, new_buffer);

	binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
		     "%d: add free buffer, size %zd, at %pK\n",
		      alloc->pid, new_buffer_size, new_buffer);

	while (*p) {
		parent = *p;
		buffer = rb_entry(parent, struct binder_buffer, rb_node);
		BUG_ON(!buffer->free);

		buffer_size = binder_alloc_buffer_size(alloc, buffer);

		if (new_buffer_size < buffer_size)
			p = &parent->rb_left;
		else
			p = &parent->rb_right;
	}
	rb_link_node(&new_buffer->rb_node, parent, p);
	rb_insert_color(&new_buffer->rb_node, &alloc->free_buffers);
}

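/* Insert an in-use buffer into alloc->allocated_buffers, ordered by data address. */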
static void binder_insert_allocated_buffer_locked(
		struct binder_alloc *alloc, struct binder_buffer *new_buffer)
{
	struct rb_node **p = &alloc->allocated_buffers.rb_node;
	struct rb_node *parent = NULL;
	struct binder_buffer *buffer;

	BUG_ON(new_buffer->free);

	while (*p) {
		parent = *p;
		buffer = rb_entry(parent, struct binder_buffer, rb_node);
		BUG_ON(buffer->free);

		if (new_buffer->data < buffer->data)
			p = &parent->rb_left;
		else if (new_buffer->data > buffer->data)
			p = &parent->rb_right;
		else
			BUG();
	}
	rb_link_node(&new_buffer->rb_node, parent, p);
	rb_insert_color(&new_buffer->rb_node, &alloc->allocated_buffers);
}

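/*
 * Find the allocated buffer whose data starts at the given user address and
 * mark it as being freed; a second free attempt on the same buffer is
 * rejected with an error message.
 */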
static struct binder_buffer *binder_alloc_prepare_to_free_locked(
		struct binder_alloc *alloc,
		uintptr_t user_ptr)
{
	struct rb_node *n = alloc->allocated_buffers.rb_node;
	struct binder_buffer *buffer;
	void *kern_ptr;

	kern_ptr = (void *)(user_ptr - alloc->user_buffer_offset);

	while (n) {
		buffer = rb_entry(n, struct binder_buffer, rb_node);
		BUG_ON(buffer->free);

		if (kern_ptr < buffer->data)
			n = n->rb_left;
		else if (kern_ptr > buffer->data)
			n = n->rb_right;
		else {
			/*
			 * Guard against user threads attempting to
			 * free the buffer twice
			 */
			if (buffer->free_in_progress) {
				pr_err("%d:%d FREE_BUFFER u%016llx user freed buffer twice\n",
				       alloc->pid, current->pid,
				       (u64)user_ptr);
				return NULL;
			}
			buffer->free_in_progress = 1;
			return buffer;
		}
	}
	return NULL;
}

/**
 * binder_alloc_prepare_to_free() - get buffer given user ptr
 * @alloc:	binder_alloc for this proc
 * @user_ptr:	User pointer to buffer data
 *
 * Validate userspace pointer to buffer data and return buffer corresponding to
 * that user pointer.  Search the rb tree for buffer that matches user data
 * pointer.
 *
 * Return:	Pointer to buffer or NULL
 */
struct binder_buffer *binder_alloc_prepare_to_free(struct binder_alloc *alloc,
						   uintptr_t user_ptr)
{
	struct binder_buffer *buffer;

	mutex_lock(&alloc->mutex);
	buffer = binder_alloc_prepare_to_free_locked(alloc, user_ptr);
	mutex_unlock(&alloc->mutex);
	return buffer;
}

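/*
 * Allocate (allocate != 0) or release the physical pages backing the kernel
 * address range [start, end). New pages are mapped into both the kernel
 * area and the process's binder vma; released pages are only moved onto
 * binder_alloc_lru so the shrinker can reclaim them later.
 */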
static int binder_update_page_range(struct binder_alloc *alloc, int allocate,
				    void *start, void *end,
				    struct vm_area_struct *vma)
{
	void *page_addr;
	unsigned long user_page_addr;
	struct binder_lru_page *page;
	struct mm_struct *mm = NULL;
	bool need_mm = false;

	binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
		     "%d: %s pages %pK-%pK\n", alloc->pid,
		     allocate ? "allocate" : "free", start, end);

	if (end <= start)
		return 0;

	trace_binder_update_page_range(alloc, allocate, start, end);

	if (allocate == 0)
		goto free_range;

	for (page_addr = start; page_addr < end; page_addr += PAGE_SIZE) {
		page = &alloc->pages[(page_addr - alloc->buffer) / PAGE_SIZE];
		if (!page->page_ptr) {
			need_mm = true;
			break;
		}
	}

	if (!vma && need_mm && mmget_not_zero(alloc->vma_vm_mm))
		mm = alloc->vma_vm_mm;

	if (mm) {
		down_write(&mm->mmap_sem);
		vma = alloc->vma;
	}

	if (!vma && need_mm) {
		pr_err("%d: binder_alloc_buf failed to map pages in userspace, no vma\n",
			alloc->pid);
		goto err_no_vma;
	}

	for (page_addr = start; page_addr < end; page_addr += PAGE_SIZE) {
		int ret;
		bool on_lru;
		size_t index;

		index = (page_addr - alloc->buffer) / PAGE_SIZE;
		page = &alloc->pages[index];

		if (page->page_ptr) {
			trace_binder_alloc_lru_start(alloc, index);

			on_lru = list_lru_del(&binder_alloc_lru, &page->lru);
			WARN_ON(!on_lru);

			trace_binder_alloc_lru_end(alloc, index);
			continue;
		}

		if (WARN_ON(!vma))
			goto err_page_ptr_cleared;

		trace_binder_alloc_page_start(alloc, index);
		page->page_ptr = alloc_page(GFP_KERNEL |
					    __GFP_HIGHMEM |
					    __GFP_ZERO);
		if (!page->page_ptr) {
			pr_err("%d: binder_alloc_buf failed for page at %pK\n",
				alloc->pid, page_addr);
			goto err_alloc_page_failed;
		}
		page->alloc = alloc;
		INIT_LIST_HEAD(&page->lru);

		ret = map_kernel_range_noflush((unsigned long)page_addr,
					       PAGE_SIZE, PAGE_KERNEL,
					       &page->page_ptr);
		flush_cache_vmap((unsigned long)page_addr,
				(unsigned long)page_addr + PAGE_SIZE);
		if (ret != 1) {
			pr_err("%d: binder_alloc_buf failed to map page at %pK in kernel\n",
			       alloc->pid, page_addr);
			goto err_map_kernel_failed;
		}
		user_page_addr =
			(uintptr_t)page_addr + alloc->user_buffer_offset;
		ret = vm_insert_page(vma, user_page_addr, page[0].page_ptr);
		if (ret) {
			pr_err("%d: binder_alloc_buf failed to map page at %lx in userspace\n",
			       alloc->pid, user_page_addr);
			goto err_vm_insert_page_failed;
		}

		trace_binder_alloc_page_end(alloc, index);
		/* vm_insert_page does not seem to increment the refcount */
	}
	if (mm) {
		up_write(&mm->mmap_sem);
		mmput(mm);
	}
	return 0;

free_range:
	for (page_addr = end - PAGE_SIZE; page_addr >= start;
	     page_addr -= PAGE_SIZE) {
		bool ret;
		size_t index;

		index = (page_addr - alloc->buffer) / PAGE_SIZE;
		page = &alloc->pages[index];

		trace_binder_free_lru_start(alloc, index);

		ret = list_lru_add(&binder_alloc_lru, &page->lru);
		WARN_ON(!ret);

		trace_binder_free_lru_end(alloc, index);
		continue;

err_vm_insert_page_failed:
		unmap_kernel_range((unsigned long)page_addr, PAGE_SIZE);
err_map_kernel_failed:
		__free_page(page->page_ptr);
		page->page_ptr = NULL;
err_alloc_page_failed:
err_page_ptr_cleared:
		;
	}
err_no_vma:
	if (mm) {
		up_write(&mm->mmap_sem);
		mmput(mm);
	}
	return vma ? -ENOMEM : -ESRCH;
}

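/*
 * Best-fit allocation from the free_buffers tree: pick the smallest free
 * buffer that fits the pointer-aligned request, back it with pages, and
 * split any unused tail off as a new free buffer. Caller holds alloc->mutex.
 */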
struct binder_buffer *binder_alloc_new_buf_locked(struct binder_alloc *alloc,
						  size_t data_size,
						  size_t offsets_size,
						  size_t extra_buffers_size,
						  int is_async)
{
	struct rb_node *n = alloc->free_buffers.rb_node;
	struct binder_buffer *buffer;
	size_t buffer_size;
	struct rb_node *best_fit = NULL;
	void *has_page_addr;
	void *end_page_addr;
	size_t size, data_offsets_size;
	int ret;

	if (alloc->vma == NULL) {
		pr_err("%d: binder_alloc_buf, no vma\n",
		       alloc->pid);
		return ERR_PTR(-ESRCH);
	}

	data_offsets_size = ALIGN(data_size, sizeof(void *)) +
		ALIGN(offsets_size, sizeof(void *));

	if (data_offsets_size < data_size || data_offsets_size < offsets_size) {
		binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
				"%d: got transaction with invalid size %zd-%zd\n",
				alloc->pid, data_size, offsets_size);
		return ERR_PTR(-EINVAL);
	}
	size = data_offsets_size + ALIGN(extra_buffers_size, sizeof(void *));
	if (size < data_offsets_size || size < extra_buffers_size) {
		binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
				"%d: got transaction with invalid extra_buffers_size %zd\n",
				alloc->pid, extra_buffers_size);
		return ERR_PTR(-EINVAL);
	}
	if (is_async &&
	    alloc->free_async_space < size + sizeof(struct binder_buffer)) {
		binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
			     "%d: binder_alloc_buf size %zd failed, no async space left\n",
			      alloc->pid, size);
		return ERR_PTR(-ENOSPC);
	}

	/* Pad 0-size buffers so they get assigned unique addresses */
	size = max(size, sizeof(void *));

	while (n) {
		buffer = rb_entry(n, struct binder_buffer, rb_node);
		BUG_ON(!buffer->free);
		buffer_size = binder_alloc_buffer_size(alloc, buffer);

		if (size < buffer_size) {
			best_fit = n;
			n = n->rb_left;
		} else if (size > buffer_size)
			n = n->rb_right;
		else {
			best_fit = n;
			break;
		}
	}
	if (best_fit == NULL) {
		size_t allocated_buffers = 0;
		size_t largest_alloc_size = 0;
		size_t total_alloc_size = 0;
		size_t free_buffers = 0;
		size_t largest_free_size = 0;
		size_t total_free_size = 0;

		for (n = rb_first(&alloc->allocated_buffers); n != NULL;
		     n = rb_next(n)) {
			buffer = rb_entry(n, struct binder_buffer, rb_node);
			buffer_size = binder_alloc_buffer_size(alloc, buffer);
			allocated_buffers++;
			total_alloc_size += buffer_size;
			if (buffer_size > largest_alloc_size)
				largest_alloc_size = buffer_size;
		}
		for (n = rb_first(&alloc->free_buffers); n != NULL;
		     n = rb_next(n)) {
			buffer = rb_entry(n, struct binder_buffer, rb_node);
			buffer_size = binder_alloc_buffer_size(alloc, buffer);
			free_buffers++;
			total_free_size += buffer_size;
			if (buffer_size > largest_free_size)
				largest_free_size = buffer_size;
		}
		pr_err("%d: binder_alloc_buf size %zd failed, no address space\n",
			alloc->pid, size);
		pr_err("allocated: %zd (num: %zd largest: %zd), free: %zd (num: %zd largest: %zd)\n",
		       total_alloc_size, allocated_buffers, largest_alloc_size,
		       total_free_size, free_buffers, largest_free_size);
		return ERR_PTR(-ENOSPC);
	}
	if (n == NULL) {
		buffer = rb_entry(best_fit, struct binder_buffer, rb_node);
		buffer_size = binder_alloc_buffer_size(alloc, buffer);
	}

	binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
		     "%d: binder_alloc_buf size %zd got buffer %pK size %zd\n",
		      alloc->pid, size, buffer, buffer_size);

	has_page_addr =
		(void *)(((uintptr_t)buffer->data + buffer_size) & PAGE_MASK);
	WARN_ON(n && buffer_size != size);
	end_page_addr =
		(void *)PAGE_ALIGN((uintptr_t)buffer->data + size);
	if (end_page_addr > has_page_addr)
		end_page_addr = has_page_addr;
	ret = binder_update_page_range(alloc, 1,
	    (void *)PAGE_ALIGN((uintptr_t)buffer->data), end_page_addr, NULL);
	if (ret)
		return ERR_PTR(ret);

	if (buffer_size != size) {
		struct binder_buffer *new_buffer;

		new_buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
		if (!new_buffer) {
			pr_err("%s: %d failed to alloc new buffer struct\n",
			       __func__, alloc->pid);
			goto err_alloc_buf_struct_failed;
		}
		new_buffer->data = (u8 *)buffer->data + size;
		list_add(&new_buffer->entry, &buffer->entry);
		new_buffer->free = 1;
		binder_insert_free_buffer(alloc, new_buffer);
	}

	rb_erase(best_fit, &alloc->free_buffers);
	buffer->free = 0;
	buffer->free_in_progress = 0;
	binder_insert_allocated_buffer_locked(alloc, buffer);
	binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
		     "%d: binder_alloc_buf size %zd got %pK\n",
		      alloc->pid, size, buffer);
	buffer->data_size = data_size;
	buffer->offsets_size = offsets_size;
	buffer->async_transaction = is_async;
	buffer->extra_buffers_size = extra_buffers_size;
	if (is_async) {
		alloc->free_async_space -= size + sizeof(struct binder_buffer);
		binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC_ASYNC,
			     "%d: binder_alloc_buf size %zd async free %zd\n",
			      alloc->pid, size, alloc->free_async_space);
	}
	return buffer;

err_alloc_buf_struct_failed:
	binder_update_page_range(alloc, 0,
				 (void *)PAGE_ALIGN((uintptr_t)buffer->data),
				 end_page_addr, NULL);
	return ERR_PTR(-ENOMEM);
}

/**
 * binder_alloc_new_buf() - Allocate a new binder buffer
 * @alloc:              binder_alloc for this proc
 * @data_size:          size of user data buffer
 * @offsets_size:       user specified buffer offset
 * @extra_buffers_size: size of extra space for meta-data (eg, security context)
 * @is_async:           buffer for async transaction
 *
 * Allocate a new buffer given the requested sizes. Returns
 * the kernel version of the buffer pointer. The size allocated
 * is the sum of the three given sizes (each rounded up to
 * pointer-sized boundary)
 *
 * Return:	The allocated buffer or %NULL if error
 */
struct binder_buffer *binder_alloc_new_buf(struct binder_alloc *alloc,
					   size_t data_size,
					   size_t offsets_size,
					   size_t extra_buffers_size,
					   int is_async)
{
	struct binder_buffer *buffer;

	mutex_lock(&alloc->mutex);
	buffer = binder_alloc_new_buf_locked(alloc, data_size, offsets_size,
					     extra_buffers_size, is_async);
	mutex_unlock(&alloc->mutex);
	return buffer;
}

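/*
 * buffer_start_page(): start of the page holding the buffer's first byte.
 * prev_buffer_end_page(): start of the page holding the byte just before
 * the buffer, i.e. the last page used by the preceding buffer.
 */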
static void *buffer_start_page(struct binder_buffer *buffer)
{
	return (void *)((uintptr_t)buffer->data & PAGE_MASK);
}

static void *prev_buffer_end_page(struct binder_buffer *buffer)
{
	return (void *)(((uintptr_t)(buffer->data) - 1) & PAGE_MASK);
}

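/*
 * Unlink and free a binder_buffer that is being merged away. Its start page
 * is released only when neither the previous nor the following buffer uses
 * it and the buffer does not itself begin on a page boundary.
 */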
static void binder_delete_free_buffer(struct binder_alloc *alloc,
				      struct binder_buffer *buffer)
{
	struct binder_buffer *prev, *next = NULL;
	bool to_free = true;
	BUG_ON(alloc->buffers.next == &buffer->entry);
	prev = binder_buffer_prev(buffer);
	BUG_ON(!prev->free);
	if (prev_buffer_end_page(prev) == buffer_start_page(buffer)) {
		to_free = false;
		binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
				   "%d: merge free, buffer %pK share page with %pK\n",
				   alloc->pid, buffer->data, prev->data);
	}

	if (!list_is_last(&buffer->entry, &alloc->buffers)) {
		next = binder_buffer_next(buffer);
		if (buffer_start_page(next) == buffer_start_page(buffer)) {
			to_free = false;
			binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
					   "%d: merge free, buffer %pK share page with %pK\n",
					   alloc->pid,
					   buffer->data,
					   next->data);
		}
	}

	if (PAGE_ALIGNED(buffer->data)) {
		binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
				   "%d: merge free, buffer start %pK is page aligned\n",
				   alloc->pid, buffer->data);
		to_free = false;
	}

	if (to_free) {
		binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
				   "%d: merge free, buffer %pK do not share page with %pK or %pK\n",
				   alloc->pid, buffer->data,
				   prev->data, next ? next->data : NULL);
		binder_update_page_range(alloc, 0, buffer_start_page(buffer),
					 buffer_start_page(buffer) + PAGE_SIZE,
					 NULL);
	}
	list_del(&buffer->entry);
	kfree(buffer);
}

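/*
 * Return a buffer to the free state: release its fully-covered pages, merge
 * it with adjacent free buffers, and reinsert the result into
 * alloc->free_buffers. Caller holds alloc->mutex.
 */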
static void binder_free_buf_locked(struct binder_alloc *alloc,
				   struct binder_buffer *buffer)
{
	size_t size, buffer_size;

	buffer_size = binder_alloc_buffer_size(alloc, buffer);

	size = ALIGN(buffer->data_size, sizeof(void *)) +
		ALIGN(buffer->offsets_size, sizeof(void *)) +
		ALIGN(buffer->extra_buffers_size, sizeof(void *));

	binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
		     "%d: binder_free_buf %pK size %zd buffer_size %zd\n",
		      alloc->pid, buffer, size, buffer_size);

	BUG_ON(buffer->free);
	BUG_ON(size > buffer_size);
	BUG_ON(buffer->transaction != NULL);
	BUG_ON(buffer->data < alloc->buffer);
	BUG_ON(buffer->data > alloc->buffer + alloc->buffer_size);

	if (buffer->async_transaction) {
		alloc->free_async_space += size + sizeof(struct binder_buffer);

		binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC_ASYNC,
			     "%d: binder_free_buf size %zd async free %zd\n",
			      alloc->pid, size, alloc->free_async_space);
	}

	binder_update_page_range(alloc, 0,
		(void *)PAGE_ALIGN((uintptr_t)buffer->data),
		(void *)(((uintptr_t)buffer->data + buffer_size) & PAGE_MASK),
		NULL);

	rb_erase(&buffer->rb_node, &alloc->allocated_buffers);
	buffer->free = 1;
	if (!list_is_last(&buffer->entry, &alloc->buffers)) {
		struct binder_buffer *next = binder_buffer_next(buffer);

		if (next->free) {
			rb_erase(&next->rb_node, &alloc->free_buffers);
			binder_delete_free_buffer(alloc, next);
		}
	}
	if (alloc->buffers.next != &buffer->entry) {
		struct binder_buffer *prev = binder_buffer_prev(buffer);

		if (prev->free) {
			binder_delete_free_buffer(alloc, buffer);
			rb_erase(&prev->rb_node, &alloc->free_buffers);
			buffer = prev;
		}
	}
	binder_insert_free_buffer(alloc, buffer);
}

/**
 * binder_alloc_free_buf() - free a binder buffer
 * @alloc:	binder_alloc for this proc
 * @buffer:	kernel pointer to buffer
 *
 * Free the buffer allocated via binder_alloc_new_buf()
 */
void binder_alloc_free_buf(struct binder_alloc *alloc,
			   struct binder_buffer *buffer)
{
	mutex_lock(&alloc->mutex);
	binder_free_buf_locked(alloc, buffer);
	mutex_unlock(&alloc->mutex);
}

/**
 * binder_alloc_mmap_handler() - map virtual address space for proc
 * @alloc:	alloc structure for this proc
 * @vma:	vma passed to mmap()
 *
 * Called by binder_mmap() to initialize the space specified in
 * vma for allocating binder buffers
 *
 * Return:
 *      0 = success
 *      -EBUSY = address space already mapped
 *      -ENOMEM = failed to map memory to given address space
 */
int binder_alloc_mmap_handler(struct binder_alloc *alloc,
			      struct vm_area_struct *vma)
{
	int ret;
	struct vm_struct *area;
	const char *failure_string;
	struct binder_buffer *buffer;

	mutex_lock(&binder_alloc_mmap_lock);
	if (alloc->buffer) {
		ret = -EBUSY;
		failure_string = "already mapped";
		goto err_already_mapped;
	}

	area = get_vm_area(vma->vm_end - vma->vm_start, VM_IOREMAP);
	if (area == NULL) {
		ret = -ENOMEM;
		failure_string = "get_vm_area";
		goto err_get_vm_area_failed;
	}
	alloc->buffer = area->addr;
	alloc->user_buffer_offset =
		vma->vm_start - (uintptr_t)alloc->buffer;
	mutex_unlock(&binder_alloc_mmap_lock);

#ifdef CONFIG_CPU_CACHE_VIPT
	if (cache_is_vipt_aliasing()) {
		while (CACHE_COLOUR(
				(vma->vm_start ^ (uint32_t)alloc->buffer))) {
			pr_info("%s: %d %lx-%lx maps %pK bad alignment\n",
				__func__, alloc->pid, vma->vm_start,
				vma->vm_end, alloc->buffer);
			vma->vm_start += PAGE_SIZE;
		}
	}
#endif
	alloc->pages = kzalloc(sizeof(alloc->pages[0]) *
				   ((vma->vm_end - vma->vm_start) / PAGE_SIZE),
			       GFP_KERNEL);
	if (alloc->pages == NULL) {
		ret = -ENOMEM;
		failure_string = "alloc page array";
		goto err_alloc_pages_failed;
	}
	alloc->buffer_size = vma->vm_end - vma->vm_start;

	buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
	if (!buffer) {
		ret = -ENOMEM;
		failure_string = "alloc buffer struct";
		goto err_alloc_buf_struct_failed;
	}

	buffer->data = alloc->buffer;
	list_add(&buffer->entry, &alloc->buffers);
	buffer->free = 1;
	binder_insert_free_buffer(alloc, buffer);
	alloc->free_async_space = alloc->buffer_size / 2;
	barrier();
	alloc->vma = vma;
	alloc->vma_vm_mm = vma->vm_mm;
	mmgrab(alloc->vma_vm_mm);

	return 0;

err_alloc_buf_struct_failed:
	kfree(alloc->pages);
	alloc->pages = NULL;
err_alloc_pages_failed:
	mutex_lock(&binder_alloc_mmap_lock);
	vfree(alloc->buffer);
	alloc->buffer = NULL;
err_get_vm_area_failed:
err_already_mapped:
	mutex_unlock(&binder_alloc_mmap_lock);
	pr_err("%s: %d %lx-%lx %s failed %d\n", __func__,
	       alloc->pid, vma->vm_start, vma->vm_end, failure_string, ret);
	return ret;
}

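/**
 * binder_alloc_deferred_release() - free all buffers and pages for a proc
 * @alloc: binder_alloc for this proc
 *
 * Frees any remaining allocated and free buffers and releases every mapped
 * page. Each buffer's transaction must already have been freed.
 */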
void binder_alloc_deferred_release(struct binder_alloc *alloc)
{
	struct rb_node *n;
	int buffers, page_count;
	struct binder_buffer *buffer;

	BUG_ON(alloc->vma);

	buffers = 0;
	mutex_lock(&alloc->mutex);
	while ((n = rb_first(&alloc->allocated_buffers))) {
		buffer = rb_entry(n, struct binder_buffer, rb_node);

		/* Transaction should already have been freed */
		BUG_ON(buffer->transaction);

		binder_free_buf_locked(alloc, buffer);
		buffers++;
	}

	while (!list_empty(&alloc->buffers)) {
		buffer = list_first_entry(&alloc->buffers,
					  struct binder_buffer, entry);
		WARN_ON(!buffer->free);

		list_del(&buffer->entry);
		WARN_ON_ONCE(!list_empty(&alloc->buffers));
		kfree(buffer);
	}

	page_count = 0;
	if (alloc->pages) {
		int i;

		for (i = 0; i < alloc->buffer_size / PAGE_SIZE; i++) {
			void *page_addr;
			bool on_lru;

			if (!alloc->pages[i].page_ptr)
				continue;

			on_lru = list_lru_del(&binder_alloc_lru,
					      &alloc->pages[i].lru);
			page_addr = alloc->buffer + i * PAGE_SIZE;
			binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
				     "%s: %d: page %d at %pK %s\n",
				     __func__, alloc->pid, i, page_addr,
				     on_lru ? "on lru" : "active");
			unmap_kernel_range((unsigned long)page_addr, PAGE_SIZE);
			__free_page(alloc->pages[i].page_ptr);
			page_count++;
		}
		kfree(alloc->pages);
		vfree(alloc->buffer);
	}
	mutex_unlock(&alloc->mutex);
	if (alloc->vma_vm_mm)
		mmdrop(alloc->vma_vm_mm);

	binder_alloc_debug(BINDER_DEBUG_OPEN_CLOSE,
		     "%s: %d buffers %d, pages %d\n",
		     __func__, alloc->pid, buffers, page_count);
}

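/* Emit one line describing a single binder_buffer to the given seq_file. */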
static void print_binder_buffer(struct seq_file *m, const char *prefix,
				struct binder_buffer *buffer)
{
	seq_printf(m, "%s %d: %pK size %zd:%zd:%zd %s\n",
		   prefix, buffer->debug_id, buffer->data,
		   buffer->data_size, buffer->offsets_size,
		   buffer->extra_buffers_size,
		   buffer->transaction ? "active" : "delivered");
}

/**
 * binder_alloc_print_allocated() - print buffer info
 * @m:     seq_file for output via seq_printf()
 * @alloc: binder_alloc for this proc
 *
 * Prints information about every buffer associated with
 * the binder_alloc state to the given seq_file
 */
void binder_alloc_print_allocated(struct seq_file *m,
				  struct binder_alloc *alloc)
{
	struct rb_node *n;

	mutex_lock(&alloc->mutex);
	for (n = rb_first(&alloc->allocated_buffers); n != NULL; n = rb_next(n))
		print_binder_buffer(m, " buffer",
				    rb_entry(n, struct binder_buffer, rb_node));
	mutex_unlock(&alloc->mutex);
}

/**
 * binder_alloc_print_pages() - print page usage
 * @m:     seq_file for output via seq_printf()
 * @alloc: binder_alloc for this proc
 */
void binder_alloc_print_pages(struct seq_file *m,
			      struct binder_alloc *alloc)
{
	struct binder_lru_page *page;
	int i;
	int active = 0;
	int lru = 0;
	int free = 0;

	mutex_lock(&alloc->mutex);
	for (i = 0; i < alloc->buffer_size / PAGE_SIZE; i++) {
		page = &alloc->pages[i];
		if (!page->page_ptr)
			free++;
		else if (list_empty(&page->lru))
			active++;
		else
			lru++;
	}
	mutex_unlock(&alloc->mutex);
	seq_printf(m, " pages: %d:%d:%d\n", active, lru, free);
}

/**
 * binder_alloc_get_allocated_count() - return count of buffers
 * @alloc: binder_alloc for this proc
 *
 * Return: count of allocated buffers
 */
int binder_alloc_get_allocated_count(struct binder_alloc *alloc)
{
	struct rb_node *n;
	int count = 0;

	mutex_lock(&alloc->mutex);
	for (n = rb_first(&alloc->allocated_buffers); n != NULL; n = rb_next(n))
		count++;
	mutex_unlock(&alloc->mutex);
	return count;
}

/**
 * binder_alloc_vma_close() - invalidate address space
 * @alloc: binder_alloc for this proc
 *
 * Called from binder_vma_close() when releasing address space.
 * Clears alloc->vma to prevent new incoming transactions from
 * allocating more buffers.
 */
void binder_alloc_vma_close(struct binder_alloc *alloc)
{
	WRITE_ONCE(alloc->vma, NULL);
}

/**
 * binder_alloc_free_page() - shrinker callback to free pages
 * @item:   item to free
 * @lru:    lru list the item is on
 * @lock:   lock protecting the item
 * @cb_arg: callback argument
 *
 * Called from list_lru_walk() in binder_shrink_scan() to free
 * up pages when the system is under memory pressure.
 */
enum lru_status binder_alloc_free_page(struct list_head *item,
				       struct list_lru_one *lru,
				       spinlock_t *lock,
				       void *cb_arg)
{
	struct mm_struct *mm = NULL;
	struct binder_lru_page *page = container_of(item,
						    struct binder_lru_page,
						    lru);
	struct binder_alloc *alloc;
	uintptr_t page_addr;
	size_t index;
	struct vm_area_struct *vma;

	alloc = page->alloc;
	if (!mutex_trylock(&alloc->mutex))
		goto err_get_alloc_mutex_failed;

	if (!page->page_ptr)
		goto err_page_already_freed;

	index = page - alloc->pages;
	page_addr = (uintptr_t)alloc->buffer + index * PAGE_SIZE;
	vma = alloc->vma;
	if (vma) {
		if (!mmget_not_zero(alloc->vma_vm_mm))
			goto err_mmget;
		mm = alloc->vma_vm_mm;
		if (!down_write_trylock(&mm->mmap_sem))
			goto err_down_write_mmap_sem_failed;
	}

	list_lru_isolate(lru, item);
	spin_unlock(lock);

	if (vma) {
		trace_binder_unmap_user_start(alloc, index);

		zap_page_range(vma,
			       page_addr + alloc->user_buffer_offset,
			       PAGE_SIZE);

		trace_binder_unmap_user_end(alloc, index);

		up_write(&mm->mmap_sem);
		mmput(mm);
	}

	trace_binder_unmap_kernel_start(alloc, index);

	unmap_kernel_range(page_addr, PAGE_SIZE);
	__free_page(page->page_ptr);
	page->page_ptr = NULL;

	trace_binder_unmap_kernel_end(alloc, index);

	spin_lock(lock);
	mutex_unlock(&alloc->mutex);
	return LRU_REMOVED_RETRY;

err_down_write_mmap_sem_failed:
	mmput_async(mm);
err_mmget:
err_page_already_freed:
	mutex_unlock(&alloc->mutex);
err_get_alloc_mutex_failed:
	return LRU_SKIP;
}

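/*
 * Shrinker hooks: report how many reclaimable pages sit on binder_alloc_lru
 * and walk that list with binder_alloc_free_page() when the system asks for
 * memory back.
 */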
static unsigned long
binder_shrink_count(struct shrinker *shrink, struct shrink_control *sc)
{
	unsigned long ret = list_lru_count(&binder_alloc_lru);

	return ret;
}

static unsigned long
binder_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
{
	unsigned long ret;

	ret = list_lru_walk(&binder_alloc_lru, binder_alloc_free_page,
			    NULL, sc->nr_to_scan);
	return ret;
}

struct shrinker binder_shrinker = {
	.count_objects = binder_shrink_count,
	.scan_objects = binder_shrink_scan,
	.seeks = DEFAULT_SEEKS,
};

/**
 * binder_alloc_init() - called by binder_open() for per-proc initialization
 * @alloc: binder_alloc for this proc
 *
 * Called from binder_open() to initialize binder_alloc fields for
 * new binder proc
 */
void binder_alloc_init(struct binder_alloc *alloc)
{
	alloc->pid = current->group_leader->pid;
	mutex_init(&alloc->mutex);
	INIT_LIST_HEAD(&alloc->buffers);
}

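/**
 * binder_alloc_shrinker_init() - initialize the global binder page lru/shrinker
 *
 * Sets up binder_alloc_lru and registers binder_shrinker so that pages on
 * the lru can be reclaimed under memory pressure.
 */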
void binder_alloc_shrinker_init(void)
{
	list_lru_init(&binder_alloc_lru);
	register_shrinker(&binder_shrinker);
}