/* binder_alloc.c
 *
 * Android IPC Subsystem
 *
 * Copyright (C) 2007-2017 Google, Inc.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <asm/cacheflush.h>
#include <linux/list.h>
#include <linux/sched/mm.h>
#include <linux/module.h>
#include <linux/rtmutex.h>
#include <linux/rbtree.h>
#include <linux/seq_file.h>
#include <linux/vmalloc.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/list_lru.h>
#include "binder_alloc.h"
#include "binder_trace.h"
struct list_lru binder_alloc_lru;

static DEFINE_MUTEX(binder_alloc_mmap_lock);
enum {
        BINDER_DEBUG_OPEN_CLOSE             = 1U << 1,
        BINDER_DEBUG_BUFFER_ALLOC           = 1U << 2,
        BINDER_DEBUG_BUFFER_ALLOC_ASYNC     = 1U << 3,
};
static uint32_t binder_alloc_debug_mask;

module_param_named(debug_mask, binder_alloc_debug_mask,
                   uint, 0644);
#define binder_alloc_debug(mask, x...) \
        do { \
                if (binder_alloc_debug_mask & mask) \
                        pr_info(x); \
        } while (0)
static struct binder_buffer *binder_buffer_next(struct binder_buffer *buffer)
{
        return list_entry(buffer->entry.next, struct binder_buffer, entry);
}
static struct binder_buffer *binder_buffer_prev(struct binder_buffer *buffer)
{
        return list_entry(buffer->entry.prev, struct binder_buffer, entry);
}
static size_t binder_alloc_buffer_size(struct binder_alloc *alloc,
                                       struct binder_buffer *buffer)
{
        if (list_is_last(&buffer->entry, &alloc->buffers))
                return (u8 *)alloc->buffer +
                        alloc->buffer_size - (u8 *)buffer->data;
        return (u8 *)binder_buffer_next(buffer)->data - (u8 *)buffer->data;
}
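/*
 * Worked example (illustrative addresses only, not taken from a real run):
 * if buffer->data is 0x1000 and the next buffer's data is 0x1040, this
 * buffer spans 0x1040 - 0x1000 = 0x40 bytes. The last buffer in the list
 * has no successor, so it runs to the end of the mapped area,
 * alloc->buffer + alloc->buffer_size.
 */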
static void binder_insert_free_buffer(struct binder_alloc *alloc,
                                      struct binder_buffer *new_buffer)
{
        struct rb_node **p = &alloc->free_buffers.rb_node;
        struct rb_node *parent = NULL;
        struct binder_buffer *buffer;
        size_t buffer_size;
        size_t new_buffer_size;

        BUG_ON(!new_buffer->free);

        new_buffer_size = binder_alloc_buffer_size(alloc, new_buffer);

        binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
                           "%d: add free buffer, size %zd, at %pK\n",
                           alloc->pid, new_buffer_size, new_buffer);

        while (*p) {
                parent = *p;
                buffer = rb_entry(parent, struct binder_buffer, rb_node);
                BUG_ON(!buffer->free);

                buffer_size = binder_alloc_buffer_size(alloc, buffer);

                if (new_buffer_size < buffer_size)
                        p = &parent->rb_left;
                else
                        p = &parent->rb_right;
        }
        rb_link_node(&new_buffer->rb_node, parent, p);
        rb_insert_color(&new_buffer->rb_node, &alloc->free_buffers);
}
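/*
 * Note: the free_buffers tree is keyed by buffer size (for the best-fit
 * search in binder_alloc_new_buf_locked()), while allocated_buffers below
 * is keyed by the buffer's data address (for lookup by user pointer).
 */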
static void binder_insert_allocated_buffer_locked(
                struct binder_alloc *alloc, struct binder_buffer *new_buffer)
{
        struct rb_node **p = &alloc->allocated_buffers.rb_node;
        struct rb_node *parent = NULL;
        struct binder_buffer *buffer;

        BUG_ON(new_buffer->free);

        while (*p) {
                parent = *p;
                buffer = rb_entry(parent, struct binder_buffer, rb_node);
                BUG_ON(buffer->free);

                if (new_buffer->data < buffer->data)
                        p = &parent->rb_left;
                else if (new_buffer->data > buffer->data)
                        p = &parent->rb_right;
                else
                        BUG();
        }
        rb_link_node(&new_buffer->rb_node, parent, p);
        rb_insert_color(&new_buffer->rb_node, &alloc->allocated_buffers);
}
static struct binder_buffer *binder_alloc_prepare_to_free_locked(
                struct binder_alloc *alloc,
                uintptr_t user_ptr)
{
        struct rb_node *n = alloc->allocated_buffers.rb_node;
        struct binder_buffer *buffer;
        void *kern_ptr;

        kern_ptr = (void *)(user_ptr - alloc->user_buffer_offset);

        while (n) {
                buffer = rb_entry(n, struct binder_buffer, rb_node);
                BUG_ON(buffer->free);

                if (kern_ptr < buffer->data)
                        n = n->rb_left;
                else if (kern_ptr > buffer->data)
                        n = n->rb_right;
                else {
                        /*
                         * Guard against user threads attempting to
                         * free the buffer twice
                         */
                        if (buffer->free_in_progress) {
                                pr_err("%d:%d FREE_BUFFER u%016llx user freed buffer twice\n",
                                       alloc->pid, current->pid, (u64)user_ptr);
                                return NULL;
                        }
                        buffer->free_in_progress = 1;
                        return buffer;
                }
        }
        return NULL;
}
/**
 * binder_alloc_prepare_to_free() - get buffer given user ptr
 * @alloc:	binder_alloc for this proc
 * @user_ptr:	User pointer to buffer data
 *
 * Validate userspace pointer to buffer data and return buffer corresponding to
 * that user pointer. Search the rb tree for buffer that matches user data
 * pointer.
 *
 * Return:	Pointer to buffer or NULL
 */
struct binder_buffer *binder_alloc_prepare_to_free(struct binder_alloc *alloc,
                                                   uintptr_t user_ptr)
{
        struct binder_buffer *buffer;

        mutex_lock(&alloc->mutex);
        buffer = binder_alloc_prepare_to_free_locked(alloc, user_ptr);
        mutex_unlock(&alloc->mutex);
        return buffer;
}
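/*
 * Caller sketch (assumed caller context, for example a BC_FREE_BUFFER
 * handler in binder.c; names are illustrative): the user-space cookie is
 * translated and marked free-in-progress here, then released with
 * binder_alloc_free_buf():
 *
 *	struct binder_buffer *buffer;
 *
 *	buffer = binder_alloc_prepare_to_free(alloc, user_ptr);
 *	if (buffer == NULL)
 *		return;
 *	binder_alloc_free_buf(alloc, buffer);
 */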
static int binder_update_page_range(struct binder_alloc *alloc, int allocate,
                                    void *start, void *end)
{
        void *page_addr;
        unsigned long user_page_addr;
        struct binder_lru_page *page;
        struct vm_area_struct *vma = NULL;
        struct mm_struct *mm = NULL;
        bool need_mm = false;

        binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
                           "%d: %s pages %pK-%pK\n", alloc->pid,
                           allocate ? "allocate" : "free", start, end);

        if (end <= start)
                return 0;

        trace_binder_update_page_range(alloc, allocate, start, end);

        if (allocate == 0)
                goto free_range;

        for (page_addr = start; page_addr < end; page_addr += PAGE_SIZE) {
                page = &alloc->pages[(page_addr - alloc->buffer) / PAGE_SIZE];
                if (!page->page_ptr) {
                        need_mm = true;
                        break;
                }
        }

        if (need_mm && mmget_not_zero(alloc->vma_vm_mm))
                mm = alloc->vma_vm_mm;

        if (mm) {
                down_write(&mm->mmap_sem);
                vma = alloc->vma;
        }

        if (!vma && need_mm) {
                pr_err("%d: binder_alloc_buf failed to map pages in userspace, no vma\n",
                       alloc->pid);
                goto err_no_vma;
        }

        for (page_addr = start; page_addr < end; page_addr += PAGE_SIZE) {
                int ret;
                bool on_lru;
                size_t index;

                index = (page_addr - alloc->buffer) / PAGE_SIZE;
                page = &alloc->pages[index];

                if (page->page_ptr) {
                        trace_binder_alloc_lru_start(alloc, index);

                        on_lru = list_lru_del(&binder_alloc_lru, &page->lru);
                        WARN_ON(!on_lru);

                        trace_binder_alloc_lru_end(alloc, index);
                        continue;
                }

                if (WARN_ON(!vma))
                        goto err_page_ptr_cleared;

                trace_binder_alloc_page_start(alloc, index);
                page->page_ptr = alloc_page(GFP_KERNEL |
                                            __GFP_HIGHMEM |
                                            __GFP_ZERO);
                if (!page->page_ptr) {
                        pr_err("%d: binder_alloc_buf failed for page at %pK\n",
                               alloc->pid, page_addr);
                        goto err_alloc_page_failed;
                }
                page->alloc = alloc;
                INIT_LIST_HEAD(&page->lru);

                ret = map_kernel_range_noflush((unsigned long)page_addr,
                                               PAGE_SIZE, PAGE_KERNEL,
                                               &page->page_ptr);
                flush_cache_vmap((unsigned long)page_addr,
                                 (unsigned long)page_addr + PAGE_SIZE);
                if (ret != 1) {
                        pr_err("%d: binder_alloc_buf failed to map page at %pK in kernel\n",
                               alloc->pid, page_addr);
                        goto err_map_kernel_failed;
                }
                user_page_addr =
                        (uintptr_t)page_addr + alloc->user_buffer_offset;
                ret = vm_insert_page(vma, user_page_addr, page[0].page_ptr);
                if (ret) {
                        pr_err("%d: binder_alloc_buf failed to map page at %lx in userspace\n",
                               alloc->pid, user_page_addr);
                        goto err_vm_insert_page_failed;
                }

                if (index + 1 > alloc->pages_high)
                        alloc->pages_high = index + 1;

                trace_binder_alloc_page_end(alloc, index);
                /* vm_insert_page does not seem to increment the refcount */
        }
        if (mm) {
                up_write(&mm->mmap_sem);
                mmput(mm);
        }
        return 0;

free_range:
        for (page_addr = end - PAGE_SIZE; page_addr >= start;
             page_addr -= PAGE_SIZE) {
                bool ret;
                size_t index;

                index = (page_addr - alloc->buffer) / PAGE_SIZE;
                page = &alloc->pages[index];

                trace_binder_free_lru_start(alloc, index);

                ret = list_lru_add(&binder_alloc_lru, &page->lru);
                WARN_ON(!ret);

                trace_binder_free_lru_end(alloc, index);
                continue;

err_vm_insert_page_failed:
                unmap_kernel_range((unsigned long)page_addr, PAGE_SIZE);
err_map_kernel_failed:
                __free_page(page->page_ptr);
                page->page_ptr = NULL;
err_alloc_page_failed:
err_page_ptr_cleared:
                ;
        }
err_no_vma:
        if (mm) {
                up_write(&mm->mmap_sem);
                mmput(mm);
        }
        return vma ? -ENOMEM : -ESRCH;
}
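/*
 * Note on the two modes above: with allocate == 1 the range [start, end)
 * is backed with pages, preferring pages still sitting on binder_alloc_lru;
 * with allocate == 0 nothing is unmapped immediately, the pages are simply
 * parked on binder_alloc_lru so binder_alloc_free_page() can reclaim them
 * later under memory pressure.
 */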
static struct binder_buffer *binder_alloc_new_buf_locked(
                                struct binder_alloc *alloc,
                                size_t data_size,
                                size_t offsets_size,
                                size_t extra_buffers_size,
                                int is_async)
{
        struct rb_node *n = alloc->free_buffers.rb_node;
        struct binder_buffer *buffer;
        size_t buffer_size;
        struct rb_node *best_fit = NULL;
        void *has_page_addr;
        void *end_page_addr;
        size_t size, data_offsets_size;
        int ret;

        if (alloc->vma == NULL) {
                pr_err("%d: binder_alloc_buf, no vma\n",
                       alloc->pid);
                return ERR_PTR(-ESRCH);
        }

        data_offsets_size = ALIGN(data_size, sizeof(void *)) +
                ALIGN(offsets_size, sizeof(void *));

        if (data_offsets_size < data_size || data_offsets_size < offsets_size) {
                binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
                                   "%d: got transaction with invalid size %zd-%zd\n",
                                   alloc->pid, data_size, offsets_size);
                return ERR_PTR(-EINVAL);
        }
        size = data_offsets_size + ALIGN(extra_buffers_size, sizeof(void *));
        if (size < data_offsets_size || size < extra_buffers_size) {
                binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
                                   "%d: got transaction with invalid extra_buffers_size %zd\n",
                                   alloc->pid, extra_buffers_size);
                return ERR_PTR(-EINVAL);
        }
        if (is_async &&
            alloc->free_async_space < size + sizeof(struct binder_buffer)) {
                binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
                                   "%d: binder_alloc_buf size %zd failed, no async space left\n",
                                   alloc->pid, size);
                return ERR_PTR(-ENOSPC);
        }

        /* Pad 0-size buffers so they get assigned unique addresses */
        size = max(size, sizeof(void *));

        while (n) {
                buffer = rb_entry(n, struct binder_buffer, rb_node);
                BUG_ON(!buffer->free);
                buffer_size = binder_alloc_buffer_size(alloc, buffer);

                if (size < buffer_size) {
                        best_fit = n;
                        n = n->rb_left;
                } else if (size > buffer_size)
                        n = n->rb_right;
                else {
                        best_fit = n;
                        break;
                }
        }
        if (best_fit == NULL) {
                size_t allocated_buffers = 0;
                size_t largest_alloc_size = 0;
                size_t total_alloc_size = 0;
                size_t free_buffers = 0;
                size_t largest_free_size = 0;
                size_t total_free_size = 0;

                for (n = rb_first(&alloc->allocated_buffers); n != NULL;
                     n = rb_next(n)) {
                        buffer = rb_entry(n, struct binder_buffer, rb_node);
                        buffer_size = binder_alloc_buffer_size(alloc, buffer);
                        allocated_buffers++;
                        total_alloc_size += buffer_size;
                        if (buffer_size > largest_alloc_size)
                                largest_alloc_size = buffer_size;
                }
                for (n = rb_first(&alloc->free_buffers); n != NULL;
                     n = rb_next(n)) {
                        buffer = rb_entry(n, struct binder_buffer, rb_node);
                        buffer_size = binder_alloc_buffer_size(alloc, buffer);
                        free_buffers++;
                        total_free_size += buffer_size;
                        if (buffer_size > largest_free_size)
                                largest_free_size = buffer_size;
                }
                pr_err("%d: binder_alloc_buf size %zd failed, no address space\n",
                       alloc->pid, size);
                pr_err("allocated: %zd (num: %zd largest: %zd), free: %zd (num: %zd largest: %zd)\n",
                       total_alloc_size, allocated_buffers, largest_alloc_size,
                       total_free_size, free_buffers, largest_free_size);
                return ERR_PTR(-ENOSPC);
        }
        if (n == NULL) {
                buffer = rb_entry(best_fit, struct binder_buffer, rb_node);
                buffer_size = binder_alloc_buffer_size(alloc, buffer);
        }

        binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
                           "%d: binder_alloc_buf size %zd got buffer %pK size %zd\n",
                           alloc->pid, size, buffer, buffer_size);

        has_page_addr =
                (void *)(((uintptr_t)buffer->data + buffer_size) & PAGE_MASK);
        WARN_ON(n && buffer_size != size);
        end_page_addr =
                (void *)PAGE_ALIGN((uintptr_t)buffer->data + size);
        if (end_page_addr > has_page_addr)
                end_page_addr = has_page_addr;
        ret = binder_update_page_range(alloc, 1,
            (void *)PAGE_ALIGN((uintptr_t)buffer->data), end_page_addr);
        if (ret)
                return ERR_PTR(ret);

        if (buffer_size != size) {
                struct binder_buffer *new_buffer;

                new_buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
                if (!new_buffer) {
                        pr_err("%s: %d failed to alloc new buffer struct\n",
                               __func__, alloc->pid);
                        goto err_alloc_buf_struct_failed;
                }
                new_buffer->data = (u8 *)buffer->data + size;
                list_add(&new_buffer->entry, &buffer->entry);
                new_buffer->free = 1;
                binder_insert_free_buffer(alloc, new_buffer);
        }

        rb_erase(best_fit, &alloc->free_buffers);
        buffer->free = 0;
        buffer->free_in_progress = 0;
        binder_insert_allocated_buffer_locked(alloc, buffer);
        binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
                           "%d: binder_alloc_buf size %zd got %pK\n",
                           alloc->pid, size, buffer);
        buffer->data_size = data_size;
        buffer->offsets_size = offsets_size;
        buffer->async_transaction = is_async;
        buffer->extra_buffers_size = extra_buffers_size;
        if (is_async) {
                alloc->free_async_space -= size + sizeof(struct binder_buffer);
                binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC_ASYNC,
                                   "%d: binder_alloc_buf size %zd async free %zd\n",
                                   alloc->pid, size, alloc->free_async_space);
        }
        return buffer;

err_alloc_buf_struct_failed:
        binder_update_page_range(alloc, 0,
                                 (void *)PAGE_ALIGN((uintptr_t)buffer->data),
                                 end_page_addr);
        return ERR_PTR(-ENOMEM);
}
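/*
 * Worked size example (illustrative numbers): for data_size = 100,
 * offsets_size = 24 and extra_buffers_size = 0 on a 64-bit kernel,
 * data_offsets_size = ALIGN(100, 8) + ALIGN(24, 8) = 104 + 24 = 128,
 * so the best-fit search above looks for a free buffer of at least
 * 128 bytes and splits off any remainder as a new free buffer.
 */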
/**
 * binder_alloc_new_buf() - Allocate a new binder buffer
 * @alloc:              binder_alloc for this proc
 * @data_size:          size of user data buffer
 * @offsets_size:       user specified buffer offset
 * @extra_buffers_size: size of extra space for meta-data (eg, security context)
 * @is_async:           buffer for async transaction
 *
 * Allocate a new buffer given the requested sizes. Returns
 * the kernel version of the buffer pointer. The size allocated
 * is the sum of the three given sizes (each rounded up to
 * pointer-sized boundary)
 *
 * Return:	The allocated buffer or an ERR_PTR() on failure
 */
struct binder_buffer *binder_alloc_new_buf(struct binder_alloc *alloc,
                                           size_t data_size,
                                           size_t offsets_size,
                                           size_t extra_buffers_size,
                                           int is_async)
{
        struct binder_buffer *buffer;

        mutex_lock(&alloc->mutex);
        buffer = binder_alloc_new_buf_locked(alloc, data_size, offsets_size,
                                             extra_buffers_size, is_async);
        mutex_unlock(&alloc->mutex);
        return buffer;
}
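/*
 * Usage sketch (hypothetical caller, variable names assumed): because the
 * failure paths return ERR_PTR() values rather than NULL, callers are
 * expected to test with IS_ERR():
 *
 *	struct binder_buffer *buf;
 *
 *	buf = binder_alloc_new_buf(alloc, data_size, offsets_size, 0, 0);
 *	if (IS_ERR(buf))
 *		return PTR_ERR(buf);
 */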
static void *buffer_start_page(struct binder_buffer *buffer)
{
        return (void *)((uintptr_t)buffer->data & PAGE_MASK);
}

static void *prev_buffer_end_page(struct binder_buffer *buffer)
{
        return (void *)(((uintptr_t)(buffer->data) - 1) & PAGE_MASK);
}
static void binder_delete_free_buffer(struct binder_alloc *alloc,
                                      struct binder_buffer *buffer)
{
        struct binder_buffer *prev, *next = NULL;
        bool to_free = true;

        BUG_ON(alloc->buffers.next == &buffer->entry);
        prev = binder_buffer_prev(buffer);
        BUG_ON(!prev->free);
        if (prev_buffer_end_page(prev) == buffer_start_page(buffer)) {
                to_free = false;
                binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
                                   "%d: merge free, buffer %pK share page with %pK\n",
                                   alloc->pid, buffer->data, prev->data);
        }

        if (!list_is_last(&buffer->entry, &alloc->buffers)) {
                next = binder_buffer_next(buffer);
                if (buffer_start_page(next) == buffer_start_page(buffer)) {
                        to_free = false;
                        binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
                                           "%d: merge free, buffer %pK share page with %pK\n",
                                           alloc->pid,
                                           buffer->data,
                                           next->data);
                }
        }

        if (PAGE_ALIGNED(buffer->data)) {
                binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
                                   "%d: merge free, buffer start %pK is page aligned\n",
                                   alloc->pid, buffer->data);
                to_free = false;
        }

        if (to_free) {
                binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
                                   "%d: merge free, buffer %pK do not share page with %pK or %pK\n",
                                   alloc->pid, buffer->data,
                                   prev->data, next ? next->data : NULL);
                binder_update_page_range(alloc, 0, buffer_start_page(buffer),
                                         buffer_start_page(buffer) + PAGE_SIZE);
        }
        list_del(&buffer->entry);
        kfree(buffer);
}
static void binder_free_buf_locked(struct binder_alloc *alloc,
                                   struct binder_buffer *buffer)
{
        size_t size, buffer_size;

        buffer_size = binder_alloc_buffer_size(alloc, buffer);

        size = ALIGN(buffer->data_size, sizeof(void *)) +
                ALIGN(buffer->offsets_size, sizeof(void *)) +
                ALIGN(buffer->extra_buffers_size, sizeof(void *));

        binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
                           "%d: binder_free_buf %pK size %zd buffer_size %zd\n",
                           alloc->pid, buffer, size, buffer_size);

        BUG_ON(buffer->free);
        BUG_ON(size > buffer_size);
        BUG_ON(buffer->transaction != NULL);
        BUG_ON(buffer->data < alloc->buffer);
        BUG_ON(buffer->data > alloc->buffer + alloc->buffer_size);

        if (buffer->async_transaction) {
                alloc->free_async_space += size + sizeof(struct binder_buffer);

                binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC_ASYNC,
                                   "%d: binder_free_buf size %zd async free %zd\n",
                                   alloc->pid, size, alloc->free_async_space);
        }

        binder_update_page_range(alloc, 0,
                (void *)PAGE_ALIGN((uintptr_t)buffer->data),
                (void *)(((uintptr_t)buffer->data + buffer_size) & PAGE_MASK));

        rb_erase(&buffer->rb_node, &alloc->allocated_buffers);
        buffer->free = 1;
        if (!list_is_last(&buffer->entry, &alloc->buffers)) {
                struct binder_buffer *next = binder_buffer_next(buffer);

                if (next->free) {
                        rb_erase(&next->rb_node, &alloc->free_buffers);
                        binder_delete_free_buffer(alloc, next);
                }
        }
        if (alloc->buffers.next != &buffer->entry) {
                struct binder_buffer *prev = binder_buffer_prev(buffer);

                if (prev->free) {
                        binder_delete_free_buffer(alloc, buffer);
                        rb_erase(&prev->rb_node, &alloc->free_buffers);
                        buffer = prev;
                }
        }
        binder_insert_free_buffer(alloc, buffer);
}
/**
 * binder_alloc_free_buf() - free a binder buffer
 * @alloc:	binder_alloc for this proc
 * @buffer:	kernel pointer to buffer
 *
 * Free the buffer allocated via binder_alloc_new_buf()
 */
void binder_alloc_free_buf(struct binder_alloc *alloc,
                           struct binder_buffer *buffer)
{
        mutex_lock(&alloc->mutex);
        binder_free_buf_locked(alloc, buffer);
        mutex_unlock(&alloc->mutex);
}
/**
 * binder_alloc_mmap_handler() - map virtual address space for proc
 * @alloc:	alloc structure for this proc
 * @vma:	vma passed to mmap()
 *
 * Called by binder_mmap() to initialize the space specified in
 * vma for allocating binder buffers
 *
 * Return:
 *      0 = success
 *      -EBUSY = address space already mapped
 *      -ENOMEM = failed to map memory to given address space
 */
int binder_alloc_mmap_handler(struct binder_alloc *alloc,
                              struct vm_area_struct *vma)
{
        int ret;
        struct vm_struct *area;
        const char *failure_string;
        struct binder_buffer *buffer;

        mutex_lock(&binder_alloc_mmap_lock);
        if (alloc->buffer) {
                ret = -EBUSY;
                failure_string = "already mapped";
                goto err_already_mapped;
        }

        area = get_vm_area(vma->vm_end - vma->vm_start, VM_ALLOC);
        if (area == NULL) {
                ret = -ENOMEM;
                failure_string = "get_vm_area";
                goto err_get_vm_area_failed;
        }
        alloc->buffer = area->addr;
        alloc->user_buffer_offset =
                vma->vm_start - (uintptr_t)alloc->buffer;
        mutex_unlock(&binder_alloc_mmap_lock);

#ifdef CONFIG_CPU_CACHE_VIPT
        if (cache_is_vipt_aliasing()) {
                while (CACHE_COLOUR(
                                (vma->vm_start ^ (uint32_t)alloc->buffer))) {
                        pr_info("%s: %d %lx-%lx maps %pK bad alignment\n",
                                __func__, alloc->pid, vma->vm_start,
                                vma->vm_end, alloc->buffer);
                        vma->vm_start += PAGE_SIZE;
                }
        }
#endif
        alloc->pages = kzalloc(sizeof(alloc->pages[0]) *
                                   ((vma->vm_end - vma->vm_start) / PAGE_SIZE),
                               GFP_KERNEL);
        if (alloc->pages == NULL) {
                ret = -ENOMEM;
                failure_string = "alloc page array";
                goto err_alloc_pages_failed;
        }
        alloc->buffer_size = vma->vm_end - vma->vm_start;

        buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
        if (!buffer) {
                ret = -ENOMEM;
                failure_string = "alloc buffer struct";
                goto err_alloc_buf_struct_failed;
        }

        buffer->data = alloc->buffer;
        list_add(&buffer->entry, &alloc->buffers);
        buffer->free = 1;
        binder_insert_free_buffer(alloc, buffer);
        alloc->free_async_space = alloc->buffer_size / 2;
        barrier();
        alloc->vma = vma;
        alloc->vma_vm_mm = vma->vm_mm;
        mmgrab(alloc->vma_vm_mm);

        return 0;

err_alloc_buf_struct_failed:
        kfree(alloc->pages);
        alloc->pages = NULL;
err_alloc_pages_failed:
        mutex_lock(&binder_alloc_mmap_lock);
        vfree(alloc->buffer);
        alloc->buffer = NULL;
err_get_vm_area_failed:
err_already_mapped:
        mutex_unlock(&binder_alloc_mmap_lock);
        pr_err("%s: %d %lx-%lx %s failed %d\n", __func__,
               alloc->pid, vma->vm_start, vma->vm_end, failure_string, ret);
        return ret;
}
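/*
 * Address translation example (illustrative values): if the vma starts at
 * user address 0x7f0000000000 and get_vm_area() returned kernel address
 * 0xffffc90000000000, then user_buffer_offset is the difference of the two,
 * and every user pointer handed back for a transaction is simply the kernel
 * buffer address plus that constant offset (see
 * binder_alloc_prepare_to_free_locked() and binder_update_page_range()).
 */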
void binder_alloc_deferred_release(struct binder_alloc *alloc)
{
        struct rb_node *n;
        int buffers, page_count;
        struct binder_buffer *buffer;

        BUG_ON(alloc->vma);

        buffers = 0;
        mutex_lock(&alloc->mutex);
        while ((n = rb_first(&alloc->allocated_buffers))) {
                buffer = rb_entry(n, struct binder_buffer, rb_node);

                /* Transaction should already have been freed */
                BUG_ON(buffer->transaction);

                binder_free_buf_locked(alloc, buffer);
                buffers++;
        }

        while (!list_empty(&alloc->buffers)) {
                buffer = list_first_entry(&alloc->buffers,
                                          struct binder_buffer, entry);
                WARN_ON(!buffer->free);

                list_del(&buffer->entry);
                WARN_ON_ONCE(!list_empty(&alloc->buffers));
                kfree(buffer);
        }

        page_count = 0;
        if (alloc->pages) {
                int i;

                for (i = 0; i < alloc->buffer_size / PAGE_SIZE; i++) {
                        void *page_addr;
                        bool on_lru;

                        if (!alloc->pages[i].page_ptr)
                                continue;

                        on_lru = list_lru_del(&binder_alloc_lru,
                                              &alloc->pages[i].lru);
                        page_addr = alloc->buffer + i * PAGE_SIZE;
                        binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
                                           "%s: %d: page %d at %pK %s\n",
                                           __func__, alloc->pid, i, page_addr,
                                           on_lru ? "on lru" : "active");
                        unmap_kernel_range((unsigned long)page_addr, PAGE_SIZE);
                        __free_page(alloc->pages[i].page_ptr);
                        page_count++;
                }
                kfree(alloc->pages);
                vfree(alloc->buffer);
        }
        mutex_unlock(&alloc->mutex);
        if (alloc->vma_vm_mm)
                mmdrop(alloc->vma_vm_mm);

        binder_alloc_debug(BINDER_DEBUG_OPEN_CLOSE,
                           "%s: %d buffers %d, pages %d\n",
                           __func__, alloc->pid, buffers, page_count);
}
static void print_binder_buffer(struct seq_file *m, const char *prefix,
                                struct binder_buffer *buffer)
{
        seq_printf(m, "%s %d: %pK size %zd:%zd:%zd %s\n",
                   prefix, buffer->debug_id, buffer->data,
                   buffer->data_size, buffer->offsets_size,
                   buffer->extra_buffers_size,
                   buffer->transaction ? "active" : "delivered");
}
/**
 * binder_alloc_print_allocated() - print buffer info
 * @m:     seq_file for output via seq_printf()
 * @alloc: binder_alloc for this proc
 *
 * Prints information about every buffer associated with
 * the binder_alloc state to the given seq_file
 */
void binder_alloc_print_allocated(struct seq_file *m,
                                  struct binder_alloc *alloc)
{
        struct rb_node *n;

        mutex_lock(&alloc->mutex);
        for (n = rb_first(&alloc->allocated_buffers); n != NULL; n = rb_next(n))
                print_binder_buffer(m, "  buffer",
                                    rb_entry(n, struct binder_buffer, rb_node));
        mutex_unlock(&alloc->mutex);
}
/**
 * binder_alloc_print_pages() - print page usage
 * @m:     seq_file for output via seq_printf()
 * @alloc: binder_alloc for this proc
 */
void binder_alloc_print_pages(struct seq_file *m,
                              struct binder_alloc *alloc)
{
        struct binder_lru_page *page;
        int i;
        int active = 0;
        int lru = 0;
        int free = 0;

        mutex_lock(&alloc->mutex);
        for (i = 0; i < alloc->buffer_size / PAGE_SIZE; i++) {
                page = &alloc->pages[i];
                if (!page->page_ptr)
                        free++;
                else if (list_empty(&page->lru))
                        active++;
                else
                        lru++;
        }
        mutex_unlock(&alloc->mutex);
        seq_printf(m, "  pages: %d:%d:%d\n", active, lru, free);
        seq_printf(m, "  pages high watermark: %zu\n", alloc->pages_high);
}
/**
 * binder_alloc_get_allocated_count() - return count of buffers
 * @alloc: binder_alloc for this proc
 *
 * Return: count of allocated buffers
 */
int binder_alloc_get_allocated_count(struct binder_alloc *alloc)
{
        struct rb_node *n;
        int count = 0;

        mutex_lock(&alloc->mutex);
        for (n = rb_first(&alloc->allocated_buffers); n != NULL; n = rb_next(n))
                count++;
        mutex_unlock(&alloc->mutex);
        return count;
}
/**
 * binder_alloc_vma_close() - invalidate address space
 * @alloc: binder_alloc for this proc
 *
 * Called from binder_vma_close() when releasing address space.
 * Clears alloc->vma to prevent new incoming transactions from
 * allocating more buffers.
 */
void binder_alloc_vma_close(struct binder_alloc *alloc)
{
        WRITE_ONCE(alloc->vma, NULL);
}
/**
 * binder_alloc_free_page() - shrinker callback to free pages
 * @item:   item to free
 * @lru:    list_lru_one holding the item
 * @lock:   lock protecting the lru
 * @cb_arg: callback argument
 *
 * Called from list_lru_walk() in binder_shrink_scan() to free
 * up pages when the system is under memory pressure.
 */
enum lru_status binder_alloc_free_page(struct list_head *item,
                                       struct list_lru_one *lru,
                                       spinlock_t *lock,
                                       void *cb_arg)
{
        struct mm_struct *mm = NULL;
        struct binder_lru_page *page = container_of(item,
                                                    struct binder_lru_page,
                                                    lru);
        struct binder_alloc *alloc;
        uintptr_t page_addr;
        size_t index;
        struct vm_area_struct *vma;

        alloc = page->alloc;
        if (!mutex_trylock(&alloc->mutex))
                goto err_get_alloc_mutex_failed;

        if (!page->page_ptr)
                goto err_page_already_freed;

        index = page - alloc->pages;
        page_addr = (uintptr_t)alloc->buffer + index * PAGE_SIZE;
        vma = alloc->vma;
        if (vma) {
                if (!mmget_not_zero(alloc->vma_vm_mm))
                        goto err_mmget;
                mm = alloc->vma_vm_mm;
                if (!down_write_trylock(&mm->mmap_sem))
                        goto err_down_write_mmap_sem_failed;
        }

        list_lru_isolate(lru, item);
        spin_unlock(lock);

        if (vma) {
                trace_binder_unmap_user_start(alloc, index);

                zap_page_range(vma,
                               page_addr + alloc->user_buffer_offset,
                               PAGE_SIZE);

                trace_binder_unmap_user_end(alloc, index);

                up_write(&mm->mmap_sem);
                mmput(mm);
        }

        trace_binder_unmap_kernel_start(alloc, index);

        unmap_kernel_range(page_addr, PAGE_SIZE);
        __free_page(page->page_ptr);
        page->page_ptr = NULL;

        trace_binder_unmap_kernel_end(alloc, index);

        spin_lock(lock);
        mutex_unlock(&alloc->mutex);
        return LRU_REMOVED_RETRY;

err_down_write_mmap_sem_failed:
        mmput(mm);
err_mmget:
err_page_already_freed:
        mutex_unlock(&alloc->mutex);
err_get_alloc_mutex_failed:
        return LRU_SKIP;
}
static unsigned long
binder_shrink_count(struct shrinker *shrink, struct shrink_control *sc)
{
        unsigned long ret = list_lru_count(&binder_alloc_lru);

        return ret;
}

static unsigned long
binder_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
{
        unsigned long ret;

        ret = list_lru_walk(&binder_alloc_lru, binder_alloc_free_page,
                            NULL, sc->nr_to_scan);
        return ret;
}

static struct shrinker binder_shrinker = {
        .count_objects = binder_shrink_count,
        .scan_objects = binder_shrink_scan,
        .seeks = DEFAULT_SEEKS,
};
/**
 * binder_alloc_init() - called by binder_open() for per-proc initialization
 * @alloc: binder_alloc for this proc
 *
 * Called from binder_open() to initialize binder_alloc fields for
 * new binder proc
 */
void binder_alloc_init(struct binder_alloc *alloc)
{
        alloc->pid = current->group_leader->pid;
        mutex_init(&alloc->mutex);
        INIT_LIST_HEAD(&alloc->buffers);
}

int binder_alloc_shrinker_init(void)
{
        int ret = list_lru_init(&binder_alloc_lru);

        if (ret == 0) {
                ret = register_shrinker(&binder_shrinker);
                if (ret)
                        list_lru_destroy(&binder_alloc_lru);
        }
        return ret;
}
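/*
 * Initialization sketch (assumed call sites, mirroring the kernel-doc
 * above): binder_alloc_init() runs once per process when binder_open()
 * sets up its binder_alloc, while binder_alloc_shrinker_init() is called
 * a single time at driver init, e.g.:
 *
 *	ret = binder_alloc_shrinker_init();
 *	if (ret)
 *		return ret;
 */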