/*
 * Android IPC Subsystem
 *
 * Copyright (C) 2007-2017 Google, Inc.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/list.h>
#include <linux/sched/mm.h>
#include <linux/module.h>
#include <linux/rtmutex.h>
#include <linux/rbtree.h>
#include <linux/seq_file.h>
#include <linux/vmalloc.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/list_lru.h>
#include <linux/ratelimit.h>
#include <asm/cacheflush.h>
#include "binder_alloc.h"
#include "binder_trace.h"

struct list_lru binder_alloc_lru;

static DEFINE_MUTEX(binder_alloc_mmap_lock);

enum {
        BINDER_DEBUG_USER_ERROR             = 1U << 0,
        BINDER_DEBUG_OPEN_CLOSE             = 1U << 1,
        BINDER_DEBUG_BUFFER_ALLOC           = 1U << 2,
        BINDER_DEBUG_BUFFER_ALLOC_ASYNC     = 1U << 3,
};
static uint32_t binder_alloc_debug_mask = BINDER_DEBUG_USER_ERROR;

module_param_named(debug_mask, binder_alloc_debug_mask,
                   uint, 0644);

#define binder_alloc_debug(mask, x...) \
        do { \
                if (binder_alloc_debug_mask & mask) \
                        pr_info_ratelimited(x); \
        } while (0)
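
/*
 * Example (illustrative only, not part of the original driver): with
 * binder_alloc_debug_mask including BINDER_DEBUG_BUFFER_ALLOC (bit 2),
 * a call such as
 *
 *      binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
 *                         "%d: example message\n", alloc->pid);
 *
 * expands to a rate-limited pr_info(); with the bit clear, the message
 * is skipped entirely.
 */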

static struct binder_buffer *binder_buffer_next(struct binder_buffer *buffer)
{
        return list_entry(buffer->entry.next, struct binder_buffer, entry);
}

static struct binder_buffer *binder_buffer_prev(struct binder_buffer *buffer)
{
        return list_entry(buffer->entry.prev, struct binder_buffer, entry);
}

static size_t binder_alloc_buffer_size(struct binder_alloc *alloc,
                                       struct binder_buffer *buffer)
{
        if (list_is_last(&buffer->entry, &alloc->buffers))
                return (u8 *)alloc->buffer +
                        alloc->buffer_size - (u8 *)buffer->data;
        return (u8 *)binder_buffer_next(buffer)->data - (u8 *)buffer->data;
}
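
/*
 * Worked example (illustrative, values assumed): if a buffer's data starts
 * at alloc->buffer + 0x100 and the next buffer in alloc->buffers starts at
 * alloc->buffer + 0x1c0, binder_alloc_buffer_size() returns 0xc0. For the
 * last buffer in the list, the size runs to the end of the mapping, i.e.
 * alloc->buffer + alloc->buffer_size - buffer->data.
 */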

static void binder_insert_free_buffer(struct binder_alloc *alloc,
                                      struct binder_buffer *new_buffer)
{
        struct rb_node **p = &alloc->free_buffers.rb_node;
        struct rb_node *parent = NULL;
        struct binder_buffer *buffer;
        size_t buffer_size;
        size_t new_buffer_size;

        BUG_ON(!new_buffer->free);

        new_buffer_size = binder_alloc_buffer_size(alloc, new_buffer);

        binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
                           "%d: add free buffer, size %zd, at %pK\n",
                           alloc->pid, new_buffer_size, new_buffer);

        while (*p) {
                parent = *p;
                buffer = rb_entry(parent, struct binder_buffer, rb_node);
                BUG_ON(!buffer->free);

                buffer_size = binder_alloc_buffer_size(alloc, buffer);

                if (new_buffer_size < buffer_size)
                        p = &parent->rb_left;
                else
                        p = &parent->rb_right;
        }
        rb_link_node(&new_buffer->rb_node, parent, p);
        rb_insert_color(&new_buffer->rb_node, &alloc->free_buffers);
}

static void binder_insert_allocated_buffer_locked(
                struct binder_alloc *alloc, struct binder_buffer *new_buffer)
{
        struct rb_node **p = &alloc->allocated_buffers.rb_node;
        struct rb_node *parent = NULL;
        struct binder_buffer *buffer;

        BUG_ON(new_buffer->free);

        while (*p) {
                parent = *p;
                buffer = rb_entry(parent, struct binder_buffer, rb_node);
                BUG_ON(buffer->free);

                if (new_buffer->data < buffer->data)
                        p = &parent->rb_left;
                else if (new_buffer->data > buffer->data)
                        p = &parent->rb_right;
                else
                        BUG_ON(1);
        }
        rb_link_node(&new_buffer->rb_node, parent, p);
        rb_insert_color(&new_buffer->rb_node, &alloc->allocated_buffers);
}

static struct binder_buffer *binder_alloc_prepare_to_free_locked(
                struct binder_alloc *alloc,
                uintptr_t user_ptr)
{
        struct rb_node *n = alloc->allocated_buffers.rb_node;
        struct binder_buffer *buffer;
        void *kern_ptr;

        kern_ptr = (void *)(user_ptr - alloc->user_buffer_offset);

        while (n) {
                buffer = rb_entry(n, struct binder_buffer, rb_node);
                BUG_ON(buffer->free);

                if (kern_ptr < buffer->data)
                        n = n->rb_left;
                else if (kern_ptr > buffer->data)
                        n = n->rb_right;
                else {
                        /*
                         * Guard against user threads attempting to
                         * free the buffer when in use by kernel or
                         * after it's already been freed.
                         */
                        if (!buffer->allow_user_free)
                                return ERR_PTR(-EPERM);
                        buffer->allow_user_free = 0;
                        return buffer;
                }
        }
        return NULL;
}

/**
 * binder_alloc_prepare_to_free() - get buffer given user ptr
 * @alloc:    binder_alloc for this proc
 * @user_ptr: User pointer to buffer data
 *
 * Validate userspace pointer to buffer data and return buffer corresponding to
 * that user pointer. Search the rb tree for a buffer that matches the user
 * data pointer.
 *
 * Return: Pointer to buffer or NULL
 */
struct binder_buffer *binder_alloc_prepare_to_free(struct binder_alloc *alloc,
                                                   uintptr_t user_ptr)
{
        struct binder_buffer *buffer;

        mutex_lock(&alloc->mutex);
        buffer = binder_alloc_prepare_to_free_locked(alloc, user_ptr);
        mutex_unlock(&alloc->mutex);
        return buffer;
}
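
/*
 * Illustrative calling pattern (assumed, not part of this file): the
 * BC_FREE_BUFFER handling in binder.c resolves the userspace cookie into
 * a kernel buffer before releasing it, roughly:
 *
 *      buffer = binder_alloc_prepare_to_free(&proc->alloc, data_ptr);
 *      if (IS_ERR_OR_NULL(buffer))
 *              return;         // unknown pointer, still in use, or already freed
 *      ...
 *      binder_alloc_free_buf(&proc->alloc, buffer);
 */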

static int binder_update_page_range(struct binder_alloc *alloc, int allocate,
                                    void *start, void *end)
{
        void *page_addr;
        unsigned long user_page_addr;
        struct binder_lru_page *page;
        struct vm_area_struct *vma = NULL;
        struct mm_struct *mm = NULL;
        bool need_mm = false;

        binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
                           "%d: %s pages %pK-%pK\n", alloc->pid,
                           allocate ? "allocate" : "free", start, end);

        if (end <= start)
                return 0;

        trace_binder_update_page_range(alloc, allocate, start, end);

        if (allocate == 0)
                goto free_range;

        for (page_addr = start; page_addr < end; page_addr += PAGE_SIZE) {
                page = &alloc->pages[(page_addr - alloc->buffer) / PAGE_SIZE];
                if (!page->page_ptr) {
                        need_mm = true;
                        break;
                }
        }

        if (need_mm && mmget_not_zero(alloc->vma_vm_mm))
                mm = alloc->vma_vm_mm;

        if (mm) {
                down_read(&mm->mmap_sem);
                vma = alloc->vma;
        }

        if (!vma && need_mm) {
                binder_alloc_debug(BINDER_DEBUG_USER_ERROR,
                                   "%d: binder_alloc_buf failed to map pages in userspace, no vma\n",
                                   alloc->pid);
                goto err_no_vma;
        }

        for (page_addr = start; page_addr < end; page_addr += PAGE_SIZE) {
                int ret;
                bool on_lru;
                size_t index;

                index = (page_addr - alloc->buffer) / PAGE_SIZE;
                page = &alloc->pages[index];

                if (page->page_ptr) {
                        trace_binder_alloc_lru_start(alloc, index);

                        on_lru = list_lru_del(&binder_alloc_lru, &page->lru);
                        WARN_ON(!on_lru);

                        trace_binder_alloc_lru_end(alloc, index);
                        continue;
                }

                if (WARN_ON(!vma))
                        goto err_page_ptr_cleared;

                trace_binder_alloc_page_start(alloc, index);
                page->page_ptr = alloc_page(GFP_KERNEL |
                                            __GFP_HIGHMEM |
                                            __GFP_ZERO);
                if (!page->page_ptr) {
                        pr_err("%d: binder_alloc_buf failed for page at %pK\n",
                               alloc->pid, page_addr);
                        goto err_alloc_page_failed;
                }
                page->alloc = alloc;
                INIT_LIST_HEAD(&page->lru);

                ret = map_kernel_range_noflush((unsigned long)page_addr,
                                               PAGE_SIZE, PAGE_KERNEL,
                                               &page->page_ptr);
                flush_cache_vmap((unsigned long)page_addr,
                                 (unsigned long)page_addr + PAGE_SIZE);
                if (ret != 1) {
                        pr_err("%d: binder_alloc_buf failed to map page at %pK in kernel\n",
                               alloc->pid, page_addr);
                        goto err_map_kernel_failed;
                }
                user_page_addr =
                        (uintptr_t)page_addr + alloc->user_buffer_offset;
                ret = vm_insert_page(vma, user_page_addr, page[0].page_ptr);
                if (ret) {
                        pr_err("%d: binder_alloc_buf failed to map page at %lx in userspace\n",
                               alloc->pid, user_page_addr);
                        goto err_vm_insert_page_failed;
                }

                if (index + 1 > alloc->pages_high)
                        alloc->pages_high = index + 1;

                trace_binder_alloc_page_end(alloc, index);
                /* vm_insert_page does not seem to increment the refcount */
        }
        if (mm) {
                up_read(&mm->mmap_sem);
                mmput(mm);
        }
        return 0;

free_range:
        for (page_addr = end - PAGE_SIZE; page_addr >= start;
             page_addr -= PAGE_SIZE) {
                bool ret;
                size_t index;

                index = (page_addr - alloc->buffer) / PAGE_SIZE;
                page = &alloc->pages[index];

                trace_binder_free_lru_start(alloc, index);

                ret = list_lru_add(&binder_alloc_lru, &page->lru);
                WARN_ON(!ret);

                trace_binder_free_lru_end(alloc, index);
                continue;

err_vm_insert_page_failed:
                unmap_kernel_range((unsigned long)page_addr, PAGE_SIZE);
err_map_kernel_failed:
                __free_page(page->page_ptr);
                page->page_ptr = NULL;
err_alloc_page_failed:
err_page_ptr_cleared:
                ;
        }
err_no_vma:
        if (mm) {
                up_read(&mm->mmap_sem);
                mmput(mm);
        }
        return vma ? -ENOMEM : -ESRCH;
}
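
/*
 * Illustrative note (not part of the original file): callers pass
 * page-aligned [start, end) kernel addresses inside the binder mapping.
 * With allocate == 1 the range is backed by freshly allocated pages that
 * are mapped into both the kernel and the userspace vma; with allocate == 0
 * the pages are not torn down immediately but parked on binder_alloc_lru,
 * where the shrinker callback (binder_alloc_free_page()) can reclaim them
 * later under memory pressure.
 */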

static inline void binder_alloc_set_vma(struct binder_alloc *alloc,
                                        struct vm_area_struct *vma)
{
        if (vma)
                alloc->vma_vm_mm = vma->vm_mm;
        /*
         * If we see alloc->vma is not NULL, the buffer data structures are
         * set up completely. Look at the smp_rmb() side in
         * binder_alloc_get_vma().
         * We also want to guarantee the new alloc->vma_vm_mm is always
         * visible if alloc->vma is set.
         */
        smp_store_release(&alloc->vma, vma);
}

static inline struct vm_area_struct *binder_alloc_get_vma(
                struct binder_alloc *alloc)
{
        struct vm_area_struct *vma = NULL;

        if (alloc->vma) {
                /* Look at description in binder_alloc_set_vma */
                smp_rmb();
                vma = alloc->vma;
        }
        return vma;
}
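
/*
 * Illustrative summary of the ordering protocol above (not from the
 * original file): the writer publishes with
 *
 *      alloc->vma_vm_mm = vma->vm_mm;
 *      smp_store_release(&alloc->vma, vma);
 *
 * while readers do
 *
 *      if (alloc->vma) { smp_rmb(); ... use alloc->vma / alloc->vma_vm_mm ... }
 *
 * so observing a non-NULL alloc->vma guarantees vma_vm_mm (and the rest of
 * the buffer setup) is already visible.
 */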

static struct binder_buffer *binder_alloc_new_buf_locked(
                                struct binder_alloc *alloc,
                                size_t data_size,
                                size_t offsets_size,
                                size_t extra_buffers_size,
                                int is_async)
{
        struct rb_node *n = alloc->free_buffers.rb_node;
        struct binder_buffer *buffer;
        size_t buffer_size;
        struct rb_node *best_fit = NULL;
        void *has_page_addr;
        void *end_page_addr;
        size_t size, data_offsets_size;
        int ret;

        if (!binder_alloc_get_vma(alloc)) {
                binder_alloc_debug(BINDER_DEBUG_USER_ERROR,
                                   "%d: binder_alloc_buf, no vma\n",
                                   alloc->pid);
                return ERR_PTR(-ESRCH);
        }

        data_offsets_size = ALIGN(data_size, sizeof(void *)) +
                ALIGN(offsets_size, sizeof(void *));

        if (data_offsets_size < data_size || data_offsets_size < offsets_size) {
                binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
                                   "%d: got transaction with invalid size %zd-%zd\n",
                                   alloc->pid, data_size, offsets_size);
                return ERR_PTR(-EINVAL);
        }
        size = data_offsets_size + ALIGN(extra_buffers_size, sizeof(void *));
        if (size < data_offsets_size || size < extra_buffers_size) {
                binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
                                   "%d: got transaction with invalid extra_buffers_size %zd\n",
                                   alloc->pid, extra_buffers_size);
                return ERR_PTR(-EINVAL);
        }
        if (is_async &&
            alloc->free_async_space < size + sizeof(struct binder_buffer)) {
                binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
                                   "%d: binder_alloc_buf size %zd failed, no async space left\n",
                                   alloc->pid, size);
                return ERR_PTR(-ENOSPC);
        }

        /* Pad 0-size buffers so they get assigned unique addresses */
        size = max(size, sizeof(void *));

        while (n) {
                buffer = rb_entry(n, struct binder_buffer, rb_node);
                BUG_ON(!buffer->free);
                buffer_size = binder_alloc_buffer_size(alloc, buffer);

                if (size < buffer_size) {
                        best_fit = n;
                        n = n->rb_left;
                } else if (size > buffer_size)
                        n = n->rb_right;
                else {
                        best_fit = n;
                        break;
                }
        }
        if (best_fit == NULL) {
                size_t allocated_buffers = 0;
                size_t largest_alloc_size = 0;
                size_t total_alloc_size = 0;
                size_t free_buffers = 0;
                size_t largest_free_size = 0;
                size_t total_free_size = 0;

                for (n = rb_first(&alloc->allocated_buffers); n != NULL;
                     n = rb_next(n)) {
                        buffer = rb_entry(n, struct binder_buffer, rb_node);
                        buffer_size = binder_alloc_buffer_size(alloc, buffer);
                        allocated_buffers++;
                        total_alloc_size += buffer_size;
                        if (buffer_size > largest_alloc_size)
                                largest_alloc_size = buffer_size;
                }
                for (n = rb_first(&alloc->free_buffers); n != NULL;
                     n = rb_next(n)) {
                        buffer = rb_entry(n, struct binder_buffer, rb_node);
                        buffer_size = binder_alloc_buffer_size(alloc, buffer);
                        free_buffers++;
                        total_free_size += buffer_size;
                        if (buffer_size > largest_free_size)
                                largest_free_size = buffer_size;
                }
                binder_alloc_debug(BINDER_DEBUG_USER_ERROR,
                                   "%d: binder_alloc_buf size %zd failed, no address space\n",
                                   alloc->pid, size);
                binder_alloc_debug(BINDER_DEBUG_USER_ERROR,
                                   "allocated: %zd (num: %zd largest: %zd), free: %zd (num: %zd largest: %zd)\n",
                                   total_alloc_size, allocated_buffers,
                                   largest_alloc_size, total_free_size,
                                   free_buffers, largest_free_size);
                return ERR_PTR(-ENOSPC);
        }
        if (n == NULL) {
                buffer = rb_entry(best_fit, struct binder_buffer, rb_node);
                buffer_size = binder_alloc_buffer_size(alloc, buffer);
        }

        binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
                           "%d: binder_alloc_buf size %zd got buffer %pK size %zd\n",
                           alloc->pid, size, buffer, buffer_size);

        has_page_addr =
                (void *)(((uintptr_t)buffer->data + buffer_size) & PAGE_MASK);
        WARN_ON(n && buffer_size != size);
        end_page_addr =
                (void *)PAGE_ALIGN((uintptr_t)buffer->data + size);
        if (end_page_addr > has_page_addr)
                end_page_addr = has_page_addr;
        ret = binder_update_page_range(alloc, 1,
                (void *)PAGE_ALIGN((uintptr_t)buffer->data), end_page_addr);
        if (ret)
                return ERR_PTR(ret);

        if (buffer_size != size) {
                struct binder_buffer *new_buffer;

                new_buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
                if (!new_buffer) {
                        pr_err("%s: %d failed to alloc new buffer struct\n",
                               __func__, alloc->pid);
                        goto err_alloc_buf_struct_failed;
                }
                new_buffer->data = (u8 *)buffer->data + size;
                list_add(&new_buffer->entry, &buffer->entry);
                new_buffer->free = 1;
                binder_insert_free_buffer(alloc, new_buffer);
        }

        rb_erase(best_fit, &alloc->free_buffers);
        buffer->free = 0;
        buffer->allow_user_free = 0;
        binder_insert_allocated_buffer_locked(alloc, buffer);
        binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
                           "%d: binder_alloc_buf size %zd got %pK\n",
                           alloc->pid, size, buffer);
        buffer->data_size = data_size;
        buffer->offsets_size = offsets_size;
        buffer->async_transaction = is_async;
        buffer->extra_buffers_size = extra_buffers_size;
        if (is_async) {
                alloc->free_async_space -= size + sizeof(struct binder_buffer);
                binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC_ASYNC,
                                   "%d: binder_alloc_buf size %zd async free %zd\n",
                                   alloc->pid, size, alloc->free_async_space);
        }
        return buffer;

err_alloc_buf_struct_failed:
        binder_update_page_range(alloc, 0,
                                 (void *)PAGE_ALIGN((uintptr_t)buffer->data),
                                 end_page_addr);
        return ERR_PTR(-ENOMEM);
}

/**
 * binder_alloc_new_buf() - Allocate a new binder buffer
 * @alloc:              binder_alloc for this proc
 * @data_size:          size of user data buffer
 * @offsets_size:       user specified buffer offset
 * @extra_buffers_size: size of extra space for meta-data (eg, security context)
 * @is_async:           buffer for async transaction
 *
 * Allocate a new buffer given the requested sizes. Returns
 * the kernel version of the buffer pointer. The size allocated
 * is the sum of the three given sizes (each rounded up to a
 * pointer-sized boundary).
 *
 * Return: The allocated buffer or an ERR_PTR(-errno) on failure
 */
struct binder_buffer *binder_alloc_new_buf(struct binder_alloc *alloc,
                                           size_t data_size,
                                           size_t offsets_size,
                                           size_t extra_buffers_size,
                                           int is_async)
{
        struct binder_buffer *buffer;

        mutex_lock(&alloc->mutex);
        buffer = binder_alloc_new_buf_locked(alloc, data_size, offsets_size,
                                             extra_buffers_size, is_async);
        mutex_unlock(&alloc->mutex);
        return buffer;
}
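
/*
 * Worked example (illustrative, values assumed): for data_size = 100,
 * offsets_size = 12 and extra_buffers_size = 0 on a 64-bit kernel,
 * ALIGN(100, 8) + ALIGN(12, 8) + ALIGN(0, 8) = 104 + 16 + 0 = 120 bytes
 * are carved out of the best-fit free buffer (zero-sized requests are
 * additionally padded to sizeof(void *), as noted above).
 */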

static void *buffer_start_page(struct binder_buffer *buffer)
{
        return (void *)((uintptr_t)buffer->data & PAGE_MASK);
}

static void *prev_buffer_end_page(struct binder_buffer *buffer)
{
        return (void *)(((uintptr_t)(buffer->data) - 1) & PAGE_MASK);
}

static void binder_delete_free_buffer(struct binder_alloc *alloc,
                                      struct binder_buffer *buffer)
{
        struct binder_buffer *prev, *next = NULL;
        bool to_free = true;

        BUG_ON(alloc->buffers.next == &buffer->entry);
        prev = binder_buffer_prev(buffer);
        BUG_ON(!prev->free);
        if (prev_buffer_end_page(prev) == buffer_start_page(buffer)) {
                to_free = false;
                binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
                                   "%d: merge free, buffer %pK share page with %pK\n",
                                   alloc->pid, buffer->data, prev->data);
        }

        if (!list_is_last(&buffer->entry, &alloc->buffers)) {
                next = binder_buffer_next(buffer);
                if (buffer_start_page(next) == buffer_start_page(buffer)) {
                        to_free = false;
                        binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
                                           "%d: merge free, buffer %pK share page with %pK\n",
                                           alloc->pid,
                                           buffer->data,
                                           next->data);
                }
        }

        if (PAGE_ALIGNED(buffer->data)) {
                binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
                                   "%d: merge free, buffer start %pK is page aligned\n",
                                   alloc->pid, buffer->data);
                to_free = false;
        }

        if (to_free) {
                binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
                                   "%d: merge free, buffer %pK do not share page with %pK or %pK\n",
                                   alloc->pid, buffer->data,
                                   prev->data, next ? next->data : NULL);
                binder_update_page_range(alloc, 0, buffer_start_page(buffer),
                                         buffer_start_page(buffer) + PAGE_SIZE);
        }
        list_del(&buffer->entry);
        kfree(buffer);
}

static void binder_free_buf_locked(struct binder_alloc *alloc,
                                   struct binder_buffer *buffer)
{
        size_t size, buffer_size;

        buffer_size = binder_alloc_buffer_size(alloc, buffer);

        size = ALIGN(buffer->data_size, sizeof(void *)) +
                ALIGN(buffer->offsets_size, sizeof(void *)) +
                ALIGN(buffer->extra_buffers_size, sizeof(void *));

        binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
                           "%d: binder_free_buf %pK size %zd buffer_size %zd\n",
                           alloc->pid, buffer, size, buffer_size);

        BUG_ON(buffer->free);
        BUG_ON(size > buffer_size);
        BUG_ON(buffer->transaction != NULL);
        BUG_ON(buffer->data < alloc->buffer);
        BUG_ON(buffer->data > alloc->buffer + alloc->buffer_size);

        if (buffer->async_transaction) {
                alloc->free_async_space += size + sizeof(struct binder_buffer);

                binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC_ASYNC,
                                   "%d: binder_free_buf size %zd async free %zd\n",
                                   alloc->pid, size, alloc->free_async_space);
        }

        binder_update_page_range(alloc, 0,
                (void *)PAGE_ALIGN((uintptr_t)buffer->data),
                (void *)(((uintptr_t)buffer->data + buffer_size) & PAGE_MASK));

        rb_erase(&buffer->rb_node, &alloc->allocated_buffers);
        buffer->free = 1;
        if (!list_is_last(&buffer->entry, &alloc->buffers)) {
                struct binder_buffer *next = binder_buffer_next(buffer);

                if (next->free) {
                        rb_erase(&next->rb_node, &alloc->free_buffers);
                        binder_delete_free_buffer(alloc, next);
                }
        }
        if (alloc->buffers.next != &buffer->entry) {
                struct binder_buffer *prev = binder_buffer_prev(buffer);

                if (prev->free) {
                        binder_delete_free_buffer(alloc, buffer);
                        rb_erase(&prev->rb_node, &alloc->free_buffers);
                        buffer = prev;
                }
        }
        binder_insert_free_buffer(alloc, buffer);
}

/**
 * binder_alloc_free_buf() - free a binder buffer
 * @alloc:  binder_alloc for this proc
 * @buffer: kernel pointer to buffer
 *
 * Free the buffer allocated via binder_alloc_new_buf()
 */
void binder_alloc_free_buf(struct binder_alloc *alloc,
                           struct binder_buffer *buffer)
{
        mutex_lock(&alloc->mutex);
        binder_free_buf_locked(alloc, buffer);
        mutex_unlock(&alloc->mutex);
}

/**
 * binder_alloc_mmap_handler() - map virtual address space for proc
 * @alloc: alloc structure for this proc
 * @vma:   vma passed to mmap()
 *
 * Called by binder_mmap() to initialize the space specified in
 * vma for allocating binder buffers
 *
 * Return:
 *      0 = success
 *      -EBUSY = address space already mapped
 *      -ENOMEM = failed to map memory to given address space
 */
int binder_alloc_mmap_handler(struct binder_alloc *alloc,
                              struct vm_area_struct *vma)
{
        int ret;
        struct vm_struct *area;
        const char *failure_string;
        struct binder_buffer *buffer;

        mutex_lock(&binder_alloc_mmap_lock);
        if (alloc->buffer) {
                ret = -EBUSY;
                failure_string = "already mapped";
                goto err_already_mapped;
        }

        area = get_vm_area(vma->vm_end - vma->vm_start, VM_ALLOC);
        if (area == NULL) {
                ret = -ENOMEM;
                failure_string = "get_vm_area";
                goto err_get_vm_area_failed;
        }
        alloc->buffer = area->addr;
        alloc->user_buffer_offset =
                vma->vm_start - (uintptr_t)alloc->buffer;
        mutex_unlock(&binder_alloc_mmap_lock);

#ifdef CONFIG_CPU_CACHE_VIPT
        if (cache_is_vipt_aliasing()) {
                while (CACHE_COLOUR(
                                (vma->vm_start ^ (uint32_t)alloc->buffer))) {
                        pr_info("%s: %d %lx-%lx maps %pK bad alignment\n",
                                __func__, alloc->pid, vma->vm_start,
                                vma->vm_end, alloc->buffer);
                        vma->vm_start += PAGE_SIZE;
                }
        }
#endif
        alloc->pages = kcalloc((vma->vm_end - vma->vm_start) / PAGE_SIZE,
                               sizeof(alloc->pages[0]),
                               GFP_KERNEL);
        if (alloc->pages == NULL) {
                ret = -ENOMEM;
                failure_string = "alloc page array";
                goto err_alloc_pages_failed;
        }
        alloc->buffer_size = vma->vm_end - vma->vm_start;

        buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
        if (!buffer) {
                ret = -ENOMEM;
                failure_string = "alloc buffer struct";
                goto err_alloc_buf_struct_failed;
        }

        buffer->data = alloc->buffer;
        list_add(&buffer->entry, &alloc->buffers);
        buffer->free = 1;
        binder_insert_free_buffer(alloc, buffer);
        alloc->free_async_space = alloc->buffer_size / 2;
        binder_alloc_set_vma(alloc, vma);
        mmgrab(alloc->vma_vm_mm);

        return 0;

err_alloc_buf_struct_failed:
        kfree(alloc->pages);
        alloc->pages = NULL;
err_alloc_pages_failed:
        mutex_lock(&binder_alloc_mmap_lock);
        vfree(alloc->buffer);
        alloc->buffer = NULL;
err_get_vm_area_failed:
err_already_mapped:
        mutex_unlock(&binder_alloc_mmap_lock);
        binder_alloc_debug(BINDER_DEBUG_USER_ERROR,
                           "%s: %d %lx-%lx %s failed %d\n", __func__,
                           alloc->pid, vma->vm_start, vma->vm_end,
                           failure_string, ret);
        return ret;
}

void binder_alloc_deferred_release(struct binder_alloc *alloc)
{
        struct rb_node *n;
        int buffers, page_count;
        struct binder_buffer *buffer;

        buffers = 0;
        mutex_lock(&alloc->mutex);
        BUG_ON(alloc->vma);

        while ((n = rb_first(&alloc->allocated_buffers))) {
                buffer = rb_entry(n, struct binder_buffer, rb_node);

                /* Transaction should already have been freed */
                BUG_ON(buffer->transaction);

                binder_free_buf_locked(alloc, buffer);
                buffers++;
        }

        while (!list_empty(&alloc->buffers)) {
                buffer = list_first_entry(&alloc->buffers,
                                          struct binder_buffer, entry);
                WARN_ON(!buffer->free);

                list_del(&buffer->entry);
                WARN_ON_ONCE(!list_empty(&alloc->buffers));
                kfree(buffer);
        }

        page_count = 0;
        if (alloc->pages) {
                int i;

                for (i = 0; i < alloc->buffer_size / PAGE_SIZE; i++) {
                        void *page_addr;
                        bool on_lru;

                        if (!alloc->pages[i].page_ptr)
                                continue;

                        on_lru = list_lru_del(&binder_alloc_lru,
                                              &alloc->pages[i].lru);
                        page_addr = alloc->buffer + i * PAGE_SIZE;
                        binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
                                           "%s: %d: page %d at %pK %s\n",
                                           __func__, alloc->pid, i, page_addr,
                                           on_lru ? "on lru" : "active");
                        unmap_kernel_range((unsigned long)page_addr, PAGE_SIZE);
                        __free_page(alloc->pages[i].page_ptr);
                        page_count++;
                }
                kfree(alloc->pages);
                vfree(alloc->buffer);
        }
        mutex_unlock(&alloc->mutex);
        if (alloc->vma_vm_mm)
                mmdrop(alloc->vma_vm_mm);

        binder_alloc_debug(BINDER_DEBUG_OPEN_CLOSE,
                           "%s: %d buffers %d, pages %d\n",
                           __func__, alloc->pid, buffers, page_count);
}

static void print_binder_buffer(struct seq_file *m, const char *prefix,
                                struct binder_buffer *buffer)
{
        seq_printf(m, "%s %d: %pK size %zd:%zd:%zd %s\n",
                   prefix, buffer->debug_id, buffer->data,
                   buffer->data_size, buffer->offsets_size,
                   buffer->extra_buffers_size,
                   buffer->transaction ? "active" : "delivered");
}

/**
 * binder_alloc_print_allocated() - print buffer info
 * @m:     seq_file for output via seq_printf()
 * @alloc: binder_alloc for this proc
 *
 * Prints information about every buffer associated with
 * the binder_alloc state to the given seq_file
 */
void binder_alloc_print_allocated(struct seq_file *m,
                                  struct binder_alloc *alloc)
{
        struct rb_node *n;

        mutex_lock(&alloc->mutex);
        for (n = rb_first(&alloc->allocated_buffers); n != NULL; n = rb_next(n))
                print_binder_buffer(m, "  buffer",
                                    rb_entry(n, struct binder_buffer, rb_node));
        mutex_unlock(&alloc->mutex);
}

/**
 * binder_alloc_print_pages() - print page usage
 * @m:     seq_file for output via seq_printf()
 * @alloc: binder_alloc for this proc
 */
void binder_alloc_print_pages(struct seq_file *m,
                              struct binder_alloc *alloc)
{
        struct binder_lru_page *page;
        int i;
        int active = 0;
        int lru = 0;
        int free = 0;

        mutex_lock(&alloc->mutex);
        for (i = 0; i < alloc->buffer_size / PAGE_SIZE; i++) {
                page = &alloc->pages[i];
                if (!page->page_ptr)
                        free++;
                else if (list_empty(&page->lru))
                        active++;
                else
                        lru++;
        }
        mutex_unlock(&alloc->mutex);
        seq_printf(m, "  pages: %d:%d:%d\n", active, lru, free);
        seq_printf(m, "  pages high watermark: %zu\n", alloc->pages_high);
}

/**
 * binder_alloc_get_allocated_count() - return count of buffers
 * @alloc: binder_alloc for this proc
 *
 * Return: count of allocated buffers
 */
int binder_alloc_get_allocated_count(struct binder_alloc *alloc)
{
        struct rb_node *n;
        int count = 0;

        mutex_lock(&alloc->mutex);
        for (n = rb_first(&alloc->allocated_buffers); n != NULL; n = rb_next(n))
                count++;
        mutex_unlock(&alloc->mutex);
        return count;
}

/**
 * binder_alloc_vma_close() - invalidate address space
 * @alloc: binder_alloc for this proc
 *
 * Called from binder_vma_close() when releasing address space.
 * Clears alloc->vma to prevent new incoming transactions from
 * allocating more buffers.
 */
void binder_alloc_vma_close(struct binder_alloc *alloc)
{
        binder_alloc_set_vma(alloc, NULL);
}

/**
 * binder_alloc_free_page() - shrinker callback to free pages
 * @item:   item to free
 * @lru:    list_lru instance of the item
 * @lock:   lock protecting the item
 * @cb_arg: callback argument
 *
 * Called from list_lru_walk() in binder_shrink_scan() to free
 * up pages when the system is under memory pressure.
 */
enum lru_status binder_alloc_free_page(struct list_head *item,
                                       struct list_lru_one *lru,
                                       spinlock_t *lock,
                                       void *cb_arg)
{
        struct mm_struct *mm = NULL;
        struct binder_lru_page *page = container_of(item,
                                                    struct binder_lru_page,
                                                    lru);
        struct binder_alloc *alloc;
        uintptr_t page_addr;
        size_t index;
        struct vm_area_struct *vma;

        alloc = page->alloc;
        if (!mutex_trylock(&alloc->mutex))
                goto err_get_alloc_mutex_failed;

        if (!page->page_ptr)
                goto err_page_already_freed;

        index = page - alloc->pages;
        page_addr = (uintptr_t)alloc->buffer + index * PAGE_SIZE;
        vma = binder_alloc_get_vma(alloc);
        if (vma) {
                if (!mmget_not_zero(alloc->vma_vm_mm))
                        goto err_mmget;
                mm = alloc->vma_vm_mm;
                if (!down_write_trylock(&mm->mmap_sem))
                        goto err_down_write_mmap_sem_failed;
        }

        list_lru_isolate(lru, item);
        spin_unlock(lock);

        if (vma) {
                trace_binder_unmap_user_start(alloc, index);

                zap_page_range(vma,
                               page_addr + alloc->user_buffer_offset,
                               PAGE_SIZE);

                trace_binder_unmap_user_end(alloc, index);

                up_write(&mm->mmap_sem);
                mmput(mm);
        }

        trace_binder_unmap_kernel_start(alloc, index);

        unmap_kernel_range(page_addr, PAGE_SIZE);
        __free_page(page->page_ptr);
        page->page_ptr = NULL;

        trace_binder_unmap_kernel_end(alloc, index);

        spin_lock(lock);
        mutex_unlock(&alloc->mutex);
        return LRU_REMOVED_RETRY;

err_down_write_mmap_sem_failed:
        mmput_async(mm);
err_mmget:
err_page_already_freed:
        mutex_unlock(&alloc->mutex);
err_get_alloc_mutex_failed:
        return LRU_SKIP;
}

static unsigned long
binder_shrink_count(struct shrinker *shrink, struct shrink_control *sc)
{
        unsigned long ret = list_lru_count(&binder_alloc_lru);

        return ret;
}

static unsigned long
binder_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
{
        unsigned long ret;

        ret = list_lru_walk(&binder_alloc_lru, binder_alloc_free_page,
                            NULL, sc->nr_to_scan);
        return ret;
}

static struct shrinker binder_shrinker = {
        .count_objects = binder_shrink_count,
        .scan_objects = binder_shrink_scan,
        .seeks = DEFAULT_SEEKS,
};

/**
 * binder_alloc_init() - called by binder_open() for per-proc initialization
 * @alloc: binder_alloc for this proc
 *
 * Called from binder_open() to initialize binder_alloc fields for
 * a new binder proc
 */
void binder_alloc_init(struct binder_alloc *alloc)
{
        alloc->pid = current->group_leader->pid;
        mutex_init(&alloc->mutex);
        INIT_LIST_HEAD(&alloc->buffers);
}

int binder_alloc_shrinker_init(void)
{
        int ret = list_lru_init(&binder_alloc_lru);

        if (ret == 0) {
                ret = register_shrinker(&binder_shrinker);
                if (ret)
                        list_lru_destroy(&binder_alloc_lru);
        }
        return ret;
}