// SPDX-License-Identifier: GPL-2.0-only
/* binder_alloc.c
 *
 * Android IPC Subsystem
 *
 * Copyright (C) 2007-2017 Google, Inc.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/list.h>
#include <linux/sched/mm.h>
#include <linux/module.h>
#include <linux/rtmutex.h>
#include <linux/rbtree.h>
#include <linux/seq_file.h>
#include <linux/vmalloc.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/list_lru.h>
#include <linux/ratelimit.h>
#include <asm/cacheflush.h>
#include <linux/uaccess.h>
#include <linux/highmem.h>
#include <linux/sizes.h>
#include "binder_alloc.h"
#include "binder_trace.h"
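
/*
 * binder_alloc_lru holds pages that still back the mapping but whose buffer
 * space has been freed; the shrinker registered below reclaims them under
 * memory pressure.  binder_alloc_mmap_lock serializes the one-time mmap
 * setup done per process.
 */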

struct list_lru binder_alloc_lru;

static DEFINE_MUTEX(binder_alloc_mmap_lock);

enum {
	BINDER_DEBUG_USER_ERROR             = 1U << 0,
	BINDER_DEBUG_OPEN_CLOSE             = 1U << 1,
	BINDER_DEBUG_BUFFER_ALLOC           = 1U << 2,
	BINDER_DEBUG_BUFFER_ALLOC_ASYNC     = 1U << 3,
};
static uint32_t binder_alloc_debug_mask = BINDER_DEBUG_USER_ERROR;

module_param_named(debug_mask, binder_alloc_debug_mask,
		   uint, 0644);

#define binder_alloc_debug(mask, x...) \
	do { \
		if (binder_alloc_debug_mask & mask) \
			pr_info_ratelimited(x); \
	} while (0)

static struct binder_buffer *binder_buffer_next(struct binder_buffer *buffer)
{
	return list_entry(buffer->entry.next, struct binder_buffer, entry);
}

static struct binder_buffer *binder_buffer_prev(struct binder_buffer *buffer)
{
	return list_entry(buffer->entry.prev, struct binder_buffer, entry);
}
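
/*
 * Buffers are laid out back to back in the mapped region, so a buffer's
 * usable size is the gap between its own user_data and the next buffer's
 * user_data (or the end of the mapping for the last buffer).
 */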

static size_t binder_alloc_buffer_size(struct binder_alloc *alloc,
				       struct binder_buffer *buffer)
{
	if (list_is_last(&buffer->entry, &alloc->buffers))
		return alloc->buffer + alloc->buffer_size - buffer->user_data;
	return binder_buffer_next(buffer)->user_data - buffer->user_data;
}
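
/*
 * Free buffers are kept in an rbtree ordered by size so allocation can do a
 * best-fit search; allocated buffers are kept in a separate rbtree ordered
 * by user address so they can be found again when userspace frees them.
 */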

static void binder_insert_free_buffer(struct binder_alloc *alloc,
				      struct binder_buffer *new_buffer)
{
	struct rb_node **p = &alloc->free_buffers.rb_node;
	struct rb_node *parent = NULL;
	struct binder_buffer *buffer;
	size_t buffer_size;
	size_t new_buffer_size;

	BUG_ON(!new_buffer->free);

	new_buffer_size = binder_alloc_buffer_size(alloc, new_buffer);

	binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
			   "%d: add free buffer, size %zd, at %pK\n",
			   alloc->pid, new_buffer_size, new_buffer);

	while (*p) {
		parent = *p;
		buffer = rb_entry(parent, struct binder_buffer, rb_node);
		BUG_ON(!buffer->free);

		buffer_size = binder_alloc_buffer_size(alloc, buffer);

		if (new_buffer_size < buffer_size)
			p = &parent->rb_left;
		else
			p = &parent->rb_right;
	}
	rb_link_node(&new_buffer->rb_node, parent, p);
	rb_insert_color(&new_buffer->rb_node, &alloc->free_buffers);
}

static void binder_insert_allocated_buffer_locked(
		struct binder_alloc *alloc, struct binder_buffer *new_buffer)
{
	struct rb_node **p = &alloc->allocated_buffers.rb_node;
	struct rb_node *parent = NULL;
	struct binder_buffer *buffer;

	BUG_ON(new_buffer->free);

	while (*p) {
		parent = *p;
		buffer = rb_entry(parent, struct binder_buffer, rb_node);
		BUG_ON(buffer->free);

		if (new_buffer->user_data < buffer->user_data)
			p = &parent->rb_left;
		else if (new_buffer->user_data > buffer->user_data)
			p = &parent->rb_right;
		else
			BUG();
	}
	rb_link_node(&new_buffer->rb_node, parent, p);
	rb_insert_color(&new_buffer->rb_node, &alloc->allocated_buffers);
}

static struct binder_buffer *binder_alloc_prepare_to_free_locked(
		struct binder_alloc *alloc,
		uintptr_t user_ptr)
{
	struct rb_node *n = alloc->allocated_buffers.rb_node;
	struct binder_buffer *buffer;
	void __user *uptr;

	uptr = (void __user *)user_ptr;

	while (n) {
		buffer = rb_entry(n, struct binder_buffer, rb_node);
		BUG_ON(buffer->free);

		if (uptr < buffer->user_data)
			n = n->rb_left;
		else if (uptr > buffer->user_data)
			n = n->rb_right;
		else {
			/*
			 * Guard against user threads attempting to
			 * free the buffer when in use by kernel or
			 * after it's already been freed.
			 */
			if (!buffer->allow_user_free)
				return ERR_PTR(-EPERM);
			buffer->allow_user_free = 0;
			return buffer;
		}
	}
	return NULL;
}

/**
 * binder_alloc_prepare_to_free() - get buffer given user ptr
 * @alloc:	binder_alloc for this proc
 * @user_ptr:	User pointer to buffer data
 *
 * Validate userspace pointer to buffer data and return buffer corresponding to
 * that user pointer. Search the rb tree for buffer that matches user data
 * pointer.
 *
 * Return:	Pointer to buffer or NULL
 */
struct binder_buffer *binder_alloc_prepare_to_free(struct binder_alloc *alloc,
						   uintptr_t user_ptr)
{
	struct binder_buffer *buffer;

	mutex_lock(&alloc->mutex);
	buffer = binder_alloc_prepare_to_free_locked(alloc, user_ptr);
	mutex_unlock(&alloc->mutex);
	return buffer;
}
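
/*
 * Populate (allocate == 1) or release (allocate == 0) the physical pages
 * backing the page-aligned range [start, end).  Released pages are not freed
 * right away; they are parked on binder_alloc_lru so the shrinker can reclaim
 * them lazily, and a later allocation can pull them back off the LRU without
 * re-faulting.
 */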

static int binder_update_page_range(struct binder_alloc *alloc, int allocate,
				    void __user *start, void __user *end)
{
	void __user *page_addr;
	unsigned long user_page_addr;
	struct binder_lru_page *page;
	struct vm_area_struct *vma = NULL;
	struct mm_struct *mm = NULL;
	bool need_mm = false;

	binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
			   "%d: %s pages %pK-%pK\n", alloc->pid,
			   allocate ? "allocate" : "free", start, end);

	if (end <= start)
		return 0;

	trace_binder_update_page_range(alloc, allocate, start, end);

	if (allocate == 0)
		goto free_range;

	for (page_addr = start; page_addr < end; page_addr += PAGE_SIZE) {
		page = &alloc->pages[(page_addr - alloc->buffer) / PAGE_SIZE];
		if (!page->page_ptr) {
			need_mm = true;
			break;
		}
	}

	if (need_mm && mmget_not_zero(alloc->vma_vm_mm))
		mm = alloc->vma_vm_mm;

	if (mm) {
		mmap_read_lock(mm);
		vma = alloc->vma;
	}

	if (!vma && need_mm) {
		binder_alloc_debug(BINDER_DEBUG_USER_ERROR,
				   "%d: binder_alloc_buf failed to map pages in userspace, no vma\n",
				   alloc->pid);
		goto err_no_vma;
	}

	for (page_addr = start; page_addr < end; page_addr += PAGE_SIZE) {
		int ret;
		bool on_lru;
		size_t index;

		index = (page_addr - alloc->buffer) / PAGE_SIZE;
		page = &alloc->pages[index];

		if (page->page_ptr) {
			trace_binder_alloc_lru_start(alloc, index);

			on_lru = list_lru_del(&binder_alloc_lru, &page->lru);
			WARN_ON(!on_lru);

			trace_binder_alloc_lru_end(alloc, index);
			continue;
		}

		if (WARN_ON(!vma))
			goto err_page_ptr_cleared;

		trace_binder_alloc_page_start(alloc, index);
		page->page_ptr = alloc_page(GFP_KERNEL |
					    __GFP_HIGHMEM |
					    __GFP_ZERO);
		if (!page->page_ptr) {
			pr_err("%d: binder_alloc_buf failed for page at %pK\n",
			       alloc->pid, page_addr);
			goto err_alloc_page_failed;
		}
		page->alloc = alloc;
		INIT_LIST_HEAD(&page->lru);

		user_page_addr = (uintptr_t)page_addr;
		ret = vm_insert_page(vma, user_page_addr, page[0].page_ptr);
		if (ret) {
			pr_err("%d: binder_alloc_buf failed to map page at %lx in userspace\n",
			       alloc->pid, user_page_addr);
			goto err_vm_insert_page_failed;
		}

		if (index + 1 > alloc->pages_high)
			alloc->pages_high = index + 1;

		trace_binder_alloc_page_end(alloc, index);
	}
	if (mm) {
		mmap_read_unlock(mm);
		mmput(mm);
	}
	return 0;

free_range:
	for (page_addr = end - PAGE_SIZE; 1; page_addr -= PAGE_SIZE) {
		bool ret;
		size_t index;

		index = (page_addr - alloc->buffer) / PAGE_SIZE;
		page = &alloc->pages[index];

		trace_binder_free_lru_start(alloc, index);

		ret = list_lru_add(&binder_alloc_lru, &page->lru);
		WARN_ON(!ret);

		trace_binder_free_lru_end(alloc, index);
		if (page_addr == start)
			break;
		continue;

err_vm_insert_page_failed:
		__free_page(page->page_ptr);
		page->page_ptr = NULL;
err_alloc_page_failed:
err_page_ptr_cleared:
		if (page_addr == start)
			break;
	}
err_no_vma:
	if (mm) {
		mmap_read_unlock(mm);
		mmput(mm);
	}
	return vma ? -ENOMEM : -ESRCH;
}

static inline void binder_alloc_set_vma(struct binder_alloc *alloc,
		struct vm_area_struct *vma)
{
	if (vma)
		alloc->vma_vm_mm = vma->vm_mm;
	/*
	 * If we see alloc->vma is not NULL, buffer data structures set up
	 * completely. Look at smp_rmb side binder_alloc_get_vma.
	 * We also want to guarantee new alloc->vma_vm_mm is always visible
	 * if alloc->vma is set.
	 */
	smp_wmb();
	alloc->vma = vma;
}

static inline struct vm_area_struct *binder_alloc_get_vma(
		struct binder_alloc *alloc)
{
	struct vm_area_struct *vma = NULL;

	if (alloc->vma) {
		/* Look at description in binder_alloc_set_vma */
		smp_rmb();
		vma = alloc->vma;
	}
	return vma;
}

static void debug_low_async_space_locked(struct binder_alloc *alloc, int pid)
{
	/*
	 * Find the amount and size of buffers allocated by the current caller;
	 * The idea is that once we cross the threshold, whoever is responsible
	 * for the low async space is likely to try to send another async txn,
	 * and at some point we'll catch them in the act. This is more efficient
	 * than keeping a map per pid.
	 */
	struct rb_node *n;
	struct binder_buffer *buffer;
	size_t total_alloc_size = 0;
	size_t num_buffers = 0;

	for (n = rb_first(&alloc->allocated_buffers); n != NULL;
	     n = rb_next(n)) {
		buffer = rb_entry(n, struct binder_buffer, rb_node);
		if (buffer->pid != pid)
			continue;
		if (!buffer->async_transaction)
			continue;
		total_alloc_size += binder_alloc_buffer_size(alloc, buffer)
			+ sizeof(struct binder_buffer);
		num_buffers++;
	}

	/*
	 * Warn if this pid has more than 50 transactions, or more than 50% of
	 * async space (which is 25% of total buffer size).
	 */
	if (num_buffers > 50 || total_alloc_size > alloc->buffer_size / 4) {
		binder_alloc_debug(BINDER_DEBUG_USER_ERROR,
				   "%d: pid %d spamming oneway? %zd buffers allocated for a total size of %zd\n",
				   alloc->pid, pid, num_buffers, total_alloc_size);
	}
}
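
/*
 * Carve a buffer out of the mapped region: do a best-fit search of the free
 * tree, populate the pages the new buffer needs, and split off the unused
 * tail of the chosen free buffer as a new free buffer when it is larger than
 * the request.
 */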

static struct binder_buffer *binder_alloc_new_buf_locked(
				struct binder_alloc *alloc,
				size_t data_size,
				size_t offsets_size,
				size_t extra_buffers_size,
				int is_async,
				int pid)
{
	struct rb_node *n = alloc->free_buffers.rb_node;
	struct binder_buffer *buffer;
	size_t buffer_size;
	struct rb_node *best_fit = NULL;
	void __user *has_page_addr;
	void __user *end_page_addr;
	size_t size, data_offsets_size;
	int ret;

	if (!binder_alloc_get_vma(alloc)) {
		binder_alloc_debug(BINDER_DEBUG_USER_ERROR,
				   "%d: binder_alloc_buf, no vma\n",
				   alloc->pid);
		return ERR_PTR(-ESRCH);
	}

	data_offsets_size = ALIGN(data_size, sizeof(void *)) +
		ALIGN(offsets_size, sizeof(void *));

	if (data_offsets_size < data_size || data_offsets_size < offsets_size) {
		binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
				   "%d: got transaction with invalid size %zd-%zd\n",
				   alloc->pid, data_size, offsets_size);
		return ERR_PTR(-EINVAL);
	}
	size = data_offsets_size + ALIGN(extra_buffers_size, sizeof(void *));
	if (size < data_offsets_size || size < extra_buffers_size) {
		binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
				   "%d: got transaction with invalid extra_buffers_size %zd\n",
				   alloc->pid, extra_buffers_size);
		return ERR_PTR(-EINVAL);
	}
	if (is_async &&
	    alloc->free_async_space < size + sizeof(struct binder_buffer)) {
		binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
				   "%d: binder_alloc_buf size %zd failed, no async space left\n",
				   alloc->pid, size);
		return ERR_PTR(-ENOSPC);
	}

	/* Pad 0-size buffers so they get assigned unique addresses */
	size = max(size, sizeof(void *));

	while (n) {
		buffer = rb_entry(n, struct binder_buffer, rb_node);
		BUG_ON(!buffer->free);
		buffer_size = binder_alloc_buffer_size(alloc, buffer);

		if (size < buffer_size) {
			best_fit = n;
			n = n->rb_left;
		} else if (size > buffer_size)
			n = n->rb_right;
		else {
			best_fit = n;
			break;
		}
	}
	if (best_fit == NULL) {
		size_t allocated_buffers = 0;
		size_t largest_alloc_size = 0;
		size_t total_alloc_size = 0;
		size_t free_buffers = 0;
		size_t largest_free_size = 0;
		size_t total_free_size = 0;

		for (n = rb_first(&alloc->allocated_buffers); n != NULL;
		     n = rb_next(n)) {
			buffer = rb_entry(n, struct binder_buffer, rb_node);
			buffer_size = binder_alloc_buffer_size(alloc, buffer);
			allocated_buffers++;
			total_alloc_size += buffer_size;
			if (buffer_size > largest_alloc_size)
				largest_alloc_size = buffer_size;
		}
		for (n = rb_first(&alloc->free_buffers); n != NULL;
		     n = rb_next(n)) {
			buffer = rb_entry(n, struct binder_buffer, rb_node);
			buffer_size = binder_alloc_buffer_size(alloc, buffer);
			free_buffers++;
			total_free_size += buffer_size;
			if (buffer_size > largest_free_size)
				largest_free_size = buffer_size;
		}
		binder_alloc_debug(BINDER_DEBUG_USER_ERROR,
				   "%d: binder_alloc_buf size %zd failed, no address space\n",
				   alloc->pid, size);
		binder_alloc_debug(BINDER_DEBUG_USER_ERROR,
				   "allocated: %zd (num: %zd largest: %zd), free: %zd (num: %zd largest: %zd)\n",
				   total_alloc_size, allocated_buffers,
				   largest_alloc_size, total_free_size,
				   free_buffers, largest_free_size);
		return ERR_PTR(-ENOSPC);
	}
	if (n == NULL) {
		buffer = rb_entry(best_fit, struct binder_buffer, rb_node);
		buffer_size = binder_alloc_buffer_size(alloc, buffer);
	}

	binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
			   "%d: binder_alloc_buf size %zd got buffer %pK size %zd\n",
			   alloc->pid, size, buffer, buffer_size);

	has_page_addr = (void __user *)
		(((uintptr_t)buffer->user_data + buffer_size) & PAGE_MASK);
	WARN_ON(n && buffer_size != size);
	end_page_addr =
		(void __user *)PAGE_ALIGN((uintptr_t)buffer->user_data + size);
	if (end_page_addr > has_page_addr)
		end_page_addr = has_page_addr;
	ret = binder_update_page_range(alloc, 1, (void __user *)
		PAGE_ALIGN((uintptr_t)buffer->user_data), end_page_addr);
	if (ret)
		return ERR_PTR(ret);

	if (buffer_size != size) {
		struct binder_buffer *new_buffer;

		new_buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
		if (!new_buffer) {
			pr_err("%s: %d failed to alloc new buffer struct\n",
			       __func__, alloc->pid);
			goto err_alloc_buf_struct_failed;
		}
		new_buffer->user_data = (u8 __user *)buffer->user_data + size;
		list_add(&new_buffer->entry, &buffer->entry);
		new_buffer->free = 1;
		binder_insert_free_buffer(alloc, new_buffer);
	}

	rb_erase(best_fit, &alloc->free_buffers);
	buffer->free = 0;
	buffer->allow_user_free = 0;
	binder_insert_allocated_buffer_locked(alloc, buffer);
	binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
			   "%d: binder_alloc_buf size %zd got %pK\n",
			   alloc->pid, size, buffer);
	buffer->data_size = data_size;
	buffer->offsets_size = offsets_size;
	buffer->async_transaction = is_async;
	buffer->extra_buffers_size = extra_buffers_size;
	buffer->pid = pid;
	if (is_async) {
		alloc->free_async_space -= size + sizeof(struct binder_buffer);
		binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC_ASYNC,
				   "%d: binder_alloc_buf size %zd async free %zd\n",
				   alloc->pid, size, alloc->free_async_space);
		if (alloc->free_async_space < alloc->buffer_size / 10) {
			/*
			 * Start detecting spammers once we have less than 20%
			 * of async space left (which is less than 10% of total
			 * buffer size).
			 */
			debug_low_async_space_locked(alloc, pid);
		}
	}
	return buffer;

err_alloc_buf_struct_failed:
	binder_update_page_range(alloc, 0, (void __user *)
				 PAGE_ALIGN((uintptr_t)buffer->user_data),
				 end_page_addr);
	return ERR_PTR(-ENOMEM);
}

/**
 * binder_alloc_new_buf() - Allocate a new binder buffer
 * @alloc:              binder_alloc for this proc
 * @data_size:          size of user data buffer
 * @offsets_size:       user specified buffer offset
 * @extra_buffers_size: size of extra space for meta-data (eg, security context)
 * @is_async:           buffer for async transaction
 * @pid:                pid to attribute allocation to (used for debugging)
 *
 * Allocate a new buffer given the requested sizes. Returns
 * the kernel version of the buffer pointer. The size allocated
 * is the sum of the three given sizes (each rounded up to
 * pointer-sized boundary)
 *
 * Return:	The allocated buffer or %ERR_PTR(-errno) on error
 */
struct binder_buffer *binder_alloc_new_buf(struct binder_alloc *alloc,
					   size_t data_size,
					   size_t offsets_size,
					   size_t extra_buffers_size,
					   int is_async,
					   int pid)
{
	struct binder_buffer *buffer;

	mutex_lock(&alloc->mutex);
	buffer = binder_alloc_new_buf_locked(alloc, data_size, offsets_size,
					     extra_buffers_size, is_async, pid);
	mutex_unlock(&alloc->mutex);
	return buffer;
}

static void __user *buffer_start_page(struct binder_buffer *buffer)
{
	return (void __user *)((uintptr_t)buffer->user_data & PAGE_MASK);
}

static void __user *prev_buffer_end_page(struct binder_buffer *buffer)
{
	return (void __user *)
		(((uintptr_t)(buffer->user_data) - 1) & PAGE_MASK);
}
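
/*
 * Drop the binder_buffer struct for a free buffer and release the page it
 * starts on, but only if neither the previous nor the next buffer still uses
 * that page (buffers are not page aligned, so neighbours can share a page).
 */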

static void binder_delete_free_buffer(struct binder_alloc *alloc,
				      struct binder_buffer *buffer)
{
	struct binder_buffer *prev, *next = NULL;
	bool to_free = true;

	BUG_ON(alloc->buffers.next == &buffer->entry);
	prev = binder_buffer_prev(buffer);
	BUG_ON(!prev->free);
	if (prev_buffer_end_page(prev) == buffer_start_page(buffer)) {
		to_free = false;
		binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
				   "%d: merge free, buffer %pK share page with %pK\n",
				   alloc->pid, buffer->user_data,
				   prev->user_data);
	}

	if (!list_is_last(&buffer->entry, &alloc->buffers)) {
		next = binder_buffer_next(buffer);
		if (buffer_start_page(next) == buffer_start_page(buffer)) {
			to_free = false;
			binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
					   "%d: merge free, buffer %pK share page with %pK\n",
					   alloc->pid,
					   buffer->user_data,
					   next->user_data);
		}
	}

	if (PAGE_ALIGNED(buffer->user_data)) {
		binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
				   "%d: merge free, buffer start %pK is page aligned\n",
				   alloc->pid, buffer->user_data);
		to_free = false;
	}

	if (to_free) {
		binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
				   "%d: merge free, buffer %pK do not share page with %pK or %pK\n",
				   alloc->pid, buffer->user_data,
				   prev->user_data,
				   next ? next->user_data : NULL);
		binder_update_page_range(alloc, 0, buffer_start_page(buffer),
					 buffer_start_page(buffer) + PAGE_SIZE);
	}
	list_del(&buffer->entry);
	kfree(buffer);
}

static void binder_free_buf_locked(struct binder_alloc *alloc,
				   struct binder_buffer *buffer)
{
	size_t size, buffer_size;

	buffer_size = binder_alloc_buffer_size(alloc, buffer);

	size = ALIGN(buffer->data_size, sizeof(void *)) +
		ALIGN(buffer->offsets_size, sizeof(void *)) +
		ALIGN(buffer->extra_buffers_size, sizeof(void *));

	binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
			   "%d: binder_free_buf %pK size %zd buffer_size %zd\n",
			   alloc->pid, buffer, size, buffer_size);

	BUG_ON(buffer->free);
	BUG_ON(size > buffer_size);
	BUG_ON(buffer->transaction != NULL);
	BUG_ON(buffer->user_data < alloc->buffer);
	BUG_ON(buffer->user_data > alloc->buffer + alloc->buffer_size);

	if (buffer->async_transaction) {
		alloc->free_async_space += size + sizeof(struct binder_buffer);

		binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC_ASYNC,
				   "%d: binder_free_buf size %zd async free %zd\n",
				   alloc->pid, size, alloc->free_async_space);
	}

	binder_update_page_range(alloc, 0,
		(void __user *)PAGE_ALIGN((uintptr_t)buffer->user_data),
		(void __user *)(((uintptr_t)
			  buffer->user_data + buffer_size) & PAGE_MASK));

	rb_erase(&buffer->rb_node, &alloc->allocated_buffers);
	buffer->free = 1;
	if (!list_is_last(&buffer->entry, &alloc->buffers)) {
		struct binder_buffer *next = binder_buffer_next(buffer);

		if (next->free) {
			rb_erase(&next->rb_node, &alloc->free_buffers);
			binder_delete_free_buffer(alloc, next);
		}
	}
	if (alloc->buffers.next != &buffer->entry) {
		struct binder_buffer *prev = binder_buffer_prev(buffer);

		if (prev->free) {
			binder_delete_free_buffer(alloc, buffer);
			rb_erase(&prev->rb_node, &alloc->free_buffers);
			buffer = prev;
		}
	}
	binder_insert_free_buffer(alloc, buffer);
}

static void binder_alloc_clear_buf(struct binder_alloc *alloc,
				   struct binder_buffer *buffer);
/**
 * binder_alloc_free_buf() - free a binder buffer
 * @alloc:	binder_alloc for this proc
 * @buffer:	kernel pointer to buffer
 *
 * Free the buffer allocated via binder_alloc_new_buf()
 */
void binder_alloc_free_buf(struct binder_alloc *alloc,
			   struct binder_buffer *buffer)
{
	/*
	 * We could eliminate the call to binder_alloc_clear_buf()
	 * from binder_alloc_deferred_release() by moving this to
	 * binder_alloc_free_buf_locked(). However, that could
	 * increase contention for the alloc mutex if clear_on_free
	 * is used frequently for large buffers. The mutex is not
	 * needed for correctness here.
	 */
	if (buffer->clear_on_free) {
		binder_alloc_clear_buf(alloc, buffer);
		buffer->clear_on_free = false;
	}
	mutex_lock(&alloc->mutex);
	binder_free_buf_locked(alloc, buffer);
	mutex_unlock(&alloc->mutex);
}

/**
 * binder_alloc_mmap_handler() - map virtual address space for proc
 * @alloc:	alloc structure for this proc
 * @vma:	vma passed to mmap()
 *
 * Called by binder_mmap() to initialize the space specified in
 * vma for allocating binder buffers
 *
 * Return:
 *      0 = success
 *      -EBUSY = address space already mapped
 *      -ENOMEM = failed to map memory to given address space
 */
int binder_alloc_mmap_handler(struct binder_alloc *alloc,
			      struct vm_area_struct *vma)
{
	int ret;
	const char *failure_string;
	struct binder_buffer *buffer;

	mutex_lock(&binder_alloc_mmap_lock);
	if (alloc->buffer_size) {
		ret = -EBUSY;
		failure_string = "already mapped";
		goto err_already_mapped;
	}
	alloc->buffer_size = min_t(unsigned long, vma->vm_end - vma->vm_start,
				   SZ_4M);
	mutex_unlock(&binder_alloc_mmap_lock);

	alloc->buffer = (void __user *)vma->vm_start;

	alloc->pages = kcalloc(alloc->buffer_size / PAGE_SIZE,
			       sizeof(alloc->pages[0]),
			       GFP_KERNEL);
	if (alloc->pages == NULL) {
		ret = -ENOMEM;
		failure_string = "alloc page array";
		goto err_alloc_pages_failed;
	}

	buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
	if (!buffer) {
		ret = -ENOMEM;
		failure_string = "alloc buffer struct";
		goto err_alloc_buf_struct_failed;
	}

	buffer->user_data = alloc->buffer;
	list_add(&buffer->entry, &alloc->buffers);
	buffer->free = 1;
	binder_insert_free_buffer(alloc, buffer);
	alloc->free_async_space = alloc->buffer_size / 2;
	binder_alloc_set_vma(alloc, vma);
	mmgrab(alloc->vma_vm_mm);

	return 0;

err_alloc_buf_struct_failed:
	kfree(alloc->pages);
	alloc->pages = NULL;
err_alloc_pages_failed:
	alloc->buffer = NULL;
	mutex_lock(&binder_alloc_mmap_lock);
	alloc->buffer_size = 0;
err_already_mapped:
	mutex_unlock(&binder_alloc_mmap_lock);
	binder_alloc_debug(BINDER_DEBUG_USER_ERROR,
			   "%s: %d %lx-%lx %s failed %d\n", __func__,
			   alloc->pid, vma->vm_start, vma->vm_end,
			   failure_string, ret);
	return ret;
}

void binder_alloc_deferred_release(struct binder_alloc *alloc)
{
	struct rb_node *n;
	int buffers, page_count;
	struct binder_buffer *buffer;

	buffers = 0;
	mutex_lock(&alloc->mutex);

	while ((n = rb_first(&alloc->allocated_buffers))) {
		buffer = rb_entry(n, struct binder_buffer, rb_node);

		/* Transaction should already have been freed */
		BUG_ON(buffer->transaction);

		if (buffer->clear_on_free) {
			binder_alloc_clear_buf(alloc, buffer);
			buffer->clear_on_free = false;
		}
		binder_free_buf_locked(alloc, buffer);
		buffers++;
	}

	while (!list_empty(&alloc->buffers)) {
		buffer = list_first_entry(&alloc->buffers,
					  struct binder_buffer, entry);
		WARN_ON(!buffer->free);

		list_del(&buffer->entry);
		WARN_ON_ONCE(!list_empty(&alloc->buffers));
		kfree(buffer);
	}

	page_count = 0;
	if (alloc->pages) {
		int i;

		for (i = 0; i < alloc->buffer_size / PAGE_SIZE; i++) {
			void __user *page_addr;
			bool on_lru;

			if (!alloc->pages[i].page_ptr)
				continue;

			on_lru = list_lru_del(&binder_alloc_lru,
					      &alloc->pages[i].lru);
			page_addr = alloc->buffer + i * PAGE_SIZE;
			binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
					   "%s: %d: page %d at %pK %s\n",
					   __func__, alloc->pid, i, page_addr,
					   on_lru ? "on lru" : "active");
			__free_page(alloc->pages[i].page_ptr);
			page_count++;
		}
		kfree(alloc->pages);
	}
	mutex_unlock(&alloc->mutex);
	if (alloc->vma_vm_mm)
		mmdrop(alloc->vma_vm_mm);

	binder_alloc_debug(BINDER_DEBUG_OPEN_CLOSE,
			   "%s: %d buffers %d, pages %d\n",
			   __func__, alloc->pid, buffers, page_count);
}

static void print_binder_buffer(struct seq_file *m, const char *prefix,
				struct binder_buffer *buffer)
{
	seq_printf(m, "%s %d: %pK size %zd:%zd:%zd %s\n",
		   prefix, buffer->debug_id, buffer->user_data,
		   buffer->data_size, buffer->offsets_size,
		   buffer->extra_buffers_size,
		   buffer->transaction ? "active" : "delivered");
}

/**
 * binder_alloc_print_allocated() - print buffer info
 * @m:     seq_file for output via seq_printf()
 * @alloc: binder_alloc for this proc
 *
 * Prints information about every buffer associated with
 * the binder_alloc state to the given seq_file
 */
void binder_alloc_print_allocated(struct seq_file *m,
				  struct binder_alloc *alloc)
{
	struct rb_node *n;

	mutex_lock(&alloc->mutex);
	for (n = rb_first(&alloc->allocated_buffers); n != NULL; n = rb_next(n))
		print_binder_buffer(m, "  buffer",
				    rb_entry(n, struct binder_buffer, rb_node));
	mutex_unlock(&alloc->mutex);
}

/**
 * binder_alloc_print_pages() - print page usage
 * @m:     seq_file for output via seq_printf()
 * @alloc: binder_alloc for this proc
 */
void binder_alloc_print_pages(struct seq_file *m,
			      struct binder_alloc *alloc)
{
	struct binder_lru_page *page;
	int i;
	int active = 0;
	int lru = 0;
	int free = 0;

	mutex_lock(&alloc->mutex);
	/*
	 * Make sure the binder_alloc is fully initialized, otherwise we might
	 * read inconsistent state.
	 */
	if (binder_alloc_get_vma(alloc) != NULL) {
		for (i = 0; i < alloc->buffer_size / PAGE_SIZE; i++) {
			page = &alloc->pages[i];
			if (!page->page_ptr)
				free++;
			else if (list_empty(&page->lru))
				active++;
			else
				lru++;
		}
	}
	mutex_unlock(&alloc->mutex);
	seq_printf(m, "  pages: %d:%d:%d\n", active, lru, free);
	seq_printf(m, "  pages high watermark: %zu\n", alloc->pages_high);
}

/**
 * binder_alloc_get_allocated_count() - return count of buffers
 * @alloc: binder_alloc for this proc
 *
 * Return: count of allocated buffers
 */
int binder_alloc_get_allocated_count(struct binder_alloc *alloc)
{
	struct rb_node *n;
	int count = 0;

	mutex_lock(&alloc->mutex);
	for (n = rb_first(&alloc->allocated_buffers); n != NULL; n = rb_next(n))
		count++;
	mutex_unlock(&alloc->mutex);
	return count;
}

/**
 * binder_alloc_vma_close() - invalidate address space
 * @alloc: binder_alloc for this proc
 *
 * Called from binder_vma_close() when releasing address space.
 * Clears alloc->vma to prevent new incoming transactions from
 * allocating more buffers.
 */
void binder_alloc_vma_close(struct binder_alloc *alloc)
{
	binder_alloc_set_vma(alloc, NULL);
}

/**
 * binder_alloc_free_page() - shrinker callback to free pages
 * @item:   item to free
 * @lock:   lock protecting the item
 * @cb_arg: callback argument
 *
 * Called from list_lru_walk() in binder_shrink_scan() to free
 * up pages when the system is under memory pressure.
 */
enum lru_status binder_alloc_free_page(struct list_head *item,
				       struct list_lru_one *lru,
				       spinlock_t *lock,
				       void *cb_arg)
	__must_hold(lock)
{
	struct mm_struct *mm = NULL;
	struct binder_lru_page *page = container_of(item,
						    struct binder_lru_page,
						    lru);
	struct binder_alloc *alloc;
	uintptr_t page_addr;
	size_t index;
	struct vm_area_struct *vma;

	alloc = page->alloc;
	if (!mutex_trylock(&alloc->mutex))
		goto err_get_alloc_mutex_failed;

	if (!page->page_ptr)
		goto err_page_already_freed;

	index = page - alloc->pages;
	page_addr = (uintptr_t)alloc->buffer + index * PAGE_SIZE;

	mm = alloc->vma_vm_mm;
	if (!mmget_not_zero(mm))
		goto err_mmget;
	if (!mmap_read_trylock(mm))
		goto err_mmap_read_lock_failed;
	vma = binder_alloc_get_vma(alloc);

	list_lru_isolate(lru, item);
	spin_unlock(lock);

	if (vma) {
		trace_binder_unmap_user_start(alloc, index);

		zap_page_range(vma, page_addr, PAGE_SIZE);

		trace_binder_unmap_user_end(alloc, index);
	}
	mmap_read_unlock(mm);
	mmput_async(mm);

	trace_binder_unmap_kernel_start(alloc, index);

	__free_page(page->page_ptr);
	page->page_ptr = NULL;

	trace_binder_unmap_kernel_end(alloc, index);

	spin_lock(lock);
	mutex_unlock(&alloc->mutex);
	return LRU_REMOVED_RETRY;

err_mmap_read_lock_failed:
	mmput_async(mm);
err_mmget:
err_page_already_freed:
	mutex_unlock(&alloc->mutex);
err_get_alloc_mutex_failed:
	return LRU_SKIP;
}

static unsigned long
binder_shrink_count(struct shrinker *shrink, struct shrink_control *sc)
{
	unsigned long ret = list_lru_count(&binder_alloc_lru);
	return ret;
}

static unsigned long
binder_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
{
	unsigned long ret;

	ret = list_lru_walk(&binder_alloc_lru, binder_alloc_free_page,
			    NULL, sc->nr_to_scan);
	return ret;
}

static struct shrinker binder_shrinker = {
	.count_objects = binder_shrink_count,
	.scan_objects = binder_shrink_scan,
	.seeks = DEFAULT_SEEKS,
};

/**
 * binder_alloc_init() - called by binder_open() for per-proc initialization
 * @alloc: binder_alloc for this proc
 *
 * Called from binder_open() to initialize binder_alloc fields for
 * new binder proc
 */
void binder_alloc_init(struct binder_alloc *alloc)
{
	alloc->pid = current->group_leader->pid;
	mutex_init(&alloc->mutex);
	INIT_LIST_HEAD(&alloc->buffers);
}

int binder_alloc_shrinker_init(void)
{
	int ret = list_lru_init(&binder_alloc_lru);

	if (ret == 0) {
		ret = register_shrinker(&binder_shrinker);
		if (ret)
			list_lru_destroy(&binder_alloc_lru);
	}
	return ret;
}

/**
 * check_buffer() - verify that buffer/offset is safe to access
 * @alloc: binder_alloc for this proc
 * @buffer: binder buffer to be accessed
 * @offset: offset into @buffer data
 * @bytes: bytes to access from offset
 *
 * Check that the @offset/@bytes are within the size of the given
 * @buffer and that the buffer is currently active and not freeable.
 * Offsets must also be multiples of sizeof(u32). The kernel is
 * allowed to touch the buffer in two cases:
 *
 * 1) when the buffer is being created:
 *     (buffer->free == 0 && buffer->allow_user_free == 0)
 * 2) when the buffer is being torn down:
 *     (buffer->free == 0 && buffer->transaction == NULL).
 *
 * Return: true if the buffer is safe to access
 */
static inline bool check_buffer(struct binder_alloc *alloc,
				struct binder_buffer *buffer,
				binder_size_t offset, size_t bytes)
{
	size_t buffer_size = binder_alloc_buffer_size(alloc, buffer);

	return buffer_size >= bytes &&
		offset <= buffer_size - bytes &&
		IS_ALIGNED(offset, sizeof(u32)) &&
		!buffer->free &&
		(!buffer->allow_user_free || !buffer->transaction);
}

/**
 * binder_alloc_get_page() - get kernel pointer for given buffer offset
 * @alloc: binder_alloc for this proc
 * @buffer: binder buffer to be accessed
 * @buffer_offset: offset into @buffer data
 * @pgoffp: address to copy final page offset to
 *
 * Lookup the struct page corresponding to the address
 * at @buffer_offset into @buffer->user_data. If @pgoffp is not
 * NULL, the byte-offset into the page is written there.
 *
 * The caller is responsible to ensure that the offset points
 * to a valid address within the @buffer and that @buffer is
 * not freeable by the user. Since it can't be freed, we are
 * guaranteed that the corresponding elements of @alloc->pages[]
 * are "busy" and won't be freed.
 *
 * Return: struct page
 */
static struct page *binder_alloc_get_page(struct binder_alloc *alloc,
					   struct binder_buffer *buffer,
					   binder_size_t buffer_offset,
					   pgoff_t *pgoffp)
{
	binder_size_t buffer_space_offset = buffer_offset +
		(buffer->user_data - alloc->buffer);
	pgoff_t pgoff = buffer_space_offset & ~PAGE_MASK;
	size_t index = buffer_space_offset >> PAGE_SHIFT;
	struct binder_lru_page *lru_page;

	lru_page = &alloc->pages[index];
	*pgoffp = pgoff;
	return lru_page->page_ptr;
}

/**
 * binder_alloc_clear_buf() - zero out buffer
 * @alloc: binder_alloc for this proc
 * @buffer: binder buffer to be cleared
 *
 * memset the given buffer to 0
 */
static void binder_alloc_clear_buf(struct binder_alloc *alloc,
				   struct binder_buffer *buffer)
{
	size_t bytes = binder_alloc_buffer_size(alloc, buffer);
	binder_size_t buffer_offset = 0;

	while (bytes) {
		unsigned long size;
		struct page *page;
		pgoff_t pgoff;
		void *kptr;

		page = binder_alloc_get_page(alloc, buffer,
					     buffer_offset, &pgoff);
		size = min_t(size_t, bytes, PAGE_SIZE - pgoff);
		kptr = kmap(page) + pgoff;
		memset(kptr, 0, size);
		kunmap(page);
		bytes -= size;
		buffer_offset += size;
	}
}

/**
 * binder_alloc_copy_user_to_buffer() - copy src user to tgt user
 * @alloc: binder_alloc for this proc
 * @buffer: binder buffer to be accessed
 * @buffer_offset: offset into @buffer data
 * @from: userspace pointer to source buffer
 * @bytes: bytes to copy
 *
 * Copy bytes from source userspace to target buffer.
 *
 * Return: bytes remaining to be copied
 */
unsigned long
binder_alloc_copy_user_to_buffer(struct binder_alloc *alloc,
				 struct binder_buffer *buffer,
				 binder_size_t buffer_offset,
				 const void __user *from,
				 size_t bytes)
{
	if (!check_buffer(alloc, buffer, buffer_offset, bytes))
		return bytes;

	while (bytes) {
		unsigned long size;
		unsigned long ret;
		struct page *page;
		pgoff_t pgoff;
		void *kptr;

		page = binder_alloc_get_page(alloc, buffer,
					     buffer_offset, &pgoff);
		size = min_t(size_t, bytes, PAGE_SIZE - pgoff);
		kptr = kmap(page) + pgoff;
		ret = copy_from_user(kptr, from, size);
		kunmap(page);
		if (ret)
			return bytes - size + ret;
		bytes -= size;
		from += size;
		buffer_offset += size;
	}
	return 0;
}

static int binder_alloc_do_buffer_copy(struct binder_alloc *alloc,
				       bool to_buffer,
				       struct binder_buffer *buffer,
				       binder_size_t buffer_offset,
				       void *ptr,
				       size_t bytes)
{
	/* All copies must be 32-bit aligned and 32-bit size */
	if (!check_buffer(alloc, buffer, buffer_offset, bytes))
		return -EINVAL;

	while (bytes) {
		unsigned long size;
		struct page *page;
		pgoff_t pgoff;
		void *tmpptr;
		void *base_ptr;

		page = binder_alloc_get_page(alloc, buffer,
					     buffer_offset, &pgoff);
		size = min_t(size_t, bytes, PAGE_SIZE - pgoff);
		base_ptr = kmap_atomic(page);
		tmpptr = base_ptr + pgoff;
		if (to_buffer)
			memcpy(tmpptr, ptr, size);
		else
			memcpy(ptr, tmpptr, size);
		/*
		 * kunmap_atomic() takes care of flushing the cache
		 * if this device has VIVT cache arch
		 */
		kunmap_atomic(base_ptr);
		bytes -= size;
		pgoff = 0;
		ptr = ptr + size;
		buffer_offset += size;
	}
	return 0;
}

int binder_alloc_copy_to_buffer(struct binder_alloc *alloc,
				struct binder_buffer *buffer,
				binder_size_t buffer_offset,
				void *src,
				size_t bytes)
{
	return binder_alloc_do_buffer_copy(alloc, true, buffer, buffer_offset,
					   src, bytes);
}

int binder_alloc_copy_from_buffer(struct binder_alloc *alloc,
				  void *dest,
				  struct binder_buffer *buffer,
				  binder_size_t buffer_offset,
				  size_t bytes)
{
	return binder_alloc_do_buffer_copy(alloc, false, buffer, buffer_offset,
					   dest, bytes);
}