/* binder_alloc.c
 *
 * Android IPC Subsystem
 *
 * Copyright (C) 2007-2017 Google, Inc.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/list.h>
#include <linux/sched/mm.h>
#include <linux/module.h>
#include <linux/rtmutex.h>
#include <linux/rbtree.h>
#include <linux/seq_file.h>
#include <linux/vmalloc.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/list_lru.h>
#include <linux/ratelimit.h>
#include <asm/cacheflush.h>
#include <linux/uaccess.h>
#include <linux/highmem.h>
#include "binder_alloc.h"
#include "binder_trace.h"
struct list_lru binder_alloc_lru;

static DEFINE_MUTEX(binder_alloc_mmap_lock);
enum {
	BINDER_DEBUG_USER_ERROR         = 1U << 0,
	BINDER_DEBUG_OPEN_CLOSE         = 1U << 1,
	BINDER_DEBUG_BUFFER_ALLOC       = 1U << 2,
	BINDER_DEBUG_BUFFER_ALLOC_ASYNC = 1U << 3,
};
static uint32_t binder_alloc_debug_mask = BINDER_DEBUG_USER_ERROR;

module_param_named(debug_mask, binder_alloc_debug_mask,
		   uint, 0644);
#define binder_alloc_debug(mask, x...) \
	do { \
		if (binder_alloc_debug_mask & mask) \
			pr_info_ratelimited(x); \
	} while (0)
static struct binder_buffer *binder_buffer_next(struct binder_buffer *buffer)
{
	return list_entry(buffer->entry.next, struct binder_buffer, entry);
}
static struct binder_buffer *binder_buffer_prev(struct binder_buffer *buffer)
{
	return list_entry(buffer->entry.prev, struct binder_buffer, entry);
}
static size_t binder_alloc_buffer_size(struct binder_alloc *alloc,
				       struct binder_buffer *buffer)
{
	if (list_is_last(&buffer->entry, &alloc->buffers))
		return alloc->buffer + alloc->buffer_size - buffer->user_data;
	return binder_buffer_next(buffer)->user_data - buffer->user_data;
}
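
/*
 * Worked example (illustrative note, not from the original source):
 * buffers are laid out back to back in the mapped region, so a buffer's
 * size is simply the gap from its user_data to the next buffer's
 * user_data. Two buffers starting at offsets 0x0 and 0x80 give the
 * first a size of 0x80 bytes; the last buffer in the list extends to
 * the end of the mapping.
 */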
static void binder_insert_free_buffer(struct binder_alloc *alloc,
				      struct binder_buffer *new_buffer)
{
	struct rb_node **p = &alloc->free_buffers.rb_node;
	struct rb_node *parent = NULL;
	struct binder_buffer *buffer;
	size_t buffer_size;
	size_t new_buffer_size;

	BUG_ON(!new_buffer->free);

	new_buffer_size = binder_alloc_buffer_size(alloc, new_buffer);

	binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
			   "%d: add free buffer, size %zd, at %pK\n",
			   alloc->pid, new_buffer_size, new_buffer);

	while (*p) {
		parent = *p;
		buffer = rb_entry(parent, struct binder_buffer, rb_node);
		BUG_ON(!buffer->free);

		buffer_size = binder_alloc_buffer_size(alloc, buffer);

		if (new_buffer_size < buffer_size)
			p = &parent->rb_left;
		else
			p = &parent->rb_right;
	}
	rb_link_node(&new_buffer->rb_node, parent, p);
	rb_insert_color(&new_buffer->rb_node, &alloc->free_buffers);
}
static void binder_insert_allocated_buffer_locked(
		struct binder_alloc *alloc, struct binder_buffer *new_buffer)
{
	struct rb_node **p = &alloc->allocated_buffers.rb_node;
	struct rb_node *parent = NULL;
	struct binder_buffer *buffer;

	BUG_ON(new_buffer->free);

	while (*p) {
		parent = *p;
		buffer = rb_entry(parent, struct binder_buffer, rb_node);
		BUG_ON(buffer->free);

		if (new_buffer->user_data < buffer->user_data)
			p = &parent->rb_left;
		else if (new_buffer->user_data > buffer->user_data)
			p = &parent->rb_right;
		else
			BUG();
	}
	rb_link_node(&new_buffer->rb_node, parent, p);
	rb_insert_color(&new_buffer->rb_node, &alloc->allocated_buffers);
}
static struct binder_buffer *binder_alloc_prepare_to_free_locked(
		struct binder_alloc *alloc,
		uintptr_t user_ptr)
{
	struct rb_node *n = alloc->allocated_buffers.rb_node;
	struct binder_buffer *buffer;
	void __user *uptr;

	uptr = (void __user *)user_ptr;

	while (n) {
		buffer = rb_entry(n, struct binder_buffer, rb_node);
		BUG_ON(buffer->free);

		if (uptr < buffer->user_data)
			n = n->rb_left;
		else if (uptr > buffer->user_data)
			n = n->rb_right;
		else {
			/*
			 * Guard against user threads attempting to
			 * free the buffer when in use by the kernel or
			 * after it's already been freed.
			 */
			if (!buffer->allow_user_free)
				return ERR_PTR(-EPERM);
			buffer->allow_user_free = 0;
			return buffer;
		}
	}
	return NULL;
}
/**
 * binder_alloc_prepare_to_free() - get buffer given user ptr
 * @alloc:	binder_alloc for this proc
 * @user_ptr:	User pointer to buffer data
 *
 * Validate userspace pointer to buffer data and return buffer corresponding to
 * that user pointer. Search the rb tree for buffer that matches user data
 * pointer.
 *
 * Return:	Pointer to the buffer if found, NULL if there is no match, or
 *		ERR_PTR(-EPERM) if the buffer may not be freed by userspace
 */
struct binder_buffer *binder_alloc_prepare_to_free(struct binder_alloc *alloc,
						   uintptr_t user_ptr)
{
	struct binder_buffer *buffer;

	mutex_lock(&alloc->mutex);
	buffer = binder_alloc_prepare_to_free_locked(alloc, user_ptr);
	mutex_unlock(&alloc->mutex);
	return buffer;
}
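
/*
 * Illustrative sketch only (not part of this driver): roughly how a
 * BC_FREE_BUFFER handler in binder.c is expected to pair the lookup
 * above with binder_alloc_free_buf(). The helper name and the error
 * handling shown are assumptions for illustration.
 */
static void __maybe_unused binder_example_handle_free(struct binder_alloc *alloc,
						      uintptr_t user_ptr)
{
	struct binder_buffer *buffer;

	buffer = binder_alloc_prepare_to_free(alloc, user_ptr);
	if (IS_ERR_OR_NULL(buffer))
		return;	/* stale, double-freed or still-in-use pointer */
	binder_alloc_free_buf(alloc, buffer);
}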
static int binder_update_page_range(struct binder_alloc *alloc, int allocate,
				    void __user *start, void __user *end)
{
	void __user *page_addr;
	unsigned long user_page_addr;
	struct binder_lru_page *page;
	struct vm_area_struct *vma = NULL;
	struct mm_struct *mm = NULL;
	bool need_mm = false;

	binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
			   "%d: %s pages %pK-%pK\n", alloc->pid,
			   allocate ? "allocate" : "free", start, end);

	if (end <= start)
		return 0;

	trace_binder_update_page_range(alloc, allocate, start, end);

	if (allocate == 0)
		goto free_range;

	for (page_addr = start; page_addr < end; page_addr += PAGE_SIZE) {
		page = &alloc->pages[(page_addr - alloc->buffer) / PAGE_SIZE];
		if (!page->page_ptr) {
			need_mm = true;
			break;
		}
	}

	if (need_mm && mmget_not_zero(alloc->vma_vm_mm))
		mm = alloc->vma_vm_mm;

	if (mm) {
		down_read(&mm->mmap_sem);
		vma = alloc->vma;
	}

	if (!vma && need_mm) {
		binder_alloc_debug(BINDER_DEBUG_USER_ERROR,
				   "%d: binder_alloc_buf failed to map pages in userspace, no vma\n",
				   alloc->pid);
		goto err_no_vma;
	}

	for (page_addr = start; page_addr < end; page_addr += PAGE_SIZE) {
		int ret;
		bool on_lru;
		size_t index;

		index = (page_addr - alloc->buffer) / PAGE_SIZE;
		page = &alloc->pages[index];

		if (page->page_ptr) {
			trace_binder_alloc_lru_start(alloc, index);

			on_lru = list_lru_del(&binder_alloc_lru, &page->lru);
			WARN_ON(!on_lru);

			trace_binder_alloc_lru_end(alloc, index);
			continue;
		}

		if (WARN_ON(!vma))
			goto err_page_ptr_cleared;

		trace_binder_alloc_page_start(alloc, index);
		page->page_ptr = alloc_page(GFP_KERNEL |
					    __GFP_HIGHMEM |
					    __GFP_ZERO);
		if (!page->page_ptr) {
			pr_err("%d: binder_alloc_buf failed for page at %pK\n",
			       alloc->pid, page_addr);
			goto err_alloc_page_failed;
		}
		page->alloc = alloc;
		INIT_LIST_HEAD(&page->lru);

		user_page_addr = (uintptr_t)page_addr;
		ret = vm_insert_page(vma, user_page_addr, page[0].page_ptr);
		if (ret) {
			pr_err("%d: binder_alloc_buf failed to map page at %lx in userspace\n",
			       alloc->pid, user_page_addr);
			goto err_vm_insert_page_failed;
		}

		if (index + 1 > alloc->pages_high)
			alloc->pages_high = index + 1;

		trace_binder_alloc_page_end(alloc, index);
		/* vm_insert_page does not seem to increment the refcount */
	}
	if (mm) {
		up_read(&mm->mmap_sem);
		mmput(mm);
	}
	return 0;

free_range:
	for (page_addr = end - PAGE_SIZE; page_addr >= start;
	     page_addr -= PAGE_SIZE) {
		bool ret;
		size_t index;

		index = (page_addr - alloc->buffer) / PAGE_SIZE;
		page = &alloc->pages[index];

		trace_binder_free_lru_start(alloc, index);

		ret = list_lru_add(&binder_alloc_lru, &page->lru);
		WARN_ON(!ret);

		trace_binder_free_lru_end(alloc, index);
		continue;

err_vm_insert_page_failed:
		__free_page(page->page_ptr);
		page->page_ptr = NULL;
err_alloc_page_failed:
err_page_ptr_cleared:
		;
	}
err_no_vma:
	if (mm) {
		up_read(&mm->mmap_sem);
		mmput(mm);
	}
	return vma ? -ENOMEM : -ESRCH;
}
static inline void binder_alloc_set_vma(struct binder_alloc *alloc,
					struct vm_area_struct *vma)
{
	if (vma)
		alloc->vma_vm_mm = vma->vm_mm;
	/*
	 * If we see alloc->vma is not NULL, the buffer data structures
	 * are set up completely; see the smp_rmb() counterpart in
	 * binder_alloc_get_vma(). We also want to guarantee that the new
	 * alloc->vma_vm_mm is always visible if alloc->vma is set.
	 */
	smp_wmb();
	alloc->vma = vma;
}
static inline struct vm_area_struct *binder_alloc_get_vma(
		struct binder_alloc *alloc)
{
	struct vm_area_struct *vma = NULL;

	if (alloc->vma) {
		/* Look at description in binder_alloc_set_vma */
		smp_rmb();
		vma = alloc->vma;
	}
	return vma;
}
static struct binder_buffer *binder_alloc_new_buf_locked(
				struct binder_alloc *alloc,
				size_t data_size,
				size_t offsets_size,
				size_t extra_buffers_size,
				int is_async)
{
	struct rb_node *n = alloc->free_buffers.rb_node;
	struct binder_buffer *buffer;
	size_t buffer_size;
	struct rb_node *best_fit = NULL;
	void __user *has_page_addr;
	void __user *end_page_addr;
	size_t size, data_offsets_size;
	int ret;

	if (!binder_alloc_get_vma(alloc)) {
		binder_alloc_debug(BINDER_DEBUG_USER_ERROR,
				   "%d: binder_alloc_buf, no vma\n",
				   alloc->pid);
		return ERR_PTR(-ESRCH);
	}

	data_offsets_size = ALIGN(data_size, sizeof(void *)) +
		ALIGN(offsets_size, sizeof(void *));

	if (data_offsets_size < data_size || data_offsets_size < offsets_size) {
		binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
				   "%d: got transaction with invalid size %zd-%zd\n",
				   alloc->pid, data_size, offsets_size);
		return ERR_PTR(-EINVAL);
	}
	size = data_offsets_size + ALIGN(extra_buffers_size, sizeof(void *));
	if (size < data_offsets_size || size < extra_buffers_size) {
		binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
				   "%d: got transaction with invalid extra_buffers_size %zd\n",
				   alloc->pid, extra_buffers_size);
		return ERR_PTR(-EINVAL);
	}
	if (is_async &&
	    alloc->free_async_space < size + sizeof(struct binder_buffer)) {
		binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
				   "%d: binder_alloc_buf size %zd failed, no async space left\n",
				   alloc->pid, size);
		return ERR_PTR(-ENOSPC);
	}

	/* Pad 0-size buffers so they get assigned unique addresses */
	size = max(size, sizeof(void *));

	while (n) {
		buffer = rb_entry(n, struct binder_buffer, rb_node);
		BUG_ON(!buffer->free);
		buffer_size = binder_alloc_buffer_size(alloc, buffer);

		if (size < buffer_size) {
			best_fit = n;
			n = n->rb_left;
		} else if (size > buffer_size)
			n = n->rb_right;
		else {
			best_fit = n;
			break;
		}
	}
	if (best_fit == NULL) {
		size_t allocated_buffers = 0;
		size_t largest_alloc_size = 0;
		size_t total_alloc_size = 0;
		size_t free_buffers = 0;
		size_t largest_free_size = 0;
		size_t total_free_size = 0;

		for (n = rb_first(&alloc->allocated_buffers); n != NULL;
		     n = rb_next(n)) {
			buffer = rb_entry(n, struct binder_buffer, rb_node);
			buffer_size = binder_alloc_buffer_size(alloc, buffer);
			allocated_buffers++;
			total_alloc_size += buffer_size;
			if (buffer_size > largest_alloc_size)
				largest_alloc_size = buffer_size;
		}
		for (n = rb_first(&alloc->free_buffers); n != NULL;
		     n = rb_next(n)) {
			buffer = rb_entry(n, struct binder_buffer, rb_node);
			buffer_size = binder_alloc_buffer_size(alloc, buffer);
			free_buffers++;
			total_free_size += buffer_size;
			if (buffer_size > largest_free_size)
				largest_free_size = buffer_size;
		}
		binder_alloc_debug(BINDER_DEBUG_USER_ERROR,
				   "%d: binder_alloc_buf size %zd failed, no address space\n",
				   alloc->pid, size);
		binder_alloc_debug(BINDER_DEBUG_USER_ERROR,
				   "allocated: %zd (num: %zd largest: %zd), free: %zd (num: %zd largest: %zd)\n",
				   total_alloc_size, allocated_buffers,
				   largest_alloc_size, total_free_size,
				   free_buffers, largest_free_size);
		return ERR_PTR(-ENOSPC);
	}
	if (n == NULL) {
		buffer = rb_entry(best_fit, struct binder_buffer, rb_node);
		buffer_size = binder_alloc_buffer_size(alloc, buffer);
	}

	binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
			   "%d: binder_alloc_buf size %zd got buffer %pK size %zd\n",
			   alloc->pid, size, buffer, buffer_size);

	has_page_addr = (void __user *)
		(((uintptr_t)buffer->user_data + buffer_size) & PAGE_MASK);
	WARN_ON(n && buffer_size != size);
	end_page_addr =
		(void __user *)PAGE_ALIGN((uintptr_t)buffer->user_data + size);
	if (end_page_addr > has_page_addr)
		end_page_addr = has_page_addr;
	ret = binder_update_page_range(alloc, 1, (void __user *)
		PAGE_ALIGN((uintptr_t)buffer->user_data), end_page_addr);
	if (ret)
		return ERR_PTR(ret);

	if (buffer_size != size) {
		struct binder_buffer *new_buffer;

		new_buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
		if (!new_buffer) {
			pr_err("%s: %d failed to alloc new buffer struct\n",
			       __func__, alloc->pid);
			goto err_alloc_buf_struct_failed;
		}
		new_buffer->user_data = (u8 __user *)buffer->user_data + size;
		list_add(&new_buffer->entry, &buffer->entry);
		new_buffer->free = 1;
		binder_insert_free_buffer(alloc, new_buffer);
	}

	rb_erase(best_fit, &alloc->free_buffers);
	buffer->free = 0;
	buffer->allow_user_free = 0;
	binder_insert_allocated_buffer_locked(alloc, buffer);
	binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
			   "%d: binder_alloc_buf size %zd got %pK\n",
			   alloc->pid, size, buffer);
	buffer->data_size = data_size;
	buffer->offsets_size = offsets_size;
	buffer->async_transaction = is_async;
	buffer->extra_buffers_size = extra_buffers_size;
	if (is_async) {
		alloc->free_async_space -= size + sizeof(struct binder_buffer);
		binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC_ASYNC,
				   "%d: binder_alloc_buf size %zd async free %zd\n",
				   alloc->pid, size, alloc->free_async_space);
	}
	return buffer;

err_alloc_buf_struct_failed:
	binder_update_page_range(alloc, 0, (void __user *)
				 PAGE_ALIGN((uintptr_t)buffer->user_data),
				 end_page_addr);
	return ERR_PTR(-ENOMEM);
}
/**
 * binder_alloc_new_buf() - Allocate a new binder buffer
 * @alloc:              binder_alloc for this proc
 * @data_size:          size of user data buffer
 * @offsets_size:       user specified buffer offset
 * @extra_buffers_size: size of extra space for meta-data (eg, security context)
 * @is_async:           buffer for async transaction
 *
 * Allocate a new buffer given the requested sizes. Returns
 * the kernel version of the buffer pointer. The size allocated
 * is the sum of the three given sizes (each rounded up to
 * pointer-sized boundary)
 *
 * Return:	The allocated buffer or %ERR_PTR(-errno) on error
 */
struct binder_buffer *binder_alloc_new_buf(struct binder_alloc *alloc,
					   size_t data_size,
					   size_t offsets_size,
					   size_t extra_buffers_size,
					   int is_async)
{
	struct binder_buffer *buffer;

	mutex_lock(&alloc->mutex);
	buffer = binder_alloc_new_buf_locked(alloc, data_size, offsets_size,
					     extra_buffers_size, is_async);
	mutex_unlock(&alloc->mutex);
	return buffer;
}
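
/*
 * Illustrative sketch only (not part of this driver): allocating a
 * buffer for a synchronous transaction. The sizes are invented; real
 * callers derive them from the incoming transaction. Errors come back
 * as ERR_PTR values, per the Return doc above.
 */
static struct binder_buffer * __maybe_unused
binder_example_new_buf(struct binder_alloc *alloc)
{
	/* 256 data bytes, 4 object offsets, no extra objects, !async */
	return binder_alloc_new_buf(alloc, 256, 4 * sizeof(binder_size_t),
				    0, 0);
}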
static void __user *buffer_start_page(struct binder_buffer *buffer)
{
	return (void __user *)((uintptr_t)buffer->user_data & PAGE_MASK);
}

static void __user *prev_buffer_end_page(struct binder_buffer *buffer)
{
	return (void __user *)
		(((uintptr_t)(buffer->user_data) - 1) & PAGE_MASK);
}
static void binder_delete_free_buffer(struct binder_alloc *alloc,
				      struct binder_buffer *buffer)
{
	struct binder_buffer *prev, *next = NULL;
	bool to_free = true;

	BUG_ON(alloc->buffers.next == &buffer->entry);
	prev = binder_buffer_prev(buffer);
	BUG_ON(!prev->free);
	if (prev_buffer_end_page(prev) == buffer_start_page(buffer)) {
		to_free = false;
		binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
				   "%d: merge free, buffer %pK share page with %pK\n",
				   alloc->pid, buffer->user_data,
				   prev->user_data);
	}

	if (!list_is_last(&buffer->entry, &alloc->buffers)) {
		next = binder_buffer_next(buffer);
		if (buffer_start_page(next) == buffer_start_page(buffer)) {
			to_free = false;
			binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
					   "%d: merge free, buffer %pK share page with %pK\n",
					   alloc->pid, buffer->user_data,
					   next->user_data);
		}
	}

	if (PAGE_ALIGNED(buffer->user_data)) {
		binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
				   "%d: merge free, buffer start %pK is page aligned\n",
				   alloc->pid, buffer->user_data);
		to_free = false;
	}

	if (to_free) {
		binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
				   "%d: merge free, buffer %pK do not share page with %pK or %pK\n",
				   alloc->pid, buffer->user_data,
				   prev->user_data,
				   next ? next->user_data : NULL);
		binder_update_page_range(alloc, 0, buffer_start_page(buffer),
					 buffer_start_page(buffer) + PAGE_SIZE);
	}
	list_del(&buffer->entry);
	kfree(buffer);
}
static void binder_free_buf_locked(struct binder_alloc *alloc,
				   struct binder_buffer *buffer)
{
	size_t size, buffer_size;

	buffer_size = binder_alloc_buffer_size(alloc, buffer);

	size = ALIGN(buffer->data_size, sizeof(void *)) +
		ALIGN(buffer->offsets_size, sizeof(void *)) +
		ALIGN(buffer->extra_buffers_size, sizeof(void *));

	binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
			   "%d: binder_free_buf %pK size %zd buffer_size %zd\n",
			   alloc->pid, buffer, size, buffer_size);

	BUG_ON(buffer->free);
	BUG_ON(size > buffer_size);
	BUG_ON(buffer->transaction != NULL);
	BUG_ON(buffer->user_data < alloc->buffer);
	BUG_ON(buffer->user_data > alloc->buffer + alloc->buffer_size);

	if (buffer->async_transaction) {
		alloc->free_async_space += size + sizeof(struct binder_buffer);

		binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC_ASYNC,
				   "%d: binder_free_buf size %zd async free %zd\n",
				   alloc->pid, size, alloc->free_async_space);
	}

	binder_update_page_range(alloc, 0,
		(void __user *)PAGE_ALIGN((uintptr_t)buffer->user_data),
		(void __user *)(((uintptr_t)
			  buffer->user_data + buffer_size) & PAGE_MASK));

	rb_erase(&buffer->rb_node, &alloc->allocated_buffers);
	buffer->free = 1;
	if (!list_is_last(&buffer->entry, &alloc->buffers)) {
		struct binder_buffer *next = binder_buffer_next(buffer);

		if (next->free) {
			rb_erase(&next->rb_node, &alloc->free_buffers);
			binder_delete_free_buffer(alloc, next);
		}
	}
	if (alloc->buffers.next != &buffer->entry) {
		struct binder_buffer *prev = binder_buffer_prev(buffer);

		if (prev->free) {
			binder_delete_free_buffer(alloc, buffer);
			rb_erase(&prev->rb_node, &alloc->free_buffers);
			buffer = prev;
		}
	}
	binder_insert_free_buffer(alloc, buffer);
}
/**
 * binder_alloc_free_buf() - free a binder buffer
 * @alloc:	binder_alloc for this proc
 * @buffer:	kernel pointer to buffer
 *
 * Free the buffer allocated via binder_alloc_new_buf()
 */
void binder_alloc_free_buf(struct binder_alloc *alloc,
			   struct binder_buffer *buffer)
{
	mutex_lock(&alloc->mutex);
	binder_free_buf_locked(alloc, buffer);
	mutex_unlock(&alloc->mutex);
}
/**
 * binder_alloc_mmap_handler() - map virtual address space for proc
 * @alloc:	alloc structure for this proc
 * @vma:	vma passed to mmap()
 *
 * Called by binder_mmap() to initialize the space specified in
 * vma for allocating binder buffers
 *
 * Return:
 *      0 = success
 *      -EBUSY = address space already mapped
 *      -ENOMEM = failed to map memory to given address space
 */
int binder_alloc_mmap_handler(struct binder_alloc *alloc,
			      struct vm_area_struct *vma)
{
	int ret;
	const char *failure_string;
	struct binder_buffer *buffer;

	mutex_lock(&binder_alloc_mmap_lock);
	if (alloc->buffer) {
		ret = -EBUSY;
		failure_string = "already mapped";
		goto err_already_mapped;
	}

	alloc->buffer = (void __user *)vma->vm_start;
	mutex_unlock(&binder_alloc_mmap_lock);

	alloc->pages = kcalloc((vma->vm_end - vma->vm_start) / PAGE_SIZE,
			       sizeof(alloc->pages[0]),
			       GFP_KERNEL);
	if (alloc->pages == NULL) {
		ret = -ENOMEM;
		failure_string = "alloc page array";
		goto err_alloc_pages_failed;
	}
	alloc->buffer_size = vma->vm_end - vma->vm_start;

	buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
	if (!buffer) {
		ret = -ENOMEM;
		failure_string = "alloc buffer struct";
		goto err_alloc_buf_struct_failed;
	}

	buffer->user_data = alloc->buffer;
	list_add(&buffer->entry, &alloc->buffers);
	buffer->free = 1;
	binder_insert_free_buffer(alloc, buffer);
	alloc->free_async_space = alloc->buffer_size / 2;
	binder_alloc_set_vma(alloc, vma);
	mmgrab(alloc->vma_vm_mm);

	return 0;

err_alloc_buf_struct_failed:
	kfree(alloc->pages);
	alloc->pages = NULL;
err_alloc_pages_failed:
	mutex_lock(&binder_alloc_mmap_lock);
	alloc->buffer = NULL;
err_already_mapped:
	mutex_unlock(&binder_alloc_mmap_lock);
	binder_alloc_debug(BINDER_DEBUG_USER_ERROR,
			   "%s: %d %lx-%lx %s failed %d\n", __func__,
			   alloc->pid, vma->vm_start, vma->vm_end,
			   failure_string, ret);
	return ret;
}
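
/*
 * Illustrative sketch only (not part of this driver): binder_mmap() in
 * binder.c is the expected caller of the handler above. The 4MB cap
 * shown here mirrors what the core driver applies before delegating;
 * treat the cap and the helper name as assumptions for illustration.
 */
static int __maybe_unused binder_example_mmap(struct binder_alloc *alloc,
					      struct vm_area_struct *vma)
{
	unsigned long max_size = 4UL * 1024 * 1024;	/* 4MB cap */

	if (vma->vm_end - vma->vm_start > max_size)
		vma->vm_end = vma->vm_start + max_size;
	return binder_alloc_mmap_handler(alloc, vma);
}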
void binder_alloc_deferred_release(struct binder_alloc *alloc)
{
	struct rb_node *n;
	int buffers, page_count;
	struct binder_buffer *buffer;

	buffers = 0;
	mutex_lock(&alloc->mutex);
	BUG_ON(alloc->vma);

	while ((n = rb_first(&alloc->allocated_buffers))) {
		buffer = rb_entry(n, struct binder_buffer, rb_node);

		/* Transaction should already have been freed */
		BUG_ON(buffer->transaction);

		binder_free_buf_locked(alloc, buffer);
		buffers++;
	}

	while (!list_empty(&alloc->buffers)) {
		buffer = list_first_entry(&alloc->buffers,
					  struct binder_buffer, entry);
		WARN_ON(!buffer->free);

		list_del(&buffer->entry);
		WARN_ON_ONCE(!list_empty(&alloc->buffers));
		kfree(buffer);
	}

	page_count = 0;
	if (alloc->pages) {
		int i;

		for (i = 0; i < alloc->buffer_size / PAGE_SIZE; i++) {
			void __user *page_addr;
			bool on_lru;

			if (!alloc->pages[i].page_ptr)
				continue;

			on_lru = list_lru_del(&binder_alloc_lru,
					      &alloc->pages[i].lru);
			page_addr = alloc->buffer + i * PAGE_SIZE;
			binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
					   "%s: %d: page %d at %pK %s\n",
					   __func__, alloc->pid, i, page_addr,
					   on_lru ? "on lru" : "active");
			__free_page(alloc->pages[i].page_ptr);
			page_count++;
		}
		kfree(alloc->pages);
	}
	mutex_unlock(&alloc->mutex);
	if (alloc->vma_vm_mm)
		mmdrop(alloc->vma_vm_mm);

	binder_alloc_debug(BINDER_DEBUG_OPEN_CLOSE,
			   "%s: %d buffers %d, pages %d\n",
			   __func__, alloc->pid, buffers, page_count);
}
static void print_binder_buffer(struct seq_file *m, const char *prefix,
				struct binder_buffer *buffer)
{
	seq_printf(m, "%s %d: %pK size %zd:%zd:%zd %s\n",
		   prefix, buffer->debug_id, buffer->user_data,
		   buffer->data_size, buffer->offsets_size,
		   buffer->extra_buffers_size,
		   buffer->transaction ? "active" : "delivered");
}
/**
 * binder_alloc_print_allocated() - print buffer info
 * @m:     seq_file for output via seq_printf()
 * @alloc: binder_alloc for this proc
 *
 * Prints information about every buffer associated with
 * the binder_alloc state to the given seq_file
 */
void binder_alloc_print_allocated(struct seq_file *m,
				  struct binder_alloc *alloc)
{
	struct rb_node *n;

	mutex_lock(&alloc->mutex);
	for (n = rb_first(&alloc->allocated_buffers); n != NULL; n = rb_next(n))
		print_binder_buffer(m, "  buffer",
				    rb_entry(n, struct binder_buffer, rb_node));
	mutex_unlock(&alloc->mutex);
}
/**
 * binder_alloc_print_pages() - print page usage
 * @m:     seq_file for output via seq_printf()
 * @alloc: binder_alloc for this proc
 */
void binder_alloc_print_pages(struct seq_file *m,
			      struct binder_alloc *alloc)
{
	struct binder_lru_page *page;
	int i;
	int active = 0;
	int lru = 0;
	int free = 0;

	mutex_lock(&alloc->mutex);
	for (i = 0; i < alloc->buffer_size / PAGE_SIZE; i++) {
		page = &alloc->pages[i];
		if (!page->page_ptr)
			free++;
		else if (list_empty(&page->lru))
			active++;
		else
			lru++;
	}
	mutex_unlock(&alloc->mutex);
	seq_printf(m, "  pages: %d:%d:%d\n", active, lru, free);
	seq_printf(m, "  pages high watermark: %zu\n", alloc->pages_high);
}
/**
 * binder_alloc_get_allocated_count() - return count of buffers
 * @alloc: binder_alloc for this proc
 *
 * Return: count of allocated buffers
 */
int binder_alloc_get_allocated_count(struct binder_alloc *alloc)
{
	struct rb_node *n;
	int count = 0;

	mutex_lock(&alloc->mutex);
	for (n = rb_first(&alloc->allocated_buffers); n != NULL; n = rb_next(n))
		count++;
	mutex_unlock(&alloc->mutex);
	return count;
}
/**
 * binder_alloc_vma_close() - invalidate address space
 * @alloc: binder_alloc for this proc
 *
 * Called from binder_vma_close() when releasing address space.
 * Clears alloc->vma to prevent new incoming transactions from
 * allocating more buffers.
 */
void binder_alloc_vma_close(struct binder_alloc *alloc)
{
	binder_alloc_set_vma(alloc, NULL);
}
/**
 * binder_alloc_free_page() - shrinker callback to free pages
 * @item:   item to free
 * @lru:    list_lru instance of the item
 * @lock:   lock protecting the item
 * @cb_arg: callback argument
 *
 * Called from list_lru_walk() in binder_shrink_scan() to free
 * up pages when the system is under memory pressure.
 */
enum lru_status binder_alloc_free_page(struct list_head *item,
				       struct list_lru_one *lru,
				       spinlock_t *lock,
				       void *cb_arg)
	__must_hold(lock)
{
	struct mm_struct *mm = NULL;
	struct binder_lru_page *page = container_of(item,
						    struct binder_lru_page,
						    lru);
	struct binder_alloc *alloc;
	uintptr_t page_addr;
	size_t index;
	struct vm_area_struct *vma;

	alloc = page->alloc;
	if (!mutex_trylock(&alloc->mutex))
		goto err_get_alloc_mutex_failed;

	if (!page->page_ptr)
		goto err_page_already_freed;

	index = page - alloc->pages;
	page_addr = (uintptr_t)alloc->buffer + index * PAGE_SIZE;

	mm = alloc->vma_vm_mm;
	if (!mmget_not_zero(mm))
		goto err_mmget;
	if (!down_read_trylock(&mm->mmap_sem))
		goto err_down_read_mmap_sem_failed;
	vma = binder_alloc_get_vma(alloc);

	list_lru_isolate(lru, item);
	spin_unlock(lock);

	if (vma) {
		trace_binder_unmap_user_start(alloc, index);

		zap_page_range(vma, page_addr, PAGE_SIZE);

		trace_binder_unmap_user_end(alloc, index);
	}
	up_read(&mm->mmap_sem);
	mmput(mm);

	trace_binder_unmap_kernel_start(alloc, index);

	__free_page(page->page_ptr);
	page->page_ptr = NULL;

	trace_binder_unmap_kernel_end(alloc, index);

	spin_lock(lock);
	mutex_unlock(&alloc->mutex);
	return LRU_REMOVED_RETRY;

err_down_read_mmap_sem_failed:
	mmput(mm);
err_mmget:
err_page_already_freed:
	mutex_unlock(&alloc->mutex);
err_get_alloc_mutex_failed:
	return LRU_SKIP;
}
static unsigned long
binder_shrink_count(struct shrinker *shrink, struct shrink_control *sc)
{
	unsigned long ret = list_lru_count(&binder_alloc_lru);

	return ret;
}

static unsigned long
binder_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
{
	unsigned long ret;

	ret = list_lru_walk(&binder_alloc_lru, binder_alloc_free_page,
			    NULL, sc->nr_to_scan);
	return ret;
}

static struct shrinker binder_shrinker = {
	.count_objects = binder_shrink_count,
	.scan_objects = binder_shrink_scan,
	.seeks = DEFAULT_SEEKS,
};
/**
 * binder_alloc_init() - called by binder_open() for per-proc initialization
 * @alloc: binder_alloc for this proc
 *
 * Called from binder_open() to initialize binder_alloc fields for
 * new binder proc
 */
void binder_alloc_init(struct binder_alloc *alloc)
{
	alloc->pid = current->group_leader->pid;
	mutex_init(&alloc->mutex);
	INIT_LIST_HEAD(&alloc->buffers);
}
int binder_alloc_shrinker_init(void)
{
	int ret = list_lru_init(&binder_alloc_lru);

	if (ret == 0) {
		ret = register_shrinker(&binder_shrinker);
		if (ret)
			list_lru_destroy(&binder_alloc_lru);
	}
	return ret;
}
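
/*
 * Illustrative sketch only (not part of this driver): the intended
 * init order per the comments above. binder_alloc_shrinker_init()
 * sets up the single global LRU and shrinker once at module init,
 * while binder_alloc_init() runs once per process at open() time.
 * The helper name is an assumption for illustration.
 */
static int __maybe_unused binder_example_init_order(struct binder_alloc *alloc)
{
	int ret;

	ret = binder_alloc_shrinker_init();	/* once, at module init */
	if (ret)
		return ret;
	binder_alloc_init(alloc);		/* once per binder_open() */
	return 0;
}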
/**
 * check_buffer() - verify that buffer/offset is safe to access
 * @alloc: binder_alloc for this proc
 * @buffer: binder buffer to be accessed
 * @offset: offset into @buffer data
 * @bytes: bytes to access from offset
 *
 * Check that the @offset/@bytes are within the size of the given
 * @buffer and that the buffer is currently active and not freeable.
 * Offsets must also be multiples of sizeof(u32). The kernel is
 * allowed to touch the buffer in two cases:
 *
 * 1) when the buffer is being created:
 *     (buffer->free == 0 && buffer->allow_user_free == 0)
 * 2) when the buffer is being torn down:
 *     (buffer->free == 0 && buffer->transaction == NULL).
 *
 * Return: true if the buffer is safe to access
 */
static inline bool check_buffer(struct binder_alloc *alloc,
				struct binder_buffer *buffer,
				binder_size_t offset, size_t bytes)
{
	size_t buffer_size = binder_alloc_buffer_size(alloc, buffer);

	return buffer_size >= bytes &&
		offset <= buffer_size - bytes &&
		IS_ALIGNED(offset, sizeof(u32)) &&
		!buffer->free &&
		(!buffer->allow_user_free || !buffer->transaction);
}
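
/*
 * Worked example (illustrative note, not from the original source):
 * for a 64-byte buffer, offset 8 with bytes 56 passes (8 <= 64 - 56,
 * and 8 is u32-aligned); offset 6 fails the alignment check; offset 16
 * with bytes 56 fails the bounds check since 16 > 64 - 56.
 */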
/**
 * binder_alloc_get_page() - get kernel pointer for given buffer offset
 * @alloc: binder_alloc for this proc
 * @buffer: binder buffer to be accessed
 * @buffer_offset: offset into @buffer data
 * @pgoffp: address to copy final page offset to
 *
 * Lookup the struct page corresponding to the address
 * at @buffer_offset into @buffer->user_data. If @pgoffp is not
 * NULL, the byte-offset into the page is written there.
 *
 * The caller is responsible to ensure that the offset points
 * to a valid address within the @buffer and that @buffer is
 * not freeable by the user. Since it can't be freed, we are
 * guaranteed that the corresponding elements of @alloc->pages[]
 * cannot change.
 *
 * Return: struct page
 */
static struct page *binder_alloc_get_page(struct binder_alloc *alloc,
					  struct binder_buffer *buffer,
					  binder_size_t buffer_offset,
					  pgoff_t *pgoffp)
{
	binder_size_t buffer_space_offset = buffer_offset +
		(buffer->user_data - alloc->buffer);
	pgoff_t pgoff = buffer_space_offset & ~PAGE_MASK;
	size_t index = buffer_space_offset >> PAGE_SHIFT;
	struct binder_lru_page *lru_page;

	lru_page = &alloc->pages[index];
	*pgoffp = pgoff;
	return lru_page->page_ptr;
}
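
/*
 * Worked example (illustrative note, not from the original source):
 * for a buffer whose user_data lies 0x1ff0 bytes into the mapping and
 * a buffer_offset of 0x20, buffer_space_offset is 0x2010, so with 4K
 * pages index = 2 and pgoff = 0x10.
 */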
/**
 * binder_alloc_copy_user_to_buffer() - copy src user to tgt user
 * @alloc: binder_alloc for this proc
 * @buffer: binder buffer to be accessed
 * @buffer_offset: offset into @buffer data
 * @from: userspace pointer to source buffer
 * @bytes: bytes to copy
 *
 * Copy bytes from source userspace to target buffer.
 *
 * Return: bytes remaining to be copied
 */
unsigned long
binder_alloc_copy_user_to_buffer(struct binder_alloc *alloc,
				 struct binder_buffer *buffer,
				 binder_size_t buffer_offset,
				 const void __user *from,
				 size_t bytes)
{
	if (!check_buffer(alloc, buffer, buffer_offset, bytes))
		return bytes;

	while (bytes) {
		unsigned long size;
		unsigned long ret;
		struct page *page;
		pgoff_t pgoff;
		void *kptr;

		page = binder_alloc_get_page(alloc, buffer,
					     buffer_offset, &pgoff);
		size = min_t(size_t, bytes, PAGE_SIZE - pgoff);
		kptr = kmap(page) + pgoff;
		ret = copy_from_user(kptr, from, size);
		kunmap(page);
		if (ret)
			return bytes - size + ret;
		bytes -= size;
		from += size;
		buffer_offset += size;
	}
	return 0;
}
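
/*
 * Illustrative sketch only (not part of this driver): copying a
 * transaction payload in from userspace. A non-zero return from the
 * helper above means a partial copy, which callers would typically
 * surface as -EFAULT; the wrapper name is invented for illustration.
 */
static int __maybe_unused
binder_example_copy_payload(struct binder_alloc *alloc,
			    struct binder_buffer *buffer,
			    const void __user *from, size_t len)
{
	if (binder_alloc_copy_user_to_buffer(alloc, buffer, 0, from, len))
		return -EFAULT;
	return 0;
}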
static void binder_alloc_do_buffer_copy(struct binder_alloc *alloc,
					bool to_buffer,
					struct binder_buffer *buffer,
					binder_size_t buffer_offset,
					void *ptr,
					size_t bytes)
{
	/* All copies must be 32-bit aligned and 32-bit size */
	BUG_ON(!check_buffer(alloc, buffer, buffer_offset, bytes));

	while (bytes) {
		unsigned long size;
		struct page *page;
		pgoff_t pgoff;
		void *tmpptr;
		void *base_ptr;

		page = binder_alloc_get_page(alloc, buffer,
					     buffer_offset, &pgoff);
		size = min_t(size_t, bytes, PAGE_SIZE - pgoff);
		base_ptr = kmap_atomic(page);
		tmpptr = base_ptr + pgoff;
		if (to_buffer)
			memcpy(tmpptr, ptr, size);
		else
			memcpy(ptr, tmpptr, size);
		/*
		 * kunmap_atomic() takes care of flushing the cache
		 * if this device has VIVT cache arch
		 */
		kunmap_atomic(base_ptr);
		bytes -= size;
		pgoff = 0;
		ptr = ptr + size;
		buffer_offset += size;
	}
}
void binder_alloc_copy_to_buffer(struct binder_alloc *alloc,
				 struct binder_buffer *buffer,
				 binder_size_t buffer_offset,
				 void *src,
				 size_t bytes)
{
	binder_alloc_do_buffer_copy(alloc, true, buffer, buffer_offset,
				    src, bytes);
}

void binder_alloc_copy_from_buffer(struct binder_alloc *alloc,
				   void *dest,
				   struct binder_buffer *buffer,
				   binder_size_t buffer_offset,
				   size_t bytes)
{
	binder_alloc_do_buffer_copy(alloc, false, buffer, buffer_offset,
				    dest, bytes);
}