/*
** Anonymous Shared Memory Subsystem, ashmem
**
** Copyright (C) 2008 Google, Inc.
**
** Robert Love <rlove@google.com>
**
** This software is licensed under the terms of the GNU General Public
** License version 2, as published by the Free Software Foundation, and
** may be copied, distributed, and modified under those terms.
**
** This program is distributed in the hope that it will be useful,
** but WITHOUT ANY WARRANTY; without even the implied warranty of
** MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
** GNU General Public License for more details.
*/

#define pr_fmt(fmt) "ashmem: " fmt

#include <linux/module.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/falloc.h>
#include <linux/miscdevice.h>
#include <linux/security.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/uaccess.h>
#include <linux/personality.h>
#include <linux/bitops.h>
#include <linux/mutex.h>
#include <linux/shmem_fs.h>
#include "ashmem.h"

#define ASHMEM_NAME_PREFIX "dev/ashmem/"
#define ASHMEM_NAME_PREFIX_LEN (sizeof(ASHMEM_NAME_PREFIX) - 1)
#define ASHMEM_FULL_NAME_LEN (ASHMEM_NAME_LEN + ASHMEM_NAME_PREFIX_LEN)

/*
 * ashmem_area - anonymous shared memory area
 * Lifecycle: From our parent file's open() until its release()
 * Locking: Protected by `ashmem_mutex'
 * Big Note: Mappings do NOT pin this structure; it dies on close()
 */
struct ashmem_area {
	char name[ASHMEM_FULL_NAME_LEN];	/* optional name in /proc/pid/maps */
	struct list_head unpinned_list;		/* list of this area's unpinned ranges */
	struct file *file;			/* the shmem-based backing file */
	size_t size;				/* size of the mapping, in bytes */
	unsigned long prot_mask;		/* allowed prot bits, as vm_flags */
};

/*
 * ashmem_range - represents an interval of unpinned (evictable) pages
 * Lifecycle: From unpin to pin
 * Locking: Protected by `ashmem_mutex'
 */
struct ashmem_range {
	struct list_head lru;		/* entry in the global LRU list */
	struct list_head unpinned;	/* entry in its area's unpinned list */
	struct ashmem_area *asma;	/* associated ashmem_area */
	size_t pgstart;			/* starting page, inclusive */
	size_t pgend;			/* ending page, inclusive */
	unsigned int purged;		/* ASHMEM_NOT_PURGED or ASHMEM_WAS_PURGED */
};

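/*
 * For orientation, a minimal userspace sketch of how these two structures
 * get exercised; illustrative only, assuming the usual ashmem uapi header
 * and a /dev/ashmem node, neither of which is part of this file:
 *
 *	int fd = open("/dev/ashmem", O_RDWR);
 *	ioctl(fd, ASHMEM_SET_NAME, "example-region");	(optional, before mmap)
 *	ioctl(fd, ASHMEM_SET_SIZE, 4096);		(required before mmap)
 *	char *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
 *
 * Each open() of the misc device creates one ashmem_area; a later
 * ASHMEM_UNPIN creates ashmem_range entries on that area's unpinned_list
 * and on the global LRU list below.
 */
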
/* LRU list of unpinned pages, protected by ashmem_mutex */
static LIST_HEAD(ashmem_lru_list);

/* Count of pages on our LRU list, protected by ashmem_mutex */
static unsigned long lru_count;

/*
 * ashmem_mutex - protects the list of and each individual ashmem_area
 *
 * Lock Ordering: ashmem_mutex -> i_mutex -> i_alloc_sem
 */
static DEFINE_MUTEX(ashmem_mutex);

static struct kmem_cache *ashmem_area_cachep __read_mostly;
static struct kmem_cache *ashmem_range_cachep __read_mostly;

#define range_size(range) \
	((range)->pgend - (range)->pgstart + 1)

#define range_on_lru(range) \
	((range)->purged == ASHMEM_NOT_PURGED)

#define page_range_subsumes_range(range, start, end) \
	(((range)->pgstart >= (start)) && ((range)->pgend <= (end)))

#define page_range_subsumed_by_range(range, start, end) \
	(((range)->pgstart <= (start)) && ((range)->pgend >= (end)))

#define page_in_range(range, page) \
	(((range)->pgstart <= (page)) && ((range)->pgend >= (page)))

#define page_range_in_range(range, start, end) \
	(page_in_range(range, start) || page_in_range(range, end) || \
		page_range_subsumes_range(range, start, end))

#define range_before_page(range, page) \
	((range)->pgend < (page))

#define PROT_MASK	(PROT_EXEC | PROT_READ | PROT_WRITE)

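/*
 * A quick worked example of the interval helpers above, using hypothetical
 * values: for a range covering pages [4, 7] (both endpoints inclusive),
 *
 *	range_size(range)                          == 4
 *	page_in_range(range, 7)                    is true (pgend is inclusive)
 *	page_range_subsumes_range(range, 3, 8)     is true ([4,7] lies inside [3,8])
 *	page_range_subsumed_by_range(range, 5, 6)  is true ([5,6] lies inside [4,7])
 *	range_before_page(range, 9)                is true (the range ends before page 9)
 */
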
static inline void lru_add(struct ashmem_range *range)
{
	list_add_tail(&range->lru, &ashmem_lru_list);
	lru_count += range_size(range);
}

static inline void lru_del(struct ashmem_range *range)
{
	list_del(&range->lru);
	lru_count -= range_size(range);
}

/*
 * range_alloc - allocate and initialize a new ashmem_range structure
 *
 * 'asma' - associated ashmem_area
 * 'prev_range' - the previous ashmem_range in the sorted asma->unpinned list
 * 'purged' - initial purge value (ASHMEM_NOT_PURGED or ASHMEM_WAS_PURGED)
 * 'start' - starting page, inclusive
 * 'end' - ending page, inclusive
 *
 * Caller must hold ashmem_mutex.
 */
static int range_alloc(struct ashmem_area *asma,
		       struct ashmem_range *prev_range, unsigned int purged,
		       size_t start, size_t end)
{
	struct ashmem_range *range;

	range = kmem_cache_zalloc(ashmem_range_cachep, GFP_KERNEL);
	if (unlikely(!range))
		return -ENOMEM;

	range->asma = asma;
	range->pgstart = start;
	range->pgend = end;
	range->purged = purged;

	list_add_tail(&range->unpinned, &prev_range->unpinned);

	if (range_on_lru(range))
		lru_add(range);

	return 0;
}

static void range_del(struct ashmem_range *range)
{
	list_del(&range->unpinned);
	if (range_on_lru(range))
		lru_del(range);
	kmem_cache_free(ashmem_range_cachep, range);
}

/*
 * range_shrink - shrinks a range
 *
 * Caller must hold ashmem_mutex.
 */
static inline void range_shrink(struct ashmem_range *range,
				size_t start, size_t end)
{
	size_t pre = range_size(range);

	range->pgstart = start;
	range->pgend = end;

	if (range_on_lru(range))
		lru_count -= pre - range_size(range);
}

static int ashmem_open(struct inode *inode, struct file *file)
{
	struct ashmem_area *asma;
	int ret;

	ret = generic_file_open(inode, file);
	if (unlikely(ret))
		return ret;

	asma = kmem_cache_zalloc(ashmem_area_cachep, GFP_KERNEL);
	if (unlikely(!asma))
		return -ENOMEM;

	INIT_LIST_HEAD(&asma->unpinned_list);
	memcpy(asma->name, ASHMEM_NAME_PREFIX, ASHMEM_NAME_PREFIX_LEN);
	asma->prot_mask = PROT_MASK;
	file->private_data = asma;

	return 0;
}

static int ashmem_release(struct inode *ignored, struct file *file)
{
	struct ashmem_area *asma = file->private_data;
	struct ashmem_range *range, *next;

	mutex_lock(&ashmem_mutex);
	list_for_each_entry_safe(range, next, &asma->unpinned_list, unpinned)
		range_del(range);
	mutex_unlock(&ashmem_mutex);

	if (asma->file)
		fput(asma->file);
	kmem_cache_free(ashmem_area_cachep, asma);

	return 0;
}

static ssize_t ashmem_read(struct file *file, char __user *buf,
			   size_t len, loff_t *pos)
{
	struct ashmem_area *asma = file->private_data;
	int ret = 0;

	mutex_lock(&ashmem_mutex);

	/* If size is not set, or set to 0, always return EOF. */
	if (asma->size == 0)
		goto out;

	if (!asma->file) {
		ret = -EBADF;
		goto out;
	}

	ret = asma->file->f_op->read(asma->file, buf, len, pos);
	if (ret < 0)
		goto out;

	/** Update backing file pos, since f_ops->read() doesn't */
	asma->file->f_pos = *pos;

out:
	mutex_unlock(&ashmem_mutex);
	return ret;
}

static loff_t ashmem_llseek(struct file *file, loff_t offset, int origin)
{
	struct ashmem_area *asma = file->private_data;
	int ret;

	mutex_lock(&ashmem_mutex);

	if (asma->size == 0) {
		ret = -EINVAL;
		goto out;
	}

	if (!asma->file) {
		ret = -EBADF;
		goto out;
	}

	ret = asma->file->f_op->llseek(asma->file, offset, origin);
	if (ret < 0)
		goto out;

	/** Copy f_pos from backing file, since f_ops->llseek() sets it */
	file->f_pos = asma->file->f_pos;

out:
	mutex_unlock(&ashmem_mutex);
	return ret;
}

static inline vm_flags_t calc_vm_may_flags(unsigned long prot)
{
	return _calc_vm_trans(prot, PROT_READ,  VM_MAYREAD) |
	       _calc_vm_trans(prot, PROT_WRITE, VM_MAYWRITE) |
	       _calc_vm_trans(prot, PROT_EXEC,  VM_MAYEXEC);
}

static int ashmem_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct ashmem_area *asma = file->private_data;
	int ret = 0;

	mutex_lock(&ashmem_mutex);

	/* user needs to SET_SIZE before mapping */
	if (unlikely(!asma->size)) {
		ret = -EINVAL;
		goto out;
	}

	/* requested protection bits must match our allowed protection mask */
	if (unlikely((vma->vm_flags & ~calc_vm_prot_bits(asma->prot_mask)) &
		     calc_vm_prot_bits(PROT_MASK))) {
		ret = -EPERM;
		goto out;
	}
	vma->vm_flags &= ~calc_vm_may_flags(~asma->prot_mask);

	if (!asma->file) {
		char *name = ASHMEM_NAME_DEF;
		struct file *vmfile;

		if (asma->name[ASHMEM_NAME_PREFIX_LEN] != '\0')
			name = asma->name;

		/* ... and allocate the backing shmem file */
		vmfile = shmem_file_setup(name, asma->size, vma->vm_flags);
		if (unlikely(IS_ERR(vmfile))) {
			ret = PTR_ERR(vmfile);
			goto out;
		}
		asma->file = vmfile;
	}
	get_file(asma->file);

	/*
	 * XXX - Reworked to use shmem_zero_setup() instead of
	 * shmem_set_file while we're in staging. -jstultz
	 */
	if (vma->vm_flags & VM_SHARED) {
		ret = shmem_zero_setup(vma);
		if (ret) {
			fput(asma->file);
			goto out;
		}
	}

	if (vma->vm_file)
		fput(vma->vm_file);
	vma->vm_file = asma->file;
	vma->vm_flags |= VM_CAN_NONLINEAR;

out:
	mutex_unlock(&ashmem_mutex);
	return ret;
}

/*
 * ashmem_shrink - our cache shrinker, called from mm/vmscan.c :: shrink_slab
 *
 * 'nr_to_scan' is the number of objects (pages) to prune, or 0 to query how
 * many objects (pages) we have in total.
 *
 * 'gfp_mask' is the mask of the allocation that got us into this mess.
 *
 * Return value is the number of objects (pages) remaining, or -1 if we cannot
 * proceed without risk of deadlock (due to gfp_mask).
 *
 * We approximate LRU via least-recently-unpinned, jettisoning unpinned partial
 * chunks of ashmem regions LRU-wise one-at-a-time until we hit 'nr_to_scan'
 * pages freed.
 */
static int ashmem_shrink(struct shrinker *s, struct shrink_control *sc)
{
	struct ashmem_range *range, *next;

	/* We might recurse into filesystem code, so bail out if necessary */
	if (sc->nr_to_scan && !(sc->gfp_mask & __GFP_FS))
		return -1;
	if (!sc->nr_to_scan)
		return lru_count;

	mutex_lock(&ashmem_mutex);
	list_for_each_entry_safe(range, next, &ashmem_lru_list, lru) {
		loff_t start = range->pgstart * PAGE_SIZE;
		loff_t end = (range->pgend + 1) * PAGE_SIZE;

		do_fallocate(range->asma->file,
				FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
				start, end - start);
		range->purged = ASHMEM_WAS_PURGED;
		lru_del(range);

		sc->nr_to_scan -= range_size(range);
		if (sc->nr_to_scan <= 0)
			break;
	}
	mutex_unlock(&ashmem_mutex);

	return lru_count;
}

static struct shrinker ashmem_shrinker = {
	.shrink = ashmem_shrink,
	.seeks = DEFAULT_SEEKS * 4,
};

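/*
 * To make the accounting concrete (hypothetical numbers): with 48 pages on
 * the LRU and sc->nr_to_scan == 32, ashmem_shrink() hole-punches unpinned
 * ranges in LRU order, marking each ASHMEM_WAS_PURGED, until at least 32
 * pages have been reclaimed; a query with nr_to_scan == 0 simply reports
 * lru_count (48) without purging anything.
 */
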
static int set_prot_mask(struct ashmem_area *asma, unsigned long prot)
{
	int ret = 0;

	mutex_lock(&ashmem_mutex);

	/* the user can only remove, not add, protection bits */
	if (unlikely((asma->prot_mask & prot) != prot)) {
		ret = -EINVAL;
		goto out;
	}

	/* does the application expect PROT_READ to imply PROT_EXEC? */
	if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC))
		prot |= PROT_EXEC;

	asma->prot_mask = prot;

out:
	mutex_unlock(&ashmem_mutex);
	return ret;
}

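/*
 * Example of the "remove only" rule above (hypothetical sequence): once
 * ASHMEM_SET_PROT_MASK has restricted the mask to PROT_READ, a later request
 * for PROT_READ | PROT_WRITE fails with -EINVAL, and an mmap() asking for
 * PROT_WRITE is rejected with -EPERM by the check in ashmem_mmap().
 */
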
static int set_name(struct ashmem_area *asma, void __user *name)
{
	int ret = 0;

	mutex_lock(&ashmem_mutex);

	/* cannot change an existing mapping's name */
	if (unlikely(asma->file)) {
		ret = -EINVAL;
		goto out;
	}

	if (unlikely(copy_from_user(asma->name + ASHMEM_NAME_PREFIX_LEN,
				    name, ASHMEM_NAME_LEN)))
		ret = -EFAULT;
	asma->name[ASHMEM_FULL_NAME_LEN - 1] = '\0';

out:
	mutex_unlock(&ashmem_mutex);
	return ret;
}

static int get_name(struct ashmem_area *asma, void __user *name)
{
	int ret = 0;

	mutex_lock(&ashmem_mutex);
	if (asma->name[ASHMEM_NAME_PREFIX_LEN] != '\0') {
		size_t len;

		/*
		 * Copying only `len', instead of ASHMEM_NAME_LEN, bytes
		 * prevents us from revealing one user's stack to another.
		 */
		len = strlen(asma->name + ASHMEM_NAME_PREFIX_LEN) + 1;
		if (unlikely(copy_to_user(name,
				asma->name + ASHMEM_NAME_PREFIX_LEN, len)))
			ret = -EFAULT;
	} else {
		if (unlikely(copy_to_user(name, ASHMEM_NAME_DEF,
					  sizeof(ASHMEM_NAME_DEF))))
			ret = -EFAULT;
	}
	mutex_unlock(&ashmem_mutex);

	return ret;
}

/*
 * ashmem_pin - pin the given ashmem region, returning whether it was
 * previously purged (ASHMEM_WAS_PURGED) or not (ASHMEM_NOT_PURGED).
 *
 * Caller must hold ashmem_mutex.
 */
static int ashmem_pin(struct ashmem_area *asma, size_t pgstart, size_t pgend)
{
	struct ashmem_range *range, *next;
	int ret = ASHMEM_NOT_PURGED;

	list_for_each_entry_safe(range, next, &asma->unpinned_list, unpinned) {
		/* moved past last applicable page; we can short circuit */
		if (range_before_page(range, pgstart))
			break;

		/*
		 * The user can ask us to pin pages that span multiple ranges,
		 * or to pin pages that aren't even unpinned, so this is messy.
		 *
		 * Four cases are possible:
		 * 1. The requested range subsumes an existing range, so we
		 *    just remove the entire matching range.
		 * 2. The requested range overlaps the start of an existing
		 *    range, so we just update that range.
		 * 3. The requested range overlaps the end of an existing
		 *    range, so we just update that range.
		 * 4. The requested range punches a hole in an existing range,
		 *    so we have to update one side of the range and then
		 *    create a new range for the other side.
		 */
		if (page_range_in_range(range, pgstart, pgend)) {
			ret |= range->purged;

			/* Case #1: Easy. Just nuke the whole thing. */
			if (page_range_subsumes_range(range, pgstart, pgend)) {
				range_del(range);
				continue;
			}

			/* Case #2: We overlap from the start, so adjust it */
			if (range->pgstart >= pgstart) {
				range_shrink(range, pgend + 1, range->pgend);
				continue;
			}

			/* Case #3: We overlap from the rear, so adjust it */
			if (range->pgend <= pgend) {
				range_shrink(range, range->pgstart,
					     pgstart - 1);
				continue;
			}

			/*
			 * Case #4: We eat a chunk out of the middle. A bit
			 * more complicated, we allocate a new range for the
			 * second half and adjust the first chunk's endpoint.
			 */
			range_alloc(asma, range, range->purged,
				    pgend + 1, range->pgend);
			range_shrink(range, range->pgstart, pgstart - 1);
			break;
		}
	}

	return ret;
}

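/*
 * A worked example of the four cases (hypothetical pages): given an existing
 * unpinned range covering [0, 9],
 *	pinning [0, 9] hits case #1 and deletes the range outright;
 *	pinning [0, 3] hits case #2 and shrinks it to [4, 9];
 *	pinning [6, 9] hits case #3 and shrinks it to [0, 5];
 *	pinning [3, 5] hits case #4, shrinking it to [0, 2] and allocating a
 *	new range for [6, 9].
 */
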
/*
 * ashmem_unpin - unpin the given range of pages. Returns zero on success.
 *
 * Caller must hold ashmem_mutex.
 */
static int ashmem_unpin(struct ashmem_area *asma, size_t pgstart, size_t pgend)
{
	struct ashmem_range *range, *next;
	unsigned int purged = ASHMEM_NOT_PURGED;

restart:
	list_for_each_entry_safe(range, next, &asma->unpinned_list, unpinned) {
		/* short circuit: this is our insertion point */
		if (range_before_page(range, pgstart))
			break;

		/*
		 * The user can ask us to unpin pages that are already entirely
		 * or partially pinned. We handle those two cases here.
		 */
		if (page_range_subsumed_by_range(range, pgstart, pgend))
			return 0;
		if (page_range_in_range(range, pgstart, pgend)) {
			pgstart = min_t(size_t, range->pgstart, pgstart);
			pgend = max_t(size_t, range->pgend, pgend);
			purged |= range->purged;
			range_del(range);
			goto restart;
		}
	}

	return range_alloc(asma, range, purged, pgstart, pgend);
}

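/*
 * Coalescing example (hypothetical pages): if [2, 5] is already unpinned and
 * the caller unpins [4, 8], the overlapping range is deleted, the request is
 * widened to [2, 8], and a single new range covering [2, 8] is inserted in
 * its place, inheriting any prior purged state.
 */
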
/*
 * ashmem_get_pin_status - Returns ASHMEM_IS_UNPINNED if _any_ pages in the
 * given interval are unpinned and ASHMEM_IS_PINNED otherwise.
 *
 * Caller must hold ashmem_mutex.
 */
static int ashmem_get_pin_status(struct ashmem_area *asma, size_t pgstart,
				 size_t pgend)
{
	struct ashmem_range *range;
	int ret = ASHMEM_IS_PINNED;

	list_for_each_entry(range, &asma->unpinned_list, unpinned) {
		if (range_before_page(range, pgstart))
			break;
		if (page_range_in_range(range, pgstart, pgend)) {
			ret = ASHMEM_IS_UNPINNED;
			break;
		}
	}

	return ret;
}

static int ashmem_pin_unpin(struct ashmem_area *asma, unsigned long cmd,
			    void __user *p)
{
	struct ashmem_pin pin;
	size_t pgstart, pgend;
	int ret = -EINVAL;

	if (unlikely(!asma->file))
		return -EINVAL;

	if (unlikely(copy_from_user(&pin, p, sizeof(pin))))
		return -EFAULT;

	/* per custom, you can pass zero for len to mean "everything onward" */
	if (!pin.len)
		pin.len = PAGE_ALIGN(asma->size) - pin.offset;

	if (unlikely((pin.offset | pin.len) & ~PAGE_MASK))
		return -EINVAL;

	if (unlikely(((__u32) -1) - pin.offset < pin.len))
		return -EINVAL;

	if (unlikely(PAGE_ALIGN(asma->size) < pin.offset + pin.len))
		return -EINVAL;

	pgstart = pin.offset / PAGE_SIZE;
	pgend = pgstart + (pin.len / PAGE_SIZE) - 1;

	mutex_lock(&ashmem_mutex);

	switch (cmd) {
	case ASHMEM_PIN:
		ret = ashmem_pin(asma, pgstart, pgend);
		break;
	case ASHMEM_UNPIN:
		ret = ashmem_unpin(asma, pgstart, pgend);
		break;
	case ASHMEM_GET_PIN_STATUS:
		ret = ashmem_get_pin_status(asma, pgstart, pgend);
		break;
	}

	mutex_unlock(&ashmem_mutex);

	return ret;
}

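/*
 * Example of the argument convention (hypothetical values, 4 KiB pages): for
 * a 16384-byte region, an ashmem_pin of { .offset = 8192, .len = 0 } means
 * "from 8192 to the end", i.e. pages 2..3.  Both offset and len must be
 * page-aligned and must not overflow or run past PAGE_ALIGN(asma->size),
 * otherwise the ioctl fails with -EINVAL.
 */
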
static long ashmem_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	struct ashmem_area *asma = file->private_data;
	long ret = -ENOTTY;

	switch (cmd) {
	case ASHMEM_SET_NAME:
		ret = set_name(asma, (void __user *) arg);
		break;
	case ASHMEM_GET_NAME:
		ret = get_name(asma, (void __user *) arg);
		break;
	case ASHMEM_SET_SIZE:
		ret = -EINVAL;
		if (!asma->file) {
			ret = 0;
			asma->size = (size_t) arg;
		}
		break;
	case ASHMEM_GET_SIZE:
		ret = asma->size;
		break;
	case ASHMEM_SET_PROT_MASK:
		ret = set_prot_mask(asma, arg);
		break;
	case ASHMEM_GET_PROT_MASK:
		ret = asma->prot_mask;
		break;
	case ASHMEM_PIN:
	case ASHMEM_UNPIN:
	case ASHMEM_GET_PIN_STATUS:
		ret = ashmem_pin_unpin(asma, cmd, (void __user *) arg);
		break;
	case ASHMEM_PURGE_ALL_CACHES:
		ret = -EPERM;
		if (capable(CAP_SYS_ADMIN)) {
			struct shrink_control sc = {
				.gfp_mask = GFP_KERNEL,
				.nr_to_scan = 0,
			};
			ret = ashmem_shrink(&ashmem_shrinker, &sc);
			sc.nr_to_scan = ret;
			ashmem_shrink(&ashmem_shrinker, &sc);
		}
		break;
	}

	return ret;
}

static const struct file_operations ashmem_fops = {
	.owner = THIS_MODULE,
	.open = ashmem_open,
	.release = ashmem_release,
	.read = ashmem_read,
	.llseek = ashmem_llseek,
	.mmap = ashmem_mmap,
	.unlocked_ioctl = ashmem_ioctl,
	.compat_ioctl = ashmem_ioctl,
};

static struct miscdevice ashmem_misc = {
	.minor = MISC_DYNAMIC_MINOR,
	.name = "ashmem",
	.fops = &ashmem_fops,
};

static int __init ashmem_init(void)
{
	int ret;

	ashmem_area_cachep = kmem_cache_create("ashmem_area_cache",
					  sizeof(struct ashmem_area),
					  0, 0, NULL);
	if (unlikely(!ashmem_area_cachep)) {
		pr_err("failed to create slab cache\n");
		return -ENOMEM;
	}

	ashmem_range_cachep = kmem_cache_create("ashmem_range_cache",
					  sizeof(struct ashmem_range),
					  0, 0, NULL);
	if (unlikely(!ashmem_range_cachep)) {
		pr_err("failed to create slab cache\n");
		return -ENOMEM;
	}

	ret = misc_register(&ashmem_misc);
	if (unlikely(ret)) {
		pr_err("failed to register misc device!\n");
		return ret;
	}

	register_shrinker(&ashmem_shrinker);

	pr_info("initialized\n");

	return 0;
}

static void __exit ashmem_exit(void)
{
	int ret;

	unregister_shrinker(&ashmem_shrinker);

	ret = misc_deregister(&ashmem_misc);
	if (unlikely(ret))
		pr_err("failed to unregister misc device!\n");

	kmem_cache_destroy(ashmem_range_cachep);
	kmem_cache_destroy(ashmem_area_cachep);

	pr_info("unloaded\n");
}

module_init(ashmem_init);
module_exit(ashmem_exit);

MODULE_LICENSE("GPL");