/*
** Anonymous Shared Memory Subsystem, ashmem
**
** Copyright (C) 2008 Google, Inc.
**
** Robert Love <rlove@google.com>
**
** This software is licensed under the terms of the GNU General Public
** License version 2, as published by the Free Software Foundation, and
** may be copied, distributed, and modified under those terms.
**
** This program is distributed in the hope that it will be useful,
** but WITHOUT ANY WARRANTY; without even the implied warranty of
** MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
** GNU General Public License for more details.
*/
#include <linux/module.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/miscdevice.h>
#include <linux/security.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/uaccess.h>
#include <linux/personality.h>
#include <linux/bitops.h>
#include <linux/mutex.h>
#include <linux/shmem_fs.h>
#include <linux/ashmem.h>
#define ASHMEM_NAME_PREFIX "dev/ashmem/"
#define ASHMEM_NAME_PREFIX_LEN (sizeof(ASHMEM_NAME_PREFIX) - 1)
#define ASHMEM_FULL_NAME_LEN (ASHMEM_NAME_LEN + ASHMEM_NAME_PREFIX_LEN)
/*
 * ashmem_area - anonymous shared memory area
 * Lifecycle: From our parent file's open() until its release()
 * Locking: Protected by `ashmem_mutex'
 * Big Note: Mappings do NOT pin this structure; it dies on close()
 */
struct ashmem_area {
        char name[ASHMEM_FULL_NAME_LEN]; /* optional name for /proc/pid/maps */
        struct list_head unpinned_list;  /* list of this area's unpinned ranges */
        struct file *file;               /* the shmem-based backing file */
        size_t size;                     /* size of the mapping, in bytes */
        unsigned long prot_mask;         /* allowed prot bits, as vm_flags */
};
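
/*
 * Illustrative sketch (not part of the driver) of how userspace typically
 * creates one of these areas, assuming the misc device is exposed as
 * /dev/ashmem and using the ioctls from <linux/ashmem.h>:
 *
 *      int fd = open("/dev/ashmem", O_RDWR);
 *      ioctl(fd, ASHMEM_SET_NAME, "example-region");   // optional, pre-mmap
 *      ioctl(fd, ASHMEM_SET_SIZE, 4096);               // required before mmap
 *      void *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
 *
 * The backing shmem file is only allocated at mmap() time (see ashmem_mmap),
 * and both name and size become fixed once asma->file exists.
 */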
/*
 * ashmem_range - represents an interval of unpinned (evictable) pages
 * Lifecycle: From unpin to pin
 * Locking: Protected by `ashmem_mutex'
 */
struct ashmem_range {
        struct list_head lru;           /* entry in LRU list */
        struct list_head unpinned;      /* entry in its area's unpinned list */
        struct ashmem_area *asma;       /* associated area */
        size_t pgstart;                 /* starting page, inclusive */
        size_t pgend;                   /* ending page, inclusive */
        unsigned int purged;            /* ASHMEM_NOT_PURGED or ASHMEM_WAS_PURGED */
};
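
/*
 * Worked example of the lifecycle above: unpinning pages [4, 7] of an area
 * creates one ashmem_range {pgstart = 4, pgend = 7, purged = ASHMEM_NOT_PURGED}
 * on both the area's unpinned_list and the global LRU (lru_count += 4). If
 * memory pressure later drives ashmem_shrink() over it, the backing pages are
 * truncated away, purged becomes ASHMEM_WAS_PURGED and the range drops off the
 * LRU. A subsequent ASHMEM_PIN of those pages deletes the range and reports
 * ASHMEM_WAS_PURGED so the caller knows the data is gone.
 */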
/* LRU list of unpinned pages, protected by ashmem_mutex */
static LIST_HEAD(ashmem_lru_list);

/* Count of pages on our LRU list, protected by ashmem_mutex */
static unsigned long lru_count;
/*
 * ashmem_mutex - protects the list of and each individual ashmem_area
 *
 * Lock Ordering: ashmem_mutex -> i_mutex -> i_alloc_sem
 */
static DEFINE_MUTEX(ashmem_mutex);
static struct kmem_cache *ashmem_area_cachep __read_mostly;
static struct kmem_cache *ashmem_range_cachep __read_mostly;
#define range_size(range) \
        ((range)->pgend - (range)->pgstart + 1)

#define range_on_lru(range) \
        ((range)->purged == ASHMEM_NOT_PURGED)

#define page_range_subsumes_range(range, start, end) \
        (((range)->pgstart >= (start)) && ((range)->pgend <= (end)))

#define page_range_subsumed_by_range(range, start, end) \
        (((range)->pgstart <= (start)) && ((range)->pgend >= (end)))

#define page_in_range(range, page) \
        (((range)->pgstart <= (page)) && ((range)->pgend >= (page)))

#define page_range_in_range(range, start, end) \
        (page_in_range(range, start) || page_in_range(range, end) || \
                page_range_subsumes_range(range, start, end))

#define range_before_page(range, page) \
        ((range)->pgend < (page))

#define PROT_MASK       (PROT_EXEC | PROT_READ | PROT_WRITE)
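
/*
 * Quick worked example of the interval helpers, for an existing range
 * covering pages [3, 9] and a request for pages [5, 12]:
 *
 *      range_size(range)                          -> 9 - 3 + 1 = 7
 *      page_range_subsumes_range(range, 5, 12)    -> false (3 < 5)
 *      page_range_subsumed_by_range(range, 5, 12) -> false (9 < 12)
 *      page_in_range(range, 5)                    -> true, hence
 *      page_range_in_range(range, 5, 12)          -> true (they overlap)
 *      range_before_page(range, 5)                -> false (9 >= 5)
 *
 * All bounds are inclusive page indices, not byte offsets.
 */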
static inline void lru_add(struct ashmem_range *range)
{
        list_add_tail(&range->lru, &ashmem_lru_list);
        lru_count += range_size(range);
}
static inline void lru_del(struct ashmem_range *range)
{
        list_del(&range->lru);
        lru_count -= range_size(range);
}
/*
 * range_alloc - allocate and initialize a new ashmem_range structure
 *
 * 'asma' - associated ashmem_area
 * 'prev_range' - the previous ashmem_range in the sorted asma->unpinned list
 * 'purged' - initial purge value (ASHMEM_NOT_PURGED or ASHMEM_WAS_PURGED)
 * 'start' - starting page, inclusive
 * 'end' - ending page, inclusive
 *
 * Caller must hold ashmem_mutex.
 */
static int range_alloc(struct ashmem_area *asma,
                       struct ashmem_range *prev_range, unsigned int purged,
                       size_t start, size_t end)
{
        struct ashmem_range *range;

        range = kmem_cache_zalloc(ashmem_range_cachep, GFP_KERNEL);
        if (unlikely(!range))
                return -ENOMEM;

        range->asma = asma;
        range->pgstart = start;
        range->pgend = end;
        range->purged = purged;

        list_add_tail(&range->unpinned, &prev_range->unpinned);

        if (range_on_lru(range))
                lru_add(range);

        return 0;
}
static void range_del(struct ashmem_range *range)
{
        list_del(&range->unpinned);
        if (range_on_lru(range))
                lru_del(range);
        kmem_cache_free(ashmem_range_cachep, range);
}
/*
 * range_shrink - shrinks a range
 *
 * Caller must hold ashmem_mutex.
 */
static inline void range_shrink(struct ashmem_range *range,
                                size_t start, size_t end)
{
        size_t pre = range_size(range);

        range->pgstart = start;
        range->pgend = end;

        if (range_on_lru(range))
                lru_count -= pre - range_size(range);
}
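
/*
 * For example, shrinking a not-yet-purged range from [2, 9] (8 pages) down to
 * [2, 5] (4 pages) drops lru_count by pre - range_size() = 8 - 4 = 4, keeping
 * the global count of evictable pages in step with the new bounds. Purged
 * ranges are no longer on the LRU, so their pages are not counted at all.
 */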
static int ashmem_open(struct inode *inode, struct file *file)
{
        struct ashmem_area *asma;
        int ret;

        ret = generic_file_open(inode, file);
        if (unlikely(ret))
                return ret;

        asma = kmem_cache_zalloc(ashmem_area_cachep, GFP_KERNEL);
        if (unlikely(!asma))
                return -ENOMEM;

        INIT_LIST_HEAD(&asma->unpinned_list);
        memcpy(asma->name, ASHMEM_NAME_PREFIX, ASHMEM_NAME_PREFIX_LEN);
        asma->prot_mask = PROT_MASK;
        file->private_data = asma;

        return 0;
}
static int ashmem_release(struct inode *ignored, struct file *file)
{
        struct ashmem_area *asma = file->private_data;
        struct ashmem_range *range, *next;

        mutex_lock(&ashmem_mutex);
        list_for_each_entry_safe(range, next, &asma->unpinned_list, unpinned)
                range_del(range);
        mutex_unlock(&ashmem_mutex);

        if (asma->file)
                fput(asma->file);
        kmem_cache_free(ashmem_area_cachep, asma);

        return 0;
}
static ssize_t ashmem_read(struct file *file, char __user *buf,
                           size_t len, loff_t *pos)
{
        struct ashmem_area *asma = file->private_data;
        int ret = 0;

        mutex_lock(&ashmem_mutex);

        /* If size is not set, or set to 0, always return EOF. */
        if (asma->size == 0)
                goto out;

        if (!asma->file) {
                ret = -EBADF;
                goto out;
        }

        ret = asma->file->f_op->read(asma->file, buf, len, pos);
        if (ret < 0)
                goto out;

        /* Update backing file pos, since f_ops->read() doesn't */
        asma->file->f_pos = *pos;

out:
        mutex_unlock(&ashmem_mutex);
        return ret;
}
static loff_t ashmem_llseek(struct file *file, loff_t offset, int origin)
{
        struct ashmem_area *asma = file->private_data;
        int ret;

        mutex_lock(&ashmem_mutex);

        if (asma->size == 0) {
                ret = -EINVAL;
                goto out;
        }

        if (!asma->file) {
                ret = -EBADF;
                goto out;
        }

        ret = asma->file->f_op->llseek(asma->file, offset, origin);
        if (ret < 0)
                goto out;

        /* Copy f_pos from backing file, since f_ops->llseek() sets it */
        file->f_pos = asma->file->f_pos;

out:
        mutex_unlock(&ashmem_mutex);
        return ret;
}
static inline unsigned long calc_vm_may_flags(unsigned long prot)
{
        return _calc_vm_trans(prot, PROT_READ,  VM_MAYREAD) |
               _calc_vm_trans(prot, PROT_WRITE, VM_MAYWRITE) |
               _calc_vm_trans(prot, PROT_EXEC,  VM_MAYEXEC);
}
static int ashmem_mmap(struct file *file, struct vm_area_struct *vma)
{
        struct ashmem_area *asma = file->private_data;
        int ret = 0;

        mutex_lock(&ashmem_mutex);

        /* user needs to SET_SIZE before mapping */
        if (unlikely(!asma->size)) {
                ret = -EINVAL;
                goto out;
        }

        /* requested protection bits must match our allowed protection mask */
        if (unlikely((vma->vm_flags & ~calc_vm_prot_bits(asma->prot_mask)) &
                     calc_vm_prot_bits(PROT_MASK))) {
                ret = -EPERM;
                goto out;
        }
        vma->vm_flags &= ~calc_vm_may_flags(~asma->prot_mask);

        if (!asma->file) {
                char *name = ASHMEM_NAME_DEF;
                struct file *vmfile;

                if (asma->name[ASHMEM_NAME_PREFIX_LEN] != '\0')
                        name = asma->name;

                /* ... and allocate the backing shmem file */
                vmfile = shmem_file_setup(name, asma->size, vma->vm_flags);
                if (unlikely(IS_ERR(vmfile))) {
                        ret = PTR_ERR(vmfile);
                        goto out;
                }
                asma->file = vmfile;
        }
        get_file(asma->file);

        if (vma->vm_flags & VM_SHARED) {
                shmem_set_file(vma, asma->file);
        } else {
                if (vma->vm_file)
                        fput(vma->vm_file);
                vma->vm_file = asma->file;
        }
        vma->vm_flags |= VM_CAN_NONLINEAR;

out:
        mutex_unlock(&ashmem_mutex);
        return ret;
}
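
/*
 * Illustrative consequence of the checks above: if the owner has narrowed the
 * mask with ASHMEM_SET_PROT_MASK to PROT_READ, then
 *
 *      mmap(NULL, size, PROT_READ, MAP_SHARED, fd, 0);              // succeeds
 *      mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0); // fails, -EPERM
 *
 * because VM_WRITE falls outside calc_vm_prot_bits(asma->prot_mask). Clearing
 * the matching VM_MAY* bits likewise keeps a later mprotect(PROT_WRITE) from
 * re-adding access the mask forbids.
 */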
/*
 * ashmem_shrink - our cache shrinker, called from mm/vmscan.c :: shrink_slab
 *
 * 'nr_to_scan' is the number of objects (pages) to prune, or 0 to query how
 * many objects (pages) we have in total.
 *
 * 'gfp_mask' is the mask of the allocation that got us into this mess.
 *
 * Return value is the number of objects (pages) remaining, or -1 if we cannot
 * proceed without risk of deadlock (due to gfp_mask).
 *
 * We approximate LRU via least-recently-unpinned, jettisoning unpinned partial
 * chunks of ashmem regions LRU-wise one-at-a-time until we hit 'nr_to_scan'
 * pages freed.
 */
static int ashmem_shrink(struct shrinker *s, struct shrink_control *sc)
{
        struct ashmem_range *range, *next;

        /* We might recurse into filesystem code, so bail out if necessary */
        if (sc->nr_to_scan && !(sc->gfp_mask & __GFP_FS))
                return -1;
        if (!sc->nr_to_scan)
                return lru_count;

        mutex_lock(&ashmem_mutex);
        list_for_each_entry_safe(range, next, &ashmem_lru_list, lru) {
                struct inode *inode = range->asma->file->f_dentry->d_inode;
                loff_t start = range->pgstart * PAGE_SIZE;
                loff_t end = (range->pgend + 1) * PAGE_SIZE - 1;

                vmtruncate_range(inode, start, end);
                range->purged = ASHMEM_WAS_PURGED;
                lru_del(range);

                sc->nr_to_scan -= range_size(range);
                if (sc->nr_to_scan <= 0)
                        break;
        }
        mutex_unlock(&ashmem_mutex);

        return lru_count;
}
static struct shrinker ashmem_shrinker = {
        .shrink = ashmem_shrink,
        .seeks = DEFAULT_SEEKS * 4,
};
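
/*
 * Sketch of how this shrinker is exercised: shrink_slab() first calls it with
 * nr_to_scan == 0 to ask how many pages could be given back (answer:
 * lru_count), then calls again with a positive nr_to_scan to actually purge.
 * ASHMEM_PURGE_ALL_CACHES below follows the same two-step pattern by hand:
 *
 *      struct shrink_control sc = { .gfp_mask = GFP_KERNEL, .nr_to_scan = 0 };
 *      int nr = ashmem_shrink(&ashmem_shrinker, &sc);  // query
 *      sc.nr_to_scan = nr;
 *      ashmem_shrink(&ashmem_shrinker, &sc);           // purge everything
 */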
static int set_prot_mask(struct ashmem_area *asma, unsigned long prot)
{
        int ret = 0;

        mutex_lock(&ashmem_mutex);

        /* the user can only remove, not add, protection bits */
        if (unlikely((asma->prot_mask & prot) != prot)) {
                ret = -EINVAL;
                goto out;
        }

        /* does the application expect PROT_READ to imply PROT_EXEC? */
        if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC))
                prot |= PROT_EXEC;

        asma->prot_mask = prot;

out:
        mutex_unlock(&ashmem_mutex);
        return ret;
}
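
/*
 * Small worked example of the rule above. Starting from the default mask
 * PROT_READ | PROT_WRITE | PROT_EXEC:
 *
 *      ioctl(fd, ASHMEM_SET_PROT_MASK, PROT_READ);               // ok, narrows
 *      ioctl(fd, ASHMEM_SET_PROT_MASK, PROT_READ | PROT_WRITE);  // now -EINVAL
 *
 * because PROT_WRITE was already dropped and bits can never be re-added. Under
 * the READ_IMPLIES_EXEC personality, PROT_EXEC is silently kept alongside
 * PROT_READ so old binaries that expect readable-implies-executable mappings
 * keep working.
 */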
static int set_name(struct ashmem_area *asma, void __user *name)
{
        int ret = 0;

        mutex_lock(&ashmem_mutex);

        /* cannot change an existing mapping's name */
        if (unlikely(asma->file)) {
                ret = -EINVAL;
                goto out;
        }

        if (unlikely(copy_from_user(asma->name + ASHMEM_NAME_PREFIX_LEN,
                                    name, ASHMEM_NAME_LEN)))
                ret = -EFAULT;
        asma->name[ASHMEM_FULL_NAME_LEN - 1] = '\0';

out:
        mutex_unlock(&ashmem_mutex);
        return ret;
}
static int get_name(struct ashmem_area *asma, void __user *name)
{
        int ret = 0;

        mutex_lock(&ashmem_mutex);
        if (asma->name[ASHMEM_NAME_PREFIX_LEN] != '\0') {
                size_t len;

                /*
                 * Copying only `len', instead of ASHMEM_NAME_LEN, bytes
                 * prevents us from revealing one user's stack to another.
                 */
                len = strlen(asma->name + ASHMEM_NAME_PREFIX_LEN) + 1;
                if (unlikely(copy_to_user(name,
                                asma->name + ASHMEM_NAME_PREFIX_LEN, len)))
                        ret = -EFAULT;
        } else {
                if (unlikely(copy_to_user(name, ASHMEM_NAME_DEF,
                                          sizeof(ASHMEM_NAME_DEF))))
                        ret = -EFAULT;
        }
        mutex_unlock(&ashmem_mutex);

        return ret;
}
/*
 * ashmem_pin - pin the given ashmem region, returning whether it was
 * previously purged (ASHMEM_WAS_PURGED) or not (ASHMEM_NOT_PURGED).
 *
 * Caller must hold ashmem_mutex.
 */
static int ashmem_pin(struct ashmem_area *asma, size_t pgstart, size_t pgend)
{
        struct ashmem_range *range, *next;
        int ret = ASHMEM_NOT_PURGED;

        list_for_each_entry_safe(range, next, &asma->unpinned_list, unpinned) {
                /* moved past last applicable page; we can short circuit */
                if (range_before_page(range, pgstart))
                        break;

                /*
                 * The user can ask us to pin pages that span multiple ranges,
                 * or to pin pages that aren't even unpinned, so this is messy.
                 *
                 * Four cases:
                 * 1. The requested range subsumes an existing range, so we
                 *    just remove the entire matching range.
                 * 2. The requested range overlaps the start of an existing
                 *    range, so we just update that range.
                 * 3. The requested range overlaps the end of an existing
                 *    range, so we just update that range.
                 * 4. The requested range punches a hole in an existing range,
                 *    so we have to update one side of the range and then
                 *    create a new range for the other side.
                 */
                if (page_range_in_range(range, pgstart, pgend)) {
                        ret |= range->purged;

                        /* Case #1: Easy. Just nuke the whole thing. */
                        if (page_range_subsumes_range(range, pgstart, pgend)) {
                                range_del(range);
                                continue;
                        }

                        /* Case #2: We overlap from the start, so adjust it */
                        if (range->pgstart >= pgstart) {
                                range_shrink(range, pgend + 1, range->pgend);
                                continue;
                        }

                        /* Case #3: We overlap from the rear, so adjust it */
                        if (range->pgend <= pgend) {
                                range_shrink(range, range->pgstart,
                                             pgstart - 1);
                                continue;
                        }

                        /*
                         * Case #4: We eat a chunk out of the middle. A bit
                         * more complicated, we allocate a new range for the
                         * second half and adjust the first chunk's endpoint.
                         */
                        range_alloc(asma, range, range->purged,
                                    pgend + 1, range->pgend);
                        range_shrink(range, range->pgstart, pgstart - 1);
                        break;
                }
        }

        return ret;
}
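
/*
 * Concrete instances of the four cases, for an existing unpinned range
 * covering pages [10, 19]:
 *
 *      Case 1: pin [10, 19] (or wider) -> the whole range is deleted.
 *      Case 2: pin [10, 14]            -> range shrinks to [15, 19].
 *      Case 3: pin [15, 19]            -> range shrinks to [10, 14].
 *      Case 4: pin [13, 16]            -> range shrinks to [10, 12] and a new
 *                                         range [17, 19] is allocated after it.
 *
 * In every case the return value ORs in range->purged, so the caller learns
 * whether any of the newly pinned pages had already been purged.
 */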
/*
 * ashmem_unpin - unpin the given range of pages. Returns zero on success.
 *
 * Caller must hold ashmem_mutex.
 */
static int ashmem_unpin(struct ashmem_area *asma, size_t pgstart, size_t pgend)
{
        struct ashmem_range *range, *next;
        unsigned int purged = ASHMEM_NOT_PURGED;

restart:
        list_for_each_entry_safe(range, next, &asma->unpinned_list, unpinned) {
                /* short circuit: this is our insertion point */
                if (range_before_page(range, pgstart))
                        break;

                /*
                 * The user can ask us to unpin pages that are already entirely
                 * or partially pinned. We handle those two cases here.
                 */
                if (page_range_subsumed_by_range(range, pgstart, pgend))
                        return 0;
                if (page_range_in_range(range, pgstart, pgend)) {
                        pgstart = min_t(size_t, range->pgstart, pgstart);
                        pgend = max_t(size_t, range->pgend, pgend);
                        purged |= range->purged;
                        range_del(range);
                        goto restart;
                }
        }

        return range_alloc(asma, range, purged, pgstart, pgend);
}
/*
 * ashmem_get_pin_status - Returns ASHMEM_IS_UNPINNED if _any_ pages in the
 * given interval are unpinned and ASHMEM_IS_PINNED otherwise.
 *
 * Caller must hold ashmem_mutex.
 */
static int ashmem_get_pin_status(struct ashmem_area *asma, size_t pgstart,
                                 size_t pgend)
{
        struct ashmem_range *range;
        int ret = ASHMEM_IS_PINNED;

        list_for_each_entry(range, &asma->unpinned_list, unpinned) {
                if (range_before_page(range, pgstart))
                        break;
                if (page_range_in_range(range, pgstart, pgend)) {
                        ret = ASHMEM_IS_UNPINNED;
                        break;
                }
        }

        return ret;
}
static int ashmem_pin_unpin(struct ashmem_area *asma, unsigned long cmd,
                            void __user *p)
{
        struct ashmem_pin pin;
        size_t pgstart, pgend;
        int ret = -EINVAL;

        if (unlikely(!asma->file))
                return -EINVAL;

        if (unlikely(copy_from_user(&pin, p, sizeof(pin))))
                return -EFAULT;

        /* per custom, you can pass zero for len to mean "everything onward" */
        if (!pin.len)
                pin.len = PAGE_ALIGN(asma->size) - pin.offset;

        if (unlikely((pin.offset | pin.len) & ~PAGE_MASK))
                return -EINVAL;

        if (unlikely(((__u32) -1) - pin.offset < pin.len))
                return -EINVAL;

        if (unlikely(PAGE_ALIGN(asma->size) < pin.offset + pin.len))
                return -EINVAL;

        pgstart = pin.offset / PAGE_SIZE;
        pgend = pgstart + (pin.len / PAGE_SIZE) - 1;

        mutex_lock(&ashmem_mutex);

        switch (cmd) {
        case ASHMEM_PIN:
                ret = ashmem_pin(asma, pgstart, pgend);
                break;
        case ASHMEM_UNPIN:
                ret = ashmem_unpin(asma, pgstart, pgend);
                break;
        case ASHMEM_GET_PIN_STATUS:
                ret = ashmem_get_pin_status(asma, pgstart, pgend);
                break;
        }

        mutex_unlock(&ashmem_mutex);

        return ret;
}
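
/*
 * Illustrative userspace sketch of the pin interface handled above, assuming
 * an fd that has already been sized and mmap()ed. offset/len are in bytes and
 * must be page-aligned; len == 0 means "from offset to the end of the area":
 *
 *      struct ashmem_pin pin = { .offset = 0, .len = 4096 };
 *      ioctl(fd, ASHMEM_UNPIN, &pin);  // first page becomes evictable
 *      ...
 *      if (ioctl(fd, ASHMEM_PIN, &pin) == ASHMEM_WAS_PURGED)
 *              regenerate_contents();  // hypothetical helper; data was lost
 */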
static long ashmem_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
        struct ashmem_area *asma = file->private_data;
        long ret = -ENOTTY;

        switch (cmd) {
        case ASHMEM_SET_NAME:
                ret = set_name(asma, (void __user *) arg);
                break;
        case ASHMEM_GET_NAME:
                ret = get_name(asma, (void __user *) arg);
                break;
        case ASHMEM_SET_SIZE:
                ret = -EINVAL;
                if (!asma->file) {
                        ret = 0;
                        asma->size = (size_t) arg;
                }
                break;
        case ASHMEM_GET_SIZE:
                ret = asma->size;
                break;
        case ASHMEM_SET_PROT_MASK:
                ret = set_prot_mask(asma, arg);
                break;
        case ASHMEM_GET_PROT_MASK:
                ret = asma->prot_mask;
                break;
        case ASHMEM_PIN:
        case ASHMEM_UNPIN:
        case ASHMEM_GET_PIN_STATUS:
                ret = ashmem_pin_unpin(asma, cmd, (void __user *) arg);
                break;
        case ASHMEM_PURGE_ALL_CACHES:
                ret = -EPERM;
                if (capable(CAP_SYS_ADMIN)) {
                        struct shrink_control sc = {
                                .gfp_mask = GFP_KERNEL,
                                .nr_to_scan = 0,
                        };
                        ret = ashmem_shrink(&ashmem_shrinker, &sc);
                        sc.nr_to_scan = ret;
                        ashmem_shrink(&ashmem_shrinker, &sc);
                }
                break;
        }

        return ret;
}
static struct file_operations ashmem_fops = {
        .owner = THIS_MODULE,
        .open = ashmem_open,
        .release = ashmem_release,
        .read = ashmem_read,
        .llseek = ashmem_llseek,
        .mmap = ashmem_mmap,
        .unlocked_ioctl = ashmem_ioctl,
        .compat_ioctl = ashmem_ioctl,
};
static struct miscdevice ashmem_misc = {
        .minor = MISC_DYNAMIC_MINOR,
        .name = "ashmem",
        .fops = &ashmem_fops,
};
static int __init ashmem_init(void)
{
        int ret;

        ashmem_area_cachep = kmem_cache_create("ashmem_area_cache",
                                               sizeof(struct ashmem_area),
                                               0, 0, NULL);
        if (unlikely(!ashmem_area_cachep)) {
                printk(KERN_ERR "ashmem: failed to create slab cache\n");
                return -ENOMEM;
        }

        ashmem_range_cachep = kmem_cache_create("ashmem_range_cache",
                                                sizeof(struct ashmem_range),
                                                0, 0, NULL);
        if (unlikely(!ashmem_range_cachep)) {
                printk(KERN_ERR "ashmem: failed to create slab cache\n");
                return -ENOMEM;
        }

        ret = misc_register(&ashmem_misc);
        if (unlikely(ret)) {
                printk(KERN_ERR "ashmem: failed to register misc device!\n");
                return ret;
        }

        register_shrinker(&ashmem_shrinker);

        printk(KERN_INFO "ashmem: initialized\n");

        return 0;
}
static void __exit ashmem_exit(void)
{
        int ret;

        unregister_shrinker(&ashmem_shrinker);

        ret = misc_deregister(&ashmem_misc);
        if (unlikely(ret))
                printk(KERN_ERR "ashmem: failed to unregister misc device!\n");

        kmem_cache_destroy(ashmem_range_cachep);
        kmem_cache_destroy(ashmem_area_cachep);

        printk(KERN_INFO "ashmem: unloaded\n");
}
module_init(ashmem_init);
module_exit(ashmem_exit);

MODULE_LICENSE("GPL");