/*
** Anonymous Shared Memory Subsystem, ashmem
**
** Copyright (C) 2008 Google, Inc.
**
** Robert Love <rlove@google.com>
**
** This software is licensed under the terms of the GNU General Public
** License version 2, as published by the Free Software Foundation, and
** may be copied, distributed, and modified under those terms.
**
** This program is distributed in the hope that it will be useful,
** but WITHOUT ANY WARRANTY; without even the implied warranty of
** MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
** GNU General Public License for more details.
*/

#include <linux/module.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/miscdevice.h>
#include <linux/security.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/uaccess.h>
#include <linux/personality.h>
#include <linux/bitops.h>
#include <linux/mutex.h>
#include <linux/shmem_fs.h>
#include "ashmem.h"

#define ASHMEM_NAME_PREFIX "dev/ashmem/"
#define ASHMEM_NAME_PREFIX_LEN (sizeof(ASHMEM_NAME_PREFIX) - 1)
#define ASHMEM_FULL_NAME_LEN (ASHMEM_NAME_LEN + ASHMEM_NAME_PREFIX_LEN)

/*
 * ashmem_area - anonymous shared memory area
 * Lifecycle: From our parent file's open() until its release()
 * Locking: Protected by `ashmem_mutex'
 * Big Note: Mappings do NOT pin this structure; it dies on close()
 */
struct ashmem_area {
	char name[ASHMEM_FULL_NAME_LEN];/* optional name in /proc/pid/maps */
	struct list_head unpinned_list;	/* list of this area's unpinned ranges */
	struct file *file;		/* the shmem-based backing file */
	size_t size;			/* size of the mapping, in bytes */
	unsigned long prot_mask;	/* allowed prot bits, as PROT_* flags */
};

/*
 * ashmem_range - represents an interval of unpinned (evictable) pages
 * Lifecycle: From unpin to pin
 * Locking: Protected by `ashmem_mutex'
 */
struct ashmem_range {
	struct list_head lru;		/* entry in the LRU list */
	struct list_head unpinned;	/* entry in its area's unpinned list */
	struct ashmem_area *asma;	/* associated ashmem_area */
	size_t pgstart;			/* starting page, inclusive */
	size_t pgend;			/* ending page, inclusive */
	unsigned int purged;		/* ASHMEM_NOT_PURGED or ASHMEM_WAS_PURGED */
};

/* LRU list of unpinned pages, protected by ashmem_mutex */
static LIST_HEAD(ashmem_lru_list);

/* Count of pages on our LRU list, protected by ashmem_mutex */
static unsigned long lru_count;

/*
 * ashmem_mutex - protects the list of, and each individual, ashmem_area
 *
 * Lock Ordering: ashmem_mutex -> i_mutex -> i_alloc_sem
 */
static DEFINE_MUTEX(ashmem_mutex);

static struct kmem_cache *ashmem_area_cachep __read_mostly;
static struct kmem_cache *ashmem_range_cachep __read_mostly;

#define range_size(range) \
	((range)->pgend - (range)->pgstart + 1)

#define range_on_lru(range) \
	((range)->purged == ASHMEM_NOT_PURGED)

#define page_range_subsumes_range(range, start, end) \
	(((range)->pgstart >= (start)) && ((range)->pgend <= (end)))

#define page_range_subsumed_by_range(range, start, end) \
	(((range)->pgstart <= (start)) && ((range)->pgend >= (end)))

#define page_in_range(range, page) \
	(((range)->pgstart <= (page)) && ((range)->pgend >= (page)))

#define page_range_in_range(range, start, end) \
	(page_in_range(range, start) || page_in_range(range, end) || \
	 page_range_subsumes_range(range, start, end))

#define range_before_page(range, page) \
	((range)->pgend < (page))

#define PROT_MASK	(PROT_EXEC | PROT_READ | PROT_WRITE)
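
/*
 * A worked example of the interval macros above, for illustration: given a
 * range spanning pages [4, 9], range_size() is 6, page_in_range(range, 6)
 * is true, page_range_subsumes_range(range, 2, 12) is true (the whole range
 * lies inside the requested interval), page_range_subsumed_by_range(range,
 * 5, 7) is true (the request lies entirely inside the range),
 * page_range_in_range(range, 8, 12) is true (partial overlap), and
 * range_before_page(range, 10) is true (the range ends before page 10).
 */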

static inline void lru_add(struct ashmem_range *range)
{
	list_add_tail(&range->lru, &ashmem_lru_list);
	lru_count += range_size(range);
}

static inline void lru_del(struct ashmem_range *range)
{
	list_del(&range->lru);
	lru_count -= range_size(range);
}

/*
 * range_alloc - allocate and initialize a new ashmem_range structure
 *
 * 'asma' - associated ashmem_area
 * 'prev_range' - the previous ashmem_range in the sorted asma->unpinned list
 * 'purged' - initial purge value (ASHMEM_NOT_PURGED or ASHMEM_WAS_PURGED)
 * 'start' - starting page, inclusive
 * 'end' - ending page, inclusive
 *
 * Caller must hold ashmem_mutex.
 */
static int range_alloc(struct ashmem_area *asma,
		       struct ashmem_range *prev_range, unsigned int purged,
		       size_t start, size_t end)
{
	struct ashmem_range *range;

	range = kmem_cache_zalloc(ashmem_range_cachep, GFP_KERNEL);
	if (unlikely(!range))
		return -ENOMEM;

	range->asma = asma;
	range->pgstart = start;
	range->pgend = end;
	range->purged = purged;

	list_add_tail(&range->unpinned, &prev_range->unpinned);

	if (range_on_lru(range))
		lru_add(range);

	return 0;
}

static void range_del(struct ashmem_range *range)
{
	list_del(&range->unpinned);
	if (range_on_lru(range))
		lru_del(range);
	kmem_cache_free(ashmem_range_cachep, range);
}

/*
 * range_shrink - shrinks a range
 *
 * Caller must hold ashmem_mutex.
 */
static inline void range_shrink(struct ashmem_range *range,
				size_t start, size_t end)
{
	size_t pre = range_size(range);

	range->pgstart = start;
	range->pgend = end;

	if (range_on_lru(range))
		lru_count -= pre - range_size(range);
}

static int ashmem_open(struct inode *inode, struct file *file)
{
	struct ashmem_area *asma;
	int ret;

	ret = generic_file_open(inode, file);
	if (unlikely(ret))
		return ret;

	asma = kmem_cache_zalloc(ashmem_area_cachep, GFP_KERNEL);
	if (unlikely(!asma))
		return -ENOMEM;

	INIT_LIST_HEAD(&asma->unpinned_list);
	memcpy(asma->name, ASHMEM_NAME_PREFIX, ASHMEM_NAME_PREFIX_LEN);
	asma->prot_mask = PROT_MASK;
	file->private_data = asma;

	return 0;
}

static int ashmem_release(struct inode *ignored, struct file *file)
{
	struct ashmem_area *asma = file->private_data;
	struct ashmem_range *range, *next;

	mutex_lock(&ashmem_mutex);
	list_for_each_entry_safe(range, next, &asma->unpinned_list, unpinned)
		range_del(range);
	mutex_unlock(&ashmem_mutex);

	if (asma->file)
		fput(asma->file);
	kmem_cache_free(ashmem_area_cachep, asma);

	return 0;
}

static ssize_t ashmem_read(struct file *file, char __user *buf,
			   size_t len, loff_t *pos)
{
	struct ashmem_area *asma = file->private_data;
	int ret = 0;

	mutex_lock(&ashmem_mutex);

	/* If size is not set, or set to 0, always return EOF. */
	if (asma->size == 0)
		goto out;

	if (!asma->file) {
		ret = -EBADF;
		goto out;
	}

	ret = asma->file->f_op->read(asma->file, buf, len, pos);
	if (ret < 0)
		goto out;

	/* Update backing file pos, since f_ops->read() doesn't */
	asma->file->f_pos = *pos;

out:
	mutex_unlock(&ashmem_mutex);
	return ret;
}

static loff_t ashmem_llseek(struct file *file, loff_t offset, int origin)
{
	struct ashmem_area *asma = file->private_data;
	int ret;

	mutex_lock(&ashmem_mutex);

	if (asma->size == 0) {
		ret = -EINVAL;
		goto out;
	}

	if (!asma->file) {
		ret = -EBADF;
		goto out;
	}

	ret = asma->file->f_op->llseek(asma->file, offset, origin);
	if (ret < 0)
		goto out;

	/* Copy f_pos from backing file, since f_ops->llseek() sets it */
	file->f_pos = asma->file->f_pos;

out:
	mutex_unlock(&ashmem_mutex);
	return ret;
}

static inline unsigned long calc_vm_may_flags(unsigned long prot)
{
	return _calc_vm_trans(prot, PROT_READ,  VM_MAYREAD) |
	       _calc_vm_trans(prot, PROT_WRITE, VM_MAYWRITE) |
	       _calc_vm_trans(prot, PROT_EXEC,  VM_MAYEXEC);
}

static int ashmem_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct ashmem_area *asma = file->private_data;
	int ret = 0;

	mutex_lock(&ashmem_mutex);

	/* user needs to SET_SIZE before mapping */
	if (unlikely(!asma->size)) {
		ret = -EINVAL;
		goto out;
	}

	/* requested protection bits must match our allowed protection mask */
	if (unlikely((vma->vm_flags & ~calc_vm_prot_bits(asma->prot_mask)) &
		     calc_vm_prot_bits(PROT_MASK))) {
		ret = -EPERM;
		goto out;
	}
	vma->vm_flags &= ~calc_vm_may_flags(~asma->prot_mask);

	if (!asma->file) {
		char *name = ASHMEM_NAME_DEF;
		struct file *vmfile;

		if (asma->name[ASHMEM_NAME_PREFIX_LEN] != '\0')
			name = asma->name;

		/* ... and allocate the backing shmem file */
		vmfile = shmem_file_setup(name, asma->size, vma->vm_flags);
		if (unlikely(IS_ERR(vmfile))) {
			ret = PTR_ERR(vmfile);
			goto out;
		}
		asma->file = vmfile;
	}
	get_file(asma->file);

	/*
	 * XXX - Reworked to use shmem_zero_setup() instead of
	 * shmem_set_file while we're in staging. -jstultz
	 */
	if (vma->vm_flags & VM_SHARED) {
		ret = shmem_zero_setup(vma);
		if (ret) {
			fput(asma->file);
			goto out;
		}
	}

	if (vma->vm_file)
		fput(vma->vm_file);
	vma->vm_file = asma->file;
	vma->vm_flags |= VM_CAN_NONLINEAR;

out:
	mutex_unlock(&ashmem_mutex);
	return ret;
}
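
/*
 * For illustration (this example is not part of the driver itself): if
 * userspace first calls ASHMEM_SET_PROT_MASK with PROT_READ, a later
 * mmap() asking for PROT_WRITE carries VM_WRITE in vm_flags; VM_WRITE
 * survives the ~calc_vm_prot_bits(asma->prot_mask) mask and still
 * intersects calc_vm_prot_bits(PROT_MASK), so ashmem_mmap() returns
 * -EPERM.  A read-only mmap() succeeds, but because
 * calc_vm_may_flags(~asma->prot_mask) clears VM_MAYWRITE and VM_MAYEXEC,
 * the mapping cannot later be mprotect()ed to add write or exec access.
 */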

/*
 * ashmem_shrink - our cache shrinker, called from mm/vmscan.c :: shrink_slab
 *
 * 'nr_to_scan' is the number of objects (pages) to prune, or 0 to query how
 * many objects (pages) we have in total.
 *
 * 'gfp_mask' is the mask of the allocation that got us into this mess.
 *
 * Return value is the number of objects (pages) remaining, or -1 if we cannot
 * proceed without risk of deadlock (due to gfp_mask).
 *
 * We approximate LRU via least-recently-unpinned, jettisoning unpinned partial
 * chunks of ashmem regions LRU-wise one-at-a-time until we hit 'nr_to_scan'
 * pages freed.
 */
static int ashmem_shrink(struct shrinker *s, struct shrink_control *sc)
{
	struct ashmem_range *range, *next;

	/* We might recurse into filesystem code, so bail out if necessary */
	if (sc->nr_to_scan && !(sc->gfp_mask & __GFP_FS))
		return -1;
	if (!sc->nr_to_scan)
		return lru_count;

	mutex_lock(&ashmem_mutex);
	list_for_each_entry_safe(range, next, &ashmem_lru_list, lru) {
		struct inode *inode = range->asma->file->f_dentry->d_inode;
		loff_t start = range->pgstart * PAGE_SIZE;
		loff_t end = (range->pgend + 1) * PAGE_SIZE - 1;

		vmtruncate_range(inode, start, end);
		range->purged = ASHMEM_WAS_PURGED;
		lru_del(range);

		sc->nr_to_scan -= range_size(range);
		if (sc->nr_to_scan <= 0)
			break;
	}
	mutex_unlock(&ashmem_mutex);

	return lru_count;
}

static struct shrinker ashmem_shrinker = {
	.shrink = ashmem_shrink,
	.seeks = DEFAULT_SEEKS * 4,
};

static int set_prot_mask(struct ashmem_area *asma, unsigned long prot)
{
	int ret = 0;

	mutex_lock(&ashmem_mutex);

	/* the user can only remove, not add, protection bits */
	if (unlikely((asma->prot_mask & prot) != prot)) {
		ret = -EINVAL;
		goto out;
	}

	/* does the application expect PROT_READ to imply PROT_EXEC? */
	if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC))
		prot |= PROT_EXEC;

	asma->prot_mask = prot;

out:
	mutex_unlock(&ashmem_mutex);
	return ret;
}

static int set_name(struct ashmem_area *asma, void __user *name)
{
	int ret = 0;

	mutex_lock(&ashmem_mutex);

	/* cannot change an existing mapping's name */
	if (unlikely(asma->file)) {
		ret = -EINVAL;
		goto out;
	}

	if (unlikely(copy_from_user(asma->name + ASHMEM_NAME_PREFIX_LEN,
				    name, ASHMEM_NAME_LEN)))
		ret = -EFAULT;
	asma->name[ASHMEM_FULL_NAME_LEN - 1] = '\0';

out:
	mutex_unlock(&ashmem_mutex);
	return ret;
}

static int get_name(struct ashmem_area *asma, void __user *name)
{
	int ret = 0;

	mutex_lock(&ashmem_mutex);
	if (asma->name[ASHMEM_NAME_PREFIX_LEN] != '\0') {
		size_t len;

		/*
		 * Copying only `len', instead of ASHMEM_NAME_LEN, bytes
		 * prevents us from revealing one user's stack to another.
		 */
		len = strlen(asma->name + ASHMEM_NAME_PREFIX_LEN) + 1;
		if (unlikely(copy_to_user(name,
				asma->name + ASHMEM_NAME_PREFIX_LEN, len)))
			ret = -EFAULT;
	} else {
		if (unlikely(copy_to_user(name, ASHMEM_NAME_DEF,
					  sizeof(ASHMEM_NAME_DEF))))
			ret = -EFAULT;
	}
	mutex_unlock(&ashmem_mutex);

	return ret;
}

/*
 * ashmem_pin - pin the given ashmem region, returning whether it was
 * previously purged (ASHMEM_WAS_PURGED) or not (ASHMEM_NOT_PURGED).
 *
 * Caller must hold ashmem_mutex.
 */
static int ashmem_pin(struct ashmem_area *asma, size_t pgstart, size_t pgend)
{
	struct ashmem_range *range, *next;
	int ret = ASHMEM_NOT_PURGED;

	list_for_each_entry_safe(range, next, &asma->unpinned_list, unpinned) {
		/* moved past last applicable page; we can short circuit */
		if (range_before_page(range, pgstart))
			break;

		/*
		 * The user can ask us to pin pages that span multiple ranges,
		 * or to pin pages that aren't even unpinned, so this is messy.
		 *
		 * Four cases:
		 * 1. The requested range subsumes an existing range, so we
		 *    just remove the entire matching range.
		 * 2. The requested range overlaps the start of an existing
		 *    range, so we just update that range.
		 * 3. The requested range overlaps the end of an existing
		 *    range, so we just update that range.
		 * 4. The requested range punches a hole in an existing range,
		 *    so we have to update one side of the range and then
		 *    create a new range for the other side.
		 */
		if (page_range_in_range(range, pgstart, pgend)) {
			ret |= range->purged;

			/* Case #1: Easy. Just nuke the whole thing. */
			if (page_range_subsumes_range(range, pgstart, pgend)) {
				range_del(range);
				continue;
			}

			/* Case #2: We overlap from the start, so adjust it */
			if (range->pgstart >= pgstart) {
				range_shrink(range, pgend + 1, range->pgend);
				continue;
			}

			/* Case #3: We overlap from the rear, so adjust it */
			if (range->pgend <= pgend) {
				range_shrink(range, range->pgstart, pgstart - 1);
				continue;
			}

			/*
			 * Case #4: We eat a chunk out of the middle. A bit
			 * more complicated, we allocate a new range for the
			 * second half and adjust the first chunk's endpoint.
			 */
			range_alloc(asma, range, range->purged,
				    pgend + 1, range->pgend);
			range_shrink(range, range->pgstart, pgstart - 1);
			break;
		}
	}

	return ret;
}
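
/*
 * For illustration, with an existing unpinned range covering pages [10, 19]:
 * pinning [5, 25] is case #1 and removes the range entirely; pinning [5, 14]
 * is case #2 and shrinks it to [15, 19]; pinning [15, 25] is case #3 and
 * shrinks it to [10, 14]; pinning [13, 16] is case #4, which shrinks the
 * range to [10, 12] and allocates a new range for [17, 19].
 */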

/*
 * ashmem_unpin - unpin the given range of pages. Returns zero on success.
 *
 * Caller must hold ashmem_mutex.
 */
static int ashmem_unpin(struct ashmem_area *asma, size_t pgstart, size_t pgend)
{
	struct ashmem_range *range, *next;
	unsigned int purged = ASHMEM_NOT_PURGED;

restart:
	list_for_each_entry_safe(range, next, &asma->unpinned_list, unpinned) {
		/* short circuit: this is our insertion point */
		if (range_before_page(range, pgstart))
			break;

		/*
		 * The user can ask us to unpin pages that are already entirely
		 * or partially pinned. We handle those two cases here.
		 */
		if (page_range_subsumed_by_range(range, pgstart, pgend))
			return 0;
		if (page_range_in_range(range, pgstart, pgend)) {
			pgstart = min_t(size_t, range->pgstart, pgstart);
			pgend = max_t(size_t, range->pgend, pgend);
			purged |= range->purged;
			range_del(range);
			goto restart;
		}
	}

	return range_alloc(asma, range, purged, pgstart, pgend);
}

/*
 * ashmem_get_pin_status - Returns ASHMEM_IS_UNPINNED if _any_ pages in the
 * given interval are unpinned and ASHMEM_IS_PINNED otherwise.
 *
 * Caller must hold ashmem_mutex.
 */
static int ashmem_get_pin_status(struct ashmem_area *asma, size_t pgstart,
				 size_t pgend)
{
	struct ashmem_range *range;
	int ret = ASHMEM_IS_PINNED;

	list_for_each_entry(range, &asma->unpinned_list, unpinned) {
		if (range_before_page(range, pgstart))
			break;
		if (page_range_in_range(range, pgstart, pgend)) {
			ret = ASHMEM_IS_UNPINNED;
			break;
		}
	}

	return ret;
}

static int ashmem_pin_unpin(struct ashmem_area *asma, unsigned long cmd,
			    void __user *p)
{
	struct ashmem_pin pin;
	size_t pgstart, pgend;
	int ret = -EINVAL;

	if (unlikely(!asma->file))
		return -EINVAL;

	if (unlikely(copy_from_user(&pin, p, sizeof(pin))))
		return -EFAULT;

	/* per custom, you can pass zero for len to mean "everything onward" */
	if (!pin.len)
		pin.len = PAGE_ALIGN(asma->size) - pin.offset;

	if (unlikely((pin.offset | pin.len) & ~PAGE_MASK))
		return -EINVAL;

	if (unlikely(((__u32) -1) - pin.offset < pin.len))
		return -EINVAL;

	if (unlikely(PAGE_ALIGN(asma->size) < pin.offset + pin.len))
		return -EINVAL;

	pgstart = pin.offset / PAGE_SIZE;
	pgend = pgstart + (pin.len / PAGE_SIZE) - 1;

	mutex_lock(&ashmem_mutex);

	switch (cmd) {
	case ASHMEM_PIN:
		ret = ashmem_pin(asma, pgstart, pgend);
		break;
	case ASHMEM_UNPIN:
		ret = ashmem_unpin(asma, pgstart, pgend);
		break;
	case ASHMEM_GET_PIN_STATUS:
		ret = ashmem_get_pin_status(asma, pgstart, pgend);
		break;
	}

	mutex_unlock(&ashmem_mutex);

	return ret;
}

static long ashmem_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	struct ashmem_area *asma = file->private_data;
	long ret = -ENOTTY;

	switch (cmd) {
	case ASHMEM_SET_NAME:
		ret = set_name(asma, (void __user *) arg);
		break;
	case ASHMEM_GET_NAME:
		ret = get_name(asma, (void __user *) arg);
		break;
	case ASHMEM_SET_SIZE:
		ret = -EINVAL;
		if (!asma->file) {
			ret = 0;
			asma->size = (size_t) arg;
		}
		break;
	case ASHMEM_GET_SIZE:
		ret = asma->size;
		break;
	case ASHMEM_SET_PROT_MASK:
		ret = set_prot_mask(asma, arg);
		break;
	case ASHMEM_GET_PROT_MASK:
		ret = asma->prot_mask;
		break;
	case ASHMEM_PIN:
	case ASHMEM_UNPIN:
	case ASHMEM_GET_PIN_STATUS:
		ret = ashmem_pin_unpin(asma, cmd, (void __user *) arg);
		break;
	case ASHMEM_PURGE_ALL_CACHES:
		ret = -EPERM;
		if (capable(CAP_SYS_ADMIN)) {
			struct shrink_control sc = {
				.gfp_mask = GFP_KERNEL,
				.nr_to_scan = 0,
			};
			ret = ashmem_shrink(&ashmem_shrinker, &sc);
			sc.nr_to_scan = ret;
			ashmem_shrink(&ashmem_shrinker, &sc);
		}
		break;
	}

	return ret;
}

static struct file_operations ashmem_fops = {
	.owner = THIS_MODULE,
	.open = ashmem_open,
	.release = ashmem_release,
	.read = ashmem_read,
	.llseek = ashmem_llseek,
	.mmap = ashmem_mmap,
	.unlocked_ioctl = ashmem_ioctl,
	.compat_ioctl = ashmem_ioctl,
};

static struct miscdevice ashmem_misc = {
	.minor = MISC_DYNAMIC_MINOR,
	.name = "ashmem",
	.fops = &ashmem_fops,
};
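
/*
 * Typical userspace usage, shown only as an illustrative sketch; it is not
 * part of this driver and assumes the ASHMEM_* ioctl definitions exported
 * to userspace (e.g. via a copy of the ashmem header).  Error handling is
 * omitted:
 *
 *	int fd = open("/dev/ashmem", O_RDWR);
 *	ioctl(fd, ASHMEM_SET_NAME, "example-region");	// optional, before mmap
 *	ioctl(fd, ASHMEM_SET_SIZE, len);		// required before mmap
 *	char *p = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
 *
 *	struct ashmem_pin pin = { .offset = 0, .len = 0 };	// len == 0: "everything onward"
 *	ioctl(fd, ASHMEM_UNPIN, &pin);	// pages may now be purged under memory pressure
 *	ioctl(fd, ASHMEM_PIN, &pin);	// returns ASHMEM_WAS_PURGED if contents were lost
 */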

static int __init ashmem_init(void)
{
	int ret;

	ashmem_area_cachep = kmem_cache_create("ashmem_area_cache",
					  sizeof(struct ashmem_area),
					  0, 0, NULL);
	if (unlikely(!ashmem_area_cachep)) {
		printk(KERN_ERR "ashmem: failed to create slab cache\n");
		return -ENOMEM;
	}

	ashmem_range_cachep = kmem_cache_create("ashmem_range_cache",
					  sizeof(struct ashmem_range),
					  0, 0, NULL);
	if (unlikely(!ashmem_range_cachep)) {
		printk(KERN_ERR "ashmem: failed to create slab cache\n");
		return -ENOMEM;
	}

	ret = misc_register(&ashmem_misc);
	if (unlikely(ret)) {
		printk(KERN_ERR "ashmem: failed to register misc device!\n");
		return ret;
	}

	register_shrinker(&ashmem_shrinker);

	printk(KERN_INFO "ashmem: initialized\n");

	return 0;
}

static void __exit ashmem_exit(void)
{
	int ret;

	unregister_shrinker(&ashmem_shrinker);

	ret = misc_deregister(&ashmem_misc);
	if (unlikely(ret))
		printk(KERN_ERR "ashmem: failed to unregister misc device!\n");

	kmem_cache_destroy(ashmem_range_cachep);
	kmem_cache_destroy(ashmem_area_cachep);

	printk(KERN_INFO "ashmem: unloaded\n");
}

module_init(ashmem_init);
module_exit(ashmem_exit);

MODULE_LICENSE("GPL");