/*
 * Anonymous Shared Memory Subsystem, ashmem
 *
 * Copyright (C) 2008 Google, Inc.
 *
 * Robert Love <rlove@google.com>
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */
#define pr_fmt(fmt) "ashmem: " fmt

#include <linux/init.h>
#include <linux/export.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/falloc.h>
#include <linux/miscdevice.h>
#include <linux/security.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/uaccess.h>
#include <linux/personality.h>
#include <linux/bitops.h>
#include <linux/mutex.h>
#include <linux/shmem_fs.h>
#include "ashmem.h"
#define ASHMEM_NAME_PREFIX "dev/ashmem/"
#define ASHMEM_NAME_PREFIX_LEN (sizeof(ASHMEM_NAME_PREFIX) - 1)
#define ASHMEM_FULL_NAME_LEN (ASHMEM_NAME_LEN + ASHMEM_NAME_PREFIX_LEN)
/**
 * struct ashmem_area - The anonymous shared memory area
 * @name:		The optional name in /proc/pid/maps
 * @unpinned_list:	The list of unpinned ranges in this area
 * @file:		The shmem-based backing file
 * @size:		The size of the mapping, in bytes
 * @prot_mask:		The allowed protection bits, as vm_flags
 *
 * The lifecycle of this structure is from our parent file's open() until
 * its release(). It is also protected by 'ashmem_mutex'.
 *
 * Warning: Mappings do NOT pin this structure; it dies on close().
 */
struct ashmem_area {
	char name[ASHMEM_FULL_NAME_LEN];
	struct list_head unpinned_list;
	struct file *file;
	size_t size;
	unsigned long prot_mask;
};
/**
 * struct ashmem_range - A range of unpinned/evictable pages
 * @lru:	The entry in the LRU list
 * @unpinned:	The entry in its area's unpinned list
 * @asma:	The associated anonymous shared memory area
 * @pgstart:	The starting page (inclusive)
 * @pgend:	The ending page (inclusive)
 * @purged:	The purge status (ASHMEM_NOT_PURGED or ASHMEM_WAS_PURGED)
 *
 * The lifecycle of this structure is from unpin to pin.
 * It is protected by 'ashmem_mutex'.
 */
struct ashmem_range {
	struct list_head lru;
	struct list_head unpinned;
	struct ashmem_area *asma;
	size_t pgstart;
	size_t pgend;
	unsigned int purged;
};
/* LRU list of unpinned pages, protected by ashmem_mutex */
static LIST_HEAD(ashmem_lru_list);

/*
 * long lru_count - The count of pages on our LRU list.
 *
 * This is protected by ashmem_mutex.
 */
static unsigned long lru_count;

/*
 * ashmem_mutex - protects the list of and each individual ashmem_area
 *
 * Lock Ordering: ashmem_mutex -> i_mutex -> i_alloc_sem
 */
static DEFINE_MUTEX(ashmem_mutex);

static struct kmem_cache *ashmem_area_cachep __read_mostly;
static struct kmem_cache *ashmem_range_cachep __read_mostly;
#define range_size(range) \
	((range)->pgend - (range)->pgstart + 1)

#define range_on_lru(range) \
	((range)->purged == ASHMEM_NOT_PURGED)

#define page_range_subsumes_range(range, start, end) \
	(((range)->pgstart >= (start)) && ((range)->pgend <= (end)))

#define page_range_subsumed_by_range(range, start, end) \
	(((range)->pgstart <= (start)) && ((range)->pgend >= (end)))

#define page_in_range(range, page) \
	(((range)->pgstart <= (page)) && ((range)->pgend >= (page)))

#define page_range_in_range(range, start, end) \
	(page_in_range(range, start) || page_in_range(range, end) || \
	 page_range_subsumes_range(range, start, end))

#define range_before_page(range, page) \
	((range)->pgend < (page))

#define PROT_MASK	(PROT_EXEC | PROT_READ | PROT_WRITE)
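/*
 * Illustrative example (not part of the driver): the macros above treat
 * both page endpoints as inclusive. For a range covering pages 4..9:
 *
 *	range_size(range)			== 6
 *	page_in_range(range, 9)			is true
 *	page_range_subsumes_range(range, 3, 10)	is true (pages 3..10 cover
 *						all of 4..9)
 *	range_before_page(range, 12)		is true (the range ends before
 *						page 12)
 */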
/**
 * lru_add() - Adds a range of memory to the LRU list
 * @range:	The memory range being added
 *
 * The range is first added to the end (tail) of the LRU list.
 * After this, the size of the range is added to @lru_count.
 */
static inline void lru_add(struct ashmem_range *range)
{
	list_add_tail(&range->lru, &ashmem_lru_list);
	lru_count += range_size(range);
}
/**
 * lru_del() - Removes a range of memory from the LRU list
 * @range:	The memory range being removed
 *
 * The range is first deleted from the LRU list.
 * After this, the size of the range is removed from @lru_count.
 */
static inline void lru_del(struct ashmem_range *range)
{
	list_del(&range->lru);
	lru_count -= range_size(range);
}
/**
 * range_alloc() - Allocates and initializes a new ashmem_range structure
 * @asma:	The associated ashmem_area
 * @prev_range:	The previous ashmem_range in the sorted asma->unpinned list
 * @purged:	Initial purge status (ASHMEM_NOT_PURGED or ASHMEM_WAS_PURGED)
 * @start:	The starting page (inclusive)
 * @end:	The ending page (inclusive)
 *
 * The caller must hold ashmem_mutex.
 *
 * Return: 0 if successful, or -ENOMEM if there is an error
 */
static int range_alloc(struct ashmem_area *asma,
		       struct ashmem_range *prev_range, unsigned int purged,
		       size_t start, size_t end)
{
	struct ashmem_range *range;

	range = kmem_cache_zalloc(ashmem_range_cachep, GFP_KERNEL);
	if (unlikely(!range))
		return -ENOMEM;

	range->asma = asma;
	range->pgstart = start;
	range->pgend = end;
	range->purged = purged;

	list_add_tail(&range->unpinned, &prev_range->unpinned);

	if (range_on_lru(range))
		lru_add(range);

	return 0;
}
/**
 * range_del() - Deletes and deallocates an ashmem_range structure
 * @range:	The associated ashmem_range that has previously been allocated
 */
static void range_del(struct ashmem_range *range)
{
	list_del(&range->unpinned);
	if (range_on_lru(range))
		lru_del(range);
	kmem_cache_free(ashmem_range_cachep, range);
}
/**
 * range_shrink() - Shrinks an ashmem_range
 * @range:	The associated ashmem_range being shrunk
 * @start:	The starting page of the new range (inclusive)
 * @end:	The ending page of the new range (inclusive)
 *
 * This does not modify the data inside the existing range in any way - it
 * simply shrinks the boundaries of the range.
 *
 * Theoretically, with a little tweaking, this could eventually be changed
 * to range_resize, and expand the lru_count if the new range is larger.
 */
static inline void range_shrink(struct ashmem_range *range,
				size_t start, size_t end)
{
	size_t pre = range_size(range);

	range->pgstart = start;
	range->pgend = end;

	if (range_on_lru(range))
		lru_count -= pre - range_size(range);
}
/**
 * ashmem_open() - Opens an Anonymous Shared Memory structure
 * @inode:	The backing file's index node
 * @file:	The backing file
 *
 * Please note that the ashmem_area is not returned by this function - it is
 * instead written to "file->private_data".
 *
 * Return: 0 if successful, or another code if unsuccessful.
 */
static int ashmem_open(struct inode *inode, struct file *file)
{
	struct ashmem_area *asma;
	int ret;

	ret = generic_file_open(inode, file);
	if (unlikely(ret))
		return ret;

	asma = kmem_cache_zalloc(ashmem_area_cachep, GFP_KERNEL);
	if (unlikely(!asma))
		return -ENOMEM;

	INIT_LIST_HEAD(&asma->unpinned_list);
	memcpy(asma->name, ASHMEM_NAME_PREFIX, ASHMEM_NAME_PREFIX_LEN);
	asma->prot_mask = PROT_MASK;
	file->private_data = asma;

	return 0;
}
/**
 * ashmem_release() - Releases an Anonymous Shared Memory structure
 * @ignored:	The backing file's index node - it is ignored here
 * @file:	The backing file
 *
 * Return: 0 if successful. If it is anything else, go have a coffee and
 * try again.
 */
static int ashmem_release(struct inode *ignored, struct file *file)
{
	struct ashmem_area *asma = file->private_data;
	struct ashmem_range *range, *next;

	mutex_lock(&ashmem_mutex);
	list_for_each_entry_safe(range, next, &asma->unpinned_list, unpinned)
		range_del(range);
	mutex_unlock(&ashmem_mutex);

	if (asma->file)
		fput(asma->file);
	kmem_cache_free(ashmem_area_cachep, asma);

	return 0;
}
/**
 * ashmem_read() - Reads a set of bytes from an Ashmem-enabled file
 * @file:	The associated backing file
 * @buf:	The userspace buffer the data is read into
 * @len:	The number of bytes being read
 * @pos:	The position of the first byte to read
 *
 * Return: The number of bytes read, or a negative errno on failure.
 */
static ssize_t ashmem_read(struct file *file, char __user *buf,
			   size_t len, loff_t *pos)
{
	struct ashmem_area *asma = file->private_data;
	int ret = 0;

	mutex_lock(&ashmem_mutex);

	/* If size is not set, or set to 0, always return EOF. */
	if (asma->size == 0)
		goto out_unlock;

	if (!asma->file) {
		ret = -EBADF;
		goto out_unlock;
	}

	mutex_unlock(&ashmem_mutex);

	/*
	 * asma and asma->file are used outside the lock here. We assume
	 * once asma->file is set it will never be changed, and will not
	 * be destroyed until all references to the file are dropped and
	 * ashmem_release is called.
	 */
	ret = __vfs_read(asma->file, buf, len, pos);
	if (ret >= 0)
		/* update backing file pos, since f_ops->read() doesn't */
		asma->file->f_pos = *pos;
	return ret;

out_unlock:
	mutex_unlock(&ashmem_mutex);
	return ret;
}
static loff_t ashmem_llseek(struct file *file, loff_t offset, int origin)
{
	struct ashmem_area *asma = file->private_data;
	loff_t ret;

	mutex_lock(&ashmem_mutex);

	if (asma->size == 0) {
		ret = -EINVAL;
		goto out;
	}

	if (!asma->file) {
		ret = -EBADF;
		goto out;
	}

	ret = vfs_llseek(asma->file, offset, origin);
	if (ret < 0)
		goto out;

	/* copy f_pos from backing file, since f_ops->llseek() sets it */
	file->f_pos = asma->file->f_pos;

out:
	mutex_unlock(&ashmem_mutex);
	return ret;
}
static inline vm_flags_t calc_vm_may_flags(unsigned long prot)
{
	return _calc_vm_trans(prot, PROT_READ,  VM_MAYREAD) |
	       _calc_vm_trans(prot, PROT_WRITE, VM_MAYWRITE) |
	       _calc_vm_trans(prot, PROT_EXEC,  VM_MAYEXEC);
}
static int ashmem_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct ashmem_area *asma = file->private_data;
	int ret = 0;

	mutex_lock(&ashmem_mutex);

	/* user needs to SET_SIZE before mapping */
	if (unlikely(!asma->size)) {
		ret = -EINVAL;
		goto out;
	}

	/* requested protection bits must match our allowed protection mask */
	if (unlikely((vma->vm_flags & ~calc_vm_prot_bits(asma->prot_mask)) &
		     calc_vm_prot_bits(PROT_MASK))) {
		ret = -EPERM;
		goto out;
	}
	vma->vm_flags &= ~calc_vm_may_flags(~asma->prot_mask);

	if (!asma->file) {
		char *name = ASHMEM_NAME_DEF;
		struct file *vmfile;

		if (asma->name[ASHMEM_NAME_PREFIX_LEN] != '\0')
			name = asma->name;

		/* ... and allocate the backing shmem file */
		vmfile = shmem_file_setup(name, asma->size, vma->vm_flags);
		if (IS_ERR(vmfile)) {
			ret = PTR_ERR(vmfile);
			goto out;
		}
		asma->file = vmfile;
	}
	get_file(asma->file);

	/*
	 * XXX - Reworked to use shmem_zero_setup() instead of
	 * shmem_set_file while we're in staging. -jstultz
	 */
	if (vma->vm_flags & VM_SHARED) {
		ret = shmem_zero_setup(vma);
		if (ret) {
			fput(asma->file);
			goto out;
		}
	}

	vma->vm_file = asma->file;

out:
	mutex_unlock(&ashmem_mutex);
	return ret;
}
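/*
 * Illustrative example (not part of the driver): if a client first calls
 * ASHMEM_SET_PROT_MASK with PROT_READ, a later
 * mmap(..., PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0) is rejected with
 * -EPERM by the protection check above, while a PROT_READ-only mapping
 * still succeeds.
 */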
/*
 * ashmem_shrink - our cache shrinker, called from mm/vmscan.c
 *
 * 'nr_to_scan' is the number of objects to scan for freeing.
 *
 * 'gfp_mask' is the mask of the allocation that got us into this mess.
 *
 * Return value is the number of objects freed or -1 if we cannot
 * proceed without risk of deadlock (due to gfp_mask).
 *
 * We approximate LRU via least-recently-unpinned, jettisoning unpinned partial
 * chunks of ashmem regions LRU-wise one-at-a-time until we hit 'nr_to_scan'
 * pages freed.
 */
static unsigned long
ashmem_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
{
	struct ashmem_range *range, *next;
	unsigned long freed = 0;

	/* We might recurse into filesystem code, so bail out if necessary */
	if (!(sc->gfp_mask & __GFP_FS))
		return SHRINK_STOP;

	mutex_lock(&ashmem_mutex);
	list_for_each_entry_safe(range, next, &ashmem_lru_list, lru) {
		loff_t start = range->pgstart * PAGE_SIZE;
		loff_t end = (range->pgend + 1) * PAGE_SIZE;

		vfs_fallocate(range->asma->file,
			      FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
			      start, end - start);
		range->purged = ASHMEM_WAS_PURGED;
		lru_del(range);

		freed += range_size(range);
		if (--sc->nr_to_scan <= 0)
			break;
	}
	mutex_unlock(&ashmem_mutex);
	return freed;
}
static unsigned long
ashmem_shrink_count(struct shrinker *shrink, struct shrink_control *sc)
{
	/*
	 * note that lru_count is a count of pages on the lru, not a count of
	 * objects on the list. This means the scan function needs to return
	 * the number of pages freed, not the number of objects scanned.
	 */
	return lru_count;
}
static struct shrinker ashmem_shrinker = {
	.count_objects = ashmem_shrink_count,
	.scan_objects = ashmem_shrink_scan,
	/*
	 * XXX (dchinner): I wish people would comment on why they need such
	 * significant changes to the default value here
	 */
	.seeks = DEFAULT_SEEKS * 4,
};
static int set_prot_mask(struct ashmem_area *asma, unsigned long prot)
{
	int ret = 0;

	mutex_lock(&ashmem_mutex);

	/* the user can only remove, not add, protection bits */
	if (unlikely((asma->prot_mask & prot) != prot)) {
		ret = -EINVAL;
		goto out;
	}

	/* does the application expect PROT_READ to imply PROT_EXEC? */
	if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC))
		prot |= PROT_EXEC;

	asma->prot_mask = prot;

out:
	mutex_unlock(&ashmem_mutex);
	return ret;
}
static int set_name(struct ashmem_area *asma, void __user *name)
{
	int len;
	int ret = 0;
	char local_name[ASHMEM_NAME_LEN];

	/*
	 * Holding the ashmem_mutex while doing a copy_from_user might cause
	 * a data abort which would try to access mmap_sem. If another
	 * thread has invoked ashmem_mmap then it will be holding the
	 * semaphore and will be waiting for ashmem_mutex, thereby leading to
	 * deadlock. We'll release the mutex and take the name to a local
	 * variable that does not need protection and later copy the local
	 * variable to the structure member with lock held.
	 */
	len = strncpy_from_user(local_name, name, ASHMEM_NAME_LEN);
	if (len < 0)
		return len;
	if (len == ASHMEM_NAME_LEN)
		local_name[ASHMEM_NAME_LEN - 1] = '\0';
	mutex_lock(&ashmem_mutex);
	/* cannot change an existing mapping's name */
	if (unlikely(asma->file))
		ret = -EINVAL;
	else
		strcpy(asma->name + ASHMEM_NAME_PREFIX_LEN, local_name);

	mutex_unlock(&ashmem_mutex);
	return ret;
}
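/*
 * Illustrative layout (not part of the driver): after ashmem_open() and a
 * successful ASHMEM_SET_NAME of "example", asma->name holds
 * "dev/ashmem/example". If the name is set before the first mmap(), it is
 * the string handed to shmem_file_setup() above, which is why the region
 * shows up under a dev/ashmem/ name in /proc/<pid>/maps.
 */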
static int get_name(struct ashmem_area *asma, void __user *name)
{
	int ret = 0;
	size_t len;
	/*
	 * Have a local variable to which we'll copy the content
	 * from asma with the lock held. Later we can copy this to the user
	 * space safely without holding any locks. So even if we proceed to
	 * wait for mmap_sem, it won't lead to deadlock.
	 */
	char local_name[ASHMEM_NAME_LEN];

	mutex_lock(&ashmem_mutex);
	if (asma->name[ASHMEM_NAME_PREFIX_LEN] != '\0') {
		/*
		 * Copying only `len', instead of ASHMEM_NAME_LEN, bytes
		 * prevents us from revealing one user's stack to another.
		 */
		len = strlen(asma->name + ASHMEM_NAME_PREFIX_LEN) + 1;
		memcpy(local_name, asma->name + ASHMEM_NAME_PREFIX_LEN, len);
	} else {
		len = sizeof(ASHMEM_NAME_DEF);
		memcpy(local_name, ASHMEM_NAME_DEF, len);
	}
	mutex_unlock(&ashmem_mutex);

	/*
	 * Now we are just copying from the stack variable to userland
	 */
	if (unlikely(copy_to_user(name, local_name, len)))
		ret = -EFAULT;

	return ret;
}
/*
 * ashmem_pin - pin the given ashmem region, returning whether it was
 * previously purged (ASHMEM_WAS_PURGED) or not (ASHMEM_NOT_PURGED).
 *
 * Caller must hold ashmem_mutex.
 */
static int ashmem_pin(struct ashmem_area *asma, size_t pgstart, size_t pgend)
{
	struct ashmem_range *range, *next;
	int ret = ASHMEM_NOT_PURGED;

	list_for_each_entry_safe(range, next, &asma->unpinned_list, unpinned) {
		/* moved past last applicable page; we can short circuit */
		if (range_before_page(range, pgstart))
			break;

		/*
		 * The user can ask us to pin pages that span multiple ranges,
		 * or to pin pages that aren't even unpinned, so this is messy.
		 *
		 * Four cases:
		 * 1. The requested range subsumes an existing range, so we
		 *    just remove the entire matching range.
		 * 2. The requested range overlaps the start of an existing
		 *    range, so we just update that range.
		 * 3. The requested range overlaps the end of an existing
		 *    range, so we just update that range.
		 * 4. The requested range punches a hole in an existing range,
		 *    so we have to update one side of the range and then
		 *    create a new range for the other side.
		 */
		if (page_range_in_range(range, pgstart, pgend)) {
			ret |= range->purged;

			/* Case #1: Easy. Just nuke the whole thing. */
			if (page_range_subsumes_range(range, pgstart, pgend)) {
				range_del(range);
				continue;
			}

			/* Case #2: We overlap from the start, so adjust it */
			if (range->pgstart >= pgstart) {
				range_shrink(range, pgend + 1, range->pgend);
				continue;
			}

			/* Case #3: We overlap from the rear, so adjust it */
			if (range->pgend <= pgend) {
				range_shrink(range, range->pgstart,
					     pgstart - 1);
				continue;
			}

			/*
			 * Case #4: We eat a chunk out of the middle. A bit
			 * more complicated, we allocate a new range for the
			 * second half and adjust the first chunk's endpoint.
			 */
			range_alloc(asma, range, range->purged,
				    pgend + 1, range->pgend);
			range_shrink(range, range->pgstart, pgstart - 1);
			break;
		}
	}

	return ret;
}
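/*
 * Worked example (illustrative only): with one unpinned range covering
 * pages 2..9, pinning pages 4..6 hits case #4 above: a new unpinned range
 * is allocated for pages 7..9 and the existing range is shrunk to pages
 * 2..3, leaving pages 4..6 pinned.
 */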
/*
 * ashmem_unpin - unpin the given range of pages. Returns zero on success.
 *
 * Caller must hold ashmem_mutex.
 */
static int ashmem_unpin(struct ashmem_area *asma, size_t pgstart, size_t pgend)
{
	struct ashmem_range *range, *next;
	unsigned int purged = ASHMEM_NOT_PURGED;

restart:
	list_for_each_entry_safe(range, next, &asma->unpinned_list, unpinned) {
		/* short circuit: this is our insertion point */
		if (range_before_page(range, pgstart))
			break;

		/*
		 * The user can ask us to unpin pages that are already entirely
		 * or partially pinned. We handle those two cases here.
		 */
		if (page_range_subsumed_by_range(range, pgstart, pgend))
			return 0;
		if (page_range_in_range(range, pgstart, pgend)) {
			pgstart = min_t(size_t, range->pgstart, pgstart);
			pgend = max_t(size_t, range->pgend, pgend);
			purged |= range->purged;
			range_del(range);
			goto restart;
		}
	}

	return range_alloc(asma, range, purged, pgstart, pgend);
}
/*
 * ashmem_get_pin_status - Returns ASHMEM_IS_UNPINNED if _any_ pages in the
 * given interval are unpinned and ASHMEM_IS_PINNED otherwise.
 *
 * Caller must hold ashmem_mutex.
 */
static int ashmem_get_pin_status(struct ashmem_area *asma, size_t pgstart,
				 size_t pgend)
{
	struct ashmem_range *range;
	int ret = ASHMEM_IS_PINNED;

	list_for_each_entry(range, &asma->unpinned_list, unpinned) {
		if (range_before_page(range, pgstart))
			break;
		if (page_range_in_range(range, pgstart, pgend)) {
			ret = ASHMEM_IS_UNPINNED;
			break;
		}
	}

	return ret;
}
static int ashmem_pin_unpin(struct ashmem_area *asma, unsigned long cmd,
			    void __user *p)
{
	struct ashmem_pin pin;
	size_t pgstart, pgend;
	int ret = -EINVAL;

	if (unlikely(!asma->file))
		return -EINVAL;

	if (unlikely(copy_from_user(&pin, p, sizeof(pin))))
		return -EFAULT;

	/* per custom, you can pass zero for len to mean "everything onward" */
	if (!pin.len)
		pin.len = PAGE_ALIGN(asma->size) - pin.offset;

	if (unlikely((pin.offset | pin.len) & ~PAGE_MASK))
		return -EINVAL;

	if (unlikely(((__u32)-1) - pin.offset < pin.len))
		return -EINVAL;

	if (unlikely(PAGE_ALIGN(asma->size) < pin.offset + pin.len))
		return -EINVAL;

	pgstart = pin.offset / PAGE_SIZE;
	pgend = pgstart + (pin.len / PAGE_SIZE) - 1;

	mutex_lock(&ashmem_mutex);

	switch (cmd) {
	case ASHMEM_PIN:
		ret = ashmem_pin(asma, pgstart, pgend);
		break;
	case ASHMEM_UNPIN:
		ret = ashmem_unpin(asma, pgstart, pgend);
		break;
	case ASHMEM_GET_PIN_STATUS:
		ret = ashmem_get_pin_status(asma, pgstart, pgend);
		break;
	}

	mutex_unlock(&ashmem_mutex);

	return ret;
}
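/*
 * Example (illustrative only): with 4096-byte pages, pin.offset == 8192 and
 * pin.len == 16384 translate to pgstart == 2 and pgend == 5, i.e. the
 * pin/unpin operation covers pages 2..5 inclusive.
 */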
static long ashmem_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	struct ashmem_area *asma = file->private_data;
	long ret = -ENOTTY;

	switch (cmd) {
	case ASHMEM_SET_NAME:
		ret = set_name(asma, (void __user *)arg);
		break;
	case ASHMEM_GET_NAME:
		ret = get_name(asma, (void __user *)arg);
		break;
	case ASHMEM_SET_SIZE:
		ret = -EINVAL;
		if (!asma->file) {
			ret = 0;
			asma->size = (size_t)arg;
		}
		break;
	case ASHMEM_GET_SIZE:
		ret = asma->size;
		break;
	case ASHMEM_SET_PROT_MASK:
		ret = set_prot_mask(asma, arg);
		break;
	case ASHMEM_GET_PROT_MASK:
		ret = asma->prot_mask;
		break;
	case ASHMEM_PIN:
	case ASHMEM_UNPIN:
	case ASHMEM_GET_PIN_STATUS:
		ret = ashmem_pin_unpin(asma, cmd, (void __user *)arg);
		break;
	case ASHMEM_PURGE_ALL_CACHES:
		ret = -EPERM;
		if (capable(CAP_SYS_ADMIN)) {
			struct shrink_control sc = {
				.gfp_mask = GFP_KERNEL,
				.nr_to_scan = LONG_MAX,
			};

			ret = ashmem_shrink_count(&ashmem_shrinker, &sc);
			ashmem_shrink_scan(&ashmem_shrinker, &sc);
		}
		break;
	}

	return ret;
}
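/*
 * Illustrative userspace usage (a sketch, not part of this driver; it assumes
 * the uapi definitions from <linux/ashmem.h> and the /dev/ashmem node
 * registered below, with error handling omitted):
 *
 *	int fd = open("/dev/ashmem", O_RDWR);
 *	ioctl(fd, ASHMEM_SET_NAME, "example-region");
 *	ioctl(fd, ASHMEM_SET_SIZE, 2 * 4096);
 *	char *p = mmap(NULL, 2 * 4096, PROT_READ | PROT_WRITE,
 *		       MAP_SHARED, fd, 0);
 *
 *	struct ashmem_pin pin = { .offset = 0, .len = 4096 };
 *	ioctl(fd, ASHMEM_UNPIN, &pin);	// first page becomes evictable
 *	if (ioctl(fd, ASHMEM_PIN, &pin) == ASHMEM_WAS_PURGED)
 *		;			// page was reclaimed; rebuild contents
 */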
/* support of 32bit userspace on 64bit platforms */
#ifdef CONFIG_COMPAT
static long compat_ashmem_ioctl(struct file *file, unsigned int cmd,
				unsigned long arg)
{
	switch (cmd) {
	case COMPAT_ASHMEM_SET_SIZE:
		cmd = ASHMEM_SET_SIZE;
		break;
	case COMPAT_ASHMEM_SET_PROT_MASK:
		cmd = ASHMEM_SET_PROT_MASK;
		break;
	}
	return ashmem_ioctl(file, cmd, arg);
}
#endif
static const struct file_operations ashmem_fops = {
	.owner = THIS_MODULE,
	.open = ashmem_open,
	.release = ashmem_release,
	.read = ashmem_read,
	.llseek = ashmem_llseek,
	.mmap = ashmem_mmap,
	.unlocked_ioctl = ashmem_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl = compat_ashmem_ioctl,
#endif
};
static struct miscdevice ashmem_misc = {
	.minor = MISC_DYNAMIC_MINOR,
	.name = "ashmem",
	.fops = &ashmem_fops,
};
static int __init ashmem_init(void)
{
	int ret;

	ashmem_area_cachep = kmem_cache_create("ashmem_area_cache",
					       sizeof(struct ashmem_area),
					       0, 0, NULL);
	if (unlikely(!ashmem_area_cachep)) {
		pr_err("failed to create slab cache\n");
		return -ENOMEM;
	}

	ashmem_range_cachep = kmem_cache_create("ashmem_range_cache",
						sizeof(struct ashmem_range),
						0, 0, NULL);
	if (unlikely(!ashmem_range_cachep)) {
		pr_err("failed to create slab cache\n");
		return -ENOMEM;
	}

	ret = misc_register(&ashmem_misc);
	if (unlikely(ret)) {
		pr_err("failed to register misc device!\n");
		return ret;
	}

	register_shrinker(&ashmem_shrinker);

	pr_info("initialized\n");

	return 0;
}
device_initcall(ashmem_init);