// SPDX-License-Identifier: GPL-2.0
/*
 * Anonymous Shared Memory Subsystem, ashmem
 *
 * Copyright (C) 2008 Google, Inc.
 *
 * Robert Love <rlove@google.com>
 */

#define pr_fmt(fmt) "ashmem: " fmt
#include <linux/init.h>
#include <linux/export.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/falloc.h>
#include <linux/miscdevice.h>
#include <linux/security.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/uaccess.h>
#include <linux/personality.h>
#include <linux/bitops.h>
#include <linux/mutex.h>
#include <linux/shmem_fs.h>
#include "ashmem.h"
#define ASHMEM_NAME_PREFIX "dev/ashmem/"
#define ASHMEM_NAME_PREFIX_LEN (sizeof(ASHMEM_NAME_PREFIX) - 1)
#define ASHMEM_FULL_NAME_LEN (ASHMEM_NAME_LEN + ASHMEM_NAME_PREFIX_LEN)
/**
 * struct ashmem_area - The anonymous shared memory area
 * @name:		The optional name in /proc/pid/maps
 * @unpinned_list:	The list of unpinned ranges in this area
 * @file:		The shmem-based backing file
 * @size:		The size of the mapping, in bytes
 * @prot_mask:		The allowed protection bits (PROT_*)
 *
 * The lifecycle of this structure is from our parent file's open() until
 * its release(). It is also protected by 'ashmem_mutex'.
 *
 * Warning: Mappings do NOT pin this structure; it dies on close().
 */
struct ashmem_area {
	char name[ASHMEM_FULL_NAME_LEN];
	struct list_head unpinned_list;
	struct file *file;
	size_t size;
	unsigned long prot_mask;
};
/**
 * struct ashmem_range - A range of unpinned/evictable pages
 * @lru:		The entry in the LRU list
 * @unpinned:		The entry in its area's unpinned list
 * @asma:		The associated anonymous shared memory area
 * @pgstart:		The starting page (inclusive)
 * @pgend:		The ending page (inclusive)
 * @purged:		The purge status (ASHMEM_NOT_PURGED or ASHMEM_WAS_PURGED)
 *
 * The lifecycle of this structure is from unpin to pin.
 * It is protected by 'ashmem_mutex'.
 */
struct ashmem_range {
	struct list_head lru;
	struct list_head unpinned;
	struct ashmem_area *asma;
	size_t pgstart;
	size_t pgend;
	unsigned int purged;
};
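
/*
 * Illustrative note (not part of the original driver): ranges are tracked at
 * page granularity with inclusive endpoints. Assuming 4 KiB pages, unpinning
 * bytes [4096, 12288) of an area yields a range with pgstart == 1 and
 * pgend == 2, i.e. a range_size() of 2 pages.
 */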
/* LRU list of unpinned pages, protected by ashmem_mutex */
static LIST_HEAD(ashmem_lru_list);

static atomic_t ashmem_shrink_inflight = ATOMIC_INIT(0);
static DECLARE_WAIT_QUEUE_HEAD(ashmem_shrink_wait);

/*
 * long lru_count - The count of pages on our LRU list.
 *
 * This is protected by ashmem_mutex.
 */
static unsigned long lru_count;
/*
 * ashmem_mutex - protects the list of and each individual ashmem_area
 *
 * Lock Ordering: ashmem_mutex -> i_mutex -> i_alloc_sem
 */
static DEFINE_MUTEX(ashmem_mutex);

static struct kmem_cache *ashmem_area_cachep __read_mostly;
static struct kmem_cache *ashmem_range_cachep __read_mostly;

/*
 * A separate lockdep class for the backing shmem inodes to resolve the lockdep
 * warning about the race between kswapd taking fs_reclaim before inode_lock
 * and the write syscall taking inode_lock and then fs_reclaim.
 * Note that such a race is impossible because ashmem does not support write
 * syscalls operating on the backing shmem.
 */
static struct lock_class_key backing_shmem_inode_class;
static inline unsigned long range_size(struct ashmem_range *range)
{
	return range->pgend - range->pgstart + 1;
}

static inline bool range_on_lru(struct ashmem_range *range)
{
	return range->purged == ASHMEM_NOT_PURGED;
}

static inline bool page_range_subsumes_range(struct ashmem_range *range,
					     size_t start, size_t end)
{
	return (range->pgstart >= start) && (range->pgend <= end);
}

static inline bool page_range_subsumed_by_range(struct ashmem_range *range,
						size_t start, size_t end)
{
	return (range->pgstart <= start) && (range->pgend >= end);
}

static inline bool page_in_range(struct ashmem_range *range, size_t page)
{
	return (range->pgstart <= page) && (range->pgend >= page);
}

static inline bool page_range_in_range(struct ashmem_range *range,
				       size_t start, size_t end)
{
	return page_in_range(range, start) || page_in_range(range, end) ||
		page_range_subsumes_range(range, start, end);
}

static inline bool range_before_page(struct ashmem_range *range,
				     size_t page)
{
	return range->pgend < page;
}

#define PROT_MASK	(PROT_EXEC | PROT_READ | PROT_WRITE)
/**
 * lru_add() - Adds a range of memory to the LRU list
 * @range:     The memory range being added
 *
 * The range is first added to the end (tail) of the LRU list.
 * After this, the size of the range is added to @lru_count
 */
static inline void lru_add(struct ashmem_range *range)
{
	list_add_tail(&range->lru, &ashmem_lru_list);
	lru_count += range_size(range);
}

/**
 * lru_del() - Removes a range of memory from the LRU list
 * @range:     The memory range being removed
 *
 * The range is first deleted from the LRU list.
 * After this, the size of the range is removed from @lru_count
 */
static inline void lru_del(struct ashmem_range *range)
{
	list_del(&range->lru);
	lru_count -= range_size(range);
}
/**
 * range_alloc() - Allocates and initializes a new ashmem_range structure
 * @asma:	   The associated ashmem_area
 * @prev_range:	   The previous ashmem_range in the sorted asma->unpinned list
 * @purged:	   Initial purge status (ASHMEM_NOT_PURGED or ASHMEM_WAS_PURGED)
 * @start:	   The starting page (inclusive)
 * @end:	   The ending page (inclusive)
 * @new_range:	   The placeholder for the pre-allocated new range
 *
 * This function is protected by ashmem_mutex.
 */
static void range_alloc(struct ashmem_area *asma,
			struct ashmem_range *prev_range, unsigned int purged,
			size_t start, size_t end,
			struct ashmem_range **new_range)
{
	struct ashmem_range *range = *new_range;

	*new_range = NULL;
	range->asma = asma;
	range->pgstart = start;
	range->pgend = end;
	range->purged = purged;

	list_add_tail(&range->unpinned, &prev_range->unpinned);

	if (range_on_lru(range))
		lru_add(range);
}
/**
 * range_del() - Deletes and deallocates an ashmem_range structure
 * @range:	 The associated ashmem_range that has previously been allocated
 */
static void range_del(struct ashmem_range *range)
{
	list_del(&range->unpinned);
	if (range_on_lru(range))
		lru_del(range);
	kmem_cache_free(ashmem_range_cachep, range);
}
/**
 * range_shrink() - Shrinks an ashmem_range
 * @range:	  The associated ashmem_range being shrunk
 * @start:	  The starting page (inclusive) of the new range
 * @end:	  The ending page (inclusive) of the new range
 *
 * This does not modify the data inside the existing range in any way - It
 * simply shrinks the boundaries of the range.
 *
 * Theoretically, with a little tweaking, this could eventually be changed
 * to range_resize, and expand the lru_count if the new range is larger.
 */
static inline void range_shrink(struct ashmem_range *range,
				size_t start, size_t end)
{
	size_t pre = range_size(range);

	range->pgstart = start;
	range->pgend = end;

	if (range_on_lru(range))
		lru_count -= pre - range_size(range);
}
/**
 * ashmem_open() - Opens an Anonymous Shared Memory structure
 * @inode:	   The backing file's index node (inode)
 * @file:	   The backing file
 *
 * Please note that the ashmem_area is not returned by this function - It is
 * instead written to "file->private_data".
 *
 * Return: 0 if successful, or another code if unsuccessful.
 */
static int ashmem_open(struct inode *inode, struct file *file)
{
	struct ashmem_area *asma;
	int ret;

	ret = generic_file_open(inode, file);
	if (ret)
		return ret;

	asma = kmem_cache_zalloc(ashmem_area_cachep, GFP_KERNEL);
	if (!asma)
		return -ENOMEM;

	INIT_LIST_HEAD(&asma->unpinned_list);
	memcpy(asma->name, ASHMEM_NAME_PREFIX, ASHMEM_NAME_PREFIX_LEN);
	asma->prot_mask = PROT_MASK;
	file->private_data = asma;

	return 0;
}
/**
 * ashmem_release() - Releases an Anonymous Shared Memory structure
 * @ignored:	      The backing file's inode - It is ignored here
 * @file:	      The backing file
 *
 * Return: 0 if successful. If it is anything else, go have a coffee and
 * try again.
 */
static int ashmem_release(struct inode *ignored, struct file *file)
{
	struct ashmem_area *asma = file->private_data;
	struct ashmem_range *range, *next;

	mutex_lock(&ashmem_mutex);
	list_for_each_entry_safe(range, next, &asma->unpinned_list, unpinned)
		range_del(range);
	mutex_unlock(&ashmem_mutex);

	if (asma->file)
		fput(asma->file);
	kmem_cache_free(ashmem_area_cachep, asma);

	return 0;
}
static ssize_t ashmem_read_iter(struct kiocb *iocb, struct iov_iter *iter)
{
	struct ashmem_area *asma = iocb->ki_filp->private_data;
	int ret = 0;

	mutex_lock(&ashmem_mutex);

	/* If size is not set, or set to 0, always return EOF. */
	if (asma->size == 0)
		goto out_unlock;

	if (!asma->file) {
		ret = -EBADF;
		goto out_unlock;
	}

	/*
	 * asma and asma->file are used outside the lock here. We assume
	 * once asma->file is set it will never be changed, and will not
	 * be destroyed until all references to the file are dropped and
	 * ashmem_release is called.
	 */
	mutex_unlock(&ashmem_mutex);
	ret = vfs_iter_read(asma->file, iter, &iocb->ki_pos, 0);
	mutex_lock(&ashmem_mutex);
	if (ret > 0)
		asma->file->f_pos = iocb->ki_pos;
out_unlock:
	mutex_unlock(&ashmem_mutex);
	return ret;
}
static loff_t ashmem_llseek(struct file *file, loff_t offset, int origin)
{
	struct ashmem_area *asma = file->private_data;
	loff_t ret;

	mutex_lock(&ashmem_mutex);

	if (asma->size == 0) {
		mutex_unlock(&ashmem_mutex);
		return -EINVAL;
	}

	if (!asma->file) {
		mutex_unlock(&ashmem_mutex);
		return -EBADF;
	}

	mutex_unlock(&ashmem_mutex);

	ret = vfs_llseek(asma->file, offset, origin);
	if (ret < 0)
		return ret;

	/* Copy f_pos from backing file, since f_ops->llseek() sets it */
	file->f_pos = asma->file->f_pos;

	return ret;
}
static inline vm_flags_t calc_vm_may_flags(unsigned long prot)
{
	return _calc_vm_trans(prot, PROT_READ,  VM_MAYREAD) |
	       _calc_vm_trans(prot, PROT_WRITE, VM_MAYWRITE) |
	       _calc_vm_trans(prot, PROT_EXEC,  VM_MAYEXEC);
}
static int ashmem_vmfile_mmap(struct file *file, struct vm_area_struct *vma)
{
	/* do not allow to mmap ashmem backing shmem file directly */
	return -EPERM;
}

static unsigned long
ashmem_vmfile_get_unmapped_area(struct file *file, unsigned long addr,
				unsigned long len, unsigned long pgoff,
				unsigned long flags)
{
	return current->mm->get_unmapped_area(file, addr, len, pgoff, flags);
}
static int ashmem_mmap(struct file *file, struct vm_area_struct *vma)
{
	static struct file_operations vmfile_fops;
	struct ashmem_area *asma = file->private_data;
	int ret = 0;

	mutex_lock(&ashmem_mutex);

	/* user needs to SET_SIZE before mapping */
	if (!asma->size) {
		ret = -EINVAL;
		goto out;
	}

	/* requested mapping size larger than object size */
	if (vma->vm_end - vma->vm_start > PAGE_ALIGN(asma->size)) {
		ret = -EINVAL;
		goto out;
	}

	/* requested protection bits must match our allowed protection mask */
	if ((vma->vm_flags & ~calc_vm_prot_bits(asma->prot_mask, 0)) &
	    calc_vm_prot_bits(PROT_MASK, 0)) {
		ret = -EPERM;
		goto out;
	}
	vma->vm_flags &= ~calc_vm_may_flags(~asma->prot_mask);

	if (!asma->file) {
		char *name = ASHMEM_NAME_DEF;
		struct file *vmfile;
		struct inode *inode;

		if (asma->name[ASHMEM_NAME_PREFIX_LEN] != '\0')
			name = asma->name;

		/* ... and allocate the backing shmem file */
		vmfile = shmem_file_setup(name, asma->size, vma->vm_flags);
		if (IS_ERR(vmfile)) {
			ret = PTR_ERR(vmfile);
			goto out;
		}
		vmfile->f_mode |= FMODE_LSEEK;
		inode = file_inode(vmfile);
		lockdep_set_class(&inode->i_rwsem, &backing_shmem_inode_class);
		asma->file = vmfile;
		/*
		 * override mmap operation of the vmfile so that it can't be
		 * remapped which would lead to creation of a new vma with no
		 * asma permission checks. Have to override get_unmapped_area
		 * as well to prevent VM_BUG_ON check for f_ops modification.
		 */
		if (!vmfile_fops.mmap) {
			vmfile_fops = *vmfile->f_op;
			vmfile_fops.mmap = ashmem_vmfile_mmap;
			vmfile_fops.get_unmapped_area =
					ashmem_vmfile_get_unmapped_area;
		}
		vmfile->f_op = &vmfile_fops;
	}
	get_file(asma->file);

	/*
	 * XXX - Reworked to use shmem_zero_setup() instead of
	 * shmem_set_file while we're in staging. -jstultz
	 */
	if (vma->vm_flags & VM_SHARED) {
		ret = shmem_zero_setup(vma);
		if (ret) {
			fput(asma->file);
			goto out;
		}
	} else {
		vma_set_anonymous(vma);
	}

	vma_set_file(vma, asma->file);
	/* XXX: merge this with the get_file() above if possible */
	fput(asma->file);

out:
	mutex_unlock(&ashmem_mutex);
	return ret;
}
/*
 * ashmem_shrink - our cache shrinker, called from mm/vmscan.c
 *
 * 'nr_to_scan' is the number of objects to scan for freeing.
 *
 * 'gfp_mask' is the mask of the allocation that got us into this mess.
 *
 * Return value is the number of objects freed or -1 if we cannot
 * proceed without risk of deadlock (due to gfp_mask).
 *
 * We approximate LRU via least-recently-unpinned, jettisoning unpinned partial
 * chunks of ashmem regions LRU-wise one-at-a-time until we hit 'nr_to_scan'
 * pages freed.
 */
static unsigned long
ashmem_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
{
	unsigned long freed = 0;

	/* We might recurse into filesystem code, so bail out if necessary */
	if (!(sc->gfp_mask & __GFP_FS))
		return SHRINK_STOP;

	if (!mutex_trylock(&ashmem_mutex))
		return -1;

	while (!list_empty(&ashmem_lru_list)) {
		struct ashmem_range *range =
			list_first_entry(&ashmem_lru_list, typeof(*range), lru);
		loff_t start = range->pgstart * PAGE_SIZE;
		loff_t end = (range->pgend + 1) * PAGE_SIZE;
		struct file *f = range->asma->file;

		get_file(f);
		atomic_inc(&ashmem_shrink_inflight);
		range->purged = ASHMEM_WAS_PURGED;
		lru_del(range);

		freed += range_size(range);
		mutex_unlock(&ashmem_mutex);
		f->f_op->fallocate(f,
				   FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
				   start, end - start);
		fput(f);
		if (atomic_dec_and_test(&ashmem_shrink_inflight))
			wake_up_all(&ashmem_shrink_wait);
		if (!mutex_trylock(&ashmem_mutex))
			goto out;
		if (--sc->nr_to_scan <= 0)
			break;
	}
	mutex_unlock(&ashmem_mutex);
out:
	return freed;
}
static unsigned long
ashmem_shrink_count(struct shrinker *shrink, struct shrink_control *sc)
{
	/*
	 * note that lru_count is a count of pages on the lru, not a count of
	 * objects on the list. This means the scan function needs to return
	 * the number of pages freed, not the number of objects scanned.
	 */
	return lru_count;
}

static struct shrinker ashmem_shrinker = {
	.count_objects = ashmem_shrink_count,
	.scan_objects = ashmem_shrink_scan,
	/*
	 * XXX (dchinner): I wish people would comment on why they need
	 * significant changes to the default value here
	 */
	.seeks = DEFAULT_SEEKS * 4,
};
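
/*
 * Example of the page accounting above (illustrative only): one unpinned
 * range covering pages 0-3 contributes 4 to lru_count, so
 * ashmem_shrink_count() reports 4 even though a single object is on the
 * list, and purging that range in ashmem_shrink_scan() adds 4 to 'freed'.
 */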
static int set_prot_mask(struct ashmem_area *asma, unsigned long prot)
{
	int ret = 0;

	mutex_lock(&ashmem_mutex);

	/* the user can only remove, not add, protection bits */
	if ((asma->prot_mask & prot) != prot) {
		ret = -EINVAL;
		goto out;
	}

	/* does the application expect PROT_READ to imply PROT_EXEC? */
	if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC))
		prot |= PROT_EXEC;

	asma->prot_mask = prot;

out:
	mutex_unlock(&ashmem_mutex);
	return ret;
}
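
/*
 * Usage sketch (illustrative only; assumes the ASHMEM_* ioctls from the uapi
 * ashmem.h header):
 *
 *	int fd = open("/dev/ashmem", O_RDWR);
 *
 *	ioctl(fd, ASHMEM_SET_SIZE, 4096);
 *	ioctl(fd, ASHMEM_SET_PROT_MASK, PROT_READ);  // drop WRITE and EXEC
 *	ioctl(fd, ASHMEM_SET_PROT_MASK, PROT_READ | PROT_WRITE);  // fails, EINVAL
 *
 * Once a bit has been removed from prot_mask it cannot be re-added; that is
 * exactly what the (asma->prot_mask & prot) != prot check above enforces.
 */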
static int set_name(struct ashmem_area *asma, void __user *name)
{
	int len;
	int ret = 0;
	char local_name[ASHMEM_NAME_LEN];

	/*
	 * Holding the ashmem_mutex while doing a copy_from_user might cause
	 * a data abort which would try to access mmap_lock. If another
	 * thread has invoked ashmem_mmap then it will be holding the
	 * semaphore and will be waiting for ashmem_mutex, thereby leading to
	 * deadlock. So copy the name to a local variable that does not need
	 * protection first, and only take the mutex when copying the local
	 * variable into the structure member.
	 */
	len = strncpy_from_user(local_name, name, ASHMEM_NAME_LEN);
	if (len < 0)
		return len;

	mutex_lock(&ashmem_mutex);
	/* cannot change an existing mapping's name */
	if (asma->file)
		ret = -EINVAL;
	else
		strscpy(asma->name + ASHMEM_NAME_PREFIX_LEN, local_name,
			ASHMEM_NAME_LEN);

	mutex_unlock(&ashmem_mutex);
	return ret;
}
static int get_name(struct ashmem_area *asma, void __user *name)
{
	int ret = 0;
	size_t len;
	/*
	 * Have a local variable to which we'll copy the content
	 * from asma with the lock held. Later we can copy this to the user
	 * space safely without holding any locks. So even if we proceed to
	 * wait for mmap_lock, it won't lead to deadlock.
	 */
	char local_name[ASHMEM_NAME_LEN];

	mutex_lock(&ashmem_mutex);
	if (asma->name[ASHMEM_NAME_PREFIX_LEN] != '\0') {
		/*
		 * Copying only `len', instead of ASHMEM_NAME_LEN, bytes
		 * prevents us from revealing one user's stack to another.
		 */
		len = strlen(asma->name + ASHMEM_NAME_PREFIX_LEN) + 1;
		memcpy(local_name, asma->name + ASHMEM_NAME_PREFIX_LEN, len);
	} else {
		len = sizeof(ASHMEM_NAME_DEF);
		memcpy(local_name, ASHMEM_NAME_DEF, len);
	}
	mutex_unlock(&ashmem_mutex);

	/*
	 * Now we are just copying from the stack variable to userland
	 */
	if (copy_to_user(name, local_name, len))
		ret = -EFAULT;

	return ret;
}
/*
 * ashmem_pin - pin the given ashmem region, returning whether it was
 * previously purged (ASHMEM_WAS_PURGED) or not (ASHMEM_NOT_PURGED).
 *
 * Caller must hold ashmem_mutex.
 */
static int ashmem_pin(struct ashmem_area *asma, size_t pgstart, size_t pgend,
		      struct ashmem_range **new_range)
{
	struct ashmem_range *range, *next;
	int ret = ASHMEM_NOT_PURGED;

	list_for_each_entry_safe(range, next, &asma->unpinned_list, unpinned) {
		/* moved past last applicable page; we can short circuit */
		if (range_before_page(range, pgstart))
			break;

		/*
		 * The user can ask us to pin pages that span multiple ranges,
		 * or to pin pages that aren't even unpinned, so this is messy.
		 *
		 * Four cases:
		 * 1. The requested range subsumes an existing range, so we
		 *    just remove the entire matching range.
		 * 2. The requested range overlaps the start of an existing
		 *    range, so we just update that range.
		 * 3. The requested range overlaps the end of an existing
		 *    range, so we just update that range.
		 * 4. The requested range punches a hole in an existing range,
		 *    so we have to update one side of the range and then
		 *    create a new range for the other side.
		 */
		if (page_range_in_range(range, pgstart, pgend)) {
			ret |= range->purged;

			/* Case #1: Easy. Just nuke the whole thing. */
			if (page_range_subsumes_range(range, pgstart, pgend)) {
				range_del(range);
				continue;
			}

			/* Case #2: We overlap from the start, so adjust it */
			if (range->pgstart >= pgstart) {
				range_shrink(range, pgend + 1, range->pgend);
				continue;
			}

			/* Case #3: We overlap from the rear, so adjust it */
			if (range->pgend <= pgend) {
				range_shrink(range, range->pgstart,
					     pgstart - 1);
				continue;
			}

			/*
			 * Case #4: We eat a chunk out of the middle. A bit
			 * more complicated, we allocate a new range for the
			 * second half and adjust the first chunk's endpoint.
			 */
			range_alloc(asma, range, range->purged,
				    pgend + 1, range->pgend, new_range);
			range_shrink(range, range->pgstart, pgstart - 1);
			break;
		}
	}

	return ret;
}
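
/*
 * Worked example of the four cases above (illustrative only): suppose pages
 * 0-9 of an area are unpinned as a single range.
 *
 *	pin 0-9  ->  case 1: the whole range is deleted
 *	pin 0-3  ->  case 2: the range shrinks to 4-9
 *	pin 6-9  ->  case 3: the range shrinks to 0-5
 *	pin 3-6  ->  case 4: the range shrinks to 0-2 and a new range 7-9
 *	             is allocated for the tail
 */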
/*
 * ashmem_unpin - unpin the given range of pages. Returns zero on success.
 *
 * Caller must hold ashmem_mutex.
 */
static int ashmem_unpin(struct ashmem_area *asma, size_t pgstart, size_t pgend,
			struct ashmem_range **new_range)
{
	struct ashmem_range *range, *next;
	unsigned int purged = ASHMEM_NOT_PURGED;

restart:
	list_for_each_entry_safe(range, next, &asma->unpinned_list, unpinned) {
		/* short circuit: this is our insertion point */
		if (range_before_page(range, pgstart))
			break;

		/*
		 * The user can ask us to unpin pages that are already entirely
		 * or partially pinned. We handle those two cases here.
		 */
		if (page_range_subsumed_by_range(range, pgstart, pgend))
			return 0;
		if (page_range_in_range(range, pgstart, pgend)) {
			pgstart = min(range->pgstart, pgstart);
			pgend = max(range->pgend, pgend);
			purged |= range->purged;
			range_del(range);
			goto restart;
		}
	}

	range_alloc(asma, range, purged, pgstart, pgend, new_range);
	return 0;
}
/*
 * ashmem_get_pin_status - Returns ASHMEM_IS_UNPINNED if _any_ pages in the
 * given interval are unpinned and ASHMEM_IS_PINNED otherwise.
 *
 * Caller must hold ashmem_mutex.
 */
static int ashmem_get_pin_status(struct ashmem_area *asma, size_t pgstart,
				 size_t pgend)
{
	struct ashmem_range *range;
	int ret = ASHMEM_IS_PINNED;

	list_for_each_entry(range, &asma->unpinned_list, unpinned) {
		if (range_before_page(range, pgstart))
			break;
		if (page_range_in_range(range, pgstart, pgend)) {
			ret = ASHMEM_IS_UNPINNED;
			break;
		}
	}

	return ret;
}
static int ashmem_pin_unpin(struct ashmem_area *asma, unsigned long cmd,
			    void __user *p)
{
	struct ashmem_pin pin;
	size_t pgstart, pgend;
	int ret = -EINVAL;
	struct ashmem_range *range = NULL;

	if (copy_from_user(&pin, p, sizeof(pin)))
		return -EFAULT;

	if (cmd == ASHMEM_PIN || cmd == ASHMEM_UNPIN) {
		range = kmem_cache_zalloc(ashmem_range_cachep, GFP_KERNEL);
		if (!range)
			return -ENOMEM;
	}

	mutex_lock(&ashmem_mutex);
	wait_event(ashmem_shrink_wait, !atomic_read(&ashmem_shrink_inflight));

	if (!asma->file)
		goto out_unlock;

	/* per custom, you can pass zero for len to mean "everything onward" */
	if (!pin.len)
		pin.len = PAGE_ALIGN(asma->size) - pin.offset;

	if ((pin.offset | pin.len) & ~PAGE_MASK)
		goto out_unlock;

	if (((__u32)-1) - pin.offset < pin.len)
		goto out_unlock;

	if (PAGE_ALIGN(asma->size) < pin.offset + pin.len)
		goto out_unlock;

	pgstart = pin.offset / PAGE_SIZE;
	pgend = pgstart + (pin.len / PAGE_SIZE) - 1;

	switch (cmd) {
	case ASHMEM_PIN:
		ret = ashmem_pin(asma, pgstart, pgend, &range);
		break;
	case ASHMEM_UNPIN:
		ret = ashmem_unpin(asma, pgstart, pgend, &range);
		break;
	case ASHMEM_GET_PIN_STATUS:
		ret = ashmem_get_pin_status(asma, pgstart, pgend);
		break;
	}

out_unlock:
	mutex_unlock(&ashmem_mutex);
	if (range)
		kmem_cache_free(ashmem_range_cachep, range);

	return ret;
}
static long ashmem_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	struct ashmem_area *asma = file->private_data;
	long ret = -ENOTTY;

	switch (cmd) {
	case ASHMEM_SET_NAME:
		ret = set_name(asma, (void __user *)arg);
		break;
	case ASHMEM_GET_NAME:
		ret = get_name(asma, (void __user *)arg);
		break;
	case ASHMEM_SET_SIZE:
		ret = -EINVAL;
		mutex_lock(&ashmem_mutex);
		if (!asma->file) {
			ret = 0;
			asma->size = (size_t)arg;
		}
		mutex_unlock(&ashmem_mutex);
		break;
	case ASHMEM_GET_SIZE:
		ret = asma->size;
		break;
	case ASHMEM_SET_PROT_MASK:
		ret = set_prot_mask(asma, arg);
		break;
	case ASHMEM_GET_PROT_MASK:
		ret = asma->prot_mask;
		break;
	case ASHMEM_PIN:
	case ASHMEM_UNPIN:
	case ASHMEM_GET_PIN_STATUS:
		ret = ashmem_pin_unpin(asma, cmd, (void __user *)arg);
		break;
	case ASHMEM_PURGE_ALL_CACHES:
		ret = -EPERM;
		if (capable(CAP_SYS_ADMIN)) {
			struct shrink_control sc = {
				.gfp_mask = GFP_KERNEL,
				.nr_to_scan = LONG_MAX,
			};

			ret = ashmem_shrink_count(&ashmem_shrinker, &sc);
			ashmem_shrink_scan(&ashmem_shrinker, &sc);
		}
		break;
	}

	return ret;
}
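
/*
 * End-to-end usage sketch (illustrative only; assumes the uapi ashmem.h ioctl
 * definitions): a region is named and sized before the first mmap(), after
 * which both become fixed.
 *
 *	int fd = open("/dev/ashmem", O_RDWR);
 *
 *	ioctl(fd, ASHMEM_SET_NAME, "example-region");
 *	ioctl(fd, ASHMEM_SET_SIZE, 2 * 4096);
 *	void *p = mmap(NULL, 2 * 4096, PROT_READ | PROT_WRITE,
 *		       MAP_SHARED, fd, 0);
 *
 * The fd can then be passed to another process (e.g. over a unix domain
 * socket) and mmap()ed there to share the same pages.
 */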
/* support of 32bit userspace on 64bit platforms */
#ifdef CONFIG_COMPAT
static long compat_ashmem_ioctl(struct file *file, unsigned int cmd,
				unsigned long arg)
{
	switch (cmd) {
	case COMPAT_ASHMEM_SET_SIZE:
		cmd = ASHMEM_SET_SIZE;
		break;
	case COMPAT_ASHMEM_SET_PROT_MASK:
		cmd = ASHMEM_SET_PROT_MASK;
		break;
	}
	return ashmem_ioctl(file, cmd, arg);
}
#endif
#ifdef CONFIG_PROC_FS
static void ashmem_show_fdinfo(struct seq_file *m, struct file *file)
{
	struct ashmem_area *asma = file->private_data;

	mutex_lock(&ashmem_mutex);

	if (asma->file)
		seq_printf(m, "inode:\t%ld\n", file_inode(asma->file)->i_ino);

	if (asma->name[ASHMEM_NAME_PREFIX_LEN] != '\0')
		seq_printf(m, "name:\t%s\n",
			   asma->name + ASHMEM_NAME_PREFIX_LEN);

	mutex_unlock(&ashmem_mutex);
}
#endif
static const struct file_operations ashmem_fops = {
	.owner = THIS_MODULE,
	.open = ashmem_open,
	.release = ashmem_release,
	.read_iter = ashmem_read_iter,
	.llseek = ashmem_llseek,
	.mmap = ashmem_mmap,
	.unlocked_ioctl = ashmem_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl = compat_ashmem_ioctl,
#endif
#ifdef CONFIG_PROC_FS
	.show_fdinfo = ashmem_show_fdinfo,
#endif
};

static struct miscdevice ashmem_misc = {
	.minor = MISC_DYNAMIC_MINOR,
	.name = "ashmem",
	.fops = &ashmem_fops,
};
static int __init ashmem_init(void)
{
	int ret = -ENOMEM;

	ashmem_area_cachep = kmem_cache_create("ashmem_area_cache",
					       sizeof(struct ashmem_area),
					       0, 0, NULL);
	if (!ashmem_area_cachep) {
		pr_err("failed to create slab cache\n");
		goto out;
	}

	ashmem_range_cachep = kmem_cache_create("ashmem_range_cache",
						sizeof(struct ashmem_range),
						0, 0, NULL);
	if (!ashmem_range_cachep) {
		pr_err("failed to create slab cache\n");
		goto out_free1;
	}

	ret = misc_register(&ashmem_misc);
	if (ret) {
		pr_err("failed to register misc device!\n");
		goto out_free2;
	}

	ret = register_shrinker(&ashmem_shrinker);
	if (ret) {
		pr_err("failed to register shrinker!\n");
		goto out_demisc;
	}

	pr_info("initialized\n");

	return 0;

out_demisc:
	misc_deregister(&ashmem_misc);
out_free2:
	kmem_cache_destroy(ashmem_range_cachep);
out_free1:
	kmem_cache_destroy(ashmem_area_cachep);
out:
	return ret;
}
device_initcall(ashmem_init);