treewide: remove redundant IS_ERR() before error code check
// SPDX-License-Identifier: GPL-2.0
/* mm/ashmem.c
 *
 * Anonymous Shared Memory Subsystem, ashmem
 *
 * Copyright (C) 2008 Google, Inc.
 *
 * Robert Love <rlove@google.com>
 */
#define pr_fmt(fmt) "ashmem: " fmt

#include <linux/init.h>
#include <linux/export.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/falloc.h>
#include <linux/miscdevice.h>
#include <linux/security.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/uaccess.h>
#include <linux/personality.h>
#include <linux/bitops.h>
#include <linux/mutex.h>
#include <linux/shmem_fs.h>
#include "ashmem.h"
#define ASHMEM_NAME_PREFIX "dev/ashmem/"
#define ASHMEM_NAME_PREFIX_LEN (sizeof(ASHMEM_NAME_PREFIX) - 1)
#define ASHMEM_FULL_NAME_LEN (ASHMEM_NAME_LEN + ASHMEM_NAME_PREFIX_LEN)
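
/*
 * For example: a region that userspace names "foo" via ASHMEM_SET_NAME is
 * stored here with the prefix applied, i.e. "dev/ashmem/foo". That full
 * prefixed name is what is handed to shmem_file_setup() below and is what
 * later shows up in /proc/<pid>/maps.
 */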
/**
 * struct ashmem_area - The anonymous shared memory area
 * @name:		The optional name in /proc/pid/maps
 * @unpinned_list:	The list of this area's unpinned ranges
 * @file:		The shmem-based backing file
 * @size:		The size of the mapping, in bytes
 * @prot_mask:		The allowed protection bits, as vm_flags
 *
 * The lifecycle of this structure is from our parent file's open() until
 * its release(). It is also protected by 'ashmem_mutex'.
 *
 * Warning: Mappings do NOT pin this structure; it dies on close().
 */
struct ashmem_area {
	char name[ASHMEM_FULL_NAME_LEN];
	struct list_head unpinned_list;
	struct file *file;
	size_t size;
	unsigned long prot_mask;
};
/**
 * struct ashmem_range - A range of unpinned/evictable pages
 * @lru:	The entry in the LRU list
 * @unpinned:	The entry in its area's unpinned list
 * @asma:	The associated anonymous shared memory area
 * @pgstart:	The starting page (inclusive)
 * @pgend:	The ending page (inclusive)
 * @purged:	The purge status (ASHMEM_NOT_PURGED or ASHMEM_WAS_PURGED)
 *
 * The lifecycle of this structure is from unpin to pin.
 * It is protected by 'ashmem_mutex'.
 */
struct ashmem_range {
	struct list_head lru;
	struct list_head unpinned;
	struct ashmem_area *asma;
	size_t pgstart;
	size_t pgend;
	unsigned int purged;
};
/* LRU list of unpinned pages, protected by ashmem_mutex */
static LIST_HEAD(ashmem_lru_list);

static atomic_t ashmem_shrink_inflight = ATOMIC_INIT(0);
static DECLARE_WAIT_QUEUE_HEAD(ashmem_shrink_wait);

/*
 * long lru_count - The count of pages on our LRU list.
 *
 * This is protected by ashmem_mutex.
 */
static unsigned long lru_count;

/*
 * ashmem_mutex - protects the list of, and each individual, ashmem_area
 *
 * Lock Ordering: ashmem_mutex -> i_mutex -> i_alloc_sem
 */
static DEFINE_MUTEX(ashmem_mutex);

static struct kmem_cache *ashmem_area_cachep __read_mostly;
static struct kmem_cache *ashmem_range_cachep __read_mostly;
static inline unsigned long range_size(struct ashmem_range *range)
{
	return range->pgend - range->pgstart + 1;
}

static inline bool range_on_lru(struct ashmem_range *range)
{
	return range->purged == ASHMEM_NOT_PURGED;
}

static inline bool page_range_subsumes_range(struct ashmem_range *range,
					     size_t start, size_t end)
{
	return (range->pgstart >= start) && (range->pgend <= end);
}

static inline bool page_range_subsumed_by_range(struct ashmem_range *range,
						size_t start, size_t end)
{
	return (range->pgstart <= start) && (range->pgend >= end);
}

static inline bool page_in_range(struct ashmem_range *range, size_t page)
{
	return (range->pgstart <= page) && (range->pgend >= page);
}

static inline bool page_range_in_range(struct ashmem_range *range,
				       size_t start, size_t end)
{
	return page_in_range(range, start) || page_in_range(range, end) ||
		page_range_subsumes_range(range, start, end);
}

static inline bool range_before_page(struct ashmem_range *range,
				     size_t page)
{
	return range->pgend < page;
}
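
/*
 * For illustration, with a range covering pages [4, 7]:
 *
 *	page_in_range(range, 5)				-> true
 *	page_range_subsumes_range(range, 2, 9)		-> true  ([4,7] inside [2,9])
 *	page_range_subsumed_by_range(range, 5, 6)	-> true  ([5,6] inside [4,7])
 *	page_range_in_range(range, 8, 10)		-> false (no overlap)
 *	range_before_page(range, 8)			-> true  (7 < 8)
 */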
#define PROT_MASK	(PROT_EXEC | PROT_READ | PROT_WRITE)

/**
 * lru_add() - Adds a range of memory to the LRU list
 * @range:	The memory range being added.
 *
 * The range is first added to the end (tail) of the LRU list.
 * After this, the size of the range is added to @lru_count.
 */
static inline void lru_add(struct ashmem_range *range)
{
	list_add_tail(&range->lru, &ashmem_lru_list);
	lru_count += range_size(range);
}

/**
 * lru_del() - Removes a range of memory from the LRU list
 * @range:	The memory range being removed
 *
 * The range is first deleted from the LRU list.
 * After this, the size of the range is subtracted from @lru_count.
 */
static inline void lru_del(struct ashmem_range *range)
{
	list_del(&range->lru);
	lru_count -= range_size(range);
}
/**
 * range_alloc() - Allocates and initializes a new ashmem_range structure
 * @asma:	The associated ashmem_area
 * @prev_range:	The previous ashmem_range in the sorted asma->unpinned list
 * @purged:	Initial purge status (ASHMEM_NOT_PURGED or ASHMEM_WAS_PURGED)
 * @start:	The starting page (inclusive)
 * @end:	The ending page (inclusive)
 * @new_range:	The pre-allocated range to consume; set to NULL once used
 *
 * This function is protected by ashmem_mutex.
 */
static void range_alloc(struct ashmem_area *asma,
			struct ashmem_range *prev_range, unsigned int purged,
			size_t start, size_t end,
			struct ashmem_range **new_range)
{
	struct ashmem_range *range = *new_range;

	*new_range = NULL;
	range->asma = asma;
	range->pgstart = start;
	range->pgend = end;
	range->purged = purged;

	list_add_tail(&range->unpinned, &prev_range->unpinned);

	if (range_on_lru(range))
		lru_add(range);
}
/**
 * range_del() - Deletes and deallocates an ashmem_range structure
 * @range:	The associated ashmem_range that has previously been allocated
 */
static void range_del(struct ashmem_range *range)
{
	list_del(&range->unpinned);
	if (range_on_lru(range))
		lru_del(range);
	kmem_cache_free(ashmem_range_cachep, range);
}
/**
 * range_shrink() - Shrinks an ashmem_range
 * @range:	The associated ashmem_range being shrunk
 * @start:	The starting page (inclusive) of the new range
 * @end:	The ending page (inclusive) of the new range
 *
 * This does not modify the data inside the existing range in any way - it
 * simply shrinks the boundaries of the range.
 *
 * Theoretically, with a little tweaking, this could eventually be changed
 * to range_resize, and expand the lru_count if the new range is larger.
 */
static inline void range_shrink(struct ashmem_range *range,
				size_t start, size_t end)
{
	size_t pre = range_size(range);

	range->pgstart = start;
	range->pgend = end;

	if (range_on_lru(range))
		lru_count -= pre - range_size(range);
}
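
/*
 * For example: shrinking a range from [2, 9] (8 pages) to [4, 9] (6 pages)
 * subtracts pre - range_size(range) == 8 - 6 == 2 pages from lru_count.
 */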
/**
 * ashmem_open() - Opens an Anonymous Shared Memory structure
 * @inode:	The backing file's inode
 * @file:	The backing file
 *
 * Please note that the ashmem_area is not returned by this function - it is
 * instead written to "file->private_data".
 *
 * Return: 0 if successful, or another code if unsuccessful.
 */
static int ashmem_open(struct inode *inode, struct file *file)
{
	struct ashmem_area *asma;
	int ret;

	ret = generic_file_open(inode, file);
	if (ret)
		return ret;

	asma = kmem_cache_zalloc(ashmem_area_cachep, GFP_KERNEL);
	if (!asma)
		return -ENOMEM;

	INIT_LIST_HEAD(&asma->unpinned_list);
	memcpy(asma->name, ASHMEM_NAME_PREFIX, ASHMEM_NAME_PREFIX_LEN);
	asma->prot_mask = PROT_MASK;
	file->private_data = asma;

	return 0;
}
/**
 * ashmem_release() - Releases an Anonymous Shared Memory structure
 * @ignored:	The backing file's inode - it is ignored here.
 * @file:	The backing file
 *
 * Return: 0 if successful. If it is anything else, go have a coffee and
 * try again.
 */
static int ashmem_release(struct inode *ignored, struct file *file)
{
	struct ashmem_area *asma = file->private_data;
	struct ashmem_range *range, *next;

	mutex_lock(&ashmem_mutex);
	list_for_each_entry_safe(range, next, &asma->unpinned_list, unpinned)
		range_del(range);
	mutex_unlock(&ashmem_mutex);

	if (asma->file)
		fput(asma->file);
	kmem_cache_free(ashmem_area_cachep, asma);

	return 0;
}
static ssize_t ashmem_read_iter(struct kiocb *iocb, struct iov_iter *iter)
{
	struct ashmem_area *asma = iocb->ki_filp->private_data;
	int ret = 0;

	mutex_lock(&ashmem_mutex);

	/* If size is not set, or set to 0, always return EOF. */
	if (asma->size == 0)
		goto out_unlock;

	if (!asma->file) {
		ret = -EBADF;
		goto out_unlock;
	}

	/*
	 * asma and asma->file are used outside the lock here. We assume
	 * once asma->file is set it will never be changed, and will not
	 * be destroyed until all references to the file are dropped and
	 * ashmem_release is called.
	 */
	mutex_unlock(&ashmem_mutex);
	ret = vfs_iter_read(asma->file, iter, &iocb->ki_pos, 0);
	mutex_lock(&ashmem_mutex);
	if (ret > 0)
		asma->file->f_pos = iocb->ki_pos;
out_unlock:
	mutex_unlock(&ashmem_mutex);
	return ret;
}
static loff_t ashmem_llseek(struct file *file, loff_t offset, int origin)
{
	struct ashmem_area *asma = file->private_data;
	loff_t ret;

	mutex_lock(&ashmem_mutex);

	if (asma->size == 0) {
		mutex_unlock(&ashmem_mutex);
		return -EINVAL;
	}

	if (!asma->file) {
		mutex_unlock(&ashmem_mutex);
		return -EBADF;
	}

	mutex_unlock(&ashmem_mutex);

	ret = vfs_llseek(asma->file, offset, origin);
	if (ret < 0)
		return ret;

	/* Copy f_pos from backing file, since f_ops->llseek() sets it */
	file->f_pos = asma->file->f_pos;
	return ret;
}
static inline vm_flags_t calc_vm_may_flags(unsigned long prot)
{
	return _calc_vm_trans(prot, PROT_READ,  VM_MAYREAD) |
	       _calc_vm_trans(prot, PROT_WRITE, VM_MAYWRITE) |
	       _calc_vm_trans(prot, PROT_EXEC,  VM_MAYEXEC);
}
static int ashmem_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct ashmem_area *asma = file->private_data;
	int ret = 0;

	mutex_lock(&ashmem_mutex);

	/* user needs to SET_SIZE before mapping */
	if (!asma->size) {
		ret = -EINVAL;
		goto out;
	}

	/* requested mapping size larger than object size */
	if (vma->vm_end - vma->vm_start > PAGE_ALIGN(asma->size)) {
		ret = -EINVAL;
		goto out;
	}

	/* requested protection bits must match our allowed protection mask */
	if ((vma->vm_flags & ~calc_vm_prot_bits(asma->prot_mask, 0)) &
	    calc_vm_prot_bits(PROT_MASK, 0)) {
		ret = -EPERM;
		goto out;
	}
	vma->vm_flags &= ~calc_vm_may_flags(~asma->prot_mask);

	if (!asma->file) {
		char *name = ASHMEM_NAME_DEF;
		struct file *vmfile;

		if (asma->name[ASHMEM_NAME_PREFIX_LEN] != '\0')
			name = asma->name;

		/* ... and allocate the backing shmem file */
		vmfile = shmem_file_setup(name, asma->size, vma->vm_flags);
		if (IS_ERR(vmfile)) {
			ret = PTR_ERR(vmfile);
			goto out;
		}
		vmfile->f_mode |= FMODE_LSEEK;
		asma->file = vmfile;
	}
	get_file(asma->file);

	/*
	 * XXX - Reworked to use shmem_zero_setup() instead of
	 * shmem_set_file while we're in staging. -jstultz
	 */
	if (vma->vm_flags & VM_SHARED) {
		ret = shmem_zero_setup(vma);
		if (ret) {
			fput(asma->file);
			goto out;
		}
	} else {
		vma_set_anonymous(vma);
	}

	if (vma->vm_file)
		fput(vma->vm_file);
	vma->vm_file = asma->file;

out:
	mutex_unlock(&ashmem_mutex);
	return ret;
}
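
/*
 * Typical userspace usage of the mmap path above, as a hedged sketch
 * (error handling omitted; "example-region" is an illustrative name; the
 * device node name comes from ashmem_misc below):
 *
 *	int fd = open("/dev/ashmem", O_RDWR);
 *	ioctl(fd, ASHMEM_SET_NAME, "example-region");	// optional
 *	ioctl(fd, ASHMEM_SET_SIZE, 4096);		// required before mmap
 *	char *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
 *		       MAP_SHARED, fd, 0);
 */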
/*
 * ashmem_shrink - our cache shrinker, called from mm/vmscan.c
 *
 * 'nr_to_scan' is the number of objects to scan for freeing.
 *
 * 'gfp_mask' is the mask of the allocation that got us into this mess.
 *
 * Return value is the number of objects freed or -1 if we cannot
 * proceed without risk of deadlock (due to gfp_mask).
 *
 * We approximate LRU via least-recently-unpinned, jettisoning unpinned partial
 * chunks of ashmem regions LRU-wise one-at-a-time until we hit 'nr_to_scan'
 * pages freed.
 */
static unsigned long
ashmem_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
{
	unsigned long freed = 0;

	/* We might recurse into filesystem code, so bail out if necessary */
	if (!(sc->gfp_mask & __GFP_FS))
		return SHRINK_STOP;

	if (!mutex_trylock(&ashmem_mutex))
		return -1;

	while (!list_empty(&ashmem_lru_list)) {
		struct ashmem_range *range =
			list_first_entry(&ashmem_lru_list, typeof(*range), lru);
		loff_t start = range->pgstart * PAGE_SIZE;
		loff_t end = (range->pgend + 1) * PAGE_SIZE;
		struct file *f = range->asma->file;

		get_file(f);
		atomic_inc(&ashmem_shrink_inflight);
		range->purged = ASHMEM_WAS_PURGED;
		lru_del(range);

		freed += range_size(range);
		mutex_unlock(&ashmem_mutex);
		f->f_op->fallocate(f,
				   FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
				   start, end - start);
		fput(f);
		if (atomic_dec_and_test(&ashmem_shrink_inflight))
			wake_up_all(&ashmem_shrink_wait);
		if (!mutex_trylock(&ashmem_mutex))
			goto out;
		if (--sc->nr_to_scan <= 0)
			break;
	}
	mutex_unlock(&ashmem_mutex);
out:
	return freed;
}
static unsigned long
ashmem_shrink_count(struct shrinker *shrink, struct shrink_control *sc)
{
	/*
	 * note that lru_count is a count of pages on the lru, not a count of
	 * objects on the list. This means the scan function needs to return
	 * the number of pages freed, not the number of objects scanned.
	 */
	return lru_count;
}
static struct shrinker ashmem_shrinker = {
	.count_objects = ashmem_shrink_count,
	.scan_objects = ashmem_shrink_scan,
	/*
	 * XXX (dchinner): I wish people would comment on why they need
	 * significant changes to the default value here
	 */
	.seeks = DEFAULT_SEEKS * 4,
};
static int set_prot_mask(struct ashmem_area *asma, unsigned long prot)
{
	int ret = 0;

	mutex_lock(&ashmem_mutex);

	/* the user can only remove, not add, protection bits */
	if ((asma->prot_mask & prot) != prot) {
		ret = -EINVAL;
		goto out;
	}

	/* does the application expect PROT_READ to imply PROT_EXEC? */
	if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC))
		prot |= PROT_EXEC;

	asma->prot_mask = prot;

out:
	mutex_unlock(&ashmem_mutex);
	return ret;
}
static int set_name(struct ashmem_area *asma, void __user *name)
{
	int len;
	int ret = 0;
	char local_name[ASHMEM_NAME_LEN];

	/*
	 * Holding the ashmem_mutex while doing a copy_from_user might cause
	 * a data abort which would try to access mmap_sem. If another
	 * thread has invoked ashmem_mmap then it will be holding the
	 * semaphore and will be waiting for ashmem_mutex, thereby leading to
	 * deadlock. We'll release the mutex and take the name to a local
	 * variable that does not need protection and later copy the local
	 * variable to the structure member with lock held.
	 */
	len = strncpy_from_user(local_name, name, ASHMEM_NAME_LEN);
	if (len < 0)
		return len;

	mutex_lock(&ashmem_mutex);
	/* cannot change an existing mapping's name */
	if (asma->file)
		ret = -EINVAL;
	else
		strscpy(asma->name + ASHMEM_NAME_PREFIX_LEN, local_name,
			ASHMEM_NAME_LEN);

	mutex_unlock(&ashmem_mutex);
	return ret;
}
static int get_name(struct ashmem_area *asma, void __user *name)
{
	int ret = 0;
	size_t len;
	/*
	 * Have a local variable to which we'll copy the content
	 * from asma with the lock held. Later we can copy this to the user
	 * space safely without holding any locks. So even if we proceed to
	 * wait for mmap_sem, it won't lead to deadlock.
	 */
	char local_name[ASHMEM_NAME_LEN];

	mutex_lock(&ashmem_mutex);
	if (asma->name[ASHMEM_NAME_PREFIX_LEN] != '\0') {
		/*
		 * Copying only `len', instead of ASHMEM_NAME_LEN, bytes
		 * prevents us from revealing one user's stack to another.
		 */
		len = strlen(asma->name + ASHMEM_NAME_PREFIX_LEN) + 1;
		memcpy(local_name, asma->name + ASHMEM_NAME_PREFIX_LEN, len);
	} else {
		len = sizeof(ASHMEM_NAME_DEF);
		memcpy(local_name, ASHMEM_NAME_DEF, len);
	}
	mutex_unlock(&ashmem_mutex);

	/*
	 * Now we are just copying from the stack variable to userland.
	 * No lock held.
	 */
	if (copy_to_user(name, local_name, len))
		ret = -EFAULT;
	return ret;
}
/*
 * ashmem_pin - pin the given ashmem region, returning whether it was
 * previously purged (ASHMEM_WAS_PURGED) or not (ASHMEM_NOT_PURGED).
 *
 * Caller must hold ashmem_mutex.
 */
static int ashmem_pin(struct ashmem_area *asma, size_t pgstart, size_t pgend,
		      struct ashmem_range **new_range)
{
	struct ashmem_range *range, *next;
	int ret = ASHMEM_NOT_PURGED;

	list_for_each_entry_safe(range, next, &asma->unpinned_list, unpinned) {
		/* moved past last applicable page; we can short circuit */
		if (range_before_page(range, pgstart))
			break;

		/*
		 * The user can ask us to pin pages that span multiple ranges,
		 * or to pin pages that aren't even unpinned, so this is messy.
		 *
		 * Four cases:
		 * 1. The requested range subsumes an existing range, so we
		 *    just remove the entire matching range.
		 * 2. The requested range overlaps the start of an existing
		 *    range, so we just update that range.
		 * 3. The requested range overlaps the end of an existing
		 *    range, so we just update that range.
		 * 4. The requested range punches a hole in an existing range,
		 *    so we have to update one side of the range and then
		 *    create a new range for the other side.
		 */
		if (page_range_in_range(range, pgstart, pgend)) {
			ret |= range->purged;

			/* Case #1: Easy. Just nuke the whole thing. */
			if (page_range_subsumes_range(range, pgstart, pgend)) {
				range_del(range);
				continue;
			}

			/* Case #2: We overlap from the start, so adjust it */
			if (range->pgstart >= pgstart) {
				range_shrink(range, pgend + 1, range->pgend);
				continue;
			}

			/* Case #3: We overlap from the rear, so adjust it */
			if (range->pgend <= pgend) {
				range_shrink(range, range->pgstart,
					     pgstart - 1);
				continue;
			}

			/*
			 * Case #4: We eat a chunk out of the middle. A bit
			 * more complicated, we allocate a new range for the
			 * second half and adjust the first chunk's endpoint.
			 */
			range_alloc(asma, range, range->purged,
				    pgend + 1, range->pgend, new_range);
			range_shrink(range, range->pgstart, pgstart - 1);
			break;
		}
	}

	return ret;
}
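
/*
 * Worked example for the four cases above (page numbers are illustrative).
 * With an existing unpinned range [10, 19], a pin request of:
 *
 *	[10, 19] or wider  -> case #1: the range is deleted outright
 *	[10, 14]           -> case #2: the range shrinks to [15, 19]
 *	[15, 19]           -> case #3: the range shrinks to [10, 14]
 *	[13, 16]           -> case #4: the range shrinks to [10, 12] and a
 *	                      new range [17, 19] is allocated
 */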
/*
 * ashmem_unpin - unpin the given range of pages. Returns zero on success.
 *
 * Caller must hold ashmem_mutex.
 */
static int ashmem_unpin(struct ashmem_area *asma, size_t pgstart, size_t pgend,
			struct ashmem_range **new_range)
{
	struct ashmem_range *range, *next;
	unsigned int purged = ASHMEM_NOT_PURGED;

restart:
	list_for_each_entry_safe(range, next, &asma->unpinned_list, unpinned) {
		/* short circuit: this is our insertion point */
		if (range_before_page(range, pgstart))
			break;

		/*
		 * The user can ask us to unpin pages that are already entirely
		 * or partially pinned. We handle those two cases here.
		 */
		if (page_range_subsumed_by_range(range, pgstart, pgend))
			return 0;
		if (page_range_in_range(range, pgstart, pgend)) {
			pgstart = min(range->pgstart, pgstart);
			pgend = max(range->pgend, pgend);
			purged |= range->purged;
			range_del(range);
			goto restart;
		}
	}

	range_alloc(asma, range, purged, pgstart, pgend, new_range);
	return 0;
}
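
/*
 * Hedged userspace sketch of the pin/unpin protocol, using the fd from the
 * sketch after ashmem_mmap() (offset and len are illustrative; both must be
 * page-aligned, as enforced by ashmem_pin_unpin() below):
 *
 *	struct ashmem_pin pin = { .offset = 0, .len = 4096 };
 *
 *	ioctl(fd, ASHMEM_UNPIN, &pin);	// pages may now be purged under
 *					// memory pressure
 *	if (ioctl(fd, ASHMEM_PIN, &pin) == ASHMEM_WAS_PURGED)
 *		;  // contents were discarded; caller must regenerate them
 */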
/*
 * ashmem_get_pin_status - Returns ASHMEM_IS_UNPINNED if _any_ pages in the
 * given interval are unpinned and ASHMEM_IS_PINNED otherwise.
 *
 * Caller must hold ashmem_mutex.
 */
static int ashmem_get_pin_status(struct ashmem_area *asma, size_t pgstart,
				 size_t pgend)
{
	struct ashmem_range *range;
	int ret = ASHMEM_IS_PINNED;

	list_for_each_entry(range, &asma->unpinned_list, unpinned) {
		if (range_before_page(range, pgstart))
			break;
		if (page_range_in_range(range, pgstart, pgend)) {
			ret = ASHMEM_IS_UNPINNED;
			break;
		}
	}

	return ret;
}
static int ashmem_pin_unpin(struct ashmem_area *asma, unsigned long cmd,
			    void __user *p)
{
	struct ashmem_pin pin;
	size_t pgstart, pgend;
	int ret = -EINVAL;
	struct ashmem_range *range = NULL;

	if (copy_from_user(&pin, p, sizeof(pin)))
		return -EFAULT;

	if (cmd == ASHMEM_PIN || cmd == ASHMEM_UNPIN) {
		range = kmem_cache_zalloc(ashmem_range_cachep, GFP_KERNEL);
		if (!range)
			return -ENOMEM;
	}

	mutex_lock(&ashmem_mutex);
	wait_event(ashmem_shrink_wait, !atomic_read(&ashmem_shrink_inflight));

	if (!asma->file)
		goto out_unlock;

	/* per custom, you can pass zero for len to mean "everything onward" */
	if (!pin.len)
		pin.len = PAGE_ALIGN(asma->size) - pin.offset;

	if ((pin.offset | pin.len) & ~PAGE_MASK)
		goto out_unlock;

	if (((__u32)-1) - pin.offset < pin.len)
		goto out_unlock;

	if (PAGE_ALIGN(asma->size) < pin.offset + pin.len)
		goto out_unlock;

	pgstart = pin.offset / PAGE_SIZE;
	pgend = pgstart + (pin.len / PAGE_SIZE) - 1;

	switch (cmd) {
	case ASHMEM_PIN:
		ret = ashmem_pin(asma, pgstart, pgend, &range);
		break;
	case ASHMEM_UNPIN:
		ret = ashmem_unpin(asma, pgstart, pgend, &range);
		break;
	case ASHMEM_GET_PIN_STATUS:
		ret = ashmem_get_pin_status(asma, pgstart, pgend);
		break;
	}

out_unlock:
	mutex_unlock(&ashmem_mutex);
	if (range)
		kmem_cache_free(ashmem_range_cachep, range);

	return ret;
}
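
/*
 * For example, with 4 KiB pages a request of { .offset = 8192, .len = 8192 }
 * maps to pgstart = 2 and pgend = 3; the overflow check above rejects any
 * offset/len pair whose sum would wrap a __u32.
 */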
static long ashmem_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	struct ashmem_area *asma = file->private_data;
	long ret = -ENOTTY;

	switch (cmd) {
	case ASHMEM_SET_NAME:
		ret = set_name(asma, (void __user *)arg);
		break;
	case ASHMEM_GET_NAME:
		ret = get_name(asma, (void __user *)arg);
		break;
	case ASHMEM_SET_SIZE:
		ret = -EINVAL;
		mutex_lock(&ashmem_mutex);
		if (!asma->file) {
			ret = 0;
			asma->size = (size_t)arg;
		}
		mutex_unlock(&ashmem_mutex);
		break;
	case ASHMEM_GET_SIZE:
		ret = asma->size;
		break;
	case ASHMEM_SET_PROT_MASK:
		ret = set_prot_mask(asma, arg);
		break;
	case ASHMEM_GET_PROT_MASK:
		ret = asma->prot_mask;
		break;
	case ASHMEM_PIN:
	case ASHMEM_UNPIN:
	case ASHMEM_GET_PIN_STATUS:
		ret = ashmem_pin_unpin(asma, cmd, (void __user *)arg);
		break;
	case ASHMEM_PURGE_ALL_CACHES:
		ret = -EPERM;
		if (capable(CAP_SYS_ADMIN)) {
			struct shrink_control sc = {
				.gfp_mask = GFP_KERNEL,
				.nr_to_scan = LONG_MAX,
			};

			ret = ashmem_shrink_count(&ashmem_shrinker, &sc);
			ashmem_shrink_scan(&ashmem_shrinker, &sc);
		}
		break;
	}

	return ret;
}
/* support of 32bit userspace on 64bit platforms */
#ifdef CONFIG_COMPAT
static long compat_ashmem_ioctl(struct file *file, unsigned int cmd,
				unsigned long arg)
{
	switch (cmd) {
	case COMPAT_ASHMEM_SET_SIZE:
		cmd = ASHMEM_SET_SIZE;
		break;
	case COMPAT_ASHMEM_SET_PROT_MASK:
		cmd = ASHMEM_SET_PROT_MASK;
		break;
	}
	return ashmem_ioctl(file, cmd, arg);
}
#endif
#ifdef CONFIG_PROC_FS
static void ashmem_show_fdinfo(struct seq_file *m, struct file *file)
{
	struct ashmem_area *asma = file->private_data;

	mutex_lock(&ashmem_mutex);

	if (asma->file)
		seq_printf(m, "inode:\t%ld\n", file_inode(asma->file)->i_ino);

	if (asma->name[ASHMEM_NAME_PREFIX_LEN] != '\0')
		seq_printf(m, "name:\t%s\n",
			   asma->name + ASHMEM_NAME_PREFIX_LEN);

	mutex_unlock(&ashmem_mutex);
}
#endif
static const struct file_operations ashmem_fops = {
	.owner = THIS_MODULE,
	.open = ashmem_open,
	.release = ashmem_release,
	.read_iter = ashmem_read_iter,
	.llseek = ashmem_llseek,
	.mmap = ashmem_mmap,
	.unlocked_ioctl = ashmem_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl = compat_ashmem_ioctl,
#endif
#ifdef CONFIG_PROC_FS
	.show_fdinfo = ashmem_show_fdinfo,
#endif
};

static struct miscdevice ashmem_misc = {
	.minor = MISC_DYNAMIC_MINOR,
	.name = "ashmem",
	.fops = &ashmem_fops,
};
static int __init ashmem_init(void)
{
	int ret = -ENOMEM;

	ashmem_area_cachep = kmem_cache_create("ashmem_area_cache",
					       sizeof(struct ashmem_area),
					       0, 0, NULL);
	if (!ashmem_area_cachep) {
		pr_err("failed to create slab cache\n");
		goto out;
	}

	ashmem_range_cachep = kmem_cache_create("ashmem_range_cache",
						sizeof(struct ashmem_range),
						0, 0, NULL);
	if (!ashmem_range_cachep) {
		pr_err("failed to create slab cache\n");
		goto out_free1;
	}

	ret = misc_register(&ashmem_misc);
	if (ret) {
		pr_err("failed to register misc device!\n");
		goto out_free2;
	}

	ret = register_shrinker(&ashmem_shrinker);
	if (ret) {
		pr_err("failed to register shrinker!\n");
		goto out_demisc;
	}

	pr_info("initialized\n");

	return 0;

out_demisc:
	misc_deregister(&ashmem_misc);
out_free2:
	kmem_cache_destroy(ashmem_range_cachep);
out_free1:
	kmem_cache_destroy(ashmem_area_cachep);
out:
	return ret;
}
device_initcall(ashmem_init);