/* mm/ashmem.c
 *
 * Anonymous Shared Memory Subsystem, ashmem
 *
 * Copyright (C) 2008 Google, Inc.
 *
 * Robert Love <rlove@google.com>
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#define pr_fmt(fmt) "ashmem: " fmt

#include <linux/module.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/falloc.h>
#include <linux/miscdevice.h>
#include <linux/security.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/uaccess.h>
#include <linux/personality.h>
#include <linux/bitops.h>
#include <linux/mutex.h>
#include <linux/shmem_fs.h>
#include "ashmem.h"

#define ASHMEM_NAME_PREFIX "dev/ashmem/"
#define ASHMEM_NAME_PREFIX_LEN (sizeof(ASHMEM_NAME_PREFIX) - 1)
#define ASHMEM_FULL_NAME_LEN (ASHMEM_NAME_LEN + ASHMEM_NAME_PREFIX_LEN)

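/*
 * Illustrative note (not in the original source): a region whose name is
 * set to "my-buffer" via ASHMEM_SET_NAME ends up with asma->name holding
 * "dev/ashmem/my-buffer"; that is the name later used for the backing
 * shmem file and shown for the mapping in /proc/<pid>/maps.
 */
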
/**
 * struct ashmem_area - The anonymous shared memory area
 * @name:		The optional name in /proc/pid/maps
 * @unpinned_list:	The list of this area's unpinned ranges
 * @file:		The shmem-based backing file
 * @size:		The size of the mapping, in bytes
 * @prot_mask:		The allowed protection bits, as vm_flags
 *
 * The lifecycle of this structure is from our parent file's open() until
 * its release(). It is also protected by 'ashmem_mutex'
 *
 * Warning: Mappings do NOT pin this structure; It dies on close()
 */
struct ashmem_area {
	char name[ASHMEM_FULL_NAME_LEN];
	struct list_head unpinned_list;
	struct file *file;
	size_t size;
	unsigned long prot_mask;
};

/**
 * struct ashmem_range - A range of unpinned/evictable pages
 * @lru:	The entry in the LRU list
 * @unpinned:	The entry in its area's unpinned list
 * @asma:	The associated anonymous shared memory area.
 * @pgstart:	The starting page (inclusive)
 * @pgend:	The ending page (inclusive)
 * @purged:	The purge status (ASHMEM_NOT_PURGED or ASHMEM_WAS_PURGED)
 *
 * The lifecycle of this structure is from unpin to pin.
 * It is protected by 'ashmem_mutex'
 */
struct ashmem_range {
	struct list_head lru;
	struct list_head unpinned;
	struct ashmem_area *asma;
	size_t pgstart;
	size_t pgend;
	unsigned int purged;
};

/* LRU list of unpinned pages, protected by ashmem_mutex */
static LIST_HEAD(ashmem_lru_list);

/**
 * long lru_count - The count of pages on our LRU list.
 *
 * This is protected by ashmem_mutex.
 */
static unsigned long lru_count;

/**
 * ashmem_mutex - protects the list of and each individual ashmem_area
 *
 * Lock Ordering: ashmem_mutex -> i_mutex -> i_alloc_sem
 */
static DEFINE_MUTEX(ashmem_mutex);

static struct kmem_cache *ashmem_area_cachep __read_mostly;
static struct kmem_cache *ashmem_range_cachep __read_mostly;

#define range_size(range) \
	((range)->pgend - (range)->pgstart + 1)

#define range_on_lru(range) \
	((range)->purged == ASHMEM_NOT_PURGED)

#define page_range_subsumes_range(range, start, end) \
	(((range)->pgstart >= (start)) && ((range)->pgend <= (end)))

#define page_range_subsumed_by_range(range, start, end) \
	(((range)->pgstart <= (start)) && ((range)->pgend >= (end)))

#define page_in_range(range, page) \
	(((range)->pgstart <= (page)) && ((range)->pgend >= (page)))

#define page_range_in_range(range, start, end) \
	(page_in_range(range, start) || page_in_range(range, end) || \
		page_range_subsumes_range(range, start, end))

#define range_before_page(range, page) \
	((range)->pgend < (page))

#define PROT_MASK		(PROT_EXEC | PROT_READ | PROT_WRITE)

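/*
 * Illustrative note (not in the original source): for a range covering
 * pages 3..7, page_range_subsumes_range(range, 2, 8) is true (the request
 * fully covers the range), page_range_subsumed_by_range(range, 4, 6) is
 * true (the range fully covers the request), and
 * page_range_in_range(range, 6, 10) is true because the two intervals
 * merely overlap.
 */
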
/**
 * lru_add() - Adds a range of memory to the LRU list
 * @range:     The memory range being added.
 *
 * The range is first added to the end (tail) of the LRU list.
 * After this, the size of the range is added to @lru_count
 */
static inline void lru_add(struct ashmem_range *range)
{
	list_add_tail(&range->lru, &ashmem_lru_list);
	lru_count += range_size(range);
}

/**
 * lru_del() - Removes a range of memory from the LRU list
 * @range:     The memory range being removed
 *
 * The range is first deleted from the LRU list.
 * After this, the size of the range is removed from @lru_count
 */
static inline void lru_del(struct ashmem_range *range)
{
	list_del(&range->lru);
	lru_count -= range_size(range);
}

/**
 * range_alloc() - Allocates and initializes a new ashmem_range structure
 * @asma:	   The associated ashmem_area
 * @prev_range:	   The previous ashmem_range in the sorted asma->unpinned list
 * @purged:	   Initial purge status (ASHMEM_NOT_PURGED or ASHMEM_WAS_PURGED)
 * @start:	   The starting page (inclusive)
 * @end:	   The ending page (inclusive)
 *
 * This function is protected by ashmem_mutex.
 *
 * Return: 0 if successful, or -ENOMEM if there is an error
 */
static int range_alloc(struct ashmem_area *asma,
		       struct ashmem_range *prev_range, unsigned int purged,
		       size_t start, size_t end)
{
	struct ashmem_range *range;

	range = kmem_cache_zalloc(ashmem_range_cachep, GFP_KERNEL);
	if (unlikely(!range))
		return -ENOMEM;

	range->asma = asma;
	range->pgstart = start;
	range->pgend = end;
	range->purged = purged;

	list_add_tail(&range->unpinned, &prev_range->unpinned);

	if (range_on_lru(range))
		lru_add(range);

	return 0;
}

/**
 * range_del() - Deletes and deallocates an ashmem_range structure
 * @range:	 The associated ashmem_range that has previously been allocated
 */
static void range_del(struct ashmem_range *range)
{
	list_del(&range->unpinned);
	if (range_on_lru(range))
		lru_del(range);
	kmem_cache_free(ashmem_range_cachep, range);
}

/**
 * range_shrink() - Shrinks an ashmem_range
 * @range:	    The associated ashmem_range being shrunk
 * @start:	    The starting page (inclusive) of the new range
 * @end:	    The ending page (inclusive) of the new range
 *
 * This does not modify the data inside the existing range in any way - It
 * simply shrinks the boundaries of the range.
 *
 * Theoretically, with a little tweaking, this could eventually be changed
 * to range_resize, and expand the lru_count if the new range is larger.
 */
static inline void range_shrink(struct ashmem_range *range,
				size_t start, size_t end)
{
	size_t pre = range_size(range);

	range->pgstart = start;
	range->pgend = end;

	if (range_on_lru(range))
		lru_count -= pre - range_size(range);
}

/**
 * ashmem_open() - Opens an Anonymous Shared Memory structure
 * @inode:	   The backing file's inode
 * @file:	   The backing file
 *
 * Please note that the ashmem_area is not returned by this function - It is
 * instead written to "file->private_data".
 *
 * Return: 0 if successful, or another code if unsuccessful.
 */
static int ashmem_open(struct inode *inode, struct file *file)
{
	struct ashmem_area *asma;
	int ret;

	ret = generic_file_open(inode, file);
	if (unlikely(ret))
		return ret;

	asma = kmem_cache_zalloc(ashmem_area_cachep, GFP_KERNEL);
	if (unlikely(!asma))
		return -ENOMEM;

	INIT_LIST_HEAD(&asma->unpinned_list);
	memcpy(asma->name, ASHMEM_NAME_PREFIX, ASHMEM_NAME_PREFIX_LEN);
	asma->prot_mask = PROT_MASK;
	file->private_data = asma;

	return 0;
}

/**
 * ashmem_release() - Releases an Anonymous Shared Memory structure
 * @ignored:	      The backing file's inode, ignored here
 * @file:	      The backing file
 *
 * Return: 0 if successful. If it is anything else, go have a coffee and
 * try again.
 */
static int ashmem_release(struct inode *ignored, struct file *file)
{
	struct ashmem_area *asma = file->private_data;
	struct ashmem_range *range, *next;

	mutex_lock(&ashmem_mutex);
	list_for_each_entry_safe(range, next, &asma->unpinned_list, unpinned)
		range_del(range);
	mutex_unlock(&ashmem_mutex);

	if (asma->file)
		fput(asma->file);
	kmem_cache_free(ashmem_area_cachep, asma);

	return 0;
}

/**
 * ashmem_read() - Reads a set of bytes from an Ashmem-enabled file
 * @file:	   The associated backing file.
 * @buf:	   The user buffer being read into
 * @len:	   The number of bytes being read
 * @pos:	   The position of the first byte to read.
 *
 * Return: The number of bytes read, or a negative errno if unsuccessful.
 */
static ssize_t ashmem_read(struct file *file, char __user *buf,
			   size_t len, loff_t *pos)
{
	struct ashmem_area *asma = file->private_data;
	int ret = 0;

	mutex_lock(&ashmem_mutex);

	/* If size is not set, or set to 0, always return EOF. */
	if (asma->size == 0)
		goto out_unlock;

	if (!asma->file) {
		ret = -EBADF;
		goto out_unlock;
	}

	mutex_unlock(&ashmem_mutex);

	/*
	 * asma and asma->file are used outside the lock here. We assume
	 * once asma->file is set it will never be changed, and will not
	 * be destroyed until all references to the file are dropped and
	 * ashmem_release is called.
	 */
	ret = asma->file->f_op->read(asma->file, buf, len, pos);
	if (ret >= 0) {
		/* Update backing file pos, since f_ops->read() doesn't */
		asma->file->f_pos = *pos;
	}
	return ret;

out_unlock:
	mutex_unlock(&ashmem_mutex);
	return ret;
}

static loff_t ashmem_llseek(struct file *file, loff_t offset, int origin)
{
	struct ashmem_area *asma = file->private_data;
	int ret;

	mutex_lock(&ashmem_mutex);

	if (asma->size == 0) {
		ret = -EINVAL;
		goto out;
	}

	if (!asma->file) {
		ret = -EBADF;
		goto out;
	}

	ret = asma->file->f_op->llseek(asma->file, offset, origin);
	if (ret < 0)
		goto out;

	/* Copy f_pos from backing file, since f_ops->llseek() sets it */
	file->f_pos = asma->file->f_pos;

out:
	mutex_unlock(&ashmem_mutex);
	return ret;
}

static inline vm_flags_t calc_vm_may_flags(unsigned long prot)
{
	return _calc_vm_trans(prot, PROT_READ,  VM_MAYREAD) |
	       _calc_vm_trans(prot, PROT_WRITE, VM_MAYWRITE) |
	       _calc_vm_trans(prot, PROT_EXEC,  VM_MAYEXEC);
}

static int ashmem_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct ashmem_area *asma = file->private_data;
	int ret = 0;

	mutex_lock(&ashmem_mutex);

	/* user needs to SET_SIZE before mapping */
	if (unlikely(!asma->size)) {
		ret = -EINVAL;
		goto out;
	}

	/* requested protection bits must match our allowed protection mask */
	if (unlikely((vma->vm_flags & ~calc_vm_prot_bits(asma->prot_mask)) &
		     calc_vm_prot_bits(PROT_MASK))) {
		ret = -EPERM;
		goto out;
	}
	vma->vm_flags &= ~calc_vm_may_flags(~asma->prot_mask);

	if (!asma->file) {
		char *name = ASHMEM_NAME_DEF;
		struct file *vmfile;

		if (asma->name[ASHMEM_NAME_PREFIX_LEN] != '\0')
			name = asma->name;

		/* ... and allocate the backing shmem file */
		vmfile = shmem_file_setup(name, asma->size, vma->vm_flags);
		if (unlikely(IS_ERR(vmfile))) {
			ret = PTR_ERR(vmfile);
			goto out;
		}
		asma->file = vmfile;
	}
	get_file(asma->file);

	/*
	 * XXX - Reworked to use shmem_zero_setup() instead of
	 * shmem_set_file while we're in staging. -jstultz
	 */
	if (vma->vm_flags & VM_SHARED) {
		ret = shmem_zero_setup(vma);
		if (ret) {
			fput(asma->file);
			goto out;
		}
	}

	if (vma->vm_file)
		fput(vma->vm_file);
	vma->vm_file = asma->file;

out:
	mutex_unlock(&ashmem_mutex);
	return ret;
}

/*
 * ashmem_shrink - our cache shrinker, called from mm/vmscan.c :: shrink_slab
 *
 * 'nr_to_scan' is the number of objects to scan for freeing.
 *
 * 'gfp_mask' is the mask of the allocation that got us into this mess.
 *
 * Return value is the number of pages freed, or SHRINK_STOP if we cannot
 * proceed without risk of deadlock (due to gfp_mask).
 *
 * We approximate LRU via least-recently-unpinned, jettisoning unpinned partial
 * chunks of ashmem regions LRU-wise one-at-a-time until we hit 'nr_to_scan'
 * pages freed.
 */
static unsigned long
ashmem_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
{
	struct ashmem_range *range, *next;
	unsigned long freed = 0;

	/* We might recurse into filesystem code, so bail out if necessary */
	if (!(sc->gfp_mask & __GFP_FS))
		return SHRINK_STOP;

	mutex_lock(&ashmem_mutex);
	list_for_each_entry_safe(range, next, &ashmem_lru_list, lru) {
		loff_t start = range->pgstart * PAGE_SIZE;
		loff_t end = (range->pgend + 1) * PAGE_SIZE;

		do_fallocate(range->asma->file,
			     FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
			     start, end - start);
		range->purged = ASHMEM_WAS_PURGED;
		lru_del(range);

		freed += range_size(range);
		if (--sc->nr_to_scan <= 0)
			break;
	}
	mutex_unlock(&ashmem_mutex);
	return freed;
}

static unsigned long
ashmem_shrink_count(struct shrinker *shrink, struct shrink_control *sc)
{
	/*
	 * note that lru_count is count of pages on the lru, not a count of
	 * objects on the list. This means the scan function needs to return the
	 * number of pages freed, not the number of objects scanned.
	 */
	return lru_count;
}

static struct shrinker ashmem_shrinker = {
	.count_objects = ashmem_shrink_count,
	.scan_objects = ashmem_shrink_scan,
	/*
	 * XXX (dchinner): I wish people would comment on why they need such
	 * significant changes to the default value here
	 */
	.seeks = DEFAULT_SEEKS * 4,
};

static int set_prot_mask(struct ashmem_area *asma, unsigned long prot)
{
	int ret = 0;

	mutex_lock(&ashmem_mutex);

	/* the user can only remove, not add, protection bits */
	if (unlikely((asma->prot_mask & prot) != prot)) {
		ret = -EINVAL;
		goto out;
	}

	/* does the application expect PROT_READ to imply PROT_EXEC? */
	if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC))
		prot |= PROT_EXEC;

	asma->prot_mask = prot;

out:
	mutex_unlock(&ashmem_mutex);
	return ret;
}

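/*
 * Illustrative note (not in the original source): ASHMEM_SET_PROT_MASK can
 * only drop bits. A region created with the default PROT_MASK can be
 * restricted to PROT_READ, after which an mmap() requesting PROT_WRITE
 * fails with -EPERM in ashmem_mmap(), and a later attempt to re-add
 * PROT_WRITE via ASHMEM_SET_PROT_MASK fails here with -EINVAL.
 */
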
static int set_name(struct ashmem_area *asma, void __user *name)
{
	int len;
	int ret = 0;
	char local_name[ASHMEM_NAME_LEN];

	/*
	 * Holding the ashmem_mutex while doing a copy_from_user might cause
	 * a data abort which would try to access mmap_sem. If another
	 * thread has invoked ashmem_mmap then it will be holding the
	 * semaphore and will be waiting for ashmem_mutex, thereby leading to
	 * deadlock. We'll release the mutex and take the name to a local
	 * variable that does not need protection and later copy the local
	 * variable to the structure member with lock held.
	 */
	len = strncpy_from_user(local_name, name, ASHMEM_NAME_LEN);
	if (len < 0)
		return len;
	if (len == ASHMEM_NAME_LEN)
		local_name[ASHMEM_NAME_LEN - 1] = '\0';
	mutex_lock(&ashmem_mutex);
	/* cannot change an existing mapping's name */
	if (unlikely(asma->file))
		ret = -EINVAL;
	else
		strcpy(asma->name + ASHMEM_NAME_PREFIX_LEN, local_name);

	mutex_unlock(&ashmem_mutex);
	return ret;
}

static int get_name(struct ashmem_area *asma, void __user *name)
{
	int ret = 0;
	size_t len;
	/*
	 * Have a local variable to which we'll copy the content
	 * from asma with the lock held. Later we can copy this to the user
	 * space safely without holding any locks. So even if we proceed to
	 * wait for mmap_sem, it won't lead to deadlock.
	 */
	char local_name[ASHMEM_NAME_LEN];

	mutex_lock(&ashmem_mutex);
	if (asma->name[ASHMEM_NAME_PREFIX_LEN] != '\0') {
		/*
		 * Copying only `len', instead of ASHMEM_NAME_LEN, bytes
		 * prevents us from revealing one user's stack to another.
		 */
		len = strlen(asma->name + ASHMEM_NAME_PREFIX_LEN) + 1;
		memcpy(local_name, asma->name + ASHMEM_NAME_PREFIX_LEN, len);
	} else {
		len = sizeof(ASHMEM_NAME_DEF);
		memcpy(local_name, ASHMEM_NAME_DEF, len);
	}
	mutex_unlock(&ashmem_mutex);

	/*
	 * Now we are just copying from the stack variable to userland
	 * No lock held
	 */
	if (unlikely(copy_to_user(name, local_name, len)))
		ret = -EFAULT;
	return ret;
}

/*
 * ashmem_pin - pin the given ashmem region, returning whether it was
 * previously purged (ASHMEM_WAS_PURGED) or not (ASHMEM_NOT_PURGED).
 *
 * Caller must hold ashmem_mutex.
 */
static int ashmem_pin(struct ashmem_area *asma, size_t pgstart, size_t pgend)
{
	struct ashmem_range *range, *next;
	int ret = ASHMEM_NOT_PURGED;

	list_for_each_entry_safe(range, next, &asma->unpinned_list, unpinned) {
		/* moved past last applicable page; we can short circuit */
		if (range_before_page(range, pgstart))
			break;

		/*
		 * The user can ask us to pin pages that span multiple ranges,
		 * or to pin pages that aren't even unpinned, so this is messy.
		 *
		 * Four cases:
		 * 1. The requested range subsumes an existing range, so we
		 *    just remove the entire matching range.
		 * 2. The requested range overlaps the start of an existing
		 *    range, so we just update that range.
		 * 3. The requested range overlaps the end of an existing
		 *    range, so we just update that range.
		 * 4. The requested range punches a hole in an existing range,
		 *    so we have to update one side of the range and then
		 *    create a new range for the other side.
		 */

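		/*
		 * Worked example (not in the original source): if an
		 * unpinned range covers pages 10..20 and the caller pins
		 * pages 12..15, that is case #4: the existing range is
		 * shrunk to 10..11 and a new unpinned range is allocated
		 * for 16..20, both keeping the old purge status.
		 */
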
		if (page_range_in_range(range, pgstart, pgend)) {
			ret |= range->purged;

			/* Case #1: Easy. Just nuke the whole thing. */
			if (page_range_subsumes_range(range, pgstart, pgend)) {
				range_del(range);
				continue;
			}

			/* Case #2: We overlap from the start, so adjust it */
			if (range->pgstart >= pgstart) {
				range_shrink(range, pgend + 1, range->pgend);
				continue;
			}

			/* Case #3: We overlap from the rear, so adjust it */
			if (range->pgend <= pgend) {
				range_shrink(range, range->pgstart, pgstart - 1);
				continue;
			}

			/*
			 * Case #4: We eat a chunk out of the middle. A bit
			 * more complicated, we allocate a new range for the
			 * second half and adjust the first chunk's endpoint.
			 */
			range_alloc(asma, range, range->purged,
				    pgend + 1, range->pgend);
			range_shrink(range, range->pgstart, pgstart - 1);
			break;
		}
	}

	return ret;
}

/*
 * ashmem_unpin - unpin the given range of pages. Returns zero on success.
 *
 * Caller must hold ashmem_mutex.
 */
static int ashmem_unpin(struct ashmem_area *asma, size_t pgstart, size_t pgend)
{
	struct ashmem_range *range, *next;
	unsigned int purged = ASHMEM_NOT_PURGED;

restart:
	list_for_each_entry_safe(range, next, &asma->unpinned_list, unpinned) {
		/* short circuit: this is our insertion point */
		if (range_before_page(range, pgstart))
			break;

		/*
		 * The user can ask us to unpin pages that are already entirely
		 * or partially pinned. We handle those two cases here.
		 */
		if (page_range_subsumed_by_range(range, pgstart, pgend))
			return 0;
		if (page_range_in_range(range, pgstart, pgend)) {
			pgstart = min_t(size_t, range->pgstart, pgstart);
			pgend = max_t(size_t, range->pgend, pgend);
			purged |= range->purged;
			range_del(range);
			goto restart;
		}
	}

	return range_alloc(asma, range, purged, pgstart, pgend);
}

/*
 * ashmem_get_pin_status - Returns ASHMEM_IS_UNPINNED if _any_ pages in the
 * given interval are unpinned and ASHMEM_IS_PINNED otherwise.
 *
 * Caller must hold ashmem_mutex.
 */
static int ashmem_get_pin_status(struct ashmem_area *asma, size_t pgstart,
				 size_t pgend)
{
	struct ashmem_range *range;
	int ret = ASHMEM_IS_PINNED;

	list_for_each_entry(range, &asma->unpinned_list, unpinned) {
		if (range_before_page(range, pgstart))
			break;
		if (page_range_in_range(range, pgstart, pgend)) {
			ret = ASHMEM_IS_UNPINNED;
			break;
		}
	}

	return ret;
}

static int ashmem_pin_unpin(struct ashmem_area *asma, unsigned long cmd,
			    void __user *p)
{
	struct ashmem_pin pin;
	size_t pgstart, pgend;
	int ret = -EINVAL;

	if (unlikely(!asma->file))
		return -EINVAL;

	if (unlikely(copy_from_user(&pin, p, sizeof(pin))))
		return -EFAULT;

	/* per custom, you can pass zero for len to mean "everything onward" */
	if (!pin.len)
		pin.len = PAGE_ALIGN(asma->size) - pin.offset;

	if (unlikely((pin.offset | pin.len) & ~PAGE_MASK))
		return -EINVAL;

	if (unlikely(((__u32) -1) - pin.offset < pin.len))
		return -EINVAL;

	if (unlikely(PAGE_ALIGN(asma->size) < pin.offset + pin.len))
		return -EINVAL;

	pgstart = pin.offset / PAGE_SIZE;
	pgend = pgstart + (pin.len / PAGE_SIZE) - 1;

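	/*
	 * Worked example (not in the original source): with 4 KiB pages,
	 * pin.offset = 0x3000 and pin.len = 0x2000 give pgstart = 3 and
	 * pgend = 4, i.e. the operation covers pages 3 and 4 inclusive.
	 */
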
	mutex_lock(&ashmem_mutex);

	switch (cmd) {
	case ASHMEM_PIN:
		ret = ashmem_pin(asma, pgstart, pgend);
		break;
	case ASHMEM_UNPIN:
		ret = ashmem_unpin(asma, pgstart, pgend);
		break;
	case ASHMEM_GET_PIN_STATUS:
		ret = ashmem_get_pin_status(asma, pgstart, pgend);
		break;
	}

	mutex_unlock(&ashmem_mutex);

	return ret;
}

static long ashmem_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	struct ashmem_area *asma = file->private_data;
	long ret = -ENOTTY;

	switch (cmd) {
	case ASHMEM_SET_NAME:
		ret = set_name(asma, (void __user *) arg);
		break;
	case ASHMEM_GET_NAME:
		ret = get_name(asma, (void __user *) arg);
		break;
	case ASHMEM_SET_SIZE:
		ret = -EINVAL;
		if (!asma->file) {
			ret = 0;
			asma->size = (size_t) arg;
		}
		break;
	case ASHMEM_GET_SIZE:
		ret = asma->size;
		break;
	case ASHMEM_SET_PROT_MASK:
		ret = set_prot_mask(asma, arg);
		break;
	case ASHMEM_GET_PROT_MASK:
		ret = asma->prot_mask;
		break;
	case ASHMEM_PIN:
	case ASHMEM_UNPIN:
	case ASHMEM_GET_PIN_STATUS:
		ret = ashmem_pin_unpin(asma, cmd, (void __user *) arg);
		break;
	case ASHMEM_PURGE_ALL_CACHES:
		ret = -EPERM;
		if (capable(CAP_SYS_ADMIN)) {
			struct shrink_control sc = {
				.gfp_mask = GFP_KERNEL,
				.nr_to_scan = LONG_MAX,
			};
			ret = ashmem_shrink_count(&ashmem_shrinker, &sc);
			nodes_setall(sc.nodes_to_scan);
			ashmem_shrink_scan(&ashmem_shrinker, &sc);
		}
		break;
	}

	return ret;
}

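/*
 * Illustrative userspace usage (not part of this driver; a minimal sketch
 * assuming the ioctl definitions from the ashmem uapi header and no error
 * handling):
 *
 *	int fd = open("/dev/ashmem", O_RDWR);
 *	ioctl(fd, ASHMEM_SET_NAME, "my-buffer");
 *	ioctl(fd, ASHMEM_SET_SIZE, 4 * 4096);
 *	void *p = mmap(NULL, 4 * 4096, PROT_READ | PROT_WRITE,
 *		       MAP_SHARED, fd, 0);
 *
 *	struct ashmem_pin pin = { .offset = 0, .len = 4096 };
 *	ioctl(fd, ASHMEM_UNPIN, &pin);	   // page 0 becomes purgeable
 *	int was = ioctl(fd, ASHMEM_PIN, &pin); // ASHMEM_WAS_PURGED if reclaimed
 *
 * Name and size must be set before mmap(); once the backing file exists,
 * ASHMEM_SET_NAME and ASHMEM_SET_SIZE return -EINVAL.
 */
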
/* support of 32bit userspace on 64bit platforms */
#ifdef CONFIG_COMPAT
static long compat_ashmem_ioctl(struct file *file, unsigned int cmd,
				unsigned long arg)
{
	switch (cmd) {
	case COMPAT_ASHMEM_SET_SIZE:
		cmd = ASHMEM_SET_SIZE;
		break;
	case COMPAT_ASHMEM_SET_PROT_MASK:
		cmd = ASHMEM_SET_PROT_MASK;
		break;
	}
	return ashmem_ioctl(file, cmd, arg);
}
#endif

static const struct file_operations ashmem_fops = {
	.owner = THIS_MODULE,
	.open = ashmem_open,
	.release = ashmem_release,
	.read = ashmem_read,
	.llseek = ashmem_llseek,
	.mmap = ashmem_mmap,
	.unlocked_ioctl = ashmem_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl = compat_ashmem_ioctl,
#endif
};

static struct miscdevice ashmem_misc = {
	.minor = MISC_DYNAMIC_MINOR,
	.name = "ashmem",
	.fops = &ashmem_fops,
};

static int __init ashmem_init(void)
{
	int ret;

	ashmem_area_cachep = kmem_cache_create("ashmem_area_cache",
					       sizeof(struct ashmem_area),
					       0, 0, NULL);
	if (unlikely(!ashmem_area_cachep)) {
		pr_err("failed to create slab cache\n");
		return -ENOMEM;
	}

	ashmem_range_cachep = kmem_cache_create("ashmem_range_cache",
						sizeof(struct ashmem_range),
						0, 0, NULL);
	if (unlikely(!ashmem_range_cachep)) {
		pr_err("failed to create slab cache\n");
		return -ENOMEM;
	}

	ret = misc_register(&ashmem_misc);
	if (unlikely(ret)) {
		pr_err("failed to register misc device!\n");
		return ret;
	}

	register_shrinker(&ashmem_shrinker);

	pr_info("initialized\n");

	return 0;
}

static void __exit ashmem_exit(void)
{
	int ret;

	unregister_shrinker(&ashmem_shrinker);

	ret = misc_deregister(&ashmem_misc);
	if (unlikely(ret))
		pr_err("failed to unregister misc device!\n");

	kmem_cache_destroy(ashmem_range_cachep);
	kmem_cache_destroy(ashmem_area_cachep);

	pr_info("unloaded\n");
}

module_init(ashmem_init);
module_exit(ashmem_exit);

MODULE_LICENSE("GPL");