/*
 * Resizable virtual memory filesystem for Linux.
 *
 * Copyright (C) 2000 Linus Torvalds.
 *		 2000-2001 Christoph Rohland
 * Copyright (C) 2002-2011 Hugh Dickins.
 * Copyright (C) 2011 Google Inc.
 * Copyright (C) 2002-2005 VERITAS Software Corporation.
 * Copyright (C) 2004 Andi Kleen, SuSE Labs
 *
 * Extended attribute support for tmpfs:
 * Copyright (c) 2004, Luke Kenneth Casson Leighton <lkcl@lkcl.net>
 * Copyright (c) 2004 Red Hat, Inc., James Morris <jmorris@redhat.com>
 *
 * Copyright (c) 2004, 2008 Matt Mackall <mpm@selenic.com>
 *
 * This file is released under the GPL.
 */
#include <linux/init.h>
#include <linux/vfs.h>
#include <linux/mount.h>
#include <linux/pagemap.h>
#include <linux/file.h>
#include <linux/export.h>
#include <linux/swap.h>
static struct vfsmount *shm_mnt;
/*
 * This virtual memory filesystem is heavily based on the ramfs. It
 * extends ramfs by the ability to use swap and honor resource limits
 * which makes it a completely usable filesystem.
 */
#include <linux/xattr.h>
#include <linux/exportfs.h>
#include <linux/posix_acl.h>
#include <linux/generic_acl.h>
#include <linux/mman.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/backing-dev.h>
#include <linux/shmem_fs.h>
#include <linux/writeback.h>
#include <linux/blkdev.h>
#include <linux/pagevec.h>
#include <linux/percpu_counter.h>
#include <linux/splice.h>
#include <linux/security.h>
#include <linux/swapops.h>
#include <linux/mempolicy.h>
#include <linux/namei.h>
#include <linux/ctype.h>
#include <linux/migrate.h>
#include <linux/highmem.h>
#include <linux/seq_file.h>
#include <linux/magic.h>

#include <asm/uaccess.h>
#include <asm/pgtable.h>
#define BLOCKS_PER_PAGE  (PAGE_CACHE_SIZE/512)
#define VM_ACCT(size)    (PAGE_CACHE_ALIGN(size) >> PAGE_SHIFT)

/* Pretend that each entry is of this size in directory's i_size */
#define BOGO_DIRENT_SIZE 20

/* Symlink up to this size is kmalloc'ed instead of using a swappable page */
#define SHORT_SYMLINK_LEN 128
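/*
 * Illustrative sketch, not part of the original source: how the VM_ACCT
 * macro above rounds a byte count up to whole pages for the overcommit
 * accounting.  With 4K pages (PAGE_SHIFT == 12), a 5000-byte object is
 * charged as two pages:
 *
 *	VM_ACCT(5000) == PAGE_CACHE_ALIGN(5000) >> PAGE_SHIFT
 *		      == 8192 >> 12
 *		      == 2
 */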
struct shmem_xattr {
    struct list_head list;  /* anchored by shmem_inode_info->xattr_list */
    char *name;             /* xattr name */
    size_t size;
    char value[0];
};
/* Flag allocation requirements to shmem_getpage */
enum sgp_type {
    SGP_READ,   /* don't exceed i_size, don't allocate page */
    SGP_CACHE,  /* don't exceed i_size, may allocate page */
    SGP_DIRTY,  /* like SGP_CACHE, but set new page dirty */
    SGP_WRITE,  /* may exceed i_size, may allocate page */
};
static unsigned long shmem_default_max_blocks(void)
{
    return totalram_pages / 2;
}

static unsigned long shmem_default_max_inodes(void)
{
    return min(totalram_pages - totalhigh_pages, totalram_pages / 2);
}
static int shmem_getpage_gfp(struct inode *inode, pgoff_t index,
    struct page **pagep, enum sgp_type sgp, gfp_t gfp, int *fault_type);
static inline int shmem_getpage(struct inode *inode, pgoff_t index,
    struct page **pagep, enum sgp_type sgp, int *fault_type)
{
    return shmem_getpage_gfp(inode, index, pagep, sgp,
            mapping_gfp_mask(inode->i_mapping), fault_type);
}
static inline struct shmem_sb_info *SHMEM_SB(struct super_block *sb)
{
    return sb->s_fs_info;
}
/*
 * shmem_file_setup pre-accounts the whole fixed size of a VM object,
 * for shared memory and for shared anonymous (/dev/zero) mappings
 * (unless MAP_NORESERVE and sysctl_overcommit_memory <= 1),
 * consistent with the pre-accounting of private mappings ...
 */
static inline int shmem_acct_size(unsigned long flags, loff_t size)
{
    return (flags & VM_NORESERVE) ?
        0 : security_vm_enough_memory_kern(VM_ACCT(size));
}
static inline void shmem_unacct_size(unsigned long flags, loff_t size)
{
    if (!(flags & VM_NORESERVE))
        vm_unacct_memory(VM_ACCT(size));
}
/*
 * ... whereas tmpfs objects are accounted incrementally as
 * pages are allocated, in order to allow huge sparse files.
 * shmem_getpage reports shmem_acct_block failure as -ENOSPC not -ENOMEM,
 * so that a failure on a sparse tmpfs mapping will give SIGBUS not OOM.
 */
static inline int shmem_acct_block(unsigned long flags)
{
    return (flags & VM_NORESERVE) ?
        security_vm_enough_memory_kern(VM_ACCT(PAGE_CACHE_SIZE)) : 0;
}
static inline void shmem_unacct_blocks(unsigned long flags, long pages)
{
    if (flags & VM_NORESERVE)
        vm_unacct_memory(pages * VM_ACCT(PAGE_CACHE_SIZE));
}
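/*
 * Illustrative sketch, not part of the original source: the two accounting
 * styles above side by side.  A fixed-size SysV-style object is charged in
 * full up front via shmem_acct_size(); a VM_NORESERVE (tmpfs-style) object
 * is charged one page at a time via shmem_acct_block() as pages are
 * actually instantiated.  shmem_acct_example() is a hypothetical name.
 */
static inline int shmem_acct_example(void)
{
    loff_t size = 1024 * 1024;

    if (shmem_acct_size(0, size))       /* charge all 256 pages now */
        return -ENOMEM;
    shmem_unacct_size(0, size);         /* and give them back */

    if (shmem_acct_block(VM_NORESERVE)) /* charge a single page */
        return -ENOSPC;
    shmem_unacct_blocks(VM_NORESERVE, 1);
    return 0;
}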
static const struct super_operations shmem_ops;
static const struct address_space_operations shmem_aops;
static const struct file_operations shmem_file_operations;
static const struct inode_operations shmem_inode_operations;
static const struct inode_operations shmem_dir_inode_operations;
static const struct inode_operations shmem_special_inode_operations;
static const struct vm_operations_struct shmem_vm_ops;
static struct backing_dev_info shmem_backing_dev_info __read_mostly = {
    .ra_pages       = 0,    /* No readahead */
    .capabilities   = BDI_CAP_NO_ACCT_AND_WRITEBACK | BDI_CAP_SWAP_BACKED,
};

static LIST_HEAD(shmem_swaplist);
static DEFINE_MUTEX(shmem_swaplist_mutex);
static int shmem_reserve_inode(struct super_block *sb)
{
    struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
    if (sbinfo->max_inodes) {
        spin_lock(&sbinfo->stat_lock);
        if (!sbinfo->free_inodes) {
            spin_unlock(&sbinfo->stat_lock);
            return -ENOSPC;
        }
        sbinfo->free_inodes--;
        spin_unlock(&sbinfo->stat_lock);
    }
    return 0;
}
static void shmem_free_inode(struct super_block *sb)
{
    struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
    if (sbinfo->max_inodes) {
        spin_lock(&sbinfo->stat_lock);
        sbinfo->free_inodes++;
        spin_unlock(&sbinfo->stat_lock);
    }
}
/**
 * shmem_recalc_inode - recalculate the block usage of an inode
 * @inode: inode to recalc
 *
 * We have to calculate the free blocks since the mm can drop
 * undirtied hole pages behind our back.
 *
 * But normally   info->alloced == inode->i_mapping->nrpages + info->swapped
 * So mm freed is info->alloced - (inode->i_mapping->nrpages + info->swapped)
 *
 * It has to be called with the spinlock held.
 */
static void shmem_recalc_inode(struct inode *inode)
{
    struct shmem_inode_info *info = SHMEM_I(inode);
    long freed;

    freed = info->alloced - info->swapped - inode->i_mapping->nrpages;
    if (freed > 0) {
        struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
        if (sbinfo->max_blocks)
            percpu_counter_add(&sbinfo->used_blocks, -freed);
        info->alloced -= freed;
        inode->i_blocks -= freed * BLOCKS_PER_PAGE;
        shmem_unacct_blocks(info->flags, freed);
    }
}
/*
 * Replace item expected in radix tree by a new item, while holding tree lock.
 */
static int shmem_radix_tree_replace(struct address_space *mapping,
            pgoff_t index, void *expected, void *replacement)
{
    void **pslot;
    void *item = NULL;

    VM_BUG_ON(!expected);
    pslot = radix_tree_lookup_slot(&mapping->page_tree, index);
    if (pslot)
        item = radix_tree_deref_slot_protected(pslot,
                            &mapping->tree_lock);
    if (item != expected)
        return -ENOENT;
    if (replacement)
        radix_tree_replace_slot(pslot, replacement);
    else
        radix_tree_delete(&mapping->page_tree, index);
    return 0;
}
/*
 * Like add_to_page_cache_locked, but error if expected item has gone.
 */
static int shmem_add_to_page_cache(struct page *page,
                   struct address_space *mapping,
                   pgoff_t index, gfp_t gfp, void *expected)
{
    int error = 0;

    VM_BUG_ON(!PageLocked(page));
    VM_BUG_ON(!PageSwapBacked(page));

    if (!expected)
        error = radix_tree_preload(gfp & GFP_RECLAIM_MASK);
    if (!error) {
        page_cache_get(page);
        page->mapping = mapping;
        page->index = index;

        spin_lock_irq(&mapping->tree_lock);
        if (!expected)
            error = radix_tree_insert(&mapping->page_tree,
                            index, page);
        else
            error = shmem_radix_tree_replace(mapping, index,
                            expected, page);
        if (!error) {
            mapping->nrpages++;
            __inc_zone_page_state(page, NR_FILE_PAGES);
            __inc_zone_page_state(page, NR_SHMEM);
            spin_unlock_irq(&mapping->tree_lock);
        } else {
            page->mapping = NULL;
            spin_unlock_irq(&mapping->tree_lock);
            page_cache_release(page);
        }
        if (!expected)
            radix_tree_preload_end();
    }
    if (error)
        mem_cgroup_uncharge_cache_page(page);
    return error;
}
/*
 * Like delete_from_page_cache, but substitutes swap for page.
 */
static void shmem_delete_from_page_cache(struct page *page, void *radswap)
{
    struct address_space *mapping = page->mapping;
    int error;

    spin_lock_irq(&mapping->tree_lock);
    error = shmem_radix_tree_replace(mapping, page->index, page, radswap);
    page->mapping = NULL;
    mapping->nrpages--;
    __dec_zone_page_state(page, NR_FILE_PAGES);
    __dec_zone_page_state(page, NR_SHMEM);
    spin_unlock_irq(&mapping->tree_lock);
    page_cache_release(page);
    BUG_ON(error);
}
/*
 * Like find_get_pages, but collecting swap entries as well as pages.
 */
static unsigned shmem_find_get_pages_and_swap(struct address_space *mapping,
                    pgoff_t start, unsigned int nr_pages,
                    struct page **pages, pgoff_t *indices)
{
    unsigned int i;
    unsigned int ret;
    unsigned int nr_found;

    rcu_read_lock();
restart:
    nr_found = radix_tree_gang_lookup_slot(&mapping->page_tree,
                (void ***)pages, indices, start, nr_pages);
    ret = 0;
    for (i = 0; i < nr_found; i++) {
        struct page *page;
repeat:
        page = radix_tree_deref_slot((void **)pages[i]);
        if (unlikely(!page))
            continue;
        if (radix_tree_exception(page)) {
            if (radix_tree_deref_retry(page))
                goto restart;
            /*
             * Otherwise, we must be storing a swap entry
             * here as an exceptional entry: so return it
             * without attempting to raise page count.
             */
            goto export;
        }
        if (!page_cache_get_speculative(page))
            goto repeat;

        /* Has the page moved? */
        if (unlikely(page != *((void **)pages[i]))) {
            page_cache_release(page);
            goto repeat;
        }
export:
        indices[ret] = indices[i];
        pages[ret] = page;
        ret++;
    }
    if (unlikely(!ret && nr_found))
        goto restart;
    rcu_read_unlock();
    return ret;
}
/*
 * Remove swap entry from radix tree, free the swap and its page cache.
 */
static int shmem_free_swap(struct address_space *mapping,
               pgoff_t index, void *radswap)
{
    int error;

    spin_lock_irq(&mapping->tree_lock);
    error = shmem_radix_tree_replace(mapping, index, radswap, NULL);
    spin_unlock_irq(&mapping->tree_lock);
    if (!error)
        free_swap_and_cache(radix_to_swp_entry(radswap));
    return error;
}
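/*
 * Illustrative sketch, not part of the original source: the round trip a
 * swap entry makes through the radix tree.  swp_to_radix_entry() tags the
 * value so radix_tree_exceptional_entry() can tell it apart from a page
 * pointer, and radix_to_swp_entry() recovers the original swp_entry_t.
 * shmem_swap_entry_roundtrip() is a hypothetical name.
 */
static inline int shmem_swap_entry_roundtrip(swp_entry_t swap)
{
    void *radswap = swp_to_radix_entry(swap);

    /* an exceptional entry is never mistaken for a struct page * */
    VM_BUG_ON(!radix_tree_exceptional_entry(radswap));
    return radix_to_swp_entry(radswap).val == swap.val;
}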
/*
 * Pagevec may contain swap entries, so shuffle up pages before releasing.
 */
static void shmem_deswap_pagevec(struct pagevec *pvec)
{
    int i, j;

    for (i = 0, j = 0; i < pagevec_count(pvec); i++) {
        struct page *page = pvec->pages[i];
        if (!radix_tree_exceptional_entry(page))
            pvec->pages[j++] = page;
    }
    pvec->nr = j;
}
/*
 * SysV IPC SHM_UNLOCK restore Unevictable pages to their evictable lists.
 */
void shmem_unlock_mapping(struct address_space *mapping)
{
    struct pagevec pvec;
    pgoff_t indices[PAGEVEC_SIZE];
    pgoff_t index = 0;

    pagevec_init(&pvec, 0);
    /*
     * Minor point, but we might as well stop if someone else SHM_LOCKs it.
     */
    while (!mapping_unevictable(mapping)) {
        /*
         * Avoid pagevec_lookup(): find_get_pages() returns 0 as if it
         * has finished, if it hits a row of PAGEVEC_SIZE swap entries.
         */
        pvec.nr = shmem_find_get_pages_and_swap(mapping, index,
                    PAGEVEC_SIZE, pvec.pages, indices);
        if (!pvec.nr)
            break;
        index = indices[pvec.nr - 1] + 1;
        shmem_deswap_pagevec(&pvec);
        check_move_unevictable_pages(pvec.pages, pvec.nr);
        pagevec_release(&pvec);
        cond_resched();
    }
}
/*
 * Remove range of pages and swap entries from radix tree, and free them.
 */
void shmem_truncate_range(struct inode *inode, loff_t lstart, loff_t lend)
{
    struct address_space *mapping = inode->i_mapping;
    struct shmem_inode_info *info = SHMEM_I(inode);
    pgoff_t start = (lstart + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
    unsigned partial = lstart & (PAGE_CACHE_SIZE - 1);
    pgoff_t end = (lend >> PAGE_CACHE_SHIFT);
    struct pagevec pvec;
    pgoff_t indices[PAGEVEC_SIZE];
    long nr_swaps_freed = 0;
    pgoff_t index;
    int i;

    BUG_ON((lend & (PAGE_CACHE_SIZE - 1)) != (PAGE_CACHE_SIZE - 1));

    pagevec_init(&pvec, 0);
    index = start;
    while (index <= end) {
        pvec.nr = shmem_find_get_pages_and_swap(mapping, index,
            min(end - index, (pgoff_t)PAGEVEC_SIZE - 1) + 1,
                            pvec.pages, indices);
        if (!pvec.nr)
            break;
        mem_cgroup_uncharge_start();
        for (i = 0; i < pagevec_count(&pvec); i++) {
            struct page *page = pvec.pages[i];

            index = indices[i];
            if (index > end)
                break;

            if (radix_tree_exceptional_entry(page)) {
                nr_swaps_freed += !shmem_free_swap(mapping,
                                index, page);
                continue;
            }

            if (!trylock_page(page))
                continue;
            if (page->mapping == mapping) {
                VM_BUG_ON(PageWriteback(page));
                truncate_inode_page(mapping, page);
            }
            unlock_page(page);
        }
        shmem_deswap_pagevec(&pvec);
        pagevec_release(&pvec);
        mem_cgroup_uncharge_end();
        cond_resched();
        index++;
    }

    if (partial) {
        struct page *page = NULL;
        shmem_getpage(inode, start - 1, &page, SGP_READ, NULL);
        if (page) {
            zero_user_segment(page, partial, PAGE_CACHE_SIZE);
            set_page_dirty(page);
            unlock_page(page);
            page_cache_release(page);
        }
    }

    index = start;
    for ( ; ; ) {
        cond_resched();
        pvec.nr = shmem_find_get_pages_and_swap(mapping, index,
            min(end - index, (pgoff_t)PAGEVEC_SIZE - 1) + 1,
                            pvec.pages, indices);
        if (!pvec.nr) {
            if (index == start)
                break;
            index = start;
            continue;
        }
        if (index == start && indices[0] > end) {
            shmem_deswap_pagevec(&pvec);
            pagevec_release(&pvec);
            break;
        }
        mem_cgroup_uncharge_start();
        for (i = 0; i < pagevec_count(&pvec); i++) {
            struct page *page = pvec.pages[i];

            index = indices[i];
            if (index > end)
                break;

            if (radix_tree_exceptional_entry(page)) {
                nr_swaps_freed += !shmem_free_swap(mapping,
                                index, page);
                continue;
            }

            lock_page(page);
            if (page->mapping == mapping) {
                VM_BUG_ON(PageWriteback(page));
                truncate_inode_page(mapping, page);
            }
            unlock_page(page);
        }
        shmem_deswap_pagevec(&pvec);
        pagevec_release(&pvec);
        mem_cgroup_uncharge_end();
        index++;
    }

    spin_lock(&info->lock);
    info->swapped -= nr_swaps_freed;
    shmem_recalc_inode(inode);
    spin_unlock(&info->lock);

    inode->i_ctime = inode->i_mtime = CURRENT_TIME;
}
EXPORT_SYMBOL_GPL(shmem_truncate_range);
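/*
 * Illustrative sketch, not part of the original source: callers pass the
 * range in bytes, with lend inclusive (note the BUG_ON above requiring
 * lend to fall on the last byte of a page).  shmem_truncate_examples()
 * is a hypothetical name.
 */
static inline void shmem_truncate_examples(struct inode *inode)
{
    /* everything from offset 0 to EOF */
    shmem_truncate_range(inode, 0, (loff_t)-1);

    /* just the second page: lend is the last byte of that page */
    shmem_truncate_range(inode, PAGE_CACHE_SIZE,
                 2 * PAGE_CACHE_SIZE - 1);
}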
static int shmem_setattr(struct dentry *dentry, struct iattr *attr)
{
    struct inode *inode = dentry->d_inode;
    int error;

    error = inode_change_ok(inode, attr);
    if (error)
        return error;

    if (S_ISREG(inode->i_mode) && (attr->ia_valid & ATTR_SIZE)) {
        loff_t oldsize = inode->i_size;
        loff_t newsize = attr->ia_size;

        if (newsize != oldsize) {
            i_size_write(inode, newsize);
            inode->i_ctime = inode->i_mtime = CURRENT_TIME;
        }
        if (newsize < oldsize) {
            loff_t holebegin = round_up(newsize, PAGE_SIZE);
            unmap_mapping_range(inode->i_mapping, holebegin, 0, 1);
            shmem_truncate_range(inode, newsize, (loff_t)-1);
            /* unmap again to remove racily COWed private pages */
            unmap_mapping_range(inode->i_mapping, holebegin, 0, 1);
        }
    }

    setattr_copy(inode, attr);
#ifdef CONFIG_TMPFS_POSIX_ACL
    if (attr->ia_valid & ATTR_MODE)
        error = generic_acl_chmod(inode);
#endif
    return error;
}
static void shmem_evict_inode(struct inode *inode)
{
    struct shmem_inode_info *info = SHMEM_I(inode);
    struct shmem_xattr *xattr, *nxattr;

    if (inode->i_mapping->a_ops == &shmem_aops) {
        shmem_unacct_size(info->flags, inode->i_size);
        inode->i_size = 0;
        shmem_truncate_range(inode, 0, (loff_t)-1);
        if (!list_empty(&info->swaplist)) {
            mutex_lock(&shmem_swaplist_mutex);
            list_del_init(&info->swaplist);
            mutex_unlock(&shmem_swaplist_mutex);
        }
    } else
        kfree(info->symlink);

    list_for_each_entry_safe(xattr, nxattr, &info->xattr_list, list) {
        kfree(xattr->name);
        kfree(xattr);
    }
    WARN_ON(inode->i_blocks);
    shmem_free_inode(inode->i_sb);
    end_writeback(inode);
}
/*
 * If swap found in inode, free it and move page from swapcache to filecache.
 */
static int shmem_unuse_inode(struct shmem_inode_info *info,
                 swp_entry_t swap, struct page *page)
{
    struct address_space *mapping = info->vfs_inode.i_mapping;
    void *radswap;
    pgoff_t index;
    int error;

    radswap = swp_to_radix_entry(swap);
    index = radix_tree_locate_item(&mapping->page_tree, radswap);
    if (index == -1)
        return 0;

    /*
     * Move _head_ to start search for next from here.
     * But be careful: shmem_evict_inode checks list_empty without taking
     * mutex, and there's an instant in list_move_tail when info->swaplist
     * would appear empty, if it were the only one on shmem_swaplist.
     */
    if (shmem_swaplist.next != &info->swaplist)
        list_move_tail(&shmem_swaplist, &info->swaplist);

    /*
     * We rely on shmem_swaplist_mutex, not only to protect the swaplist,
     * but also to hold up shmem_evict_inode(): so inode cannot be freed
     * beneath us (pagelock doesn't help until the page is in pagecache).
     */
    error = shmem_add_to_page_cache(page, mapping, index,
                        GFP_NOWAIT, radswap);
    /* which does mem_cgroup_uncharge_cache_page on error */

    if (error != -ENOMEM) {
        /*
         * Truncation and eviction use free_swap_and_cache(), which
         * only does trylock page: if we raced, best clean up here.
         */
        delete_from_swap_cache(page);
        set_page_dirty(page);
        if (!error) {
            spin_lock(&info->lock);
            info->swapped--;
            spin_unlock(&info->lock);
            swap_free(swap);
        }
        error = 1;  /* not an error, but entry was found */
    }
    return error;
}
/*
 * Search through swapped inodes to find and replace swap by page.
 */
int shmem_unuse(swp_entry_t swap, struct page *page)
{
    struct list_head *this, *next;
    struct shmem_inode_info *info;
    int found = 0;
    int error;

    /*
     * Charge page using GFP_KERNEL while we can wait, before taking
     * the shmem_swaplist_mutex which might hold up shmem_writepage().
     * Charged back to the user (not to caller) when swap account is used.
     */
    error = mem_cgroup_cache_charge(page, current->mm, GFP_KERNEL);
    if (error)
        goto out;
    /* No radix_tree_preload: swap entry keeps a place for page in tree */

    mutex_lock(&shmem_swaplist_mutex);
    list_for_each_safe(this, next, &shmem_swaplist) {
        info = list_entry(this, struct shmem_inode_info, swaplist);
        if (info->swapped)
            found = shmem_unuse_inode(info, swap, page);
        else
            list_del_init(&info->swaplist);
        cond_resched();
        if (found)
            break;
    }
    mutex_unlock(&shmem_swaplist_mutex);

    if (!found)
        mem_cgroup_uncharge_cache_page(page);
    if (found < 0)
        error = found;
out:
    unlock_page(page);
    page_cache_release(page);
    return error;
}
/*
 * Move the page from the page cache to the swap cache.
 */
static int shmem_writepage(struct page *page, struct writeback_control *wbc)
{
    struct shmem_inode_info *info;
    struct address_space *mapping;
    struct inode *inode;
    swp_entry_t swap;

    BUG_ON(!PageLocked(page));
    mapping = page->mapping;
    inode = mapping->host;
    info = SHMEM_I(inode);
    if (info->flags & VM_LOCKED)
        goto redirty;
    if (!total_swap_pages)
        goto redirty;

    /*
     * shmem_backing_dev_info's capabilities prevent regular writeback or
     * sync from ever calling shmem_writepage; but a stacking filesystem
     * might use ->writepage of its underlying filesystem, in which case
     * tmpfs should write out to swap only in response to memory pressure,
     * and not for the writeback threads or sync.
     */
    if (!wbc->for_reclaim) {
        WARN_ON_ONCE(1);    /* Still happens? Tell us about it! */
        goto redirty;
    }
    swap = get_swap_page();
    if (!swap.val)
        goto redirty;

    /*
     * Add inode to shmem_unuse()'s list of swapped-out inodes,
     * if it's not already there.  Do it now before the page is
     * moved to swap cache, when its pagelock no longer protects
     * the inode from eviction.  But don't unlock the mutex until
     * we've incremented swapped, because shmem_unuse_inode() will
     * prune a !swapped inode from the swaplist under this mutex.
     */
    mutex_lock(&shmem_swaplist_mutex);
    if (list_empty(&info->swaplist))
        list_add_tail(&info->swaplist, &shmem_swaplist);

    if (add_to_swap_cache(page, swap, GFP_ATOMIC) == 0) {
        swap_shmem_alloc(swap);
        shmem_delete_from_page_cache(page, swp_to_radix_entry(swap));

        spin_lock(&info->lock);
        info->swapped++;
        shmem_recalc_inode(inode);
        spin_unlock(&info->lock);

        mutex_unlock(&shmem_swaplist_mutex);
        BUG_ON(page_mapped(page));
        swap_writepage(page, wbc);
        return 0;
    }

    mutex_unlock(&shmem_swaplist_mutex);
    swapcache_free(swap, NULL);
redirty:
    set_page_dirty(page);
    if (wbc->for_reclaim)
        return AOP_WRITEPAGE_ACTIVATE;  /* Return with page locked */
    unlock_page(page);
    return 0;
}
#ifdef CONFIG_TMPFS
static void shmem_show_mpol(struct seq_file *seq, struct mempolicy *mpol)
{
    char buffer[64];

    if (!mpol || mpol->mode == MPOL_DEFAULT)
        return;     /* show nothing */

    mpol_to_str(buffer, sizeof(buffer), mpol, 1);

    seq_printf(seq, ",mpol=%s", buffer);
}

static struct mempolicy *shmem_get_sbmpol(struct shmem_sb_info *sbinfo)
{
    struct mempolicy *mpol = NULL;
    if (sbinfo->mpol) {
        spin_lock(&sbinfo->stat_lock);  /* prevent replace/use races */
        mpol = sbinfo->mpol;
        mpol_get(mpol);
        spin_unlock(&sbinfo->stat_lock);
    }
    return mpol;
}
#endif /* CONFIG_TMPFS */
static struct page *shmem_swapin(swp_entry_t swap, gfp_t gfp,
            struct shmem_inode_info *info, pgoff_t index)
{
    struct vm_area_struct pvma;
    struct page *page;

    /* Create a pseudo vma that just contains the policy */
    pvma.vm_start = 0;
    pvma.vm_pgoff = index;
    pvma.vm_ops = NULL;
    pvma.vm_policy = mpol_shared_policy_lookup(&info->policy, index);

    page = swapin_readahead(swap, gfp, &pvma, 0);

    /* Drop reference taken by mpol_shared_policy_lookup() */
    mpol_cond_put(pvma.vm_policy);

    return page;
}

static struct page *shmem_alloc_page(gfp_t gfp,
            struct shmem_inode_info *info, pgoff_t index)
{
    struct vm_area_struct pvma;
    struct page *page;

    /* Create a pseudo vma that just contains the policy */
    pvma.vm_start = 0;
    pvma.vm_pgoff = index;
    pvma.vm_ops = NULL;
    pvma.vm_policy = mpol_shared_policy_lookup(&info->policy, index);

    page = alloc_page_vma(gfp, &pvma, 0);

    /* Drop reference taken by mpol_shared_policy_lookup() */
    mpol_cond_put(pvma.vm_policy);

    return page;
}
#else /* !CONFIG_NUMA */
static inline void shmem_show_mpol(struct seq_file *seq, struct mempolicy *mpol)
{
}
#endif /* CONFIG_TMPFS */

static inline struct page *shmem_swapin(swp_entry_t swap, gfp_t gfp,
            struct shmem_inode_info *info, pgoff_t index)
{
    return swapin_readahead(swap, gfp, NULL, 0);
}

static inline struct page *shmem_alloc_page(gfp_t gfp,
            struct shmem_inode_info *info, pgoff_t index)
{
    return alloc_page(gfp);
}
#endif /* CONFIG_NUMA */

#if !defined(CONFIG_NUMA) || !defined(CONFIG_TMPFS)
static inline struct mempolicy *shmem_get_sbmpol(struct shmem_sb_info *sbinfo)
{
    return NULL;
}
#endif
/*
 * shmem_getpage_gfp - find page in cache, or get from swap, or allocate
 *
 * If we allocate a new one we do not mark it dirty. That's up to the
 * vm. If we swap it in we mark it dirty since we also free the swap
 * entry since a page cannot live in both the swap and page cache
 */
static int shmem_getpage_gfp(struct inode *inode, pgoff_t index,
    struct page **pagep, enum sgp_type sgp, gfp_t gfp, int *fault_type)
{
    struct address_space *mapping = inode->i_mapping;
    struct shmem_inode_info *info;
    struct shmem_sb_info *sbinfo;
    struct page *page;
    swp_entry_t swap;
    int error;
    int once = 0;

    if (index > (MAX_LFS_FILESIZE >> PAGE_CACHE_SHIFT))
        return -EFBIG;
repeat:
    swap.val = 0;
    page = find_lock_page(mapping, index);
    if (radix_tree_exceptional_entry(page)) {
        swap = radix_to_swp_entry(page);
        page = NULL;
    }

    if (sgp != SGP_WRITE &&
        ((loff_t)index << PAGE_CACHE_SHIFT) >= i_size_read(inode)) {
        error = -EINVAL;
        goto failed;
    }

    if (page || (sgp == SGP_READ && !swap.val)) {
        /*
         * Once we can get the page lock, it must be uptodate:
         * if there were an error in reading back from swap,
         * the page would not be inserted into the filecache.
         */
        BUG_ON(page && !PageUptodate(page));
        *pagep = page;
        return 0;
    }

    /*
     * Fast cache lookup did not find it:
     * bring it back from swap or allocate.
     */
    info = SHMEM_I(inode);
    sbinfo = SHMEM_SB(inode->i_sb);

    if (swap.val) {
        /* Look it up and read it in.. */
        page = lookup_swap_cache(swap);
        if (!page) {
            /* here we actually do the io */
            if (fault_type)
                *fault_type |= VM_FAULT_MAJOR;
            page = shmem_swapin(swap, gfp, info, index);
            if (!page) {
                error = -ENOMEM;
                goto failed;
            }
        }

        /* We have to do this with page locked to prevent races */
        lock_page(page);
        if (!PageUptodate(page)) {
            error = -EIO;
            goto failed;
        }
        wait_on_page_writeback(page);

        /* Someone may have already done it for us */
        if (page->mapping) {
            if (page->mapping == mapping &&
                page->index == index)
                goto done;
            error = -EEXIST;
            goto failed;
        }

        error = mem_cgroup_cache_charge(page, current->mm,
                        gfp & GFP_RECLAIM_MASK);
        if (!error)
            error = shmem_add_to_page_cache(page, mapping, index,
                        gfp, swp_to_radix_entry(swap));
        if (error)
            goto failed;

        spin_lock(&info->lock);
        info->swapped--;
        shmem_recalc_inode(inode);
        spin_unlock(&info->lock);

        delete_from_swap_cache(page);
        set_page_dirty(page);
        swap_free(swap);

    } else {
        if (shmem_acct_block(info->flags)) {
            error = -ENOSPC;
            goto failed;
        }
        if (sbinfo->max_blocks) {
            if (percpu_counter_compare(&sbinfo->used_blocks,
                        sbinfo->max_blocks) >= 0) {
                error = -ENOSPC;
                goto unacct;
            }
            percpu_counter_inc(&sbinfo->used_blocks);
        }

        page = shmem_alloc_page(gfp, info, index);
        if (!page) {
            error = -ENOMEM;
            goto decused;
        }

        SetPageSwapBacked(page);
        __set_page_locked(page);
        error = mem_cgroup_cache_charge(page, current->mm,
                        gfp & GFP_RECLAIM_MASK);
        if (!error)
            error = shmem_add_to_page_cache(page, mapping, index,
                        gfp, NULL);
        if (error)
            goto decused;
        lru_cache_add_anon(page);

        spin_lock(&info->lock);
        info->alloced++;
        inode->i_blocks += BLOCKS_PER_PAGE;
        shmem_recalc_inode(inode);
        spin_unlock(&info->lock);

        clear_highpage(page);
        flush_dcache_page(page);
        SetPageUptodate(page);
        if (sgp == SGP_DIRTY)
            set_page_dirty(page);
    }
done:
    /* Perhaps the file has been truncated since we checked */
    if (sgp != SGP_WRITE &&
        ((loff_t)index << PAGE_CACHE_SHIFT) >= i_size_read(inode)) {
        error = -EINVAL;
        goto trunc;
    }
    *pagep = page;
    return 0;

    /*
     * Error recovery.
     */
trunc:
    ClearPageDirty(page);
    delete_from_page_cache(page);
    spin_lock(&info->lock);
    info->alloced--;
    inode->i_blocks -= BLOCKS_PER_PAGE;
    spin_unlock(&info->lock);
decused:
    if (sbinfo->max_blocks)
        percpu_counter_add(&sbinfo->used_blocks, -1);
unacct:
    shmem_unacct_blocks(info->flags, 1);
failed:
    if (swap.val && error != -EINVAL) {
        struct page *test = find_get_page(mapping, index);
        if (test && !radix_tree_exceptional_entry(test))
            page_cache_release(test);
        /* Have another try if the entry has changed */
        if (test != swp_to_radix_entry(swap))
            error = -EEXIST;
    }
    if (page) {
        unlock_page(page);
        page_cache_release(page);
    }
    if (error == -ENOSPC && !once++) {
        info = SHMEM_I(inode);
        spin_lock(&info->lock);
        shmem_recalc_inode(inode);
        spin_unlock(&info->lock);
        goto repeat;
    }
    if (error == -EEXIST)
        goto repeat;
    return error;
}
static int shmem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
    struct inode *inode = vma->vm_file->f_path.dentry->d_inode;
    int error;
    int ret = VM_FAULT_LOCKED;

    error = shmem_getpage(inode, vmf->pgoff, &vmf->page, SGP_CACHE, &ret);
    if (error)
        return ((error == -ENOMEM) ? VM_FAULT_OOM : VM_FAULT_SIGBUS);

    if (ret & VM_FAULT_MAJOR) {
        count_vm_event(PGMAJFAULT);
        mem_cgroup_count_vm_event(vma->vm_mm, PGMAJFAULT);
    }
    return ret;
}
static int shmem_set_policy(struct vm_area_struct *vma, struct mempolicy *mpol)
{
    struct inode *inode = vma->vm_file->f_path.dentry->d_inode;
    return mpol_set_shared_policy(&SHMEM_I(inode)->policy, vma, mpol);
}

static struct mempolicy *shmem_get_policy(struct vm_area_struct *vma,
                      unsigned long addr)
{
    struct inode *inode = vma->vm_file->f_path.dentry->d_inode;
    pgoff_t index;

    index = ((addr - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
    return mpol_shared_policy_lookup(&SHMEM_I(inode)->policy, index);
}
int shmem_lock(struct file *file, int lock, struct user_struct *user)
{
    struct inode *inode = file->f_path.dentry->d_inode;
    struct shmem_inode_info *info = SHMEM_I(inode);
    int retval = -ENOMEM;

    spin_lock(&info->lock);
    if (lock && !(info->flags & VM_LOCKED)) {
        if (!user_shm_lock(inode->i_size, user))
            goto out_nomem;
        info->flags |= VM_LOCKED;
        mapping_set_unevictable(file->f_mapping);
    }
    if (!lock && (info->flags & VM_LOCKED) && user) {
        user_shm_unlock(inode->i_size, user);
        info->flags &= ~VM_LOCKED;
        mapping_clear_unevictable(file->f_mapping);
    }
    retval = 0;

out_nomem:
    spin_unlock(&info->lock);
    return retval;
}
static int shmem_mmap(struct file *file, struct vm_area_struct *vma)
{
    file_accessed(file);
    vma->vm_ops = &shmem_vm_ops;
    vma->vm_flags |= VM_CAN_NONLINEAR;
    return 0;
}
static struct inode *shmem_get_inode(struct super_block *sb, const struct inode *dir,
                     int mode, dev_t dev, unsigned long flags)
{
    struct inode *inode;
    struct shmem_inode_info *info;
    struct shmem_sb_info *sbinfo = SHMEM_SB(sb);

    if (shmem_reserve_inode(sb))
        return NULL;

    inode = new_inode(sb);
    if (inode) {
        inode->i_ino = get_next_ino();
        inode_init_owner(inode, dir, mode);
        inode->i_blocks = 0;
        inode->i_mapping->backing_dev_info = &shmem_backing_dev_info;
        inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME;
        inode->i_generation = get_seconds();
        info = SHMEM_I(inode);
        memset(info, 0, (char *)inode - (char *)info);
        spin_lock_init(&info->lock);
        info->flags = flags & VM_NORESERVE;
        INIT_LIST_HEAD(&info->swaplist);
        INIT_LIST_HEAD(&info->xattr_list);
        cache_no_acl(inode);

        switch (mode & S_IFMT) {
        default:
            inode->i_op = &shmem_special_inode_operations;
            init_special_inode(inode, mode, dev);
            break;
        case S_IFREG:
            inode->i_mapping->a_ops = &shmem_aops;
            inode->i_op = &shmem_inode_operations;
            inode->i_fop = &shmem_file_operations;
            mpol_shared_policy_init(&info->policy,
                         shmem_get_sbmpol(sbinfo));
            break;
        case S_IFDIR:
            inc_nlink(inode);
            /* Some things misbehave if size == 0 on a directory */
            inode->i_size = 2 * BOGO_DIRENT_SIZE;
            inode->i_op = &shmem_dir_inode_operations;
            inode->i_fop = &simple_dir_operations;
            break;
        case S_IFLNK:
            /*
             * Must not load anything in the rbtree,
             * mpol_free_shared_policy will not be called.
             */
            mpol_shared_policy_init(&info->policy, NULL);
            break;
        }
    } else
        shmem_free_inode(sb);
    return inode;
}
static const struct inode_operations shmem_symlink_inode_operations;
static const struct inode_operations shmem_short_symlink_operations;

static int
shmem_write_begin(struct file *file, struct address_space *mapping,
            loff_t pos, unsigned len, unsigned flags,
            struct page **pagep, void **fsdata)
{
    struct inode *inode = mapping->host;
    pgoff_t index = pos >> PAGE_CACHE_SHIFT;
    return shmem_getpage(inode, index, pagep, SGP_WRITE, NULL);
}

static int
shmem_write_end(struct file *file, struct address_space *mapping,
            loff_t pos, unsigned len, unsigned copied,
            struct page *page, void *fsdata)
{
    struct inode *inode = mapping->host;

    if (pos + copied > inode->i_size)
        i_size_write(inode, pos + copied);

    set_page_dirty(page);
    unlock_page(page);
    page_cache_release(page);

    return copied;
}
static void do_shmem_file_read(struct file *filp, loff_t *ppos, read_descriptor_t *desc, read_actor_t actor)
{
    struct inode *inode = filp->f_path.dentry->d_inode;
    struct address_space *mapping = inode->i_mapping;
    pgoff_t index;
    unsigned long offset;
    enum sgp_type sgp = SGP_READ;

    /*
     * Might this read be for a stacking filesystem?  Then when reading
     * holes of a sparse file, we actually need to allocate those pages,
     * and even mark them dirty, so it cannot exceed the max_blocks limit.
     */
    if (segment_eq(get_fs(), KERNEL_DS))
        sgp = SGP_DIRTY;

    index = *ppos >> PAGE_CACHE_SHIFT;
    offset = *ppos & ~PAGE_CACHE_MASK;

    for (;;) {
        struct page *page = NULL;
        pgoff_t end_index;
        unsigned long nr, ret;
        loff_t i_size = i_size_read(inode);

        end_index = i_size >> PAGE_CACHE_SHIFT;
        if (index > end_index)
            break;
        if (index == end_index) {
            nr = i_size & ~PAGE_CACHE_MASK;
            if (nr <= offset)
                break;
        }

        desc->error = shmem_getpage(inode, index, &page, sgp, NULL);
        if (desc->error) {
            if (desc->error == -EINVAL)
                desc->error = 0;
            break;
        }
        if (page)
            unlock_page(page);

        /*
         * We must evaluate after, since reads (unlike writes)
         * are called without i_mutex protection against truncate
         */
        nr = PAGE_CACHE_SIZE;
        i_size = i_size_read(inode);
        end_index = i_size >> PAGE_CACHE_SHIFT;
        if (index == end_index) {
            nr = i_size & ~PAGE_CACHE_MASK;
            if (nr <= offset) {
                if (page)
                    page_cache_release(page);
                break;
            }
        }
        nr -= offset;

        if (page) {
            /*
             * If users can be writing to this page using arbitrary
             * virtual addresses, take care about potential aliasing
             * before reading the page on the kernel side.
             */
            if (mapping_writably_mapped(mapping))
                flush_dcache_page(page);
            /*
             * Mark the page accessed if we read the beginning.
             */
            if (!offset)
                mark_page_accessed(page);
        } else {
            page = ZERO_PAGE(0);
            page_cache_get(page);
        }

        /*
         * Ok, we have the page, and it's up-to-date, so
         * now we can copy it to user space...
         *
         * The actor routine returns how many bytes were actually used..
         * NOTE! This may not be the same as how much of a user buffer
         * we filled up (we may be padding etc), so we can only update
         * "pos" here (the actor routine has to update the user buffer
         * pointers and the remaining count).
         */
        ret = actor(desc, page, offset, nr);
        offset += ret;
        index += offset >> PAGE_CACHE_SHIFT;
        offset &= ~PAGE_CACHE_MASK;

        page_cache_release(page);
        if (ret != nr || !desc->count)
            break;

        cond_resched();
    }

    *ppos = ((loff_t) index << PAGE_CACHE_SHIFT) + offset;
    file_accessed(filp);
}
static ssize_t shmem_file_aio_read(struct kiocb *iocb,
        const struct iovec *iov, unsigned long nr_segs, loff_t pos)
{
    struct file *filp = iocb->ki_filp;
    ssize_t retval;
    unsigned long seg;
    size_t count;
    loff_t *ppos = &iocb->ki_pos;

    retval = generic_segment_checks(iov, &nr_segs, &count, VERIFY_WRITE);
    if (retval)
        return retval;

    for (seg = 0; seg < nr_segs; seg++) {
        read_descriptor_t desc;

        desc.written = 0;
        desc.arg.buf = iov[seg].iov_base;
        desc.count = iov[seg].iov_len;
        if (desc.count == 0)
            continue;
        desc.error = 0;
        do_shmem_file_read(filp, ppos, &desc, file_read_actor);
        retval += desc.written;
        if (desc.error) {
            retval = retval ?: desc.error;
            break;
        }
        if (desc.count > 0)
            break;
    }
    return retval;
}
static ssize_t shmem_file_splice_read(struct file *in, loff_t *ppos,
                struct pipe_inode_info *pipe, size_t len,
                unsigned int flags)
{
    struct address_space *mapping = in->f_mapping;
    struct inode *inode = mapping->host;
    unsigned int loff, nr_pages, req_pages;
    struct page *pages[PIPE_DEF_BUFFERS];
    struct partial_page partial[PIPE_DEF_BUFFERS];
    struct page *page;
    pgoff_t index, end_index;
    loff_t isize, left;
    int error, page_nr;
    struct splice_pipe_desc spd = {
        .pages = pages,
        .partial = partial,
        .nr_pages_max = PIPE_DEF_BUFFERS,
        .flags = flags,
        .ops = &page_cache_pipe_buf_ops,
        .spd_release = spd_release_page,
    };

    isize = i_size_read(inode);
    if (unlikely(*ppos >= isize))
        return 0;

    left = isize - *ppos;
    if (unlikely(left < len))
        len = left;

    if (splice_grow_spd(pipe, &spd))
        return -ENOMEM;

    index = *ppos >> PAGE_CACHE_SHIFT;
    loff = *ppos & ~PAGE_CACHE_MASK;
    req_pages = (len + loff + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
    nr_pages = min(req_pages, pipe->buffers);

    spd.nr_pages = find_get_pages_contig(mapping, index,
                        nr_pages, spd.pages);
    index += spd.nr_pages;
    error = 0;

    while (spd.nr_pages < nr_pages) {
        error = shmem_getpage(inode, index, &page, SGP_CACHE, NULL);
        if (error)
            break;
        unlock_page(page);
        spd.pages[spd.nr_pages++] = page;
        index++;
    }

    index = *ppos >> PAGE_CACHE_SHIFT;
    nr_pages = spd.nr_pages;
    spd.nr_pages = 0;

    for (page_nr = 0; page_nr < nr_pages; page_nr++) {
        unsigned int this_len;

        if (!len)
            break;

        this_len = min_t(unsigned long, len, PAGE_CACHE_SIZE - loff);
        page = spd.pages[page_nr];

        if (!PageUptodate(page) || page->mapping != mapping) {
            error = shmem_getpage(inode, index, &page,
                            SGP_CACHE, NULL);
            if (error)
                break;
            unlock_page(page);
            page_cache_release(spd.pages[page_nr]);
            spd.pages[page_nr] = page;
        }

        isize = i_size_read(inode);
        end_index = (isize - 1) >> PAGE_CACHE_SHIFT;
        if (unlikely(!isize || index > end_index))
            break;

        if (end_index == index) {
            unsigned int plen;

            plen = ((isize - 1) & ~PAGE_CACHE_MASK) + 1;
            if (plen <= loff)
                break;

            this_len = min(this_len, plen - loff);
            len = this_len;
        }

        spd.partial[page_nr].offset = loff;
        spd.partial[page_nr].len = this_len;
        len -= this_len;
        loff = 0;
        spd.nr_pages++;
        index++;
    }

    while (page_nr < nr_pages)
        page_cache_release(spd.pages[page_nr++]);

    if (spd.nr_pages)
        error = splice_to_pipe(pipe, &spd);

    splice_shrink_spd(&spd);

    if (error > 0) {
        *ppos += error;
        file_accessed(in);
    }
    return error;
}
static int shmem_statfs(struct dentry *dentry, struct kstatfs *buf)
{
    struct shmem_sb_info *sbinfo = SHMEM_SB(dentry->d_sb);

    buf->f_type = TMPFS_MAGIC;
    buf->f_bsize = PAGE_CACHE_SIZE;
    buf->f_namelen = NAME_MAX;
    if (sbinfo->max_blocks) {
        buf->f_blocks = sbinfo->max_blocks;
        buf->f_bavail =
        buf->f_bfree  = sbinfo->max_blocks -
                percpu_counter_sum(&sbinfo->used_blocks);
    }
    if (sbinfo->max_inodes) {
        buf->f_files = sbinfo->max_inodes;
        buf->f_ffree = sbinfo->free_inodes;
    }
    /* else leave those fields 0 like simple_statfs */
    return 0;
}
/*
 * File creation. Allocate an inode, and we're done..
 */
static int
shmem_mknod(struct inode *dir, struct dentry *dentry, int mode, dev_t dev)
{
    struct inode *inode;
    int error = -ENOSPC;

    inode = shmem_get_inode(dir->i_sb, dir, mode, dev, VM_NORESERVE);
    if (inode) {
        error = security_inode_init_security(inode, dir,
                             &dentry->d_name,
                             NULL, NULL);
        if (error) {
            if (error != -EOPNOTSUPP) {
                iput(inode);
                return error;
            }
        }
#ifdef CONFIG_TMPFS_POSIX_ACL
        error = generic_acl_init(inode, dir);
        if (error) {
            iput(inode);
            return error;
        }
#else
        error = 0;
#endif
        dir->i_size += BOGO_DIRENT_SIZE;
        dir->i_ctime = dir->i_mtime = CURRENT_TIME;
        d_instantiate(dentry, inode);
        dget(dentry); /* Extra count - pin the dentry in core */
    }
    return error;
}
static int shmem_mkdir(struct inode *dir, struct dentry *dentry, int mode)
{
    int error;

    if ((error = shmem_mknod(dir, dentry, mode | S_IFDIR, 0)))
        return error;
    inc_nlink(dir);
    return 0;
}

static int shmem_create(struct inode *dir, struct dentry *dentry, int mode,
        struct nameidata *nd)
{
    return shmem_mknod(dir, dentry, mode | S_IFREG, 0);
}
static int shmem_link(struct dentry *old_dentry, struct inode *dir, struct dentry *dentry)
{
    struct inode *inode = old_dentry->d_inode;
    int ret;

    /*
     * No ordinary (disk based) filesystem counts links as inodes;
     * but each new link needs a new dentry, pinning lowmem, and
     * tmpfs dentries cannot be pruned until they are unlinked.
     */
    ret = shmem_reserve_inode(inode->i_sb);
    if (ret)
        goto out;

    dir->i_size += BOGO_DIRENT_SIZE;
    inode->i_ctime = dir->i_ctime = dir->i_mtime = CURRENT_TIME;
    inc_nlink(inode);
    ihold(inode);   /* New dentry reference */
    dget(dentry);   /* Extra pinning count for the created dentry */
    d_instantiate(dentry, inode);
out:
    return ret;
}
static int shmem_unlink(struct inode *dir, struct dentry *dentry)
{
    struct inode *inode = dentry->d_inode;

    if (inode->i_nlink > 1 && !S_ISDIR(inode->i_mode))
        shmem_free_inode(inode->i_sb);

    dir->i_size -= BOGO_DIRENT_SIZE;
    inode->i_ctime = dir->i_ctime = dir->i_mtime = CURRENT_TIME;
    drop_nlink(inode);
    dput(dentry);   /* Undo the count from "create" - this does all the work */
    return 0;
}
static int shmem_rmdir(struct inode *dir, struct dentry *dentry)
{
    if (!simple_empty(dentry))
        return -ENOTEMPTY;

    drop_nlink(dentry->d_inode);
    drop_nlink(dir);
    return shmem_unlink(dir, dentry);
}
/*
 * The VFS layer already does all the dentry stuff for rename,
 * we just have to decrement the usage count for the target if
 * it exists so that the VFS layer correctly free's it when it
 * gets overwritten.
 */
static int shmem_rename(struct inode *old_dir, struct dentry *old_dentry, struct inode *new_dir, struct dentry *new_dentry)
{
    struct inode *inode = old_dentry->d_inode;
    int they_are_dirs = S_ISDIR(inode->i_mode);

    if (!simple_empty(new_dentry))
        return -ENOTEMPTY;

    if (new_dentry->d_inode) {
        (void) shmem_unlink(new_dir, new_dentry);
        if (they_are_dirs)
            drop_nlink(old_dir);
    } else if (they_are_dirs) {
        drop_nlink(old_dir);
        inc_nlink(new_dir);
    }

    old_dir->i_size -= BOGO_DIRENT_SIZE;
    new_dir->i_size += BOGO_DIRENT_SIZE;
    old_dir->i_ctime = old_dir->i_mtime =
    new_dir->i_ctime = new_dir->i_mtime =
    inode->i_ctime = CURRENT_TIME;
    return 0;
}
static int shmem_symlink(struct inode *dir, struct dentry *dentry, const char *symname)
{
    int error;
    int len;
    struct inode *inode;
    struct page *page;
    char *kaddr;
    struct shmem_inode_info *info;

    len = strlen(symname) + 1;
    if (len > PAGE_CACHE_SIZE)
        return -ENAMETOOLONG;

    inode = shmem_get_inode(dir->i_sb, dir, S_IFLNK|S_IRWXUGO, 0, VM_NORESERVE);
    if (!inode)
        return -ENOSPC;

    error = security_inode_init_security(inode, dir, &dentry->d_name,
                         NULL, NULL);
    if (error) {
        if (error != -EOPNOTSUPP) {
            iput(inode);
            return error;
        }
        error = 0;
    }

    info = SHMEM_I(inode);
    inode->i_size = len-1;
    if (len <= SHORT_SYMLINK_LEN) {
        info->symlink = kmemdup(symname, len, GFP_KERNEL);
        if (!info->symlink) {
            iput(inode);
            return -ENOMEM;
        }
        inode->i_op = &shmem_short_symlink_operations;
    } else {
        error = shmem_getpage(inode, 0, &page, SGP_WRITE, NULL);
        if (error) {
            iput(inode);
            return error;
        }
        inode->i_mapping->a_ops = &shmem_aops;
        inode->i_op = &shmem_symlink_inode_operations;
        kaddr = kmap_atomic(page, KM_USER0);
        memcpy(kaddr, symname, len);
        kunmap_atomic(kaddr, KM_USER0);
        set_page_dirty(page);
        unlock_page(page);
        page_cache_release(page);
    }
    dir->i_size += BOGO_DIRENT_SIZE;
    dir->i_ctime = dir->i_mtime = CURRENT_TIME;
    d_instantiate(dentry, inode);
    dget(dentry);
    return 0;
}
static void *shmem_follow_short_symlink(struct dentry *dentry, struct nameidata *nd)
{
    nd_set_link(nd, SHMEM_I(dentry->d_inode)->symlink);
    return NULL;
}

static void *shmem_follow_link(struct dentry *dentry, struct nameidata *nd)
{
    struct page *page = NULL;
    int error = shmem_getpage(dentry->d_inode, 0, &page, SGP_READ, NULL);
    nd_set_link(nd, error ? ERR_PTR(error) : kmap(page));
    if (page)
        unlock_page(page);
    return page;
}

static void shmem_put_link(struct dentry *dentry, struct nameidata *nd, void *cookie)
{
    if (!IS_ERR(nd_get_link(nd))) {
        struct page *page = cookie;
        kunmap(page);
        mark_page_accessed(page);
        page_cache_release(page);
    }
}
#ifdef CONFIG_TMPFS_XATTR
/*
 * Superblocks without xattr inode operations may get some security.* xattr
 * support from the LSM "for free". As soon as we have any other xattrs
 * like ACLs, we also need to implement the security.* handlers at
 * filesystem level, though.
 */

static int shmem_xattr_get(struct dentry *dentry, const char *name,
               void *buffer, size_t size)
{
    struct shmem_inode_info *info;
    struct shmem_xattr *xattr;
    int ret = -ENODATA;

    info = SHMEM_I(dentry->d_inode);

    spin_lock(&info->lock);
    list_for_each_entry(xattr, &info->xattr_list, list) {
        if (strcmp(name, xattr->name))
            continue;

        ret = xattr->size;
        if (buffer) {
            if (size < xattr->size)
                ret = -ERANGE;
            else
                memcpy(buffer, xattr->value, xattr->size);
        }
        break;
    }
    spin_unlock(&info->lock);
    return ret;
}
static int shmem_xattr_set(struct dentry *dentry, const char *name,
               const void *value, size_t size, int flags)
{
    struct inode *inode = dentry->d_inode;
    struct shmem_inode_info *info = SHMEM_I(inode);
    struct shmem_xattr *xattr;
    struct shmem_xattr *new_xattr = NULL;
    size_t len;
    int err = 0;

    /* value == NULL means remove */
    if (value) {
        /* wrap around? */
        len = sizeof(*new_xattr) + size;
        if (len <= sizeof(*new_xattr))
            return -ENOMEM;

        new_xattr = kmalloc(len, GFP_KERNEL);
        if (!new_xattr)
            return -ENOMEM;

        new_xattr->name = kstrdup(name, GFP_KERNEL);
        if (!new_xattr->name) {
            kfree(new_xattr);
            return -ENOMEM;
        }

        new_xattr->size = size;
        memcpy(new_xattr->value, value, size);
    }

    spin_lock(&info->lock);
    list_for_each_entry(xattr, &info->xattr_list, list) {
        if (!strcmp(name, xattr->name)) {
            if (flags & XATTR_CREATE) {
                xattr = new_xattr;
                err = -EEXIST;
            } else if (new_xattr) {
                list_replace(&xattr->list, &new_xattr->list);
            } else {
                list_del(&xattr->list);
            }
            goto out;
        }
    }
    if (flags & XATTR_REPLACE) {
        xattr = new_xattr;
        err = -ENODATA;
    } else {
        list_add(&new_xattr->list, &info->xattr_list);
        xattr = NULL;
    }
out:
    spin_unlock(&info->lock);
    if (xattr)
        kfree(xattr->name);
    kfree(xattr);
    return err;
}
static const struct xattr_handler *shmem_xattr_handlers[] = {
#ifdef CONFIG_TMPFS_POSIX_ACL
    &generic_acl_access_handler,
    &generic_acl_default_handler,
#endif
    NULL
};
static int shmem_xattr_validate(const char *name)
{
    struct { const char *prefix; size_t len; } arr[] = {
        { XATTR_SECURITY_PREFIX, XATTR_SECURITY_PREFIX_LEN },
        { XATTR_TRUSTED_PREFIX, XATTR_TRUSTED_PREFIX_LEN }
    };
    int i;

    for (i = 0; i < ARRAY_SIZE(arr); i++) {
        size_t preflen = arr[i].len;
        if (strncmp(name, arr[i].prefix, preflen) == 0) {
            if (!name[preflen])
                return -EINVAL;
            return 0;
        }
    }
    return -EOPNOTSUPP;
}
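/*
 * Illustrative note, not part of the original source: with the table
 * above, "trusted.md5sum" and "security.selinux" pass validation,
 * "user.comment" is refused with -EOPNOTSUPP, and a bare prefix such
 * as "trusted." (no attribute name after the dot) is refused with
 * -EINVAL.
 */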
static ssize_t shmem_getxattr(struct dentry *dentry, const char *name,
                  void *buffer, size_t size)
{
    int err;

    /*
     * If this is a request for a synthetic attribute in the system.*
     * namespace use the generic infrastructure to resolve a handler
     * for it via sb->s_xattr.
     */
    if (!strncmp(name, XATTR_SYSTEM_PREFIX, XATTR_SYSTEM_PREFIX_LEN))
        return generic_getxattr(dentry, name, buffer, size);

    err = shmem_xattr_validate(name);
    if (err)
        return err;

    return shmem_xattr_get(dentry, name, buffer, size);
}
static int shmem_setxattr(struct dentry *dentry, const char *name,
              const void *value, size_t size, int flags)
{
    int err;

    /*
     * If this is a request for a synthetic attribute in the system.*
     * namespace use the generic infrastructure to resolve a handler
     * for it via sb->s_xattr.
     */
    if (!strncmp(name, XATTR_SYSTEM_PREFIX, XATTR_SYSTEM_PREFIX_LEN))
        return generic_setxattr(dentry, name, value, size, flags);

    err = shmem_xattr_validate(name);
    if (err)
        return err;

    if (size == 0)
        value = "";  /* empty EA, do not remove */

    return shmem_xattr_set(dentry, name, value, size, flags);
}
static int shmem_removexattr(struct dentry *dentry, const char *name)
{
    int err;

    /*
     * If this is a request for a synthetic attribute in the system.*
     * namespace use the generic infrastructure to resolve a handler
     * for it via sb->s_xattr.
     */
    if (!strncmp(name, XATTR_SYSTEM_PREFIX, XATTR_SYSTEM_PREFIX_LEN))
        return generic_removexattr(dentry, name);

    err = shmem_xattr_validate(name);
    if (err)
        return err;

    return shmem_xattr_set(dentry, name, NULL, 0, XATTR_REPLACE);
}
static bool xattr_is_trusted(const char *name)
{
    return !strncmp(name, XATTR_TRUSTED_PREFIX, XATTR_TRUSTED_PREFIX_LEN);
}
static ssize_t shmem_listxattr(struct dentry *dentry, char *buffer, size_t size)
{
    bool trusted = capable(CAP_SYS_ADMIN);
    struct shmem_xattr *xattr;
    struct shmem_inode_info *info;
    size_t used = 0;

    info = SHMEM_I(dentry->d_inode);

    spin_lock(&info->lock);
    list_for_each_entry(xattr, &info->xattr_list, list) {
        size_t len;

        /* skip "trusted." attributes for unprivileged callers */
        if (!trusted && xattr_is_trusted(xattr->name))
            continue;

        len = strlen(xattr->name) + 1;
        used += len;
        if (buffer) {
            if (size < used) {
                used = -ERANGE;
                break;
            }
            memcpy(buffer, xattr->name, len);
            buffer += len;
        }
    }
    spin_unlock(&info->lock);

    return used;
}
#endif /* CONFIG_TMPFS_XATTR */
static const struct inode_operations shmem_short_symlink_operations = {
    .readlink   = generic_readlink,
    .follow_link    = shmem_follow_short_symlink,
#ifdef CONFIG_TMPFS_XATTR
    .setxattr   = shmem_setxattr,
    .getxattr   = shmem_getxattr,
    .listxattr  = shmem_listxattr,
    .removexattr    = shmem_removexattr,
#endif
};

static const struct inode_operations shmem_symlink_inode_operations = {
    .readlink   = generic_readlink,
    .follow_link    = shmem_follow_link,
    .put_link   = shmem_put_link,
#ifdef CONFIG_TMPFS_XATTR
    .setxattr   = shmem_setxattr,
    .getxattr   = shmem_getxattr,
    .listxattr  = shmem_listxattr,
    .removexattr    = shmem_removexattr,
#endif
};
static struct dentry *shmem_get_parent(struct dentry *child)
{
    return ERR_PTR(-ESTALE);
}

static int shmem_match(struct inode *ino, void *vfh)
{
    __u32 *fh = vfh;
    __u64 inum = fh[2];
    inum = (inum << 32) | fh[1];
    return ino->i_ino == inum && fh[0] == ino->i_generation;
}
static struct dentry *shmem_fh_to_dentry(struct super_block *sb,
        struct fid *fid, int fh_len, int fh_type)
{
    struct inode *inode;
    struct dentry *dentry = NULL;
    u64 inum;

    if (fh_len < 3)
        return NULL;

    inum = fid->raw[2];
    inum = (inum << 32) | fid->raw[1];

    inode = ilookup5(sb, (unsigned long)(inum + fid->raw[0]),
            shmem_match, fid->raw);
    if (inode) {
        dentry = d_find_alias(inode);
        iput(inode);
    }

    return dentry;
}
static int shmem_encode_fh(struct dentry *dentry, __u32 *fh, int *len,
                int connectable)
{
    struct inode *inode = dentry->d_inode;

    if (*len < 3) {
        *len = 3;
        return 255;
    }

    if (inode_unhashed(inode)) {
        /* Unfortunately insert_inode_hash is not idempotent,
         * so as we hash inodes here rather than at creation
         * time, we need a lock to ensure we only try
         * to do it once
         */
        static DEFINE_SPINLOCK(lock);
        spin_lock(&lock);
        if (inode_unhashed(inode))
            __insert_inode_hash(inode,
                        inode->i_ino + inode->i_generation);
        spin_unlock(&lock);
    }

    fh[0] = inode->i_generation;
    fh[1] = inode->i_ino;
    fh[2] = ((__u64)inode->i_ino) >> 32;

    *len = 3;
    return 1;
}

static const struct export_operations shmem_export_ops = {
    .get_parent     = shmem_get_parent,
    .encode_fh      = shmem_encode_fh,
    .fh_to_dentry   = shmem_fh_to_dentry,
};
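/*
 * Illustrative sketch, not part of the original source: the file handle
 * packed by shmem_encode_fh() above carries the generation number and
 * the 64-bit inode number in three 32-bit words, which shmem_match()
 * reassembles the same way.  shmem_fh_ino() is a hypothetical name.
 *
 *	fh[0] = i_generation
 *	fh[1] = low 32 bits of i_ino
 *	fh[2] = high 32 bits of i_ino
 */
static inline __u64 shmem_fh_ino(__u32 *fh)
{
    return ((__u64)fh[2] << 32) | fh[1];
}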
static int shmem_parse_options(char *options, struct shmem_sb_info *sbinfo,
                   bool remount)
{
    char *this_char, *value, *rest;

    while (options != NULL) {
        this_char = options;
        for (;;) {
            /*
             * NUL-terminate this option: unfortunately,
             * mount options form a comma-separated list,
             * but mpol's nodelist may also contain commas.
             */
            options = strchr(options, ',');
            if (options == NULL)
                break;
            options++;
            if (!isdigit(*options)) {
                options[-1] = '\0';
                break;
            }
        }
        if (!*this_char)
            continue;
        if ((value = strchr(this_char,'=')) != NULL) {
            *value++ = 0;
        } else {
            printk(KERN_ERR
                "tmpfs: No value for mount option '%s'\n",
                this_char);
            return 1;
        }

        if (!strcmp(this_char,"size")) {
            unsigned long long size;
            size = memparse(value,&rest);
            if (*rest == '%') {
                size <<= PAGE_SHIFT;
                size *= totalram_pages;
                do_div(size, 100);
                rest++;
            }
            if (*rest)
                goto bad_val;
            sbinfo->max_blocks =
                DIV_ROUND_UP(size, PAGE_CACHE_SIZE);
        } else if (!strcmp(this_char,"nr_blocks")) {
            sbinfo->max_blocks = memparse(value, &rest);
            if (*rest)
                goto bad_val;
        } else if (!strcmp(this_char,"nr_inodes")) {
            sbinfo->max_inodes = memparse(value, &rest);
            if (*rest)
                goto bad_val;
        } else if (!strcmp(this_char,"mode")) {
            if (remount)
                continue;
            sbinfo->mode = simple_strtoul(value, &rest, 8) & 07777;
            if (*rest)
                goto bad_val;
        } else if (!strcmp(this_char,"uid")) {
            if (remount)
                continue;
            sbinfo->uid = simple_strtoul(value, &rest, 0);
            if (*rest)
                goto bad_val;
        } else if (!strcmp(this_char,"gid")) {
            if (remount)
                continue;
            sbinfo->gid = simple_strtoul(value, &rest, 0);
            if (*rest)
                goto bad_val;
        } else if (!strcmp(this_char,"mpol")) {
            if (mpol_parse_str(value, &sbinfo->mpol, 1))
                goto bad_val;
        } else {
            printk(KERN_ERR "tmpfs: Bad mount option %s\n",
                   this_char);
            return 1;
        }
    }
    return 0;

bad_val:
    printk(KERN_ERR "tmpfs: Bad value '%s' for mount option '%s'\n",
           value, this_char);
    return 1;
}
*sb
, int *flags
, char *data
)
2119 struct shmem_sb_info
*sbinfo
= SHMEM_SB(sb
);
2120 struct shmem_sb_info config
= *sbinfo
;
2121 unsigned long inodes
;
2122 int error
= -EINVAL
;
2125 if (shmem_parse_options(data
, &config
, true))
2128 spin_lock(&sbinfo
->stat_lock
);
2129 inodes
= sbinfo
->max_inodes
- sbinfo
->free_inodes
;
2130 if (percpu_counter_compare(&sbinfo
->used_blocks
, config
.max_blocks
) > 0)
2132 if (config
.max_inodes
< inodes
)
2135 * Those tests disallow limited->unlimited while any are in use;
2136 * but we must separately disallow unlimited->limited, because
2137 * in that case we have no record of how much is already in use.
2139 if (config
.max_blocks
&& !sbinfo
->max_blocks
)
2141 if (config
.max_inodes
&& !sbinfo
->max_inodes
)
2145 sbinfo
->max_blocks
= config
.max_blocks
;
2146 sbinfo
->max_inodes
= config
.max_inodes
;
2147 sbinfo
->free_inodes
= config
.max_inodes
- inodes
;
2150 * Preserve previous mempolicy unless mpol remount option was specified.
2153 mpol_put(sbinfo
->mpol
);
2154 sbinfo
->mpol
= config
.mpol
; /* transfers initial ref */
2157 spin_unlock(&sbinfo
->stat_lock
);
static int shmem_show_options(struct seq_file *seq, struct vfsmount *vfs)
{
    struct shmem_sb_info *sbinfo = SHMEM_SB(vfs->mnt_sb);

    if (sbinfo->max_blocks != shmem_default_max_blocks())
        seq_printf(seq, ",size=%luk",
            sbinfo->max_blocks << (PAGE_CACHE_SHIFT - 10));
    if (sbinfo->max_inodes != shmem_default_max_inodes())
        seq_printf(seq, ",nr_inodes=%lu", sbinfo->max_inodes);
    if (sbinfo->mode != (S_IRWXUGO | S_ISVTX))
        seq_printf(seq, ",mode=%03o", sbinfo->mode);
    if (sbinfo->uid != 0)
        seq_printf(seq, ",uid=%u", sbinfo->uid);
    if (sbinfo->gid != 0)
        seq_printf(seq, ",gid=%u", sbinfo->gid);
    shmem_show_mpol(seq, sbinfo->mpol);
    return 0;
}
#endif /* CONFIG_TMPFS */
static void shmem_put_super(struct super_block *sb)
{
    struct shmem_sb_info *sbinfo = SHMEM_SB(sb);

    percpu_counter_destroy(&sbinfo->used_blocks);
    kfree(sbinfo);
    sb->s_fs_info = NULL;
}
int shmem_fill_super(struct super_block *sb, void *data, int silent)
{
    struct inode *inode;
    struct dentry *root;
    struct shmem_sb_info *sbinfo;
    int err = -ENOMEM;

    /* Round up to L1_CACHE_BYTES to resist false sharing */
    sbinfo = kzalloc(max((int)sizeof(struct shmem_sb_info),
                L1_CACHE_BYTES), GFP_KERNEL);
    if (!sbinfo)
        return -ENOMEM;

    sbinfo->mode = S_IRWXUGO | S_ISVTX;
    sbinfo->uid = current_fsuid();
    sbinfo->gid = current_fsgid();
    sb->s_fs_info = sbinfo;

#ifdef CONFIG_TMPFS
    /*
     * Per default we only allow half of the physical ram per
     * tmpfs instance, limiting inodes to one per page of lowmem;
     * but the internal instance is left unlimited.
     */
    if (!(sb->s_flags & MS_NOUSER)) {
        sbinfo->max_blocks = shmem_default_max_blocks();
        sbinfo->max_inodes = shmem_default_max_inodes();
        if (shmem_parse_options(data, sbinfo, false)) {
            err = -EINVAL;
            goto failed;
        }
    }
    sb->s_export_op = &shmem_export_ops;
#else
    sb->s_flags |= MS_NOUSER;
#endif

    spin_lock_init(&sbinfo->stat_lock);
    if (percpu_counter_init(&sbinfo->used_blocks, 0))
        goto failed;
    sbinfo->free_inodes = sbinfo->max_inodes;

    sb->s_maxbytes = MAX_LFS_FILESIZE;
    sb->s_blocksize = PAGE_CACHE_SIZE;
    sb->s_blocksize_bits = PAGE_CACHE_SHIFT;
    sb->s_magic = TMPFS_MAGIC;
    sb->s_op = &shmem_ops;
    sb->s_time_gran = 1;
#ifdef CONFIG_TMPFS_XATTR
    sb->s_xattr = shmem_xattr_handlers;
#endif
#ifdef CONFIG_TMPFS_POSIX_ACL
    sb->s_flags |= MS_POSIXACL;
#endif

    inode = shmem_get_inode(sb, NULL, S_IFDIR | sbinfo->mode, 0, VM_NORESERVE);
    if (!inode)
        goto failed;
    inode->i_uid = sbinfo->uid;
    inode->i_gid = sbinfo->gid;
    root = d_alloc_root(inode);
    if (!root)
        goto failed_iput;
    sb->s_root = root;
    return 0;

failed_iput:
    iput(inode);
failed:
    shmem_put_super(sb);
    return err;
}
static struct kmem_cache *shmem_inode_cachep;

static struct inode *shmem_alloc_inode(struct super_block *sb)
{
    struct shmem_inode_info *info;
    info = kmem_cache_alloc(shmem_inode_cachep, GFP_KERNEL);
    if (!info)
        return NULL;
    return &info->vfs_inode;
}

static void shmem_destroy_callback(struct rcu_head *head)
{
    struct inode *inode = container_of(head, struct inode, i_rcu);
    INIT_LIST_HEAD(&inode->i_dentry);
    kmem_cache_free(shmem_inode_cachep, SHMEM_I(inode));
}

static void shmem_destroy_inode(struct inode *inode)
{
    if ((inode->i_mode & S_IFMT) == S_IFREG)
        mpol_free_shared_policy(&SHMEM_I(inode)->policy);
    call_rcu(&inode->i_rcu, shmem_destroy_callback);
}

static void shmem_init_inode(void *foo)
{
    struct shmem_inode_info *info = foo;
    inode_init_once(&info->vfs_inode);
}

static int shmem_init_inodecache(void)
{
    shmem_inode_cachep = kmem_cache_create("shmem_inode_cache",
                sizeof(struct shmem_inode_info),
                0, SLAB_PANIC, shmem_init_inode);
    return 0;
}

static void shmem_destroy_inodecache(void)
{
    kmem_cache_destroy(shmem_inode_cachep);
}
static const struct address_space_operations shmem_aops = {
    .writepage      = shmem_writepage,
    .set_page_dirty = __set_page_dirty_no_writeback,
#ifdef CONFIG_TMPFS
    .write_begin    = shmem_write_begin,
    .write_end      = shmem_write_end,
#endif
    .migratepage    = migrate_page,
    .error_remove_page = generic_error_remove_page,
};
static const struct file_operations shmem_file_operations = {
    .mmap           = shmem_mmap,
#ifdef CONFIG_TMPFS
    .llseek         = generic_file_llseek,
    .read           = do_sync_read,
    .write          = do_sync_write,
    .aio_read       = shmem_file_aio_read,
    .aio_write      = generic_file_aio_write,
    .fsync          = noop_fsync,
    .splice_read    = shmem_file_splice_read,
    .splice_write   = generic_file_splice_write,
#endif
};
static const struct inode_operations shmem_inode_operations = {
    .setattr        = shmem_setattr,
    .truncate_range = shmem_truncate_range,
#ifdef CONFIG_TMPFS_XATTR
    .setxattr       = shmem_setxattr,
    .getxattr       = shmem_getxattr,
    .listxattr      = shmem_listxattr,
    .removexattr    = shmem_removexattr,
#endif
};
static const struct inode_operations shmem_dir_inode_operations = {
#ifdef CONFIG_TMPFS
    .create         = shmem_create,
    .lookup         = simple_lookup,
    .link           = shmem_link,
    .unlink         = shmem_unlink,
    .symlink        = shmem_symlink,
    .mkdir          = shmem_mkdir,
    .rmdir          = shmem_rmdir,
    .mknod          = shmem_mknod,
    .rename         = shmem_rename,
#endif
#ifdef CONFIG_TMPFS_XATTR
    .setxattr       = shmem_setxattr,
    .getxattr       = shmem_getxattr,
    .listxattr      = shmem_listxattr,
    .removexattr    = shmem_removexattr,
#endif
#ifdef CONFIG_TMPFS_POSIX_ACL
    .setattr        = shmem_setattr,
#endif
};
static const struct inode_operations shmem_special_inode_operations = {
#ifdef CONFIG_TMPFS_XATTR
    .setxattr       = shmem_setxattr,
    .getxattr       = shmem_getxattr,
    .listxattr      = shmem_listxattr,
    .removexattr    = shmem_removexattr,
#endif
#ifdef CONFIG_TMPFS_POSIX_ACL
    .setattr        = shmem_setattr,
#endif
};
static const struct super_operations shmem_ops = {
    .alloc_inode    = shmem_alloc_inode,
    .destroy_inode  = shmem_destroy_inode,
#ifdef CONFIG_TMPFS
    .statfs         = shmem_statfs,
    .remount_fs     = shmem_remount_fs,
    .show_options   = shmem_show_options,
#endif
    .evict_inode    = shmem_evict_inode,
    .drop_inode     = generic_delete_inode,
    .put_super      = shmem_put_super,
};
static const struct vm_operations_struct shmem_vm_ops = {
    .fault          = shmem_fault,
#ifdef CONFIG_NUMA
    .set_policy     = shmem_set_policy,
    .get_policy     = shmem_get_policy,
#endif
};
static struct dentry *shmem_mount(struct file_system_type *fs_type,
    int flags, const char *dev_name, void *data)
{
    return mount_nodev(fs_type, flags, data, shmem_fill_super);
}

static struct file_system_type shmem_fs_type = {
    .owner      = THIS_MODULE,
    .name       = "tmpfs",
    .mount      = shmem_mount,
    .kill_sb    = kill_litter_super,
};
int __init shmem_init(void)
{
    int error;

    error = bdi_init(&shmem_backing_dev_info);
    if (error)
        goto out4;

    error = shmem_init_inodecache();
    if (error)
        goto out3;

    error = register_filesystem(&shmem_fs_type);
    if (error) {
        printk(KERN_ERR "Could not register tmpfs\n");
        goto out2;
    }

    shm_mnt = vfs_kern_mount(&shmem_fs_type, MS_NOUSER,
                 shmem_fs_type.name, NULL);
    if (IS_ERR(shm_mnt)) {
        error = PTR_ERR(shm_mnt);
        printk(KERN_ERR "Could not kern_mount tmpfs\n");
        goto out1;
    }
    return 0;

out1:
    unregister_filesystem(&shmem_fs_type);
out2:
    shmem_destroy_inodecache();
out3:
    bdi_destroy(&shmem_backing_dev_info);
out4:
    shm_mnt = ERR_PTR(error);
    return error;
}
#else /* !CONFIG_SHMEM */

/*
 * tiny-shmem: simple shmemfs and tmpfs using ramfs code
 *
 * This is intended for small system where the benefits of the full
 * shmem code (swap-backed and resource-limited) are outweighed by
 * their complexity. On systems without swap this code should be
 * effectively equivalent, but much lighter weight.
 */

#include <linux/ramfs.h>

static struct file_system_type shmem_fs_type = {
    .name       = "tmpfs",
    .mount      = ramfs_mount,
    .kill_sb    = kill_litter_super,
};

int __init shmem_init(void)
{
    BUG_ON(register_filesystem(&shmem_fs_type) != 0);

    shm_mnt = kern_mount(&shmem_fs_type);
    BUG_ON(IS_ERR(shm_mnt));

    return 0;
}

int shmem_unuse(swp_entry_t swap, struct page *page)
{
    return 0;
}

int shmem_lock(struct file *file, int lock, struct user_struct *user)
{
    return 0;
}

void shmem_unlock_mapping(struct address_space *mapping)
{
}

void shmem_truncate_range(struct inode *inode, loff_t lstart, loff_t lend)
{
    truncate_inode_pages_range(inode->i_mapping, lstart, lend);
}
EXPORT_SYMBOL_GPL(shmem_truncate_range);

#define shmem_vm_ops				generic_file_vm_ops
#define shmem_file_operations			ramfs_file_operations
#define shmem_get_inode(sb, dir, mode, dev, flags)	ramfs_get_inode(sb, dir, mode, dev)
#define shmem_acct_size(flags, size)		0
#define shmem_unacct_size(flags, size)		do {} while (0)

#endif /* CONFIG_SHMEM */
/**
 * shmem_file_setup - get an unlinked file living in tmpfs
 * @name: name for dentry (to be seen in /proc/<pid>/maps
 * @size: size to be set for the file
 * @flags: VM_NORESERVE suppresses pre-accounting of the entire object size
 */
struct file *shmem_file_setup(const char *name, loff_t size, unsigned long flags)
{
    int error;
    struct file *file;
    struct inode *inode;
    struct path path;
    struct dentry *root;
    struct qstr this;

    if (IS_ERR(shm_mnt))
        return (void *)shm_mnt;

    if (size < 0 || size > MAX_LFS_FILESIZE)
        return ERR_PTR(-EINVAL);

    if (shmem_acct_size(flags, size))
        return ERR_PTR(-ENOMEM);

    error = -ENOMEM;
    this.name = name;
    this.len = strlen(name);
    this.hash = 0; /* will go */
    root = shm_mnt->mnt_root;
    path.dentry = d_alloc(root, &this);
    if (!path.dentry)
        goto put_memory;
    path.mnt = mntget(shm_mnt);

    error = -ENOSPC;
    inode = shmem_get_inode(root->d_sb, NULL, S_IFREG | S_IRWXUGO, 0, flags);
    if (!inode)
        goto put_dentry;

    d_instantiate(path.dentry, inode);
    inode->i_size = size;
    clear_nlink(inode); /* It is unlinked */
#ifndef CONFIG_MMU
    error = ramfs_nommu_expand_for_mapping(inode, size);
    if (error)
        goto put_dentry;
#endif

    error = -ENFILE;
    file = alloc_file(&path, FMODE_WRITE | FMODE_READ,
          &shmem_file_operations);
    if (!file)
        goto put_dentry;

    return file;

put_dentry:
    path_put(&path);
put_memory:
    shmem_unacct_size(flags, size);
    return ERR_PTR(error);
}
EXPORT_SYMBOL_GPL(shmem_file_setup);
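/*
 * Illustrative sketch, not part of the original source: a typical
 * in-kernel caller makes an unlinked 1MB tmpfs file and drops it with
 * fput() when finished; passing VM_NORESERVE instead of 0 would skip
 * the up-front accounting.  shmem_file_setup_example() is a
 * hypothetical name.
 */
static inline int shmem_file_setup_example(void)
{
    struct file *file = shmem_file_setup("example", 1024 * 1024, 0);

    if (IS_ERR(file))
        return PTR_ERR(file);
    /* ... use file->f_mapping, mmap the file, etc ... */
    fput(file);
    return 0;
}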
/**
 * shmem_zero_setup - setup a shared anonymous mapping
 * @vma: the vma to be mmapped is prepared by do_mmap_pgoff
 */
int shmem_zero_setup(struct vm_area_struct *vma)
{
    struct file *file;
    loff_t size = vma->vm_end - vma->vm_start;

    file = shmem_file_setup("dev/zero", size, vma->vm_flags);
    if (IS_ERR(file))
        return PTR_ERR(file);

    if (vma->vm_file)
        fput(vma->vm_file);
    vma->vm_file = file;
    vma->vm_ops = &shmem_vm_ops;
    vma->vm_flags |= VM_CAN_NONLINEAR;
    return 0;
}
/**
 * shmem_read_mapping_page_gfp - read into page cache, using specified page allocation flags.
 * @mapping:	the page's address_space
 * @index:	the page index
 * @gfp:	the page allocator flags to use if allocating
 *
 * This behaves as a tmpfs "read_cache_page_gfp(mapping, index, gfp)",
 * with any new page allocations done using the specified allocation flags.
 * But read_cache_page_gfp() uses the ->readpage() method: which does not
 * suit tmpfs, since it may have pages in swapcache, and needs to find those
 * for itself; although drivers/gpu/drm i915 and ttm rely upon this support.
 *
 * i915_gem_object_get_pages_gtt() mixes __GFP_NORETRY | __GFP_NOWARN in
 * with the mapping_gfp_mask(), to avoid OOMing the machine unnecessarily.
 */
struct page *shmem_read_mapping_page_gfp(struct address_space *mapping,
                     pgoff_t index, gfp_t gfp)
{
#ifdef CONFIG_SHMEM
    struct inode *inode = mapping->host;
    struct page *page;
    int error;

    BUG_ON(mapping->a_ops != &shmem_aops);
    error = shmem_getpage_gfp(inode, index, &page, SGP_CACHE, gfp, NULL);
    if (error)
        page = ERR_PTR(error);
    else
        unlock_page(page);
    return page;
#else
    /*
     * The tiny !SHMEM case uses ramfs without swap
     */
    return read_cache_page_gfp(mapping, index, gfp);
#endif
}
EXPORT_SYMBOL_GPL(shmem_read_mapping_page_gfp);
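/*
 * Illustrative sketch, not part of the original source: a driver reading
 * a tmpfs-backed object page by page, in the style of the GEM callers
 * mentioned above, mixing in __GFP_NORETRY | __GFP_NOWARN so allocation
 * failure is returned rather than OOMing.  shmem_read_example() is a
 * hypothetical name.
 */
static inline struct page *shmem_read_example(struct address_space *mapping,
                          pgoff_t index)
{
    gfp_t gfp = mapping_gfp_mask(mapping) | __GFP_NORETRY | __GFP_NOWARN;

    return shmem_read_mapping_page_gfp(mapping, index, gfp);
}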