/*
 * Resizable virtual memory filesystem for Linux.
 *
 * Copyright (C) 2000 Linus Torvalds.
 *		 2000-2001 Christoph Rohland
 * Copyright (C) 2002-2011 Hugh Dickins.
 * Copyright (C) 2011 Google Inc.
 * Copyright (C) 2002-2005 VERITAS Software Corporation.
 * Copyright (C) 2004 Andi Kleen, SuSE Labs
 *
 * Extended attribute support for tmpfs:
 * Copyright (c) 2004, Luke Kenneth Casson Leighton <lkcl@lkcl.net>
 * Copyright (c) 2004 Red Hat, Inc., James Morris <jmorris@redhat.com>
 *
 * Copyright (c) 2004, 2008 Matt Mackall <mpm@selenic.com>
 *
 * This file is released under the GPL.
 */
#include <linux/init.h>
#include <linux/vfs.h>
#include <linux/mount.h>
#include <linux/pagemap.h>
#include <linux/file.h>
#include <linux/export.h>
#include <linux/swap.h>

static struct vfsmount *shm_mnt;
/*
 * This virtual memory filesystem is heavily based on the ramfs. It
 * extends ramfs by the ability to use swap and honor resource limits
 * which makes it a completely usable filesystem.
 */
#include <linux/xattr.h>
#include <linux/exportfs.h>
#include <linux/posix_acl.h>
#include <linux/generic_acl.h>
#include <linux/mman.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/backing-dev.h>
#include <linux/shmem_fs.h>
#include <linux/writeback.h>
#include <linux/blkdev.h>
#include <linux/pagevec.h>
#include <linux/percpu_counter.h>
#include <linux/splice.h>
#include <linux/security.h>
#include <linux/swapops.h>
#include <linux/mempolicy.h>
#include <linux/namei.h>
#include <linux/ctype.h>
#include <linux/migrate.h>
#include <linux/highmem.h>
#include <linux/seq_file.h>
#include <linux/magic.h>

#include <asm/uaccess.h>
#include <asm/pgtable.h>
#define BLOCKS_PER_PAGE  (PAGE_CACHE_SIZE/512)
#define VM_ACCT(size)    (PAGE_CACHE_ALIGN(size) >> PAGE_SHIFT)

/* Pretend that each entry is of this size in directory's i_size */
#define BOGO_DIRENT_SIZE 20

/* Symlink up to this size is kmalloc'ed instead of using a swappable page */
#define SHORT_SYMLINK_LEN 128
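
/*
 * Editor's worked example of the macros above (illustrative only, not part
 * of the original source; assumes the common 4096-byte PAGE_CACHE_SIZE):
 * BLOCKS_PER_PAGE is 4096/512 = 8 half-KiB blocks per page, and VM_ACCT
 * rounds a byte count up to whole pages before charging it, e.g.
 *
 *	VM_ACCT(1)    == 1	(one byte still pins a whole page)
 *	VM_ACCT(4096) == 1
 *	VM_ACCT(5000) == 2	(8192 bytes rounded up, >> PAGE_SHIFT)
 */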
/*
 * vmtruncate_range() communicates with shmem_fault via
 * inode->i_private (with i_mutex making sure that it has only one user at
 * a time): we would prefer not to enlarge the shmem inode just for that.
 */
struct shmem_falloc {
	wait_queue_head_t *waitq; /* faults into hole wait for punch to end */
	pgoff_t start;		/* start of range currently being fallocated */
	pgoff_t next;		/* the next page offset to be fallocated */
};
struct shmem_xattr {
	struct list_head list;	/* anchored by shmem_inode_info->xattr_list */
	char *name;		/* xattr name */
	size_t size;
	char value[0];
};
/* Flag allocation requirements to shmem_getpage */
enum sgp_type {
	SGP_READ,	/* don't exceed i_size, don't allocate page */
	SGP_CACHE,	/* don't exceed i_size, may allocate page */
	SGP_DIRTY,	/* like SGP_CACHE, but set new page dirty */
	SGP_WRITE,	/* may exceed i_size, may allocate page */
};
static unsigned long shmem_default_max_blocks(void)
{
	return totalram_pages / 2;
}

static unsigned long shmem_default_max_inodes(void)
{
	return min(totalram_pages - totalhigh_pages, totalram_pages / 2);
}
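
/*
 * Editor's worked example (illustrative only, assumes 4 KiB pages): on a
 * machine with 4 GiB of RAM, totalram_pages is 1048576, so the default
 * block limit is 524288 pages (2 GiB) per tmpfs instance; the default
 * inode limit is one inode per page of lowmem, but never more than half
 * of RAM's worth of pages.
 */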
static int shmem_getpage_gfp(struct inode *inode, pgoff_t index,
	struct page **pagep, enum sgp_type sgp, gfp_t gfp, int *fault_type);

static inline int shmem_getpage(struct inode *inode, pgoff_t index,
	struct page **pagep, enum sgp_type sgp, int *fault_type)
{
	return shmem_getpage_gfp(inode, index, pagep, sgp,
			mapping_gfp_mask(inode->i_mapping), fault_type);
}
static inline struct shmem_sb_info *SHMEM_SB(struct super_block *sb)
{
	return sb->s_fs_info;
}
/*
 * shmem_file_setup pre-accounts the whole fixed size of a VM object,
 * for shared memory and for shared anonymous (/dev/zero) mappings
 * (unless MAP_NORESERVE and sysctl_overcommit_memory <= 1),
 * consistent with the pre-accounting of private mappings ...
 */
static inline int shmem_acct_size(unsigned long flags, loff_t size)
{
	return (flags & VM_NORESERVE) ?
		0 : security_vm_enough_memory_kern(VM_ACCT(size));
}

static inline void shmem_unacct_size(unsigned long flags, loff_t size)
{
	if (!(flags & VM_NORESERVE))
		vm_unacct_memory(VM_ACCT(size));
}
/*
 * ... whereas tmpfs objects are accounted incrementally as
 * pages are allocated, in order to allow huge sparse files.
 * shmem_getpage reports shmem_acct_block failure as -ENOSPC not -ENOMEM,
 * so that a failure on a sparse tmpfs mapping will give SIGBUS not OOM.
 */
static inline int shmem_acct_block(unsigned long flags)
{
	return (flags & VM_NORESERVE) ?
		security_vm_enough_memory_kern(VM_ACCT(PAGE_CACHE_SIZE)) : 0;
}

static inline void shmem_unacct_blocks(unsigned long flags, long pages)
{
	if (flags & VM_NORESERVE)
		vm_unacct_memory(pages * VM_ACCT(PAGE_CACHE_SIZE));
}
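
/*
 * Editor's summary of the two accounting schemes above (illustrative,
 * not part of the original source):
 *
 *	object				when accounted		failure
 *	shm / shared anon mapping	up front, whole size	at setup time
 *	tmpfs file (VM_NORESERVE)	per page, on demand	-ENOSPC/SIGBUS
 */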
static const struct super_operations shmem_ops;
static const struct address_space_operations shmem_aops;
static const struct file_operations shmem_file_operations;
static const struct inode_operations shmem_inode_operations;
static const struct inode_operations shmem_dir_inode_operations;
static const struct inode_operations shmem_special_inode_operations;
static const struct vm_operations_struct shmem_vm_ops;
static struct backing_dev_info shmem_backing_dev_info __read_mostly = {
	.ra_pages	= 0,	/* No readahead */
	.capabilities	= BDI_CAP_NO_ACCT_AND_WRITEBACK | BDI_CAP_SWAP_BACKED,
};

static LIST_HEAD(shmem_swaplist);
static DEFINE_MUTEX(shmem_swaplist_mutex);
static int shmem_reserve_inode(struct super_block *sb)
{
	struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
	if (sbinfo->max_inodes) {
		spin_lock(&sbinfo->stat_lock);
		if (!sbinfo->free_inodes) {
			spin_unlock(&sbinfo->stat_lock);
			return -ENOSPC;
		}
		sbinfo->free_inodes--;
		spin_unlock(&sbinfo->stat_lock);
	}
	return 0;
}
static void shmem_free_inode(struct super_block *sb)
{
	struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
	if (sbinfo->max_inodes) {
		spin_lock(&sbinfo->stat_lock);
		sbinfo->free_inodes++;
		spin_unlock(&sbinfo->stat_lock);
	}
}
/**
 * shmem_recalc_inode - recalculate the block usage of an inode
 * @inode: inode to recalc
 *
 * We have to calculate the free blocks since the mm can drop
 * undirtied hole pages behind our back.
 *
 * But normally   info->alloced == inode->i_mapping->nrpages + info->swapped
 * So mm freed is info->alloced - (inode->i_mapping->nrpages + info->swapped)
 *
 * It has to be called with the spinlock held.
 */
static void shmem_recalc_inode(struct inode *inode)
{
	struct shmem_inode_info *info = SHMEM_I(inode);
	long freed;

	freed = info->alloced - info->swapped - inode->i_mapping->nrpages;
	if (freed > 0) {
		struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
		if (sbinfo->max_blocks)
			percpu_counter_add(&sbinfo->used_blocks, -freed);
		info->alloced -= freed;
		inode->i_blocks -= freed * BLOCKS_PER_PAGE;
		shmem_unacct_blocks(info->flags, freed);
	}
}
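
/*
 * Editor's usage sketch (mirrors the real call sites later in this file,
 * e.g. shmem_truncate_range() and shmem_writepage()): the caller takes the
 * inode spinlock, adjusts its own counters, then lets shmem_recalc_inode()
 * settle the block accounting:
 *
 *	spin_lock(&info->lock);
 *	info->swapped -= nr_swaps_freed;
 *	shmem_recalc_inode(inode);
 *	spin_unlock(&info->lock);
 */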
/*
 * Replace item expected in radix tree by a new item, while holding tree lock.
 */
static int shmem_radix_tree_replace(struct address_space *mapping,
			pgoff_t index, void *expected, void *replacement)
{
	void **pslot;
	void *item = NULL;

	VM_BUG_ON(!expected);
	pslot = radix_tree_lookup_slot(&mapping->page_tree, index);
	if (pslot)
		item = radix_tree_deref_slot_protected(pslot,
							&mapping->tree_lock);
	if (item != expected)
		return -ENOENT;
	if (replacement)
		radix_tree_replace_slot(pslot, replacement);
	else
		radix_tree_delete(&mapping->page_tree, index);
	return 0;
}
/*
 * Like add_to_page_cache_locked, but error if expected item has gone.
 */
static int shmem_add_to_page_cache(struct page *page,
				   struct address_space *mapping,
				   pgoff_t index, gfp_t gfp, void *expected)
{
	int error = 0;

	VM_BUG_ON(!PageLocked(page));
	VM_BUG_ON(!PageSwapBacked(page));

	if (!expected)
		error = radix_tree_preload(gfp & GFP_RECLAIM_MASK);
	if (!error) {
		page_cache_get(page);
		page->mapping = mapping;
		page->index = index;

		spin_lock_irq(&mapping->tree_lock);
		if (!expected)
			error = radix_tree_insert(&mapping->page_tree,
							index, page);
		else
			error = shmem_radix_tree_replace(mapping, index,
							expected, page);
		if (!error) {
			mapping->nrpages++;
			__inc_zone_page_state(page, NR_FILE_PAGES);
			__inc_zone_page_state(page, NR_SHMEM);
			spin_unlock_irq(&mapping->tree_lock);
		} else {
			page->mapping = NULL;
			spin_unlock_irq(&mapping->tree_lock);
			page_cache_release(page);
		}
		if (!expected)
			radix_tree_preload_end();
	}
	if (error)
		mem_cgroup_uncharge_cache_page(page);
	return error;
}
/*
 * Like delete_from_page_cache, but substitutes swap for page.
 */
static void shmem_delete_from_page_cache(struct page *page, void *radswap)
{
	struct address_space *mapping = page->mapping;
	int error;

	spin_lock_irq(&mapping->tree_lock);
	error = shmem_radix_tree_replace(mapping, page->index, page, radswap);
	page->mapping = NULL;
	mapping->nrpages--;
	__dec_zone_page_state(page, NR_FILE_PAGES);
	__dec_zone_page_state(page, NR_SHMEM);
	spin_unlock_irq(&mapping->tree_lock);
	page_cache_release(page);
	BUG_ON(error);
}
/*
 * Like find_get_pages, but collecting swap entries as well as pages.
 */
static unsigned shmem_find_get_pages_and_swap(struct address_space *mapping,
					pgoff_t start, unsigned int nr_pages,
					struct page **pages, pgoff_t *indices)
{
	unsigned int i;
	unsigned int ret;
	unsigned int nr_found;

	rcu_read_lock();
restart:
	nr_found = radix_tree_gang_lookup_slot(&mapping->page_tree,
				(void ***)pages, indices, start, nr_pages);
	ret = 0;
	for (i = 0; i < nr_found; i++) {
		struct page *page;
repeat:
		page = radix_tree_deref_slot((void **)pages[i]);
		if (unlikely(!page))
			continue;
		if (radix_tree_exception(page)) {
			if (radix_tree_deref_retry(page))
				goto restart;
			/*
			 * Otherwise, we must be storing a swap entry
			 * here as an exceptional entry: so return it
			 * without attempting to raise page count.
			 */
			goto export;
		}
		if (!page_cache_get_speculative(page))
			goto repeat;

		/* Has the page moved? */
		if (unlikely(page != *((void **)pages[i]))) {
			page_cache_release(page);
			goto repeat;
		}
export:
		indices[ret] = indices[i];
		pages[ret] = page;
		ret++;
	}
	if (unlikely(!ret && nr_found))
		goto restart;
	rcu_read_unlock();
	return ret;
}
/*
 * Remove swap entry from radix tree, free the swap and its page cache.
 */
static int shmem_free_swap(struct address_space *mapping,
			   pgoff_t index, void *radswap)
{
	int error;

	spin_lock_irq(&mapping->tree_lock);
	error = shmem_radix_tree_replace(mapping, index, radswap, NULL);
	spin_unlock_irq(&mapping->tree_lock);
	if (!error)
		free_swap_and_cache(radix_to_swp_entry(radswap));
	return error;
}
/*
 * Pagevec may contain swap entries, so shuffle up pages before releasing.
 */
static void shmem_deswap_pagevec(struct pagevec *pvec)
{
	int i, j;

	for (i = 0, j = 0; i < pagevec_count(pvec); i++) {
		struct page *page = pvec->pages[i];
		if (!radix_tree_exceptional_entry(page))
			pvec->pages[j++] = page;
	}
	pvec->nr = j;
}
/*
 * SysV IPC SHM_UNLOCK restores Unevictable pages to their evictable lists.
 */
void shmem_unlock_mapping(struct address_space *mapping)
{
	struct pagevec pvec;
	pgoff_t indices[PAGEVEC_SIZE];
	pgoff_t index = 0;

	pagevec_init(&pvec, 0);
	/*
	 * Minor point, but we might as well stop if someone else SHM_LOCKs it.
	 */
	while (!mapping_unevictable(mapping)) {
		/*
		 * Avoid pagevec_lookup(): find_get_pages() returns 0 as if it
		 * has finished, if it hits a row of PAGEVEC_SIZE swap entries.
		 */
		pvec.nr = shmem_find_get_pages_and_swap(mapping, index,
					PAGEVEC_SIZE, pvec.pages, indices);
		if (!pvec.nr)
			break;
		index = indices[pvec.nr - 1] + 1;
		shmem_deswap_pagevec(&pvec);
		check_move_unevictable_pages(pvec.pages, pvec.nr);
		pagevec_release(&pvec);
		cond_resched();
	}
}
/*
 * Remove range of pages and swap entries from radix tree, and free them.
 */
void shmem_truncate_range(struct inode *inode, loff_t lstart, loff_t lend)
{
	struct address_space *mapping = inode->i_mapping;
	struct shmem_inode_info *info = SHMEM_I(inode);
	pgoff_t start = (lstart + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
	unsigned partial = lstart & (PAGE_CACHE_SIZE - 1);
	pgoff_t end = (lend >> PAGE_CACHE_SHIFT);
	struct pagevec pvec;
	pgoff_t indices[PAGEVEC_SIZE];
	long nr_swaps_freed = 0;
	pgoff_t index;
	int i;

	BUG_ON((lend & (PAGE_CACHE_SIZE - 1)) != (PAGE_CACHE_SIZE - 1));

	pagevec_init(&pvec, 0);
	index = start;
	while (index <= end) {
		pvec.nr = shmem_find_get_pages_and_swap(mapping, index,
			min(end - index, (pgoff_t)PAGEVEC_SIZE - 1) + 1,
							pvec.pages, indices);
		if (!pvec.nr)
			break;
		mem_cgroup_uncharge_start();
		for (i = 0; i < pagevec_count(&pvec); i++) {
			struct page *page = pvec.pages[i];

			index = indices[i];
			if (index > end)
				break;

			if (radix_tree_exceptional_entry(page)) {
				nr_swaps_freed += !shmem_free_swap(mapping,
								index, page);
				continue;
			}

			if (!trylock_page(page))
				continue;
			if (page->mapping == mapping) {
				VM_BUG_ON(PageWriteback(page));
				truncate_inode_page(mapping, page);
			}
			unlock_page(page);
		}
		shmem_deswap_pagevec(&pvec);
		pagevec_release(&pvec);
		mem_cgroup_uncharge_end();
		cond_resched();
		index++;
	}

	if (partial) {
		struct page *page = NULL;
		shmem_getpage(inode, start - 1, &page, SGP_READ, NULL);
		if (page) {
			zero_user_segment(page, partial, PAGE_CACHE_SIZE);
			set_page_dirty(page);
			unlock_page(page);
			page_cache_release(page);
		}
	}

	index = start;
	while (index <= end) {
		cond_resched();
		pvec.nr = shmem_find_get_pages_and_swap(mapping, index,
			min(end - index, (pgoff_t)PAGEVEC_SIZE - 1) + 1,
							pvec.pages, indices);
		if (!pvec.nr) {
			/* If all gone or hole-punch, we're done */
			if (index == start || end != -1)
				break;
			/* But if truncating, restart to make sure all gone */
			index = start;
			continue;
		}
		mem_cgroup_uncharge_start();
		for (i = 0; i < pagevec_count(&pvec); i++) {
			struct page *page = pvec.pages[i];

			index = indices[i];
			if (index > end)
				break;

			if (radix_tree_exceptional_entry(page)) {
				if (shmem_free_swap(mapping, index, page)) {
					/* Swap was replaced by page: retry */
					index--;
					break;
				}
				nr_swaps_freed++;
				continue;
			}

			lock_page(page);
			if (page->mapping == mapping) {
				VM_BUG_ON(PageWriteback(page));
				truncate_inode_page(mapping, page);
			} else {
				/* Page was replaced by swap: retry */
				unlock_page(page);
				index--;
				break;
			}
			unlock_page(page);
		}
		shmem_deswap_pagevec(&pvec);
		pagevec_release(&pvec);
		mem_cgroup_uncharge_end();
		index++;
	}

	spin_lock(&info->lock);
	info->swapped -= nr_swaps_freed;
	shmem_recalc_inode(inode);
	spin_unlock(&info->lock);

	inode->i_ctime = inode->i_mtime = CURRENT_TIME;
}
EXPORT_SYMBOL_GPL(shmem_truncate_range);
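
/*
 * Editor's worked example of the index arithmetic above (illustrative,
 * assumes 4 KiB pages; not from the original source): truncating a file
 * to 10000 bytes means lstart = 10000, lend = -1, so start = 3 (the first
 * whole page to drop) and partial = 10000 & 4095 = 1808; page 2 survives,
 * and zero_user_segment() clears its bytes 1808..4095 so no stale data
 * reappears if the file is later extended.
 */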
static int shmem_setattr(struct dentry *dentry, struct iattr *attr)
{
	struct inode *inode = dentry->d_inode;
	int error;

	error = setattr_prepare(dentry, attr);
	if (error)
		return error;

	if (S_ISREG(inode->i_mode) && (attr->ia_valid & ATTR_SIZE)) {
		loff_t oldsize = inode->i_size;
		loff_t newsize = attr->ia_size;

		if (newsize != oldsize) {
			i_size_write(inode, newsize);
			inode->i_ctime = inode->i_mtime = CURRENT_TIME;
		}
		if (newsize < oldsize) {
			loff_t holebegin = round_up(newsize, PAGE_SIZE);
			unmap_mapping_range(inode->i_mapping, holebegin, 0, 1);
			shmem_truncate_range(inode, newsize, (loff_t)-1);
			/* unmap again to remove racily COWed private pages */
			unmap_mapping_range(inode->i_mapping, holebegin, 0, 1);
		}
	}

	setattr_copy(inode, attr);
#ifdef CONFIG_TMPFS_POSIX_ACL
	if (attr->ia_valid & ATTR_MODE)
		error = generic_acl_chmod(inode);
#endif
	return error;
}
static void shmem_evict_inode(struct inode *inode)
{
	struct shmem_inode_info *info = SHMEM_I(inode);
	struct shmem_xattr *xattr, *nxattr;

	if (inode->i_mapping->a_ops == &shmem_aops) {
		shmem_unacct_size(info->flags, inode->i_size);
		inode->i_size = 0;
		shmem_truncate_range(inode, 0, (loff_t)-1);
		if (!list_empty(&info->swaplist)) {
			mutex_lock(&shmem_swaplist_mutex);
			list_del_init(&info->swaplist);
			mutex_unlock(&shmem_swaplist_mutex);
		}
	} else
		kfree(info->symlink);

	list_for_each_entry_safe(xattr, nxattr, &info->xattr_list, list) {
		kfree(xattr->name);
		kfree(xattr);
	}
	WARN_ON(inode->i_blocks);
	shmem_free_inode(inode->i_sb);
	end_writeback(inode);
}
/*
 * If swap found in inode, free it and move page from swapcache to filecache.
 */
static int shmem_unuse_inode(struct shmem_inode_info *info,
			     swp_entry_t swap, struct page *page)
{
	struct address_space *mapping = info->vfs_inode.i_mapping;
	void *radswap;
	pgoff_t index;
	int error;

	radswap = swp_to_radix_entry(swap);
	index = radix_tree_locate_item(&mapping->page_tree, radswap);
	if (index == -1)
		return 0;

	/*
	 * Move _head_ to start search for next from here.
	 * But be careful: shmem_evict_inode checks list_empty without taking
	 * mutex, and there's an instant in list_move_tail when info->swaplist
	 * would appear empty, if it were the only one on shmem_swaplist.
	 */
	if (shmem_swaplist.next != &info->swaplist)
		list_move_tail(&shmem_swaplist, &info->swaplist);

	/*
	 * We rely on shmem_swaplist_mutex, not only to protect the swaplist,
	 * but also to hold up shmem_evict_inode(): so inode cannot be freed
	 * beneath us (pagelock doesn't help until the page is in pagecache).
	 */
	error = shmem_add_to_page_cache(page, mapping, index,
						GFP_NOWAIT, radswap);
	/* which does mem_cgroup_uncharge_cache_page on error */

	if (error != -ENOMEM) {
		/*
		 * Truncation and eviction use free_swap_and_cache(), which
		 * only does trylock page: if we raced, best clean up here.
		 */
		delete_from_swap_cache(page);
		set_page_dirty(page);
		if (!error) {
			spin_lock(&info->lock);
			info->swapped--;
			spin_unlock(&info->lock);
			swap_free(swap);
		}
		error = 1;	/* not an error, but entry was found */
	}
	return error;
}
/*
 * Search through swapped inodes to find and replace swap by page.
 */
int shmem_unuse(swp_entry_t swap, struct page *page)
{
	struct list_head *this, *next;
	struct shmem_inode_info *info;
	int found = 0;
	int error;

	/*
	 * Charge page using GFP_KERNEL while we can wait, before taking
	 * the shmem_swaplist_mutex which might hold up shmem_writepage().
	 * Charged back to the user (not to caller) when swap account is used.
	 */
	error = mem_cgroup_cache_charge(page, current->mm, GFP_KERNEL);
	if (error)
		goto out;
	/* No radix_tree_preload: swap entry keeps a place for page in tree */

	mutex_lock(&shmem_swaplist_mutex);
	list_for_each_safe(this, next, &shmem_swaplist) {
		info = list_entry(this, struct shmem_inode_info, swaplist);
		if (info->swapped)
			found = shmem_unuse_inode(info, swap, page);
		else
			list_del_init(&info->swaplist);
		cond_resched();
		if (found)
			break;
	}
	mutex_unlock(&shmem_swaplist_mutex);

	if (!found)
		mem_cgroup_uncharge_cache_page(page);
	if (found < 0)
		error = found;
out:
	unlock_page(page);
	page_cache_release(page);
	return error;
}
/*
 * Move the page from the page cache to the swap cache.
 */
static int shmem_writepage(struct page *page, struct writeback_control *wbc)
{
	struct shmem_inode_info *info;
	struct address_space *mapping;
	struct inode *inode;
	swp_entry_t swap;

	BUG_ON(!PageLocked(page));
	mapping = page->mapping;
	inode = mapping->host;
	info = SHMEM_I(inode);
	if (info->flags & VM_LOCKED)
		goto redirty;
	if (!total_swap_pages)
		goto redirty;

	/*
	 * shmem_backing_dev_info's capabilities prevent regular writeback or
	 * sync from ever calling shmem_writepage; but a stacking filesystem
	 * might use ->writepage of its underlying filesystem, in which case
	 * tmpfs should write out to swap only in response to memory pressure,
	 * and not for the writeback threads or sync.
	 */
	if (!wbc->for_reclaim) {
		WARN_ON_ONCE(1);	/* Still happens? Tell us about it! */
		goto redirty;
	}
	swap = get_swap_page();
	if (!swap.val)
		goto redirty;

	/*
	 * Add inode to shmem_unuse()'s list of swapped-out inodes,
	 * if it's not already there.  Do it now before the page is
	 * moved to swap cache, when its pagelock no longer protects
	 * the inode from eviction.  But don't unlock the mutex until
	 * we've incremented swapped, because shmem_unuse_inode() will
	 * prune a !swapped inode from the swaplist under this mutex.
	 */
	mutex_lock(&shmem_swaplist_mutex);
	if (list_empty(&info->swaplist))
		list_add_tail(&info->swaplist, &shmem_swaplist);

	if (add_to_swap_cache(page, swap, GFP_ATOMIC) == 0) {
		swap_shmem_alloc(swap);
		shmem_delete_from_page_cache(page, swp_to_radix_entry(swap));

		spin_lock(&info->lock);
		info->swapped++;
		shmem_recalc_inode(inode);
		spin_unlock(&info->lock);

		mutex_unlock(&shmem_swaplist_mutex);
		BUG_ON(page_mapped(page));
		swap_writepage(page, wbc);
		return 0;
	}

	mutex_unlock(&shmem_swaplist_mutex);
	swapcache_free(swap, NULL);
redirty:
	set_page_dirty(page);
	if (wbc->for_reclaim)
		return AOP_WRITEPAGE_ACTIVATE;	/* Return with page locked */
	unlock_page(page);
	return 0;
}
#ifdef CONFIG_NUMA
#ifdef CONFIG_TMPFS
static void shmem_show_mpol(struct seq_file *seq, struct mempolicy *mpol)
{
	char buffer[64];

	if (!mpol || mpol->mode == MPOL_DEFAULT)
		return;		/* show nothing */

	mpol_to_str(buffer, sizeof(buffer), mpol, 1);

	seq_printf(seq, ",mpol=%s", buffer);
}

static struct mempolicy *shmem_get_sbmpol(struct shmem_sb_info *sbinfo)
{
	struct mempolicy *mpol = NULL;
	if (sbinfo->mpol) {
		spin_lock(&sbinfo->stat_lock);	/* prevent replace/use races */
		mpol = sbinfo->mpol;
		mpol_get(mpol);
		spin_unlock(&sbinfo->stat_lock);
	}
	return mpol;
}
#endif /* CONFIG_TMPFS */
static struct page *shmem_swapin(swp_entry_t swap, gfp_t gfp,
			struct shmem_inode_info *info, pgoff_t index)
{
	struct vm_area_struct pvma;
	struct page *page;

	/* Create a pseudo vma that just contains the policy */
	pvma.vm_start = 0;
	pvma.vm_pgoff = index;
	pvma.vm_ops = NULL;
	pvma.vm_policy = mpol_shared_policy_lookup(&info->policy, index);

	page = swapin_readahead(swap, gfp, &pvma, 0);

	/* Drop reference taken by mpol_shared_policy_lookup() */
	mpol_cond_put(pvma.vm_policy);

	return page;
}

static struct page *shmem_alloc_page(gfp_t gfp,
			struct shmem_inode_info *info, pgoff_t index)
{
	struct vm_area_struct pvma;
	struct page *page;

	/* Create a pseudo vma that just contains the policy */
	pvma.vm_start = 0;
	pvma.vm_pgoff = index;
	pvma.vm_ops = NULL;
	pvma.vm_policy = mpol_shared_policy_lookup(&info->policy, index);

	page = alloc_page_vma(gfp, &pvma, 0);

	/* Drop reference taken by mpol_shared_policy_lookup() */
	mpol_cond_put(pvma.vm_policy);

	return page;
}
#else /* !CONFIG_NUMA */
#ifdef CONFIG_TMPFS
static inline void shmem_show_mpol(struct seq_file *seq, struct mempolicy *mpol)
{
}
#endif /* CONFIG_TMPFS */

static inline struct page *shmem_swapin(swp_entry_t swap, gfp_t gfp,
			struct shmem_inode_info *info, pgoff_t index)
{
	return swapin_readahead(swap, gfp, NULL, 0);
}

static inline struct page *shmem_alloc_page(gfp_t gfp,
			struct shmem_inode_info *info, pgoff_t index)
{
	return alloc_page(gfp);
}
#endif /* CONFIG_NUMA */

#if !defined(CONFIG_NUMA) || !defined(CONFIG_TMPFS)
static inline struct mempolicy *shmem_get_sbmpol(struct shmem_sb_info *sbinfo)
{
	return NULL;
}
#endif
/*
 * shmem_getpage_gfp - find page in cache, or get from swap, or allocate
 *
 * If we allocate a new one we do not mark it dirty. That's up to the
 * vm. If we swap it in we mark it dirty since we also free the swap
 * entry, since a page cannot live in both the swap and page cache.
 */
static int shmem_getpage_gfp(struct inode *inode, pgoff_t index,
	struct page **pagep, enum sgp_type sgp, gfp_t gfp, int *fault_type)
{
	struct address_space *mapping = inode->i_mapping;
	struct shmem_inode_info *info;
	struct shmem_sb_info *sbinfo;
	struct page *page;
	swp_entry_t swap;
	int error;
	int once = 0;

	if (index > (MAX_LFS_FILESIZE >> PAGE_CACHE_SHIFT))
		return -EFBIG;
repeat:
	swap.val = 0;
	page = find_lock_page(mapping, index);
	if (radix_tree_exceptional_entry(page)) {
		swap = radix_to_swp_entry(page);
		page = NULL;
	}

	if (sgp != SGP_WRITE &&
	    ((loff_t)index << PAGE_CACHE_SHIFT) >= i_size_read(inode)) {
		error = -EINVAL;
		goto failed;
	}

	if (page || (sgp == SGP_READ && !swap.val)) {
		/*
		 * Once we can get the page lock, it must be uptodate:
		 * if there were an error in reading back from swap,
		 * the page would not be inserted into the filecache.
		 */
		BUG_ON(page && !PageUptodate(page));
		*pagep = page;
		return 0;
	}

	/*
	 * Fast cache lookup did not find it:
	 * bring it back from swap or allocate.
	 */
	info = SHMEM_I(inode);
	sbinfo = SHMEM_SB(inode->i_sb);

	if (swap.val) {
		/* Look it up and read it in.. */
		page = lookup_swap_cache(swap);
		if (!page) {
			/* here we actually do the io */
			if (fault_type)
				*fault_type |= VM_FAULT_MAJOR;
			page = shmem_swapin(swap, gfp, info, index);
			if (!page) {
				error = -ENOMEM;
				goto failed;
			}
		}

		/* We have to do this with page locked to prevent races */
		lock_page(page);
		if (!PageUptodate(page)) {
			error = -EIO;
			goto failed;
		}
		wait_on_page_writeback(page);

		/* Someone may have already done it for us */
		if (page->mapping) {
			if (page->mapping == mapping &&
			    page->index == index)
				goto done;
			error = -EEXIST;
			goto failed;
		}

		error = mem_cgroup_cache_charge(page, current->mm,
						gfp & GFP_RECLAIM_MASK);
		if (!error)
			error = shmem_add_to_page_cache(page, mapping, index,
						gfp, swp_to_radix_entry(swap));
		if (error)
			goto failed;

		spin_lock(&info->lock);
		info->swapped--;
		shmem_recalc_inode(inode);
		spin_unlock(&info->lock);

		delete_from_swap_cache(page);
		set_page_dirty(page);
		swap_free(swap);

	} else {
		if (shmem_acct_block(info->flags)) {
			error = -ENOSPC;
			goto failed;
		}
		if (sbinfo->max_blocks) {
			if (percpu_counter_compare(&sbinfo->used_blocks,
						sbinfo->max_blocks) >= 0) {
				error = -ENOSPC;
				goto unacct;
			}
			percpu_counter_inc(&sbinfo->used_blocks);
		}

		page = shmem_alloc_page(gfp, info, index);
		if (!page) {
			error = -ENOMEM;
			goto decused;
		}

		SetPageSwapBacked(page);
		__set_page_locked(page);
		error = mem_cgroup_cache_charge(page, current->mm,
						gfp & GFP_RECLAIM_MASK);
		if (!error)
			error = shmem_add_to_page_cache(page, mapping, index,
						gfp, NULL);
		if (error)
			goto decused;
		lru_cache_add_anon(page);

		spin_lock(&info->lock);
		info->alloced++;
		inode->i_blocks += BLOCKS_PER_PAGE;
		shmem_recalc_inode(inode);
		spin_unlock(&info->lock);

		clear_highpage(page);
		flush_dcache_page(page);
		SetPageUptodate(page);
		if (sgp == SGP_DIRTY)
			set_page_dirty(page);
	}
done:
	/* Perhaps the file has been truncated since we checked */
	if (sgp != SGP_WRITE &&
	    ((loff_t)index << PAGE_CACHE_SHIFT) >= i_size_read(inode)) {
		error = -EINVAL;
		goto trunc;
	}
	*pagep = page;
	return 0;

	/*
	 * Error recovery.
	 */
trunc:
	ClearPageDirty(page);
	delete_from_page_cache(page);
	spin_lock(&info->lock);
	info->alloced--;
	inode->i_blocks -= BLOCKS_PER_PAGE;
	spin_unlock(&info->lock);
decused:
	if (sbinfo->max_blocks)
		percpu_counter_add(&sbinfo->used_blocks, -1);
unacct:
	shmem_unacct_blocks(info->flags, 1);
failed:
	if (swap.val && error != -EINVAL) {
		struct page *test = find_get_page(mapping, index);
		if (test && !radix_tree_exceptional_entry(test))
			page_cache_release(test);
		/* Have another try if the entry has changed */
		if (test != swp_to_radix_entry(swap))
			error = -EEXIST;
	}
	if (page) {
		unlock_page(page);
		page_cache_release(page);
	}
	if (error == -ENOSPC && !once++) {
		info = SHMEM_I(inode);
		spin_lock(&info->lock);
		shmem_recalc_inode(inode);
		spin_unlock(&info->lock);
		goto repeat;
	}
	if (error == -EEXIST)
		goto repeat;
	return error;
}
static int shmem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct inode *inode = vma->vm_file->f_path.dentry->d_inode;
	int error;
	int ret = VM_FAULT_LOCKED;

	/*
	 * Trinity finds that probing a hole which tmpfs is punching can
	 * prevent the hole-punch from ever completing: which in turn
	 * locks writers out with its hold on i_mutex.  So refrain from
	 * faulting pages into the hole while it's being punched.  Although
	 * shmem_truncate_range() does remove the additions, it may be unable to
	 * keep up, as each new page needs its own unmap_mapping_range() call,
	 * and the i_mmap tree grows ever slower to scan if new vmas are added.
	 *
	 * It does not matter if we sometimes reach this check just before the
	 * hole-punch begins, so that one fault then races with the punch:
	 * we just need to make racing faults a rare case.
	 *
	 * The implementation below would be much simpler if we just used a
	 * standard mutex or completion: but we cannot take i_mutex in fault,
	 * and bloating every shmem inode for this unlikely case would be sad.
	 */
	if (unlikely(inode->i_private)) {
		struct shmem_falloc *shmem_falloc;

		spin_lock(&inode->i_lock);
		shmem_falloc = inode->i_private;
		if (shmem_falloc &&
		    vmf->pgoff >= shmem_falloc->start &&
		    vmf->pgoff < shmem_falloc->next) {
			wait_queue_head_t *shmem_falloc_waitq;
			DEFINE_WAIT(shmem_fault_wait);

			ret = VM_FAULT_NOPAGE;
			if ((vmf->flags & FAULT_FLAG_ALLOW_RETRY) &&
			   !(vmf->flags & FAULT_FLAG_RETRY_NOWAIT)) {
				/* It's polite to up mmap_sem if we can */
				up_read(&vma->vm_mm->mmap_sem);
				ret = VM_FAULT_RETRY;
			}

			shmem_falloc_waitq = shmem_falloc->waitq;
			prepare_to_wait(shmem_falloc_waitq, &shmem_fault_wait,
					TASK_UNINTERRUPTIBLE);
			spin_unlock(&inode->i_lock);
			schedule();

			/*
			 * shmem_falloc_waitq points into the vmtruncate_range()
			 * stack of the hole-punching task: shmem_falloc_waitq
			 * is usually invalid by the time we reach here, but
			 * finish_wait() does not dereference it in that case;
			 * though i_lock needed lest racing with wake_up_all().
			 */
			spin_lock(&inode->i_lock);
			finish_wait(shmem_falloc_waitq, &shmem_fault_wait);
			spin_unlock(&inode->i_lock);
			return ret;
		}
		spin_unlock(&inode->i_lock);
	}

	error = shmem_getpage(inode, vmf->pgoff, &vmf->page, SGP_CACHE, &ret);
	if (error)
		return ((error == -ENOMEM) ? VM_FAULT_OOM : VM_FAULT_SIGBUS);

	if (ret & VM_FAULT_MAJOR) {
		count_vm_event(PGMAJFAULT);
		mem_cgroup_count_vm_event(vma->vm_mm, PGMAJFAULT);
	}
	return ret;
}
int vmtruncate_range(struct inode *inode, loff_t lstart, loff_t lend)
{
	/*
	 * If the underlying filesystem is not going to provide
	 * a way to truncate a range of blocks (punch a hole) -
	 * we should return failure right now.
	 * Only CONFIG_SHMEM shmem.c ever supported i_op->truncate_range().
	 */
	if (inode->i_op->truncate_range != shmem_truncate_range)
		return -ENOSYS;

	mutex_lock(&inode->i_mutex);
	{
		struct shmem_falloc shmem_falloc;
		struct address_space *mapping = inode->i_mapping;
		loff_t unmap_start = round_up(lstart, PAGE_SIZE);
		loff_t unmap_end = round_down(1 + lend, PAGE_SIZE) - 1;
		DECLARE_WAIT_QUEUE_HEAD_ONSTACK(shmem_falloc_waitq);

		shmem_falloc.waitq = &shmem_falloc_waitq;
		shmem_falloc.start = unmap_start >> PAGE_SHIFT;
		shmem_falloc.next = (unmap_end + 1) >> PAGE_SHIFT;
		spin_lock(&inode->i_lock);
		inode->i_private = &shmem_falloc;
		spin_unlock(&inode->i_lock);

		if ((u64)unmap_end > (u64)unmap_start)
			unmap_mapping_range(mapping, unmap_start,
					    1 + unmap_end - unmap_start, 0);
		shmem_truncate_range(inode, lstart, lend);
		/* No need to unmap again: hole-punching leaves COWed pages */

		spin_lock(&inode->i_lock);
		inode->i_private = NULL;
		wake_up_all(&shmem_falloc_waitq);
		spin_unlock(&inode->i_lock);
	}
	mutex_unlock(&inode->i_mutex);
	return 0;
}
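
/*
 * Editor's illustrative userspace counterpart (an assumption about the
 * usual caller, not part of the original source): madvise(MADV_REMOVE) on
 * a shared tmpfs mapping is the path that reaches vmtruncate_range() and
 * punches the hole, while concurrent faults wait on the queue set up above.
 *
 *	int fd = open("/dev/shm/demo", O_RDWR | O_CREAT, 0600);
 *	ftruncate(fd, 1 << 20);
 *	char *p = mmap(NULL, 1 << 20, PROT_READ | PROT_WRITE,
 *		       MAP_SHARED, fd, 0);
 *	memset(p, 0xff, 1 << 20);
 *	madvise(p + 4096, 8192, MADV_REMOVE);	-- frees pages 1 and 2
 */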
#ifdef CONFIG_NUMA
static int shmem_set_policy(struct vm_area_struct *vma, struct mempolicy *mpol)
{
	struct inode *inode = vma->vm_file->f_path.dentry->d_inode;
	return mpol_set_shared_policy(&SHMEM_I(inode)->policy, vma, mpol);
}

static struct mempolicy *shmem_get_policy(struct vm_area_struct *vma,
					  unsigned long addr)
{
	struct inode *inode = vma->vm_file->f_path.dentry->d_inode;
	pgoff_t index;

	index = ((addr - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
	return mpol_shared_policy_lookup(&SHMEM_I(inode)->policy, index);
}
#endif
int shmem_lock(struct file *file, int lock, struct user_struct *user)
{
	struct inode *inode = file->f_path.dentry->d_inode;
	struct shmem_inode_info *info = SHMEM_I(inode);
	int retval = -ENOMEM;

	spin_lock(&info->lock);
	if (lock && !(info->flags & VM_LOCKED)) {
		if (!user_shm_lock(inode->i_size, user))
			goto out_nomem;
		info->flags |= VM_LOCKED;
		mapping_set_unevictable(file->f_mapping);
	}
	if (!lock && (info->flags & VM_LOCKED) && user) {
		user_shm_unlock(inode->i_size, user);
		info->flags &= ~VM_LOCKED;
		mapping_clear_unevictable(file->f_mapping);
	}
	retval = 0;

out_nomem:
	spin_unlock(&info->lock);
	return retval;
}
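
/*
 * Editor's illustrative userspace counterpart (an assumption about the
 * usual caller, not part of the original source): SysV shmctl(SHM_LOCK)
 * ends up here with lock=1, marking the segment's mapping unevictable;
 * SHM_UNLOCK reverses it, and shmem_unlock_mapping() above then returns
 * the pages to their evictable lists.
 *
 *	int id = shmget(IPC_PRIVATE, 1 << 20, IPC_CREAT | 0600);
 *	shmctl(id, SHM_LOCK, NULL);	-- pages no longer swapped out
 *	shmctl(id, SHM_UNLOCK, NULL);	-- pages become evictable again
 */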
static int shmem_mmap(struct file *file, struct vm_area_struct *vma)
{
	file_accessed(file);
	vma->vm_ops = &shmem_vm_ops;
	vma->vm_flags |= VM_CAN_NONLINEAR;
	return 0;
}
static struct inode *shmem_get_inode(struct super_block *sb, const struct inode *dir,
				     int mode, dev_t dev, unsigned long flags)
{
	struct inode *inode;
	struct shmem_inode_info *info;
	struct shmem_sb_info *sbinfo = SHMEM_SB(sb);

	if (shmem_reserve_inode(sb))
		return NULL;

	inode = new_inode(sb);
	if (inode) {
		inode->i_ino = get_next_ino();
		inode_init_owner(inode, dir, mode);
		inode->i_blocks = 0;
		inode->i_mapping->backing_dev_info = &shmem_backing_dev_info;
		inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME;
		inode->i_generation = get_seconds();
		info = SHMEM_I(inode);
		memset(info, 0, (char *)inode - (char *)info);
		spin_lock_init(&info->lock);
		info->flags = flags & VM_NORESERVE;
		INIT_LIST_HEAD(&info->swaplist);
		INIT_LIST_HEAD(&info->xattr_list);
		cache_no_acl(inode);

		switch (mode & S_IFMT) {
		default:
			inode->i_op = &shmem_special_inode_operations;
			init_special_inode(inode, mode, dev);
			break;
		case S_IFREG:
			inode->i_mapping->a_ops = &shmem_aops;
			inode->i_op = &shmem_inode_operations;
			inode->i_fop = &shmem_file_operations;
			mpol_shared_policy_init(&info->policy,
						 shmem_get_sbmpol(sbinfo));
			break;
		case S_IFDIR:
			inc_nlink(inode);
			/* Some things misbehave if size == 0 on a directory */
			inode->i_size = 2 * BOGO_DIRENT_SIZE;
			inode->i_op = &shmem_dir_inode_operations;
			inode->i_fop = &simple_dir_operations;
			break;
		case S_IFLNK:
			/*
			 * Must not load anything in the rbtree,
			 * mpol_free_shared_policy will not be called.
			 */
			mpol_shared_policy_init(&info->policy, NULL);
			break;
		}
	} else
		shmem_free_inode(sb);
	return inode;
}
static const struct inode_operations shmem_symlink_inode_operations;
static const struct inode_operations shmem_short_symlink_operations;

#ifdef CONFIG_TMPFS
static int
shmem_write_begin(struct file *file, struct address_space *mapping,
			loff_t pos, unsigned len, unsigned flags,
			struct page **pagep, void **fsdata)
{
	struct inode *inode = mapping->host;
	pgoff_t index = pos >> PAGE_CACHE_SHIFT;
	return shmem_getpage(inode, index, pagep, SGP_WRITE, NULL);
}
static int
shmem_write_end(struct file *file, struct address_space *mapping,
			loff_t pos, unsigned len, unsigned copied,
			struct page *page, void *fsdata)
{
	struct inode *inode = mapping->host;

	if (pos + copied > inode->i_size)
		i_size_write(inode, pos + copied);

	set_page_dirty(page);
	unlock_page(page);
	page_cache_release(page);

	return copied;
}
static void do_shmem_file_read(struct file *filp, loff_t *ppos, read_descriptor_t *desc, read_actor_t actor)
{
	struct inode *inode = filp->f_path.dentry->d_inode;
	struct address_space *mapping = inode->i_mapping;
	pgoff_t index;
	unsigned long offset;
	enum sgp_type sgp = SGP_READ;

	/*
	 * Might this read be for a stacking filesystem?  Then when reading
	 * holes of a sparse file, we actually need to allocate those pages,
	 * and even mark them dirty, so it cannot exceed the max_blocks limit.
	 */
	if (segment_eq(get_fs(), KERNEL_DS))
		sgp = SGP_DIRTY;

	index = *ppos >> PAGE_CACHE_SHIFT;
	offset = *ppos & ~PAGE_CACHE_MASK;

	for (;;) {
		struct page *page = NULL;
		pgoff_t end_index;
		unsigned long nr, ret;
		loff_t i_size = i_size_read(inode);

		end_index = i_size >> PAGE_CACHE_SHIFT;
		if (index > end_index)
			break;
		if (index == end_index) {
			nr = i_size & ~PAGE_CACHE_MASK;
			if (nr <= offset)
				break;
		}

		desc->error = shmem_getpage(inode, index, &page, sgp, NULL);
		if (desc->error) {
			if (desc->error == -EINVAL)
				desc->error = 0;
			break;
		}
		if (page)
			unlock_page(page);

		/*
		 * We must evaluate after, since reads (unlike writes)
		 * are called without i_mutex protection against truncate
		 */
		nr = PAGE_CACHE_SIZE;
		i_size = i_size_read(inode);
		end_index = i_size >> PAGE_CACHE_SHIFT;
		if (index == end_index) {
			nr = i_size & ~PAGE_CACHE_MASK;
			if (nr <= offset) {
				if (page)
					page_cache_release(page);
				break;
			}
		}
		nr -= offset;

		if (page) {
			/*
			 * If users can be writing to this page using arbitrary
			 * virtual addresses, take care about potential aliasing
			 * before reading the page on the kernel side.
			 */
			if (mapping_writably_mapped(mapping))
				flush_dcache_page(page);
			/*
			 * Mark the page accessed if we read the beginning.
			 */
			if (!offset)
				mark_page_accessed(page);
		} else {
			page = ZERO_PAGE(0);
			page_cache_get(page);
		}

		/*
		 * Ok, we have the page, and it's up-to-date, so
		 * now we can copy it to user space...
		 *
		 * The actor routine returns how many bytes were actually used..
		 * NOTE! This may not be the same as how much of a user buffer
		 * we filled up (we may be padding etc), so we can only update
		 * "pos" here (the actor routine has to update the user buffer
		 * pointers and the remaining count).
		 */
		ret = actor(desc, page, offset, nr);
		offset += ret;
		index += offset >> PAGE_CACHE_SHIFT;
		offset &= ~PAGE_CACHE_MASK;

		page_cache_release(page);
		if (ret != nr || !desc->count)
			break;

		cond_resched();
	}

	*ppos = ((loff_t) index << PAGE_CACHE_SHIFT) + offset;
	file_accessed(filp);
}
static ssize_t shmem_file_aio_read(struct kiocb *iocb,
		const struct iovec *iov, unsigned long nr_segs, loff_t pos)
{
	struct file *filp = iocb->ki_filp;
	ssize_t retval;
	unsigned long seg;
	size_t count;
	loff_t *ppos = &iocb->ki_pos;

	retval = generic_segment_checks(iov, &nr_segs, &count, VERIFY_WRITE);
	if (retval)
		return retval;

	for (seg = 0; seg < nr_segs; seg++) {
		read_descriptor_t desc;

		desc.written = 0;
		desc.arg.buf = iov[seg].iov_base;
		desc.count = iov[seg].iov_len;
		if (desc.count == 0)
			continue;
		desc.error = 0;
		do_shmem_file_read(filp, ppos, &desc, file_read_actor);
		retval += desc.written;
		if (desc.error) {
			retval = retval ?: desc.error;
			break;
		}
		if (desc.count > 0)
			break;
	}
	return retval;
}
static ssize_t shmem_file_splice_read(struct file *in, loff_t *ppos,
				struct pipe_inode_info *pipe, size_t len,
				unsigned int flags)
{
	struct address_space *mapping = in->f_mapping;
	struct inode *inode = mapping->host;
	unsigned int loff, nr_pages, req_pages;
	struct page *pages[PIPE_DEF_BUFFERS];
	struct partial_page partial[PIPE_DEF_BUFFERS];
	struct page *page;
	pgoff_t index, end_index;
	loff_t isize, left;
	int error, page_nr;
	struct splice_pipe_desc spd = {
		.pages = pages,
		.partial = partial,
		.nr_pages_max = PIPE_DEF_BUFFERS,
		.flags = flags,
		.ops = &page_cache_pipe_buf_ops,
		.spd_release = spd_release_page,
	};

	isize = i_size_read(inode);
	if (unlikely(*ppos >= isize))
		return 0;

	left = isize - *ppos;
	if (unlikely(left < len))
		len = left;

	if (splice_grow_spd(pipe, &spd))
		return -ENOMEM;

	index = *ppos >> PAGE_CACHE_SHIFT;
	loff = *ppos & ~PAGE_CACHE_MASK;
	req_pages = (len + loff + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
	nr_pages = min(req_pages, pipe->buffers);

	spd.nr_pages = find_get_pages_contig(mapping, index,
						nr_pages, spd.pages);
	index += spd.nr_pages;
	error = 0;

	while (spd.nr_pages < nr_pages) {
		error = shmem_getpage(inode, index, &page, SGP_CACHE, NULL);
		if (error)
			break;
		unlock_page(page);
		spd.pages[spd.nr_pages++] = page;
		index++;
	}

	index = *ppos >> PAGE_CACHE_SHIFT;
	nr_pages = spd.nr_pages;
	spd.nr_pages = 0;

	for (page_nr = 0; page_nr < nr_pages; page_nr++) {
		unsigned int this_len;

		if (!len)
			break;

		this_len = min_t(unsigned long, len, PAGE_CACHE_SIZE - loff);
		page = spd.pages[page_nr];

		if (!PageUptodate(page) || page->mapping != mapping) {
			error = shmem_getpage(inode, index, &page,
							SGP_CACHE, NULL);
			if (error)
				break;
			unlock_page(page);
			page_cache_release(spd.pages[page_nr]);
			spd.pages[page_nr] = page;
		}

		isize = i_size_read(inode);
		end_index = (isize - 1) >> PAGE_CACHE_SHIFT;
		if (unlikely(!isize || index > end_index))
			break;

		if (end_index == index) {
			unsigned int plen;

			plen = ((isize - 1) & ~PAGE_CACHE_MASK) + 1;
			if (plen <= loff)
				break;

			this_len = min(this_len, plen - loff);
			len = this_len;
		}

		spd.partial[page_nr].offset = loff;
		spd.partial[page_nr].len = this_len;
		len -= this_len;
		loff = 0;
		spd.nr_pages++;
		index++;
	}

	while (page_nr < nr_pages)
		page_cache_release(spd.pages[page_nr++]);

	if (spd.nr_pages)
		error = splice_to_pipe(pipe, &spd);

	splice_shrink_spd(&spd);

	if (error > 0) {
		*ppos += error;
		file_accessed(in);
	}
	return error;
}
static int shmem_statfs(struct dentry *dentry, struct kstatfs *buf)
{
	struct shmem_sb_info *sbinfo = SHMEM_SB(dentry->d_sb);

	buf->f_type = TMPFS_MAGIC;
	buf->f_bsize = PAGE_CACHE_SIZE;
	buf->f_namelen = NAME_MAX;
	if (sbinfo->max_blocks) {
		buf->f_blocks = sbinfo->max_blocks;
		buf->f_bavail =
		buf->f_bfree  = sbinfo->max_blocks -
				percpu_counter_sum(&sbinfo->used_blocks);
	}
	if (sbinfo->max_inodes) {
		buf->f_files = sbinfo->max_inodes;
		buf->f_ffree = sbinfo->free_inodes;
	}
	/* else leave those fields 0 like simple_statfs */
	return 0;
}
/*
 * File creation. Allocate an inode, and we're done..
 */
static int
shmem_mknod(struct inode *dir, struct dentry *dentry, int mode, dev_t dev)
{
	struct inode *inode;
	int error = -ENOSPC;

	inode = shmem_get_inode(dir->i_sb, dir, mode, dev, VM_NORESERVE);
	if (inode) {
		error = security_inode_init_security(inode, dir,
						     &dentry->d_name,
						     NULL, NULL);
		if (error) {
			if (error != -EOPNOTSUPP) {
				iput(inode);
				return error;
			}
		}
#ifdef CONFIG_TMPFS_POSIX_ACL
		error = generic_acl_init(inode, dir);
		if (error) {
			iput(inode);
			return error;
		}
#else
		error = 0;
#endif
		dir->i_size += BOGO_DIRENT_SIZE;
		dir->i_ctime = dir->i_mtime = CURRENT_TIME;
		d_instantiate(dentry, inode);
		dget(dentry); /* Extra count - pin the dentry in core */
	}
	return error;
}
static int shmem_mkdir(struct inode *dir, struct dentry *dentry, int mode)
{
	int error;

	if ((error = shmem_mknod(dir, dentry, mode | S_IFDIR, 0)))
		return error;
	inc_nlink(dir);
	return 0;
}

static int shmem_create(struct inode *dir, struct dentry *dentry, int mode,
		struct nameidata *nd)
{
	return shmem_mknod(dir, dentry, mode | S_IFREG, 0);
}
static int shmem_link(struct dentry *old_dentry, struct inode *dir, struct dentry *dentry)
{
	struct inode *inode = old_dentry->d_inode;
	int ret;

	/*
	 * No ordinary (disk based) filesystem counts links as inodes;
	 * but each new link needs a new dentry, pinning lowmem, and
	 * tmpfs dentries cannot be pruned until they are unlinked.
	 */
	ret = shmem_reserve_inode(inode->i_sb);
	if (ret)
		goto out;

	dir->i_size += BOGO_DIRENT_SIZE;
	inode->i_ctime = dir->i_ctime = dir->i_mtime = CURRENT_TIME;
	inc_nlink(inode);
	ihold(inode);	/* New dentry reference */
	dget(dentry);	/* Extra pinning count for the created dentry */
	d_instantiate(dentry, inode);
out:
	return ret;
}
static int shmem_unlink(struct inode *dir, struct dentry *dentry)
{
	struct inode *inode = dentry->d_inode;

	if (inode->i_nlink > 1 && !S_ISDIR(inode->i_mode))
		shmem_free_inode(inode->i_sb);

	dir->i_size -= BOGO_DIRENT_SIZE;
	inode->i_ctime = dir->i_ctime = dir->i_mtime = CURRENT_TIME;
	drop_nlink(inode);
	dput(dentry);	/* Undo the count from "create" - this does all the work */
	return 0;
}
static int shmem_rmdir(struct inode *dir, struct dentry *dentry)
{
	if (!simple_empty(dentry))
		return -ENOTEMPTY;

	drop_nlink(dentry->d_inode);
	drop_nlink(dir);
	return shmem_unlink(dir, dentry);
}
/*
 * The VFS layer already does all the dentry stuff for rename,
 * we just have to decrement the usage count for the target if
 * it exists so that the VFS layer correctly frees it when it
 * gets overwritten.
 */
static int shmem_rename(struct inode *old_dir, struct dentry *old_dentry, struct inode *new_dir, struct dentry *new_dentry)
{
	struct inode *inode = old_dentry->d_inode;
	int they_are_dirs = S_ISDIR(inode->i_mode);

	if (!simple_empty(new_dentry))
		return -ENOTEMPTY;

	if (new_dentry->d_inode) {
		(void) shmem_unlink(new_dir, new_dentry);
		if (they_are_dirs) {
			drop_nlink(new_dentry->d_inode);
			drop_nlink(old_dir);
		}
	} else if (they_are_dirs) {
		drop_nlink(old_dir);
		inc_nlink(new_dir);
	}

	old_dir->i_size -= BOGO_DIRENT_SIZE;
	new_dir->i_size += BOGO_DIRENT_SIZE;
	old_dir->i_ctime = old_dir->i_mtime =
	new_dir->i_ctime = new_dir->i_mtime =
	inode->i_ctime = CURRENT_TIME;
	return 0;
}
static int shmem_symlink(struct inode *dir, struct dentry *dentry, const char *symname)
{
	int error;
	int len;
	struct inode *inode;
	struct page *page;
	char *kaddr;
	struct shmem_inode_info *info;

	len = strlen(symname) + 1;
	if (len > PAGE_CACHE_SIZE)
		return -ENAMETOOLONG;

	inode = shmem_get_inode(dir->i_sb, dir, S_IFLNK|S_IRWXUGO, 0, VM_NORESERVE);
	if (!inode)
		return -ENOSPC;

	error = security_inode_init_security(inode, dir, &dentry->d_name,
					     NULL, NULL);
	if (error) {
		if (error != -EOPNOTSUPP) {
			iput(inode);
			return error;
		}
		error = 0;
	}

	info = SHMEM_I(inode);
	inode->i_size = len-1;
	if (len <= SHORT_SYMLINK_LEN) {
		info->symlink = kmemdup(symname, len, GFP_KERNEL);
		if (!info->symlink) {
			iput(inode);
			return -ENOMEM;
		}
		inode->i_op = &shmem_short_symlink_operations;
	} else {
		error = shmem_getpage(inode, 0, &page, SGP_WRITE, NULL);
		if (error) {
			iput(inode);
			return error;
		}
		inode->i_mapping->a_ops = &shmem_aops;
		inode->i_op = &shmem_symlink_inode_operations;
		kaddr = kmap_atomic(page, KM_USER0);
		memcpy(kaddr, symname, len);
		kunmap_atomic(kaddr, KM_USER0);
		set_page_dirty(page);
		unlock_page(page);
		page_cache_release(page);
	}
	dir->i_size += BOGO_DIRENT_SIZE;
	dir->i_ctime = dir->i_mtime = CURRENT_TIME;
	d_instantiate(dentry, inode);
	dget(dentry);
	return 0;
}
static void *shmem_follow_short_symlink(struct dentry *dentry, struct nameidata *nd)
{
	nd_set_link(nd, SHMEM_I(dentry->d_inode)->symlink);
	return NULL;
}

static void *shmem_follow_link(struct dentry *dentry, struct nameidata *nd)
{
	struct page *page = NULL;
	int error = shmem_getpage(dentry->d_inode, 0, &page, SGP_READ, NULL);
	nd_set_link(nd, error ? ERR_PTR(error) : kmap(page));
	if (page)
		unlock_page(page);
	return page;
}

static void shmem_put_link(struct dentry *dentry, struct nameidata *nd, void *cookie)
{
	if (!IS_ERR(nd_get_link(nd))) {
		struct page *page = cookie;
		kunmap(page);
		mark_page_accessed(page);
		page_cache_release(page);
	}
}
#ifdef CONFIG_TMPFS_XATTR
/*
 * Superblocks without xattr inode operations may get some security.* xattr
 * support from the LSM "for free". As soon as we have any other xattrs
 * like ACLs, we also need to implement the security.* handlers at
 * filesystem level, though.
 */

static int shmem_xattr_get(struct dentry *dentry, const char *name,
			   void *buffer, size_t size)
{
	struct shmem_inode_info *info;
	struct shmem_xattr *xattr;
	int ret = -ENODATA;

	info = SHMEM_I(dentry->d_inode);

	spin_lock(&info->lock);
	list_for_each_entry(xattr, &info->xattr_list, list) {
		if (strcmp(name, xattr->name))
			continue;

		ret = xattr->size;
		if (buffer) {
			if (size < xattr->size)
				ret = -ERANGE;
			else
				memcpy(buffer, xattr->value, xattr->size);
		}
		break;
	}
	spin_unlock(&info->lock);
	return ret;
}
static int shmem_xattr_set(struct dentry *dentry, const char *name,
			   const void *value, size_t size, int flags)
{
	struct inode *inode = dentry->d_inode;
	struct shmem_inode_info *info = SHMEM_I(inode);
	struct shmem_xattr *xattr;
	struct shmem_xattr *new_xattr = NULL;
	size_t len;
	int err = 0;

	/* value == NULL means remove */
	if (value) {
		/* wrap around? */
		len = sizeof(*new_xattr) + size;
		if (len <= sizeof(*new_xattr))
			return -ENOMEM;

		new_xattr = kmalloc(len, GFP_KERNEL);
		if (!new_xattr)
			return -ENOMEM;

		new_xattr->name = kstrdup(name, GFP_KERNEL);
		if (!new_xattr->name) {
			kfree(new_xattr);
			return -ENOMEM;
		}

		new_xattr->size = size;
		memcpy(new_xattr->value, value, size);
	}

	spin_lock(&info->lock);
	list_for_each_entry(xattr, &info->xattr_list, list) {
		if (!strcmp(name, xattr->name)) {
			if (flags & XATTR_CREATE) {
				xattr = new_xattr;
				err = -EEXIST;
			} else if (new_xattr) {
				list_replace(&xattr->list, &new_xattr->list);
			} else {
				list_del(&xattr->list);
			}
			goto out;
		}
	}
	if (flags & XATTR_REPLACE) {
		xattr = new_xattr;
		err = -ENODATA;
	} else {
		list_add(&new_xattr->list, &info->xattr_list);
		xattr = NULL;
	}
out:
	spin_unlock(&info->lock);
	if (xattr)
		kfree(xattr->name);
	kfree(xattr);
	return err;
}
static const struct xattr_handler *shmem_xattr_handlers[] = {
#ifdef CONFIG_TMPFS_POSIX_ACL
	&generic_acl_access_handler,
	&generic_acl_default_handler,
#endif
	NULL
};
static int shmem_xattr_validate(const char *name)
{
	struct { const char *prefix; size_t len; } arr[] = {
		{ XATTR_SECURITY_PREFIX, XATTR_SECURITY_PREFIX_LEN },
		{ XATTR_TRUSTED_PREFIX, XATTR_TRUSTED_PREFIX_LEN }
	};
	int i;

	for (i = 0; i < ARRAY_SIZE(arr); i++) {
		size_t preflen = arr[i].len;
		if (strncmp(name, arr[i].prefix, preflen) == 0) {
			if (!name[preflen])
				return -EINVAL;
			return 0;
		}
	}
	return -EOPNOTSUPP;
}
static ssize_t shmem_getxattr(struct dentry *dentry, const char *name,
			      void *buffer, size_t size)
{
	int err;

	/*
	 * If this is a request for a synthetic attribute in the system.*
	 * namespace use the generic infrastructure to resolve a handler
	 * for it via sb->s_xattr.
	 */
	if (!strncmp(name, XATTR_SYSTEM_PREFIX, XATTR_SYSTEM_PREFIX_LEN))
		return generic_getxattr(dentry, name, buffer, size);

	err = shmem_xattr_validate(name);
	if (err)
		return err;

	return shmem_xattr_get(dentry, name, buffer, size);
}

static int shmem_setxattr(struct dentry *dentry, const char *name,
			  const void *value, size_t size, int flags)
{
	int err;

	/*
	 * If this is a request for a synthetic attribute in the system.*
	 * namespace use the generic infrastructure to resolve a handler
	 * for it via sb->s_xattr.
	 */
	if (!strncmp(name, XATTR_SYSTEM_PREFIX, XATTR_SYSTEM_PREFIX_LEN))
		return generic_setxattr(dentry, name, value, size, flags);

	err = shmem_xattr_validate(name);
	if (err)
		return err;

	if (size == 0)
		value = "";	/* empty EA, do not remove */

	return shmem_xattr_set(dentry, name, value, size, flags);
}

static int shmem_removexattr(struct dentry *dentry, const char *name)
{
	int err;

	/*
	 * If this is a request for a synthetic attribute in the system.*
	 * namespace use the generic infrastructure to resolve a handler
	 * for it via sb->s_xattr.
	 */
	if (!strncmp(name, XATTR_SYSTEM_PREFIX, XATTR_SYSTEM_PREFIX_LEN))
		return generic_removexattr(dentry, name);

	err = shmem_xattr_validate(name);
	if (err)
		return err;

	return shmem_xattr_set(dentry, name, NULL, 0, XATTR_REPLACE);
}
static bool xattr_is_trusted(const char *name)
{
	return !strncmp(name, XATTR_TRUSTED_PREFIX, XATTR_TRUSTED_PREFIX_LEN);
}

static ssize_t shmem_listxattr(struct dentry *dentry, char *buffer, size_t size)
{
	bool trusted = capable(CAP_SYS_ADMIN);
	struct shmem_xattr *xattr;
	struct shmem_inode_info *info;
	size_t used = 0;

	info = SHMEM_I(dentry->d_inode);

	spin_lock(&info->lock);
	list_for_each_entry(xattr, &info->xattr_list, list) {
		size_t len;

		/* skip "trusted." attributes for unprivileged callers */
		if (!trusted && xattr_is_trusted(xattr->name))
			continue;

		len = strlen(xattr->name) + 1;
		used += len;
		if (buffer) {
			if (size < used) {
				used = -ERANGE;
				break;
			}
			memcpy(buffer, xattr->name, len);
			buffer += len;
		}
	}
	spin_unlock(&info->lock);

	return used;
}
#endif /* CONFIG_TMPFS_XATTR */
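
/*
 * Editor's illustrative userspace view of the handlers above (assumption,
 * not part of the original source): only security.*, trusted.* and
 * system.* names pass shmem_xattr_validate() or the generic layer, and
 * listing hides trusted.* from callers without CAP_SYS_ADMIN, so on a
 * tmpfs file:
 *
 *	setxattr(path, "trusted.foo", "bar", 3, 0);  -- root only
 *	setxattr(path, "user.foo", "bar", 3, 0);     -- fails, -EOPNOTSUPP
 *	listxattr(path, buf, sizeof(buf));           -- omits trusted.*
 *							for non-root
 */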
static const struct inode_operations shmem_short_symlink_operations = {
	.readlink	= generic_readlink,
	.follow_link	= shmem_follow_short_symlink,
#ifdef CONFIG_TMPFS_XATTR
	.setxattr	= shmem_setxattr,
	.getxattr	= shmem_getxattr,
	.listxattr	= shmem_listxattr,
	.removexattr	= shmem_removexattr,
#endif
};

static const struct inode_operations shmem_symlink_inode_operations = {
	.readlink	= generic_readlink,
	.follow_link	= shmem_follow_link,
	.put_link	= shmem_put_link,
#ifdef CONFIG_TMPFS_XATTR
	.setxattr	= shmem_setxattr,
	.getxattr	= shmem_getxattr,
	.listxattr	= shmem_listxattr,
	.removexattr	= shmem_removexattr,
#endif
};
static struct dentry *shmem_get_parent(struct dentry *child)
{
	return ERR_PTR(-ESTALE);
}

static int shmem_match(struct inode *ino, void *vfh)
{
	__u32 *fh = vfh;
	__u64 inum = fh[2];
	inum = (inum << 32) | fh[1];
	return ino->i_ino == inum && fh[0] == ino->i_generation;
}

static struct dentry *shmem_fh_to_dentry(struct super_block *sb,
		struct fid *fid, int fh_len, int fh_type)
{
	struct inode *inode;
	struct dentry *dentry = NULL;
	u64 inum = fid->raw[2];
	inum = (inum << 32) | fid->raw[1];

	if (fh_len < 3)
		return NULL;

	inode = ilookup5(sb, (unsigned long)(inum + fid->raw[0]),
			shmem_match, fid->raw);
	if (inode) {
		dentry = d_find_alias(inode);
		iput(inode);
	}

	return dentry;
}
static int shmem_encode_fh(struct dentry *dentry, __u32 *fh, int *len,
				int connectable)
{
	struct inode *inode = dentry->d_inode;

	if (*len < 3) {
		*len = 3;
		return 255;
	}

	if (inode_unhashed(inode)) {
		/* Unfortunately insert_inode_hash is not idempotent,
		 * so as we hash inodes here rather than at creation
		 * time, we need a lock to ensure we only try
		 * to do it once
		 */
		static DEFINE_SPINLOCK(lock);
		spin_lock(&lock);
		if (inode_unhashed(inode))
			__insert_inode_hash(inode,
					    inode->i_ino + inode->i_generation);
		spin_unlock(&lock);
	}

	fh[0] = inode->i_generation;
	fh[1] = inode->i_ino;
	fh[2] = ((__u64)inode->i_ino) >> 32;

	*len = 3;
	return 1;
}

static const struct export_operations shmem_export_ops = {
	.get_parent	= shmem_get_parent,
	.encode_fh	= shmem_encode_fh,
	.fh_to_dentry	= shmem_fh_to_dentry,
};
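
/*
 * Editor's worked example of the file-handle layout above (illustrative,
 * not from the original source): an inode with i_ino 0x123456789 and
 * i_generation 7 encodes as fh[0] = 7, fh[1] = 0x23456789 (low 32 bits)
 * and fh[2] = 0x1 (high bits); shmem_match() reassembles
 * inum = (fh[2] << 32) | fh[1] and checks both the inode number and the
 * generation before accepting the handle.
 */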
static int shmem_parse_options(char *options, struct shmem_sb_info *sbinfo,
			       bool remount)
{
	char *this_char, *value, *rest;

	while (options != NULL) {
		this_char = options;
		for (;;) {
			/*
			 * NUL-terminate this option: unfortunately,
			 * mount options form a comma-separated list,
			 * but mpol's nodelist may also contain commas.
			 */
			options = strchr(options, ',');
			if (options == NULL)
				break;
			options++;
			if (!isdigit(*options)) {
				options[-1] = '\0';
				break;
			}
		}
		if (!*this_char)
			continue;
		if ((value = strchr(this_char,'=')) != NULL) {
			*value++ = 0;
		} else {
			printk(KERN_ERR
			    "tmpfs: No value for mount option '%s'\n",
			    this_char);
			return 1;
		}

		if (!strcmp(this_char,"size")) {
			unsigned long long size;
			size = memparse(value,&rest);
			if (*rest == '%') {
				size <<= PAGE_SHIFT;
				size *= totalram_pages;
				do_div(size, 100);
				rest++;
			}
			if (*rest)
				goto bad_val;
			sbinfo->max_blocks =
				DIV_ROUND_UP(size, PAGE_CACHE_SIZE);
		} else if (!strcmp(this_char,"nr_blocks")) {
			sbinfo->max_blocks = memparse(value, &rest);
			if (*rest)
				goto bad_val;
		} else if (!strcmp(this_char,"nr_inodes")) {
			sbinfo->max_inodes = memparse(value, &rest);
			if (*rest)
				goto bad_val;
		} else if (!strcmp(this_char,"mode")) {
			if (remount)
				continue;
			sbinfo->mode = simple_strtoul(value, &rest, 8) & 07777;
			if (*rest)
				goto bad_val;
		} else if (!strcmp(this_char,"uid")) {
			if (remount)
				continue;
			sbinfo->uid = simple_strtoul(value, &rest, 0);
			if (*rest)
				goto bad_val;
		} else if (!strcmp(this_char,"gid")) {
			if (remount)
				continue;
			sbinfo->gid = simple_strtoul(value, &rest, 0);
			if (*rest)
				goto bad_val;
		} else if (!strcmp(this_char,"mpol")) {
			if (mpol_parse_str(value, &sbinfo->mpol, 1))
				goto bad_val;
		} else {
			printk(KERN_ERR "tmpfs: Bad mount option %s\n",
			       this_char);
			return 1;
		}
	}
	return 0;

bad_val:
	printk(KERN_ERR "tmpfs: Bad value '%s' for mount option '%s'\n",
	       value, this_char);
	return 1;
}
static int shmem_remount_fs(struct super_block *sb, int *flags, char *data)
{
	struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
	struct shmem_sb_info config = *sbinfo;
	unsigned long inodes;
	int error = -EINVAL;

	if (shmem_parse_options(data, &config, true))
		return error;

	spin_lock(&sbinfo->stat_lock);
	inodes = sbinfo->max_inodes - sbinfo->free_inodes;
	if (percpu_counter_compare(&sbinfo->used_blocks, config.max_blocks) > 0)
		goto out;
	if (config.max_inodes < inodes)
		goto out;
	/*
	 * Those tests disallow limited->unlimited while any are in use;
	 * but we must separately disallow unlimited->limited, because
	 * in that case we have no record of how much is already in use.
	 */
	if (config.max_blocks && !sbinfo->max_blocks)
		goto out;
	if (config.max_inodes && !sbinfo->max_inodes)
		goto out;

	error = 0;
	sbinfo->max_blocks  = config.max_blocks;
	sbinfo->max_inodes  = config.max_inodes;
	sbinfo->free_inodes = config.max_inodes - inodes;

	/*
	 * Preserve previous mempolicy unless mpol remount option was specified.
	 */
	if (config.mpol) {
		mpol_put(sbinfo->mpol);
		sbinfo->mpol = config.mpol;	/* transfers initial ref */
	}
out:
	spin_unlock(&sbinfo->stat_lock);
	return error;
}
static int shmem_show_options(struct seq_file *seq, struct vfsmount *vfs)
{
	struct shmem_sb_info *sbinfo = SHMEM_SB(vfs->mnt_sb);

	if (sbinfo->max_blocks != shmem_default_max_blocks())
		seq_printf(seq, ",size=%luk",
			sbinfo->max_blocks << (PAGE_CACHE_SHIFT - 10));
	if (sbinfo->max_inodes != shmem_default_max_inodes())
		seq_printf(seq, ",nr_inodes=%lu", sbinfo->max_inodes);
	if (sbinfo->mode != (S_IRWXUGO | S_ISVTX))
		seq_printf(seq, ",mode=%03o", sbinfo->mode);
	if (sbinfo->uid != 0)
		seq_printf(seq, ",uid=%u", sbinfo->uid);
	if (sbinfo->gid != 0)
		seq_printf(seq, ",gid=%u", sbinfo->gid);
	shmem_show_mpol(seq, sbinfo->mpol);
	return 0;
}
#endif /* CONFIG_TMPFS */
static void shmem_put_super(struct super_block *sb)
{
	struct shmem_sb_info *sbinfo = SHMEM_SB(sb);

	percpu_counter_destroy(&sbinfo->used_blocks);
	kfree(sbinfo);
	sb->s_fs_info = NULL;
}
int shmem_fill_super(struct super_block *sb, void *data, int silent)
{
	struct inode *inode;
	struct dentry *root;
	struct shmem_sb_info *sbinfo;
	int err = -ENOMEM;

	/* Round up to L1_CACHE_BYTES to resist false sharing */
	sbinfo = kzalloc(max((int)sizeof(struct shmem_sb_info),
				L1_CACHE_BYTES), GFP_KERNEL);
	if (!sbinfo)
		return -ENOMEM;

	sbinfo->mode = S_IRWXUGO | S_ISVTX;
	sbinfo->uid = current_fsuid();
	sbinfo->gid = current_fsgid();
	sb->s_fs_info = sbinfo;

#ifdef CONFIG_TMPFS
	/*
	 * Per default we only allow half of the physical ram per
	 * tmpfs instance, limiting inodes to one per page of lowmem;
	 * but the internal instance is left unlimited.
	 */
	if (!(sb->s_flags & MS_NOUSER)) {
		sbinfo->max_blocks = shmem_default_max_blocks();
		sbinfo->max_inodes = shmem_default_max_inodes();
		if (shmem_parse_options(data, sbinfo, false)) {
			err = -EINVAL;
			goto failed;
		}
	}
	sb->s_export_op = &shmem_export_ops;
#else
	sb->s_flags |= MS_NOUSER;
#endif

	spin_lock_init(&sbinfo->stat_lock);
	if (percpu_counter_init(&sbinfo->used_blocks, 0))
		goto failed;
	sbinfo->free_inodes = sbinfo->max_inodes;

	sb->s_maxbytes = MAX_LFS_FILESIZE;
	sb->s_blocksize = PAGE_CACHE_SIZE;
	sb->s_blocksize_bits = PAGE_CACHE_SHIFT;
	sb->s_magic = TMPFS_MAGIC;
	sb->s_op = &shmem_ops;
	sb->s_time_gran = 1;
#ifdef CONFIG_TMPFS_XATTR
	sb->s_xattr = shmem_xattr_handlers;
#endif
#ifdef CONFIG_TMPFS_POSIX_ACL
	sb->s_flags |= MS_POSIXACL;
#endif

	inode = shmem_get_inode(sb, NULL, S_IFDIR | sbinfo->mode, 0, VM_NORESERVE);
	if (!inode)
		goto failed;
	inode->i_uid = sbinfo->uid;
	inode->i_gid = sbinfo->gid;
	root = d_alloc_root(inode);
	if (!root)
		goto failed_iput;
	sb->s_root = root;
	return 0;

failed_iput:
	iput(inode);
failed:
	shmem_put_super(sb);
	return err;
}
static struct kmem_cache *shmem_inode_cachep;

static struct inode *shmem_alloc_inode(struct super_block *sb)
{
	struct shmem_inode_info *info;
	info = kmem_cache_alloc(shmem_inode_cachep, GFP_KERNEL);
	if (!info)
		return NULL;
	return &info->vfs_inode;
}

static void shmem_destroy_callback(struct rcu_head *head)
{
	struct inode *inode = container_of(head, struct inode, i_rcu);
	INIT_LIST_HEAD(&inode->i_dentry);
	kmem_cache_free(shmem_inode_cachep, SHMEM_I(inode));
}

static void shmem_destroy_inode(struct inode *inode)
{
	if ((inode->i_mode & S_IFMT) == S_IFREG)
		mpol_free_shared_policy(&SHMEM_I(inode)->policy);
	call_rcu(&inode->i_rcu, shmem_destroy_callback);
}

static void shmem_init_inode(void *foo)
{
	struct shmem_inode_info *info = foo;
	inode_init_once(&info->vfs_inode);
}
static int shmem_init_inodecache(void)
{
	shmem_inode_cachep = kmem_cache_create("shmem_inode_cache",
				sizeof(struct shmem_inode_info),
				0, SLAB_PANIC, shmem_init_inode);
	return 0;
}
static void shmem_destroy_inodecache(void)
{
	kmem_cache_destroy(shmem_inode_cachep);
}
static const struct address_space_operations shmem_aops = {
	.writepage	= shmem_writepage,
	.set_page_dirty	= __set_page_dirty_no_writeback,
#ifdef CONFIG_TMPFS
	.write_begin	= shmem_write_begin,
	.write_end	= shmem_write_end,
#endif
	.migratepage	= migrate_page,
	.error_remove_page = generic_error_remove_page,
};
static const struct file_operations shmem_file_operations = {
	.mmap		= shmem_mmap,
#ifdef CONFIG_TMPFS
	.llseek		= generic_file_llseek,
	.read		= do_sync_read,
	.write		= do_sync_write,
	.aio_read	= shmem_file_aio_read,
	.aio_write	= generic_file_aio_write,
	.fsync		= noop_fsync,
	.splice_read	= shmem_file_splice_read,
	.splice_write	= generic_file_splice_write,
	.fallocate	= shmem_fallocate,
#endif
};
static const struct inode_operations shmem_inode_operations = {
	.setattr	= shmem_setattr,
	.truncate_range	= shmem_truncate_range,
#ifdef CONFIG_TMPFS_XATTR
	.setxattr	= shmem_setxattr,
	.getxattr	= shmem_getxattr,
	.listxattr	= shmem_listxattr,
	.removexattr	= shmem_removexattr,
#endif
};
static const struct inode_operations shmem_dir_inode_operations = {
#ifdef CONFIG_TMPFS
	.create		= shmem_create,
	.lookup		= simple_lookup,
	.link		= shmem_link,
	.unlink		= shmem_unlink,
	.symlink	= shmem_symlink,
	.mkdir		= shmem_mkdir,
	.rmdir		= shmem_rmdir,
	.mknod		= shmem_mknod,
	.rename		= shmem_rename,
#endif
#ifdef CONFIG_TMPFS_XATTR
	.setxattr	= shmem_setxattr,
	.getxattr	= shmem_getxattr,
	.listxattr	= shmem_listxattr,
	.removexattr	= shmem_removexattr,
#endif
#ifdef CONFIG_TMPFS_POSIX_ACL
	.setattr	= shmem_setattr,
#endif
};
static const struct inode_operations shmem_special_inode_operations = {
#ifdef CONFIG_TMPFS_XATTR
	.setxattr	= shmem_setxattr,
	.getxattr	= shmem_getxattr,
	.listxattr	= shmem_listxattr,
	.removexattr	= shmem_removexattr,
#endif
#ifdef CONFIG_TMPFS_POSIX_ACL
	.setattr	= shmem_setattr,
#endif
};
static const struct super_operations shmem_ops = {
	.alloc_inode	= shmem_alloc_inode,
	.destroy_inode	= shmem_destroy_inode,
#ifdef CONFIG_TMPFS
	.statfs		= shmem_statfs,
	.remount_fs	= shmem_remount_fs,
	.show_options	= shmem_show_options,
#endif
	.evict_inode	= shmem_evict_inode,
	.drop_inode	= generic_delete_inode,
	.put_super	= shmem_put_super,
};
static const struct vm_operations_struct shmem_vm_ops = {
	.fault		= shmem_fault,
#ifdef CONFIG_NUMA
	.set_policy	= shmem_set_policy,
	.get_policy	= shmem_get_policy,
#endif
};
static struct dentry *shmem_mount(struct file_system_type *fs_type,
	int flags, const char *dev_name, void *data)
{
	return mount_nodev(fs_type, flags, data, shmem_fill_super);
}
static struct file_system_type shmem_fs_type = {
	.owner		= THIS_MODULE,
	.name		= "tmpfs",
	.mount		= shmem_mount,
	.kill_sb	= kill_litter_super,
};
int __init shmem_init(void)
{
	int error;

	error = bdi_init(&shmem_backing_dev_info);
	if (error)
		goto out4;

	error = shmem_init_inodecache();
	if (error)
		goto out3;

	error = register_filesystem(&shmem_fs_type);
	if (error) {
		printk(KERN_ERR "Could not register tmpfs\n");
		goto out2;
	}

	shm_mnt = vfs_kern_mount(&shmem_fs_type, MS_NOUSER,
				 shmem_fs_type.name, NULL);
	if (IS_ERR(shm_mnt)) {
		error = PTR_ERR(shm_mnt);
		printk(KERN_ERR "Could not kern_mount tmpfs\n");
		goto out1;
	}
	return 0;

out1:
	unregister_filesystem(&shmem_fs_type);
out2:
	shmem_destroy_inodecache();
out3:
	bdi_destroy(&shmem_backing_dev_info);
out4:
	shm_mnt = ERR_PTR(error);
	return error;
}
#else /* !CONFIG_SHMEM */

/*
 * tiny-shmem: simple shmemfs and tmpfs using ramfs code
 *
 * This is intended for small systems where the benefits of the full
 * shmem code (swap-backed and resource-limited) are outweighed by
 * their complexity. On systems without swap this code should be
 * effectively equivalent, but much lighter weight.
 */
#include <linux/ramfs.h>
static struct file_system_type shmem_fs_type = {
	.name		= "tmpfs",
	.mount		= ramfs_mount,
	.kill_sb	= kill_litter_super,
};
int __init shmem_init(void)
{
	BUG_ON(register_filesystem(&shmem_fs_type) != 0);

	shm_mnt = kern_mount(&shmem_fs_type);
	BUG_ON(IS_ERR(shm_mnt));

	return 0;
}
int shmem_unuse(swp_entry_t swap, struct page *page)
{
	return 0;
}
int shmem_lock(struct file *file, int lock, struct user_struct *user)
{
	return 0;
}
void shmem_unlock_mapping(struct address_space *mapping)
{
}
void shmem_truncate_range(struct inode *inode, loff_t lstart, loff_t lend)
{
	truncate_inode_pages_range(inode->i_mapping, lstart, lend);
}
EXPORT_SYMBOL_GPL(shmem_truncate_range);
int vmtruncate_range(struct inode *inode, loff_t lstart, loff_t lend)
{
	/* Only CONFIG_SHMEM shmem.c ever supported i_op->truncate_range(). */
	return -ENOSYS;
}
#define shmem_vm_ops				generic_file_vm_ops
#define shmem_file_operations			ramfs_file_operations
#define shmem_get_inode(sb, dir, mode, dev, flags)	ramfs_get_inode(sb, dir, mode, dev)
#define shmem_acct_size(flags, size)		0
#define shmem_unacct_size(flags, size)		do {} while (0)

#endif /* CONFIG_SHMEM */
/**
 * shmem_file_setup - get an unlinked file living in tmpfs
 * @name: name for dentry (to be seen in /proc/<pid>/maps)
 * @size: size to be set for the file
 * @flags: VM_NORESERVE suppresses pre-accounting of the entire object size
 */
struct file *shmem_file_setup(const char *name, loff_t size, unsigned long flags)
{
	int error;
	struct file *file;
	struct inode *inode;
	struct path path;
	struct dentry *root;
	struct qstr this;

	if (IS_ERR(shm_mnt))
		return (void *)shm_mnt;

	if (size < 0 || size > MAX_LFS_FILESIZE)
		return ERR_PTR(-EINVAL);

	if (shmem_acct_size(flags, size))
		return ERR_PTR(-ENOMEM);

	error = -ENOMEM;
	this.name = name;
	this.len = strlen(name);
	this.hash = 0; /* will go */
	root = shm_mnt->mnt_root;
	path.dentry = d_alloc(root, &this);
	if (!path.dentry)
		goto put_memory;
	path.mnt = mntget(shm_mnt);

	error = -ENOSPC;
	inode = shmem_get_inode(root->d_sb, NULL, S_IFREG | S_IRWXUGO, 0, flags);
	if (!inode)
		goto put_dentry;

	d_instantiate(path.dentry, inode);
	inode->i_size = size;
	clear_nlink(inode);	/* It is unlinked */
#ifndef CONFIG_MMU
	error = ramfs_nommu_expand_for_mapping(inode, size);
	if (error)
		goto put_dentry;
#endif

	error = -ENFILE;
	file = alloc_file(&path, FMODE_WRITE | FMODE_READ,
		  &shmem_file_operations);
	if (!file)
		goto put_dentry;

	return file;

put_dentry:
	path_put(&path);
put_memory:
	shmem_unacct_size(flags, size);
	return ERR_PTR(error);
}
EXPORT_SYMBOL_GPL(shmem_file_setup);
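/*
 * Example caller (a sketch under assumptions, not part of this file):
 * a driver wanting an unlinked, swap-backed scratch object could call
 * shmem_file_setup() directly; my_create_scratch() is hypothetical.
 */
static struct file *my_create_scratch(loff_t size)
{
	/* VM_NORESERVE: skip up-front accounting of the whole object size */
	struct file *filp = shmem_file_setup("my-scratch", size, VM_NORESERVE);

	/* on failure this is an ERR_PTR, e.g. ERR_PTR(-ENOMEM) */
	return filp;
}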
/**
 * shmem_zero_setup - setup a shared anonymous mapping
 * @vma: the vma to be mmapped, as prepared by do_mmap_pgoff
 */
int shmem_zero_setup(struct vm_area_struct *vma)
{
	struct file *file;
	loff_t size = vma->vm_end - vma->vm_start;

	file = shmem_file_setup("dev/zero", size, vma->vm_flags);
	if (IS_ERR(file))
		return PTR_ERR(file);

	if (vma->vm_file)
		fput(vma->vm_file);
	vma->vm_file = file;
	vma->vm_ops = &shmem_vm_ops;
	vma->vm_flags |= VM_CAN_NONLINEAR;
	return 0;
}
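/*
 * Illustration (assumed typical path, not part of this file): a userspace
 * mapping such as
 *
 *	mmap(NULL, len, PROT_READ|PROT_WRITE, MAP_SHARED|MAP_ANONYMOUS, -1, 0);
 *
 * has no file of its own, so do_mmap_pgoff() calls shmem_zero_setup() to
 * back the shared anonymous region with the unlinked tmpfs file created
 * above ("dev/zero").
 */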
/**
 * shmem_read_mapping_page_gfp - read into page cache, using specified page allocation flags.
 * @mapping:	the page's address_space
 * @index:	the page index
 * @gfp:	the page allocator flags to use if allocating
 *
 * This behaves as a tmpfs "read_cache_page_gfp(mapping, index, gfp)",
 * with any new page allocations done using the specified allocation flags.
 * But read_cache_page_gfp() uses the ->readpage() method: which does not
 * suit tmpfs, since it may have pages in swapcache, and needs to find those
 * for itself; although drivers/gpu/drm i915 and ttm rely upon this support.
 *
 * i915_gem_object_get_pages_gtt() mixes __GFP_NORETRY | __GFP_NOWARN in
 * with the mapping_gfp_mask(), to avoid OOMing the machine unnecessarily.
 */
struct page *shmem_read_mapping_page_gfp(struct address_space *mapping,
					 pgoff_t index, gfp_t gfp)
{
#ifdef CONFIG_SHMEM
	struct inode *inode = mapping->host;
	struct page *page;
	int error;

	BUG_ON(mapping->a_ops != &shmem_aops);
	error = shmem_getpage_gfp(inode, index, &page, SGP_CACHE, gfp, NULL);
	if (error)
		page = ERR_PTR(error);
	else
		unlock_page(page);
	return page;
#else
	/*
	 * The tiny !SHMEM case uses ramfs without swap
	 */
	return read_cache_page_gfp(mapping, index, gfp);
#endif
}
EXPORT_SYMBOL_GPL(shmem_read_mapping_page_gfp);
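/*
 * Sketch of the kind of caller described in the comment above (hypothetical
 * helper, not part of this file): mix __GFP_NORETRY | __GFP_NOWARN into the
 * mapping's gfp mask so a failed allocation falls back gracefully instead
 * of OOMing the machine.
 */
static struct page *my_read_object_page(struct address_space *mapping,
					pgoff_t index)
{
	gfp_t gfp = mapping_gfp_mask(mapping) | __GFP_NORETRY | __GFP_NOWARN;

	return shmem_read_mapping_page_gfp(mapping, index, gfp);
}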