/*
 * (C) 1997 Linus Torvalds
 * (C) 1999 Andrea Arcangeli <andrea@suse.de> (dynamic inode allocation)
 */
#include <linux/export.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/backing-dev.h>
#include <linux/hash.h>
#include <linux/swap.h>
#include <linux/security.h>
#include <linux/cdev.h>
#include <linux/bootmem.h>
#include <linux/fsnotify.h>
#include <linux/mount.h>
#include <linux/posix_acl.h>
#include <linux/prefetch.h>
#include <linux/buffer_head.h> /* for inode_has_buffers */
#include <linux/ratelimit.h>
#include <linux/list_lru.h>
#include <trace/events/writeback.h>
#include "internal.h"
/*
 * Inode locking rules:
 *
 * inode->i_lock protects:
 *   inode->i_state, inode->i_hash, __iget()
 * Inode LRU list locks protect:
 *   inode->i_sb->s_inode_lru, inode->i_lru
 * inode_sb_list_lock protects:
 *   sb->s_inodes, inode->i_sb_list
 * bdi->wb.list_lock protects:
 *   bdi->wb.b_{dirty,io,more_io,dirty_time}, inode->i_wb_list
 * inode_hash_lock protects:
 *   inode_hashtable, inode->i_hash
 *
 * Lock ordering:
 *
 * inode_sb_list_lock
 *   inode->i_lock
 *     Inode LRU list locks
 *
 * bdi->wb.list_lock
 *   inode->i_lock
 *
 * inode_hash_lock
 *   inode_sb_list_lock
 *   inode->i_lock
 */
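/*
 * Example (illustrative sketch): respecting the ordering above, a hash
 * insertion takes inode_hash_lock before inode->i_lock, never the other
 * way around:
 *
 *	spin_lock(&inode_hash_lock);
 *	spin_lock(&inode->i_lock);
 *	hlist_add_head(&inode->i_hash, b);
 *	spin_unlock(&inode->i_lock);
 *	spin_unlock(&inode_hash_lock);
 *
 * This is exactly the pattern __insert_inode_hash() below follows.
 */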
static unsigned int i_hash_mask __read_mostly;
static unsigned int i_hash_shift __read_mostly;
static struct hlist_head *inode_hashtable __read_mostly;
static __cacheline_aligned_in_smp DEFINE_SPINLOCK(inode_hash_lock);

__cacheline_aligned_in_smp DEFINE_SPINLOCK(inode_sb_list_lock);
/*
 * Empty aops. Can be used for the cases where the user does not
 * define any of the address_space operations.
 */
const struct address_space_operations empty_aops = {
};
EXPORT_SYMBOL(empty_aops);
/*
 * Statistics gathering..
 */
struct inodes_stat_t inodes_stat;

static DEFINE_PER_CPU(unsigned long, nr_inodes);
static DEFINE_PER_CPU(unsigned long, nr_unused);

static struct kmem_cache *inode_cachep __read_mostly;
static long get_nr_inodes(void)
{
	int i;
	long sum = 0;

	for_each_possible_cpu(i)
		sum += per_cpu(nr_inodes, i);
	return sum < 0 ? 0 : sum;
}

static inline long get_nr_inodes_unused(void)
{
	int i;
	long sum = 0;

	for_each_possible_cpu(i)
		sum += per_cpu(nr_unused, i);
	return sum < 0 ? 0 : sum;
}
long get_nr_dirty_inodes(void)
{
	/* not actually dirty inodes, but a wild approximation */
	long nr_dirty = get_nr_inodes() - get_nr_inodes_unused();

	return nr_dirty > 0 ? nr_dirty : 0;
}
/*
 * Handle nr_inode sysctl
 */
#ifdef CONFIG_SYSCTL
int proc_nr_inodes(struct ctl_table *table, int write,
		   void __user *buffer, size_t *lenp, loff_t *ppos)
{
	inodes_stat.nr_inodes = get_nr_inodes();
	inodes_stat.nr_unused = get_nr_inodes_unused();
	return proc_doulongvec_minmax(table, write, buffer, lenp, ppos);
}
#endif
static int no_open(struct inode *inode, struct file *file)
{
	return -ENXIO;
}
/**
 * inode_init_always - perform inode structure initialisation
 * @sb: superblock inode belongs to
 * @inode: inode to initialise
 *
 * These are initializations that need to be done on every inode
 * allocation as the fields are not initialised by slab allocation.
 */
int inode_init_always(struct super_block *sb, struct inode *inode)
{
	static const struct inode_operations empty_iops;
	static const struct file_operations no_open_fops = {.open = no_open};
	struct address_space *const mapping = &inode->i_data;

	inode->i_sb = sb;
	inode->i_blkbits = sb->s_blocksize_bits;
	inode->i_flags = 0;
	atomic_set(&inode->i_count, 1);
	inode->i_op = &empty_iops;
	inode->i_fop = &no_open_fops;
	inode->__i_nlink = 1;
	inode->i_opflags = 0;
	i_uid_write(inode, 0);
	i_gid_write(inode, 0);
	atomic_set(&inode->i_writecount, 0);
	inode->i_size = 0;
	inode->i_blocks = 0;
	inode->i_bytes = 0;
	inode->i_generation = 0;
	inode->i_pipe = NULL;
	inode->i_bdev = NULL;
	inode->i_cdev = NULL;
	inode->i_link = NULL;
	inode->i_rdev = 0;
	inode->dirtied_when = 0;

	if (security_inode_alloc(inode))
		goto out;
	spin_lock_init(&inode->i_lock);
	lockdep_set_class(&inode->i_lock, &sb->s_type->i_lock_key);

	mutex_init(&inode->i_mutex);
	lockdep_set_class(&inode->i_mutex, &sb->s_type->i_mutex_key);

	atomic_set(&inode->i_dio_count, 0);

	mapping->a_ops = &empty_aops;
	mapping->host = inode;
	mapping->flags = 0;
	atomic_set(&mapping->i_mmap_writable, 0);
	mapping_set_gfp_mask(mapping, GFP_HIGHUSER_MOVABLE);
	mapping->private_data = NULL;
	mapping->writeback_index = 0;
	inode->i_private = NULL;
	inode->i_mapping = mapping;
	INIT_HLIST_HEAD(&inode->i_dentry);	/* buggered by rcu freeing */
#ifdef CONFIG_FS_POSIX_ACL
	inode->i_acl = inode->i_default_acl = ACL_NOT_CACHED;
#endif

#ifdef CONFIG_FSNOTIFY
	inode->i_fsnotify_mask = 0;
#endif
	inode->i_flctx = NULL;
	this_cpu_inc(nr_inodes);

	return 0;
out:
	return -ENOMEM;
}
EXPORT_SYMBOL(inode_init_always);
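/*
 * Example (illustrative sketch): a filesystem that embeds struct inode in
 * a private structure allocates that container from its own slab in
 * ->alloc_inode(); the VFS then runs inode_init_always() on the embedded
 * inode via alloc_inode() below. The "foo" names here are hypothetical:
 *
 *	static struct inode *foo_alloc_inode(struct super_block *sb)
 *	{
 *		struct foo_inode_info *fi;
 *
 *		fi = kmem_cache_alloc(foo_inode_cachep, GFP_KERNEL);
 *		if (!fi)
 *			return NULL;
 *		return &fi->vfs_inode;
 *	}
 */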
static struct inode *alloc_inode(struct super_block *sb)
{
	struct inode *inode;

	if (sb->s_op->alloc_inode)
		inode = sb->s_op->alloc_inode(sb);
	else
		inode = kmem_cache_alloc(inode_cachep, GFP_KERNEL);

	if (!inode)
		return NULL;

	if (unlikely(inode_init_always(sb, inode))) {
		if (inode->i_sb->s_op->destroy_inode)
			inode->i_sb->s_op->destroy_inode(inode);
		else
			kmem_cache_free(inode_cachep, inode);
		return NULL;
	}

	return inode;
}
void free_inode_nonrcu(struct inode *inode)
{
	kmem_cache_free(inode_cachep, inode);
}
EXPORT_SYMBOL(free_inode_nonrcu);
void __destroy_inode(struct inode *inode)
{
	BUG_ON(inode_has_buffers(inode));
	inode_detach_wb(inode);
	security_inode_free(inode);
	fsnotify_inode_delete(inode);
	locks_free_lock_context(inode->i_flctx);
	if (!inode->i_nlink) {
		WARN_ON(atomic_long_read(&inode->i_sb->s_remove_count) == 0);
		atomic_long_dec(&inode->i_sb->s_remove_count);
	}

#ifdef CONFIG_FS_POSIX_ACL
	if (inode->i_acl && inode->i_acl != ACL_NOT_CACHED)
		posix_acl_release(inode->i_acl);
	if (inode->i_default_acl && inode->i_default_acl != ACL_NOT_CACHED)
		posix_acl_release(inode->i_default_acl);
#endif
	this_cpu_dec(nr_inodes);
}
EXPORT_SYMBOL(__destroy_inode);
static void i_callback(struct rcu_head *head)
{
	struct inode *inode = container_of(head, struct inode, i_rcu);
	kmem_cache_free(inode_cachep, inode);
}
static void destroy_inode(struct inode *inode)
{
	BUG_ON(!list_empty(&inode->i_lru));
	__destroy_inode(inode);
	if (inode->i_sb->s_op->destroy_inode)
		inode->i_sb->s_op->destroy_inode(inode);
	else
		call_rcu(&inode->i_rcu, i_callback);
}
/**
 * drop_nlink - directly drop an inode's link count
 * @inode: inode
 *
 * This is a low-level filesystem helper to replace any
 * direct filesystem manipulation of i_nlink.  In cases
 * where we are attempting to track writes to the
 * filesystem, a decrement to zero means an imminent
 * write when the file is truncated and actually unlinked
 * on the filesystem.
 */
void drop_nlink(struct inode *inode)
{
	WARN_ON(inode->i_nlink == 0);
	inode->__i_nlink--;
	if (!inode->i_nlink)
		atomic_long_inc(&inode->i_sb->s_remove_count);
}
EXPORT_SYMBOL(drop_nlink);
/**
 * clear_nlink - directly zero an inode's link count
 * @inode: inode
 *
 * This is a low-level filesystem helper to replace any
 * direct filesystem manipulation of i_nlink.  See
 * drop_nlink() for why we care about i_nlink hitting zero.
 */
void clear_nlink(struct inode *inode)
{
	if (inode->i_nlink) {
		inode->__i_nlink = 0;
		atomic_long_inc(&inode->i_sb->s_remove_count);
	}
}
EXPORT_SYMBOL(clear_nlink);
/**
 * set_nlink - directly set an inode's link count
 * @inode: inode
 * @nlink: new nlink (should be non-zero)
 *
 * This is a low-level filesystem helper to replace any
 * direct filesystem manipulation of i_nlink.
 */
void set_nlink(struct inode *inode, unsigned int nlink)
{
	if (!nlink) {
		clear_nlink(inode);
	} else {
		/* Yes, some filesystems do change nlink from zero to one */
		if (inode->i_nlink == 0)
			atomic_long_dec(&inode->i_sb->s_remove_count);

		inode->__i_nlink = nlink;
	}
}
EXPORT_SYMBOL(set_nlink);
/**
 * inc_nlink - directly increment an inode's link count
 * @inode: inode
 *
 * This is a low-level filesystem helper to replace any
 * direct filesystem manipulation of i_nlink.  Currently,
 * it is only here for parity with dec_nlink().
 */
void inc_nlink(struct inode *inode)
{
	if (unlikely(inode->i_nlink == 0)) {
		WARN_ON(!(inode->i_state & I_LINKABLE));
		atomic_long_dec(&inode->i_sb->s_remove_count);
	}

	inode->__i_nlink++;
}
EXPORT_SYMBOL(inc_nlink);
void address_space_init_once(struct address_space *mapping)
{
	memset(mapping, 0, sizeof(*mapping));
	INIT_RADIX_TREE(&mapping->page_tree, GFP_ATOMIC);
	spin_lock_init(&mapping->tree_lock);
	init_rwsem(&mapping->i_mmap_rwsem);
	INIT_LIST_HEAD(&mapping->private_list);
	spin_lock_init(&mapping->private_lock);
	mapping->i_mmap = RB_ROOT;
}
EXPORT_SYMBOL(address_space_init_once);
/*
 * These are initializations that only need to be done
 * once, because the fields are idempotent across use
 * of the inode, so let the slab cache be aware of that.
 */
void inode_init_once(struct inode *inode)
{
	memset(inode, 0, sizeof(*inode));
	INIT_HLIST_NODE(&inode->i_hash);
	INIT_LIST_HEAD(&inode->i_devices);
	INIT_LIST_HEAD(&inode->i_wb_list);
	INIT_LIST_HEAD(&inode->i_lru);
	address_space_init_once(&inode->i_data);
	i_size_ordered_init(inode);
#ifdef CONFIG_FSNOTIFY
	INIT_HLIST_HEAD(&inode->i_fsnotify_marks);
#endif
}
EXPORT_SYMBOL(inode_init_once);
static void init_once(void *foo)
{
	struct inode *inode = (struct inode *) foo;

	inode_init_once(inode);
}
/*
 * inode->i_lock must be held
 */
void __iget(struct inode *inode)
{
	atomic_inc(&inode->i_count);
}
/*
 * get additional reference to inode; caller must already hold one.
 */
void ihold(struct inode *inode)
{
	WARN_ON(atomic_inc_return(&inode->i_count) < 2);
}
EXPORT_SYMBOL(ihold);
static void inode_lru_list_add(struct inode *inode)
{
	if (list_lru_add(&inode->i_sb->s_inode_lru, &inode->i_lru))
		this_cpu_inc(nr_unused);
}
/*
 * Add inode to LRU if needed (inode is unused and clean).
 *
 * Needs inode->i_lock held.
 */
void inode_add_lru(struct inode *inode)
{
	if (!(inode->i_state & (I_DIRTY_ALL | I_SYNC |
				I_FREEING | I_WILL_FREE)) &&
	    !atomic_read(&inode->i_count) && inode->i_sb->s_flags & MS_ACTIVE)
		inode_lru_list_add(inode);
}
static void inode_lru_list_del(struct inode *inode)
{
	if (list_lru_del(&inode->i_sb->s_inode_lru, &inode->i_lru))
		this_cpu_dec(nr_unused);
}
/**
 * inode_sb_list_add - add inode to the superblock list of inodes
 * @inode: inode to add
 */
void inode_sb_list_add(struct inode *inode)
{
	spin_lock(&inode_sb_list_lock);
	list_add(&inode->i_sb_list, &inode->i_sb->s_inodes);
	spin_unlock(&inode_sb_list_lock);
}
EXPORT_SYMBOL_GPL(inode_sb_list_add);
static inline void inode_sb_list_del(struct inode *inode)
{
	if (!list_empty(&inode->i_sb_list)) {
		spin_lock(&inode_sb_list_lock);
		list_del_init(&inode->i_sb_list);
		spin_unlock(&inode_sb_list_lock);
	}
}
static unsigned long hash(struct super_block *sb, unsigned long hashval)
{
	unsigned long tmp;

	tmp = (hashval * (unsigned long)sb) ^ (GOLDEN_RATIO_PRIME + hashval) /
			L1_CACHE_BYTES;
	tmp = tmp ^ ((tmp ^ GOLDEN_RATIO_PRIME) >> i_hash_shift);
	return tmp & i_hash_mask;
}
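/*
 * Note: the superblock pointer is folded into the hash so that the same
 * inode number on different filesystems lands in different buckets. A
 * caller finds the bucket head with, for example:
 *
 *	struct hlist_head *b = inode_hashtable + hash(inode->i_sb, hashval);
 *
 * as done by __insert_inode_hash() below.
 */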
/**
 * __insert_inode_hash - hash an inode
 * @inode: unhashed inode
 * @hashval: unsigned long value used to locate this object in the
 *	inode_hashtable.
 *
 * Add an inode to the inode hash for this superblock.
 */
void __insert_inode_hash(struct inode *inode, unsigned long hashval)
{
	struct hlist_head *b = inode_hashtable + hash(inode->i_sb, hashval);

	spin_lock(&inode_hash_lock);
	spin_lock(&inode->i_lock);
	hlist_add_head(&inode->i_hash, b);
	spin_unlock(&inode->i_lock);
	spin_unlock(&inode_hash_lock);
}
EXPORT_SYMBOL(__insert_inode_hash);
/**
 * __remove_inode_hash - remove an inode from the hash
 * @inode: inode to unhash
 *
 * Remove an inode from the inode hash for this superblock.
 */
void __remove_inode_hash(struct inode *inode)
{
	spin_lock(&inode_hash_lock);
	spin_lock(&inode->i_lock);
	hlist_del_init(&inode->i_hash);
	spin_unlock(&inode->i_lock);
	spin_unlock(&inode_hash_lock);
}
EXPORT_SYMBOL(__remove_inode_hash);
void clear_inode(struct inode *inode)
{
	/*
	 * We have to cycle tree_lock here because reclaim can be still in the
	 * process of removing the last page (in __delete_from_page_cache())
	 * and we must not free mapping under it.
	 */
	spin_lock_irq(&inode->i_data.tree_lock);
	BUG_ON(inode->i_data.nrpages);
	BUG_ON(inode->i_data.nrshadows);
	spin_unlock_irq(&inode->i_data.tree_lock);
	BUG_ON(!list_empty(&inode->i_data.private_list));
	BUG_ON(!(inode->i_state & I_FREEING));
	BUG_ON(inode->i_state & I_CLEAR);
	/* don't need i_lock here, no concurrent mods to i_state */
	inode->i_state = I_FREEING | I_CLEAR;
}
EXPORT_SYMBOL(clear_inode);
/*
 * Free the inode passed in, removing it from the lists it is still connected
 * to. We remove any pages still attached to the inode and wait for any IO that
 * is still in progress before finally destroying the inode.
 *
 * An inode must already be marked I_FREEING so that we avoid the inode being
 * moved back onto lists if we race with other code that manipulates the lists
 * (e.g. writeback_single_inode). The caller is responsible for setting this.
 *
 * An inode must already be removed from the LRU list before being evicted from
 * the cache. This should occur atomically with setting the I_FREEING state
 * flag, so no inodes here should ever be on the LRU when being evicted.
 */
static void evict(struct inode *inode)
{
	const struct super_operations *op = inode->i_sb->s_op;

	BUG_ON(!(inode->i_state & I_FREEING));
	BUG_ON(!list_empty(&inode->i_lru));

	if (!list_empty(&inode->i_wb_list))
		inode_wb_list_del(inode);

	inode_sb_list_del(inode);

	/*
	 * Wait for flusher thread to be done with the inode so that filesystem
	 * does not start destroying it while writeback is still running. Since
	 * the inode has I_FREEING set, flusher thread won't start new work on
	 * the inode. We just have to wait for running writeback to finish.
	 */
	inode_wait_for_writeback(inode);

	if (op->evict_inode) {
		op->evict_inode(inode);
	} else {
		truncate_inode_pages_final(&inode->i_data);
		clear_inode(inode);
	}
	if (S_ISBLK(inode->i_mode) && inode->i_bdev)
		bd_forget(inode);
	if (S_ISCHR(inode->i_mode) && inode->i_cdev)
		cd_forget(inode);

	remove_inode_hash(inode);

	spin_lock(&inode->i_lock);
	wake_up_bit(&inode->i_state, __I_NEW);
	BUG_ON(inode->i_state != (I_FREEING | I_CLEAR));
	spin_unlock(&inode->i_lock);

	destroy_inode(inode);
}
/*
 * dispose_list - dispose of the contents of a local list
 * @head: the head of the list to free
 *
 * Dispose-list gets a local list with local inodes in it, so it doesn't
 * need to worry about list corruption and SMP locks.
 */
static void dispose_list(struct list_head *head)
{
	while (!list_empty(head)) {
		struct inode *inode;

		inode = list_first_entry(head, struct inode, i_lru);
		list_del_init(&inode->i_lru);

		evict(inode);
	}
}
/**
 * evict_inodes - evict all evictable inodes for a superblock
 * @sb: superblock to operate on
 *
 * Make sure that no inodes with zero refcount are retained.  This is
 * called by superblock shutdown after having the MS_ACTIVE flag removed,
 * so any inode reaching zero refcount during or after that call will
 * be immediately evicted.
 */
void evict_inodes(struct super_block *sb)
{
	struct inode *inode, *next;
	LIST_HEAD(dispose);

	spin_lock(&inode_sb_list_lock);
	list_for_each_entry_safe(inode, next, &sb->s_inodes, i_sb_list) {
		if (atomic_read(&inode->i_count))
			continue;

		spin_lock(&inode->i_lock);
		if (inode->i_state & (I_NEW | I_FREEING | I_WILL_FREE)) {
			spin_unlock(&inode->i_lock);
			continue;
		}

		inode->i_state |= I_FREEING;
		inode_lru_list_del(inode);
		spin_unlock(&inode->i_lock);
		list_add(&inode->i_lru, &dispose);
	}
	spin_unlock(&inode_sb_list_lock);

	dispose_list(&dispose);
}
/**
 * invalidate_inodes - attempt to free all inodes on a superblock
 * @sb: superblock to operate on
 * @kill_dirty: flag to guide handling of dirty inodes
 *
 * Attempts to free all inodes for a given superblock.  If there were any
 * busy inodes return a non-zero value, else zero.
 * If @kill_dirty is set, discard dirty inodes too, otherwise treat
 * them as busy.
 */
int invalidate_inodes(struct super_block *sb, bool kill_dirty)
{
	int busy = 0;
	struct inode *inode, *next;
	LIST_HEAD(dispose);

	spin_lock(&inode_sb_list_lock);
	list_for_each_entry_safe(inode, next, &sb->s_inodes, i_sb_list) {
		spin_lock(&inode->i_lock);
		if (inode->i_state & (I_NEW | I_FREEING | I_WILL_FREE)) {
			spin_unlock(&inode->i_lock);
			continue;
		}
		if (inode->i_state & I_DIRTY_ALL && !kill_dirty) {
			spin_unlock(&inode->i_lock);
			busy = 1;
			continue;
		}
		if (atomic_read(&inode->i_count)) {
			spin_unlock(&inode->i_lock);
			busy = 1;
			continue;
		}

		inode->i_state |= I_FREEING;
		inode_lru_list_del(inode);
		spin_unlock(&inode->i_lock);
		list_add(&inode->i_lru, &dispose);
	}
	spin_unlock(&inode_sb_list_lock);

	dispose_list(&dispose);

	return busy;
}
/*
 * Isolate the inode from the LRU in preparation for freeing it.
 *
 * Any inodes which are pinned purely because of attached pagecache have their
 * pagecache removed.  If the inode has metadata buffers attached to
 * mapping->private_list then try to remove them.
 *
 * If the inode has the I_REFERENCED flag set, then it means that it has been
 * used recently - the flag is set in iput_final(). When we encounter such an
 * inode, clear the flag and move it to the back of the LRU so it gets another
 * pass through the LRU before it gets reclaimed. This is necessary because of
 * the fact we are doing lazy LRU updates to minimise lock contention so the
 * LRU does not have strict ordering. Hence we don't want to reclaim inodes
 * with this flag set because they are the inodes that are out of order.
 */
static enum lru_status inode_lru_isolate(struct list_head *item,
		struct list_lru_one *lru, spinlock_t *lru_lock, void *arg)
{
	struct list_head *freeable = arg;
	struct inode	*inode = container_of(item, struct inode, i_lru);

	/*
	 * we are inverting the lru lock/inode->i_lock here, so use a trylock.
	 * If we fail to get the lock, just skip it.
	 */
	if (!spin_trylock(&inode->i_lock))
		return LRU_SKIP;

	/*
	 * Referenced or dirty inodes are still in use. Give them another pass
	 * through the LRU as we cannot reclaim them now.
	 */
	if (atomic_read(&inode->i_count) ||
	    (inode->i_state & ~I_REFERENCED)) {
		list_lru_isolate(lru, &inode->i_lru);
		spin_unlock(&inode->i_lock);
		this_cpu_dec(nr_unused);
		return LRU_REMOVED;
	}

	/* recently referenced inodes get one more pass */
	if (inode->i_state & I_REFERENCED) {
		inode->i_state &= ~I_REFERENCED;
		spin_unlock(&inode->i_lock);
		return LRU_ROTATE;
	}

	if (inode_has_buffers(inode) || inode->i_data.nrpages) {
		__iget(inode);
		spin_unlock(&inode->i_lock);
		spin_unlock(lru_lock);
		if (remove_inode_buffers(inode)) {
			unsigned long reap;
			reap = invalidate_mapping_pages(&inode->i_data, 0, -1);
			if (current_is_kswapd())
				__count_vm_events(KSWAPD_INODESTEAL, reap);
			else
				__count_vm_events(PGINODESTEAL, reap);
			if (current->reclaim_state)
				current->reclaim_state->reclaimed_slab += reap;
		}
		iput(inode);
		spin_lock(lru_lock);
		return LRU_RETRY;
	}

	WARN_ON(inode->i_state & I_NEW);
	inode->i_state |= I_FREEING;
	list_lru_isolate_move(lru, &inode->i_lru, freeable);
	spin_unlock(&inode->i_lock);

	this_cpu_dec(nr_unused);
	return LRU_REMOVED;
}
/*
 * Walk the superblock inode LRU for freeable inodes and attempt to free them.
 * This is called from the superblock shrinker function with a number of inodes
 * to trim from the LRU. Inodes to be freed are moved to a temporary list and
 * then are freed outside inode_lock by dispose_list().
 */
long prune_icache_sb(struct super_block *sb, struct shrink_control *sc)
{
	LIST_HEAD(freeable);
	long freed;

	freed = list_lru_shrink_walk(&sb->s_inode_lru, sc,
				     inode_lru_isolate, &freeable);
	dispose_list(&freeable);
	return freed;
}
static void __wait_on_freeing_inode(struct inode *inode);

/*
 * Called with the inode lock held.
 */
static struct inode *find_inode(struct super_block *sb,
				struct hlist_head *head,
				int (*test)(struct inode *, void *),
				void *data)
{
	struct inode *inode = NULL;

repeat:
	hlist_for_each_entry(inode, head, i_hash) {
		if (inode->i_sb != sb)
			continue;
		if (!test(inode, data))
			continue;
		spin_lock(&inode->i_lock);
		if (inode->i_state & (I_FREEING|I_WILL_FREE)) {
			__wait_on_freeing_inode(inode);
			goto repeat;
		}
		__iget(inode);
		spin_unlock(&inode->i_lock);
		return inode;
	}
	return NULL;
}
/*
 * find_inode_fast is the fast path version of find_inode, see the comment at
 * iget_locked for details.
 */
static struct inode *find_inode_fast(struct super_block *sb,
				struct hlist_head *head, unsigned long ino)
{
	struct inode *inode = NULL;

repeat:
	hlist_for_each_entry(inode, head, i_hash) {
		if (inode->i_ino != ino)
			continue;
		if (inode->i_sb != sb)
			continue;
		spin_lock(&inode->i_lock);
		if (inode->i_state & (I_FREEING|I_WILL_FREE)) {
			__wait_on_freeing_inode(inode);
			goto repeat;
		}
		__iget(inode);
		spin_unlock(&inode->i_lock);
		return inode;
	}
	return NULL;
}
/*
 * Each cpu owns a range of LAST_INO_BATCH numbers.
 * 'shared_last_ino' is dirtied only once out of LAST_INO_BATCH allocations,
 * to renew the exhausted range.
 *
 * This does not significantly increase overflow rate because every CPU can
 * consume at most LAST_INO_BATCH-1 unused inode numbers. So there is
 * NR_CPUS*(LAST_INO_BATCH-1) wastage. At 4096 and 1024, this is ~0.1% of the
 * 2^32 range, and is a worst-case. Even a 50% wastage would only increase
 * overflow rate by 2x, which does not seem too significant.
 *
 * On a 32bit, non LFS stat() call, glibc will generate an EOVERFLOW
 * error if st_ino won't fit in target struct field. Use 32bit counter
 * here to attempt to avoid that.
 */
#define LAST_INO_BATCH 1024
static DEFINE_PER_CPU(unsigned int, last_ino);

unsigned int get_next_ino(void)
{
	unsigned int *p = &get_cpu_var(last_ino);
	unsigned int res = *p;

#ifdef CONFIG_SMP
	if (unlikely((res & (LAST_INO_BATCH-1)) == 0)) {
		static atomic_t shared_last_ino;
		int next = atomic_add_return(LAST_INO_BATCH, &shared_last_ino);

		res = next - LAST_INO_BATCH;
	}
#endif

	res++;
	/* get_next_ino should not provide a 0 inode number */
	if (unlikely(!res))
		res++;
	*p = res;
	put_cpu_var(last_ino);
	return res;
}
EXPORT_SYMBOL(get_next_ino);
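/*
 * Example (illustrative sketch): pseudo filesystems with no stable on-disk
 * inode numbers typically assign one at creation time, in the style of
 * ramfs. The "foo" name is hypothetical:
 *
 *	static struct inode *foo_get_inode(struct super_block *sb, umode_t mode)
 *	{
 *		struct inode *inode = new_inode(sb);
 *
 *		if (inode) {
 *			inode->i_ino = get_next_ino();
 *			inode->i_mode = mode;
 *		}
 *		return inode;
 *	}
 */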
/**
 * new_inode_pseudo - obtain an inode
 * @sb: superblock
 *
 * Allocates a new inode for given superblock.
 * The inode won't be chained in the superblock's s_inodes list.
 * This means:
 * - the fs can't be unmounted
 * - quotas, fsnotify, writeback can't work
 */
struct inode *new_inode_pseudo(struct super_block *sb)
{
	struct inode *inode = alloc_inode(sb);

	if (inode) {
		spin_lock(&inode->i_lock);
		inode->i_state = 0;
		spin_unlock(&inode->i_lock);
		INIT_LIST_HEAD(&inode->i_sb_list);
	}
	return inode;
}
/**
 * new_inode - obtain an inode
 * @sb: superblock
 *
 * Allocates a new inode for given superblock. The default gfp_mask
 * for allocations related to inode->i_mapping is GFP_HIGHUSER_MOVABLE.
 * If HIGHMEM pages are unsuitable or it is known that pages allocated
 * for the page cache are not reclaimable or migratable,
 * mapping_set_gfp_mask() must be called with suitable flags on the
 * newly created inode's mapping.
 */
struct inode *new_inode(struct super_block *sb)
{
	struct inode *inode;

	spin_lock_prefetch(&inode_sb_list_lock);

	inode = new_inode_pseudo(sb);
	if (inode)
		inode_sb_list_add(inode);
	return inode;
}
EXPORT_SYMBOL(new_inode);
#ifdef CONFIG_DEBUG_LOCK_ALLOC
void lockdep_annotate_inode_mutex_key(struct inode *inode)
{
	if (S_ISDIR(inode->i_mode)) {
		struct file_system_type *type = inode->i_sb->s_type;

		/* Set new key only if filesystem hasn't already changed it */
		if (lockdep_match_class(&inode->i_mutex, &type->i_mutex_key)) {
			/*
			 * ensure nobody is actually holding i_mutex
			 */
			mutex_destroy(&inode->i_mutex);
			mutex_init(&inode->i_mutex);
			lockdep_set_class(&inode->i_mutex,
					  &type->i_mutex_dir_key);
		}
	}
}
EXPORT_SYMBOL(lockdep_annotate_inode_mutex_key);
#endif
/**
 * unlock_new_inode - clear the I_NEW state and wake up any waiters
 * @inode: new inode to unlock
 *
 * Called when the inode is fully initialised to clear the new state of the
 * inode and wake up anyone waiting for the inode to finish initialisation.
 */
void unlock_new_inode(struct inode *inode)
{
	lockdep_annotate_inode_mutex_key(inode);
	spin_lock(&inode->i_lock);
	WARN_ON(!(inode->i_state & I_NEW));
	inode->i_state &= ~I_NEW;
	smp_mb();
	wake_up_bit(&inode->i_state, __I_NEW);
	spin_unlock(&inode->i_lock);
}
EXPORT_SYMBOL(unlock_new_inode);
/**
 * lock_two_nondirectories - take two i_mutexes on non-directory objects
 *
 * Lock any non-NULL argument that is not a directory.
 * Zero, one or two objects may be locked by this function.
 *
 * @inode1: first inode to lock
 * @inode2: second inode to lock
 */
void lock_two_nondirectories(struct inode *inode1, struct inode *inode2)
{
	if (inode1 > inode2)
		swap(inode1, inode2);

	if (inode1 && !S_ISDIR(inode1->i_mode))
		mutex_lock(&inode1->i_mutex);
	if (inode2 && !S_ISDIR(inode2->i_mode) && inode2 != inode1)
		mutex_lock_nested(&inode2->i_mutex, I_MUTEX_NONDIR2);
}
EXPORT_SYMBOL(lock_two_nondirectories);
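/*
 * Example (illustrative sketch): a typical caller is an exchange or clone
 * style operation on two regular files. The helper sorts the inodes by
 * address, so every caller ends up taking the two mutexes in one global
 * order:
 *
 *	lock_two_nondirectories(src, dst);
 *	... operate on both files ...
 *	unlock_two_nondirectories(src, dst);
 */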
/**
 * unlock_two_nondirectories - release locks from lock_two_nondirectories()
 * @inode1: first inode to unlock
 * @inode2: second inode to unlock
 */
void unlock_two_nondirectories(struct inode *inode1, struct inode *inode2)
{
	if (inode1 && !S_ISDIR(inode1->i_mode))
		mutex_unlock(&inode1->i_mutex);
	if (inode2 && !S_ISDIR(inode2->i_mode) && inode2 != inode1)
		mutex_unlock(&inode2->i_mutex);
}
EXPORT_SYMBOL(unlock_two_nondirectories);
/**
 * iget5_locked - obtain an inode from a mounted file system
 * @sb:		super block of file system
 * @hashval:	hash value (usually inode number) to get
 * @test:	callback used for comparisons between inodes
 * @set:	callback used to initialize a new struct inode
 * @data:	opaque data pointer to pass to @test and @set
 *
 * Search for the inode specified by @hashval and @data in the inode cache,
 * and if present it is returned with an increased reference count. This is
 * a generalized version of iget_locked() for file systems where the inode
 * number is not sufficient for unique identification of an inode.
 *
 * If the inode is not in cache, allocate a new inode and return it locked,
 * hashed, and with the I_NEW flag set. The file system gets to fill it in
 * before unlocking it via unlock_new_inode().
 *
 * Note both @test and @set are called with the inode_hash_lock held, so can't
 * sleep.
 */
struct inode *iget5_locked(struct super_block *sb, unsigned long hashval,
		int (*test)(struct inode *, void *),
		int (*set)(struct inode *, void *), void *data)
{
	struct hlist_head *head = inode_hashtable + hash(sb, hashval);
	struct inode *inode;

	spin_lock(&inode_hash_lock);
	inode = find_inode(sb, head, test, data);
	spin_unlock(&inode_hash_lock);

	if (inode) {
		wait_on_inode(inode);
		return inode;
	}

	inode = alloc_inode(sb);
	if (inode) {
		struct inode *old;

		spin_lock(&inode_hash_lock);
		/* We released the lock, so.. */
		old = find_inode(sb, head, test, data);
		if (!old) {
			if (set(inode, data))
				goto set_failed;

			spin_lock(&inode->i_lock);
			inode->i_state = I_NEW;
			hlist_add_head(&inode->i_hash, head);
			spin_unlock(&inode->i_lock);
			inode_sb_list_add(inode);
			spin_unlock(&inode_hash_lock);

			/* Return the locked inode with I_NEW set, the
			 * caller is responsible for filling in the contents
			 */
			return inode;
		}

		/*
		 * Uhhuh, somebody else created the same inode under
		 * us. Use the old inode instead of the one we just
		 * allocated.
		 */
		spin_unlock(&inode_hash_lock);
		destroy_inode(inode);
		inode = old;
		wait_on_inode(inode);
	}
	return inode;

set_failed:
	spin_unlock(&inode_hash_lock);
	destroy_inode(inode);
	return NULL;
}
EXPORT_SYMBOL(iget5_locked);
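/*
 * Example (illustrative sketch): a filesystem whose inodes are not uniquely
 * identified by i_ino alone can key the cache on a larger object id. The
 * "foo" names and FOO_I() container accessor are hypothetical:
 *
 *	static int foo_test(struct inode *inode, void *data)
 *	{
 *		return FOO_I(inode)->object_id == *(u64 *)data;
 *	}
 *
 *	static int foo_set(struct inode *inode, void *data)
 *	{
 *		FOO_I(inode)->object_id = *(u64 *)data;
 *		return 0;
 *	}
 *
 *	inode = iget5_locked(sb, (unsigned long)id, foo_test, foo_set, &id);
 *	if (inode && (inode->i_state & I_NEW)) {
 *		... fill in the inode from backing store ...
 *		unlock_new_inode(inode);
 *	}
 *
 * Neither callback may sleep, since both run under inode_hash_lock.
 */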
/**
 * iget_locked - obtain an inode from a mounted file system
 * @sb:		super block of file system
 * @ino:	inode number to get
 *
 * Search for the inode specified by @ino in the inode cache and if present
 * return it with an increased reference count. This is for file systems
 * where the inode number is sufficient for unique identification of an inode.
 *
 * If the inode is not in cache, allocate a new inode and return it locked,
 * hashed, and with the I_NEW flag set.  The file system gets to fill it in
 * before unlocking it via unlock_new_inode().
 */
struct inode *iget_locked(struct super_block *sb, unsigned long ino)
{
	struct hlist_head *head = inode_hashtable + hash(sb, ino);
	struct inode *inode;

	spin_lock(&inode_hash_lock);
	inode = find_inode_fast(sb, head, ino);
	spin_unlock(&inode_hash_lock);
	if (inode) {
		wait_on_inode(inode);
		return inode;
	}

	inode = alloc_inode(sb);
	if (inode) {
		struct inode *old;

		spin_lock(&inode_hash_lock);
		/* We released the lock, so.. */
		old = find_inode_fast(sb, head, ino);
		if (!old) {
			inode->i_ino = ino;
			spin_lock(&inode->i_lock);
			inode->i_state = I_NEW;
			hlist_add_head(&inode->i_hash, head);
			spin_unlock(&inode->i_lock);
			inode_sb_list_add(inode);
			spin_unlock(&inode_hash_lock);

			/* Return the locked inode with I_NEW set, the
			 * caller is responsible for filling in the contents
			 */
			return inode;
		}

		/*
		 * Uhhuh, somebody else created the same inode under
		 * us. Use the old inode instead of the one we just
		 * allocated.
		 */
		spin_unlock(&inode_hash_lock);
		destroy_inode(inode);
		inode = old;
		wait_on_inode(inode);
	}
	return inode;
}
EXPORT_SYMBOL(iget_locked);
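/*
 * Example (illustrative sketch): the canonical lookup pattern for a
 * filesystem where i_ino is sufficient; foo_read_inode() is hypothetical:
 *
 *	inode = iget_locked(sb, ino);
 *	if (!inode)
 *		return ERR_PTR(-ENOMEM);
 *	if (inode->i_state & I_NEW) {
 *		foo_read_inode(inode);
 *		unlock_new_inode(inode);
 *	}
 *	return inode;
 */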
/*
 * search the inode cache for a matching inode number.
 * If we find one, then the inode number we are trying to
 * allocate is not unique and so we should not use it.
 *
 * Returns 1 if the inode number is unique, 0 if it is not.
 */
static int test_inode_iunique(struct super_block *sb, unsigned long ino)
{
	struct hlist_head *b = inode_hashtable + hash(sb, ino);
	struct inode *inode;

	spin_lock(&inode_hash_lock);
	hlist_for_each_entry(inode, b, i_hash) {
		if (inode->i_ino == ino && inode->i_sb == sb) {
			spin_unlock(&inode_hash_lock);
			return 0;
		}
	}
	spin_unlock(&inode_hash_lock);

	return 1;
}
/**
 * iunique - get a unique inode number
 * @sb: superblock
 * @max_reserved: highest reserved inode number
 *
 * Obtain an inode number that is unique on the system for a given
 * superblock. This is used by file systems that have no natural
 * permanent inode numbering system. An inode number is returned that
 * is higher than the reserved limit but unique.
 *
 * BUGS:
 * With a large number of inodes live on the file system this function
 * currently becomes quite slow.
 */
ino_t iunique(struct super_block *sb, ino_t max_reserved)
{
	/*
	 * On a 32bit, non LFS stat() call, glibc will generate an EOVERFLOW
	 * error if st_ino won't fit in target struct field. Use 32bit counter
	 * here to attempt to avoid that.
	 */
	static DEFINE_SPINLOCK(iunique_lock);
	static unsigned int counter;
	ino_t res;

	spin_lock(&iunique_lock);
	do {
		if (counter <= max_reserved)
			counter = max_reserved + 1;
		res = counter++;
	} while (!test_inode_iunique(sb, res));
	spin_unlock(&iunique_lock);

	return res;
}
EXPORT_SYMBOL(iunique);
struct inode *igrab(struct inode *inode)
{
	spin_lock(&inode->i_lock);
	if (!(inode->i_state & (I_FREEING|I_WILL_FREE))) {
		__iget(inode);
		spin_unlock(&inode->i_lock);
	} else {
		spin_unlock(&inode->i_lock);
		/*
		 * Handle the case where s_op->clear_inode has not been
		 * called yet, and somebody is calling igrab
		 * while the inode is getting freed.
		 */
		inode = NULL;
	}
	return inode;
}
EXPORT_SYMBOL(igrab);
/**
 * ilookup5_nowait - search for an inode in the inode cache
 * @sb:		super block of file system to search
 * @hashval:	hash value (usually inode number) to search for
 * @test:	callback used for comparisons between inodes
 * @data:	opaque data pointer to pass to @test
 *
 * Search for the inode specified by @hashval and @data in the inode cache.
 * If the inode is in the cache, the inode is returned with an incremented
 * reference count.
 *
 * Note: I_NEW is not waited upon so you have to be very careful what you do
 * with the returned inode.  You probably should be using ilookup5() instead.
 *
 * Note2: @test is called with the inode_hash_lock held, so can't sleep.
 */
struct inode *ilookup5_nowait(struct super_block *sb, unsigned long hashval,
		int (*test)(struct inode *, void *), void *data)
{
	struct hlist_head *head = inode_hashtable + hash(sb, hashval);
	struct inode *inode;

	spin_lock(&inode_hash_lock);
	inode = find_inode(sb, head, test, data);
	spin_unlock(&inode_hash_lock);

	return inode;
}
EXPORT_SYMBOL(ilookup5_nowait);
/**
 * ilookup5 - search for an inode in the inode cache
 * @sb:		super block of file system to search
 * @hashval:	hash value (usually inode number) to search for
 * @test:	callback used for comparisons between inodes
 * @data:	opaque data pointer to pass to @test
 *
 * Search for the inode specified by @hashval and @data in the inode cache,
 * and if the inode is in the cache, return the inode with an incremented
 * reference count.  Waits on I_NEW before returning the inode.
 *
 * This is a generalized version of ilookup() for file systems where the
 * inode number is not sufficient for unique identification of an inode.
 *
 * Note: @test is called with the inode_hash_lock held, so can't sleep.
 */
struct inode *ilookup5(struct super_block *sb, unsigned long hashval,
		int (*test)(struct inode *, void *), void *data)
{
	struct inode *inode = ilookup5_nowait(sb, hashval, test, data);

	if (inode)
		wait_on_inode(inode);
	return inode;
}
EXPORT_SYMBOL(ilookup5);
/**
 * ilookup - search for an inode in the inode cache
 * @sb:		super block of file system to search
 * @ino:	inode number to search for
 *
 * Search for the inode @ino in the inode cache, and if the inode is in the
 * cache, the inode is returned with an incremented reference count.
 */
struct inode *ilookup(struct super_block *sb, unsigned long ino)
{
	struct hlist_head *head = inode_hashtable + hash(sb, ino);
	struct inode *inode;

	spin_lock(&inode_hash_lock);
	inode = find_inode_fast(sb, head, ino);
	spin_unlock(&inode_hash_lock);

	if (inode)
		wait_on_inode(inode);
	return inode;
}
EXPORT_SYMBOL(ilookup);
/**
 * find_inode_nowait - find an inode in the inode cache
 * @sb:		super block of file system to search
 * @hashval:	hash value (usually inode number) to search for
 * @match:	callback used for comparisons between inodes
 * @data:	opaque data pointer to pass to @match
 *
 * Search for the inode specified by @hashval and @data in the inode
 * cache, where the helper function @match will return 0 if the inode
 * does not match, 1 if the inode does match, and -1 if the search
 * should be stopped.  The @match function must be responsible for
 * taking the i_lock spin_lock and checking i_state for an inode being
 * freed or being initialized, and incrementing the reference count
 * before returning 1.  It also must not sleep, since it is called with
 * the inode_hash_lock spinlock held.
 *
 * This is an even more generalized version of ilookup5() when the
 * function must never block --- find_inode() can block in
 * __wait_on_freeing_inode() --- or when the caller can not increment
 * the reference count because the resulting iput() might cause an
 * inode eviction.  The tradeoff is that the @match function must be
 * very carefully implemented.
 */
struct inode *find_inode_nowait(struct super_block *sb,
				unsigned long hashval,
				int (*match)(struct inode *, unsigned long,
					     void *),
				void *data)
{
	struct hlist_head *head = inode_hashtable + hash(sb, hashval);
	struct inode *inode, *ret_inode = NULL;
	int mval;

	spin_lock(&inode_hash_lock);
	hlist_for_each_entry(inode, head, i_hash) {
		if (inode->i_sb != sb)
			continue;
		mval = match(inode, hashval, data);
		if (mval == 0)
			continue;
		if (mval == 1)
			ret_inode = inode;
		goto out;
	}
out:
	spin_unlock(&inode_hash_lock);
	return ret_inode;
}
EXPORT_SYMBOL(find_inode_nowait);
int insert_inode_locked(struct inode *inode)
{
	struct super_block *sb = inode->i_sb;
	ino_t ino = inode->i_ino;
	struct hlist_head *head = inode_hashtable + hash(sb, ino);

	while (1) {
		struct inode *old = NULL;

		spin_lock(&inode_hash_lock);
		hlist_for_each_entry(old, head, i_hash) {
			if (old->i_ino != ino)
				continue;
			if (old->i_sb != sb)
				continue;
			spin_lock(&old->i_lock);
			if (old->i_state & (I_FREEING|I_WILL_FREE)) {
				spin_unlock(&old->i_lock);
				continue;
			}
			break;
		}
		if (likely(!old)) {
			spin_lock(&inode->i_lock);
			inode->i_state |= I_NEW;
			hlist_add_head(&inode->i_hash, head);
			spin_unlock(&inode->i_lock);
			spin_unlock(&inode_hash_lock);
			return 0;
		}
		__iget(old);
		spin_unlock(&old->i_lock);
		spin_unlock(&inode_hash_lock);
		wait_on_inode(old);
		if (unlikely(!inode_unhashed(old))) {
			iput(old);
			return -EBUSY;
		}
		iput(old);
	}
}
EXPORT_SYMBOL(insert_inode_locked);
int insert_inode_locked4(struct inode *inode, unsigned long hashval,
		int (*test)(struct inode *, void *), void *data)
{
	struct super_block *sb = inode->i_sb;
	struct hlist_head *head = inode_hashtable + hash(sb, hashval);

	while (1) {
		struct inode *old = NULL;

		spin_lock(&inode_hash_lock);
		hlist_for_each_entry(old, head, i_hash) {
			if (old->i_sb != sb)
				continue;
			if (!test(old, data))
				continue;
			spin_lock(&old->i_lock);
			if (old->i_state & (I_FREEING|I_WILL_FREE)) {
				spin_unlock(&old->i_lock);
				continue;
			}
			break;
		}
		if (likely(!old)) {
			spin_lock(&inode->i_lock);
			inode->i_state |= I_NEW;
			hlist_add_head(&inode->i_hash, head);
			spin_unlock(&inode->i_lock);
			spin_unlock(&inode_hash_lock);
			return 0;
		}
		__iget(old);
		spin_unlock(&old->i_lock);
		spin_unlock(&inode_hash_lock);
		wait_on_inode(old);
		if (unlikely(!inode_unhashed(old))) {
			iput(old);
			return -EBUSY;
		}
		iput(old);
	}
}
EXPORT_SYMBOL(insert_inode_locked4);
int generic_delete_inode(struct inode *inode)
{
	return 1;
}
EXPORT_SYMBOL(generic_delete_inode);
/*
 * Called when we're dropping the last reference
 * to an inode.
 *
 * Call the FS "drop_inode()" function, defaulting to
 * the legacy UNIX filesystem behaviour.  If it tells
 * us to evict the inode, do so.  Otherwise, retain the inode
 * in cache if the fs is alive, sync and evict if the fs is
 * shutting down.
 */
static void iput_final(struct inode *inode)
{
	struct super_block *sb = inode->i_sb;
	const struct super_operations *op = inode->i_sb->s_op;
	int drop;

	WARN_ON(inode->i_state & I_NEW);

	if (op->drop_inode)
		drop = op->drop_inode(inode);
	else
		drop = generic_drop_inode(inode);

	if (!drop && (sb->s_flags & MS_ACTIVE)) {
		inode->i_state |= I_REFERENCED;
		inode_add_lru(inode);
		spin_unlock(&inode->i_lock);
		return;
	}

	if (!drop) {
		inode->i_state |= I_WILL_FREE;
		spin_unlock(&inode->i_lock);
		write_inode_now(inode, 1);
		spin_lock(&inode->i_lock);
		WARN_ON(inode->i_state & I_NEW);
		inode->i_state &= ~I_WILL_FREE;
	}

	inode->i_state |= I_FREEING;
	if (!list_empty(&inode->i_lru))
		inode_lru_list_del(inode);
	spin_unlock(&inode->i_lock);

	evict(inode);
}
/**
 * iput - put an inode
 * @inode: inode to put
 *
 * Puts an inode, dropping its usage count. If the inode use count hits
 * zero, the inode is then freed and may also be destroyed.
 *
 * Consequently, iput() can sleep.
 */
void iput(struct inode *inode)
{
	if (!inode)
		return;
	BUG_ON(inode->i_state & I_CLEAR);
retry:
	if (atomic_dec_and_lock(&inode->i_count, &inode->i_lock)) {
		if (inode->i_nlink && (inode->i_state & I_DIRTY_TIME)) {
			atomic_inc(&inode->i_count);
			inode->i_state &= ~I_DIRTY_TIME;
			spin_unlock(&inode->i_lock);
			trace_writeback_lazytime_iput(inode);
			mark_inode_dirty_sync(inode);
			goto retry;
		}
		iput_final(inode);
	}
}
EXPORT_SYMBOL(iput);
/**
 * bmap - find a block number in a file
 * @inode: inode of file
 * @block: block to find
 *
 * Returns the block number on the device holding the inode that
 * is the disk block number for the block of the file requested.
 * That is, asked for block 4 of inode 1 the function will return the
 * disk block relative to the disk start that holds that block of the
 * file.
 */
sector_t bmap(struct inode *inode, sector_t block)
{
	sector_t res = 0;

	if (inode->i_mapping->a_ops->bmap)
		res = inode->i_mapping->a_ops->bmap(inode->i_mapping, block);
	return res;
}
EXPORT_SYMBOL(bmap);
/*
 * With relative atime, only update atime if the previous atime is
 * earlier than either the ctime or mtime or if at least a day has
 * passed since the last atime update.
 */
static int relatime_need_update(struct vfsmount *mnt, struct inode *inode,
				struct timespec now)
{
	if (!(mnt->mnt_flags & MNT_RELATIME))
		return 1;
	/*
	 * Is mtime younger than atime? If yes, update atime:
	 */
	if (timespec_compare(&inode->i_mtime, &inode->i_atime) >= 0)
		return 1;
	/*
	 * Is ctime younger than atime? If yes, update atime:
	 */
	if (timespec_compare(&inode->i_ctime, &inode->i_atime) >= 0)
		return 1;

	/*
	 * Is the previous atime value older than a day? If yes,
	 * update atime:
	 */
	if ((long)(now.tv_sec - inode->i_atime.tv_sec) >= 24*60*60)
		return 1;
	/*
	 * Good, we can skip the atime update:
	 */
	return 0;
}
int generic_update_time(struct inode *inode, struct timespec *time, int flags)
{
	int iflags = I_DIRTY_TIME;

	if (flags & S_ATIME)
		inode->i_atime = *time;
	if (flags & S_VERSION)
		inode_inc_iversion(inode);
	if (flags & S_CTIME)
		inode->i_ctime = *time;
	if (flags & S_MTIME)
		inode->i_mtime = *time;

	if (!(inode->i_sb->s_flags & MS_LAZYTIME) || (flags & S_VERSION))
		iflags |= I_DIRTY_SYNC;
	__mark_inode_dirty(inode, iflags);
	return 0;
}
EXPORT_SYMBOL(generic_update_time);
/*
 * This does the actual work of updating an inode's time or version. The
 * caller must have called mnt_want_write() before calling this.
 */
static int update_time(struct inode *inode, struct timespec *time, int flags)
{
	int (*update_time)(struct inode *, struct timespec *, int);

	update_time = inode->i_op->update_time ? inode->i_op->update_time :
		generic_update_time;

	return update_time(inode, time, flags);
}
/**
 * touch_atime - update the access time
 * @path: the &struct path to update
 *
 * Update the accessed time on an inode and mark it for writeback.
 * This function automatically handles read only file systems and media,
 * as well as the "noatime" flag and inode specific "noatime" markers.
 */
bool atime_needs_update(const struct path *path, struct inode *inode)
{
	struct vfsmount *mnt = path->mnt;
	struct timespec now;

	if (inode->i_flags & S_NOATIME)
		return false;
	if (IS_NOATIME(inode))
		return false;
	if ((inode->i_sb->s_flags & MS_NODIRATIME) && S_ISDIR(inode->i_mode))
		return false;

	if (mnt->mnt_flags & MNT_NOATIME)
		return false;
	if ((mnt->mnt_flags & MNT_NODIRATIME) && S_ISDIR(inode->i_mode))
		return false;

	now = current_fs_time(inode->i_sb);

	if (!relatime_need_update(mnt, inode, now))
		return false;

	if (timespec_equal(&inode->i_atime, &now))
		return false;

	return true;
}
void touch_atime(const struct path *path)
{
	struct vfsmount *mnt = path->mnt;
	struct inode *inode = d_inode(path->dentry);
	struct timespec now;

	if (!atime_needs_update(path, inode))
		return;

	if (!sb_start_write_trylock(inode->i_sb))
		return;

	if (__mnt_want_write(mnt) != 0)
		goto skip_update;
	/*
	 * File systems can error out when updating inodes if they need to
	 * allocate new space to modify an inode (such is the case for
	 * Btrfs), but since we touch atime while walking down the path we
	 * really don't care if we failed to update the atime of the file,
	 * so just ignore the return value.
	 * We may also fail on filesystems that have the ability to make parts
	 * of the fs read only, e.g. subvolumes in Btrfs.
	 */
	now = current_fs_time(inode->i_sb);
	update_time(inode, &now, S_ATIME);
	__mnt_drop_write(mnt);
skip_update:
	sb_end_write(inode->i_sb);
}
EXPORT_SYMBOL(touch_atime);
/*
 * The logic we want is
 *
 *	if suid or (sgid and xgrp)
 *		remove privs
 */
int should_remove_suid(struct dentry *dentry)
{
	umode_t mode = d_inode(dentry)->i_mode;
	int kill = 0;

	/* suid always must be killed */
	if (unlikely(mode & S_ISUID))
		kill = ATTR_KILL_SUID;

	/*
	 * sgid without any exec bits is just a mandatory locking mark; leave
	 * it alone.  If some exec bits are set, it's a real sgid; kill it.
	 */
	if (unlikely((mode & S_ISGID) && (mode & S_IXGRP)))
		kill |= ATTR_KILL_SGID;

	if (unlikely(kill && !capable(CAP_FSETID) && S_ISREG(mode)))
		return kill;

	return 0;
}
EXPORT_SYMBOL(should_remove_suid);
/*
 * Return mask of changes for notify_change() that need to be done as a
 * response to write or truncate. Return 0 if nothing has to be changed.
 * Negative value on error (change should be denied).
 */
int dentry_needs_remove_privs(struct dentry *dentry)
{
	struct inode *inode = d_inode(dentry);
	int mask = 0;
	int ret;

	if (IS_NOSEC(inode))
		return 0;

	mask = should_remove_suid(dentry);
	ret = security_inode_need_killpriv(dentry);
	if (ret < 0)
		return ret;
	if (ret)
		mask |= ATTR_KILL_PRIV;
	return mask;
}
EXPORT_SYMBOL(dentry_needs_remove_privs);
static int __remove_privs(struct dentry *dentry, int kill)
{
	struct iattr newattrs;

	newattrs.ia_valid = ATTR_FORCE | kill;
	/*
	 * Note we call this on write, so notify_change will not
	 * encounter any conflicting delegations:
	 */
	return notify_change(dentry, &newattrs, NULL);
}
/*
 * Remove special file privileges (suid, capabilities) when the file is
 * written to or truncated.
 */
int file_remove_privs(struct file *file)
{
	struct dentry *dentry = file->f_path.dentry;
	struct inode *inode = d_inode(dentry);
	int kill;
	int error = 0;

	/* Fast path for nothing security related */
	if (IS_NOSEC(inode))
		return 0;

	kill = file_needs_remove_privs(file);
	if (kill < 0)
		return kill;
	if (kill)
		error = __remove_privs(dentry, kill);
	if (!error)
		inode_has_no_xattr(inode);

	return error;
}
EXPORT_SYMBOL(file_remove_privs);
/**
 * file_update_time - update mtime and ctime time
 * @file: file accessed
 *
 * Update the mtime and ctime members of an inode and mark the inode
 * for writeback.  Note that this function is meant exclusively for
 * usage in the file write path of filesystems, and filesystems may
 * choose to explicitly ignore updates via this function with the
 * S_NOCMTIME inode flag, e.g. for network filesystems where these
 * timestamps are handled by the server.  This can return an error for
 * file systems that need to allocate space in order to update an inode.
 */
int file_update_time(struct file *file)
{
	struct inode *inode = file_inode(file);
	struct timespec now;
	int sync_it = 0;
	int ret;

	/* First try to exhaust all avenues to not sync */
	if (IS_NOCMTIME(inode))
		return 0;

	now = current_fs_time(inode->i_sb);
	if (!timespec_equal(&inode->i_mtime, &now))
		sync_it = S_MTIME;

	if (!timespec_equal(&inode->i_ctime, &now))
		sync_it |= S_CTIME;

	if (IS_I_VERSION(inode))
		sync_it |= S_VERSION;

	if (!sync_it)
		return 0;

	/* Finally allowed to write? Takes lock. */
	if (__mnt_want_write_file(file))
		return 0;

	ret = update_time(inode, &now, sync_it);
	__mnt_drop_write_file(file);

	return ret;
}
EXPORT_SYMBOL(file_update_time);
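/*
 * Example (illustrative sketch): a filesystem rolling its own write path
 * would drop privileges and update timestamps before copying data, roughly
 * as generic_file_write_iter() already does internally. The "foo" names
 * are hypothetical:
 *
 *	static ssize_t foo_write_iter(struct kiocb *iocb, struct iov_iter *from)
 *	{
 *		struct file *file = iocb->ki_filp;
 *		int err;
 *
 *		err = file_remove_privs(file);
 *		if (!err)
 *			err = file_update_time(file);
 *		if (err)
 *			return err;
 *		return foo_do_write(iocb, from);
 *	}
 */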
int inode_needs_sync(struct inode *inode)
{
	if (IS_SYNC(inode))
		return 1;
	if (S_ISDIR(inode->i_mode) && IS_DIRSYNC(inode))
		return 1;
	return 0;
}
EXPORT_SYMBOL(inode_needs_sync);
/*
 * If we try to find an inode in the inode hash while it is being
 * deleted, we have to wait until the filesystem completes its
 * deletion before reporting that it isn't found.  This function waits
 * until the deletion _might_ have completed.  Callers are responsible
 * for rechecking inode state.
 *
 * It doesn't matter if I_NEW is not set initially, a call to
 * wake_up_bit(&inode->i_state, __I_NEW) after removing from the hash list
 * will DTRT.
 */
static void __wait_on_freeing_inode(struct inode *inode)
{
	wait_queue_head_t *wq;
	DEFINE_WAIT_BIT(wait, &inode->i_state, __I_NEW);
	wq = bit_waitqueue(&inode->i_state, __I_NEW);
	prepare_to_wait(wq, &wait.wait, TASK_UNINTERRUPTIBLE);
	spin_unlock(&inode->i_lock);
	spin_unlock(&inode_hash_lock);
	schedule();
	finish_wait(wq, &wait.wait);
	spin_lock(&inode_hash_lock);
}
static __initdata unsigned long ihash_entries;
static int __init set_ihash_entries(char *str)
{
	if (!str)
		return 0;
	ihash_entries = simple_strtoul(str, &str, 0);
	return 1;
}
__setup("ihash_entries=", set_ihash_entries);
/*
 * Initialize the waitqueues and inode hash table.
 */
void __init inode_init_early(void)
{
	unsigned int loop;

	/* If hashes are distributed across NUMA nodes, defer
	 * hash allocation until vmalloc space is available.
	 */
	if (hashdist)
		return;

	inode_hashtable =
		alloc_large_system_hash("Inode-cache",
					sizeof(struct hlist_head),
					ihash_entries,
					14,
					HASH_EARLY,
					&i_hash_shift,
					&i_hash_mask,
					0,
					0);

	for (loop = 0; loop < (1U << i_hash_shift); loop++)
		INIT_HLIST_HEAD(&inode_hashtable[loop]);
}
void __init inode_init(void)
{
	unsigned int loop;

	/* inode slab cache */
	inode_cachep = kmem_cache_create("inode_cache",
					 sizeof(struct inode),
					 0,
					 (SLAB_RECLAIM_ACCOUNT|SLAB_PANIC|
					 SLAB_MEM_SPREAD),
					 init_once);

	/* Hash may have been set up in inode_init_early */
	if (!hashdist)
		return;

	inode_hashtable =
		alloc_large_system_hash("Inode-cache",
					sizeof(struct hlist_head),
					ihash_entries,
					14,
					0,
					&i_hash_shift,
					&i_hash_mask,
					0,
					0);

	for (loop = 0; loop < (1U << i_hash_shift); loop++)
		INIT_HLIST_HEAD(&inode_hashtable[loop]);
}
void init_special_inode(struct inode *inode, umode_t mode, dev_t rdev)
{
	inode->i_mode = mode;
	if (S_ISCHR(mode)) {
		inode->i_fop = &def_chr_fops;
		inode->i_rdev = rdev;
	} else if (S_ISBLK(mode)) {
		inode->i_fop = &def_blk_fops;
		inode->i_rdev = rdev;
	} else if (S_ISFIFO(mode))
		inode->i_fop = &pipefifo_fops;
	else if (S_ISSOCK(mode))
		;	/* leave it no_open_fops */
	else
		printk(KERN_DEBUG "init_special_inode: bogus i_mode (%o) for"
				  " inode %s:%lu\n", mode, inode->i_sb->s_id,
				  inode->i_ino);
}
EXPORT_SYMBOL(init_special_inode);
/**
 * inode_init_owner - Init uid,gid,mode for new inode according to posix standards
 * @inode: New inode
 * @dir: Directory inode
 * @mode: mode of the new inode
 */
void inode_init_owner(struct inode *inode, const struct inode *dir,
			umode_t mode)
{
	inode->i_uid = current_fsuid();
	if (dir && dir->i_mode & S_ISGID) {
		inode->i_gid = dir->i_gid;
		if (S_ISDIR(mode))
			mode |= S_ISGID;
	} else
		inode->i_gid = current_fsgid();
	inode->i_mode = mode;
}
EXPORT_SYMBOL(inode_init_owner);
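/*
 * Example (illustrative sketch): a typical ->create() or ->mkdir()
 * implementation initialises ownership right after allocating the inode,
 * so that a setgid directory propagates its group (and, for directories,
 * the setgid bit) as implemented above:
 *
 *	inode = new_inode(sb);
 *	if (!inode)
 *		return -ENOMEM;
 *	inode_init_owner(inode, dir, mode);
 */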
/**
 * inode_owner_or_capable - check current task permissions to inode
 * @inode: inode being checked
 *
 * Return true if current either has CAP_FOWNER in a namespace with the
 * inode owner uid mapped, or owns the file.
 */
bool inode_owner_or_capable(const struct inode *inode)
{
	struct user_namespace *ns;

	if (uid_eq(current_fsuid(), inode->i_uid))
		return true;

	ns = current_user_ns();
	if (ns_capable(ns, CAP_FOWNER) && kuid_has_mapping(ns, inode->i_uid))
		return true;
	return false;
}
EXPORT_SYMBOL(inode_owner_or_capable);
/*
 * Direct i/o helper functions
 */
static void __inode_dio_wait(struct inode *inode)
{
	wait_queue_head_t *wq = bit_waitqueue(&inode->i_state, __I_DIO_WAKEUP);
	DEFINE_WAIT_BIT(q, &inode->i_state, __I_DIO_WAKEUP);

	do {
		prepare_to_wait(wq, &q.wait, TASK_UNINTERRUPTIBLE);
		if (atomic_read(&inode->i_dio_count))
			schedule();
	} while (atomic_read(&inode->i_dio_count));
	finish_wait(wq, &q.wait);
}
/**
 * inode_dio_wait - wait for outstanding DIO requests to finish
 * @inode: inode to wait for
 *
 * Waits for all pending direct I/O requests to finish so that we can
 * proceed with a truncate or equivalent operation.
 *
 * Must be called under a lock that serializes taking new references
 * to i_dio_count, usually by inode->i_mutex.
 */
void inode_dio_wait(struct inode *inode)
{
	if (atomic_read(&inode->i_dio_count))
		__inode_dio_wait(inode);
}
EXPORT_SYMBOL(inode_dio_wait);
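/*
 * Example (illustrative sketch): a setattr path that truncates a file
 * waits for direct I/O under i_mutex before changing the size:
 *
 *	mutex_lock(&inode->i_mutex);
 *	inode_dio_wait(inode);
 *	truncate_setsize(inode, newsize);
 *	mutex_unlock(&inode->i_mutex);
 *
 * Holding i_mutex is what prevents new i_dio_count references from being
 * taken while we wait.
 */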
/*
 * inode_set_flags - atomically set some inode flags
 *
 * Note: the caller should be holding i_mutex, or else be sure that
 * they have exclusive access to the inode structure (i.e., while the
 * inode is being instantiated).  The reason for the cmpxchg() loop
 * --- which wouldn't be necessary if all code paths that modify
 * i_flags actually followed this rule --- is that there is at least one
 * code path which doesn't today, so we use cmpxchg() out of an abundance
 * of caution.
 *
 * In the long run, i_mutex is overkill, and we should probably look
 * at using the i_lock spinlock to protect i_flags, and then make sure
 * it is so documented in include/linux/fs.h and that all code follows
 * the locking convention!!
 */
void inode_set_flags(struct inode *inode, unsigned int flags,
		     unsigned int mask)
{
	unsigned int old_flags, new_flags;

	WARN_ON_ONCE(flags & ~mask);
	do {
		old_flags = ACCESS_ONCE(inode->i_flags);
		new_flags = (old_flags & ~mask) | flags;
	} while (unlikely(cmpxchg(&inode->i_flags, old_flags,
				  new_flags) != old_flags));
}
EXPORT_SYMBOL(inode_set_flags);
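/*
 * Example (illustrative sketch): propagating on-disk attribute bits into
 * i_flags without disturbing unrelated flags; the FOO_*_FL on-disk bits
 * are hypothetical:
 *
 *	unsigned int new_fl = 0;
 *
 *	if (foo_flags & FOO_IMMUTABLE_FL)
 *		new_fl |= S_IMMUTABLE;
 *	if (foo_flags & FOO_NOATIME_FL)
 *		new_fl |= S_NOATIME;
 *	inode_set_flags(inode, new_fl, S_IMMUTABLE | S_NOATIME);
 *
 * Bits outside the mask are left untouched by the cmpxchg() loop.
 */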