/*
 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */
#include "xfs_types.h"
#include "xfs_trans.h"
#include "xfs_mount.h"
#include "xfs_bmap_btree.h"
#include "xfs_alloc_btree.h"
#include "xfs_ialloc_btree.h"
#include "xfs_dinode.h"
#include "xfs_inode.h"
#include "xfs_btree.h"
#include "xfs_ialloc.h"
#include "xfs_quota.h"
#include "xfs_utils.h"
#include "xfs_trans_priv.h"
#include "xfs_inode_item.h"
#include "xfs_trace.h"
/*
 * Define xfs inode iolock lockdep classes. We need to ensure that all active
 * inodes are considered the same for lockdep purposes, including inodes that
 * are recycled through the XFS_IRECLAIMABLE state. This is the only way to
 * guarantee the locks are considered the same when there are multiple lock
 * initialisation sites. Also, define a reclaimable inode class so it is
 * obvious in lockdep reports which class the report is against.
 */
static struct lock_class_key xfs_iolock_active;
struct lock_class_key xfs_iolock_reclaimable;
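
/*
 * Illustrative sketch (not part of this file): without a shared key, each
 * initialisation site would mint its own lockdep class, so the same rwsem
 * initialised from two paths would be reported as two different locks.
 * Sharing one static key keeps them in a single class:
 *
 *	static struct lock_class_key my_lock_key;
 *
 *	init_rwsem(&ip->some_rwsem);
 *	lockdep_set_class_and_name(&ip->some_rwsem, &my_lock_key,
 *				   "my_lock_class");
 *
 * "some_rwsem", "my_lock_key" and "my_lock_class" are hypothetical names
 * used only for illustration.
 */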
/*
 * Allocate and initialise an xfs_inode.
 */
STATIC struct xfs_inode *
xfs_inode_alloc(
	struct xfs_mount	*mp,
	xfs_ino_t		ino)
{
	struct xfs_inode	*ip;

	/*
	 * if this didn't occur in transactions, we could use
	 * KM_MAYFAIL and return NULL here on ENOMEM. Set the
	 * code up to do this anyway.
	 */
	ip = kmem_zone_alloc(xfs_inode_zone, KM_SLEEP);
	if (inode_init_always(mp->m_super, VFS_I(ip))) {
		kmem_zone_free(xfs_inode_zone, ip);
		return NULL;
	}
	ASSERT(atomic_read(&ip->i_pincount) == 0);
	ASSERT(!spin_is_locked(&ip->i_flags_lock));
	ASSERT(!xfs_isiflocked(ip));
	ASSERT(ip->i_ino == 0);

	mrlock_init(&ip->i_iolock, MRLOCK_BARRIER, "xfsio", ip->i_ino);
	lockdep_set_class_and_name(&ip->i_iolock.mr_lock,
			&xfs_iolock_active, "xfs_iolock_active");
	/* initialise the xfs inode */
	ip->i_ino = ino;
	ip->i_mount = mp;
	memset(&ip->i_imap, 0, sizeof(struct xfs_imap));
	memset(&ip->i_df, 0, sizeof(xfs_ifork_t));
	ip->i_update_core = 0;
	ip->i_delayed_blks = 0;
	memset(&ip->i_d, 0, sizeof(xfs_icdinode_t));

	return ip;
}
STATIC void
xfs_inode_free_callback(
	struct rcu_head		*head)
{
	struct inode		*inode = container_of(head, struct inode, i_rcu);
	struct xfs_inode	*ip = XFS_I(inode);

	kmem_zone_free(xfs_inode_zone, ip);
}
void
xfs_inode_free(
	struct xfs_inode	*ip)
{
	switch (ip->i_d.di_mode & S_IFMT) {
	case S_IFREG:
	case S_IFDIR:
	case S_IFLNK:
		xfs_idestroy_fork(ip, XFS_DATA_FORK);
		break;
	}

	if (ip->i_afp)
		xfs_idestroy_fork(ip, XFS_ATTR_FORK);
	if (ip->i_itemp) {
		/*
		 * Only if we are shutting down the fs will we see an
		 * inode still in the AIL. If it is there, we should remove
		 * it to prevent a use-after-free from occurring.
		 */
		xfs_log_item_t	*lip = &ip->i_itemp->ili_item;
		struct xfs_ail	*ailp = lip->li_ailp;

		ASSERT(((lip->li_flags & XFS_LI_IN_AIL) == 0) ||
				       XFS_FORCED_SHUTDOWN(ip->i_mount));
		if (lip->li_flags & XFS_LI_IN_AIL) {
			spin_lock(&ailp->xa_lock);
			if (lip->li_flags & XFS_LI_IN_AIL)
				xfs_trans_ail_delete(ailp, lip);
			else
				spin_unlock(&ailp->xa_lock);
		}
		xfs_inode_item_destroy(ip);
		ip->i_itemp = NULL;
	}
	/* asserts to verify all state is correct here */
	ASSERT(atomic_read(&ip->i_pincount) == 0);
	ASSERT(!spin_is_locked(&ip->i_flags_lock));
	ASSERT(!xfs_isiflocked(ip));

	/*
	 * Because we use RCU freeing we need to ensure the inode always
	 * appears to be reclaimed with an invalid inode number when in the
	 * free state. The ip->i_flags_lock provides the barrier against lookup
	 * races.
	 */
	spin_lock(&ip->i_flags_lock);
	ip->i_flags = XFS_IRECLAIM;
	ip->i_ino = 0;
	spin_unlock(&ip->i_flags_lock);

	call_rcu(&VFS_I(ip)->i_rcu, xfs_inode_free_callback);
}
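
/*
 * Illustrative sketch (not part of this file): the lookup side that pairs
 * with the RCU freeing above must revalidate the inode number under
 * ip->i_flags_lock before trusting a pointer found in the radix tree,
 * which is exactly what xfs_iget_cache_hit() below does:
 *
 *	rcu_read_lock();
 *	ip = radix_tree_lookup(&pag->pag_ici_root, agino);
 *	if (ip) {
 *		spin_lock(&ip->i_flags_lock);
 *		if (ip->i_ino != ino)
 *			// freed or reallocated inode: retry the lookup
 *		...
 *	}
 */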
/*
 * Check the validity of the inode we just found in the cache
 */
static int
xfs_iget_cache_hit(
	struct xfs_perag	*pag,
	struct xfs_inode	*ip,
	xfs_ino_t		ino,
	int			flags,
	int			lock_flags) __releases(RCU)
{
	struct inode		*inode = VFS_I(ip);
	struct xfs_mount	*mp = ip->i_mount;
	int			error;
	/*
	 * check for re-use of an inode within an RCU grace period due to the
	 * radix tree nodes not being updated yet. We monitor for this by
	 * setting the inode number to zero before freeing the inode structure.
	 * If the inode has been reallocated and set up, then the inode number
	 * will not match, so check for that, too.
	 */
	spin_lock(&ip->i_flags_lock);
	if (ip->i_ino != ino) {
		trace_xfs_iget_skip(ip);
		XFS_STATS_INC(xs_ig_frecycle);
		error = EAGAIN;
		goto out_error;
	}
	/*
	 * If we are racing with another cache hit that is currently
	 * instantiating this inode or currently recycling it out of
	 * reclaimable state, wait for the initialisation to complete
	 * before continuing.
	 *
	 * XXX(hch): eventually we should do something equivalent to
	 * wait_on_inode to wait for these flags to be cleared
	 * instead of polling for it.
	 */
	if (ip->i_flags & (XFS_INEW|XFS_IRECLAIM)) {
		trace_xfs_iget_skip(ip);
		XFS_STATS_INC(xs_ig_frecycle);
		error = EAGAIN;
		goto out_error;
	}
	/*
	 * If lookup is racing with unlink return an error immediately.
	 */
	if (ip->i_d.di_mode == 0 && !(flags & XFS_IGET_CREATE)) {
		error = ENOENT;
		goto out_error;
	}
	/*
	 * If IRECLAIMABLE is set, we've torn down the VFS inode already.
	 * Need to carefully get it back into usable state.
	 */
	if (ip->i_flags & XFS_IRECLAIMABLE) {
		trace_xfs_iget_reclaim(ip);

		/*
		 * We need to set XFS_IRECLAIM to prevent xfs_reclaim_inode
		 * from stomping over us while we recycle the inode. We can't
		 * clear the radix tree reclaimable tag yet as it requires
		 * pag_ici_lock to be held exclusive.
		 */
		ip->i_flags |= XFS_IRECLAIM;

		spin_unlock(&ip->i_flags_lock);
		rcu_read_unlock();
		error = -inode_init_always(mp->m_super, inode);
		if (error) {
			/*
			 * Re-initializing the inode failed, and we are in deep
			 * trouble. Try to re-add it to the reclaim list.
			 */
			rcu_read_lock();
			spin_lock(&ip->i_flags_lock);

			ip->i_flags &= ~(XFS_INEW | XFS_IRECLAIM);
			ASSERT(ip->i_flags & XFS_IRECLAIMABLE);
			trace_xfs_iget_reclaim_fail(ip);
			goto out_error;
		}
		spin_lock(&pag->pag_ici_lock);
		spin_lock(&ip->i_flags_lock);

		/*
		 * Clear the per-lifetime state in the inode as we are now
		 * effectively a new inode and need to return to the initial
		 * state before reuse occurs.
		 */
		ip->i_flags &= ~XFS_IRECLAIM_RESET_FLAGS;
		ip->i_flags |= XFS_INEW;
		__xfs_inode_clear_reclaim_tag(mp, pag, ip);
		inode->i_state = I_NEW;

		ASSERT(!rwsem_is_locked(&ip->i_iolock.mr_lock));
		mrlock_init(&ip->i_iolock, MRLOCK_BARRIER, "xfsio", ip->i_ino);
		lockdep_set_class_and_name(&ip->i_iolock.mr_lock,
				&xfs_iolock_active, "xfs_iolock_active");

		spin_unlock(&ip->i_flags_lock);
		spin_unlock(&pag->pag_ici_lock);
	} else {
		/* If the VFS inode is being torn down, pause and try again. */
		if (!igrab(inode)) {
			trace_xfs_iget_skip(ip);
			error = EAGAIN;
			goto out_error;
		}

		/* We've got a live one. */
		spin_unlock(&ip->i_flags_lock);
		rcu_read_unlock();
		trace_xfs_iget_hit(ip);
	}
	if (lock_flags != 0)
		xfs_ilock(ip, lock_flags);

	xfs_iflags_clear(ip, XFS_ISTALE);
	XFS_STATS_INC(xs_ig_found);

	return 0;

out_error:
	spin_unlock(&ip->i_flags_lock);
	rcu_read_unlock();
	return error;
}
static int
xfs_iget_cache_miss(
	struct xfs_mount	*mp,
	struct xfs_perag	*pag,
	struct xfs_trans	*tp,
	xfs_ino_t		ino,
	struct xfs_inode	**ipp,
	int			flags,
	int			lock_flags)
{
	struct xfs_inode	*ip;
	int			error;
	xfs_agino_t		agino = XFS_INO_TO_AGINO(mp, ino);

	ip = xfs_inode_alloc(mp, ino);
	if (!ip)
		return ENOMEM;
	error = xfs_iread(mp, tp, ip, flags);
	if (error)
		goto out_destroy;

	trace_xfs_iget_miss(ip);

	if ((ip->i_d.di_mode == 0) && !(flags & XFS_IGET_CREATE)) {
		error = ENOENT;
		goto out_destroy;
	}
	/*
	 * Preload the radix tree so we can insert safely under the
	 * write spinlock. Note that we cannot sleep inside the preload
	 * region.
	 */
	if (radix_tree_preload(GFP_KERNEL)) {
		error = EAGAIN;
		goto out_destroy;
	}

	/*
	 * Because the inode hasn't been added to the radix-tree yet it can't
	 * be found by another thread, so we can do the non-sleeping lock here.
	 */
	if (lock_flags) {
		if (!xfs_ilock_nowait(ip, lock_flags))
			BUG();
	}
	/*
	 * These values must be set before inserting the inode into the radix
	 * tree as the moment it is inserted a concurrent lookup (allowed by the
	 * RCU locking mechanism) can find it and that lookup must see that this
	 * is an inode currently under construction (i.e. that XFS_INEW is set).
	 * The ip->i_flags_lock that protects the XFS_INEW flag forms the
	 * memory barrier that ensures this detection works correctly at lookup
	 * time.
	 */
	ip->i_udquot = ip->i_gdquot = NULL;
	xfs_iflags_set(ip, XFS_INEW);
	/* insert the new inode */
	spin_lock(&pag->pag_ici_lock);
	error = radix_tree_insert(&pag->pag_ici_root, agino, ip);
	if (unlikely(error)) {
		WARN_ON(error != -EEXIST);
		XFS_STATS_INC(xs_ig_dup);
		error = EAGAIN;
		goto out_preload_end;
	}
	spin_unlock(&pag->pag_ici_lock);
	radix_tree_preload_end();

	*ipp = ip;
	return 0;
out_preload_end:
	spin_unlock(&pag->pag_ici_lock);
	radix_tree_preload_end();
	if (lock_flags)
		xfs_iunlock(ip, lock_flags);
out_destroy:
	__destroy_inode(VFS_I(ip));
	xfs_inode_free(ip);
	return error;
}
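
/*
 * Illustrative sketch (not part of this file): the canonical
 * radix_tree_preload() pairing used above. Preload may sleep, so it runs
 * before the spinlock is taken, and radix_tree_preload_end() must run on
 * every path that follows a successful preload:
 *
 *	if (radix_tree_preload(GFP_KERNEL))
 *		return -ENOMEM;	// or retry, as above
 *	spin_lock(&tree_lock);
 *	error = radix_tree_insert(&tree_root, index, item);
 *	spin_unlock(&tree_lock);
 *	radix_tree_preload_end();
 *
 * "tree_lock", "tree_root", "index" and "item" are hypothetical names
 * used only for illustration.
 */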
/*
 * Look up an inode by number in the given file system.
 * The inode is looked up in the cache held in each AG.
 * If the inode is found in the cache, initialise the vfs inode
 * if necessary.
 *
 * If it is not in core, read it in from the file system's device,
 * add it to the cache and initialise the vfs inode.
 *
 * The inode is locked according to the value of the lock_flags parameter.
 * This flag parameter indicates how and if the inode's IO lock and inode lock
 * should be taken.
 *
 * mp -- the mount point structure for the current file system.  It points
 *       to the inode hash table.
 * tp -- a pointer to the current transaction if there is one.  This is
 *       simply passed through to the xfs_iread() call.
 * ino -- the number of the inode desired.  This is the unique identifier
 *        within the file system for the inode being requested.
 * lock_flags -- flags indicating how to lock the inode.  See the comment
 *		 for xfs_ilock() for a list of valid values.
 */
int
xfs_iget(
	xfs_mount_t	*mp,
	xfs_trans_t	*tp,
	xfs_ino_t	ino,
	uint		flags,
	uint		lock_flags,
	xfs_inode_t	**ipp)
{
	xfs_inode_t	*ip;
	int		error;
	xfs_perag_t	*pag;
	xfs_agino_t	agino;
	/* reject inode numbers outside existing AGs */
	if (!ino || XFS_INO_TO_AGNO(mp, ino) >= mp->m_sb.sb_agcount)
		return EINVAL;

	/* get the perag structure and ensure that it's inode capable */
	pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ino));
	agino = XFS_INO_TO_AGINO(mp, ino);
again:
	error = 0;
	rcu_read_lock();
	ip = radix_tree_lookup(&pag->pag_ici_root, agino);

	if (ip) {
		error = xfs_iget_cache_hit(pag, ip, ino, flags, lock_flags);
		if (error)
			goto out_error_or_again;
	} else {
		rcu_read_unlock();
		XFS_STATS_INC(xs_ig_missed);

		error = xfs_iget_cache_miss(mp, pag, tp, ino, &ip,
							flags, lock_flags);
		if (error)
			goto out_error_or_again;
	}
	xfs_perag_put(pag);
	*ipp = ip;

	/*
	 * If we have a real type for an on-disk inode, we can set ops(&unlock)
	 * now. If it's a new inode being created, xfs_ialloc will handle it.
	 */
	if (xfs_iflags_test(ip, XFS_INEW) && ip->i_d.di_mode != 0)
		xfs_setup_inode(ip);
	return 0;

out_error_or_again:
	if (error == EAGAIN) {
		delay(1);
		goto again;
	}
	xfs_perag_put(pag);
	return error;
}
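
/*
 * Illustrative sketch (not part of this file): a typical caller looks up
 * an inode with the locks it needs and releases the reference with IRELE()
 * when done. Hypothetical usage, error handling elided:
 *
 *	struct xfs_inode	*ip;
 *	int			error;
 *
 *	error = xfs_iget(mp, NULL, ino, 0, XFS_ILOCK_SHARED, &ip);
 *	if (error)
 *		return error;
 *	// ... read inode state under the shared ilock ...
 *	xfs_iunlock(ip, XFS_ILOCK_SHARED);
 *	IRELE(ip);
 */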
/*
 * This is a wrapper routine around the xfs_ilock() routine
 * used to centralize some grungy code.  It is used in places
 * that wish to lock the inode solely for reading the extents.
 * The reason these places can't just call xfs_ilock(SHARED)
 * is that the inode lock also guards the bringing in of the
 * extents from disk for a file in b-tree format.  If the inode
 * is in b-tree format, then we need to lock the inode exclusively
 * until the extents are read in.  Locking it exclusively all
 * the time would limit our parallelism unnecessarily, though.
 * What we do instead is check to see if the extents have been
 * read in yet, and only lock the inode exclusively if they
 * have not.
 *
 * The function returns a value which should be given to the
 * corresponding xfs_iunlock_map_shared().  This value is
 * the mode in which the lock was actually taken.
 */
uint
xfs_ilock_map_shared(
	xfs_inode_t	*ip)
{
	uint	lock_mode;

	if ((ip->i_d.di_format == XFS_DINODE_FMT_BTREE) &&
	    ((ip->i_df.if_flags & XFS_IFEXTENTS) == 0)) {
		lock_mode = XFS_ILOCK_EXCL;
	} else {
		lock_mode = XFS_ILOCK_SHARED;
	}

	xfs_ilock(ip, lock_mode);

	return lock_mode;
}
/*
 * This is simply the unlock routine to go with xfs_ilock_map_shared().
 * All it does is call xfs_iunlock() with the given lock_mode.
 */
void
xfs_iunlock_map_shared(
	xfs_inode_t	*ip,
	unsigned int	lock_mode)
{
	xfs_iunlock(ip, lock_mode);
}
/*
 * The xfs inode contains 2 locks: a multi-reader lock called the
 * i_iolock and a multi-reader lock called the i_lock.  This routine
 * allows either or both of the locks to be obtained.
 *
 * The 2 locks should always be ordered so that the IO lock is
 * obtained first in order to prevent deadlock.
 *
 * ip -- the inode being locked
 * lock_flags -- this parameter indicates the inode's locks
 *       to be locked.  It can be:
 *		XFS_IOLOCK_SHARED,
 *		XFS_IOLOCK_EXCL,
 *		XFS_ILOCK_SHARED,
 *		XFS_ILOCK_EXCL,
 *		XFS_IOLOCK_SHARED | XFS_ILOCK_SHARED,
 *		XFS_IOLOCK_SHARED | XFS_ILOCK_EXCL,
 *		XFS_IOLOCK_EXCL | XFS_ILOCK_SHARED,
 *		XFS_IOLOCK_EXCL | XFS_ILOCK_EXCL
 */
void
xfs_ilock(
	xfs_inode_t	*ip,
	uint		lock_flags)
{
	/*
	 * You can't set both SHARED and EXCL for the same lock,
	 * and only XFS_IOLOCK_SHARED, XFS_IOLOCK_EXCL, XFS_ILOCK_SHARED,
	 * and XFS_ILOCK_EXCL are valid values to set in lock_flags.
	 */
	ASSERT((lock_flags & (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL)) !=
	       (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL));
	ASSERT((lock_flags & (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL)) !=
	       (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL));
	ASSERT((lock_flags & ~(XFS_LOCK_MASK | XFS_LOCK_DEP_MASK)) == 0);

	if (lock_flags & XFS_IOLOCK_EXCL)
		mrupdate_nested(&ip->i_iolock, XFS_IOLOCK_DEP(lock_flags));
	else if (lock_flags & XFS_IOLOCK_SHARED)
		mraccess_nested(&ip->i_iolock, XFS_IOLOCK_DEP(lock_flags));

	if (lock_flags & XFS_ILOCK_EXCL)
		mrupdate_nested(&ip->i_lock, XFS_ILOCK_DEP(lock_flags));
	else if (lock_flags & XFS_ILOCK_SHARED)
		mraccess_nested(&ip->i_lock, XFS_ILOCK_DEP(lock_flags));

	trace_xfs_ilock(ip, lock_flags, _RET_IP_);
}
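
/*
 * Illustrative sketch (not part of this file): taking both locks in one
 * call preserves the required ordering (IO lock before inode lock), and
 * the same flags are passed back to xfs_iunlock() to drop them:
 *
 *	xfs_ilock(ip, XFS_IOLOCK_EXCL | XFS_ILOCK_EXCL);
 *	// ... modify the inode ...
 *	xfs_iunlock(ip, XFS_IOLOCK_EXCL | XFS_ILOCK_EXCL);
 */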
/*
 * This is just like xfs_ilock(), except that the caller
 * is guaranteed not to sleep.  It returns 1 if it gets
 * the requested locks and 0 otherwise.  If the IO lock is
 * obtained but the inode lock cannot be, then the IO lock
 * is dropped before returning.
 *
 * ip -- the inode being locked
 * lock_flags -- this parameter indicates the inode's locks to be
 *       locked.  See the comment for xfs_ilock() for a list
 *       of valid values.
 */
int
xfs_ilock_nowait(
	xfs_inode_t	*ip,
	uint		lock_flags)
{
	/*
	 * You can't set both SHARED and EXCL for the same lock,
	 * and only XFS_IOLOCK_SHARED, XFS_IOLOCK_EXCL, XFS_ILOCK_SHARED,
	 * and XFS_ILOCK_EXCL are valid values to set in lock_flags.
	 */
	ASSERT((lock_flags & (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL)) !=
	       (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL));
	ASSERT((lock_flags & (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL)) !=
	       (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL));
	ASSERT((lock_flags & ~(XFS_LOCK_MASK | XFS_LOCK_DEP_MASK)) == 0);

	if (lock_flags & XFS_IOLOCK_EXCL) {
		if (!mrtryupdate(&ip->i_iolock))
			goto out;
	} else if (lock_flags & XFS_IOLOCK_SHARED) {
		if (!mrtryaccess(&ip->i_iolock))
			goto out;
	}
	if (lock_flags & XFS_ILOCK_EXCL) {
		if (!mrtryupdate(&ip->i_lock))
			goto out_undo_iolock;
	} else if (lock_flags & XFS_ILOCK_SHARED) {
		if (!mrtryaccess(&ip->i_lock))
			goto out_undo_iolock;
	}
	trace_xfs_ilock_nowait(ip, lock_flags, _RET_IP_);
	return 1;
 out_undo_iolock:
	if (lock_flags & XFS_IOLOCK_EXCL)
		mrunlock_excl(&ip->i_iolock);
	else if (lock_flags & XFS_IOLOCK_SHARED)
		mrunlock_shared(&ip->i_iolock);
 out:
	return 0;
}
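
/*
 * Illustrative sketch (not part of this file): xfs_ilock_nowait() suits
 * contexts that must not sleep and can defer work on contention:
 *
 *	if (!xfs_ilock_nowait(ip, XFS_ILOCK_EXCL))
 *		return 0;	// busy: caller retries later
 *	// ... do non-blocking work ...
 *	xfs_iunlock(ip, XFS_ILOCK_EXCL);
 */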
/*
 * xfs_iunlock() is used to drop the inode locks acquired with
 * xfs_ilock() and xfs_ilock_nowait().  The caller must pass
 * in the flags given to xfs_ilock() or xfs_ilock_nowait() so
 * that we know which locks to drop.
 *
 * ip -- the inode being unlocked
 * lock_flags -- this parameter indicates the inode's locks to be
 *       unlocked.  See the comment for xfs_ilock() for a list
 *       of valid values for this parameter.
 */
void
xfs_iunlock(
	xfs_inode_t	*ip,
	uint		lock_flags)
{
	/*
	 * You can't set both SHARED and EXCL for the same lock,
	 * and only XFS_IOLOCK_SHARED, XFS_IOLOCK_EXCL, XFS_ILOCK_SHARED,
	 * and XFS_ILOCK_EXCL are valid values to set in lock_flags.
	 */
	ASSERT((lock_flags & (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL)) !=
	       (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL));
	ASSERT((lock_flags & (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL)) !=
	       (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL));
	ASSERT((lock_flags & ~(XFS_LOCK_MASK | XFS_IUNLOCK_NONOTIFY |
			XFS_LOCK_DEP_MASK)) == 0);
	ASSERT(lock_flags != 0);

	if (lock_flags & XFS_IOLOCK_EXCL)
		mrunlock_excl(&ip->i_iolock);
	else if (lock_flags & XFS_IOLOCK_SHARED)
		mrunlock_shared(&ip->i_iolock);

	if (lock_flags & XFS_ILOCK_EXCL)
		mrunlock_excl(&ip->i_lock);
	else if (lock_flags & XFS_ILOCK_SHARED)
		mrunlock_shared(&ip->i_lock);

	if ((lock_flags & (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL)) &&
	    !(lock_flags & XFS_IUNLOCK_NONOTIFY) && ip->i_itemp) {
		/*
		 * Let the AIL know that this item has been unlocked in case
		 * it is in the AIL and anyone is waiting on it.  Don't do
		 * this if the caller has asked us not to.
		 */
		xfs_trans_unlocked_item(ip->i_itemp->ili_item.li_ailp,
					(xfs_log_item_t *)(ip->i_itemp));
	}
	trace_xfs_iunlock(ip, lock_flags, _RET_IP_);
}
/*
 * Give up write locks.  The i/o lock cannot be held nested
 * if it is being demoted.
 */
void
xfs_ilock_demote(
	xfs_inode_t	*ip,
	uint		lock_flags)
{
	ASSERT(lock_flags & (XFS_IOLOCK_EXCL|XFS_ILOCK_EXCL));
	ASSERT((lock_flags & ~(XFS_IOLOCK_EXCL|XFS_ILOCK_EXCL)) == 0);

	if (lock_flags & XFS_ILOCK_EXCL)
		mrdemote(&ip->i_lock);
	if (lock_flags & XFS_IOLOCK_EXCL)
		mrdemote(&ip->i_iolock);

	trace_xfs_ilock_demote(ip, lock_flags, _RET_IP_);
}
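
/*
 * Illustrative sketch (not part of this file): demoting trades an
 * exclusive hold for a shared one without ever dropping the lock:
 *
 *	xfs_ilock(ip, XFS_ILOCK_EXCL);
 *	// ... exclusive-phase updates ...
 *	xfs_ilock_demote(ip, XFS_ILOCK_EXCL);
 *	// ... continue under the shared lock ...
 *	xfs_iunlock(ip, XFS_ILOCK_SHARED);
 */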
#ifdef DEBUG
int
xfs_isilocked(
	xfs_inode_t		*ip,
	uint			lock_flags)
{
	if (lock_flags & (XFS_ILOCK_EXCL|XFS_ILOCK_SHARED)) {
		if (!(lock_flags & XFS_ILOCK_SHARED))
			return !!ip->i_lock.mr_writer;
		return rwsem_is_locked(&ip->i_lock.mr_lock);
	}

	if (lock_flags & (XFS_IOLOCK_EXCL|XFS_IOLOCK_SHARED)) {
		if (!(lock_flags & XFS_IOLOCK_SHARED))
			return !!ip->i_iolock.mr_writer;
		return rwsem_is_locked(&ip->i_iolock.mr_lock);
	}

	ASSERT(0);
	return 0;
}
#endif
void
__xfs_iflock(
	struct xfs_inode	*ip)
{
	wait_queue_head_t *wq = bit_waitqueue(&ip->i_flags, __XFS_IFLOCK_BIT);
	DEFINE_WAIT_BIT(wait, &ip->i_flags, __XFS_IFLOCK_BIT);

	do {
		prepare_to_wait_exclusive(wq, &wait.wait, TASK_UNINTERRUPTIBLE);
		if (xfs_isiflocked(ip))
			io_schedule();
	} while (!xfs_iflock_nowait(ip));

	finish_wait(wq, &wait.wait);
}