/*
 * Copyright (c) 2000-2003 Silicon Graphics, Inc.  All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
 *
 * Further, this software is distributed without any warranty that it is
 * free of the rightful claim of any third person regarding infringement
 * or the like.  Any license provided herein, whether implied or
 * otherwise, applies only to this software file.  Patent licenses, if
 * any, provided herein do not apply to combinations of this program with
 * other software, or any other product whatsoever.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, write the Free Software Foundation, Inc., 59
 * Temple Place - Suite 330, Boston MA 02111-1307, USA.
 *
 * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy,
 * Mountain View, CA  94043, or:
 *
 * http://www.sgi.com
 *
 * For further information regarding this notice, see:
 *
 * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/
 */
#include "xfs.h"
#include "xfs_macros.h"
#include "xfs_types.h"
#include "xfs_inum.h"
#include "xfs_log.h"
#include "xfs_trans.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
#include "xfs_dir.h"
#include "xfs_dir2.h"
#include "xfs_dmapi.h"
#include "xfs_mount.h"
#include "xfs_alloc_btree.h"
#include "xfs_bmap_btree.h"
#include "xfs_ialloc_btree.h"
#include "xfs_btree.h"
#include "xfs_ialloc.h"
#include "xfs_attr_sf.h"
#include "xfs_dir_sf.h"
#include "xfs_dir2_sf.h"
#include "xfs_dinode.h"
#include "xfs_inode.h"
#include "xfs_quota.h"
#include "xfs_utils.h"
/*
 * Initialize the inode hash table for the newly mounted file system.
 *
 * mp -- this is the mount point structure for the file system being
 *       initialized
 */
void
xfs_ihash_init(xfs_mount_t *mp)
{
	int	i;

	mp->m_ihsize = XFS_BUCKETS(mp);
	mp->m_ihash = (xfs_ihash_t *)kmem_zalloc(mp->m_ihsize
				      * sizeof(xfs_ihash_t), KM_SLEEP);
	ASSERT(mp->m_ihash != NULL);
	for (i = 0; i < mp->m_ihsize; i++) {
		rwlock_init(&(mp->m_ihash[i].ih_lock));
	}
}
/*
 * Free up structures allocated by xfs_ihash_init, at unmount time.
 */
void
xfs_ihash_free(xfs_mount_t *mp)
{
	kmem_free(mp->m_ihash, mp->m_ihsize*sizeof(xfs_ihash_t));
	mp->m_ihash = NULL;
}
/*
 * Initialize the inode cluster hash table for the newly mounted file system.
 *
 * mp -- this is the mount point structure for the file system being
 *       initialized
 */
void
xfs_chash_init(xfs_mount_t *mp)
{
	int	i;

	/*
	 * m_chash size is based on m_ihash
	 * with a minimum of 37 entries
	 */
	mp->m_chsize = (XFS_BUCKETS(mp)) /
			 (XFS_INODE_CLUSTER_SIZE(mp) >> mp->m_sb.sb_inodelog);
	if (mp->m_chsize < 37) {
		mp->m_chsize = 37;
	}
	mp->m_chash = (xfs_chash_t *)kmem_zalloc(mp->m_chsize
						 * sizeof(xfs_chash_t),
						 KM_SLEEP);
	ASSERT(mp->m_chash != NULL);

	for (i = 0; i < mp->m_chsize; i++) {
		spinlock_init(&mp->m_chash[i].ch_lock,"xfshash");
	}
}
/*
 * Free up structures allocated by xfs_chash_init, at unmount time.
 */
void
xfs_chash_free(xfs_mount_t *mp)
{
	int	i;

	for (i = 0; i < mp->m_chsize; i++) {
		spinlock_destroy(&mp->m_chash[i].ch_lock);
	}

	kmem_free(mp->m_chash, mp->m_chsize*sizeof(xfs_chash_t));
	mp->m_chash = NULL;
}
/*
 * Look up an inode by number in the given file system.
 * The inode is looked up in the hash table for the file system
 * represented by the mount point parameter mp.  Each bucket of
 * the hash table is guarded by an individual semaphore.
 *
 * If the inode is found in the hash table, its corresponding vnode
 * is obtained with a call to vn_get().  This call takes care of
 * coordination with the reclamation of the inode and vnode.  Note
 * that the vmap structure is filled in while holding the hash lock.
 * This gives us the state of the inode/vnode when we found it and
 * is used for coordination in vn_get().
 *
 * If it is not in core, read it in from the file system's device and
 * add the inode into the hash table.
 *
 * The inode is locked according to the value of the lock_flags parameter.
 * This flag parameter indicates how and if the inode's IO lock and inode lock
 * should be taken.
 *
 * mp -- the mount point structure for the current file system.  It points
 *       to the inode hash table.
 * tp -- a pointer to the current transaction if there is one.  This is
 *       simply passed through to the xfs_iread() call.
 * ino -- the number of the inode desired.  This is the unique identifier
 *        within the file system for the inode being requested.
 * lock_flags -- flags indicating how to lock the inode.  See the comment
 *		 for xfs_ilock() for a list of valid values.
 * bno -- the block number starting the buffer containing the inode,
 *	  if known (as by bulkstat), else 0.
 */
STATIC int
xfs_iget_core(
	vnode_t		*vp,
	xfs_mount_t	*mp,
	xfs_trans_t	*tp,
	xfs_ino_t	ino,
	uint		lock_flags,
	xfs_inode_t	**ipp,
	xfs_daddr_t	bno)
{
	xfs_ihash_t	*ih;
	xfs_inode_t	*ip;
	xfs_inode_t	*iq;
	vnode_t		*inode_vp;
	ulong		version;
	int		error;
	int		newnode;
	xfs_chash_t	*ch;
	xfs_chashlist_t	*chl, *chlnew;
	SPLDECL(s);
	ih = XFS_IHASH(mp, ino);

again:
	read_lock(&ih->ih_lock);

	for (ip = ih->ih_next; ip != NULL; ip = ip->i_next) {
		if (ip->i_ino == ino) {

			inode_vp = XFS_ITOV_NULL(ip);

			if (inode_vp == NULL) {
				/* If IRECLAIM is set this inode is
				 * on its way out of the system,
				 * we need to pause and try again.
				 */
				if (ip->i_flags & XFS_IRECLAIM) {
					read_unlock(&ih->ih_lock);
					delay(1);
					XFS_STATS_INC(xs_ig_frecycle);

					goto again;
				}

				vn_trace_exit(vp, "xfs_iget.alloc",
					(inst_t *)__return_address);

				XFS_STATS_INC(xs_ig_found);

				ip->i_flags &= ~XFS_IRECLAIMABLE;
				read_unlock(&ih->ih_lock);

				XFS_MOUNT_ILOCK(mp);
				list_del_init(&ip->i_reclaim);
				XFS_MOUNT_IUNLOCK(mp);

				goto finish_inode;
			} else if (vp != inode_vp) {
				struct inode *inode = LINVFS_GET_IP(inode_vp);

				/* The inode is being torn down, pause and
				 * try again.
				 */
				if (inode->i_state & (I_FREEING | I_CLEAR)) {
					read_unlock(&ih->ih_lock);
					delay(1);
					XFS_STATS_INC(xs_ig_frecycle);

					goto again;
				}
/* Chances are the other vnode (the one in the inode) is being torn
 * down right now, and we landed on top of it. Question is, what do
 * we do? Unhook the old inode and hook up the new one?
 */
				cmn_err(CE_PANIC,
			"xfs_iget_core: ambiguous vns: vp/0x%p, invp/0x%p",
						inode_vp, vp);
			}

			read_unlock(&ih->ih_lock);

			XFS_STATS_INC(xs_ig_found);
finish_inode:
			if (lock_flags != 0) {
				xfs_ilock(ip, lock_flags);
			}

			newnode = (ip->i_d.di_mode == 0);
			if (newnode) {
				xfs_iocore_inode_reinit(ip);
			}
			ip->i_flags &= ~XFS_ISTALE;

			vn_trace_exit(vp, "xfs_iget.found",
						(inst_t *)__return_address);
			goto return_ip;
		}
	}
	/*
	 * Inode cache miss: save the hash chain version stamp and unlock
	 * the chain, so we don't deadlock in vn_alloc.
	 */
	XFS_STATS_INC(xs_ig_missed);

	version = ih->ih_version;

	read_unlock(&ih->ih_lock);

	/*
	 * Read the disk inode attributes into a new inode structure and get
	 * a new vnode for it. This should also initialize i_ino and i_mount.
	 */
	error = xfs_iread(mp, tp, ino, &ip, bno);
	if (error) {
		return error;
	}

	vn_trace_exit(vp, "xfs_iget.alloc", (inst_t *)__return_address);

	xfs_inode_lock_init(ip, vp);
	xfs_iocore_inode_init(ip);

	if (lock_flags != 0) {
		xfs_ilock(ip, lock_flags);
	}
	/*
	 * Put ip on its hash chain, unless someone else hashed a duplicate
	 * after we released the hash lock.
	 */
	write_lock(&ih->ih_lock);

	if (ih->ih_version != version) {
		for (iq = ih->ih_next; iq != NULL; iq = iq->i_next) {
			if (iq->i_ino == ino) {
				write_unlock(&ih->ih_lock);
				xfs_idestroy(ip);

				XFS_STATS_INC(xs_ig_dup);
				goto again;
			}
		}
	}

	/*
	 * These values _must_ be set before releasing ihlock!
	 */
	ip->i_hash = ih;
	if ((iq = ih->ih_next)) {
		iq->i_prevp = &ip->i_next;
	}
	ip->i_next = iq;
	ip->i_prevp = &ih->ih_next;
	ih->ih_next = ip;
	ip->i_udquot = ip->i_gdquot = NULL;
	ih->ih_version++;

	write_unlock(&ih->ih_lock);
	/*
	 * put ip on its cluster's hash chain
	 */
	ASSERT(ip->i_chash == NULL && ip->i_cprev == NULL &&
	       ip->i_cnext == NULL);

	chlnew = NULL;
	ch = XFS_CHASH(mp, ip->i_blkno);
 chlredo:
	s = mutex_spinlock(&ch->ch_lock);
	for (chl = ch->ch_list; chl != NULL; chl = chl->chl_next) {
		if (chl->chl_blkno == ip->i_blkno) {

			/* insert this inode into the doubly-linked list
			 * where chl points */
			if ((iq = chl->chl_ip)) {
				ip->i_cprev = iq->i_cprev;
				iq->i_cprev->i_cnext = ip;
				iq->i_cprev = ip;
				ip->i_cnext = iq;
			} else {
				ip->i_cnext = ip;
				ip->i_cprev = ip;
			}
			chl->chl_ip = ip;
			ip->i_chash = chl;
			break;
		}
	}
	/* no hash list found for this block; add a new hash list */
	if (chl == NULL) {
		if (chlnew == NULL) {
			mutex_spinunlock(&ch->ch_lock, s);
			ASSERT(xfs_chashlist_zone != NULL);
			chlnew = (xfs_chashlist_t *)
					kmem_zone_alloc(xfs_chashlist_zone,
						KM_SLEEP);
			ASSERT(chlnew != NULL);
			goto chlredo;
		} else {
			ip->i_cnext = ip;
			ip->i_cprev = ip;
			ip->i_chash = chlnew;
			chlnew->chl_ip = ip;
			chlnew->chl_blkno = ip->i_blkno;
			chlnew->chl_next = ch->ch_list;
			ch->ch_list = chlnew;
			chlnew = NULL;
		}
	} else {
		if (chlnew != NULL) {
			kmem_zone_free(xfs_chashlist_zone, chlnew);
		}
	}

	mutex_spinunlock(&ch->ch_lock, s);
	/*
	 * Link ip to its mount and thread it on the mount's inode list.
	 */
	XFS_MOUNT_ILOCK(mp);
	if ((iq = mp->m_inodes)) {
		ASSERT(iq->i_mprev->i_mnext == iq);
		ip->i_mprev = iq->i_mprev;
		iq->i_mprev->i_mnext = ip;
		iq->i_mprev = ip;
		ip->i_mnext = iq;
	} else {
		ip->i_mnext = ip;
		ip->i_mprev = ip;
	}
	mp->m_inodes = ip;

	XFS_MOUNT_IUNLOCK(mp);
return_ip:
	ASSERT(ip->i_df.if_ext_max ==
	       XFS_IFORK_DSIZE(ip) / sizeof(xfs_bmbt_rec_t));

	ASSERT(((ip->i_d.di_flags & XFS_DIFLAG_REALTIME) != 0) ==
	       ((ip->i_iocore.io_flags & XFS_IOCORE_RT) != 0));

	*ipp = ip;

	/*
	 * If we have a real type for an on-disk inode, we can set ops(&unlock)
	 * now.  If it's a new inode being created, xfs_ialloc will handle it.
	 */
	VFS_INIT_VNODE(XFS_MTOVFS(mp), vp, XFS_ITOBHV(ip), 1);

	return 0;
}
/*
 * The 'normal' internal xfs_iget; if needed it will
 * 'allocate', or 'get', the vnode.
 */
int
xfs_iget(
	xfs_mount_t	*mp,
	xfs_trans_t	*tp,
	xfs_ino_t	ino,
	uint		lock_flags,
	xfs_inode_t	**ipp,
	xfs_daddr_t	bno)
{
	struct inode	*inode;
	vnode_t		*vp = NULL;
	int		error;

retry:
	XFS_STATS_INC(xs_ig_attempts);

	if ((inode = iget_locked(XFS_MTOVFS(mp)->vfs_super, ino))) {
		bhv_desc_t	*bdp;
		xfs_inode_t	*ip;
		int		newnode;

		vp = LINVFS_GET_VP(inode);
		if (inode->i_state & I_NEW) {
inode_allocate:
			vn_initialize(inode);
			error = xfs_iget_core(vp, mp, tp, ino,
						lock_flags, ipp, bno);
			if (error) {
				if (inode->i_state & I_NEW)
					unlock_new_inode(inode);
				iput(inode);
			}
		} else {
			/* These are true if the inode is in inactive or
			 * reclaim. The linux inode is about to go away,
			 * wait for that path to finish, and try again.
			 */
			if (vp->v_flag & (VINACT | VRECLM)) {
				vn_wait(vp);
				iput(inode);
				goto retry;
			}

			bdp = vn_bhv_lookup(VN_BHV_HEAD(vp), &xfs_vnodeops);
			if (bdp == NULL) {
				XFS_STATS_INC(xs_ig_dup);
				goto inode_allocate;
			}
			ip = XFS_BHVTOI(bdp);
			if (lock_flags != 0)
				xfs_ilock(ip, lock_flags);
			newnode = (ip->i_d.di_mode == 0);
			if (newnode)
				xfs_iocore_inode_reinit(ip);
			XFS_STATS_INC(xs_ig_found);
			*ipp = ip;
			error = 0;
		}
	} else
		error = ENOMEM;	/* If we got no inode we are out of memory */

	return error;
}
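/*
 * A sketch of a typical caller, assuming the signature reconstructed
 * above (hypothetical, for illustration only):
 *
 *	xfs_inode_t	*ip;
 *	int		error;
 *
 *	error = xfs_iget(mp, NULL, ino, XFS_ILOCK_SHARED, &ip, 0);
 *	if (error)
 *		return error;
 *	...use ip with the inode lock held shared...
 *	xfs_iput(ip, XFS_ILOCK_SHARED);	(unlock and release, see below)
 */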
/*
 * Do the setup for the various locks within the incore inode.
 */
void
xfs_inode_lock_init(
	xfs_inode_t	*ip,
	vnode_t		*vp)
{
	mrlock_init(&ip->i_lock, MRLOCK_ALLOW_EQUAL_PRI|MRLOCK_BARRIER,
		     "xfsino", (long)vp->v_number);
	mrlock_init(&ip->i_iolock, MRLOCK_BARRIER, "xfsio", vp->v_number);
	init_waitqueue_head(&ip->i_ipin_wait);
	atomic_set(&ip->i_pincount, 0);
	init_sema(&ip->i_flock, 1, "xfsfino", vp->v_number);
}
/*
 * Look for the inode corresponding to the given ino in the hash table.
 * If it is there and its i_transp pointer matches tp, return it.
 * Otherwise, return NULL.
 */
xfs_inode_t *
xfs_inode_incore(xfs_mount_t	*mp,
		 xfs_ino_t	ino,
		 xfs_trans_t	*tp)
{
	xfs_ihash_t	*ih;
	xfs_inode_t	*ip;

	ih = XFS_IHASH(mp, ino);
	read_lock(&ih->ih_lock);
	for (ip = ih->ih_next; ip != NULL; ip = ip->i_next) {
		if (ip->i_ino == ino) {
			/*
			 * If we find it and tp matches, return it.
			 * Otherwise break from the loop and return
			 * NULL.
			 */
			if (ip->i_transp == tp) {
				read_unlock(&ih->ih_lock);
				return (ip);
			}
			break;
		}
	}
	read_unlock(&ih->ih_lock);
	return (NULL);
}
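/*
 * Illustrative use (hypothetical caller, not from the original source):
 * a transaction that may already hold the inode can probe the cache
 * before reading it again.
 *
 *	if ((ip = xfs_inode_incore(mp, ino, tp)) == NULL)
 *		error = xfs_iget(mp, tp, ino, XFS_ILOCK_EXCL, &ip, 0);
 */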
/*
 * Decrement reference count of an inode structure and unlock it.
 *
 * ip -- the inode being released
 * lock_flags -- this parameter indicates the inode's locks to be
 *	 released.  See the comment on xfs_iunlock() for a list
 *	 of valid values.
 */
void
xfs_iput(xfs_inode_t	*ip,
	 uint		lock_flags)
{
	vnode_t		*vp = XFS_ITOV(ip);

	vn_trace_entry(vp, "xfs_iput", (inst_t *)__return_address);

	xfs_iunlock(ip, lock_flags);

	VN_RELE(vp);
}
/*
 * Special iput for brand-new inodes that are still locked
 */
void
xfs_iput_new(xfs_inode_t	*ip,
	     uint		lock_flags)
{
	vnode_t		*vp = XFS_ITOV(ip);
	struct inode	*inode = LINVFS_GET_IP(vp);

	vn_trace_entry(vp, "xfs_iput_new", (inst_t *)__return_address);

	if (inode->i_state & I_NEW)
		unlock_new_inode(inode);
	if (lock_flags)
		xfs_iunlock(ip, lock_flags);
	VN_RELE(vp);
}
/*
 * This routine embodies the part of the reclaim code that pulls
 * the inode from the inode hash table and the mount structure's
 * inode list.
 * This should only be called from xfs_reclaim().
 */
void
xfs_ireclaim(xfs_inode_t *ip)
{
	vnode_t		*vp;

	/*
	 * Remove from old hash list and mount list.
	 */
	XFS_STATS_INC(xs_ig_reclaims);

	xfs_iextract(ip);

	/*
	 * Here we do a spurious inode lock in order to coordinate with
	 * xfs_sync().  This is because xfs_sync() references the inodes
	 * in the mount list without taking references on the corresponding
	 * vnodes.  We make that OK here by ensuring that we wait until
	 * the inode is unlocked in xfs_sync() before we go ahead and
	 * free it.  We get both the regular lock and the io lock because
	 * the xfs_sync() code may need to drop the regular one but will
	 * still hold the io lock.
	 */
	xfs_ilock(ip, XFS_ILOCK_EXCL | XFS_IOLOCK_EXCL);

	/*
	 * Release dquots (and their references) if any. An inode may escape
	 * xfs_inactive and get here via vn_alloc->vn_reclaim path.
	 */
	XFS_QM_DQDETACH(ip->i_mount, ip);

	/*
	 * Pull our behavior descriptor from the vnode chain.
	 */
	vp = XFS_ITOV_NULL(ip);
	if (vp) {
		vn_bhv_remove(VN_BHV_HEAD(vp), XFS_ITOBHV(ip));
	}

	/*
	 * Free all memory associated with the inode.
	 */
	xfs_idestroy(ip);
}
/*
 * This routine removes an about-to-be-destroyed inode from
 * all of the lists in which it is located with the exception
 * of the behavior chain.
 */
void
xfs_iextract(
	xfs_inode_t	*ip)
{
	xfs_ihash_t	*ih;
	xfs_inode_t	*iq;
	xfs_mount_t	*mp;
	xfs_chash_t	*ch;
	xfs_chashlist_t	*chl, *chm;
	SPLDECL(s);
	ih = ip->i_hash;
	write_lock(&ih->ih_lock);
	if ((iq = ip->i_next)) {
		iq->i_prevp = ip->i_prevp;
	}
	*ip->i_prevp = iq;
	write_unlock(&ih->ih_lock);
	/*
	 * Remove from cluster hash list
	 *   1) delete the chashlist if this is the last inode on the chashlist
	 *   2) unchain from list of inodes
	 *   3) point chashlist->chl_ip to 'chl_next' if to this inode.
	 */
	mp = ip->i_mount;
	ch = XFS_CHASH(mp, ip->i_blkno);
	s = mutex_spinlock(&ch->ch_lock);

	if (ip->i_cnext == ip) {
		/* Last inode on chashlist */
		ASSERT(ip->i_cnext == ip && ip->i_cprev == ip);
		ASSERT(ip->i_chash != NULL);
		chm = NULL;
		for (chl = ch->ch_list; chl != NULL; chl = chl->chl_next) {
			if (chl->chl_blkno == ip->i_blkno) {
				if (chm == NULL) {
					/* first item on the list */
					ch->ch_list = chl->chl_next;
				} else {
					chm->chl_next = chl->chl_next;
				}
				kmem_zone_free(xfs_chashlist_zone, chl);
				break;
			} else {
				ASSERT(chl->chl_ip != ip);
				chm = chl;
			}
		}
		ASSERT_ALWAYS(chl != NULL);
	} else {
		/* delete one inode from a non-empty list */
		iq = ip->i_cnext;
		iq->i_cprev = ip->i_cprev;
		ip->i_cprev->i_cnext = iq;
		if (ip->i_chash->chl_ip == ip) {
			ip->i_chash->chl_ip = iq;
		}
		ip->i_chash = __return_address;
		ip->i_cprev = __return_address;
		ip->i_cnext = __return_address;
	}
	mutex_spinunlock(&ch->ch_lock, s);
	/*
	 * Remove from mount's inode list.
	 */
	XFS_MOUNT_ILOCK(mp);
	ASSERT((ip->i_mnext != NULL) && (ip->i_mprev != NULL));
	iq = ip->i_mnext;
	iq->i_mprev = ip->i_mprev;
	ip->i_mprev->i_mnext = iq;

	/*
	 * Fix up the head pointer if it points to the inode being deleted.
	 */
	if (mp->m_inodes == ip) {
		if (ip == iq) {
			mp->m_inodes = NULL;
		} else {
			mp->m_inodes = iq;
		}
	}

	/* Deal with the deleted inodes list */
	list_del_init(&ip->i_reclaim);

	mp->m_ireclaims++;
	XFS_MOUNT_IUNLOCK(mp);
}
/*
 * This is a wrapper routine around the xfs_ilock() routine
 * used to centralize some grungy code.  It is used in places
 * that wish to lock the inode solely for reading the extents.
 * The reason these places can't just call xfs_ilock(SHARED)
 * is that the inode lock also guards the bringing in of the
 * extents from disk for a file in b-tree format.  If the inode
 * is in b-tree format, then we need to lock the inode exclusively
 * until the extents are read in.  Locking it exclusively all
 * the time would limit our parallelism unnecessarily, though.
 * What we do instead is check to see if the extents have been
 * read in yet, and only lock the inode exclusively if they
 * have not.
 *
 * The function returns a value which should be given to the
 * corresponding xfs_iunlock_map_shared().  This value is
 * the mode in which the lock was actually taken.
 */
uint
xfs_ilock_map_shared(
	xfs_inode_t	*ip)
{
	uint	lock_mode;

	if ((ip->i_d.di_format == XFS_DINODE_FMT_BTREE) &&
	    ((ip->i_df.if_flags & XFS_IFEXTENTS) == 0)) {
		lock_mode = XFS_ILOCK_EXCL;
	} else {
		lock_mode = XFS_ILOCK_SHARED;
	}

	xfs_ilock(ip, lock_mode);

	return lock_mode;
}
/*
 * This is simply the unlock routine to go with xfs_ilock_map_shared().
 * All it does is call xfs_iunlock() with the given lock_mode.
 */
void
xfs_iunlock_map_shared(
	xfs_inode_t	*ip,
	unsigned int	lock_mode)
{
	xfs_iunlock(ip, lock_mode);
}
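/*
 * Usage sketch (illustrative only): the returned mode must be fed back
 * to the matching unlock, since the caller cannot know whether the
 * lock was taken shared or exclusive.
 *
 *	uint	lock_mode;
 *
 *	lock_mode = xfs_ilock_map_shared(ip);
 *	...read the extent list...
 *	xfs_iunlock_map_shared(ip, lock_mode);
 */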
/*
 * The xfs inode contains 2 locks: a multi-reader lock called the
 * i_iolock and a multi-reader lock called the i_lock.  This routine
 * allows either or both of the locks to be obtained.
 *
 * The 2 locks should always be ordered so that the IO lock is
 * obtained first in order to prevent deadlock.
 *
 * ip -- the inode being locked
 * lock_flags -- this parameter indicates the inode's locks
 *       to be locked.  It can be:
 *		XFS_IOLOCK_SHARED,
 *		XFS_IOLOCK_EXCL,
 *		XFS_ILOCK_SHARED,
 *		XFS_ILOCK_EXCL,
 *		XFS_IOLOCK_SHARED | XFS_ILOCK_SHARED,
 *		XFS_IOLOCK_SHARED | XFS_ILOCK_EXCL,
 *		XFS_IOLOCK_EXCL | XFS_ILOCK_SHARED,
 *		XFS_IOLOCK_EXCL | XFS_ILOCK_EXCL
 */
void
xfs_ilock(xfs_inode_t	*ip,
	  uint		lock_flags)
{
	/*
	 * You can't set both SHARED and EXCL for the same lock,
	 * and only XFS_IOLOCK_SHARED, XFS_IOLOCK_EXCL, XFS_ILOCK_SHARED,
	 * and XFS_ILOCK_EXCL are valid values to set in lock_flags.
	 */
	ASSERT((lock_flags & (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL)) !=
	       (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL));
	ASSERT((lock_flags & (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL)) !=
	       (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL));
	ASSERT((lock_flags & ~XFS_LOCK_MASK) == 0);

	if (lock_flags & XFS_IOLOCK_EXCL) {
		mrupdate(&ip->i_iolock);
	} else if (lock_flags & XFS_IOLOCK_SHARED) {
		mraccess(&ip->i_iolock);
	}
	if (lock_flags & XFS_ILOCK_EXCL) {
		mrupdate(&ip->i_lock);
	} else if (lock_flags & XFS_ILOCK_SHARED) {
		mraccess(&ip->i_lock);
	}
	xfs_ilock_trace(ip, 1, lock_flags, (inst_t *)__return_address);
}
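/*
 * Example (hypothetical caller): taking both locks in the required
 * order with a single call, then dropping them with the same flags.
 *
 *	xfs_ilock(ip, XFS_IOLOCK_EXCL | XFS_ILOCK_EXCL);
 *	...modify the inode...
 *	xfs_iunlock(ip, XFS_IOLOCK_EXCL | XFS_ILOCK_EXCL);
 */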
/*
 * This is just like xfs_ilock(), except that the caller
 * is guaranteed not to sleep.  It returns 1 if it gets
 * the requested locks and 0 otherwise.  If the IO lock is
 * obtained but the inode lock cannot be, then the IO lock
 * is dropped before returning.
 *
 * ip -- the inode being locked
 * lock_flags -- this parameter indicates the inode's locks to be
 *	 locked.  See the comment for xfs_ilock() for a list
 *	 of valid values.
 */
int
xfs_ilock_nowait(xfs_inode_t	*ip,
		 uint		lock_flags)
{
	int	iolocked;
	int	ilocked;

	/*
	 * You can't set both SHARED and EXCL for the same lock,
	 * and only XFS_IOLOCK_SHARED, XFS_IOLOCK_EXCL, XFS_ILOCK_SHARED,
	 * and XFS_ILOCK_EXCL are valid values to set in lock_flags.
	 */
	ASSERT((lock_flags & (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL)) !=
	       (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL));
	ASSERT((lock_flags & (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL)) !=
	       (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL));
	ASSERT((lock_flags & ~XFS_LOCK_MASK) == 0);

	iolocked = 0;
	if (lock_flags & XFS_IOLOCK_EXCL) {
		iolocked = mrtryupdate(&ip->i_iolock);
		if (!iolocked) {
			return 0;
		}
	} else if (lock_flags & XFS_IOLOCK_SHARED) {
		iolocked = mrtryaccess(&ip->i_iolock);
		if (!iolocked) {
			return 0;
		}
	}
	if (lock_flags & XFS_ILOCK_EXCL) {
		ilocked = mrtryupdate(&ip->i_lock);
		if (!ilocked) {
			if (iolocked) {
				mrunlock(&ip->i_iolock);
			}
			return 0;
		}
	} else if (lock_flags & XFS_ILOCK_SHARED) {
		ilocked = mrtryaccess(&ip->i_lock);
		if (!ilocked) {
			if (iolocked) {
				mrunlock(&ip->i_iolock);
			}
			return 0;
		}
	}
	xfs_ilock_trace(ip, 2, lock_flags, (inst_t *)__return_address);
	return 1;
}
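/*
 * A non-blocking caller might use the following pattern (a sketch,
 * not from the original source), falling back to a deferred retry
 * instead of sleeping on a contended inode:
 *
 *	if (!xfs_ilock_nowait(ip, XFS_ILOCK_EXCL))
 *		return EAGAIN;		(caller retries later)
 *	...work on ip...
 *	xfs_iunlock(ip, XFS_ILOCK_EXCL);
 */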
/*
 * xfs_iunlock() is used to drop the inode locks acquired with
 * xfs_ilock() and xfs_ilock_nowait().  The caller must pass
 * in the flags given to xfs_ilock() or xfs_ilock_nowait() so
 * that we know which locks to drop.
 *
 * ip -- the inode being unlocked
 * lock_flags -- this parameter indicates the inode's locks to be
 *	 unlocked.  See the comment for xfs_ilock() for a list
 *	 of valid values for this parameter.
 */
void
xfs_iunlock(xfs_inode_t	*ip,
	    uint	lock_flags)
{
	/*
	 * You can't set both SHARED and EXCL for the same lock,
	 * and only XFS_IOLOCK_SHARED, XFS_IOLOCK_EXCL, XFS_ILOCK_SHARED,
	 * and XFS_ILOCK_EXCL are valid values to set in lock_flags.
	 */
	ASSERT((lock_flags & (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL)) !=
	       (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL));
	ASSERT((lock_flags & (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL)) !=
	       (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL));
	ASSERT((lock_flags & ~(XFS_LOCK_MASK | XFS_IUNLOCK_NONOTIFY)) == 0);
	ASSERT(lock_flags != 0);

	if (lock_flags & (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL)) {
		ASSERT(!(lock_flags & XFS_IOLOCK_SHARED) ||
		       (ismrlocked(&ip->i_iolock, MR_ACCESS)));
		ASSERT(!(lock_flags & XFS_IOLOCK_EXCL) ||
		       (ismrlocked(&ip->i_iolock, MR_UPDATE)));
		mrunlock(&ip->i_iolock);
	}

	if (lock_flags & (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL)) {
		ASSERT(!(lock_flags & XFS_ILOCK_SHARED) ||
		       (ismrlocked(&ip->i_lock, MR_ACCESS)));
		ASSERT(!(lock_flags & XFS_ILOCK_EXCL) ||
		       (ismrlocked(&ip->i_lock, MR_UPDATE)));
		mrunlock(&ip->i_lock);

		/*
		 * Let the AIL know that this item has been unlocked in case
		 * it is in the AIL and anyone is waiting on it.  Don't do
		 * this if the caller has asked us not to.
		 */
		if (!(lock_flags & XFS_IUNLOCK_NONOTIFY) &&
		     ip->i_itemp != NULL) {
			xfs_trans_unlocked_item(ip->i_mount,
						(xfs_log_item_t *)(ip->i_itemp));
		}
	}
	xfs_ilock_trace(ip, 3, lock_flags, (inst_t *)__return_address);
}
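/*
 * Illustrative only: per the comment above, a caller that knows no one
 * can be waiting on the logged item may suppress the AIL notification:
 *
 *	xfs_iunlock(ip, XFS_ILOCK_EXCL | XFS_IUNLOCK_NONOTIFY);
 */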
/*
 * give up write locks.  the i/o lock cannot be held nested
 * if it is being demoted.
 */
void
xfs_ilock_demote(xfs_inode_t	*ip,
		 uint		lock_flags)
{
	ASSERT(lock_flags & (XFS_IOLOCK_EXCL|XFS_ILOCK_EXCL));
	ASSERT((lock_flags & ~(XFS_IOLOCK_EXCL|XFS_ILOCK_EXCL)) == 0);

	if (lock_flags & XFS_ILOCK_EXCL) {
		ASSERT(ismrlocked(&ip->i_lock, MR_UPDATE));
		mrdemote(&ip->i_lock);
	}
	if (lock_flags & XFS_IOLOCK_EXCL) {
		ASSERT(ismrlocked(&ip->i_iolock, MR_UPDATE));
		mrdemote(&ip->i_iolock);
	}
}
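/*
 * Sketch of the intended use (hypothetical caller): do the short
 * exclusive setup, then demote so that concurrent readers can proceed
 * during the longer remainder of the operation.
 *
 *	xfs_ilock(ip, XFS_ILOCK_EXCL);
 *	...brief exclusive work...
 *	xfs_ilock_demote(ip, XFS_ILOCK_EXCL);
 *	...longer work under the now-shared lock...
 *	xfs_iunlock(ip, XFS_ILOCK_SHARED);
 */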
/*
 * The following three routines simply manage the i_flock
 * semaphore embedded in the inode.  This semaphore synchronizes
 * processes attempting to flush the in-core inode back to disk.
 */
void
xfs_iflock(xfs_inode_t *ip)
{
	psema(&(ip->i_flock), PINOD|PLTWAIT);
}

int
xfs_iflock_nowait(xfs_inode_t *ip)
{
	return (cpsema(&(ip->i_flock)));
}

void
xfs_ifunlock(xfs_inode_t *ip)
{
	ASSERT(valusema(&(ip->i_flock)) <= 0);
	vsema(&(ip->i_flock));
}
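/*
 * Typical pairing (an illustrative sketch, not from the original
 * source): serialize an inode flush against other flushers without
 * sleeping when the flush lock is contended.
 *
 *	if (xfs_iflock_nowait(ip)) {
 *		...flush the in-core inode to its buffer...
 *		xfs_ifunlock(ip);
 *	}
 */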