fs/xfs/xfs_iget.c

/*
 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 */

#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_types.h"
#include "xfs_bit.h"
#include "xfs_log.h"
#include "xfs_inum.h"
#include "xfs_trans.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
#include "xfs_dir2.h"
#include "xfs_dmapi.h"
#include "xfs_mount.h"
#include "xfs_bmap_btree.h"
#include "xfs_alloc_btree.h"
#include "xfs_ialloc_btree.h"
#include "xfs_dir2_sf.h"
#include "xfs_attr_sf.h"
#include "xfs_dinode.h"
#include "xfs_inode.h"
#include "xfs_btree.h"
#include "xfs_ialloc.h"
#include "xfs_quota.h"
#include "xfs_utils.h"

/*
 * Look up an inode by number in the given file system.
 * The inode is looked up in the cache held in each AG.
 * If the inode is found in the cache, attach it to the provided
 * vnode.
 *
 * If it is not in core, read it in from the file system's device,
 * add it to the cache and attach the provided vnode.
 *
 * The inode is locked according to the value of the lock_flags parameter.
 * This flag parameter indicates how and if the inode's IO lock and inode lock
 * should be taken.
 *
 * mp -- the mount point structure for the current file system.  It points
 *       to the inode hash table.
 * tp -- a pointer to the current transaction if there is one.  This is
 *       simply passed through to the xfs_iread() call.
 * ino -- the number of the inode desired.  This is the unique identifier
 *        within the file system for the inode being requested.
 * lock_flags -- flags indicating how to lock the inode.  See the comment
 *               for xfs_ilock() for a list of valid values.
 * bno -- the block number starting the buffer containing the inode,
 *        if known (as by bulkstat), else 0.
 */
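
/*
 * Illustrative sketch (not part of the original source): the lookup below
 * keys the per-AG radix tree by the AG-relative inode number, so a caller
 * holding only a filesystem-wide inode number conceptually resolves it in
 * two steps, e.g.:
 *
 *	xfs_agnumber_t	agno  = XFS_INO_TO_AGNO(mp, ino);	(which AG)
 *	xfs_agino_t	agino = XFS_INO_TO_AGINO(mp, ino);	(index within it)
 *
 * xfs_get_perag() performs the first step internally; only agino is used
 * as the radix tree index.
 */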
STATIC int
xfs_iget_core(
	bhv_vnode_t	*vp,
	xfs_mount_t	*mp,
	xfs_trans_t	*tp,
	xfs_ino_t	ino,
	uint		flags,
	uint		lock_flags,
	xfs_inode_t	**ipp,
	xfs_daddr_t	bno)
{
	xfs_inode_t	*ip;
	xfs_inode_t	*iq;
	bhv_vnode_t	*inode_vp;
	int		error;
	xfs_icluster_t	*icl, *new_icl = NULL;
	unsigned long	first_index, mask;
	xfs_perag_t	*pag;
	xfs_agino_t	agino;

	/* the radix tree exists only in inode capable AGs */
	if (XFS_INO_TO_AGNO(mp, ino) >= mp->m_maxagi)
		return EINVAL;

	/* get the perag structure and ensure that it's inode capable */
	pag = xfs_get_perag(mp, ino);
	if (!pag->pagi_inodeok)
		return EINVAL;
	ASSERT(pag->pag_ici_init);
	agino = XFS_INO_TO_AGINO(mp, ino);

again:
	read_lock(&pag->pag_ici_lock);
	ip = radix_tree_lookup(&pag->pag_ici_root, agino);

	if (ip != NULL) {
		/*
		 * If INEW is set this inode is being set up;
		 * we need to pause and try again.
		 */
		if (xfs_iflags_test(ip, XFS_INEW)) {
			read_unlock(&pag->pag_ici_lock);
			delay(1);
			XFS_STATS_INC(xs_ig_frecycle);

			goto again;
		}

		inode_vp = XFS_ITOV_NULL(ip);
		if (inode_vp == NULL) {
			/*
			 * If IRECLAIM is set this inode is
			 * on its way out of the system;
			 * we need to pause and try again.
			 */
			if (xfs_iflags_test(ip, XFS_IRECLAIM)) {
				read_unlock(&pag->pag_ici_lock);
				delay(1);
				XFS_STATS_INC(xs_ig_frecycle);

				goto again;
			}
			ASSERT(xfs_iflags_test(ip, XFS_IRECLAIMABLE));

			/*
			 * If lookup is racing with unlink, then we
			 * should return an error immediately so we
			 * don't remove it from the reclaim list and
			 * potentially leak the inode.
			 */
			if ((ip->i_d.di_mode == 0) &&
			    !(flags & XFS_IGET_CREATE)) {
				read_unlock(&pag->pag_ici_lock);
				xfs_put_perag(mp, pag);
				return ENOENT;
			}

			/*
			 * There may be transactions sitting in the
			 * incore log buffers or being flushed to disk
			 * at this time.  We can't clear the
			 * XFS_IRECLAIMABLE flag until these
			 * transactions have hit the disk, otherwise we
			 * will void the guarantee the flag provides
			 * to xfs_iunpin().
			 */
			if (xfs_ipincount(ip)) {
				read_unlock(&pag->pag_ici_lock);
				xfs_log_force(mp, 0,
					XFS_LOG_FORCE|XFS_LOG_SYNC);
				XFS_STATS_INC(xs_ig_frecycle);
				goto again;
			}

			vn_trace_exit(ip, "xfs_iget.alloc",
				(inst_t *)__return_address);

			XFS_STATS_INC(xs_ig_found);

			xfs_iflags_clear(ip, XFS_IRECLAIMABLE);
			read_unlock(&pag->pag_ici_lock);

			XFS_MOUNT_ILOCK(mp);
			list_del_init(&ip->i_reclaim);
			XFS_MOUNT_IUNLOCK(mp);

			goto finish_inode;

		} else if (vp != inode_vp) {
			struct inode *inode = vn_to_inode(inode_vp);

			/* The inode is being torn down, pause and
			 * try again.
			 */
			if (inode->i_state & (I_FREEING | I_CLEAR)) {
				read_unlock(&pag->pag_ici_lock);
				delay(1);
				XFS_STATS_INC(xs_ig_frecycle);

				goto again;
			}
			/* Chances are the other vnode (the one in the inode) is being torn
			 * down right now, and we landed on top of it. Question is, what do
			 * we do? Unhook the old inode and hook up the new one?
			 */
			cmn_err(CE_PANIC,
				"xfs_iget_core: ambiguous vns: vp/0x%p, invp/0x%p",
				inode_vp, vp);
		}

		/*
		 * Inode cache hit
		 */
		read_unlock(&pag->pag_ici_lock);
		XFS_STATS_INC(xs_ig_found);

finish_inode:
		if (ip->i_d.di_mode == 0) {
			if (!(flags & XFS_IGET_CREATE)) {
				xfs_put_perag(mp, pag);
				return ENOENT;
			}
			xfs_iocore_inode_reinit(ip);
		}

		if (lock_flags != 0)
			xfs_ilock(ip, lock_flags);

		xfs_iflags_clear(ip, XFS_ISTALE);
		vn_trace_exit(ip, "xfs_iget.found",
				(inst_t *)__return_address);
		goto return_ip;
	}

	/*
	 * Inode cache miss
	 */
	read_unlock(&pag->pag_ici_lock);
	XFS_STATS_INC(xs_ig_missed);

	/*
	 * Read the disk inode attributes into a new inode structure and get
	 * a new vnode for it. This should also initialize i_ino and i_mount.
	 */
	error = xfs_iread(mp, tp, ino, &ip, bno,
			  (flags & XFS_IGET_BULKSTAT) ? XFS_IMAP_BULKSTAT : 0);
	if (error) {
		xfs_put_perag(mp, pag);
		return error;
	}

	vn_trace_exit(ip, "xfs_iget.alloc", (inst_t *)__return_address);

	xfs_inode_lock_init(ip, vp);
	xfs_iocore_inode_init(ip);
	if (lock_flags)
		xfs_ilock(ip, lock_flags);

	if ((ip->i_d.di_mode == 0) && !(flags & XFS_IGET_CREATE)) {
		xfs_idestroy(ip);
		xfs_put_perag(mp, pag);
		return ENOENT;
	}

	/*
	 * This is a bit messy - we preallocate everything we _might_
	 * need before we pick up the ici lock. That way we don't have to
	 * juggle locks and go all the way back to the start.
	 */
	new_icl = kmem_zone_alloc(xfs_icluster_zone, KM_SLEEP);
	if (radix_tree_preload(GFP_KERNEL)) {
		delay(1);
		goto again;
	}
	mask = ~(((XFS_INODE_CLUSTER_SIZE(mp) >> mp->m_sb.sb_inodelog)) - 1);
	first_index = agino & mask;
	write_lock(&pag->pag_ici_lock);

	/*
	 * Find the cluster if it exists
	 */
	icl = NULL;
	if (radix_tree_gang_lookup(&pag->pag_ici_root, (void**)&iq,
					first_index, 1)) {
		if ((XFS_INO_TO_AGINO(mp, iq->i_ino) & mask) == first_index)
			icl = iq->i_cluster;
	}

	/*
	 * insert the new inode
	 */
	error = radix_tree_insert(&pag->pag_ici_root, agino, ip);
	if (unlikely(error)) {
		BUG_ON(error != -EEXIST);
		write_unlock(&pag->pag_ici_lock);
		radix_tree_preload_end();
		xfs_idestroy(ip);
		XFS_STATS_INC(xs_ig_dup);
		goto again;
	}

	/*
	 * These values _must_ be set before releasing ihlock!
	 */
	ip->i_udquot = ip->i_gdquot = NULL;
	xfs_iflags_set(ip, XFS_INEW);

	ASSERT(ip->i_cluster == NULL);

	if (!icl) {
		spin_lock_init(&new_icl->icl_lock);
		INIT_HLIST_HEAD(&new_icl->icl_inodes);
		icl = new_icl;
		new_icl = NULL;
	} else {
		ASSERT(!hlist_empty(&icl->icl_inodes));
	}
	spin_lock(&icl->icl_lock);
	hlist_add_head(&ip->i_cnode, &icl->icl_inodes);
	ip->i_cluster = icl;
	spin_unlock(&icl->icl_lock);

	write_unlock(&pag->pag_ici_lock);
	radix_tree_preload_end();
	if (new_icl)
		kmem_zone_free(xfs_icluster_zone, new_icl);

	/*
	 * Link ip to its mount and thread it on the mount's inode list.
	 */
	XFS_MOUNT_ILOCK(mp);
	if ((iq = mp->m_inodes)) {
		ASSERT(iq->i_mprev->i_mnext == iq);
		ip->i_mprev = iq->i_mprev;
		iq->i_mprev->i_mnext = ip;
		iq->i_mprev = ip;
		ip->i_mnext = iq;
	} else {
		ip->i_mnext = ip;
		ip->i_mprev = ip;
	}
	mp->m_inodes = ip;

	XFS_MOUNT_IUNLOCK(mp);
	xfs_put_perag(mp, pag);

 return_ip:
	ASSERT(ip->i_df.if_ext_max ==
	       XFS_IFORK_DSIZE(ip) / sizeof(xfs_bmbt_rec_t));

	ASSERT(((ip->i_d.di_flags & XFS_DIFLAG_REALTIME) != 0) ==
	       ((ip->i_iocore.io_flags & XFS_IOCORE_RT) != 0));

	xfs_iflags_set(ip, XFS_IMODIFIED);
	*ipp = ip;

	/*
	 * If we have a real type for an on-disk inode, we can set ops(&unlock)
	 * now.  If it's a new inode being created, xfs_ialloc will handle it.
	 */
	xfs_initialize_vnode(mp, vp, ip);
	return 0;
}
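
/*
 * Illustrative sketch (not from the original source): the insertion above
 * follows the usual radix tree pattern of preallocating nodes outside the
 * lock and inserting under it, roughly:
 *
 *	if (radix_tree_preload(GFP_KERNEL))
 *		... back off and retry ...
 *	write_lock(&pag->pag_ici_lock);
 *	error = radix_tree_insert(&pag->pag_ici_root, agino, ip);
 *	write_unlock(&pag->pag_ici_lock);
 *	radix_tree_preload_end();
 *
 * so that no allocation has to happen while pag_ici_lock is held.
 */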

/*
 * The 'normal' internal xfs_iget; if needed it will
 * 'allocate', or 'get', the vnode.
 */
int
xfs_iget(
	xfs_mount_t	*mp,
	xfs_trans_t	*tp,
	xfs_ino_t	ino,
	uint		flags,
	uint		lock_flags,
	xfs_inode_t	**ipp,
	xfs_daddr_t	bno)
{
	struct inode	*inode;
	bhv_vnode_t	*vp = NULL;
	int		error;

	XFS_STATS_INC(xs_ig_attempts);

retry:
	inode = iget_locked(mp->m_super, ino);
	if (inode) {
		xfs_inode_t	*ip;

		vp = vn_from_inode(inode);
		if (inode->i_state & I_NEW) {
			vn_initialize(inode);
			error = xfs_iget_core(vp, mp, tp, ino, flags,
					lock_flags, ipp, bno);
			if (error) {
				vn_mark_bad(vp);
				if (inode->i_state & I_NEW)
					unlock_new_inode(inode);
				iput(inode);
			}
		} else {
			/*
			 * If the inode is not fully constructed due to
			 * filehandle mismatches wait for the inode to go
			 * away and try again.
			 *
			 * iget_locked will call __wait_on_freeing_inode
			 * to wait for the inode to go away.
			 */
			if (is_bad_inode(inode) ||
			    ((ip = xfs_vtoi(vp)) == NULL)) {
				iput(inode);
				delay(1);
				goto retry;
			}

			if (lock_flags != 0)
				xfs_ilock(ip, lock_flags);
			XFS_STATS_INC(xs_ig_found);
			*ipp = ip;
			error = 0;
		}
	} else
		error = ENOMEM;	/* If we got no inode we are out of memory */

	return error;
}
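
/*
 * Illustrative sketch (hypothetical caller, not part of this file): code
 * that needs an inode typically pairs xfs_iget() with xfs_iput(), passing
 * the same lock_flags to both, e.g.:
 *
 *	xfs_inode_t	*ip;
 *	int		error;
 *
 *	error = xfs_iget(mp, NULL, ino, 0, XFS_ILOCK_SHARED, &ip, 0);
 *	if (!error) {
 *		... examine the inode under XFS_ILOCK_SHARED ...
 *		xfs_iput(ip, XFS_ILOCK_SHARED);
 *	}
 */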

/*
 * Do the setup for the various locks within the incore inode.
 */
void
xfs_inode_lock_init(
	xfs_inode_t	*ip,
	bhv_vnode_t	*vp)
{
	mrlock_init(&ip->i_lock, MRLOCK_ALLOW_EQUAL_PRI|MRLOCK_BARRIER,
		     "xfsino", ip->i_ino);
	mrlock_init(&ip->i_iolock, MRLOCK_BARRIER, "xfsio", ip->i_ino);
	init_waitqueue_head(&ip->i_ipin_wait);
	atomic_set(&ip->i_pincount, 0);
	initnsema(&ip->i_flock, 1, "xfsfino");
}

/*
 * Look for the inode corresponding to the given ino in the hash table.
 * If it is there and its i_transp pointer matches tp, return it.
 * Otherwise, return NULL.
 */
xfs_inode_t *
xfs_inode_incore(xfs_mount_t	*mp,
		 xfs_ino_t	ino,
		 xfs_trans_t	*tp)
{
	xfs_inode_t	*ip;
	xfs_perag_t	*pag;

	pag = xfs_get_perag(mp, ino);
	read_lock(&pag->pag_ici_lock);
	ip = radix_tree_lookup(&pag->pag_ici_root, XFS_INO_TO_AGINO(mp, ino));
	read_unlock(&pag->pag_ici_lock);
	xfs_put_perag(mp, pag);

	/* the returned inode must match the transaction */
	if (ip && (ip->i_transp != tp))
		return NULL;
	return ip;
}
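
/*
 * Illustrative sketch (hypothetical caller, not part of this file): the
 * transaction code can use this to find out whether an inode is already
 * in core and attached to the transaction it is building, e.g.:
 *
 *	if ((ip = xfs_inode_incore(mp, ino, tp)) != NULL) {
 *		... the inode is already joined to tp ...
 *	}
 */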

/*
 * Decrement reference count of an inode structure and unlock it.
 *
 * ip -- the inode being released
 * lock_flags -- this parameter indicates the inode's locks to be
 *	 released.  See the comment on xfs_iunlock() for a list
 *	 of valid values.
 */
void
xfs_iput(xfs_inode_t	*ip,
	 uint		lock_flags)
{
	bhv_vnode_t	*vp = XFS_ITOV(ip);

	vn_trace_entry(ip, "xfs_iput", (inst_t *)__return_address);
	xfs_iunlock(ip, lock_flags);
	VN_RELE(vp);
}

/*
 * Special iput for brand-new inodes that are still locked
 */
void
xfs_iput_new(xfs_inode_t	*ip,
	     uint		lock_flags)
{
	bhv_vnode_t	*vp = XFS_ITOV(ip);
	struct inode	*inode = vn_to_inode(vp);

	vn_trace_entry(ip, "xfs_iput_new", (inst_t *)__return_address);

	if ((ip->i_d.di_mode == 0)) {
		ASSERT(!xfs_iflags_test(ip, XFS_IRECLAIMABLE));
		vn_mark_bad(vp);
	}
	if (inode->i_state & I_NEW)
		unlock_new_inode(inode);
	if (lock_flags)
		xfs_iunlock(ip, lock_flags);
	VN_RELE(vp);
}

/*
 * This routine embodies the part of the reclaim code that pulls
 * the inode from the inode hash table and the mount structure's
 * inode list.
 * This should only be called from xfs_reclaim().
 */
void
xfs_ireclaim(xfs_inode_t *ip)
{
	bhv_vnode_t	*vp;

	/*
	 * Remove from old hash list and mount list.
	 */
	XFS_STATS_INC(xs_ig_reclaims);

	xfs_iextract(ip);

	/*
	 * Here we do a spurious inode lock in order to coordinate with
	 * xfs_sync().  This is because xfs_sync() references the inodes
	 * in the mount list without taking references on the corresponding
	 * vnodes.  We make that OK here by ensuring that we wait until
	 * the inode is unlocked in xfs_sync() before we go ahead and
	 * free it.  We get both the regular lock and the io lock because
	 * the xfs_sync() code may need to drop the regular one but will
	 * still hold the io lock.
	 */
	xfs_ilock(ip, XFS_ILOCK_EXCL | XFS_IOLOCK_EXCL);

	/*
	 * Release dquots (and their references) if any. An inode may escape
	 * xfs_inactive and get here via vn_alloc->vn_reclaim path.
	 */
	XFS_QM_DQDETACH(ip->i_mount, ip);

	/*
	 * Pull our behavior descriptor from the vnode chain.
	 */
	vp = XFS_ITOV_NULL(ip);
	if (vp) {
		vn_to_inode(vp)->i_private = NULL;
		ip->i_vnode = NULL;
	}

	/*
	 * Free all memory associated with the inode.
	 */
	xfs_iunlock(ip, XFS_ILOCK_EXCL | XFS_IOLOCK_EXCL);
	xfs_idestroy(ip);
}

/*
 * This routine removes an about-to-be-destroyed inode from
 * all of the lists in which it is located with the exception
 * of the behavior chain.
 */
void
xfs_iextract(
	xfs_inode_t	*ip)
{
	xfs_mount_t	*mp = ip->i_mount;
	xfs_perag_t	*pag = xfs_get_perag(mp, ip->i_ino);
	xfs_inode_t	*iq;

	write_lock(&pag->pag_ici_lock);
	radix_tree_delete(&pag->pag_ici_root, XFS_INO_TO_AGINO(mp, ip->i_ino));
	write_unlock(&pag->pag_ici_lock);
	xfs_put_perag(mp, pag);

	/*
	 * Remove from cluster list
	 */
	mp = ip->i_mount;
	spin_lock(&ip->i_cluster->icl_lock);
	hlist_del(&ip->i_cnode);
	spin_unlock(&ip->i_cluster->icl_lock);

	/* was last inode in cluster? */
	if (hlist_empty(&ip->i_cluster->icl_inodes))
		kmem_zone_free(xfs_icluster_zone, ip->i_cluster);

	/*
	 * Remove from mount's inode list.
	 */
	XFS_MOUNT_ILOCK(mp);
	ASSERT((ip->i_mnext != NULL) && (ip->i_mprev != NULL));
	iq = ip->i_mnext;
	iq->i_mprev = ip->i_mprev;
	ip->i_mprev->i_mnext = iq;

	/*
	 * Fix up the head pointer if it points to the inode being deleted.
	 */
	if (mp->m_inodes == ip) {
		if (ip == iq) {
			mp->m_inodes = NULL;
		} else {
			mp->m_inodes = iq;
		}
	}

	/* Deal with the deleted inodes list */
	list_del_init(&ip->i_reclaim);

	mp->m_ireclaims++;
	XFS_MOUNT_IUNLOCK(mp);
}

/*
 * This is a wrapper routine around the xfs_ilock() routine
 * used to centralize some grungy code.  It is used in places
 * that wish to lock the inode solely for reading the extents.
 * The reason these places can't just call xfs_ilock(SHARED)
 * is that the inode lock also guards the bringing in of the
 * extents from disk for a file in b-tree format.  If the inode
 * is in b-tree format, then we need to lock the inode exclusively
 * until the extents are read in.  Locking it exclusively all
 * the time would limit our parallelism unnecessarily, though.
 * What we do instead is check to see if the extents have been
 * read in yet, and only lock the inode exclusively if they
 * have not.
 *
 * The function returns a value which should be given to the
 * corresponding xfs_iunlock_map_shared().  This value is
 * the mode in which the lock was actually taken.
 */
uint
xfs_ilock_map_shared(
	xfs_inode_t	*ip)
{
	uint	lock_mode;

	if ((ip->i_d.di_format == XFS_DINODE_FMT_BTREE) &&
	    ((ip->i_df.if_flags & XFS_IFEXTENTS) == 0)) {
		lock_mode = XFS_ILOCK_EXCL;
	} else {
		lock_mode = XFS_ILOCK_SHARED;
	}

	xfs_ilock(ip, lock_mode);

	return lock_mode;
}
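
/*
 * Illustrative sketch (hypothetical caller, not part of this file): the
 * returned mode must be handed back to xfs_iunlock_map_shared() so the
 * lock is dropped in whatever mode it was actually taken, e.g.:
 *
 *	uint	lock_mode;
 *
 *	lock_mode = xfs_ilock_map_shared(ip);
 *	... read the extent list ...
 *	xfs_iunlock_map_shared(ip, lock_mode);
 */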

/*
 * This is simply the unlock routine to go with xfs_ilock_map_shared().
 * All it does is call xfs_iunlock() with the given lock_mode.
 */
void
xfs_iunlock_map_shared(
	xfs_inode_t	*ip,
	unsigned int	lock_mode)
{
	xfs_iunlock(ip, lock_mode);
}

/*
 * The xfs inode contains 2 locks: a multi-reader lock called the
 * i_iolock and a multi-reader lock called the i_lock.  This routine
 * allows either or both of the locks to be obtained.
 *
 * The 2 locks should always be ordered so that the IO lock is
 * obtained first in order to prevent deadlock.
 *
 * ip -- the inode being locked
 * lock_flags -- this parameter indicates the inode's locks
 *       to be locked.  It can be:
 *		XFS_IOLOCK_SHARED,
 *		XFS_IOLOCK_EXCL,
 *		XFS_ILOCK_SHARED,
 *		XFS_ILOCK_EXCL,
 *		XFS_IOLOCK_SHARED | XFS_ILOCK_SHARED,
 *		XFS_IOLOCK_SHARED | XFS_ILOCK_EXCL,
 *		XFS_IOLOCK_EXCL | XFS_ILOCK_SHARED,
 *		XFS_IOLOCK_EXCL | XFS_ILOCK_EXCL
 */
void
xfs_ilock(xfs_inode_t	*ip,
	  uint		lock_flags)
{
	/*
	 * You can't set both SHARED and EXCL for the same lock,
	 * and only XFS_IOLOCK_SHARED, XFS_IOLOCK_EXCL, XFS_ILOCK_SHARED,
	 * and XFS_ILOCK_EXCL are valid values to set in lock_flags.
	 */
	ASSERT((lock_flags & (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL)) !=
	       (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL));
	ASSERT((lock_flags & (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL)) !=
	       (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL));
	ASSERT((lock_flags & ~(XFS_LOCK_MASK | XFS_LOCK_DEP_MASK)) == 0);

	if (lock_flags & XFS_IOLOCK_EXCL) {
		mrupdate_nested(&ip->i_iolock, XFS_IOLOCK_DEP(lock_flags));
	} else if (lock_flags & XFS_IOLOCK_SHARED) {
		mraccess_nested(&ip->i_iolock, XFS_IOLOCK_DEP(lock_flags));
	}
	if (lock_flags & XFS_ILOCK_EXCL) {
		mrupdate_nested(&ip->i_lock, XFS_ILOCK_DEP(lock_flags));
	} else if (lock_flags & XFS_ILOCK_SHARED) {
		mraccess_nested(&ip->i_lock, XFS_ILOCK_DEP(lock_flags));
	}
	xfs_ilock_trace(ip, 1, lock_flags, (inst_t *)__return_address);
}
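
/*
 * Illustrative sketch (hypothetical caller, not part of this file): a
 * caller that needs both locks takes them in one call so the IO lock is
 * acquired before the inode lock, and releases them with the same flags:
 *
 *	xfs_ilock(ip, XFS_IOLOCK_EXCL | XFS_ILOCK_EXCL);
 *	... modify the inode and its data ...
 *	xfs_iunlock(ip, XFS_IOLOCK_EXCL | XFS_ILOCK_EXCL);
 */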

/*
 * This is just like xfs_ilock(), except that the caller
 * is guaranteed not to sleep.  It returns 1 if it gets
 * the requested locks and 0 otherwise.  If the IO lock is
 * obtained but the inode lock cannot be, then the IO lock
 * is dropped before returning.
 *
 * ip -- the inode being locked
 * lock_flags -- this parameter indicates the inode's locks to be
 *	 locked.  See the comment for xfs_ilock() for a list
 *	 of valid values.
 */
int
xfs_ilock_nowait(xfs_inode_t	*ip,
		 uint		lock_flags)
{
	int	iolocked;
	int	ilocked;

	/*
	 * You can't set both SHARED and EXCL for the same lock,
	 * and only XFS_IOLOCK_SHARED, XFS_IOLOCK_EXCL, XFS_ILOCK_SHARED,
	 * and XFS_ILOCK_EXCL are valid values to set in lock_flags.
	 */
	ASSERT((lock_flags & (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL)) !=
	       (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL));
	ASSERT((lock_flags & (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL)) !=
	       (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL));
	ASSERT((lock_flags & ~(XFS_LOCK_MASK | XFS_LOCK_DEP_MASK)) == 0);

	iolocked = 0;
	if (lock_flags & XFS_IOLOCK_EXCL) {
		iolocked = mrtryupdate(&ip->i_iolock);
		if (!iolocked) {
			return 0;
		}
	} else if (lock_flags & XFS_IOLOCK_SHARED) {
		iolocked = mrtryaccess(&ip->i_iolock);
		if (!iolocked) {
			return 0;
		}
	}
	if (lock_flags & XFS_ILOCK_EXCL) {
		ilocked = mrtryupdate(&ip->i_lock);
		if (!ilocked) {
			if (iolocked) {
				mrunlock(&ip->i_iolock);
			}
			return 0;
		}
	} else if (lock_flags & XFS_ILOCK_SHARED) {
		ilocked = mrtryaccess(&ip->i_lock);
		if (!ilocked) {
			if (iolocked) {
				mrunlock(&ip->i_iolock);
			}
			return 0;
		}
	}
	xfs_ilock_trace(ip, 2, lock_flags, (inst_t *)__return_address);
	return 1;
}
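
/*
 * Illustrative sketch (hypothetical caller, not part of this file): callers
 * that must not block try the lock and back off gracefully when it cannot
 * be had:
 *
 *	if (!xfs_ilock_nowait(ip, XFS_ILOCK_SHARED))
 *		return;		(could not get the lock, skip this inode)
 *	... do the non-blocking work ...
 *	xfs_iunlock(ip, XFS_ILOCK_SHARED);
 */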

/*
 * xfs_iunlock() is used to drop the inode locks acquired with
 * xfs_ilock() and xfs_ilock_nowait().  The caller must pass
 * in the flags given to xfs_ilock() or xfs_ilock_nowait() so
 * that we know which locks to drop.
 *
 * ip -- the inode being unlocked
 * lock_flags -- this parameter indicates the inode's locks to be
 *	 unlocked.  See the comment for xfs_ilock() for a list
 *	 of valid values for this parameter.
 *
 */
void
xfs_iunlock(xfs_inode_t	*ip,
	    uint	lock_flags)
{
	/*
	 * You can't set both SHARED and EXCL for the same lock,
	 * and only XFS_IOLOCK_SHARED, XFS_IOLOCK_EXCL, XFS_ILOCK_SHARED,
	 * and XFS_ILOCK_EXCL are valid values to set in lock_flags.
	 */
	ASSERT((lock_flags & (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL)) !=
	       (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL));
	ASSERT((lock_flags & (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL)) !=
	       (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL));
	ASSERT((lock_flags & ~(XFS_LOCK_MASK | XFS_IUNLOCK_NONOTIFY |
			XFS_LOCK_DEP_MASK)) == 0);
	ASSERT(lock_flags != 0);

	if (lock_flags & (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL)) {
		ASSERT(!(lock_flags & XFS_IOLOCK_SHARED) ||
		       (ismrlocked(&ip->i_iolock, MR_ACCESS)));
		ASSERT(!(lock_flags & XFS_IOLOCK_EXCL) ||
		       (ismrlocked(&ip->i_iolock, MR_UPDATE)));
		mrunlock(&ip->i_iolock);
	}

	if (lock_flags & (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL)) {
		ASSERT(!(lock_flags & XFS_ILOCK_SHARED) ||
		       (ismrlocked(&ip->i_lock, MR_ACCESS)));
		ASSERT(!(lock_flags & XFS_ILOCK_EXCL) ||
		       (ismrlocked(&ip->i_lock, MR_UPDATE)));
		mrunlock(&ip->i_lock);

		/*
		 * Let the AIL know that this item has been unlocked in case
		 * it is in the AIL and anyone is waiting on it.  Don't do
		 * this if the caller has asked us not to.
		 */
		if (!(lock_flags & XFS_IUNLOCK_NONOTIFY) &&
		     ip->i_itemp != NULL) {
			xfs_trans_unlocked_item(ip->i_mount,
					(xfs_log_item_t*)(ip->i_itemp));
		}
	}
	xfs_ilock_trace(ip, 3, lock_flags, (inst_t *)__return_address);
}

/*
 * give up write locks.  the i/o lock cannot be held nested
 * if it is being demoted.
 */
void
xfs_ilock_demote(xfs_inode_t	*ip,
		 uint		lock_flags)
{
	ASSERT(lock_flags & (XFS_IOLOCK_EXCL|XFS_ILOCK_EXCL));
	ASSERT((lock_flags & ~(XFS_IOLOCK_EXCL|XFS_ILOCK_EXCL)) == 0);

	if (lock_flags & XFS_ILOCK_EXCL) {
		ASSERT(ismrlocked(&ip->i_lock, MR_UPDATE));
		mrdemote(&ip->i_lock);
	}
	if (lock_flags & XFS_IOLOCK_EXCL) {
		ASSERT(ismrlocked(&ip->i_iolock, MR_UPDATE));
		mrdemote(&ip->i_iolock);
	}
}

/*
 * The following three routines simply manage the i_flock
 * semaphore embedded in the inode.  This semaphore synchronizes
 * processes attempting to flush the in-core inode back to disk.
 */
void
xfs_iflock(xfs_inode_t *ip)
{
	psema(&(ip->i_flock), PINOD|PLTWAIT);
}

int
xfs_iflock_nowait(xfs_inode_t *ip)
{
	return (cpsema(&(ip->i_flock)));
}

void
xfs_ifunlock(xfs_inode_t *ip)
{
	ASSERT(issemalocked(&(ip->i_flock)));
	vsema(&(ip->i_flock));
}
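
/*
 * Illustrative sketch (hypothetical caller, not part of this file): the
 * flush lock is taken around writing the in-core inode to its backing
 * buffer and released when that I/O completes, roughly:
 *
 *	xfs_ilock(ip, XFS_ILOCK_SHARED);
 *	if (xfs_iflock_nowait(ip)) {
 *		... flush the inode; the flush completion calls xfs_ifunlock() ...
 *	}
 *	xfs_iunlock(ip, XFS_ILOCK_SHARED);
 */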