fs/xfs/xfs_iget.c
/*
 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_types.h"
#include "xfs_acl.h"
#include "xfs_bit.h"
#include "xfs_log.h"
#include "xfs_inum.h"
#include "xfs_trans.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
#include "xfs_dir2.h"
#include "xfs_dmapi.h"
#include "xfs_mount.h"
#include "xfs_bmap_btree.h"
#include "xfs_alloc_btree.h"
#include "xfs_ialloc_btree.h"
#include "xfs_dir2_sf.h"
#include "xfs_attr_sf.h"
#include "xfs_dinode.h"
#include "xfs_inode.h"
#include "xfs_btree.h"
#include "xfs_ialloc.h"
#include "xfs_quota.h"
#include "xfs_utils.h"
#include "xfs_trans_priv.h"
#include "xfs_inode_item.h"
#include "xfs_bmap.h"
#include "xfs_btree_trace.h"
#include "xfs_trace.h"

/*
 * Allocate and initialise an xfs_inode.
 */
STATIC struct xfs_inode *
xfs_inode_alloc(
	struct xfs_mount	*mp,
	xfs_ino_t		ino)
{
	struct xfs_inode	*ip;

	/*
	 * if this didn't occur in transactions, we could use
	 * KM_MAYFAIL and return NULL here on ENOMEM. Set the
	 * code up to do this anyway.
	 */
	ip = kmem_zone_alloc(xfs_inode_zone, KM_SLEEP);
	if (!ip)
		return NULL;
	if (inode_init_always(mp->m_super, VFS_I(ip))) {
		kmem_zone_free(xfs_inode_zone, ip);
		return NULL;
	}

	ASSERT(atomic_read(&ip->i_iocount) == 0);
	ASSERT(atomic_read(&ip->i_pincount) == 0);
	ASSERT(!spin_is_locked(&ip->i_flags_lock));
	ASSERT(completion_done(&ip->i_flush));

	mrlock_init(&ip->i_iolock, MRLOCK_BARRIER, "xfsio", ip->i_ino);

	/* initialise the xfs inode */
	ip->i_ino = ino;
	ip->i_mount = mp;
	memset(&ip->i_imap, 0, sizeof(struct xfs_imap));
	ip->i_afp = NULL;
	memset(&ip->i_df, 0, sizeof(xfs_ifork_t));
	ip->i_flags = 0;
	ip->i_update_core = 0;
	ip->i_delayed_blks = 0;
	memset(&ip->i_d, 0, sizeof(xfs_icdinode_t));
	ip->i_size = 0;
	ip->i_new_size = 0;

	/* prevent anyone from using this yet */
	VFS_I(ip)->i_state = I_NEW;

	return ip;
}

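/*
 * Free an xfs_inode previously set up by xfs_inode_alloc(): tear down
 * the data and attribute forks, detach the inode log item from the AIL
 * if necessary, and return the structure to the inode zone.
 */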
STATIC void
xfs_inode_free(
	struct xfs_inode	*ip)
{
	switch (ip->i_d.di_mode & S_IFMT) {
	case S_IFREG:
	case S_IFDIR:
	case S_IFLNK:
		xfs_idestroy_fork(ip, XFS_DATA_FORK);
		break;
	}

	if (ip->i_afp)
		xfs_idestroy_fork(ip, XFS_ATTR_FORK);

	if (ip->i_itemp) {
		/*
		 * Only if we are shutting down the fs will we see an
		 * inode still in the AIL. If it is there, we should remove
		 * it to prevent a use-after-free from occurring.
		 */
		xfs_log_item_t	*lip = &ip->i_itemp->ili_item;
		struct xfs_ail	*ailp = lip->li_ailp;

		ASSERT(((lip->li_flags & XFS_LI_IN_AIL) == 0) ||
				XFS_FORCED_SHUTDOWN(ip->i_mount));
		if (lip->li_flags & XFS_LI_IN_AIL) {
			spin_lock(&ailp->xa_lock);
			if (lip->li_flags & XFS_LI_IN_AIL)
				xfs_trans_ail_delete(ailp, lip);
			else
				spin_unlock(&ailp->xa_lock);
		}

		xfs_inode_item_destroy(ip);
		ip->i_itemp = NULL;
	}

	/* asserts to verify all state is correct here */
	ASSERT(atomic_read(&ip->i_iocount) == 0);
	ASSERT(atomic_read(&ip->i_pincount) == 0);
	ASSERT(!spin_is_locked(&ip->i_flags_lock));
	ASSERT(completion_done(&ip->i_flush));

	kmem_zone_free(xfs_inode_zone, ip);
}

/*
 * Check the validity of the inode we just found in the cache.
 */
static int
xfs_iget_cache_hit(
	struct xfs_perag	*pag,
	struct xfs_inode	*ip,
	int			flags,
	int			lock_flags) __releases(pag->pag_ici_lock)
{
	struct inode		*inode = VFS_I(ip);
	struct xfs_mount	*mp = ip->i_mount;
	int			error;

	spin_lock(&ip->i_flags_lock);

	/*
	 * If we are racing with another cache hit that is currently
	 * instantiating this inode or currently recycling it out of
	 * reclaimable state, wait for the initialisation to complete
	 * before continuing.
	 *
	 * XXX(hch): eventually we should do something equivalent to
	 *	     wait_on_inode to wait for these flags to be cleared
	 *	     instead of polling for it.
	 */
	if (ip->i_flags & (XFS_INEW|XFS_IRECLAIM)) {
		trace_xfs_iget_skip(ip);
		XFS_STATS_INC(xs_ig_frecycle);
		error = EAGAIN;
		goto out_error;
	}

	/*
	 * If lookup is racing with unlink return an error immediately.
	 */
	if (ip->i_d.di_mode == 0 && !(flags & XFS_IGET_CREATE)) {
		error = ENOENT;
		goto out_error;
	}

	/*
	 * If IRECLAIMABLE is set, we've torn down the VFS inode already.
	 * Need to carefully get it back into usable state.
	 */
	if (ip->i_flags & XFS_IRECLAIMABLE) {
		trace_xfs_iget_reclaim(ip);

		/*
		 * We need to set XFS_INEW atomically with clearing the
		 * reclaimable tag so that we do have an indicator of the
		 * inode still being initialized.
		 */
		ip->i_flags |= XFS_INEW;
		ip->i_flags &= ~XFS_IRECLAIMABLE;
		__xfs_inode_clear_reclaim_tag(mp, pag, ip);

		spin_unlock(&ip->i_flags_lock);
		read_unlock(&pag->pag_ici_lock);

		error = -inode_init_always(mp->m_super, inode);
		if (error) {
			/*
			 * Re-initializing the inode failed, and we are in deep
			 * trouble.  Try to re-add it to the reclaim list.
			 */
			read_lock(&pag->pag_ici_lock);
			spin_lock(&ip->i_flags_lock);

			ip->i_flags &= ~XFS_INEW;
			ip->i_flags |= XFS_IRECLAIMABLE;
			__xfs_inode_set_reclaim_tag(pag, ip);
			trace_xfs_iget_reclaim(ip);
			goto out_error;
		}
		inode->i_state = I_NEW;
	} else {
		/* If the VFS inode is being torn down, pause and try again. */
		if (!igrab(inode)) {
			error = EAGAIN;
			goto out_error;
		}

		/* We've got a live one. */
		spin_unlock(&ip->i_flags_lock);
		read_unlock(&pag->pag_ici_lock);
	}

	if (lock_flags != 0)
		xfs_ilock(ip, lock_flags);

	xfs_iflags_clear(ip, XFS_ISTALE);
	XFS_STATS_INC(xs_ig_found);

	trace_xfs_iget_found(ip);
	return 0;

out_error:
	spin_unlock(&ip->i_flags_lock);
	read_unlock(&pag->pag_ici_lock);
	return error;
}

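/*
 * The inode was not found in the cache: allocate a new xfs_inode, read
 * the on-disk inode, and insert it into the per-AG radix tree.
 */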
static int
xfs_iget_cache_miss(
	struct xfs_mount	*mp,
	struct xfs_perag	*pag,
	xfs_trans_t		*tp,
	xfs_ino_t		ino,
	struct xfs_inode	**ipp,
	xfs_daddr_t		bno,
	int			flags,
	int			lock_flags)
{
	struct xfs_inode	*ip;
	int			error;
	unsigned long		first_index, mask;
	xfs_agino_t		agino = XFS_INO_TO_AGINO(mp, ino);

	ip = xfs_inode_alloc(mp, ino);
	if (!ip)
		return ENOMEM;

	error = xfs_iread(mp, tp, ip, bno, flags);
	if (error)
		goto out_destroy;

	xfs_itrace_entry(ip);

	if ((ip->i_d.di_mode == 0) && !(flags & XFS_IGET_CREATE)) {
		error = ENOENT;
		goto out_destroy;
	}

	/*
	 * Preload the radix tree so we can insert safely under the
	 * write spinlock. Note that we cannot sleep inside the preload
	 * region.
	 */
	if (radix_tree_preload(GFP_KERNEL)) {
		error = EAGAIN;
		goto out_destroy;
	}

	/*
	 * Because the inode hasn't been added to the radix-tree yet it can't
	 * be found by another thread, so we can do the non-sleeping lock here.
	 */
	if (lock_flags) {
		if (!xfs_ilock_nowait(ip, lock_flags))
			BUG();
	}

	mask = ~(((XFS_INODE_CLUSTER_SIZE(mp) >> mp->m_sb.sb_inodelog)) - 1);
	first_index = agino & mask;
	write_lock(&pag->pag_ici_lock);

	/* insert the new inode */
	error = radix_tree_insert(&pag->pag_ici_root, agino, ip);
	if (unlikely(error)) {
		WARN_ON(error != -EEXIST);
		XFS_STATS_INC(xs_ig_dup);
		error = EAGAIN;
		goto out_preload_end;
	}

	/* These values _must_ be set before releasing the radix tree lock! */
	ip->i_udquot = ip->i_gdquot = NULL;
	xfs_iflags_set(ip, XFS_INEW);

	write_unlock(&pag->pag_ici_lock);
	radix_tree_preload_end();

	trace_xfs_iget_alloc(ip);
	*ipp = ip;
	return 0;

out_preload_end:
	write_unlock(&pag->pag_ici_lock);
	radix_tree_preload_end();
	if (lock_flags)
		xfs_iunlock(ip, lock_flags);
out_destroy:
	__destroy_inode(VFS_I(ip));
	xfs_inode_free(ip);
	return error;
}

/*
 * Look up an inode by number in the given file system.
 * The inode is looked up in the cache held in each AG.
 * If the inode is found in the cache, initialise the vfs inode
 * if necessary.
 *
 * If it is not in core, read it in from the file system's device,
 * add it to the cache and initialise the vfs inode.
 *
 * The inode is locked according to the value of the lock_flags parameter.
 * This flag parameter indicates how and if the inode's IO lock and inode lock
 * should be taken.
 *
 * mp -- the mount point structure for the current file system.  It points
 *       to the per-AG inode caches.
 * tp -- a pointer to the current transaction if there is one.  This is
 *       simply passed through to the xfs_iread() call.
 * ino -- the number of the inode desired.  This is the unique identifier
 *        within the file system for the inode being requested.
 * lock_flags -- flags indicating how to lock the inode.  See the comment
 *		 for xfs_ilock() for a list of valid values.
 * bno -- the block number starting the buffer containing the inode,
 *	  if known (as by bulkstat), else 0.
 */
int
xfs_iget(
	xfs_mount_t	*mp,
	xfs_trans_t	*tp,
	xfs_ino_t	ino,
	uint		flags,
	uint		lock_flags,
	xfs_inode_t	**ipp,
	xfs_daddr_t	bno)
{
	xfs_inode_t	*ip;
	int		error;
	xfs_perag_t	*pag;
	xfs_agino_t	agino;

	/* the radix tree exists only in inode capable AGs */
	if (XFS_INO_TO_AGNO(mp, ino) >= mp->m_maxagi)
		return EINVAL;

	/* get the perag structure and ensure that it's inode capable */
	pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ino));
	if (!pag->pagi_inodeok)
		return EINVAL;
	ASSERT(pag->pag_ici_init);
	agino = XFS_INO_TO_AGINO(mp, ino);

again:
	error = 0;
	read_lock(&pag->pag_ici_lock);
	ip = radix_tree_lookup(&pag->pag_ici_root, agino);

	if (ip) {
		error = xfs_iget_cache_hit(pag, ip, flags, lock_flags);
		if (error)
			goto out_error_or_again;
	} else {
		read_unlock(&pag->pag_ici_lock);
		XFS_STATS_INC(xs_ig_missed);

		error = xfs_iget_cache_miss(mp, pag, tp, ino, &ip, bno,
							flags, lock_flags);
		if (error)
			goto out_error_or_again;
	}
	xfs_perag_put(pag);

	*ipp = ip;

	ASSERT(ip->i_df.if_ext_max ==
	       XFS_IFORK_DSIZE(ip) / sizeof(xfs_bmbt_rec_t));
	/*
	 * If we have a real type for an on-disk inode, we can set ops(&unlock)
	 * now.  If it's a new inode being created, xfs_ialloc will handle it.
	 */
	if (xfs_iflags_test(ip, XFS_INEW) && ip->i_d.di_mode != 0)
		xfs_setup_inode(ip);
	return 0;

out_error_or_again:
	if (error == EAGAIN) {
		delay(1);
		goto again;
	}
	xfs_perag_put(pag);
	return error;
}

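/*
 * Illustrative caller pattern (a sketch only, not part of the original
 * file); EAGAIN-driven retries are handled inside xfs_iget() itself:
 *
 *	error = xfs_iget(mp, NULL, ino, 0, XFS_ILOCK_SHARED, &ip, 0);
 *	if (error)
 *		return error;
 *	... use the locked inode ...
 *	xfs_iput(ip, XFS_ILOCK_SHARED);
 */
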
/*
 * Decrement reference count of an inode structure and unlock it.
 *
 * ip -- the inode being released
 * lock_flags -- this parameter indicates which of the inode's locks
 *	 are to be released.  See the comment on xfs_iunlock() for a list
 *	 of valid values.
 */
void
xfs_iput(xfs_inode_t	*ip,
	 uint		lock_flags)
{
	xfs_itrace_entry(ip);
	xfs_iunlock(ip, lock_flags);
	IRELE(ip);
}

/*
 * Special iput for brand-new inodes that are still locked
 */
void
xfs_iput_new(
	xfs_inode_t	*ip,
	uint		lock_flags)
{
	struct inode	*inode = VFS_I(ip);

	xfs_itrace_entry(ip);

	if (ip->i_d.di_mode == 0) {
		ASSERT(!xfs_iflags_test(ip, XFS_IRECLAIMABLE));
		make_bad_inode(inode);
	}
	if (inode->i_state & I_NEW)
		unlock_new_inode(inode);
	if (lock_flags)
		xfs_iunlock(ip, lock_flags);
	IRELE(ip);
}

/*
 * This is called to free all the memory associated with an inode.
 * It must free the inode itself and any buffers allocated for
 * if_extents/if_data and if_broot.  It must also free the lock
 * associated with the inode.
 *
 * Note: because we don't initialise everything on reallocation out
 * of the zone, we must ensure we nullify everything correctly before
 * freeing the structure.
 */
void
xfs_ireclaim(
	struct xfs_inode	*ip)
{
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_perag	*pag;
	xfs_agino_t		agino = XFS_INO_TO_AGINO(mp, ip->i_ino);

	XFS_STATS_INC(xs_ig_reclaims);

	/*
	 * Remove the inode from the per-AG radix tree.
	 *
	 * Because radix_tree_delete won't complain even if the item was never
	 * added to the tree assert that it's been there before to catch
	 * problems with the inode life time early on.
	 */
	pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ip->i_ino));
	write_lock(&pag->pag_ici_lock);
	if (!radix_tree_delete(&pag->pag_ici_root, agino))
		ASSERT(0);
	write_unlock(&pag->pag_ici_lock);
	xfs_perag_put(pag);

	/*
	 * Here we do an (almost) spurious inode lock in order to coordinate
	 * with inode cache radix tree lookups.  This is because the lookup
	 * can reference the inodes in the cache without taking references.
	 *
	 * We make that OK here by ensuring that we wait until the inode is
	 * unlocked after the lookup before we go ahead and free it.  We get
	 * both the ilock and the iolock because the code may need to drop the
	 * ilock one but will still hold the iolock.
	 */
	xfs_ilock(ip, XFS_ILOCK_EXCL | XFS_IOLOCK_EXCL);
	xfs_qm_dqdetach(ip);
	xfs_iunlock(ip, XFS_ILOCK_EXCL | XFS_IOLOCK_EXCL);

	xfs_inode_free(ip);
}

/*
 * This is a wrapper routine around the xfs_ilock() routine
 * used to centralize some grungy code.  It is used in places
 * that wish to lock the inode solely for reading the extents.
 * The reason these places can't just call xfs_ilock(SHARED)
 * is that the inode lock also guards the reading in of the
 * extents from disk for a file in b-tree format.  If the inode
 * is in b-tree format, then we need to lock the inode exclusively
 * until the extents are read in.  Locking it exclusively all
 * the time would limit our parallelism unnecessarily, though.
 * What we do instead is check to see if the extents have been
 * read in yet, and only lock the inode exclusively if they
 * have not.
 *
 * The function returns a value which should be given to the
 * corresponding xfs_iunlock_map_shared().  This value is
 * the mode in which the lock was actually taken.
 */
uint
xfs_ilock_map_shared(
	xfs_inode_t	*ip)
{
	uint	lock_mode;

	if ((ip->i_d.di_format == XFS_DINODE_FMT_BTREE) &&
	    ((ip->i_df.if_flags & XFS_IFEXTENTS) == 0)) {
		lock_mode = XFS_ILOCK_EXCL;
	} else {
		lock_mode = XFS_ILOCK_SHARED;
	}

	xfs_ilock(ip, lock_mode);

	return lock_mode;
}

/*
 * This is simply the unlock routine to go with xfs_ilock_map_shared().
 * All it does is call xfs_iunlock() with the given lock_mode.
 */
void
xfs_iunlock_map_shared(
	xfs_inode_t	*ip,
	unsigned int	lock_mode)
{
	xfs_iunlock(ip, lock_mode);
}

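/*
 * Illustrative pairing of the two routines above (sketch only, not part
 * of the original file):
 *
 *	uint lock_mode = xfs_ilock_map_shared(ip);
 *	... read the extent list ...
 *	xfs_iunlock_map_shared(ip, lock_mode);
 */
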
/*
 * The xfs inode contains 2 locks: a multi-reader lock called the
 * i_iolock and a multi-reader lock called the i_lock.  This routine
 * allows either or both of the locks to be obtained.
 *
 * The 2 locks should always be ordered so that the IO lock is
 * obtained first in order to prevent deadlock.
 *
 * ip -- the inode being locked
 * lock_flags -- this parameter indicates the inode's locks
 *	 to be locked.  It can be:
 *		XFS_IOLOCK_SHARED,
 *		XFS_IOLOCK_EXCL,
 *		XFS_ILOCK_SHARED,
 *		XFS_ILOCK_EXCL,
 *		XFS_IOLOCK_SHARED | XFS_ILOCK_SHARED,
 *		XFS_IOLOCK_SHARED | XFS_ILOCK_EXCL,
 *		XFS_IOLOCK_EXCL | XFS_ILOCK_SHARED,
 *		XFS_IOLOCK_EXCL | XFS_ILOCK_EXCL
 */
void
xfs_ilock(
	xfs_inode_t		*ip,
	uint			lock_flags)
{
	/*
	 * You can't set both SHARED and EXCL for the same lock,
	 * and only XFS_IOLOCK_SHARED, XFS_IOLOCK_EXCL, XFS_ILOCK_SHARED,
	 * and XFS_ILOCK_EXCL are valid values to set in lock_flags.
	 */
	ASSERT((lock_flags & (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL)) !=
	       (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL));
	ASSERT((lock_flags & (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL)) !=
	       (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL));
	ASSERT((lock_flags & ~(XFS_LOCK_MASK | XFS_LOCK_DEP_MASK)) == 0);

	if (lock_flags & XFS_IOLOCK_EXCL)
		mrupdate_nested(&ip->i_iolock, XFS_IOLOCK_DEP(lock_flags));
	else if (lock_flags & XFS_IOLOCK_SHARED)
		mraccess_nested(&ip->i_iolock, XFS_IOLOCK_DEP(lock_flags));

	if (lock_flags & XFS_ILOCK_EXCL)
		mrupdate_nested(&ip->i_lock, XFS_ILOCK_DEP(lock_flags));
	else if (lock_flags & XFS_ILOCK_SHARED)
		mraccess_nested(&ip->i_lock, XFS_ILOCK_DEP(lock_flags));

	trace_xfs_ilock(ip, lock_flags, _RET_IP_);
}

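/*
 * Illustrative use of the combined flags (sketch only, not part of the
 * original file); the IO lock is always taken before the inode lock:
 *
 *	xfs_ilock(ip, XFS_IOLOCK_EXCL | XFS_ILOCK_EXCL);
 *	... modify the inode ...
 *	xfs_iunlock(ip, XFS_IOLOCK_EXCL | XFS_ILOCK_EXCL);
 */
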
/*
 * This is just like xfs_ilock(), except that the caller
 * is guaranteed not to sleep.  It returns 1 if it gets
 * the requested locks and 0 otherwise.  If the IO lock is
 * obtained but the inode lock cannot be, then the IO lock
 * is dropped before returning.
 *
 * ip -- the inode being locked
 * lock_flags -- this parameter indicates which of the inode's locks
 *	 are to be locked.  See the comment for xfs_ilock() for a list
 *	 of valid values.
 */
int
xfs_ilock_nowait(
	xfs_inode_t		*ip,
	uint			lock_flags)
{
	/*
	 * You can't set both SHARED and EXCL for the same lock,
	 * and only XFS_IOLOCK_SHARED, XFS_IOLOCK_EXCL, XFS_ILOCK_SHARED,
	 * and XFS_ILOCK_EXCL are valid values to set in lock_flags.
	 */
	ASSERT((lock_flags & (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL)) !=
	       (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL));
	ASSERT((lock_flags & (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL)) !=
	       (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL));
	ASSERT((lock_flags & ~(XFS_LOCK_MASK | XFS_LOCK_DEP_MASK)) == 0);

	if (lock_flags & XFS_IOLOCK_EXCL) {
		if (!mrtryupdate(&ip->i_iolock))
			goto out;
	} else if (lock_flags & XFS_IOLOCK_SHARED) {
		if (!mrtryaccess(&ip->i_iolock))
			goto out;
	}
	if (lock_flags & XFS_ILOCK_EXCL) {
		if (!mrtryupdate(&ip->i_lock))
			goto out_undo_iolock;
	} else if (lock_flags & XFS_ILOCK_SHARED) {
		if (!mrtryaccess(&ip->i_lock))
			goto out_undo_iolock;
	}
	trace_xfs_ilock_nowait(ip, lock_flags, _RET_IP_);
	return 1;

 out_undo_iolock:
	if (lock_flags & XFS_IOLOCK_EXCL)
		mrunlock_excl(&ip->i_iolock);
	else if (lock_flags & XFS_IOLOCK_SHARED)
		mrunlock_shared(&ip->i_iolock);
 out:
	return 0;
}

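/*
 * Illustrative non-blocking pattern (sketch only, not part of the
 * original file); on failure the caller typically backs off or falls
 * back to the sleeping xfs_ilock():
 *
 *	if (!xfs_ilock_nowait(ip, XFS_ILOCK_EXCL))
 *		return EAGAIN;
 *	...
 *	xfs_iunlock(ip, XFS_ILOCK_EXCL);
 */
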
/*
 * xfs_iunlock() is used to drop the inode locks acquired with
 * xfs_ilock() and xfs_ilock_nowait().  The caller must pass
 * in the flags given to xfs_ilock() or xfs_ilock_nowait() so
 * that we know which locks to drop.
 *
 * ip -- the inode being unlocked
 * lock_flags -- this parameter indicates which of the inode's locks
 *	 are to be unlocked.  See the comment for xfs_ilock() for a list
 *	 of valid values for this parameter.
 */
void
xfs_iunlock(
	xfs_inode_t		*ip,
	uint			lock_flags)
{
	/*
	 * You can't set both SHARED and EXCL for the same lock,
	 * and only XFS_IOLOCK_SHARED, XFS_IOLOCK_EXCL, XFS_ILOCK_SHARED,
	 * and XFS_ILOCK_EXCL are valid values to set in lock_flags.
	 */
	ASSERT((lock_flags & (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL)) !=
	       (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL));
	ASSERT((lock_flags & (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL)) !=
	       (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL));
	ASSERT((lock_flags & ~(XFS_LOCK_MASK | XFS_IUNLOCK_NONOTIFY |
			XFS_LOCK_DEP_MASK)) == 0);
	ASSERT(lock_flags != 0);

	if (lock_flags & XFS_IOLOCK_EXCL)
		mrunlock_excl(&ip->i_iolock);
	else if (lock_flags & XFS_IOLOCK_SHARED)
		mrunlock_shared(&ip->i_iolock);

	if (lock_flags & XFS_ILOCK_EXCL)
		mrunlock_excl(&ip->i_lock);
	else if (lock_flags & XFS_ILOCK_SHARED)
		mrunlock_shared(&ip->i_lock);

	if ((lock_flags & (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL)) &&
	    !(lock_flags & XFS_IUNLOCK_NONOTIFY) && ip->i_itemp) {
		/*
		 * Let the AIL know that this item has been unlocked in case
		 * it is in the AIL and anyone is waiting on it.  Don't do
		 * this if the caller has asked us not to.
		 */
		xfs_trans_unlocked_item(ip->i_itemp->ili_item.li_ailp,
					(xfs_log_item_t*)(ip->i_itemp));
	}
	trace_xfs_iunlock(ip, lock_flags, _RET_IP_);
}

/*
 * Give up write locks.  The i/o lock cannot be held nested
 * if it is being demoted.
 */
void
xfs_ilock_demote(
	xfs_inode_t		*ip,
	uint			lock_flags)
{
	ASSERT(lock_flags & (XFS_IOLOCK_EXCL|XFS_ILOCK_EXCL));
	ASSERT((lock_flags & ~(XFS_IOLOCK_EXCL|XFS_ILOCK_EXCL)) == 0);

	if (lock_flags & XFS_ILOCK_EXCL)
		mrdemote(&ip->i_lock);
	if (lock_flags & XFS_IOLOCK_EXCL)
		mrdemote(&ip->i_iolock);

	trace_xfs_ilock_demote(ip, lock_flags, _RET_IP_);
}

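/*
 * Illustrative demotion (sketch only, not part of the original file):
 * take the inode lock exclusively, then drop it to shared once the
 * exclusive work is done.
 *
 *	xfs_ilock(ip, XFS_ILOCK_EXCL);
 *	... exclusive work ...
 *	xfs_ilock_demote(ip, XFS_ILOCK_EXCL);
 *	... shared readers may now proceed ...
 *	xfs_iunlock(ip, XFS_ILOCK_SHARED);
 */
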
#ifdef DEBUG
/*
 * Debug-only routine.  Without additional rw_semaphore APIs we can only
 * answer requests regarding whether we hold the lock for write (reader
 * state is outside our visibility; we only track writer state).
 *
 * Note: this means !xfs_isilocked would give false positives, so don't
 * do that.
 */
int
xfs_isilocked(
	xfs_inode_t		*ip,
	uint			lock_flags)
{
	if ((lock_flags & (XFS_ILOCK_EXCL|XFS_ILOCK_SHARED)) ==
	    XFS_ILOCK_EXCL) {
		if (!ip->i_lock.mr_writer)
			return 0;
	}

	if ((lock_flags & (XFS_IOLOCK_EXCL|XFS_IOLOCK_SHARED)) ==
	    XFS_IOLOCK_EXCL) {
		if (!ip->i_iolock.mr_writer)
			return 0;
	}

	return 1;
}

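/*
 * Illustrative use in assertions (sketch only, not part of the original
 * file); per the note above, only positive checks are reliable:
 *
 *	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
 */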
#endif