fs/xfs/xfs_iget.c
/*
 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_types.h"
#include "xfs_acl.h"
#include "xfs_bit.h"
#include "xfs_log.h"
#include "xfs_inum.h"
#include "xfs_trans.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
#include "xfs_dir2.h"
#include "xfs_dmapi.h"
#include "xfs_mount.h"
#include "xfs_bmap_btree.h"
#include "xfs_alloc_btree.h"
#include "xfs_ialloc_btree.h"
#include "xfs_dir2_sf.h"
#include "xfs_attr_sf.h"
#include "xfs_dinode.h"
#include "xfs_inode.h"
#include "xfs_btree.h"
#include "xfs_ialloc.h"
#include "xfs_quota.h"
#include "xfs_utils.h"
#include "xfs_trans_priv.h"
#include "xfs_inode_item.h"
#include "xfs_bmap.h"
#include "xfs_btree_trace.h"
#include "xfs_dir2_trace.h"

/*
 * Allocate and initialise an xfs_inode.
 */
STATIC struct xfs_inode *
xfs_inode_alloc(
	struct xfs_mount	*mp,
	xfs_ino_t		ino)
{
	struct xfs_inode	*ip;

	/*
	 * if this didn't occur in transactions, we could use
	 * KM_MAYFAIL and return NULL here on ENOMEM. Set the
	 * code up to do this anyway.
	 */
	ip = kmem_zone_alloc(xfs_inode_zone, KM_SLEEP);
	if (!ip)
		return NULL;
	if (inode_init_always(mp->m_super, VFS_I(ip))) {
		kmem_zone_free(xfs_inode_zone, ip);
		return NULL;
	}

	ASSERT(atomic_read(&ip->i_iocount) == 0);
	ASSERT(atomic_read(&ip->i_pincount) == 0);
	ASSERT(!spin_is_locked(&ip->i_flags_lock));
	ASSERT(completion_done(&ip->i_flush));

	/* initialise the xfs inode */
	ip->i_ino = ino;
	ip->i_mount = mp;
	memset(&ip->i_imap, 0, sizeof(struct xfs_imap));
	ip->i_afp = NULL;
	memset(&ip->i_df, 0, sizeof(xfs_ifork_t));
	ip->i_flags = 0;
	ip->i_update_core = 0;
	ip->i_update_size = 0;
	ip->i_delayed_blks = 0;
	memset(&ip->i_d, 0, sizeof(xfs_icdinode_t));
	ip->i_size = 0;
	ip->i_new_size = 0;

	/*
	 * Initialize inode's trace buffers.
	 */
#ifdef	XFS_INODE_TRACE
	ip->i_trace = ktrace_alloc(INODE_TRACE_SIZE, KM_NOFS);
#endif
#ifdef XFS_BMAP_TRACE
	ip->i_xtrace = ktrace_alloc(XFS_BMAP_KTRACE_SIZE, KM_NOFS);
#endif
#ifdef XFS_BTREE_TRACE
	ip->i_btrace = ktrace_alloc(XFS_BMBT_KTRACE_SIZE, KM_NOFS);
#endif
#ifdef XFS_RW_TRACE
	ip->i_rwtrace = ktrace_alloc(XFS_RW_KTRACE_SIZE, KM_NOFS);
#endif
#ifdef XFS_ILOCK_TRACE
	ip->i_lock_trace = ktrace_alloc(XFS_ILOCK_KTRACE_SIZE, KM_NOFS);
#endif
#ifdef XFS_DIR2_TRACE
	ip->i_dir_trace = ktrace_alloc(XFS_DIR2_KTRACE_SIZE, KM_NOFS);
#endif

	/* prevent anyone from using this yet */
	VFS_I(ip)->i_state = I_NEW|I_LOCK;

	return ip;
}

STATIC void
xfs_inode_free(
	struct xfs_inode	*ip)
{
	switch (ip->i_d.di_mode & S_IFMT) {
	case S_IFREG:
	case S_IFDIR:
	case S_IFLNK:
		xfs_idestroy_fork(ip, XFS_DATA_FORK);
		break;
	}

	if (ip->i_afp)
		xfs_idestroy_fork(ip, XFS_ATTR_FORK);

#ifdef XFS_INODE_TRACE
	ktrace_free(ip->i_trace);
#endif
#ifdef XFS_BMAP_TRACE
	ktrace_free(ip->i_xtrace);
#endif
#ifdef XFS_BTREE_TRACE
	ktrace_free(ip->i_btrace);
#endif
#ifdef XFS_RW_TRACE
	ktrace_free(ip->i_rwtrace);
#endif
#ifdef XFS_ILOCK_TRACE
	ktrace_free(ip->i_lock_trace);
#endif
#ifdef XFS_DIR2_TRACE
	ktrace_free(ip->i_dir_trace);
#endif

	if (ip->i_itemp) {
		/*
		 * Only if we are shutting down the fs will we see an
		 * inode still in the AIL. If it is there, we should remove
		 * it to prevent a use-after-free from occurring.
		 */
		xfs_log_item_t	*lip = &ip->i_itemp->ili_item;
		struct xfs_ail	*ailp = lip->li_ailp;

		ASSERT(((lip->li_flags & XFS_LI_IN_AIL) == 0) ||
				       XFS_FORCED_SHUTDOWN(ip->i_mount));
		if (lip->li_flags & XFS_LI_IN_AIL) {
			spin_lock(&ailp->xa_lock);
			if (lip->li_flags & XFS_LI_IN_AIL)
				xfs_trans_ail_delete(ailp, lip);
			else
				spin_unlock(&ailp->xa_lock);
		}
		xfs_inode_item_destroy(ip);
		ip->i_itemp = NULL;
	}

	/* asserts to verify all state is correct here */
	ASSERT(atomic_read(&ip->i_iocount) == 0);
	ASSERT(atomic_read(&ip->i_pincount) == 0);
	ASSERT(!spin_is_locked(&ip->i_flags_lock));
	ASSERT(completion_done(&ip->i_flush));

	kmem_zone_free(xfs_inode_zone, ip);
}

/*
 * Check the validity of the inode we just found in the cache
 */
static int
xfs_iget_cache_hit(
	struct xfs_perag	*pag,
	struct xfs_inode	*ip,
	int			flags,
	int			lock_flags) __releases(pag->pag_ici_lock)
{
	struct inode		*inode = VFS_I(ip);
	struct xfs_mount	*mp = ip->i_mount;
	int			error;

	spin_lock(&ip->i_flags_lock);

	/*
	 * If we are racing with another cache hit that is currently
	 * instantiating this inode or currently recycling it out of
	 * reclaimable state, wait for the initialisation to complete
	 * before continuing.
	 *
	 * XXX(hch): eventually we should do something equivalent to
	 *	     wait_on_inode to wait for these flags to be cleared
	 *	     instead of polling for it.
	 */
	if (ip->i_flags & (XFS_INEW|XFS_IRECLAIM)) {
		XFS_STATS_INC(xs_ig_frecycle);
		error = EAGAIN;
		goto out_error;
	}

	/*
	 * If lookup is racing with unlink return an error immediately.
	 */
	if (ip->i_d.di_mode == 0 && !(flags & XFS_IGET_CREATE)) {
		error = ENOENT;
		goto out_error;
	}

	/*
	 * If IRECLAIMABLE is set, we've torn down the VFS inode already.
	 * Need to carefully get it back into usable state.
	 */
	if (ip->i_flags & XFS_IRECLAIMABLE) {
		xfs_itrace_exit_tag(ip, "xfs_iget.alloc");

		/*
		 * We need to set XFS_INEW atomically with clearing the
		 * reclaimable tag so that we do have an indicator of the
		 * inode still being initialized.
		 */
		ip->i_flags |= XFS_INEW;
		ip->i_flags &= ~XFS_IRECLAIMABLE;
		__xfs_inode_clear_reclaim_tag(mp, pag, ip);

		spin_unlock(&ip->i_flags_lock);
		read_unlock(&pag->pag_ici_lock);

		error = -inode_init_always(mp->m_super, inode);
		if (error) {
			/*
			 * Re-initializing the inode failed, and we are in deep
			 * trouble.  Try to re-add it to the reclaim list.
			 */
			read_lock(&pag->pag_ici_lock);
			spin_lock(&ip->i_flags_lock);

			ip->i_flags &= ~XFS_INEW;
			ip->i_flags |= XFS_IRECLAIMABLE;
			__xfs_inode_set_reclaim_tag(pag, ip);
			goto out_error;
		}
		inode->i_state = I_LOCK|I_NEW;
	} else {
		/* If the VFS inode is being torn down, pause and try again. */
		if (!igrab(inode)) {
			error = EAGAIN;
			goto out_error;
		}

		/* We've got a live one. */
		spin_unlock(&ip->i_flags_lock);
		read_unlock(&pag->pag_ici_lock);
	}

	if (lock_flags != 0)
		xfs_ilock(ip, lock_flags);

	xfs_iflags_clear(ip, XFS_ISTALE);
	xfs_itrace_exit_tag(ip, "xfs_iget.found");
	XFS_STATS_INC(xs_ig_found);
	return 0;

out_error:
	spin_unlock(&ip->i_flags_lock);
	read_unlock(&pag->pag_ici_lock);
	return error;
}

static int
xfs_iget_cache_miss(
	struct xfs_mount	*mp,
	struct xfs_perag	*pag,
	xfs_trans_t		*tp,
	xfs_ino_t		ino,
	struct xfs_inode	**ipp,
	xfs_daddr_t		bno,
	int			flags,
	int			lock_flags) __releases(pag->pag_ici_lock)
{
	struct xfs_inode	*ip;
	int			error;
	unsigned long		first_index, mask;
	xfs_agino_t		agino = XFS_INO_TO_AGINO(mp, ino);

	ip = xfs_inode_alloc(mp, ino);
	if (!ip)
		return ENOMEM;

	error = xfs_iread(mp, tp, ip, bno, flags);
	if (error)
		goto out_destroy;

	xfs_itrace_exit_tag(ip, "xfs_iget.alloc");

	if ((ip->i_d.di_mode == 0) && !(flags & XFS_IGET_CREATE)) {
		error = ENOENT;
		goto out_destroy;
	}

	/*
	 * Preload the radix tree so we can insert safely under the
	 * write spinlock. Note that we cannot sleep inside the preload
	 * region.
	 */
	if (radix_tree_preload(GFP_KERNEL)) {
		error = EAGAIN;
		goto out_destroy;
	}

	/*
	 * Because the inode hasn't been added to the radix-tree yet it can't
	 * be found by another thread, so we can do the non-sleeping lock here.
	 */
	if (lock_flags) {
		if (!xfs_ilock_nowait(ip, lock_flags))
			BUG();
	}

	mask = ~(((XFS_INODE_CLUSTER_SIZE(mp) >> mp->m_sb.sb_inodelog)) - 1);
	first_index = agino & mask;
	write_lock(&pag->pag_ici_lock);

	/* insert the new inode */
	error = radix_tree_insert(&pag->pag_ici_root, agino, ip);
	if (unlikely(error)) {
		WARN_ON(error != -EEXIST);
		XFS_STATS_INC(xs_ig_dup);
		error = EAGAIN;
		goto out_preload_end;
	}

	/* These values _must_ be set before releasing the radix tree lock! */
	ip->i_udquot = ip->i_gdquot = NULL;
	xfs_iflags_set(ip, XFS_INEW);

	write_unlock(&pag->pag_ici_lock);
	radix_tree_preload_end();
	*ipp = ip;
	return 0;

out_preload_end:
	write_unlock(&pag->pag_ici_lock);
	radix_tree_preload_end();
	if (lock_flags)
		xfs_iunlock(ip, lock_flags);
out_destroy:
	__destroy_inode(VFS_I(ip));
	xfs_inode_free(ip);
	return error;
}

/*
 * Look up an inode by number in the given file system.
 * The inode is looked up in the cache held in each AG.
 * If the inode is found in the cache, initialise the vfs inode
 * if necessary.
 *
 * If it is not in core, read it in from the file system's device,
 * add it to the cache and initialise the vfs inode.
 *
 * The inode is locked according to the value of the lock_flags parameter.
 * This flag parameter indicates how and if the inode's IO lock and inode lock
 * should be taken.
 *
 * mp -- the mount point structure for the current file system.  It points
 *       to the inode hash table.
 * tp -- a pointer to the current transaction if there is one.  This is
 *       simply passed through to the xfs_iread() call.
 * ino -- the number of the inode desired.  This is the unique identifier
 *        within the file system for the inode being requested.
 * lock_flags -- flags indicating how to lock the inode.  See the comment
 *		 for xfs_ilock() for a list of valid values.
 * bno -- the block number starting the buffer containing the inode,
 *	  if known (as by bulkstat), else 0.
 */
int
xfs_iget(
	xfs_mount_t	*mp,
	xfs_trans_t	*tp,
	xfs_ino_t	ino,
	uint		flags,
	uint		lock_flags,
	xfs_inode_t	**ipp,
	xfs_daddr_t	bno)
{
	xfs_inode_t	*ip;
	int		error;
	xfs_perag_t	*pag;
	xfs_agino_t	agino;

	/* the radix tree exists only in inode capable AGs */
	if (XFS_INO_TO_AGNO(mp, ino) >= mp->m_maxagi)
		return EINVAL;

	/* get the perag structure and ensure that it's inode capable */
	pag = xfs_get_perag(mp, ino);
	if (!pag->pagi_inodeok)
		return EINVAL;
	ASSERT(pag->pag_ici_init);
	agino = XFS_INO_TO_AGINO(mp, ino);

again:
	error = 0;
	read_lock(&pag->pag_ici_lock);
	ip = radix_tree_lookup(&pag->pag_ici_root, agino);

	if (ip) {
		error = xfs_iget_cache_hit(pag, ip, flags, lock_flags);
		if (error)
			goto out_error_or_again;
	} else {
		read_unlock(&pag->pag_ici_lock);
		XFS_STATS_INC(xs_ig_missed);

		error = xfs_iget_cache_miss(mp, pag, tp, ino, &ip, bno,
							flags, lock_flags);
		if (error)
			goto out_error_or_again;
	}
	xfs_put_perag(mp, pag);

	*ipp = ip;

	ASSERT(ip->i_df.if_ext_max ==
	       XFS_IFORK_DSIZE(ip) / sizeof(xfs_bmbt_rec_t));
	/*
	 * If we have a real type for an on-disk inode, we can set ops(&unlock)
	 * now.	 If it's a new inode being created, xfs_ialloc will handle it.
	 */
	if (xfs_iflags_test(ip, XFS_INEW) && ip->i_d.di_mode != 0)
		xfs_setup_inode(ip);
	return 0;

out_error_or_again:
	if (error == EAGAIN) {
		delay(1);
		goto again;
	}
	xfs_put_perag(mp, pag);
	return error;
}
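
/*
 * Illustrative usage sketch (editor's addition, not part of the original
 * source): a typical caller of xfs_iget() passes a NULL transaction and 0 for
 * bno when the inode buffer location is unknown, then drops the lock and
 * reference with xfs_iput() (defined below).  Variable names here are
 * hypothetical.
 *
 *	xfs_inode_t	*ip;
 *	int		error;
 *
 *	error = xfs_iget(mp, NULL, ino, 0, XFS_ILOCK_SHARED, &ip, 0);
 *	if (error)
 *		return error;
 *	(... read inode state under the shared ilock ...)
 *	xfs_iput(ip, XFS_ILOCK_SHARED);
 */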

/*
 * Look for the inode corresponding to the given ino in the hash table.
 * If it is there and its i_transp pointer matches tp, return it.
 * Otherwise, return NULL.
 */
xfs_inode_t *
xfs_inode_incore(xfs_mount_t	*mp,
		 xfs_ino_t	ino,
		 xfs_trans_t	*tp)
{
	xfs_inode_t	*ip;
	xfs_perag_t	*pag;

	pag = xfs_get_perag(mp, ino);
	read_lock(&pag->pag_ici_lock);
	ip = radix_tree_lookup(&pag->pag_ici_root, XFS_INO_TO_AGINO(mp, ino));
	read_unlock(&pag->pag_ici_lock);
	xfs_put_perag(mp, pag);

	/* the returned inode must match the transaction */
	if (ip && (ip->i_transp != tp))
		return NULL;
	return ip;
}

/*
 * Decrement reference count of an inode structure and unlock it.
 *
 * ip -- the inode being released
 * lock_flags -- this parameter indicates the inode's locks to be
 *	 released.  See the comment on xfs_iunlock() for a list
 *	 of valid values.
 */
void
xfs_iput(xfs_inode_t	*ip,
	 uint		lock_flags)
{
	xfs_itrace_entry(ip);
	xfs_iunlock(ip, lock_flags);
	IRELE(ip);
}

/*
 * Special iput for brand-new inodes that are still locked
 */
void
xfs_iput_new(
	xfs_inode_t	*ip,
	uint		lock_flags)
{
	struct inode	*inode = VFS_I(ip);

	xfs_itrace_entry(ip);

	if ((ip->i_d.di_mode == 0)) {
		ASSERT(!xfs_iflags_test(ip, XFS_IRECLAIMABLE));
		make_bad_inode(inode);
	}
	if (inode->i_state & I_NEW)
		unlock_new_inode(inode);
	if (lock_flags)
		xfs_iunlock(ip, lock_flags);
	IRELE(ip);
}

/*
 * This is called to free all the memory associated with an inode.
 * It must free the inode itself and any buffers allocated for
 * if_extents/if_data and if_broot.  It must also free the lock
 * associated with the inode.
 *
 * Note: because we don't initialise everything on reallocation out
 * of the zone, we must ensure we nullify everything correctly before
 * freeing the structure.
 */
void
xfs_ireclaim(
	struct xfs_inode	*ip)
{
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_perag	*pag;

	XFS_STATS_INC(xs_ig_reclaims);

	/*
	 * Remove the inode from the per-AG radix tree.  It doesn't matter
	 * if it was never added to it because radix_tree_delete can deal
	 * with that case just fine.
	 */
	pag = xfs_get_perag(mp, ip->i_ino);
	write_lock(&pag->pag_ici_lock);
	radix_tree_delete(&pag->pag_ici_root, XFS_INO_TO_AGINO(mp, ip->i_ino));
	write_unlock(&pag->pag_ici_lock);
	xfs_put_perag(mp, pag);

	/*
	 * Here we do an (almost) spurious inode lock in order to coordinate
	 * with inode cache radix tree lookups.  This is because the lookup
	 * can reference the inodes in the cache without taking references.
	 *
	 * We make that OK here by ensuring that we wait until the inode is
	 * unlocked after the lookup before we go ahead and free it.  We get
	 * both the ilock and the iolock because the code may need to drop the
	 * ilock but will still hold the iolock.
	 */
	xfs_ilock(ip, XFS_ILOCK_EXCL | XFS_IOLOCK_EXCL);
	xfs_qm_dqdetach(ip);
	xfs_iunlock(ip, XFS_ILOCK_EXCL | XFS_IOLOCK_EXCL);

	xfs_inode_free(ip);
}

/*
 * This is a wrapper routine around the xfs_ilock() routine
 * used to centralize some grungy code.  It is used in places
 * that wish to lock the inode solely for reading the extents.
 * The reason these places can't just call xfs_ilock(SHARED)
 * is that the inode lock also guards the bringing in of the
 * extents from disk for a file in b-tree format.  If the inode
 * is in b-tree format, then we need to lock the inode exclusively
 * until the extents are read in.  Locking it exclusively all
 * the time would limit our parallelism unnecessarily, though.
 * What we do instead is check to see if the extents have been
 * read in yet, and only lock the inode exclusively if they
 * have not.
 *
 * The function returns a value which should be given to the
 * corresponding xfs_iunlock_map_shared().  This value is
 * the mode in which the lock was actually taken.
 */
uint
xfs_ilock_map_shared(
	xfs_inode_t	*ip)
{
	uint	lock_mode;

	if ((ip->i_d.di_format == XFS_DINODE_FMT_BTREE) &&
	    ((ip->i_df.if_flags & XFS_IFEXTENTS) == 0)) {
		lock_mode = XFS_ILOCK_EXCL;
	} else {
		lock_mode = XFS_ILOCK_SHARED;
	}

	xfs_ilock(ip, lock_mode);

	return lock_mode;
}

/*
 * This is simply the unlock routine to go with xfs_ilock_map_shared().
 * All it does is call xfs_iunlock() with the given lock_mode.
 */
void
xfs_iunlock_map_shared(
	xfs_inode_t	*ip,
	unsigned int	lock_mode)
{
	xfs_iunlock(ip, lock_mode);
}
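
/*
 * Illustrative pairing sketch (editor's addition, not part of the original
 * source): the value returned by xfs_ilock_map_shared() must be handed back
 * to xfs_iunlock_map_shared() so the lock is dropped in the mode in which it
 * was actually taken.
 *
 *	uint	lock_mode;
 *
 *	lock_mode = xfs_ilock_map_shared(ip);
 *	(... walk the extent list ...)
 *	xfs_iunlock_map_shared(ip, lock_mode);
 */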

/*
 * The xfs inode contains 2 locks: a multi-reader lock called the
 * i_iolock and a multi-reader lock called the i_lock.  This routine
 * allows either or both of the locks to be obtained.
 *
 * The 2 locks should always be ordered so that the IO lock is
 * obtained first in order to prevent deadlock.
 *
 * ip -- the inode being locked
 * lock_flags -- this parameter indicates the inode's locks
 *       to be locked.  It can be:
 *		XFS_IOLOCK_SHARED,
 *		XFS_IOLOCK_EXCL,
 *		XFS_ILOCK_SHARED,
 *		XFS_ILOCK_EXCL,
 *		XFS_IOLOCK_SHARED | XFS_ILOCK_SHARED,
 *		XFS_IOLOCK_SHARED | XFS_ILOCK_EXCL,
 *		XFS_IOLOCK_EXCL | XFS_ILOCK_SHARED,
 *		XFS_IOLOCK_EXCL | XFS_ILOCK_EXCL
 */
void
xfs_ilock(
	xfs_inode_t		*ip,
	uint			lock_flags)
{
	/*
	 * You can't set both SHARED and EXCL for the same lock,
	 * and only XFS_IOLOCK_SHARED, XFS_IOLOCK_EXCL, XFS_ILOCK_SHARED,
	 * and XFS_ILOCK_EXCL are valid values to set in lock_flags.
	 */
	ASSERT((lock_flags & (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL)) !=
	       (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL));
	ASSERT((lock_flags & (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL)) !=
	       (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL));
	ASSERT((lock_flags & ~(XFS_LOCK_MASK | XFS_LOCK_DEP_MASK)) == 0);

	if (lock_flags & XFS_IOLOCK_EXCL)
		mrupdate_nested(&ip->i_iolock, XFS_IOLOCK_DEP(lock_flags));
	else if (lock_flags & XFS_IOLOCK_SHARED)
		mraccess_nested(&ip->i_iolock, XFS_IOLOCK_DEP(lock_flags));

	if (lock_flags & XFS_ILOCK_EXCL)
		mrupdate_nested(&ip->i_lock, XFS_ILOCK_DEP(lock_flags));
	else if (lock_flags & XFS_ILOCK_SHARED)
		mraccess_nested(&ip->i_lock, XFS_ILOCK_DEP(lock_flags));

	xfs_ilock_trace(ip, 1, lock_flags, (inst_t *)__return_address);
}
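
/*
 * Illustrative ordering sketch (editor's addition, not part of the original
 * source): when both locks are needed they are requested in a single call so
 * the IO lock is always taken before the inode lock, matching the ordering
 * rule described above; xfs_ireclaim() in this file uses the same pattern.
 *
 *	xfs_ilock(ip, XFS_IOLOCK_EXCL | XFS_ILOCK_EXCL);
 *	(... modify the inode ...)
 *	xfs_iunlock(ip, XFS_IOLOCK_EXCL | XFS_ILOCK_EXCL);
 */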

/*
 * This is just like xfs_ilock(), except that the caller
 * is guaranteed not to sleep.  It returns 1 if it gets
 * the requested locks and 0 otherwise.  If the IO lock is
 * obtained but the inode lock cannot be, then the IO lock
 * is dropped before returning.
 *
 * ip -- the inode being locked
 * lock_flags -- this parameter indicates the inode's locks to be
 *	 locked.  See the comment for xfs_ilock() for a list
 *	 of valid values.
 */
int
xfs_ilock_nowait(
	xfs_inode_t		*ip,
	uint			lock_flags)
{
	/*
	 * You can't set both SHARED and EXCL for the same lock,
	 * and only XFS_IOLOCK_SHARED, XFS_IOLOCK_EXCL, XFS_ILOCK_SHARED,
	 * and XFS_ILOCK_EXCL are valid values to set in lock_flags.
	 */
	ASSERT((lock_flags & (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL)) !=
	       (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL));
	ASSERT((lock_flags & (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL)) !=
	       (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL));
	ASSERT((lock_flags & ~(XFS_LOCK_MASK | XFS_LOCK_DEP_MASK)) == 0);

	if (lock_flags & XFS_IOLOCK_EXCL) {
		if (!mrtryupdate(&ip->i_iolock))
			goto out;
	} else if (lock_flags & XFS_IOLOCK_SHARED) {
		if (!mrtryaccess(&ip->i_iolock))
			goto out;
	}
	if (lock_flags & XFS_ILOCK_EXCL) {
		if (!mrtryupdate(&ip->i_lock))
			goto out_undo_iolock;
	} else if (lock_flags & XFS_ILOCK_SHARED) {
		if (!mrtryaccess(&ip->i_lock))
			goto out_undo_iolock;
	}
	xfs_ilock_trace(ip, 2, lock_flags, (inst_t *)__return_address);
	return 1;

 out_undo_iolock:
	if (lock_flags & XFS_IOLOCK_EXCL)
		mrunlock_excl(&ip->i_iolock);
	else if (lock_flags & XFS_IOLOCK_SHARED)
		mrunlock_shared(&ip->i_iolock);
 out:
	return 0;
}
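
/*
 * Illustrative trylock sketch (editor's addition, not part of the original
 * source): a caller that must not sleep backs off when xfs_ilock_nowait()
 * returns 0, and unlocks with the same flags when it returns 1.
 *
 *	if (!xfs_ilock_nowait(ip, XFS_ILOCK_EXCL)) {
 *		(... back off and retry later ...)
 *	} else {
 *		(... do the locked work ...)
 *		xfs_iunlock(ip, XFS_ILOCK_EXCL);
 *	}
 */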

/*
 * xfs_iunlock() is used to drop the inode locks acquired with
 * xfs_ilock() and xfs_ilock_nowait().  The caller must pass
 * in the flags given to xfs_ilock() or xfs_ilock_nowait() so
 * that we know which locks to drop.
 *
 * ip -- the inode being unlocked
 * lock_flags -- this parameter indicates the inode's locks to be
 *	 unlocked.  See the comment for xfs_ilock() for a list
 *	 of valid values for this parameter.
 */
void
xfs_iunlock(
	xfs_inode_t		*ip,
	uint			lock_flags)
{
	/*
	 * You can't set both SHARED and EXCL for the same lock,
	 * and only XFS_IOLOCK_SHARED, XFS_IOLOCK_EXCL, XFS_ILOCK_SHARED,
	 * and XFS_ILOCK_EXCL are valid values to set in lock_flags.
	 */
	ASSERT((lock_flags & (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL)) !=
	       (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL));
	ASSERT((lock_flags & (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL)) !=
	       (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL));
	ASSERT((lock_flags & ~(XFS_LOCK_MASK | XFS_IUNLOCK_NONOTIFY |
			XFS_LOCK_DEP_MASK)) == 0);
	ASSERT(lock_flags != 0);

	if (lock_flags & XFS_IOLOCK_EXCL)
		mrunlock_excl(&ip->i_iolock);
	else if (lock_flags & XFS_IOLOCK_SHARED)
		mrunlock_shared(&ip->i_iolock);

	if (lock_flags & XFS_ILOCK_EXCL)
		mrunlock_excl(&ip->i_lock);
	else if (lock_flags & XFS_ILOCK_SHARED)
		mrunlock_shared(&ip->i_lock);

	if ((lock_flags & (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL)) &&
	    !(lock_flags & XFS_IUNLOCK_NONOTIFY) && ip->i_itemp) {
		/*
		 * Let the AIL know that this item has been unlocked in case
		 * it is in the AIL and anyone is waiting on it.  Don't do
		 * this if the caller has asked us not to.
		 */
		xfs_trans_unlocked_item(ip->i_itemp->ili_item.li_ailp,
					(xfs_log_item_t*)(ip->i_itemp));
	}
	xfs_ilock_trace(ip, 3, lock_flags, (inst_t *)__return_address);
}

/*
 * Give up write locks.  The i/o lock cannot be held nested
 * if it is being demoted.
 */
void
xfs_ilock_demote(
	xfs_inode_t		*ip,
	uint			lock_flags)
{
	ASSERT(lock_flags & (XFS_IOLOCK_EXCL|XFS_ILOCK_EXCL));
	ASSERT((lock_flags & ~(XFS_IOLOCK_EXCL|XFS_ILOCK_EXCL)) == 0);

	if (lock_flags & XFS_ILOCK_EXCL)
		mrdemote(&ip->i_lock);
	if (lock_flags & XFS_IOLOCK_EXCL)
		mrdemote(&ip->i_iolock);
}
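
/*
 * Illustrative demotion sketch (editor's addition, not part of the original
 * source): a caller that took the ilock exclusively for setup can demote it
 * to shared mode once readers may proceed, and must then unlock it in shared
 * mode.
 *
 *	xfs_ilock(ip, XFS_ILOCK_EXCL);
 *	(... exclusive setup work ...)
 *	xfs_ilock_demote(ip, XFS_ILOCK_EXCL);
 *	(... work that only needs the shared lock ...)
 *	xfs_iunlock(ip, XFS_ILOCK_SHARED);
 */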

#ifdef DEBUG
/*
 * Debug-only routine; without additional rw_semaphore APIs, we can now only
 * answer requests regarding whether we hold the lock for write (reader state
 * is outside our visibility, we only track writer state).
 *
 * Note: this means !xfs_isilocked would give false positives, so don't do that.
 */
int
xfs_isilocked(
	xfs_inode_t		*ip,
	uint			lock_flags)
{
	if ((lock_flags & (XFS_ILOCK_EXCL|XFS_ILOCK_SHARED)) ==
	    XFS_ILOCK_EXCL) {
		if (!ip->i_lock.mr_writer)
			return 0;
	}

	if ((lock_flags & (XFS_IOLOCK_EXCL|XFS_IOLOCK_SHARED)) ==
	    XFS_IOLOCK_EXCL) {
		if (!ip->i_iolock.mr_writer)
			return 0;
	}

	return 1;
}
#endif

#ifdef	XFS_INODE_TRACE

#define	KTRACE_ENTER(ip, vk, s, line, ra)			\
	ktrace_enter((ip)->i_trace,				\
/* 0 */			(void *)(__psint_t)(vk),		\
/* 1 */			(void *)(s),				\
/* 2 */			(void *)(__psint_t) line,		\
/* 3 */			(void *)(__psint_t)atomic_read(&VFS_I(ip)->i_count), \
/* 4 */			(void *)(ra),				\
/* 5 */			NULL,					\
/* 6 */			(void *)(__psint_t)current_cpu(),	\
/* 7 */			(void *)(__psint_t)current_pid(),	\
/* 8 */			(void *)__return_address,		\
/* 9 */			NULL, NULL, NULL, NULL, NULL, NULL, NULL)

/*
 * Vnode tracing code.
 */
void
_xfs_itrace_entry(xfs_inode_t *ip, const char *func, inst_t *ra)
{
	KTRACE_ENTER(ip, INODE_KTRACE_ENTRY, func, 0, ra);
}

void
_xfs_itrace_exit(xfs_inode_t *ip, const char *func, inst_t *ra)
{
	KTRACE_ENTER(ip, INODE_KTRACE_EXIT, func, 0, ra);
}

void
xfs_itrace_hold(xfs_inode_t *ip, char *file, int line, inst_t *ra)
{
	KTRACE_ENTER(ip, INODE_KTRACE_HOLD, file, line, ra);
}

void
_xfs_itrace_ref(xfs_inode_t *ip, char *file, int line, inst_t *ra)
{
	KTRACE_ENTER(ip, INODE_KTRACE_REF, file, line, ra);
}

void
xfs_itrace_rele(xfs_inode_t *ip, char *file, int line, inst_t *ra)
{
	KTRACE_ENTER(ip, INODE_KTRACE_RELE, file, line, ra);
}
#endif	/* XFS_INODE_TRACE */