/*
 * Copyright (c) 2000-2006 Silicon Graphics, Inc.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */
#include <linux/log2.h>

#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_inode.h"
#include "xfs_da_format.h"
#include "xfs_da_btree.h"
#include "xfs_attr_sf.h"
#include "xfs_trans_space.h"
#include "xfs_trans.h"
#include "xfs_buf_item.h"
#include "xfs_inode_item.h"
#include "xfs_ialloc.h"
#include "xfs_bmap_util.h"
#include "xfs_error.h"
#include "xfs_quota.h"
#include "xfs_dinode.h"
#include "xfs_filestream.h"
#include "xfs_cksum.h"
#include "xfs_trace.h"
#include "xfs_icache.h"
#include "xfs_symlink.h"
#include "xfs_trans_priv.h"
#include "xfs_bmap_btree.h"

kmem_zone_t		*xfs_inode_zone;

/*
 * Used in xfs_itruncate_extents().  This is the maximum number of extents
 * freed from a file in a single transaction.
 */
#define	XFS_ITRUNC_MAX_EXTENTS	2

STATIC int xfs_iflush_int(xfs_inode_t *, xfs_buf_t *);
/*
 * helper function to extract extent size hint from inode
 */
	if ((ip->i_d.di_flags & XFS_DIFLAG_EXTSIZE) && ip->i_d.di_extsize)
		return ip->i_d.di_extsize;
	if (XFS_IS_REALTIME_INODE(ip))
		return ip->i_mount->m_sb.sb_rextsize;
/*
 * This is a wrapper routine around the xfs_ilock() routine used to centralize
 * some grungy code.  It is used in places that wish to lock the inode solely
 * for reading the extents.  The reason these places can't just call
 * xfs_ilock(SHARED) is that the inode lock also guards the bringing in of the
 * extents from disk for a file in b-tree format.  If the inode is in b-tree
 * format, then we need to lock the inode exclusively until the extents are
 * read in.  Locking it exclusively all the time would limit our parallelism
 * unnecessarily, though.  What we do instead is check to see if the extents
 * have been read in yet, and only lock the inode exclusively if they have not.
 *
 * The function returns a value which should be given to the corresponding
 * xfs_iunlock_map_shared().  This value is the mode in which the lock was
 * acquired.
 */
	if ((ip->i_d.di_format == XFS_DINODE_FMT_BTREE) &&
	    ((ip->i_df.if_flags & XFS_IFEXTENTS) == 0)) {
		lock_mode = XFS_ILOCK_EXCL;
	} else {
		lock_mode = XFS_ILOCK_SHARED;
	}

	xfs_ilock(ip, lock_mode);
/*
 * This is simply the unlock routine to go with xfs_ilock_map_shared().
 * All it does is call xfs_iunlock() with the given lock_mode.
 */
void
xfs_iunlock_map_shared(
	xfs_inode_t		*ip,
	unsigned int		lock_mode)
{
	xfs_iunlock(ip, lock_mode);
}
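/*
 * Illustrative sketch (not part of the original file): a reader that only
 * needs the extent list pairs the two helpers above, passing the mode that
 * xfs_ilock_map_shared() returned back to xfs_iunlock_map_shared().  The
 * local variable name is hypothetical.
 *
 *	uint	lock_mode;
 *
 *	lock_mode = xfs_ilock_map_shared(ip);
 *	// ... read the extent list via ip->i_df ...
 *	xfs_iunlock_map_shared(ip, lock_mode);
 */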
/*
 * The xfs inode contains 2 locks: a multi-reader lock called the
 * i_iolock and a multi-reader lock called the i_lock.  This routine
 * allows either or both of the locks to be obtained.
 *
 * The 2 locks should always be ordered so that the IO lock is
 * obtained first in order to prevent deadlock.
 *
 * ip -- the inode being locked
 * lock_flags -- this parameter indicates the inode's locks
 *       to be locked.  It can be:
 *		XFS_IOLOCK_SHARED | XFS_ILOCK_SHARED,
 *		XFS_IOLOCK_SHARED | XFS_ILOCK_EXCL,
 *		XFS_IOLOCK_EXCL | XFS_ILOCK_SHARED,
 *		XFS_IOLOCK_EXCL | XFS_ILOCK_EXCL
 */
	trace_xfs_ilock(ip, lock_flags, _RET_IP_);

	/*
	 * You can't set both SHARED and EXCL for the same lock,
	 * and only XFS_IOLOCK_SHARED, XFS_IOLOCK_EXCL, XFS_ILOCK_SHARED,
	 * and XFS_ILOCK_EXCL are valid values to set in lock_flags.
	 */
	ASSERT((lock_flags & (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL)) !=
	       (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL));
	ASSERT((lock_flags & (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL)) !=
	       (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL));
	ASSERT((lock_flags & ~(XFS_LOCK_MASK | XFS_LOCK_DEP_MASK)) == 0);

	if (lock_flags & XFS_IOLOCK_EXCL)
		mrupdate_nested(&ip->i_iolock, XFS_IOLOCK_DEP(lock_flags));
	else if (lock_flags & XFS_IOLOCK_SHARED)
		mraccess_nested(&ip->i_iolock, XFS_IOLOCK_DEP(lock_flags));

	if (lock_flags & XFS_ILOCK_EXCL)
		mrupdate_nested(&ip->i_lock, XFS_ILOCK_DEP(lock_flags));
	else if (lock_flags & XFS_ILOCK_SHARED)
		mraccess_nested(&ip->i_lock, XFS_ILOCK_DEP(lock_flags));
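/*
 * Illustrative sketch (not part of the original file): callers combine the
 * IOLOCK and ILOCK flags listed in the comment above in a single call, and
 * must pass the same combination to xfs_iunlock(), e.g.:
 *
 *	xfs_ilock(ip, XFS_IOLOCK_EXCL | XFS_ILOCK_EXCL);
 *	// ... modify the inode ...
 *	xfs_iunlock(ip, XFS_IOLOCK_EXCL | XFS_ILOCK_EXCL);
 */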
/*
 * This is just like xfs_ilock(), except that the caller
 * is guaranteed not to sleep.  It returns 1 if it gets
 * the requested locks and 0 otherwise.  If the IO lock is
 * obtained but the inode lock cannot be, then the IO lock
 * is dropped before returning.
 *
 * ip -- the inode being locked
 * lock_flags -- this parameter indicates the inode's locks to be
 *       locked.  See the comment for xfs_ilock() for a list
 *       of valid values.
 */
	trace_xfs_ilock_nowait(ip, lock_flags, _RET_IP_);

	/*
	 * You can't set both SHARED and EXCL for the same lock,
	 * and only XFS_IOLOCK_SHARED, XFS_IOLOCK_EXCL, XFS_ILOCK_SHARED,
	 * and XFS_ILOCK_EXCL are valid values to set in lock_flags.
	 */
	ASSERT((lock_flags & (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL)) !=
	       (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL));
	ASSERT((lock_flags & (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL)) !=
	       (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL));
	ASSERT((lock_flags & ~(XFS_LOCK_MASK | XFS_LOCK_DEP_MASK)) == 0);

	if (lock_flags & XFS_IOLOCK_EXCL) {
		if (!mrtryupdate(&ip->i_iolock))
			goto out;
	} else if (lock_flags & XFS_IOLOCK_SHARED) {
		if (!mrtryaccess(&ip->i_iolock))
			goto out;
	}
	if (lock_flags & XFS_ILOCK_EXCL) {
		if (!mrtryupdate(&ip->i_lock))
			goto out_undo_iolock;
	} else if (lock_flags & XFS_ILOCK_SHARED) {
		if (!mrtryaccess(&ip->i_lock))
			goto out_undo_iolock;
	}
	return 1;

out_undo_iolock:
	if (lock_flags & XFS_IOLOCK_EXCL)
		mrunlock_excl(&ip->i_iolock);
	else if (lock_flags & XFS_IOLOCK_SHARED)
		mrunlock_shared(&ip->i_iolock);
out:
	return 0;
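/*
 * Illustrative sketch (not part of the original file): because
 * xfs_ilock_nowait() returns 1 only when every requested lock was taken
 * (and drops the IO lock itself if the inode lock cannot be obtained),
 * a caller can fall back to the blocking path on failure:
 *
 *	if (!xfs_ilock_nowait(ip, XFS_ILOCK_EXCL)) {
 *		// nothing is held here; back off or take the blocking path
 *		xfs_ilock(ip, XFS_ILOCK_EXCL);
 *	}
 *	// ... work with the inode ...
 *	xfs_iunlock(ip, XFS_ILOCK_EXCL);
 */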
/*
 * xfs_iunlock() is used to drop the inode locks acquired with
 * xfs_ilock() and xfs_ilock_nowait().  The caller must pass
 * in the flags given to xfs_ilock() or xfs_ilock_nowait() so
 * that we know which locks to drop.
 *
 * ip -- the inode being unlocked
 * lock_flags -- this parameter indicates the inode's locks to be
 *       unlocked.  See the comment for xfs_ilock() for a list
 *       of valid values for this parameter.
 */
	/*
	 * You can't set both SHARED and EXCL for the same lock,
	 * and only XFS_IOLOCK_SHARED, XFS_IOLOCK_EXCL, XFS_ILOCK_SHARED,
	 * and XFS_ILOCK_EXCL are valid values to set in lock_flags.
	 */
	ASSERT((lock_flags & (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL)) !=
	       (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL));
	ASSERT((lock_flags & (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL)) !=
	       (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL));
	ASSERT((lock_flags & ~(XFS_LOCK_MASK | XFS_LOCK_DEP_MASK)) == 0);
	ASSERT(lock_flags != 0);

	if (lock_flags & XFS_IOLOCK_EXCL)
		mrunlock_excl(&ip->i_iolock);
	else if (lock_flags & XFS_IOLOCK_SHARED)
		mrunlock_shared(&ip->i_iolock);

	if (lock_flags & XFS_ILOCK_EXCL)
		mrunlock_excl(&ip->i_lock);
	else if (lock_flags & XFS_ILOCK_SHARED)
		mrunlock_shared(&ip->i_lock);

	trace_xfs_iunlock(ip, lock_flags, _RET_IP_);

/*
 * give up write locks.  the i/o lock cannot be held nested
 * if it is being demoted.
 */
	ASSERT(lock_flags & (XFS_IOLOCK_EXCL|XFS_ILOCK_EXCL));
	ASSERT((lock_flags & ~(XFS_IOLOCK_EXCL|XFS_ILOCK_EXCL)) == 0);

	if (lock_flags & XFS_ILOCK_EXCL)
		mrdemote(&ip->i_lock);
	if (lock_flags & XFS_IOLOCK_EXCL)
		mrdemote(&ip->i_iolock);

	trace_xfs_ilock_demote(ip, lock_flags, _RET_IP_);
#if defined(DEBUG) || defined(XFS_WARN)
	if (lock_flags & (XFS_ILOCK_EXCL|XFS_ILOCK_SHARED)) {
		if (!(lock_flags & XFS_ILOCK_SHARED))
			return !!ip->i_lock.mr_writer;
		return rwsem_is_locked(&ip->i_lock.mr_lock);
	}

	if (lock_flags & (XFS_IOLOCK_EXCL|XFS_IOLOCK_SHARED)) {
		if (!(lock_flags & XFS_IOLOCK_SHARED))
			return !!ip->i_iolock.mr_writer;
		return rwsem_is_locked(&ip->i_iolock.mr_lock);
	}

int xfs_small_retries;
int xfs_middle_retries;
int xfs_lots_retries;
/*
 * Bump the subclass so xfs_lock_inodes() acquires each lock with
 * a different value of subclass for nested locking.
 */
xfs_lock_inumorder(int lock_mode, int subclass)
{
	if (lock_mode & (XFS_IOLOCK_SHARED|XFS_IOLOCK_EXCL))
		lock_mode |= (subclass + XFS_LOCK_INUMORDER) << XFS_IOLOCK_SHIFT;
	if (lock_mode & (XFS_ILOCK_SHARED|XFS_ILOCK_EXCL))
		lock_mode |= (subclass + XFS_LOCK_INUMORDER) << XFS_ILOCK_SHIFT;

	return lock_mode;
}
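/*
 * Illustrative sketch (not part of the original file): for the i-th inode in
 * an ordered set, the subclass is folded into the lockdep portion of the
 * flags, so two ILOCKs taken by xfs_lock_inodes() are seen by lockdep as
 * different (nested) classes rather than a recursive acquisition:
 *
 *	xfs_ilock(ips[0], xfs_lock_inumorder(XFS_ILOCK_EXCL, 0));
 *	xfs_ilock(ips[1], xfs_lock_inumorder(XFS_ILOCK_EXCL, 1));
 */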
/*
 * The following routine will lock n inodes in exclusive mode.
 * We assume the caller calls us with the inodes in i_ino order.
 *
 * We need to detect deadlock where an inode that we lock
 * is in the AIL and we start waiting for another inode that is locked
 * by a thread in a long running transaction (such as truncate). This can
 * result in deadlock since the long running trans might need to wait
 * for the inode we just locked in order to push the tail and free space
 * in the log.
 */
	int		attempts = 0, i, j, try_lock;

	ASSERT(ips && (inodes >= 2)); /* we need at least two */

	for (; i < inodes; i++) {
		if (i && (ips[i] == ips[i - 1]))	/* Already locked */
			continue;

		/*
		 * If try_lock is not set yet, make sure all locked inodes
		 * are not in the AIL.
		 * If any are, set try_lock to be used later.
		 */
		for (j = (i - 1); j >= 0 && !try_lock; j--) {
			lp = (xfs_log_item_t *)ips[j]->i_itemp;
			if (lp && (lp->li_flags & XFS_LI_IN_AIL)) {
				try_lock++;
			}
		}

		/*
		 * If any of the previous locks we have locked is in the AIL,
		 * we must TRY to get the second and subsequent locks. If
		 * we can't get any, we must release all we have
		 * and try again.
		 */

		/* try_lock must be 0 if i is 0. */
		if (try_lock) {
			/*
			 * try_lock means we have an inode locked
			 * that is in the AIL.
			 */
			if (!xfs_ilock_nowait(ips[i],
					xfs_lock_inumorder(lock_mode, i))) {
				attempts++;

				/*
				 * Unlock all previous guys and try again.
				 * xfs_iunlock will try to push the tail
				 * if the inode is in the AIL.
				 */
				for (j = i - 1; j >= 0; j--) {
					/*
					 * Check to see if we've already
					 * unlocked this one.
					 * Not the first one going back,
					 * and the inode ptr is the same.
					 */
					if ((j != (i - 1)) && ips[j] ==
								ips[j + 1])
						continue;

					xfs_iunlock(ips[j], lock_mode);
				}

				if ((attempts % 5) == 0) {
					delay(1); /* Don't just spin the CPU */
				}
			}
		} else {
			xfs_ilock(ips[i], xfs_lock_inumorder(lock_mode, i));
		}
	}

	if (attempts < 5) xfs_small_retries++;
	else if (attempts < 100) xfs_middle_retries++;
	else xfs_lots_retries++;
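/*
 * Illustrative sketch (not part of the original file): callers are expected
 * to sort the array by inode number before calling; for two inodes the
 * hypothetical locals ip_a and ip_b would be ordered like this:
 *
 *	xfs_inode_t	*ips[2];
 *
 *	ips[0] = ip_a->i_ino < ip_b->i_ino ? ip_a : ip_b;
 *	ips[1] = (ips[0] == ip_a) ? ip_b : ip_a;
 *	xfs_lock_inodes(ips, 2, XFS_ILOCK_EXCL);
 */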
/*
 * xfs_lock_two_inodes() can only be used to lock one type of lock
 * at a time - the iolock or the ilock, but not both at once. If
 * we lock both at once, lockdep will report false positives saying
 * we have violated locking orders.
 */
	if (lock_mode & (XFS_IOLOCK_SHARED|XFS_IOLOCK_EXCL))
		ASSERT((lock_mode & (XFS_ILOCK_SHARED|XFS_ILOCK_EXCL)) == 0);
	ASSERT(ip0->i_ino != ip1->i_ino);

	if (ip0->i_ino > ip1->i_ino) {
		temp = ip0;
		ip0 = ip1;
		ip1 = temp;
	}

 again:
	xfs_ilock(ip0, xfs_lock_inumorder(lock_mode, 0));

	/*
	 * If the first lock we have locked is in the AIL, we must TRY to get
	 * the second lock. If we can't get it, we must release the first one
	 * and try again.
	 */
	lp = (xfs_log_item_t *)ip0->i_itemp;
	if (lp && (lp->li_flags & XFS_LI_IN_AIL)) {
		if (!xfs_ilock_nowait(ip1, xfs_lock_inumorder(lock_mode, 1))) {
			xfs_iunlock(ip0, lock_mode);
			if ((++attempts % 5) == 0)
				delay(1); /* Don't just spin the CPU */
			goto again;
		}
	} else {
		xfs_ilock(ip1, xfs_lock_inumorder(lock_mode, 1));
	}
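/*
 * Illustrative sketch (not part of the original file): xfs_lock_two_inodes()
 * does the inode-number ordering itself, so a caller such as xfs_link()
 * below only has to pick one lock type per call:
 *
 *	xfs_lock_two_inodes(sip, tdp, XFS_ILOCK_EXCL);
 *	// never pass XFS_IOLOCK_* and XFS_ILOCK_* together in one call
 */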
	struct xfs_inode	*ip)
{
	wait_queue_head_t *wq = bit_waitqueue(&ip->i_flags, __XFS_IFLOCK_BIT);
	DEFINE_WAIT_BIT(wait, &ip->i_flags, __XFS_IFLOCK_BIT);

	do {
		prepare_to_wait_exclusive(wq, &wait.wait, TASK_UNINTERRUPTIBLE);
		if (xfs_isiflocked(ip))
			io_schedule();
	} while (!xfs_iflock_nowait(ip));

	finish_wait(wq, &wait.wait);
}
	if (di_flags & XFS_DIFLAG_ANY) {
		if (di_flags & XFS_DIFLAG_REALTIME)
			flags |= XFS_XFLAG_REALTIME;
		if (di_flags & XFS_DIFLAG_PREALLOC)
			flags |= XFS_XFLAG_PREALLOC;
		if (di_flags & XFS_DIFLAG_IMMUTABLE)
			flags |= XFS_XFLAG_IMMUTABLE;
		if (di_flags & XFS_DIFLAG_APPEND)
			flags |= XFS_XFLAG_APPEND;
		if (di_flags & XFS_DIFLAG_SYNC)
			flags |= XFS_XFLAG_SYNC;
		if (di_flags & XFS_DIFLAG_NOATIME)
			flags |= XFS_XFLAG_NOATIME;
		if (di_flags & XFS_DIFLAG_NODUMP)
			flags |= XFS_XFLAG_NODUMP;
		if (di_flags & XFS_DIFLAG_RTINHERIT)
			flags |= XFS_XFLAG_RTINHERIT;
		if (di_flags & XFS_DIFLAG_PROJINHERIT)
			flags |= XFS_XFLAG_PROJINHERIT;
		if (di_flags & XFS_DIFLAG_NOSYMLINKS)
			flags |= XFS_XFLAG_NOSYMLINKS;
		if (di_flags & XFS_DIFLAG_EXTSIZE)
			flags |= XFS_XFLAG_EXTSIZE;
		if (di_flags & XFS_DIFLAG_EXTSZINHERIT)
			flags |= XFS_XFLAG_EXTSZINHERIT;
		if (di_flags & XFS_DIFLAG_NODEFRAG)
			flags |= XFS_XFLAG_NODEFRAG;
		if (di_flags & XFS_DIFLAG_FILESTREAM)
			flags |= XFS_XFLAG_FILESTREAM;
	}

	xfs_icdinode_t		*dic = &ip->i_d;

	return _xfs_dic2xflags(dic->di_flags) |
				(XFS_IFORK_Q(ip) ? XFS_XFLAG_HASATTR : 0);

	return _xfs_dic2xflags(be16_to_cpu(dip->di_flags)) |
				(XFS_DFORK_Q(dip) ? XFS_XFLAG_HASATTR : 0);
/*
 * Looks up an inode from "name". If ci_name is not NULL, then a CI match
 * is allowed, otherwise it has to be an exact match. If a CI match is found,
 * ci_name->name will point to the actual name (caller must free) or
 * will be set to NULL if an exact match is found.
 */
	struct xfs_name		*name,
	struct xfs_name		*ci_name)

	trace_xfs_lookup(dp, name);

	if (XFS_FORCED_SHUTDOWN(dp->i_mount))
		return XFS_ERROR(EIO);

	lock_mode = xfs_ilock_map_shared(dp);
	error = xfs_dir_lookup(NULL, dp, name, &inum, ci_name);
	xfs_iunlock_map_shared(dp, lock_mode);

	error = xfs_iget(dp->i_mount, NULL, inum, 0, 0, ipp);

	kmem_free(ci_name->name);
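/*
 * Illustrative sketch (not part of the original file): a case-insensitive
 * caller passes a ci_name and must free the returned name buffer; the
 * calling context shown here is hypothetical.
 *
 *	struct xfs_name		ci_name;
 *	struct xfs_inode	*ip;
 *	int			error;
 *
 *	error = xfs_lookup(dp, name, &ip, &ci_name);
 *	if (!error && ci_name.name)
 *		kmem_free(ci_name.name);
 */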
/*
 * Allocate an inode on disk and return a copy of its in-core version.
 * The in-core inode is locked exclusively.  Set mode, nlink, and rdev
 * appropriately within the inode.  The uid and gid for the inode are
 * set according to the contents of the given cred structure.
 *
 * Use xfs_dialloc() to allocate the on-disk inode. If xfs_dialloc()
 * has a free inode available, call xfs_iget() to obtain the in-core
 * version of the allocated inode.  Finally, fill in the inode and
 * log its initial contents.  In this case, ialloc_context would be
 * set to NULL.
 *
 * If xfs_dialloc() does not have an available inode, it will replenish
 * its supply by doing an allocation. Since we can only do one
 * allocation within a transaction without deadlocks, we must commit
 * the current transaction before returning the inode itself.
 * In this case, therefore, we will set ialloc_context and return.
 * The caller should then commit the current transaction, start a new
 * transaction, and call xfs_ialloc() again to actually get the inode.
 *
 * To ensure that some other process does not grab the inode that
 * was allocated during the first call to xfs_ialloc(), this routine
 * also returns the [locked] bp pointing to the head of the freelist
 * as ialloc_context.  The caller should hold this buffer across
 * the commit and pass it back into this routine on the second call.
 *
 * If we are allocating quota inodes, we do not have a parent inode
 * to attach to or associate with (i.e. pip == NULL) because they
 * are not linked into the directory structure - they are attached
 * directly to the superblock - and so have no parent.
 */
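/*
 * Illustrative sketch (not part of the original file) of the two-call
 * protocol described above, mirroring what xfs_dir_ialloc() below does:
 *
 *	code = xfs_ialloc(tp, dp, mode, nlink, rdev, prid, okalloc,
 *			  &ialloc_context, &ip);
 *	if (ialloc_context) {
 *		// hold the AGI buffer, commit tp, reserve a new transaction,
 *		// then call xfs_ialloc() again - the second call is
 *		// guaranteed to succeed because the AGI is still locked
 *	}
 */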
	xfs_buf_t	**ialloc_context,
	xfs_inode_t	**ipp)
{
	struct xfs_mount *mp = tp->t_mountp;

	/*
	 * Call the space management code to pick
	 * the on-disk inode to be allocated.
	 */
	error = xfs_dialloc(tp, pip ? pip->i_ino : 0, mode, okalloc,
			    ialloc_context, &ino);
	if (*ialloc_context || ino == NULLFSINO) {
		*ipp = NULL;
		return 0;
	}
	ASSERT(*ialloc_context == NULL);

	/*
	 * Get the in-core inode with the lock held exclusively.
	 * This is because we're setting fields here we need
	 * to prevent others from looking at until we're done.
	 */
	error = xfs_iget(mp, tp, ino, XFS_IGET_CREATE,
			 XFS_ILOCK_EXCL, &ip);

	ip->i_d.di_mode = mode;
	ip->i_d.di_onlink = 0;
	ip->i_d.di_nlink = nlink;
	ASSERT(ip->i_d.di_nlink == nlink);
	ip->i_d.di_uid = xfs_kuid_to_uid(current_fsuid());
	ip->i_d.di_gid = xfs_kgid_to_gid(current_fsgid());
	xfs_set_projid(ip, prid);
	memset(&(ip->i_d.di_pad[0]), 0, sizeof(ip->i_d.di_pad));

	/*
	 * If the superblock version is up to where we support new format
	 * inodes and this is currently an old format inode, then change
	 * the inode version number now.  This way we only do the conversion
	 * here rather than here and in the flush/logging code.
	 */
	if (xfs_sb_version_hasnlink(&mp->m_sb) &&
	    ip->i_d.di_version == 1) {
		ip->i_d.di_version = 2;
		/*
		 * We've already zeroed the old link count, the projid field,
		 * and the pad field.
		 */
	}

	/*
	 * Project ids won't be stored on disk if we are using a version 1 inode.
	 */
	if ((prid != 0) && (ip->i_d.di_version == 1))
		xfs_bump_ino_vers2(tp, ip);

	if (pip && XFS_INHERIT_GID(pip)) {
		ip->i_d.di_gid = pip->i_d.di_gid;
		if ((pip->i_d.di_mode & S_ISGID) && S_ISDIR(mode)) {
			ip->i_d.di_mode |= S_ISGID;
		}
	}

	/*
	 * If the group ID of the new file does not match the effective group
	 * ID or one of the supplementary group IDs, the S_ISGID bit is cleared
	 * (and only if the irix_sgid_inherit compatibility variable is set).
	 */
	if ((irix_sgid_inherit) &&
	    (ip->i_d.di_mode & S_ISGID) &&
	    (!in_group_p(xfs_gid_to_kgid(ip->i_d.di_gid)))) {
		ip->i_d.di_mode &= ~S_ISGID;
	}

	ip->i_d.di_nextents = 0;
	ASSERT(ip->i_d.di_nblocks == 0);

	ip->i_d.di_mtime.t_sec = (__int32_t)tv.tv_sec;
	ip->i_d.di_mtime.t_nsec = (__int32_t)tv.tv_nsec;
	ip->i_d.di_atime = ip->i_d.di_mtime;
	ip->i_d.di_ctime = ip->i_d.di_mtime;

	/*
	 * di_gen will have been taken care of in xfs_iread.
	 */
	ip->i_d.di_extsize = 0;
	ip->i_d.di_dmevmask = 0;
	ip->i_d.di_dmstate = 0;
	ip->i_d.di_flags = 0;

	if (ip->i_d.di_version == 3) {
		ASSERT(ip->i_d.di_ino == ino);
		ASSERT(uuid_equal(&ip->i_d.di_uuid, &mp->m_sb.sb_uuid));
		ip->i_d.di_changecount = 1;
		ip->i_d.di_flags2 = 0;
		memset(&(ip->i_d.di_pad2[0]), 0, sizeof(ip->i_d.di_pad2));
		ip->i_d.di_crtime = ip->i_d.di_mtime;
	}

	flags = XFS_ILOG_CORE;
	switch (mode & S_IFMT) {
	case S_IFIFO:
	case S_IFCHR:
	case S_IFBLK:
	case S_IFSOCK:
		ip->i_d.di_format = XFS_DINODE_FMT_DEV;
		ip->i_df.if_u2.if_rdev = rdev;
		ip->i_df.if_flags = 0;
		flags |= XFS_ILOG_DEV;
		break;
	case S_IFREG:
	case S_IFDIR:
		/*
		 * we can't set up filestreams until after the VFS inode
		 * is set up properly.
		 */
		if (pip && xfs_inode_is_filestream(pip))
			filestreams = 1;

		if (pip && (pip->i_d.di_flags & XFS_DIFLAG_ANY)) {
			uint	di_flags = 0;

			if (S_ISDIR(mode)) {
				if (pip->i_d.di_flags & XFS_DIFLAG_RTINHERIT)
					di_flags |= XFS_DIFLAG_RTINHERIT;
				if (pip->i_d.di_flags & XFS_DIFLAG_EXTSZINHERIT) {
					di_flags |= XFS_DIFLAG_EXTSZINHERIT;
					ip->i_d.di_extsize = pip->i_d.di_extsize;
				}
			} else if (S_ISREG(mode)) {
				if (pip->i_d.di_flags & XFS_DIFLAG_RTINHERIT)
					di_flags |= XFS_DIFLAG_REALTIME;
				if (pip->i_d.di_flags & XFS_DIFLAG_EXTSZINHERIT) {
					di_flags |= XFS_DIFLAG_EXTSIZE;
					ip->i_d.di_extsize = pip->i_d.di_extsize;
				}
			}
			if ((pip->i_d.di_flags & XFS_DIFLAG_NOATIME) &&
			    xfs_inherit_noatime)
				di_flags |= XFS_DIFLAG_NOATIME;
			if ((pip->i_d.di_flags & XFS_DIFLAG_NODUMP) &&
			    xfs_inherit_nodump)
				di_flags |= XFS_DIFLAG_NODUMP;
			if ((pip->i_d.di_flags & XFS_DIFLAG_SYNC) &&
			    xfs_inherit_sync)
				di_flags |= XFS_DIFLAG_SYNC;
			if ((pip->i_d.di_flags & XFS_DIFLAG_NOSYMLINKS) &&
			    xfs_inherit_nosymlinks)
				di_flags |= XFS_DIFLAG_NOSYMLINKS;
			if (pip->i_d.di_flags & XFS_DIFLAG_PROJINHERIT)
				di_flags |= XFS_DIFLAG_PROJINHERIT;
			if ((pip->i_d.di_flags & XFS_DIFLAG_NODEFRAG) &&
			    xfs_inherit_nodefrag)
				di_flags |= XFS_DIFLAG_NODEFRAG;
			if (pip->i_d.di_flags & XFS_DIFLAG_FILESTREAM)
				di_flags |= XFS_DIFLAG_FILESTREAM;
			ip->i_d.di_flags |= di_flags;
		}
		/* FALLTHROUGH */
	case S_IFLNK:
		ip->i_d.di_format = XFS_DINODE_FMT_EXTENTS;
		ip->i_df.if_flags = XFS_IFEXTENTS;
		ip->i_df.if_bytes = ip->i_df.if_real_bytes = 0;
		ip->i_df.if_u1.if_extents = NULL;
		break;
	}

	/*
	 * Attribute fork settings for new inode.
	 */
	ip->i_d.di_aformat = XFS_DINODE_FMT_EXTENTS;
	ip->i_d.di_anextents = 0;

	/*
	 * Log the new values stuffed into the inode.
	 */
	xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
	xfs_trans_log_inode(tp, ip, flags);

	/* now that we have an i_mode we can setup inode ops and unlock */
	xfs_setup_inode(ip);

	/* now we have set up the vfs inode we can associate the filestream */
	if (filestreams) {
		error = xfs_filestream_associate(pip, ip);
		if (!error)
			xfs_iflags_set(ip, XFS_IFILESTREAM);
	}
/*
 * Allocates a new inode from disk and return a pointer to the
 * incore copy. This routine will internally commit the current
 * transaction and allocate a new one if the Space Manager needed
 * to do an allocation to replenish the inode free-list.
 *
 * This routine is designed to be called from xfs_create and
 * related creation paths.
 */
	xfs_trans_t	**tpp,		/* input: current transaction;
					   output: may be a new transaction. */
	xfs_inode_t	*dp,		/* directory within whose allocate
					   the inode. */
	prid_t		prid,		/* project id */
	int		okalloc,	/* ok to allocate new space */
	xfs_inode_t	**ipp,		/* pointer to inode; it will be
					   locked. */

	xfs_buf_t	*ialloc_context = NULL;

	ASSERT(tp->t_flags & XFS_TRANS_PERM_LOG_RES);

	/*
	 * xfs_ialloc will return a pointer to an incore inode if
	 * the Space Manager has an available inode on the free
	 * list. Otherwise, it will do an allocation and replenish
	 * the freelist.  Since we can only do one allocation per
	 * transaction without deadlocks, we will need to commit the
	 * current transaction and start a new one.  We will then
	 * need to call xfs_ialloc again to get the inode.
	 *
	 * If xfs_ialloc did an allocation to replenish the freelist,
	 * it returns the bp containing the head of the freelist as
	 * ialloc_context. We will hold a lock on it across the
	 * transaction commit so that no other process can steal
	 * the inode(s) that we've just allocated.
	 */
	code = xfs_ialloc(tp, dp, mode, nlink, rdev, prid, okalloc,
			  &ialloc_context, &ip);

	/*
	 * Return an error if we were unable to allocate a new inode.
	 * This should only happen if we run out of space on disk or
	 * encounter a disk error.
	 */
	if (!ialloc_context && !ip) {
		*ipp = NULL;
		return XFS_ERROR(ENOSPC);
	}

	/*
	 * If the AGI buffer is non-NULL, then we were unable to get an
	 * inode in one operation.  We need to commit the current
	 * transaction and call xfs_ialloc() again.  It is guaranteed
	 * to succeed the second time.
	 */
	if (ialloc_context) {
		struct xfs_trans_res tres;

		/*
		 * Normally, xfs_trans_commit releases all the locks.
		 * We call bhold to hang on to the ialloc_context across
		 * the commit.  Holding this buffer prevents any other
		 * processes from doing any allocations in this
		 * allocation group.
		 */
		xfs_trans_bhold(tp, ialloc_context);

		/*
		 * Save the log reservation so we can use
		 * them in the next transaction.
		 */
		tres.tr_logres = xfs_trans_get_log_res(tp);
		tres.tr_logcount = xfs_trans_get_log_count(tp);

		/*
		 * We want the quota changes to be associated with the next
		 * transaction, NOT this one. So, detach the dqinfo from this
		 * and attach it to the next transaction.
		 */
		dqinfo = (void *)tp->t_dqinfo;
		tp->t_dqinfo = NULL;
		tflags = tp->t_flags & XFS_TRANS_DQ_DIRTY;
		tp->t_flags &= ~(XFS_TRANS_DQ_DIRTY);

		ntp = xfs_trans_dup(tp);
		code = xfs_trans_commit(tp, 0);
		tp = ntp;
		if (committed != NULL) {
			*committed = 1;
		}

		/*
		 * If we get an error during the commit processing,
		 * release the buffer that is still held and return
		 * to the caller.
		 */
		if (code) {
			xfs_buf_relse(ialloc_context);
			if (dqinfo) {
				tp->t_dqinfo = dqinfo;
				xfs_trans_free_dqinfo(tp);
			}
			*ipp = NULL;
			return code;
		}

		/*
		 * transaction commit worked ok so we can drop the extra ticket
		 * reference that we gained in xfs_trans_dup()
		 */
		xfs_log_ticket_put(tp->t_ticket);
		tres.tr_logflags = XFS_TRANS_PERM_LOG_RES;
		code = xfs_trans_reserve(tp, &tres, 0, 0);

		/*
		 * Re-attach the quota info that we detached from prev trx.
		 */
		if (dqinfo) {
			tp->t_dqinfo = dqinfo;
			tp->t_flags |= tflags;
		}

		if (code) {
			xfs_buf_relse(ialloc_context);
			*tpp = tp;
			*ipp = NULL;
			return code;
		}

		xfs_trans_bjoin(tp, ialloc_context);

		/*
		 * Call ialloc again. Since we've locked out all
		 * other allocations in this allocation group,
		 * this call should always succeed.
		 */
		code = xfs_ialloc(tp, dp, mode, nlink, rdev, prid,
				  okalloc, &ialloc_context, &ip);

		/*
		 * If we get an error at this point, return to the caller
		 * so that the current transaction can be aborted.
		 */
		ASSERT(!ialloc_context && ip);

	} else {
		if (committed != NULL)
			*committed = 0;
	}
/*
 * Decrement the link count on an inode & log the change.
 * If this causes the link count to go to zero, initiate the
 * logging activity required to truncate a file.
 */
	xfs_trans_ichgtime(tp, ip, XFS_ICHGTIME_CHG);

	ASSERT(ip->i_d.di_nlink > 0);
	ip->i_d.di_nlink--;
	drop_nlink(VFS_I(ip));
	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);

	if (ip->i_d.di_nlink == 0) {
		/*
		 * We're dropping the last link to this file.
		 * Move the on-disk inode to the AGI unlinked list.
		 * From xfs_inactive() we will pull the inode from
		 * the list and free it.
		 */
		error = xfs_iunlink(tp, ip);
	}
/*
 * This gets called when the inode's version needs to be changed from 1 to 2.
 * Currently this happens when the nlink field overflows the old 16-bit value
 * or when chproj is called to change the project for the first time.
 * As a side effect the superblock version will also get rev'd
 * to contain the NLINK bit.
 */
	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
	ASSERT(ip->i_d.di_version == 1);

	ip->i_d.di_version = 2;
	ip->i_d.di_onlink = 0;
	memset(&(ip->i_d.di_pad[0]), 0, sizeof(ip->i_d.di_pad));

	if (!xfs_sb_version_hasnlink(&mp->m_sb)) {
		spin_lock(&mp->m_sb_lock);
		if (!xfs_sb_version_hasnlink(&mp->m_sb)) {
			xfs_sb_version_addnlink(&mp->m_sb);
			spin_unlock(&mp->m_sb_lock);
			xfs_mod_sb(tp, XFS_SB_VERSIONNUM);
		} else {
			spin_unlock(&mp->m_sb_lock);
		}
	}
	/* Caller must log the inode */
/*
 * Increment the link count on an inode & log the change.
 */
	xfs_trans_ichgtime(tp, ip, XFS_ICHGTIME_CHG);

	ASSERT(ip->i_d.di_nlink > 0);
	ip->i_d.di_nlink++;
	inc_nlink(VFS_I(ip));
	if ((ip->i_d.di_version == 1) &&
	    (ip->i_d.di_nlink > XFS_MAXLINK_1)) {
		/*
		 * The inode has increased its number of links beyond
		 * what can fit in an old format inode.  It now needs
		 * to be converted to a version 2 inode with a 32 bit
		 * link count.  If this is the first inode in the file
		 * system to do this, then we need to bump the superblock
		 * version number as well.
		 */
		xfs_bump_ino_vers2(tp, ip);
	}

	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
	struct xfs_name		*name,

	int			is_dir = S_ISDIR(mode);
	struct xfs_mount	*mp = dp->i_mount;
	struct xfs_inode	*ip = NULL;
	struct xfs_trans	*tp = NULL;
	xfs_bmap_free_t		free_list;
	xfs_fsblock_t		first_block;
	bool			unlock_dp_on_error = false;
	struct xfs_dquot	*udqp = NULL;
	struct xfs_dquot	*gdqp = NULL;
	struct xfs_dquot	*pdqp = NULL;
	struct xfs_trans_res	tres;

	trace_xfs_create(dp, name);

	if (XFS_FORCED_SHUTDOWN(mp))
		return XFS_ERROR(EIO);

	if (dp->i_d.di_flags & XFS_DIFLAG_PROJINHERIT)
		prid = xfs_get_projid(dp);
	else
		prid = XFS_PROJID_DEFAULT;

	/*
	 * Make sure that we have allocated dquot(s) on disk.
	 */
	error = xfs_qm_vop_dqalloc(dp, xfs_kuid_to_uid(current_fsuid()),
					xfs_kgid_to_gid(current_fsgid()), prid,
					XFS_QMOPT_QUOTALL | XFS_QMOPT_INHERIT,
					&udqp, &gdqp, &pdqp);

	if (is_dir) {
		resblks = XFS_MKDIR_SPACE_RES(mp, name->len);
		tres.tr_logres = M_RES(mp)->tr_mkdir.tr_logres;
		tres.tr_logcount = XFS_MKDIR_LOG_COUNT;
		tp = xfs_trans_alloc(mp, XFS_TRANS_MKDIR);
	} else {
		resblks = XFS_CREATE_SPACE_RES(mp, name->len);
		tres.tr_logres = M_RES(mp)->tr_create.tr_logres;
		tres.tr_logcount = XFS_CREATE_LOG_COUNT;
		tp = xfs_trans_alloc(mp, XFS_TRANS_CREATE);
	}

	cancel_flags = XFS_TRANS_RELEASE_LOG_RES;

	/*
	 * Initially assume that the file does not exist and
	 * reserve the resources for that case.  If that is not
	 * the case we'll drop the one we have and get a more
	 * appropriate transaction later.
	 */
	tres.tr_logflags = XFS_TRANS_PERM_LOG_RES;
	error = xfs_trans_reserve(tp, &tres, resblks, 0);
	if (error == ENOSPC) {
		/* flush outstanding delalloc blocks and retry */
		xfs_flush_inodes(mp);
		error = xfs_trans_reserve(tp, &tres, resblks, 0);
	}
	if (error == ENOSPC) {
		/* No space at all so try a "no-allocation" reservation */
		resblks = 0;
		error = xfs_trans_reserve(tp, &tres, 0, 0);
	}
	if (error) {
		cancel_flags = 0;
		goto out_trans_cancel;
	}

	xfs_ilock(dp, XFS_ILOCK_EXCL | XFS_ILOCK_PARENT);
	unlock_dp_on_error = true;

	xfs_bmap_init(&free_list, &first_block);

	/*
	 * Reserve disk quota and the inode.
	 */
	error = xfs_trans_reserve_quota(tp, mp, udqp, gdqp,
						pdqp, resblks, 1, 0);
	if (error)
		goto out_trans_cancel;

	error = xfs_dir_canenter(tp, dp, name, resblks);
	if (error)
		goto out_trans_cancel;

	/*
	 * A newly created regular or special file just has one directory
	 * entry pointing to them, but a directory also has the "." entry
	 * pointing to itself.
	 */
	error = xfs_dir_ialloc(&tp, dp, mode, is_dir ? 2 : 1, rdev,
			       prid, resblks > 0, &ip, &committed);
	if (error) {
		if (error == ENOSPC)
			goto out_trans_cancel;
		goto out_trans_abort;
	}

	/*
	 * Now we join the directory inode to the transaction.  We do not do it
	 * earlier because xfs_dir_ialloc might commit the previous transaction
	 * (and release all the locks).  An error from here on will result in
	 * the transaction cancel unlocking dp so don't do it explicitly in the
	 * error path.
	 */
	xfs_trans_ijoin(tp, dp, XFS_ILOCK_EXCL);
	unlock_dp_on_error = false;

	error = xfs_dir_createname(tp, dp, name, ip->i_ino,
					&first_block, &free_list, resblks ?
					resblks - XFS_IALLOC_SPACE_RES(mp) : 0);
	if (error) {
		ASSERT(error != ENOSPC);
		goto out_trans_abort;
	}
	xfs_trans_ichgtime(tp, dp, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
	xfs_trans_log_inode(tp, dp, XFS_ILOG_CORE);

	if (is_dir) {
		error = xfs_dir_init(tp, ip, dp);
		if (error)
			goto out_bmap_cancel;

		error = xfs_bumplink(tp, dp);
		if (error)
			goto out_bmap_cancel;
	}

	/*
	 * If this is a synchronous mount, make sure that the
	 * create transaction goes to disk before returning to
	 * the user.
	 */
	if (mp->m_flags & (XFS_MOUNT_WSYNC|XFS_MOUNT_DIRSYNC))
		xfs_trans_set_sync(tp);

	/*
	 * Attach the dquot(s) to the inodes and modify them incore.
	 * These ids of the inode couldn't have changed since the new
	 * inode has been locked ever since it was created.
	 */
	xfs_qm_vop_create_dqattach(tp, ip, udqp, gdqp, pdqp);

	error = xfs_bmap_finish(&tp, &free_list, &committed);
	if (error)
		goto out_bmap_cancel;

	error = xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES);
	if (error)
		goto out_release_inode;

	xfs_qm_dqrele(udqp);
	xfs_qm_dqrele(gdqp);
	xfs_qm_dqrele(pdqp);

	*ipp = ip;
	return 0;

 out_bmap_cancel:
	xfs_bmap_cancel(&free_list);
 out_trans_abort:
	cancel_flags |= XFS_TRANS_ABORT;
 out_trans_cancel:
	xfs_trans_cancel(tp, cancel_flags);
 out_release_inode:
	/*
	 * Wait until after the current transaction is aborted to
	 * release the inode.  This prevents recursive transactions
	 * and deadlocks from xfs_inactive.
	 */
	if (ip)
		IRELE(ip);

	xfs_qm_dqrele(udqp);
	xfs_qm_dqrele(gdqp);
	xfs_qm_dqrele(pdqp);

	if (unlock_dp_on_error)
		xfs_iunlock(dp, XFS_ILOCK_EXCL);
	return error;
	struct xfs_name		*target_name)
{
	xfs_mount_t		*mp = tdp->i_mount;
	xfs_bmap_free_t		free_list;
	xfs_fsblock_t		first_block;

	trace_xfs_link(tdp, target_name);

	ASSERT(!S_ISDIR(sip->i_d.di_mode));

	if (XFS_FORCED_SHUTDOWN(mp))
		return XFS_ERROR(EIO);

	error = xfs_qm_dqattach(sip, 0);
	if (error)
		goto std_return;

	error = xfs_qm_dqattach(tdp, 0);
	if (error)
		goto std_return;

	tp = xfs_trans_alloc(mp, XFS_TRANS_LINK);
	cancel_flags = XFS_TRANS_RELEASE_LOG_RES;
	resblks = XFS_LINK_SPACE_RES(mp, target_name->len);
	error = xfs_trans_reserve(tp, &M_RES(mp)->tr_link, resblks, 0);
	if (error == ENOSPC) {
		resblks = 0;
		error = xfs_trans_reserve(tp, &M_RES(mp)->tr_link, 0, 0);
	}
	if (error) {
		cancel_flags = 0;
		goto error_return;
	}

	xfs_lock_two_inodes(sip, tdp, XFS_ILOCK_EXCL);

	xfs_trans_ijoin(tp, sip, XFS_ILOCK_EXCL);
	xfs_trans_ijoin(tp, tdp, XFS_ILOCK_EXCL);

	/*
	 * If we are using project inheritance, we only allow hard link
	 * creation in our tree when the project IDs are the same; else
	 * the tree quota mechanism could be circumvented.
	 */
	if (unlikely((tdp->i_d.di_flags & XFS_DIFLAG_PROJINHERIT) &&
		     (xfs_get_projid(tdp) != xfs_get_projid(sip)))) {
		error = XFS_ERROR(EXDEV);
		goto error_return;
	}

	error = xfs_dir_canenter(tp, tdp, target_name, resblks);
	if (error)
		goto error_return;

	xfs_bmap_init(&free_list, &first_block);

	error = xfs_dir_createname(tp, tdp, target_name, sip->i_ino,
					&first_block, &free_list, resblks);
	if (error)
		goto abort_return;
	xfs_trans_ichgtime(tp, tdp, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
	xfs_trans_log_inode(tp, tdp, XFS_ILOG_CORE);

	error = xfs_bumplink(tp, sip);
	if (error)
		goto abort_return;

	/*
	 * If this is a synchronous mount, make sure that the
	 * link transaction goes to disk before returning to
	 * the user.
	 */
	if (mp->m_flags & (XFS_MOUNT_WSYNC|XFS_MOUNT_DIRSYNC)) {
		xfs_trans_set_sync(tp);
	}

	error = xfs_bmap_finish(&tp, &free_list, &committed);
	if (error) {
		xfs_bmap_cancel(&free_list);
		goto abort_return;
	}

	return xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES);

 abort_return:
	cancel_flags |= XFS_TRANS_ABORT;
 error_return:
	xfs_trans_cancel(tp, cancel_flags);
 std_return:
	return error;
/*
 * Free up the underlying blocks past new_size. The new size must be smaller
 * than the current size. This routine can be used both for the attribute and
 * data fork, and does not modify the inode size, which is left to the caller.
 *
 * The transaction passed to this routine must have made a permanent log
 * reservation of at least XFS_ITRUNCATE_LOG_RES.  This routine may commit the
 * given transaction and start new ones, so make sure everything involved in
 * the transaction is tidy before calling here.  Some transaction will be
 * returned to the caller to be committed.  The incoming transaction must
 * already include the inode, and both inode locks must be held exclusively.
 * The inode must also be "held" within the transaction.  On return the inode
 * will be "held" within the returned transaction.  This routine does NOT
 * require any disk space to be reserved for it within the transaction.
 *
 * If we get an error, we must return with the inode locked and linked into the
 * current transaction. This keeps things simple for the higher level code,
 * because it always knows that the inode is locked and held in the transaction
 * that returns to it whether errors occur or not.  We don't mark the inode
 * dirty on error so that transactions can be easily aborted if possible.
 */
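/*
 * Illustrative sketch (not part of the original file) of the calling
 * convention described above, following xfs_inactive_truncate() below: the
 * inode is joined to a permanent transaction before the call and remains
 * locked and joined whether or not an error is returned.
 *
 *	xfs_ilock(ip, XFS_ILOCK_EXCL);
 *	xfs_trans_ijoin(tp, ip, 0);
 *	error = xfs_itruncate_extents(&tp, ip, XFS_DATA_FORK, 0);
 *	// on return, tp may be a different (rolled) transaction
 */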
xfs_itruncate_extents(
	struct xfs_trans	**tpp,
	struct xfs_inode	*ip,
	xfs_fsize_t		new_size)
{
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_trans	*tp = *tpp;
	struct xfs_trans	*ntp;
	xfs_bmap_free_t		free_list;
	xfs_fsblock_t		first_block;
	xfs_fileoff_t		first_unmap_block;
	xfs_fileoff_t		last_block;
	xfs_filblks_t		unmap_len;

	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
	ASSERT(!atomic_read(&VFS_I(ip)->i_count) ||
	       xfs_isilocked(ip, XFS_IOLOCK_EXCL));
	ASSERT(new_size <= XFS_ISIZE(ip));
	ASSERT(tp->t_flags & XFS_TRANS_PERM_LOG_RES);
	ASSERT(ip->i_itemp != NULL);
	ASSERT(ip->i_itemp->ili_lock_flags == 0);
	ASSERT(!XFS_NOT_DQATTACHED(mp, ip));

	trace_xfs_itruncate_extents_start(ip, new_size);

	/*
	 * Since it is possible for space to become allocated beyond
	 * the end of the file (in a crash where the space is allocated
	 * but the inode size is not yet updated), simply remove any
	 * blocks which show up between the new EOF and the maximum
	 * possible file size.  If the first block to be removed is
	 * beyond the maximum file size (ie it is the same as last_block),
	 * then there is nothing to do.
	 */
	first_unmap_block = XFS_B_TO_FSB(mp, (xfs_ufsize_t)new_size);
	last_block = XFS_B_TO_FSB(mp, mp->m_super->s_maxbytes);
	if (first_unmap_block == last_block)
		return 0;

	ASSERT(first_unmap_block < last_block);
	unmap_len = last_block - first_unmap_block + 1;

	xfs_bmap_init(&free_list, &first_block);
	error = xfs_bunmapi(tp, ip,
			    first_unmap_block, unmap_len,
			    xfs_bmapi_aflag(whichfork),
			    XFS_ITRUNC_MAX_EXTENTS,
			    &first_block, &free_list,
			    &done);
	if (error)
		goto out_bmap_cancel;

	/*
	 * Duplicate the transaction that has the permanent
	 * reservation and commit the old transaction.
	 */
	error = xfs_bmap_finish(&tp, &free_list, &committed);
	if (committed)
		xfs_trans_ijoin(tp, ip, 0);
	if (error)
		goto out_bmap_cancel;

	/*
	 * Mark the inode dirty so it will be logged and
	 * moved forward in the log as part of every commit.
	 */
	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);

	ntp = xfs_trans_dup(tp);
	error = xfs_trans_commit(tp, 0);
	tp = ntp;

	xfs_trans_ijoin(tp, ip, 0);

	/*
	 * Transaction commit worked ok so we can drop the extra ticket
	 * reference that we gained in xfs_trans_dup()
	 */
	xfs_log_ticket_put(tp->t_ticket);
	error = xfs_trans_reserve(tp, &M_RES(mp)->tr_itruncate, 0, 0);

	/*
	 * Always re-log the inode so that our permanent transaction can keep
	 * on rolling it forward in the log.
	 */
	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);

	trace_xfs_itruncate_extents_end(ip, new_size);

out_bmap_cancel:
	/*
	 * If the bunmapi call encounters an error, return to the caller where
	 * the transaction can be properly aborted.  We just need to make sure
	 * we're not holding any resources that we were not when we came in.
	 */
	xfs_bmap_cancel(&free_list);
	xfs_mount_t	*mp = ip->i_mount;

	if (!S_ISREG(ip->i_d.di_mode) || (ip->i_d.di_mode == 0))
		return 0;

	/* If this is a read-only mount, don't do this (would generate I/O) */
	if (mp->m_flags & XFS_MOUNT_RDONLY)
		return 0;

	if (!XFS_FORCED_SHUTDOWN(mp)) {
		int truncated;

		/*
		 * If we are using filestreams, and we have an unlinked
		 * file that we are processing the last close on, then nothing
		 * will be able to reopen and write to this file. Purge this
		 * inode from the filestreams cache so that it doesn't delay
		 * teardown of the inode.
		 */
		if ((ip->i_d.di_nlink == 0) && xfs_inode_is_filestream(ip))
			xfs_filestream_deassociate(ip);

		/*
		 * If we previously truncated this file and removed old data
		 * in the process, we want to initiate "early" writeout on
		 * the last close.  This is an attempt to combat the notorious
		 * NULL files problem which is particularly noticeable from a
		 * truncate down, buffered (re-)write (delalloc), followed by
		 * a crash.  What we are effectively doing here is
		 * significantly reducing the time window where we'd otherwise
		 * be exposed to that problem.
		 */
		truncated = xfs_iflags_test_and_clear(ip, XFS_ITRUNCATED);
		if (truncated) {
			xfs_iflags_clear(ip, XFS_IDIRTY_RELEASE);
			if (VN_DIRTY(VFS_I(ip)) && ip->i_delayed_blks > 0) {
				error = -filemap_flush(VFS_I(ip)->i_mapping);
				if (error)
					return error;
			}
		}
	}

	if (ip->i_d.di_nlink == 0)
		return 0;

	if (xfs_can_free_eofblocks(ip, false)) {

		/*
		 * If we can't get the iolock just skip truncating the blocks
		 * past EOF because we could deadlock with the mmap_sem
		 * otherwise.  We'll get another chance to drop them once the
		 * last reference to the inode is dropped, so we'll never leak
		 * blocks permanently.
		 *
		 * Further, check if the inode is being opened, written and
		 * closed frequently and we have delayed allocation blocks
		 * outstanding (e.g. streaming writes from the NFS server),
		 * truncating the blocks past EOF will cause fragmentation to
		 * occur.
		 *
		 * In this case don't do the truncation, either, but we have to
		 * be careful how we detect this case. Blocks beyond EOF show
		 * up as i_delayed_blks even when the inode is clean, so we
		 * need to truncate them away first before checking for a dirty
		 * release. Hence on the first dirty close we will still remove
		 * the speculative allocation, but after that we will leave it
		 * in place.
		 */
		if (xfs_iflags_test(ip, XFS_IDIRTY_RELEASE))
			return 0;

		error = xfs_free_eofblocks(mp, ip, true);
		if (error && error != EAGAIN)
			return error;

		/* delalloc blocks after truncation means it really is dirty */
		if (ip->i_delayed_blks)
			xfs_iflags_set(ip, XFS_IDIRTY_RELEASE);
	}
	return 0;
/*
 * xfs_inactive_truncate
 *
 * Called to perform a truncate when an inode becomes unlinked.
 */
STATIC int
xfs_inactive_truncate(
	struct xfs_inode *ip)
{
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_trans	*tp;
	int			error;
	tp = xfs_trans_alloc(mp, XFS_TRANS_INACTIVE);
	error = xfs_trans_reserve(tp, &M_RES(mp)->tr_itruncate, 0, 0);
	if (error) {
		ASSERT(XFS_FORCED_SHUTDOWN(mp));
		xfs_trans_cancel(tp, 0);
		return error;
	}

	xfs_ilock(ip, XFS_ILOCK_EXCL);
	xfs_trans_ijoin(tp, ip, 0);
	/*
	 * Log the inode size first to prevent stale data exposure in the event
	 * of a system crash before the truncate completes. See the related
	 * comment in xfs_setattr_size() for details.
	 */
	ip->i_d.di_size = 0;
	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);

	error = xfs_itruncate_extents(&tp, ip, XFS_DATA_FORK, 0);
	if (error)
		goto error_trans_cancel;

	ASSERT(ip->i_d.di_nextents == 0);

	error = xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES);
	if (error)
		goto error_unlock;

	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	return 0;

error_trans_cancel:
	xfs_trans_cancel(tp, XFS_TRANS_RELEASE_LOG_RES | XFS_TRANS_ABORT);
error_unlock:
	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	return error;
}
/*
 * xfs_inactive_ifree()
 *
 * Perform the inode free when an inode is unlinked.
 */
	struct xfs_inode *ip)
{
	xfs_bmap_free_t		free_list;
	xfs_fsblock_t		first_block;
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_trans	*tp;

	tp = xfs_trans_alloc(mp, XFS_TRANS_INACTIVE);
	error = xfs_trans_reserve(tp, &M_RES(mp)->tr_ifree, 0, 0);
	if (error) {
		ASSERT(XFS_FORCED_SHUTDOWN(mp));
		xfs_trans_cancel(tp, XFS_TRANS_RELEASE_LOG_RES);
		return error;
	}

	xfs_ilock(ip, XFS_ILOCK_EXCL);
	xfs_trans_ijoin(tp, ip, 0);

	xfs_bmap_init(&free_list, &first_block);
	error = xfs_ifree(tp, ip, &free_list);
	if (error) {
		/*
		 * If we fail to free the inode, shut down.  The cancel
		 * might do that, we need to make sure.  Otherwise the
		 * inode might be lost for a long time or forever.
		 */
		if (!XFS_FORCED_SHUTDOWN(mp)) {
			xfs_notice(mp, "%s: xfs_ifree returned error %d",
				__func__, error);
			xfs_force_shutdown(mp, SHUTDOWN_META_IO_ERROR);
		}
		xfs_trans_cancel(tp, XFS_TRANS_RELEASE_LOG_RES|XFS_TRANS_ABORT);
		xfs_iunlock(ip, XFS_ILOCK_EXCL);
		return error;
	}

	/*
	 * Credit the quota account(s). The inode is gone.
	 */
	xfs_trans_mod_dquot_byino(tp, ip, XFS_TRANS_DQ_ICOUNT, -1);

	/*
	 * Just ignore errors at this point.  There is nothing we can
	 * do except to try to keep going. Make sure it's not a silent
	 * error.
	 */
	error = xfs_bmap_finish(&tp, &free_list, &committed);
	if (error)
		xfs_notice(mp, "%s: xfs_bmap_finish returned error %d",
			__func__, error);
	error = xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES);
	if (error)
		xfs_notice(mp, "%s: xfs_trans_commit returned error %d",
			__func__, error);

	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	return 0;
}
/*
 * xfs_inactive
 *
 * This is called when the vnode reference count for the vnode
 * goes to zero.  If the file has been unlinked, then it must
 * now be truncated.  Also, we clear all of the read-ahead state
 * kept for the inode here since the file is now closed.
 */
	struct xfs_mount	*mp;

	/*
	 * If the inode is already free, then there can be nothing
	 * to clean up here.
	 */
	if (ip->i_d.di_mode == 0) {
		ASSERT(ip->i_df.if_real_bytes == 0);
		ASSERT(ip->i_df.if_broot_bytes == 0);
		return;
	}

	mp = ip->i_mount;

	/* If this is a read-only mount, don't do this (would generate I/O) */
	if (mp->m_flags & XFS_MOUNT_RDONLY)
		return;

	if (ip->i_d.di_nlink != 0) {
		/*
		 * force is true because we are evicting an inode from the
		 * cache. Post-eof blocks must be freed, lest we end up with
		 * broken free space accounting.
		 */
		if (xfs_can_free_eofblocks(ip, true))
			xfs_free_eofblocks(mp, ip, false);
		return;
	}

	if (S_ISREG(ip->i_d.di_mode) &&
	    (ip->i_d.di_size != 0 || XFS_ISIZE(ip) != 0 ||
	     ip->i_d.di_nextents > 0 || ip->i_delayed_blks > 0))
		truncate = 1;

	error = xfs_qm_dqattach(ip, 0);
	if (error)
		return;

	if (S_ISLNK(ip->i_d.di_mode))
		error = xfs_inactive_symlink(ip);
	else if (truncate)
		error = xfs_inactive_truncate(ip);
	if (error)
		return;

	/*
	 * If there are attributes associated with the file then blow them away
	 * now.  The code calls a routine that recursively deconstructs the
	 * attribute fork. We need to just commit the current transaction
	 * because we can't use it for xfs_attr_inactive().
	 */
	if (ip->i_d.di_anextents > 0) {
		ASSERT(ip->i_d.di_forkoff != 0);

		error = xfs_attr_inactive(ip);
		if (error)
			return;
	}

	if (ip->i_afp)
		xfs_idestroy_fork(ip, XFS_ATTR_FORK);

	ASSERT(ip->i_d.di_anextents == 0);

	error = xfs_inactive_ifree(ip);
	if (error)
		return;

	/*
	 * Release the dquots held by inode, if any.
	 */
	xfs_qm_dqdetach(ip);
/*
 * This is called when the inode's link count goes to 0.
 * We place the on-disk inode on a list in the AGI.  It
 * will be pulled from this list when the inode is freed.
 */
	ASSERT(ip->i_d.di_nlink == 0);
	ASSERT(ip->i_d.di_mode != 0);

	/*
	 * Get the agi buffer first.  It ensures lock ordering
	 * on the list.
	 */
	error = xfs_read_agi(mp, tp, XFS_INO_TO_AGNO(mp, ip->i_ino), &agibp);
	if (error)
		return error;
	agi = XFS_BUF_TO_AGI(agibp);

	/*
	 * Get the index into the agi hash table for the
	 * list this inode will go on.
	 */
	agino = XFS_INO_TO_AGINO(mp, ip->i_ino);
	bucket_index = agino % XFS_AGI_UNLINKED_BUCKETS;
	ASSERT(agi->agi_unlinked[bucket_index]);
	ASSERT(be32_to_cpu(agi->agi_unlinked[bucket_index]) != agino);

	if (agi->agi_unlinked[bucket_index] != cpu_to_be32(NULLAGINO)) {
		/*
		 * There is already another inode in the bucket we need
		 * to add ourselves to.  Add us at the front of the list.
		 * Here we put the head pointer into our next pointer,
		 * and then we fall through to point the head at us.
		 */
		error = xfs_imap_to_bp(mp, tp, &ip->i_imap, &dip, &ibp,
				       0, 0);
		if (error)
			return error;

		ASSERT(dip->di_next_unlinked == cpu_to_be32(NULLAGINO));
		dip->di_next_unlinked = agi->agi_unlinked[bucket_index];
		offset = ip->i_imap.im_boffset +
			offsetof(xfs_dinode_t, di_next_unlinked);

		/* need to recalc the inode CRC if appropriate */
		xfs_dinode_calc_crc(mp, dip);

		xfs_trans_inode_buf(tp, ibp);
		xfs_trans_log_buf(tp, ibp, offset,
				  (offset + sizeof(xfs_agino_t) - 1));
		xfs_inobp_check(mp, ibp);
	}

	/*
	 * Point the bucket head pointer at the inode being inserted.
	 */
	agi->agi_unlinked[bucket_index] = cpu_to_be32(agino);
	offset = offsetof(xfs_agi_t, agi_unlinked) +
		 (sizeof(xfs_agino_t) * bucket_index);
	xfs_trans_log_buf(tp, agibp, offset,
			  (offset + sizeof(xfs_agino_t) - 1));
	return 0;
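/*
 * Illustrative sketch (not part of the original file): the bucket update
 * above is a plain singly-linked-list head insertion, with the list links
 * stored on disk as big-endian AG inode numbers:
 *
 *	dip->di_next_unlinked = agi->agi_unlinked[bucket_index];
 *	agi->agi_unlinked[bucket_index] = cpu_to_be32(agino);
 */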
/*
 * Pull the on-disk inode from the AGI unlinked list.
 */
	xfs_agnumber_t	agno;
	xfs_agino_t	next_agino;
	xfs_buf_t	*last_ibp;
	xfs_dinode_t	*last_dip = NULL;
	int		offset, last_offset = 0;

	agno = XFS_INO_TO_AGNO(mp, ip->i_ino);

	/*
	 * Get the agi buffer first.  It ensures lock ordering
	 * on the list.
	 */
	error = xfs_read_agi(mp, tp, agno, &agibp);
	if (error)
		return error;

	agi = XFS_BUF_TO_AGI(agibp);

	/*
	 * Get the index into the agi hash table for the
	 * list this inode will go on.
	 */
	agino = XFS_INO_TO_AGINO(mp, ip->i_ino);
	bucket_index = agino % XFS_AGI_UNLINKED_BUCKETS;
	ASSERT(agi->agi_unlinked[bucket_index] != cpu_to_be32(NULLAGINO));
	ASSERT(agi->agi_unlinked[bucket_index]);

	if (be32_to_cpu(agi->agi_unlinked[bucket_index]) == agino) {
		/*
		 * We're at the head of the list.  Get the inode's on-disk
		 * buffer to see if there is anyone after us on the list.
		 * Only modify our next pointer if it is not already NULLAGINO.
		 * This saves us the overhead of dealing with the buffer when
		 * there is no need to change it.
		 */
		error = xfs_imap_to_bp(mp, tp, &ip->i_imap, &dip, &ibp,
				       0, 0);
		if (error) {
			xfs_warn(mp, "%s: xfs_imap_to_bp returned error %d.",
				__func__, error);
			return error;
		}
		next_agino = be32_to_cpu(dip->di_next_unlinked);
		ASSERT(next_agino != 0);
		if (next_agino != NULLAGINO) {
			dip->di_next_unlinked = cpu_to_be32(NULLAGINO);
			offset = ip->i_imap.im_boffset +
				offsetof(xfs_dinode_t, di_next_unlinked);

			/* need to recalc the inode CRC if appropriate */
			xfs_dinode_calc_crc(mp, dip);

			xfs_trans_inode_buf(tp, ibp);
			xfs_trans_log_buf(tp, ibp, offset,
					  (offset + sizeof(xfs_agino_t) - 1));
			xfs_inobp_check(mp, ibp);
		} else {
			xfs_trans_brelse(tp, ibp);
		}

		/*
		 * Point the bucket head pointer at the next inode.
		 */
		ASSERT(next_agino != 0);
		ASSERT(next_agino != agino);
		agi->agi_unlinked[bucket_index] = cpu_to_be32(next_agino);
		offset = offsetof(xfs_agi_t, agi_unlinked) +
			 (sizeof(xfs_agino_t) * bucket_index);
		xfs_trans_log_buf(tp, agibp, offset,
				  (offset + sizeof(xfs_agino_t) - 1));
	} else {
		/*
		 * We need to search the list for the inode being freed.
		 */
		next_agino = be32_to_cpu(agi->agi_unlinked[bucket_index]);
		while (next_agino != agino) {
			struct xfs_imap	imap;

			if (last_ibp)
				xfs_trans_brelse(tp, last_ibp);

			next_ino = XFS_AGINO_TO_INO(mp, agno, next_agino);

			error = xfs_imap(mp, tp, next_ino, &imap, 0);
			if (error) {
				xfs_warn(mp,
					"%s: xfs_imap returned error %d.",
					__func__, error);
				return error;
			}

			error = xfs_imap_to_bp(mp, tp, &imap, &last_dip,
					       &last_ibp, 0, 0);
			if (error) {
				xfs_warn(mp,
					"%s: xfs_imap_to_bp returned error %d.",
					__func__, error);
				return error;
			}

			last_offset = imap.im_boffset;
			next_agino = be32_to_cpu(last_dip->di_next_unlinked);
			ASSERT(next_agino != NULLAGINO);
			ASSERT(next_agino != 0);
		}

		/*
		 * Now last_ibp points to the buffer previous to us on the
		 * unlinked list.  Pull us from the list.
		 */
		error = xfs_imap_to_bp(mp, tp, &ip->i_imap, &dip, &ibp,
				       0, 0);
		if (error) {
			xfs_warn(mp, "%s: xfs_imap_to_bp(2) returned error %d.",
				__func__, error);
			return error;
		}
		next_agino = be32_to_cpu(dip->di_next_unlinked);
		ASSERT(next_agino != 0);
		ASSERT(next_agino != agino);
		if (next_agino != NULLAGINO) {
			dip->di_next_unlinked = cpu_to_be32(NULLAGINO);
			offset = ip->i_imap.im_boffset +
				offsetof(xfs_dinode_t, di_next_unlinked);

			/* need to recalc the inode CRC if appropriate */
			xfs_dinode_calc_crc(mp, dip);

			xfs_trans_inode_buf(tp, ibp);
			xfs_trans_log_buf(tp, ibp, offset,
					  (offset + sizeof(xfs_agino_t) - 1));
			xfs_inobp_check(mp, ibp);
		} else {
			xfs_trans_brelse(tp, ibp);
		}

		/*
		 * Point the previous inode on the list to the next inode.
		 */
		last_dip->di_next_unlinked = cpu_to_be32(next_agino);
		ASSERT(next_agino != 0);
		offset = last_offset + offsetof(xfs_dinode_t, di_next_unlinked);

		/* need to recalc the inode CRC if appropriate */
		xfs_dinode_calc_crc(mp, last_dip);

		xfs_trans_inode_buf(tp, last_ibp);
		xfs_trans_log_buf(tp, last_ibp, offset,
				  (offset + sizeof(xfs_agino_t) - 1));
		xfs_inobp_check(mp, last_ibp);
	}
	return 0;
/*
 * A big issue when freeing the inode cluster is that we _cannot_ skip any
 * inodes that are in memory - they all must be marked stale and attached to
 * the cluster buffer.
 */
	xfs_inode_t		*free_ip,

	xfs_mount_t		*mp = free_ip->i_mount;
	int			blks_per_cluster;
	xfs_inode_log_item_t	*iip;
	xfs_log_item_t		*lip;
	struct xfs_perag	*pag;

	pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, inum));
	if (mp->m_sb.sb_blocksize >= XFS_INODE_CLUSTER_SIZE(mp)) {
		blks_per_cluster = 1;
		ninodes = mp->m_sb.sb_inopblock;
		nbufs = XFS_IALLOC_BLOCKS(mp);
	} else {
		blks_per_cluster = XFS_INODE_CLUSTER_SIZE(mp) /
				   mp->m_sb.sb_blocksize;
		ninodes = blks_per_cluster * mp->m_sb.sb_inopblock;
		nbufs = XFS_IALLOC_BLOCKS(mp) / blks_per_cluster;
	}

	for (j = 0; j < nbufs; j++, inum += ninodes) {
		blkno = XFS_AGB_TO_DADDR(mp, XFS_INO_TO_AGNO(mp, inum),
					 XFS_INO_TO_AGBNO(mp, inum));

		/*
		 * We obtain and lock the backing buffer first in the process
		 * here, as we have to ensure that any dirty inode that we
		 * can't get the flush lock on is attached to the buffer.
		 * If we scan the in-memory inodes first, then buffer IO can
		 * complete before we get a lock on it, and hence we may fail
		 * to mark all the active inodes on the buffer stale.
		 */
		bp = xfs_trans_get_buf(tp, mp->m_ddev_targp, blkno,
					mp->m_bsize * blks_per_cluster,
					XBF_UNMAPPED);

		/*
		 * This buffer may not have been correctly initialised as we
		 * didn't read it from disk. That's not important because we
		 * are only using it to mark the buffer as stale in the log,
		 * and to attach stale cached inodes on it. That means it will
		 * never be dispatched for IO. If it is, we want to know about
		 * it, and we want it to fail. We can achieve this by adding a
		 * write verifier to the buffer.
		 */
		bp->b_ops = &xfs_inode_buf_ops;

		/*
		 * Walk the inodes already attached to the buffer and mark them
		 * stale. These will all have the flush locks held, so an
		 * in-memory inode walk can't lock them. By marking them all
		 * stale first, we will not attempt to lock them in the loop
		 * below as the XFS_ISTALE flag will be set.
		 */
		lip = bp->b_fspriv;
		while (lip) {
			if (lip->li_type == XFS_LI_INODE) {
				iip = (xfs_inode_log_item_t *)lip;
				ASSERT(iip->ili_logged == 1);
				lip->li_cb = xfs_istale_done;
				xfs_trans_ail_copy_lsn(mp->m_ail,
							&iip->ili_flush_lsn,
							&iip->ili_item.li_lsn);
				xfs_iflags_set(iip->ili_inode, XFS_ISTALE);
			}
			lip = lip->li_bio_list;
		}

		/*
		 * For each inode in memory attempt to add it to the inode
		 * buffer and set it up for being staled on buffer IO
		 * completion.  This is safe as we've locked out tail pushing
		 * and flushing by locking the buffer.
		 *
		 * We have already marked every inode that was part of a
		 * transaction stale above, which means there is no point in
		 * even trying to lock them.
		 */
		for (i = 0; i < ninodes; i++) {
retry:
			rcu_read_lock();
			ip = radix_tree_lookup(&pag->pag_ici_root,
					XFS_INO_TO_AGINO(mp, (inum + i)));

			/* Inode not in memory, nothing to do */
			if (!ip) {
				rcu_read_unlock();
				continue;
			}

			/*
			 * because this is an RCU protected lookup, we could
			 * find a recently freed or even reallocated inode
			 * during the lookup. We need to check under the
			 * i_flags_lock for a valid inode here. Skip it if it
			 * is not valid, the wrong inode or stale.
			 */
			spin_lock(&ip->i_flags_lock);
			if (ip->i_ino != inum + i ||
			    __xfs_iflags_test(ip, XFS_ISTALE)) {
				spin_unlock(&ip->i_flags_lock);
				rcu_read_unlock();
				continue;
			}
			spin_unlock(&ip->i_flags_lock);

			/*
			 * Don't try to lock/unlock the current inode, but we
			 * _cannot_ skip the other inodes that we did not find
			 * in the list attached to the buffer and are not
			 * already marked stale. If we can't lock it, back off
			 * and retry.
			 */
			if (ip != free_ip &&
			    !xfs_ilock_nowait(ip, XFS_ILOCK_EXCL)) {
				rcu_read_unlock();
				delay(1);
				goto retry;
			}
			rcu_read_unlock();

			xfs_iflags_set(ip, XFS_ISTALE);

			/*
			 * we don't need to attach clean inodes or those only
			 * with unlogged changes (which we throw away, anyway).
			 */
			iip = ip->i_itemp;
			if (!iip || xfs_inode_clean(ip)) {
				ASSERT(ip != free_ip);
				xfs_ifunlock(ip);
				xfs_iunlock(ip, XFS_ILOCK_EXCL);
				continue;
			}

			iip->ili_last_fields = iip->ili_fields;
			iip->ili_fields = 0;
			iip->ili_logged = 1;
			xfs_trans_ail_copy_lsn(mp->m_ail, &iip->ili_flush_lsn,
						&iip->ili_item.li_lsn);

			xfs_buf_attach_iodone(bp, xfs_istale_done,
						  (xfs_log_item_t *)iip);

			if (ip != free_ip)
				xfs_iunlock(ip, XFS_ILOCK_EXCL);
		}

		xfs_trans_stale_inode_buf(tp, bp);
		xfs_trans_binval(tp, bp);
	}

	xfs_perag_put(pag);
/*
 * This is called to return an inode to the inode free list.
 * The inode should already be truncated to 0 length and have
 * no pages associated with it. This routine also assumes that
 * the inode is already a part of the transaction.
 *
 * The on-disk copy of the inode will have been added to the list
 * of unlinked inodes in the AGI. We need to remove the inode from
 * that list atomically with respect to freeing it here.
 */
int
xfs_ifree(
	xfs_trans_t	*tp,
	xfs_inode_t	*ip,
	xfs_bmap_free_t	*flist)
{
	int		error;
	int		delete;
	xfs_ino_t	first_ino;

	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
	ASSERT(ip->i_d.di_nlink == 0);
	ASSERT(ip->i_d.di_nextents == 0);
	ASSERT(ip->i_d.di_anextents == 0);
	ASSERT(ip->i_d.di_size == 0 || !S_ISREG(ip->i_d.di_mode));
	ASSERT(ip->i_d.di_nblocks == 0);

	/*
	 * Pull the on-disk inode from the AGI unlinked list.
	 */
	error = xfs_iunlink_remove(tp, ip);
	if (error)
		return error;

	error = xfs_difree(tp, ip->i_ino, flist, &delete, &first_ino);
	if (error)
		return error;

	ip->i_d.di_mode = 0;		/* mark incore inode as free */
	ip->i_d.di_flags = 0;
	ip->i_d.di_dmevmask = 0;
	ip->i_d.di_forkoff = 0;		/* mark the attr fork not in use */
	ip->i_d.di_format = XFS_DINODE_FMT_EXTENTS;
	ip->i_d.di_aformat = XFS_DINODE_FMT_EXTENTS;
	/*
	 * Bump the generation count so no one will be confused
	 * by reincarnations of this inode.
	 */
	ip->i_d.di_gen++;
	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);

	if (delete)
		error = xfs_ifree_cluster(ip, tp, first_ino);

	return error;
}
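/*
 * A rough sketch of how a caller drives xfs_ifree() (error handling elided;
 * the inode inactivation path does this for real):
 *
 *	xfs_bmap_init(&free_list, &first_block);
 *	error = xfs_ifree(tp, ip, &free_list);
 *	error = xfs_bmap_finish(&tp, &free_list, &committed);
 *	error = xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES);
 *
 * i.e. the caller owns the transaction and the bmap free list; xfs_ifree()
 * records any space to be freed on that list rather than freeing it directly.
 */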
/*
 * This is called to unpin an inode. The caller must have the inode locked
 * in at least shared mode so that the buffer cannot be subsequently pinned
 * once someone is waiting for it to be unpinned.
 */
static void
xfs_iunpin(
	struct xfs_inode	*ip)
{
	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL|XFS_ILOCK_SHARED));

	trace_xfs_inode_unpin_nowait(ip, _RET_IP_);

	/* Give the log a push to start the unpinning I/O */
	xfs_log_force_lsn(ip->i_mount, ip->i_itemp->ili_last_lsn, 0);
}

static void
__xfs_iunpin_wait(
	struct xfs_inode	*ip)
{
	wait_queue_head_t *wq = bit_waitqueue(&ip->i_flags, __XFS_IPINNED_BIT);
	DEFINE_WAIT_BIT(wait, &ip->i_flags, __XFS_IPINNED_BIT);

	xfs_iunpin(ip);

	do {
		prepare_to_wait(wq, &wait.wait, TASK_UNINTERRUPTIBLE);
		if (xfs_ipincount(ip))
			io_schedule();
	} while (xfs_ipincount(ip));
	finish_wait(wq, &wait.wait);
}

void
xfs_iunpin_wait(
	struct xfs_inode	*ip)
{
	if (xfs_ipincount(ip))
		__xfs_iunpin_wait(ip);
}
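/*
 * Note: the pin count waited on above is dropped from log I/O completion via
 * the inode log item's unpin handler, which is also expected to wake the
 * __XFS_IPINNED_BIT waiters; __xfs_iunpin_wait() itself only forces the log
 * and then sleeps until that happens.
 */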
/*
 * Removing an inode from the namespace involves removing the directory entry
 * and dropping the link count on the inode. Removing the directory entry can
 * result in locking an AGF (directory blocks were freed) and removing a link
 * count can result in placing the inode on an unlinked list which results in
 * locking an AGI.
 *
 * The big problem here is that we have an ordering constraint on AGF and AGI
 * locking - inode allocation locks the AGI, then can allocate a new extent
 * for new inodes, locking the AGF after the AGI. Similarly, freeing the inode
 * removes the inode from the unlinked list, requiring that we lock the AGI
 * first, and then freeing the inode can result in an inode chunk being freed
 * and hence freeing disk space requiring that we lock an AGF.
 *
 * Hence the ordering that is imposed by other parts of the code is AGI before
 * AGF. This means we cannot remove the directory entry before we drop the
 * inode reference count and put it on the unlinked list, as this results in a
 * lock order of AGF then AGI, and this can deadlock against inode allocation
 * and freeing. Therefore we must drop the link counts before we remove the
 * directory entry.
 *
 * This is still safe from a transactional point of view - it is not until we
 * get to xfs_bmap_finish() that we have the possibility of multiple
 * transactions in this operation. Hence as long as we remove the directory
 * entry and drop the link count in the first transaction of the remove
 * operation, there are no transactional constraints on the ordering here.
 */
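/*
 * As a concrete sketch of that rule, the remove path below is ordered
 * roughly as:
 *
 *	xfs_droplink(tp, ip);			may take the AGI lock when the
 *						inode goes on the unlinked list
 *	xfs_dir_removename(tp, dp, name, ...);	may take an AGF lock if
 *						directory blocks are freed
 *
 * i.e. AGI before AGF, matching the ordering used by inode allocation.
 */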
int
xfs_remove(
	xfs_inode_t		*dp,
	struct xfs_name		*name,
	xfs_inode_t		*ip)
{
	xfs_mount_t		*mp = dp->i_mount;
	xfs_trans_t		*tp = NULL;
	int			is_dir = S_ISDIR(ip->i_d.di_mode);
	int			error = 0;
	xfs_bmap_free_t		free_list;
	xfs_fsblock_t		first_block;
	int			cancel_flags;
	int			committed;
	int			link_zero;
	uint			resblks;
	uint			log_count;

	trace_xfs_remove(dp, name);

	if (XFS_FORCED_SHUTDOWN(mp))
		return XFS_ERROR(EIO);

	error = xfs_qm_dqattach(dp, 0);
	if (error)
		goto std_return;

	error = xfs_qm_dqattach(ip, 0);
	if (error)
		goto std_return;

	if (is_dir) {
		tp = xfs_trans_alloc(mp, XFS_TRANS_RMDIR);
		log_count = XFS_DEFAULT_LOG_COUNT;
	} else {
		tp = xfs_trans_alloc(mp, XFS_TRANS_REMOVE);
		log_count = XFS_REMOVE_LOG_COUNT;
	}
	cancel_flags = XFS_TRANS_RELEASE_LOG_RES;

	/*
	 * We try to get the real space reservation first,
	 * allowing for directory btree deletion(s) implying
	 * possible bmap insert(s). If we can't get the space
	 * reservation then we use 0 instead, and avoid the bmap
	 * btree insert(s) in the directory code by, if the bmap
	 * insert tries to happen, instead trimming the LAST
	 * block from the directory.
	 */
	resblks = XFS_REMOVE_SPACE_RES(mp);
	error = xfs_trans_reserve(tp, &M_RES(mp)->tr_remove, resblks, 0);
	if (error == ENOSPC) {
		resblks = 0;
		error = xfs_trans_reserve(tp, &M_RES(mp)->tr_remove, 0, 0);
	}
	if (error) {
		ASSERT(error != ENOSPC);
		cancel_flags = 0;
		goto out_trans_cancel;
	}

	xfs_lock_two_inodes(dp, ip, XFS_ILOCK_EXCL);

	xfs_trans_ijoin(tp, dp, XFS_ILOCK_EXCL);
	xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);

	/*
	 * If we're removing a directory perform some additional validation.
	 */
	cancel_flags |= XFS_TRANS_ABORT;
	if (is_dir) {
		ASSERT(ip->i_d.di_nlink >= 2);
		if (ip->i_d.di_nlink != 2) {
			error = XFS_ERROR(ENOTEMPTY);
			goto out_trans_cancel;
		}
		if (!xfs_dir_isempty(ip)) {
			error = XFS_ERROR(ENOTEMPTY);
			goto out_trans_cancel;
		}

		/* Drop the link from ip's "..". */
		error = xfs_droplink(tp, dp);
		if (error)
			goto out_trans_cancel;

		/* Drop the "." link from ip to self. */
		error = xfs_droplink(tp, ip);
		if (error)
			goto out_trans_cancel;
	} else {
		/*
		 * When removing a non-directory we need to log the parent
		 * inode here. For a directory this is done implicitly
		 * by the xfs_droplink call for the ".." entry.
		 */
		xfs_trans_log_inode(tp, dp, XFS_ILOG_CORE);
	}
	xfs_trans_ichgtime(tp, dp, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);

	/* Drop the link from dp to ip. */
	error = xfs_droplink(tp, ip);
	if (error)
		goto out_trans_cancel;

	/* Determine if this is the last link while the inode is locked */
	link_zero = (ip->i_d.di_nlink == 0);

	xfs_bmap_init(&free_list, &first_block);
	error = xfs_dir_removename(tp, dp, name, ip->i_ino,
					&first_block, &free_list, resblks);
	if (error) {
		ASSERT(error != ENOENT);
		goto out_bmap_cancel;
	}

	/*
	 * If this is a synchronous mount, make sure that the
	 * remove transaction goes to disk before returning to
	 * the user.
	 */
	if (mp->m_flags & (XFS_MOUNT_WSYNC|XFS_MOUNT_DIRSYNC))
		xfs_trans_set_sync(tp);

	error = xfs_bmap_finish(&tp, &free_list, &committed);
	if (error)
		goto out_bmap_cancel;

	error = xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES);
	if (error)
		goto std_return;

	/*
	 * If we are using filestreams, kill the stream association.
	 * If the file is still open it may get a new one but that
	 * will get killed on last close in xfs_close() so we don't
	 * have to worry about that.
	 */
	if (!is_dir && link_zero && xfs_inode_is_filestream(ip))
		xfs_filestream_deassociate(ip);

	return 0;

 out_bmap_cancel:
	xfs_bmap_cancel(&free_list);
 out_trans_cancel:
	xfs_trans_cancel(tp, cancel_flags);
 std_return:
	return error;
}
/*
 * Enter all inodes for a rename transaction into a sorted array.
 */
STATIC void
xfs_sort_for_rename(
	xfs_inode_t	*dp1,	/* in: old (source) directory inode */
	xfs_inode_t	*dp2,	/* in: new (target) directory inode */
	xfs_inode_t	*ip1,	/* in: inode of old entry */
	xfs_inode_t	*ip2,	/* in: inode of new entry, if it already
				   exists, NULL otherwise. */
	xfs_inode_t	**i_tab,/* out: array of inode returned, sorted */
	int		*num_inodes)  /* out: number of inodes in array */
{
	xfs_inode_t	*temp;
	int		i, j;

	/*
	 * i_tab contains a list of pointers to inodes. We initialize
	 * the table here & we'll sort it. We will then use it to
	 * order the acquisition of the inode locks.
	 *
	 * Note that the table may contain duplicates. e.g., dp1 == dp2.
	 */
	i_tab[0] = dp1;
	i_tab[1] = dp2;
	i_tab[2] = ip1;
	if (ip2) {
		*num_inodes = 4;
		i_tab[3] = ip2;
	} else {
		*num_inodes = 3;
		i_tab[3] = NULL;
	}

	/*
	 * Sort the elements via bubble sort. (Remember, there are at
	 * most 4 elements to sort, so this is adequate.)
	 */
	for (i = 0; i < *num_inodes; i++) {
		for (j = 1; j < *num_inodes; j++) {
			if (i_tab[j]->i_ino < i_tab[j-1]->i_ino) {
				temp = i_tab[j];
				i_tab[j] = i_tab[j-1];
				i_tab[j-1] = temp;
			}
		}
	}
}
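/*
 * For example, if dp1, dp2, ip1 and ip2 had inode numbers 131, 128, 140 and
 * 128 (illustrative numbers only), i_tab would come out as {128, 128, 131,
 * 140} with the duplicate kept adjacent. Every rename therefore acquires its
 * inode locks in ascending inode number order, so two concurrent renames
 * touching the same inodes cannot deadlock against each other.
 */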
int
xfs_rename(
	xfs_inode_t	*src_dp,
	struct xfs_name	*src_name,
	xfs_inode_t	*src_ip,
	xfs_inode_t	*target_dp,
	struct xfs_name	*target_name,
	xfs_inode_t	*target_ip)
{
	xfs_trans_t	*tp = NULL;
	xfs_mount_t	*mp = src_dp->i_mount;
	int		new_parent;		/* moving to a new dir */
	int		src_is_directory;	/* src_name is a directory */
	int		error;
	xfs_bmap_free_t	free_list;
	xfs_fsblock_t	first_block;
	int		cancel_flags;
	int		committed;
	xfs_inode_t	*inodes[4];
	int		spaceres;
	int		num_inodes;

	trace_xfs_rename(src_dp, target_dp, src_name, target_name);

	new_parent = (src_dp != target_dp);
	src_is_directory = S_ISDIR(src_ip->i_d.di_mode);

	xfs_sort_for_rename(src_dp, target_dp, src_ip, target_ip,
				inodes, &num_inodes);

	xfs_bmap_init(&free_list, &first_block);
	tp = xfs_trans_alloc(mp, XFS_TRANS_RENAME);
	cancel_flags = XFS_TRANS_RELEASE_LOG_RES;
	spaceres = XFS_RENAME_SPACE_RES(mp, target_name->len);
	error = xfs_trans_reserve(tp, &M_RES(mp)->tr_rename, spaceres, 0);
	if (error == ENOSPC) {
		spaceres = 0;
		error = xfs_trans_reserve(tp, &M_RES(mp)->tr_rename, 0, 0);
	}
	if (error) {
		xfs_trans_cancel(tp, 0);
		goto std_return;
	}

	/*
	 * Attach the dquots to the inodes
	 */
	error = xfs_qm_vop_rename_dqattach(inodes);
	if (error) {
		xfs_trans_cancel(tp, cancel_flags);
		goto std_return;
	}

	/*
	 * Lock all the participating inodes. Depending upon whether
	 * the target_name exists in the target directory, and
	 * whether the target directory is the same as the source
	 * directory, we can lock from 2 to 4 inodes.
	 */
	xfs_lock_inodes(inodes, num_inodes, XFS_ILOCK_EXCL);

	/*
	 * Join all the inodes to the transaction. From this point on,
	 * we can rely on either trans_commit or trans_cancel to unlock
	 * them.
	 */
	xfs_trans_ijoin(tp, src_dp, XFS_ILOCK_EXCL);
	if (new_parent)
		xfs_trans_ijoin(tp, target_dp, XFS_ILOCK_EXCL);
	xfs_trans_ijoin(tp, src_ip, XFS_ILOCK_EXCL);
	if (target_ip)
		xfs_trans_ijoin(tp, target_ip, XFS_ILOCK_EXCL);

	/*
	 * If we are using project inheritance, we only allow renames
	 * into our tree when the project IDs are the same; else the
	 * tree quota mechanism would be circumvented.
	 */
	if (unlikely((target_dp->i_d.di_flags & XFS_DIFLAG_PROJINHERIT) &&
		     (xfs_get_projid(target_dp) != xfs_get_projid(src_ip)))) {
		error = XFS_ERROR(EXDEV);
		goto error_return;
	}

	/*
	 * Set up the target.
	 */
	if (target_ip == NULL) {
		/*
		 * If there's no space reservation, check the entry will
		 * fit before actually inserting it.
		 */
		error = xfs_dir_canenter(tp, target_dp, target_name, spaceres);
		if (error)
			goto error_return;
		/*
		 * If target does not exist and the rename crosses
		 * directories, adjust the target directory link count
		 * to account for the ".." reference from the new entry.
		 */
		error = xfs_dir_createname(tp, target_dp, target_name,
						src_ip->i_ino, &first_block,
						&free_list, spaceres);
		if (error == ENOSPC)
			goto error_return;
		if (error)
			goto abort_return;

		xfs_trans_ichgtime(tp, target_dp,
					XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);

		if (new_parent && src_is_directory) {
			error = xfs_bumplink(tp, target_dp);
			if (error)
				goto abort_return;
		}
	} else { /* target_ip != NULL */
		/*
		 * If target exists and it's a directory, check that both
		 * target and source are directories and that target can be
		 * destroyed, or that neither is a directory.
		 */
		if (S_ISDIR(target_ip->i_d.di_mode)) {
			/*
			 * Make sure target dir is empty.
			 */
			if (!(xfs_dir_isempty(target_ip)) ||
			    (target_ip->i_d.di_nlink > 2)) {
				error = XFS_ERROR(EEXIST);
				goto error_return;
			}
		}

		/*
		 * Link the source inode under the target name.
		 * If the source inode is a directory and we are moving
		 * it across directories, its ".." entry will be
		 * inconsistent until we replace that down below.
		 *
		 * In case there is already an entry with the same
		 * name at the destination directory, remove it first.
		 */
		error = xfs_dir_replace(tp, target_dp, target_name,
					src_ip->i_ino,
					&first_block, &free_list, spaceres);
		if (error)
			goto abort_return;

		xfs_trans_ichgtime(tp, target_dp,
					XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);

		/*
		 * Decrement the link count on the target since the target
		 * dir no longer points to it.
		 */
		error = xfs_droplink(tp, target_ip);
		if (error)
			goto abort_return;

		if (src_is_directory) {
			/*
			 * Drop the link from the old "." entry.
			 */
			error = xfs_droplink(tp, target_ip);
			if (error)
				goto abort_return;
		}
	} /* target_ip != NULL */

	/*
	 * Remove the source.
	 */
	if (new_parent && src_is_directory) {
		/*
		 * Rewrite the ".." entry to point to the new
		 * directory.
		 */
		error = xfs_dir_replace(tp, src_ip, &xfs_name_dotdot,
					target_dp->i_ino,
					&first_block, &free_list, spaceres);
		ASSERT(error != EEXIST);
		if (error)
			goto abort_return;
	}

	/*
	 * We always want to hit the ctime on the source inode.
	 *
	 * This isn't strictly required by the standards since the source
	 * inode isn't really being changed, but old unix file systems did
	 * it and some incremental backup programs won't work without it.
	 */
	xfs_trans_ichgtime(tp, src_ip, XFS_ICHGTIME_CHG);
	xfs_trans_log_inode(tp, src_ip, XFS_ILOG_CORE);

	/*
	 * Adjust the link count on src_dp. This is necessary when
	 * renaming a directory, either within one parent when
	 * the target existed, or across two parent directories.
	 */
	if (src_is_directory && (new_parent || target_ip != NULL)) {
		/*
		 * Decrement link count on src_directory since the
		 * entry that's moved no longer points to it.
		 */
		error = xfs_droplink(tp, src_dp);
		if (error)
			goto abort_return;
	}

	error = xfs_dir_removename(tp, src_dp, src_name, src_ip->i_ino,
					&first_block, &free_list, spaceres);
	if (error)
		goto abort_return;

	xfs_trans_ichgtime(tp, src_dp, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
	xfs_trans_log_inode(tp, src_dp, XFS_ILOG_CORE);
	if (new_parent)
		xfs_trans_log_inode(tp, target_dp, XFS_ILOG_CORE);

	/*
	 * If this is a synchronous mount, make sure that the
	 * rename transaction goes to disk before returning to
	 * the user.
	 */
	if (mp->m_flags & (XFS_MOUNT_WSYNC|XFS_MOUNT_DIRSYNC)) {
		xfs_trans_set_sync(tp);
	}

	error = xfs_bmap_finish(&tp, &free_list, &committed);
	if (error) {
		xfs_bmap_cancel(&free_list);
		xfs_trans_cancel(tp, (XFS_TRANS_RELEASE_LOG_RES |
				 XFS_TRANS_ABORT));
		goto std_return;
	}

	/*
	 * trans_commit will unlock src_ip, target_ip & decrement
	 * the vnode references.
	 */
	return xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES);

 abort_return:
	cancel_flags |= XFS_TRANS_ABORT;
 error_return:
	xfs_bmap_cancel(&free_list);
	xfs_trans_cancel(tp, cancel_flags);
 std_return:
	return error;
}
STATIC int
xfs_iflush_cluster(
	xfs_inode_t	*ip,
	xfs_buf_t	*bp)
{
	xfs_mount_t		*mp = ip->i_mount;
	struct xfs_perag	*pag;
	unsigned long		first_index, mask;
	unsigned long		inodes_per_cluster;
	int			ilist_size;
	xfs_inode_t		**ilist;
	xfs_inode_t		*iq;
	int			nr_found;
	int			clcount = 0;
	int			bufwasdelwri;
	int			i;

	pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ip->i_ino));

	inodes_per_cluster = XFS_INODE_CLUSTER_SIZE(mp) >> mp->m_sb.sb_inodelog;
	ilist_size = inodes_per_cluster * sizeof(xfs_inode_t *);
	ilist = kmem_alloc(ilist_size, KM_MAYFAIL|KM_NOFS);
	if (!ilist)
		goto out_put;

	mask = ~(((XFS_INODE_CLUSTER_SIZE(mp) >> mp->m_sb.sb_inodelog)) - 1);
	first_index = XFS_INO_TO_AGINO(mp, ip->i_ino) & mask;
	rcu_read_lock();
	/* really need a gang lookup range call here */
	nr_found = radix_tree_gang_lookup(&pag->pag_ici_root, (void**)ilist,
					first_index, inodes_per_cluster);
	if (nr_found == 0)
		goto out_free;

	for (i = 0; i < nr_found; i++) {
		iq = ilist[i];
		if (iq == ip)
			continue;

		/*
		 * because this is an RCU protected lookup, we could find a
		 * recently freed or even reallocated inode during the lookup.
		 * We need to check under the i_flags_lock for a valid inode
		 * here. Skip it if it is not valid or the wrong inode.
		 */
		spin_lock(&ip->i_flags_lock);
		if (!iq->i_ino ||
		    (XFS_INO_TO_AGINO(mp, iq->i_ino) & mask) != first_index) {
			spin_unlock(&ip->i_flags_lock);
			continue;
		}
		spin_unlock(&ip->i_flags_lock);

		/*
		 * Do an un-protected check to see if the inode is dirty and
		 * is a candidate for flushing. These checks will be repeated
		 * later after the appropriate locks are acquired.
		 */
		if (xfs_inode_clean(iq) && xfs_ipincount(iq) == 0)
			continue;

		/*
		 * Try to get locks. If any are unavailable or it is pinned,
		 * then this inode cannot be flushed and is skipped.
		 */
		if (!xfs_ilock_nowait(iq, XFS_ILOCK_SHARED))
			continue;
		if (!xfs_iflock_nowait(iq)) {
			xfs_iunlock(iq, XFS_ILOCK_SHARED);
			continue;
		}
		if (xfs_ipincount(iq)) {
			xfs_ifunlock(iq);
			xfs_iunlock(iq, XFS_ILOCK_SHARED);
			continue;
		}

		/*
		 * arriving here means that this inode can be flushed. First
		 * re-check that it's dirty before flushing.
		 */
		if (!xfs_inode_clean(iq)) {
			int	error;

			error = xfs_iflush_int(iq, bp);
			if (error) {
				xfs_iunlock(iq, XFS_ILOCK_SHARED);
				goto cluster_corrupt_out;
			}
			clcount++;
		} else {
			xfs_ifunlock(iq);
		}
		xfs_iunlock(iq, XFS_ILOCK_SHARED);
	}

	if (clcount) {
		XFS_STATS_INC(xs_icluster_flushcnt);
		XFS_STATS_ADD(xs_icluster_flushinode, clcount);
	}

out_free:
	rcu_read_unlock();
	kmem_free(ilist);
out_put:
	xfs_perag_put(pag);
	return 0;

cluster_corrupt_out:
	/*
	 * Corruption detected in the clustering loop. Invalidate the
	 * inode buffer and shut down the filesystem.
	 */
	rcu_read_unlock();
	/*
	 * Clean up the buffer. If it was delwri, just release it --
	 * brelse can handle it with no problems. If not, shut down the
	 * filesystem before releasing the buffer.
	 */
	bufwasdelwri = (bp->b_flags & _XBF_DELWRI_Q);
	if (bufwasdelwri)
		xfs_buf_relse(bp);

	xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);

	if (!bufwasdelwri) {
		/*
		 * Just like incore_relse: if we have b_iodone functions,
		 * mark the buffer as an error and call them. Otherwise
		 * mark it as stale and brelse.
		 */
		if (bp->b_iodone) {
			bp->b_flags &= ~XBF_DONE;
			xfs_buf_stale(bp);
			xfs_buf_ioerror(bp, EIO);
			xfs_buf_ioend(bp, 0);
		} else {
			xfs_buf_stale(bp);
			xfs_buf_relse(bp);
		}
	}

	/*
	 * Unlocks the flush lock
	 */
	xfs_iflush_abort(iq, false);
	kmem_free(ilist);
	xfs_perag_put(pag);
	return XFS_ERROR(EFSCORRUPTED);
}
/*
 * Flush dirty inode metadata into the backing buffer.
 *
 * The caller must have the inode lock and the inode flush lock held. The
 * inode lock will still be held upon return to the caller, and the inode
 * flush lock will be released after the inode has reached the disk.
 *
 * The caller must write out the buffer returned in *bpp and release it.
 */
int
xfs_iflush(
	struct xfs_inode	*ip,
	struct xfs_buf		**bpp)
{
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_buf		*bp;
	struct xfs_dinode	*dip;
	int			error;

	XFS_STATS_INC(xs_iflush_count);

	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL|XFS_ILOCK_SHARED));
	ASSERT(xfs_isiflocked(ip));
	ASSERT(ip->i_d.di_format != XFS_DINODE_FMT_BTREE ||
	       ip->i_d.di_nextents > XFS_IFORK_MAXEXT(ip, XFS_DATA_FORK));

	*bpp = NULL;

	xfs_iunpin_wait(ip);

	/*
	 * For stale inodes we cannot rely on the backing buffer remaining
	 * stale in cache for the remaining life of the stale inode and so
	 * xfs_imap_to_bp() below may give us a buffer that no longer contains
	 * inodes below. We have to check this after ensuring the inode is
	 * unpinned so that it is safe to reclaim the stale inode after the
	 * unpin operation.
	 */
	if (xfs_iflags_test(ip, XFS_ISTALE)) {
		xfs_ifunlock(ip);
		return 0;
	}

	/*
	 * This may have been unpinned because the filesystem is shutting
	 * down forcibly. If that's the case we must not write this inode
	 * to disk, because the log record didn't make it to disk.
	 *
	 * We also have to remove the log item from the AIL in this case,
	 * as we wait for an empty AIL as part of the unmount process.
	 */
	if (XFS_FORCED_SHUTDOWN(mp)) {
		error = XFS_ERROR(EIO);
		goto abort_out;
	}

	/*
	 * Get the buffer containing the on-disk inode.
	 */
	error = xfs_imap_to_bp(mp, NULL, &ip->i_imap, &dip, &bp, XBF_TRYLOCK,
			       0);
	if (error == EAGAIN) {
		xfs_ifunlock(ip);
		return error;
	}
	if (error)
		goto corrupt_out;

	/*
	 * First flush out the inode that xfs_iflush was called with.
	 */
	error = xfs_iflush_int(ip, bp);
	if (error)
		goto corrupt_out;

	/*
	 * If the buffer is pinned then push on the log now so we won't
	 * get stuck waiting in the write for too long.
	 */
	if (xfs_buf_ispinned(bp))
		xfs_log_force(mp, 0);

	/*
	 * see if other inodes can be gathered into this write
	 */
	error = xfs_iflush_cluster(ip, bp);
	if (error)
		goto cluster_corrupt_out;

	*bpp = bp;
	return 0;

corrupt_out:
	xfs_buf_relse(bp);
	xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
cluster_corrupt_out:
	error = XFS_ERROR(EFSCORRUPTED);
abort_out:
	/*
	 * Unlocks the flush lock
	 */
	xfs_iflush_abort(ip, false);
	return error;
}
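/*
 * A minimal sketch of the calling convention for xfs_iflush() (error
 * handling elided; the AIL pushing and inode reclaim paths are the real
 * callers):
 *
 *	struct xfs_buf	*bp = NULL;
 *
 *	error = xfs_iflush(ip, &bp);
 *	if (!error) {
 *		xfs_buf_delwri_queue(bp, buffer_list);	(or xfs_bwrite(bp))
 *		xfs_buf_relse(bp);
 *	}
 *
 * i.e. on success the caller owns a locked, referenced cluster buffer and is
 * responsible for writing it out and releasing it, as noted above.
 */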
STATIC int
xfs_iflush_int(
	struct xfs_inode	*ip,
	struct xfs_buf		*bp)
{
	struct xfs_inode_log_item *iip = ip->i_itemp;
	struct xfs_dinode	*dip;
	struct xfs_mount	*mp = ip->i_mount;

	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL|XFS_ILOCK_SHARED));
	ASSERT(xfs_isiflocked(ip));
	ASSERT(ip->i_d.di_format != XFS_DINODE_FMT_BTREE ||
	       ip->i_d.di_nextents > XFS_IFORK_MAXEXT(ip, XFS_DATA_FORK));
	ASSERT(iip != NULL && iip->ili_fields != 0);

	/* set *dip = inode's place in the buffer */
	dip = (xfs_dinode_t *)xfs_buf_offset(bp, ip->i_imap.im_boffset);

	if (XFS_TEST_ERROR(dip->di_magic != cpu_to_be16(XFS_DINODE_MAGIC),
			       mp, XFS_ERRTAG_IFLUSH_1, XFS_RANDOM_IFLUSH_1)) {
		xfs_alert_tag(mp, XFS_PTAG_IFLUSH,
			"%s: Bad inode %Lu magic number 0x%x, ptr 0x%p",
			__func__, ip->i_ino, be16_to_cpu(dip->di_magic), dip);
		goto corrupt_out;
	}
	if (XFS_TEST_ERROR(ip->i_d.di_magic != XFS_DINODE_MAGIC,
				mp, XFS_ERRTAG_IFLUSH_2, XFS_RANDOM_IFLUSH_2)) {
		xfs_alert_tag(mp, XFS_PTAG_IFLUSH,
			"%s: Bad inode %Lu, ptr 0x%p, magic number 0x%x",
			__func__, ip->i_ino, ip, ip->i_d.di_magic);
		goto corrupt_out;
	}
	if (S_ISREG(ip->i_d.di_mode)) {
		if (XFS_TEST_ERROR(
		    (ip->i_d.di_format != XFS_DINODE_FMT_EXTENTS) &&
		    (ip->i_d.di_format != XFS_DINODE_FMT_BTREE),
		    mp, XFS_ERRTAG_IFLUSH_3, XFS_RANDOM_IFLUSH_3)) {
			xfs_alert_tag(mp, XFS_PTAG_IFLUSH,
				"%s: Bad regular inode %Lu, ptr 0x%p",
				__func__, ip->i_ino, ip);
			goto corrupt_out;
		}
	} else if (S_ISDIR(ip->i_d.di_mode)) {
		if (XFS_TEST_ERROR(
		    (ip->i_d.di_format != XFS_DINODE_FMT_EXTENTS) &&
		    (ip->i_d.di_format != XFS_DINODE_FMT_BTREE) &&
		    (ip->i_d.di_format != XFS_DINODE_FMT_LOCAL),
		    mp, XFS_ERRTAG_IFLUSH_4, XFS_RANDOM_IFLUSH_4)) {
			xfs_alert_tag(mp, XFS_PTAG_IFLUSH,
				"%s: Bad directory inode %Lu, ptr 0x%p",
				__func__, ip->i_ino, ip);
			goto corrupt_out;
		}
	}
	if (XFS_TEST_ERROR(ip->i_d.di_nextents + ip->i_d.di_anextents >
				ip->i_d.di_nblocks, mp, XFS_ERRTAG_IFLUSH_5,
				XFS_RANDOM_IFLUSH_5)) {
		xfs_alert_tag(mp, XFS_PTAG_IFLUSH,
			"%s: detected corrupt incore inode %Lu, "
			"total extents = %d, nblocks = %Ld, ptr 0x%p",
			__func__, ip->i_ino,
			ip->i_d.di_nextents + ip->i_d.di_anextents,
			ip->i_d.di_nblocks, ip);
		goto corrupt_out;
	}
	if (XFS_TEST_ERROR(ip->i_d.di_forkoff > mp->m_sb.sb_inodesize,
				mp, XFS_ERRTAG_IFLUSH_6, XFS_RANDOM_IFLUSH_6)) {
		xfs_alert_tag(mp, XFS_PTAG_IFLUSH,
			"%s: bad inode %Lu, forkoff 0x%x, ptr 0x%p",
			__func__, ip->i_ino, ip->i_d.di_forkoff, ip);
		goto corrupt_out;
	}

	/*
	 * Inode item log recovery for v1/v2 inodes is dependent on the
	 * di_flushiter count for correct sequencing. We bump the flush
	 * iteration count so we can detect flushes which postdate a log record
	 * during recovery. This is redundant as we now log every change and
	 * hence this can't happen but we need to still do it to ensure
	 * backwards compatibility with old kernels that predate logging all
	 * inode changes.
	 */
	if (ip->i_d.di_version < 3)
		ip->i_d.di_flushiter++;

	/*
	 * Copy the dirty parts of the inode into the on-disk
	 * inode. We always copy out the core of the inode,
	 * because if the inode is dirty at all the core must
	 * be.
	 */
	xfs_dinode_to_disk(dip, &ip->i_d);

	/* Wrap, we never let the log put out DI_MAX_FLUSH */
	if (ip->i_d.di_flushiter == DI_MAX_FLUSH)
		ip->i_d.di_flushiter = 0;

	/*
	 * If this is really an old format inode and the superblock version
	 * has not been updated to support only new format inodes, then
	 * convert back to the old inode format. If the superblock version
	 * has been updated, then make the conversion permanent.
	 */
	ASSERT(ip->i_d.di_version == 1 || xfs_sb_version_hasnlink(&mp->m_sb));
	if (ip->i_d.di_version == 1) {
		if (!xfs_sb_version_hasnlink(&mp->m_sb)) {
			/*
			 * Convert it back.
			 */
			ASSERT(ip->i_d.di_nlink <= XFS_MAXLINK_1);
			dip->di_onlink = cpu_to_be16(ip->i_d.di_nlink);
		} else {
			/*
			 * The superblock version has already been bumped,
			 * so just make the conversion to the new inode
			 * format permanent.
			 */
			ip->i_d.di_version = 2;
			dip->di_version = 2;
			ip->i_d.di_onlink = 0;
			dip->di_onlink = 0;
			memset(&(ip->i_d.di_pad[0]), 0, sizeof(ip->i_d.di_pad));
			memset(&(dip->di_pad[0]), 0,
			       sizeof(dip->di_pad));
			ASSERT(xfs_get_projid(ip) == 0);
		}
	}

	xfs_iflush_fork(ip, dip, iip, XFS_DATA_FORK, bp);
	if (XFS_IFORK_Q(ip))
		xfs_iflush_fork(ip, dip, iip, XFS_ATTR_FORK, bp);
	xfs_inobp_check(mp, bp);

	/*
	 * We've recorded everything logged in the inode, so we'd like to clear
	 * the ili_fields bits so we don't log and flush things unnecessarily.
	 * However, we can't stop logging all this information until the data
	 * we've copied into the disk buffer is written to disk. If we did we
	 * might overwrite the copy of the inode in the log with all the data
	 * after re-logging only part of it, and in the face of a crash we
	 * wouldn't have all the data we need to recover.
	 *
	 * What we do is move the bits to the ili_last_fields field. When
	 * logging the inode, these bits are moved back to the ili_fields field.
	 * In the xfs_iflush_done() routine we clear ili_last_fields, since we
	 * know that the information those bits represent is permanently on
	 * disk. As long as the flush completes before the inode is logged
	 * again, then both ili_fields and ili_last_fields will be cleared.
	 *
	 * We can play with the ili_fields bits here, because the inode lock
	 * must be held exclusively in order to set bits there and the flush
	 * lock protects the ili_last_fields bits. Set ili_logged so the flush
	 * done routine can tell whether or not to look in the AIL. Also, store
	 * the current LSN of the inode so that we can tell whether the item has
	 * moved in the AIL from xfs_iflush_done(). In order to read the lsn we
	 * need the AIL lock, because it is a 64 bit value that cannot be read
	 * atomically.
	 */
	iip->ili_last_fields = iip->ili_fields;
	iip->ili_fields = 0;
	iip->ili_logged = 1;

	xfs_trans_ail_copy_lsn(mp->m_ail, &iip->ili_flush_lsn,
				&iip->ili_item.li_lsn);

	/*
	 * Attach the function xfs_iflush_done to the inode's
	 * buffer. This will remove the inode from the AIL
	 * and unlock the inode's flush lock when the inode is
	 * completely written to disk.
	 */
	xfs_buf_attach_iodone(bp, xfs_iflush_done, &iip->ili_item);

	/* update the lsn in the on disk inode if required */
	if (ip->i_d.di_version == 3)
		dip->di_lsn = cpu_to_be64(iip->ili_item.li_lsn);

	/* generate the checksum. */
	xfs_dinode_calc_crc(mp, dip);

	ASSERT(bp->b_fspriv != NULL);
	ASSERT(bp->b_iodone != NULL);
	return 0;

corrupt_out:
	return XFS_ERROR(EFSCORRUPTED);
}