/*
 * Copyright (c) 2000-2006 Silicon Graphics, Inc.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_defer.h"
#include "xfs_inode.h"
#include "xfs_error.h"
#include "xfs_cksum.h"
#include "xfs_icache.h"
#include "xfs_trans.h"
#include "xfs_ialloc.h"
/*
 * Check that none of the inodes in the buffer have a next
 * unlinked field of 0.
 */
void
xfs_inobp_check(
        xfs_mount_t     *mp,
        xfs_buf_t       *bp)
{
        int             i;
        int             j;
        xfs_dinode_t    *dip;

        j = mp->m_inode_cluster_size >> mp->m_sb.sb_inodelog;

        for (i = 0; i < j; i++) {
                dip = xfs_buf_offset(bp, i * mp->m_sb.sb_inodesize);
                if (!dip->di_next_unlinked) {
                        xfs_alert(mp,
        "Detected bogus zero next_unlinked field in inode %d buffer 0x%llx.",
                                i, (long long)bp->b_bn);
                }
        }
}
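/*
 * An on-disk inode version is acceptable when it matches the superblock
 * feature set: CRC-enabled (v5) filesystems require v3 inodes, while older
 * filesystems accept v1 or v2 inodes.
 */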
bool
xfs_dinode_good_version(
        struct xfs_mount        *mp,
        __u8                    version)
{
        if (xfs_sb_version_hascrc(&mp->m_sb))
                return version == 3;

        return version == 1 || version == 2;
}
/*
 * If we are doing readahead on an inode buffer, we might be in log recovery
 * reading an inode allocation buffer that hasn't yet been replayed, and hence
 * has not had the inode cores stamped into it. Hence for readahead, the buffer
 * may be potentially invalid.
 *
 * If the readahead buffer is invalid, we need to mark it with an error and
 * clear the DONE status of the buffer so that a followup read will re-read it
 * from disk. We don't report the error otherwise to avoid warnings during log
 * recovery and we don't get unnecessary panics on debug kernels. We use EIO
 * here because all we want to do is say readahead failed; there is no-one to
 * report the error to, so this will distinguish it from a non-ra verifier
 * failure. Changes to this readahead error behaviour also need to be
 * reflected in xfs_dquot_buf_readahead_verify().
 */
static void
xfs_inode_buf_verify(
        struct xfs_buf  *bp,
        bool            readahead)
{
        struct xfs_mount *mp = bp->b_target->bt_mount;
        int             i;
        int             ni;

        /*
         * Validate the magic number and version of every inode in the buffer.
         */
        ni = XFS_BB_TO_FSB(mp, bp->b_length) * mp->m_sb.sb_inopblock;
        for (i = 0; i < ni; i++) {
                int             di_ok;
                xfs_dinode_t    *dip;

                dip = xfs_buf_offset(bp, (i << mp->m_sb.sb_inodelog));
                di_ok = dip->di_magic == cpu_to_be16(XFS_DINODE_MAGIC) &&
                        xfs_dinode_good_version(mp, dip->di_version);
                if (unlikely(XFS_TEST_ERROR(!di_ok, mp,
                                                XFS_ERRTAG_ITOBP_INOTOBP,
                                                XFS_RANDOM_ITOBP_INOTOBP))) {
                        if (readahead) {
                                bp->b_flags &= ~XBF_DONE;
                                xfs_buf_ioerror(bp, -EIO);
                                return;
                        }

                        xfs_buf_ioerror(bp, -EFSCORRUPTED);
                        xfs_verifier_error(bp);
                        xfs_alert(mp,
                                "bad inode magic/vsn daddr %lld #%d (magic=%x)",
                                (unsigned long long)bp->b_bn, i,
                                be16_to_cpu(dip->di_magic));
                }
        }
        xfs_inobp_check(mp, bp);
}
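/*
 * Thin wrappers so the buffer ops tables below can choose whether a
 * verification failure is reported (normal read/write) or only marked
 * silently on the buffer (readahead).
 */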
static void
xfs_inode_buf_read_verify(
        struct xfs_buf  *bp)
{
        xfs_inode_buf_verify(bp, false);
}

static void
xfs_inode_buf_readahead_verify(
        struct xfs_buf  *bp)
{
        xfs_inode_buf_verify(bp, true);
}

static void
xfs_inode_buf_write_verify(
        struct xfs_buf  *bp)
{
        xfs_inode_buf_verify(bp, false);
}
const struct xfs_buf_ops xfs_inode_buf_ops = {
        .name = "xfs_inode",
        .verify_read = xfs_inode_buf_read_verify,
        .verify_write = xfs_inode_buf_write_verify,
};

const struct xfs_buf_ops xfs_inode_buf_ra_ops = {
        .name = "xfs_inode_ra",
        .verify_read = xfs_inode_buf_readahead_verify,
        .verify_write = xfs_inode_buf_write_verify,
};
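/*
 * xfs_inode_buf_ops is handed to xfs_trans_read_buf() in xfs_imap_to_bp()
 * below so that every inode cluster read and write is verified. The _ra_ops
 * variant is meant for speculative readahead, where a verification failure
 * must stay quiet and simply force a re-read later.
 */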
/*
 * This routine is called to map an inode to the buffer containing the on-disk
 * version of the inode. It returns a pointer to the buffer containing the
 * on-disk inode in the bpp parameter, and in the dipp parameter it returns a
 * pointer to the on-disk inode within that buffer.
 *
 * If a non-zero error is returned, then the contents of bpp and dipp are
 * undefined.
 */
int
xfs_imap_to_bp(
        struct xfs_mount        *mp,
        struct xfs_trans        *tp,
        struct xfs_imap         *imap,
        struct xfs_dinode       **dipp,
        struct xfs_buf          **bpp,
        uint                    buf_flags,
        uint                    iget_flags)
{
        struct xfs_buf          *bp;
        int                     error;

        buf_flags |= XBF_UNMAPPED;
        error = xfs_trans_read_buf(mp, tp, mp->m_ddev_targp, imap->im_blkno,
                                   (int)imap->im_len, buf_flags, &bp,
                                   &xfs_inode_buf_ops);
        if (error) {
                if (error == -EAGAIN) {
                        ASSERT(buf_flags & XBF_TRYLOCK);
                        return error;
                }

                if (error == -EFSCORRUPTED &&
                    (iget_flags & XFS_IGET_UNTRUSTED))
                        return -EINVAL;

                xfs_warn(mp, "%s: xfs_trans_read_buf() returned error %d.",
                        __func__, error);
                return error;
        }

        *bpp = bp;
        *dipp = xfs_buf_offset(bp, imap->im_boffset);
        return 0;
}
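/*
 * Copy the on-disk inode core (big-endian) into the in-core inode and the
 * VFS inode, converting v1 inodes to the v2 layout as we go.
 */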
void
xfs_inode_from_disk(
        struct xfs_inode        *ip,
        struct xfs_dinode       *from)
{
        struct xfs_icdinode     *to = &ip->i_d;
        struct inode            *inode = VFS_I(ip);

        /*
         * Convert v1 inodes immediately to v2 inode format as this is the
         * minimum inode version format we support in the rest of the code.
         */
        to->di_version = from->di_version;
        if (to->di_version == 1) {
                set_nlink(inode, be16_to_cpu(from->di_onlink));
                to->di_projid_lo = 0;
                to->di_projid_hi = 0;
                to->di_version = 2;
        } else {
                set_nlink(inode, be32_to_cpu(from->di_nlink));
                to->di_projid_lo = be16_to_cpu(from->di_projid_lo);
                to->di_projid_hi = be16_to_cpu(from->di_projid_hi);
        }

        to->di_format = from->di_format;
        to->di_uid = be32_to_cpu(from->di_uid);
        to->di_gid = be32_to_cpu(from->di_gid);
        to->di_flushiter = be16_to_cpu(from->di_flushiter);

        /*
         * Time is signed, so need to convert to signed 32 bit before
         * storing in inode timestamp which may be 64 bit. Otherwise
         * a time before epoch is converted to a time long after epoch
         * on 64 bit systems.
         */
        inode->i_atime.tv_sec = (int)be32_to_cpu(from->di_atime.t_sec);
        inode->i_atime.tv_nsec = (int)be32_to_cpu(from->di_atime.t_nsec);
        inode->i_mtime.tv_sec = (int)be32_to_cpu(from->di_mtime.t_sec);
        inode->i_mtime.tv_nsec = (int)be32_to_cpu(from->di_mtime.t_nsec);
        inode->i_ctime.tv_sec = (int)be32_to_cpu(from->di_ctime.t_sec);
        inode->i_ctime.tv_nsec = (int)be32_to_cpu(from->di_ctime.t_nsec);
        inode->i_generation = be32_to_cpu(from->di_gen);
        inode->i_mode = be16_to_cpu(from->di_mode);

        to->di_size = be64_to_cpu(from->di_size);
        to->di_nblocks = be64_to_cpu(from->di_nblocks);
        to->di_extsize = be32_to_cpu(from->di_extsize);
        to->di_nextents = be32_to_cpu(from->di_nextents);
        to->di_anextents = be16_to_cpu(from->di_anextents);
        to->di_forkoff = from->di_forkoff;
        to->di_aformat = from->di_aformat;
        to->di_dmevmask = be32_to_cpu(from->di_dmevmask);
        to->di_dmstate = be16_to_cpu(from->di_dmstate);
        to->di_flags = be16_to_cpu(from->di_flags);

        if (to->di_version == 3) {
                inode->i_version = be64_to_cpu(from->di_changecount);
                to->di_crtime.t_sec = be32_to_cpu(from->di_crtime.t_sec);
                to->di_crtime.t_nsec = be32_to_cpu(from->di_crtime.t_nsec);
                to->di_flags2 = be64_to_cpu(from->di_flags2);
                to->di_cowextsize = be32_to_cpu(from->di_cowextsize);
        }
}
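/*
 * Format the in-core inode back into the on-disk (big-endian) inode core at
 * the given LSN. For v3 inodes the inode number, LSN and filesystem UUID are
 * stamped into the core and di_flushiter is zeroed.
 */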
void
xfs_inode_to_disk(
        struct xfs_inode        *ip,
        struct xfs_dinode       *to,
        xfs_lsn_t               lsn)
{
        struct xfs_icdinode     *from = &ip->i_d;
        struct inode            *inode = VFS_I(ip);

        to->di_magic = cpu_to_be16(XFS_DINODE_MAGIC);

        to->di_version = from->di_version;
        to->di_format = from->di_format;
        to->di_uid = cpu_to_be32(from->di_uid);
        to->di_gid = cpu_to_be32(from->di_gid);
        to->di_projid_lo = cpu_to_be16(from->di_projid_lo);
        to->di_projid_hi = cpu_to_be16(from->di_projid_hi);

        memset(to->di_pad, 0, sizeof(to->di_pad));
        to->di_atime.t_sec = cpu_to_be32(inode->i_atime.tv_sec);
        to->di_atime.t_nsec = cpu_to_be32(inode->i_atime.tv_nsec);
        to->di_mtime.t_sec = cpu_to_be32(inode->i_mtime.tv_sec);
        to->di_mtime.t_nsec = cpu_to_be32(inode->i_mtime.tv_nsec);
        to->di_ctime.t_sec = cpu_to_be32(inode->i_ctime.tv_sec);
        to->di_ctime.t_nsec = cpu_to_be32(inode->i_ctime.tv_nsec);
        to->di_nlink = cpu_to_be32(inode->i_nlink);
        to->di_gen = cpu_to_be32(inode->i_generation);
        to->di_mode = cpu_to_be16(inode->i_mode);

        to->di_size = cpu_to_be64(from->di_size);
        to->di_nblocks = cpu_to_be64(from->di_nblocks);
        to->di_extsize = cpu_to_be32(from->di_extsize);
        to->di_nextents = cpu_to_be32(from->di_nextents);
        to->di_anextents = cpu_to_be16(from->di_anextents);
        to->di_forkoff = from->di_forkoff;
        to->di_aformat = from->di_aformat;
        to->di_dmevmask = cpu_to_be32(from->di_dmevmask);
        to->di_dmstate = cpu_to_be16(from->di_dmstate);
        to->di_flags = cpu_to_be16(from->di_flags);

        if (from->di_version == 3) {
                to->di_changecount = cpu_to_be64(inode->i_version);
                to->di_crtime.t_sec = cpu_to_be32(from->di_crtime.t_sec);
                to->di_crtime.t_nsec = cpu_to_be32(from->di_crtime.t_nsec);
                to->di_flags2 = cpu_to_be64(from->di_flags2);
                to->di_cowextsize = cpu_to_be32(from->di_cowextsize);
                to->di_ino = cpu_to_be64(ip->i_ino);
                to->di_lsn = cpu_to_be64(lsn);
                memset(to->di_pad2, 0, sizeof(to->di_pad2));
                uuid_copy(&to->di_uuid, &ip->i_mount->m_sb.sb_meta_uuid);
                to->di_flushiter = 0;
        } else {
                to->di_flushiter = cpu_to_be16(from->di_flushiter);
        }
}
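/*
 * Convert a logged inode core (struct xfs_log_dinode, host-endian) into the
 * on-disk big-endian format, e.g. when log recovery replays an inode item
 * into its buffer.
 */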
void
xfs_log_dinode_to_disk(
        struct xfs_log_dinode   *from,
        struct xfs_dinode       *to)
{
        to->di_magic = cpu_to_be16(from->di_magic);
        to->di_mode = cpu_to_be16(from->di_mode);
        to->di_version = from->di_version;
        to->di_format = from->di_format;

        to->di_uid = cpu_to_be32(from->di_uid);
        to->di_gid = cpu_to_be32(from->di_gid);
        to->di_nlink = cpu_to_be32(from->di_nlink);
        to->di_projid_lo = cpu_to_be16(from->di_projid_lo);
        to->di_projid_hi = cpu_to_be16(from->di_projid_hi);
        memcpy(to->di_pad, from->di_pad, sizeof(to->di_pad));

        to->di_atime.t_sec = cpu_to_be32(from->di_atime.t_sec);
        to->di_atime.t_nsec = cpu_to_be32(from->di_atime.t_nsec);
        to->di_mtime.t_sec = cpu_to_be32(from->di_mtime.t_sec);
        to->di_mtime.t_nsec = cpu_to_be32(from->di_mtime.t_nsec);
        to->di_ctime.t_sec = cpu_to_be32(from->di_ctime.t_sec);
        to->di_ctime.t_nsec = cpu_to_be32(from->di_ctime.t_nsec);

        to->di_size = cpu_to_be64(from->di_size);
        to->di_nblocks = cpu_to_be64(from->di_nblocks);
        to->di_extsize = cpu_to_be32(from->di_extsize);
        to->di_nextents = cpu_to_be32(from->di_nextents);
        to->di_anextents = cpu_to_be16(from->di_anextents);
        to->di_forkoff = from->di_forkoff;
        to->di_aformat = from->di_aformat;
        to->di_dmevmask = cpu_to_be32(from->di_dmevmask);
        to->di_dmstate = cpu_to_be16(from->di_dmstate);
        to->di_flags = cpu_to_be16(from->di_flags);
        to->di_gen = cpu_to_be32(from->di_gen);

        if (from->di_version == 3) {
                to->di_changecount = cpu_to_be64(from->di_changecount);
                to->di_crtime.t_sec = cpu_to_be32(from->di_crtime.t_sec);
                to->di_crtime.t_nsec = cpu_to_be32(from->di_crtime.t_nsec);
                to->di_flags2 = cpu_to_be64(from->di_flags2);
                to->di_cowextsize = cpu_to_be32(from->di_cowextsize);
                to->di_ino = cpu_to_be64(from->di_ino);
                to->di_lsn = cpu_to_be64(from->di_lsn);
                memcpy(to->di_pad2, from->di_pad2, sizeof(to->di_pad2));
                uuid_copy(&to->di_uuid, &from->di_uuid);
                to->di_flushiter = 0;
        } else {
                to->di_flushiter = cpu_to_be16(from->di_flushiter);
        }
}
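/*
 * Sanity-check an on-disk inode before it is accepted into the in-core
 * cache: magic number, size, mode/ftype and, for v3 inodes, the CRC,
 * inode number, UUID and flag combinations.
 */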
static bool
xfs_dinode_verify(
        struct xfs_mount        *mp,
        struct xfs_inode        *ip,
        struct xfs_dinode       *dip)
{
        uint16_t                flags;
        uint64_t                flags2;
        uint16_t                mode;

        if (dip->di_magic != cpu_to_be16(XFS_DINODE_MAGIC))
                return false;

        /* don't allow invalid i_size */
        if (be64_to_cpu(dip->di_size) & (1ULL << 63))
                return false;

        mode = be16_to_cpu(dip->di_mode);
        if (mode && xfs_mode_to_ftype(mode) == XFS_DIR3_FT_UNKNOWN)
                return false;

        /* No zero-length symlinks/dirs. */
        if ((S_ISLNK(mode) || S_ISDIR(mode)) && dip->di_size == 0)
                return false;

        /* only version 3 or greater inodes are extensively verified here */
        if (dip->di_version < 3)
                return true;

        if (!xfs_sb_version_hascrc(&mp->m_sb))
                return false;
        if (!xfs_verify_cksum((char *)dip, mp->m_sb.sb_inodesize,
                              XFS_DINODE_CRC_OFF))
                return false;
        if (be64_to_cpu(dip->di_ino) != ip->i_ino)
                return false;
        if (!uuid_equal(&dip->di_uuid, &mp->m_sb.sb_meta_uuid))
                return false;

        flags = be16_to_cpu(dip->di_flags);
        flags2 = be64_to_cpu(dip->di_flags2);

        /* don't allow reflink/cowextsize if we don't have reflink */
        if ((flags2 & (XFS_DIFLAG2_REFLINK | XFS_DIFLAG2_COWEXTSIZE)) &&
            !xfs_sb_version_hasreflink(&mp->m_sb))
                return false;

        /* don't let reflink and realtime mix */
        if ((flags2 & XFS_DIFLAG2_REFLINK) && (flags & XFS_DIFLAG_REALTIME))
                return false;

        /* don't let reflink and dax mix */
        if ((flags2 & XFS_DIFLAG2_REFLINK) && (flags2 & XFS_DIFLAG2_DAX))
                return false;

        return true;
}
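/*
 * Recalculate the inode core CRC. Only v3 inodes carry a CRC, so this is a
 * no-op on older inode versions.
 */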
void
xfs_dinode_calc_crc(
        struct xfs_mount        *mp,
        struct xfs_dinode       *dip)
{
        uint32_t                crc;

        if (dip->di_version < 3)
                return;

        ASSERT(xfs_sb_version_hascrc(&mp->m_sb));
        crc = xfs_start_cksum((char *)dip, mp->m_sb.sb_inodesize,
                              XFS_DINODE_CRC_OFF);
        dip->di_crc = xfs_end_cksum(crc);
}
/*
 * Read the disk inode attributes into the in-core inode structure.
 *
 * For version 5 superblocks, if we are initialising a new inode and we are not
 * utilising the XFS_MOUNT_IKEEP inode cluster mode, we can simply build the new
 * inode core with a random generation number. If we are keeping inodes around,
 * we need to read the inode cluster to get the existing generation number off
 * disk. Further, if we are using version 4 superblocks (i.e. v1/v2 inode
 * format) then log recovery is dependent on the di_flushiter field being
 * initialised from the current on-disk value and hence we must also read the
 * inode off disk.
 */
int
xfs_iread(
        xfs_mount_t     *mp,
        xfs_trans_t     *tp,
        xfs_inode_t     *ip,
        uint            iget_flags)
{
        xfs_buf_t       *bp;
        xfs_dinode_t    *dip;
        int             error;

        /*
         * Fill in the location information in the in-core inode.
         */
        error = xfs_imap(mp, tp, ip->i_ino, &ip->i_imap, iget_flags);
        if (error)
                return error;

        /* shortcut IO on inode allocation if possible */
        if ((iget_flags & XFS_IGET_CREATE) &&
            xfs_sb_version_hascrc(&mp->m_sb) &&
            !(mp->m_flags & XFS_MOUNT_IKEEP)) {
                /* initialise the on-disk inode core */
                memset(&ip->i_d, 0, sizeof(ip->i_d));
                VFS_I(ip)->i_generation = prandom_u32();
                if (xfs_sb_version_hascrc(&mp->m_sb))
                        ip->i_d.di_version = 3;
                else
                        ip->i_d.di_version = 2;
                return 0;
        }

        /*
         * Get pointers to the on-disk inode and the buffer containing it.
         */
        error = xfs_imap_to_bp(mp, tp, &ip->i_imap, &dip, &bp, 0, iget_flags);
        if (error)
                return error;

        /* even unallocated inodes are verified */
        if (!xfs_dinode_verify(mp, ip, dip)) {
                xfs_alert(mp, "%s: validation failed for inode %lld",
                                __func__, ip->i_ino);

                XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW, mp, dip);
                error = -EFSCORRUPTED;
                goto out_brelse;
        }

        /*
         * If the on-disk inode is already linked to a directory
         * entry, copy all of the inode into the in-core inode.
         * xfs_iformat_fork() handles copying in the inode format
         * specific information.
         * Otherwise, just get the truly permanent information.
         */
        if (dip->di_mode) {
                xfs_inode_from_disk(ip, dip);
                error = xfs_iformat_fork(ip, dip);
                if (error) {
                        xfs_alert(mp, "%s: xfs_iformat_fork() returned error %d",
                                __func__, error);
                        goto out_brelse;
                }
        } else {
                /*
                 * Partial initialisation of the in-core inode. Just the bits
                 * that xfs_ialloc won't overwrite or relies on being correct.
                 */
                ip->i_d.di_version = dip->di_version;
                VFS_I(ip)->i_generation = be32_to_cpu(dip->di_gen);
                ip->i_d.di_flushiter = be16_to_cpu(dip->di_flushiter);

                /*
                 * Make sure to pull in the mode here as well in
                 * case the inode is released without being used.
                 * This ensures that xfs_inactive() will see that
                 * the inode is already free and not try to mess
                 * with the uninitialized part of it.
                 */
                VFS_I(ip)->i_mode = 0;
        }

        ASSERT(ip->i_d.di_version >= 2);
        ip->i_delayed_blks = 0;

        /*
         * Mark the buffer containing the inode as something to keep
         * around for a while. This helps to keep recently accessed
         * meta-data in-core longer.
         */
        xfs_buf_set_ref(bp, XFS_INO_REF);

        /*
         * Use xfs_trans_brelse() to release the buffer containing the on-disk
         * inode, because it was acquired with xfs_trans_read_buf() in
         * xfs_imap_to_bp() above. If tp is NULL, this is just a normal
         * brelse(). If we're within a transaction, then xfs_trans_brelse()
         * will only release the buffer if it is not dirty within the
         * transaction. It will be OK to release the buffer in this case,
         * because inodes on disk are never destroyed and we will be locking
         * the new in-core inode before putting it in the cache where other
         * processes can find it. Thus we don't have to worry about the inode
         * being changed just because we released the buffer.
         */
 out_brelse:
        xfs_trans_brelse(tp, bp);
        return error;
}