/*
 * Copyright (c) 2000-2006 Silicon Graphics, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_defer.h"
#include "xfs_inode.h"
#include "xfs_error.h"
#include "xfs_cksum.h"
#include "xfs_icache.h"
#include "xfs_trans.h"
#include "xfs_ialloc.h"
/*
 * Check that none of the inodes in the buffer have a next
 * unlinked field of 0.
 */
void
xfs_inobp_check(
	xfs_mount_t	*mp,
	xfs_buf_t	*bp)
{
	int		i;
	int		j;
	xfs_dinode_t	*dip;

	j = mp->m_inode_cluster_size >> mp->m_sb.sb_inodelog;

	for (i = 0; i < j; i++) {
		dip = xfs_buf_offset(bp, i * mp->m_sb.sb_inodesize);
		if (!dip->di_next_unlinked) {
			xfs_alert(mp,
	"Detected bogus zero next_unlinked field in inode %d buffer 0x%llx.",
				i, (long long)bp->b_bn);
		}
	}
}
bool
xfs_dinode_good_version(
	struct xfs_mount	*mp,
	__u8			version)
{
	if (xfs_sb_version_hascrc(&mp->m_sb))
		return version == 3;

	return version == 1 || version == 2;
}
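
/*
 * Note: on v5 (CRC-enabled) filesystems only v3 inode cores are considered
 * good; pre-v5 filesystems accept v1 or v2 cores, and v1 cores are converted
 * to v2 in memory by xfs_inode_from_disk() below.
 */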
/*
 * If we are doing readahead on an inode buffer, we might be in log recovery
 * reading an inode allocation buffer that hasn't yet been replayed, and hence
 * has not had the inode cores stamped into it. Hence for readahead, the buffer
 * may be potentially invalid.
 *
 * If the readahead buffer is invalid, we need to mark it with an error and
 * clear the DONE status of the buffer so that a followup read will re-read it
 * from disk. We don't report the error otherwise to avoid warnings during log
 * recovery and we don't get unnecessary panics on debug kernels. We use EIO
 * here because all we want to do is say readahead failed; there is no-one to
 * report the error to, so this will distinguish it from a non-ra verifier
 * failure. Changes to this readahead error behaviour also need to be
 * reflected in xfs_dquot_buf_readahead_verify().
 */
static void
xfs_inode_buf_verify(
	struct xfs_buf	*bp,
	bool		readahead)
{
	struct xfs_mount *mp = bp->b_target->bt_mount;
	int		i;
	int		ni;

	/*
	 * Validate the magic number and version of every inode in the buffer
	 */
	ni = XFS_BB_TO_FSB(mp, bp->b_length) * mp->m_sb.sb_inopblock;
	for (i = 0; i < ni; i++) {
		int		di_ok;
		xfs_dinode_t	*dip;

		dip = xfs_buf_offset(bp, (i << mp->m_sb.sb_inodelog));
		di_ok = dip->di_magic == cpu_to_be16(XFS_DINODE_MAGIC) &&
			xfs_dinode_good_version(mp, dip->di_version);
		if (unlikely(XFS_TEST_ERROR(!di_ok, mp,
					    XFS_ERRTAG_ITOBP_INOTOBP))) {
			if (readahead) {
				bp->b_flags &= ~XBF_DONE;
				xfs_buf_ioerror(bp, -EIO);
				return;
			}

			xfs_buf_ioerror(bp, -EFSCORRUPTED);
			xfs_verifier_error(bp);
			xfs_alert(mp,
		"bad inode magic/vsn daddr %lld #%d (magic=%x)",
				(unsigned long long)bp->b_bn, i,
				be16_to_cpu(dip->di_magic));
		}
	}
	xfs_inobp_check(mp, bp);
}
static void
xfs_inode_buf_read_verify(
	struct xfs_buf	*bp)
{
	xfs_inode_buf_verify(bp, false);
}

static void
xfs_inode_buf_readahead_verify(
	struct xfs_buf	*bp)
{
	xfs_inode_buf_verify(bp, true);
}

static void
xfs_inode_buf_write_verify(
	struct xfs_buf	*bp)
{
	xfs_inode_buf_verify(bp, false);
}

const struct xfs_buf_ops xfs_inode_buf_ops = {
	.name = "xfs_inode",
	.verify_read = xfs_inode_buf_read_verify,
	.verify_write = xfs_inode_buf_write_verify,
};

const struct xfs_buf_ops xfs_inode_buf_ra_ops = {
	.name = "xxfs_inode_ra",
	.verify_read = xfs_inode_buf_readahead_verify,
	.verify_write = xfs_inode_buf_write_verify,
};
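
/*
 * These ops tables are how the verifiers above get run: callers that read an
 * inode cluster pass &xfs_inode_buf_ops (or xfs_inode_buf_ra_ops for
 * readahead) to the buffer read routines, as the xfs_trans_read_buf() call in
 * xfs_imap_to_bp() below does, and the buffer cache then invokes ->verify_read
 * on I/O completion and ->verify_write before the buffer is written back.
 */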
/*
 * This routine is called to map an inode to the buffer containing the on-disk
 * version of the inode.  It returns a pointer to the buffer containing the
 * on-disk inode in the bpp parameter, and in the dipp parameter it returns a
 * pointer to the on-disk inode within that buffer.
 *
 * If a non-zero error is returned, then the contents of bpp and dipp are
 * undefined.
 */
int
xfs_imap_to_bp(
	struct xfs_mount	*mp,
	struct xfs_trans	*tp,
	struct xfs_imap		*imap,
	struct xfs_dinode	**dipp,
	struct xfs_buf		**bpp,
	uint			buf_flags,
	uint			iget_flags)
{
	struct xfs_buf		*bp;
	int			error;

	buf_flags |= XBF_UNMAPPED;
	error = xfs_trans_read_buf(mp, tp, mp->m_ddev_targp, imap->im_blkno,
				   (int)imap->im_len, buf_flags, &bp,
				   &xfs_inode_buf_ops);
	if (error) {
		if (error == -EAGAIN) {
			ASSERT(buf_flags & XBF_TRYLOCK);
			return error;
		}

		if (error == -EFSCORRUPTED &&
		    (iget_flags & XFS_IGET_UNTRUSTED))
			return -EINVAL;

		xfs_warn(mp, "%s: xfs_trans_read_buf() returned error %d.",
			__func__, error);
		return error;
	}

	*bpp = bp;
	*dipp = xfs_buf_offset(bp, imap->im_boffset);
	return 0;
}
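
/*
 * Illustrative use of xfs_imap_to_bp(), a sketch mirroring xfs_iread() below
 * (error handling and locking elided):
 *
 *	struct xfs_buf		*bp;
 *	struct xfs_dinode	*dip;
 *
 *	error = xfs_imap_to_bp(mp, tp, &ip->i_imap, &dip, &bp, 0, 0);
 *	if (!error) {
 *		... inspect *dip ...
 *		xfs_trans_brelse(tp, bp);
 *	}
 */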
void
xfs_inode_from_disk(
	struct xfs_inode	*ip,
	struct xfs_dinode	*from)
{
	struct xfs_icdinode	*to = &ip->i_d;
	struct inode		*inode = VFS_I(ip);
	/*
	 * Convert v1 inodes immediately to v2 inode format as this is the
	 * minimum inode version format we support in the rest of the code.
	 */
	to->di_version = from->di_version;
	if (to->di_version == 1) {
		set_nlink(inode, be16_to_cpu(from->di_onlink));
		to->di_projid_lo = 0;
		to->di_projid_hi = 0;
		to->di_version = 2;
	} else {
		set_nlink(inode, be32_to_cpu(from->di_nlink));
		to->di_projid_lo = be16_to_cpu(from->di_projid_lo);
		to->di_projid_hi = be16_to_cpu(from->di_projid_hi);
	}
	to->di_format = from->di_format;
	to->di_uid = be32_to_cpu(from->di_uid);
	to->di_gid = be32_to_cpu(from->di_gid);
	to->di_flushiter = be16_to_cpu(from->di_flushiter);
	/*
	 * Time is signed, so we need to convert it to signed 32 bit before
	 * storing it in the inode timestamp, which may be 64 bit. Otherwise
	 * a time before the epoch is converted to a time long after the epoch
	 * on 64 bit systems.
	 */
	inode->i_atime.tv_sec = (int)be32_to_cpu(from->di_atime.t_sec);
	inode->i_atime.tv_nsec = (int)be32_to_cpu(from->di_atime.t_nsec);
	inode->i_mtime.tv_sec = (int)be32_to_cpu(from->di_mtime.t_sec);
	inode->i_mtime.tv_nsec = (int)be32_to_cpu(from->di_mtime.t_nsec);
	inode->i_ctime.tv_sec = (int)be32_to_cpu(from->di_ctime.t_sec);
	inode->i_ctime.tv_nsec = (int)be32_to_cpu(from->di_ctime.t_nsec);
	inode->i_generation = be32_to_cpu(from->di_gen);
	inode->i_mode = be16_to_cpu(from->di_mode);
	to->di_size = be64_to_cpu(from->di_size);
	to->di_nblocks = be64_to_cpu(from->di_nblocks);
	to->di_extsize = be32_to_cpu(from->di_extsize);
	to->di_nextents = be32_to_cpu(from->di_nextents);
	to->di_anextents = be16_to_cpu(from->di_anextents);
	to->di_forkoff = from->di_forkoff;
	to->di_aformat = from->di_aformat;
	to->di_dmevmask = be32_to_cpu(from->di_dmevmask);
	to->di_dmstate = be16_to_cpu(from->di_dmstate);
	to->di_flags = be16_to_cpu(from->di_flags);
	if (to->di_version == 3) {
		inode->i_version = be64_to_cpu(from->di_changecount);
		to->di_crtime.t_sec = be32_to_cpu(from->di_crtime.t_sec);
		to->di_crtime.t_nsec = be32_to_cpu(from->di_crtime.t_nsec);
		to->di_flags2 = be64_to_cpu(from->di_flags2);
		to->di_cowextsize = be32_to_cpu(from->di_cowextsize);
	}
}
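
/*
 * All on-disk inode fields are big-endian; xfs_inode_from_disk() above
 * converts them to CPU endianness for the in-core xfs_icdinode and the VFS
 * inode, while xfs_inode_to_disk() below performs the inverse conversion.
 */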
void
xfs_inode_to_disk(
	struct xfs_inode	*ip,
	struct xfs_dinode	*to,
	xfs_lsn_t		lsn)
{
	struct xfs_icdinode	*from = &ip->i_d;
	struct inode		*inode = VFS_I(ip);
	to->di_magic = cpu_to_be16(XFS_DINODE_MAGIC);

	to->di_version = from->di_version;
	to->di_format = from->di_format;
	to->di_uid = cpu_to_be32(from->di_uid);
	to->di_gid = cpu_to_be32(from->di_gid);
	to->di_projid_lo = cpu_to_be16(from->di_projid_lo);
	to->di_projid_hi = cpu_to_be16(from->di_projid_hi);
	memset(to->di_pad, 0, sizeof(to->di_pad));
	to->di_atime.t_sec = cpu_to_be32(inode->i_atime.tv_sec);
	to->di_atime.t_nsec = cpu_to_be32(inode->i_atime.tv_nsec);
	to->di_mtime.t_sec = cpu_to_be32(inode->i_mtime.tv_sec);
	to->di_mtime.t_nsec = cpu_to_be32(inode->i_mtime.tv_nsec);
	to->di_ctime.t_sec = cpu_to_be32(inode->i_ctime.tv_sec);
	to->di_ctime.t_nsec = cpu_to_be32(inode->i_ctime.tv_nsec);
	to->di_nlink = cpu_to_be32(inode->i_nlink);
	to->di_gen = cpu_to_be32(inode->i_generation);
	to->di_mode = cpu_to_be16(inode->i_mode);
	to->di_size = cpu_to_be64(from->di_size);
	to->di_nblocks = cpu_to_be64(from->di_nblocks);
	to->di_extsize = cpu_to_be32(from->di_extsize);
	to->di_nextents = cpu_to_be32(from->di_nextents);
	to->di_anextents = cpu_to_be16(from->di_anextents);
	to->di_forkoff = from->di_forkoff;
	to->di_aformat = from->di_aformat;
	to->di_dmevmask = cpu_to_be32(from->di_dmevmask);
	to->di_dmstate = cpu_to_be16(from->di_dmstate);
	to->di_flags = cpu_to_be16(from->di_flags);
	if (from->di_version == 3) {
		to->di_changecount = cpu_to_be64(inode->i_version);
		to->di_crtime.t_sec = cpu_to_be32(from->di_crtime.t_sec);
		to->di_crtime.t_nsec = cpu_to_be32(from->di_crtime.t_nsec);
		to->di_flags2 = cpu_to_be64(from->di_flags2);
		to->di_cowextsize = cpu_to_be32(from->di_cowextsize);
		to->di_ino = cpu_to_be64(ip->i_ino);
		to->di_lsn = cpu_to_be64(lsn);
		memset(to->di_pad2, 0, sizeof(to->di_pad2));
		uuid_copy(&to->di_uuid, &ip->i_mount->m_sb.sb_meta_uuid);
		to->di_flushiter = 0;
	} else {
		to->di_flushiter = cpu_to_be16(from->di_flushiter);
	}
}
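
/*
 * xfs_inode_to_disk() is the write-side counterpart of xfs_inode_from_disk();
 * it is typically called from the inode flush path to copy the in-core inode
 * back into the cluster buffer, stamping the v3-only fields (ino, lsn, uuid,
 * crtime, flags2, cowextsize) when the inode core is version 3.
 */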
void
xfs_log_dinode_to_disk(
	struct xfs_log_dinode	*from,
	struct xfs_dinode	*to)
{
	to->di_magic = cpu_to_be16(from->di_magic);
	to->di_mode = cpu_to_be16(from->di_mode);
	to->di_version = from->di_version;
	to->di_format = from->di_format;

	to->di_uid = cpu_to_be32(from->di_uid);
	to->di_gid = cpu_to_be32(from->di_gid);
	to->di_nlink = cpu_to_be32(from->di_nlink);
	to->di_projid_lo = cpu_to_be16(from->di_projid_lo);
	to->di_projid_hi = cpu_to_be16(from->di_projid_hi);
	memcpy(to->di_pad, from->di_pad, sizeof(to->di_pad));
	to->di_atime.t_sec = cpu_to_be32(from->di_atime.t_sec);
	to->di_atime.t_nsec = cpu_to_be32(from->di_atime.t_nsec);
	to->di_mtime.t_sec = cpu_to_be32(from->di_mtime.t_sec);
	to->di_mtime.t_nsec = cpu_to_be32(from->di_mtime.t_nsec);
	to->di_ctime.t_sec = cpu_to_be32(from->di_ctime.t_sec);
	to->di_ctime.t_nsec = cpu_to_be32(from->di_ctime.t_nsec);
	to->di_size = cpu_to_be64(from->di_size);
	to->di_nblocks = cpu_to_be64(from->di_nblocks);
	to->di_extsize = cpu_to_be32(from->di_extsize);
	to->di_nextents = cpu_to_be32(from->di_nextents);
	to->di_anextents = cpu_to_be16(from->di_anextents);
	to->di_forkoff = from->di_forkoff;
	to->di_aformat = from->di_aformat;
	to->di_dmevmask = cpu_to_be32(from->di_dmevmask);
	to->di_dmstate = cpu_to_be16(from->di_dmstate);
	to->di_flags = cpu_to_be16(from->di_flags);
	to->di_gen = cpu_to_be32(from->di_gen);
	if (from->di_version == 3) {
		to->di_changecount = cpu_to_be64(from->di_changecount);
		to->di_crtime.t_sec = cpu_to_be32(from->di_crtime.t_sec);
		to->di_crtime.t_nsec = cpu_to_be32(from->di_crtime.t_nsec);
		to->di_flags2 = cpu_to_be64(from->di_flags2);
		to->di_cowextsize = cpu_to_be32(from->di_cowextsize);
		to->di_ino = cpu_to_be64(from->di_ino);
		to->di_lsn = cpu_to_be64(from->di_lsn);
		memcpy(to->di_pad2, from->di_pad2, sizeof(to->di_pad2));
		uuid_copy(&to->di_uuid, &from->di_uuid);
		to->di_flushiter = 0;
	} else {
		to->di_flushiter = cpu_to_be16(from->di_flushiter);
	}
}
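
/*
 * xfs_log_dinode_to_disk() performs the same endian conversion as
 * xfs_inode_to_disk() but takes a logged inode core (struct xfs_log_dinode)
 * as its source; log recovery uses it to copy a recovered inode core back
 * into the on-disk buffer.
 */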
bool
xfs_dinode_verify(
	struct xfs_mount	*mp,
	xfs_ino_t		ino,
	struct xfs_dinode	*dip)
{
	uint16_t		mode;
	uint16_t		flags;
	uint64_t		flags2;
	if (dip->di_magic != cpu_to_be16(XFS_DINODE_MAGIC))
		return false;

	/* don't allow invalid i_size */
	if (be64_to_cpu(dip->di_size) & (1ULL << 63))
		return false;

	mode = be16_to_cpu(dip->di_mode);
	if (mode && xfs_mode_to_ftype(mode) == XFS_DIR3_FT_UNKNOWN)
		return false;

	/* No zero-length symlinks/dirs. */
	if ((S_ISLNK(mode) || S_ISDIR(mode)) && dip->di_size == 0)
		return false;

	/* only version 3 or greater inodes are extensively verified here */
	if (dip->di_version < 3)
		return true;

	if (!xfs_sb_version_hascrc(&mp->m_sb))
		return false;
	if (!xfs_verify_cksum((char *)dip, mp->m_sb.sb_inodesize,
			      XFS_DINODE_CRC_OFF))
		return false;
	if (be64_to_cpu(dip->di_ino) != ino)
		return false;
	if (!uuid_equal(&dip->di_uuid, &mp->m_sb.sb_meta_uuid))
		return false;

	flags = be16_to_cpu(dip->di_flags);
	flags2 = be64_to_cpu(dip->di_flags2);

	/* don't allow reflink/cowextsize if we don't have reflink */
	if ((flags2 & (XFS_DIFLAG2_REFLINK | XFS_DIFLAG2_COWEXTSIZE)) &&
	    !xfs_sb_version_hasreflink(&mp->m_sb))
		return false;

	/* don't let reflink and realtime mix */
	if ((flags2 & XFS_DIFLAG2_REFLINK) && (flags & XFS_DIFLAG_REALTIME))
		return false;

	/* don't let reflink and dax mix */
	if ((flags2 & XFS_DIFLAG2_REFLINK) && (flags2 & XFS_DIFLAG2_DAX))
		return false;

	return true;
}
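
/*
 * xfs_dinode_verify() only reports pass/fail; callers such as xfs_iread()
 * below map a failure to -EFSCORRUPTED. Pre-v3 inode cores only get the basic
 * checks above, while v3 cores are additionally checked for CRC, inode
 * number, uuid and flag consistency.
 */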
void
xfs_dinode_calc_crc(
	struct xfs_mount	*mp,
	struct xfs_dinode	*dip)
{
	uint32_t		crc;

	if (dip->di_version < 3)
		return;

	ASSERT(xfs_sb_version_hascrc(&mp->m_sb));
	crc = xfs_start_cksum_update((char *)dip, mp->m_sb.sb_inodesize,
				     XFS_DINODE_CRC_OFF);
	dip->di_crc = xfs_end_cksum(crc);
}
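
/*
 * The inode CRC covers all sb_inodesize bytes of the on-disk inode, with the
 * di_crc field itself zeroed while the checksum is calculated
 * (xfs_start_cksum_update() takes care of that), and it is checked on read by
 * the xfs_verify_cksum() call in xfs_dinode_verify() above using the same
 * XFS_DINODE_CRC_OFF offset.
 */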
/*
 * Read the disk inode attributes into the in-core inode structure.
 *
 * For version 5 superblocks, if we are initialising a new inode and we are not
 * utilising the XFS_MOUNT_IKEEP inode cluster mode, we can simply build the new
 * inode core with a random generation number. If we are keeping inodes around,
 * we need to read the inode cluster to get the existing generation number off
 * disk. Further, if we are using version 4 superblocks (i.e. v1/v2 inode
 * format) then log recovery is dependent on the di_flushiter field being
 * initialised from the current on-disk value and hence we must also read the
 * inode off disk.
 */
int
xfs_iread(
	xfs_mount_t	*mp,
	xfs_trans_t	*tp,
	xfs_inode_t	*ip,
	uint		iget_flags)
{
	xfs_buf_t	*bp;
	xfs_dinode_t	*dip;
	int		error;
	/*
	 * Fill in the location information in the in-core inode.
	 */
	error = xfs_imap(mp, tp, ip->i_ino, &ip->i_imap, iget_flags);
	if (error)
		return error;
	/* shortcut IO on inode allocation if possible */
	if ((iget_flags & XFS_IGET_CREATE) &&
	    xfs_sb_version_hascrc(&mp->m_sb) &&
	    !(mp->m_flags & XFS_MOUNT_IKEEP)) {
		/* initialise the on-disk inode core */
		memset(&ip->i_d, 0, sizeof(ip->i_d));
		VFS_I(ip)->i_generation = prandom_u32();
		if (xfs_sb_version_hascrc(&mp->m_sb))
			ip->i_d.di_version = 3;
		else
			ip->i_d.di_version = 2;
		return 0;
	}
	/*
	 * Get pointers to the on-disk inode and the buffer containing it.
	 */
	error = xfs_imap_to_bp(mp, tp, &ip->i_imap, &dip, &bp, 0, iget_flags);
	if (error)
		return error;
	/* even unallocated inodes are verified */
	if (!xfs_dinode_verify(mp, ip->i_ino, dip)) {
		xfs_alert(mp, "%s: validation failed for inode %lld",
				__func__, ip->i_ino);

		XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW, mp, dip);
		error = -EFSCORRUPTED;
		goto out_brelse;
	}
	/*
	 * If the on-disk inode is already linked to a directory
	 * entry, copy all of the inode into the in-core inode.
	 * xfs_iformat_fork() handles copying in the inode format
	 * specific information.
	 * Otherwise, just get the truly permanent information.
	 */
	if (dip->di_mode) {
		xfs_inode_from_disk(ip, dip);
		error = xfs_iformat_fork(ip, dip);
		if (error) {
			xfs_alert(mp, "%s: xfs_iformat() returned error %d",
				__func__, error);
			goto out_brelse;
		}
	} else {
		/*
		 * Partial initialisation of the in-core inode. Just the bits
		 * that xfs_ialloc won't overwrite or relies on being correct.
		 */
		ip->i_d.di_version = dip->di_version;
		VFS_I(ip)->i_generation = be32_to_cpu(dip->di_gen);
		ip->i_d.di_flushiter = be16_to_cpu(dip->di_flushiter);

		/*
		 * Make sure to pull in the mode here as well in
		 * case the inode is released without being used.
		 * This ensures that xfs_inactive() will see that
		 * the inode is already free and not try to mess
		 * with the uninitialized part of it.
		 */
		VFS_I(ip)->i_mode = 0;
	}
	ASSERT(ip->i_d.di_version >= 2);
	ip->i_delayed_blks = 0;
	/*
	 * Mark the buffer containing the inode as something to keep
	 * around for a while.  This helps to keep recently accessed
	 * meta-data in-core longer.
	 */
	xfs_buf_set_ref(bp, XFS_INO_REF);
	/*
	 * Use xfs_trans_brelse() to release the buffer containing the on-disk
	 * inode, because it was acquired with xfs_trans_read_buf() in
	 * xfs_imap_to_bp() above.  If tp is NULL, this is just a normal
	 * brelse().  If we're within a transaction, then xfs_trans_brelse()
	 * will only release the buffer if it is not dirty within the
	 * transaction.  It will be OK to release the buffer in this case,
	 * because inodes on disk are never destroyed and we will be locking the
	 * new in-core inode before putting it in the cache where other
	 * processes can find it.  Thus we don't have to worry about the inode
	 * being changed just because we released the buffer.
	 */
 out_brelse:
	xfs_trans_brelse(tp, bp);
	return error;
}