// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2000-2006 Silicon Graphics, Inc.
 * All Rights Reserved.
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_bit.h"
#include "xfs_sb.h"
#include "xfs_mount.h"
#include "xfs_defer.h"
#include "xfs_inode.h"
#include "xfs_trans.h"
#include "xfs_log.h"
#include "xfs_log_priv.h"
#include "xfs_log_recover.h"
#include "xfs_inode_item.h"
#include "xfs_extfree_item.h"
#include "xfs_trans_priv.h"
#include "xfs_alloc.h"
#include "xfs_ialloc.h"
#include "xfs_quota.h"
#include "xfs_trace.h"
#include "xfs_icache.h"
#include "xfs_bmap_btree.h"
#include "xfs_error.h"
#include "xfs_dir2.h"
#include "xfs_rmap_item.h"
#include "xfs_buf_item.h"
#include "xfs_refcount_item.h"
#include "xfs_bmap_item.h"

#define BLK_AVG(blk1, blk2)	((blk1+blk2) >> 1)
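
/*
 * Illustrative sketch (not part of the driver, never compiled): BLK_AVG()
 * is the integer midpoint used by the binary searches below. The
 * hypothetical helper here only demonstrates that repeatedly replacing one
 * endpoint with the midpoint terminates with the two endpoints adjacent,
 * which is the invariant xlog_find_cycle_start() asserts on loop exit.
 */
#if 0
static xfs_daddr_t
example_midpoint_search(xfs_daddr_t first, xfs_daddr_t end)
{
	xfs_daddr_t	mid = BLK_AVG(first, end);

	while (mid != first && mid != end) {
		/* a real search would read block "mid" and test its cycle */
		if (mid & 1)		/* stand-in for the cycle test */
			end = mid;
		else
			first = mid;
		mid = BLK_AVG(first, end);
	}
	return mid;		/* first and end are now adjacent */
}
#endif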
STATIC int
xlog_clear_stale_blocks(
	struct xlog	*,
	xfs_lsn_t);
#if defined(DEBUG)
STATIC void
xlog_recover_check_summary(
	struct xlog *);
#else
#define	xlog_recover_check_summary(log)
#endif
STATIC int
xlog_do_recovery_pass(
	struct xlog *, xfs_daddr_t, xfs_daddr_t, int, xfs_daddr_t *);

/*
 * This structure is used during recovery to record the buf log items which
 * have been canceled and should not be replayed.
 */
struct xfs_buf_cancel {
	xfs_daddr_t		bc_blkno;
	uint			bc_len;
	int			bc_refcount;
	struct list_head	bc_list;
};
/*
 * Sector aligned buffer routines for buffer create/read/write/access
 */

/*
 * Verify the log-relative block number and length in basic blocks are valid for
 * an operation involving the given XFS log buffer. Returns true if the fields
 * are valid, false otherwise.
 */
static inline bool
xlog_verify_bno(
	struct xlog	*log,
	xfs_daddr_t	blk_no,
	int		bbcount)
{
	if (blk_no < 0 || blk_no >= log->l_logBBsize)
		return false;
	if (bbcount <= 0 || (blk_no + bbcount) > log->l_logBBsize)
		return false;
	return true;
}
/*
 * Allocate a buffer to hold log data.  The buffer needs to be able to map to
 * a range of nbblks basic blocks at any valid offset within the log.
 */
static char *
xlog_alloc_buffer(
	struct xlog	*log,
	int		nbblks)
{
	int		align_mask = xfs_buftarg_dma_alignment(log->l_targ);

	/*
	 * Pass log block 0 since we don't have an addr yet, buffer will be
	 * verified on read.
	 */
	if (XFS_IS_CORRUPT(log->l_mp, !xlog_verify_bno(log, 0, nbblks))) {
		xfs_warn(log->l_mp, "Invalid block length (0x%x) for buffer",
			nbblks);
		return NULL;
	}

	/*
	 * We do log I/O in units of log sectors (a power-of-2 multiple of the
	 * basic block size), so we round up the requested size to accommodate
	 * the basic blocks required for complete log sectors.
	 *
	 * In addition, the buffer may be used for a non-sector-aligned block
	 * offset, in which case an I/O of the requested size could extend
	 * beyond the end of the buffer.  If the requested size is only 1 basic
	 * block it will never straddle a sector boundary, so this won't be an
	 * issue.  Nor will this be a problem if the log I/O is done in basic
	 * blocks (sector size 1).  But otherwise we extend the buffer by one
	 * extra log sector to ensure there's space to accommodate this
	 * possibility.
	 */
	if (nbblks > 1 && log->l_sectBBsize > 1)
		nbblks += log->l_sectBBsize;
	nbblks = round_up(nbblks, log->l_sectBBsize);
	return kmem_alloc_io(BBTOB(nbblks), align_mask, KM_MAYFAIL | KM_ZERO);
}
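
/*
 * Worked example (assumed values, never compiled): with a log sector size
 * of 8 basic blocks, a 5 block request is first padded by one full sector
 * for the unaligned-offset case, then rounded up to whole sectors.
 */
#if 0
static int
example_alloc_size(void)
{
	int	nbblks = 5;		/* requested basic blocks */
	int	sectBBsize = 8;		/* assumed log->l_sectBBsize */

	if (nbblks > 1 && sectBBsize > 1)
		nbblks += sectBBsize;			/* 5 -> 13 */
	nbblks = round_up(nbblks, sectBBsize);		/* 13 -> 16 */
	return nbblks;		/* allocation is BBTOB(16) bytes */
}
#endif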
/*
 * Return the address of the start of the given block number's data
 * in a log buffer.  The buffer covers a log sector-aligned region.
 */
static inline unsigned int
xlog_align(
	struct xlog	*log,
	xfs_daddr_t	blk_no)
{
	return BBTOB(blk_no & ((xfs_daddr_t)log->l_sectBBsize - 1));
}
static int
xlog_do_io(
	struct xlog		*log,
	xfs_daddr_t		blk_no,
	unsigned int		nbblks,
	char			*data,
	unsigned int		op)
{
	int			error;

	if (XFS_IS_CORRUPT(log->l_mp, !xlog_verify_bno(log, blk_no, nbblks))) {
		xfs_warn(log->l_mp,
			 "Invalid log block/length (0x%llx, 0x%x) for buffer",
			 blk_no, nbblks);
		return -EFSCORRUPTED;
	}

	blk_no = round_down(blk_no, log->l_sectBBsize);
	nbblks = round_up(nbblks, log->l_sectBBsize);
	ASSERT(nbblks > 0);

	error = xfs_rw_bdev(log->l_targ->bt_bdev, log->l_logBBstart + blk_no,
			BBTOB(nbblks), data, op);
	if (error && !XFS_FORCED_SHUTDOWN(log->l_mp)) {
		xfs_alert(log->l_mp,
			  "log recovery %s I/O error at daddr 0x%llx len %d error %d",
			  op == REQ_OP_WRITE ? "write" : "read",
			  blk_no, nbblks, error);
	}
	return error;
}

STATIC int
xlog_bread_noalign(
	struct xlog	*log,
	xfs_daddr_t	blk_no,
	int		nbblks,
	char		*data)
{
	return xlog_do_io(log, blk_no, nbblks, data, REQ_OP_READ);
}

STATIC int
xlog_bread(
	struct xlog	*log,
	xfs_daddr_t	blk_no,
	int		nbblks,
	char		*data,
	char		**offset)
{
	int		error;

	error = xlog_do_io(log, blk_no, nbblks, data, REQ_OP_READ);
	if (error)
		return error;

	*offset = data + xlog_align(log, blk_no);
	return 0;
}

STATIC int
xlog_bwrite(
	struct xlog	*log,
	xfs_daddr_t	blk_no,
	int		nbblks,
	char		*data)
{
	return xlog_do_io(log, blk_no, nbblks, data, REQ_OP_WRITE);
}
#ifdef DEBUG
/*
 * dump debug superblock and log record information
 */
STATIC void
xlog_header_check_dump(
	xfs_mount_t		*mp,
	xlog_rec_header_t	*head)
{
	xfs_debug(mp, "%s:  SB : uuid = %pU, fmt = %d",
		__func__, &mp->m_sb.sb_uuid, XLOG_FMT);
	xfs_debug(mp, "    log : uuid = %pU, fmt = %d",
		&head->h_fs_uuid, be32_to_cpu(head->h_fmt));
}
#else
#define xlog_header_check_dump(mp, head)
#endif
/*
 * check log record header for recovery
 */
STATIC int
xlog_header_check_recover(
	xfs_mount_t		*mp,
	xlog_rec_header_t	*head)
{
	ASSERT(head->h_magicno == cpu_to_be32(XLOG_HEADER_MAGIC_NUM));

	/*
	 * IRIX doesn't write the h_fmt field and leaves it zeroed
	 * (XLOG_FMT_UNKNOWN). This stops us from trying to recover
	 * a dirty log created in IRIX.
	 */
	if (XFS_IS_CORRUPT(mp, head->h_fmt != cpu_to_be32(XLOG_FMT))) {
		xfs_warn(mp,
	"dirty log written in incompatible format - can't recover");
		xlog_header_check_dump(mp, head);
		return -EFSCORRUPTED;
	}
	if (XFS_IS_CORRUPT(mp, !uuid_equal(&mp->m_sb.sb_uuid,
					   &head->h_fs_uuid))) {
		xfs_warn(mp,
	"dirty log entry has mismatched uuid - can't recover");
		xlog_header_check_dump(mp, head);
		return -EFSCORRUPTED;
	}
	return 0;
}
/*
 * read the head block of the log and check the header
 */
STATIC int
xlog_header_check_mount(
	xfs_mount_t		*mp,
	xlog_rec_header_t	*head)
{
	ASSERT(head->h_magicno == cpu_to_be32(XLOG_HEADER_MAGIC_NUM));

	if (uuid_is_null(&head->h_fs_uuid)) {
		/*
		 * IRIX doesn't write the h_fs_uuid or h_fmt fields. If
		 * h_fs_uuid is null, we assume this log was last mounted
		 * by IRIX and continue.
		 */
		xfs_warn(mp, "null uuid in log - IRIX style log");
	} else if (XFS_IS_CORRUPT(mp, !uuid_equal(&mp->m_sb.sb_uuid,
						  &head->h_fs_uuid))) {
		xfs_warn(mp, "log has mismatched uuid - can't recover");
		xlog_header_check_dump(mp, head);
		return -EFSCORRUPTED;
	}
	return 0;
}
void
xlog_recover_iodone(
	struct xfs_buf	*bp)
{
	if (bp->b_error) {
		/*
		 * We're not going to bother about retrying
		 * this during recovery. One strike!
		 */
		if (!XFS_FORCED_SHUTDOWN(bp->b_mount)) {
			xfs_buf_ioerror_alert(bp, __this_address);
			xfs_force_shutdown(bp->b_mount, SHUTDOWN_META_IO_ERROR);
		}
	}

	/*
	 * On v5 supers, a bli could be attached to update the metadata LSN.
	 * Clean it up.
	 */
	if (bp->b_log_item)
		xfs_buf_item_relse(bp);
	ASSERT(bp->b_log_item == NULL);

	bp->b_iodone = NULL;
	xfs_buf_ioend(bp);
}
/*
 * This routine finds (to an approximation) the first block in the physical
 * log which contains the given cycle.  It uses a binary search algorithm.
 * Note that the algorithm can not be perfect because the disk will not
 * necessarily be perfect.
 */
STATIC int
xlog_find_cycle_start(
	struct xlog	*log,
	char		*buffer,
	xfs_daddr_t	first_blk,
	xfs_daddr_t	*last_blk,
	uint		cycle)
{
	char		*offset;
	xfs_daddr_t	mid_blk;
	xfs_daddr_t	end_blk;
	uint		mid_cycle;
	int		error;

	end_blk = *last_blk;
	mid_blk = BLK_AVG(first_blk, end_blk);
	while (mid_blk != first_blk && mid_blk != end_blk) {
		error = xlog_bread(log, mid_blk, 1, buffer, &offset);
		if (error)
			return error;
		mid_cycle = xlog_get_cycle(offset);
		if (mid_cycle == cycle)
			end_blk = mid_blk;   /* last_half_cycle == mid_cycle */
		else
			first_blk = mid_blk; /* first_half_cycle == mid_cycle */
		mid_blk = BLK_AVG(first_blk, end_blk);
	}
	ASSERT((mid_blk == first_blk && mid_blk+1 == end_blk) ||
	       (mid_blk == end_blk && mid_blk-1 == first_blk));

	*last_blk = end_blk;

	return 0;
}
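
/*
 * Usage sketch (hypothetical log contents, never compiled): for a 16 block
 * log stamped 2 2 2 2 2 1 1 1 1 1 1 1 1 1 1 1, searching for the first
 * block with cycle 1 narrows *last_blk from 15 down to 5 after reads at
 * blocks 7, 3, 5 and 4. The answer is approximate by design; the callers
 * below re-verify a window of blocks around it with
 * xlog_find_verify_cycle().
 */
#if 0
	xfs_daddr_t	last_blk = 15;

	error = xlog_find_cycle_start(log, buffer, 0, &last_blk, 1);
	/* on success, last_blk == 5, the first block carrying cycle 1 */
#endif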
/*
 * Check that a range of blocks does not contain stop_on_cycle_no.
 * Fill in *new_blk with the block offset where such a block is
 * found, or with -1 (an invalid block number) if there is no such
 * block in the range.  The scan needs to occur from front to back
 * and the pointer into the region must be updated since a later
 * routine will need to perform another test.
 */
STATIC int
xlog_find_verify_cycle(
	struct xlog	*log,
	xfs_daddr_t	start_blk,
	int		nbblks,
	uint		stop_on_cycle_no,
	xfs_daddr_t	*new_blk)
{
	xfs_daddr_t	i, j;
	uint		cycle;
	char		*buffer;
	xfs_daddr_t	bufblks;
	char		*buf = NULL;
	int		error = 0;

	/*
	 * Greedily allocate a buffer big enough to handle the full
	 * range of basic blocks we'll be examining.  If that fails,
	 * try a smaller size.  We need to be able to read at least
	 * a log sector, or we're out of luck.
	 */
	bufblks = 1 << ffs(nbblks);
	while (bufblks > log->l_logBBsize)
		bufblks >>= 1;
	while (!(buffer = xlog_alloc_buffer(log, bufblks))) {
		bufblks >>= 1;
		if (bufblks < log->l_sectBBsize)
			return -ENOMEM;
	}

	for (i = start_blk; i < start_blk + nbblks; i += bufblks) {
		int	bcount;

		bcount = min(bufblks, (start_blk + nbblks - i));

		error = xlog_bread(log, i, bcount, buffer, &buf);
		if (error)
			goto out;

		for (j = 0; j < bcount; j++) {
			cycle = xlog_get_cycle(buf);
			if (cycle == stop_on_cycle_no) {
				*new_blk = i+j;
				goto out;
			}

			buf += BBSIZE;
		}
	}

	*new_blk = -1;

out:
	kmem_free(buffer);
	return error;
}
/*
 * Potentially backup over partial log record write.
 *
 * In the typical case, last_blk is the number of the block directly after
 * a good log record.  Therefore, we subtract one to get the block number
 * of the last block in the given buffer.  extra_bblks contains the number
 * of blocks we would have read on a previous read.  This happens when the
 * last log record is split over the end of the physical log.
 *
 * extra_bblks is the number of blocks potentially verified on a previous
 * call to this routine.
 */
STATIC int
xlog_find_verify_log_record(
	struct xlog		*log,
	xfs_daddr_t		start_blk,
	xfs_daddr_t		*last_blk,
	int			extra_bblks)
{
	xfs_daddr_t		i;
	char			*buffer;
	char			*offset = NULL;
	xlog_rec_header_t	*head = NULL;
	int			error = 0;
	int			smallmem = 0;
	int			num_blks = *last_blk - start_blk;
	int			xhdrs;

	ASSERT(start_blk != 0 || *last_blk != start_blk);

	buffer = xlog_alloc_buffer(log, num_blks);
	if (!buffer) {
		buffer = xlog_alloc_buffer(log, 1);
		if (!buffer)
			return -ENOMEM;
		smallmem = 1;
	} else {
		error = xlog_bread(log, start_blk, num_blks, buffer, &offset);
		if (error)
			goto out;
		offset += ((num_blks - 1) << BBSHIFT);
	}

	for (i = (*last_blk) - 1; i >= 0; i--) {
		if (i < start_blk) {
			/* valid log record not found */
			xfs_warn(log->l_mp,
		"Log inconsistent (didn't find previous header)");
			ASSERT(0);
			error = -EFSCORRUPTED;
			goto out;
		}

		if (smallmem) {
			error = xlog_bread(log, i, 1, buffer, &offset);
			if (error)
				goto out;
		}

		head = (xlog_rec_header_t *)offset;

		if (head->h_magicno == cpu_to_be32(XLOG_HEADER_MAGIC_NUM))
			break;

		if (!smallmem)
			offset -= BBSIZE;
	}

	/*
	 * We hit the beginning of the physical log & still no header.  Return
	 * to caller.  If caller can handle a return of -1, then this routine
	 * will be called again for the end of the physical log.
	 */
	if (i == -1) {
		error = 1;
		goto out;
	}

	/*
	 * We have the final block of the good log (the first block
	 * of the log record _before_ the head. So we check the uuid.
	 */
	if ((error = xlog_header_check_mount(log->l_mp, head)))
		goto out;

	/*
	 * We may have found a log record header before we expected one.
	 * last_blk will be the 1st block # with a given cycle #.  We may end
	 * up reading an entire log record.  In this case, we don't want to
	 * reset last_blk.  Only when last_blk points in the middle of a log
	 * record do we update last_blk.
	 */
	if (xfs_sb_version_haslogv2(&log->l_mp->m_sb)) {
		uint	h_size = be32_to_cpu(head->h_size);

		xhdrs = h_size / XLOG_HEADER_CYCLE_SIZE;
		if (h_size % XLOG_HEADER_CYCLE_SIZE)
			xhdrs++;
	} else {
		xhdrs = 1;
	}

	if (*last_blk - i + extra_bblks !=
	    BTOBB(be32_to_cpu(head->h_len)) + xhdrs)
		*last_blk = i;

out:
	kmem_free(buffer);
	return error;
}
/*
 * Head is defined to be the point of the log where the next log write
 * could go.  This means that incomplete LR writes at the end are
 * eliminated when calculating the head.  We aren't guaranteed that previous
 * LR have complete transactions.  We only know that a cycle number of
 * current cycle number -1 won't be present in the log if we start writing
 * from our current block number.
 *
 * last_blk contains the block number of the first block with a given
 * cycle number.
 *
 * Return: zero if normal, non-zero if error.
 */
STATIC int
xlog_find_head(
	struct xlog	*log,
	xfs_daddr_t	*return_head_blk)
{
	char		*buffer;
	char		*offset;
	xfs_daddr_t	new_blk, first_blk, start_blk, last_blk, head_blk;
	int		num_scan_bblks;
	uint		first_half_cycle, last_half_cycle;
	uint		stop_on_cycle;
	int		error, log_bbnum = log->l_logBBsize;

	/* Is the end of the log device zeroed? */
	error = xlog_find_zeroed(log, &first_blk);
	if (error < 0) {
		xfs_warn(log->l_mp, "empty log check failed");
		return error;
	}
	if (error == 1) {
		*return_head_blk = first_blk;

		/* Is the whole lot zeroed? */
		if (!first_blk) {
			/* Linux XFS shouldn't generate totally zeroed logs -
			 * mkfs etc write a dummy unmount record to a fresh
			 * log so we can store the uuid in there
			 */
			xfs_warn(log->l_mp, "totally zeroed log");
		}

		return 0;
	}

	first_blk = 0;			/* get cycle # of 1st block */
	buffer = xlog_alloc_buffer(log, 1);
	if (!buffer)
		return -ENOMEM;

	error = xlog_bread(log, 0, 1, buffer, &offset);
	if (error)
		goto out_free_buffer;

	first_half_cycle = xlog_get_cycle(offset);

	last_blk = head_blk = log_bbnum - 1;	/* get cycle # of last block */
	error = xlog_bread(log, last_blk, 1, buffer, &offset);
	if (error)
		goto out_free_buffer;

	last_half_cycle = xlog_get_cycle(offset);
	ASSERT(last_half_cycle != 0);

	/*
	 * If the 1st half cycle number is equal to the last half cycle number,
	 * then the entire log is stamped with the same cycle number.  In this
	 * case, head_blk can't be set to zero (which makes sense).  The below
	 * math doesn't work out properly with head_blk equal to zero.  Instead,
	 * we set it to log_bbnum which is an invalid block number, but this
	 * value makes the math correct.  If head_blk doesn't change through
	 * all the tests below, *head_blk is set to zero at the very end rather
	 * than log_bbnum.  In a sense, log_bbnum and zero are the same block
	 * in a circular file.
	 */
	if (first_half_cycle == last_half_cycle) {
		/*
		 * In this case we believe that the entire log should have
		 * cycle number last_half_cycle.  We need to scan backwards
		 * from the end verifying that there are no holes still
		 * containing last_half_cycle - 1.  If we find such a hole,
		 * then the start of that hole will be the new head.  The
		 * simple case looks like
		 *        x | x ... | x - 1 | x
		 * Another case that fits this picture would be
		 *        x | x + 1 | x ... | x
		 * In this case the head really is somewhere at the end of the
		 * log, as one of the latest writes at the beginning was
		 * incomplete.
		 * One more case is
		 *        x | x + 1 | x ... | x - 1 | x
		 * This is really the combination of the above two cases, and
		 * the head has to end up at the start of the x-1 hole at the
		 * end of the log.
		 *
		 * In the 256k log case, we will read from the beginning to the
		 * end of the log and search for cycle numbers equal to x-1.
		 * We don't worry about the x+1 blocks that we encounter,
		 * because we know that they cannot be the head since the log
		 * started with x.
		 */
		head_blk = log_bbnum;
		stop_on_cycle = last_half_cycle - 1;
	} else {
		/*
		 * In this case we want to find the first block with cycle
		 * number matching last_half_cycle.  We expect the log to be
		 * some variation on
		 *        x + 1 ... | x ... | x
		 * The first block with cycle number x (last_half_cycle) will
		 * be where the new head belongs.  First we do a binary search
		 * for the first occurrence of last_half_cycle.  The binary
		 * search may not be totally accurate, so then we scan back
		 * from there looking for occurrences of last_half_cycle before
		 * us.  If that backwards scan wraps around the beginning of
		 * the log, then we look for occurrences of last_half_cycle - 1
		 * at the end of the log.  The cases we're looking for look
		 * like
		 *                               v binary search stopped here
		 *        x + 1 ... | x | x + 1 | x ... | x
		 *                   ^ but we want to locate this spot
		 * or
		 *        <---------> less than scan distance
		 *        x + 1 ... | x ... | x - 1 | x
		 *                           ^ we want to locate this spot
		 */
		stop_on_cycle = last_half_cycle;
		error = xlog_find_cycle_start(log, buffer, first_blk, &head_blk,
				last_half_cycle);
		if (error)
			goto out_free_buffer;
	}

	/*
	 * Now validate the answer.  Scan back some number of maximum possible
	 * blocks and make sure each one has the expected cycle number.  The
	 * maximum is determined by the total possible amount of buffering
	 * in the in-core log.  The following number can be made tighter if
	 * we actually look at the block size of the filesystem.
	 */
	num_scan_bblks = min_t(int, log_bbnum, XLOG_TOTAL_REC_SHIFT(log));
	if (head_blk >= num_scan_bblks) {
		/*
		 * We are guaranteed that the entire check can be performed
		 * in one buffer.
		 */
		start_blk = head_blk - num_scan_bblks;
		if ((error = xlog_find_verify_cycle(log,
						start_blk, num_scan_bblks,
						stop_on_cycle, &new_blk)))
			goto out_free_buffer;
		if (new_blk != -1)
			head_blk = new_blk;
	} else {		/* need to read 2 parts of log */
		/*
		 * We are going to scan backwards in the log in two parts.
		 * First we scan the physical end of the log.  In this part
		 * of the log, we are looking for blocks with cycle number
		 * last_half_cycle - 1.
		 * If we find one, then we know that the log starts there, as
		 * we've found a hole that didn't get written in going around
		 * the end of the physical log.  The simple case for this is
		 *        x + 1 ... | x ... | x - 1 | x
		 *        <---------> less than scan distance
		 * If all of the blocks at the end of the log have cycle number
		 * last_half_cycle, then we check the blocks at the start of
		 * the log looking for occurrences of last_half_cycle.  If we
		 * find one, then our current estimate for the location of the
		 * first occurrence of last_half_cycle is wrong and we move
		 * back to the hole we've found.  This case looks like
		 *        x + 1 ... | x | x + 1 | x ...
		 *                               ^ binary search stopped here
		 * Another case we need to handle that only occurs in 256k
		 * logs is
		 *        x + 1 ... | x ... | x+1 | x ...
		 *                   ^ binary search stops here
		 * In a 256k log, the scan at the end of the log will see the
		 * x + 1 blocks.  We need to skip past those since that is
		 * certainly not the head of the log.  By searching for
		 * last_half_cycle-1 we accomplish that.
		 */
		ASSERT(head_blk <= INT_MAX &&
			(xfs_daddr_t) num_scan_bblks >= head_blk);
		start_blk = log_bbnum - (num_scan_bblks - head_blk);
		if ((error = xlog_find_verify_cycle(log, start_blk,
					num_scan_bblks - (int)head_blk,
					(stop_on_cycle - 1), &new_blk)))
			goto out_free_buffer;
		if (new_blk != -1) {
			head_blk = new_blk;
			goto validate_head;
		}

		/*
		 * Scan beginning of log now.  The last part of the physical
		 * log is good.  This scan needs to verify that it doesn't find
		 * the last_half_cycle.
		 */
		start_blk = 0;
		ASSERT(head_blk <= INT_MAX);
		if ((error = xlog_find_verify_cycle(log,
					start_blk, (int)head_blk,
					stop_on_cycle, &new_blk)))
			goto out_free_buffer;
		if (new_blk != -1)
			head_blk = new_blk;
	}

validate_head:
	/*
	 * Now we need to make sure head_blk is not pointing to a block in
	 * the middle of a log record.
	 */
	num_scan_bblks = XLOG_REC_SHIFT(log);
	if (head_blk >= num_scan_bblks) {
		start_blk = head_blk - num_scan_bblks; /* don't read head_blk */

		/* start ptr at last block ptr before head_blk */
		error = xlog_find_verify_log_record(log, start_blk, &head_blk, 0);
		if (error == 1)
			error = -EIO;
		if (error)
			goto out_free_buffer;
	} else {
		start_blk = 0;
		ASSERT(head_blk <= INT_MAX);
		error = xlog_find_verify_log_record(log, start_blk, &head_blk, 0);
		if (error < 0)
			goto out_free_buffer;
		if (error == 1) {
			/* We hit the beginning of the log during our search */
			start_blk = log_bbnum - (num_scan_bblks - head_blk);
			new_blk = log_bbnum;
			ASSERT(start_blk <= INT_MAX &&
				(xfs_daddr_t) log_bbnum-start_blk >= 0);
			ASSERT(head_blk <= INT_MAX);
			error = xlog_find_verify_log_record(log, start_blk,
							&new_blk, (int)head_blk);
			if (error == 1)
				error = -EIO;
			if (error)
				goto out_free_buffer;
			if (new_blk != log_bbnum)
				head_blk = new_blk;
		} else if (error)
			goto out_free_buffer;
	}

	kmem_free(buffer);
	if (head_blk == log_bbnum)
		*return_head_blk = 0;
	else
		*return_head_blk = head_blk;
	/*
	 * When returning here, we have a good block number.  Bad block
	 * means that during a previous crash, we didn't have a clean break
	 * from cycle number N to cycle number N-1.  In this case, we need
	 * to find the first block with cycle number N-1.
	 */
	return 0;

out_free_buffer:
	kmem_free(buffer);
	if (error)
		xfs_warn(log->l_mp, "failed to find log head");
	return error;
}
/*
 * Seek backwards in the log for log record headers.
 *
 * Given a starting log block, walk backwards until we find the provided number
 * of records or hit the provided tail block. The return value is the number of
 * records encountered or a negative error code. The log block and buffer
 * pointer of the last record seen are returned in rblk and rhead respectively.
 */
STATIC int
xlog_rseek_logrec_hdr(
	struct xlog		*log,
	xfs_daddr_t		head_blk,
	xfs_daddr_t		tail_blk,
	int			count,
	char			*buffer,
	xfs_daddr_t		*rblk,
	struct xlog_rec_header	**rhead,
	bool			*wrapped)
{
	int			i;
	int			error;
	int			found = 0;
	char			*offset = NULL;
	xfs_daddr_t		end_blk;

	*wrapped = false;

	/*
	 * Walk backwards from the head block until we hit the tail or the first
	 * block in the log.
	 */
	end_blk = head_blk > tail_blk ? tail_blk : 0;
	for (i = (int) head_blk - 1; i >= end_blk; i--) {
		error = xlog_bread(log, i, 1, buffer, &offset);
		if (error)
			goto out_error;

		if (*(__be32 *) offset == cpu_to_be32(XLOG_HEADER_MAGIC_NUM)) {
			*rblk = i;
			*rhead = (struct xlog_rec_header *) offset;
			if (++found == count)
				break;
		}
	}

	/*
	 * If we haven't hit the tail block or the log record header count,
	 * start looking again from the end of the physical log. Note that
	 * callers can pass head == tail if the tail is not yet known.
	 */
	if (tail_blk >= head_blk && found != count) {
		for (i = log->l_logBBsize - 1; i >= (int) tail_blk; i--) {
			error = xlog_bread(log, i, 1, buffer, &offset);
			if (error)
				goto out_error;

			if (*(__be32 *)offset ==
			    cpu_to_be32(XLOG_HEADER_MAGIC_NUM)) {
				*wrapped = true;
				*rblk = i;
				*rhead = (struct xlog_rec_header *) offset;
				if (++found == count)
					break;
			}
		}
	}

	return found;

out_error:
	return error;
}
/*
 * Seek forward in the log for log record headers.
 *
 * Given head and tail blocks, walk forward from the tail block until we find
 * the provided number of records or hit the head block. The return value is the
 * number of records encountered or a negative error code. The log block and
 * buffer pointer of the last record seen are returned in rblk and rhead
 * respectively.
 */
STATIC int
xlog_seek_logrec_hdr(
	struct xlog		*log,
	xfs_daddr_t		head_blk,
	xfs_daddr_t		tail_blk,
	int			count,
	char			*buffer,
	xfs_daddr_t		*rblk,
	struct xlog_rec_header	**rhead,
	bool			*wrapped)
{
	int			i;
	int			error;
	int			found = 0;
	char			*offset = NULL;
	xfs_daddr_t		end_blk;

	*wrapped = false;

	/*
	 * Walk forward from the tail block until we hit the head or the last
	 * block in the log.
	 */
	end_blk = head_blk > tail_blk ? head_blk : log->l_logBBsize - 1;
	for (i = (int) tail_blk; i <= end_blk; i++) {
		error = xlog_bread(log, i, 1, buffer, &offset);
		if (error)
			goto out_error;

		if (*(__be32 *) offset == cpu_to_be32(XLOG_HEADER_MAGIC_NUM)) {
			*rblk = i;
			*rhead = (struct xlog_rec_header *) offset;
			if (++found == count)
				break;
		}
	}

	/*
	 * If we haven't hit the head block or the log record header count,
	 * start looking again from the start of the physical log.
	 */
	if (tail_blk > head_blk && found != count) {
		for (i = 0; i < (int) head_blk; i++) {
			error = xlog_bread(log, i, 1, buffer, &offset);
			if (error)
				goto out_error;

			if (*(__be32 *)offset ==
			    cpu_to_be32(XLOG_HEADER_MAGIC_NUM)) {
				*wrapped = true;
				*rblk = i;
				*rhead = (struct xlog_rec_header *) offset;
				if (++found == count)
					break;
			}
		}
	}

	return found;

out_error:
	return error;
}
/*
 * Calculate distance from head to tail (i.e., unused space in the log).
 */
static inline int
xlog_tail_distance(
	struct xlog	*log,
	xfs_daddr_t	head_blk,
	xfs_daddr_t	tail_blk)
{
	if (head_blk < tail_blk)
		return tail_blk - head_blk;

	return tail_blk + (log->l_logBBsize - head_blk);
}
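
/*
 * Worked example (assumed a struct xlog "tlog" with l_logBBsize == 100;
 * illustration only, never compiled): when the head has wrapped past the
 * tail the distance goes around the end of the device, otherwise it is a
 * simple difference.
 */
#if 0
	ASSERT(xlog_tail_distance(tlog, 90, 10) == 20);	/* head wrapped */
	ASSERT(xlog_tail_distance(tlog, 10, 90) == 80);	/* head behind tail */
#endif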
/*
 * Verify the log tail. This is particularly important when torn or incomplete
 * writes have been detected near the front of the log and the head has been
 * walked back accordingly.
 *
 * We also have to handle the case where the tail was pinned and the head
 * blocked behind the tail right before a crash. If the tail had been pushed
 * immediately prior to the crash and the subsequent checkpoint was only
 * partially written, it's possible it overwrote the last referenced tail in the
 * log with garbage. This is not a coherency problem because the tail must have
 * been pushed before it can be overwritten, but appears as log corruption to
 * recovery because we have no way to know the tail was updated if the
 * subsequent checkpoint didn't write successfully.
 *
 * Therefore, CRC check the log from tail to head. If a failure occurs and the
 * offending record is within max iclog bufs from the head, walk the tail
 * forward and retry until a valid tail is found or corruption is detected out
 * of the range of a possible overwrite.
 */
STATIC int
xlog_verify_tail(
	struct xlog		*log,
	xfs_daddr_t		head_blk,
	xfs_daddr_t		*tail_blk,
	int			hsize)
{
	struct xlog_rec_header	*thead;
	char			*buffer;
	xfs_daddr_t		first_bad;
	int			error = 0;
	bool			wrapped;
	xfs_daddr_t		tmp_tail;
	xfs_daddr_t		orig_tail = *tail_blk;

	buffer = xlog_alloc_buffer(log, 1);
	if (!buffer)
		return -ENOMEM;

	/*
	 * Make sure the tail points to a record (returns positive count on
	 * success).
	 */
	error = xlog_seek_logrec_hdr(log, head_blk, *tail_blk, 1, buffer,
			&tmp_tail, &thead, &wrapped);
	if (error < 0)
		goto out;
	if (*tail_blk != tmp_tail)
		*tail_blk = tmp_tail;

	/*
	 * Run a CRC check from the tail to the head. We can't just check
	 * MAX_ICLOGS records past the tail because the tail may point to stale
	 * blocks cleared during the search for the head/tail. These blocks are
	 * overwritten with zero-length records and thus record count is not a
	 * reliable indicator of the iclog state before a crash.
	 */
	first_bad = 0;
	error = xlog_do_recovery_pass(log, head_blk, *tail_blk,
				      XLOG_RECOVER_CRCPASS, &first_bad);
	while ((error == -EFSBADCRC || error == -EFSCORRUPTED) && first_bad) {
		int	tail_distance;

		/*
		 * Is corruption within range of the head? If so, retry from
		 * the next record. Otherwise return an error.
		 */
		tail_distance = xlog_tail_distance(log, head_blk, first_bad);
		if (tail_distance > BTOBB(XLOG_MAX_ICLOGS * hsize))
			break;

		/* skip to the next record; returns positive count on success */
		error = xlog_seek_logrec_hdr(log, head_blk, first_bad, 2,
				buffer, &tmp_tail, &thead, &wrapped);
		if (error < 0)
			goto out;

		*tail_blk = tmp_tail;
		first_bad = 0;
		error = xlog_do_recovery_pass(log, head_blk, *tail_blk,
					      XLOG_RECOVER_CRCPASS, &first_bad);
	}

	if (!error && *tail_blk != orig_tail)
		xfs_warn(log->l_mp,
		"Tail block (0x%llx) overwrite detected. Updated to 0x%llx",
			 orig_tail, *tail_blk);
out:
	kmem_free(buffer);
	return error;
}
/*
 * Detect and trim torn writes from the head of the log.
 *
 * Storage without sector atomicity guarantees can result in torn writes in the
 * log in the event of a crash. Our only means to detect this scenario is via
 * CRC verification. While we can't always be certain that CRC verification
 * failure is due to a torn write vs. an unrelated corruption, we do know that
 * only a certain number (XLOG_MAX_ICLOGS) of log records can be written out at
 * one time. Therefore, CRC verify up to XLOG_MAX_ICLOGS records at the head of
 * the log and treat failures in this range as torn writes as a matter of
 * policy. In the event of CRC failure, the head is walked back to the last good
 * record in the log and the tail is updated from that record and verified.
 */
STATIC int
xlog_verify_head(
	struct xlog		*log,
	xfs_daddr_t		*head_blk,	/* in/out: unverified head */
	xfs_daddr_t		*tail_blk,	/* out: tail block */
	char			*buffer,
	xfs_daddr_t		*rhead_blk,	/* start blk of last record */
	struct xlog_rec_header	**rhead,	/* ptr to last record */
	bool			*wrapped)	/* last rec. wraps phys. log */
{
	struct xlog_rec_header	*tmp_rhead;
	char			*tmp_buffer;
	xfs_daddr_t		first_bad;
	xfs_daddr_t		tmp_rhead_blk;
	int			found;
	int			error;
	bool			tmp_wrapped;

	/*
	 * Check the head of the log for torn writes. Search backwards from the
	 * head until we hit the tail or the maximum number of log record I/Os
	 * that could have been in flight at one time. Use a temporary buffer so
	 * we don't trash the rhead/buffer pointers from the caller.
	 */
	tmp_buffer = xlog_alloc_buffer(log, 1);
	if (!tmp_buffer)
		return -ENOMEM;
	error = xlog_rseek_logrec_hdr(log, *head_blk, *tail_blk,
				      XLOG_MAX_ICLOGS, tmp_buffer,
				      &tmp_rhead_blk, &tmp_rhead, &tmp_wrapped);
	kmem_free(tmp_buffer);
	if (error < 0)
		return error;

	/*
	 * Now run a CRC verification pass over the records starting at the
	 * block found above to the current head. If a CRC failure occurs, the
	 * log block of the first bad record is saved in first_bad.
	 */
	error = xlog_do_recovery_pass(log, *head_blk, tmp_rhead_blk,
				      XLOG_RECOVER_CRCPASS, &first_bad);
	if ((error == -EFSBADCRC || error == -EFSCORRUPTED) && first_bad) {
		/*
		 * We've hit a potential torn write. Reset the error and warn
		 * about it.
		 */
		error = 0;
		xfs_warn(log->l_mp,
"Torn write (CRC failure) detected at log block 0x%llx. Truncating head block from 0x%llx.",
			 first_bad, *head_blk);

		/*
		 * Get the header block and buffer pointer for the last good
		 * record before the bad record.
		 *
		 * Note that xlog_find_tail() clears the blocks at the new head
		 * (i.e., the records with invalid CRC) if the cycle number
		 * matches the current cycle.
		 */
		found = xlog_rseek_logrec_hdr(log, first_bad, *tail_blk, 1,
				buffer, rhead_blk, rhead, wrapped);
		if (found < 0)
			return found;
		if (found == 0)		/* XXX: right thing to do here? */
			return -EIO;

		/*
		 * Reset the head block to the starting block of the first bad
		 * log record and set the tail block based on the last good
		 * record.
		 *
		 * Bail out if the updated head/tail match as this indicates
		 * possible corruption outside of the acceptable
		 * (XLOG_MAX_ICLOGS) range. This is a job for xfs_repair...
		 */
		*head_blk = first_bad;
		*tail_blk = BLOCK_LSN(be64_to_cpu((*rhead)->h_tail_lsn));
		if (*head_blk == *tail_blk) {
			ASSERT(0);
			return 0;
		}
	}
	if (error)
		return error;

	return xlog_verify_tail(log, *head_blk, tail_blk,
				be32_to_cpu((*rhead)->h_size));
}
/*
 * We need to make sure we handle log wrapping properly, so we can't use the
 * calculated logbno directly. Make sure it wraps to the correct bno inside the
 * log.
 *
 * The log is limited to 32 bit sizes, so we use the appropriate modulus
 * operation here and cast it back to a 64 bit daddr on return.
 */
static inline xfs_daddr_t
xlog_wrap_logbno(
	struct xlog		*log,
	xfs_daddr_t		bno)
{
	int			mod;

	div_s64_rem(bno, log->l_logBBsize, &mod);
	return mod;
}
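
/*
 * Worked example (assumed a struct xlog "tlog" with l_logBBsize == 1000;
 * illustration only, never compiled): a block number computed a few blocks
 * past the physical end of the log wraps back to the front.
 */
#if 0
	ASSERT(xlog_wrap_logbno(tlog, 998 + 5) == 3);
	ASSERT(xlog_wrap_logbno(tlog, 999) == 999);
#endif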
/*
 * Check whether the head of the log points to an unmount record. In other
 * words, determine whether the log is clean. If so, update the in-core state
 * appropriately.
 */
static int
xlog_check_unmount_rec(
	struct xlog		*log,
	xfs_daddr_t		*head_blk,
	xfs_daddr_t		*tail_blk,
	struct xlog_rec_header	*rhead,
	xfs_daddr_t		rhead_blk,
	char			*buffer,
	bool			*clean)
{
	struct xlog_op_header	*op_head;
	xfs_daddr_t		umount_data_blk;
	xfs_daddr_t		after_umount_blk;
	int			hblks;
	int			error;
	char			*offset;

	*clean = false;

	/*
	 * Look for unmount record. If we find it, then we know there was a
	 * clean unmount. Since 'i' could be the last block in the physical
	 * log, we convert to a log block before comparing to the head_blk.
	 *
	 * Save the current tail lsn to use to pass to xlog_clear_stale_blocks()
	 * below. We won't want to clear the unmount record if there is one, so
	 * we pass the lsn of the unmount record rather than the block after it.
	 */
	if (xfs_sb_version_haslogv2(&log->l_mp->m_sb)) {
		int	h_size = be32_to_cpu(rhead->h_size);
		int	h_version = be32_to_cpu(rhead->h_version);

		if ((h_version & XLOG_VERSION_2) &&
		    (h_size > XLOG_HEADER_CYCLE_SIZE)) {
			hblks = h_size / XLOG_HEADER_CYCLE_SIZE;
			if (h_size % XLOG_HEADER_CYCLE_SIZE)
				hblks++;
		} else {
			hblks = 1;
		}
	} else {
		hblks = 1;
	}

	after_umount_blk = xlog_wrap_logbno(log,
			rhead_blk + hblks + BTOBB(be32_to_cpu(rhead->h_len)));

	if (*head_blk == after_umount_blk &&
	    be32_to_cpu(rhead->h_num_logops) == 1) {
		umount_data_blk = xlog_wrap_logbno(log, rhead_blk + hblks);
		error = xlog_bread(log, umount_data_blk, 1, buffer, &offset);
		if (error)
			return error;

		op_head = (struct xlog_op_header *)offset;
		if (op_head->oh_flags & XLOG_UNMOUNT_TRANS) {
			/*
			 * Set tail and last sync so that newly written log
			 * records will point recovery to after the current
			 * unmount record.
			 */
			xlog_assign_atomic_lsn(&log->l_tail_lsn,
					log->l_curr_cycle, after_umount_blk);
			xlog_assign_atomic_lsn(&log->l_last_sync_lsn,
					log->l_curr_cycle, after_umount_blk);
			*tail_blk = after_umount_blk;

			*clean = true;
		}
	}

	return 0;
}
static void
xlog_set_state(
	struct xlog		*log,
	xfs_daddr_t		head_blk,
	struct xlog_rec_header	*rhead,
	xfs_daddr_t		rhead_blk,
	bool			bump_cycle)
{
	/*
	 * Reset log values according to the state of the log when we
	 * crashed.  In the case where head_blk == 0, we bump curr_cycle
	 * one because the next write starts a new cycle rather than
	 * continuing the cycle of the last good log record.  At this
	 * point we have guaranteed that all partial log records have been
	 * accounted for.  Therefore, we know that the last good log record
	 * written was complete and ended exactly on the end boundary
	 * of the physical log.
	 */
	log->l_prev_block = rhead_blk;
	log->l_curr_block = (int)head_blk;
	log->l_curr_cycle = be32_to_cpu(rhead->h_cycle);
	if (bump_cycle)
		log->l_curr_cycle++;
	atomic64_set(&log->l_tail_lsn, be64_to_cpu(rhead->h_tail_lsn));
	atomic64_set(&log->l_last_sync_lsn, be64_to_cpu(rhead->h_lsn));
	xlog_assign_grant_head(&log->l_reserve_head.grant, log->l_curr_cycle,
					BBTOB(log->l_curr_block));
	xlog_assign_grant_head(&log->l_write_head.grant, log->l_curr_cycle,
					BBTOB(log->l_curr_block));
}
/*
 * Find the sync block number or the tail of the log.
 *
 * This will be the block number of the last record to have its
 * associated buffers synced to disk.  Every log record header has
 * a sync lsn embedded in it.  LSNs hold block numbers, so it is easy
 * to get a sync block number.  The only concern is to figure out which
 * log record header to believe.
 *
 * The following algorithm uses the log record header with the largest
 * lsn.  The entire log record does not need to be valid.  We only care
 * that the header is valid.
 *
 * We could speed up search by using current head_blk buffer, but it is not
 * available.
 */
STATIC int
xlog_find_tail(
	struct xlog		*log,
	xfs_daddr_t		*head_blk,
	xfs_daddr_t		*tail_blk)
{
	xlog_rec_header_t	*rhead;
	char			*offset = NULL;
	char			*buffer;
	int			error;
	xfs_daddr_t		rhead_blk;
	xfs_lsn_t		tail_lsn;
	bool			wrapped = false;
	bool			clean = false;

	/*
	 * Find previous log record
	 */
	if ((error = xlog_find_head(log, head_blk)))
		return error;
	ASSERT(*head_blk < INT_MAX);

	buffer = xlog_alloc_buffer(log, 1);
	if (!buffer)
		return -ENOMEM;
	if (*head_blk == 0) {				/* special case */
		error = xlog_bread(log, 0, 1, buffer, &offset);
		if (error)
			goto done;

		if (xlog_get_cycle(offset) == 0) {
			*tail_blk = 0;
			/* leave all other log inited values alone */
			goto done;
		}
	}

	/*
	 * Search backwards through the log looking for the log record header
	 * block. This wraps all the way back around to the head so something is
	 * seriously wrong if we can't find it.
	 */
	error = xlog_rseek_logrec_hdr(log, *head_blk, *head_blk, 1, buffer,
				      &rhead_blk, &rhead, &wrapped);
	if (error < 0)
		goto done;
	if (!error) {
		xfs_warn(log->l_mp, "%s: couldn't find sync record", __func__);
		error = -EFSCORRUPTED;
		goto done;
	}
	*tail_blk = BLOCK_LSN(be64_to_cpu(rhead->h_tail_lsn));

	/*
	 * Set the log state based on the current head record.
	 */
	xlog_set_state(log, *head_blk, rhead, rhead_blk, wrapped);
	tail_lsn = atomic64_read(&log->l_tail_lsn);

	/*
	 * Look for an unmount record at the head of the log. This sets the log
	 * state to determine whether recovery is necessary.
	 */
	error = xlog_check_unmount_rec(log, head_blk, tail_blk, rhead,
				       rhead_blk, buffer, &clean);
	if (error)
		goto done;

	/*
	 * Verify the log head if the log is not clean (e.g., we have anything
	 * but an unmount record at the head). This uses CRC verification to
	 * detect and trim torn writes. If discovered, CRC failures are
	 * considered torn writes and the log head is trimmed accordingly.
	 *
	 * Note that we can only run CRC verification when the log is dirty
	 * because there's no guarantee that the log data behind an unmount
	 * record is compatible with the current architecture.
	 */
	if (!clean) {
		xfs_daddr_t	orig_head = *head_blk;

		error = xlog_verify_head(log, head_blk, tail_blk, buffer,
					 &rhead_blk, &rhead, &wrapped);
		if (error)
			goto done;

		/* update in-core state again if the head changed */
		if (*head_blk != orig_head) {
			xlog_set_state(log, *head_blk, rhead, rhead_blk,
				       wrapped);
			tail_lsn = atomic64_read(&log->l_tail_lsn);
			error = xlog_check_unmount_rec(log, head_blk, tail_blk,
						       rhead, rhead_blk, buffer,
						       &clean);
			if (error)
				goto done;
		}
	}

	/*
	 * Note that the unmount was clean. If the unmount was not clean, we
	 * need to know this to rebuild the superblock counters from the perag
	 * headers if we have a filesystem using non-persistent counters.
	 */
	if (clean)
		log->l_mp->m_flags |= XFS_MOUNT_WAS_CLEAN;

	/*
	 * Make sure that there are no blocks in front of the head
	 * with the same cycle number as the head.  This can happen
	 * because we allow multiple outstanding log writes concurrently,
	 * and the later writes might make it out before earlier ones.
	 *
	 * We use the lsn from before modifying it so that we'll never
	 * overwrite the unmount record after a clean unmount.
	 *
	 * Do this only if we are going to recover the filesystem
	 *
	 * NOTE: This used to say "if (!readonly)"
	 * However on Linux, we can & do recover a read-only filesystem.
	 * We only skip recovery if NORECOVERY is specified on mount,
	 * in which case we would not be here.
	 *
	 * But... if the -device- itself is readonly, just skip this.
	 * We can't recover this device anyway, so it won't matter.
	 */
	if (!xfs_readonly_buftarg(log->l_targ))
		error = xlog_clear_stale_blocks(log, tail_lsn);

done:
	kmem_free(buffer);

	if (error)
		xfs_warn(log->l_mp, "failed to locate log tail");
	return error;
}
/*
 * Is the log zeroed at all?
 *
 * The last binary search should be changed to perform an X block read
 * once X becomes small enough.  You can then search linearly through
 * the X blocks.  This will cut down on the number of reads we need to do.
 *
 * If the log is partially zeroed, this routine will pass back the blkno
 * of the first block with cycle number 0.  It won't have a complete LR
 * preceding it.
 *
 * Return:
 *	0  => the log is completely written to
 *	1 => use *blk_no as the first block of the log
 *	<0 => error has occurred
 */
STATIC int
xlog_find_zeroed(
	struct xlog	*log,
	xfs_daddr_t	*blk_no)
{
	char		*buffer;
	char		*offset;
	uint	        first_cycle, last_cycle;
	xfs_daddr_t	new_blk, last_blk, start_blk;
	xfs_daddr_t     num_scan_bblks;
	int	        error, log_bbnum = log->l_logBBsize;

	*blk_no = 0;

	/* check totally zeroed log */
	buffer = xlog_alloc_buffer(log, 1);
	if (!buffer)
		return -ENOMEM;
	error = xlog_bread(log, 0, 1, buffer, &offset);
	if (error)
		goto out_free_buffer;

	first_cycle = xlog_get_cycle(offset);
	if (first_cycle == 0) {		/* completely zeroed log */
		*blk_no = 0;
		kmem_free(buffer);
		return 1;
	}

	/* check partially zeroed log */
	error = xlog_bread(log, log_bbnum-1, 1, buffer, &offset);
	if (error)
		goto out_free_buffer;

	last_cycle = xlog_get_cycle(offset);
	if (last_cycle != 0) {		/* log completely written to */
		kmem_free(buffer);
		return 0;
	}

	/* we have a partially zeroed log */
	last_blk = log_bbnum-1;
	error = xlog_find_cycle_start(log, buffer, 0, &last_blk, 0);
	if (error)
		goto out_free_buffer;

	/*
	 * Validate the answer.  Because there is no way to guarantee that
	 * the entire log is made up of log records which are the same size,
	 * we scan over the defined maximum blocks.  At this point, the maximum
	 * is not chosen to mean anything special.   XXXmiken
	 */
	num_scan_bblks = XLOG_TOTAL_REC_SHIFT(log);
	ASSERT(num_scan_bblks <= INT_MAX);

	if (last_blk < num_scan_bblks)
		num_scan_bblks = last_blk;
	start_blk = last_blk - num_scan_bblks;

	/*
	 * We search for any instances of cycle number 0 that occur before
	 * our current estimate of the head.  What we're trying to detect is
	 *        1 ... | 0 | 1 | 0...
	 *                       ^ binary search ends here
	 */
	if ((error = xlog_find_verify_cycle(log, start_blk,
					 (int)num_scan_bblks, 0, &new_blk)))
		goto out_free_buffer;
	if (new_blk != -1)
		last_blk = new_blk;

	/*
	 * Potentially backup over partial log record write.  We don't need
	 * to search the end of the log because we know it is zero.
	 */
	error = xlog_find_verify_log_record(log, start_blk, &last_blk, 0);
	if (error == 1)
		error = -EIO;
	if (error < 0)
		goto out_free_buffer;

	*blk_no = last_blk;
out_free_buffer:
	kmem_free(buffer);
	if (error)
		return error;
	return 1;
}
/*
 * These are simple subroutines used by xlog_clear_stale_blocks() below
 * to initialize a buffer full of empty log record headers and write
 * them into the log.
 */
STATIC void
xlog_add_record(
	struct xlog		*log,
	char			*buf,
	int			cycle,
	int			block,
	int			tail_cycle,
	int			tail_block)
{
	xlog_rec_header_t	*recp = (xlog_rec_header_t *)buf;

	memset(buf, 0, BBSIZE);
	recp->h_magicno = cpu_to_be32(XLOG_HEADER_MAGIC_NUM);
	recp->h_cycle = cpu_to_be32(cycle);
	recp->h_version = cpu_to_be32(
			xfs_sb_version_haslogv2(&log->l_mp->m_sb) ? 2 : 1);
	recp->h_lsn = cpu_to_be64(xlog_assign_lsn(cycle, block));
	recp->h_tail_lsn = cpu_to_be64(xlog_assign_lsn(tail_cycle, tail_block));
	recp->h_fmt = cpu_to_be32(XLOG_FMT);
	memcpy(&recp->h_fs_uuid, &log->l_mp->m_sb.sb_uuid, sizeof(uuid_t));
}
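
/*
 * Usage sketch (hypothetical arguments, never compiled): each stomped
 * block becomes a stand-alone header with h_len left at zero, which is why
 * xlog_verify_tail() above can describe cleared regions as being
 * "overwritten with zero-length records".
 */
#if 0
	/* clear block 42 with cycle 6 while the tail sits at (5, 100) */
	xlog_add_record(log, offset, 6, 42, 5, 100);
#endif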
STATIC int
xlog_write_log_records(
	struct xlog	*log,
	int		cycle,
	int		start_block,
	int		blocks,
	int		tail_cycle,
	int		tail_block)
{
	char		*offset;
	char		*buffer;
	int		balign, ealign;
	int		sectbb = log->l_sectBBsize;
	int		end_block = start_block + blocks;
	int		bufblks;
	int		error = 0;
	int		i, j = 0;

	/*
	 * Greedily allocate a buffer big enough to handle the full
	 * range of basic blocks to be written.  If that fails, try
	 * a smaller size.  We need to be able to write at least a
	 * log sector, or we're out of luck.
	 */
	bufblks = 1 << ffs(blocks);
	while (bufblks > log->l_logBBsize)
		bufblks >>= 1;
	while (!(buffer = xlog_alloc_buffer(log, bufblks))) {
		bufblks >>= 1;
		if (bufblks < sectbb)
			return -ENOMEM;
	}

	/* We may need to do a read at the start to fill in part of
	 * the buffer in the starting sector not covered by the first
	 * write below.
	 */
	balign = round_down(start_block, sectbb);
	if (balign != start_block) {
		error = xlog_bread_noalign(log, start_block, 1, buffer);
		if (error)
			goto out_free_buffer;

		j = start_block - balign;
	}

	for (i = start_block; i < end_block; i += bufblks) {
		int		bcount, endcount;

		bcount = min(bufblks, end_block - start_block);
		endcount = bcount - j;

		/* We may need to do a read at the end to fill in part of
		 * the buffer in the final sector not covered by the write.
		 * If this is the same sector as the above read, skip it.
		 */
		ealign = round_down(end_block, sectbb);
		if (j == 0 && (start_block + endcount > ealign)) {
			error = xlog_bread_noalign(log, ealign, sectbb,
					buffer + BBTOB(ealign - start_block));
			if (error)
				break;

		}

		offset = buffer + xlog_align(log, start_block);
		for (; j < endcount; j++) {
			xlog_add_record(log, offset, cycle, i+j,
					tail_cycle, tail_block);
			offset += BBSIZE;
		}
		error = xlog_bwrite(log, start_block, endcount, buffer);
		if (error)
			break;
		start_block += endcount;
		j = 0;
	}

out_free_buffer:
	kmem_free(buffer);
	return error;
}
/*
 * This routine is called to blow away any incomplete log writes out
 * in front of the log head.  We do this so that we won't become confused
 * if we come up, write only a little bit more, and then crash again.
 * If we leave the partial log records out there, this situation could
 * cause us to think those partial writes are valid blocks since they
 * have the current cycle number.  We get rid of them by overwriting them
 * with empty log records with the old cycle number rather than the
 * current one.
 *
 * The tail lsn is passed in rather than taken from
 * the log so that we will not write over the unmount record after a
 * clean unmount in a 512 block log.  Doing so would leave the log without
 * any valid log records in it until a new one was written.  If we crashed
 * during that time we would not be able to recover.
 */
STATIC int
xlog_clear_stale_blocks(
	struct xlog	*log,
	xfs_lsn_t	tail_lsn)
{
	int		tail_cycle, head_cycle;
	int		tail_block, head_block;
	int		tail_distance, max_distance;
	int		distance;
	int		error;

	tail_cycle = CYCLE_LSN(tail_lsn);
	tail_block = BLOCK_LSN(tail_lsn);
	head_cycle = log->l_curr_cycle;
	head_block = log->l_curr_block;

	/*
	 * Figure out the distance between the new head of the log
	 * and the tail.  We want to write over any blocks beyond the
	 * head that we may have written just before the crash, but
	 * we don't want to overwrite the tail of the log.
	 */
	if (head_cycle == tail_cycle) {
		/*
		 * The tail is behind the head in the physical log,
		 * so the distance from the head to the tail is the
		 * distance from the head to the end of the log plus
		 * the distance from the beginning of the log to the
		 * tail.
		 */
		if (XFS_IS_CORRUPT(log->l_mp,
				   head_block < tail_block ||
				   head_block >= log->l_logBBsize))
			return -EFSCORRUPTED;
		tail_distance = tail_block + (log->l_logBBsize - head_block);
	} else {
		/*
		 * The head is behind the tail in the physical log,
		 * so the distance from the head to the tail is just
		 * the tail block minus the head block.
		 */
		if (XFS_IS_CORRUPT(log->l_mp,
				   head_block >= tail_block ||
				   head_cycle != tail_cycle + 1))
			return -EFSCORRUPTED;
		tail_distance = tail_block - head_block;
	}

	/*
	 * If the head is right up against the tail, we can't clear
	 * anything.
	 */
	if (tail_distance <= 0) {
		ASSERT(tail_distance == 0);
		return 0;
	}

	max_distance = XLOG_TOTAL_REC_SHIFT(log);
	/*
	 * Take the smaller of the maximum amount of outstanding I/O
	 * we could have and the distance to the tail to clear out.
	 * We take the smaller so that we don't overwrite the tail and
	 * we don't waste all day writing from the head to the tail
	 * for no reason.
	 */
	max_distance = min(max_distance, tail_distance);

	if ((head_block + max_distance) <= log->l_logBBsize) {
		/*
		 * We can stomp all the blocks we need to without
		 * wrapping around the end of the log.  Just do it
		 * in a single write.  Use the cycle number of the
		 * current cycle minus one so that the log will look like:
		 *     n ... | n - 1 ...
		 */
		error = xlog_write_log_records(log, (head_cycle - 1),
				head_block, max_distance, tail_cycle,
				tail_block);
		if (error)
			return error;
	} else {
		/*
		 * We need to wrap around the end of the physical log in
		 * order to clear all the blocks.  Do it in two separate
		 * I/Os.  The first write should be from the head to the
		 * end of the physical log, and it should use the current
		 * cycle number minus one just like above.
		 */
		distance = log->l_logBBsize - head_block;
		error = xlog_write_log_records(log, (head_cycle - 1),
				head_block, distance, tail_cycle,
				tail_block);

		if (error)
			return error;

		/*
		 * Now write the blocks at the start of the physical log.
		 * This writes the remainder of the blocks we want to clear.
		 * It uses the current cycle number since we're now on the
		 * same cycle as the head so that we get:
		 *    n ... n ... | n - 1 ...
		 *    ^^^^^ blocks we're writing
		 */
		distance = max_distance - (log->l_logBBsize - head_block);
		error = xlog_write_log_records(log, head_cycle, 0, distance,
				tail_cycle, tail_block);
		if (error)
			return error;
	}

	return 0;
}
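
/*
 * Worked example (assumed numbers, never compiled): in a 1000 block log
 * with the head at (cycle 8, block 990), the tail at (cycle 8, block 200)
 * and max_distance capped at 50, the clearing wraps the physical end and
 * is split into the two writes below, yielding "8 ... 8 | 7 ..." on disk.
 */
#if 0
	/* blocks 990..999 get cycle 7 (head_cycle - 1) */
	error = xlog_write_log_records(log, 7, 990, 10, 8, 200);
	/* blocks 0..39 get cycle 8, the head's own cycle */
	error = xlog_write_log_records(log, 8, 0, 40, 8, 200);
#endif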
/******************************************************************************
 *
 *		Log recover routines
 *
 ******************************************************************************
 */
/*
 * Sort the log items in the transaction.
 *
 * The ordering constraints are defined by the inode allocation and unlink
 * behaviour. The rules are:
 *
 *	1. Every item is only logged once in a given transaction. Hence it
 *	   represents the last logged state of the item. Hence ordering is
 *	   dependent on the order in which operations need to be performed so
 *	   required initial conditions are always met.
 *
 *	2. Cancelled buffers are recorded in pass 1 in a separate table and
 *	   there's nothing to replay from them so we can simply cull them
 *	   from the transaction. However, we can't do that until after we've
 *	   replayed all the other items because they may be dependent on the
 *	   cancelled buffer and replaying the cancelled buffer can remove it
 *	   from the cancelled buffer table. Hence they have to be done last.
 *
 *	3. Inode allocation buffers must be replayed before inode items that
 *	   read the buffer and replay changes into it. For filesystems using the
 *	   ICREATE transactions, this means XFS_LI_ICREATE objects need to get
 *	   treated the same as inode allocation buffers as they create and
 *	   initialise the buffers directly.
 *
 *	4. Inode unlink buffers must be replayed after inode items are replayed.
 *	   This ensures that inodes are completely flushed to the inode buffer
 *	   in a "free" state before we remove the unlinked inode list pointer.
 *
 * Hence the ordering needs to be inode allocation buffers first, inode items
 * second, inode unlink buffers third and cancelled buffers last.
 *
 * But there's a problem with that - we can't tell an inode allocation buffer
 * apart from a regular buffer, so we can't separate them. We can, however,
 * tell an inode unlink buffer from the others, and so we can separate them out
 * from all the other buffers and move them to last.
 *
 * Hence, 4 lists, in order from head to tail:
 *	- buffer_list for all buffers except cancelled/inode unlink buffers
 *	- item_list for all non-buffer items
 *	- inode_buffer_list for inode unlink buffers
 *	- cancel_list for the cancelled buffers
 *
 * Note that we add objects to the tail of the lists so that first-to-last
 * ordering is preserved within the lists. Adding objects to the head of the
 * list means when we traverse from the head we walk them in last-to-first
 * order. For cancelled buffers and inode unlink buffers this doesn't matter,
 * but for all other items there may be specific ordering that we need to
 * preserve.
 */
STATIC int
xlog_recover_reorder_trans(
	struct xlog		*log,
	struct xlog_recover	*trans,
	int			pass)
{
	xlog_recover_item_t	*item, *n;
	int			error = 0;
	LIST_HEAD(sort_list);
	LIST_HEAD(cancel_list);
	LIST_HEAD(buffer_list);
	LIST_HEAD(inode_buffer_list);
	LIST_HEAD(inode_list);

	list_splice_init(&trans->r_itemq, &sort_list);
	list_for_each_entry_safe(item, n, &sort_list, ri_list) {
		xfs_buf_log_format_t	*buf_f = item->ri_buf[0].i_addr;

		switch (ITEM_TYPE(item)) {
		case XFS_LI_ICREATE:
			list_move_tail(&item->ri_list, &buffer_list);
			break;
		case XFS_LI_BUF:
			if (buf_f->blf_flags & XFS_BLF_CANCEL) {
				trace_xfs_log_recover_item_reorder_head(log,
							trans, item, pass);
				list_move(&item->ri_list, &cancel_list);
				break;
			}
			if (buf_f->blf_flags & XFS_BLF_INODE_BUF) {
				list_move(&item->ri_list, &inode_buffer_list);
				break;
			}
			list_move_tail(&item->ri_list, &buffer_list);
			break;
		case XFS_LI_INODE:
		case XFS_LI_DQUOT:
		case XFS_LI_QUOTAOFF:
		case XFS_LI_EFD:
		case XFS_LI_EFI:
		case XFS_LI_RUI:
		case XFS_LI_RUD:
		case XFS_LI_CUI:
		case XFS_LI_CUD:
		case XFS_LI_BUI:
		case XFS_LI_BUD:
			trace_xfs_log_recover_item_reorder_tail(log,
							trans, item, pass);
			list_move_tail(&item->ri_list, &inode_list);
			break;
		default:
			xfs_warn(log->l_mp,
				"%s: unrecognized type of log operation",
				__func__);
			ASSERT(0);
			/*
			 * return the remaining items back to the transaction
			 * item list so they can be freed in caller.
			 */
			if (!list_empty(&sort_list))
				list_splice_init(&sort_list, &trans->r_itemq);
			error = -EIO;
			goto out;
		}
	}
out:
	ASSERT(list_empty(&sort_list));
	if (!list_empty(&buffer_list))
		list_splice(&buffer_list, &trans->r_itemq);
	if (!list_empty(&inode_list))
		list_splice_tail(&inode_list, &trans->r_itemq);
	if (!list_empty(&inode_buffer_list))
		list_splice_tail(&inode_buffer_list, &trans->r_itemq);
	if (!list_empty(&cancel_list))
		list_splice_tail(&cancel_list, &trans->r_itemq);
	return error;
}
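
/*
 * Illustrative sketch (hypothetical item stream): a transaction logged as
 *
 *	[ inode I1, buf B1, cancelled buf B2, unlink buf B3, icreate C1 ]
 *
 * leaves r_itemq ordered as
 *
 *	[ C1, B1 ] -> [ I1 ] -> [ B3 ] -> [ B2 ]
 *
 * i.e. allocation/regular buffers first, non-buffer items second, inode
 * unlink buffers third and cancelled buffers last, matching the rules in
 * the comment above.
 */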
/*
 * Build up the table of buf cancel records so that we don't replay
 * cancelled data in the second pass.  For buffer records that are
 * not cancel records, there is nothing to do here so we just return.
 *
 * If we get a cancel record which is already in the table, this indicates
 * that the buffer was cancelled multiple times.  In order to ensure
 * that during pass 2 we keep the record in the table until we reach its
 * last occurrence in the log, we keep a reference count in the cancel
 * record in the table to tell us how many times we expect to see this
 * record during the second pass.
 */
STATIC int
xlog_recover_buffer_pass1(
	struct xlog			*log,
	struct xlog_recover_item	*item)
{
	xfs_buf_log_format_t	*buf_f = item->ri_buf[0].i_addr;
	struct list_head	*bucket;
	struct xfs_buf_cancel	*bcp;

	if (!xfs_buf_log_check_iovec(&item->ri_buf[0])) {
		xfs_err(log->l_mp, "bad buffer log item size (%d)",
				item->ri_buf[0].i_len);
		return -EFSCORRUPTED;
	}

	/*
	 * If this isn't a cancel buffer item, then just return.
	 */
	if (!(buf_f->blf_flags & XFS_BLF_CANCEL)) {
		trace_xfs_log_recover_buf_not_cancel(log, buf_f);
		return 0;
	}

	/*
	 * Insert an xfs_buf_cancel record into the hash table of them.
	 * If there is already an identical record, bump its reference count.
	 */
	bucket = XLOG_BUF_CANCEL_BUCKET(log, buf_f->blf_blkno);
	list_for_each_entry(bcp, bucket, bc_list) {
		if (bcp->bc_blkno == buf_f->blf_blkno &&
		    bcp->bc_len == buf_f->blf_len) {
			bcp->bc_refcount++;
			trace_xfs_log_recover_buf_cancel_ref_inc(log, buf_f);
			return 0;
		}
	}

	bcp = kmem_alloc(sizeof(struct xfs_buf_cancel), 0);
	bcp->bc_blkno = buf_f->blf_blkno;
	bcp->bc_len = buf_f->blf_len;
	bcp->bc_refcount = 1;
	list_add_tail(&bcp->bc_list, bucket);

	trace_xfs_log_recover_buf_cancel_add(log, buf_f);
	return 0;
}
/*
 * Check to see whether the buffer being recovered has a corresponding
 * entry in the buffer cancel record table. If it is, return the cancel
 * buffer structure to the caller.
 */
STATIC struct xfs_buf_cancel *
xlog_peek_buffer_cancelled(
	struct xlog		*log,
	xfs_daddr_t		blkno,
	uint			len,
	unsigned short		flags)
{
	struct list_head	*bucket;
	struct xfs_buf_cancel	*bcp;

	if (!log->l_buf_cancel_table) {
		/* empty table means no cancelled buffers in the log */
		ASSERT(!(flags & XFS_BLF_CANCEL));
		return NULL;
	}

	bucket = XLOG_BUF_CANCEL_BUCKET(log, blkno);
	list_for_each_entry(bcp, bucket, bc_list) {
		if (bcp->bc_blkno == blkno && bcp->bc_len == len)
			return bcp;
	}

	/*
	 * We didn't find a corresponding entry in the table, so return 0 so
	 * that the buffer is NOT cancelled.
	 */
	ASSERT(!(flags & XFS_BLF_CANCEL));
	return NULL;
}
/*
 * If the buffer is being cancelled then return 1 so that it will be cancelled,
 * otherwise return 0.  If the buffer is actually a buffer cancel item
 * (XFS_BLF_CANCEL is set), then decrement the refcount on the entry in the
 * table and remove it from the table if this is the last reference.
 *
 * We remove the cancel record from the table when we encounter its last
 * occurrence in the log so that if the same buffer is re-used again after its
 * last cancellation we actually replay the changes made at that point.
 */
STATIC int
xlog_check_buffer_cancelled(
	struct xlog		*log,
	xfs_daddr_t		blkno,
	uint			len,
	unsigned short		flags)
{
	struct xfs_buf_cancel	*bcp;

	bcp = xlog_peek_buffer_cancelled(log, blkno, len, flags);
	if (!bcp)
		return 0;

	/*
	 * We've got a match, so return 1 so that the recovery of this buffer
	 * is cancelled.  If this buffer is actually a buffer cancel log
	 * item, then decrement the refcount on the one in the table and
	 * remove it if this is the last reference.
	 */
	if (flags & XFS_BLF_CANCEL) {
		if (--bcp->bc_refcount == 0) {
			list_del(&bcp->bc_list);
			kmem_free(bcp);
		}
	}
	return 1;
}
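
/*
 * Usage sketch (hypothetical values, never compiled): if the buffer at
 * daddr 64, length 8, was cancelled twice in the log, pass 1 leaves one
 * table entry with bc_refcount == 2. Each cancel record seen in pass 2
 * then decrements it, and the entry disappears at its last occurrence so
 * that later reuse of the same blocks replays normally.
 */
#if 0
	if (xlog_check_buffer_cancelled(log, 64, 8, XFS_BLF_CANCEL)) {
		/* replay skipped; the second such call also frees the
		 * refcount 2 entry from the cancel table */
	}
#endif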
/*
 * Perform recovery for a buffer full of inodes.  In these buffers, the only
 * data which should be recovered is that which corresponds to the
 * di_next_unlinked pointers in the on disk inode structures.  The rest of the
 * data for the inodes is always logged through the inodes themselves rather
 * than the inode buffer and is recovered in xlog_recover_inode_pass2().
 *
 * The only time when buffers full of inodes are fully recovered is when the
 * buffer is full of newly allocated inodes.  In this case the buffer will
 * not be marked as an inode buffer and so will be sent to
 * xlog_recover_do_reg_buffer() below during recovery.
 */
STATIC int
xlog_recover_do_inode_buffer(
	struct xfs_mount	*mp,
	xlog_recover_item_t	*item,
	struct xfs_buf		*bp,
	xfs_buf_log_format_t	*buf_f)
{
	int			i;
	int			item_index = 0;
	int			bit = 0;
	int			nbits = 0;
	int			reg_buf_offset = 0;
	int			reg_buf_bytes = 0;
	int			next_unlinked_offset;
	int			inodes_per_buf;
	xfs_agino_t		*logged_nextp;
	xfs_agino_t		*buffer_nextp;

	trace_xfs_log_recover_buf_inode_buf(mp->m_log, buf_f);

	/*
	 * Post recovery validation only works properly on CRC enabled
	 * filesystems.
	 */
	if (xfs_sb_version_hascrc(&mp->m_sb))
		bp->b_ops = &xfs_inode_buf_ops;

	inodes_per_buf = BBTOB(bp->b_length) >> mp->m_sb.sb_inodelog;
	for (i = 0; i < inodes_per_buf; i++) {
		next_unlinked_offset = (i * mp->m_sb.sb_inodesize) +
			offsetof(xfs_dinode_t, di_next_unlinked);

		while (next_unlinked_offset >=
		       (reg_buf_offset + reg_buf_bytes)) {
			/*
			 * The next di_next_unlinked field is beyond
			 * the current logged region.  Find the next
			 * logged region that contains or is beyond
			 * the current di_next_unlinked field.
			 */
			bit += nbits;
			bit = xfs_next_bit(buf_f->blf_data_map,
					   buf_f->blf_map_size, bit);

			/*
			 * If there are no more logged regions in the
			 * buffer, then we're done.
			 */
			if (bit == -1)
				return 0;

			nbits = xfs_contig_bits(buf_f->blf_data_map,
						buf_f->blf_map_size, bit);
			ASSERT(nbits > 0);
			reg_buf_offset = bit << XFS_BLF_SHIFT;
			reg_buf_bytes = nbits << XFS_BLF_SHIFT;
			item_index++;
		}

		/*
		 * If the current logged region starts after the current
		 * di_next_unlinked field, then move on to the next
		 * di_next_unlinked field.
		 */
		if (next_unlinked_offset < reg_buf_offset)
			continue;

		ASSERT(item->ri_buf[item_index].i_addr != NULL);
		ASSERT((item->ri_buf[item_index].i_len % XFS_BLF_CHUNK) == 0);
		ASSERT((reg_buf_offset + reg_buf_bytes) <= BBTOB(bp->b_length));

		/*
		 * The current logged region contains a copy of the
		 * current di_next_unlinked field.  Extract its value
		 * and copy it to the buffer copy.
		 */
		logged_nextp = item->ri_buf[item_index].i_addr +
				next_unlinked_offset - reg_buf_offset;
		if (XFS_IS_CORRUPT(mp, *logged_nextp == 0)) {
			xfs_alert(mp,
		"Bad inode buffer log record (ptr = "PTR_FMT", bp = "PTR_FMT"). "
		"Trying to replay bad (0) inode di_next_unlinked field.",
				item, bp);
			return -EFSCORRUPTED;
		}

		buffer_nextp = xfs_buf_offset(bp, next_unlinked_offset);
		*buffer_nextp = *logged_nextp;

		/*
		 * If necessary, recalculate the CRC in the on-disk inode. We
		 * have to leave the inode in a consistent state for whoever
		 * reads it next....
		 */
		xfs_dinode_calc_crc(mp,
				xfs_buf_offset(bp, i * mp->m_sb.sb_inodesize));

	}

	return 0;
}
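
/*
 * Worked example (assumed 512 byte inodes, so sb_inodelog == 9): a 16K
 * inode buffer holds 32 inodes, and for inode 3 the loop above computes
 * next_unlinked_offset = 3 * 512 + offsetof(xfs_dinode_t,
 * di_next_unlinked); only those 4 byte fields are copied out of the
 * logged regions, everything else in the buffer is left untouched.
 */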
/*
 * V5 filesystems know the age of the buffer on disk being recovered. We can
 * have newer objects on disk than we are replaying, and so for these cases we
 * don't want to replay the current change as that will make the buffer contents
 * temporarily invalid on disk.
 *
 * The magic number might not match the buffer type we are going to recover
 * (e.g. reallocated blocks), so we ignore the xfs_buf_log_format flags.  Hence
 * extract the LSN of the existing object in the buffer based on its current
 * magic number.  If we don't recognise the magic number in the buffer, then
 * return an LSN of -1 so that the caller knows it was an unrecognised block and
 * so can recover the buffer.
 *
 * Note: we cannot rely solely on magic number matches to determine that the
 * buffer has a valid LSN - we also need to verify that it belongs to this
 * filesystem, so we need to extract the object's LSN and compare it to that
 * which we read from the superblock. If the UUIDs don't match, then we've got a
 * stale metadata block from an old filesystem instance that we need to recover
 * over the top of.
 */
static xfs_lsn_t
xlog_recover_get_buf_lsn(
	struct xfs_mount	*mp,
	struct xfs_buf		*bp)
{
	uint32_t		magic32;
	uint16_t		magic16;
	uint16_t		magicda;
	void			*blk = bp->b_addr;
	uuid_t			*uuid;
	xfs_lsn_t		lsn = -1;

	/* v4 filesystems always recover immediately */
	if (!xfs_sb_version_hascrc(&mp->m_sb))
		goto recover_immediately;

	magic32 = be32_to_cpu(*(__be32 *)blk);
	switch (magic32) {
	case XFS_ABTB_CRC_MAGIC:
	case XFS_ABTC_CRC_MAGIC:
	case XFS_ABTB_MAGIC:
	case XFS_ABTC_MAGIC:
	case XFS_RMAP_CRC_MAGIC:
	case XFS_REFC_CRC_MAGIC:
	case XFS_IBT_CRC_MAGIC:
	case XFS_IBT_MAGIC: {
		struct xfs_btree_block *btb = blk;

		lsn = be64_to_cpu(btb->bb_u.s.bb_lsn);
		uuid = &btb->bb_u.s.bb_uuid;
		break;
	}
	case XFS_BMAP_CRC_MAGIC:
	case XFS_BMAP_MAGIC: {
		struct xfs_btree_block *btb = blk;

		lsn = be64_to_cpu(btb->bb_u.l.bb_lsn);
		uuid = &btb->bb_u.l.bb_uuid;
		break;
	}
	case XFS_AGF_MAGIC:
		lsn = be64_to_cpu(((struct xfs_agf *)blk)->agf_lsn);
		uuid = &((struct xfs_agf *)blk)->agf_uuid;
		break;
	case XFS_AGFL_MAGIC:
		lsn = be64_to_cpu(((struct xfs_agfl *)blk)->agfl_lsn);
		uuid = &((struct xfs_agfl *)blk)->agfl_uuid;
		break;
	case XFS_AGI_MAGIC:
		lsn = be64_to_cpu(((struct xfs_agi *)blk)->agi_lsn);
		uuid = &((struct xfs_agi *)blk)->agi_uuid;
		break;
	case XFS_SYMLINK_MAGIC:
		lsn = be64_to_cpu(((struct xfs_dsymlink_hdr *)blk)->sl_lsn);
		uuid = &((struct xfs_dsymlink_hdr *)blk)->sl_uuid;
		break;
	case XFS_DIR3_BLOCK_MAGIC:
	case XFS_DIR3_DATA_MAGIC:
	case XFS_DIR3_FREE_MAGIC:
		lsn = be64_to_cpu(((struct xfs_dir3_blk_hdr *)blk)->lsn);
		uuid = &((struct xfs_dir3_blk_hdr *)blk)->uuid;
		break;
	case XFS_ATTR3_RMT_MAGIC:
		/*
		 * Remote attr blocks are written synchronously, rather than
		 * being logged. That means they do not contain a valid LSN
		 * (i.e. transactionally ordered) in them, and hence any time we
		 * see a buffer to replay over the top of a remote attribute
		 * block we should simply do so.
		 */
		goto recover_immediately;
	case XFS_SB_MAGIC:
		/*
		 * superblock uuids are magic. We may or may not have a
		 * sb_meta_uuid on disk, but it will be set in the in-core
		 * superblock. We set the uuid pointer for verification
		 * according to the superblock feature mask to ensure we check
		 * the relevant UUID in the superblock.
		 */
		lsn = be64_to_cpu(((struct xfs_dsb *)blk)->sb_lsn);
		if (xfs_sb_version_hasmetauuid(&mp->m_sb))
			uuid = &((struct xfs_dsb *)blk)->sb_meta_uuid;
		else
			uuid = &((struct xfs_dsb *)blk)->sb_uuid;
		break;
	default:
		break;
	}

	if (lsn != (xfs_lsn_t)-1) {
		if (!uuid_equal(&mp->m_sb.sb_meta_uuid, uuid))
			goto recover_immediately;
		return lsn;
	}

	magicda = be16_to_cpu(((struct xfs_da_blkinfo *)blk)->magic);
	switch (magicda) {
	case XFS_DIR3_LEAF1_MAGIC:
	case XFS_DIR3_LEAFN_MAGIC:
	case XFS_DA3_NODE_MAGIC:
		lsn = be64_to_cpu(((struct xfs_da3_blkinfo *)blk)->lsn);
		uuid = &((struct xfs_da3_blkinfo *)blk)->uuid;
		break;
	default:
		break;
	}

	if (lsn != (xfs_lsn_t)-1) {
		if (!uuid_equal(&mp->m_sb.sb_uuid, uuid))
			goto recover_immediately;
		return lsn;
	}

	/*
	 * We do individual object checks on dquot and inode buffers as they
	 * have their own individual LSN records. Also, we could have a stale
	 * buffer here, so we have to at least recognise these buffer types.
	 *
	 * A noted complexity here is inode unlinked list processing - it logs
	 * the inode directly in the buffer, but we don't know which inodes have
	 * been modified, and there is no global buffer LSN. Hence we need to
	 * recover all inode buffer types immediately. This problem will be
	 * fixed by logical logging of the unlinked list modifications.
	 */
	magic16 = be16_to_cpu(*(__be16 *)blk);
	switch (magic16) {
	case XFS_DQUOT_MAGIC:
	case XFS_DINODE_MAGIC:
		goto recover_immediately;
	default:
		break;
	}

	/* unknown buffer contents, recover immediately */

recover_immediately:
	return (xfs_lsn_t)-1;
}
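
/*
 * Example of why the UUID check above matters: a block reused from a
 * previous mkfs of the same device can carry a plausible-looking LSN
 * from the old filesystem instance. The magic number matches, but the
 * UUID does not, so that LSN cannot be trusted and the buffer must be
 * recovered immediately.
 */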
/*
 * Validate the recovered buffer is of the correct type and attach the
 * appropriate buffer operations to them for writeback. Magic numbers are in a
 * few places:
 *	the first 16 bits of the buffer (inode buffer, dquot buffer),
 *	the first 32 bits of the buffer (most blocks),
 *	inside a struct xfs_da_blkinfo at the start of the buffer.
 */
static void
xlog_recover_validate_buf_type(
	struct xfs_mount	*mp,
	struct xfs_buf		*bp,
	xfs_buf_log_format_t	*buf_f,
	xfs_lsn_t		current_lsn)
{
	struct xfs_da_blkinfo	*info = bp->b_addr;
	uint32_t		magic32;
	uint16_t		magic16;
	uint16_t		magicda;
	char			*warnmsg = NULL;

	/*
	 * We can only do post recovery validation on items on CRC enabled
	 * filesystems as we need to know when the buffer was written to be able
	 * to determine if we should have replayed the item. If we replay old
	 * metadata over a newer buffer, then it will enter a temporarily
	 * inconsistent state resulting in verification failures. Hence for now
	 * just avoid the verification stage for non-crc filesystems.
	 */
	if (!xfs_sb_version_hascrc(&mp->m_sb))
		return;

	magic32 = be32_to_cpu(*(__be32 *)bp->b_addr);
	magic16 = be16_to_cpu(*(__be16 *)bp->b_addr);
	magicda = be16_to_cpu(info->magic);
	switch (xfs_blft_from_flags(buf_f)) {
	case XFS_BLFT_BTREE_BUF:
		switch (magic32) {
		case XFS_ABTB_CRC_MAGIC:
		case XFS_ABTB_MAGIC:
			bp->b_ops = &xfs_bnobt_buf_ops;
			break;
		case XFS_ABTC_CRC_MAGIC:
		case XFS_ABTC_MAGIC:
			bp->b_ops = &xfs_cntbt_buf_ops;
			break;
		case XFS_IBT_CRC_MAGIC:
		case XFS_IBT_MAGIC:
			bp->b_ops = &xfs_inobt_buf_ops;
			break;
		case XFS_FIBT_CRC_MAGIC:
		case XFS_FIBT_MAGIC:
			bp->b_ops = &xfs_finobt_buf_ops;
			break;
		case XFS_BMAP_CRC_MAGIC:
		case XFS_BMAP_MAGIC:
			bp->b_ops = &xfs_bmbt_buf_ops;
			break;
		case XFS_RMAP_CRC_MAGIC:
			bp->b_ops = &xfs_rmapbt_buf_ops;
			break;
		case XFS_REFC_CRC_MAGIC:
			bp->b_ops = &xfs_refcountbt_buf_ops;
			break;
		default:
			warnmsg = "Bad btree block magic!";
			break;
		}
		break;
	case XFS_BLFT_AGF_BUF:
		if (magic32 != XFS_AGF_MAGIC) {
			warnmsg = "Bad AGF block magic!";
			break;
		}
		bp->b_ops = &xfs_agf_buf_ops;
		break;
	case XFS_BLFT_AGFL_BUF:
		if (magic32 != XFS_AGFL_MAGIC) {
			warnmsg = "Bad AGFL block magic!";
			break;
		}
		bp->b_ops = &xfs_agfl_buf_ops;
		break;
	case XFS_BLFT_AGI_BUF:
		if (magic32 != XFS_AGI_MAGIC) {
			warnmsg = "Bad AGI block magic!";
			break;
		}
		bp->b_ops = &xfs_agi_buf_ops;
		break;
	case XFS_BLFT_UDQUOT_BUF:
	case XFS_BLFT_PDQUOT_BUF:
	case XFS_BLFT_GDQUOT_BUF:
#ifdef CONFIG_XFS_QUOTA
		if (magic16 != XFS_DQUOT_MAGIC) {
			warnmsg = "Bad DQUOT block magic!";
			break;
		}
		bp->b_ops = &xfs_dquot_buf_ops;
#else
		xfs_alert(mp,
	"Trying to recover dquots without QUOTA support built in!");
		ASSERT(0);
#endif
		break;
	case XFS_BLFT_DINO_BUF:
		if (magic16 != XFS_DINODE_MAGIC) {
			warnmsg = "Bad INODE block magic!";
			break;
		}
		bp->b_ops = &xfs_inode_buf_ops;
		break;
	case XFS_BLFT_SYMLINK_BUF:
		if (magic32 != XFS_SYMLINK_MAGIC) {
			warnmsg = "Bad symlink block magic!";
			break;
		}
		bp->b_ops = &xfs_symlink_buf_ops;
		break;
	case XFS_BLFT_DIR_BLOCK_BUF:
		if (magic32 != XFS_DIR2_BLOCK_MAGIC &&
		    magic32 != XFS_DIR3_BLOCK_MAGIC) {
			warnmsg = "Bad dir block magic!";
			break;
		}
		bp->b_ops = &xfs_dir3_block_buf_ops;
		break;
	case XFS_BLFT_DIR_DATA_BUF:
		if (magic32 != XFS_DIR2_DATA_MAGIC &&
		    magic32 != XFS_DIR3_DATA_MAGIC) {
			warnmsg = "Bad dir data magic!";
			break;
		}
		bp->b_ops = &xfs_dir3_data_buf_ops;
		break;
	case XFS_BLFT_DIR_FREE_BUF:
		if (magic32 != XFS_DIR2_FREE_MAGIC &&
		    magic32 != XFS_DIR3_FREE_MAGIC) {
			warnmsg = "Bad dir3 free magic!";
			break;
		}
		bp->b_ops = &xfs_dir3_free_buf_ops;
		break;
	case XFS_BLFT_DIR_LEAF1_BUF:
		if (magicda != XFS_DIR2_LEAF1_MAGIC &&
		    magicda != XFS_DIR3_LEAF1_MAGIC) {
			warnmsg = "Bad dir leaf1 magic!";
			break;
		}
		bp->b_ops = &xfs_dir3_leaf1_buf_ops;
		break;
	case XFS_BLFT_DIR_LEAFN_BUF:
		if (magicda != XFS_DIR2_LEAFN_MAGIC &&
		    magicda != XFS_DIR3_LEAFN_MAGIC) {
			warnmsg = "Bad dir leafn magic!";
			break;
		}
		bp->b_ops = &xfs_dir3_leafn_buf_ops;
		break;
	case XFS_BLFT_DA_NODE_BUF:
		if (magicda != XFS_DA_NODE_MAGIC &&
		    magicda != XFS_DA3_NODE_MAGIC) {
			warnmsg = "Bad da node magic!";
			break;
		}
		bp->b_ops = &xfs_da3_node_buf_ops;
		break;
	case XFS_BLFT_ATTR_LEAF_BUF:
		if (magicda != XFS_ATTR_LEAF_MAGIC &&
		    magicda != XFS_ATTR3_LEAF_MAGIC) {
			warnmsg = "Bad attr leaf magic!";
			break;
		}
		bp->b_ops = &xfs_attr3_leaf_buf_ops;
		break;
	case XFS_BLFT_ATTR_RMT_BUF:
		if (magic32 != XFS_ATTR3_RMT_MAGIC) {
			warnmsg = "Bad attr remote magic!";
			break;
		}
		bp->b_ops = &xfs_attr3_rmt_buf_ops;
		break;
	case XFS_BLFT_SB_BUF:
		if (magic32 != XFS_SB_MAGIC) {
			warnmsg = "Bad SB block magic!";
			break;
		}
		bp->b_ops = &xfs_sb_buf_ops;
		break;
#ifdef CONFIG_XFS_RT
	case XFS_BLFT_RTBITMAP_BUF:
	case XFS_BLFT_RTSUMMARY_BUF:
		/* no magic numbers for verification of RT buffers */
		bp->b_ops = &xfs_rtbuf_ops;
		break;
#endif /* CONFIG_XFS_RT */
	default:
		xfs_warn(mp, "Unknown buffer type %d!",
			 xfs_blft_from_flags(buf_f));
		break;
	}

	/*
	 * Nothing else to do in the case of a NULL current LSN as this means
	 * the buffer is more recent than the change in the log and will be
	 * skipped.
	 */
	if (current_lsn == NULLCOMMITLSN)
		return;

	if (warnmsg) {
		xfs_warn(mp, warnmsg);
		ASSERT(0);
	}

	/*
	 * We must update the metadata LSN of the buffer as it is written out to
	 * ensure that older transactions never replay over this one and corrupt
	 * the buffer. This can occur if log recovery is interrupted at some
	 * point after the current transaction completes, at which point a
	 * subsequent mount starts recovery from the beginning.
	 *
	 * Write verifiers update the metadata LSN from log items attached to
	 * the buffer. Therefore, initialize a bli purely to carry the LSN to
	 * the verifier. We'll clean it up in our ->iodone() callback.
	 */
	if (bp->b_ops) {
		struct xfs_buf_log_item	*bip;

		ASSERT(!bp->b_iodone || bp->b_iodone == xlog_recover_iodone);
		bp->b_iodone = xlog_recover_iodone;
		xfs_buf_item_init(bp, mp);
		bip = bp->b_log_item;
		bip->bli_item.li_lsn = current_lsn;
	}
}
/*
 * Perform a 'normal' buffer recovery.  Each logged region of the
 * buffer should be copied over the corresponding region in the
 * given buffer.  The bitmap in the buf log format structure indicates
 * where to place the logged data.
 */
STATIC void
xlog_recover_do_reg_buffer(
	struct xfs_mount	*mp,
	xlog_recover_item_t	*item,
	struct xfs_buf		*bp,
	xfs_buf_log_format_t	*buf_f,
	xfs_lsn_t		current_lsn)
{
	int			i;
	int			bit;
	int			nbits;
	xfs_failaddr_t		fa;
	const size_t		size_disk_dquot = sizeof(struct xfs_disk_dquot);

	trace_xfs_log_recover_buf_reg_buf(mp->m_log, buf_f);

	bit = 0;
	i = 1;  /* 0 is the buf format structure */
	while (1) {
		bit = xfs_next_bit(buf_f->blf_data_map,
				   buf_f->blf_map_size, bit);
		if (bit == -1)
			break;
		nbits = xfs_contig_bits(buf_f->blf_data_map,
					buf_f->blf_map_size, bit);
		ASSERT(nbits > 0);
		ASSERT(item->ri_buf[i].i_addr != NULL);
		ASSERT(item->ri_buf[i].i_len % XFS_BLF_CHUNK == 0);
		ASSERT(BBTOB(bp->b_length) >=
		       ((uint)bit << XFS_BLF_SHIFT) + (nbits << XFS_BLF_SHIFT));

		/*
		 * The dirty regions logged in the buffer, even though
		 * contiguous, may span multiple chunks. This is because the
		 * dirty region may span a physical page boundary in a buffer
		 * and hence be split into two separate vectors for writing into
		 * the log. Hence we need to trim nbits back to the length of
		 * the current region being copied out of the log.
		 */
		if (item->ri_buf[i].i_len < (nbits << XFS_BLF_SHIFT))
			nbits = item->ri_buf[i].i_len >> XFS_BLF_SHIFT;

		/*
		 * Do a sanity check if this is a dquot buffer. Just checking
		 * the first dquot in the buffer should do. XXX: This is
		 * probably a good thing to do for other buf types also.
		 */
		fa = NULL;
		if (buf_f->blf_flags &
		   (XFS_BLF_UDQUOT_BUF|XFS_BLF_PDQUOT_BUF|XFS_BLF_GDQUOT_BUF)) {
			if (item->ri_buf[i].i_addr == NULL) {
				xfs_alert(mp,
					"XFS: NULL dquot in %s.", __func__);
				goto next;
			}
			if (item->ri_buf[i].i_len < size_disk_dquot) {
				xfs_alert(mp,
					"XFS: dquot too small (%d) in %s.",
					item->ri_buf[i].i_len, __func__);
				goto next;
			}
			fa = xfs_dquot_verify(mp, item->ri_buf[i].i_addr,
					      -1, 0);
			if (fa) {
				xfs_alert(mp,
	"dquot corrupt at %pS trying to replay into block 0x%llx",
					fa, bp->b_bn);
				goto next;
			}
		}

		memcpy(xfs_buf_offset(bp,
			(uint)bit << XFS_BLF_SHIFT),	/* dest */
			item->ri_buf[i].i_addr,		/* source */
			nbits<<XFS_BLF_SHIFT);		/* length */
 next:
		i++;
		bit += nbits;
	}

	/* Shouldn't be any more regions */
	ASSERT(i == item->ri_total);

	xlog_recover_validate_buf_type(mp, bp, buf_f, current_lsn);
}
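
/*
 * Illustration of the bitmap walk above: if blf_data_map has bits 2-4
 * set, xfs_next_bit() returns 2 and xfs_contig_bits() returns 3, so we
 * copy 3 << XFS_BLF_SHIFT = 384 bytes of logged data to byte offset
 * 2 << XFS_BLF_SHIFT = 256 of the buffer, then continue scanning from
 * bit 5.
 */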
/*
 * Perform a dquot buffer recovery.
 * Simple algorithm: if we have found a QUOTAOFF log item of the same type
 * (ie. USR or GRP), then just toss this buffer away; don't recover it.
 * Else, treat it as a regular buffer and do recovery.
 *
 * Return false if the buffer was tossed and true if we recovered the buffer to
 * indicate to the caller if the buffer needs writing.
 */
STATIC bool
xlog_recover_do_dquot_buffer(
	struct xfs_mount		*mp,
	struct xlog			*log,
	struct xlog_recover_item	*item,
	struct xfs_buf			*bp,
	struct xfs_buf_log_format	*buf_f)
{
	uint			type;

	trace_xfs_log_recover_buf_dquot_buf(log, buf_f);

	/*
	 * Filesystems are required to send in quota flags at mount time.
	 */
	if (!mp->m_qflags)
		return false;

	type = 0;
	if (buf_f->blf_flags & XFS_BLF_UDQUOT_BUF)
		type |= XFS_DQ_USER;
	if (buf_f->blf_flags & XFS_BLF_PDQUOT_BUF)
		type |= XFS_DQ_PROJ;
	if (buf_f->blf_flags & XFS_BLF_GDQUOT_BUF)
		type |= XFS_DQ_GROUP;
	/*
	 * This type of quotas was turned off, so ignore this buffer
	 */
	if (log->l_quotaoffs_flag & type)
		return false;

	xlog_recover_do_reg_buffer(mp, item, bp, buf_f, NULLCOMMITLSN);
	return true;
}
/*
 * This routine replays a modification made to a buffer at runtime.
 * There are actually two types of buffer, regular and inode, which
 * are handled differently. Inode buffers are handled differently
 * in that we only recover a specific set of data from them, namely
 * the inode di_next_unlinked fields. This is because all other inode
 * data is actually logged via inode records and any data we replay
 * here which overlaps that may be stale.
 *
 * When meta-data buffers are freed at run time we log a buffer item
 * with the XFS_BLF_CANCEL bit set to indicate that previous copies
 * of the buffer in the log should not be replayed at recovery time.
 * This is so that if the blocks covered by the buffer are reused for
 * file data before we crash we don't end up replaying old, freed
 * meta-data into a user's file.
 *
 * To handle the cancellation of buffer log items, we make two passes
 * over the log during recovery. During the first we build a table of
 * those buffers which have been cancelled, and during the second we
 * only replay those buffers which do not have corresponding cancel
 * records in the table. See xlog_recover_buffer_pass[1,2] above
 * for more details on the implementation of the table of cancel records.
 */
STATIC int
xlog_recover_buffer_pass2(
	struct xlog			*log,
	struct list_head		*buffer_list,
	struct xlog_recover_item	*item,
	xfs_lsn_t			current_lsn)
{
	xfs_buf_log_format_t	*buf_f = item->ri_buf[0].i_addr;
	xfs_mount_t		*mp = log->l_mp;
	struct xfs_buf		*bp;
	int			error;
	uint			buf_flags;
	xfs_lsn_t		lsn;

	/*
	 * In this pass we only want to recover all the buffers which have
	 * not been cancelled and are not cancellation buffers themselves.
	 */
	if (xlog_check_buffer_cancelled(log, buf_f->blf_blkno,
			buf_f->blf_len, buf_f->blf_flags)) {
		trace_xfs_log_recover_buf_cancel(log, buf_f);
		return 0;
	}

	trace_xfs_log_recover_buf_recover(log, buf_f);

	buf_flags = 0;
	if (buf_f->blf_flags & XFS_BLF_INODE_BUF)
		buf_flags |= XBF_UNMAPPED;

	error = xfs_buf_read(mp->m_ddev_targp, buf_f->blf_blkno, buf_f->blf_len,
			  buf_flags, &bp, NULL);
	if (error)
		return error;

	/*
	 * Recover the buffer only if we get an LSN from it and it's less than
	 * the lsn of the transaction we are replaying.
	 *
	 * Note that we have to be extremely careful of readahead here.
	 * Readahead does not attach verifiers to the buffers, so if we don't
	 * actually do any replay after readahead because the LSN we found in
	 * the buffer is more recent than the current transaction, then we need
	 * to attach the verifier directly. Failure to do so can cause future
	 * recovery actions (e.g. EFI and unlinked list recovery) to operate on
	 * buffers that never get the verifier attached. This can lead to
	 * blocks on disk having the correct content but a stale CRC.
	 *
	 * It is safe to assume these clean buffers are currently up to date.
	 * If the buffer is dirtied by a later transaction being replayed, then
	 * the verifier will be reset to match whatever recover turns that
	 * buffer into.
	 */
	lsn = xlog_recover_get_buf_lsn(mp, bp);
	if (lsn && lsn != -1 && XFS_LSN_CMP(lsn, current_lsn) >= 0) {
		trace_xfs_log_recover_buf_skip(log, buf_f);
		xlog_recover_validate_buf_type(mp, bp, buf_f, NULLCOMMITLSN);
		goto out_release;
	}

	if (buf_f->blf_flags & XFS_BLF_INODE_BUF) {
		error = xlog_recover_do_inode_buffer(mp, item, bp, buf_f);
		if (error)
			goto out_release;
	} else if (buf_f->blf_flags &
		  (XFS_BLF_UDQUOT_BUF|XFS_BLF_PDQUOT_BUF|XFS_BLF_GDQUOT_BUF)) {
		bool	dirty;

		dirty = xlog_recover_do_dquot_buffer(mp, log, item, bp, buf_f);
		if (!dirty)
			goto out_release;
	} else {
		xlog_recover_do_reg_buffer(mp, item, bp, buf_f, current_lsn);
	}

	/*
	 * Perform delayed write on the buffer.  Asynchronous writes will be
	 * slower when taking into account all the buffers to be flushed.
	 *
	 * Also make sure that only inode buffers with good sizes stay in
	 * the buffer cache.  The kernel moves inodes in buffers of 1 block
	 * or inode_cluster_size bytes, whichever is bigger.  The inode
	 * buffers in the log can be a different size if the log was generated
	 * by an older kernel using unclustered inode buffers or a newer kernel
	 * running with a different inode cluster size.  Regardless, if
	 * the inode buffer size isn't max(blocksize, inode_cluster_size)
	 * for *our* value of inode_cluster_size, then we need to keep
	 * the buffer out of the buffer cache so that the buffer won't
	 * overlap with future reads of those inodes.
	 */
	if (XFS_DINODE_MAGIC ==
	    be16_to_cpu(*((__be16 *)xfs_buf_offset(bp, 0))) &&
	    (BBTOB(bp->b_length) != M_IGEO(log->l_mp)->inode_cluster_size)) {
		xfs_buf_stale(bp);
		error = xfs_bwrite(bp);
	} else {
		ASSERT(bp->b_mount == mp);
		bp->b_iodone = xlog_recover_iodone;
		xfs_buf_delwri_queue(bp, buffer_list);
	}

out_release:
	xfs_buf_relse(bp);
	return error;
}
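
/*
 * Note on the LSN comparison above: xfs_lsn_t packs a 32 bit log cycle
 * in the high word and a log block in the low word, and XFS_LSN_CMP()
 * orders by cycle first, then block. XFS_LSN_CMP(lsn, current_lsn) >= 0
 * therefore means the on-disk object was written at or after the
 * transaction being replayed, so the replay must be skipped.
 */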
/*
 * Inode fork owner changes
 *
 * If we have been told that we have to reparent the inode fork, it's because an
 * extent swap operation on a CRC enabled filesystem has been done and we are
 * replaying it. We need to walk the BMBT of the appropriate fork and change the
 * owners of all the extents to match the inode being replayed.
 *
 * The complexity here is that we don't have an inode context to work with, so
 * after we've replayed the inode we need to instantiate one. This is where the
 * fun begins.
 *
 * We are in the middle of log recovery, so we can't run transactions. That
 * means we cannot use cache coherent inode instantiation via xfs_iget(), as
 * that will result in the corresponding iput() running the inode through
 * xfs_inactive(). If we've just replayed an inode core that changes the link
 * count to zero (i.e. it's been unlinked), then xfs_inactive() will run
 * transactions (bad!).
 *
 * So, to avoid this, we instantiate an inode directly from the inode core we've
 * just recovered. We have the buffer still locked, and all we really need to
 * instantiate is the inode core and the forks being modified. We can do this
 * manually, then run the inode btree owner change, and then tear down the
 * xfs_inode without having to run any transactions at all.
 *
 * Also, because we don't have a transaction context available here but need to
 * gather all the buffers we modify for writeback, we pass the buffer_list
 * for the operation to use.
 */

STATIC int
xfs_recover_inode_owner_change(
	struct xfs_mount	*mp,
	struct xfs_dinode	*dip,
	struct xfs_inode_log_format *in_f,
	struct list_head	*buffer_list)
{
	struct xfs_inode	*ip;
	int			error;

	ASSERT(in_f->ilf_fields & (XFS_ILOG_DOWNER|XFS_ILOG_AOWNER));

	ip = xfs_inode_alloc(mp, in_f->ilf_ino);
	if (!ip)
		return -ENOMEM;

	/* instantiate the inode */
	xfs_inode_from_disk(ip, dip);
	ASSERT(ip->i_d.di_version >= 3);

	error = xfs_iformat_fork(ip, dip);
	if (error)
		goto out_free_ip;

	if (!xfs_inode_verify_forks(ip)) {
		error = -EFSCORRUPTED;
		goto out_free_ip;
	}

	if (in_f->ilf_fields & XFS_ILOG_DOWNER) {
		ASSERT(in_f->ilf_fields & XFS_ILOG_DBROOT);
		error = xfs_bmbt_change_owner(NULL, ip, XFS_DATA_FORK,
					      ip->i_ino, buffer_list);
		if (error)
			goto out_free_ip;
	}

	if (in_f->ilf_fields & XFS_ILOG_AOWNER) {
		ASSERT(in_f->ilf_fields & XFS_ILOG_ABROOT);
		error = xfs_bmbt_change_owner(NULL, ip, XFS_ATTR_FORK,
					      ip->i_ino, buffer_list);
		if (error)
			goto out_free_ip;
	}

out_free_ip:
	xfs_irele(ip);
	return error;
}
STATIC int
xlog_recover_inode_pass2(
	struct xlog			*log,
	struct list_head		*buffer_list,
	struct xlog_recover_item	*item,
	xfs_lsn_t			current_lsn)
{
	struct xfs_inode_log_format	*in_f;
	xfs_mount_t		*mp = log->l_mp;
	struct xfs_buf		*bp;
	struct xfs_dinode	*dip;
	int			len;
	char			*src;
	char			*dest;
	int			error;
	int			attr_index;
	uint			fields;
	struct xfs_log_dinode	*ldip;
	uint			isize;
	int			need_free = 0;

	if (item->ri_buf[0].i_len == sizeof(struct xfs_inode_log_format)) {
		in_f = item->ri_buf[0].i_addr;
	} else {
		in_f = kmem_alloc(sizeof(struct xfs_inode_log_format), 0);
		need_free = 1;
		error = xfs_inode_item_format_convert(&item->ri_buf[0], in_f);
		if (error)
			goto error;
	}

	/*
	 * Inode buffers can be freed, look out for it,
	 * and do not replay the inode.
	 */
	if (xlog_check_buffer_cancelled(log, in_f->ilf_blkno,
					in_f->ilf_len, 0)) {
		error = 0;
		trace_xfs_log_recover_inode_cancel(log, in_f);
		goto error;
	}
	trace_xfs_log_recover_inode_recover(log, in_f);

	error = xfs_buf_read(mp->m_ddev_targp, in_f->ilf_blkno, in_f->ilf_len,
			  0, &bp, &xfs_inode_buf_ops);
	if (error)
		goto error;
	ASSERT(in_f->ilf_fields & XFS_ILOG_CORE);
	dip = xfs_buf_offset(bp, in_f->ilf_boffset);

	/*
	 * Make sure the place we're flushing out to really looks
	 * like an inode!
	 */
	if (XFS_IS_CORRUPT(mp, !xfs_verify_magic16(bp, dip->di_magic))) {
		xfs_alert(mp,
	"%s: Bad inode magic number, dip = "PTR_FMT", dino bp = "PTR_FMT", ino = %Ld",
			__func__, dip, bp, in_f->ilf_ino);
		error = -EFSCORRUPTED;
		goto out_release;
	}
	ldip = item->ri_buf[1].i_addr;
	if (XFS_IS_CORRUPT(mp, ldip->di_magic != XFS_DINODE_MAGIC)) {
		xfs_alert(mp,
			"%s: Bad inode log record, rec ptr "PTR_FMT", ino %Ld",
			__func__, item, in_f->ilf_ino);
		error = -EFSCORRUPTED;
		goto out_release;
	}

	/*
	 * If the inode has an LSN in it, recover the inode only if it's less
	 * than the lsn of the transaction we are replaying. Note: we still
	 * need to replay an owner change even though the inode is more recent
	 * than the transaction as there is no guarantee that all the btree
	 * blocks are more recent than this transaction, too.
	 */
	if (dip->di_version >= 3) {
		xfs_lsn_t	lsn = be64_to_cpu(dip->di_lsn);

		if (lsn && lsn != -1 && XFS_LSN_CMP(lsn, current_lsn) >= 0) {
			trace_xfs_log_recover_inode_skip(log, in_f);
			error = 0;
			goto out_owner_change;
		}
	}

	/*
	 * di_flushiter is only valid for v1/2 inodes. All changes for v3 inodes
	 * are transactional and if ordering is necessary we can determine that
	 * more accurately by the LSN field in the V3 inode core. Don't trust
	 * the inode versions as we might be changing them here - use the
	 * superblock flag to determine whether we need to look at di_flushiter
	 * to skip replay when the on disk inode is newer than the log one.
	 */
	if (!xfs_sb_version_hascrc(&mp->m_sb) &&
	    ldip->di_flushiter < be16_to_cpu(dip->di_flushiter)) {
		/*
		 * Deal with the wrap case, DI_MAX_FLUSH is less
		 * than smaller numbers
		 */
		if (be16_to_cpu(dip->di_flushiter) == DI_MAX_FLUSH &&
		    ldip->di_flushiter < (DI_MAX_FLUSH >> 1)) {
			/* do nothing */
		} else {
			trace_xfs_log_recover_inode_skip(log, in_f);
			error = 0;
			goto out_release;
		}
	}

	/* Take the opportunity to reset the flush iteration count */
	ldip->di_flushiter = 0;

	if (unlikely(S_ISREG(ldip->di_mode))) {
		if ((ldip->di_format != XFS_DINODE_FMT_EXTENTS) &&
		    (ldip->di_format != XFS_DINODE_FMT_BTREE)) {
			XFS_CORRUPTION_ERROR("xlog_recover_inode_pass2(3)",
					 XFS_ERRLEVEL_LOW, mp, ldip,
					 sizeof(*ldip));
			xfs_alert(mp,
		"%s: Bad regular inode log record, rec ptr "PTR_FMT", "
		"ino ptr = "PTR_FMT", ino bp = "PTR_FMT", ino %Ld",
				__func__, item, dip, bp, in_f->ilf_ino);
			error = -EFSCORRUPTED;
			goto out_release;
		}
	} else if (unlikely(S_ISDIR(ldip->di_mode))) {
		if ((ldip->di_format != XFS_DINODE_FMT_EXTENTS) &&
		    (ldip->di_format != XFS_DINODE_FMT_BTREE) &&
		    (ldip->di_format != XFS_DINODE_FMT_LOCAL)) {
			XFS_CORRUPTION_ERROR("xlog_recover_inode_pass2(4)",
					     XFS_ERRLEVEL_LOW, mp, ldip,
					     sizeof(*ldip));
			xfs_alert(mp,
		"%s: Bad dir inode log record, rec ptr "PTR_FMT", "
		"ino ptr = "PTR_FMT", ino bp = "PTR_FMT", ino %Ld",
				__func__, item, dip, bp, in_f->ilf_ino);
			error = -EFSCORRUPTED;
			goto out_release;
		}
	}
	if (unlikely(ldip->di_nextents + ldip->di_anextents > ldip->di_nblocks)){
		XFS_CORRUPTION_ERROR("xlog_recover_inode_pass2(5)",
				     XFS_ERRLEVEL_LOW, mp, ldip,
				     sizeof(*ldip));
		xfs_alert(mp,
	"%s: Bad inode log record, rec ptr "PTR_FMT", dino ptr "PTR_FMT", "
	"dino bp "PTR_FMT", ino %Ld, total extents = %d, nblocks = %Ld",
			__func__, item, dip, bp, in_f->ilf_ino,
			ldip->di_nextents + ldip->di_anextents,
			ldip->di_nblocks);
		error = -EFSCORRUPTED;
		goto out_release;
	}
	if (unlikely(ldip->di_forkoff > mp->m_sb.sb_inodesize)) {
		XFS_CORRUPTION_ERROR("xlog_recover_inode_pass2(6)",
				     XFS_ERRLEVEL_LOW, mp, ldip,
				     sizeof(*ldip));
		xfs_alert(mp,
	"%s: Bad inode log record, rec ptr "PTR_FMT", dino ptr "PTR_FMT", "
	"dino bp "PTR_FMT", ino %Ld, forkoff 0x%x", __func__,
			item, dip, bp, in_f->ilf_ino, ldip->di_forkoff);
		error = -EFSCORRUPTED;
		goto out_release;
	}
	isize = xfs_log_dinode_size(ldip->di_version);
	if (unlikely(item->ri_buf[1].i_len > isize)) {
		XFS_CORRUPTION_ERROR("xlog_recover_inode_pass2(7)",
				     XFS_ERRLEVEL_LOW, mp, ldip,
				     sizeof(*ldip));
		xfs_alert(mp,
			"%s: Bad inode log record length %d, rec ptr "PTR_FMT,
			__func__, item->ri_buf[1].i_len, item);
		error = -EFSCORRUPTED;
		goto out_release;
	}

	/* recover the log dinode inode into the on disk inode */
	xfs_log_dinode_to_disk(ldip, dip);

	fields = in_f->ilf_fields;
	if (fields & XFS_ILOG_DEV)
		xfs_dinode_put_rdev(dip, in_f->ilf_u.ilfu_rdev);

	if (in_f->ilf_size == 2)
		goto out_owner_change;
	len = item->ri_buf[2].i_len;
	src = item->ri_buf[2].i_addr;
	ASSERT(in_f->ilf_size <= 4);
	ASSERT((in_f->ilf_size == 3) || (fields & XFS_ILOG_AFORK));
	ASSERT(!(fields & XFS_ILOG_DFORK) ||
	       (len == in_f->ilf_dsize));

	switch (fields & XFS_ILOG_DFORK) {
	case XFS_ILOG_DDATA:
	case XFS_ILOG_DEXT:
		memcpy(XFS_DFORK_DPTR(dip), src, len);
		break;

	case XFS_ILOG_DBROOT:
		xfs_bmbt_to_bmdr(mp, (struct xfs_btree_block *)src, len,
				 (xfs_bmdr_block_t *)XFS_DFORK_DPTR(dip),
				 XFS_DFORK_DSIZE(dip, mp));
		break;

	default:
		/*
		 * There are no data fork flags set.
		 */
		ASSERT((fields & XFS_ILOG_DFORK) == 0);
		break;
	}

	/*
	 * If we logged any attribute data, recover it.  There may or
	 * may not have been any other non-core data logged in this
	 * transaction.
	 */
	if (in_f->ilf_fields & XFS_ILOG_AFORK) {
		if (in_f->ilf_fields & XFS_ILOG_DFORK) {
			attr_index = 3;
		} else {
			attr_index = 2;
		}
		len = item->ri_buf[attr_index].i_len;
		src = item->ri_buf[attr_index].i_addr;
		ASSERT(len == in_f->ilf_asize);

		switch (in_f->ilf_fields & XFS_ILOG_AFORK) {
		case XFS_ILOG_ADATA:
		case XFS_ILOG_AEXT:
			dest = XFS_DFORK_APTR(dip);
			ASSERT(len <= XFS_DFORK_ASIZE(dip, mp));
			memcpy(dest, src, len);
			break;

		case XFS_ILOG_ABROOT:
			dest = XFS_DFORK_APTR(dip);
			xfs_bmbt_to_bmdr(mp, (struct xfs_btree_block *)src,
					 len, (xfs_bmdr_block_t *)dest,
					 XFS_DFORK_ASIZE(dip, mp));
			break;

		default:
			xfs_warn(log->l_mp, "%s: Invalid flag", __func__);
			ASSERT(0);
			error = -EFSCORRUPTED;
			goto out_release;
		}
	}

out_owner_change:
	/* Recover the swapext owner change unless inode has been deleted */
	if ((in_f->ilf_fields & (XFS_ILOG_DOWNER|XFS_ILOG_AOWNER)) &&
	    (dip->di_mode != 0))
		error = xfs_recover_inode_owner_change(mp, dip, in_f,
						       buffer_list);
	/* re-generate the checksum. */
	xfs_dinode_calc_crc(log->l_mp, dip);

	ASSERT(bp->b_mount == mp);
	bp->b_iodone = xlog_recover_iodone;
	xfs_buf_delwri_queue(bp, buffer_list);

out_release:
	xfs_buf_relse(bp);
error:
	if (need_free)
		kmem_free(in_f);
	return error;
}
/*
 * Recover QUOTAOFF records. We simply make a note of it in the xlog
 * structure, so that we know not to do any dquot item or dquot buffer
 * recovery of that type.
 */
STATIC int
xlog_recover_quotaoff_pass1(
	struct xlog			*log,
	struct xlog_recover_item	*item)
{
	xfs_qoff_logformat_t	*qoff_f = item->ri_buf[0].i_addr;
	ASSERT(qoff_f);

	/*
	 * The logitem format's flag tells us if this was user quotaoff,
	 * group/project quotaoff or both.
	 */
	if (qoff_f->qf_flags & XFS_UQUOTA_ACCT)
		log->l_quotaoffs_flag |= XFS_DQ_USER;
	if (qoff_f->qf_flags & XFS_PQUOTA_ACCT)
		log->l_quotaoffs_flag |= XFS_DQ_PROJ;
	if (qoff_f->qf_flags & XFS_GQUOTA_ACCT)
		log->l_quotaoffs_flag |= XFS_DQ_GROUP;

	return 0;
}
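
/*
 * Note that pass 1 runs over the entire log before pass 2 starts, so a
 * quotaoff record anywhere in the log sets l_quotaoffs_flag before any
 * dquot item or dquot buffer is replayed in pass 2, even when the
 * quotaoff record is physically later in the log than those items.
 */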
/*
 * Recover a dquot record
 */
STATIC int
xlog_recover_dquot_pass2(
	struct xlog			*log,
	struct list_head		*buffer_list,
	struct xlog_recover_item	*item,
	xfs_lsn_t			current_lsn)
{
	xfs_mount_t		*mp = log->l_mp;
	struct xfs_buf		*bp;
	struct xfs_disk_dquot	*ddq, *recddq;
	xfs_failaddr_t		fa;
	int			error;
	xfs_dq_logformat_t	*dq_f;
	uint			type;


	/*
	 * Filesystems are required to send in quota flags at mount time.
	 */
	if (mp->m_qflags == 0)
		return 0;

	recddq = item->ri_buf[1].i_addr;
	if (recddq == NULL) {
		xfs_alert(log->l_mp, "NULL dquot in %s.", __func__);
		return -EFSCORRUPTED;
	}
	if (item->ri_buf[1].i_len < sizeof(struct xfs_disk_dquot)) {
		xfs_alert(log->l_mp, "dquot too small (%d) in %s.",
			item->ri_buf[1].i_len, __func__);
		return -EFSCORRUPTED;
	}

	/*
	 * This type of quotas was turned off, so ignore this record.
	 */
	type = recddq->d_flags & (XFS_DQ_USER | XFS_DQ_PROJ | XFS_DQ_GROUP);
	ASSERT(type);
	if (log->l_quotaoffs_flag & type)
		return 0;

	/*
	 * At this point we know that quota was _not_ turned off.
	 * Since the mount flags are not indicating to us otherwise, this
	 * must mean that quota is on, and the dquot needs to be replayed.
	 * Remember that we may not have fully recovered the superblock yet,
	 * so we can't do the usual trick of looking at the SB quota bits.
	 *
	 * The other possibility, of course, is that the quota subsystem was
	 * removed since the last mount - ENOSYS.
	 */
	dq_f = item->ri_buf[0].i_addr;
	ASSERT(dq_f);
	fa = xfs_dquot_verify(mp, recddq, dq_f->qlf_id, 0);
	if (fa) {
		xfs_alert(mp, "corrupt dquot ID 0x%x in log at %pS",
				dq_f->qlf_id, fa);
		return -EFSCORRUPTED;
	}
	ASSERT(dq_f->qlf_len == 1);

	/*
	 * At this point we are assuming that the dquots have been allocated
	 * and hence the buffer has valid dquots stamped in it. It should,
	 * therefore, pass verifier validation. If the dquot is bad, then we'll
	 * return an error here, so we don't need to specifically check
	 * the dquot in the buffer after the verifier has run.
	 */
	error = xfs_trans_read_buf(mp, NULL, mp->m_ddev_targp, dq_f->qlf_blkno,
				   XFS_FSB_TO_BB(mp, dq_f->qlf_len), 0, &bp,
				   &xfs_dquot_buf_ops);
	if (error)
		return error;

	ASSERT(bp);
	ddq = xfs_buf_offset(bp, dq_f->qlf_boffset);

	/*
	 * If the dquot has an LSN in it, recover the dquot only if it's less
	 * than the lsn of the transaction we are replaying.
	 */
	if (xfs_sb_version_hascrc(&mp->m_sb)) {
		struct xfs_dqblk *dqb = (struct xfs_dqblk *)ddq;
		xfs_lsn_t	lsn = be64_to_cpu(dqb->dd_lsn);

		if (lsn && lsn != -1 && XFS_LSN_CMP(lsn, current_lsn) >= 0) {
			goto out_release;
		}
	}

	memcpy(ddq, recddq, item->ri_buf[1].i_len);
	if (xfs_sb_version_hascrc(&mp->m_sb)) {
		xfs_update_cksum((char *)ddq, sizeof(struct xfs_dqblk),
				 XFS_DQUOT_CRC_OFF);
	}

	ASSERT(dq_f->qlf_size == 2);
	ASSERT(bp->b_mount == mp);
	bp->b_iodone = xlog_recover_iodone;
	xfs_buf_delwri_queue(bp, buffer_list);

out_release:
	xfs_buf_relse(bp);
	return 0;
}
/*
 * This routine is called to create an in-core extent free intent
 * item from the efi format structure which was logged on disk.
 * It allocates an in-core efi, copies the extents from the format
 * structure into it, and adds the efi to the AIL with the given
 * LSN.
 */
STATIC int
xlog_recover_efi_pass2(
	struct xlog			*log,
	struct xlog_recover_item	*item,
	xfs_lsn_t			lsn)
{
	int				error;
	struct xfs_mount		*mp = log->l_mp;
	struct xfs_efi_log_item		*efip;
	struct xfs_efi_log_format	*efi_formatp;

	efi_formatp = item->ri_buf[0].i_addr;

	efip = xfs_efi_init(mp, efi_formatp->efi_nextents);
	error = xfs_efi_copy_format(&item->ri_buf[0], &efip->efi_format);
	if (error) {
		xfs_efi_item_free(efip);
		return error;
	}
	atomic_set(&efip->efi_next_extent, efi_formatp->efi_nextents);

	spin_lock(&log->l_ailp->ail_lock);
	/*
	 * The EFI has two references. One for the EFD and one for EFI to ensure
	 * it makes it into the AIL. Insert the EFI into the AIL directly and
	 * drop the EFI reference. Note that xfs_trans_ail_update() drops the
	 * AIL lock.
	 */
	xfs_trans_ail_update(log->l_ailp, &efip->efi_item, lsn);
	xfs_efi_release(efip);
	return 0;
}
/*
 * This routine is called when an EFD format structure is found in a committed
 * transaction in the log. Its purpose is to cancel the corresponding EFI if it
 * was still in the log. To do this it searches the AIL for the EFI with an id
 * equal to that in the EFD format structure. If we find it we drop the EFD
 * reference, which removes the EFI from the AIL and frees it.
 */
STATIC int
xlog_recover_efd_pass2(
	struct xlog			*log,
	struct xlog_recover_item	*item)
{
	xfs_efd_log_format_t	*efd_formatp;
	xfs_efi_log_item_t	*efip = NULL;
	struct xfs_log_item	*lip;
	uint64_t		efi_id;
	struct xfs_ail_cursor	cur;
	struct xfs_ail		*ailp = log->l_ailp;

	efd_formatp = item->ri_buf[0].i_addr;
	ASSERT((item->ri_buf[0].i_len == (sizeof(xfs_efd_log_format_32_t) +
		((efd_formatp->efd_nextents - 1) * sizeof(xfs_extent_32_t)))) ||
	       (item->ri_buf[0].i_len == (sizeof(xfs_efd_log_format_64_t) +
		((efd_formatp->efd_nextents - 1) * sizeof(xfs_extent_64_t)))));
	efi_id = efd_formatp->efd_efi_id;

	/*
	 * Search for the EFI with the id in the EFD format structure in the
	 * AIL.
	 */
	spin_lock(&ailp->ail_lock);
	lip = xfs_trans_ail_cursor_first(ailp, &cur, 0);
	while (lip != NULL) {
		if (lip->li_type == XFS_LI_EFI) {
			efip = (xfs_efi_log_item_t *)lip;
			if (efip->efi_format.efi_id == efi_id) {
				/*
				 * Drop the EFD reference to the EFI. This
				 * removes the EFI from the AIL and frees it.
				 */
				spin_unlock(&ailp->ail_lock);
				xfs_efi_release(efip);
				spin_lock(&ailp->ail_lock);
				break;
			}
		}
		lip = xfs_trans_ail_cursor_next(ailp, &cur);
	}

	xfs_trans_ail_cursor_done(&cur);
	spin_unlock(&ailp->ail_lock);

	return 0;
}
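
/*
 * The intent/done pairing implemented above is the template for the
 * RUI/RUD, CUI/CUD and BUI/BUD handlers that follow: the intent item is
 * inserted into the AIL at its commit lsn, and the matching done item
 * cancels it. Intent items still in the AIL when replay finishes are
 * processed later in recovery instead.
 */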
/*
 * This routine is called to create an in-core extent rmap update
 * item from the rui format structure which was logged on disk.
 * It allocates an in-core rui, copies the extents from the format
 * structure into it, and adds the rui to the AIL with the given
 * LSN.
 */
STATIC int
xlog_recover_rui_pass2(
	struct xlog			*log,
	struct xlog_recover_item	*item,
	xfs_lsn_t			lsn)
{
	int				error;
	struct xfs_mount		*mp = log->l_mp;
	struct xfs_rui_log_item		*ruip;
	struct xfs_rui_log_format	*rui_formatp;

	rui_formatp = item->ri_buf[0].i_addr;

	ruip = xfs_rui_init(mp, rui_formatp->rui_nextents);
	error = xfs_rui_copy_format(&item->ri_buf[0], &ruip->rui_format);
	if (error) {
		xfs_rui_item_free(ruip);
		return error;
	}
	atomic_set(&ruip->rui_next_extent, rui_formatp->rui_nextents);

	spin_lock(&log->l_ailp->ail_lock);
	/*
	 * The RUI has two references. One for the RUD and one for RUI to ensure
	 * it makes it into the AIL. Insert the RUI into the AIL directly and
	 * drop the RUI reference. Note that xfs_trans_ail_update() drops the
	 * AIL lock.
	 */
	xfs_trans_ail_update(log->l_ailp, &ruip->rui_item, lsn);
	xfs_rui_release(ruip);
	return 0;
}
/*
 * This routine is called when an RUD format structure is found in a committed
 * transaction in the log. Its purpose is to cancel the corresponding RUI if it
 * was still in the log. To do this it searches the AIL for the RUI with an id
 * equal to that in the RUD format structure. If we find it we drop the RUD
 * reference, which removes the RUI from the AIL and frees it.
 */
STATIC int
xlog_recover_rud_pass2(
	struct xlog			*log,
	struct xlog_recover_item	*item)
{
	struct xfs_rud_log_format	*rud_formatp;
	struct xfs_rui_log_item		*ruip = NULL;
	struct xfs_log_item		*lip;
	uint64_t			rui_id;
	struct xfs_ail_cursor		cur;
	struct xfs_ail			*ailp = log->l_ailp;

	rud_formatp = item->ri_buf[0].i_addr;
	ASSERT(item->ri_buf[0].i_len == sizeof(struct xfs_rud_log_format));
	rui_id = rud_formatp->rud_rui_id;

	/*
	 * Search for the RUI with the id in the RUD format structure in the
	 * AIL.
	 */
	spin_lock(&ailp->ail_lock);
	lip = xfs_trans_ail_cursor_first(ailp, &cur, 0);
	while (lip != NULL) {
		if (lip->li_type == XFS_LI_RUI) {
			ruip = (struct xfs_rui_log_item *)lip;
			if (ruip->rui_format.rui_id == rui_id) {
				/*
				 * Drop the RUD reference to the RUI. This
				 * removes the RUI from the AIL and frees it.
				 */
				spin_unlock(&ailp->ail_lock);
				xfs_rui_release(ruip);
				spin_lock(&ailp->ail_lock);
				break;
			}
		}
		lip = xfs_trans_ail_cursor_next(ailp, &cur);
	}

	xfs_trans_ail_cursor_done(&cur);
	spin_unlock(&ailp->ail_lock);

	return 0;
}
/*
 * Copy a CUI format buffer from the given buf, and into the destination
 * CUI format structure.  The CUI/CUD items were designed not to need any
 * special alignment handling.
 */
static int
xfs_cui_copy_format(
	struct xfs_log_iovec		*buf,
	struct xfs_cui_log_format	*dst_cui_fmt)
{
	struct xfs_cui_log_format	*src_cui_fmt;
	uint				len;

	src_cui_fmt = buf->i_addr;
	len = xfs_cui_log_format_sizeof(src_cui_fmt->cui_nextents);

	if (buf->i_len == len) {
		memcpy(dst_cui_fmt, src_cui_fmt, len);
		return 0;
	}
	XFS_ERROR_REPORT(__func__, XFS_ERRLEVEL_LOW, NULL);
	return -EFSCORRUPTED;
}
/*
 * This routine is called to create an in-core extent refcount update
 * item from the cui format structure which was logged on disk.
 * It allocates an in-core cui, copies the extents from the format
 * structure into it, and adds the cui to the AIL with the given
 * LSN.
 */
STATIC int
xlog_recover_cui_pass2(
	struct xlog			*log,
	struct xlog_recover_item	*item,
	xfs_lsn_t			lsn)
{
	int				error;
	struct xfs_mount		*mp = log->l_mp;
	struct xfs_cui_log_item		*cuip;
	struct xfs_cui_log_format	*cui_formatp;

	cui_formatp = item->ri_buf[0].i_addr;

	cuip = xfs_cui_init(mp, cui_formatp->cui_nextents);
	error = xfs_cui_copy_format(&item->ri_buf[0], &cuip->cui_format);
	if (error) {
		xfs_cui_item_free(cuip);
		return error;
	}
	atomic_set(&cuip->cui_next_extent, cui_formatp->cui_nextents);

	spin_lock(&log->l_ailp->ail_lock);
	/*
	 * The CUI has two references. One for the CUD and one for CUI to ensure
	 * it makes it into the AIL. Insert the CUI into the AIL directly and
	 * drop the CUI reference. Note that xfs_trans_ail_update() drops the
	 * AIL lock.
	 */
	xfs_trans_ail_update(log->l_ailp, &cuip->cui_item, lsn);
	xfs_cui_release(cuip);
	return 0;
}
/*
 * This routine is called when a CUD format structure is found in a committed
 * transaction in the log. Its purpose is to cancel the corresponding CUI if it
 * was still in the log. To do this it searches the AIL for the CUI with an id
 * equal to that in the CUD format structure. If we find it we drop the CUD
 * reference, which removes the CUI from the AIL and frees it.
 */
STATIC int
xlog_recover_cud_pass2(
	struct xlog			*log,
	struct xlog_recover_item	*item)
{
	struct xfs_cud_log_format	*cud_formatp;
	struct xfs_cui_log_item		*cuip = NULL;
	struct xfs_log_item		*lip;
	uint64_t			cui_id;
	struct xfs_ail_cursor		cur;
	struct xfs_ail			*ailp = log->l_ailp;

	cud_formatp = item->ri_buf[0].i_addr;
	if (item->ri_buf[0].i_len != sizeof(struct xfs_cud_log_format)) {
		XFS_ERROR_REPORT(__func__, XFS_ERRLEVEL_LOW, log->l_mp);
		return -EFSCORRUPTED;
	}
	cui_id = cud_formatp->cud_cui_id;

	/*
	 * Search for the CUI with the id in the CUD format structure in the
	 * AIL.
	 */
	spin_lock(&ailp->ail_lock);
	lip = xfs_trans_ail_cursor_first(ailp, &cur, 0);
	while (lip != NULL) {
		if (lip->li_type == XFS_LI_CUI) {
			cuip = (struct xfs_cui_log_item *)lip;
			if (cuip->cui_format.cui_id == cui_id) {
				/*
				 * Drop the CUD reference to the CUI. This
				 * removes the CUI from the AIL and frees it.
				 */
				spin_unlock(&ailp->ail_lock);
				xfs_cui_release(cuip);
				spin_lock(&ailp->ail_lock);
				break;
			}
		}
		lip = xfs_trans_ail_cursor_next(ailp, &cur);
	}

	xfs_trans_ail_cursor_done(&cur);
	spin_unlock(&ailp->ail_lock);

	return 0;
}
/*
 * Copy a BUI format buffer from the given buf, and into the destination
 * BUI format structure.  The BUI/BUD items were designed not to need any
 * special alignment handling.
 */
static int
xfs_bui_copy_format(
	struct xfs_log_iovec		*buf,
	struct xfs_bui_log_format	*dst_bui_fmt)
{
	struct xfs_bui_log_format	*src_bui_fmt;
	uint				len;

	src_bui_fmt = buf->i_addr;
	len = xfs_bui_log_format_sizeof(src_bui_fmt->bui_nextents);

	if (buf->i_len == len) {
		memcpy(dst_bui_fmt, src_bui_fmt, len);
		return 0;
	}
	XFS_ERROR_REPORT(__func__, XFS_ERRLEVEL_LOW, NULL);
	return -EFSCORRUPTED;
}
/*
 * This routine is called to create an in-core extent bmap update
 * item from the bui format structure which was logged on disk.
 * It allocates an in-core bui, copies the extents from the format
 * structure into it, and adds the bui to the AIL with the given
 * LSN.
 */
STATIC int
xlog_recover_bui_pass2(
	struct xlog			*log,
	struct xlog_recover_item	*item,
	xfs_lsn_t			lsn)
{
	int				error;
	struct xfs_mount		*mp = log->l_mp;
	struct xfs_bui_log_item		*buip;
	struct xfs_bui_log_format	*bui_formatp;

	bui_formatp = item->ri_buf[0].i_addr;

	if (bui_formatp->bui_nextents != XFS_BUI_MAX_FAST_EXTENTS) {
		XFS_ERROR_REPORT(__func__, XFS_ERRLEVEL_LOW, log->l_mp);
		return -EFSCORRUPTED;
	}
	buip = xfs_bui_init(mp);
	error = xfs_bui_copy_format(&item->ri_buf[0], &buip->bui_format);
	if (error) {
		xfs_bui_item_free(buip);
		return error;
	}
	atomic_set(&buip->bui_next_extent, bui_formatp->bui_nextents);

	spin_lock(&log->l_ailp->ail_lock);
	/*
	 * The BUI has two references. One for the BUD and one for BUI to ensure
	 * it makes it into the AIL. Insert the BUI into the AIL directly and
	 * drop the BUI reference. Note that xfs_trans_ail_update() drops the
	 * AIL lock.
	 */
	xfs_trans_ail_update(log->l_ailp, &buip->bui_item, lsn);
	xfs_bui_release(buip);
	return 0;
}
/*
 * This routine is called when a BUD format structure is found in a committed
 * transaction in the log. Its purpose is to cancel the corresponding BUI if it
 * was still in the log. To do this it searches the AIL for the BUI with an id
 * equal to that in the BUD format structure. If we find it we drop the BUD
 * reference, which removes the BUI from the AIL and frees it.
 */
STATIC int
xlog_recover_bud_pass2(
	struct xlog			*log,
	struct xlog_recover_item	*item)
{
	struct xfs_bud_log_format	*bud_formatp;
	struct xfs_bui_log_item		*buip = NULL;
	struct xfs_log_item		*lip;
	uint64_t			bui_id;
	struct xfs_ail_cursor		cur;
	struct xfs_ail			*ailp = log->l_ailp;

	bud_formatp = item->ri_buf[0].i_addr;
	if (item->ri_buf[0].i_len != sizeof(struct xfs_bud_log_format)) {
		XFS_ERROR_REPORT(__func__, XFS_ERRLEVEL_LOW, log->l_mp);
		return -EFSCORRUPTED;
	}
	bui_id = bud_formatp->bud_bui_id;

	/*
	 * Search for the BUI with the id in the BUD format structure in the
	 * AIL.
	 */
	spin_lock(&ailp->ail_lock);
	lip = xfs_trans_ail_cursor_first(ailp, &cur, 0);
	while (lip != NULL) {
		if (lip->li_type == XFS_LI_BUI) {
			buip = (struct xfs_bui_log_item *)lip;
			if (buip->bui_format.bui_id == bui_id) {
				/*
				 * Drop the BUD reference to the BUI. This
				 * removes the BUI from the AIL and frees it.
				 */
				spin_unlock(&ailp->ail_lock);
				xfs_bui_release(buip);
				spin_lock(&ailp->ail_lock);
				break;
			}
		}
		lip = xfs_trans_ail_cursor_next(ailp, &cur);
	}

	xfs_trans_ail_cursor_done(&cur);
	spin_unlock(&ailp->ail_lock);

	return 0;
}
/*
 * This routine is called when an inode create format structure is found in a
 * committed transaction in the log.  Its purpose is to initialise the inodes
 * being allocated on disk. This requires us to get inode cluster buffers that
 * match the range to be initialised, stamped with inode templates and written
 * by delayed write so that subsequent modifications will hit the cached buffer
 * and only need writing out at the end of recovery.
 */
STATIC int
xlog_recover_do_icreate_pass2(
	struct xlog		*log,
	struct list_head	*buffer_list,
	xlog_recover_item_t	*item)
{
	struct xfs_mount	*mp = log->l_mp;
	struct xfs_icreate_log	*icl;
	struct xfs_ino_geometry	*igeo = M_IGEO(mp);
	xfs_agnumber_t		agno;
	xfs_agblock_t		agbno;
	unsigned int		count;
	unsigned int		isize;
	xfs_agblock_t		length;
	int			bb_per_cluster;
	int			cancel_count;
	int			nbufs;
	int			i;

	icl = (struct xfs_icreate_log *)item->ri_buf[0].i_addr;
	if (icl->icl_type != XFS_LI_ICREATE) {
		xfs_warn(log->l_mp, "xlog_recover_do_icreate_trans: bad type");
		return -EINVAL;
	}

	if (icl->icl_size != 1) {
		xfs_warn(log->l_mp, "xlog_recover_do_icreate_trans: bad icl size");
		return -EINVAL;
	}

	agno = be32_to_cpu(icl->icl_ag);
	if (agno >= mp->m_sb.sb_agcount) {
		xfs_warn(log->l_mp, "xlog_recover_do_icreate_trans: bad agno");
		return -EINVAL;
	}
	agbno = be32_to_cpu(icl->icl_agbno);
	if (!agbno || agbno == NULLAGBLOCK || agbno >= mp->m_sb.sb_agblocks) {
		xfs_warn(log->l_mp, "xlog_recover_do_icreate_trans: bad agbno");
		return -EINVAL;
	}
	isize = be32_to_cpu(icl->icl_isize);
	if (isize != mp->m_sb.sb_inodesize) {
		xfs_warn(log->l_mp, "xlog_recover_do_icreate_trans: bad isize");
		return -EINVAL;
	}
	count = be32_to_cpu(icl->icl_count);
	if (!count) {
		xfs_warn(log->l_mp, "xlog_recover_do_icreate_trans: bad count");
		return -EINVAL;
	}
	length = be32_to_cpu(icl->icl_length);
	if (!length || length >= mp->m_sb.sb_agblocks) {
		xfs_warn(log->l_mp, "xlog_recover_do_icreate_trans: bad length");
		return -EINVAL;
	}

	/*
	 * The inode chunk is either full or sparse and we only support
	 * m_ino_geo.ialloc_min_blks sized sparse allocations at this time.
	 */
	if (length != igeo->ialloc_blks &&
	    length != igeo->ialloc_min_blks) {
		xfs_warn(log->l_mp,
			 "%s: unsupported chunk length", __FUNCTION__);
		return -EINVAL;
	}

	/* verify inode count is consistent with extent length */
	if ((count >> mp->m_sb.sb_inopblog) != length) {
		xfs_warn(log->l_mp,
			 "%s: inconsistent inode count and chunk length",
			 __FUNCTION__);
		return -EINVAL;
	}

	/*
	 * The icreate transaction can cover multiple cluster buffers and these
	 * buffers could have been freed and reused. Check the individual
	 * buffers for cancellation so we don't overwrite anything written after
	 * cancellation.
	 */
	bb_per_cluster = XFS_FSB_TO_BB(mp, igeo->blocks_per_cluster);
	nbufs = length / igeo->blocks_per_cluster;
	for (i = 0, cancel_count = 0; i < nbufs; i++) {
		xfs_daddr_t	daddr;

		daddr = XFS_AGB_TO_DADDR(mp, agno,
				agbno + i * igeo->blocks_per_cluster);
		if (xlog_check_buffer_cancelled(log, daddr, bb_per_cluster, 0))
			cancel_count++;
	}

	/*
	 * We currently only use icreate for a single allocation at a time. This
	 * means we should expect either all or none of the buffers to be
	 * cancelled. Be conservative and skip replay if at least one buffer is
	 * cancelled, but warn the user that something is awry if the buffers
	 * are not consistent.
	 *
	 * XXX: This must be refined to only skip cancelled clusters once we use
	 * icreate for multiple chunk allocations.
	 */
	ASSERT(!cancel_count || cancel_count == nbufs);
	if (cancel_count) {
		if (cancel_count != nbufs)
			xfs_warn(mp,
	"WARNING: partial inode chunk cancellation, skipped icreate.");
		trace_xfs_log_recover_icreate_cancel(log, icl);
		return 0;
	}

	trace_xfs_log_recover_icreate_recover(log, icl);
	return xfs_ialloc_inode_init(mp, NULL, buffer_list, count, agno, agbno,
				     length, be32_to_cpu(icl->icl_gen));
}
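
/*
 * Example of the count/length consistency check above: with 4k blocks
 * and 512 byte inodes, sb_inopblog is 3 (8 inodes per block), so a
 * 64 inode chunk must have an extent length of 64 >> 3 = 8 blocks.
 */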
STATIC void
xlog_recover_buffer_ra_pass2(
	struct xlog			*log,
	struct xlog_recover_item	*item)
{
	struct xfs_buf_log_format	*buf_f = item->ri_buf[0].i_addr;
	struct xfs_mount		*mp = log->l_mp;

	if (xlog_peek_buffer_cancelled(log, buf_f->blf_blkno,
			buf_f->blf_len, buf_f->blf_flags)) {
		return;
	}

	xfs_buf_readahead(mp->m_ddev_targp, buf_f->blf_blkno,
				buf_f->blf_len, NULL);
}
STATIC void
xlog_recover_inode_ra_pass2(
	struct xlog			*log,
	struct xlog_recover_item	*item)
{
	struct xfs_inode_log_format	ilf_buf;
	struct xfs_inode_log_format	*ilfp;
	struct xfs_mount		*mp = log->l_mp;
	int			error;

	if (item->ri_buf[0].i_len == sizeof(struct xfs_inode_log_format)) {
		ilfp = item->ri_buf[0].i_addr;
	} else {
		ilfp = &ilf_buf;
		memset(ilfp, 0, sizeof(*ilfp));
		error = xfs_inode_item_format_convert(&item->ri_buf[0], ilfp);
		if (error)
			return;
	}

	if (xlog_peek_buffer_cancelled(log, ilfp->ilf_blkno, ilfp->ilf_len, 0))
		return;

	xfs_buf_readahead(mp->m_ddev_targp, ilfp->ilf_blkno,
				ilfp->ilf_len, &xfs_inode_buf_ra_ops);
}
STATIC void
xlog_recover_dquot_ra_pass2(
	struct xlog			*log,
	struct xlog_recover_item	*item)
{
	struct xfs_mount	*mp = log->l_mp;
	struct xfs_disk_dquot	*recddq;
	struct xfs_dq_logformat	*dq_f;
	uint			type;
	int			len;


	if (mp->m_qflags == 0)
		return;

	recddq = item->ri_buf[1].i_addr;
	if (recddq == NULL)
		return;
	if (item->ri_buf[1].i_len < sizeof(struct xfs_disk_dquot))
		return;

	type = recddq->d_flags & (XFS_DQ_USER | XFS_DQ_PROJ | XFS_DQ_GROUP);
	ASSERT(type);
	if (log->l_quotaoffs_flag & type)
		return;

	dq_f = item->ri_buf[0].i_addr;
	ASSERT(dq_f);
	ASSERT(dq_f->qlf_len == 1);

	len = XFS_FSB_TO_BB(mp, dq_f->qlf_len);
	if (xlog_peek_buffer_cancelled(log, dq_f->qlf_blkno, len, 0))
		return;

	xfs_buf_readahead(mp->m_ddev_targp, dq_f->qlf_blkno, len,
			  &xfs_dquot_buf_ra_ops);
}
STATIC void
xlog_recover_ra_pass2(
	struct xlog			*log,
	struct xlog_recover_item	*item)
{
	switch (ITEM_TYPE(item)) {
	case XFS_LI_BUF:
		xlog_recover_buffer_ra_pass2(log, item);
		break;
	case XFS_LI_INODE:
		xlog_recover_inode_ra_pass2(log, item);
		break;
	case XFS_LI_DQUOT:
		xlog_recover_dquot_ra_pass2(log, item);
		break;
	case XFS_LI_EFI:
	case XFS_LI_EFD:
	case XFS_LI_QUOTAOFF:
	default:
		break;
	}
}
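
/*
 * Readahead is purely an optimisation: the buffers a transaction's
 * items will modify are pulled into the cache asynchronously here so
 * that the subsequent pass 2 replay of the same items does not stall
 * on synchronous reads. Cancelled buffers are skipped as they will
 * never be read for replay.
 */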
STATIC int
xlog_recover_commit_pass1(
	struct xlog			*log,
	struct xlog_recover		*trans,
	struct xlog_recover_item	*item)
{
	trace_xfs_log_recover_item_recover(log, trans, item, XLOG_RECOVER_PASS1);

	switch (ITEM_TYPE(item)) {
	case XFS_LI_BUF:
		return xlog_recover_buffer_pass1(log, item);
	case XFS_LI_QUOTAOFF:
		return xlog_recover_quotaoff_pass1(log, item);
	case XFS_LI_INODE:
	case XFS_LI_EFI:
	case XFS_LI_EFD:
	case XFS_LI_DQUOT:
	case XFS_LI_ICREATE:
	case XFS_LI_RUI:
	case XFS_LI_RUD:
	case XFS_LI_CUI:
	case XFS_LI_CUD:
	case XFS_LI_BUI:
	case XFS_LI_BUD:
		/* nothing to do in pass 1 */
		return 0;
	default:
		xfs_warn(log->l_mp, "%s: invalid item type (%d)",
			__func__, ITEM_TYPE(item));
		ASSERT(0);
		return -EFSCORRUPTED;
	}
}
STATIC int
xlog_recover_commit_pass2(
	struct xlog			*log,
	struct xlog_recover		*trans,
	struct list_head		*buffer_list,
	struct xlog_recover_item	*item)
{
	trace_xfs_log_recover_item_recover(log, trans, item, XLOG_RECOVER_PASS2);

	switch (ITEM_TYPE(item)) {
	case XFS_LI_BUF:
		return xlog_recover_buffer_pass2(log, buffer_list, item,
						 trans->r_lsn);
	case XFS_LI_INODE:
		return xlog_recover_inode_pass2(log, buffer_list, item,
						 trans->r_lsn);
	case XFS_LI_EFI:
		return xlog_recover_efi_pass2(log, item, trans->r_lsn);
	case XFS_LI_EFD:
		return xlog_recover_efd_pass2(log, item);
	case XFS_LI_RUI:
		return xlog_recover_rui_pass2(log, item, trans->r_lsn);
	case XFS_LI_RUD:
		return xlog_recover_rud_pass2(log, item);
	case XFS_LI_CUI:
		return xlog_recover_cui_pass2(log, item, trans->r_lsn);
	case XFS_LI_CUD:
		return xlog_recover_cud_pass2(log, item);
	case XFS_LI_BUI:
		return xlog_recover_bui_pass2(log, item, trans->r_lsn);
	case XFS_LI_BUD:
		return xlog_recover_bud_pass2(log, item);
	case XFS_LI_DQUOT:
		return xlog_recover_dquot_pass2(log, buffer_list, item,
						trans->r_lsn);
	case XFS_LI_ICREATE:
		return xlog_recover_do_icreate_pass2(log, buffer_list, item);
	case XFS_LI_QUOTAOFF:
		/* nothing to do in pass2 */
		return 0;
	default:
		xfs_warn(log->l_mp, "%s: invalid item type (%d)",
			__func__, ITEM_TYPE(item));
		ASSERT(0);
		return -EFSCORRUPTED;
	}
}
STATIC int
xlog_recover_items_pass2(
	struct xlog			*log,
	struct xlog_recover		*trans,
	struct list_head		*buffer_list,
	struct list_head		*item_list)
{
	struct xlog_recover_item	*item;
	int				error = 0;

	list_for_each_entry(item, item_list, ri_list) {
		error = xlog_recover_commit_pass2(log, trans,
					  buffer_list, item);
		if (error)
			return error;
	}

	return error;
}
/*
 * Perform the transaction.
 *
 * If the transaction modifies a buffer or inode, do it now.  Otherwise,
 * EFIs and EFDs get queued up by adding entries into the AIL for them.
 */
STATIC int
xlog_recover_commit_trans(
	struct xlog		*log,
	struct xlog_recover	*trans,
	int			pass,
	struct list_head	*buffer_list)
{
	int				error = 0;
	int				items_queued = 0;
	struct xlog_recover_item	*item;
	struct xlog_recover_item	*next;
	LIST_HEAD			(ra_list);
	LIST_HEAD			(done_list);

	#define XLOG_RECOVER_COMMIT_QUEUE_MAX 100

	hlist_del_init(&trans->r_list);

	error = xlog_recover_reorder_trans(log, trans, pass);
	if (error)
		return error;

	list_for_each_entry_safe(item, next, &trans->r_itemq, ri_list) {
		switch (pass) {
		case XLOG_RECOVER_PASS1:
			error = xlog_recover_commit_pass1(log, trans, item);
			break;
		case XLOG_RECOVER_PASS2:
			xlog_recover_ra_pass2(log, item);
			list_move_tail(&item->ri_list, &ra_list);
			items_queued++;
			if (items_queued >= XLOG_RECOVER_COMMIT_QUEUE_MAX) {
				error = xlog_recover_items_pass2(log, trans,
						buffer_list, &ra_list);
				list_splice_tail_init(&ra_list, &done_list);
				items_queued = 0;
			}

			break;
		default:
			ASSERT(0);
		}

		if (error)
			goto out;
	}

out:
	if (!list_empty(&ra_list)) {
		if (!error)
			error = xlog_recover_items_pass2(log, trans,
					buffer_list, &ra_list);
		list_splice_tail_init(&ra_list, &done_list);
	}

	if (!list_empty(&done_list))
		list_splice_init(&done_list, &trans->r_itemq);

	return error;
}
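
/*
 * The ra_list/done_list shuffle above batches up to
 * XLOG_RECOVER_COMMIT_QUEUE_MAX items: each batch is issued for
 * readahead first and only then replayed, which keeps a pipeline of
 * I/O in flight ahead of the replay work.
 */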
STATIC void
xlog_recover_add_item(
	struct list_head	*head)
{
	xlog_recover_item_t	*item;

	item = kmem_zalloc(sizeof(xlog_recover_item_t), 0);
	INIT_LIST_HEAD(&item->ri_list);
	list_add_tail(&item->ri_list, head);
}
STATIC int
xlog_recover_add_to_cont_trans(
	struct xlog		*log,
	struct xlog_recover	*trans,
	char			*dp,
	int			len)
{
	xlog_recover_item_t	*item;
	char			*ptr, *old_ptr;
	int			old_len;

	/*
	 * If the transaction is empty, the header was split across this and the
	 * previous record. Copy the rest of the header.
	 */
	if (list_empty(&trans->r_itemq)) {
		ASSERT(len <= sizeof(struct xfs_trans_header));
		if (len > sizeof(struct xfs_trans_header)) {
			xfs_warn(log->l_mp, "%s: bad header length", __func__);
			return -EFSCORRUPTED;
		}

		xlog_recover_add_item(&trans->r_itemq);
		ptr = (char *)&trans->r_theader +
				sizeof(struct xfs_trans_header) - len;
		memcpy(ptr, dp, len);
		return 0;
	}

	/* take the tail entry */
	item = list_entry(trans->r_itemq.prev, xlog_recover_item_t, ri_list);

	old_ptr = item->ri_buf[item->ri_cnt-1].i_addr;
	old_len = item->ri_buf[item->ri_cnt-1].i_len;

	ptr = kmem_realloc(old_ptr, len + old_len, 0);
	memcpy(&ptr[old_len], dp, len);
	item->ri_buf[item->ri_cnt-1].i_len += len;
	item->ri_buf[item->ri_cnt-1].i_addr = ptr;
	trace_xfs_log_recover_item_add_cont(log, trans, item, 0);
	return 0;
}
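
/*
 * Example (sizes illustrative only): a 600 byte inode fork region
 * written near the end of a log record may be split by the log writer,
 * with the first 400 bytes in one op and the remaining 200 arriving as
 * a continuation op. The realloc-and-append above rejoins the two
 * halves into a single region before replay.
 */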
/*
 * The next region to add is the start of a new region.  It could be
 * a whole region or it could be the first part of a new region.  Because
 * of this, the assumption here is that the type and size fields of all
 * format structures fit into the first 32 bits of the structure.
 *
 * This works because all regions must be 32 bit aligned.  Therefore, we
 * either have both fields or we have neither field.  In the case we have
 * neither field, the data part of the region is zero length.  We only have
 * a log_op_header and can throw away the header since a new one will appear
 * later.  If we have at least 4 bytes, then we can determine how many regions
 * will appear in the current log item.
 */
STATIC int
xlog_recover_add_to_trans(
	struct xlog		*log,
	struct xlog_recover	*trans,
	char			*dp,
	int			len)
{
	struct xfs_inode_log_format	*in_f;			/* any will do */
	xlog_recover_item_t	*item;
	char			*ptr;

	if (!len)
		return 0;
	if (list_empty(&trans->r_itemq)) {
		/* we need to catch log corruptions here */
		if (*(uint *)dp != XFS_TRANS_HEADER_MAGIC) {
			xfs_warn(log->l_mp, "%s: bad header magic number",
				__func__);
			ASSERT(0);
			return -EFSCORRUPTED;
		}

		if (len > sizeof(struct xfs_trans_header)) {
			xfs_warn(log->l_mp, "%s: bad header length", __func__);
			ASSERT(0);
			return -EFSCORRUPTED;
		}

		/*
		 * The transaction header can be arbitrarily split across op
		 * records. If we don't have the whole thing here, copy what we
		 * do have and handle the rest in the next record.
		 */
		if (len == sizeof(struct xfs_trans_header))
			xlog_recover_add_item(&trans->r_itemq);
		memcpy(&trans->r_theader, dp, len);
		return 0;
	}

	ptr = kmem_alloc(len, 0);
	memcpy(ptr, dp, len);
	in_f = (struct xfs_inode_log_format *)ptr;

	/* take the tail entry */
	item = list_entry(trans->r_itemq.prev, xlog_recover_item_t, ri_list);
	if (item->ri_total != 0 &&
	     item->ri_total == item->ri_cnt) {
		/* tail item is in use, get a new one */
		xlog_recover_add_item(&trans->r_itemq);
		item = list_entry(trans->r_itemq.prev,
					xlog_recover_item_t, ri_list);
	}

	if (item->ri_total == 0) {		/* first region to be added */
		if (in_f->ilf_size == 0 ||
		    in_f->ilf_size > XLOG_MAX_REGIONS_IN_ITEM) {
			xfs_warn(log->l_mp,
		"bad number of regions (%d) in inode log format",
				  in_f->ilf_size);
			ASSERT(0);
			kmem_free(ptr);
			return -EFSCORRUPTED;
		}

		item->ri_total = in_f->ilf_size;
		item->ri_buf =
			kmem_zalloc(item->ri_total * sizeof(xfs_log_iovec_t),
				    0);
	}

	if (item->ri_total <= item->ri_cnt) {
		xfs_warn(log->l_mp,
	"log item region count (%d) overflowed size (%d)",
				item->ri_cnt, item->ri_total);
		ASSERT(0);
		kmem_free(ptr);
		return -EFSCORRUPTED;
	}

	/* Description region is ri_buf[0] */
	item->ri_buf[item->ri_cnt].i_addr = ptr;
	item->ri_buf[item->ri_cnt].i_len  = len;
	item->ri_cnt++;
	trace_xfs_log_recover_item_add(log, trans, item, 0);
	return 0;
}
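
/*
 * The "first 32 bits" assumption above holds because every log item
 * format begins with a 16 bit type and a 16 bit size/region count,
 * e.g. ilf_type/ilf_size in struct xfs_inode_log_format and
 * blf_type/blf_size in struct xfs_buf_log_format, which is why an
 * inode log format pointer can stand in for any item type here.
 */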
/*
 * Free up any resources allocated by the transaction
 *
 * Remember that EFIs, EFDs, and IUNLINKs are handled later.
 */
STATIC void
xlog_recover_free_trans(
	struct xlog_recover	*trans)
{
	xlog_recover_item_t	*item, *n;
	int			i;

	hlist_del_init(&trans->r_list);

	list_for_each_entry_safe(item, n, &trans->r_itemq, ri_list) {
		/* Free the regions in the item. */
		list_del(&item->ri_list);
		for (i = 0; i < item->ri_cnt; i++)
			kmem_free(item->ri_buf[i].i_addr);
		/* Free the item itself */
		kmem_free(item->ri_buf);
		kmem_free(item);
	}
	/* Free the transaction recover structure */
	kmem_free(trans);
}

/*
 * On error or completion, trans is freed.
 */
STATIC int
xlog_recovery_process_trans(
	struct xlog		*log,
	struct xlog_recover	*trans,
	char			*dp,
	unsigned int		len,
	unsigned int		flags,
	int			pass,
	struct list_head	*buffer_list)
{
	int			error = 0;
	bool			freeit = false;

	/* mask off ophdr transaction container flags */
	flags &= ~XLOG_END_TRANS;
	if (flags & XLOG_WAS_CONT_TRANS)
		flags &= ~XLOG_CONTINUE_TRANS;

	/*
	 * Callees must not free the trans structure. We'll decide if we need to
	 * free it or not based on the operation being done and its result.
	 */
	switch (flags) {
	/* expected flag values */
	case 0:
	case XLOG_CONTINUE_TRANS:
		error = xlog_recover_add_to_trans(log, trans, dp, len);
		break;
	case XLOG_WAS_CONT_TRANS:
		error = xlog_recover_add_to_cont_trans(log, trans, dp, len);
		break;
	case XLOG_COMMIT_TRANS:
		error = xlog_recover_commit_trans(log, trans, pass,
						  buffer_list);
		/* success or fail, we are now done with this transaction. */
		freeit = true;
		break;

	/* unexpected flag values */
	case XLOG_UNMOUNT_TRANS:
		/* just skip trans */
		xfs_warn(log->l_mp, "%s: Unmount LR", __func__);
		freeit = true;
		break;
	case XLOG_START_TRANS:
	default:
		xfs_warn(log->l_mp, "%s: bad flag 0x%x", __func__, flags);
		ASSERT(0);
		error = -EFSCORRUPTED;
		break;
	}
	if (error || freeit)
		xlog_recover_free_trans(trans);
	return error;
}
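
/*
 * Summary of the dispatch above:
 *
 *   0, XLOG_CONTINUE_TRANS  -> add a new region to the transaction
 *   XLOG_WAS_CONT_TRANS     -> append to the last (partial) region
 *   XLOG_COMMIT_TRANS       -> replay the transaction, then free it
 *   XLOG_UNMOUNT_TRANS      -> warn and free; nothing to replay
 *   XLOG_START_TRANS, other -> corruption; error out and free
 */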

/*
 * Lookup the transaction recovery structure associated with the ID in the
 * current ophdr. If the transaction doesn't exist and the start flag is set in
 * the ophdr, then allocate a new transaction for future ID matches to find.
 * Either way, return what we found during the lookup - an existing transaction
 * or nothing.
 */
STATIC struct xlog_recover *
xlog_recover_ophdr_to_trans(
	struct hlist_head	rhash[],
	struct xlog_rec_header	*rhead,
	struct xlog_op_header	*ohead)
{
	struct xlog_recover	*trans;
	xlog_tid_t		tid;
	struct hlist_head	*rhp;

	tid = be32_to_cpu(ohead->oh_tid);
	rhp = &rhash[XLOG_RHASH(tid)];
	hlist_for_each_entry(trans, rhp, r_list) {
		if (trans->r_log_tid == tid)
			return trans;
	}

	/*
	 * skip over non-start transaction headers - we could be
	 * processing slack space before the next transaction starts
	 */
	if (!(ohead->oh_flags & XLOG_START_TRANS))
		return NULL;

	ASSERT(be32_to_cpu(ohead->oh_len) == 0);

	/*
	 * This is a new transaction so allocate a new recovery container to
	 * hold the recovery ops that will follow.
	 */
	trans = kmem_zalloc(sizeof(struct xlog_recover), 0);
	trans->r_log_tid = tid;
	trans->r_lsn = be64_to_cpu(rhead->h_lsn);
	INIT_LIST_HEAD(&trans->r_itemq);
	INIT_HLIST_NODE(&trans->r_list);
	hlist_add_head(&trans->r_list, rhp);

	/*
	 * Nothing more to do for this ophdr. Items to be added to this new
	 * transaction will be in subsequent ophdr containers.
	 */
	return NULL;
}

STATIC int
xlog_recover_process_ophdr(
	struct xlog		*log,
	struct hlist_head	rhash[],
	struct xlog_rec_header	*rhead,
	struct xlog_op_header	*ohead,
	char			*dp,
	char			*end,
	int			pass,
	struct list_head	*buffer_list)
{
	struct xlog_recover	*trans;
	unsigned int		len;
	int			error;

	/* Do we understand who wrote this op? */
	if (ohead->oh_clientid != XFS_TRANSACTION &&
	    ohead->oh_clientid != XFS_LOG) {
		xfs_warn(log->l_mp, "%s: bad clientid 0x%x",
			__func__, ohead->oh_clientid);
		ASSERT(0);
		return -EFSCORRUPTED;
	}

	/*
	 * Check the ophdr contains all the data it is supposed to contain.
	 */
	len = be32_to_cpu(ohead->oh_len);
	if (dp + len > end) {
		xfs_warn(log->l_mp, "%s: bad length 0x%x", __func__, len);
		WARN_ON(1);
		return -EFSCORRUPTED;
	}

	trans = xlog_recover_ophdr_to_trans(rhash, rhead, ohead);
	if (!trans) {
		/* nothing to do, so skip over this ophdr */
		return 0;
	}

	/*
	 * The recovered buffer queue is drained only once we know that all
	 * recovery items for the current LSN have been processed. This is
	 * required because:
	 *
	 * - Buffer write submission updates the metadata LSN of the buffer.
	 * - Log recovery skips items with a metadata LSN >= the current LSN of
	 *   the recovery item.
	 * - Separate recovery items against the same metadata buffer can share
	 *   a current LSN. I.e., consider that the LSN of a recovery item is
	 *   defined as the starting LSN of the first record in which its
	 *   transaction appears, that a record can hold multiple transactions,
	 *   and/or that a transaction can span multiple records.
	 *
	 * In other words, we are allowed to submit a buffer from log recovery
	 * once per current LSN. Otherwise, we may incorrectly skip recovery
	 * items and cause corruption.
	 *
	 * We don't know up front whether buffers are updated multiple times per
	 * LSN. Therefore, track the current LSN of each commit log record as it
	 * is processed and drain the queue when it changes. Use commit records
	 * because they are ordered correctly by the logging code.
	 */
	if (log->l_recovery_lsn != trans->r_lsn &&
	    ohead->oh_flags & XLOG_COMMIT_TRANS) {
		error = xfs_buf_delwri_submit(buffer_list);
		if (error)
			return error;
		log->l_recovery_lsn = trans->r_lsn;
	}

	return xlog_recovery_process_trans(log, trans, dp, len,
					   ohead->oh_flags, pass, buffer_list);
}
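
/*
 * Example of why the delwri queue is drained on LSN change rather than per
 * transaction: two transactions committed in the same log record share an
 * r_lsn. If both modify the same buffer and we submitted the queue after
 * the first commit, the buffer's metadata LSN would advance to r_lsn and
 * the second transaction's updates would then be skipped as already
 * written. Draining only when r_lsn changes avoids that.
 */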

/*
 * There are two valid states of the r_state field.  0 indicates that the
 * transaction structure is in a normal state.  We have either seen the
 * start of the transaction or the last operation we added was not a partial
 * operation.  If the last operation we added to the transaction was a
 * partial operation, we need to mark r_state with XLOG_WAS_CONT_TRANS.
 *
 * NOTE: skip LRs with 0 data length.
 */
STATIC int
xlog_recover_process_data(
	struct xlog		*log,
	struct hlist_head	rhash[],
	struct xlog_rec_header	*rhead,
	char			*dp,
	int			pass,
	struct list_head	*buffer_list)
{
	struct xlog_op_header	*ohead;
	char			*end;
	int			num_logops;
	int			error;

	end = dp + be32_to_cpu(rhead->h_len);
	num_logops = be32_to_cpu(rhead->h_num_logops);

	/* check the log format matches our own - else we can't recover */
	if (xlog_header_check_recover(log->l_mp, rhead))
		return -EIO;

	trace_xfs_log_recover_record(log, rhead, pass);
	while ((dp < end) && num_logops) {

		ohead = (struct xlog_op_header *)dp;
		dp += sizeof(*ohead);
		ASSERT(dp <= end);

		/* errors will abort recovery */
		error = xlog_recover_process_ophdr(log, rhash, rhead, ohead,
						   dp, end, pass, buffer_list);
		if (error)
			return error;

		dp += be32_to_cpu(ohead->oh_len);
		num_logops--;
	}
	return 0;
}
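
/*
 * Record body layout walked by the loop above, for reference:
 *
 *   | op header | oh_len payload | op header | oh_len payload | ...
 *
 * dp advances by sizeof(struct xlog_op_header) plus oh_len per iteration,
 * for h_num_logops operations or until the record body is exhausted.
 */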

/* Recover the EFI if necessary. */
STATIC int
xlog_recover_process_efi(
	struct xfs_mount	*mp,
	struct xfs_ail		*ailp,
	struct xfs_log_item	*lip)
{
	struct xfs_efi_log_item	*efip;
	int			error;

	/*
	 * Skip EFIs that we've already processed.
	 */
	efip = container_of(lip, struct xfs_efi_log_item, efi_item);
	if (test_bit(XFS_EFI_RECOVERED, &efip->efi_flags))
		return 0;

	spin_unlock(&ailp->ail_lock);
	error = xfs_efi_recover(mp, efip);
	spin_lock(&ailp->ail_lock);

	return error;
}

/* Release the EFI since we're cancelling everything. */
STATIC void
xlog_recover_cancel_efi(
	struct xfs_mount	*mp,
	struct xfs_ail		*ailp,
	struct xfs_log_item	*lip)
{
	struct xfs_efi_log_item	*efip;

	efip = container_of(lip, struct xfs_efi_log_item, efi_item);

	spin_unlock(&ailp->ail_lock);
	xfs_efi_release(efip);
	spin_lock(&ailp->ail_lock);
}

/* Recover the RUI if necessary. */
STATIC int
xlog_recover_process_rui(
	struct xfs_mount	*mp,
	struct xfs_ail		*ailp,
	struct xfs_log_item	*lip)
{
	struct xfs_rui_log_item	*ruip;
	int			error;

	/*
	 * Skip RUIs that we've already processed.
	 */
	ruip = container_of(lip, struct xfs_rui_log_item, rui_item);
	if (test_bit(XFS_RUI_RECOVERED, &ruip->rui_flags))
		return 0;

	spin_unlock(&ailp->ail_lock);
	error = xfs_rui_recover(mp, ruip);
	spin_lock(&ailp->ail_lock);

	return error;
}

/* Release the RUI since we're cancelling everything. */
STATIC void
xlog_recover_cancel_rui(
	struct xfs_mount	*mp,
	struct xfs_ail		*ailp,
	struct xfs_log_item	*lip)
{
	struct xfs_rui_log_item	*ruip;

	ruip = container_of(lip, struct xfs_rui_log_item, rui_item);

	spin_unlock(&ailp->ail_lock);
	xfs_rui_release(ruip);
	spin_lock(&ailp->ail_lock);
}

/* Recover the CUI if necessary. */
STATIC int
xlog_recover_process_cui(
	struct xfs_trans	*parent_tp,
	struct xfs_ail		*ailp,
	struct xfs_log_item	*lip)
{
	struct xfs_cui_log_item	*cuip;
	int			error;

	/*
	 * Skip CUIs that we've already processed.
	 */
	cuip = container_of(lip, struct xfs_cui_log_item, cui_item);
	if (test_bit(XFS_CUI_RECOVERED, &cuip->cui_flags))
		return 0;

	spin_unlock(&ailp->ail_lock);
	error = xfs_cui_recover(parent_tp, cuip);
	spin_lock(&ailp->ail_lock);

	return error;
}

/* Release the CUI since we're cancelling everything. */
STATIC void
xlog_recover_cancel_cui(
	struct xfs_mount	*mp,
	struct xfs_ail		*ailp,
	struct xfs_log_item	*lip)
{
	struct xfs_cui_log_item	*cuip;

	cuip = container_of(lip, struct xfs_cui_log_item, cui_item);

	spin_unlock(&ailp->ail_lock);
	xfs_cui_release(cuip);
	spin_lock(&ailp->ail_lock);
}

/* Recover the BUI if necessary. */
STATIC int
xlog_recover_process_bui(
	struct xfs_trans	*parent_tp,
	struct xfs_ail		*ailp,
	struct xfs_log_item	*lip)
{
	struct xfs_bui_log_item	*buip;
	int			error;

	/*
	 * Skip BUIs that we've already processed.
	 */
	buip = container_of(lip, struct xfs_bui_log_item, bui_item);
	if (test_bit(XFS_BUI_RECOVERED, &buip->bui_flags))
		return 0;

	spin_unlock(&ailp->ail_lock);
	error = xfs_bui_recover(parent_tp, buip);
	spin_lock(&ailp->ail_lock);

	return error;
}

/* Release the BUI since we're cancelling everything. */
STATIC void
xlog_recover_cancel_bui(
	struct xfs_mount	*mp,
	struct xfs_ail		*ailp,
	struct xfs_log_item	*lip)
{
	struct xfs_bui_log_item	*buip;

	buip = container_of(lip, struct xfs_bui_log_item, bui_item);

	spin_unlock(&ailp->ail_lock);
	xfs_bui_release(buip);
	spin_lock(&ailp->ail_lock);
}

/* Is this log item a deferred action intent? */
static inline bool xlog_item_is_intent(struct xfs_log_item *lip)
{
	switch (lip->li_type) {
	case XFS_LI_EFI:
	case XFS_LI_RUI:
	case XFS_LI_CUI:
	case XFS_LI_BUI:
		return true;
	default:
		return false;
	}
}
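
/*
 * The four intent types listed here should match the switches in
 * xlog_recover_process_intents() and xlog_recover_cancel_intents() below;
 * a type handled in one place but not the others would likely trip the
 * "no intents left" assertions or leave pinned items in the AIL.
 */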

/* Take all the collected deferred ops and finish them in order. */
static int
xlog_finish_defer_ops(
	struct xfs_trans	*parent_tp)
{
	struct xfs_mount	*mp = parent_tp->t_mountp;
	struct xfs_trans	*tp;
	int64_t			freeblks;
	uint			resblks;
	int			error;

	/*
	 * We're finishing the defer_ops that accumulated as a result of
	 * recovering unfinished intent items during log recovery.  We
	 * reserve an itruncate transaction because it is the largest
	 * permanent transaction type.  Since we're the only user of the fs
	 * right now, take 93% (15/16) of the available free blocks.  Use
	 * weird math to avoid a 64-bit division.
	 */
	freeblks = percpu_counter_sum(&mp->m_fdblocks);
	if (freeblks <= 0)
		return -ENOSPC;
	resblks = min_t(int64_t, UINT_MAX, freeblks);
	resblks = (resblks * 15) >> 4;
	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_itruncate, resblks,
			0, XFS_TRANS_RESERVE, &tp);
	if (error)
		return error;
	/* transfer all collected dfops to this transaction */
	xfs_defer_move(tp, parent_tp);

	return xfs_trans_commit(tp);
}
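
/*
 * The "weird math" above in numbers: with freeblks = 1,000,000,
 * resblks = (1000000 * 15) >> 4 = 937,500 blocks, i.e. 15/16 (93.75%) of
 * free space, computed with a multiply and a shift so no 64-bit division
 * is needed on 32-bit platforms.
 */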

/*
 * When this is called, all of the log intent items which did not have
 * corresponding log done items should be in the AIL.  What we do now
 * is update the data structures associated with each one.
 *
 * Since we process the log intent items in normal transactions, they
 * will be removed at some point after the commit.  This prevents us
 * from just walking down the list processing each one.  We'll use a
 * flag in the intent item to skip those that we've already processed
 * and use the AIL iteration mechanism's generation count to try to
 * speed this up at least a bit.
 *
 * When we start, we know that the intents are the only things in the
 * AIL.  As we process them, however, other items are added to the
 * AIL.
 */
STATIC int
xlog_recover_process_intents(
	struct xlog		*log)
{
	struct xfs_trans	*parent_tp;
	struct xfs_ail_cursor	cur;
	struct xfs_log_item	*lip;
	struct xfs_ail		*ailp;
	int			error;
#if defined(DEBUG) || defined(XFS_WARN)
	xfs_lsn_t		last_lsn;
#endif

	/*
	 * The intent recovery handlers commit transactions to complete recovery
	 * for individual intents, but any new deferred operations that are
	 * queued during that process are held off until the very end. The
	 * purpose of this transaction is to serve as a container for deferred
	 * operations. Each intent recovery handler must transfer dfops here
	 * before its local transaction commits, and we'll finish the entire
	 * recovery process's deferred operations in one big transaction.
	 */
	error = xfs_trans_alloc_empty(log->l_mp, &parent_tp);
	if (error)
		return error;

	ailp = log->l_ailp;
	spin_lock(&ailp->ail_lock);
	lip = xfs_trans_ail_cursor_first(ailp, &cur, 0);
#if defined(DEBUG) || defined(XFS_WARN)
	last_lsn = xlog_assign_lsn(log->l_curr_cycle, log->l_curr_block);
#endif
	while (lip != NULL) {
		/*
		 * We're done when we see something other than an intent.
		 * There should be no intents left in the AIL now.
		 */
		if (!xlog_item_is_intent(lip)) {
#ifdef DEBUG
			for (; lip; lip = xfs_trans_ail_cursor_next(ailp, &cur))
				ASSERT(!xlog_item_is_intent(lip));
#endif
			break;
		}

		/*
		 * We should never see a redo item with a LSN higher than
		 * the last transaction we found in the log at the start
		 * of recovery.
		 */
		ASSERT(XFS_LSN_CMP(last_lsn, lip->li_lsn) >= 0);

		/*
		 * NOTE: If your intent processing routine can create more
		 * deferred ops, you /must/ attach them to the dfops in this
		 * routine or else those subsequent intents will get
		 * replayed in the wrong order!
		 */
		switch (lip->li_type) {
		case XFS_LI_EFI:
			error = xlog_recover_process_efi(log->l_mp, ailp, lip);
			break;
		case XFS_LI_RUI:
			error = xlog_recover_process_rui(log->l_mp, ailp, lip);
			break;
		case XFS_LI_CUI:
			error = xlog_recover_process_cui(parent_tp, ailp, lip);
			break;
		case XFS_LI_BUI:
			error = xlog_recover_process_bui(parent_tp, ailp, lip);
			break;
		}
		if (error)
			goto out;
		lip = xfs_trans_ail_cursor_next(ailp, &cur);
	}
out:
	xfs_trans_ail_cursor_done(&cur);
	spin_unlock(&ailp->ail_lock);
	if (!error)
		error = xlog_finish_defer_ops(parent_tp);
	xfs_trans_cancel(parent_tp);

	return error;
}

/*
 * A cancel occurs when the mount has failed and we're bailing out.
 * Release all pending log intent items so they don't pin the AIL.
 */
STATIC void
xlog_recover_cancel_intents(
	struct xlog		*log)
{
	struct xfs_log_item	*lip;
	struct xfs_ail_cursor	cur;
	struct xfs_ail		*ailp;

	ailp = log->l_ailp;
	spin_lock(&ailp->ail_lock);
	lip = xfs_trans_ail_cursor_first(ailp, &cur, 0);
	while (lip != NULL) {
		/*
		 * We're done when we see something other than an intent.
		 * There should be no intents left in the AIL now.
		 */
		if (!xlog_item_is_intent(lip)) {
#ifdef DEBUG
			for (; lip; lip = xfs_trans_ail_cursor_next(ailp, &cur))
				ASSERT(!xlog_item_is_intent(lip));
#endif
			break;
		}

		switch (lip->li_type) {
		case XFS_LI_EFI:
			xlog_recover_cancel_efi(log->l_mp, ailp, lip);
			break;
		case XFS_LI_RUI:
			xlog_recover_cancel_rui(log->l_mp, ailp, lip);
			break;
		case XFS_LI_CUI:
			xlog_recover_cancel_cui(log->l_mp, ailp, lip);
			break;
		case XFS_LI_BUI:
			xlog_recover_cancel_bui(log->l_mp, ailp, lip);
			break;
		}

		lip = xfs_trans_ail_cursor_next(ailp, &cur);
	}

	xfs_trans_ail_cursor_done(&cur);
	spin_unlock(&ailp->ail_lock);
}

/*
 * This routine performs a transaction to null out a bad inode pointer
 * in an agi unlinked inode hash bucket.
 */
STATIC void
xlog_recover_clear_agi_bucket(
	xfs_mount_t	*mp,
	xfs_agnumber_t	agno,
	int		bucket)
{
	xfs_trans_t	*tp;
	xfs_agi_t	*agi;
	xfs_buf_t	*agibp;
	int		offset;
	int		error;

	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_clearagi, 0, 0, 0, &tp);
	if (error)
		goto out_error;

	error = xfs_read_agi(mp, tp, agno, &agibp);
	if (error)
		goto out_abort;

	agi = XFS_BUF_TO_AGI(agibp);
	agi->agi_unlinked[bucket] = cpu_to_be32(NULLAGINO);
	offset = offsetof(xfs_agi_t, agi_unlinked) +
		 (sizeof(xfs_agino_t) * bucket);
	xfs_trans_log_buf(tp, agibp, offset,
			  (offset + sizeof(xfs_agino_t) - 1));

	error = xfs_trans_commit(tp);
	if (error)
		goto out_error;
	return;

out_abort:
	xfs_trans_cancel(tp);
out_error:
	xfs_warn(mp, "%s: failed to clear agi %d. Continuing.", __func__, agno);
	return;
}
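
/*
 * The byte range logged above covers exactly the one bucket slot that was
 * modified: for bucket 5, offset = offsetof(xfs_agi_t, agi_unlinked) +
 * 5 * sizeof(xfs_agino_t), and the range spans just the 4 bytes of that
 * single big-endian agino entry.
 */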

STATIC xfs_agino_t
xlog_recover_process_one_iunlink(
	struct xfs_mount		*mp,
	xfs_agnumber_t			agno,
	xfs_agino_t			agino,
	int				bucket)
{
	struct xfs_buf			*ibp;
	struct xfs_dinode		*dip;
	struct xfs_inode		*ip;
	xfs_ino_t			ino;
	int				error;

	ino = XFS_AGINO_TO_INO(mp, agno, agino);
	error = xfs_iget(mp, NULL, ino, 0, 0, &ip);
	if (error)
		goto fail;

	/*
	 * Get the on disk inode to find the next inode in the bucket.
	 */
	error = xfs_imap_to_bp(mp, NULL, &ip->i_imap, &dip, &ibp, 0, 0);
	if (error)
		goto fail_iput;

	xfs_iflags_clear(ip, XFS_IRECOVERY);
	ASSERT(VFS_I(ip)->i_nlink == 0);
	ASSERT(VFS_I(ip)->i_mode != 0);

	/* setup for the next pass */
	agino = be32_to_cpu(dip->di_next_unlinked);
	xfs_buf_relse(ibp);

	/*
	 * Prevent any DMAPI event from being sent when the reference on
	 * the inode is dropped.
	 */
	ip->i_d.di_dmevmask = 0;

	xfs_irele(ip);
	return agino;

 fail_iput:
	xfs_irele(ip);
 fail:
	/*
	 * We can't read in the inode this bucket points to, or this inode
	 * is messed up.  Just ditch this bucket of inodes.  We will lose
	 * some inodes and space, but at least we won't hang.
	 *
	 * Call xlog_recover_clear_agi_bucket() to perform a transaction to
	 * clear the inode pointer in the bucket.
	 */
	xlog_recover_clear_agi_bucket(mp, agno, bucket);
	return NULLAGINO;
}
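
/*
 * The return value above drives the bucket walk in
 * xlog_recover_process_iunlinks(): on success it is the next agino in the
 * on-disk chain (di_next_unlinked); on failure the bucket is cleared and
 * NULLAGINO is returned so the caller's while loop terminates.
 */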

/*
 * Recover AGI unlinked lists
 *
 * This is called during recovery to process any inodes which we unlinked but
 * not freed when the system crashed. These inodes will be on the lists in the
 * AGI blocks. What we do here is scan all the AGIs and fully truncate and free
 * any inodes found on the lists. Each inode is removed from the lists when it
 * has been fully truncated and is freed. The freeing of the inode and its
 * removal from the list must be atomic.
 *
 * If everything we touch in the agi processing loop is already in memory, this
 * loop can hold the cpu for a long time. It runs without lock contention,
 * memory allocation contention, the need to wait for IO, etc, and so will run
 * until we either run out of inodes to process, run low on memory or we run
 * out of log space.
 *
 * This behaviour is bad for latency on single CPU and non-preemptible kernels,
 * and can prevent other filesystem work (such as CIL pushes) from running. This
 * can lead to deadlocks if the recovery process runs out of log reservation
 * space. Hence we need to yield the CPU when there is other kernel work
 * scheduled on this CPU to ensure other scheduled work can run without undue
 * latency.
 */
STATIC void
xlog_recover_process_iunlinks(
	struct xlog	*log)
{
	xfs_mount_t	*mp;
	xfs_agnumber_t	agno;
	xfs_agi_t	*agi;
	xfs_buf_t	*agibp;
	xfs_agino_t	agino;
	int		bucket;
	int		error;

	mp = log->l_mp;

	for (agno = 0; agno < mp->m_sb.sb_agcount; agno++) {
		/*
		 * Find the agi for this ag.
		 */
		error = xfs_read_agi(mp, NULL, agno, &agibp);
		if (error) {
			/*
			 * AGI is b0rked. Don't process it.
			 *
			 * We should probably mark the filesystem as corrupt
			 * after we've recovered all the ag's we can....
			 */
			continue;
		}
		/*
		 * Unlock the buffer so that it can be acquired in the normal
		 * course of the transaction to truncate and free each inode.
		 * Because we are not racing with anyone else here for the AGI
		 * buffer, we don't even need to hold it locked to read the
		 * initial unlinked bucket entries out of the buffer. We keep
		 * a buffer reference though, so that it stays pinned in memory
		 * while we need the buffer.
		 */
		agi = XFS_BUF_TO_AGI(agibp);
		xfs_buf_unlock(agibp);

		for (bucket = 0; bucket < XFS_AGI_UNLINKED_BUCKETS; bucket++) {
			agino = be32_to_cpu(agi->agi_unlinked[bucket]);
			while (agino != NULLAGINO) {
				agino = xlog_recover_process_one_iunlink(mp,
							agno, agino, bucket);
				cond_resched();
			}
		}
		xfs_buf_rele(agibp);
	}
}

STATIC void
xlog_unpack_data(
	struct xlog_rec_header	*rhead,
	char			*dp,
	struct xlog		*log)
{
	int			i, j, k;

	for (i = 0; i < BTOBB(be32_to_cpu(rhead->h_len)) &&
		  i < (XLOG_HEADER_CYCLE_SIZE / BBSIZE); i++) {
		*(__be32 *)dp = *(__be32 *)&rhead->h_cycle_data[i];
		dp += BBSIZE;
	}

	if (xfs_sb_version_haslogv2(&log->l_mp->m_sb)) {
		xlog_in_core_2_t *xhdr = (xlog_in_core_2_t *)rhead;
		for ( ; i < BTOBB(be32_to_cpu(rhead->h_len)); i++) {
			j = i / (XLOG_HEADER_CYCLE_SIZE / BBSIZE);
			k = i % (XLOG_HEADER_CYCLE_SIZE / BBSIZE);
			*(__be32 *)dp = xhdr[j].hic_xheader.xh_cycle_data[k];
			dp += BBSIZE;
		}
	}
}
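
/*
 * Index math above in numbers: with BBSIZE = 512 and
 * XLOG_HEADER_CYCLE_SIZE = 32k, each header holds 64 cycle words.  For
 * basic block i = 70 of a large v2 record, j = 70 / 64 = 1 and
 * k = 70 % 64 = 6, i.e. the saved cycle word lives in the second
 * (extended) header at slot 6.
 */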

/*
 * CRC check, unpack and process a log record.
 */
STATIC int
xlog_recover_process(
	struct xlog		*log,
	struct hlist_head	rhash[],
	struct xlog_rec_header	*rhead,
	char			*dp,
	int			pass,
	struct list_head	*buffer_list)
{
	__le32			old_crc = rhead->h_crc;
	__le32			crc;

	crc = xlog_cksum(log, rhead, dp, be32_to_cpu(rhead->h_len));

	/*
	 * Nothing else to do if this is a CRC verification pass. Just return
	 * if this is a record with a non-zero crc. Unfortunately, mkfs always
	 * sets old_crc to 0 so we must consider this valid even on v5 supers.
	 * Otherwise, return EFSBADCRC on failure so the callers up the stack
	 * know precisely what failed.
	 */
	if (pass == XLOG_RECOVER_CRCPASS) {
		if (old_crc && crc != old_crc)
			return -EFSBADCRC;
		return 0;
	}

	/*
	 * We're in the normal recovery path. Issue a warning if and only if the
	 * CRC in the header is non-zero. This is an advisory warning and the
	 * zero CRC check prevents warnings from being emitted when upgrading
	 * the kernel from one that does not add CRCs by default.
	 */
	if (crc != old_crc) {
		if (old_crc || xfs_sb_version_hascrc(&log->l_mp->m_sb)) {
			xfs_alert(log->l_mp,
		"log record CRC mismatch: found 0x%x, expected 0x%x.",
					le32_to_cpu(old_crc),
					le32_to_cpu(crc));
			xfs_hex_dump(dp, 32);
		}

		/*
		 * If the filesystem is CRC enabled, this mismatch becomes a
		 * fatal log corruption failure.
		 */
		if (xfs_sb_version_hascrc(&log->l_mp->m_sb)) {
			XFS_ERROR_REPORT(__func__, XFS_ERRLEVEL_LOW, log->l_mp);
			return -EFSCORRUPTED;
		}
	}

	xlog_unpack_data(rhead, dp, log);

	return xlog_recover_process_data(log, rhash, rhead, dp, pass,
					 buffer_list);
}
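
/*
 * CRC handling above, summarized:
 *
 *   CRCPASS, stored crc == 0        -> accept (mkfs writes a zero crc)
 *   CRCPASS, mismatch               -> -EFSBADCRC
 *   recovery pass, mismatch, v5 fs  -> alert, hex dump, -EFSCORRUPTED
 *   recovery pass, mismatch, v4 fs  -> advisory alert only (if a crc was
 *                                      stored), recovery continues
 */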

STATIC int
xlog_valid_rec_header(
	struct xlog		*log,
	struct xlog_rec_header	*rhead,
	xfs_daddr_t		blkno)
{
	int			hlen;

	if (XFS_IS_CORRUPT(log->l_mp,
			   rhead->h_magicno != cpu_to_be32(XLOG_HEADER_MAGIC_NUM)))
		return -EFSCORRUPTED;
	if (XFS_IS_CORRUPT(log->l_mp,
			   (!rhead->h_version ||
			   (be32_to_cpu(rhead->h_version) &
			    (~XLOG_VERSION_OKBITS))))) {
		xfs_warn(log->l_mp, "%s: unrecognised log version (%d).",
			__func__, be32_to_cpu(rhead->h_version));
		return -EFSCORRUPTED;
	}

	/* LR body must have data or it wouldn't have been written */
	hlen = be32_to_cpu(rhead->h_len);
	if (XFS_IS_CORRUPT(log->l_mp, hlen <= 0 || hlen > INT_MAX))
		return -EFSCORRUPTED;
	if (XFS_IS_CORRUPT(log->l_mp,
			   blkno > log->l_logBBsize || blkno > INT_MAX))
		return -EFSCORRUPTED;
	return 0;
}

/*
 * Read the log from tail to head and process the log records found.
 * Handle the two cases where the tail and head are in the same cycle
 * and where the active portion of the log wraps around the end of
 * the physical log separately.  The pass parameter is passed through
 * to the routines called to process the data and is not looked at
 * here.
 */
STATIC int
xlog_do_recovery_pass(
	struct xlog		*log,
	xfs_daddr_t		head_blk,
	xfs_daddr_t		tail_blk,
	int			pass,
	xfs_daddr_t		*first_bad)	/* out: first bad log rec */
{
	xlog_rec_header_t	*rhead;
	xfs_daddr_t		blk_no, rblk_no;
	xfs_daddr_t		rhead_blk;
	char			*offset;
	char			*hbp, *dbp;
	int			error = 0, h_size, h_len;
	int			error2 = 0;
	int			bblks, split_bblks;
	int			hblks, split_hblks, wrapped_hblks;
	int			i;
	struct hlist_head	rhash[XLOG_RHASH_SIZE];
	LIST_HEAD		(buffer_list);

	ASSERT(head_blk != tail_blk);
	blk_no = rhead_blk = tail_blk;

	for (i = 0; i < XLOG_RHASH_SIZE; i++)
		INIT_HLIST_HEAD(&rhash[i]);

	/*
	 * Read the header of the tail block and get the iclog buffer size from
	 * h_size.  Use this to tell how many sectors make up the log header.
	 */
	if (xfs_sb_version_haslogv2(&log->l_mp->m_sb)) {
		/*
		 * When using variable length iclogs, read first sector of
		 * iclog header and extract the header size from it.  Get a
		 * new hbp that is the correct size.
		 */
		hbp = xlog_alloc_buffer(log, 1);
		if (!hbp)
			return -ENOMEM;

		error = xlog_bread(log, tail_blk, 1, hbp, &offset);
		if (error)
			goto bread_err1;

		rhead = (xlog_rec_header_t *)offset;
		error = xlog_valid_rec_header(log, rhead, tail_blk);
		if (error)
			goto bread_err1;

		/*
		 * xfsprogs has a bug where record length is based on lsunit but
		 * h_size (iclog size) is hardcoded to 32k. Now that we
		 * unconditionally CRC verify the unmount record, this means the
		 * log buffer can be too small for the record and cause an
		 * overrun.
		 *
		 * Detect this condition here. Use lsunit for the buffer size as
		 * long as this looks like the mkfs case. Otherwise, return an
		 * error to avoid a buffer overrun.
		 */
		h_size = be32_to_cpu(rhead->h_size);
		h_len = be32_to_cpu(rhead->h_len);
		if (h_len > h_size) {
			if (h_len <= log->l_mp->m_logbsize &&
			    be32_to_cpu(rhead->h_num_logops) == 1) {
				xfs_warn(log->l_mp,
		"invalid iclog size (%d bytes), using lsunit (%d bytes)",
					 h_size, log->l_mp->m_logbsize);
				h_size = log->l_mp->m_logbsize;
			} else {
				XFS_ERROR_REPORT(__func__, XFS_ERRLEVEL_LOW,
						log->l_mp);
				error = -EFSCORRUPTED;
				goto bread_err1;
			}
		}

		if ((be32_to_cpu(rhead->h_version) & XLOG_VERSION_2) &&
		    (h_size > XLOG_HEADER_CYCLE_SIZE)) {
			hblks = h_size / XLOG_HEADER_CYCLE_SIZE;
			if (h_size % XLOG_HEADER_CYCLE_SIZE)
				hblks++;
			kmem_free(hbp);
			hbp = xlog_alloc_buffer(log, hblks);
		} else {
			hblks = 1;
		}
	} else {
		ASSERT(log->l_sectBBsize == 1);
		hblks = 1;
		hbp = xlog_alloc_buffer(log, 1);
		h_size = XLOG_BIG_RECORD_BSIZE;
	}

	if (!hbp)
		return -ENOMEM;
	dbp = xlog_alloc_buffer(log, BTOBB(h_size));
	if (!dbp) {
		kmem_free(hbp);
		return -ENOMEM;
	}

	memset(rhash, 0, sizeof(rhash));
	if (tail_blk > head_blk) {
		/*
		 * Perform recovery around the end of the physical log.
		 * When the head is not on the same cycle number as the tail,
		 * we can't do a sequential recovery.
		 */
		while (blk_no < log->l_logBBsize) {
			/*
			 * Check for header wrapping around physical end-of-log
			 */
			offset = hbp;
			split_hblks = 0;
			wrapped_hblks = 0;
			if (blk_no + hblks <= log->l_logBBsize) {
				/* Read header in one read */
				error = xlog_bread(log, blk_no, hblks, hbp,
						   &offset);
				if (error)
					goto bread_err2;
			} else {
				/* This LR is split across physical log end */
				if (blk_no != log->l_logBBsize) {
					/* some data before physical log end */
					ASSERT(blk_no <= INT_MAX);
					split_hblks = log->l_logBBsize - (int)blk_no;
					ASSERT(split_hblks > 0);
					error = xlog_bread(log, blk_no,
							   split_hblks, hbp,
							   &offset);
					if (error)
						goto bread_err2;
				}

				/*
				 * Note: this black magic still works with
				 * large sector sizes (non-512) only because:
				 * - we increased the buffer size originally
				 *   by 1 sector giving us enough extra space
				 *   for the second read;
				 * - the log start is guaranteed to be sector
				 *   aligned;
				 * - we read the log end (LR header start)
				 *   _first_, then the log start (LR header end)
				 *   - order is important.
				 */
				wrapped_hblks = hblks - split_hblks;
				error = xlog_bread_noalign(log, 0,
						wrapped_hblks,
						offset + BBTOB(split_hblks));
				if (error)
					goto bread_err2;
			}
			rhead = (xlog_rec_header_t *)offset;
			error = xlog_valid_rec_header(log, rhead,
						split_hblks ? blk_no : 0);
			if (error)
				goto bread_err2;

			bblks = (int)BTOBB(be32_to_cpu(rhead->h_len));
			blk_no += hblks;

			/*
			 * Read the log record data in multiple reads if it
			 * wraps around the end of the log. Note that if the
			 * header already wrapped, blk_no could point past the
			 * end of the log. The record data is contiguous in
			 * that case.
			 */
			if (blk_no + bblks <= log->l_logBBsize ||
			    blk_no >= log->l_logBBsize) {
				rblk_no = xlog_wrap_logbno(log, blk_no);
				error = xlog_bread(log, rblk_no, bblks, dbp,
						   &offset);
				if (error)
					goto bread_err2;
			} else {
				/* This log record is split across the
				 * physical end of log */
				offset = dbp;
				split_bblks = 0;
				if (blk_no != log->l_logBBsize) {
					/* some data is before the physical
					 * end of log */
					ASSERT(!wrapped_hblks);
					ASSERT(blk_no <= INT_MAX);
					split_bblks =
						log->l_logBBsize - (int)blk_no;
					ASSERT(split_bblks > 0);
					error = xlog_bread(log, blk_no,
							split_bblks, dbp,
							&offset);
					if (error)
						goto bread_err2;
				}

				/*
				 * Note: this black magic still works with
				 * large sector sizes (non-512) only because:
				 * - we increased the buffer size originally
				 *   by 1 sector giving us enough extra space
				 *   for the second read;
				 * - the log start is guaranteed to be sector
				 *   aligned;
				 * - we read the log end (LR header start)
				 *   _first_, then the log start (LR header end)
				 *   - order is important.
				 */
				error = xlog_bread_noalign(log, 0,
						bblks - split_bblks,
						offset + BBTOB(split_bblks));
				if (error)
					goto bread_err2;
			}

			error = xlog_recover_process(log, rhash, rhead, offset,
						     pass, &buffer_list);
			if (error)
				goto bread_err2;

			blk_no += bblks;
			rhead_blk = blk_no;
		}

		ASSERT(blk_no >= log->l_logBBsize);
		blk_no -= log->l_logBBsize;
		rhead_blk = blk_no;
	}

	/* read first part of physical log */
	while (blk_no < head_blk) {
		error = xlog_bread(log, blk_no, hblks, hbp, &offset);
		if (error)
			goto bread_err2;

		rhead = (xlog_rec_header_t *)offset;
		error = xlog_valid_rec_header(log, rhead, blk_no);
		if (error)
			goto bread_err2;

		/* blocks in data section */
		bblks = (int)BTOBB(be32_to_cpu(rhead->h_len));
		error = xlog_bread(log, blk_no+hblks, bblks, dbp,
				   &offset);
		if (error)
			goto bread_err2;

		error = xlog_recover_process(log, rhash, rhead, offset, pass,
					     &buffer_list);
		if (error)
			goto bread_err2;

		blk_no += bblks + hblks;
		rhead_blk = blk_no;
	}

 bread_err2:
	kmem_free(dbp);
 bread_err1:
	kmem_free(hbp);

	/*
	 * Submit buffers that have been added from the last record processed,
	 * regardless of error status.
	 */
	if (!list_empty(&buffer_list))
		error2 = xfs_buf_delwri_submit(&buffer_list);

	if (error && first_bad)
		*first_bad = rhead_blk;

	/*
	 * Transactions are freed at commit time but transactions without commit
	 * records on disk are never committed. Free any that may be left in the
	 * hash table.
	 */
	for (i = 0; i < XLOG_RHASH_SIZE; i++) {
		struct hlist_node	*tmp;
		struct xlog_recover	*trans;

		hlist_for_each_entry_safe(trans, tmp, &rhash[i], r_list)
			xlog_recover_free_trans(trans);
	}

	return error ? error : error2;
}
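
/*
 * Wrap-around example for the two loops above: with l_logBBsize = 1024,
 * tail_blk = 1000 and head_blk = 50, the first loop replays records from
 * block 1000 towards 1023, splitting any header or body that straddles
 * the physical end of the log, then blk_no wraps to 0 and the second loop
 * replays the remainder up to block 50.
 */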

/*
 * Do the recovery of the log.  We actually do this in two phases.
 * The two passes are necessary in order to implement the function
 * of cancelling a record written into the log.  The first pass
 * determines those things which have been cancelled, and the
 * second pass replays log items normally except for those which
 * have been cancelled.  The handling of the replay and cancellations
 * takes place in the log item type specific routines.
 *
 * The table of items which have cancel records in the log is allocated
 * and freed at this level, since only here do we know when all of
 * the log recovery has been completed.
 */
STATIC int
xlog_do_log_recovery(
	struct xlog	*log,
	xfs_daddr_t	head_blk,
	xfs_daddr_t	tail_blk)
{
	int		error, i;

	ASSERT(head_blk != tail_blk);

	/*
	 * First do a pass to find all of the cancelled buf log items.
	 * Store them in the buf_cancel_table for use in the second pass.
	 */
	log->l_buf_cancel_table = kmem_zalloc(XLOG_BC_TABLE_SIZE *
						 sizeof(struct list_head),
						 0);
	for (i = 0; i < XLOG_BC_TABLE_SIZE; i++)
		INIT_LIST_HEAD(&log->l_buf_cancel_table[i]);

	error = xlog_do_recovery_pass(log, head_blk, tail_blk,
				      XLOG_RECOVER_PASS1, NULL);
	if (error != 0) {
		kmem_free(log->l_buf_cancel_table);
		log->l_buf_cancel_table = NULL;
		return error;
	}
	/*
	 * Then do a second pass to actually recover the items in the log.
	 * When it is complete free the table of buf cancel items.
	 */
	error = xlog_do_recovery_pass(log, head_blk, tail_blk,
				      XLOG_RECOVER_PASS2, NULL);
#ifdef DEBUG
	if (!error) {
		int	i;

		for (i = 0; i < XLOG_BC_TABLE_SIZE; i++)
			ASSERT(list_empty(&log->l_buf_cancel_table[i]));
	}
#endif	/* DEBUG */

	kmem_free(log->l_buf_cancel_table);
	log->l_buf_cancel_table = NULL;

	return error;
}
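
/*
 * The cancel table built in pass 1 above is keyed by buffer block number,
 * hashed into XLOG_BC_TABLE_SIZE lists of struct xfs_buf_cancel; pass 2
 * consults it so that buffers with cancel records in the log are not
 * replayed over freed or since-reused disk space.
 */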

/*
 * Do the actual recovery
 */
STATIC int
xlog_do_recover(
	struct xlog	*log,
	xfs_daddr_t	head_blk,
	xfs_daddr_t	tail_blk)
{
	struct xfs_mount *mp = log->l_mp;
	int		error;
	xfs_buf_t	*bp;
	xfs_sb_t	*sbp;

	trace_xfs_log_recover(log, head_blk, tail_blk);

	/*
	 * First replay the images in the log.
	 */
	error = xlog_do_log_recovery(log, head_blk, tail_blk);
	if (error)
		return error;

	/*
	 * If IO errors happened during recovery, bail out.
	 */
	if (XFS_FORCED_SHUTDOWN(mp)) {
		return -EIO;
	}

	/*
	 * We now update the tail_lsn since much of the recovery has completed
	 * and there may be space available to use.  If there were no extent
	 * or iunlinks, we can free up the entire log and set the tail_lsn to
	 * be the last_sync_lsn.  This was set in xlog_find_tail to be the
	 * lsn of the last known good LR on disk.  If there are extent frees
	 * or iunlinks they will have some entries in the AIL; so we look at
	 * the AIL to determine how to set the tail_lsn.
	 */
	xlog_assign_tail_lsn(mp);

	/*
	 * Now that we've finished replaying all buffer and inode
	 * updates, re-read in the superblock and reverify it.
	 */
	bp = xfs_getsb(mp);
	bp->b_flags &= ~(XBF_DONE | XBF_ASYNC);
	ASSERT(!(bp->b_flags & XBF_WRITE));
	bp->b_flags |= XBF_READ;
	bp->b_ops = &xfs_sb_buf_ops;

	error = xfs_buf_submit(bp);
	if (error) {
		if (!XFS_FORCED_SHUTDOWN(mp)) {
			xfs_buf_ioerror_alert(bp, __this_address);
			ASSERT(0);
		}
		xfs_buf_relse(bp);
		return error;
	}

	/* Convert superblock from on-disk format */
	sbp = &mp->m_sb;
	xfs_sb_from_disk(sbp, XFS_BUF_TO_SBP(bp));
	xfs_buf_relse(bp);

	/* re-initialise in-core superblock and geometry structures */
	xfs_reinit_percpu_counters(mp);
	error = xfs_initialize_perag(mp, sbp->sb_agcount, &mp->m_maxagi);
	if (error) {
		xfs_warn(mp, "Failed post-recovery per-ag init: %d", error);
		return error;
	}
	mp->m_alloc_set_aside = xfs_alloc_set_aside(mp);

	xlog_recover_check_summary(log);

	/* Normal transactions can now occur */
	log->l_flags &= ~XLOG_ACTIVE_RECOVERY;
	return 0;
}

/*
 * Perform recovery and re-initialize some log variables in xlog_find_tail.
 *
 * Return error or zero.
 */
int
xlog_recover(
	struct xlog	*log)
{
	xfs_daddr_t	head_blk, tail_blk;
	int		error;

	/* find the tail of the log */
	error = xlog_find_tail(log, &head_blk, &tail_blk);
	if (error)
		return error;

	/*
	 * The superblock was read before the log was available and thus the LSN
	 * could not be verified. Check the superblock LSN against the current
	 * LSN now that it's known.
	 */
	if (xfs_sb_version_hascrc(&log->l_mp->m_sb) &&
	    !xfs_log_check_lsn(log->l_mp, log->l_mp->m_sb.sb_lsn))
		return -EINVAL;

	if (tail_blk != head_blk) {
		/* There used to be a comment here:
		 *
		 * disallow recovery on read-only mounts.  note -- mount
		 * checks for ENOSPC and turns it into an intelligent
		 * error message.
		 * ...but this is no longer true.  Now, unless you specify
		 * NORECOVERY (in which case this function would never be
		 * called), we just go ahead and recover.  We do this all
		 * under the vfs layer, so we can get away with it unless
		 * the device itself is read-only, in which case we fail.
		 */
		if ((error = xfs_dev_is_read_only(log->l_mp, "recovery"))) {
			return error;
		}

		/*
		 * Version 5 superblock log feature mask validation. We know the
		 * log is dirty so check if there are any unknown log features
		 * in what we need to recover. If there are unknown features
		 * (e.g. unsupported transactions), then simply reject the
		 * attempt at recovery before touching anything.
		 */
		if (XFS_SB_VERSION_NUM(&log->l_mp->m_sb) == XFS_SB_VERSION_5 &&
		    xfs_sb_has_incompat_log_feature(&log->l_mp->m_sb,
					XFS_SB_FEAT_INCOMPAT_LOG_UNKNOWN)) {
			xfs_warn(log->l_mp,
"Superblock has unknown incompatible log features (0x%x) enabled.",
				(log->l_mp->m_sb.sb_features_log_incompat &
					XFS_SB_FEAT_INCOMPAT_LOG_UNKNOWN));
			xfs_warn(log->l_mp,
"The log can not be fully and/or safely recovered by this kernel.");
			xfs_warn(log->l_mp,
"Please recover the log on a kernel that supports the unknown features.");
			return -EINVAL;
		}

		/*
		 * Delay log recovery if the debug hook is set. This is debug
		 * instrumentation to coordinate simulation of I/O failures with
		 * log recovery.
		 */
		if (xfs_globals.log_recovery_delay) {
			xfs_notice(log->l_mp,
				"Delaying log recovery for %d seconds.",
				xfs_globals.log_recovery_delay);
			msleep(xfs_globals.log_recovery_delay * 1000);
		}

		xfs_notice(log->l_mp, "Starting recovery (logdev: %s)",
				log->l_mp->m_logname ? log->l_mp->m_logname
						     : "internal");

		error = xlog_do_recover(log, head_blk, tail_blk);
		log->l_flags |= XLOG_RECOVERY_NEEDED;
	}
	return error;
}

/*
 * In the first part of recovery we replay inodes and buffers and build
 * up the list of extent free items which need to be processed.  Here
 * we process the extent free items and clean up the on disk unlinked
 * inode lists.  This is separated from the first part of recovery so
 * that the root and real-time bitmap inodes can be read in from disk in
 * between the two stages.  This is necessary so that we can free space
 * in the real-time portion of the file system.
 */
int
xlog_recover_finish(
	struct xlog	*log)
{
	/*
	 * Now we're ready to do the transactions needed for the
	 * rest of recovery.  Start with completing all the extent
	 * free intent records and then process the unlinked inode
	 * lists.  At this point, we essentially run in normal mode
	 * except that we're still performing recovery actions
	 * rather than accepting new requests.
	 */
	if (log->l_flags & XLOG_RECOVERY_NEEDED) {
		int	error;
		error = xlog_recover_process_intents(log);
		if (error) {
			xfs_alert(log->l_mp, "Failed to recover intents");
			return error;
		}

		/*
		 * Sync the log to get all the intents out of the AIL.
		 * This isn't absolutely necessary, but it helps in
		 * case the unlink transactions would have problems
		 * pushing the intents out of the way.
		 */
		xfs_log_force(log->l_mp, XFS_LOG_SYNC);

		xlog_recover_process_iunlinks(log);

		xlog_recover_check_summary(log);

		xfs_notice(log->l_mp, "Ending recovery (logdev: %s)",
				log->l_mp->m_logname ? log->l_mp->m_logname
						     : "internal");
		log->l_flags &= ~XLOG_RECOVERY_NEEDED;
	} else {
		xfs_info(log->l_mp, "Ending clean mount");
	}
	return 0;
}

void
xlog_recover_cancel(
	struct xlog	*log)
{
	if (log->l_flags & XLOG_RECOVERY_NEEDED)
		xlog_recover_cancel_intents(log);
}

#if defined(DEBUG)
/*
 * Read all of the agf and agi counters and check that they
 * are consistent with the superblock counters.
 */
STATIC void
xlog_recover_check_summary(
	struct xlog	*log)
{
	xfs_mount_t	*mp;
	xfs_agf_t	*agfp;
	xfs_buf_t	*agfbp;
	xfs_buf_t	*agibp;
	xfs_agnumber_t	agno;
	uint64_t	freeblks;
	uint64_t	itotal;
	uint64_t	ifree;
	int		error;

	mp = log->l_mp;

	freeblks = 0LL;
	itotal = 0LL;
	ifree = 0LL;
	for (agno = 0; agno < mp->m_sb.sb_agcount; agno++) {
		error = xfs_read_agf(mp, NULL, agno, 0, &agfbp);
		if (error) {
			xfs_alert(mp, "%s agf read failed agno %d error %d",
						__func__, agno, error);
		} else {
			agfp = XFS_BUF_TO_AGF(agfbp);
			freeblks += be32_to_cpu(agfp->agf_freeblks) +
				    be32_to_cpu(agfp->agf_flcount);
			xfs_buf_relse(agfbp);
		}

		error = xfs_read_agi(mp, NULL, agno, &agibp);
		if (error) {
			xfs_alert(mp, "%s agi read failed agno %d error %d",
						__func__, agno, error);
		} else {
			struct xfs_agi	*agi = XFS_BUF_TO_AGI(agibp);

			itotal += be32_to_cpu(agi->agi_count);
			ifree += be32_to_cpu(agi->agi_freecount);
			xfs_buf_relse(agibp);