// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright (C) 2016 Oracle.  All Rights Reserved.
 * Author: Darrick J. Wong <darrick.wong@oracle.com>
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_bit.h"
#include "xfs_shared.h"
#include "xfs_mount.h"
#include "xfs_defer.h"
#include "xfs_inode.h"
#include "xfs_trans.h"
#include "xfs_trans_priv.h"
#include "xfs_bmap_item.h"
#include "xfs_log.h"
#include "xfs_bmap.h"
#include "xfs_icache.h"
#include "xfs_bmap_btree.h"
#include "xfs_trans_space.h"
#include "xfs_error.h"
#include "xfs_log_priv.h"
#include "xfs_log_recover.h"
#include "xfs_ag.h"
#include "xfs_trace.h"

struct kmem_cache	*xfs_bui_cache;
struct kmem_cache	*xfs_bud_cache;

static const struct xfs_item_ops xfs_bui_item_ops;

static inline struct xfs_bui_log_item *BUI_ITEM(struct xfs_log_item *lip)
{
	return container_of(lip, struct xfs_bui_log_item, bui_item);
}

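/* Free a BUI item along with the shadow log vector buffer hanging off it. */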
STATIC void
xfs_bui_item_free(
	struct xfs_bui_log_item	*buip)
{
	kvfree(buip->bui_item.li_lv_shadow);
	kmem_cache_free(xfs_bui_cache, buip);
}

/*
 * Freeing the BUI requires that we remove it from the AIL if it has already
 * been placed there. However, the BUI may not yet have been placed in the AIL
 * when called by xfs_bui_release() from BUD processing due to the ordering of
 * committed vs unpin operations in bulk insert operations. Hence the reference
 * count to ensure only the last caller frees the BUI.
 */
STATIC void
xfs_bui_release(
	struct xfs_bui_log_item	*buip)
{
	ASSERT(atomic_read(&buip->bui_refcount) > 0);
	if (!atomic_dec_and_test(&buip->bui_refcount))
		return;

	xfs_trans_ail_delete(&buip->bui_item, 0);
	xfs_bui_item_free(buip);
}

STATIC void
xfs_bui_item_size(
	struct xfs_log_item	*lip,
	int			*nvecs,
	int			*nbytes)
{
	struct xfs_bui_log_item	*buip = BUI_ITEM(lip);

	*nvecs += 1;
	*nbytes += xfs_bui_log_format_sizeof(buip->bui_format.bui_nextents);
}

/*
 * This is called to fill in the vector of log iovecs for the
 * given bui log item. We use only 1 iovec, and we point that
 * at the bui_log_format structure embedded in the bui item.
 * It is at this point that we assert that all of the extent
 * slots in the bui item have been filled.
 */
STATIC void
xfs_bui_item_format(
	struct xfs_log_item	*lip,
	struct xfs_log_vec	*lv)
{
	struct xfs_bui_log_item	*buip = BUI_ITEM(lip);
	struct xfs_log_iovec	*vecp = NULL;

	ASSERT(atomic_read(&buip->bui_next_extent) ==
			buip->bui_format.bui_nextents);

	buip->bui_format.bui_type = XFS_LI_BUI;
	buip->bui_format.bui_size = 1;

	xlog_copy_iovec(lv, &vecp, XLOG_REG_TYPE_BUI_FORMAT, &buip->bui_format,
			xfs_bui_log_format_sizeof(buip->bui_format.bui_nextents));
}

/*
 * The unpin operation is the last place a BUI is manipulated in the log. It is
 * either inserted in the AIL or aborted in the event of a log I/O error. In
 * either case, the BUI transaction has been successfully committed to make it
 * this far. Therefore, we expect whoever committed the BUI to either construct
 * and commit the BUD or drop the BUD's reference in the event of error. Simply
 * drop the log's BUI reference now that the log is done with it.
 */
STATIC void
xfs_bui_item_unpin(
	struct xfs_log_item	*lip,
	int			remove)
{
	struct xfs_bui_log_item	*buip = BUI_ITEM(lip);

	xfs_bui_release(buip);
}

/*
 * The BUI has been either committed or aborted if the transaction has been
 * cancelled. If the transaction was cancelled, a BUD isn't going to be
 * constructed and thus we free the BUI here directly.
 */
STATIC void
xfs_bui_item_release(
	struct xfs_log_item	*lip)
{
	xfs_bui_release(BUI_ITEM(lip));
}

/*
 * Allocate and initialize a bui item with the given number of extents.
 */
STATIC struct xfs_bui_log_item *
xfs_bui_init(
	struct xfs_mount		*mp)
{
	struct xfs_bui_log_item		*buip;

	buip = kmem_cache_zalloc(xfs_bui_cache, GFP_KERNEL | __GFP_NOFAIL);

	xfs_log_item_init(mp, &buip->bui_item, XFS_LI_BUI, &xfs_bui_item_ops);
	buip->bui_format.bui_nextents = XFS_BUI_MAX_FAST_EXTENTS;
	buip->bui_format.bui_id = (uintptr_t)(void *)buip;
	atomic_set(&buip->bui_next_extent, 0);
	atomic_set(&buip->bui_refcount, 2);

	return buip;
}

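/* Convert a generic log item back to the BUD that embeds it. */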
static inline struct xfs_bud_log_item *BUD_ITEM(struct xfs_log_item *lip)
{
	return container_of(lip, struct xfs_bud_log_item, bud_item);
}

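/* Report the number of iovecs and bytes needed to log this BUD item. */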
STATIC void
xfs_bud_item_size(
	struct xfs_log_item	*lip,
	int			*nvecs,
	int			*nbytes)
{
	*nvecs += 1;
	*nbytes += sizeof(struct xfs_bud_log_format);
}

/*
 * This is called to fill in the vector of log iovecs for the
 * given bud log item. We use only 1 iovec, and we point that
 * at the bud_log_format structure embedded in the bud item.
 * It is at this point that we assert that all of the extent
 * slots in the bud item have been filled.
 */
STATIC void
xfs_bud_item_format(
	struct xfs_log_item	*lip,
	struct xfs_log_vec	*lv)
{
	struct xfs_bud_log_item	*budp = BUD_ITEM(lip);
	struct xfs_log_iovec	*vecp = NULL;

	budp->bud_format.bud_type = XFS_LI_BUD;
	budp->bud_format.bud_size = 1;

	xlog_copy_iovec(lv, &vecp, XLOG_REG_TYPE_BUD_FORMAT, &budp->bud_format,
			sizeof(struct xfs_bud_log_format));
}

/*
 * The BUD is either committed or aborted if the transaction is cancelled. If
 * the transaction is cancelled, drop our reference to the BUI and free the
 * BUD.
 */
STATIC void
xfs_bud_item_release(
	struct xfs_log_item	*lip)
{
	struct xfs_bud_log_item	*budp = BUD_ITEM(lip);

	xfs_bui_release(budp->bud_buip);
	kvfree(budp->bud_item.li_lv_shadow);
	kmem_cache_free(xfs_bud_cache, budp);
}

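/* Return the BUI intent item that this BUD completes. */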
static struct xfs_log_item *
xfs_bud_item_intent(
	struct xfs_log_item	*lip)
{
	return &BUD_ITEM(lip)->bud_buip->bui_item;
}

static const struct xfs_item_ops xfs_bud_item_ops = {
	.flags		= XFS_ITEM_RELEASE_WHEN_COMMITTED |
			  XFS_ITEM_INTENT_DONE,
	.iop_size	= xfs_bud_item_size,
	.iop_format	= xfs_bud_item_format,
	.iop_release	= xfs_bud_item_release,
	.iop_intent	= xfs_bud_item_intent,
};

static inline struct xfs_bmap_intent *bi_entry(const struct list_head *e)
{
	return list_entry(e, struct xfs_bmap_intent, bi_list);
}

/* Sort bmap intents by inode. */
static int
xfs_bmap_update_diff_items(
	void				*priv,
	const struct list_head		*a,
	const struct list_head		*b)
{
	struct xfs_bmap_intent		*ba = bi_entry(a);
	struct xfs_bmap_intent		*bb = bi_entry(b);

	return ba->bi_owner->i_ino - bb->bi_owner->i_ino;
}

/* Log bmap updates in the intent item. */
STATIC void
xfs_bmap_update_log_item(
	struct xfs_trans		*tp,
	struct xfs_bui_log_item		*buip,
	struct xfs_bmap_intent		*bi)
{
	uint				next_extent;
	struct xfs_map_extent		*map;

	/*
	 * atomic_inc_return gives us the value after the increment;
	 * we want to use it as an array index so we need to subtract 1 from
	 * it.
	 */
	next_extent = atomic_inc_return(&buip->bui_next_extent) - 1;
	ASSERT(next_extent < buip->bui_format.bui_nextents);
	map = &buip->bui_format.bui_extents[next_extent];
	map->me_owner = bi->bi_owner->i_ino;
	map->me_startblock = bi->bi_bmap.br_startblock;
	map->me_startoff = bi->bi_bmap.br_startoff;
	map->me_len = bi->bi_bmap.br_blockcount;

	switch (bi->bi_type) {
	case XFS_BMAP_MAP:
	case XFS_BMAP_UNMAP:
		map->me_flags = bi->bi_type;
		break;
	default:
		ASSERT(0);
	}
	if (bi->bi_bmap.br_state == XFS_EXT_UNWRITTEN)
		map->me_flags |= XFS_BMAP_EXTENT_UNWRITTEN;
	if (bi->bi_whichfork == XFS_ATTR_FORK)
		map->me_flags |= XFS_BMAP_EXTENT_ATTR_FORK;
	if (xfs_ifork_is_realtime(bi->bi_owner, bi->bi_whichfork))
		map->me_flags |= XFS_BMAP_EXTENT_REALTIME;
}

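/* Create a BUI log intent and log all the bmap updates on the list into it. */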
static struct xfs_log_item *
xfs_bmap_update_create_intent(
	struct xfs_trans		*tp,
	struct list_head		*items,
	unsigned int			count,
	bool				sort)
{
	struct xfs_mount		*mp = tp->t_mountp;
	struct xfs_bui_log_item		*buip = xfs_bui_init(mp);
	struct xfs_bmap_intent		*bi;

	ASSERT(count == XFS_BUI_MAX_FAST_EXTENTS);

	if (sort)
		list_sort(mp, items, xfs_bmap_update_diff_items);
	list_for_each_entry(bi, items, bi_list)
		xfs_bmap_update_log_item(tp, buip, bi);
	return &buip->bui_item;
}

/* Get a BUD so we can process all the deferred bmap updates. */
static struct xfs_log_item *
xfs_bmap_update_create_done(
	struct xfs_trans		*tp,
	struct xfs_log_item		*intent,
	unsigned int			count)
{
	struct xfs_bui_log_item		*buip = BUI_ITEM(intent);
	struct xfs_bud_log_item		*budp;

	budp = kmem_cache_zalloc(xfs_bud_cache, GFP_KERNEL | __GFP_NOFAIL);
	xfs_log_item_init(tp->t_mountp, &budp->bud_item, XFS_LI_BUD,
			  &xfs_bud_item_ops);
	budp->bud_buip = buip;
	budp->bud_format.bud_bui_id = buip->bui_format.bui_id;

	return &budp->bud_item;
}

/* Take a passive ref to the group containing the space we're mapping. */
static inline void
xfs_bmap_update_get_group(
	struct xfs_mount	*mp,
	struct xfs_bmap_intent	*bi)
{
	enum xfs_group_type	type = XG_TYPE_AG;

	if (xfs_ifork_is_realtime(bi->bi_owner, bi->bi_whichfork))
		type = XG_TYPE_RTG;

	/*
	 * Bump the intent count on behalf of the deferred rmap and refcount
	 * intent items that we can queue when we finish this bmap work.
	 * This new intent item will bump the intent count before the bmap
	 * intent drops the intent count, ensuring that the intent count
	 * remains nonzero across the transaction roll.
	 */
	bi->bi_group = xfs_group_intent_get(mp, bi->bi_bmap.br_startblock,
			type);
}

/* Add this deferred BUI to the transaction. */
void
xfs_bmap_defer_add(
	struct xfs_trans	*tp,
	struct xfs_bmap_intent	*bi)
{
	xfs_bmap_update_get_group(tp->t_mountp, bi);

	/*
	 * Ensure the deferred mapping is pre-recorded in i_delayed_blks.
	 *
	 * Otherwise stat can report zero blocks for an inode that actually has
	 * data when the entire mapping is in the process of being overwritten
	 * using the out of place write path. This is undone in xfs_bmapi_remap
	 * after it has incremented di_nblocks for a successful operation.
	 */
	if (bi->bi_type == XFS_BMAP_MAP)
		bi->bi_owner->i_delayed_blks += bi->bi_bmap.br_blockcount;

	trace_xfs_bmap_defer(bi);
	xfs_defer_add(tp, &bi->bi_list, &xfs_bmap_update_defer_type);
}

/* Cancel a deferred bmap update. */
STATIC void
xfs_bmap_update_cancel_item(
	struct list_head		*item)
{
	struct xfs_bmap_intent		*bi = bi_entry(item);

	if (bi->bi_type == XFS_BMAP_MAP)
		bi->bi_owner->i_delayed_blks -= bi->bi_bmap.br_blockcount;

	xfs_group_intent_put(bi->bi_group);
	kmem_cache_free(xfs_bmap_intent_cache, bi);
}

/* Process a deferred bmap update. */
STATIC int
xfs_bmap_update_finish_item(
	struct xfs_trans		*tp,
	struct xfs_log_item		*done,
	struct list_head		*item,
	struct xfs_btree_cur		**state)
{
	struct xfs_bmap_intent		*bi = bi_entry(item);
	int				error;

	error = xfs_bmap_finish_one(tp, bi);
	if (!error && bi->bi_bmap.br_blockcount > 0) {
		ASSERT(bi->bi_type == XFS_BMAP_UNMAP);
		return -EAGAIN;
	}

	xfs_bmap_update_cancel_item(item);
	return error;
}

/* Abort all pending BUIs. */
STATIC void
xfs_bmap_update_abort_intent(
	struct xfs_log_item		*intent)
{
	xfs_bui_release(BUI_ITEM(intent));
}

/* Is this recovered BUI ok? */
static inline bool
xfs_bui_validate(
	struct xfs_mount		*mp,
	struct xfs_bui_log_item		*buip)
{
	struct xfs_map_extent		*map;

	/* Only one mapping operation per BUI... */
	if (buip->bui_format.bui_nextents != XFS_BUI_MAX_FAST_EXTENTS)
		return false;

	map = &buip->bui_format.bui_extents[0];

	if (map->me_flags & ~XFS_BMAP_EXTENT_FLAGS)
		return false;

	switch (map->me_flags & XFS_BMAP_EXTENT_TYPE_MASK) {
	case XFS_BMAP_MAP:
	case XFS_BMAP_UNMAP:
		break;
	default:
		return false;
	}

	if (!xfs_verify_ino(mp, map->me_owner))
		return false;

	if (!xfs_verify_fileext(mp, map->me_startoff, map->me_len))
		return false;

	if (map->me_flags & XFS_BMAP_EXTENT_REALTIME)
		return xfs_verify_rtbext(mp, map->me_startblock, map->me_len);

	return xfs_verify_fsbext(mp, map->me_startblock, map->me_len);
}

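/*
 * Reconstruct an incore bmap intent from a recovered BUI record, grab the
 * inode it operates on, and queue the intent for replay.
 */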
static inline struct xfs_bmap_intent *
xfs_bui_recover_work(
	struct xfs_mount		*mp,
	struct xfs_defer_pending	*dfp,
	struct xfs_inode		**ipp,
	struct xfs_map_extent		*map)
{
	struct xfs_bmap_intent		*bi;
	int				error;

	error = xlog_recover_iget(mp, map->me_owner, ipp);
	if (error)
		return ERR_PTR(error);

	bi = kmem_cache_zalloc(xfs_bmap_intent_cache,
			GFP_KERNEL | __GFP_NOFAIL);
	bi->bi_whichfork = (map->me_flags & XFS_BMAP_EXTENT_ATTR_FORK) ?
			XFS_ATTR_FORK : XFS_DATA_FORK;
	bi->bi_type = map->me_flags & XFS_BMAP_EXTENT_TYPE_MASK;
	bi->bi_bmap.br_startblock = map->me_startblock;
	bi->bi_bmap.br_startoff = map->me_startoff;
	bi->bi_bmap.br_blockcount = map->me_len;
	bi->bi_bmap.br_state = (map->me_flags & XFS_BMAP_EXTENT_UNWRITTEN) ?
			XFS_EXT_UNWRITTEN : XFS_EXT_NORM;
	bi->bi_owner = *ipp;
	xfs_bmap_update_get_group(mp, bi);

	/* see xfs_bmap_defer_add for details */
	if (bi->bi_type == XFS_BMAP_MAP)
		bi->bi_owner->i_delayed_blks += bi->bi_bmap.br_blockcount;
	xfs_defer_add_item(dfp, &bi->bi_list);
	return bi;
}

/*
 * Process a bmap update intent item that was recovered from the log.
 * We need to update some inode's bmbt.
 */
STATIC int
xfs_bmap_recover_work(
	struct xfs_defer_pending	*dfp,
	struct list_head		*capture_list)
{
	struct xfs_trans_res		resv;
	struct xfs_log_item		*lip = dfp->dfp_intent;
	struct xfs_bui_log_item		*buip = BUI_ITEM(lip);
	struct xfs_trans		*tp;
	struct xfs_inode		*ip = NULL;
	struct xfs_mount		*mp = lip->li_log->l_mp;
	struct xfs_map_extent		*map;
	struct xfs_bmap_intent		*work;
	int				iext_delta;
	int				error = 0;

	if (!xfs_bui_validate(mp, buip)) {
		XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW, mp,
				&buip->bui_format, sizeof(buip->bui_format));
		return -EFSCORRUPTED;
	}

	map = &buip->bui_format.bui_extents[0];
	work = xfs_bui_recover_work(mp, dfp, &ip, map);
	if (IS_ERR(work))
		return PTR_ERR(work);

	/* Allocate transaction and do the work. */
	resv = xlog_recover_resv(&M_RES(mp)->tr_itruncate);
	error = xfs_trans_alloc(mp, &resv,
			XFS_EXTENTADD_SPACE_RES(mp, XFS_DATA_FORK), 0, 0, &tp);
	if (error)
		goto err_rele;

	xfs_ilock(ip, XFS_ILOCK_EXCL);
	xfs_trans_ijoin(tp, ip, 0);

	if (!!(map->me_flags & XFS_BMAP_EXTENT_REALTIME) !=
	    xfs_ifork_is_realtime(ip, work->bi_whichfork)) {
		error = -EFSCORRUPTED;
		goto err_cancel;
	}

	if (work->bi_type == XFS_BMAP_MAP)
		iext_delta = XFS_IEXT_ADD_NOSPLIT_CNT;
	else
		iext_delta = XFS_IEXT_PUNCH_HOLE_CNT;

	error = xfs_iext_count_extend(tp, ip, work->bi_whichfork, iext_delta);
	if (error)
		goto err_cancel;

	error = xlog_recover_finish_intent(tp, dfp);
	if (error == -EFSCORRUPTED)
		XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW, mp,
				&buip->bui_format, sizeof(buip->bui_format));
	if (error)
		goto err_cancel;

	/*
	 * Commit transaction, which frees the transaction and saves the inode
	 * for later replay activities.
	 */
	error = xfs_defer_ops_capture_and_commit(tp, capture_list);
	if (error)
		goto err_unlock;

	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	xfs_irele(ip);
	return 0;

err_cancel:
	xfs_trans_cancel(tp);
err_unlock:
	xfs_iunlock(ip, XFS_ILOCK_EXCL);
err_rele:
	xfs_irele(ip);
	return error;
}

/* Relog an intent item to push the log tail forward. */
static struct xfs_log_item *
xfs_bmap_relog_intent(
	struct xfs_trans		*tp,
	struct xfs_log_item		*intent,
	struct xfs_log_item		*done_item)
{
	struct xfs_bui_log_item		*buip;
	struct xfs_map_extent		*map;
	unsigned int			count;

	count = BUI_ITEM(intent)->bui_format.bui_nextents;
	map = BUI_ITEM(intent)->bui_format.bui_extents;

	buip = xfs_bui_init(tp->t_mountp);
	memcpy(buip->bui_format.bui_extents, map, count * sizeof(*map));
	atomic_set(&buip->bui_next_extent, count);

	return &buip->bui_item;
}

const struct xfs_defer_op_type xfs_bmap_update_defer_type = {
	.name		= "bmap",
	.max_items	= XFS_BUI_MAX_FAST_EXTENTS,
	.create_intent	= xfs_bmap_update_create_intent,
	.abort_intent	= xfs_bmap_update_abort_intent,
	.create_done	= xfs_bmap_update_create_done,
	.finish_item	= xfs_bmap_update_finish_item,
	.cancel_item	= xfs_bmap_update_cancel_item,
	.recover_work	= xfs_bmap_recover_work,
	.relog_intent	= xfs_bmap_relog_intent,
};

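/* Is this log item the BUI with the given intent id? */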
STATIC bool
xfs_bui_item_match(
	struct xfs_log_item	*lip,
	uint64_t		intent_id)
{
	return BUI_ITEM(lip)->bui_format.bui_id == intent_id;
}

static const struct xfs_item_ops xfs_bui_item_ops = {
	.flags		= XFS_ITEM_INTENT,
	.iop_size	= xfs_bui_item_size,
	.iop_format	= xfs_bui_item_format,
	.iop_unpin	= xfs_bui_item_unpin,
	.iop_release	= xfs_bui_item_release,
	.iop_match	= xfs_bui_item_match,
};

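/* Copy the log format header and extent array into an incore BUI format. */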
static inline void
xfs_bui_copy_format(
	struct xfs_bui_log_format	*dst,
	const struct xfs_bui_log_format	*src)
{
	unsigned int			i;

	memcpy(dst, src, offsetof(struct xfs_bui_log_format, bui_extents));

	for (i = 0; i < src->bui_nextents; i++)
		memcpy(&dst->bui_extents[i], &src->bui_extents[i],
				sizeof(struct xfs_map_extent));
}

/*
 * This routine is called to create an in-core extent bmap update
 * item from the bui format structure which was logged on disk.
 * It allocates an in-core bui, copies the extents from the format
 * structure into it, and adds the bui to the AIL with the given
 * LSN.
 */
STATIC int
xlog_recover_bui_commit_pass2(
	struct xlog			*log,
	struct list_head		*buffer_list,
	struct xlog_recover_item	*item,
	xfs_lsn_t			lsn)
{
	struct xfs_mount		*mp = log->l_mp;
	struct xfs_bui_log_item		*buip;
	struct xfs_bui_log_format	*bui_formatp;
	size_t				len;

	bui_formatp = item->ri_buf[0].i_addr;

	if (item->ri_buf[0].i_len < xfs_bui_log_format_sizeof(0)) {
		XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW, mp,
				item->ri_buf[0].i_addr, item->ri_buf[0].i_len);
		return -EFSCORRUPTED;
	}

	if (bui_formatp->bui_nextents != XFS_BUI_MAX_FAST_EXTENTS) {
		XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW, mp,
				item->ri_buf[0].i_addr, item->ri_buf[0].i_len);
		return -EFSCORRUPTED;
	}

	len = xfs_bui_log_format_sizeof(bui_formatp->bui_nextents);
	if (item->ri_buf[0].i_len != len) {
		XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW, mp,
				item->ri_buf[0].i_addr, item->ri_buf[0].i_len);
		return -EFSCORRUPTED;
	}

	buip = xfs_bui_init(mp);
	xfs_bui_copy_format(&buip->bui_format, bui_formatp);
	atomic_set(&buip->bui_next_extent, bui_formatp->bui_nextents);

	xlog_recover_intent_item(log, &buip->bui_item, lsn,
			&xfs_bmap_update_defer_type);
	return 0;
}

const struct xlog_recover_item_ops xlog_bui_item_ops = {
	.item_type		= XFS_LI_BUI,
	.commit_pass2		= xlog_recover_bui_commit_pass2,
};

/*
 * This routine is called when a BUD format structure is found in a committed
 * transaction in the log. Its purpose is to cancel the corresponding BUI if it
 * was still in the log. To do this it searches the AIL for the BUI with an id
 * equal to that in the BUD format structure. If we find it we drop the BUD
 * reference, which removes the BUI from the AIL and frees it.
 */
STATIC int
xlog_recover_bud_commit_pass2(
	struct xlog			*log,
	struct list_head		*buffer_list,
	struct xlog_recover_item	*item,
	xfs_lsn_t			lsn)
{
	struct xfs_bud_log_format	*bud_formatp;

	bud_formatp = item->ri_buf[0].i_addr;
	if (item->ri_buf[0].i_len != sizeof(struct xfs_bud_log_format)) {
		XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW, log->l_mp,
				item->ri_buf[0].i_addr, item->ri_buf[0].i_len);
		return -EFSCORRUPTED;
	}

	xlog_recover_release_intent(log, XFS_LI_BUI, bud_formatp->bud_bui_id);
	return 0;
}

const struct xlog_recover_item_ops xlog_bud_item_ops = {
	.item_type		= XFS_LI_BUD,
	.commit_pass2		= xlog_recover_bud_commit_pass2,
};