// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright (C) 2016 Oracle.  All Rights Reserved.
 * Author: Darrick J. Wong <darrick.wong@oracle.com>
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_bit.h"
#include "xfs_shared.h"
#include "xfs_mount.h"
#include "xfs_defer.h"
#include "xfs_trans.h"
#include "xfs_trans_priv.h"
#include "xfs_rmap_item.h"
#include "xfs_log.h"
#include "xfs_rmap.h"
#include "xfs_error.h"
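
/*
 * The RUI (rmap update intent) log item records an intent to update the
 * reverse mapping btree; the RUD (rmap update done) log item is logged by
 * the transaction that actually performs the update.  Pairing the two lets
 * log recovery finish rmap updates that were committed as intents but had
 * not completed at crash time.
 */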

kmem_zone_t	*xfs_rui_zone;
kmem_zone_t	*xfs_rud_zone;

static inline struct xfs_rui_log_item *RUI_ITEM(struct xfs_log_item *lip)
{
	return container_of(lip, struct xfs_rui_log_item, rui_item);
}

void
xfs_rui_item_free(
	struct xfs_rui_log_item	*ruip)
{
	if (ruip->rui_format.rui_nextents > XFS_RUI_MAX_FAST_EXTENTS)
		kmem_free(ruip);
	else
		kmem_cache_free(xfs_rui_zone, ruip);
}

/*
 * Freeing the RUI requires that we remove it from the AIL if it has already
 * been placed there. However, the RUI may not yet have been placed in the AIL
 * when called by xfs_rui_release() from RUD processing due to the ordering of
 * committed vs unpin operations in bulk insert operations. Hence the reference
 * count to ensure only the last caller frees the RUI.
 */
void
xfs_rui_release(
	struct xfs_rui_log_item	*ruip)
{
	ASSERT(atomic_read(&ruip->rui_refcount) > 0);
	if (atomic_dec_and_test(&ruip->rui_refcount)) {
		xfs_trans_ail_remove(&ruip->rui_item, SHUTDOWN_LOG_IO_ERROR);
		xfs_rui_item_free(ruip);
	}
}
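
/*
 * This returns the number of iovecs and the number of bytes needed to log
 * the given rui item: a single iovec pointing at the rui_log_format
 * structure embedded in the rui item.
 */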
STATIC void
xfs_rui_item_size(
	struct xfs_log_item	*lip,
	int			*nvecs,
	int			*nbytes)
{
	struct xfs_rui_log_item	*ruip = RUI_ITEM(lip);

	*nvecs += 1;
	*nbytes += xfs_rui_log_format_sizeof(ruip->rui_format.rui_nextents);
}

/*
 * This is called to fill in the vector of log iovecs for the
 * given rui log item. We use only 1 iovec, and we point that
 * at the rui_log_format structure embedded in the rui item.
 * It is at this point that we assert that all of the extent
 * slots in the rui item have been filled.
 */
STATIC void
xfs_rui_item_format(
	struct xfs_log_item	*lip,
	struct xfs_log_vec	*lv)
{
	struct xfs_rui_log_item	*ruip = RUI_ITEM(lip);
	struct xfs_log_iovec	*vecp = NULL;

	ASSERT(atomic_read(&ruip->rui_next_extent) ==
			ruip->rui_format.rui_nextents);

	ruip->rui_format.rui_type = XFS_LI_RUI;
	ruip->rui_format.rui_size = 1;

	xlog_copy_iovec(lv, &vecp, XLOG_REG_TYPE_RUI_FORMAT, &ruip->rui_format,
			xfs_rui_log_format_sizeof(ruip->rui_format.rui_nextents));
}

/*
 * The unpin operation is the last place an RUI is manipulated in the log. It
 * is either inserted in the AIL or aborted in the event of a log I/O error.
 * In either case, the RUI transaction has been successfully committed to make
 * it this far. Therefore, we expect whoever committed the RUI to either
 * construct and commit the RUD or drop the RUD's reference in the event of
 * error. Simply drop the log's RUI reference now that the log is done with
 * it.
 */
STATIC void
xfs_rui_item_unpin(
	struct xfs_log_item	*lip,
	int			remove)
{
	struct xfs_rui_log_item	*ruip = RUI_ITEM(lip);

	xfs_rui_release(ruip);
}

/*
 * The RUI has been either committed or aborted if the transaction has been
 * cancelled. If the transaction was cancelled, an RUD isn't going to be
 * constructed and thus we free the RUI here directly.
 */
STATIC void
xfs_rui_item_release(
	struct xfs_log_item	*lip)
{
	xfs_rui_release(RUI_ITEM(lip));
}

static const struct xfs_item_ops xfs_rui_item_ops = {
	.iop_size	= xfs_rui_item_size,
	.iop_format	= xfs_rui_item_format,
	.iop_unpin	= xfs_rui_item_unpin,
	.iop_release	= xfs_rui_item_release,
};

/*
 * Allocate and initialize an rui item with the given number of extents.
 */
struct xfs_rui_log_item *
xfs_rui_init(
	struct xfs_mount		*mp,
	uint				nextents)
{
	struct xfs_rui_log_item		*ruip;

	ASSERT(nextents > 0);
	if (nextents > XFS_RUI_MAX_FAST_EXTENTS)
		ruip = kmem_zalloc(xfs_rui_log_item_sizeof(nextents), 0);
	else
		ruip = kmem_zone_zalloc(xfs_rui_zone, 0);

	xfs_log_item_init(mp, &ruip->rui_item, XFS_LI_RUI, &xfs_rui_item_ops);
	ruip->rui_format.rui_nextents = nextents;
	ruip->rui_format.rui_id = (uintptr_t)(void *)ruip;
	atomic_set(&ruip->rui_next_extent, 0);
	/* Two references: one dropped at unpin time, one by the RUD. */
	atomic_set(&ruip->rui_refcount, 2);

	return ruip;
}

/*
 * Copy an RUI format buffer from the given buf, and into the destination
 * RUI format structure.  The RUI/RUD items were designed not to need any
 * special alignment handling.
 */
int
xfs_rui_copy_format(
	struct xfs_log_iovec		*buf,
	struct xfs_rui_log_format	*dst_rui_fmt)
{
	struct xfs_rui_log_format	*src_rui_fmt;
	uint				len;

	src_rui_fmt = buf->i_addr;
	len = xfs_rui_log_format_sizeof(src_rui_fmt->rui_nextents);

	if (buf->i_len != len) {
		XFS_ERROR_REPORT(__func__, XFS_ERRLEVEL_LOW, NULL);
		return -EFSCORRUPTED;
	}

	memcpy(dst_rui_fmt, src_rui_fmt, len);
	return 0;
}

static inline struct xfs_rud_log_item *RUD_ITEM(struct xfs_log_item *lip)
{
	return container_of(lip, struct xfs_rud_log_item, rud_item);
}
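
/*
 * This returns the number of iovecs and the number of bytes needed to log
 * the given rud item: a single iovec covering the rud_log_format structure
 * embedded in the rud item.
 */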
STATIC void
xfs_rud_item_size(
	struct xfs_log_item	*lip,
	int			*nvecs,
	int			*nbytes)
{
	*nvecs += 1;
	*nbytes += sizeof(struct xfs_rud_log_format);
}

/*
 * This is called to fill in the vector of log iovecs for the
 * given rud log item. We use only 1 iovec, and we point that
 * at the rud_log_format structure embedded in the rud item.
 */
STATIC void
xfs_rud_item_format(
	struct xfs_log_item	*lip,
	struct xfs_log_vec	*lv)
{
	struct xfs_rud_log_item	*rudp = RUD_ITEM(lip);
	struct xfs_log_iovec	*vecp = NULL;

	rudp->rud_format.rud_type = XFS_LI_RUD;
	rudp->rud_format.rud_size = 1;

	xlog_copy_iovec(lv, &vecp, XLOG_REG_TYPE_RUD_FORMAT, &rudp->rud_format,
			sizeof(struct xfs_rud_log_format));
}

/*
 * The RUD is either committed or aborted if the transaction is cancelled. If
 * the transaction is cancelled, drop our reference to the RUI and free the
 * RUD.
 */
STATIC void
xfs_rud_item_release(
	struct xfs_log_item	*lip)
{
	struct xfs_rud_log_item	*rudp = RUD_ITEM(lip);

	xfs_rui_release(rudp->rud_ruip);
	kmem_cache_free(xfs_rud_zone, rudp);
}

static const struct xfs_item_ops xfs_rud_item_ops = {
	.flags		= XFS_ITEM_RELEASE_WHEN_COMMITTED,
	.iop_size	= xfs_rud_item_size,
	.iop_format	= xfs_rud_item_format,
	.iop_release	= xfs_rud_item_release,
};
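
/*
 * Allocate an RUD log item, point it at the RUI whose work it completes,
 * and add it to the transaction.
 */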
static struct xfs_rud_log_item *
xfs_trans_get_rud(
	struct xfs_trans		*tp,
	struct xfs_rui_log_item		*ruip)
{
	struct xfs_rud_log_item		*rudp;

	rudp = kmem_zone_zalloc(xfs_rud_zone, 0);
	xfs_log_item_init(tp->t_mountp, &rudp->rud_item, XFS_LI_RUD,
			  &xfs_rud_item_ops);
	rudp->rud_ruip = ruip;
	rudp->rud_format.rud_rui_id = ruip->rui_format.rui_id;

	xfs_trans_add_item(tp, &rudp->rud_item);
	return rudp;
}

/* Set the map extent flags for this reverse mapping. */
static void
xfs_trans_set_rmap_flags(
	struct xfs_map_extent		*rmap,
	enum xfs_rmap_intent_type	type,
	int				whichfork,
	xfs_exntst_t			state)
{
	rmap->me_flags = 0;
	if (state == XFS_EXT_UNWRITTEN)
		rmap->me_flags |= XFS_RMAP_EXTENT_UNWRITTEN;
	if (whichfork == XFS_ATTR_FORK)
		rmap->me_flags |= XFS_RMAP_EXTENT_ATTR_FORK;
	switch (type) {
	case XFS_RMAP_MAP:
		rmap->me_flags |= XFS_RMAP_EXTENT_MAP;
		break;
	case XFS_RMAP_MAP_SHARED:
		rmap->me_flags |= XFS_RMAP_EXTENT_MAP_SHARED;
		break;
	case XFS_RMAP_UNMAP:
		rmap->me_flags |= XFS_RMAP_EXTENT_UNMAP;
		break;
	case XFS_RMAP_UNMAP_SHARED:
		rmap->me_flags |= XFS_RMAP_EXTENT_UNMAP_SHARED;
		break;
	case XFS_RMAP_CONVERT:
		rmap->me_flags |= XFS_RMAP_EXTENT_CONVERT;
		break;
	case XFS_RMAP_CONVERT_SHARED:
		rmap->me_flags |= XFS_RMAP_EXTENT_CONVERT_SHARED;
		break;
	case XFS_RMAP_ALLOC:
		rmap->me_flags |= XFS_RMAP_EXTENT_ALLOC;
		break;
	case XFS_RMAP_FREE:
		rmap->me_flags |= XFS_RMAP_EXTENT_FREE;
		break;
	default:
		ASSERT(0);
	}
}

/*
 * Finish an rmap update and log it to the RUD. Note that the transaction is
 * marked dirty regardless of whether the rmap update succeeds or fails to
 * support the RUI/RUD lifecycle rules.
 */
static int
xfs_trans_log_finish_rmap_update(
	struct xfs_trans		*tp,
	struct xfs_rud_log_item		*rudp,
	enum xfs_rmap_intent_type	type,
	uint64_t			owner,
	int				whichfork,
	xfs_fileoff_t			startoff,
	xfs_fsblock_t			startblock,
	xfs_filblks_t			blockcount,
	xfs_exntst_t			state,
	struct xfs_btree_cur		**pcur)
{
	int				error;

	error = xfs_rmap_finish_one(tp, type, owner, whichfork, startoff,
			startblock, blockcount, state, pcur);

	/*
	 * Mark the transaction dirty, even on error. This ensures the
	 * transaction is aborted, which:
	 *
	 * 1.) releases the RUI and frees the RUD
	 * 2.) shuts down the filesystem
	 */
	tp->t_flags |= XFS_TRANS_DIRTY;
	set_bit(XFS_LI_DIRTY, &rudp->rud_item.li_flags);

	return error;
}

/* Sort rmap intents by AG. */
static int
xfs_rmap_update_diff_items(
	void				*priv,
	struct list_head		*a,
	struct list_head		*b)
{
	struct xfs_mount		*mp = priv;
	struct xfs_rmap_intent		*ra;
	struct xfs_rmap_intent		*rb;

	ra = container_of(a, struct xfs_rmap_intent, ri_list);
	rb = container_of(b, struct xfs_rmap_intent, ri_list);
	return XFS_FSB_TO_AGNO(mp, ra->ri_bmap.br_startblock) -
		XFS_FSB_TO_AGNO(mp, rb->ri_bmap.br_startblock);
}
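
/* Get an RUI. */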
STATIC void *
xfs_rmap_update_create_intent(
	struct xfs_trans		*tp,
	unsigned int			count)
{
	struct xfs_rui_log_item		*ruip;

	ASSERT(tp != NULL);
	ASSERT(count > 0);

	ruip = xfs_rui_init(tp->t_mountp, count);
	ASSERT(ruip != NULL);

	/*
	 * Get a log_item_desc to point at the new item.
	 */
	xfs_trans_add_item(tp, &ruip->rui_item);
	return ruip;
}

/* Log rmap updates in the intent item. */
STATIC void
xfs_rmap_update_log_item(
	struct xfs_trans		*tp,
	void				*intent,
	struct list_head		*item)
{
	struct xfs_rui_log_item		*ruip = intent;
	struct xfs_rmap_intent		*rmap;
	uint				next_extent;
	struct xfs_map_extent		*map;

	rmap = container_of(item, struct xfs_rmap_intent, ri_list);

	tp->t_flags |= XFS_TRANS_DIRTY;
	set_bit(XFS_LI_DIRTY, &ruip->rui_item.li_flags);

	/*
	 * atomic_inc_return gives us the value after the increment;
	 * we want to use it as an array index so we need to subtract 1 from
	 * it.
	 */
	next_extent = atomic_inc_return(&ruip->rui_next_extent) - 1;
	ASSERT(next_extent < ruip->rui_format.rui_nextents);
	map = &ruip->rui_format.rui_extents[next_extent];
	map->me_owner = rmap->ri_owner;
	map->me_startblock = rmap->ri_bmap.br_startblock;
	map->me_startoff = rmap->ri_bmap.br_startoff;
	map->me_len = rmap->ri_bmap.br_blockcount;
	xfs_trans_set_rmap_flags(map, rmap->ri_type, rmap->ri_whichfork,
			rmap->ri_bmap.br_state);
}

/* Get an RUD so we can process all the deferred rmap updates. */
STATIC void *
xfs_rmap_update_create_done(
	struct xfs_trans		*tp,
	void				*intent,
	unsigned int			count)
{
	return xfs_trans_get_rud(tp, intent);
}

/* Process a deferred rmap update. */
STATIC int
xfs_rmap_update_finish_item(
	struct xfs_trans		*tp,
	struct list_head		*item,
	void				*done_item,
	void				**state)
{
	struct xfs_rmap_intent		*rmap;
	int				error;

	rmap = container_of(item, struct xfs_rmap_intent, ri_list);
	error = xfs_trans_log_finish_rmap_update(tp, done_item,
			rmap->ri_type,
			rmap->ri_owner, rmap->ri_whichfork,
			rmap->ri_bmap.br_startoff,
			rmap->ri_bmap.br_startblock,
			rmap->ri_bmap.br_blockcount,
			rmap->ri_bmap.br_state,
			(struct xfs_btree_cur **)state);
	kmem_free(rmap);
	return error;
}

/* Clean up after processing deferred rmaps. */
STATIC void
xfs_rmap_update_finish_cleanup(
	struct xfs_trans	*tp,
	void			*state,
	int			error)
{
	struct xfs_btree_cur	*rcur = state;

	xfs_rmap_finish_one_cleanup(tp, rcur, error);
}

/* Abort all pending RUIs. */
STATIC void
xfs_rmap_update_abort_intent(
	void				*intent)
{
	xfs_rui_release(intent);
}

/* Cancel a deferred rmap update. */
STATIC void
xfs_rmap_update_cancel_item(
	struct list_head		*item)
{
	struct xfs_rmap_intent		*rmap;

	rmap = container_of(item, struct xfs_rmap_intent, ri_list);
	kmem_free(rmap);
}
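
/*
 * Hooks for the deferred-operations framework: it uses these to log RUIs
 * for queued rmap updates and to retire each RUI with an RUD once the
 * updates are complete.
 */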
const struct xfs_defer_op_type xfs_rmap_update_defer_type = {
	.max_items	= XFS_RUI_MAX_FAST_EXTENTS,
	.diff_items	= xfs_rmap_update_diff_items,
	.create_intent	= xfs_rmap_update_create_intent,
	.abort_intent	= xfs_rmap_update_abort_intent,
	.log_item	= xfs_rmap_update_log_item,
	.create_done	= xfs_rmap_update_create_done,
	.finish_item	= xfs_rmap_update_finish_item,
	.finish_cleanup	= xfs_rmap_update_finish_cleanup,
	.cancel_item	= xfs_rmap_update_cancel_item,
};

/*
 * Process an rmap update intent item that was recovered from the log.
 * We need to update the rmapbt.
 */
int
xfs_rui_recover(
	struct xfs_mount		*mp,
	struct xfs_rui_log_item		*ruip)
{
	int				i;
	int				error = 0;
	struct xfs_map_extent		*rmap;
	xfs_fsblock_t			startblock_fsb;
	bool				op_ok;
	struct xfs_rud_log_item		*rudp;
	enum xfs_rmap_intent_type	type;
	int				whichfork;
	xfs_exntst_t			state;
	struct xfs_trans		*tp;
	struct xfs_btree_cur		*rcur = NULL;

	ASSERT(!test_bit(XFS_RUI_RECOVERED, &ruip->rui_flags));

	/*
	 * First check the validity of the extents described by the
	 * RUI.  If any are bad, then assume that all are bad and
	 * just toss the RUI.
	 */
	for (i = 0; i < ruip->rui_format.rui_nextents; i++) {
		rmap = &ruip->rui_format.rui_extents[i];
		startblock_fsb = XFS_BB_TO_FSB(mp,
				   XFS_FSB_TO_DADDR(mp, rmap->me_startblock));
		switch (rmap->me_flags & XFS_RMAP_EXTENT_TYPE_MASK) {
		case XFS_RMAP_EXTENT_MAP:
		case XFS_RMAP_EXTENT_MAP_SHARED:
		case XFS_RMAP_EXTENT_UNMAP:
		case XFS_RMAP_EXTENT_UNMAP_SHARED:
		case XFS_RMAP_EXTENT_CONVERT:
		case XFS_RMAP_EXTENT_CONVERT_SHARED:
		case XFS_RMAP_EXTENT_ALLOC:
		case XFS_RMAP_EXTENT_FREE:
			op_ok = true;
			break;
		default:
			op_ok = false;
			break;
		}
		if (!op_ok || startblock_fsb == 0 ||
		    rmap->me_len == 0 ||
		    startblock_fsb >= mp->m_sb.sb_dblocks ||
		    rmap->me_len >= mp->m_sb.sb_agblocks ||
		    (rmap->me_flags & ~XFS_RMAP_EXTENT_FLAGS)) {
			/*
			 * This will pull the RUI from the AIL and
			 * free the memory associated with it.
			 */
			set_bit(XFS_RUI_RECOVERED, &ruip->rui_flags);
			xfs_rui_release(ruip);
			return -EFSCORRUPTED;
		}
	}
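
	/*
	 * Allocate a transaction to update the rmapbt, borrowing the
	 * itruncate log reservation and enough blocks to handle a full
	 * split of the rmap btree.
	 */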
	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_itruncate,
			mp->m_rmap_maxlevels, 0, XFS_TRANS_RESERVE, &tp);
	if (error)
		return error;
	rudp = xfs_trans_get_rud(tp, ruip);

	for (i = 0; i < ruip->rui_format.rui_nextents; i++) {
		rmap = &ruip->rui_format.rui_extents[i];
		state = (rmap->me_flags & XFS_RMAP_EXTENT_UNWRITTEN) ?
				XFS_EXT_UNWRITTEN : XFS_EXT_NORM;
		whichfork = (rmap->me_flags & XFS_RMAP_EXTENT_ATTR_FORK) ?
				XFS_ATTR_FORK : XFS_DATA_FORK;
		switch (rmap->me_flags & XFS_RMAP_EXTENT_TYPE_MASK) {
		case XFS_RMAP_EXTENT_MAP:
			type = XFS_RMAP_MAP;
			break;
		case XFS_RMAP_EXTENT_MAP_SHARED:
			type = XFS_RMAP_MAP_SHARED;
			break;
		case XFS_RMAP_EXTENT_UNMAP:
			type = XFS_RMAP_UNMAP;
			break;
		case XFS_RMAP_EXTENT_UNMAP_SHARED:
			type = XFS_RMAP_UNMAP_SHARED;
			break;
		case XFS_RMAP_EXTENT_CONVERT:
			type = XFS_RMAP_CONVERT;
			break;
		case XFS_RMAP_EXTENT_CONVERT_SHARED:
			type = XFS_RMAP_CONVERT_SHARED;
			break;
		case XFS_RMAP_EXTENT_ALLOC:
			type = XFS_RMAP_ALLOC;
			break;
		case XFS_RMAP_EXTENT_FREE:
			type = XFS_RMAP_FREE;
			break;
		default:
			XFS_ERROR_REPORT(__func__, XFS_ERRLEVEL_LOW, NULL);
			error = -EFSCORRUPTED;
			goto abort_error;
		}
		error = xfs_trans_log_finish_rmap_update(tp, rudp, type,
				rmap->me_owner, whichfork,
				rmap->me_startoff, rmap->me_startblock,
				rmap->me_len, state, &rcur);
		if (error)
			goto abort_error;
	}

	xfs_rmap_finish_one_cleanup(tp, rcur, error);
	set_bit(XFS_RUI_RECOVERED, &ruip->rui_flags);
	error = xfs_trans_commit(tp);
	return error;

abort_error:
	xfs_rmap_finish_one_cleanup(tp, rcur, error);
	xfs_trans_cancel(tp);
	return error;
}