// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_bit.h"
#include "xfs_mount.h"
#include "xfs_trans.h"
#include "xfs_buf_item.h"
#include "xfs_trans_priv.h"
#include "xfs_trace.h"
#include "xfs_log.h"
kmem_zone_t	*xfs_buf_item_zone;
static inline struct xfs_buf_log_item *BUF_ITEM(struct xfs_log_item *lip)
{
	return container_of(lip, struct xfs_buf_log_item, bli_item);
}
STATIC void	xfs_buf_do_callbacks(struct xfs_buf *bp);
static inline int
xfs_buf_log_format_size(
	struct xfs_buf_log_format *blfp)
{
	return offsetof(struct xfs_buf_log_format, blf_data_map) +
			(blfp->blf_map_size * sizeof(blfp->blf_data_map[0]));
}
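/*
 * Sizing illustration (editor's note, not in the original source): assuming
 * the usual 128 byte XFS_BLF_CHUNK and 32-bit map words, a single-segment 4k
 * buffer has 32 chunks and therefore a one-word dirty bitmap, so the format
 * size is the structure header plus a single bitmap word. The bitmap is
 * sized at run time, which is why the in-memory structure size cannot be
 * used directly here.
 */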
/*
 * This returns the number of log iovecs needed to log the
 * given buf log item.
 *
 * It calculates this as 1 iovec for the buf log format structure
 * and 1 for each stretch of non-contiguous chunks to be logged.
 * Contiguous chunks are logged in a single iovec.
 *
 * If the XFS_BLI_STALE flag has been set, then log nothing.
 */
STATIC void
xfs_buf_item_size_segment(
	struct xfs_buf_log_item	*bip,
	struct xfs_buf_log_format *blfp,
	int			*nvecs,
	int			*nbytes)
{
	struct xfs_buf		*bp = bip->bli_buf;
	int			next_bit;
	int			last_bit;

	last_bit = xfs_next_bit(blfp->blf_data_map, blfp->blf_map_size, 0);
	if (last_bit == -1)
		return;

	/*
	 * initial count for a dirty buffer is 2 vectors - the format structure
	 * and the first dirty region.
	 */
	*nvecs += 2;
	*nbytes += xfs_buf_log_format_size(blfp) + XFS_BLF_CHUNK;

	while (last_bit != -1) {
		/*
		 * This takes the bit number to start looking from and
		 * returns the next set bit from there.  It returns -1
		 * if there are no more bits set or the start bit is
		 * beyond the end of the bitmap.
		 */
		next_bit = xfs_next_bit(blfp->blf_data_map, blfp->blf_map_size,
					last_bit + 1);
		/*
		 * If we run out of bits, leave the loop,
		 * else if we find a new set of bits bump the number of vecs,
		 * else keep scanning the current set of bits.
		 */
		if (next_bit == -1) {
			break;
		} else if (next_bit != last_bit + 1) {
			last_bit = next_bit;
			(*nvecs)++;
		} else if (xfs_buf_offset(bp, next_bit * XFS_BLF_CHUNK) !=
			   (xfs_buf_offset(bp, last_bit * XFS_BLF_CHUNK) +
			    XFS_BLF_CHUNK)) {
			last_bit = next_bit;
			(*nvecs)++;
		} else {
			last_bit++;
		}
		*nbytes += XFS_BLF_CHUNK;
	}
}
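/*
 * Worked example (editor's illustration): with dirty chunk bits {0, 1, 4, 5,
 * 6}, the initial accounting adds 2 vectors (format structure + first dirty
 * region), the jump from bit 1 to bit 4 adds a third, and *nbytes grows by
 * the format size plus 5 chunks. Contiguous bits 4-6 stay within the vector
 * opened at bit 4.
 */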
/*
 * This returns the number of log iovecs needed to log the given buf log item.
 *
 * It calculates this as 1 iovec for the buf log format structure and 1 for each
 * stretch of non-contiguous chunks to be logged.  Contiguous chunks are logged
 * in a single iovec.
 *
 * Discontiguous buffers need a format structure per region that is being
 * logged. This makes the changes in the buffer appear to log recovery as though
 * they came from separate buffers, just like would occur if multiple buffers
 * were used instead of a single discontiguous buffer. This enables
 * discontiguous buffers to be in-memory constructs, completely transparent to
 * what ends up on disk.
 *
 * If the XFS_BLI_STALE flag has been set, then log nothing but the buf log
 * format structure.
 */
STATIC void
xfs_buf_item_size(
	struct xfs_log_item	*lip,
	int			*nvecs,
	int			*nbytes)
{
	struct xfs_buf_log_item	*bip = BUF_ITEM(lip);
	int			i;

	ASSERT(atomic_read(&bip->bli_refcount) > 0);
	if (bip->bli_flags & XFS_BLI_STALE) {
		/*
		 * The buffer is stale, so all we need to log
		 * is the buf log format structure with the
		 * cancel flag in it.
		 */
		trace_xfs_buf_item_size_stale(bip);
		ASSERT(bip->__bli_format.blf_flags & XFS_BLF_CANCEL);
		*nvecs += bip->bli_format_count;
		for (i = 0; i < bip->bli_format_count; i++) {
			*nbytes += xfs_buf_log_format_size(&bip->bli_formats[i]);
		}
		return;
	}

	ASSERT(bip->bli_flags & XFS_BLI_LOGGED);

	if (bip->bli_flags & XFS_BLI_ORDERED) {
		/*
		 * The buffer has been logged just to order it.
		 * It is not being included in the transaction
		 * commit, so no vectors are used at all.
		 */
		trace_xfs_buf_item_size_ordered(bip);
		*nvecs = XFS_LOG_VEC_ORDERED;
		return;
	}

	/*
	 * the vector count is based on the number of buffer vectors we have
	 * dirty bits in. This will only be greater than one when we have a
	 * compound buffer with more than one segment dirty. Hence for compound
	 * buffers we need to track which segment the dirty bits correspond to,
	 * and when we move from one segment to the next increment the vector
	 * count for the extra buf log format structure that will need to be
	 * written.
	 */
	for (i = 0; i < bip->bli_format_count; i++) {
		xfs_buf_item_size_segment(bip, &bip->bli_formats[i],
					  nvecs, nbytes);
	}
	trace_xfs_buf_item_size(bip);
}
static inline void
xfs_buf_item_copy_iovec(
	struct xfs_log_vec	*lv,
	struct xfs_log_iovec	**vecp,
	struct xfs_buf		*bp,
	uint			offset,
	int			first_bit,
	uint			nbits)
{
	offset += first_bit * XFS_BLF_CHUNK;
	xlog_copy_iovec(lv, vecp, XLOG_REG_TYPE_BCHUNK,
			xfs_buf_offset(bp, offset),
			nbits * XFS_BLF_CHUNK);
}
static inline bool
xfs_buf_item_straddle(
	struct xfs_buf		*bp,
	uint			offset,
	int			next_bit,
	int			last_bit)
{
	return xfs_buf_offset(bp, offset + (next_bit << XFS_BLF_SHIFT)) !=
		(xfs_buf_offset(bp, offset + (last_bit << XFS_BLF_SHIFT)) +
		 XFS_BLF_CHUNK);
}
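/*
 * Editor's note: adjacent dirty chunks can only share an iovec when their
 * backing memory is also adjacent. A multi-page buffer backed by
 * discontiguous pages can have chunks that are logically consecutive but
 * physically apart; this check forces a new iovec at such a boundary.
 */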
static void
xfs_buf_item_format_segment(
	struct xfs_buf_log_item	*bip,
	struct xfs_log_vec	*lv,
	struct xfs_log_iovec	**vecp,
	uint			offset,
	struct xfs_buf_log_format *blfp)
{
	struct xfs_buf		*bp = bip->bli_buf;
	uint			base_size;
	int			first_bit;
	int			last_bit;
	int			next_bit;
	uint			nbits;

	/* copy the flags across from the base format item */
	blfp->blf_flags = bip->__bli_format.blf_flags;

	/*
	 * Base size is the actual size of the ondisk structure - it reflects
	 * the actual size of the dirty bitmap rather than the size of the in
	 * memory structure.
	 */
	base_size = xfs_buf_log_format_size(blfp);

	first_bit = xfs_next_bit(blfp->blf_data_map, blfp->blf_map_size, 0);
	if (!(bip->bli_flags & XFS_BLI_STALE) && first_bit == -1) {
		/*
		 * If the map is not dirty in the transaction, mark
		 * the size as zero and do not advance the vector pointer.
		 */
		return;
	}

	blfp = xlog_copy_iovec(lv, vecp, XLOG_REG_TYPE_BFORMAT, blfp, base_size);
	blfp->blf_size = 1;

	if (bip->bli_flags & XFS_BLI_STALE) {
		/*
		 * The buffer is stale, so all we need to log
		 * is the buf log format structure with the
		 * cancel flag in it.
		 */
		trace_xfs_buf_item_format_stale(bip);
		ASSERT(blfp->blf_flags & XFS_BLF_CANCEL);
		return;
	}

	/*
	 * Fill in an iovec for each set of contiguous chunks.
	 */
	last_bit = first_bit;
	nbits = 1;
	for (;;) {
		/*
		 * This takes the bit number to start looking from and
		 * returns the next set bit from there.  It returns -1
		 * if there are no more bits set or the start bit is
		 * beyond the end of the bitmap.
		 */
		next_bit = xfs_next_bit(blfp->blf_data_map, blfp->blf_map_size,
					(uint)last_bit + 1);
		/*
		 * If we run out of bits fill in the last iovec and get out of
		 * the loop.  Else if we start a new set of bits then fill in
		 * the iovec for the series we were looking at and start
		 * counting the bits in the new one.  Else we're still in the
		 * same set of bits so just keep counting and scanning.
		 */
		if (next_bit == -1) {
			xfs_buf_item_copy_iovec(lv, vecp, bp, offset,
						first_bit, nbits);
			blfp->blf_size++;
			break;
		} else if (next_bit != last_bit + 1 ||
			   xfs_buf_item_straddle(bp, offset, next_bit, last_bit)) {
			xfs_buf_item_copy_iovec(lv, vecp, bp, offset,
						first_bit, nbits);
			blfp->blf_size++;
			first_bit = next_bit;
			last_bit = next_bit;
			nbits = 1;
		} else {
			last_bit++;
			nbits++;
		}
	}
}
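/*
 * Illustrative output (editor's sketch): a segment with two separated dirty
 * regions ends up as three regions in the log vector -
 * [BFORMAT][BCHUNK][BCHUNK] - with blf_size set to 3 so recovery knows how
 * many regions belong to this format structure.
 */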
/*
 * This is called to fill in the vector of log iovecs for the
 * given log buf item.  It fills the first entry with a buf log
 * format structure, and the rest point to contiguous chunks
 * within the buffer.
 */
STATIC void
xfs_buf_item_format(
	struct xfs_log_item	*lip,
	struct xfs_log_vec	*lv)
{
	struct xfs_buf_log_item	*bip = BUF_ITEM(lip);
	struct xfs_buf		*bp = bip->bli_buf;
	struct xfs_log_iovec	*vecp = NULL;
	uint			offset = 0;
	int			i;

	ASSERT(atomic_read(&bip->bli_refcount) > 0);
	ASSERT((bip->bli_flags & XFS_BLI_LOGGED) ||
	       (bip->bli_flags & XFS_BLI_STALE));
	ASSERT((bip->bli_flags & XFS_BLI_STALE) ||
	       (xfs_blft_from_flags(&bip->__bli_format) > XFS_BLFT_UNKNOWN_BUF
	        && xfs_blft_from_flags(&bip->__bli_format) < XFS_BLFT_MAX_BUF));
	ASSERT(!(bip->bli_flags & XFS_BLI_ORDERED) ||
	       (bip->bli_flags & XFS_BLI_STALE));

	/*
	 * If it is an inode buffer, transfer the in-memory state to the
	 * format flags and clear the in-memory state.
	 *
	 * For buffer based inode allocation, we do not transfer
	 * this state if the inode buffer allocation has not yet been committed
	 * to the log as setting the XFS_BLI_INODE_BUF flag will prevent
	 * correct replay of the inode allocation.
	 *
	 * For icreate item based inode allocation, the buffers aren't written
	 * to the journal during allocation, and hence we should always tag the
	 * buffer as an inode buffer so that the correct unlinked list replay
	 * occurs during recovery.
	 */
	if (bip->bli_flags & XFS_BLI_INODE_BUF) {
		if (xfs_sb_version_hascrc(&lip->li_mountp->m_sb) ||
		    !((bip->bli_flags & XFS_BLI_INODE_ALLOC_BUF) &&
		      xfs_log_item_in_current_chkpt(lip)))
			bip->__bli_format.blf_flags |= XFS_BLF_INODE_BUF;
		bip->bli_flags &= ~XFS_BLI_INODE_BUF;
	}

	for (i = 0; i < bip->bli_format_count; i++) {
		xfs_buf_item_format_segment(bip, lv, &vecp, offset,
					    &bip->bli_formats[i]);
		offset += BBTOB(bp->b_maps[i].bm_len);
	}

	/*
	 * Check to make sure everything is consistent.
	 */
	trace_xfs_buf_item_format(bip);
}
/*
 * This is called to pin the buffer associated with the buf log item in memory
 * so it cannot be written out.
 *
 * We also always take a reference to the buffer log item here so that the bli
 * is held while the item is pinned in memory. This means that we can
 * unconditionally drop the reference count a transaction holds when the
 * transaction is completed.
 */
STATIC void
xfs_buf_item_pin(
	struct xfs_log_item	*lip)
{
	struct xfs_buf_log_item	*bip = BUF_ITEM(lip);

	ASSERT(atomic_read(&bip->bli_refcount) > 0);
	ASSERT((bip->bli_flags & XFS_BLI_LOGGED) ||
	       (bip->bli_flags & XFS_BLI_ORDERED) ||
	       (bip->bli_flags & XFS_BLI_STALE));

	trace_xfs_buf_item_pin(bip);

	atomic_inc(&bip->bli_refcount);
	atomic_inc(&bip->bli_buf->b_pin_count);
}
/*
 * This is called to unpin the buffer associated with the buf log
 * item which was previously pinned with a call to xfs_buf_item_pin().
 *
 * Also drop the reference to the buf item for the current transaction.
 * If the XFS_BLI_STALE flag is set and we are the last reference,
 * then free up the buf log item and unlock the buffer.
 *
 * If the remove flag is set we are called from uncommit in the
 * forced-shutdown path.  If that is true and the reference count on
 * the log item is going to drop to zero we need to free the item's
 * descriptor in the transaction.
 */
STATIC void
xfs_buf_item_unpin(
	struct xfs_log_item	*lip,
	int			remove)
{
	struct xfs_buf_log_item	*bip = BUF_ITEM(lip);
	xfs_buf_t		*bp = bip->bli_buf;
	struct xfs_ail		*ailp = lip->li_ailp;
	int			stale = bip->bli_flags & XFS_BLI_STALE;
	int			freed;

	ASSERT(bp->b_log_item == bip);
	ASSERT(atomic_read(&bip->bli_refcount) > 0);

	trace_xfs_buf_item_unpin(bip);

	freed = atomic_dec_and_test(&bip->bli_refcount);

	if (atomic_dec_and_test(&bp->b_pin_count))
		wake_up_all(&bp->b_waiters);

	if (freed && stale) {
		ASSERT(bip->bli_flags & XFS_BLI_STALE);
		ASSERT(xfs_buf_islocked(bp));
		ASSERT(bp->b_flags & XBF_STALE);
		ASSERT(bip->__bli_format.blf_flags & XFS_BLF_CANCEL);

		trace_xfs_buf_item_unpin_stale(bip);

		if (remove) {
			/*
			 * If we are in a transaction context, we have to
			 * remove the log item from the transaction as we are
			 * about to release our reference to the buffer.  If we
			 * don't, the unlock that occurs later in
			 * xfs_trans_uncommit() will try to reference the
			 * buffer which we no longer have a hold on.
			 */
			if (!list_empty(&lip->li_trans))
				xfs_trans_del_item(lip);

			/*
			 * Since the transaction no longer refers to the buffer,
			 * the buffer should no longer refer to the transaction.
			 */
			bp->b_transp = NULL;
		}

		/*
		 * If we get called here because of an IO error, we may
		 * or may not have the item on the AIL. xfs_trans_ail_delete()
		 * will take care of that situation.
		 * xfs_trans_ail_delete() drops the AIL lock.
		 */
		if (bip->bli_flags & XFS_BLI_STALE_INODE) {
			xfs_buf_do_callbacks(bp);
			bp->b_log_item = NULL;
			list_del_init(&bp->b_li_list);
			bp->b_iodone = NULL;
		} else {
			spin_lock(&ailp->ail_lock);
			xfs_trans_ail_delete(ailp, lip, SHUTDOWN_LOG_IO_ERROR);
			xfs_buf_item_relse(bp);
			ASSERT(bp->b_log_item == NULL);
		}
		xfs_buf_relse(bp);
	} else if (freed && remove) {
		/*
		 * There are currently two references to the buffer - the active
		 * LRU reference and the buf log item. What we are about to do
		 * here - simulate a failed IO completion - requires 3
		 * references.
		 *
		 * The LRU reference is removed by the xfs_buf_stale() call. The
		 * buf item reference is removed by the xfs_buf_iodone()
		 * callback that is run by xfs_buf_do_callbacks() during ioend
		 * processing (via the bp->b_iodone callback), and then finally
		 * the ioend processing will drop the IO reference if the buffer
		 * is marked XBF_ASYNC.
		 *
		 * Hence we need to take an additional reference here so that IO
		 * completion processing doesn't free the buffer prematurely.
		 */
		xfs_buf_lock(bp);
		xfs_buf_hold(bp);
		bp->b_flags |= XBF_ASYNC;
		xfs_buf_ioerror(bp, -EIO);
		bp->b_flags &= ~XBF_DONE;
		xfs_buf_stale(bp);
		xfs_buf_ioend(bp);
	}
}
/*
 * Buffer IO error rate limiting. Limit it to no more than 10 messages per 30
 * seconds so as to not spam logs too much on repeated detection of the same
 * buffer being bad.
 */
static DEFINE_RATELIMIT_STATE(xfs_buf_write_fail_rl_state, 30 * HZ, 10);
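/*
 * Editor's note on the ratelimit arguments: DEFINE_RATELIMIT_STATE takes an
 * interval in jiffies and a burst count, so 30 * HZ with a burst of 10
 * matches the "10 messages per 30 seconds" described above. ___ratelimit()
 * returns nonzero while a message is still allowed within the current window.
 */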
STATIC uint
xfs_buf_item_push(
	struct xfs_log_item	*lip,
	struct list_head	*buffer_list)
{
	struct xfs_buf_log_item	*bip = BUF_ITEM(lip);
	struct xfs_buf		*bp = bip->bli_buf;
	uint			rval = XFS_ITEM_SUCCESS;

	if (xfs_buf_ispinned(bp))
		return XFS_ITEM_PINNED;
	if (!xfs_buf_trylock(bp)) {
		/*
		 * If we have just raced with a buffer being pinned and it has
		 * been marked stale, we could end up stalling until someone else
		 * issues a log force to unpin the stale buffer. Check for the
		 * race condition here so xfsaild recognizes the buffer is pinned
		 * and queues a log force to move it along.
		 */
		if (xfs_buf_ispinned(bp))
			return XFS_ITEM_PINNED;
		return XFS_ITEM_LOCKED;
	}

	ASSERT(!(bip->bli_flags & XFS_BLI_STALE));

	trace_xfs_buf_item_push(bip);

	/* has a previous flush failed due to IO errors? */
	if ((bp->b_flags & XBF_WRITE_FAIL) &&
	    ___ratelimit(&xfs_buf_write_fail_rl_state, "XFS: Failing async write")) {
		xfs_warn(bp->b_mount,
"Failing async write on buffer block 0x%llx. Retrying async write.",
			 (long long)bp->b_bn);
	}

	if (!xfs_buf_delwri_queue(bp, buffer_list))
		rval = XFS_ITEM_FLUSHING;
	xfs_buf_unlock(bp);
	return rval;
}
/*
 * Drop the buffer log item refcount and take appropriate action. This helper
 * determines whether the bli must be freed or not, since a decrement to zero
 * does not necessarily mean the bli is unused.
 *
 * Return true if the bli is freed, false otherwise.
 */
bool
xfs_buf_item_put(
	struct xfs_buf_log_item	*bip)
{
	struct xfs_log_item	*lip = &bip->bli_item;
	bool			aborted;
	bool			dirty;

	/* drop the bli ref and return if it wasn't the last one */
	if (!atomic_dec_and_test(&bip->bli_refcount))
		return false;

	/*
	 * We dropped the last ref and must free the item if clean or aborted.
	 * If the bli is dirty and non-aborted, the buffer was clean in the
	 * transaction but still awaiting writeback from previous changes. In
	 * that case, the bli is freed on buffer writeback completion.
	 */
	aborted = test_bit(XFS_LI_ABORTED, &lip->li_flags) ||
		  XFS_FORCED_SHUTDOWN(lip->li_mountp);
	dirty = bip->bli_flags & XFS_BLI_DIRTY;
	if (dirty && !aborted)
		return false;

	/*
	 * The bli is aborted or clean. An aborted item may be in the AIL
	 * regardless of dirty state.  For example, consider an aborted
	 * transaction that invalidated a dirty bli and cleared the dirty
	 * state.
	 */
	if (aborted)
		xfs_trans_ail_remove(lip, SHUTDOWN_LOG_IO_ERROR);
	xfs_buf_item_relse(bip->bli_buf);
	return true;
}
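/*
 * Decision summary (editor's illustration of the logic above):
 *
 *	last ref?  dirty?  aborted?  action
 *	no         -       -         nothing, return false
 *	yes        yes     no        leave bli for writeback, return false
 *	yes        -       yes       remove from AIL, free bli, return true
 *	yes        no      no        free bli, return true
 */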
/*
 * Release the buffer associated with the buf log item.  If there is no dirty
 * logged data associated with the buffer recorded in the buf log item, then
 * free the buf log item and remove the reference to it in the buffer.
 *
 * This call ignores the recursion count.  It is only called when the buffer
 * should REALLY be unlocked, regardless of the recursion count.
 *
 * We unconditionally drop the transaction's reference to the log item. If the
 * item was logged, then another reference was taken when it was pinned, so we
 * can safely drop the transaction reference now.  This also allows us to avoid
 * potential races with the unpin code freeing the bli by not referencing the
 * bli after we've dropped the reference count.
 *
 * If the XFS_BLI_HOLD flag is set in the buf log item, then free the log item
 * if necessary but do not unlock the buffer.  This is for support of
 * xfs_trans_bhold(). Make sure the XFS_BLI_HOLD field is cleared if we don't
 * free the item.
 */
STATIC void
xfs_buf_item_release(
	struct xfs_log_item	*lip)
{
	struct xfs_buf_log_item	*bip = BUF_ITEM(lip);
	struct xfs_buf		*bp = bip->bli_buf;
	bool			released;
	bool			hold = bip->bli_flags & XFS_BLI_HOLD;
	bool			stale = bip->bli_flags & XFS_BLI_STALE;
#if defined(DEBUG) || defined(XFS_WARN)
	bool			ordered = bip->bli_flags & XFS_BLI_ORDERED;
	bool			dirty = bip->bli_flags & XFS_BLI_DIRTY;
	bool			aborted = test_bit(XFS_LI_ABORTED,
						   &lip->li_flags);
#endif

	trace_xfs_buf_item_release(bip);

	/*
	 * The bli dirty state should match whether the blf has logged segments
	 * except for ordered buffers, where only the bli should be dirty.
	 */
	ASSERT((!ordered && dirty == xfs_buf_item_dirty_format(bip)) ||
	       (ordered && dirty && !xfs_buf_item_dirty_format(bip)));
	ASSERT(!stale || (bip->__bli_format.blf_flags & XFS_BLF_CANCEL));

	/*
	 * Clear the buffer's association with this transaction and
	 * per-transaction state from the bli, which has been copied above.
	 */
	bp->b_transp = NULL;
	bip->bli_flags &= ~(XFS_BLI_LOGGED | XFS_BLI_HOLD | XFS_BLI_ORDERED);

	/*
	 * Unref the item and unlock the buffer unless held or stale. Stale
	 * buffers remain locked until final unpin unless the bli is freed by
	 * the unref call. The latter implies shutdown because buffer
	 * invalidation dirties the bli and transaction.
	 */
	released = xfs_buf_item_put(bip);
	if (hold || (stale && !released))
		return;
	ASSERT(!stale || aborted);
	xfs_buf_relse(bp);
}
STATIC void
xfs_buf_item_committing(
	struct xfs_log_item	*lip,
	xfs_lsn_t		commit_lsn)
{
	return xfs_buf_item_release(lip);
}
/*
 * This is called to find out where the oldest active copy of the
 * buf log item in the on disk log resides now that the last log
 * write of it completed at the given lsn.
 * We always re-log all the dirty data in a buffer, so usually the
 * latest copy in the on disk log is the only one that matters.  For
 * those cases we simply return the given lsn.
 *
 * The one exception to this is for buffers full of newly allocated
 * inodes.  These buffers are only relogged with the XFS_BLI_INODE_BUF
 * flag set, indicating that only the di_next_unlinked fields from the
 * inodes in the buffers will be replayed during recovery.  If the
 * original newly allocated inode images have not yet been flushed
 * when the buffer is so relogged, then we need to make sure that we
 * keep the old images in the 'active' portion of the log.  We do this
 * by returning the original lsn of that transaction here rather than
 * the current one.
 */
STATIC xfs_lsn_t
xfs_buf_item_committed(
	struct xfs_log_item	*lip,
	xfs_lsn_t		lsn)
{
	struct xfs_buf_log_item	*bip = BUF_ITEM(lip);

	trace_xfs_buf_item_committed(bip);

	if ((bip->bli_flags & XFS_BLI_INODE_ALLOC_BUF) && lip->li_lsn != 0)
		return lip->li_lsn;
	return lsn;
}
static const struct xfs_item_ops xfs_buf_item_ops = {
	.iop_size	= xfs_buf_item_size,
	.iop_format	= xfs_buf_item_format,
	.iop_pin	= xfs_buf_item_pin,
	.iop_unpin	= xfs_buf_item_unpin,
	.iop_release	= xfs_buf_item_release,
	.iop_committing	= xfs_buf_item_committing,
	.iop_committed	= xfs_buf_item_committed,
	.iop_push	= xfs_buf_item_push,
};
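/*
 * Lifecycle summary (editor's sketch, derived from the comments above):
 * iop_size/iop_format run at transaction commit to build the log vectors,
 * iop_pin/iop_unpin bracket the time the changes sit in the log,
 * iop_release and iop_committing drop the transaction's hold on the item,
 * iop_committed reports the LSN used to position the item in the AIL, and
 * iop_push is what xfsaild calls to write the buffer back and retire it.
 */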
STATIC int
xfs_buf_item_get_format(
	struct xfs_buf_log_item	*bip,
	int			count)
{
	ASSERT(bip->bli_formats == NULL);
	bip->bli_format_count = count;

	if (count == 1) {
		bip->bli_formats = &bip->__bli_format;
		return 0;
	}

	bip->bli_formats = kmem_zalloc(count * sizeof(struct xfs_buf_log_format),
				       0);
	if (!bip->bli_formats)
		return -ENOMEM;
	return 0;
}
STATIC void
xfs_buf_item_free_format(
	struct xfs_buf_log_item	*bip)
{
	if (bip->bli_formats != &bip->__bli_format) {
		kmem_free(bip->bli_formats);
		bip->bli_formats = NULL;
	}
}
/*
 * Allocate a new buf log item to go with the given buffer.
 * Set the buffer's b_log_item field to point to the new
 * buf log item.
 */
int
xfs_buf_item_init(
	struct xfs_buf	*bp,
	struct xfs_mount *mp)
{
	struct xfs_buf_log_item	*bip = bp->b_log_item;
	int			chunks;
	int			map_size;
	int			error;
	int			i;

	/*
	 * Check to see if there is already a buf log item for
	 * this buffer. If we do already have one, there is
	 * nothing to do here so return.
	 */
	ASSERT(bp->b_mount == mp);
	if (bip) {
		ASSERT(bip->bli_item.li_type == XFS_LI_BUF);
		ASSERT(!bp->b_transp);
		ASSERT(bip->bli_buf == bp);
		return 0;
	}

	bip = kmem_zone_zalloc(xfs_buf_item_zone, 0);
	xfs_log_item_init(mp, &bip->bli_item, XFS_LI_BUF, &xfs_buf_item_ops);
	bip->bli_buf = bp;

	/*
	 * chunks is the number of XFS_BLF_CHUNK size pieces the buffer
	 * can be divided into. Make sure not to truncate any pieces.
	 * map_size is the size of the bitmap needed to describe the
	 * chunks of the buffer.
	 *
	 * Discontiguous buffer support follows the layout of the underlying
	 * buffer. This makes the implementation as simple as possible.
	 */
	error = xfs_buf_item_get_format(bip, bp->b_map_count);
	ASSERT(error == 0);
	if (error) {	/* to stop gcc throwing set-but-unused warnings */
		kmem_cache_free(xfs_buf_item_zone, bip);
		return error;
	}

	for (i = 0; i < bip->bli_format_count; i++) {
		chunks = DIV_ROUND_UP(BBTOB(bp->b_maps[i].bm_len),
				      XFS_BLF_CHUNK);
		map_size = DIV_ROUND_UP(chunks, NBWORD);

		bip->bli_formats[i].blf_type = XFS_LI_BUF;
		bip->bli_formats[i].blf_blkno = bp->b_maps[i].bm_bn;
		bip->bli_formats[i].blf_len = bp->b_maps[i].bm_len;
		bip->bli_formats[i].blf_map_size = map_size;
	}

	bp->b_log_item = bip;
	return 0;
}
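/*
 * Sizing example (editor's illustration): a single-map 4k buffer has a
 * bm_len of 8 basic blocks, so BBTOB() gives 4096 bytes, chunks =
 * DIV_ROUND_UP(4096, 128) = 32 and map_size = DIV_ROUND_UP(32, 32) = 1
 * bitmap word, assuming the usual 128 byte XFS_BLF_CHUNK and 32-bit NBWORD.
 */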
/*
 * Mark bytes first through last inclusive as dirty in the buf
 * item's bitmap.
 */
static void
xfs_buf_item_log_segment(
	uint			first,
	uint			last,
	uint			*map)
{
	uint			first_bit;
	uint			last_bit;
	uint			bits_to_set;
	uint			bits_set;
	uint			word_num;
	uint			*wordp;
	uint			bit;
	uint			end_bit;
	uint			mask;

	/*
	 * Convert byte offsets to bit numbers.
	 */
	first_bit = first >> XFS_BLF_SHIFT;
	last_bit = last >> XFS_BLF_SHIFT;

	/*
	 * Calculate the total number of bits to be set.
	 */
	bits_to_set = last_bit - first_bit + 1;

	/*
	 * Get a pointer to the first word in the bitmap
	 * to set a bit in.
	 */
	word_num = first_bit >> BIT_TO_WORD_SHIFT;
	wordp = &map[word_num];

	/*
	 * Calculate the starting bit in the first word.
	 */
	bit = first_bit & (uint)(NBWORD - 1);

	/*
	 * First set any bits in the first word of our range.
	 * If it starts at bit 0 of the word, it will be
	 * set below rather than here.  That is what the variable
	 * bit tells us. The variable bits_set tracks the number
	 * of bits that have been set so far.  End_bit is the number
	 * of the last bit to be set in this word plus one.
	 */
	if (bit) {
		end_bit = min(bit + bits_to_set, (uint)NBWORD);
		mask = ((1U << (end_bit - bit)) - 1) << bit;
		*wordp |= mask;
		wordp++;
		bits_set = end_bit - bit;
	} else {
		bits_set = 0;
	}

	/*
	 * Now set bits a whole word at a time that are between
	 * first_bit and last_bit.
	 */
	while ((bits_to_set - bits_set) >= NBWORD) {
		*wordp = 0xffffffff;
		bits_set += NBWORD;
		wordp++;
	}

	/*
	 * Finally, set any bits left to be set in one last partial word.
	 */
	end_bit = bits_to_set - bits_set;
	if (end_bit) {
		mask = (1U << end_bit) - 1;
		*wordp |= mask;
	}
}
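/*
 * Worked example (editor's illustration): logging bytes 0x100-0x2ff with
 * 128 byte chunks (XFS_BLF_SHIFT == 7) gives first_bit = 2, last_bit = 5 and
 * bits_to_set = 4. The range sits entirely in word 0 starting at bit 2, so
 * the first-word mask is ((1 << 4) - 1) << 2 = 0x3c and the word-at-a-time
 * and trailing-word steps have nothing left to do.
 */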
/*
 * Mark bytes first through last inclusive as dirty in the buf
 * item's bitmap.
 */
void
xfs_buf_item_log(
	struct xfs_buf_log_item	*bip,
	uint			first,
	uint			last)
{
	int			i;
	uint			start;
	uint			end;
	struct xfs_buf		*bp = bip->bli_buf;

	/*
	 * walk each buffer segment and mark them dirty appropriately.
	 */
	start = 0;
	for (i = 0; i < bip->bli_format_count; i++) {
		if (start > last)
			break;
		end = start + BBTOB(bp->b_maps[i].bm_len) - 1;

		/* skip to the map that includes the first byte to log */
		if (first > end) {
			start += BBTOB(bp->b_maps[i].bm_len);
			continue;
		}

		/*
		 * Trim the range to this segment and mark it in the bitmap.
		 * Note that we must convert buffer offsets to segment relative
		 * offsets (e.g., the first byte of each segment is byte 0 of
		 * that segment).
		 */
		if (first < start)
			first = start;
		if (end > last)
			end = last;
		xfs_buf_item_log_segment(first - start, end - start,
					 &bip->bli_formats[i].blf_data_map[0]);

		start += BBTOB(bp->b_maps[i].bm_len);
	}
}
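/*
 * Editor's illustration: with two 4k segments, logging bytes 4000-5000 of
 * the compound buffer trims to 4000-4095 in segment 0 and 4096-5000 in
 * segment 1, which becomes the segment-relative range 0-904 in the second
 * format's bitmap.
 */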
/*
 * Return true if the buffer has any ranges logged/dirtied by a transaction,
 * false otherwise.
 */
bool
xfs_buf_item_dirty_format(
	struct xfs_buf_log_item	*bip)
{
	int			i;

	for (i = 0; i < bip->bli_format_count; i++) {
		if (!xfs_bitmap_empty(bip->bli_formats[i].blf_data_map,
				      bip->bli_formats[i].blf_map_size))
			return true;
	}

	return false;
}
STATIC void
xfs_buf_item_free(
	struct xfs_buf_log_item	*bip)
{
	xfs_buf_item_free_format(bip);
	kmem_free(bip->bli_item.li_lv_shadow);
	kmem_cache_free(xfs_buf_item_zone, bip);
}
/*
 * This is called when the buf log item is no longer needed.  It should
 * free the buf log item associated with the given buffer and clear
 * the buffer's pointer to the buf log item.  If there are no more
 * items in the list, clear the b_iodone field of the buffer (see
 * xfs_buf_attach_iodone() below).
 */
void
xfs_buf_item_relse(
	struct xfs_buf	*bp)
{
	struct xfs_buf_log_item	*bip = bp->b_log_item;

	trace_xfs_buf_item_relse(bp, _RET_IP_);
	ASSERT(!test_bit(XFS_LI_IN_AIL, &bip->bli_item.li_flags));

	bp->b_log_item = NULL;
	if (list_empty(&bp->b_li_list))
		bp->b_iodone = NULL;

	xfs_buf_rele(bp);
	xfs_buf_item_free(bip);
}
/*
 * Add the given log item with its callback to the list of callbacks
 * to be called when the buffer's I/O completes.  If it is not set
 * already, set the buffer's b_iodone() routine to be
 * xfs_buf_iodone_callbacks() and link the log item into the list of
 * items rooted at b_li_list.
 */
void
xfs_buf_attach_iodone(
	struct xfs_buf		*bp,
	void			(*cb)(struct xfs_buf *, struct xfs_log_item *),
	struct xfs_log_item	*lip)
{
	ASSERT(xfs_buf_islocked(bp));

	lip->li_cb = cb;
	list_add_tail(&lip->li_bio_list, &bp->b_li_list);

	ASSERT(bp->b_iodone == NULL ||
	       bp->b_iodone == xfs_buf_iodone_callbacks);
	bp->b_iodone = xfs_buf_iodone_callbacks;
}
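/*
 * Typical usage (editor's sketch, not from this file): the inode flush path
 * attaches its own completion to the backing buffer, e.g.
 *
 *	xfs_buf_attach_iodone(bp, xfs_iflush_done, &iip->ili_item);
 *
 * so that xfs_buf_do_callbacks() below can retire every attached item when
 * the write completes.
 */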
/*
 * We can have many callbacks on a buffer. Running the callbacks individually
 * can cause a lot of contention on the AIL lock, so we allow for a single
 * callback to be able to scan the remaining items in bp->b_li_list for other
 * items of the same type and callback to be processed in the first call.
 *
 * As a result, the loop walking the callback list below will also modify the
 * list. It removes the first item from the list and then runs the callback.
 * The loop then restarts from the new first item in the list. This allows the
 * callback to scan and modify the list attached to the buffer and we don't
 * have to care about maintaining a next item pointer.
 */
STATIC void
xfs_buf_do_callbacks(
	struct xfs_buf		*bp)
{
	struct xfs_buf_log_item *blip = bp->b_log_item;
	struct xfs_log_item	*lip;

	/* If there is a buf_log_item attached, run its callback */
	if (blip) {
		lip = &blip->bli_item;
		lip->li_cb(bp, lip);
	}

	while (!list_empty(&bp->b_li_list)) {
		lip = list_first_entry(&bp->b_li_list, struct xfs_log_item,
				       li_bio_list);

		/*
		 * Remove the item from the list, so we don't have any
		 * confusion if the item is added to another buf.
		 * Don't touch the log item after calling its
		 * callback, because it could have freed itself.
		 */
		list_del_init(&lip->li_bio_list);
		lip->li_cb(bp, lip);
	}
}
/*
 * Invoke the error state callback for each log item affected by the failed I/O.
 *
 * If a metadata buffer write fails with a non-permanent error, the buffer is
 * eventually resubmitted and so the completion callbacks are not run. The error
 * state may need to be propagated to the log items attached to the buffer,
 * however, so the next AIL push of the item knows how to handle it correctly.
 */
STATIC void
xfs_buf_do_callbacks_fail(
	struct xfs_buf		*bp)
{
	struct xfs_log_item	*lip;
	struct xfs_ail		*ailp;

	/*
	 * Buffer log item errors are handled directly by xfs_buf_item_push()
	 * and xfs_buf_iodone_callback_error, and they have no IO error
	 * callbacks. Check only for items in b_li_list.
	 */
	if (list_empty(&bp->b_li_list))
		return;

	lip = list_first_entry(&bp->b_li_list, struct xfs_log_item,
			       li_bio_list);
	ailp = lip->li_ailp;
	spin_lock(&ailp->ail_lock);
	list_for_each_entry(lip, &bp->b_li_list, li_bio_list) {
		if (lip->li_ops->iop_error)
			lip->li_ops->iop_error(lip, bp);
	}
	spin_unlock(&ailp->ail_lock);
}
static bool
xfs_buf_iodone_callback_error(
	struct xfs_buf		*bp)
{
	struct xfs_buf_log_item	*bip = bp->b_log_item;
	struct xfs_log_item	*lip;
	struct xfs_mount	*mp;
	static ulong		lasttime;
	static xfs_buftarg_t	*lasttarg;
	struct xfs_error_cfg	*cfg;

	/*
	 * The failed buffer might not have a buf_log_item attached or the
	 * log_item list might be empty. Get the mp from the available
	 * xfs_log_item.
	 */
	lip = list_first_entry_or_null(&bp->b_li_list, struct xfs_log_item,
				       li_bio_list);
	mp = lip ? lip->li_mountp : bip->bli_item.li_mountp;

	/*
	 * If we've already decided to shutdown the filesystem because of
	 * I/O errors, there's no point in giving this a retry.
	 */
	if (XFS_FORCED_SHUTDOWN(mp))
		goto out_stale;

	if (bp->b_target != lasttarg ||
	    time_after(jiffies, (lasttime + 5*HZ))) {
		lasttime = jiffies;
		xfs_buf_ioerror_alert(bp, __func__);
	}
	lasttarg = bp->b_target;

	/* synchronous writes will have callers process the error */
	if (!(bp->b_flags & XBF_ASYNC))
		goto out_stale;

	trace_xfs_buf_item_iodone_async(bp, _RET_IP_);
	ASSERT(bp->b_iodone != NULL);

	cfg = xfs_error_get_cfg(mp, XFS_ERR_METADATA, bp->b_error);

	/*
	 * If the write was asynchronous then no one will be looking for the
	 * error.  If this is the first failure of this type, clear the error
	 * state and write the buffer out again. This means we always retry an
	 * async write failure at least once, but we also need to set the buffer
	 * up to behave correctly now for repeated failures.
	 */
	if (!(bp->b_flags & (XBF_STALE | XBF_WRITE_FAIL)) ||
	    bp->b_last_error != bp->b_error) {
		bp->b_flags |= (XBF_WRITE | XBF_DONE | XBF_WRITE_FAIL);
		bp->b_last_error = bp->b_error;
		if (cfg->retry_timeout != XFS_ERR_RETRY_FOREVER &&
		    !bp->b_first_retry_time)
			bp->b_first_retry_time = jiffies;

		xfs_buf_ioerror(bp, 0);
		xfs_buf_submit(bp);
		return true;
	}

	/*
	 * Repeated failure on an async write. Take action according to the
	 * error configuration we have been set up to use.
	 */

	if (cfg->max_retries != XFS_ERR_RETRY_FOREVER &&
	    ++bp->b_retries > cfg->max_retries)
		goto permanent_error;
	if (cfg->retry_timeout != XFS_ERR_RETRY_FOREVER &&
	    time_after(jiffies, cfg->retry_timeout + bp->b_first_retry_time))
		goto permanent_error;

	/* At unmount we may treat errors differently */
	if ((mp->m_flags & XFS_MOUNT_UNMOUNTING) && mp->m_fail_unmount)
		goto permanent_error;

	/*
	 * Still a transient error, run IO completion failure callbacks and let
	 * the higher layers retry the buffer.
	 */
	xfs_buf_do_callbacks_fail(bp);
	xfs_buf_ioerror(bp, 0);
	xfs_buf_relse(bp);
	return true;

	/*
	 * Permanent error - we need to trigger a shutdown if we haven't already
	 * to indicate that inconsistency will result from this action.
	 */
permanent_error:
	xfs_force_shutdown(mp, SHUTDOWN_META_IO_ERROR);
out_stale:
	xfs_buf_stale(bp);
	bp->b_flags |= XBF_DONE;
	trace_xfs_buf_error_relse(bp, _RET_IP_);
	return false;
}
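/*
 * Retry policy at a glance (editor's summary of the function above): the
 * first failure of a given error is always resubmitted immediately; repeat
 * failures are bounded by the configured max_retries count and retry_timeout
 * window; and an unmount in progress with m_fail_unmount set converts a
 * transient error into a permanent one.
 */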
/*
 * This is the iodone() function for buffers which have had callbacks attached
 * to them by xfs_buf_attach_iodone(). We need to iterate the items on the
 * callback list, mark the buffer as having no more callbacks and then push the
 * buffer through IO completion processing.
 */
void
xfs_buf_iodone_callbacks(
	struct xfs_buf		*bp)
{
	/*
	 * If there is an error, process it. Some errors require us
	 * to run callbacks after failure processing is done so we
	 * detect that and take appropriate action.
	 */
	if (bp->b_error && xfs_buf_iodone_callback_error(bp))
		return;

	/*
	 * Successful IO or permanent error. Either way, we can clear the
	 * retry state here in preparation for the next error that may occur.
	 */
	bp->b_last_error = 0;
	bp->b_retries = 0;
	bp->b_first_retry_time = 0;

	xfs_buf_do_callbacks(bp);
	bp->b_log_item = NULL;
	list_del_init(&bp->b_li_list);
	bp->b_iodone = NULL;
	xfs_buf_ioend(bp);
}
/*
 * This is the iodone() function for buffers which have been
 * logged.  It is called when they are eventually flushed out.
 * It should remove the buf item from the AIL, and free the buf item.
 * It is called by xfs_buf_iodone_callbacks() above which will take
 * care of cleaning up the buffer itself.
 */
void
xfs_buf_iodone(
	struct xfs_buf		*bp,
	struct xfs_log_item	*lip)
{
	struct xfs_ail		*ailp = lip->li_ailp;

	ASSERT(BUF_ITEM(lip)->bli_buf == bp);

	xfs_buf_rele(bp);

	/*
	 * If we are forcibly shutting down, this may well be
	 * off the AIL already. That's because we simulate the
	 * log-committed callbacks to unpin these buffers. Or we may never
	 * have put this item on AIL because the transaction was
	 * aborted forcibly. xfs_trans_ail_delete() takes care of these.
	 *
	 * Either way, AIL is useless if we're forcing a shutdown.
	 */
	spin_lock(&ailp->ail_lock);
	xfs_trans_ail_delete(ailp, lip, SHUTDOWN_CORRUPT_INCORE);
	xfs_buf_item_free(BUF_ITEM(lip));
}
/*
 * Requeue a failed buffer for writeback.
 *
 * We clear the log item failed state here as well, but we have to be careful
 * about reference counts because the only active reference counts on the buffer
 * may be the failed log items. Hence if we clear the log item failed state
 * before queuing the buffer for IO we can release all active references to
 * the buffer and free it, leading to use after free problems in
 * xfs_buf_delwri_queue. It makes no difference to the buffer or log items which
 * order we process them in - the buffer is locked, and we own the buffer list
 * so nothing on them is going to change while we are performing this action.
 *
 * Hence we can safely queue the buffer for IO before we clear the failed log
 * item state, therefore always having an active reference to the buffer and
 * avoiding the transient zero-reference state that leads to use-after-free.
 *
 * Return true if the buffer was added to the buffer list, false if it was
 * already on the buffer list.
 */
bool
xfs_buf_resubmit_failed_buffers(
	struct xfs_buf		*bp,
	struct list_head	*buffer_list)
{
	struct xfs_log_item	*lip;
	bool			ret;

	ret = xfs_buf_delwri_queue(bp, buffer_list);

	/*
	 * XFS_LI_FAILED set/clear is protected by ail_lock; the caller of
	 * this function already has it acquired.
	 */
	list_for_each_entry(lip, &bp->b_li_list, li_bio_list)
		xfs_clear_li_failed(lip);

	return ret;
}