// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_bit.h"
#include "xfs_mount.h"
#include "xfs_trans.h"
#include "xfs_buf_item.h"
#include "xfs_trans_priv.h"
#include "xfs_trace.h"
#include "xfs_log.h"
kmem_zone_t     *xfs_buf_item_zone;
static inline struct xfs_buf_log_item *BUF_ITEM(struct xfs_log_item *lip)
{
        return container_of(lip, struct xfs_buf_log_item, bli_item);
}
STATIC void xfs_buf_do_callbacks(struct xfs_buf *bp);
/* Is this log iovec plausibly large enough to contain the buffer log format? */
bool
xfs_buf_log_check_iovec(
        struct xfs_log_iovec            *iovec)
{
        struct xfs_buf_log_format       *blfp = iovec->i_addr;
        char                            *bmp_end;
        char                            *item_end;

        if (offsetof(struct xfs_buf_log_format, blf_data_map) > iovec->i_len)
                return false;

        item_end = (char *)iovec->i_addr + iovec->i_len;
        bmp_end = (char *)&blfp->blf_data_map[blfp->blf_map_size];
        return bmp_end <= item_end;
}
static inline int
xfs_buf_log_format_size(
        struct xfs_buf_log_format *blfp)
{
        return offsetof(struct xfs_buf_log_format, blf_data_map) +
                        (blfp->blf_map_size * sizeof(blfp->blf_data_map[0]));
}
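/*
 * Worked example (illustrative, not from the original source): a segment
 * whose dirty bitmap fits in two 32-bit words has blf_map_size = 2, so the
 * logged format size is offsetof(struct xfs_buf_log_format, blf_data_map)
 * plus 2 * sizeof(blfp->blf_data_map[0]) = 8 bytes of bitmap. Only the used
 * portion of the bitmap is sized here, not the full in-memory array.
 */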
/*
 * This returns the number of log iovecs needed to log the
 * given buf log item segment.
 *
 * It calculates this as 1 iovec for the buf log format structure
 * and 1 for each stretch of non-contiguous chunks to be logged.
 * Contiguous chunks are logged in a single iovec.
 *
 * If the XFS_BLI_STALE flag has been set, then log nothing.
 */
STATIC void
xfs_buf_item_size_segment(
        struct xfs_buf_log_item         *bip,
        struct xfs_buf_log_format       *blfp,
        int                             *nvecs,
        int                             *nbytes)
{
        struct xfs_buf                  *bp = bip->bli_buf;
        int                             next_bit;
        int                             last_bit;

        last_bit = xfs_next_bit(blfp->blf_data_map, blfp->blf_map_size, 0);
        if (last_bit == -1)
                return;

        /*
         * initial count for a dirty buffer is 2 vectors - the format structure
         * and the first dirty region.
         */
        *nvecs += 2;
        *nbytes += xfs_buf_log_format_size(blfp) + XFS_BLF_CHUNK;

        while (last_bit != -1) {
                /*
                 * This takes the bit number to start looking from and
                 * returns the next set bit from there.  It returns -1
                 * if there are no more bits set or the start bit is
                 * beyond the end of the bitmap.
                 */
                next_bit = xfs_next_bit(blfp->blf_data_map, blfp->blf_map_size,
                                        last_bit + 1);
                /*
                 * If we run out of bits, leave the loop,
                 * else if we find a new set of bits bump the number of vecs,
                 * else keep scanning the current set of bits.
                 */
                if (next_bit == -1) {
                        break;
                } else if (next_bit != last_bit + 1) {
                        last_bit = next_bit;
                        (*nvecs)++;
                } else if (xfs_buf_offset(bp, next_bit * XFS_BLF_CHUNK) !=
                           (xfs_buf_offset(bp, last_bit * XFS_BLF_CHUNK) +
                            XFS_BLF_CHUNK)) {
                        last_bit = next_bit;
                        (*nvecs)++;
                } else {
                        last_bit++;
                }
                *nbytes += XFS_BLF_CHUNK;
        }
}
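/*
 * Worked example (illustrative, not from the original source): with dirty
 * bits {0, 3, 4, 5} in the data map, the initial xfs_next_bit() call finds
 * bit 0 and we count 2 vectors (format structure plus first region). Bit 3
 * is not adjacent to bit 0, so *nvecs becomes 3; bits 4 and 5 extend that
 * second region without adding vectors. The segment thus contributes 3
 * iovecs and 4 * XFS_BLF_CHUNK bytes of logged data.
 */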
/*
 * This returns the number of log iovecs needed to log the given buf log item.
 *
 * It calculates this as 1 iovec for the buf log format structure and 1 for each
 * stretch of non-contiguous chunks to be logged.  Contiguous chunks are logged
 * in a single iovec.
 *
 * Discontiguous buffers need a format structure per region that is being
 * logged. This makes the changes in the buffer appear to log recovery as though
 * they came from separate buffers, just like would occur if multiple buffers
 * were used instead of a single discontiguous buffer. This enables
 * discontiguous buffers to be in-memory constructs, completely transparent to
 * what ends up on disk.
 *
 * If the XFS_BLI_STALE flag has been set, then log nothing but the buf log
 * format structure.
 */
STATIC void
xfs_buf_item_size(
        struct xfs_log_item     *lip,
        int                     *nvecs,
        int                     *nbytes)
{
        struct xfs_buf_log_item *bip = BUF_ITEM(lip);
        int                     i;

        ASSERT(atomic_read(&bip->bli_refcount) > 0);
        if (bip->bli_flags & XFS_BLI_STALE) {
                /*
                 * The buffer is stale, so all we need to log
                 * is the buf log format structure with the
                 * cancel flag in it.
                 */
                trace_xfs_buf_item_size_stale(bip);
                ASSERT(bip->__bli_format.blf_flags & XFS_BLF_CANCEL);
                *nvecs += bip->bli_format_count;
                for (i = 0; i < bip->bli_format_count; i++) {
                        *nbytes += xfs_buf_log_format_size(&bip->bli_formats[i]);
                }
                return;
        }

        ASSERT(bip->bli_flags & XFS_BLI_LOGGED);

        if (bip->bli_flags & XFS_BLI_ORDERED) {
                /*
                 * The buffer has been logged just to order it.
                 * It is not being included in the transaction
                 * commit, so no vectors are used at all.
                 */
                trace_xfs_buf_item_size_ordered(bip);
                *nvecs = XFS_LOG_VEC_ORDERED;
                return;
        }

        /*
         * the vector count is based on the number of buffer vectors we have
         * dirty bits in. This will only be greater than one when we have a
         * compound buffer with more than one segment dirty. Hence for compound
         * buffers we need to track which segment the dirty bits correspond to,
         * and when we move from one segment to the next increment the vector
         * count for the extra buf log format structure that will need to be
         * written.
         */
        for (i = 0; i < bip->bli_format_count; i++) {
                xfs_buf_item_size_segment(bip, &bip->bli_formats[i],
                                          nvecs, nbytes);
        }
        trace_xfs_buf_item_size(bip);
}
static inline void
xfs_buf_item_copy_iovec(
        struct xfs_log_vec      *lv,
        struct xfs_log_iovec    **vecp,
        struct xfs_buf          *bp,
        uint                    offset,
        int                     first_bit,
        uint                    nbits)
{
        offset += first_bit * XFS_BLF_CHUNK;
        xlog_copy_iovec(lv, vecp, XLOG_REG_TYPE_BCHUNK,
                        xfs_buf_offset(bp, offset),
                        nbits * XFS_BLF_CHUNK);
}
static inline bool
xfs_buf_item_straddle(
        struct xfs_buf          *bp,
        uint                    offset,
        int                     next_bit,
        int                     last_bit)
{
        return xfs_buf_offset(bp, offset + (next_bit << XFS_BLF_SHIFT)) !=
                (xfs_buf_offset(bp, offset + (last_bit << XFS_BLF_SHIFT)) +
                 XFS_BLF_CHUNK);
}
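/*
 * Illustrative note (not from the original source): xfs_buf_offset()
 * returns the kernel address backing a byte offset in the buffer. Two
 * chunks with consecutive bit numbers "straddle" when their backing memory
 * is not virtually contiguous (e.g. they live in different pages of a
 * multi-page buffer), so they cannot be described by a single iovec.
 */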
static void
xfs_buf_item_format_segment(
        struct xfs_buf_log_item *bip,
        struct xfs_log_vec      *lv,
        struct xfs_log_iovec    **vecp,
        uint                    offset,
        struct xfs_buf_log_format *blfp)
{
        struct xfs_buf          *bp = bip->bli_buf;
        uint                    base_size;
        int                     first_bit;
        int                     last_bit;
        int                     next_bit;
        uint                    nbits;

        /* copy the flags across from the base format item */
        blfp->blf_flags = bip->__bli_format.blf_flags;

        /*
         * Base size is the actual size of the ondisk structure - it reflects
         * the actual size of the dirty bitmap rather than the size of the in
         * memory structure.
         */
        base_size = xfs_buf_log_format_size(blfp);

        first_bit = xfs_next_bit(blfp->blf_data_map, blfp->blf_map_size, 0);
        if (!(bip->bli_flags & XFS_BLI_STALE) && first_bit == -1) {
                /*
                 * If the map is not dirty in the transaction, mark
                 * the size as zero and do not advance the vector pointer.
                 */
                return;
        }

        blfp = xlog_copy_iovec(lv, vecp, XLOG_REG_TYPE_BFORMAT, blfp, base_size);
        blfp->blf_size = 1;

        if (bip->bli_flags & XFS_BLI_STALE) {
                /*
                 * The buffer is stale, so all we need to log
                 * is the buf log format structure with the
                 * cancel flag in it.
                 */
                trace_xfs_buf_item_format_stale(bip);
                ASSERT(blfp->blf_flags & XFS_BLF_CANCEL);
                return;
        }

        /*
         * Fill in an iovec for each set of contiguous chunks.
         */
        last_bit = first_bit;
        nbits = 1;
        for (;;) {
                /*
                 * This takes the bit number to start looking from and
                 * returns the next set bit from there.  It returns -1
                 * if there are no more bits set or the start bit is
                 * beyond the end of the bitmap.
                 */
                next_bit = xfs_next_bit(blfp->blf_data_map, blfp->blf_map_size,
                                        (uint)last_bit + 1);
                /*
                 * If we run out of bits fill in the last iovec and get out of
                 * the loop. Else if we start a new set of bits then fill in
                 * the iovec for the series we were looking at and start
                 * counting the bits in the new one. Else we're still in the
                 * same set of bits so just keep counting and scanning.
                 */
                if (next_bit == -1) {
                        xfs_buf_item_copy_iovec(lv, vecp, bp, offset,
                                                first_bit, nbits);
                        blfp->blf_size++;
                        break;
                } else if (next_bit != last_bit + 1 ||
                           xfs_buf_item_straddle(bp, offset, next_bit, last_bit)) {
                        xfs_buf_item_copy_iovec(lv, vecp, bp, offset,
                                                first_bit, nbits);
                        blfp->blf_size++;
                        first_bit = next_bit;
                        last_bit = next_bit;
                        nbits = 1;
                } else {
                        last_bit++;
                        nbits++;
                }
        }
}
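/*
 * Worked example (illustrative, not from the original source): for dirty
 * bits {0, 1, 7}, the loop emits one BCHUNK iovec for chunks 0-1 (nbits = 2)
 * when it hits the gap at bit 7, then a second iovec for chunk 7 when
 * xfs_next_bit() returns -1, leaving blf_size = 3: the format iovec plus
 * two data iovecs.
 */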
/*
 * This is called to fill in the vector of log iovecs for the
 * given log buf item.  It fills the first entry with a buf log
 * format structure, and the rest point to contiguous chunks
 * within the buffer.
 */
STATIC void
xfs_buf_item_format(
        struct xfs_log_item     *lip,
        struct xfs_log_vec      *lv)
{
        struct xfs_buf_log_item *bip = BUF_ITEM(lip);
        struct xfs_buf          *bp = bip->bli_buf;
        struct xfs_log_iovec    *vecp = NULL;
        uint                    offset = 0;
        int                     i;

        ASSERT(atomic_read(&bip->bli_refcount) > 0);
        ASSERT((bip->bli_flags & XFS_BLI_LOGGED) ||
               (bip->bli_flags & XFS_BLI_STALE));
        ASSERT((bip->bli_flags & XFS_BLI_STALE) ||
               (xfs_blft_from_flags(&bip->__bli_format) > XFS_BLFT_UNKNOWN_BUF
                && xfs_blft_from_flags(&bip->__bli_format) < XFS_BLFT_MAX_BUF));
        ASSERT(!(bip->bli_flags & XFS_BLI_ORDERED) ||
               (bip->bli_flags & XFS_BLI_STALE));

        /*
         * If it is an inode buffer, transfer the in-memory state to the
         * format flags and clear the in-memory state.
         *
         * For buffer based inode allocation, we do not transfer
         * this state if the inode buffer allocation has not yet been committed
         * to the log as setting the XFS_BLI_INODE_BUF flag will prevent
         * correct replay of the inode allocation.
         *
         * For icreate item based inode allocation, the buffers aren't written
         * to the journal during allocation, and hence we should always tag the
         * buffer as an inode buffer so that the correct unlinked list replay
         * occurs during recovery.
         */
        if (bip->bli_flags & XFS_BLI_INODE_BUF) {
                if (xfs_sb_version_hascrc(&lip->li_mountp->m_sb) ||
                    !((bip->bli_flags & XFS_BLI_INODE_ALLOC_BUF) &&
                      xfs_log_item_in_current_chkpt(lip)))
                        bip->__bli_format.blf_flags |= XFS_BLF_INODE_BUF;
                bip->bli_flags &= ~XFS_BLI_INODE_BUF;
        }

        for (i = 0; i < bip->bli_format_count; i++) {
                xfs_buf_item_format_segment(bip, lv, &vecp, offset,
                                            &bip->bli_formats[i]);
                offset += BBTOB(bp->b_maps[i].bm_len);
        }

        /*
         * Check to make sure everything is consistent.
         */
        trace_xfs_buf_item_format(bip);
}
/*
 * This is called to pin the buffer associated with the buf log item in memory
 * so it cannot be written out.
 *
 * We also always take a reference to the buffer log item here so that the bli
 * is held while the item is pinned in memory. This means that we can
 * unconditionally drop the reference count a transaction holds when the
 * transaction is completed.
 */
STATIC void
xfs_buf_item_pin(
        struct xfs_log_item     *lip)
{
        struct xfs_buf_log_item *bip = BUF_ITEM(lip);

        ASSERT(atomic_read(&bip->bli_refcount) > 0);
        ASSERT((bip->bli_flags & XFS_BLI_LOGGED) ||
               (bip->bli_flags & XFS_BLI_ORDERED) ||
               (bip->bli_flags & XFS_BLI_STALE));

        trace_xfs_buf_item_pin(bip);

        atomic_inc(&bip->bli_refcount);
        atomic_inc(&bip->bli_buf->b_pin_count);
}
/*
 * This is called to unpin the buffer associated with the buf log
 * item which was previously pinned with a call to xfs_buf_item_pin().
 *
 * Also drop the reference to the buf item for the current transaction.
 * If the XFS_BLI_STALE flag is set and we are the last reference,
 * then free up the buf log item and unlock the buffer.
 *
 * If the remove flag is set we are called from uncommit in the
 * forced-shutdown path.  If that is true and the reference count on
 * the log item is going to drop to zero we need to free the item's
 * descriptor in the transaction.
 */
STATIC void
xfs_buf_item_unpin(
        struct xfs_log_item     *lip,
        int                     remove)
{
        struct xfs_buf_log_item *bip = BUF_ITEM(lip);
        xfs_buf_t               *bp = bip->bli_buf;
        struct xfs_ail          *ailp = lip->li_ailp;
        int                     stale = bip->bli_flags & XFS_BLI_STALE;
        int                     freed;

        ASSERT(bp->b_log_item == bip);
        ASSERT(atomic_read(&bip->bli_refcount) > 0);

        trace_xfs_buf_item_unpin(bip);

        freed = atomic_dec_and_test(&bip->bli_refcount);

        if (atomic_dec_and_test(&bp->b_pin_count))
                wake_up_all(&bp->b_waiters);

        if (freed && stale) {
                ASSERT(bip->bli_flags & XFS_BLI_STALE);
                ASSERT(xfs_buf_islocked(bp));
                ASSERT(bp->b_flags & XBF_STALE);
                ASSERT(bip->__bli_format.blf_flags & XFS_BLF_CANCEL);

                trace_xfs_buf_item_unpin_stale(bip);

                if (remove) {
                        /*
                         * If we are in a transaction context, we have to
                         * remove the log item from the transaction as we are
                         * about to release our reference to the buffer.  If we
                         * don't, the unlock that occurs later in
                         * xfs_trans_uncommit() will try to reference the
                         * buffer which we no longer have a hold on.
                         */
                        if (!list_empty(&lip->li_trans))
                                xfs_trans_del_item(lip);

                        /*
                         * Since the transaction no longer refers to the buffer,
                         * the buffer should no longer refer to the transaction.
                         */
                        bp->b_transp = NULL;
                }

                /*
                 * If we get called here because of an IO error, we may
                 * or may not have the item on the AIL. xfs_trans_ail_delete()
                 * will take care of that situation.
                 * xfs_trans_ail_delete() drops the AIL lock.
                 */
                if (bip->bli_flags & XFS_BLI_STALE_INODE) {
                        xfs_buf_do_callbacks(bp);
                        bp->b_log_item = NULL;
                        list_del_init(&bp->b_li_list);
                        bp->b_iodone = NULL;
                } else {
                        spin_lock(&ailp->ail_lock);
                        xfs_trans_ail_delete(ailp, lip, SHUTDOWN_LOG_IO_ERROR);
                        xfs_buf_item_relse(bp);
                        ASSERT(bp->b_log_item == NULL);
                }
                xfs_buf_relse(bp);
        } else if (freed && remove) {
                /*
                 * There are currently two references to the buffer - the active
                 * LRU reference and the buf log item.  What we are about to do
                 * here - simulate a failed IO completion - requires 3
                 * references.
                 *
                 * The LRU reference is removed by the xfs_buf_stale() call. The
                 * buf item reference is removed by the xfs_buf_iodone()
                 * callback that is run by xfs_buf_do_callbacks() during ioend
                 * processing (via the bp->b_iodone callback), and then finally
                 * the ioend processing will drop the IO reference if the buffer
                 * is marked XBF_ASYNC.
                 *
                 * Hence we need to take an additional reference here so that IO
                 * completion processing doesn't free the buffer prematurely.
                 */
                xfs_buf_lock(bp);
                xfs_buf_hold(bp);
                bp->b_flags |= XBF_ASYNC;
                xfs_buf_ioerror(bp, -EIO);
                bp->b_flags &= ~XBF_DONE;
                xfs_buf_stale(bp);
                xfs_buf_ioend(bp);
        }
}
/*
 * Buffer IO error rate limiting. Limit it to no more than 10 messages per 30
 * seconds so as to not spam logs too much on repeated detection of the same
 * buffer being bad.
 */
static DEFINE_RATELIMIT_STATE(xfs_buf_write_fail_rl_state, 30 * HZ, 10);

STATIC uint
xfs_buf_item_push(
        struct xfs_log_item     *lip,
        struct list_head        *buffer_list)
{
        struct xfs_buf_log_item *bip = BUF_ITEM(lip);
        struct xfs_buf          *bp = bip->bli_buf;
        uint                    rval = XFS_ITEM_SUCCESS;

        if (xfs_buf_ispinned(bp))
                return XFS_ITEM_PINNED;
        if (!xfs_buf_trylock(bp)) {
                /*
                 * If we have just raced with a buffer being pinned and it has
                 * been marked stale, we could end up stalling until someone
                 * else issues a log force to unpin the stale buffer. Check for
                 * the race condition here so xfsaild recognizes the buffer is
                 * pinned and queues a log force to move it along.
                 */
                if (xfs_buf_ispinned(bp))
                        return XFS_ITEM_PINNED;
                return XFS_ITEM_LOCKED;
        }

        ASSERT(!(bip->bli_flags & XFS_BLI_STALE));

        trace_xfs_buf_item_push(bip);

        /* has a previous flush failed due to IO errors? */
        if ((bp->b_flags & XBF_WRITE_FAIL) &&
            ___ratelimit(&xfs_buf_write_fail_rl_state, "XFS: Failing async write")) {
                xfs_warn(bp->b_mount,
"Failing async write on buffer block 0x%llx. Retrying async write.",
                         (long long)bp->b_bn);
        }

        if (!xfs_buf_delwri_queue(bp, buffer_list))
                rval = XFS_ITEM_FLUSHING;
        xfs_buf_unlock(bp);
        return rval;
}
/*
 * Drop the buffer log item refcount and take appropriate action. This helper
 * determines whether the bli must be freed or not, since a decrement to zero
 * does not necessarily mean the bli is unused.
 *
 * Return true if the bli is freed, false otherwise.
 */
bool
xfs_buf_item_put(
        struct xfs_buf_log_item *bip)
{
        struct xfs_log_item     *lip = &bip->bli_item;
        bool                    aborted;
        bool                    dirty;

        /* drop the bli ref and return if it wasn't the last one */
        if (!atomic_dec_and_test(&bip->bli_refcount))
                return false;

        /*
         * We dropped the last ref and must free the item if clean or aborted.
         * If the bli is dirty and non-aborted, the buffer was clean in the
         * transaction but still awaiting writeback from previous changes. In
         * that case, the bli is freed on buffer writeback completion.
         */
        aborted = test_bit(XFS_LI_ABORTED, &lip->li_flags) ||
                  XFS_FORCED_SHUTDOWN(lip->li_mountp);
        dirty = bip->bli_flags & XFS_BLI_DIRTY;
        if (dirty && !aborted)
                return false;

        /*
         * The bli is aborted or clean. An aborted item may be in the AIL
         * regardless of dirty state.  For example, consider an aborted
         * transaction that invalidated a dirty bli and cleared the dirty
         * state.
         */
        if (aborted)
                xfs_trans_ail_remove(lip, SHUTDOWN_LOG_IO_ERROR);
        xfs_buf_item_relse(bip->bli_buf);
        return true;
}
/*
 * Release the buffer associated with the buf log item.  If there is no dirty
 * logged data associated with the buffer recorded in the buf log item, then
 * free the buf log item and remove the reference to it in the buffer.
 *
 * This call ignores the recursion count.  It is only called when the buffer
 * should REALLY be unlocked, regardless of the recursion count.
 *
 * We unconditionally drop the transaction's reference to the log item. If the
 * item was logged, then another reference was taken when it was pinned, so we
 * can safely drop the transaction reference now.  This also allows us to avoid
 * potential races with the unpin code freeing the bli by not referencing the
 * bli after we've dropped the reference count.
 *
 * If the XFS_BLI_HOLD flag is set in the buf log item, then free the log item
 * if necessary but do not unlock the buffer.  This is for support of
 * xfs_trans_bhold(). Make sure the XFS_BLI_HOLD field is cleared if we don't
 * free the item.
 */
STATIC void
xfs_buf_item_release(
        struct xfs_log_item     *lip)
{
        struct xfs_buf_log_item *bip = BUF_ITEM(lip);
        struct xfs_buf          *bp = bip->bli_buf;
        bool                    released;
        bool                    hold = bip->bli_flags & XFS_BLI_HOLD;
        bool                    stale = bip->bli_flags & XFS_BLI_STALE;
#if defined(DEBUG) || defined(XFS_WARN)
        bool                    ordered = bip->bli_flags & XFS_BLI_ORDERED;
        bool                    dirty = bip->bli_flags & XFS_BLI_DIRTY;
        bool                    aborted = test_bit(XFS_LI_ABORTED,
                                                   &lip->li_flags);
#endif

        trace_xfs_buf_item_release(bip);

        /*
         * The bli dirty state should match whether the blf has logged segments
         * except for ordered buffers, where only the bli should be dirty.
         */
        ASSERT((!ordered && dirty == xfs_buf_item_dirty_format(bip)) ||
               (ordered && dirty && !xfs_buf_item_dirty_format(bip)));
        ASSERT(!stale || (bip->__bli_format.blf_flags & XFS_BLF_CANCEL));

        /*
         * Clear the buffer's association with this transaction and
         * per-transaction state from the bli, which has been copied above.
         */
        bp->b_transp = NULL;
        bip->bli_flags &= ~(XFS_BLI_LOGGED | XFS_BLI_HOLD | XFS_BLI_ORDERED);

        /*
         * Unref the item and unlock the buffer unless held or stale. Stale
         * buffers remain locked until final unpin unless the bli is freed by
         * the unref call. The latter implies shutdown because buffer
         * invalidation dirties the bli and transaction.
         */
        released = xfs_buf_item_put(bip);
        if (hold || (stale && !released))
                return;
        ASSERT(!stale || aborted);
        xfs_buf_relse(bp);
}
STATIC void
xfs_buf_item_committing(
        struct xfs_log_item     *lip,
        xfs_lsn_t               commit_lsn)
{
        return xfs_buf_item_release(lip);
}
/*
 * This is called to find out where the oldest active copy of the
 * buf log item in the on disk log resides now that the last log
 * write of it completed at the given lsn.
 * We always re-log all the dirty data in a buffer, so usually the
 * latest copy in the on disk log is the only one that matters.  For
 * those cases we simply return the given lsn.
 *
 * The one exception to this is for buffers full of newly allocated
 * inodes.  These buffers are only relogged with the XFS_BLI_INODE_BUF
 * flag set, indicating that only the di_next_unlinked fields from the
 * inodes in the buffers will be replayed during recovery.  If the
 * original newly allocated inode images have not yet been flushed
 * when the buffer is so relogged, then we need to make sure that we
 * keep the old images in the 'active' portion of the log.  We do this
 * by returning the original lsn of that transaction here rather than
 * the current one.
 */
STATIC xfs_lsn_t
xfs_buf_item_committed(
        struct xfs_log_item     *lip,
        xfs_lsn_t               lsn)
{
        struct xfs_buf_log_item *bip = BUF_ITEM(lip);

        trace_xfs_buf_item_committed(bip);

        if ((bip->bli_flags & XFS_BLI_INODE_ALLOC_BUF) && lip->li_lsn != 0)
                return lip->li_lsn;
        return lsn;
}
static const struct xfs_item_ops xfs_buf_item_ops = {
        .iop_size       = xfs_buf_item_size,
        .iop_format     = xfs_buf_item_format,
        .iop_pin        = xfs_buf_item_pin,
        .iop_unpin      = xfs_buf_item_unpin,
        .iop_release    = xfs_buf_item_release,
        .iop_committing = xfs_buf_item_committing,
        .iop_committed  = xfs_buf_item_committed,
        .iop_push       = xfs_buf_item_push,
};
STATIC void
xfs_buf_item_get_format(
        struct xfs_buf_log_item *bip,
        int                     count)
{
        ASSERT(bip->bli_formats == NULL);
        bip->bli_format_count = count;

        if (count == 1) {
                bip->bli_formats = &bip->__bli_format;
                return;
        }

        bip->bli_formats = kmem_zalloc(count * sizeof(struct xfs_buf_log_format),
                                0);
}
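/*
 * Design note (not from the original source): the single-map case reuses
 * the bli's embedded __bli_format instead of allocating, so the common
 * contiguous-buffer path costs no extra memory; only discontiguous
 * (multi-map) buffers pay for a separately allocated format array.
 */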
STATIC void
xfs_buf_item_free_format(
        struct xfs_buf_log_item *bip)
{
        if (bip->bli_formats != &bip->__bli_format) {
                kmem_free(bip->bli_formats);
                bip->bli_formats = NULL;
        }
}
/*
 * Allocate a new buf log item to go with the given buffer.
 * Set the buffer's b_log_item field to point to the new
 * buf log item.
 */
int
xfs_buf_item_init(
        struct xfs_buf          *bp,
        struct xfs_mount        *mp)
{
        struct xfs_buf_log_item *bip = bp->b_log_item;
        int                     chunks;
        int                     map_size;
        int                     i;

        /*
         * Check to see if there is already a buf log item for
         * this buffer. If we do already have one, there is
         * nothing to do here so return.
         */
        ASSERT(bp->b_mount == mp);
        if (bip) {
                ASSERT(bip->bli_item.li_type == XFS_LI_BUF);
                ASSERT(!bp->b_transp);
                ASSERT(bip->bli_buf == bp);
                return 0;
        }

        bip = kmem_zone_zalloc(xfs_buf_item_zone, 0);
        xfs_log_item_init(mp, &bip->bli_item, XFS_LI_BUF, &xfs_buf_item_ops);
        bip->bli_buf = bp;

        /*
         * chunks is the number of XFS_BLF_CHUNK size pieces the buffer
         * can be divided into. Make sure not to truncate any pieces.
         * map_size is the size of the bitmap needed to describe the
         * chunks of the buffer.
         *
         * Discontiguous buffer support follows the layout of the underlying
         * buffer. This makes the implementation as simple as possible.
         */
        xfs_buf_item_get_format(bip, bp->b_map_count);

        for (i = 0; i < bip->bli_format_count; i++) {
                chunks = DIV_ROUND_UP(BBTOB(bp->b_maps[i].bm_len),
                                      XFS_BLF_CHUNK);
                map_size = DIV_ROUND_UP(chunks, NBWORD);

                if (map_size > XFS_BLF_DATAMAP_SIZE) {
                        kmem_cache_free(xfs_buf_item_zone, bip);
                        xfs_err(mp,
        "buffer item dirty bitmap (%u uints) too small to reflect %u bytes!",
                                        map_size,
                                        BBTOB(bp->b_maps[i].bm_len));
                        return -EFSCORRUPTED;
                }

                bip->bli_formats[i].blf_type = XFS_LI_BUF;
                bip->bli_formats[i].blf_blkno = bp->b_maps[i].bm_bn;
                bip->bli_formats[i].blf_len = bp->b_maps[i].bm_len;
                bip->bli_formats[i].blf_map_size = map_size;
        }

        bp->b_log_item = bip;
        xfs_buf_hold(bp);
        return 0;
}
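/*
 * Worked example (illustrative, not from the original source): an 8 KiB
 * single-map buffer has BBTOB(bm_len) = 8192 bytes, so with 128-byte chunks
 * (XFS_BLF_CHUNK) chunks = 8192 / 128 = 64, and with 32-bit bitmap words
 * (NBWORD) map_size = DIV_ROUND_UP(64, 32) = 2 words in the buf log format.
 */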
/*
 * Mark bytes first through last inclusive as dirty in the buf
 * item's bitmap.
 */
static void
xfs_buf_item_log_segment(
        uint                    first,
        uint                    last,
        uint                    *map)
{
        uint                    first_bit;
        uint                    last_bit;
        uint                    bits_to_set;
        uint                    bits_set;
        uint                    word_num;
        uint                    *wordp;
        uint                    bit;
        uint                    end_bit;
        uint                    mask;

        ASSERT(first < XFS_BLF_DATAMAP_SIZE * XFS_BLF_CHUNK * NBWORD);
        ASSERT(last < XFS_BLF_DATAMAP_SIZE * XFS_BLF_CHUNK * NBWORD);

        /*
         * Convert byte offsets to bit numbers.
         */
        first_bit = first >> XFS_BLF_SHIFT;
        last_bit = last >> XFS_BLF_SHIFT;

        /*
         * Calculate the total number of bits to be set.
         */
        bits_to_set = last_bit - first_bit + 1;

        /*
         * Get a pointer to the first word in the bitmap
         * to set a bit in.
         */
        word_num = first_bit >> BIT_TO_WORD_SHIFT;
        wordp = &map[word_num];

        /*
         * Calculate the starting bit in the first word.
         */
        bit = first_bit & (uint)(NBWORD - 1);

        /*
         * First set any bits in the first word of our range.
         * If it starts at bit 0 of the word, it will be
         * set below rather than here.  That is what the variable
         * bit tells us. The variable bits_set tracks the number
         * of bits that have been set so far.  End_bit is the number
         * of the last bit to be set in this word plus one.
         */
        if (bit) {
                end_bit = min(bit + bits_to_set, (uint)NBWORD);
                mask = ((1U << (end_bit - bit)) - 1) << bit;
                *wordp |= mask;
                wordp++;
                bits_set = end_bit - bit;
        } else {
                bits_set = 0;
        }

        /*
         * Now set bits a whole word at a time that are between
         * first_bit and last_bit.
         */
        while ((bits_to_set - bits_set) >= NBWORD) {
                *wordp = 0xffffffff;
                bits_set += NBWORD;
                wordp++;
        }

        /*
         * Finally, set any bits left to be set in one last partial word.
         */
        end_bit = bits_to_set - bits_set;
        if (end_bit) {
                mask = (1U << end_bit) - 1;
                *wordp |= mask;
        }
}
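/*
 * Worked example (illustrative, not from the original source): logging
 * bytes 256-511 with XFS_BLF_SHIFT = 7 gives first_bit = 2, last_bit = 3
 * and bits_to_set = 2. Both bits land in word 0 with bit = 2, so the
 * first-word mask is ((1U << 2) - 1) << 2 = 0xc, and no whole-word or
 * trailing partial-word pass is needed.
 */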
/*
 * Mark bytes first through last inclusive as dirty in the buf
 * item's bitmap.
 */
void
xfs_buf_item_log(
        struct xfs_buf_log_item *bip,
        uint                    first,
        uint                    last)
{
        int                     i;
        uint                    start;
        uint                    end;
        struct xfs_buf          *bp = bip->bli_buf;

        /*
         * walk each buffer segment and mark them dirty appropriately.
         */
        start = 0;
        for (i = 0; i < bip->bli_format_count; i++) {
                if (start > last)
                        break;
                end = start + BBTOB(bp->b_maps[i].bm_len) - 1;

                /* skip to the map that includes the first byte to log */
                if (first > end) {
                        start += BBTOB(bp->b_maps[i].bm_len);
                        continue;
                }

                /*
                 * Trim the range to this segment and mark it in the bitmap.
                 * Note that we must convert buffer offsets to segment relative
                 * offsets (e.g., the first byte of each segment is byte 0 of
                 * that segment).
                 */
                if (first < start)
                        first = start;
                if (end > last)
                        end = last;
                xfs_buf_item_log_segment(first - start, end - start,
                                         &bip->bli_formats[i].blf_data_map[0]);

                start += BBTOB(bp->b_maps[i].bm_len);
        }
}
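/*
 * Illustrative note (not from the original source): for a two-map buffer of
 * 4 KiB segments, logging bytes 3072-5119 marks bytes 3072-4095 in map 0
 * and segment-relative bytes 0-1023 in map 1, since each segment's bitmap
 * starts at byte 0 of that segment.
 */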
/*
 * Return true if the buffer has any ranges logged/dirtied by a transaction,
 * false otherwise.
 */
bool
xfs_buf_item_dirty_format(
        struct xfs_buf_log_item *bip)
{
        int                     i;

        for (i = 0; i < bip->bli_format_count; i++) {
                if (!xfs_bitmap_empty(bip->bli_formats[i].blf_data_map,
                             bip->bli_formats[i].blf_map_size))
                        return true;
        }

        return false;
}
STATIC void
xfs_buf_item_free(
        struct xfs_buf_log_item *bip)
{
        xfs_buf_item_free_format(bip);
        kmem_free(bip->bli_item.li_lv_shadow);
        kmem_cache_free(xfs_buf_item_zone, bip);
}
/*
 * This is called when the buf log item is no longer needed.  It should
 * free the buf log item associated with the given buffer and clear
 * the buffer's pointer to the buf log item.  If there are no more
 * items in the list, clear the b_iodone field of the buffer (see
 * xfs_buf_attach_iodone() below).
 */
void
xfs_buf_item_relse(
        struct xfs_buf  *bp)
{
        struct xfs_buf_log_item *bip = bp->b_log_item;

        trace_xfs_buf_item_relse(bp, _RET_IP_);
        ASSERT(!test_bit(XFS_LI_IN_AIL, &bip->bli_item.li_flags));

        bp->b_log_item = NULL;
        if (list_empty(&bp->b_li_list))
                bp->b_iodone = NULL;

        xfs_buf_rele(bp);
        xfs_buf_item_free(bip);
}
/*
 * Add the given log item with its callback to the list of callbacks
 * to be called when the buffer's I/O completes.  If it is not set
 * already, set the buffer's b_iodone() routine to be
 * xfs_buf_iodone_callbacks() and link the log item into the list of
 * items rooted at b_li_list.
 */
void
xfs_buf_attach_iodone(
        struct xfs_buf          *bp,
        void                    (*cb)(struct xfs_buf *, struct xfs_log_item *),
        struct xfs_log_item     *lip)
{
        ASSERT(xfs_buf_islocked(bp));

        lip->li_cb = cb;
        list_add_tail(&lip->li_bio_list, &bp->b_li_list);

        ASSERT(bp->b_iodone == NULL ||
               bp->b_iodone == xfs_buf_iodone_callbacks);
        bp->b_iodone = xfs_buf_iodone_callbacks;
}
/*
 * We can have many callbacks on a buffer. Running the callbacks individually
 * can cause a lot of contention on the AIL lock, so we allow for a single
 * callback to be able to scan the remaining items in bp->b_li_list for other
 * items of the same type and callback to be processed in the first call.
 *
 * As a result, the loop walking the callback list below will also modify the
 * list. It removes the first item from the list and then runs the callback.
 * The loop then restarts from the new first item in the list. This allows the
 * callback to scan and modify the list attached to the buffer and we don't
 * have to care about maintaining a next item pointer.
 */
STATIC void
xfs_buf_do_callbacks(
        struct xfs_buf          *bp)
{
        struct xfs_buf_log_item *blip = bp->b_log_item;
        struct xfs_log_item     *lip;

        /* If there is a buf_log_item attached, run its callback */
        if (blip) {
                lip = &blip->bli_item;
                lip->li_cb(bp, lip);
        }

        while (!list_empty(&bp->b_li_list)) {
                lip = list_first_entry(&bp->b_li_list, struct xfs_log_item,
                                       li_bio_list);

                /*
                 * Remove the item from the list, so we don't have any
                 * confusion if the item is added to another buf.
                 * Don't touch the log item after calling its
                 * callback, because it could have freed itself.
                 */
                list_del_init(&lip->li_bio_list);
                lip->li_cb(bp, lip);
        }
}
/*
 * Invoke the error state callback for each log item affected by the failed I/O.
 *
 * If a metadata buffer write fails with a non-permanent error, the buffer is
 * eventually resubmitted and so the completion callbacks are not run. The error
 * state may need to be propagated to the log items attached to the buffer,
 * however, so the next AIL push of the item knows how to handle it correctly.
 */
STATIC void
xfs_buf_do_callbacks_fail(
        struct xfs_buf          *bp)
{
        struct xfs_log_item     *lip;
        struct xfs_ail          *ailp;

        /*
         * Buffer log item errors are handled directly by xfs_buf_item_push()
         * and xfs_buf_iodone_callback_error, and they have no IO error
         * callbacks. Check only for items in b_li_list.
         */
        if (list_empty(&bp->b_li_list))
                return;

        lip = list_first_entry(&bp->b_li_list, struct xfs_log_item,
                        li_bio_list);
        ailp = lip->li_ailp;
        spin_lock(&ailp->ail_lock);
        list_for_each_entry(lip, &bp->b_li_list, li_bio_list) {
                if (lip->li_ops->iop_error)
                        lip->li_ops->iop_error(lip, bp);
        }
        spin_unlock(&ailp->ail_lock);
}
static bool
xfs_buf_iodone_callback_error(
        struct xfs_buf          *bp)
{
        struct xfs_buf_log_item *bip = bp->b_log_item;
        struct xfs_log_item     *lip;
        struct xfs_mount        *mp;
        static ulong            lasttime;
        static xfs_buftarg_t    *lasttarg;
        struct xfs_error_cfg    *cfg;

        /*
         * The failed buffer might not have a buf_log_item attached or the
         * log_item list might be empty. Get the mp from the available
         * xfs_log_item.
         */
        lip = list_first_entry_or_null(&bp->b_li_list, struct xfs_log_item,
                                       li_bio_list);
        mp = lip ? lip->li_mountp : bip->bli_item.li_mountp;

        /*
         * If we've already decided to shutdown the filesystem because of
         * I/O errors, there's no point in giving this a retry.
         */
        if (XFS_FORCED_SHUTDOWN(mp))
                goto out_stale;

        if (bp->b_target != lasttarg ||
            time_after(jiffies, (lasttime + 5*HZ))) {
                lasttime = jiffies;
                xfs_buf_ioerror_alert(bp, __this_address);
        }
        lasttarg = bp->b_target;

        /* synchronous writes will have callers process the error */
        if (!(bp->b_flags & XBF_ASYNC))
                goto out_stale;

        trace_xfs_buf_item_iodone_async(bp, _RET_IP_);
        ASSERT(bp->b_iodone != NULL);

        cfg = xfs_error_get_cfg(mp, XFS_ERR_METADATA, bp->b_error);

        /*
         * If the write was asynchronous then no one will be looking for the
         * error.  If this is the first failure of this type, clear the error
         * state and write the buffer out again. This means we always retry an
         * async write failure at least once, but we also need to set the buffer
         * up to behave correctly now for repeated failures.
         */
        if (!(bp->b_flags & (XBF_STALE | XBF_WRITE_FAIL)) ||
             bp->b_last_error != bp->b_error) {
                bp->b_flags |= (XBF_WRITE | XBF_DONE | XBF_WRITE_FAIL);
                bp->b_last_error = bp->b_error;
                if (cfg->retry_timeout != XFS_ERR_RETRY_FOREVER &&
                    !bp->b_first_retry_time)
                        bp->b_first_retry_time = jiffies;

                xfs_buf_ioerror(bp, 0);
                xfs_buf_submit(bp);
                return true;
        }

        /*
         * Repeated failure on an async write. Take action according to the
         * error configuration we have been set up to use.
         */

        if (cfg->max_retries != XFS_ERR_RETRY_FOREVER &&
            ++bp->b_retries > cfg->max_retries)
                goto permanent_error;
        if (cfg->retry_timeout != XFS_ERR_RETRY_FOREVER &&
            time_after(jiffies, cfg->retry_timeout + bp->b_first_retry_time))
                goto permanent_error;

        /* At unmount we may treat errors differently */
        if ((mp->m_flags & XFS_MOUNT_UNMOUNTING) && mp->m_fail_unmount)
                goto permanent_error;

        /*
         * Still a transient error, run IO completion failure callbacks and let
         * the higher layers retry the buffer.
         */
        xfs_buf_do_callbacks_fail(bp);
        xfs_buf_ioerror(bp, 0);
        xfs_buf_relse(bp);
        return true;

        /*
         * Permanent error - we need to trigger a shutdown if we haven't already
         * to indicate that inconsistency will result from this action.
         */
permanent_error:
        xfs_force_shutdown(mp, SHUTDOWN_META_IO_ERROR);
out_stale:
        xfs_buf_stale(bp);
        bp->b_flags |= XBF_DONE;
        trace_xfs_buf_error_relse(bp, _RET_IP_);
        return false;
}
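/*
 * Illustrative walk-through (hypothetical config values, not from the
 * original source): with max_retries = 2 and a finite retry_timeout, the
 * first -EIO on an async write resubmits the buffer immediately; subsequent
 * failures of the same error run the failure callbacks and release the
 * buffer for the next AIL push, until either the retry count or the timeout
 * trips and the permanent_error path shuts the filesystem down.
 */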
/*
 * This is the iodone() function for buffers which have had callbacks attached
 * to them by xfs_buf_attach_iodone(). We need to iterate the items on the
 * callback list, mark the buffer as having no more callbacks and then push the
 * buffer through IO completion processing.
 */
void
xfs_buf_iodone_callbacks(
        struct xfs_buf          *bp)
{
        /*
         * If there is an error, process it. Some errors require us
         * to run callbacks after failure processing is done so we
         * detect that and take appropriate action.
         */
        if (bp->b_error && xfs_buf_iodone_callback_error(bp))
                return;

        /*
         * Successful IO or permanent error. Either way, we can clear the
         * retry state here in preparation for the next error that may occur.
         */
        bp->b_last_error = 0;
        bp->b_retries = 0;
        bp->b_first_retry_time = 0;

        xfs_buf_do_callbacks(bp);
        bp->b_log_item = NULL;
        list_del_init(&bp->b_li_list);
        bp->b_iodone = NULL;
        xfs_buf_ioend(bp);
}
/*
 * This is the iodone() function for buffers which have been
 * logged.  It is called when they are eventually flushed out.
 * It should remove the buf item from the AIL, and free the buf item.
 * It is called by xfs_buf_iodone_callbacks() above which will take
 * care of cleaning up the buffer itself.
 */
void
xfs_buf_iodone(
        struct xfs_buf          *bp,
        struct xfs_log_item     *lip)
{
        struct xfs_ail          *ailp = lip->li_ailp;

        ASSERT(BUF_ITEM(lip)->bli_buf == bp);

        xfs_buf_rele(bp);

        /*
         * If we are forcibly shutting down, this may well be
         * off the AIL already. That's because we simulate the
         * log-committed callbacks to unpin these buffers. Or we may never
         * have put this item on AIL because the transaction was
         * aborted forcibly. xfs_trans_ail_delete() takes care of these.
         *
         * Either way, AIL is useless if we're forcing a shutdown.
         */
        spin_lock(&ailp->ail_lock);
        xfs_trans_ail_delete(ailp, lip, SHUTDOWN_CORRUPT_INCORE);
        xfs_buf_item_free(BUF_ITEM(lip));
}
/*
 * Requeue a failed buffer for writeback.
 *
 * We clear the log item failed state here as well, but we have to be careful
 * about reference counts because the only active reference counts on the
 * buffer may be the failed log items. Hence if we clear the log item failed
 * state before queuing the buffer for IO we can release all active references
 * to the buffer and free it, leading to use after free problems in
 * xfs_buf_delwri_queue. It makes no difference to the buffer or log items
 * which order we process them in - the buffer is locked, and we own the
 * buffer list so nothing on them is going to change while we are performing
 * this action.
 *
 * Hence we can safely queue the buffer for IO before we clear the failed log
 * item state, therefore always having an active reference to the buffer and
 * avoiding the transient zero-reference state that leads to use-after-free.
 *
 * Return true if the buffer was added to the buffer list, false if it was
 * already on the buffer list.
 */
bool
xfs_buf_resubmit_failed_buffers(
        struct xfs_buf          *bp,
        struct list_head        *buffer_list)
{
        struct xfs_log_item     *lip;
        bool                    ret;

        ret = xfs_buf_delwri_queue(bp, buffer_list);

        /*
         * XFS_LI_FAILED set/clear is protected by ail_lock, the caller of
         * this function already has it acquired.
         */
        list_for_each_entry(lip, &bp->b_li_list, li_bio_list)
                xfs_clear_li_failed(lip);

        return ret;
}