/*
 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 */
18 #include "xfs.h"
19 #include "xfs_fs.h"
20 #include "xfs_types.h"
21 #include "xfs_bit.h"
22 #include "xfs_log.h"
23 #include "xfs_trans.h"
24 #include "xfs_sb.h"
25 #include "xfs_ag.h"
26 #include "xfs_mount.h"
27 #include "xfs_buf_item.h"
28 #include "xfs_trans_priv.h"
29 #include "xfs_error.h"
30 #include "xfs_trace.h"
33 kmem_zone_t *xfs_buf_item_zone;
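
/*
 * Pull the buf log item back out of the generic log item it embeds.
 */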
static inline struct xfs_buf_log_item *BUF_ITEM(struct xfs_log_item *lip)
{
	return container_of(lip, struct xfs_buf_log_item, bli_item);
}

#ifdef XFS_TRANS_DEBUG
/*
 * This function uses an alternate strategy for tracking the bytes
 * that the user requests to be logged.  This can then be used
 * in conjunction with the bli_orig array in the buf log item to
 * catch bugs in our callers' code.
 *
 * We also double check the bits set in xfs_buf_item_log using a
 * simple algorithm to check that every byte is accounted for.
 */
STATIC void
xfs_buf_item_log_debug(
	xfs_buf_log_item_t	*bip,
	uint			first,
	uint			last)
{
	uint	x;
	uint	byte;
	uint	nbytes;
	uint	chunk_num;
	uint	word_num;
	uint	bit_num;
	uint	bit_set;
	uint	*wordp;

	ASSERT(bip->bli_logged != NULL);
	byte = first;
	nbytes = last - first + 1;
	bfset(bip->bli_logged, first, nbytes);
	for (x = 0; x < nbytes; x++) {
		chunk_num = byte >> XFS_BLF_SHIFT;
		word_num = chunk_num >> BIT_TO_WORD_SHIFT;
		bit_num = chunk_num & (NBWORD - 1);
		wordp = &(bip->bli_format.blf_data_map[word_num]);
		bit_set = *wordp & (1 << bit_num);
		ASSERT(bit_set);
		byte++;
	}
}

/*
 * This function is called when we flush something into a buffer without
 * logging it.  This happens for things like inodes which are logged
 * separately from the buffer.
 */
void
xfs_buf_item_flush_log_debug(
	xfs_buf_t	*bp,
	uint		first,
	uint		last)
{
	xfs_buf_log_item_t	*bip = bp->b_fspriv;
	uint			nbytes;

	if (bip == NULL || (bip->bli_item.li_type != XFS_LI_BUF))
		return;

	ASSERT(bip->bli_logged != NULL);
	nbytes = last - first + 1;
	bfset(bip->bli_logged, first, nbytes);
}

/*
 * This function is called to verify that our callers have logged
 * all the bytes that they changed.
 *
 * It does this by comparing the original copy of the buffer stored in
 * the buf log item's bli_orig array to the current copy of the buffer
 * and ensuring that all bytes which mismatch are set in the bli_logged
 * array of the buf log item.
 */
STATIC void
xfs_buf_item_log_check(
	xfs_buf_log_item_t	*bip)
{
	char		*orig;
	char		*buffer;
	int		x;
	xfs_buf_t	*bp;

	ASSERT(bip->bli_orig != NULL);
	ASSERT(bip->bli_logged != NULL);

	bp = bip->bli_buf;
	ASSERT(bp->b_length > 0);
	ASSERT(bp->b_addr != NULL);
	orig = bip->bli_orig;
	buffer = bp->b_addr;
	for (x = 0; x < BBTOB(bp->b_length); x++) {
		if (orig[x] != buffer[x] && !btst(bip->bli_logged, x)) {
			xfs_emerg(bp->b_mount,
				"%s: bip %x buffer %x orig %x index %d",
				__func__, bip, bp, orig, x);
			ASSERT(0);
		}
	}
}
#else
#define		xfs_buf_item_log_debug(x,y,z)
#define		xfs_buf_item_log_check(x)
#endif

STATIC void	xfs_buf_do_callbacks(struct xfs_buf *bp);

/*
 * This returns the number of log iovecs needed to log the
 * given buf log item.
 *
 * It calculates this as 1 iovec for the buf log format structure
 * and 1 for each stretch of non-contiguous chunks to be logged.
 * Contiguous chunks are logged in a single iovec.
 *
 * If the XFS_BLI_STALE flag has been set, then log nothing.
 */
STATIC uint
xfs_buf_item_size_segment(
	struct xfs_buf_log_item	*bip,
	struct xfs_buf_log_format *blfp)
{
	struct xfs_buf		*bp = bip->bli_buf;
	uint			nvecs;
	int			next_bit;
	int			last_bit;

	last_bit = xfs_next_bit(blfp->blf_data_map, blfp->blf_map_size, 0);
	if (last_bit == -1)
		return 0;

	/*
	 * initial count for a dirty buffer is 2 vectors - the format structure
	 * and the first dirty region.
	 */
	nvecs = 2;

	while (last_bit != -1) {
		/*
		 * This takes the bit number to start looking from and
		 * returns the next set bit from there.  It returns -1
		 * if there are no more bits set or the start bit is
		 * beyond the end of the bitmap.
		 */
		next_bit = xfs_next_bit(blfp->blf_data_map, blfp->blf_map_size,
					last_bit + 1);
		/*
		 * If we run out of bits, leave the loop,
		 * else if we find a new set of bits bump the number of vecs,
		 * else keep scanning the current set of bits.
		 */
		if (next_bit == -1) {
			break;
		} else if (next_bit != last_bit + 1) {
			last_bit = next_bit;
			nvecs++;
		} else if (xfs_buf_offset(bp, next_bit * XFS_BLF_CHUNK) !=
			   (xfs_buf_offset(bp, last_bit * XFS_BLF_CHUNK) +
			    XFS_BLF_CHUNK)) {
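			/*
			 * The chunks are adjacent in the buffer but not
			 * adjacent in memory (the buffer may be built from
			 * multiple, separately mapped pages), so they still
			 * need their own iovec.
			 */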
			last_bit = next_bit;
			nvecs++;
		} else {
			last_bit++;
		}
	}

	return nvecs;
}

/*
 * This returns the number of log iovecs needed to log the given buf log item.
 *
 * It calculates this as 1 iovec for the buf log format structure and 1 for each
 * stretch of non-contiguous chunks to be logged.  Contiguous chunks are logged
 * in a single iovec.
 *
 * Discontiguous buffers need a format structure per region that is being
 * logged. This makes the changes in the buffer appear to log recovery as though
 * they came from separate buffers, just like would occur if multiple buffers
 * were used instead of a single discontiguous buffer. This enables
 * discontiguous buffers to be in-memory constructs, completely transparent to
 * what ends up on disk.
 *
 * If the XFS_BLI_STALE flag has been set, then log nothing but the buf log
 * format structures.
 */
STATIC uint
xfs_buf_item_size(
	struct xfs_log_item	*lip)
{
	struct xfs_buf_log_item	*bip = BUF_ITEM(lip);
	uint			nvecs;
	int			i;

	ASSERT(atomic_read(&bip->bli_refcount) > 0);
	if (bip->bli_flags & XFS_BLI_STALE) {
		/*
		 * The buffer is stale, so all we need to log
		 * is the buf log format structure with the
		 * cancel flag in it.
		 */
		trace_xfs_buf_item_size_stale(bip);
		ASSERT(bip->bli_format.blf_flags & XFS_BLF_CANCEL);
		return bip->bli_format_count;
	}

	ASSERT(bip->bli_flags & XFS_BLI_LOGGED);

	/*
	 * The vector count is based on the number of buffer vectors we have
	 * dirty bits in. This will only be greater than one when we have a
	 * compound buffer with more than one segment dirty. Hence for compound
	 * buffers we need to track which segment the dirty bits correspond to,
	 * and when we move from one segment to the next increment the vector
	 * count for the extra buf log format structure that will need to be
	 * written.
	 */
	nvecs = 0;
	for (i = 0; i < bip->bli_format_count; i++) {
		nvecs += xfs_buf_item_size_segment(bip, &bip->bli_formats[i]);
	}

	trace_xfs_buf_item_size(bip);
	return nvecs;
}

static struct xfs_log_iovec *
xfs_buf_item_format_segment(
	struct xfs_buf_log_item	*bip,
	struct xfs_log_iovec	*vecp,
	uint			offset,
	struct xfs_buf_log_format *blfp)
{
	struct xfs_buf	*bp = bip->bli_buf;
	uint		base_size;
	uint		nvecs;
	int		first_bit;
	int		last_bit;
	int		next_bit;
	uint		nbits;
	uint		buffer_offset;

	/* copy the flags across from the base format item */
	blfp->blf_flags = bip->bli_format.blf_flags;

	/*
	 * Base size is the actual size of the ondisk structure - it reflects
	 * the actual size of the dirty bitmap rather than the size of the in
	 * memory structure.
	 */
	base_size = offsetof(struct xfs_buf_log_format, blf_data_map) +
			(blfp->blf_map_size * sizeof(blfp->blf_data_map[0]));
	vecp->i_addr = blfp;
	vecp->i_len = base_size;
	vecp->i_type = XLOG_REG_TYPE_BFORMAT;
	vecp++;
	nvecs = 1;

	if (bip->bli_flags & XFS_BLI_STALE) {
		/*
		 * The buffer is stale, so all we need to log
		 * is the buf log format structure with the
		 * cancel flag in it.
		 */
		trace_xfs_buf_item_format_stale(bip);
		ASSERT(blfp->blf_flags & XFS_BLF_CANCEL);
		blfp->blf_size = nvecs;
		return vecp;
	}

	/*
	 * Fill in an iovec for each set of contiguous chunks.
	 */
	first_bit = xfs_next_bit(blfp->blf_data_map, blfp->blf_map_size, 0);
	ASSERT(first_bit != -1);
	last_bit = first_bit;
	nbits = 1;
	for (;;) {
		/*
		 * This takes the bit number to start looking from and
		 * returns the next set bit from there.  It returns -1
		 * if there are no more bits set or the start bit is
		 * beyond the end of the bitmap.
		 */
		next_bit = xfs_next_bit(blfp->blf_data_map, blfp->blf_map_size,
					(uint)last_bit + 1);
		/*
		 * If we run out of bits fill in the last iovec and get
		 * out of the loop.
		 * Else if we start a new set of bits then fill in the
		 * iovec for the series we were looking at and start
		 * counting the bits in the new one.
		 * Else we're still in the same set of bits so just
		 * keep counting and scanning.
		 */
		if (next_bit == -1) {
			buffer_offset = offset + first_bit * XFS_BLF_CHUNK;
			vecp->i_addr = xfs_buf_offset(bp, buffer_offset);
			vecp->i_len = nbits * XFS_BLF_CHUNK;
			vecp->i_type = XLOG_REG_TYPE_BCHUNK;
			nvecs++;
			break;
		} else if (next_bit != last_bit + 1) {
			buffer_offset = offset + first_bit * XFS_BLF_CHUNK;
			vecp->i_addr = xfs_buf_offset(bp, buffer_offset);
			vecp->i_len = nbits * XFS_BLF_CHUNK;
			vecp->i_type = XLOG_REG_TYPE_BCHUNK;
			nvecs++;
			vecp++;
			first_bit = next_bit;
			last_bit = next_bit;
			nbits = 1;
		} else if (xfs_buf_offset(bp, offset +
					      (next_bit << XFS_BLF_SHIFT)) !=
			   (xfs_buf_offset(bp, offset +
					       (last_bit << XFS_BLF_SHIFT)) +
			    XFS_BLF_CHUNK)) {
			buffer_offset = offset + first_bit * XFS_BLF_CHUNK;
			vecp->i_addr = xfs_buf_offset(bp, buffer_offset);
			vecp->i_len = nbits * XFS_BLF_CHUNK;
			vecp->i_type = XLOG_REG_TYPE_BCHUNK;
			/*
			 * You would think we need to bump the nvecs here
			 * too, but we do not: this number is used by
			 * recovery, and it gets confused by the boundary
			 * split here.
			 *			nvecs++;
			 */
			vecp++;
			first_bit = next_bit;
			last_bit = next_bit;
			nbits = 1;
		} else {
			last_bit++;
			nbits++;
		}
	}
	bip->bli_format.blf_size = nvecs;
	return vecp;
}

/*
 * This is called to fill in the vector of log iovecs for the
 * given log buf item.  It fills the first entry with a buf log
 * format structure, and the rest point to contiguous chunks
 * within the buffer.
 */
STATIC void
xfs_buf_item_format(
	struct xfs_log_item	*lip,
	struct xfs_log_iovec	*vecp)
{
	struct xfs_buf_log_item	*bip = BUF_ITEM(lip);
	struct xfs_buf		*bp = bip->bli_buf;
	uint			offset = 0;
	int			i;

	ASSERT(atomic_read(&bip->bli_refcount) > 0);
	ASSERT((bip->bli_flags & XFS_BLI_LOGGED) ||
	       (bip->bli_flags & XFS_BLI_STALE));

	/*
	 * If it is an inode buffer, transfer the in-memory state to the
	 * format flags and clear the in-memory state. We do not transfer
	 * this state if the inode buffer allocation has not yet been committed
	 * to the log as setting the XFS_BLI_INODE_BUF flag will prevent
	 * correct replay of the inode allocation.
	 */
	if (bip->bli_flags & XFS_BLI_INODE_BUF) {
		if (!((bip->bli_flags & XFS_BLI_INODE_ALLOC_BUF) &&
		      xfs_log_item_in_current_chkpt(lip)))
			bip->bli_format.blf_flags |= XFS_BLF_INODE_BUF;
		bip->bli_flags &= ~XFS_BLI_INODE_BUF;
	}

	for (i = 0; i < bip->bli_format_count; i++) {
		vecp = xfs_buf_item_format_segment(bip, vecp, offset,
						&bip->bli_formats[i]);
		offset += bp->b_maps[i].bm_len;
	}

	/*
	 * Check to make sure everything is consistent.
	 */
	trace_xfs_buf_item_format(bip);
	xfs_buf_item_log_check(bip);
}

/*
 * This is called to pin the buffer associated with the buf log item in memory
 * so it cannot be written out.
 *
 * We also always take a reference to the buffer log item here so that the bli
 * is held while the item is pinned in memory. This means that we can
 * unconditionally drop the reference count a transaction holds when the
 * transaction is completed.
 */
STATIC void
xfs_buf_item_pin(
	struct xfs_log_item	*lip)
{
	struct xfs_buf_log_item	*bip = BUF_ITEM(lip);

	ASSERT(atomic_read(&bip->bli_refcount) > 0);
	ASSERT((bip->bli_flags & XFS_BLI_LOGGED) ||
	       (bip->bli_flags & XFS_BLI_STALE));

	trace_xfs_buf_item_pin(bip);

	atomic_inc(&bip->bli_refcount);
	atomic_inc(&bip->bli_buf->b_pin_count);
}

/*
 * This is called to unpin the buffer associated with the buf log
 * item which was previously pinned with a call to xfs_buf_item_pin().
 *
 * Also drop the reference to the buf item for the current transaction.
 * If the XFS_BLI_STALE flag is set and we are the last reference,
 * then free up the buf log item and unlock the buffer.
 *
 * If the remove flag is set we are called from uncommit in the
 * forced-shutdown path.  If that is true and the reference count on
 * the log item is going to drop to zero we need to free the item's
 * descriptor in the transaction.
 */
STATIC void
xfs_buf_item_unpin(
	struct xfs_log_item	*lip,
	int			remove)
{
	struct xfs_buf_log_item	*bip = BUF_ITEM(lip);
	xfs_buf_t		*bp = bip->bli_buf;
	struct xfs_ail		*ailp = lip->li_ailp;
	int			stale = bip->bli_flags & XFS_BLI_STALE;
	int			freed;

	ASSERT(bp->b_fspriv == bip);
	ASSERT(atomic_read(&bip->bli_refcount) > 0);

	trace_xfs_buf_item_unpin(bip);

	freed = atomic_dec_and_test(&bip->bli_refcount);
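
	/*
	 * Drop the buffer's pin count as well; if this was the last pin,
	 * wake anyone waiting for the buffer to become unpinned.
	 */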
	if (atomic_dec_and_test(&bp->b_pin_count))
		wake_up_all(&bp->b_waiters);

	if (freed && stale) {
		ASSERT(bip->bli_flags & XFS_BLI_STALE);
		ASSERT(xfs_buf_islocked(bp));
		ASSERT(XFS_BUF_ISSTALE(bp));
		ASSERT(bip->bli_format.blf_flags & XFS_BLF_CANCEL);

		trace_xfs_buf_item_unpin_stale(bip);

		if (remove) {
			/*
			 * If we are in a transaction context, we have to
			 * remove the log item from the transaction as we are
			 * about to release our reference to the buffer.  If we
			 * don't, the unlock that occurs later in
			 * xfs_trans_uncommit() will try to reference the
			 * buffer which we no longer have a hold on.
			 */
			if (lip->li_desc)
				xfs_trans_del_item(lip);

			/*
			 * Since the transaction no longer refers to the buffer,
			 * the buffer should no longer refer to the transaction.
			 */
			bp->b_transp = NULL;
		}

		/*
		 * If we get called here because of an IO error, we may
		 * or may not have the item on the AIL. xfs_trans_ail_delete()
		 * will take care of that situation.
		 * xfs_trans_ail_delete() drops the AIL lock.
		 */
		if (bip->bli_flags & XFS_BLI_STALE_INODE) {
			xfs_buf_do_callbacks(bp);
			bp->b_fspriv = NULL;
			bp->b_iodone = NULL;
		} else {
			spin_lock(&ailp->xa_lock);
			xfs_trans_ail_delete(ailp, lip, SHUTDOWN_LOG_IO_ERROR);
			xfs_buf_item_relse(bp);
			ASSERT(bp->b_fspriv == NULL);
		}
		xfs_buf_relse(bp);
	} else if (freed && remove) {
		/*
		 * There are currently two references to the buffer - the active
		 * LRU reference and the buf log item. What we are about to do
		 * here - simulate a failed IO completion - requires 3
		 * references.
		 *
		 * The LRU reference is removed by the xfs_buf_stale() call. The
		 * buf item reference is removed by the xfs_buf_iodone()
		 * callback that is run by xfs_buf_do_callbacks() during ioend
		 * processing (via the bp->b_iodone callback), and then finally
		 * the ioend processing will drop the IO reference if the buffer
		 * is marked XBF_ASYNC.
		 *
		 * Hence we need to take an additional reference here so that IO
		 * completion processing doesn't free the buffer prematurely.
		 */
		xfs_buf_lock(bp);
		xfs_buf_hold(bp);
		bp->b_flags |= XBF_ASYNC;
		xfs_buf_ioerror(bp, EIO);
		XFS_BUF_UNDONE(bp);
		xfs_buf_stale(bp);
		xfs_buf_ioend(bp, 0);
	}
}
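
/*
 * AIL push handler for buf items: try to lock the buffer and add it to the
 * caller's delwri list for writeback.  Report the item as pinned, locked or
 * flushing if it cannot be queued right now.
 */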
STATIC uint
xfs_buf_item_push(
	struct xfs_log_item	*lip,
	struct list_head	*buffer_list)
{
	struct xfs_buf_log_item	*bip = BUF_ITEM(lip);
	struct xfs_buf		*bp = bip->bli_buf;
	uint			rval = XFS_ITEM_SUCCESS;

	if (xfs_buf_ispinned(bp))
		return XFS_ITEM_PINNED;
	if (!xfs_buf_trylock(bp))
		return XFS_ITEM_LOCKED;

	ASSERT(!(bip->bli_flags & XFS_BLI_STALE));

	trace_xfs_buf_item_push(bip);

	if (!xfs_buf_delwri_queue(bp, buffer_list))
		rval = XFS_ITEM_FLUSHING;
	xfs_buf_unlock(bp);
	return rval;
}

/*
 * Release the buffer associated with the buf log item.  If there is no dirty
 * logged data associated with the buffer recorded in the buf log item, then
 * free the buf log item and remove the reference to it in the buffer.
 *
 * This call ignores the recursion count.  It is only called when the buffer
 * should REALLY be unlocked, regardless of the recursion count.
 *
 * We unconditionally drop the transaction's reference to the log item.  If the
 * item was logged, then another reference was taken when it was pinned, so we
 * can safely drop the transaction reference now.  This also allows us to avoid
 * potential races with the unpin code freeing the bli by not referencing the
 * bli after we've dropped the reference count.
 *
 * If the XFS_BLI_HOLD flag is set in the buf log item, then free the log item
 * if necessary but do not unlock the buffer.  This is for support of
 * xfs_trans_bhold().  Make sure the XFS_BLI_HOLD field is cleared if we don't
 * free the item.
 */
STATIC void
xfs_buf_item_unlock(
	struct xfs_log_item	*lip)
{
	struct xfs_buf_log_item	*bip = BUF_ITEM(lip);
	struct xfs_buf		*bp = bip->bli_buf;
	int			aborted;
	uint			hold;

	/* Clear the buffer's association with this transaction. */
	bp->b_transp = NULL;

	/*
	 * If this is a transaction abort, don't return early.  Instead, allow
	 * the brelse to happen.  Normally it would be done for stale
	 * (cancelled) buffers at unpin time, but we'll never go through the
	 * pin/unpin cycle if we abort inside commit.
	 */
	aborted = (lip->li_flags & XFS_LI_ABORTED) != 0;

	/*
	 * Before possibly freeing the buf item, determine if we should
	 * release the buffer at the end of this routine.
	 */
	hold = bip->bli_flags & XFS_BLI_HOLD;

	/* Clear the per transaction state. */
	bip->bli_flags &= ~(XFS_BLI_LOGGED | XFS_BLI_HOLD);

	/*
	 * If the buf item is marked stale, then don't do anything.  We'll
	 * unlock the buffer and free the buf item when the buffer is unpinned
	 * for the last time.
	 */
	if (bip->bli_flags & XFS_BLI_STALE) {
		trace_xfs_buf_item_unlock_stale(bip);
		ASSERT(bip->bli_format.blf_flags & XFS_BLF_CANCEL);
		if (!aborted) {
			atomic_dec(&bip->bli_refcount);
			return;
		}
	}

	trace_xfs_buf_item_unlock(bip);

	/*
	 * If the buf item isn't tracking any data, free it, otherwise drop the
	 * reference we hold to it.
	 */
	if (xfs_bitmap_empty(bip->bli_format.blf_data_map,
			     bip->bli_format.blf_map_size))
		xfs_buf_item_relse(bp);
	else
		atomic_dec(&bip->bli_refcount);

	if (!hold)
		xfs_buf_relse(bp);
}

/*
 * This is called to find out where the oldest active copy of the
 * buf log item in the on disk log resides now that the last log
 * write of it completed at the given lsn.
 * We always re-log all the dirty data in a buffer, so usually the
 * latest copy in the on disk log is the only one that matters.  For
 * those cases we simply return the given lsn.
 *
 * The one exception to this is for buffers full of newly allocated
 * inodes.  These buffers are only relogged with the XFS_BLI_INODE_BUF
 * flag set, indicating that only the di_next_unlinked fields from the
 * inodes in the buffers will be replayed during recovery.  If the
 * original newly allocated inode images have not yet been flushed
 * when the buffer is so relogged, then we need to make sure that we
 * keep the old images in the 'active' portion of the log.  We do this
 * by returning the original lsn of that transaction here rather than
 * the current one.
 */
STATIC xfs_lsn_t
xfs_buf_item_committed(
	struct xfs_log_item	*lip,
	xfs_lsn_t		lsn)
{
	struct xfs_buf_log_item	*bip = BUF_ITEM(lip);

	trace_xfs_buf_item_committed(bip);

	if ((bip->bli_flags & XFS_BLI_INODE_ALLOC_BUF) && lip->li_lsn != 0)
		return lip->li_lsn;
	return lsn;
}
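
/*
 * Buf items need no extra work when the transaction is being committed,
 * so this handler is deliberately empty.
 */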
STATIC void
xfs_buf_item_committing(
	struct xfs_log_item	*lip,
	xfs_lsn_t		commit_lsn)
{
}

/*
 * This is the ops vector shared by all buf log items.
 */
static const struct xfs_item_ops xfs_buf_item_ops = {
	.iop_size	= xfs_buf_item_size,
	.iop_format	= xfs_buf_item_format,
	.iop_pin	= xfs_buf_item_pin,
	.iop_unpin	= xfs_buf_item_unpin,
	.iop_unlock	= xfs_buf_item_unlock,
	.iop_committed	= xfs_buf_item_committed,
	.iop_push	= xfs_buf_item_push,
	.iop_committing	= xfs_buf_item_committing
};
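
/*
 * Set up the buf log format array for this buf item.  The common
 * single-segment case reuses the format structure embedded in the bli;
 * only discontiguous (multi-segment) buffers need a separate allocation.
 */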
STATIC int
xfs_buf_item_get_format(
	struct xfs_buf_log_item	*bip,
	int			count)
{
	ASSERT(bip->bli_formats == NULL);
	bip->bli_format_count = count;

	if (count == 1) {
		bip->bli_formats = &bip->bli_format;
		return 0;
	}

	bip->bli_formats = kmem_zalloc(count * sizeof(struct xfs_buf_log_format),
				KM_SLEEP);
	if (!bip->bli_formats)
		return ENOMEM;
	return 0;
}
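
/*
 * Free the format array, but only if it was allocated separately from the
 * bli (i.e. for a multi-segment buffer).
 */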
STATIC void
xfs_buf_item_free_format(
	struct xfs_buf_log_item	*bip)
{
	if (bip->bli_formats != &bip->bli_format) {
		kmem_free(bip->bli_formats);
		bip->bli_formats = NULL;
	}
}

/*
 * Allocate a new buf log item to go with the given buffer.
 * Set the buffer's b_fsprivate field to point to the new
 * buf log item.  If there are other items attached to the
 * buffer (see xfs_buf_attach_iodone() below), then put the
 * buf log item at the front.
 */
void
xfs_buf_item_init(
	xfs_buf_t	*bp,
	xfs_mount_t	*mp)
{
	xfs_log_item_t		*lip = bp->b_fspriv;
	xfs_buf_log_item_t	*bip;
	int			chunks;
	int			map_size;
	int			error;
	int			i;

	/*
	 * Check to see if there is already a buf log item for
	 * this buffer.  If there is, it is guaranteed to be
	 * the first.  If we do already have one, there is
	 * nothing to do here so return.
	 */
	ASSERT(bp->b_target->bt_mount == mp);
	if (lip != NULL && lip->li_type == XFS_LI_BUF)
		return;

	bip = kmem_zone_zalloc(xfs_buf_item_zone, KM_SLEEP);
	xfs_log_item_init(mp, &bip->bli_item, XFS_LI_BUF, &xfs_buf_item_ops);
	bip->bli_buf = bp;
	xfs_buf_hold(bp);

	/*
	 * chunks is the number of XFS_BLF_CHUNK size pieces the buffer
	 * can be divided into.  Make sure not to truncate any pieces.
	 * map_size is the size of the bitmap needed to describe the
	 * chunks of the buffer.
	 *
	 * Discontiguous buffer support follows the layout of the underlying
	 * buffer.  This makes the implementation as simple as possible.
	 */
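	/*
	 * Note: with KM_SLEEP allocation the format setup below cannot fail,
	 * hence the unconditional assert on its return value.
	 */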
	error = xfs_buf_item_get_format(bip, bp->b_map_count);
	ASSERT(error == 0);

	for (i = 0; i < bip->bli_format_count; i++) {
		chunks = DIV_ROUND_UP(BBTOB(bp->b_maps[i].bm_len),
				      XFS_BLF_CHUNK);
		map_size = DIV_ROUND_UP(chunks, NBWORD);

		bip->bli_formats[i].blf_type = XFS_LI_BUF;
		bip->bli_formats[i].blf_blkno = bp->b_maps[i].bm_bn;
		bip->bli_formats[i].blf_len = bp->b_maps[i].bm_len;
		bip->bli_formats[i].blf_map_size = map_size;
	}

#ifdef XFS_TRANS_DEBUG
	/*
	 * Allocate the arrays for tracking what needs to be logged
	 * and what our callers request to be logged.  bli_orig
	 * holds a copy of the original, clean buffer for comparison
	 * against, and bli_logged keeps a 1 bit flag per byte in
	 * the buffer to indicate which bytes the callers have asked
	 * to have logged.
	 */
	bip->bli_orig = kmem_alloc(BBTOB(bp->b_length), KM_SLEEP);
	memcpy(bip->bli_orig, bp->b_addr, BBTOB(bp->b_length));
	bip->bli_logged = kmem_zalloc(BBTOB(bp->b_length) / NBBY, KM_SLEEP);
#endif

	/*
	 * Put the buf item into the list of items attached to the
	 * buffer at the front.
	 */
	if (bp->b_fspriv)
		bip->bli_item.li_bio_list = bp->b_fspriv;
	bp->b_fspriv = bip;
}

/*
 * Mark bytes first through last inclusive as dirty in the buf
 * item's bitmap.
 */
void
xfs_buf_item_log_segment(
	struct xfs_buf_log_item	*bip,
	uint			first,
	uint			last,
	uint			*map)
{
	uint		first_bit;
	uint		last_bit;
	uint		bits_to_set;
	uint		bits_set;
	uint		word_num;
	uint		*wordp;
	uint		bit;
	uint		end_bit;
	uint		mask;

	/*
	 * Convert byte offsets to bit numbers.
	 */
	first_bit = first >> XFS_BLF_SHIFT;
	last_bit = last >> XFS_BLF_SHIFT;

	/*
	 * Calculate the total number of bits to be set.
	 */
	bits_to_set = last_bit - first_bit + 1;

	/*
	 * Get a pointer to the first word in the bitmap
	 * to set a bit in.
	 */
	word_num = first_bit >> BIT_TO_WORD_SHIFT;
	wordp = &map[word_num];

	/*
	 * Calculate the starting bit in the first word.
	 */
	bit = first_bit & (uint)(NBWORD - 1);

	/*
	 * First set any bits in the first word of our range.
	 * If it starts at bit 0 of the word, it will be
	 * set below rather than here.  That is what the variable
	 * bit tells us.  The variable bits_set tracks the number
	 * of bits that have been set so far.  End_bit is the number
	 * of the last bit to be set in this word plus one.
	 */
	if (bit) {
		end_bit = MIN(bit + bits_to_set, (uint)NBWORD);
		mask = ((1 << (end_bit - bit)) - 1) << bit;
		*wordp |= mask;
		wordp++;
		bits_set = end_bit - bit;
	} else {
		bits_set = 0;
	}

	/*
	 * Now set bits a whole word at a time that are between
	 * first_bit and last_bit.
	 */
	while ((bits_to_set - bits_set) >= NBWORD) {
		*wordp |= 0xffffffff;
		bits_set += NBWORD;
		wordp++;
	}

	/*
	 * Finally, set any bits left to be set in one last partial word.
	 */
	end_bit = bits_to_set - bits_set;
	if (end_bit) {
		mask = (1 << end_bit) - 1;
		*wordp |= mask;
	}

	xfs_buf_item_log_debug(bip, first, last);
}

/*
 * Mark bytes first through last inclusive as dirty in the buf
 * item's bitmap.
 */
void
xfs_buf_item_log(
	xfs_buf_log_item_t	*bip,
	uint			first,
	uint			last)
{
	int			i;
	uint			start;
	uint			end;
	struct xfs_buf		*bp = bip->bli_buf;

	/*
	 * Mark the item as having some dirty data for
	 * quick reference in xfs_buf_item_dirty.
	 */
	bip->bli_flags |= XFS_BLI_DIRTY;

	/*
	 * walk each buffer segment and mark them dirty appropriately.
	 */
	start = 0;
	for (i = 0; i < bip->bli_format_count; i++) {
		if (start > last)
			break;
		end = start + BBTOB(bp->b_maps[i].bm_len);
		if (first > end) {
			start += BBTOB(bp->b_maps[i].bm_len);
			continue;
		}
		if (first < start)
			first = start;
		if (end > last)
			end = last;

		xfs_buf_item_log_segment(bip, first, end,
					 &bip->bli_formats[i].blf_data_map[0]);

		start += bp->b_maps[i].bm_len;
	}
}

/*
 * Return 1 if the buffer has some data that has been logged (at any
 * point, not just the current transaction) and 0 if not.
 */
uint
xfs_buf_item_dirty(
	xfs_buf_log_item_t	*bip)
{
	return (bip->bli_flags & XFS_BLI_DIRTY);
}

STATIC void
xfs_buf_item_free(
	xfs_buf_log_item_t	*bip)
{
#ifdef XFS_TRANS_DEBUG
	kmem_free(bip->bli_orig);
	kmem_free(bip->bli_logged);
#endif /* XFS_TRANS_DEBUG */

	xfs_buf_item_free_format(bip);
	kmem_zone_free(xfs_buf_item_zone, bip);
}

/*
 * This is called when the buf log item is no longer needed.  It should
 * free the buf log item associated with the given buffer and clear
 * the buffer's pointer to the buf log item.  If there are no more
 * items in the list, clear the b_iodone field of the buffer (see
 * xfs_buf_attach_iodone() below).
 */
void
xfs_buf_item_relse(
	xfs_buf_t	*bp)
{
	xfs_buf_log_item_t	*bip;

	trace_xfs_buf_item_relse(bp, _RET_IP_);

	bip = bp->b_fspriv;
	bp->b_fspriv = bip->bli_item.li_bio_list;
	if (bp->b_fspriv == NULL)
		bp->b_iodone = NULL;

	xfs_buf_rele(bp);
	xfs_buf_item_free(bip);
}

/*
 * Add the given log item with its callback to the list of callbacks
 * to be called when the buffer's I/O completes.  If it is not set
 * already, set the buffer's b_iodone() routine to be
 * xfs_buf_iodone_callbacks() and link the log item into the list of
 * items rooted at b_fsprivate.  Items are always added as the second
 * entry in the list if there is a first, because the buf item code
 * assumes that the buf log item is first.
 */
void
xfs_buf_attach_iodone(
	xfs_buf_t	*bp,
	void		(*cb)(xfs_buf_t *, xfs_log_item_t *),
	xfs_log_item_t	*lip)
{
	xfs_log_item_t	*head_lip;

	ASSERT(xfs_buf_islocked(bp));

	lip->li_cb = cb;
	head_lip = bp->b_fspriv;
	if (head_lip) {
		lip->li_bio_list = head_lip->li_bio_list;
		head_lip->li_bio_list = lip;
	} else {
		bp->b_fspriv = lip;
	}

	ASSERT(bp->b_iodone == NULL ||
	       bp->b_iodone == xfs_buf_iodone_callbacks);
	bp->b_iodone = xfs_buf_iodone_callbacks;
}

/*
 * We can have many callbacks on a buffer.  Running the callbacks individually
 * can cause a lot of contention on the AIL lock, so we allow for a single
 * callback to be able to scan the remaining lip->li_bio_list for other items
 * of the same type and callback to be processed in the first call.
 *
 * As a result, the loop walking the callback list below will also modify the
 * list.  It removes the first item from the list and then runs the callback.
 * The loop then restarts from the new head of the list.  This allows the
 * callback to scan and modify the list attached to the buffer and we don't
 * have to care about maintaining a next item pointer.
 */
STATIC void
xfs_buf_do_callbacks(
	struct xfs_buf		*bp)
{
	struct xfs_log_item	*lip;

	while ((lip = bp->b_fspriv) != NULL) {
		bp->b_fspriv = lip->li_bio_list;
		ASSERT(lip->li_cb != NULL);
		/*
		 * Clear the next pointer so we don't have any
		 * confusion if the item is added to another buf.
		 * Don't touch the log item after calling its
		 * callback, because it could have freed itself.
		 */
		lip->li_bio_list = NULL;
		lip->li_cb(bp, lip);
	}
}

/*
 * This is the iodone() function for buffers which have had callbacks
 * attached to them by xfs_buf_attach_iodone().  It should remove each
 * log item from the buffer's list and call the callback of each in turn.
 * When done, the buffer's fsprivate field is set to NULL and the buffer
 * is unlocked with a call to iodone().
 */
void
xfs_buf_iodone_callbacks(
	struct xfs_buf		*bp)
{
	struct xfs_log_item	*lip = bp->b_fspriv;
	struct xfs_mount	*mp = lip->li_mountp;
	static ulong		lasttime;
	static xfs_buftarg_t	*lasttarg;

	if (likely(!xfs_buf_geterror(bp)))
		goto do_callbacks;

	/*
	 * If we've already decided to shutdown the filesystem because of
	 * I/O errors, there's no point in giving this a retry.
	 */
	if (XFS_FORCED_SHUTDOWN(mp)) {
		xfs_buf_stale(bp);
		XFS_BUF_DONE(bp);
		trace_xfs_buf_item_iodone(bp, _RET_IP_);
		goto do_callbacks;
	}
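
	/*
	 * Rate limit the error reporting: only emit an alert if the failing
	 * target has changed or at least five seconds have passed since the
	 * last alert.
	 */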
	if (bp->b_target != lasttarg ||
	    time_after(jiffies, (lasttime + 5*HZ))) {
		lasttime = jiffies;
		xfs_buf_ioerror_alert(bp, __func__);
	}
	lasttarg = bp->b_target;

	/*
	 * If the write was asynchronous then no one will be looking for the
	 * error.  Clear the error state and write the buffer out again.
	 *
	 * XXX: This helps against transient write errors, but we need to find
	 * a way to shut the filesystem down if the writes keep failing.
	 *
	 * In practice we'll shut the filesystem down soon, as non-transient
	 * errors tend to affect the whole device and a failing log write
	 * will make us give up.  But we really ought to do better here.
	 */
	if (XFS_BUF_ISASYNC(bp)) {
		ASSERT(bp->b_iodone != NULL);

		trace_xfs_buf_item_iodone_async(bp, _RET_IP_);

		xfs_buf_ioerror(bp, 0); /* errno of 0 unsets the flag */

		if (!XFS_BUF_ISSTALE(bp)) {
			bp->b_flags |= XBF_WRITE | XBF_ASYNC | XBF_DONE;
			xfs_buf_iorequest(bp);
		} else {
			xfs_buf_relse(bp);
		}

		return;
	}

	/*
	 * If the write of the buffer was synchronous, we want to make
	 * sure to return the error to the caller of xfs_bwrite().
	 */
	xfs_buf_stale(bp);
	XFS_BUF_DONE(bp);

	trace_xfs_buf_error_relse(bp, _RET_IP_);

do_callbacks:
	xfs_buf_do_callbacks(bp);
	bp->b_fspriv = NULL;
	bp->b_iodone = NULL;
	xfs_buf_ioend(bp, 0);
}

/*
 * This is the iodone() function for buffers which have been
 * logged.  It is called when they are eventually flushed out.
 * It should remove the buf item from the AIL, and free the buf item.
 * It is called by xfs_buf_iodone_callbacks() above which will take
 * care of cleaning up the buffer itself.
 */
void
xfs_buf_iodone(
	struct xfs_buf		*bp,
	struct xfs_log_item	*lip)
{
	struct xfs_ail		*ailp = lip->li_ailp;

	ASSERT(BUF_ITEM(lip)->bli_buf == bp);

	xfs_buf_rele(bp);

	/*
	 * If we are forcibly shutting down, this may well be
	 * off the AIL already.  That's because we simulate the
	 * log-committed callbacks to unpin these buffers.  Or we may never
	 * have put this item on the AIL because the transaction was
	 * aborted forcibly.  xfs_trans_ail_delete() takes care of these.
	 *
	 * Either way, AIL is useless if we're forcing a shutdown.
	 */
	spin_lock(&ailp->xa_lock);
	xfs_trans_ail_delete(ailp, lip, SHUTDOWN_CORRUPT_INCORE);
	xfs_buf_item_free(BUF_ITEM(lip));
}