/*
 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_types.h"
#include "xfs_bit.h"
#include "xfs_log.h"
#include "xfs_trans.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
#include "xfs_mount.h"
#include "xfs_buf_item.h"
#include "xfs_trans_priv.h"
#include "xfs_error.h"
#include "xfs_trace.h"


kmem_zone_t	*xfs_buf_item_zone;

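/*
 * Convert the generic log item that the log code hands us back to the
 * buf log item that embeds it as bli_item.
 */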
static inline struct xfs_buf_log_item *BUF_ITEM(struct xfs_log_item *lip)
{
	return container_of(lip, struct xfs_buf_log_item, bli_item);
}

#ifdef XFS_TRANS_DEBUG
/*
 * This function uses an alternate strategy for tracking the bytes
 * that the user requests to be logged.  This can then be used
 * in conjunction with the bli_orig array in the buf log item to
 * catch bugs in our callers' code.
 *
 * We also double check the bits set in xfs_buf_item_log using a
 * simple algorithm to check that every byte is accounted for.
 */
STATIC void
xfs_buf_item_log_debug(
	xfs_buf_log_item_t	*bip,
	uint			first,
	uint			last)
{
	uint	x;
	uint	byte;
	uint	nbytes;
	uint	chunk_num;
	uint	word_num;
	uint	bit_num;
	uint	bit_set;
	uint	*wordp;

	ASSERT(bip->bli_logged != NULL);
	byte = first;
	nbytes = last - first + 1;
	bfset(bip->bli_logged, first, nbytes);
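	/*
	 * Worked example (illustrative): with the usual XFS_BLF_CHUNK of
	 * 128 bytes (XFS_BLF_SHIFT == 7) and 32-bit map words (NBWORD == 32,
	 * BIT_TO_WORD_SHIFT == 5), byte offset 4000 falls in chunk 31
	 * (4000 >> 7), which lives in word 0 (31 >> 5) at bit 31 (31 & 31)
	 * of blf_data_map.  The loop below checks that bit for every byte
	 * in [first, last].
	 */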
	for (x = 0; x < nbytes; x++) {
		chunk_num = byte >> XFS_BLF_SHIFT;
		word_num = chunk_num >> BIT_TO_WORD_SHIFT;
		bit_num = chunk_num & (NBWORD - 1);
		wordp = &(bip->bli_format.blf_data_map[word_num]);
		bit_set = *wordp & (1 << bit_num);
		ASSERT(bit_set);
		byte++;
	}
}

/*
 * This function is called when we flush something into a buffer without
 * logging it.  This happens for things like inodes which are logged
 * separately from the buffer.
 */
void
xfs_buf_item_flush_log_debug(
	xfs_buf_t	*bp,
	uint		first,
	uint		last)
{
	xfs_buf_log_item_t	*bip = bp->b_fspriv;
	uint			nbytes;

	if (bip == NULL || (bip->bli_item.li_type != XFS_LI_BUF))
		return;

	ASSERT(bip->bli_logged != NULL);
	nbytes = last - first + 1;
	bfset(bip->bli_logged, first, nbytes);
}

/*
 * This function is called to verify that our callers have logged
 * all the bytes that they changed.
 *
 * It does this by comparing the original copy of the buffer stored in
 * the buf log item's bli_orig array to the current copy of the buffer
 * and ensuring that all bytes which mismatch are set in the bli_logged
 * array of the buf log item.
 */
STATIC void
xfs_buf_item_log_check(
	xfs_buf_log_item_t	*bip)
{
	char		*orig;
	char		*buffer;
	int		x;
	xfs_buf_t	*bp;

	ASSERT(bip->bli_orig != NULL);
	ASSERT(bip->bli_logged != NULL);

	bp = bip->bli_buf;
	ASSERT(bp->b_length > 0);
	ASSERT(bp->b_addr != NULL);
	orig = bip->bli_orig;
	buffer = bp->b_addr;
	for (x = 0; x < BBTOB(bp->b_length); x++) {
		if (orig[x] != buffer[x] && !btst(bip->bli_logged, x)) {
			xfs_emerg(bp->b_mount,
				"%s: bip %x buffer %x orig %x index %d",
				__func__, bip, bp, orig, x);
			ASSERT(0);
		}
	}
}
#else
#define		xfs_buf_item_log_debug(x,y,z)
#define		xfs_buf_item_log_check(x)
#endif

STATIC void	xfs_buf_do_callbacks(struct xfs_buf *bp);

/*
 * This returns the number of log iovecs needed to log the
 * given buf log item.
 *
 * It calculates this as 1 iovec for the buf log format structure
 * and 1 for each stretch of non-contiguous chunks to be logged.
 * Contiguous chunks are logged in a single iovec.
 *
 * If the XFS_BLI_STALE flag has been set, then log nothing.
 */
STATIC uint
xfs_buf_item_size(
	struct xfs_log_item	*lip)
{
	struct xfs_buf_log_item	*bip = BUF_ITEM(lip);
	struct xfs_buf		*bp = bip->bli_buf;
	uint			nvecs;
	int			next_bit;
	int			last_bit;

	ASSERT(atomic_read(&bip->bli_refcount) > 0);
	if (bip->bli_flags & XFS_BLI_STALE) {
		/*
		 * The buffer is stale, so all we need to log
		 * is the buf log format structure with the
		 * cancel flag in it.
		 */
		trace_xfs_buf_item_size_stale(bip);
		ASSERT(bip->bli_format.blf_flags & XFS_BLF_CANCEL);
		return 1;
	}

	ASSERT(bip->bli_flags & XFS_BLI_LOGGED);
	nvecs = 1;
	last_bit = xfs_next_bit(bip->bli_format.blf_data_map,
					 bip->bli_format.blf_map_size, 0);
	ASSERT(last_bit != -1);
	nvecs++;
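	/*
	 * nvecs now accounts for the format structure plus the run of
	 * contiguous chunks that starts at the first set bit; the loop
	 * below only bumps it when one run ends and another begins.
	 */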
	while (last_bit != -1) {
		/*
		 * This takes the bit number to start looking from and
		 * returns the next set bit from there.  It returns -1
		 * if there are no more bits set or the start bit is
		 * beyond the end of the bitmap.
		 */
		next_bit = xfs_next_bit(bip->bli_format.blf_data_map,
						 bip->bli_format.blf_map_size,
						 last_bit + 1);
		/*
		 * If we run out of bits, leave the loop,
		 * else if we find a new set of bits bump the number of vecs,
		 * else keep scanning the current set of bits.
		 */
		if (next_bit == -1) {
			last_bit = -1;
		} else if (next_bit != last_bit + 1) {
			last_bit = next_bit;
			nvecs++;
		} else if (xfs_buf_offset(bp, next_bit * XFS_BLF_CHUNK) !=
			   (xfs_buf_offset(bp, last_bit * XFS_BLF_CHUNK) +
			    XFS_BLF_CHUNK)) {
			last_bit = next_bit;
			nvecs++;
		} else {
			last_bit++;
		}
	}

	trace_xfs_buf_item_size(bip);
	return nvecs;
}

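/*
 * Worked example for xfs_buf_item_size(): a data map with bits
 * {0, 1, 4, 5, 6} set describes two runs of contiguous chunks, so the
 * item needs one iovec for the format structure plus two for the runs,
 * i.e. three iovecs in total (assuming neither run straddles a
 * discontiguity in the buffer's backing memory, which would split it
 * further).
 */
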
/*
 * This is called to fill in the vector of log iovecs for the
 * given log buf item.  It fills the first entry with a buf log
 * format structure, and the rest point to contiguous chunks
 * within the buffer.
 */
STATIC void
xfs_buf_item_format(
	struct xfs_log_item	*lip,
	struct xfs_log_iovec	*vecp)
{
	struct xfs_buf_log_item	*bip = BUF_ITEM(lip);
	struct xfs_buf		*bp = bip->bli_buf;
	uint			base_size;
	uint			nvecs;
	int			first_bit;
	int			last_bit;
	int			next_bit;
	uint			nbits;
	uint			buffer_offset;

	ASSERT(atomic_read(&bip->bli_refcount) > 0);
	ASSERT((bip->bli_flags & XFS_BLI_LOGGED) ||
	       (bip->bli_flags & XFS_BLI_STALE));

	/*
	 * The size of the base structure is the size of the
	 * declared structure plus the space for the extra words
	 * of the bitmap.  We subtract one from the map size, because
	 * the first element of the bitmap is accounted for in the
	 * size of the base structure.
	 */
	base_size =
		(uint)(sizeof(xfs_buf_log_format_t) +
		       ((bip->bli_format.blf_map_size - 1) * sizeof(uint)));
	vecp->i_addr = &bip->bli_format;
	vecp->i_len = base_size;
	vecp->i_type = XLOG_REG_TYPE_BFORMAT;
	vecp++;
	nvecs = 1;

	/*
	 * If it is an inode buffer, transfer the in-memory state to the
	 * format flags and clear the in-memory state. We do not transfer
	 * this state if the inode buffer allocation has not yet been committed
	 * to the log as setting the XFS_BLI_INODE_BUF flag will prevent
	 * correct replay of the inode allocation.
	 */
	if (bip->bli_flags & XFS_BLI_INODE_BUF) {
		if (!((bip->bli_flags & XFS_BLI_INODE_ALLOC_BUF) &&
		      xfs_log_item_in_current_chkpt(lip)))
			bip->bli_format.blf_flags |= XFS_BLF_INODE_BUF;
		bip->bli_flags &= ~XFS_BLI_INODE_BUF;
	}

	if (bip->bli_flags & XFS_BLI_STALE) {
		/*
		 * The buffer is stale, so all we need to log
		 * is the buf log format structure with the
		 * cancel flag in it.
		 */
		trace_xfs_buf_item_format_stale(bip);
		ASSERT(bip->bli_format.blf_flags & XFS_BLF_CANCEL);
		bip->bli_format.blf_size = nvecs;
		return;
	}

	/*
	 * Fill in an iovec for each set of contiguous chunks.
	 */
	first_bit = xfs_next_bit(bip->bli_format.blf_data_map,
				 bip->bli_format.blf_map_size, 0);
	ASSERT(first_bit != -1);
	last_bit = first_bit;
	nbits = 1;
	for (;;) {
		/*
		 * This takes the bit number to start looking from and
		 * returns the next set bit from there.  It returns -1
		 * if there are no more bits set or the start bit is
		 * beyond the end of the bitmap.
		 */
		next_bit = xfs_next_bit(bip->bli_format.blf_data_map,
					bip->bli_format.blf_map_size,
					(uint)last_bit + 1);
		/*
		 * If we run out of bits fill in the last iovec and get
		 * out of the loop.
		 * Else if we start a new set of bits then fill in the
		 * iovec for the series we were looking at and start
		 * counting the bits in the new one.
		 * Else we're still in the same set of bits so just
		 * keep counting and scanning.
		 */
		if (next_bit == -1) {
			buffer_offset = first_bit * XFS_BLF_CHUNK;
			vecp->i_addr = xfs_buf_offset(bp, buffer_offset);
			vecp->i_len = nbits * XFS_BLF_CHUNK;
			vecp->i_type = XLOG_REG_TYPE_BCHUNK;
			nvecs++;
			break;
		} else if (next_bit != last_bit + 1) {
			buffer_offset = first_bit * XFS_BLF_CHUNK;
			vecp->i_addr = xfs_buf_offset(bp, buffer_offset);
			vecp->i_len = nbits * XFS_BLF_CHUNK;
			vecp->i_type = XLOG_REG_TYPE_BCHUNK;
			nvecs++;
			vecp++;
			first_bit = next_bit;
			last_bit = next_bit;
			nbits = 1;
		} else if (xfs_buf_offset(bp, next_bit << XFS_BLF_SHIFT) !=
			   (xfs_buf_offset(bp, last_bit << XFS_BLF_SHIFT) +
			    XFS_BLF_CHUNK)) {
			buffer_offset = first_bit * XFS_BLF_CHUNK;
			vecp->i_addr = xfs_buf_offset(bp, buffer_offset);
			vecp->i_len = nbits * XFS_BLF_CHUNK;
			vecp->i_type = XLOG_REG_TYPE_BCHUNK;
			/*
			 * You would think we need to bump nvecs here too,
			 * but we do not.  This number is used by recovery,
			 * and it gets confused by the boundary split here:
			 *			nvecs++;
			 */
			vecp++;
			first_bit = next_bit;
			last_bit = next_bit;
			nbits = 1;
		} else {
			last_bit++;
			nbits++;
		}
	}
	bip->bli_format.blf_size = nvecs;

	/*
	 * Check to make sure everything is consistent.
	 */
	trace_xfs_buf_item_format(bip);
	xfs_buf_item_log_check(bip);
}

/*
 * This is called to pin the buffer associated with the buf log item in memory
 * so it cannot be written out.
 *
 * We also always take a reference to the buffer log item here so that the bli
 * is held while the item is pinned in memory. This means that we can
 * unconditionally drop the reference count a transaction holds when the
 * transaction is completed.
 */
STATIC void
xfs_buf_item_pin(
	struct xfs_log_item	*lip)
{
	struct xfs_buf_log_item	*bip = BUF_ITEM(lip);

	ASSERT(atomic_read(&bip->bli_refcount) > 0);
	ASSERT((bip->bli_flags & XFS_BLI_LOGGED) ||
	       (bip->bli_flags & XFS_BLI_STALE));

	trace_xfs_buf_item_pin(bip);

	atomic_inc(&bip->bli_refcount);
	atomic_inc(&bip->bli_buf->b_pin_count);
}

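/*
 * Note: b_pin_count is what xfs_buf_ispinned() reports; while it is
 * elevated the AIL will not write the buffer back, and anyone sleeping
 * on b_waiters is woken in xfs_buf_item_unpin() once the count drops
 * back to zero.
 */
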
/*
 * This is called to unpin the buffer associated with the buf log
 * item which was previously pinned with a call to xfs_buf_item_pin().
 *
 * Also drop the reference to the buf item for the current transaction.
 * If the XFS_BLI_STALE flag is set and we are the last reference,
 * then free up the buf log item and unlock the buffer.
 *
 * If the remove flag is set we are called from uncommit in the
 * forced-shutdown path.  If that is true and the reference count on
 * the log item is going to drop to zero we need to free the item's
 * descriptor in the transaction.
 */
STATIC void
xfs_buf_item_unpin(
	struct xfs_log_item	*lip,
	int			remove)
{
	struct xfs_buf_log_item	*bip = BUF_ITEM(lip);
	xfs_buf_t		*bp = bip->bli_buf;
	struct xfs_ail		*ailp = lip->li_ailp;
	int			stale = bip->bli_flags & XFS_BLI_STALE;
	int			freed;

	ASSERT(bp->b_fspriv == bip);
	ASSERT(atomic_read(&bip->bli_refcount) > 0);

	trace_xfs_buf_item_unpin(bip);

	freed = atomic_dec_and_test(&bip->bli_refcount);

	if (atomic_dec_and_test(&bp->b_pin_count))
		wake_up_all(&bp->b_waiters);

	if (freed && stale) {
		ASSERT(bip->bli_flags & XFS_BLI_STALE);
		ASSERT(xfs_buf_islocked(bp));
		ASSERT(XFS_BUF_ISSTALE(bp));
		ASSERT(bip->bli_format.blf_flags & XFS_BLF_CANCEL);

		trace_xfs_buf_item_unpin_stale(bip);

		if (remove) {
			/*
			 * If we are in a transaction context, we have to
			 * remove the log item from the transaction as we are
			 * about to release our reference to the buffer.  If we
			 * don't, the unlock that occurs later in
			 * xfs_trans_uncommit() will try to reference the
			 * buffer which we no longer have a hold on.
			 */
			if (lip->li_desc)
				xfs_trans_del_item(lip);

			/*
			 * Since the transaction no longer refers to the buffer,
			 * the buffer should no longer refer to the transaction.
			 */
			bp->b_transp = NULL;
		}

		/*
		 * If we get called here because of an IO error, we may
		 * or may not have the item on the AIL. xfs_trans_ail_delete()
		 * will take care of that situation.
		 * xfs_trans_ail_delete() drops the AIL lock.
		 */
		if (bip->bli_flags & XFS_BLI_STALE_INODE) {
			xfs_buf_do_callbacks(bp);
			bp->b_fspriv = NULL;
			bp->b_iodone = NULL;
		} else {
			spin_lock(&ailp->xa_lock);
			xfs_trans_ail_delete(ailp, lip, SHUTDOWN_LOG_IO_ERROR);
			xfs_buf_item_relse(bp);
			ASSERT(bp->b_fspriv == NULL);
		}
		xfs_buf_relse(bp);
	} else if (freed && remove) {
		xfs_buf_lock(bp);
		xfs_buf_ioerror(bp, EIO);
		XFS_BUF_UNDONE(bp);
		xfs_buf_stale(bp);
		xfs_buf_ioend(bp, 0);
	}
}

STATIC uint
xfs_buf_item_push(
	struct xfs_log_item	*lip,
	struct list_head	*buffer_list)
{
	struct xfs_buf_log_item	*bip = BUF_ITEM(lip);
	struct xfs_buf		*bp = bip->bli_buf;
	uint			rval = XFS_ITEM_SUCCESS;

	if (xfs_buf_ispinned(bp))
		return XFS_ITEM_PINNED;
	if (!xfs_buf_trylock(bp))
		return XFS_ITEM_LOCKED;

	ASSERT(!(bip->bli_flags & XFS_BLI_STALE));

	trace_xfs_buf_item_push(bip);

	if (!xfs_buf_delwri_queue(bp, buffer_list))
		rval = XFS_ITEM_FLUSHING;
	xfs_buf_unlock(bp);
	return rval;
}

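/*
 * Note on the return values above, as the AIL pushing code interprets
 * them: XFS_ITEM_PINNED asks the caller to force the log and retry,
 * XFS_ITEM_LOCKED means someone else holds the buffer so skip it for
 * now, XFS_ITEM_FLUSHING means writeback is already in progress, and
 * XFS_ITEM_SUCCESS means the buffer was queued on the caller's delwri
 * list for submission.
 */
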
/*
 * Release the buffer associated with the buf log item.  If there is no dirty
 * logged data associated with the buffer recorded in the buf log item, then
 * free the buf log item and remove the reference to it in the buffer.
 *
 * This call ignores the recursion count.  It is only called when the buffer
 * should REALLY be unlocked, regardless of the recursion count.
 *
 * We unconditionally drop the transaction's reference to the log item. If the
 * item was logged, then another reference was taken when it was pinned, so we
 * can safely drop the transaction reference now.  This also allows us to avoid
 * potential races with the unpin code freeing the bli by not referencing the
 * bli after we've dropped the reference count.
 *
 * If the XFS_BLI_HOLD flag is set in the buf log item, then free the log item
 * if necessary but do not unlock the buffer.  This is for support of
 * xfs_trans_bhold(). Make sure the XFS_BLI_HOLD field is cleared if we don't
 * free the item.
 */
STATIC void
xfs_buf_item_unlock(
	struct xfs_log_item	*lip)
{
	struct xfs_buf_log_item	*bip = BUF_ITEM(lip);
	struct xfs_buf		*bp = bip->bli_buf;
	int			aborted;
	uint			hold;

	/* Clear the buffer's association with this transaction. */
	bp->b_transp = NULL;

	/*
	 * If this is a transaction abort, don't return early.  Instead, allow
	 * the brelse to happen.  Normally it would be done for stale
	 * (cancelled) buffers at unpin time, but we'll never go through the
	 * pin/unpin cycle if we abort inside commit.
	 */
	aborted = (lip->li_flags & XFS_LI_ABORTED) != 0;

	/*
	 * Before possibly freeing the buf item, determine if we should
	 * release the buffer at the end of this routine.
	 */
	hold = bip->bli_flags & XFS_BLI_HOLD;

	/* Clear the per transaction state. */
	bip->bli_flags &= ~(XFS_BLI_LOGGED | XFS_BLI_HOLD);

	/*
	 * If the buf item is marked stale, then don't do anything.  We'll
	 * unlock the buffer and free the buf item when the buffer is unpinned
	 * for the last time.
	 */
	if (bip->bli_flags & XFS_BLI_STALE) {
		trace_xfs_buf_item_unlock_stale(bip);
		ASSERT(bip->bli_format.blf_flags & XFS_BLF_CANCEL);
		if (!aborted) {
			atomic_dec(&bip->bli_refcount);
			return;
		}
	}

	trace_xfs_buf_item_unlock(bip);

	/*
	 * If the buf item isn't tracking any data, free it, otherwise drop the
	 * reference we hold to it.
	 */
	if (xfs_bitmap_empty(bip->bli_format.blf_data_map,
			     bip->bli_format.blf_map_size))
		xfs_buf_item_relse(bp);
	else
		atomic_dec(&bip->bli_refcount);

	if (!hold)
		xfs_buf_relse(bp);
}

/*
 * This is called to find out where the oldest active copy of the
 * buf log item in the on disk log resides now that the last log
 * write of it completed at the given lsn.
 * We always re-log all the dirty data in a buffer, so usually the
 * latest copy in the on disk log is the only one that matters.  For
 * those cases we simply return the given lsn.
 *
 * The one exception to this is for buffers full of newly allocated
 * inodes.  These buffers are only relogged with the XFS_BLI_INODE_BUF
 * flag set, indicating that only the di_next_unlinked fields from the
 * inodes in the buffers will be replayed during recovery.  If the
 * original newly allocated inode images have not yet been flushed
 * when the buffer is so relogged, then we need to make sure that we
 * keep the old images in the 'active' portion of the log.  We do this
 * by returning the original lsn of that transaction here rather than
 * the current one.
 */
STATIC xfs_lsn_t
xfs_buf_item_committed(
	struct xfs_log_item	*lip,
	xfs_lsn_t		lsn)
{
	struct xfs_buf_log_item	*bip = BUF_ITEM(lip);

	trace_xfs_buf_item_committed(bip);

	if ((bip->bli_flags & XFS_BLI_INODE_ALLOC_BUF) && lip->li_lsn != 0)
		return lip->li_lsn;
	return lsn;
}

STATIC void
xfs_buf_item_committing(
	struct xfs_log_item	*lip,
	xfs_lsn_t		commit_lsn)
{
}

/*
 * This is the ops vector shared by all buf log items.
 */
static const struct xfs_item_ops xfs_buf_item_ops = {
	.iop_size	= xfs_buf_item_size,
	.iop_format	= xfs_buf_item_format,
	.iop_pin	= xfs_buf_item_pin,
	.iop_unpin	= xfs_buf_item_unpin,
	.iop_unlock	= xfs_buf_item_unlock,
	.iop_committed	= xfs_buf_item_committed,
	.iop_push	= xfs_buf_item_push,
	.iop_committing = xfs_buf_item_committing
};

/*
 * Allocate a new buf log item to go with the given buffer.
 * Set the buffer's b_fsprivate field to point to the new
 * buf log item.  If there are other items attached to the
 * buffer (see xfs_buf_attach_iodone() below), then put the
 * buf log item at the front.
 */
void
xfs_buf_item_init(
	xfs_buf_t	*bp,
	xfs_mount_t	*mp)
{
	xfs_log_item_t		*lip = bp->b_fspriv;
	xfs_buf_log_item_t	*bip;
	int			chunks;
	int			map_size;

	/*
	 * Check to see if there is already a buf log item for
	 * this buffer.  If there is, it is guaranteed to be
	 * the first.  If we do already have one, there is
	 * nothing to do here so return.
	 */
	ASSERT(bp->b_target->bt_mount == mp);
	if (lip != NULL && lip->li_type == XFS_LI_BUF)
		return;

	/*
	 * chunks is the number of XFS_BLF_CHUNK size pieces
	 * the buffer can be divided into. Make sure not to
	 * truncate any pieces.  map_size is the size of the
	 * bitmap needed to describe the chunks of the buffer.
	 */
	chunks = (int)((BBTOB(bp->b_length) + (XFS_BLF_CHUNK - 1)) >>
		       XFS_BLF_SHIFT);
	map_size = (int)((chunks + NBWORD) >> BIT_TO_WORD_SHIFT);
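	/*
	 * Worked example (illustrative, with the usual constants
	 * XFS_BLF_CHUNK == 128, XFS_BLF_SHIFT == 7, NBWORD == 32,
	 * BIT_TO_WORD_SHIFT == 5): a 4096-byte buffer divides into 32
	 * chunks and map_size comes out as (32 + 32) >> 5 == 2 words,
	 * which rounds up past the 32 bits strictly required.
	 */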
	bip = (xfs_buf_log_item_t*)kmem_zone_zalloc(xfs_buf_item_zone,
						    KM_SLEEP);
	xfs_log_item_init(mp, &bip->bli_item, XFS_LI_BUF, &xfs_buf_item_ops);
	bip->bli_buf = bp;
	xfs_buf_hold(bp);
	bip->bli_format.blf_type = XFS_LI_BUF;
	bip->bli_format.blf_blkno = (__int64_t)XFS_BUF_ADDR(bp);
	bip->bli_format.blf_len = (ushort)bp->b_length;
	bip->bli_format.blf_map_size = map_size;

#ifdef XFS_TRANS_DEBUG
	/*
	 * Allocate the arrays for tracking what needs to be logged
	 * and what our callers request to be logged.  bli_orig
	 * holds a copy of the original, clean buffer for comparison
	 * against, and bli_logged keeps a 1 bit flag per byte in
	 * the buffer to indicate which bytes the callers have asked
	 * to have logged.
	 */
	bip->bli_orig = kmem_alloc(BBTOB(bp->b_length), KM_SLEEP);
	memcpy(bip->bli_orig, bp->b_addr, BBTOB(bp->b_length));
	bip->bli_logged = kmem_zalloc(BBTOB(bp->b_length) / NBBY, KM_SLEEP);
#endif

	/*
	 * Put the buf item into the list of items attached to the
	 * buffer at the front.
	 */
	if (bp->b_fspriv)
		bip->bli_item.li_bio_list = bp->b_fspriv;
	bp->b_fspriv = bip;
}

/*
 * Mark bytes first through last inclusive as dirty in the buf
 * item's bitmap.
 */
void
xfs_buf_item_log(
	xfs_buf_log_item_t	*bip,
	uint			first,
	uint			last)
{
	uint		first_bit;
	uint		last_bit;
	uint		bits_to_set;
	uint		bits_set;
	uint		word_num;
	uint		*wordp;
	uint		bit;
	uint		end_bit;
	uint		mask;

	/*
	 * Mark the item as having some dirty data for
	 * quick reference in xfs_buf_item_dirty.
	 */
	bip->bli_flags |= XFS_BLI_DIRTY;

	/*
	 * Convert byte offsets to bit numbers.
	 */
	first_bit = first >> XFS_BLF_SHIFT;
	last_bit = last >> XFS_BLF_SHIFT;

	/*
	 * Calculate the total number of bits to be set.
	 */
	bits_to_set = last_bit - first_bit + 1;
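	/*
	 * Worked example (illustrative, assuming 128-byte chunks): logging
	 * bytes 100..799 gives first_bit = 0 and last_bit = 6, so
	 * bits_to_set = 7.  Bit 0 starts the first word, so the partial
	 * leading-word case below is skipped, the whole-word loop never
	 * runs (7 < NBWORD), and the final partial-word step sets
	 * mask = 0x7f in the first map word.
	 */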
	/*
	 * Get a pointer to the first word in the bitmap
	 * to set a bit in.
	 */
	word_num = first_bit >> BIT_TO_WORD_SHIFT;
	wordp = &(bip->bli_format.blf_data_map[word_num]);

	/*
	 * Calculate the starting bit in the first word.
	 */
	bit = first_bit & (uint)(NBWORD - 1);

	/*
	 * First set any bits in the first word of our range.
	 * If it starts at bit 0 of the word, it will be
	 * set below rather than here.  That is what the variable
	 * bit tells us. The variable bits_set tracks the number
	 * of bits that have been set so far.  End_bit is the number
	 * of the last bit to be set in this word plus one.
	 */
	if (bit) {
		end_bit = MIN(bit + bits_to_set, (uint)NBWORD);
		mask = ((1 << (end_bit - bit)) - 1) << bit;
		*wordp |= mask;
		wordp++;
		bits_set = end_bit - bit;
	} else {
		bits_set = 0;
	}

	/*
	 * Now set bits a whole word at a time that are between
	 * first_bit and last_bit.
	 */
	while ((bits_to_set - bits_set) >= NBWORD) {
		*wordp |= 0xffffffff;
		bits_set += NBWORD;
		wordp++;
	}

	/*
	 * Finally, set any bits left to be set in one last partial word.
	 */
	end_bit = bits_to_set - bits_set;
	if (end_bit) {
		mask = (1 << end_bit) - 1;
		*wordp |= mask;
	}

	xfs_buf_item_log_debug(bip, first, last);
}

/*
 * Return 1 if the buffer has some data that has been logged (at any
 * point, not just the current transaction) and 0 if not.
 */
uint
xfs_buf_item_dirty(
	xfs_buf_log_item_t	*bip)
{
	return (bip->bli_flags & XFS_BLI_DIRTY);
}

STATIC void
xfs_buf_item_free(
	xfs_buf_log_item_t	*bip)
{
#ifdef XFS_TRANS_DEBUG
	kmem_free(bip->bli_orig);
	kmem_free(bip->bli_logged);
#endif /* XFS_TRANS_DEBUG */

	kmem_zone_free(xfs_buf_item_zone, bip);
}

/*
 * This is called when the buf log item is no longer needed.  It should
 * free the buf log item associated with the given buffer and clear
 * the buffer's pointer to the buf log item.  If there are no more
 * items in the list, clear the b_iodone field of the buffer (see
 * xfs_buf_attach_iodone() below).
 */
void
xfs_buf_item_relse(
	xfs_buf_t	*bp)
{
	xfs_buf_log_item_t	*bip;

	trace_xfs_buf_item_relse(bp, _RET_IP_);

	bip = bp->b_fspriv;
	bp->b_fspriv = bip->bli_item.li_bio_list;
	if (bp->b_fspriv == NULL)
		bp->b_iodone = NULL;

	xfs_buf_rele(bp);
	xfs_buf_item_free(bip);
}

/*
 * Add the given log item with its callback to the list of callbacks
 * to be called when the buffer's I/O completes.  If it is not set
 * already, set the buffer's b_iodone() routine to be
 * xfs_buf_iodone_callbacks() and link the log item into the list of
 * items rooted at b_fsprivate.  Items are always added as the second
 * entry in the list if there is a first, because the buf item code
 * assumes that the buf log item is first.
 */
void
xfs_buf_attach_iodone(
	xfs_buf_t	*bp,
	void		(*cb)(xfs_buf_t *, xfs_log_item_t *),
	xfs_log_item_t	*lip)
{
	xfs_log_item_t	*head_lip;

	ASSERT(xfs_buf_islocked(bp));

	lip->li_cb = cb;
	head_lip = bp->b_fspriv;
	if (head_lip) {
		lip->li_bio_list = head_lip->li_bio_list;
		head_lip->li_bio_list = lip;
	} else {
		bp->b_fspriv = lip;
	}

	ASSERT(bp->b_iodone == NULL ||
	       bp->b_iodone == xfs_buf_iodone_callbacks);
	bp->b_iodone = xfs_buf_iodone_callbacks;
}

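/*
 * Usage note: inode flushing is a typical caller; it attaches
 * xfs_iflush_done() to each inode log item flushed into the buffer,
 * roughly as
 *
 *	xfs_buf_attach_iodone(bp, xfs_iflush_done, &iip->ili_item);
 *
 * so the inode items can be removed from the AIL once the buffer
 * write completes.
 */
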
/*
 * We can have many callbacks on a buffer. Running the callbacks individually
 * can cause a lot of contention on the AIL lock, so we allow for a single
 * callback to be able to scan the remaining lip->li_bio_list for other items
 * of the same type and callback to be processed in the first call.
 *
 * As a result, the loop walking the callback list below will also modify the
 * list.  It removes the first item from the list and then runs the callback.
 * The loop then restarts from the new head of the list.  This allows the
 * callback to scan and modify the list attached to the buffer and we don't
 * have to care about maintaining a next item pointer.
 */
STATIC void
xfs_buf_do_callbacks(
	struct xfs_buf	*bp)
{
	struct xfs_log_item	*lip;

	while ((lip = bp->b_fspriv) != NULL) {
		bp->b_fspriv = lip->li_bio_list;
		ASSERT(lip->li_cb != NULL);
		/*
		 * Clear the next pointer so we don't have any
		 * confusion if the item is added to another buf.
		 * Don't touch the log item after calling its
		 * callback, because it could have freed itself.
		 */
		lip->li_bio_list = NULL;
		lip->li_cb(bp, lip);
	}
}

/*
 * This is the iodone() function for buffers which have had callbacks
 * attached to them by xfs_buf_attach_iodone().  It should remove each
 * log item from the buffer's list and call the callback of each in turn.
 * When done, the buffer's fsprivate field is set to NULL and the buffer
 * is unlocked with a call to iodone().
 */
void
xfs_buf_iodone_callbacks(
	struct xfs_buf		*bp)
{
	struct xfs_log_item	*lip = bp->b_fspriv;
	struct xfs_mount	*mp = lip->li_mountp;
	static ulong		lasttime;
	static xfs_buftarg_t	*lasttarg;

	if (likely(!xfs_buf_geterror(bp)))
		goto do_callbacks;

	/*
	 * If we've already decided to shutdown the filesystem because of
	 * I/O errors, there's no point in giving this a retry.
	 */
	if (XFS_FORCED_SHUTDOWN(mp)) {
		xfs_buf_stale(bp);
		XFS_BUF_DONE(bp);
		trace_xfs_buf_item_iodone(bp, _RET_IP_);
		goto do_callbacks;
	}

	if (bp->b_target != lasttarg ||
	    time_after(jiffies, (lasttime + 5*HZ))) {
		lasttime = jiffies;
		xfs_buf_ioerror_alert(bp, __func__);
	}
	lasttarg = bp->b_target;

	/*
	 * If the write was asynchronous then no one will be looking for the
	 * error.  Clear the error state and write the buffer out again.
	 *
	 * XXX: This helps against transient write errors, but we need to find
	 * a way to shut the filesystem down if the writes keep failing.
	 *
	 * In practice we'll shut the filesystem down soon, as non-transient
	 * errors tend to affect the whole device and a failing log write
	 * will make us give up.  But we really ought to do better here.
	 */
	if (XFS_BUF_ISASYNC(bp)) {
		ASSERT(bp->b_iodone != NULL);

		trace_xfs_buf_item_iodone_async(bp, _RET_IP_);

		xfs_buf_ioerror(bp, 0); /* errno of 0 unsets the flag */

		if (!XFS_BUF_ISSTALE(bp)) {
			bp->b_flags |= XBF_WRITE | XBF_ASYNC | XBF_DONE;
			xfs_bdstrat_cb(bp);
		} else {
			xfs_buf_relse(bp);
		}

		return;
	}

	/*
	 * If the write of the buffer was synchronous, we want to make
	 * sure to return the error to the caller of xfs_bwrite().
	 */
	xfs_buf_stale(bp);
	XFS_BUF_DONE(bp);

	trace_xfs_buf_error_relse(bp, _RET_IP_);

do_callbacks:
	xfs_buf_do_callbacks(bp);
	bp->b_fspriv = NULL;
	bp->b_iodone = NULL;
	xfs_buf_ioend(bp, 0);
}

/*
 * This is the iodone() function for buffers which have been
 * logged.  It is called when they are eventually flushed out.
 * It should remove the buf item from the AIL, and free the buf item.
 * It is called by xfs_buf_iodone_callbacks() above which will take
 * care of cleaning up the buffer itself.
 */
void
xfs_buf_iodone(
	struct xfs_buf		*bp,
	struct xfs_log_item	*lip)
{
	struct xfs_ail		*ailp = lip->li_ailp;

	ASSERT(BUF_ITEM(lip)->bli_buf == bp);

	xfs_buf_rele(bp);

	/*
	 * If we are forcibly shutting down, this may well be
	 * off the AIL already. That's because we simulate the
	 * log-committed callbacks to unpin these buffers. Or we may never
	 * have put this item on AIL because the transaction was aborted
	 * forcibly. xfs_trans_ail_delete() takes care of these.
	 *
	 * Either way, AIL is useless if we're forcing a shutdown.
	 */
	spin_lock(&ailp->xa_lock);
	xfs_trans_ail_delete(ailp, lip, SHUTDOWN_CORRUPT_INCORE);
	xfs_buf_item_free(BUF_ITEM(lip));
}