fs/xfs/xfs_trans_buf.c
/*
 * Copyright (c) 2000-2002,2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_inode.h"
#include "xfs_trans.h"
#include "xfs_buf_item.h"
#include "xfs_trans_priv.h"
#include "xfs_error.h"
#include "xfs_trace.h"
/*
 * Check to see if a buffer matching the given parameters is already
 * a part of the given transaction.
 */
STATIC struct xfs_buf *
xfs_trans_buf_item_match(
	struct xfs_trans	*tp,
	struct xfs_buftarg	*target,
	struct xfs_buf_map	*map,
	int			nmaps)
{
	struct xfs_log_item_desc *lidp;
	struct xfs_buf_log_item	*blip;
	int			len = 0;
	int			i;

	for (i = 0; i < nmaps; i++)
		len += map[i].bm_len;

	list_for_each_entry(lidp, &tp->t_items, lid_trans) {
		blip = (struct xfs_buf_log_item *)lidp->lid_item;
		if (blip->bli_item.li_type == XFS_LI_BUF &&
		    blip->bli_buf->b_target == target &&
		    XFS_BUF_ADDR(blip->bli_buf) == map[0].bm_bn &&
		    blip->bli_buf->b_length == len) {
			ASSERT(blip->bli_buf->b_map_count == nmaps);
			return blip->bli_buf;
		}
	}

	return NULL;
}
/*
 * Add the locked buffer to the transaction.
 *
 * The buffer must be locked, and it cannot be associated with any
 * transaction.
 *
 * If the buffer does not yet have a buf log item associated with it,
 * then allocate one for it.  Then add the buf item to the transaction.
 */
STATIC void
_xfs_trans_bjoin(
	struct xfs_trans	*tp,
	struct xfs_buf		*bp,
	int			reset_recur)
{
	struct xfs_buf_log_item	*bip;

	ASSERT(bp->b_transp == NULL);

	/*
	 * The xfs_buf_log_item pointer is stored in b_fsprivate.  If
	 * it doesn't have one yet, then allocate one and initialize it.
	 * The checks to see if one is there are in xfs_buf_item_init().
	 */
	xfs_buf_item_init(bp, tp->t_mountp);
	bip = bp->b_fspriv;
	ASSERT(!(bip->bli_flags & XFS_BLI_STALE));
	ASSERT(!(bip->__bli_format.blf_flags & XFS_BLF_CANCEL));
	ASSERT(!(bip->bli_flags & XFS_BLI_LOGGED));
	if (reset_recur)
		bip->bli_recur = 0;

	/*
	 * Take a reference for this transaction on the buf item.
	 */
	atomic_inc(&bip->bli_refcount);

	/*
	 * Get a log_item_desc to point at the new item.
	 */
	xfs_trans_add_item(tp, &bip->bli_item);

	/*
	 * Initialize b_fsprivate2 so we can find it with incore_match()
	 * in xfs_trans_get_buf() and friends above.
	 */
	bp->b_transp = tp;
}
void
xfs_trans_bjoin(
	struct xfs_trans	*tp,
	struct xfs_buf		*bp)
{
	_xfs_trans_bjoin(tp, bp, 0);
	trace_xfs_trans_bjoin(bp->b_fspriv);
}
/*
 * Get and lock the buffer for the caller if it is not already
 * locked within the given transaction.  If it is already locked
 * within the transaction, just increment its lock recursion count
 * and return a pointer to it.
 *
 * If the transaction pointer is NULL, make this just a normal
 * get_buf() call.
 */
struct xfs_buf *
xfs_trans_get_buf_map(
	struct xfs_trans	*tp,
	struct xfs_buftarg	*target,
	struct xfs_buf_map	*map,
	int			nmaps,
	xfs_buf_flags_t		flags)
{
	xfs_buf_t		*bp;
	xfs_buf_log_item_t	*bip;

	if (!tp)
		return xfs_buf_get_map(target, map, nmaps, flags);

	/*
	 * If we find the buffer in the cache with this transaction
	 * pointer in its b_fsprivate2 field, then we know we already
	 * have it locked.  In this case we just increment the lock
	 * recursion count and return the buffer to the caller.
	 */
	bp = xfs_trans_buf_item_match(tp, target, map, nmaps);
	if (bp != NULL) {
		ASSERT(xfs_buf_islocked(bp));
		if (XFS_FORCED_SHUTDOWN(tp->t_mountp)) {
			xfs_buf_stale(bp);
			XFS_BUF_DONE(bp);
		}

		ASSERT(bp->b_transp == tp);
		bip = bp->b_fspriv;
		ASSERT(bip != NULL);
		ASSERT(atomic_read(&bip->bli_refcount) > 0);
		bip->bli_recur++;
		trace_xfs_trans_get_buf_recur(bip);
		return bp;
	}

	bp = xfs_buf_get_map(target, map, nmaps, flags);
	if (bp == NULL)
		return NULL;

	ASSERT(!bp->b_error);

	_xfs_trans_bjoin(tp, bp, 1);
	trace_xfs_trans_get_buf(bp->b_fspriv);
	return bp;
}
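
/*
 * Illustrative sketch, not taken from this file: the caller-side pattern
 * this function supports.  "tp", "mp", "blkno" and "numblks" are assumed to
 * come from the caller, and the single-map xfs_trans_get_buf() wrapper is
 * used for brevity.
 *
 *	struct xfs_buf	*bp;
 *
 *	bp = xfs_trans_get_buf(tp, mp->m_ddev_targp, blkno, numblks, 0);
 *	if (!bp)
 *		return -ENOMEM;
 *	...modify the contents at bp->b_addr...
 *	xfs_trans_log_buf(tp, bp, 0, BBTOB(bp->b_length) - 1);
 *
 * Asking for the same buffer again within the same transaction only bumps
 * bli_recur, so helper functions may call this without tracking whether the
 * buffer has already been joined.
 */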
/*
 * Get and lock the superblock buffer of this file system for the
 * given transaction.
 *
 * We don't need to use incore_match() here, because the superblock
 * buffer is a private buffer which we keep a pointer to in the
 * mount structure.
 */
xfs_buf_t *
xfs_trans_getsb(xfs_trans_t	*tp,
		struct xfs_mount *mp,
		int		flags)
{
	xfs_buf_t		*bp;
	xfs_buf_log_item_t	*bip;

	/*
	 * Default to just trying to lock the superblock buffer
	 * if tp is NULL.
	 */
	if (tp == NULL)
		return xfs_getsb(mp, flags);

	/*
	 * If the superblock buffer already has this transaction
	 * pointer in its b_fsprivate2 field, then we know we already
	 * have it locked.  In this case we just increment the lock
	 * recursion count and return the buffer to the caller.
	 */
	bp = mp->m_sb_bp;
	if (bp->b_transp == tp) {
		bip = bp->b_fspriv;
		ASSERT(bip != NULL);
		ASSERT(atomic_read(&bip->bli_refcount) > 0);
		bip->bli_recur++;
		trace_xfs_trans_getsb_recur(bip);
		return bp;
	}

	bp = xfs_getsb(mp, flags);
	if (bp == NULL)
		return NULL;

	_xfs_trans_bjoin(tp, bp, 1);
	trace_xfs_trans_getsb(bp->b_fspriv);
	return bp;
}
/*
 * Get and lock the buffer for the caller if it is not already
 * locked within the given transaction.  If it has not yet been
 * read in, read it from disk.  If it is already locked
 * within the transaction and already read in, just increment its
 * lock recursion count and return a pointer to it.
 *
 * If the transaction pointer is NULL, make this just a normal
 * read_buf() call.
 */
int
xfs_trans_read_buf_map(
	struct xfs_mount	*mp,
	struct xfs_trans	*tp,
	struct xfs_buftarg	*target,
	struct xfs_buf_map	*map,
	int			nmaps,
	xfs_buf_flags_t		flags,
	struct xfs_buf		**bpp,
	const struct xfs_buf_ops *ops)
{
	struct xfs_buf		*bp = NULL;
	struct xfs_buf_log_item	*bip;
	int			error;

	*bpp = NULL;
	/*
	 * If we find the buffer in the cache with this transaction
	 * pointer in its b_fsprivate2 field, then we know we already
	 * have it locked.  If it is already read in we just increment
	 * the lock recursion count and return the buffer to the caller.
	 * If the buffer is not yet read in, then we read it in, increment
	 * the lock recursion count, and return it to the caller.
	 */
	if (tp)
		bp = xfs_trans_buf_item_match(tp, target, map, nmaps);
	if (bp) {
		ASSERT(xfs_buf_islocked(bp));
		ASSERT(bp->b_transp == tp);
		ASSERT(bp->b_fspriv != NULL);
		ASSERT(!bp->b_error);
		ASSERT(bp->b_flags & XBF_DONE);

		/*
		 * We never locked this buf ourselves, so we shouldn't
		 * brelse it either.  Just get out.
		 */
		if (XFS_FORCED_SHUTDOWN(mp)) {
			trace_xfs_trans_read_buf_shut(bp, _RET_IP_);
			return -EIO;
		}

		bip = bp->b_fspriv;
		bip->bli_recur++;

		ASSERT(atomic_read(&bip->bli_refcount) > 0);
		trace_xfs_trans_read_buf_recur(bip);
		*bpp = bp;
		return 0;
	}

	bp = xfs_buf_read_map(target, map, nmaps, flags, ops);
	if (!bp) {
		if (!(flags & XBF_TRYLOCK))
			return -ENOMEM;
		return tp ? 0 : -EAGAIN;
	}

	/*
	 * If we've had a read error, then the contents of the buffer are
	 * invalid and should not be used.  To ensure that a followup read
	 * tries to pull the buffer from disk again, we clear the XBF_DONE
	 * flag and mark the buffer stale.  This ensures that anyone who has
	 * a current reference to the buffer will interpret its contents
	 * correctly and future cache lookups will also treat it as an empty,
	 * uninitialised buffer.
	 */
	if (bp->b_error) {
		error = bp->b_error;
		if (!XFS_FORCED_SHUTDOWN(mp))
			xfs_buf_ioerror_alert(bp, __func__);
		bp->b_flags &= ~XBF_DONE;
		xfs_buf_stale(bp);

		if (tp && (tp->t_flags & XFS_TRANS_DIRTY))
			xfs_force_shutdown(tp->t_mountp, SHUTDOWN_META_IO_ERROR);
		xfs_buf_relse(bp);

		/* bad CRC means corrupted metadata */
		if (error == -EFSBADCRC)
			error = -EFSCORRUPTED;
		return error;
	}

	if (XFS_FORCED_SHUTDOWN(mp)) {
		xfs_buf_relse(bp);
		trace_xfs_trans_read_buf_shut(bp, _RET_IP_);
		return -EIO;
	}

	if (tp) {
		_xfs_trans_bjoin(tp, bp, 1);
		trace_xfs_trans_read_buf(bp->b_fspriv);
	}
	*bpp = bp;
	return 0;
}
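
/*
 * Illustrative sketch, not taken from this file: reading a verified metadata
 * buffer under a transaction through the single-map xfs_trans_read_buf()
 * wrapper.  "blkno", "numblks" and "ops" are placeholders supplied by the
 * caller.
 *
 *	struct xfs_buf	*bp;
 *	int		error;
 *
 *	error = xfs_trans_read_buf(mp, tp, mp->m_ddev_targp, blkno, numblks,
 *				   0, &bp, ops);
 *	if (error)
 *		return error;
 *	...bp is locked, read in and joined to tp at this point...
 *
 * Note the trylock case above: with XBF_TRYLOCK and a non-NULL tp, a failed
 * lookup returns 0 with *bpp still NULL, so such callers must check the
 * buffer pointer as well as the error code.
 */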
/*
 * Release the buffer bp which was previously acquired with one of the
 * xfs_trans_... buffer allocation routines if the buffer has not
 * been modified within this transaction.  If the buffer is modified
 * within this transaction, do decrement the recursion count but do
 * not release the buffer even if the count goes to 0.  If the buffer is not
 * modified within the transaction, decrement the recursion count and
 * release the buffer if the recursion count goes to 0.
 *
 * If the buffer is to be released and it was not modified before
 * this transaction began, then free the buf_log_item associated with it.
 *
 * If the transaction pointer is NULL, make this just a normal
 * brelse() call.
 */
void
xfs_trans_brelse(xfs_trans_t	*tp,
		 xfs_buf_t	*bp)
{
	xfs_buf_log_item_t	*bip;

	/*
	 * Default to a normal brelse() call if the tp is NULL.
	 */
	if (tp == NULL) {
		ASSERT(bp->b_transp == NULL);
		xfs_buf_relse(bp);
		return;
	}

	ASSERT(bp->b_transp == tp);
	bip = bp->b_fspriv;
	ASSERT(bip->bli_item.li_type == XFS_LI_BUF);
	ASSERT(!(bip->bli_flags & XFS_BLI_STALE));
	ASSERT(!(bip->__bli_format.blf_flags & XFS_BLF_CANCEL));
	ASSERT(atomic_read(&bip->bli_refcount) > 0);

	trace_xfs_trans_brelse(bip);

	/*
	 * If the release is just for a recursive lock,
	 * then decrement the count and return.
	 */
	if (bip->bli_recur > 0) {
		bip->bli_recur--;
		return;
	}

	/*
	 * If the buffer is dirty within this transaction, we can't
	 * release it until we commit.
	 */
	if (bip->bli_item.li_desc->lid_flags & XFS_LID_DIRTY)
		return;

	/*
	 * If the buffer has been invalidated, then we can't release
	 * it until the transaction commits to disk unless it is re-dirtied
	 * as part of this transaction.  This prevents us from pulling
	 * the item from the AIL before we should.
	 */
	if (bip->bli_flags & XFS_BLI_STALE)
		return;

	ASSERT(!(bip->bli_flags & XFS_BLI_LOGGED));

	/*
	 * Free up the log item descriptor tracking the released item.
	 */
	xfs_trans_del_item(&bip->bli_item);

	/*
	 * Clear the hold flag in the buf log item if it is set.
	 * We wouldn't want the next user of the buffer to
	 * get confused.
	 */
	if (bip->bli_flags & XFS_BLI_HOLD) {
		bip->bli_flags &= ~XFS_BLI_HOLD;
	}

	/*
	 * Drop our reference to the buf log item.
	 */
	atomic_dec(&bip->bli_refcount);

	/*
	 * If the buf item is not tracking data in the log, then
	 * we must free it before releasing the buffer back to the
	 * free pool.  Before releasing the buffer to the free pool,
	 * clear the transaction pointer in b_fsprivate2 to dissolve
	 * its relation to this transaction.
	 */
	if (!xfs_buf_item_dirty(bip)) {
		/***
		ASSERT(bp->b_pincount == 0);
		***/
		ASSERT(atomic_read(&bip->bli_refcount) == 0);
		ASSERT(!(bip->bli_item.li_flags & XFS_LI_IN_AIL));
		ASSERT(!(bip->bli_flags & XFS_BLI_INODE_ALLOC_BUF));
		xfs_buf_item_relse(bp);
	}

	bp->b_transp = NULL;
	xfs_buf_relse(bp);
}
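
/*
 * Illustrative sketch, not taken from this file: a read-only lookup that
 * hands the buffer back as soon as the caller is done with it.  If the
 * buffer was never dirtied in the transaction, the buf log item is torn
 * down here; if it was dirtied, the actual release is deferred until the
 * transaction commits.
 *
 *	error = xfs_trans_read_buf(mp, tp, mp->m_ddev_targp, blkno, numblks,
 *				   0, &bp, ops);
 *	if (error)
 *		return error;
 *	...inspect bp->b_addr without modifying it...
 *	xfs_trans_brelse(tp, bp);
 */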
/*
 * Mark the buffer as not needing to be unlocked when the buf item's
 * iop_unlock() routine is called.  The buffer must already be locked
 * and associated with the given transaction.
 */
/* ARGSUSED */
void
xfs_trans_bhold(xfs_trans_t	*tp,
		xfs_buf_t	*bp)
{
	xfs_buf_log_item_t	*bip = bp->b_fspriv;

	ASSERT(bp->b_transp == tp);
	ASSERT(bip != NULL);
	ASSERT(!(bip->bli_flags & XFS_BLI_STALE));
	ASSERT(!(bip->__bli_format.blf_flags & XFS_BLF_CANCEL));
	ASSERT(atomic_read(&bip->bli_refcount) > 0);

	bip->bli_flags |= XFS_BLI_HOLD;
	trace_xfs_trans_bhold(bip);
}
/*
 * Cancel the previous buffer hold request made on this buffer
 * for this transaction.
 */
void
xfs_trans_bhold_release(xfs_trans_t	*tp,
			xfs_buf_t	*bp)
{
	xfs_buf_log_item_t	*bip = bp->b_fspriv;

	ASSERT(bp->b_transp == tp);
	ASSERT(bip != NULL);
	ASSERT(!(bip->bli_flags & XFS_BLI_STALE));
	ASSERT(!(bip->__bli_format.blf_flags & XFS_BLF_CANCEL));
	ASSERT(atomic_read(&bip->bli_refcount) > 0);
	ASSERT(bip->bli_flags & XFS_BLI_HOLD);

	bip->bli_flags &= ~XFS_BLI_HOLD;
	trace_xfs_trans_bhold_release(bip);
}
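
/*
 * Illustrative sketch, not taken from this file, of the hold/rejoin pattern
 * these two helpers enable: keep a buffer locked across a transaction commit
 * and hand it to a follow-up transaction ("new_tp" is assumed).
 *
 *	xfs_trans_bhold(tp, bp);
 *	...commit tp; bp stays locked because XFS_BLI_HOLD is set...
 *	xfs_trans_bjoin(new_tp, bp);
 *
 * If the hold turns out to be unnecessary before the commit,
 * xfs_trans_bhold_release() clears XFS_BLI_HOLD again so the buffer is
 * unlocked normally when the transaction completes.
 */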
/*
 * This is called to mark bytes first through last inclusive of the given
 * buffer as needing to be logged when the transaction is committed.
 * The buffer must already be associated with the given transaction.
 *
 * First and last are numbers relative to the beginning of this buffer,
 * so the first byte in the buffer is numbered 0 regardless of the
 * value of b_blkno.
 */
void
xfs_trans_log_buf(xfs_trans_t	*tp,
		  xfs_buf_t	*bp,
		  uint		first,
		  uint		last)
{
	xfs_buf_log_item_t	*bip = bp->b_fspriv;

	ASSERT(bp->b_transp == tp);
	ASSERT(bip != NULL);
	ASSERT(first <= last && last < BBTOB(bp->b_length));
	ASSERT(bp->b_iodone == NULL ||
	       bp->b_iodone == xfs_buf_iodone_callbacks);

	/*
	 * Mark the buffer as needing to be written out eventually,
	 * and set its iodone function to remove the buffer's buf log
	 * item from the AIL and free it when the buffer is flushed
	 * to disk.  See xfs_buf_attach_iodone() for more details
	 * on li_cb and xfs_buf_iodone_callbacks().
	 * If we end up aborting this transaction, we trap this buffer
	 * inside the b_bdstrat callback so that this won't get written to
	 * disk.
	 */
	XFS_BUF_DONE(bp);

	ASSERT(atomic_read(&bip->bli_refcount) > 0);
	bp->b_iodone = xfs_buf_iodone_callbacks;
	bip->bli_item.li_cb = xfs_buf_iodone;

	trace_xfs_trans_log_buf(bip);

	/*
	 * If we invalidated the buffer within this transaction, then
	 * cancel the invalidation now that we're dirtying the buffer
	 * again.  There are no races with the code in xfs_buf_item_unpin(),
	 * because we have a reference to the buffer this entire time.
	 */
	if (bip->bli_flags & XFS_BLI_STALE) {
		bip->bli_flags &= ~XFS_BLI_STALE;
		ASSERT(XFS_BUF_ISSTALE(bp));
		XFS_BUF_UNSTALE(bp);
		bip->__bli_format.blf_flags &= ~XFS_BLF_CANCEL;
	}

	tp->t_flags |= XFS_TRANS_DIRTY;
	bip->bli_item.li_desc->lid_flags |= XFS_LID_DIRTY;

	/*
	 * If we have an ordered buffer we are not logging any dirty range,
	 * but the buffer still needs to be marked dirty and flagged as
	 * logged.
	 */
	bip->bli_flags |= XFS_BLI_DIRTY | XFS_BLI_LOGGED;
	if (!(bip->bli_flags & XFS_BLI_ORDERED))
		xfs_buf_item_log(bip, first, last);
}
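
/*
 * Illustrative sketch, not taken from this file: logging only part of a
 * buffer.  Because first/last are byte offsets from the start of the buffer,
 * a caller that touches a single field can log just that range.  The
 * structure and field below are hypothetical.
 *
 *	struct xfs_disk_foo	*foo = bp->b_addr;
 *
 *	foo->counter = cpu_to_be32(newval);
 *	xfs_trans_log_buf(tp, bp,
 *			  offsetof(struct xfs_disk_foo, counter),
 *			  offsetof(struct xfs_disk_foo, counter) +
 *				sizeof(foo->counter) - 1);
 *
 * The dirty bitmap in the buf log item then covers only that region, which
 * keeps the amount of data copied into the log small.
 */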
/*
 * Invalidate a buffer that is being used within a transaction.
 *
 * Typically this is because the blocks in the buffer are being freed, so we
 * need to prevent it from being written out when we're done.  Allowing it
 * to be written again might overwrite data in the free blocks if they are
 * reallocated to a file.
 *
 * We prevent the buffer from being written out by marking it stale.  We can't
 * get rid of the buf log item at this point because the buffer may still be
 * pinned by another transaction.  If that is the case, then we'll wait until
 * the buffer is committed to disk for the last time (we can tell by the ref
 * count) and free it in xfs_buf_item_unpin().  Until that happens we will
 * keep the buffer locked so that the buffer and buf log item are not reused.
 *
 * We also set the XFS_BLF_CANCEL flag in the buf log format structure and log
 * the buf item.  This will be used at recovery time to determine that copies
 * of the buffer in the log before this should not be replayed.
 *
 * We mark the item descriptor and the transaction dirty so that we'll hold
 * the buffer until after the commit.
 *
 * Since we're invalidating the buffer, we also clear the state about which
 * parts of the buffer have been logged.  We also clear the flag indicating
 * that this is an inode buffer since the data in the buffer will no longer
 * be valid.
 *
 * We set the stale bit in the buffer as well since we're getting rid of it.
 */
void
xfs_trans_binval(
	xfs_trans_t	*tp,
	xfs_buf_t	*bp)
{
	xfs_buf_log_item_t	*bip = bp->b_fspriv;
	int			i;

	ASSERT(bp->b_transp == tp);
	ASSERT(bip != NULL);
	ASSERT(atomic_read(&bip->bli_refcount) > 0);

	trace_xfs_trans_binval(bip);

	if (bip->bli_flags & XFS_BLI_STALE) {
		/*
		 * If the buffer is already invalidated, then
		 * just return.
		 */
		ASSERT(XFS_BUF_ISSTALE(bp));
		ASSERT(!(bip->bli_flags & (XFS_BLI_LOGGED | XFS_BLI_DIRTY)));
		ASSERT(!(bip->__bli_format.blf_flags & XFS_BLF_INODE_BUF));
		ASSERT(!(bip->__bli_format.blf_flags & XFS_BLFT_MASK));
		ASSERT(bip->__bli_format.blf_flags & XFS_BLF_CANCEL);
		ASSERT(bip->bli_item.li_desc->lid_flags & XFS_LID_DIRTY);
		ASSERT(tp->t_flags & XFS_TRANS_DIRTY);
		return;
	}

	xfs_buf_stale(bp);

	bip->bli_flags |= XFS_BLI_STALE;
	bip->bli_flags &= ~(XFS_BLI_INODE_BUF | XFS_BLI_LOGGED | XFS_BLI_DIRTY);
	bip->__bli_format.blf_flags &= ~XFS_BLF_INODE_BUF;
	bip->__bli_format.blf_flags |= XFS_BLF_CANCEL;
	bip->__bli_format.blf_flags &= ~XFS_BLFT_MASK;
	for (i = 0; i < bip->bli_format_count; i++) {
		memset(bip->bli_formats[i].blf_data_map, 0,
		       (bip->bli_formats[i].blf_map_size * sizeof(uint)));
	}
	bip->bli_item.li_desc->lid_flags |= XFS_LID_DIRTY;
	tp->t_flags |= XFS_TRANS_DIRTY;
}
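
/*
 * Illustrative sketch, not taken from this file: the invalidation pattern
 * used when a transaction frees the blocks backing a metadata buffer.  The
 * buffer is pulled into the transaction and then stamped stale so it is
 * never written back over the now-free blocks.
 *
 *	bp = xfs_trans_get_buf(tp, mp->m_ddev_targp, blkno, numblks, 0);
 *	if (!bp)
 *		return -ENOMEM;
 *	xfs_trans_binval(tp, bp);
 *
 * Log recovery sees the XFS_BLF_CANCEL record logged here and skips
 * replaying any older copies of this buffer.
 */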
/*
 * This call is used to indicate that the buffer contains on-disk inodes which
 * must be handled specially during recovery.  They require special handling
 * because only the di_next_unlinked from the inodes in the buffer should be
 * recovered.  The rest of the data in the buffer is logged via the inodes
 * themselves.
 *
 * All we do is set the XFS_BLI_INODE_BUF flag in the item's flags so it can
 * be transferred to the buffer's log format structure so that we'll know what
 * to do at recovery time.
 */
void
xfs_trans_inode_buf(
	xfs_trans_t	*tp,
	xfs_buf_t	*bp)
{
	xfs_buf_log_item_t	*bip = bp->b_fspriv;

	ASSERT(bp->b_transp == tp);
	ASSERT(bip != NULL);
	ASSERT(atomic_read(&bip->bli_refcount) > 0);

	bip->bli_flags |= XFS_BLI_INODE_BUF;
	xfs_trans_buf_set_type(tp, bp, XFS_BLFT_DINO_BUF);
}
/*
 * This call is used to indicate that the buffer is going to
 * be staled and was an inode buffer.  This means it gets
 * special processing during unpin - where any inodes
 * associated with the buffer should be removed from the AIL.
 * There is also special processing during recovery:
 * any replay of the inodes in the buffer needs to be
 * prevented as the buffer may have been reused.
 */
void
xfs_trans_stale_inode_buf(
	xfs_trans_t	*tp,
	xfs_buf_t	*bp)
{
	xfs_buf_log_item_t	*bip = bp->b_fspriv;

	ASSERT(bp->b_transp == tp);
	ASSERT(bip != NULL);
	ASSERT(atomic_read(&bip->bli_refcount) > 0);

	bip->bli_flags |= XFS_BLI_STALE_INODE;
	bip->bli_item.li_cb = xfs_buf_iodone;
	xfs_trans_buf_set_type(tp, bp, XFS_BLFT_DINO_BUF);
}
/*
 * Mark the buffer as being one which contains newly allocated
 * inodes.  We need to make sure that even if this buffer is
 * relogged as an 'inode buf' we still recover all of the inode
 * images in the face of a crash.  This works in coordination with
 * xfs_buf_item_committed() to ensure that the buffer remains in the
 * AIL at its original location even after it has been relogged.
 */
/* ARGSUSED */
void
xfs_trans_inode_alloc_buf(
	xfs_trans_t	*tp,
	xfs_buf_t	*bp)
{
	xfs_buf_log_item_t	*bip = bp->b_fspriv;

	ASSERT(bp->b_transp == tp);
	ASSERT(bip != NULL);
	ASSERT(atomic_read(&bip->bli_refcount) > 0);

	bip->bli_flags |= XFS_BLI_INODE_ALLOC_BUF;
	xfs_trans_buf_set_type(tp, bp, XFS_BLFT_DINO_BUF);
}
/*
 * Mark the buffer as ordered for this transaction.  This means
 * that the contents of the buffer are not recorded in the transaction
 * but it is tracked in the AIL as though it was.  This allows us
 * to record logical changes in transactions rather than the physical
 * changes we make to the buffer without changing writeback ordering
 * constraints of metadata buffers.
 */
void
xfs_trans_ordered_buf(
	struct xfs_trans	*tp,
	struct xfs_buf		*bp)
{
	struct xfs_buf_log_item	*bip = bp->b_fspriv;

	ASSERT(bp->b_transp == tp);
	ASSERT(bip != NULL);
	ASSERT(atomic_read(&bip->bli_refcount) > 0);

	bip->bli_flags |= XFS_BLI_ORDERED;
	trace_xfs_buf_item_ordered(bip);
}
/*
 * Set the type of the buffer for log recovery so that recovery can correctly
 * identify the buffer and hence attach the correct buffer ops to it after
 * replay.
 */
void
xfs_trans_buf_set_type(
	struct xfs_trans	*tp,
	struct xfs_buf		*bp,
	enum xfs_blft		type)
{
	struct xfs_buf_log_item	*bip = bp->b_fspriv;

	if (!tp)
		return;

	ASSERT(bp->b_transp == tp);
	ASSERT(bip != NULL);
	ASSERT(atomic_read(&bip->bli_refcount) > 0);

	xfs_blft_to_flags(&bip->__bli_format, type);
}
void
xfs_trans_buf_copy_type(
	struct xfs_buf		*dst_bp,
	struct xfs_buf		*src_bp)
{
	struct xfs_buf_log_item	*sbip = src_bp->b_fspriv;
	struct xfs_buf_log_item	*dbip = dst_bp->b_fspriv;
	enum xfs_blft		type;

	type = xfs_blft_from_flags(&sbip->__bli_format);
	xfs_blft_to_flags(&dbip->__bli_format, type);
}
/*
 * Similar to xfs_trans_inode_buf(), this marks the buffer as a cluster of
 * dquots.  However, unlike in inode buffer recovery, dquot buffers get
 * recovered in their entirety.  (Hence, no XFS_BLI_DQUOT_ALLOC_BUF flag).
 * The only thing that makes dquot buffers different from regular
 * buffers is that we must not replay dquot bufs when recovering
 * if a _corresponding_ quotaoff has happened.  We also have to distinguish
 * between usr dquot bufs and grp dquot bufs, because usr and grp quotas
 * can be turned off independently.
 */
/* ARGSUSED */
void
xfs_trans_dquot_buf(
	xfs_trans_t	*tp,
	xfs_buf_t	*bp,
	uint		type)
{
	struct xfs_buf_log_item	*bip = bp->b_fspriv;

	ASSERT(type == XFS_BLF_UDQUOT_BUF ||
	       type == XFS_BLF_PDQUOT_BUF ||
	       type == XFS_BLF_GDQUOT_BUF);

	bip->__bli_format.blf_flags |= type;

	switch (type) {
	case XFS_BLF_UDQUOT_BUF:
		type = XFS_BLFT_UDQUOT_BUF;
		break;
	case XFS_BLF_PDQUOT_BUF:
		type = XFS_BLFT_PDQUOT_BUF;
		break;
	case XFS_BLF_GDQUOT_BUF:
		type = XFS_BLFT_GDQUOT_BUF;
		break;
	default:
		type = XFS_BLFT_UNKNOWN_BUF;
		break;
	}

	xfs_trans_buf_set_type(tp, bp, type);
}