 * Copyright (c) 2010 Red Hat, Inc. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
#include "xfs_types.h"
#include "xfs_trans.h"
#include "xfs_trans_priv.h"
#include "xfs_log_priv.h"
#include "xfs_mount.h"
#include "xfs_error.h"
#include "xfs_alloc.h"
#include "xfs_extent_busy.h"
#include "xfs_discard.h"
 * Allocate a new ticket. Failing to get a new ticket makes it really hard to
 * recover, so we don't allow failure here. Also, we allocate in a context that
 * we don't want to be issuing transactions from, so we need to tell the
 * allocation code this as well.
 *
 * We don't reserve any space for the ticket - we are going to steal whatever
 * space we require from transactions as they commit. To ensure we reserve all
 * the space required, we need to set the current reservation of the ticket to
 * zero so that we know to steal the initial transaction overhead from the
 * first transaction commit.
static struct xlog_ticket *
xlog_cil_ticket_alloc(
        struct xlog_ticket      *tic;

        tic = xlog_ticket_alloc(log, 0, 1, XFS_TRANSACTION, 0,
        tic->t_trans_type = XFS_TRANS_CHECKPOINT;

         * set the current reservation to zero so we know to steal the basic
         * transaction overhead reservation from the first transaction commit.
 * After the first stage of log recovery is done, we know where the head and
 * tail of the log are. We need this log initialisation done before we can
 * initialise the first CIL checkpoint context.
 *
 * Here we allocate a log ticket to track space usage during a CIL push. This
 * ticket is passed to xlog_write() directly so that we don't slowly leak log
 * space by failing to account for space used by log headers and additional
 * region headers for split regions.
xlog_cil_init_post_recovery(
        log->l_cilp->xc_ctx->ticket = xlog_cil_ticket_alloc(log);
        log->l_cilp->xc_ctx->sequence = 1;
        log->l_cilp->xc_ctx->commit_lsn = xlog_assign_lsn(log->l_curr_cycle,
xlog_cil_lv_item_format(
        struct xfs_log_item     *lip,
        struct xfs_log_vec      *lv)

        /* format new vectors into array */
        lip->li_ops->iop_format(lip, lv->lv_iovecp);

        /* copy data into existing array */
        for (index = 0; index < lv->lv_niovecs; index++) {
                struct xfs_log_iovec *vec = &lv->lv_iovecp[index];

                memcpy(ptr, vec->i_addr, vec->i_len);

         * some size calculations for log vectors over-estimate, so the caller
         * doesn't know the amount of space actually used by the item. Return
         * the byte count to the caller so they can check and store it
        return ptr - lv->lv_buf;
 * Prepare the log item for insertion into the CIL. Calculate the difference in
 * log space and vectors it will consume, and if it is a new item pin it as
xfs_cil_prepare_item(
        struct xfs_log_vec      *lv,
        struct xfs_log_vec      *old_lv,

        /* Account for the new LV being passed in */
        if (lv->lv_buf_len != XFS_LOG_VEC_ORDERED) {
                *diff_len += lv->lv_buf_len;
                *diff_iovecs += lv->lv_niovecs;

         * If there is no old LV, this is the first time we've seen the item in
         * this CIL context and so we need to pin it. If we are replacing the
         * old_lv, then remove the space it accounts for and free it.
                lv->lv_item->li_ops->iop_pin(lv->lv_item);
        else if (old_lv != lv) {
                ASSERT(lv->lv_buf_len != XFS_LOG_VEC_ORDERED);

                *diff_len -= old_lv->lv_buf_len;
                *diff_iovecs -= old_lv->lv_niovecs;

        /* attach new log vector to log item */
        lv->lv_item->li_lv = lv;

         * If this is the first time the item is being committed to the
         * CIL, store the sequence number on the log item so we can
         * tell in future commits whether this is the first checkpoint
         * the item is being committed into.
        if (!lv->lv_item->li_seq)
                lv->lv_item->li_seq = log->l_cilp->xc_ctx->sequence;
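        /*
         * Note: li_seq is what xfs_log_item_in_current_chkpt(), later in this
         * file, compares against the current CIL context sequence to decide
         * whether the item was first logged in the current checkpoint.
         */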
 * Format log item into a flat buffer
 *
 * For delayed logging, we need to hold a formatted buffer containing all the
 * changes on the log item. This enables us to relog the item in memory and
 * write it out asynchronously without needing to relock the object that was
 * modified at the time it gets written into the iclog.
 *
 * This function builds a vector for the changes in each log item in the
 * transaction. It then works out the length of the buffer needed for each log
 * item, allocates them and formats the vector for the item into the buffer.
 * The buffer is then attached to the log item and the item is then inserted
 * into the Committed Item List for tracking until the next checkpoint is
 * written out.
 *
 * We don't set up region headers during this process; we simply copy the
 * regions into the flat buffer. We can do this because we still have to do a
 * formatting step to write the regions into the iclog buffer. Writing the
 * ophdrs during the iclog write means that we can support splitting large
 * regions across iclog boundaries without needing a change in the format of
 * the item/region encapsulation.
 *
 * Hence what we need to do now is rewrite the vector array to point
 * to the copied region inside the buffer we just allocated. This allows us to
 * format the regions into the iclog as though they are being formatted
 * directly out of the objects themselves.
xlog_cil_insert_format_items(
        struct xfs_trans        *tp,

        struct xfs_log_item_desc *lidp;

        /* Bail out if we didn't find a log item. */
        if (list_empty(&tp->t_items)) {

        list_for_each_entry(lidp, &tp->t_items, lid_trans) {
                struct xfs_log_item *lip = lidp->lid_item;
                struct xfs_log_vec *lv;
                struct xfs_log_vec *old_lv;
                bool    ordered = false;

                /* Skip items which aren't dirty in this transaction. */
                if (!(lidp->lid_flags & XFS_LID_DIRTY))

                /* get number of vecs and size of data to be stored */
                lip->li_ops->iop_size(lip, &niovecs, &nbytes);

                /* Skip items that do not have any vectors for writing */

                 * Ordered items need to be tracked but we do not wish to write
                 * them. We need a logvec to track the object, but we do not
                 * need an iovec or buffer to be allocated for copying data.
                if (niovecs == XFS_LOG_VEC_ORDERED) {

                /* grab the old item if it exists for reservation accounting */

                /* calc buffer size */
                buf_size = sizeof(struct xfs_log_vec) + nbytes +
                                niovecs * sizeof(struct xfs_log_iovec);
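                /*
                 * Rough sketch of the single allocation sized above; the
                 * layout is implied by the lv_iovecp and lv_buf assignments
                 * further down:
                 *
                 *   +--------------------+--------------------------+--------+
                 *   | struct xfs_log_vec | niovecs * xfs_log_iovec  | nbytes |
                 *   +--------------------+--------------------------+--------+
                 *   ^ lv                 ^ lv->lv_iovecp            ^ lv->lv_buf
                 */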
                /* compare to existing item size */
                if (lip->li_lv && buf_size <= lip->li_lv->lv_size) {
                        /* same or smaller, optimise common overwrite case */

                         * set the item up as though it is a new insertion so
                         * that the space reservation accounting is correct.
                        *diff_iovecs -= lv->lv_niovecs;
                        *diff_len -= lv->lv_buf_len;

                        /* Ensure the lv is set up according to ->iop_size */
                        lv->lv_niovecs = niovecs;
                        lv->lv_buf = (char *)lv + buf_size - nbytes;

                        lv->lv_buf_len = xlog_cil_lv_item_format(lip, lv);

                /* allocate new data chunk */
                lv = kmem_zalloc(buf_size, KM_SLEEP|KM_NOFS);
                lv->lv_size = buf_size;
                lv->lv_niovecs = niovecs;

                        /* track as an ordered logvec */
                        ASSERT(lip->li_lv == NULL);
                        lv->lv_buf_len = XFS_LOG_VEC_ORDERED;

                /* The allocated iovec region lies beyond the log vector. */
                lv->lv_iovecp = (struct xfs_log_iovec *)&lv[1];

                /* The allocated data region lies beyond the iovec region */
                lv->lv_buf = (char *)lv + buf_size - nbytes;

                lv->lv_buf_len = xlog_cil_lv_item_format(lip, lv);
                ASSERT(lv->lv_buf_len <= nbytes);
                xfs_cil_prepare_item(log, lv, old_lv, diff_len, diff_iovecs);
 * Insert the log items into the CIL and calculate the difference in space
 * consumed by the item. Add the space to the checkpoint ticket and calculate
 * if the change requires additional log metadata. If it does, take that space
 * as well. Remove the amount of space we added to the checkpoint ticket from
 * the current transaction ticket so that the accounting works out correctly.
xlog_cil_insert_items(
        struct xfs_trans        *tp)

        struct xfs_cil          *cil = log->l_cilp;
        struct xfs_cil_ctx      *ctx = cil->xc_ctx;
        struct xfs_log_item_desc *lidp;

         * We can do this safely because the context can't checkpoint until we
         * are done so it doesn't matter exactly how we update the CIL.
        xlog_cil_insert_format_items(log, tp, &len, &diff_iovecs);

         * Now (re-)position everything modified at the tail of the CIL.
         * We do this here so we only need to take the CIL lock once during
         * the transaction commit.
        spin_lock(&cil->xc_cil_lock);
        list_for_each_entry(lidp, &tp->t_items, lid_trans) {
                struct xfs_log_item *lip = lidp->lid_item;

                /* Skip items which aren't dirty in this transaction. */
                if (!(lidp->lid_flags & XFS_LID_DIRTY))

                list_move_tail(&lip->li_cil, &cil->xc_cil);

        /* account for space used by new iovec headers */
        len += diff_iovecs * sizeof(xlog_op_header_t);
        ctx->nvecs += diff_iovecs;
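        /*
         * (Each log iovec becomes its own region in the log, and every region
         * is preceded by an op header when it is finally written out by
         * xlog_write(), hence the per-iovec overhead added to len above.)
         */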
        /* attach the transaction to the CIL if it has any busy extents */
        if (!list_empty(&tp->t_busy))
                list_splice_init(&tp->t_busy, &ctx->busy_extents);

         * Now transfer enough transaction reservation to the context ticket
         * for the checkpoint. The context ticket is special - the unit
         * reservation has to grow as well as the current reservation as we
         * steal from tickets so we can correctly determine the space used
         * during the transaction commit.
        if (ctx->ticket->t_curr_res == 0) {
                ctx->ticket->t_curr_res = ctx->ticket->t_unit_res;
                tp->t_ticket->t_curr_res -= ctx->ticket->t_unit_res;

        /* do we need space for more log record headers? */
        iclog_space = log->l_iclog_size - log->l_iclog_hsize;
        if (len > 0 && (ctx->space_used / iclog_space !=
                                (ctx->space_used + len) / iclog_space)) {
                hdrs = (len + iclog_space - 1) / iclog_space;

                /* need to take into account split region headers, too */
                hdrs *= log->l_iclog_hsize + sizeof(struct xlog_op_header);
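                /*
                 * Illustrative numbers only, not taken from this code: with
                 * 32k iclogs and a 512 byte iclog header, iclog_space is
                 * 32256 bytes. Adding len = 40000 bytes of new CIL data spans
                 * ceil(40000 / 32256) = 2 iclogs, so we reserve space for 2
                 * extra iclog headers plus 2 op headers for the regions that
                 * may be split across the iclog boundaries.
                 */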
                ctx->ticket->t_unit_res += hdrs;
                ctx->ticket->t_curr_res += hdrs;
                tp->t_ticket->t_curr_res -= hdrs;
                ASSERT(tp->t_ticket->t_curr_res >= len);

        tp->t_ticket->t_curr_res -= len;
        ctx->space_used += len;

        spin_unlock(&cil->xc_cil_lock);
xlog_cil_free_logvec(
        struct xfs_log_vec      *log_vector)

        struct xfs_log_vec      *lv;

        for (lv = log_vector; lv; ) {
                struct xfs_log_vec *next = lv->lv_next;
 * Mark all items committed and clear busy extents. We free the log vector
 * chains in a separate pass so that we unpin the log items as quickly as
        struct xfs_cil_ctx      *ctx = args;
        struct xfs_mount        *mp = ctx->cil->xc_log->l_mp;

        xfs_trans_committed_bulk(ctx->cil->xc_log->l_ailp, ctx->lv_chain,
                                        ctx->start_lsn, abort);

        xfs_extent_busy_sort(&ctx->busy_extents);
        xfs_extent_busy_clear(mp, &ctx->busy_extents,
                              (mp->m_flags & XFS_MOUNT_DISCARD) && !abort);

        spin_lock(&ctx->cil->xc_push_lock);
        list_del(&ctx->committing);
        spin_unlock(&ctx->cil->xc_push_lock);

        xlog_cil_free_logvec(ctx->lv_chain);

        if (!list_empty(&ctx->busy_extents)) {
                ASSERT(mp->m_flags & XFS_MOUNT_DISCARD);

                xfs_discard_extents(mp, &ctx->busy_extents);
                xfs_extent_busy_clear(mp, &ctx->busy_extents, false);
 * Push the Committed Item List to the log. If the @push_seq flag is zero, then
 * it is a background flush and so we can choose to ignore it. Otherwise, if the
 * current sequence is the same as @push_seq we need to do a flush. If
 * @push_seq is less than the current sequence, then it has already been
 * flushed and we don't need to do anything - the caller will wait for it to
 * complete if necessary.
 *
 * @push_seq is a value rather than a flag because that allows us to do an
 * unlocked check of the sequence number for a match. Hence we can allow log
 * forces to run racily and not issue pushes for the same sequence twice. If we
 * get a race between multiple pushes for the same sequence they will block on
 * the first one and then abort, hence avoiding needless pushes.
        struct xfs_cil          *cil = log->l_cilp;
        struct xfs_log_vec      *lv;
        struct xfs_cil_ctx      *ctx;
        struct xfs_cil_ctx      *new_ctx;
        struct xlog_in_core     *commit_iclog;
        struct xlog_ticket      *tic;
        struct xfs_trans_header thdr;
        struct xfs_log_iovec    lhdr;
        struct xfs_log_vec      lvhdr = { NULL };
        xfs_lsn_t               commit_lsn;

        new_ctx = kmem_zalloc(sizeof(*new_ctx), KM_SLEEP|KM_NOFS);
        new_ctx->ticket = xlog_cil_ticket_alloc(log);

        down_write(&cil->xc_ctx_lock);

        spin_lock(&cil->xc_push_lock);
        push_seq = cil->xc_push_seq;
        ASSERT(push_seq <= ctx->sequence);

         * Check if we've anything to push. If there is nothing, then we don't
         * move on to a new sequence number and so we have to be able to push
         * this sequence again later.
        if (list_empty(&cil->xc_cil)) {
                cil->xc_push_seq = 0;
                spin_unlock(&cil->xc_push_lock);

        spin_unlock(&cil->xc_push_lock);
        /* check for a previously pushed sequence */
        if (push_seq < cil->xc_ctx->sequence)
         * pull all the log vectors off the items in the CIL, and
         * remove the items from the CIL. We don't need the CIL lock
         * here because it's only needed on the transaction commit
         * side which is currently locked out by the flush lock.
        while (!list_empty(&cil->xc_cil)) {
                struct xfs_log_item     *item;

                item = list_first_entry(&cil->xc_cil,
                                        struct xfs_log_item, li_cil);
                list_del_init(&item->li_cil);

                        ctx->lv_chain = item->li_lv;
                        lv->lv_next = item->li_lv;

                num_iovecs += lv->lv_niovecs;
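                /*
                 * (The chain built here keeps the log vectors in CIL order;
                 * num_iovecs is used below as the region count in the
                 * checkpoint transaction header.)
                 */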
         * initialise the new context and attach it to the CIL. Then attach
         * the current context to the CIL committing list so it can be found
         * during log forces to extract the commit lsn of the sequence that
         * needs to be forced.
        INIT_LIST_HEAD(&new_ctx->committing);
        INIT_LIST_HEAD(&new_ctx->busy_extents);
        new_ctx->sequence = ctx->sequence + 1;
        cil->xc_ctx = new_ctx;

         * mirror the new sequence into the cil structure so that we can do
         * unlocked checks against the current sequence in log forces without
         * risking dereferencing a freed context pointer.
        cil->xc_current_sequence = new_ctx->sequence;
         * The switch is now done, so we can drop the context lock and move out
         * of a shared context. We can't just go straight to the commit record,
         * though - we need to synchronise with previous and future commits so
         * that the commit records are correctly ordered in the log to ensure
         * that we process items during log IO completion in the correct order.
         *
         * For example, if we get an EFI in one checkpoint and the EFD in the
         * next (e.g. due to log forces), we do not want the checkpoint with
         * the EFD to be committed before the checkpoint with the EFI. Hence
         * we must strictly order the commit records of the checkpoints so
         * that: a) the checkpoint callbacks are attached to the iclogs in the
         * correct order; and b) the checkpoints are replayed in correct order
         *
         * Hence we need to add this context to the committing context list so
         * that higher sequences will wait for us to write out a commit record
        spin_lock(&cil->xc_push_lock);
        list_add(&ctx->committing, &cil->xc_committing);
        spin_unlock(&cil->xc_push_lock);
        up_write(&cil->xc_ctx_lock);
         * Build a checkpoint transaction header and write it to the log to
         * begin the transaction. We need to account for the space used by the
         * transaction header here as it is not accounted for in xlog_write().
         *
         * The LSN we need to pass to the log items on transaction commit is
         * the LSN reported by the first log vector write. If we use the commit
         * record lsn then we can move the tail beyond the grant write head.
        thdr.th_magic = XFS_TRANS_HEADER_MAGIC;
        thdr.th_type = XFS_TRANS_CHECKPOINT;
        thdr.th_tid = tic->t_tid;
        thdr.th_num_items = num_iovecs;

        lhdr.i_len = sizeof(xfs_trans_header_t);
        lhdr.i_type = XLOG_REG_TYPE_TRANSHDR;
        tic->t_curr_res -= lhdr.i_len + sizeof(xlog_op_header_t);

        lvhdr.lv_niovecs = 1;
        lvhdr.lv_iovecp = &lhdr;
        lvhdr.lv_next = ctx->lv_chain;
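        /*
         * lvhdr is chained in front of the log vectors pulled off the CIL
         * above, so the single xlog_write() call below emits the checkpoint
         * transaction header followed by every formatted log item region.
         */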
        error = xlog_write(log, &lvhdr, tic, &ctx->start_lsn, NULL, 0);
                goto out_abort_free_ticket;
         * now that we've written the checkpoint into the log, strictly
         * order the commit records so replay will get them in the right order.
        spin_lock(&cil->xc_push_lock);
        list_for_each_entry(new_ctx, &cil->xc_committing, committing) {
                 * Higher sequences will wait for this one so skip them.
                 * Don't wait for our own sequence, either.
                if (new_ctx->sequence >= ctx->sequence)

                if (!new_ctx->commit_lsn) {
                         * It is still being pushed! Wait for the push to
                         * complete, then start again from the beginning.
                        xlog_wait(&cil->xc_commit_wait, &cil->xc_push_lock);

        spin_unlock(&cil->xc_push_lock);

        /* xfs_log_done always frees the ticket on error. */
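        /*
         * (The commit record for this checkpoint is written by xfs_log_done()
         * below; commit_iclog is handed back as the iclog that record landed
         * in, and the returned LSN is the commit record's LSN.)
         */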
        commit_lsn = xfs_log_done(log->l_mp, tic, &commit_iclog, 0);
        if (commit_lsn == -1)

        /* attach all the transactions w/ busy extents to iclog */
        ctx->log_cb.cb_func = xlog_cil_committed;
        ctx->log_cb.cb_arg = ctx;
        error = xfs_log_notify(log->l_mp, commit_iclog, &ctx->log_cb);

         * now the checkpoint commit is complete and we've attached the
         * callbacks to the iclog we can assign the commit LSN to the context
         * and wake up anyone who is waiting for the commit to complete.
        spin_lock(&cil->xc_push_lock);
        ctx->commit_lsn = commit_lsn;
        wake_up_all(&cil->xc_commit_wait);
        spin_unlock(&cil->xc_push_lock);

        /* release the hounds! */
        return xfs_log_release_iclog(log->l_mp, commit_iclog);

        up_write(&cil->xc_ctx_lock);
        xfs_log_ticket_put(new_ctx->ticket);

out_abort_free_ticket:
        xfs_log_ticket_put(tic);
        xlog_cil_committed(ctx, XFS_LI_ABORTED);
        return XFS_ERROR(EIO);
        struct work_struct      *work)

        struct xfs_cil          *cil = container_of(work, struct xfs_cil,
        xlog_cil_push(cil->xc_log);
 * We need to push the CIL every so often so we don't cache more than we can
 * fit in the log. The limit really is that a checkpoint can't be more than
 * half the log (the current checkpoint is not allowed to overwrite the
 * previous checkpoint), but commit latency and memory usage limit this to a
 * smaller
xlog_cil_push_background(
        struct xfs_cil  *cil = log->l_cilp;

         * The cil won't be empty because we are called while holding the
         * context lock so whatever we added to the CIL will still be there
        ASSERT(!list_empty(&cil->xc_cil));

         * don't do a background push if we haven't used up all the
         * space available yet.
        if (cil->xc_ctx->space_used < XLOG_CIL_SPACE_LIMIT(log))

        spin_lock(&cil->xc_push_lock);
        if (cil->xc_push_seq < cil->xc_current_sequence) {
                cil->xc_push_seq = cil->xc_current_sequence;
                queue_work(log->l_mp->m_cil_workqueue, &cil->xc_push_work);

        spin_unlock(&cil->xc_push_lock);
xlog_cil_push_foreground(
        struct xfs_cil  *cil = log->l_cilp;

        ASSERT(push_seq && push_seq <= cil->xc_current_sequence);

        /* start on any pending background push to minimise wait time on it */
        flush_work(&cil->xc_push_work);

         * If the CIL is empty or we've already pushed the sequence then
         * there's no work we need to do.
        spin_lock(&cil->xc_push_lock);
        if (list_empty(&cil->xc_cil) || push_seq <= cil->xc_push_seq) {
                spin_unlock(&cil->xc_push_lock);

        cil->xc_push_seq = push_seq;
        spin_unlock(&cil->xc_push_lock);

        /* do the push now */
 * Commit a transaction with the given vector to the Committed Item List.
 *
 * To do this, we need to format the item, pin it in memory if required and
 * account for the space used by the transaction. Once we have done that we
 * need to release the unused reservation for the transaction, attach the
 * transaction to the checkpoint context so we carry the busy extents through
 * to checkpoint completion, and then unlock all the items in the transaction.
 *
 * Called with the context lock already held in read mode to lock out
 * background commit, returns without it held once background commits are
        struct xfs_mount        *mp,
        struct xfs_trans        *tp,
        xfs_lsn_t               *commit_lsn,

        struct xlog             *log = mp->m_log;
        struct xfs_cil          *cil = log->l_cilp;

        if (flags & XFS_TRANS_RELEASE_LOG_RES)
                log_flags = XFS_LOG_REL_PERM_RESERV;

        /* lock out background commit */
        down_read(&cil->xc_ctx_lock);

        xlog_cil_insert_items(log, tp);

        /* check we didn't blow the reservation */
        if (tp->t_ticket->t_curr_res < 0)
                xlog_print_tic_res(mp, tp->t_ticket);

        tp->t_commit_lsn = cil->xc_ctx->sequence;
        *commit_lsn = tp->t_commit_lsn;
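        /*
         * Note that with delayed logging the "commit lsn" recorded here is
         * the CIL checkpoint sequence number for this context, not a physical
         * log LSN; it only resolves to a real LSN once the checkpoint it
         * belongs to has been forced out and committed.
         */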
        xfs_log_done(mp, tp->t_ticket, NULL, log_flags);
        xfs_trans_unreserve_and_mod_sb(tp);

         * Once all the items of the transaction have been copied to the CIL,
         * the items can be unlocked and freed.
         *
         * This needs to be done before we drop the CIL context lock because we
         * have to update state in the log items and unlock them before they go
         * to disk. If we don't, then the CIL checkpoint can race with us and
         * we can run checkpoint completion before we've updated and unlocked
         * the log items. This affects (at least) processing of stale buffers,
        xfs_trans_free_items(tp, tp->t_commit_lsn, 0);

        xlog_cil_push_background(log);

        up_read(&cil->xc_ctx_lock);
 * Conditionally push the CIL based on the sequence passed in.
 *
 * We only need to push if we haven't already pushed the sequence
 * number given. Hence the only time we will trigger a push here is
 * if the push sequence is the same as the current context.
 *
 * We return the current commit lsn to allow the callers to determine if an
 * iclog flush is necessary following this call.
        struct xfs_cil          *cil = log->l_cilp;
        struct xfs_cil_ctx      *ctx;
        xfs_lsn_t               commit_lsn = NULLCOMMITLSN;

        ASSERT(sequence <= cil->xc_current_sequence);

         * check to see if we need to force out the current context.
         * xlog_cil_push() handles racing pushes for the same sequence,
         * so no need to deal with it here.
        xlog_cil_push_foreground(log, sequence);

         * See if we can find a previous sequence still committing.
         * We need to wait for all previous sequence commits to complete
         * before allowing the force of push_seq to go ahead. Hence block
         * on commits for those as well.
        spin_lock(&cil->xc_push_lock);
        list_for_each_entry(ctx, &cil->xc_committing, committing) {
                if (ctx->sequence > sequence)

                if (!ctx->commit_lsn) {
                         * It is still being pushed! Wait for the push to
                         * complete, then start again from the beginning.
                        xlog_wait(&cil->xc_commit_wait, &cil->xc_push_lock);

                if (ctx->sequence != sequence)

                commit_lsn = ctx->commit_lsn;

        spin_unlock(&cil->xc_push_lock);
 * Check if the current log item was first committed in this sequence.
 * We can't rely on just the log item being in the CIL, we have to check
 * the recorded commit sequence number.
 *
 * Note: for this to be used in a non-racy manner, it has to be called with
 * CIL flushing locked out. As a result, it should only be used during the
 * transaction commit process when deciding what to format into the item.
xfs_log_item_in_current_chkpt(
        struct xfs_log_item     *lip)

        struct xfs_cil_ctx      *ctx;

        if (list_empty(&lip->li_cil))

        ctx = lip->li_mountp->m_log->l_cilp->xc_ctx;

         * li_seq is written on the first commit of a log item to record the
         * first checkpoint it is written to. Hence if it is different to the
         * current sequence, we're in a new checkpoint.
        if (XFS_LSN_CMP(lip->li_seq, ctx->sequence) != 0)
 * Perform initial CIL structure initialisation.
        struct xfs_cil_ctx      *ctx;

        cil = kmem_zalloc(sizeof(*cil), KM_SLEEP|KM_MAYFAIL);
        ctx = kmem_zalloc(sizeof(*ctx), KM_SLEEP|KM_MAYFAIL);

        INIT_WORK(&cil->xc_push_work, xlog_cil_push_work);
        INIT_LIST_HEAD(&cil->xc_cil);
        INIT_LIST_HEAD(&cil->xc_committing);
        spin_lock_init(&cil->xc_cil_lock);
        spin_lock_init(&cil->xc_push_lock);
        init_rwsem(&cil->xc_ctx_lock);
        init_waitqueue_head(&cil->xc_commit_wait);

        INIT_LIST_HEAD(&ctx->committing);
        INIT_LIST_HEAD(&ctx->busy_extents);

        cil->xc_current_sequence = ctx->sequence;
        if (log->l_cilp->xc_ctx) {
                if (log->l_cilp->xc_ctx->ticket)
                        xfs_log_ticket_put(log->l_cilp->xc_ctx->ticket);
                kmem_free(log->l_cilp->xc_ctx);

        ASSERT(list_empty(&log->l_cilp->xc_cil));
        kmem_free(log->l_cilp);