/*
 * Copyright (c) 2010 Red Hat, Inc. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */
#include "xfs_log_format.h"
#include "xfs_shared.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_error.h"
#include "xfs_alloc.h"
#include "xfs_extent_busy.h"
#include "xfs_discard.h"
#include "xfs_trans.h"
#include "xfs_trans_priv.h"
#include "xfs_log_priv.h"
/*
 * Allocate a new ticket. Failing to get a new ticket makes it really hard to
 * recover, so we don't allow failure here. Also, we allocate in a context that
 * we don't want to be issuing transactions from, so we need to tell the
 * allocation code this as well.
 *
 * We don't reserve any space for the ticket - we are going to steal whatever
 * space we require from transactions as they commit. To ensure we reserve all
 * the space required, we need to set the current reservation of the ticket to
 * zero so that we know to steal the initial transaction overhead from the
 * first transaction commit.
 */
static struct xlog_ticket *
xlog_cil_ticket_alloc(
	struct xlog	*log)
{
	struct xlog_ticket *tic;

	tic = xlog_ticket_alloc(log, 0, 1, XFS_TRANSACTION, 0,
				KM_SLEEP|KM_NOFS);
	tic->t_trans_type = XFS_TRANS_CHECKPOINT;

	/*
	 * set the current reservation to zero so we know to steal the basic
	 * transaction overhead reservation from the first transaction commit.
	 */
	tic->t_curr_res = 0;
	return tic;
}
/*
 * After the first stage of log recovery is done, we know where the head and
 * tail of the log are. We need this log initialisation done before we can
 * initialise the first CIL checkpoint context.
 *
 * Here we allocate a log ticket to track space usage during a CIL push. This
 * ticket is passed to xlog_write() directly so that we don't slowly leak log
 * space by failing to account for space used by log headers and additional
 * region headers for split regions.
 */
void
xlog_cil_init_post_recovery(
	struct xlog	*log)
{
	log->l_cilp->xc_ctx->ticket = xlog_cil_ticket_alloc(log);
	log->l_cilp->xc_ctx->sequence = 1;
	log->l_cilp->xc_ctx->commit_lsn = xlog_assign_lsn(log->l_curr_cycle,
								log->l_curr_block);
}
/*
 * Prepare the log item for insertion into the CIL. Calculate the difference in
 * log space and vectors it will consume, and if it is a new item pin it as
 * well.
 */
static void
xfs_cil_prepare_item(
	struct xlog		*log,
	struct xfs_log_vec	*lv,
	struct xfs_log_vec	*old_lv,
	int			*diff_len,
	int			*diff_iovecs)
{
	/* Account for the new LV being passed in */
	if (lv->lv_buf_len != XFS_LOG_VEC_ORDERED) {
		*diff_len += lv->lv_buf_len;
		*diff_iovecs += lv->lv_niovecs;
	}
	/*
	 * If there is no old LV, this is the first time we've seen the item in
	 * this CIL context and so we need to pin it. If we are replacing the
	 * old_lv, then remove the space it accounts for and free it.
	 */
	if (!old_lv)
		lv->lv_item->li_ops->iop_pin(lv->lv_item);
	else if (old_lv != lv) {
		ASSERT(lv->lv_buf_len != XFS_LOG_VEC_ORDERED);

		*diff_len -= old_lv->lv_buf_len;
		*diff_iovecs -= old_lv->lv_niovecs;
		kmem_free(old_lv);
	}
	/* attach new log vector to log item */
	lv->lv_item->li_lv = lv;
	/*
	 * If this is the first time the item is being committed to the
	 * CIL, store the sequence number on the log item so we can
	 * tell in future commits whether this is the first checkpoint
	 * the item is being committed into.
	 */
	if (!lv->lv_item->li_seq)
		lv->lv_item->li_seq = log->l_cilp->xc_ctx->sequence;
}
/*
 * Format log item into a flat buffer
 *
 * For delayed logging, we need to hold a formatted buffer containing all the
 * changes on the log item. This enables us to relog the item in memory and
 * write it out asynchronously without needing to relock the object that was
 * modified at the time it gets written into the iclog.
 *
 * This function builds a vector for the changes in each log item in the
 * transaction. It then works out the length of the buffer needed for each log
 * item, allocates them and formats the vector for the item into the buffer.
 * The buffer is then attached to the log item and the log items are then
 * inserted into the Committed Item List for tracking until the next checkpoint
 * is written out.
 *
 * We don't set up region headers during this process; we simply copy the
 * regions into the flat buffer. We can do this because we still have to do a
 * formatting step to write the regions into the iclog buffer. Writing the
 * ophdrs during the iclog write means that we can support splitting large
 * regions across iclog boundaries without needing a change in the format of
 * the item/region encapsulation.
 *
 * Hence what we need to do now is rewrite the vector array to point to the
 * copied region inside the buffer we just allocated. This allows us to format
 * the regions into the iclog as though they are being formatted directly out
 * of the objects themselves.
 */
static void
xlog_cil_insert_format_items(
	struct xlog		*log,
	struct xfs_trans	*tp,
	int			*diff_len,
	int			*diff_iovecs)
{
	struct xfs_log_item_desc *lidp;
	/* Bail out if we didn't find a log item. */
	if (list_empty(&tp->t_items)) {
		ASSERT(0);
		return;
	}
	list_for_each_entry(lidp, &tp->t_items, lid_trans) {
		struct xfs_log_item *lip = lidp->lid_item;
		struct xfs_log_vec *lv;
		struct xfs_log_vec *old_lv;
		int	niovecs = 0;
		int	nbytes = 0;
		int	buf_size;
		bool	ordered = false;
		/* Skip items which aren't dirty in this transaction. */
		if (!(lidp->lid_flags & XFS_LID_DIRTY))
			continue;

		/* get number of vecs and size of data to be stored */
		lip->li_ops->iop_size(lip, &niovecs, &nbytes);
		/* Skip items that do not have any vectors for writing */
		if (!niovecs)
			continue;

		/*
		 * Ordered items need to be tracked but we do not wish to write
		 * them. We need a logvec to track the object, but we do not
		 * need an iovec or buffer to be allocated for copying data.
		 */
		if (niovecs == XFS_LOG_VEC_ORDERED) {
			ordered = true;
			niovecs = 0;
			nbytes = 0;
		}
		/*
		 * We 64-bit align the length of each iovec so that the start
		 * of the next one is naturally aligned. We'll need to
		 * account for that slack space here.
		 */
		nbytes += niovecs * sizeof(uint64_t);
		/* grab the old item if it exists for reservation accounting */
		old_lv = lip->li_lv;
		/* calc buffer size */
		buf_size = sizeof(struct xfs_log_vec) + nbytes +
				niovecs * sizeof(struct xfs_log_iovec);
		/* compare to existing item size */
		if (lip->li_lv && buf_size <= lip->li_lv->lv_size) {
			/* same or smaller, optimise common overwrite case */
			lv = lip->li_lv;
			lv->lv_next = NULL;

			if (ordered)
				goto insert;

			/*
			 * set the item up as though it is a new insertion so
			 * that the space reservation accounting is correct.
			 */
			*diff_iovecs -= lv->lv_niovecs;
			*diff_len -= lv->lv_buf_len;
		} else {
			/* allocate new data chunk */
			lv = kmem_zalloc(buf_size, KM_SLEEP|KM_NOFS);
			lv->lv_item = lip;
			lv->lv_size = buf_size;
			if (ordered) {
				/* track as an ordered logvec */
				ASSERT(lip->li_lv == NULL);
				lv->lv_buf_len = XFS_LOG_VEC_ORDERED;
				goto insert;
			}
			lv->lv_iovecp = (struct xfs_log_iovec *)&lv[1];
		}

		/* Ensure the lv is set up according to ->iop_size */
		lv->lv_niovecs = niovecs;

		/* The allocated data region lies beyond the iovec region */
		lv->lv_buf = (char *)lv + buf_size - nbytes;

		lip->li_ops->iop_format(lip, lv);
insert:
		ASSERT(lv->lv_buf_len <= nbytes);
		xfs_cil_prepare_item(log, lv, old_lv, diff_len, diff_iovecs);
	}
}
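/*
 * Layout sketch for the single allocation made above: the vector header, the
 * iovec array and the formatted data are packed back to back,
 *
 *	buf_size = sizeof(struct xfs_log_vec) + nbytes +
 *		   niovecs * sizeof(struct xfs_log_iovec);
 *
 *	+--------------------+-------------------------+------------------+
 *	| struct xfs_log_vec | niovecs xfs_log_iovecs  | nbytes of data   |
 *	+--------------------+-------------------------+------------------+
 *	^lv                  ^lv->lv_iovecp             ^lv->lv_buf
 *
 * which is why lv_iovecp is (struct xfs_log_iovec *)&lv[1] and lv_buf is
 * (char *)lv + buf_size - nbytes. ->iop_format() then points each iovec at
 * space inside this same chunk, so the CIL can relog the item later without
 * relocking the object it describes.
 */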
/*
 * Insert the log items into the CIL and calculate the difference in space
 * consumed by the item. Add the space to the checkpoint ticket and calculate
 * if the change requires additional log metadata. If it does, take that space
 * as well. Remove the amount of space we added to the checkpoint ticket from
 * the current transaction ticket so that the accounting works out correctly.
 */
static void
xlog_cil_insert_items(
	struct xlog		*log,
	struct xfs_trans	*tp)
{
	struct xfs_cil		*cil = log->l_cilp;
	struct xfs_cil_ctx	*ctx = cil->xc_ctx;
	struct xfs_log_item_desc *lidp;
	int			len = 0;
	int			diff_iovecs = 0;
	int			iclog_space;

	/*
	 * We can do this safely because the context can't checkpoint until we
	 * are done so it doesn't matter exactly how we update the CIL.
	 */
	xlog_cil_insert_format_items(log, tp, &len, &diff_iovecs);
	/*
	 * Now (re-)position everything modified at the tail of the CIL.
	 * We do this here so we only need to take the CIL lock once during
	 * the transaction commit.
	 */
	spin_lock(&cil->xc_cil_lock);
	list_for_each_entry(lidp, &tp->t_items, lid_trans) {
		struct xfs_log_item *lip = lidp->lid_item;

		/* Skip items which aren't dirty in this transaction. */
		if (!(lidp->lid_flags & XFS_LID_DIRTY))
			continue;

		list_move_tail(&lip->li_cil, &cil->xc_cil);
	}
	/* account for space used by new iovec headers */
	len += diff_iovecs * sizeof(xlog_op_header_t);
	ctx->nvecs += diff_iovecs;
	/* attach the transaction to the CIL if it has any busy extents */
	if (!list_empty(&tp->t_busy))
		list_splice_init(&tp->t_busy, &ctx->busy_extents);
	/*
	 * Now transfer enough transaction reservation to the context ticket
	 * for the checkpoint. The context ticket is special - the unit
	 * reservation has to grow as well as the current reservation as we
	 * steal from tickets so we can correctly determine the space used
	 * during the transaction commit.
	 */
	if (ctx->ticket->t_curr_res == 0) {
		ctx->ticket->t_curr_res = ctx->ticket->t_unit_res;
		tp->t_ticket->t_curr_res -= ctx->ticket->t_unit_res;
	}
	/* do we need space for more log record headers? */
	iclog_space = log->l_iclog_size - log->l_iclog_hsize;
	if (len > 0 && (ctx->space_used / iclog_space !=
				(ctx->space_used + len) / iclog_space)) {
		int hdrs;

		hdrs = (len + iclog_space - 1) / iclog_space;
		/* need to take into account split region headers, too */
		hdrs *= log->l_iclog_hsize + sizeof(struct xlog_op_header);
		ctx->ticket->t_unit_res += hdrs;
		ctx->ticket->t_curr_res += hdrs;
		tp->t_ticket->t_curr_res -= hdrs;
		ASSERT(tp->t_ticket->t_curr_res >= len);
	}
	tp->t_ticket->t_curr_res -= len;
	ctx->space_used += len;

	spin_unlock(&cil->xc_cil_lock);
}
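/*
 * Worked example of the accounting above (figures are illustrative only):
 * with a 32k iclog buffer and a 512 byte iclog header, iclog_space is
 * 32256 bytes. A commit that adds len = 70000 bytes and crosses an iclog
 * boundary needs hdrs = (70000 + 32256 - 1) / 32256 = 3 extra record
 * headers (plus an op header each), so that much space is added to the
 * context ticket's unit and current reservations and stolen from the
 * committing transaction's ticket, on top of the len bytes themselves.
 */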
static void
xlog_cil_free_logvec(
	struct xfs_log_vec	*log_vector)
{
	struct xfs_log_vec	*lv;

	for (lv = log_vector; lv; ) {
		struct xfs_log_vec *next = lv->lv_next;
		kmem_free(lv);
		lv = next;
	}
}
/*
 * Mark all items committed and clear busy extents. We free the log vector
 * chains in a separate pass so that we unpin the log items as quickly as
 * possible.
 */
static void
xlog_cil_committed(
	void	*args,
	int	abort)
{
	struct xfs_cil_ctx	*ctx = args;
	struct xfs_mount	*mp = ctx->cil->xc_log->l_mp;

	xfs_trans_committed_bulk(ctx->cil->xc_log->l_ailp, ctx->lv_chain,
					ctx->start_lsn, abort);
	xfs_extent_busy_sort(&ctx->busy_extents);
	xfs_extent_busy_clear(mp, &ctx->busy_extents,
			      (mp->m_flags & XFS_MOUNT_DISCARD) && !abort);
	spin_lock(&ctx->cil->xc_push_lock);
	list_del(&ctx->committing);
	spin_unlock(&ctx->cil->xc_push_lock);

	xlog_cil_free_logvec(ctx->lv_chain);
	if (!list_empty(&ctx->busy_extents)) {
		ASSERT(mp->m_flags & XFS_MOUNT_DISCARD);

		xfs_discard_extents(mp, &ctx->busy_extents);
		xfs_extent_busy_clear(mp, &ctx->busy_extents, false);
	}

	kmem_free(ctx);
}
/*
 * Push the Committed Item List to the log. If @push_seq flag is zero, then it
 * is a background flush and so we can choose to ignore it. Otherwise, if the
 * current sequence is the same as @push_seq we need to do a flush. If
 * @push_seq is less than the current sequence, then it has already been
 * flushed and we don't need to do anything - the caller will wait for it to
 * complete if necessary.
 *
 * @push_seq is a value rather than a flag because that allows us to do an
 * unlocked check of the sequence number for a match. Hence we can allow log
 * forces to run racily and not issue pushes for the same sequence twice. If we
 * get a race between multiple pushes for the same sequence they will block on
 * the first one and then abort, hence avoiding needless pushes.
 */
static int
xlog_cil_push(
	struct xlog		*log)
{
	struct xfs_cil		*cil = log->l_cilp;
	struct xfs_log_vec	*lv;
	struct xfs_cil_ctx	*ctx;
	struct xfs_cil_ctx	*new_ctx;
	struct xlog_in_core	*commit_iclog;
	struct xlog_ticket	*tic;
	int			num_iovecs = 0;
	int			error = 0;
	struct xfs_trans_header thdr;
	struct xfs_log_iovec	lhdr;
	struct xfs_log_vec	lvhdr = { NULL };
	xfs_lsn_t		commit_lsn;
	xfs_lsn_t		push_seq;
	new_ctx = kmem_zalloc(sizeof(*new_ctx), KM_SLEEP|KM_NOFS);
	new_ctx->ticket = xlog_cil_ticket_alloc(log);

	down_write(&cil->xc_ctx_lock);
	ctx = cil->xc_ctx;
	spin_lock(&cil->xc_push_lock);
	push_seq = cil->xc_push_seq;
	ASSERT(push_seq <= ctx->sequence);
	/*
	 * Check if we've anything to push. If there is nothing, then we don't
	 * move on to a new sequence number and so we have to be able to push
	 * this sequence again later.
	 */
	if (list_empty(&cil->xc_cil)) {
		cil->xc_push_seq = 0;
		spin_unlock(&cil->xc_push_lock);
		goto out_skip;
	}
	spin_unlock(&cil->xc_push_lock);
	/* check for a previously pushed sequence */
	if (push_seq < cil->xc_ctx->sequence)
		goto out_skip;
	/*
	 * pull all the log vectors off the items in the CIL, and
	 * remove the items from the CIL. We don't need the CIL lock
	 * here because it's only needed on the transaction commit
	 * side which is currently locked out by the flush lock.
	 */
	lv = NULL;
	while (!list_empty(&cil->xc_cil)) {
		struct xfs_log_item	*item;

		item = list_first_entry(&cil->xc_cil,
					struct xfs_log_item, li_cil);
		list_del_init(&item->li_cil);
		if (!ctx->lv_chain)
			ctx->lv_chain = item->li_lv;
		else
			lv->lv_next = item->li_lv;
		lv = item->li_lv;
		item->li_lv = NULL;
		num_iovecs += lv->lv_niovecs;
	}
	/*
	 * initialise the new context and attach it to the CIL. Then attach
	 * the current context to the CIL committing list so it can be found
	 * during log forces to extract the commit lsn of the sequence that
	 * needs to be forced.
	 */
	INIT_LIST_HEAD(&new_ctx->committing);
	INIT_LIST_HEAD(&new_ctx->busy_extents);
	new_ctx->sequence = ctx->sequence + 1;
	new_ctx->cil = cil;
	cil->xc_ctx = new_ctx;
	/*
	 * mirror the new sequence into the cil structure so that we can do
	 * unlocked checks against the current sequence in log forces without
	 * risking dereferencing a freed context pointer.
	 */
	cil->xc_current_sequence = new_ctx->sequence;
	/*
	 * The switch is now done, so we can drop the context lock and move out
	 * of a shared context. We can't just go straight to the commit record,
	 * though - we need to synchronise with previous and future commits so
	 * that the commit records are correctly ordered in the log to ensure
	 * that we process items during log IO completion in the correct order.
	 *
	 * For example, if we get an EFI in one checkpoint and the EFD in the
	 * next (e.g. due to log forces), we do not want the checkpoint with
	 * the EFD to be committed before the checkpoint with the EFI. Hence
	 * we must strictly order the commit records of the checkpoints so
	 * that: a) the checkpoint callbacks are attached to the iclogs in the
	 * correct order; and b) the checkpoints are replayed in the correct
	 * order in log recovery.
	 *
	 * Hence we need to add this context to the committing context list so
	 * that higher sequences will wait for us to write out a commit record
	 * before they do.
	 */
	spin_lock(&cil->xc_push_lock);
	list_add(&ctx->committing, &cil->xc_committing);
	spin_unlock(&cil->xc_push_lock);
	up_write(&cil->xc_ctx_lock);
	/*
	 * Build a checkpoint transaction header and write it to the log to
	 * begin the transaction. We need to account for the space used by the
	 * transaction header here as it is not accounted for in xlog_write().
	 *
	 * The LSN we need to pass to the log items on transaction commit is
	 * the LSN reported by the first log vector write. If we use the commit
	 * record lsn then we can move the tail beyond the grant write head.
	 */
	tic = ctx->ticket;
	thdr.th_magic = XFS_TRANS_HEADER_MAGIC;
	thdr.th_type = XFS_TRANS_CHECKPOINT;
	thdr.th_tid = tic->t_tid;
	thdr.th_num_items = num_iovecs;

	lhdr.i_addr = &thdr;
	lhdr.i_len = sizeof(xfs_trans_header_t);
	lhdr.i_type = XLOG_REG_TYPE_TRANSHDR;
	tic->t_curr_res -= lhdr.i_len + sizeof(xlog_op_header_t);
	lvhdr.lv_niovecs = 1;
	lvhdr.lv_iovecp = &lhdr;
	lvhdr.lv_next = ctx->lv_chain;
	error = xlog_write(log, &lvhdr, tic, &ctx->start_lsn, NULL, 0);
	if (error)
		goto out_abort_free_ticket;
	/*
	 * now that we've written the checkpoint into the log, strictly
	 * order the commit records so replay will get them in the right order.
	 */
restart:
	spin_lock(&cil->xc_push_lock);
	list_for_each_entry(new_ctx, &cil->xc_committing, committing) {
		/*
		 * Higher sequences will wait for this one so skip them.
		 * Don't wait for our own sequence, either.
		 */
		if (new_ctx->sequence >= ctx->sequence)
			continue;
		if (!new_ctx->commit_lsn) {
			/*
			 * It is still being pushed! Wait for the push to
			 * complete, then start again from the beginning.
			 */
			xlog_wait(&cil->xc_commit_wait, &cil->xc_push_lock);
			goto restart;
		}
	}
	spin_unlock(&cil->xc_push_lock);
	/* xfs_log_done always frees the ticket on error. */
	commit_lsn = xfs_log_done(log->l_mp, tic, &commit_iclog, 0);
	if (commit_lsn == -1)
		goto out_abort;
	/* attach all the transactions w/ busy extents to iclog */
	ctx->log_cb.cb_func = xlog_cil_committed;
	ctx->log_cb.cb_arg = ctx;
	error = xfs_log_notify(log->l_mp, commit_iclog, &ctx->log_cb);
	if (error)
		goto out_abort;
	/*
	 * now the checkpoint commit is complete and we've attached the
	 * callbacks to the iclog we can assign the commit LSN to the context
	 * and wake up anyone who is waiting for the commit to complete.
	 */
	spin_lock(&cil->xc_push_lock);
	ctx->commit_lsn = commit_lsn;
	wake_up_all(&cil->xc_commit_wait);
	spin_unlock(&cil->xc_push_lock);
	/* release the hounds! */
	return xfs_log_release_iclog(log->l_mp, commit_iclog);
out_skip:
	up_write(&cil->xc_ctx_lock);
	xfs_log_ticket_put(new_ctx->ticket);
	kmem_free(new_ctx);
	return 0;

out_abort_free_ticket:
	xfs_log_ticket_put(tic);
out_abort:
	xlog_cil_committed(ctx, XFS_LI_ABORTED);
	return XFS_ERROR(EIO);
}
static void
xlog_cil_push_work(
	struct work_struct	*work)
{
	struct xfs_cil		*cil = container_of(work, struct xfs_cil,
							xc_push_work);
	xlog_cil_push(cil->xc_log);
}
/*
 * We need to push CIL every so often so we don't cache more than we can fit in
 * the log. The limit really is that a checkpoint can't be more than half the
 * log (the current checkpoint is not allowed to overwrite the previous
 * checkpoint), but commit latency and memory usage limit this to a smaller
 * size.
 */
static void
xlog_cil_push_background(
	struct xlog	*log)
{
	struct xfs_cil	*cil = log->l_cilp;

	/*
	 * The cil won't be empty because we are called while holding the
	 * context lock so whatever we added to the CIL will still be there.
	 */
	ASSERT(!list_empty(&cil->xc_cil));
	/*
	 * don't do a background push if we haven't used up all the
	 * space available yet.
	 */
	if (cil->xc_ctx->space_used < XLOG_CIL_SPACE_LIMIT(log))
		return;
	spin_lock(&cil->xc_push_lock);
	if (cil->xc_push_seq < cil->xc_current_sequence) {
		cil->xc_push_seq = cil->xc_current_sequence;
		queue_work(log->l_mp->m_cil_workqueue, &cil->xc_push_work);
	}
	spin_unlock(&cil->xc_push_lock);
}
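/*
 * Note on the threshold above: XLOG_CIL_SPACE_LIMIT() is defined in
 * xfs_log_priv.h and is assumed here to be a small fixed fraction of the
 * log size (an eighth of the log in kernels of this vintage), so background
 * pushes are queued long before a checkpoint could approach the "no more
 * than half the log" hard limit described above.
 */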
static void
xlog_cil_push_foreground(
	struct xlog	*log,
	xfs_lsn_t	push_seq)
{
	struct xfs_cil	*cil = log->l_cilp;

	ASSERT(push_seq && push_seq <= cil->xc_current_sequence);
	/* start on any pending background push to minimise wait time on it */
	flush_work(&cil->xc_push_work);
	/*
	 * If the CIL is empty or we've already pushed the sequence then
	 * there's no work we need to do.
	 */
	spin_lock(&cil->xc_push_lock);
	if (list_empty(&cil->xc_cil) || push_seq <= cil->xc_push_seq) {
		spin_unlock(&cil->xc_push_lock);
		return;
	}
	cil->xc_push_seq = push_seq;
	spin_unlock(&cil->xc_push_lock);

	/* do the push now */
	xlog_cil_push(log);
}
bool
xlog_cil_empty(
	struct xlog	*log)
{
	struct xfs_cil	*cil = log->l_cilp;
	bool		empty = false;

	spin_lock(&cil->xc_push_lock);
	if (list_empty(&cil->xc_cil))
		empty = true;
	spin_unlock(&cil->xc_push_lock);
	return empty;
}
/*
 * Commit a transaction with the given vector to the Committed Item List.
 *
 * To do this, we need to format the item, pin it in memory if required and
 * account for the space used by the transaction. Once we have done that we
 * need to release the unused reservation for the transaction, attach the
 * transaction to the checkpoint context so we carry the busy extents through
 * to checkpoint completion, and then unlock all the items in the transaction.
 *
 * Called with the context lock already held in read mode to lock out
 * background commit, returns without it held once background commits are
 * allowed again.
 */
void
xfs_log_commit_cil(
	struct xfs_mount	*mp,
	struct xfs_trans	*tp,
	xfs_lsn_t		*commit_lsn,
	int			flags)
{
	struct xlog		*log = mp->m_log;
	struct xfs_cil		*cil = log->l_cilp;
	int			log_flags = 0;

	if (flags & XFS_TRANS_RELEASE_LOG_RES)
		log_flags = XFS_LOG_REL_PERM_RESERV;
	/* lock out background commit */
	down_read(&cil->xc_ctx_lock);

	xlog_cil_insert_items(log, tp);
	/* check we didn't blow the reservation */
	if (tp->t_ticket->t_curr_res < 0)
		xlog_print_tic_res(mp, tp->t_ticket);
	tp->t_commit_lsn = cil->xc_ctx->sequence;
	if (commit_lsn)
		*commit_lsn = tp->t_commit_lsn;

	xfs_log_done(mp, tp->t_ticket, NULL, log_flags);
	xfs_trans_unreserve_and_mod_sb(tp);
	/*
	 * Once all the items of the transaction have been copied to the CIL,
	 * the items can be unlocked and freed.
	 *
	 * This needs to be done before we drop the CIL context lock because we
	 * have to update state in the log items and unlock them before they go
	 * to disk. If we don't, then the CIL checkpoint can race with us and
	 * we can run checkpoint completion before we've updated and unlocked
	 * the log items. This affects (at least) processing of stale buffers,
	 * inodes and EFIs.
	 */
	xfs_trans_free_items(tp, tp->t_commit_lsn, 0);

	xlog_cil_push_background(log);

	up_read(&cil->xc_ctx_lock);
}
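/*
 * Usage sketch (hypothetical caller, for illustration): the transaction
 * commit path hands a finished transaction to the CIL roughly as follows,
 * with xfs_trans_commit() being the real entry point:
 *
 *	xfs_lsn_t	commit_lsn;
 *
 *	xfs_log_commit_cil(mp, tp, &commit_lsn, flags);
 *
 * On return the items have been formatted into the CIL and unlocked, the
 * unused log reservation has been released, and a background CIL push may
 * have been queued if the CIL has grown past its space limit.
 */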
/*
 * Conditionally push the CIL based on the sequence passed in.
 *
 * We only need to push if we haven't already pushed the sequence
 * number given. Hence the only time we will trigger a push here is
 * if the push sequence is the same as the current context.
 *
 * We return the current commit lsn to allow the callers to determine if an
 * iclog flush is necessary following this call.
 */
xfs_lsn_t
xlog_cil_force_lsn(
	struct xlog	*log,
	xfs_lsn_t	sequence)
{
	struct xfs_cil		*cil = log->l_cilp;
	struct xfs_cil_ctx	*ctx;
	xfs_lsn_t		commit_lsn = NULLCOMMITLSN;

	ASSERT(sequence <= cil->xc_current_sequence);
	/*
	 * check to see if we need to force out the current context.
	 * xlog_cil_push() handles racing pushes for the same sequence,
	 * so no need to deal with it here.
	 */
	xlog_cil_push_foreground(log, sequence);
	/*
	 * See if we can find a previous sequence still committing.
	 * We need to wait for all previous sequence commits to complete
	 * before allowing the force of push_seq to go ahead. Hence block
	 * on commits for those as well.
	 */
restart:
	spin_lock(&cil->xc_push_lock);
	list_for_each_entry(ctx, &cil->xc_committing, committing) {
		if (ctx->sequence > sequence)
			continue;
		if (!ctx->commit_lsn) {
			/*
			 * It is still being pushed! Wait for the push to
			 * complete, then start again from the beginning.
			 */
			xlog_wait(&cil->xc_commit_wait, &cil->xc_push_lock);
			goto restart;
		}
		if (ctx->sequence != sequence)
			continue;
		commit_lsn = ctx->commit_lsn;
	}
	spin_unlock(&cil->xc_push_lock);
	return commit_lsn;
}
/*
 * Check if the current log item was first committed in this sequence.
 * We can't rely on just the log item being in the CIL, we have to check
 * the recorded commit sequence number.
 *
 * Note: for this to be used in a non-racy manner, it has to be called with
 * CIL flushing locked out. As a result, it should only be used during the
 * transaction commit process when deciding what to format into the item.
 */
bool
xfs_log_item_in_current_chkpt(
	struct xfs_log_item	*lip)
{
	struct xfs_cil_ctx	*ctx;

	if (list_empty(&lip->li_cil))
		return false;

	ctx = lip->li_mountp->m_log->l_cilp->xc_ctx;
	/*
	 * li_seq is written on the first commit of a log item to record the
	 * first checkpoint it is written to. Hence if it is different to the
	 * current sequence, we're in a new checkpoint.
	 */
	if (XFS_LSN_CMP(lip->li_seq, ctx->sequence) != 0)
		return false;
	return true;
}
/*
 * Perform initial CIL structure initialisation.
 */
int
xlog_cil_init(
	struct xlog	*log)
{
	struct xfs_cil		*cil;
	struct xfs_cil_ctx	*ctx;

	cil = kmem_zalloc(sizeof(*cil), KM_SLEEP|KM_MAYFAIL);
	if (!cil)
		return ENOMEM;

	ctx = kmem_zalloc(sizeof(*ctx), KM_SLEEP|KM_MAYFAIL);
	if (!ctx) {
		kmem_free(cil);
		return ENOMEM;
	}
	INIT_WORK(&cil->xc_push_work, xlog_cil_push_work);
	INIT_LIST_HEAD(&cil->xc_cil);
	INIT_LIST_HEAD(&cil->xc_committing);
	spin_lock_init(&cil->xc_cil_lock);
	spin_lock_init(&cil->xc_push_lock);
	init_rwsem(&cil->xc_ctx_lock);
	init_waitqueue_head(&cil->xc_commit_wait);
	INIT_LIST_HEAD(&ctx->committing);
	INIT_LIST_HEAD(&ctx->busy_extents);
	ctx->sequence = 1;
	ctx->cil = cil;
	cil->xc_ctx = ctx;
	cil->xc_current_sequence = ctx->sequence;

	cil->xc_log = log;
	log->l_cilp = cil;
	return 0;
}
void
xlog_cil_destroy(
	struct xlog	*log)
{
	if (log->l_cilp->xc_ctx) {
		if (log->l_cilp->xc_ctx->ticket)
			xfs_log_ticket_put(log->l_cilp->xc_ctx->ticket);
		kmem_free(log->l_cilp->xc_ctx);
	}
	ASSERT(list_empty(&log->l_cilp->xc_cil));
	kmem_free(log->l_cilp);
}