// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2010 Red Hat, Inc. All Rights Reserved.
 */

#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_shared.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_extent_busy.h"
#include "xfs_trans.h"
#include "xfs_trans_priv.h"
#include "xfs_log.h"
#include "xfs_log_priv.h"
#include "xfs_trace.h"
#include "xfs_discard.h"

/*
 * Allocate a new ticket. Failing to get a new ticket makes it really hard to
 * recover, so we don't allow failure here. Also, we allocate in a context that
 * we don't want to be issuing transactions from, so we need to tell the
 * allocation code this as well.
 *
 * We don't reserve any space for the ticket - we are going to steal whatever
 * space we require from transactions as they commit. To ensure we reserve all
 * the space required, we need to set the current reservation of the ticket to
 * zero so that we know to steal the initial transaction overhead from the
 * first transaction commit.
 */
static struct xlog_ticket *
xlog_cil_ticket_alloc(
        struct xlog     *log)
{
        struct xlog_ticket *tic;

        tic = xlog_ticket_alloc(log, 0, 1, 0);

        /*
         * set the current reservation to zero so we know to steal the basic
         * transaction overhead reservation from the first transaction commit.
         */
        tic->t_curr_res = 0;
        tic->t_iclog_hdrs = 0;
        return tic;
}

static inline void
xlog_cil_set_iclog_hdr_count(struct xfs_cil *cil)
{
        struct xlog     *log = cil->xc_log;

        atomic_set(&cil->xc_iclog_hdrs,
                   (XLOG_CIL_BLOCKING_SPACE_LIMIT(log) /
                        (log->l_iclog_size - log->l_iclog_hsize)));
}
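
/*
 * Worked example (illustrative only, hypothetical geometry): with 32kB
 * iclogs, a 512 byte iclog header and an 8MB blocking space limit, the
 * calculation above seeds xc_iclog_hdrs with roughly
 *
 *      8MB / (32768 - 512) ~= 260
 *
 * headers - one for every iclog that a blocking-limit-sized checkpoint
 * could possibly span.
 */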

/*
 * Check if the current log item was first committed in this sequence.
 * We can't rely on just the log item being in the CIL, we have to check
 * the recorded commit sequence number.
 *
 * Note: for this to be used in a non-racy manner, it has to be called with
 * CIL flushing locked out. As a result, it should only be used during the
 * transaction commit process when deciding what to format into the item.
 */
static bool
xlog_item_in_current_chkpt(
        struct xfs_cil          *cil,
        struct xfs_log_item     *lip)
{
        if (test_bit(XLOG_CIL_EMPTY, &cil->xc_flags))
                return false;

        /*
         * li_seq is written on the first commit of a log item to record the
         * first checkpoint it is written to. Hence if it is different to the
         * current sequence, we're in a new checkpoint.
         */
        return lip->li_seq == READ_ONCE(cil->xc_current_sequence);
}

bool
xfs_log_item_in_current_chkpt(
        struct xfs_log_item *lip)
{
        return xlog_item_in_current_chkpt(lip->li_log->l_cilp, lip);
}
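
/*
 * Usage sketch: xlog_cil_process_intents() below uses the internal variant to
 * decide whether an intent/intent-done pair falls entirely within the current
 * checkpoint and can therefore be elided from the journal with a whiteout.
 */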

/*
 * Unavoidable forward declaration - xlog_cil_push_work() calls
 * xlog_cil_ctx_alloc() itself.
 */
static void xlog_cil_push_work(struct work_struct *work);

static struct xfs_cil_ctx *
xlog_cil_ctx_alloc(void)
{
        struct xfs_cil_ctx      *ctx;

        ctx = kzalloc(sizeof(*ctx), GFP_KERNEL | __GFP_NOFAIL);
        INIT_LIST_HEAD(&ctx->committing);
        INIT_LIST_HEAD(&ctx->busy_extents.extent_list);
        INIT_LIST_HEAD(&ctx->log_items);
        INIT_LIST_HEAD(&ctx->lv_chain);
        INIT_WORK(&ctx->push_work, xlog_cil_push_work);
        return ctx;
}

/*
 * Aggregate the CIL per cpu structures into global counts, lists, etc and
 * clear the percpu state ready for the next context to use. This is called
 * from the push code with the context lock held exclusively, hence nothing else
 * will be accessing or modifying the per-cpu counters.
 */
static void
xlog_cil_push_pcp_aggregate(
        struct xfs_cil          *cil,
        struct xfs_cil_ctx      *ctx)
{
        struct xlog_cil_pcp     *cilpcp;
        int                     cpu;

        for_each_cpu(cpu, &ctx->cil_pcpmask) {
                cilpcp = per_cpu_ptr(cil->xc_pcp, cpu);

                ctx->ticket->t_curr_res += cilpcp->space_reserved;
                cilpcp->space_reserved = 0;

                if (!list_empty(&cilpcp->busy_extents)) {
                        list_splice_init(&cilpcp->busy_extents,
                                        &ctx->busy_extents.extent_list);
                }
                if (!list_empty(&cilpcp->log_items))
                        list_splice_init(&cilpcp->log_items, &ctx->log_items);

                /*
                 * We're in the middle of switching cil contexts. Reset the
                 * counter we use to detect when the current context is nearing
                 * full.
                 */
                cilpcp->space_used = 0;
        }
}

/*
 * Aggregate the CIL per-cpu space used counters into the global atomic value.
 * This is called when the per-cpu counter aggregation will first pass the soft
 * limit threshold so we can switch to atomic counter aggregation for accurate
 * detection of hard limit traversal.
 */
static void
xlog_cil_insert_pcp_aggregate(
        struct xfs_cil          *cil,
        struct xfs_cil_ctx      *ctx)
{
        int                     cpu;
        int                     count = 0;

        /* Trigger atomic updates then aggregate only for the first caller */
        if (!test_and_clear_bit(XLOG_CIL_PCP_SPACE, &cil->xc_flags))
                return;

        /*
         * We can race with other cpus setting cil_pcpmask. However, we've
         * atomically cleared PCP_SPACE which forces other threads to add to
         * the global space used count. cil_pcpmask is a superset of cilpcp
         * structures that could have a nonzero space_used.
         */
        for_each_cpu(cpu, &ctx->cil_pcpmask) {
                struct xlog_cil_pcp     *cilpcp = per_cpu_ptr(cil->xc_pcp, cpu);

                count += xchg(&cilpcp->space_used, 0);
        }
        atomic_add(count, &ctx->space_used);
}

static void
xlog_cil_ctx_switch(
        struct xfs_cil          *cil,
        struct xfs_cil_ctx      *ctx)
{
        xlog_cil_set_iclog_hdr_count(cil);
        set_bit(XLOG_CIL_EMPTY, &cil->xc_flags);
        set_bit(XLOG_CIL_PCP_SPACE, &cil->xc_flags);
        ctx->sequence = ++cil->xc_current_sequence;
        ctx->cil = cil;
        cil->xc_ctx = ctx;
}

/*
 * After the first stage of log recovery is done, we know where the head and
 * tail of the log are. We need this log initialisation done before we can
 * initialise the first CIL checkpoint context.
 *
 * Here we allocate a log ticket to track space usage during a CIL push. This
 * ticket is passed to xlog_write() directly so that we don't slowly leak log
 * space by failing to account for space used by log headers and additional
 * region headers for split regions.
 */
void
xlog_cil_init_post_recovery(
        struct xlog     *log)
{
        log->l_cilp->xc_ctx->ticket = xlog_cil_ticket_alloc(log);
        log->l_cilp->xc_ctx->sequence = 1;
        xlog_cil_set_iclog_hdr_count(log->l_cilp);
}

static inline int
xlog_cil_iovec_space(
        uint    niovecs)
{
        return round_up((sizeof(struct xfs_log_vec) +
                                        niovecs * sizeof(struct xfs_log_iovec)),
                        sizeof(uint64_t));
}
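
/*
 * Worked example (illustrative, hypothetical item): for niovecs = 2 this
 * returns
 *
 *      round_up(sizeof(struct xfs_log_vec) +
 *               2 * sizeof(struct xfs_log_iovec), sizeof(uint64_t))
 *
 * so the data region placed after the iovec array always starts on a
 * 64-bit boundary.
 */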

/*
 * Allocate or pin log vector buffers for CIL insertion.
 *
 * The CIL currently uses disposable buffers for copying a snapshot of the
 * modified items into the log during a push. The biggest problem with this is
 * the requirement to allocate the disposable buffer during the commit if:
 *      a) does not exist; or
 *      b) it is too small
 *
 * If we do this allocation within xlog_cil_insert_format_items(), it is done
 * under the xc_ctx_lock, which means that a CIL push cannot occur during
 * the memory allocation. This means that we have a potential deadlock situation
 * under low memory conditions when we have lots of dirty metadata pinned in
 * the CIL and we need a CIL commit to occur to free memory.
 *
 * To avoid this, we need to move the memory allocation outside the
 * xc_ctx_lock, but because the log vector buffers are disposable, that opens
 * up a TOCTOU race condition w.r.t. the CIL committing and removing the log
 * vector buffers between the check and the formatting of the item into the
 * log vector buffer within the xc_ctx_lock.
 *
 * Because the log vector buffer needs to be unchanged during the CIL push
 * process, we cannot share the buffer between the transaction commit (which
 * modifies the buffer) and the CIL push context that is writing the changes
 * into the log. This means skipping preallocation of buffer space is
 * unreliable, but we most definitely do not want to be allocating and freeing
 * buffers unnecessarily during commits when overwrites can be done safely.
 *
 * The simplest solution to this problem is to allocate a shadow buffer when a
 * log item is committed for the second time, and then to only use this buffer
 * if necessary. The buffer can remain attached to the log item until such time
 * as it is needed, and this is the buffer that is reallocated to match the size
 * of the incoming modification. Then during the formatting of the item we can
 * swap the active buffer with the new one if we can't reuse the existing
 * buffer. We don't free the old buffer as it may be reused on the next
 * modification if its size is right, otherwise we'll free and reallocate it at
 * that point.
 *
 * This function builds a vector for the changes in each log item in the
 * transaction. It then works out the length of the buffer needed for each log
 * item, allocates them and attaches the vector to the log item in preparation
 * for the formatting step which occurs under the xc_ctx_lock.
 *
 * While this means the memory footprint goes up, it avoids the repeated
 * alloc/free pattern that repeated modifications of an item would otherwise
 * cause, and hence minimises the CPU overhead of such behaviour.
 */

static void
xlog_cil_alloc_shadow_bufs(
        struct xlog             *log,
        struct xfs_trans        *tp)
{
        struct xfs_log_item     *lip;

        list_for_each_entry(lip, &tp->t_items, li_trans) {
                struct xfs_log_vec *lv;
                int     niovecs = 0;
                int     nbytes = 0;
                int     buf_size;
                bool    ordered = false;

                /* Skip items which aren't dirty in this transaction. */
                if (!test_bit(XFS_LI_DIRTY, &lip->li_flags))
                        continue;

                /* get number of vecs and size of data to be stored */
                lip->li_ops->iop_size(lip, &niovecs, &nbytes);

                /*
                 * Ordered items need to be tracked but we do not wish to write
                 * them. We need a logvec to track the object, but we do not
                 * need an iovec or buffer to be allocated for copying data.
                 */
                if (niovecs == XFS_LOG_VEC_ORDERED) {
                        ordered = true;
                        /* an ordered item still needs one tracking iovec */
                        niovecs = 1;
                        nbytes = 0;
                }

                /*
                 * We 64-bit align the length of each iovec so that the start of
                 * the next one is naturally aligned. We'll need to account for
                 * that slack space here.
                 *
                 * We also add the xlog_op_header to each region when
                 * formatting, but that's not accounted to the size of the item
                 * at this point. Hence we'll need an additional number of bytes
                 * for each vector to hold an opheader.
                 *
                 * Then round nbytes up to 64-bit alignment so that the initial
                 * buffer alignment is easy to calculate and verify.
                 */
                nbytes += niovecs *
                        (sizeof(uint64_t) + sizeof(struct xlog_op_header));
                nbytes = round_up(nbytes, sizeof(uint64_t));

                /*
                 * The data buffer needs to start 64-bit aligned, so round up
                 * that space to ensure we can align it appropriately and not
                 * overrun the buffer.
                 */
                buf_size = nbytes + xlog_cil_iovec_space(niovecs);

                /*
                 * if we have no shadow buffer, or it is too small, we need to
                 * reallocate it.
                 */
                if (!lip->li_lv_shadow ||
                    buf_size > lip->li_lv_shadow->lv_size) {
                        /*
                         * We free and allocate here as a realloc would copy
                         * unnecessary data. We don't use kvzalloc() for the
                         * same reason - we don't need to zero the data area in
                         * the buffer, only the log vector header and the iovec
                         * storage.
                         */
                        kvfree(lip->li_lv_shadow);
                        lv = xlog_kvmalloc(buf_size);

                        memset(lv, 0, xlog_cil_iovec_space(niovecs));

                        INIT_LIST_HEAD(&lv->lv_list);
                        lv->lv_item = lip;
                        lv->lv_size = buf_size;
                        if (ordered)
                                lv->lv_buf_len = XFS_LOG_VEC_ORDERED;
                        else
                                lv->lv_iovecp = (struct xfs_log_iovec *)&lv[1];
                        lip->li_lv_shadow = lv;
                } else {
                        /* same or smaller, optimise common overwrite case */
                        lv = lip->li_lv_shadow;
                        if (ordered)
                                lv->lv_buf_len = XFS_LOG_VEC_ORDERED;
                        else
                                lv->lv_buf_len = 0;
                        lv->lv_bytes = 0;
                }

                /* Ensure the lv is set up according to ->iop_size */
                lv->lv_niovecs = niovecs;

                /* The allocated data region lies beyond the iovec region */
                lv->lv_buf = (char *)lv + xlog_cil_iovec_space(niovecs);
        }
}
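
/*
 * Sizing sketch (illustrative, hypothetical numbers): an item reporting
 * niovecs = 2 and nbytes = 200 from ->iop_size ends up with
 *
 *      nbytes  += 2 * (sizeof(uint64_t) + sizeof(struct xlog_op_header));
 *      nbytes   = round_up(nbytes, sizeof(uint64_t));
 *      buf_size = nbytes + xlog_cil_iovec_space(2);
 *
 * The shadow buffer is only reallocated when buf_size grows beyond the
 * current lv_size, so repeated same-sized modifications reuse the same
 * allocation.
 */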

/*
 * Prepare the log item for insertion into the CIL. Calculate the difference in
 * log space it will consume, and if it is a new item pin it as well.
 */
static void
xfs_cil_prepare_item(
        struct xlog             *log,
        struct xfs_log_vec      *lv,
        struct xfs_log_vec      *old_lv,
        int                     *diff_len)
{
        /* Account for the new LV being passed in */
        if (lv->lv_buf_len != XFS_LOG_VEC_ORDERED)
                *diff_len += lv->lv_bytes;

        /*
         * If there is no old LV, this is the first time we've seen the item in
         * this CIL context and so we need to pin it. If we are replacing the
         * old_lv, then remove the space it accounts for and make it the shadow
         * buffer for later freeing. In both cases we are now switching to the
         * shadow buffer, so update the pointer to it appropriately.
         */
        if (!old_lv) {
                if (lv->lv_item->li_ops->iop_pin)
                        lv->lv_item->li_ops->iop_pin(lv->lv_item);
                lv->lv_item->li_lv_shadow = NULL;
        } else if (old_lv != lv) {
                ASSERT(lv->lv_buf_len != XFS_LOG_VEC_ORDERED);

                *diff_len -= old_lv->lv_bytes;
                lv->lv_item->li_lv_shadow = old_lv;
        }

        /* attach new log vector to log item */
        lv->lv_item->li_lv = lv;

        /*
         * If this is the first time the item is being committed to the
         * CIL, store the sequence number on the log item so we can
         * tell in future commits whether this is the first checkpoint
         * the item is being committed into.
         */
        if (!lv->lv_item->li_seq)
                lv->lv_item->li_seq = log->l_cilp->xc_ctx->sequence;
}

/*
 * Format log item into a flat buffer.
 *
 * For delayed logging, we need to hold a formatted buffer containing all the
 * changes on the log item. This enables us to relog the item in memory and
 * write it out asynchronously without needing to relock the object that was
 * modified at the time it gets written into the iclog.
 *
 * This function takes the prepared log vectors attached to each log item, and
 * formats the changes into the log vector buffer. The buffer it uses is
 * dependent on the current state of the vector in the CIL - the shadow lv is
 * guaranteed to be large enough for the current modification, but we will only
 * use that if we can't reuse the existing lv. If we can't reuse the existing
 * lv, then simply swap it out for the shadow lv. We don't free it - that is
 * done lazily either by the next modification or the freeing of the log item.
 *
 * We don't set up region headers during this process; we simply copy the
 * regions into the flat buffer. We can do this because we still have to do a
 * formatting step to write the regions into the iclog buffer. Writing the
 * ophdrs during the iclog write means that we can support splitting large
 * regions across iclog boundaries without needing a change in the format of the
 * item/region encapsulation.
 *
 * Hence what we need to do now is rewrite the vector array to point to the
 * copied region inside the buffer we just allocated. This allows us to format
 * the regions into the iclog as though they are being formatted directly out
 * of the objects themselves.
 */

static void
xlog_cil_insert_format_items(
        struct xlog             *log,
        struct xfs_trans        *tp,
        int                     *diff_len)
{
        struct xfs_log_item     *lip;

        /* Bail out if we didn't find a log item. */
        if (list_empty(&tp->t_items)) {
                ASSERT(0);
                return;
        }

        list_for_each_entry(lip, &tp->t_items, li_trans) {
                struct xfs_log_vec *lv;
                struct xfs_log_vec *old_lv = NULL;
                struct xfs_log_vec *shadow;
                bool    ordered = false;

                /* Skip items which aren't dirty in this transaction. */
                if (!test_bit(XFS_LI_DIRTY, &lip->li_flags))
                        continue;

                /*
                 * The formatting size information is already attached to
                 * the shadow lv on the log item.
                 */
                shadow = lip->li_lv_shadow;
                if (shadow->lv_buf_len == XFS_LOG_VEC_ORDERED)
                        ordered = true;

                /* Skip items that do not have any vectors for writing */
                if (!shadow->lv_niovecs && !ordered)
                        continue;

                /* compare to existing item size */
                old_lv = lip->li_lv;
                if (lip->li_lv && shadow->lv_size <= lip->li_lv->lv_size) {
                        /* same or smaller, optimise common overwrite case */
                        lv = lip->li_lv;

                        if (ordered)
                                goto insert;

                        /*
                         * set the item up as though it is a new insertion so
                         * that the space reservation accounting is correct.
                         */
                        *diff_len -= lv->lv_bytes;

                        /* Ensure the lv is set up according to ->iop_size */
                        lv->lv_niovecs = shadow->lv_niovecs;

                        /* reset the lv buffer information for new formatting */
                        lv->lv_buf_len = 0;
                        lv->lv_bytes = 0;
                        lv->lv_buf = (char *)lv +
                                        xlog_cil_iovec_space(lv->lv_niovecs);
                } else {
                        /* switch to shadow buffer! */
                        lv = shadow;
                        if (ordered) {
                                /* track as an ordered logvec */
                                ASSERT(lip->li_lv == NULL);
                                goto insert;
                        }
                }

                ASSERT(IS_ALIGNED((unsigned long)lv->lv_buf, sizeof(uint64_t)));
                lip->li_ops->iop_format(lip, lv);
insert:
                xfs_cil_prepare_item(log, lv, old_lv, diff_len);
        }
}

/*
 * The use of lockless waitqueue_active() requires that the caller has
 * serialised itself against the wakeup call in xlog_cil_push_work(). That
 * can be done by either holding the push lock or the context lock.
 */
static inline bool
xlog_cil_over_hard_limit(
        struct xlog     *log,
        int32_t         space_used)
{
        if (waitqueue_active(&log->l_cilp->xc_push_wait))
                return true;
        if (space_used >= XLOG_CIL_BLOCKING_SPACE_LIMIT(log))
                return true;
        return false;
}

/*
 * Insert the log items into the CIL and calculate the difference in space
 * consumed by the item. Add the space to the checkpoint ticket and calculate
 * if the change requires additional log metadata. If it does, take that space
 * as well. Remove the amount of space we added to the checkpoint ticket from
 * the current transaction ticket so that the accounting works out correctly.
 */
static void
xlog_cil_insert_items(
        struct xlog             *log,
        struct xfs_trans        *tp,
        uint32_t                released_space)
{
        struct xfs_cil          *cil = log->l_cilp;
        struct xfs_cil_ctx      *ctx = cil->xc_ctx;
        struct xfs_log_item     *lip;
        int                     len = 0;
        int                     iovhdr_res = 0, split_res = 0, ctx_res = 0;
        int                     space_used;
        int                     order;
        unsigned int            cpu_nr;
        struct xlog_cil_pcp     *cilpcp;

        ASSERT(tp);

        /*
         * We can do this safely because the context can't checkpoint until we
         * are done so it doesn't matter exactly how we update the CIL.
         */
        xlog_cil_insert_format_items(log, tp, &len);

        /*
         * Subtract the space released by intent cancelation from the space we
         * consumed so that we remove it from the CIL space and add it back to
         * the current transaction reservation context.
         */
        len -= released_space;

        /*
         * Grab the per-cpu pointer for the CIL before we start any accounting.
         * That ensures that we are running with pre-emption disabled and so we
         * can't be scheduled away between split sample/update operations that
         * are done without outside locking to serialise them.
         */
        cpu_nr = get_cpu();
        cilpcp = this_cpu_ptr(cil->xc_pcp);

        /* Tell the future push that there was work added by this CPU. */
        if (!cpumask_test_cpu(cpu_nr, &ctx->cil_pcpmask))
                cpumask_test_and_set_cpu(cpu_nr, &ctx->cil_pcpmask);

        /*
         * We need to take the CIL checkpoint unit reservation on the first
         * commit into the CIL. Test the XLOG_CIL_EMPTY bit first so we don't
         * unnecessarily do an atomic op in the fast path here. We can clear the
         * XLOG_CIL_EMPTY bit as we are under the xc_ctx_lock here and that
         * needs to be held exclusively to reset the XLOG_CIL_EMPTY bit.
         */
        if (test_bit(XLOG_CIL_EMPTY, &cil->xc_flags) &&
            test_and_clear_bit(XLOG_CIL_EMPTY, &cil->xc_flags))
                ctx_res = ctx->ticket->t_unit_res;

        /*
         * Check if we need to steal iclog headers. atomic_read() is not a
         * locked atomic operation, so we can check the value before we do any
         * real atomic ops in the fast path. If we've already taken the CIL unit
         * reservation from this commit, we've already got one iclog header
         * space reserved so we have to account for that otherwise we risk
         * overrunning the reservation on this ticket.
         *
         * If the CIL is already at the hard limit, we might need more header
         * space than originally reserved. So steal more header space from every
         * commit that occurs once we are over the hard limit to ensure the CIL
         * push won't run out of reservation space.
         *
         * This can steal more than we need, but that's OK.
         *
         * The cil->xc_ctx_lock provides the serialisation necessary for safely
         * calling xlog_cil_over_hard_limit() in this context.
         */
        space_used = atomic_read(&ctx->space_used) + cilpcp->space_used + len;
        if (atomic_read(&cil->xc_iclog_hdrs) > 0 ||
            xlog_cil_over_hard_limit(log, space_used)) {
                split_res = log->l_iclog_hsize +
                                        sizeof(struct xlog_op_header);
                if (ctx_res)
                        ctx_res += split_res * (tp->t_ticket->t_iclog_hdrs - 1);
                else
                        ctx_res = split_res * tp->t_ticket->t_iclog_hdrs;
                atomic_sub(tp->t_ticket->t_iclog_hdrs, &cil->xc_iclog_hdrs);
        }
        cilpcp->space_reserved += ctx_res;

        /*
         * Accurately account when over the soft limit, otherwise fold the
         * percpu count into the global count if over the per-cpu threshold.
         */
        if (!test_bit(XLOG_CIL_PCP_SPACE, &cil->xc_flags)) {
                atomic_add(len, &ctx->space_used);
        } else if (cilpcp->space_used + len >
                        (XLOG_CIL_SPACE_LIMIT(log) / num_online_cpus())) {
                space_used = atomic_add_return(cilpcp->space_used + len,
                                                &ctx->space_used);
                cilpcp->space_used = 0;

                /*
                 * If we just transitioned over the soft limit, we need to
                 * transition to the global atomic counter.
                 */
                if (space_used >= XLOG_CIL_SPACE_LIMIT(log))
                        xlog_cil_insert_pcp_aggregate(cil, ctx);
        } else {
                cilpcp->space_used += len;
        }
        /* attach the transaction to the CIL if it has any busy extents */
        if (!list_empty(&tp->t_busy))
                list_splice_init(&tp->t_busy, &cilpcp->busy_extents);

        /*
         * Now update the order of everything modified in the transaction
         * and insert items into the CIL if they aren't already there.
         * We do this here so we only need to take the CIL lock once during
         * the transaction commit.
         */
        order = atomic_inc_return(&ctx->order_id);
        list_for_each_entry(lip, &tp->t_items, li_trans) {
                /* Skip items which aren't dirty in this transaction. */
                if (!test_bit(XFS_LI_DIRTY, &lip->li_flags))
                        continue;

                lip->li_order_id = order;
                if (!list_empty(&lip->li_cil))
                        continue;
                list_add_tail(&lip->li_cil, &cilpcp->log_items);
        }
        put_cpu();

        /*
         * If we've overrun the reservation, dump the tx details before we move
         * the log items. Shutdown is imminent...
         */
        tp->t_ticket->t_curr_res -= ctx_res + len;
        if (WARN_ON(tp->t_ticket->t_curr_res < 0)) {
                xfs_warn(log->l_mp, "Transaction log reservation overrun:");
                xfs_warn(log->l_mp,
                         "  log items: %d bytes (iov hdrs: %d bytes)",
                         len, iovhdr_res);
                xfs_warn(log->l_mp, "  split region headers: %d bytes",
                         split_res);
                xfs_warn(log->l_mp, "  ctx ticket: %d bytes", ctx_res);
                xlog_print_trans(tp);
                xlog_force_shutdown(log, SHUTDOWN_LOG_IO_ERROR);
        }
}
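
/*
 * Accounting sketch (illustrative, hypothetical numbers): the first commit
 * into an empty CIL with len = 1000, t_unit_res = 4096 and t_iclog_hdrs = 2
 * steals
 *
 *      ctx_res = 4096 + split_res * (2 - 1);
 *      tp->t_ticket->t_curr_res -= ctx_res + 1000;
 *
 * from the committing transaction and credits the same ctx_res to the
 * checkpoint via cilpcp->space_reserved, so total reservation is conserved.
 */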

static void
xlog_cil_ail_insert_batch(
        struct xfs_ail          *ailp,
        struct xfs_ail_cursor   *cur,
        struct xfs_log_item     **log_items,
        int                     nr_items,
        xfs_lsn_t               commit_lsn)
{
        int                     i;

        spin_lock(&ailp->ail_lock);
        /* xfs_trans_ail_update_bulk drops ailp->ail_lock */
        xfs_trans_ail_update_bulk(ailp, cur, log_items, nr_items, commit_lsn);

        for (i = 0; i < nr_items; i++) {
                struct xfs_log_item *lip = log_items[i];

                if (lip->li_ops->iop_unpin)
                        lip->li_ops->iop_unpin(lip, 0);
        }
}

/*
 * Take the checkpoint's log vector chain of items and insert the attached log
 * items into the AIL. This uses bulk insertion techniques to minimise AIL lock
 * traffic.
 *
 * The AIL tracks log items via the start record LSN of the checkpoint,
 * not the commit record LSN. This is because we can pipeline multiple
 * checkpoints, and so the start record of checkpoint N+1 can be
 * written before the commit record of checkpoint N. i.e:
 *
 *      start N                 commit N
 *      +-------------+------------+----------------+
 *              start N+1                       commit N+1
 *
 * The tail of the log cannot be moved to the LSN of commit N when all
 * the items of that checkpoint are written back, because then the
 * start record for N+1 is no longer in the active portion of the log
 * and recovery will fail/corrupt the filesystem.
 *
 * Hence when all the log items in checkpoint N are written back, the
 * tail of the log must now only move as far forwards as the start LSN
 * of checkpoint N.
 *
 * If we are called with the aborted flag set, it is because a log write during
 * a CIL checkpoint commit has failed. In this case, all the items in the
 * checkpoint have already gone through iop_committed and iop_committing, which
 * means that checkpoint commit abort handling is treated exactly the same as an
 * iclog write error even though we haven't started any IO yet. Hence in this
 * case all we need to do is iop_committed processing, followed by an
 * iop_unpin(aborted) call.
 *
 * The AIL cursor is used to optimise the insert process. If commit_lsn is not
 * at the end of the AIL, the insert cursor avoids the need to walk the AIL to
 * find the insertion point on every xfs_log_item_batch_insert() call. This
 * saves a lot of needless list walking and is a net win, even though it
 * slightly increases the amount of AIL lock traffic to set it up and tear it
 * down.
 */
static void
xlog_cil_ail_insert(
        struct xfs_cil_ctx      *ctx,
        bool                    aborted)
{
#define LOG_ITEM_BATCH_SIZE     32
        struct xfs_ail          *ailp = ctx->cil->xc_log->l_ailp;
        struct xfs_log_item     *log_items[LOG_ITEM_BATCH_SIZE];
        struct xfs_log_vec      *lv;
        struct xfs_ail_cursor   cur;
        xfs_lsn_t               old_head;
        int                     i = 0;

        /*
         * Update the AIL head LSN with the commit record LSN of this
         * checkpoint. As iclogs are always completed in order, this should
         * always be the same (as iclogs can contain multiple commit records) or
         * higher LSN than the current head. We do this before insertion of the
         * items so that log space checks during insertion will reflect the
         * space that this checkpoint has already consumed. We call
         * xfs_ail_update_finish() so that tail space and space-based wakeups
         * will be recalculated appropriately.
         */
        ASSERT(XFS_LSN_CMP(ctx->commit_lsn, ailp->ail_head_lsn) >= 0 ||
                        aborted);
        spin_lock(&ailp->ail_lock);
        xfs_trans_ail_cursor_last(ailp, &cur, ctx->start_lsn);
        old_head = ailp->ail_head_lsn;
        ailp->ail_head_lsn = ctx->commit_lsn;
        /* xfs_ail_update_finish() drops the ail_lock */
        xfs_ail_update_finish(ailp, NULLCOMMITLSN);

        /*
         * We move the AIL head forwards to account for the space used in the
         * log before we remove that space from the grant heads. This prevents a
         * transient condition where reservation space appears to become
         * available on return, only for it to disappear again immediately as
         * the AIL head update accounts in the log tail space.
         */
        smp_wmb();      /* paired with smp_rmb in xlog_grant_space_left */
        xlog_grant_return_space(ailp->ail_log, old_head, ailp->ail_head_lsn);

        /* unpin all the log items */
        list_for_each_entry(lv, &ctx->lv_chain, lv_list) {
                struct xfs_log_item     *lip = lv->lv_item;
                xfs_lsn_t               item_lsn;

                if (aborted)
                        set_bit(XFS_LI_ABORTED, &lip->li_flags);

                if (lip->li_ops->flags & XFS_ITEM_RELEASE_WHEN_COMMITTED) {
                        lip->li_ops->iop_release(lip);
                        continue;
                }

                if (lip->li_ops->iop_committed)
                        item_lsn = lip->li_ops->iop_committed(lip,
                                        ctx->start_lsn);
                else
                        item_lsn = ctx->start_lsn;

                /* item_lsn of -1 means the item needs no further processing */
                if (XFS_LSN_CMP(item_lsn, (xfs_lsn_t)-1) == 0)
                        continue;

                /*
                 * if we are aborting the operation, no point in inserting the
                 * object into the AIL as we are in a shutdown situation.
                 */
                if (aborted) {
                        ASSERT(xlog_is_shutdown(ailp->ail_log));
                        if (lip->li_ops->iop_unpin)
                                lip->li_ops->iop_unpin(lip, 1);
                        continue;
                }

                if (item_lsn != ctx->start_lsn) {

                        /*
                         * Not a bulk update option due to unusual item_lsn.
                         * Push into AIL immediately, rechecking the lsn once
                         * we have the ail lock. Then unpin the item. This does
                         * not affect the AIL cursor the bulk insert path is
                         * using.
                         */
                        spin_lock(&ailp->ail_lock);
                        if (XFS_LSN_CMP(item_lsn, lip->li_lsn) > 0)
                                xfs_trans_ail_update(ailp, lip, item_lsn);
                        else
                                spin_unlock(&ailp->ail_lock);
                        if (lip->li_ops->iop_unpin)
                                lip->li_ops->iop_unpin(lip, 0);
                        continue;
                }

                /* Item is a candidate for bulk AIL insert. */
                log_items[i++] = lv->lv_item;
                if (i >= LOG_ITEM_BATCH_SIZE) {
                        xlog_cil_ail_insert_batch(ailp, &cur, log_items,
                                        LOG_ITEM_BATCH_SIZE, ctx->start_lsn);
                        i = 0;
                }
        }

        /* make sure we insert the remainder! */
        if (i)
                xlog_cil_ail_insert_batch(ailp, &cur, log_items, i,
                                ctx->start_lsn);

        spin_lock(&ailp->ail_lock);
        xfs_trans_ail_cursor_done(&cur);
        spin_unlock(&ailp->ail_lock);
}

static void
xlog_cil_free_logvec(
        struct list_head        *lv_chain)
{
        struct xfs_log_vec      *lv;

        while (!list_empty(lv_chain)) {
                lv = list_first_entry(lv_chain, struct xfs_log_vec, lv_list);
                list_del_init(&lv->lv_list);
                kvfree(lv);
        }
}

/*
 * Mark all items committed and clear busy extents. We free the log vector
 * chains in a separate pass so that we unpin the log items as quickly as
 * possible.
 */
static void
xlog_cil_committed(
        struct xfs_cil_ctx      *ctx)
{
        struct xfs_mount        *mp = ctx->cil->xc_log->l_mp;
        bool                    abort = xlog_is_shutdown(ctx->cil->xc_log);

        /*
         * If the I/O failed, we're aborting the commit and already shutdown.
         * Wake any commit waiters before aborting the log items so we don't
         * block async log pushers on callbacks. Async log pushers explicitly do
         * not wait on log force completion because they may be holding locks
         * required to unpin items.
         */
        if (abort) {
                spin_lock(&ctx->cil->xc_push_lock);
                wake_up_all(&ctx->cil->xc_start_wait);
                wake_up_all(&ctx->cil->xc_commit_wait);
                spin_unlock(&ctx->cil->xc_push_lock);
        }

        xlog_cil_ail_insert(ctx, abort);

        xfs_extent_busy_sort(&ctx->busy_extents.extent_list);
        xfs_extent_busy_clear(&ctx->busy_extents.extent_list,
                              xfs_has_discard(mp) && !abort);

        spin_lock(&ctx->cil->xc_push_lock);
        list_del(&ctx->committing);
        spin_unlock(&ctx->cil->xc_push_lock);

        xlog_cil_free_logvec(&ctx->lv_chain);

        if (!list_empty(&ctx->busy_extents.extent_list)) {
                ctx->busy_extents.owner = ctx;
                xfs_discard_extents(mp, &ctx->busy_extents);
                return;
        }

        kfree(ctx);
}

void
xlog_cil_process_committed(
        struct list_head        *list)
{
        struct xfs_cil_ctx      *ctx;

        while ((ctx = list_first_entry_or_null(list,
                        struct xfs_cil_ctx, iclog_entry))) {
                list_del(&ctx->iclog_entry);
                xlog_cil_committed(ctx);
        }
}

/*
 * Record the LSN of the iclog we were just granted space to start writing into.
 * If the context doesn't have a start_lsn recorded, then this iclog will
 * contain the start record for the checkpoint. Otherwise this write contains
 * the commit record for the checkpoint.
 */
void
xlog_cil_set_ctx_write_state(
        struct xfs_cil_ctx      *ctx,
        struct xlog_in_core     *iclog)
{
        struct xfs_cil          *cil = ctx->cil;
        xfs_lsn_t               lsn = be64_to_cpu(iclog->ic_header.h_lsn);

        ASSERT(!ctx->commit_lsn);
        if (!ctx->start_lsn) {
                spin_lock(&cil->xc_push_lock);
                /*
                 * The LSN we need to pass to the log items on transaction
                 * commit is the LSN reported by the first log vector write, not
                 * the commit lsn. If we use the commit record lsn then we can
                 * move the grant write head beyond the tail LSN and overwrite
                 * it.
                 */
                ctx->start_lsn = lsn;
                wake_up_all(&cil->xc_start_wait);
                spin_unlock(&cil->xc_push_lock);

                /*
                 * Make sure the metadata we are about to overwrite in the log
                 * has been flushed to stable storage before this iclog is
                 * issued.
                 */
                spin_lock(&cil->xc_log->l_icloglock);
                iclog->ic_flags |= XLOG_ICL_NEED_FLUSH;
                spin_unlock(&cil->xc_log->l_icloglock);
                return;
        }

        /*
         * Take a reference to the iclog for the context so that we still hold
         * it when xlog_write is done and has released it. This means the
         * context controls when the iclog is released for IO.
         */
        atomic_inc(&iclog->ic_refcnt);

        /*
         * xlog_state_get_iclog_space() guarantees there is enough space in the
         * iclog for an entire commit record, so we can attach the context
         * callbacks now. This needs to be done before we make the commit_lsn
         * visible to waiters so that checkpoints with commit records in the
         * same iclog order their IO completion callbacks in the same order that
         * the commit records appear in the iclog.
         */
        spin_lock(&cil->xc_log->l_icloglock);
        list_add_tail(&ctx->iclog_entry, &iclog->ic_callbacks);
        spin_unlock(&cil->xc_log->l_icloglock);

        /*
         * Now we can record the commit LSN and wake anyone waiting for this
         * sequence to have the ordered commit record assigned to a physical
         * location in the log.
         */
        spin_lock(&cil->xc_push_lock);
        ctx->commit_iclog = iclog;
        ctx->commit_lsn = lsn;
        wake_up_all(&cil->xc_commit_wait);
        spin_unlock(&cil->xc_push_lock);
}

/*
 * Ensure that the order of log writes follows checkpoint sequence order. This
 * relies on the context LSN being zero until the log write has guaranteed the
 * LSN that the log write will start at via xlog_state_get_iclog_space().
 */
enum _record_type {
        _START_RECORD,
        _COMMIT_RECORD,
};

static int
xlog_cil_order_write(
        struct xfs_cil          *cil,
        xfs_csn_t               sequence,
        enum _record_type       record)
{
        struct xfs_cil_ctx      *ctx;

restart:
        spin_lock(&cil->xc_push_lock);
        list_for_each_entry(ctx, &cil->xc_committing, committing) {
                /*
                 * Avoid getting stuck in this loop because we were woken by the
                 * shutdown, but then went back to sleep once already in the
                 * shutdown state.
                 */
                if (xlog_is_shutdown(cil->xc_log)) {
                        spin_unlock(&cil->xc_push_lock);
                        return -EIO;
                }

                /*
                 * Higher sequences will wait for this one so skip them.
                 * Don't wait for our own sequence, either.
                 */
                if (ctx->sequence >= sequence)
                        continue;

                /* Wait until the LSN for the record has been recorded. */
                switch (record) {
                case _START_RECORD:
                        if (!ctx->start_lsn) {
                                xlog_wait(&cil->xc_start_wait, &cil->xc_push_lock);
                                goto restart;
                        }
                        break;
                case _COMMIT_RECORD:
                        if (!ctx->commit_lsn) {
                                xlog_wait(&cil->xc_commit_wait, &cil->xc_push_lock);
                                goto restart;
                        }
                        break;
                }
        }
        spin_unlock(&cil->xc_push_lock);
        return 0;
}

/*
 * Write out the log vector change now attached to the CIL context. This will
 * write a start record that needs to be strictly ordered in ascending CIL
 * sequence order so that log recovery will always use in-order start LSNs when
 * replaying checkpoints.
 */
static int
xlog_cil_write_chain(
        struct xfs_cil_ctx      *ctx,
        uint32_t                chain_len)
{
        struct xlog             *log = ctx->cil->xc_log;
        int                     error;

        error = xlog_cil_order_write(ctx->cil, ctx->sequence, _START_RECORD);
        if (error)
                return error;
        return xlog_write(log, ctx, &ctx->lv_chain, ctx->ticket, chain_len);
}

/*
 * Write out the commit record of a checkpoint transaction to close off a
 * running log write. These commit records are strictly ordered in ascending CIL
 * sequence order so that log recovery will always replay the checkpoints in the
 * correct order.
 */
static int
xlog_cil_write_commit_record(
        struct xfs_cil_ctx      *ctx)
{
        struct xlog             *log = ctx->cil->xc_log;
        struct xlog_op_header   ophdr = {
                .oh_clientid = XFS_TRANSACTION,
                .oh_tid = cpu_to_be32(ctx->ticket->t_tid),
                .oh_flags = XLOG_COMMIT_TRANS,
        };
        struct xfs_log_iovec    reg = {
                .i_addr = &ophdr,
                .i_len = sizeof(struct xlog_op_header),
                .i_type = XLOG_REG_TYPE_COMMIT,
        };
        struct xfs_log_vec      vec = {
                .lv_niovecs = 1,
                .lv_iovecp = &reg,
        };
        int                     error;
        LIST_HEAD(lv_chain);
        list_add(&vec.lv_list, &lv_chain);

        if (xlog_is_shutdown(log))
                return -EIO;

        error = xlog_cil_order_write(ctx->cil, ctx->sequence, _COMMIT_RECORD);
        if (error)
                return error;

        /* account for space used by record data */
        ctx->ticket->t_curr_res -= reg.i_len;
        error = xlog_write(log, ctx, &lv_chain, ctx->ticket, reg.i_len);
        if (error)
                xlog_force_shutdown(log, SHUTDOWN_LOG_IO_ERROR);
        return error;
}

struct xlog_cil_trans_hdr {
        struct xlog_op_header   oph[2];
        struct xfs_trans_header thdr;
        struct xfs_log_iovec    lhdr[2];
};

/*
 * Build a checkpoint transaction header to begin the journal transaction. We
 * need to account for the space used by the transaction header here as it is
 * not accounted for in xlog_write().
 *
 * This is the only place we write a transaction header, so we also build the
 * log opheaders that indicate the start of a log transaction and wrap the
 * transaction header. We keep the start record in its own log vector rather
 * than compacting them into a single region as this ends up making the logic
 * in xlog_write() for handling empty opheaders for start, commit and unmount
 * records much simpler.
 */
static void
xlog_cil_build_trans_hdr(
        struct xfs_cil_ctx      *ctx,
        struct xlog_cil_trans_hdr *hdr,
        struct xfs_log_vec      *lvhdr,
        int                     num_iovecs)
{
        struct xlog_ticket      *tic = ctx->ticket;
        __be32                  tid = cpu_to_be32(tic->t_tid);

        memset(hdr, 0, sizeof(*hdr));

        /* Log start record */
        hdr->oph[0].oh_tid = tid;
        hdr->oph[0].oh_clientid = XFS_TRANSACTION;
        hdr->oph[0].oh_flags = XLOG_START_TRANS;

        /* log iovec region pointer */
        hdr->lhdr[0].i_addr = &hdr->oph[0];
        hdr->lhdr[0].i_len = sizeof(struct xlog_op_header);
        hdr->lhdr[0].i_type = XLOG_REG_TYPE_LRHEADER;

        /* log opheader */
        hdr->oph[1].oh_tid = tid;
        hdr->oph[1].oh_clientid = XFS_TRANSACTION;
        hdr->oph[1].oh_len = cpu_to_be32(sizeof(struct xfs_trans_header));

        /* transaction header in host byte order format */
        hdr->thdr.th_magic = XFS_TRANS_HEADER_MAGIC;
        hdr->thdr.th_type = XFS_TRANS_CHECKPOINT;
        hdr->thdr.th_tid = tic->t_tid;
        hdr->thdr.th_num_items = num_iovecs;

        /* log iovec region pointer */
        hdr->lhdr[1].i_addr = &hdr->oph[1];
        hdr->lhdr[1].i_len = sizeof(struct xlog_op_header) +
                        sizeof(struct xfs_trans_header);
        hdr->lhdr[1].i_type = XLOG_REG_TYPE_TRANSHDR;

        lvhdr->lv_niovecs = 2;
        lvhdr->lv_iovecp = &hdr->lhdr[0];
        lvhdr->lv_bytes = hdr->lhdr[0].i_len + hdr->lhdr[1].i_len;

        tic->t_curr_res -= lvhdr->lv_bytes;
}
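
/*
 * Resulting layout (derived from the code above): the header log vector
 * carries the two regions that open every checkpoint in the journal:
 *
 *      lhdr[0]: oph[0]         start record (XLOG_START_TRANS)
 *      lhdr[1]: oph[1] + thdr  opheader-wrapped transaction header
 *
 * Both regions are charged to the CIL ticket here because xlog_write()
 * does not account for them itself.
 */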

/*
 * CIL item reordering compare function. We want to order in ascending ID order,
 * but we want to leave items with the same ID in the order they were added to
 * the list. This is important for operations like reflink where we log 4 order
 * dependent intents in a single transaction when we overwrite an existing
 * shared extent with a new shared extent. i.e. BUI(unmap), CUI(drop),
 * CUI (inc), BUI(remap)...
 */
static int
xlog_cil_order_cmp(
        void                    *priv,
        const struct list_head  *a,
        const struct list_head  *b)
{
        struct xfs_log_vec      *l1 = container_of(a, struct xfs_log_vec, lv_list);
        struct xfs_log_vec      *l2 = container_of(b, struct xfs_log_vec, lv_list);

        return l1->lv_order_id > l2->lv_order_id;
}
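
/*
 * Note: the strictly-greater-than comparison matters here. list_sort() is a
 * stable merge sort that only reorders elements when the cmp function returns
 * a positive value, so log vectors with equal lv_order_id values (e.g. the
 * four reflink intents above) are kept in the order they were added to the
 * CIL.
 */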

/*
 * Pull all the log vectors off the items in the CIL, and remove the items from
 * the CIL. We don't need the CIL lock here because it's only needed on the
 * transaction commit side which is currently locked out by the flush lock.
 *
 * If a log item is marked with a whiteout, we do not need to write it to the
 * journal and so we just move them to the whiteout list for the caller to
 * dispose of appropriately.
 */
static void
xlog_cil_build_lv_chain(
        struct xfs_cil_ctx      *ctx,
        struct list_head        *whiteouts,
        uint32_t                *num_iovecs,
        uint32_t                *num_bytes)
{
        while (!list_empty(&ctx->log_items)) {
                struct xfs_log_item     *item;
                struct xfs_log_vec      *lv;

                item = list_first_entry(&ctx->log_items,
                                        struct xfs_log_item, li_cil);

                if (test_bit(XFS_LI_WHITEOUT, &item->li_flags)) {
                        list_move(&item->li_cil, whiteouts);
                        trace_xfs_cil_whiteout_skip(item);
                        continue;
                }

                lv = item->li_lv;
                lv->lv_order_id = item->li_order_id;

                /* we don't write ordered log vectors */
                if (lv->lv_buf_len != XFS_LOG_VEC_ORDERED)
                        *num_bytes += lv->lv_bytes;
                *num_iovecs += lv->lv_niovecs;
                list_add_tail(&lv->lv_list, &ctx->lv_chain);

                item->li_lv = NULL;
                list_del_init(&item->li_cil);
                item->li_order_id = 0;
        }
}

static void
xlog_cil_cleanup_whiteouts(
        struct list_head        *whiteouts)
{
        while (!list_empty(whiteouts)) {
                struct xfs_log_item *item = list_first_entry(whiteouts,
                                                struct xfs_log_item, li_cil);
                list_del_init(&item->li_cil);
                trace_xfs_cil_whiteout_unpin(item);
                item->li_ops->iop_unpin(item, 1);
        }
}

/*
 * Push the Committed Item List to the log.
 *
 * If the current sequence is the same as xc_push_seq we need to do a flush. If
 * xc_push_seq is less than the current sequence, then it has already been
 * flushed and we don't need to do anything - the caller will wait for it to
 * complete if necessary.
 *
 * xc_push_seq is checked unlocked against the sequence number for a match.
 * Hence we can allow log forces to run racily and not issue pushes for the
 * same sequence twice. If we get a race between multiple pushes for the same
 * sequence they will block on the first one and then abort, hence avoiding
 * needless pushes.
 *
 * This runs from a workqueue so it does not inherit any specific memory
 * allocation context. However, we do not want to block on memory reclaim
 * recursing back into the filesystem because this push may have been triggered
 * by memory reclaim itself. Hence we really need to run under full GFP_NOFS
 * conditions.
 */
static void
xlog_cil_push_work(
        struct work_struct      *work)
{
        unsigned int            nofs_flags = memalloc_nofs_save();
        struct xfs_cil_ctx      *ctx =
                container_of(work, struct xfs_cil_ctx, push_work);
        struct xfs_cil          *cil = ctx->cil;
        struct xlog             *log = cil->xc_log;
        struct xfs_cil_ctx      *new_ctx;
        int                     num_iovecs = 0;
        int                     num_bytes = 0;
        int                     error = 0;
        struct xlog_cil_trans_hdr thdr;
        struct xfs_log_vec      lvhdr = {};
        xfs_csn_t               push_seq;
        bool                    push_commit_stable;
        LIST_HEAD               (whiteouts);
        struct xlog_ticket      *ticket;

        new_ctx = xlog_cil_ctx_alloc();
        new_ctx->ticket = xlog_cil_ticket_alloc(log);

        down_write(&cil->xc_ctx_lock);

        spin_lock(&cil->xc_push_lock);
        push_seq = cil->xc_push_seq;
        ASSERT(push_seq <= ctx->sequence);
        push_commit_stable = cil->xc_push_commit_stable;
        cil->xc_push_commit_stable = false;

        /*
         * As we are about to switch to a new, empty CIL context, we no longer
         * need to throttle tasks on CIL space overruns. Wake any waiters that
         * the hard push throttle may have caught so they can start committing
         * to the new context. The ctx->xc_push_lock provides the serialisation
         * necessary for safely using the lockless waitqueue_active() check in
         * this context.
         */
        if (waitqueue_active(&cil->xc_push_wait))
                wake_up_all(&cil->xc_push_wait);

        xlog_cil_push_pcp_aggregate(cil, ctx);

        /*
         * Check if we've anything to push. If there is nothing, then we don't
         * move on to a new sequence number and so we have to be able to push
         * this sequence again later.
         */
        if (test_bit(XLOG_CIL_EMPTY, &cil->xc_flags)) {
                cil->xc_push_seq = 0;
                spin_unlock(&cil->xc_push_lock);
                goto out_skip;
        }

        /* check for a previously pushed sequence */
        if (push_seq < ctx->sequence) {
                spin_unlock(&cil->xc_push_lock);
                goto out_skip;
        }

        /*
         * We are now going to push this context, so add it to the committing
         * list before we do anything else. This ensures that anyone waiting on
         * this push can easily detect the difference between a "push in
         * progress" and "CIL is empty, nothing to do".
         *
         * IOWs, a wait loop can now check for:
         *      the current sequence not being found on the committing list;
         *      an empty CIL; and
         *      an unchanged sequence number
         * to detect a push that had nothing to do and therefore does not need
         * waiting on. If the CIL is not empty, we get put on the committing
         * list before emptying the CIL and bumping the sequence number. Hence
         * an empty CIL and an unchanged sequence number means we jumped out
         * above after doing nothing.
         *
         * Hence the waiter will either find the commit sequence on the
         * committing list or the sequence number will be unchanged and the CIL
         * still dirty. In that latter case, the push has not yet started, and
         * so the waiter will have to continue trying to check the CIL
         * committing list until it is found. In extreme cases of delay, the
         * sequence may fully commit between the attempts the wait makes to wait
         * on the commit sequence.
         */
        list_add(&ctx->committing, &cil->xc_committing);
        spin_unlock(&cil->xc_push_lock);

        xlog_cil_build_lv_chain(ctx, &whiteouts, &num_iovecs, &num_bytes);

        /*
         * Switch the contexts so we can drop the context lock and move out
         * of a shared context. We can't just go straight to the commit record,
         * though - we need to synchronise with previous and future commits so
         * that the commit records are correctly ordered in the log to ensure
         * that we process items during log IO completion in the correct order.
         *
         * For example, if we get an EFI in one checkpoint and the EFD in the
         * next (e.g. due to log forces), we do not want the checkpoint with
         * the EFD to be committed before the checkpoint with the EFI. Hence
         * we must strictly order the commit records of the checkpoints so
         * that: a) the checkpoint callbacks are attached to the iclogs in the
         * correct order; and b) the checkpoints are replayed in correct order
         * in log recovery.
         *
         * Hence we need to add this context to the committing context list so
         * that higher sequences will wait for us to write out a commit record
         * before they do.
         *
         * xfs_log_force_seq requires us to mirror the new sequence into the cil
         * structure atomically with the addition of this sequence to the
         * committing list. This also ensures that we can do unlocked checks
         * against the current sequence in log forces without risking
         * dereferencing a freed context pointer.
         */
        spin_lock(&cil->xc_push_lock);
        xlog_cil_ctx_switch(cil, new_ctx);
        spin_unlock(&cil->xc_push_lock);
        up_write(&cil->xc_ctx_lock);

        /*
         * Sort the log vector chain before we add the transaction headers.
         * This ensures we always have the transaction headers at the start
         * of the chain.
         */
        list_sort(NULL, &ctx->lv_chain, xlog_cil_order_cmp);

        /*
         * Build a checkpoint transaction header and write it to the log to
         * begin the transaction. We need to account for the space used by the
         * transaction header here as it is not accounted for in xlog_write().
         * Add the lvhdr to the head of the lv chain we pass to xlog_write() so
         * it gets written into the iclog first.
         */
        xlog_cil_build_trans_hdr(ctx, &thdr, &lvhdr, num_iovecs);
        num_bytes += lvhdr.lv_bytes;
        list_add(&lvhdr.lv_list, &ctx->lv_chain);

        /*
         * Take the lvhdr back off the lv_chain immediately after calling
         * xlog_cil_write_chain() as it should not be passed to log IO
         * completion.
         */
        error = xlog_cil_write_chain(ctx, num_bytes);
        list_del(&lvhdr.lv_list);
        if (error)
                goto out_abort_free_ticket;

        error = xlog_cil_write_commit_record(ctx);
        if (error)
                goto out_abort_free_ticket;

        /*
         * Grab the ticket from the ctx so we can ungrant it after releasing the
         * commit_iclog. The ctx may be freed by the time we return from
         * releasing the commit_iclog (i.e. checkpoint has been completed and
         * callback run) so we can't reference the ctx after the call to
         * xlog_state_release_iclog().
         */
        ticket = ctx->ticket;

        /*
         * If the checkpoint spans multiple iclogs, wait for all previous iclogs
         * to complete before we submit the commit_iclog. We can't use state
         * checks for this - ACTIVE can be either a past completed iclog or a
         * future iclog being filled, while WANT_SYNC through SYNC_DONE can be a
         * past or future iclog awaiting IO or ordered IO completion to be run.
         * In the latter case, if it's a future iclog and we wait on it, then we
         * will hang because it won't get processed through to ic_force_wait
         * wakeup until this commit_iclog is written to disk. Hence we use the
         * iclog header lsn and compare it to the commit lsn to determine if we
         * need to wait on iclogs or not.
         */
        spin_lock(&log->l_icloglock);
        if (ctx->start_lsn != ctx->commit_lsn) {
                xfs_lsn_t       plsn;

                plsn = be64_to_cpu(ctx->commit_iclog->ic_prev->ic_header.h_lsn);
                if (plsn && XFS_LSN_CMP(plsn, ctx->commit_lsn) < 0) {
                        /*
                         * Waiting on ic_force_wait orders the completion of
                         * iclogs older than ic_prev. Hence we only need to wait
                         * on the most recent older iclog here.
                         */
                        xlog_wait_on_iclog(ctx->commit_iclog->ic_prev);
                        spin_lock(&log->l_icloglock);
                }

                /*
                 * We need to issue a pre-flush so that the ordering for this
                 * checkpoint is correctly preserved down to stable storage.
                 */
                ctx->commit_iclog->ic_flags |= XLOG_ICL_NEED_FLUSH;
        }

        /*
         * The commit iclog must be written to stable storage to guarantee
         * journal IO vs metadata writeback IO is correctly ordered on stable
         * storage.
         *
         * If the push caller needs the commit to be immediately stable and the
         * commit_iclog is not yet marked as XLOG_STATE_WANT_SYNC to indicate it
         * will be written when released, switch its state to WANT_SYNC right
         * now.
         */
        ctx->commit_iclog->ic_flags |= XLOG_ICL_NEED_FUA;
        if (push_commit_stable &&
            ctx->commit_iclog->ic_state == XLOG_STATE_ACTIVE)
                xlog_state_switch_iclogs(log, ctx->commit_iclog, 0);
        ticket = ctx->ticket;
        xlog_state_release_iclog(log, ctx->commit_iclog, ticket);

        /* Not safe to reference ctx now! */

        spin_unlock(&log->l_icloglock);
        xlog_cil_cleanup_whiteouts(&whiteouts);
        xfs_log_ticket_ungrant(log, ticket);
        memalloc_nofs_restore(nofs_flags);
        return;

out_skip:
        up_write(&cil->xc_ctx_lock);
        xfs_log_ticket_put(new_ctx->ticket);
        kfree(new_ctx);
        memalloc_nofs_restore(nofs_flags);
        return;

out_abort_free_ticket:
        ASSERT(xlog_is_shutdown(log));
        xlog_cil_cleanup_whiteouts(&whiteouts);
        if (!ctx->commit_iclog) {
                xfs_log_ticket_ungrant(log, ctx->ticket);
                xlog_cil_committed(ctx);
                memalloc_nofs_restore(nofs_flags);
                return;
        }
        spin_lock(&log->l_icloglock);
        ticket = ctx->ticket;
        xlog_state_release_iclog(log, ctx->commit_iclog, ticket);
        /* Not safe to reference ctx now! */
        spin_unlock(&log->l_icloglock);
        xfs_log_ticket_ungrant(log, ticket);
        memalloc_nofs_restore(nofs_flags);
}

/*
 * We need to push CIL every so often so we don't cache more than we can fit in
 * the log. The limit really is that a checkpoint can't be more than half the
 * log (the current checkpoint is not allowed to overwrite the previous
 * checkpoint), but commit latency and memory usage limit this to a smaller
 * size.
 */
static void
xlog_cil_push_background(
        struct xlog     *log) __releases(cil->xc_ctx_lock)
{
        struct xfs_cil  *cil = log->l_cilp;
        int             space_used = atomic_read(&cil->xc_ctx->space_used);

        /*
         * The cil won't be empty because we are called while holding the
         * context lock so whatever we added to the CIL will still be there.
         */
        ASSERT(!test_bit(XLOG_CIL_EMPTY, &cil->xc_flags));

        /*
         * We are done if:
         * - we haven't used up all the space available yet; or
         * - we've already queued up a push; and
         * - we're not over the hard limit; and
         * - nothing has been over the hard limit.
         *
         * If so, we don't need to take the push lock as there's nothing to do.
         */
        if (space_used < XLOG_CIL_SPACE_LIMIT(log) ||
            (cil->xc_push_seq == cil->xc_current_sequence &&
             space_used < XLOG_CIL_BLOCKING_SPACE_LIMIT(log) &&
             !waitqueue_active(&cil->xc_push_wait))) {
                up_read(&cil->xc_ctx_lock);
                return;
        }

        spin_lock(&cil->xc_push_lock);
        if (cil->xc_push_seq < cil->xc_current_sequence) {
                cil->xc_push_seq = cil->xc_current_sequence;
                queue_work(cil->xc_push_wq, &cil->xc_ctx->push_work);
        }

        /*
         * Drop the context lock now, we can't hold that if we need to sleep
         * because we are over the blocking threshold. The push_lock is still
         * held, so blocking threshold sleep/wakeup is still correctly
         * serialised here.
         */
        up_read(&cil->xc_ctx_lock);

        /*
         * If we are well over the space limit, throttle the work that is being
         * done until the push work on this context has begun. Enforce the hard
         * throttle on all transaction commits once it has been activated, even
         * if the committing transactions have resulted in the space usage
         * dipping back down under the hard limit.
         *
         * The ctx->xc_push_lock provides the serialisation necessary for safely
         * calling xlog_cil_over_hard_limit() in this context.
         */
        if (xlog_cil_over_hard_limit(log, space_used)) {
                trace_xfs_log_cil_wait(log, cil->xc_ctx->ticket);
                ASSERT(space_used < log->l_logsize);
                xlog_wait(&cil->xc_push_wait, &cil->xc_push_lock);
                return;
        }

        spin_unlock(&cil->xc_push_lock);
}

/*
 * xlog_cil_push_now() is used to trigger an immediate CIL push to the sequence
 * number that is passed. When it returns, the work will be queued for
 * @push_seq, but it won't be completed.
 *
 * If the caller is performing a synchronous force, we will flush the workqueue
 * to get previously queued work moving to minimise the wait time they will
 * undergo waiting for all outstanding pushes to complete. The caller is
 * expected to do the required waiting for push_seq to complete.
 *
 * If the caller is performing an async push, we need to ensure that the
 * checkpoint is fully flushed out of the iclogs when we finish the push. If we
 * don't do this, then the commit record may remain sitting in memory in an
 * ACTIVE iclog. This then requires another full log force to push to disk,
 * which defeats the purpose of having an async, non-blocking CIL force
 * mechanism. Hence in this case we need to pass a flag to the push work to
 * indicate it needs to flush the commit record itself.
 */
static void
xlog_cil_push_now(
        struct xlog     *log,
        xfs_lsn_t       push_seq,
        bool            async)
{
        struct xfs_cil  *cil = log->l_cilp;

        if (!cil)
                return;

        ASSERT(push_seq && push_seq <= cil->xc_current_sequence);

        /* start on any pending background push to minimise wait time on it */
        if (!async)
                flush_workqueue(cil->xc_push_wq);

        spin_lock(&cil->xc_push_lock);

        /*
         * If this is an async flush request, we always need to set the
         * xc_push_commit_stable flag even if something else has already queued
         * a push. The flush caller is asking for the CIL to be on stable
         * storage when the next push completes, so regardless of who has queued
         * the push, the flush requires stable semantics from it.
         */
        cil->xc_push_commit_stable = async;

        /*
         * If the CIL is empty or we've already pushed the sequence then
         * there's no more work that we need to do.
         */
        if (test_bit(XLOG_CIL_EMPTY, &cil->xc_flags) ||
            push_seq <= cil->xc_push_seq) {
                spin_unlock(&cil->xc_push_lock);
                return;
        }

        cil->xc_push_seq = push_seq;
        queue_work(cil->xc_push_wq, &cil->xc_ctx->push_work);
        spin_unlock(&cil->xc_push_lock);
}

bool
xlog_cil_empty(
        struct xlog     *log)
{
        struct xfs_cil  *cil = log->l_cilp;
        bool            empty = false;

        spin_lock(&cil->xc_push_lock);
        if (test_bit(XLOG_CIL_EMPTY, &cil->xc_flags))
                empty = true;
        spin_unlock(&cil->xc_push_lock);
        return empty;
}

/*
 * If there are intent done items in this transaction and the related intent was
 * committed in the current (same) CIL checkpoint, we don't need to write either
 * the intent or intent done item to the journal as the change will be
 * journalled atomically within this checkpoint. As we cannot remove items from
 * the CIL here, mark the related intent with a whiteout so that the CIL push
 * can remove it rather than writing it to the journal. Then remove the intent
 * done item from the current transaction and release it so it doesn't get put
 * into the CIL at all.
 */
static uint32_t
xlog_cil_process_intents(
        struct xfs_cil          *cil,
        struct xfs_trans        *tp)
{
        struct xfs_log_item     *lip, *ilip, *next;
        uint32_t                len = 0;

        list_for_each_entry_safe(lip, next, &tp->t_items, li_trans) {
                if (!(lip->li_ops->flags & XFS_ITEM_INTENT_DONE))
                        continue;

                ilip = lip->li_ops->iop_intent(lip);
                if (!ilip || !xlog_item_in_current_chkpt(cil, ilip))
                        continue;
                set_bit(XFS_LI_WHITEOUT, &ilip->li_flags);
                trace_xfs_cil_whiteout_mark(ilip);
                len += ilip->li_lv->lv_bytes;
                kvfree(ilip->li_lv);
                ilip->li_lv = NULL;

                xfs_trans_del_item(lip);
                lip->li_ops->iop_release(lip);
        }
        return len;
}

/*
 * Commit a transaction with the given vector to the Committed Item List.
 *
 * To do this, we need to format the item, pin it in memory if required and
 * account for the space used by the transaction. Once we have done that we
 * need to release the unused reservation for the transaction, attach the
 * transaction to the checkpoint context so we carry the busy extents through
 * to checkpoint completion, and then unlock all the items in the transaction.
 *
 * Called with the context lock already held in read mode to lock out
 * background commit, returns without it held once background commits are
 * allowed again.
 */
void
xlog_cil_commit(
        struct xlog             *log,
        struct xfs_trans        *tp,
        xfs_csn_t               *commit_seq,
        bool                    regrant)
{
        struct xfs_cil          *cil = log->l_cilp;
        struct xfs_log_item     *lip, *next;
        uint32_t                released_space = 0;

        /*
         * Do all necessary memory allocation before we lock the CIL.
         * This ensures the allocation does not deadlock with a CIL
         * push in memory reclaim (e.g. from kswapd).
         */
        xlog_cil_alloc_shadow_bufs(log, tp);

        /* lock out background commit */
        down_read(&cil->xc_ctx_lock);

        if (tp->t_flags & XFS_TRANS_HAS_INTENT_DONE)
                released_space = xlog_cil_process_intents(cil, tp);

        xlog_cil_insert_items(log, tp, released_space);

        if (regrant && !xlog_is_shutdown(log))
                xfs_log_ticket_regrant(log, tp->t_ticket);
        else
                xfs_log_ticket_ungrant(log, tp->t_ticket);
        tp->t_ticket = NULL;
        xfs_trans_unreserve_and_mod_sb(tp);

        /*
         * Once all the items of the transaction have been copied to the CIL,
         * the items can be unlocked and possibly freed.
         *
         * This needs to be done before we drop the CIL context lock because we
         * have to update state in the log items and unlock them before they go
         * to disk. If we don't, then the CIL checkpoint can race with us and
         * we can run checkpoint completion before we've updated and unlocked
         * the log items. This affects (at least) processing of stale buffers,
         * inodes and EFIs.
         */
        trace_xfs_trans_commit_items(tp, _RET_IP_);
        list_for_each_entry_safe(lip, next, &tp->t_items, li_trans) {
                xfs_trans_del_item(lip);
                if (lip->li_ops->iop_committing)
                        lip->li_ops->iop_committing(lip, cil->xc_ctx->sequence);
        }
        if (commit_seq)
                *commit_seq = cil->xc_ctx->sequence;

        /* xlog_cil_push_background() releases cil->xc_ctx_lock */
        xlog_cil_push_background(log);
}

/*
 * Flush the CIL to stable storage but don't wait for it to complete. This
 * requires the CIL push to ensure the commit record for the push hits the disk,
 * but otherwise is no different to a push done from a log force.
 */
void
xlog_cil_flush(
        struct xlog     *log)
{
        xfs_csn_t       seq = log->l_cilp->xc_current_sequence;

        trace_xfs_log_force(log->l_mp, seq, _RET_IP_);
        xlog_cil_push_now(log, seq, true);

        /*
         * If the CIL is empty, make sure that any previous checkpoint that may
         * still be in an active iclog is pushed to stable storage.
         */
        if (test_bit(XLOG_CIL_EMPTY, &log->l_cilp->xc_flags))
                xfs_log_force(log->l_mp, 0);
}

/*
 * Conditionally push the CIL based on the sequence passed in.
 *
 * We only need to push if we haven't already pushed the sequence number given.
 * Hence the only time we will trigger a push here is if the push sequence is
 * the same as the current context.
 *
 * We return the current commit lsn to allow the callers to determine if an
 * iclog flush is necessary following this call.
 */
xfs_lsn_t
xlog_cil_force_seq(
        struct xlog     *log,
        xfs_csn_t       sequence)
{
        struct xfs_cil          *cil = log->l_cilp;
        struct xfs_cil_ctx      *ctx;
        xfs_lsn_t               commit_lsn = NULLCOMMITLSN;

        ASSERT(sequence <= cil->xc_current_sequence);

        if (!sequence)
                sequence = cil->xc_current_sequence;
        trace_xfs_log_force(log->l_mp, sequence, _RET_IP_);

        /*
         * check to see if we need to force out the current context.
         * xlog_cil_push() handles racing pushes for the same sequence,
         * so no need to deal with it here.
         */
restart:
        xlog_cil_push_now(log, sequence, false);

        /*
         * See if we can find a previous sequence still committing.
         * We need to wait for all previous sequence commits to complete
         * before allowing the force of push_seq to go ahead. Hence block
         * on commits for those as well.
         */
        spin_lock(&cil->xc_push_lock);
        list_for_each_entry(ctx, &cil->xc_committing, committing) {
                /*
                 * Avoid getting stuck in this loop because we were woken by the
                 * shutdown, but then went back to sleep once already in the
                 * shutdown state.
                 */
                if (xlog_is_shutdown(log))
                        goto out_shutdown;
                if (ctx->sequence > sequence)
                        continue;
                if (!ctx->commit_lsn) {
                        /*
                         * It is still being pushed! Wait for the push to
                         * complete, then start again from the beginning.
                         */
                        XFS_STATS_INC(log->l_mp, xs_log_force_sleep);
                        xlog_wait(&cil->xc_commit_wait, &cil->xc_push_lock);
                        goto restart;
                }
                if (ctx->sequence != sequence)
                        continue;
                /* found it! */
                commit_lsn = ctx->commit_lsn;
        }

        /*
         * The call to xlog_cil_push_now() executes the push in the background.
         * Hence by the time we have got here our sequence may not have been
         * pushed yet. This is true if the current sequence still matches the
         * push sequence after the above wait loop and the CIL still contains
         * dirty objects. This is guaranteed by the push code first adding the
         * context to the committing list before emptying the CIL.
         *
         * Hence if we don't find the context in the committing list and the
         * current sequence number is unchanged then the CIL contents are
         * significant. If the CIL is empty, it means there was nothing to push
         * and that means there is nothing to wait for. If the CIL is not empty,
         * it means we haven't yet started the push, because if it had started
         * we would have found the context on the committing list.
         */
        if (sequence == cil->xc_current_sequence &&
            !test_bit(XLOG_CIL_EMPTY, &cil->xc_flags)) {
                spin_unlock(&cil->xc_push_lock);
                goto restart;
        }

        spin_unlock(&cil->xc_push_lock);
        return commit_lsn;

        /*
         * We detected a shutdown in progress. We need to trigger the log force
         * to pass through its iclog state machine error handling, even though
         * we are already in a shutdown state. Hence we can't return
         * NULLCOMMITLSN here as that has special meaning to log forces (i.e.
         * LSN is already stable), so we return a zero LSN instead.
         */
out_shutdown:
        spin_unlock(&cil->xc_push_lock);
        return 0;
}

/*
 * Perform initial CIL structure initialisation.
 */
int
xlog_cil_init(
        struct xlog     *log)
{
        struct xfs_cil          *cil;
        struct xfs_cil_ctx      *ctx;
        struct xlog_cil_pcp     *cilpcp;
        int                     cpu;

        cil = kzalloc(sizeof(*cil), GFP_KERNEL | __GFP_RETRY_MAYFAIL);
        if (!cil)
                return -ENOMEM;
        /*
         * Limit the CIL pipeline depth to 4 concurrent works to bound the
         * concurrency the log spinlocks will be exposed to.
         */
        cil->xc_push_wq = alloc_workqueue("xfs-cil/%s",
                        XFS_WQFLAGS(WQ_FREEZABLE | WQ_MEM_RECLAIM | WQ_UNBOUND),
                        4, log->l_mp->m_super->s_id);
        if (!cil->xc_push_wq)
                goto out_destroy_cil;

        cil->xc_log = log;
        cil->xc_pcp = alloc_percpu(struct xlog_cil_pcp);
        if (!cil->xc_pcp)
                goto out_destroy_wq;

        for_each_possible_cpu(cpu) {
                cilpcp = per_cpu_ptr(cil->xc_pcp, cpu);
                INIT_LIST_HEAD(&cilpcp->busy_extents);
                INIT_LIST_HEAD(&cilpcp->log_items);
        }

        INIT_LIST_HEAD(&cil->xc_committing);
        spin_lock_init(&cil->xc_push_lock);
        init_waitqueue_head(&cil->xc_push_wait);
        init_rwsem(&cil->xc_ctx_lock);
        init_waitqueue_head(&cil->xc_start_wait);
        init_waitqueue_head(&cil->xc_commit_wait);
        log->l_cilp = cil;

        ctx = xlog_cil_ctx_alloc();
        xlog_cil_ctx_switch(cil, ctx);
        return 0;

out_destroy_wq:
        destroy_workqueue(cil->xc_push_wq);
out_destroy_cil:
        kfree(cil);
        return -ENOMEM;
}

void
xlog_cil_destroy(
        struct xlog     *log)
{
        struct xfs_cil  *cil = log->l_cilp;

        if (cil->xc_ctx) {
                if (cil->xc_ctx->ticket)
                        xfs_log_ticket_put(cil->xc_ctx->ticket);
                kfree(cil->xc_ctx);
        }

        ASSERT(test_bit(XLOG_CIL_EMPTY, &cil->xc_flags));
        free_percpu(cil->xc_pcp);
        destroy_workqueue(cil->xc_push_wq);
        kfree(cil);
}