// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2000-2002,2005 Silicon Graphics, Inc.
 * Copyright (c) 2008 Dave Chinner
 * All Rights Reserved.
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_trans.h"
#include "xfs_trans_priv.h"
#include "xfs_trace.h"
#include "xfs_errortag.h"
#include "xfs_error.h"
#include "xfs_log.h"
#include "xfs_log_priv.h"
#ifdef DEBUG
/*
 * Check that the list is sorted as it should be.
 *
 * Called with the ail lock held, but we don't want to assert fail with it
 * held otherwise we'll lock everything up and won't be able to debug the
 * cause. Hence we sample and check the state under the AIL lock and return if
 * everything is fine, otherwise we drop the lock and run the ASSERT checks.
 * Asserts may not be fatal, so pick the lock back up and continue onwards.
 */
STATIC void
xfs_ail_check(
	struct xfs_ail		*ailp,
	struct xfs_log_item	*lip)
	__must_hold(&ailp->ail_lock)
{
	struct xfs_log_item	*prev_lip;
	struct xfs_log_item	*next_lip;
	xfs_lsn_t		prev_lsn = NULLCOMMITLSN;
	xfs_lsn_t		next_lsn = NULLCOMMITLSN;
	xfs_lsn_t		lsn;
	bool			in_ail;

	if (list_empty(&ailp->ail_head))
		return;

	/*
	 * Sample then check the next and previous entries are valid.
	 */
	in_ail = test_bit(XFS_LI_IN_AIL, &lip->li_flags);
	prev_lip = list_entry(lip->li_ail.prev, struct xfs_log_item, li_ail);
	if (&prev_lip->li_ail != &ailp->ail_head)
		prev_lsn = prev_lip->li_lsn;
	next_lip = list_entry(lip->li_ail.next, struct xfs_log_item, li_ail);
	if (&next_lip->li_ail != &ailp->ail_head)
		next_lsn = next_lip->li_lsn;
	lsn = lip->li_lsn;

	if (in_ail &&
	    (prev_lsn == NULLCOMMITLSN || XFS_LSN_CMP(prev_lsn, lsn) <= 0) &&
	    (next_lsn == NULLCOMMITLSN || XFS_LSN_CMP(next_lsn, lsn) >= 0))
		return;

	spin_unlock(&ailp->ail_lock);
	ASSERT(in_ail);
	ASSERT(prev_lsn == NULLCOMMITLSN || XFS_LSN_CMP(prev_lsn, lsn) <= 0);
	ASSERT(next_lsn == NULLCOMMITLSN || XFS_LSN_CMP(next_lsn, lsn) >= 0);
	spin_lock(&ailp->ail_lock);
}
#else /* !DEBUG */
#define	xfs_ail_check(a,l)
#endif /* DEBUG */
/*
 * Return a pointer to the last item in the AIL. If the AIL is empty, then
 * return NULL.
 */
static struct xfs_log_item *
xfs_ail_max(
	struct xfs_ail		*ailp)
{
	if (list_empty(&ailp->ail_head))
		return NULL;

	return list_entry(ailp->ail_head.prev, struct xfs_log_item, li_ail);
}
/*
 * Return a pointer to the item which follows the given item in the AIL. If
 * the given item is the last item in the list, then return NULL.
 */
static struct xfs_log_item *
xfs_ail_next(
	struct xfs_ail		*ailp,
	struct xfs_log_item	*lip)
{
	if (lip->li_ail.next == &ailp->ail_head)
		return NULL;

	return list_first_entry(&lip->li_ail, struct xfs_log_item, li_ail);
}
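/*
 * Illustrative sketch, not part of the original file: the accessors above
 * compose into a simple min-to-max walk of the AIL. The function name is
 * hypothetical; xfs_ail_min() is the static inline from xfs_trans_priv.h.
 * The AIL lock must be held across the whole walk, so this pattern only
 * suits short, non-blocking inspection.
 */
static inline void
xfs_ail_example_walk_locked(
	struct xfs_ail		*ailp)
{
	struct xfs_log_item	*lip;

	assert_spin_locked(&ailp->ail_lock);
	for (lip = xfs_ail_min(ailp); lip; lip = xfs_ail_next(ailp, lip)) {
		/* inspect lip->li_lsn here; do not drop ail_lock */
	}
}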
/*
 * This is called by the log manager code to determine the LSN of the tail of
 * the log. This is exactly the LSN of the first item in the AIL. If the AIL
 * is empty, then this function returns 0.
 *
 * We need the AIL lock in order to get a coherent read of the lsn of the last
 * item in the AIL.
 */
static xfs_lsn_t
__xfs_ail_min_lsn(
	struct xfs_ail		*ailp)
{
	struct xfs_log_item	*lip = xfs_ail_min(ailp);

	if (lip)
		return lip->li_lsn;
	return 0;
}

xfs_lsn_t
xfs_ail_min_lsn(
	struct xfs_ail		*ailp)
{
	xfs_lsn_t		lsn;

	spin_lock(&ailp->ail_lock);
	lsn = __xfs_ail_min_lsn(ailp);
	spin_unlock(&ailp->ail_lock);

	return lsn;
}
/*
 * The cursor keeps track of where our current traversal is up to by tracking
 * the next item in the list for us. However, for this to be safe, removing an
 * object from the AIL needs to invalidate any cursor that points to it. Hence
 * the traversal cursor needs to be linked to the struct xfs_ail so that
 * deletion can search all the active cursors for invalidation.
 */
STATIC void
xfs_trans_ail_cursor_init(
	struct xfs_ail		*ailp,
	struct xfs_ail_cursor	*cur)
{
	cur->item = NULL;
	list_add_tail(&cur->list, &ailp->ail_cursors);
}
/*
 * Get the next item in the traversal and advance the cursor. If the cursor
 * was invalidated (indicated by a lip of 1), restart the traversal.
 */
struct xfs_log_item *
xfs_trans_ail_cursor_next(
	struct xfs_ail		*ailp,
	struct xfs_ail_cursor	*cur)
{
	struct xfs_log_item	*lip = cur->item;

	if ((uintptr_t)lip & 1)
		lip = xfs_ail_min(ailp);
	if (lip)
		cur->item = xfs_ail_next(ailp, lip);
	return lip;
}
/*
 * When the traversal is complete, we need to remove the cursor from the list
 * of traversing cursors.
 */
void
xfs_trans_ail_cursor_done(
	struct xfs_ail_cursor	*cur)
{
	cur->item = NULL;
	list_del_init(&cur->list);
}
/*
 * Invalidate any cursor that is pointing to this item. This is called when an
 * item is removed from the AIL. Any cursor pointing to this object is now
 * invalid and the traversal needs to be terminated so it doesn't reference a
 * freed object. We set the low bit of the cursor item pointer so we can
 * distinguish between an invalidation and the end of the list when getting the
 * next item from the cursor.
 */
STATIC void
xfs_trans_ail_cursor_clear(
	struct xfs_ail		*ailp,
	struct xfs_log_item	*lip)
{
	struct xfs_ail_cursor	*cur;

	list_for_each_entry(cur, &ailp->ail_cursors, list) {
		if (cur->item == lip)
			cur->item = (struct xfs_log_item *)
					((uintptr_t)cur->item | 1);
	}
}
/*
 * Find the first item in the AIL with the given @lsn by searching in ascending
 * LSN order and initialise the cursor to point to the next item for an
 * ascending traversal. Pass a @lsn of zero to initialise the cursor to the
 * first item in the AIL. Returns NULL if the list is empty.
 */
struct xfs_log_item *
xfs_trans_ail_cursor_first(
	struct xfs_ail		*ailp,
	struct xfs_ail_cursor	*cur,
	xfs_lsn_t		lsn)
{
	struct xfs_log_item	*lip;

	xfs_trans_ail_cursor_init(ailp, cur);

	if (lsn == 0) {
		lip = xfs_ail_min(ailp);
		goto out;
	}

	list_for_each_entry(lip, &ailp->ail_head, li_ail) {
		if (XFS_LSN_CMP(lip->li_lsn, lsn) >= 0)
			goto out;
	}
	return NULL;

out:
	if (lip)
		cur->item = xfs_ail_next(ailp, lip);
	return lip;
}
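/*
 * Illustrative sketch, not part of the original file: a restart-safe
 * traversal using the cursor API above. Unlike a bare xfs_ail_next() walk,
 * this survives items being removed underneath us while the AIL lock is
 * cycled, because removal marks the cursor invalid instead of leaving it
 * pointing at freed memory, and xfs_trans_ail_cursor_next() then restarts
 * from the AIL minimum. The function name is hypothetical.
 */
static inline void
xfs_ail_example_cursor_walk(
	struct xfs_ail		*ailp,
	xfs_lsn_t		lsn)
{
	struct xfs_ail_cursor	cur;
	struct xfs_log_item	*lip;

	spin_lock(&ailp->ail_lock);
	for (lip = xfs_trans_ail_cursor_first(ailp, &cur, lsn);
	     lip != NULL;
	     lip = xfs_trans_ail_cursor_next(ailp, &cur)) {
		/* work on lip; the ail_lock may be dropped and retaken here */
	}
	xfs_trans_ail_cursor_done(&cur);
	spin_unlock(&ailp->ail_lock);
}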
static struct xfs_log_item *
__xfs_trans_ail_cursor_last(
	struct xfs_ail		*ailp,
	xfs_lsn_t		lsn)
{
	struct xfs_log_item	*lip;

	list_for_each_entry_reverse(lip, &ailp->ail_head, li_ail) {
		if (XFS_LSN_CMP(lip->li_lsn, lsn) <= 0)
			return lip;
	}
	return NULL;
}
/*
 * Find the last item in the AIL with the given @lsn by searching in descending
 * LSN order and initialise the cursor to point to that item. If there is no
 * item with the value of @lsn, then it sets the cursor to the last item with an
 * LSN lower than @lsn. Returns NULL if the list is empty.
 */
struct xfs_log_item *
xfs_trans_ail_cursor_last(
	struct xfs_ail		*ailp,
	struct xfs_ail_cursor	*cur,
	xfs_lsn_t		lsn)
{
	xfs_trans_ail_cursor_init(ailp, cur);
	cur->item = __xfs_trans_ail_cursor_last(ailp, lsn);
	return cur->item;
}
/*
 * Splice the log item list into the AIL at the given LSN. We splice to the
 * tail of the given LSN to maintain insert order for push traversals. The
 * cursor is optional, allowing repeated updates to the same LSN to avoid
 * repeated traversals. This should not be called with an empty list.
 */
static void
xfs_ail_splice(
	struct xfs_ail		*ailp,
	struct xfs_ail_cursor	*cur,
	struct list_head	*list,
	xfs_lsn_t		lsn)
{
	struct xfs_log_item	*lip;

	ASSERT(!list_empty(list));

	/*
	 * Use the cursor to determine the insertion point if one is
	 * provided. If not, or if the one we got is not valid,
	 * find the place in the AIL where the items belong.
	 */
	lip = cur ? cur->item : NULL;
	if (!lip || (uintptr_t)lip & 1)
		lip = __xfs_trans_ail_cursor_last(ailp, lsn);

	/*
	 * If a cursor is provided, we know we're processing the AIL
	 * in lsn order, and future items to be spliced in will
	 * follow the last one being inserted now. Update the
	 * cursor to point to that last item, now while we have a
	 * reliable pointer to it.
	 */
	if (cur)
		cur->item = list_entry(list->prev, struct xfs_log_item, li_ail);

	/*
	 * Finally perform the splice. Unless the AIL was empty,
	 * lip points to the item in the AIL _after_ which the new
	 * items should go. If lip is null the AIL was empty, so
	 * the new items go at the head of the AIL.
	 */
	if (lip)
		list_splice(list, &lip->li_ail);
	else
		list_splice(list, &ailp->ail_head);
}
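/*
 * Illustrative sketch, not part of the original file: how a caller batches
 * items onto a private list and splices them into the AIL with a single
 * list walk. This is the pattern xfs_trans_ail_update_bulk() uses below;
 * the function name is hypothetical and item flag setup is elided.
 */
static inline void
xfs_ail_example_bulk_insert(
	struct xfs_ail		*ailp,
	struct xfs_log_item	*lip,
	xfs_lsn_t		lsn)
{
	LIST_HEAD(tmp);

	spin_lock(&ailp->ail_lock);
	lip->li_lsn = lsn;
	list_add_tail(&lip->li_ail, &tmp);
	/* further items at the same lsn would be added to @tmp here */
	xfs_ail_splice(ailp, NULL, &tmp, lsn);	/* one traversal, not N */
	spin_unlock(&ailp->ail_lock);
}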
/*
 * Delete the given item from the AIL, invalidating any cursors that point
 * at it.
 */
static void
xfs_ail_delete(
	struct xfs_ail		*ailp,
	struct xfs_log_item	*lip)
{
	xfs_ail_check(ailp, lip);
	list_del(&lip->li_ail);
	xfs_trans_ail_cursor_clear(ailp, lip);
}
/*
 * Requeue a failed buffer for writeback.
 *
 * We clear the log item failed state here as well, but we have to be careful
 * about reference counts because the only active reference counts on the buffer
 * may be the failed log items. Hence if we clear the log item failed state
 * before queuing the buffer for IO we can release all active references to
 * the buffer and free it, leading to use after free problems in
 * xfs_buf_delwri_queue. It makes no difference to the buffer or log items which
 * order we process them in - the buffer is locked, and we own the buffer list
 * so nothing on them is going to change while we are performing this action.
 *
 * Hence we can safely queue the buffer for IO before we clear the failed log
 * item state, therefore always having an active reference to the buffer and
 * avoiding the transient zero-reference state that leads to use-after-free.
 */
static inline int
xfsaild_resubmit_item(
	struct xfs_log_item	*lip,
	struct list_head	*buffer_list)
{
	struct xfs_buf		*bp = lip->li_buf;

	if (!xfs_buf_trylock(bp))
		return XFS_ITEM_LOCKED;

	if (!xfs_buf_delwri_queue(bp, buffer_list)) {
		xfs_buf_unlock(bp);
		return XFS_ITEM_FLUSHING;
	}

	/* protected by ail_lock */
	list_for_each_entry(lip, &bp->b_li_list, li_bio_list) {
		if (bp->b_flags & _XBF_INODES)
			clear_bit(XFS_LI_FAILED, &lip->li_flags);
		else
			xfs_clear_li_failed(lip);
	}

	xfs_buf_unlock(bp);
	return XFS_ITEM_SUCCESS;
}
static inline uint
xfsaild_push_item(
	struct xfs_ail		*ailp,
	struct xfs_log_item	*lip)
{
	/*
	 * If log item pinning is enabled, skip the push and track the item as
	 * pinned. This can help induce head-behind-tail conditions.
	 */
	if (XFS_TEST_ERROR(false, ailp->ail_log->l_mp, XFS_ERRTAG_LOG_ITEM_PIN))
		return XFS_ITEM_PINNED;

	/*
	 * Consider the item pinned if a push callback is not defined so the
	 * caller will force the log. This should only happen for intent items
	 * as they are unpinned once the associated done item is committed to
	 * the on-disk log.
	 */
	if (!lip->li_ops->iop_push)
		return XFS_ITEM_PINNED;
	if (test_bit(XFS_LI_FAILED, &lip->li_flags))
		return xfsaild_resubmit_item(lip, &ailp->ail_buf_list);
	return lip->li_ops->iop_push(lip, &ailp->ail_buf_list);
}
/*
 * Compute the LSN that we'd need to push the log tail towards in order to have
 * at least 25% of the log space free. If the log free space already meets this
 * threshold, this function returns the lowest LSN in the AIL to slowly keep
 * writeback ticking over and the tail of the log moving forward.
 */
static xfs_lsn_t
xfs_ail_calc_push_target(
	struct xfs_ail		*ailp)
{
	struct xlog		*log = ailp->ail_log;
	struct xfs_log_item	*lip;
	xfs_lsn_t		target_lsn;
	xfs_lsn_t		max_lsn;
	xfs_lsn_t		min_lsn;
	int32_t			free_bytes;
	uint32_t		target_block;
	uint32_t		target_cycle;

	lockdep_assert_held(&ailp->ail_lock);

	lip = xfs_ail_max(ailp);
	if (!lip)
		return NULLCOMMITLSN;

	max_lsn = lip->li_lsn;
	min_lsn = __xfs_ail_min_lsn(ailp);

	/*
	 * If we are supposed to push all the items in the AIL, we want to push
	 * to the current head. We then clear the push flag so that we don't
	 * keep pushing newly queued items beyond where the push all command was
	 * run. If the push waiter wants to empty the ail, it should queue
	 * itself on the ail_empty wait queue.
	 */
	if (test_and_clear_bit(XFS_AIL_OPSTATE_PUSH_ALL, &ailp->ail_opstate))
		return max_lsn;

	/* If someone wants the AIL empty, keep pushing everything we have. */
	if (waitqueue_active(&ailp->ail_empty))
		return max_lsn;

	/*
	 * Background pushing - attempt to keep 25% of the log free and if we
	 * have that much free retain the existing target.
	 */
	free_bytes = log->l_logsize - xlog_lsn_sub(log, max_lsn, min_lsn);
	if (free_bytes >= log->l_logsize >> 2)
		return ailp->ail_target;

	target_cycle = CYCLE_LSN(min_lsn);
	target_block = BLOCK_LSN(min_lsn) + (log->l_logBBsize >> 2);
	if (target_block >= log->l_logBBsize) {
		target_block -= log->l_logBBsize;
		target_cycle += 1;
	}
	target_lsn = xlog_assign_lsn(target_cycle, target_block);

	/* Cap the target to the highest LSN known to be in the AIL. */
	if (XFS_LSN_CMP(target_lsn, max_lsn) > 0)
		return max_lsn;

	/* If the existing target is higher than the new target, keep it. */
	if (XFS_LSN_CMP(ailp->ail_target, target_lsn) >= 0)
		return ailp->ail_target;
	return target_lsn;
}
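/*
 * Worked example, not part of the original file: for a log of 2048 basic
 * blocks (l_logBBsize = 2048, so l_logBBsize >> 2 = 512 BBs) with min_lsn at
 * cycle 7, block 1800, the 25% target lands past the physical end of the
 * log: 1800 + 512 = 2312 >= 2048, so it wraps to block 264 of cycle 8 and
 * xlog_assign_lsn(8, 264) becomes the candidate target, subject to the
 * max_lsn cap above.
 */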
static long
xfsaild_push(
	struct xfs_ail		*ailp)
{
	struct xfs_mount	*mp = ailp->ail_log->l_mp;
	struct xfs_ail_cursor	cur;
	struct xfs_log_item	*lip;
	xfs_lsn_t		lsn;
	long			tout;
	int			stuck = 0;
	int			flushing = 0;
	int			count = 0;

	/*
	 * If we encountered pinned items or did not finish writing out all
	 * buffers the last time we ran, force a background CIL push to get the
	 * items unpinned in the near future. We do not wait on the CIL push as
	 * that could stall us for seconds if there is enough background IO
	 * load. Stalling for that long when the tail of the log is pinned and
	 * needs flushing will hard stop the transaction subsystem when log
	 * space is low.
	 */
	if (ailp->ail_log_flush && ailp->ail_last_pushed_lsn == 0 &&
	    (!list_empty_careful(&ailp->ail_buf_list) ||
	     xfs_ail_min_lsn(ailp))) {
		ailp->ail_log_flush = 0;

		XFS_STATS_INC(mp, xs_push_ail_flush);
		xlog_cil_flush(ailp->ail_log);
	}

	spin_lock(&ailp->ail_lock);
	WRITE_ONCE(ailp->ail_target, xfs_ail_calc_push_target(ailp));
	if (ailp->ail_target == NULLCOMMITLSN)
		goto out_done;

	/* we're done if the AIL is empty or our push has reached the end */
	lip = xfs_trans_ail_cursor_first(ailp, &cur, ailp->ail_last_pushed_lsn);
	if (!lip)
		goto out_done_cursor;

	XFS_STATS_INC(mp, xs_push_ail);

	ASSERT(ailp->ail_target != NULLCOMMITLSN);

	lsn = lip->li_lsn;
	while (XFS_LSN_CMP(lip->li_lsn, ailp->ail_target) <= 0) {
		int	lock_result;

		if (test_bit(XFS_LI_FLUSHING, &lip->li_flags))
			goto next_item;

		/*
		 * Note that iop_push may unlock and reacquire the AIL lock. We
		 * rely on the AIL cursor implementation to be able to deal with
		 * the dropped lock.
		 */
		lock_result = xfsaild_push_item(ailp, lip);
		switch (lock_result) {
		case XFS_ITEM_SUCCESS:
			XFS_STATS_INC(mp, xs_push_ail_success);
			trace_xfs_ail_push(lip);

			ailp->ail_last_pushed_lsn = lsn;
			break;

		case XFS_ITEM_FLUSHING:
			/*
			 * The item or its backing buffer is already being
			 * flushed. The typical reason for that is that an
			 * inode buffer is locked because we already pushed the
			 * updates to it as part of inode clustering.
			 *
			 * We do not want to stop flushing just because lots
			 * of items are already being flushed, but we need to
			 * re-try the flushing relatively soon if most of the
			 * AIL is being flushed.
			 */
			XFS_STATS_INC(mp, xs_push_ail_flushing);
			trace_xfs_ail_flushing(lip);

			flushing++;
			ailp->ail_last_pushed_lsn = lsn;
			break;

		case XFS_ITEM_PINNED:
			XFS_STATS_INC(mp, xs_push_ail_pinned);
			trace_xfs_ail_pinned(lip);

			stuck++;
			ailp->ail_log_flush++;
			break;
		case XFS_ITEM_LOCKED:
			XFS_STATS_INC(mp, xs_push_ail_locked);
			trace_xfs_ail_locked(lip);

			stuck++;
			break;
		default:
			ASSERT(0);
			break;
		}

		count++;

		/*
		 * Are there too many items we can't do anything with?
		 *
		 * If we are skipping too many items because we can't flush
		 * them or they are already being flushed, we back off and
		 * give them time to complete whatever operation is being
		 * done. i.e. remove pressure from the AIL while we can't make
		 * progress so traversals don't slow down further inserts and
		 * removals to/from the AIL.
		 *
		 * The value of 100 is an arbitrary magic number based on
		 * observation.
		 */
		if (stuck > 100)
			break;

next_item:
		lip = xfs_trans_ail_cursor_next(ailp, &cur);
		if (lip == NULL)
			break;
		if (lip->li_lsn != lsn && count > 1000)
			break;
		lsn = lip->li_lsn;
	}

out_done_cursor:
	xfs_trans_ail_cursor_done(&cur);
out_done:
	spin_unlock(&ailp->ail_lock);

	if (xfs_buf_delwri_submit_nowait(&ailp->ail_buf_list))
		ailp->ail_log_flush++;

	if (!count || XFS_LSN_CMP(lsn, ailp->ail_target) >= 0) {
		/*
		 * We reached the target or the AIL is empty, so wait a bit
		 * longer for I/O to complete and remove pushed items from the
		 * AIL before we start the next scan from the start of the AIL.
		 */
		tout = 50;
		ailp->ail_last_pushed_lsn = 0;
	} else if (((stuck + flushing) * 100) / count > 90) {
		/*
		 * Either there is a lot of contention on the AIL or we are
		 * stuck due to operations in progress. "Stuck" in this case
		 * is defined as >90% of the items we tried to push were stuck.
		 *
		 * Backoff a bit more to allow some I/O to complete before
		 * restarting from the start of the AIL. This prevents us from
		 * spinning on the same items, and if they are pinned will allow
		 * the restart to issue a log force to unpin the stuck items.
		 */
		tout = 20;
		ailp->ail_last_pushed_lsn = 0;
	} else {
		/*
		 * Assume we have more work to do in a short while.
		 */
		tout = 0;
	}

	return tout;
}
static int
xfsaild(
	void			*data)
{
	struct xfs_ail		*ailp = data;
	long			tout = 0;	/* milliseconds */
	unsigned int		noreclaim_flag;

	noreclaim_flag = memalloc_noreclaim_save();
	set_freezable();

	while (1) {
		/*
		 * Long waits of 50ms or more occur when we've run out of items
		 * to push, so we only want uninterruptible state if we're
		 * actually blocked on something.
		 */
		if (tout && tout <= 20)
			set_current_state(TASK_KILLABLE|TASK_FREEZABLE);
		else
			set_current_state(TASK_INTERRUPTIBLE|TASK_FREEZABLE);

		/*
		 * Check kthread_should_stop() after we set the task state to
		 * guarantee that we either see the stop bit and exit or the
		 * task state is reset to runnable such that it's not scheduled
		 * out indefinitely and detects the stop bit at next iteration.
		 * A memory barrier is included in above task state set to
		 * serialize against kthread_stop().
		 */
		if (kthread_should_stop()) {
			__set_current_state(TASK_RUNNING);

			/*
			 * The caller forces out the AIL before stopping the
			 * thread in the common case, which means the delwri
			 * queue is drained. In the shutdown case, the queue may
			 * still hold relogged buffers that haven't been
			 * submitted because they were pinned since added to the
			 * queue.
			 *
			 * Log I/O error processing stales the underlying buffer
			 * and clears the delwri state, expecting the buf to be
			 * removed on the next submission attempt. That won't
			 * happen if we're shutting down, so this is the last
			 * opportunity to release such buffers from the queue.
			 */
			ASSERT(list_empty(&ailp->ail_buf_list) ||
			       xlog_is_shutdown(ailp->ail_log));
			xfs_buf_delwri_cancel(&ailp->ail_buf_list);
			break;
		}

		/* Idle if the AIL is empty. */
		spin_lock(&ailp->ail_lock);
		if (!xfs_ail_min(ailp) && list_empty(&ailp->ail_buf_list)) {
			spin_unlock(&ailp->ail_lock);
			schedule();
			tout = 0;
			continue;
		}
		spin_unlock(&ailp->ail_lock);

		if (tout)
			schedule_timeout(msecs_to_jiffies(tout));

		__set_current_state(TASK_RUNNING);

		try_to_freeze();

		tout = xfsaild_push(ailp);
	}

	memalloc_noreclaim_restore(noreclaim_flag);
	return 0;
}
/*
 * Push out all items in the AIL immediately and wait until the AIL is empty.
 */
void
xfs_ail_push_all_sync(
	struct xfs_ail		*ailp)
{
	DEFINE_WAIT(wait);

	spin_lock(&ailp->ail_lock);
	while (xfs_ail_max(ailp) != NULL) {
		prepare_to_wait(&ailp->ail_empty, &wait, TASK_UNINTERRUPTIBLE);
		wake_up_process(ailp->ail_task);
		spin_unlock(&ailp->ail_lock);
		schedule();
		spin_lock(&ailp->ail_lock);
	}
	spin_unlock(&ailp->ail_lock);

	finish_wait(&ailp->ail_empty, &wait);
}
void
__xfs_ail_assign_tail_lsn(
	struct xfs_ail		*ailp)
{
	struct xlog		*log = ailp->ail_log;
	xfs_lsn_t		tail_lsn;

	assert_spin_locked(&ailp->ail_lock);

	if (xlog_is_shutdown(log))
		return;

	tail_lsn = __xfs_ail_min_lsn(ailp);
	if (!tail_lsn)
		tail_lsn = ailp->ail_head_lsn;

	WRITE_ONCE(log->l_tail_space,
			xlog_lsn_sub(log, ailp->ail_head_lsn, tail_lsn));
	trace_xfs_log_assign_tail_lsn(log, tail_lsn);
	atomic64_set(&log->l_tail_lsn, tail_lsn);
}
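/*
 * Worked example, not part of the original file: with ail_head_lsn at
 * cycle 5, block 1000 and the AIL minimum at cycle 5, block 400,
 * xlog_lsn_sub() yields the 600 basic blocks (BBTOB(600) = 300 KiB) of
 * space consumed between tail and head, and that is what lands in
 * log->l_tail_space. If the AIL is empty, tail_lsn == ail_head_lsn and
 * l_tail_space drops to zero.
 */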
/*
 * Callers should pass the original tail lsn so that we can detect if the tail
 * has moved as a result of the operation that was performed. If the caller
 * needs to force a tail space update, it should pass NULLCOMMITLSN to bypass
 * the "did the tail LSN change?" checks. If the caller wants to avoid a tail
 * update (e.g. it knows the tail did not change) it should pass an @old_lsn of
 * 0.
 */
void
xfs_ail_update_finish(
	struct xfs_ail		*ailp,
	xfs_lsn_t		old_lsn) __releases(ailp->ail_lock)
{
	struct xlog		*log = ailp->ail_log;

	/* If the tail lsn hasn't changed, don't do updates or wakeups. */
	if (!old_lsn || old_lsn == __xfs_ail_min_lsn(ailp)) {
		spin_unlock(&ailp->ail_lock);
		return;
	}

	__xfs_ail_assign_tail_lsn(ailp);
	if (list_empty(&ailp->ail_head))
		wake_up_all(&ailp->ail_empty);
	spin_unlock(&ailp->ail_lock);
	xfs_log_space_wake(log->l_mp);
}
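/*
 * Illustrative sketch, not part of the original file: the caller contract
 * for xfs_ail_update_finish(). A helper samples the minimum LSN under the
 * AIL lock, performs an operation that may move the tail, and passes the
 * old minimum so the tail space is only recomputed when the minimum
 * actually changed. The function name is hypothetical; compare
 * xfs_trans_ail_delete() below for the real pattern.
 */
static inline void
xfs_ail_example_tail_move(
	struct xfs_ail		*ailp)
{
	xfs_lsn_t		old_lsn;

	spin_lock(&ailp->ail_lock);
	old_lsn = __xfs_ail_min_lsn(ailp);
	/* ... remove or reposition items here ... */
	xfs_ail_update_finish(ailp, old_lsn);	/* drops ail_lock */
}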
/*
 * xfs_trans_ail_update - bulk AIL insertion operation.
 *
 * @xfs_trans_ail_update takes an array of log items that all need to be
 * positioned at the same LSN in the AIL. If an item is not in the AIL, it will
 * be added. Otherwise, it will be repositioned by removing it and re-adding
 * it to the AIL. If we move the first item in the AIL, update the log tail to
 * match the new minimum LSN in the AIL.
 *
 * This function processes all the items in the array under a single hold of
 * the AIL lock, so once we have the AIL lock, we need to check each log item
 * LSN to confirm it needs to be moved forward in the AIL.
 *
 * To optimise the insert operation, we delete all the items from the AIL in
 * the first pass, moving them into a temporary list, then splice the temporary
 * list into the correct position in the AIL. This avoids needing to do an
 * insert operation on every item.
 *
 * This function must be called with the AIL lock held. The lock is dropped
 * before returning.
 */
void
xfs_trans_ail_update_bulk(
	struct xfs_ail		*ailp,
	struct xfs_ail_cursor	*cur,
	struct xfs_log_item	**log_items,
	int			nr_items,
	xfs_lsn_t		lsn) __releases(ailp->ail_lock)
{
	struct xfs_log_item	*mlip;
	xfs_lsn_t		tail_lsn = 0;
	int			i;
	LIST_HEAD(tmp);

	ASSERT(nr_items > 0);		/* Not required, but true. */
	mlip = xfs_ail_min(ailp);

	for (i = 0; i < nr_items; i++) {
		struct xfs_log_item *lip = log_items[i];
		if (test_and_set_bit(XFS_LI_IN_AIL, &lip->li_flags)) {
			/* check if we really need to move the item */
			if (XFS_LSN_CMP(lsn, lip->li_lsn) <= 0)
				continue;

			trace_xfs_ail_move(lip, lip->li_lsn, lsn);
			if (mlip == lip && !tail_lsn)
				tail_lsn = lip->li_lsn;

			xfs_ail_delete(ailp, lip);
		} else {
			trace_xfs_ail_insert(lip, 0, lsn);
		}
		lip->li_lsn = lsn;
		list_add_tail(&lip->li_ail, &tmp);
	}

	if (!list_empty(&tmp))
		xfs_ail_splice(ailp, cur, &tmp, lsn);

	/*
	 * If this is the first insert, wake up the push daemon so it can
	 * actively scan for items to push. We also need to do a log tail
	 * LSN update to ensure that it is correctly tracked by the log, so
	 * set the tail_lsn to NULLCOMMITLSN so that xfs_ail_update_finish()
	 * will see that the tail lsn has changed and will update the tail
	 * appropriately.
	 */
	if (!mlip) {
		wake_up_process(ailp->ail_task);
		tail_lsn = NULLCOMMITLSN;
	}

	xfs_ail_update_finish(ailp, tail_lsn);
}
/* Insert a log item into the AIL. */
void
xfs_trans_ail_insert(
	struct xfs_ail		*ailp,
	struct xfs_log_item	*lip,
	xfs_lsn_t		lsn)
{
	spin_lock(&ailp->ail_lock);
	xfs_trans_ail_update_bulk(ailp, NULL, &lip, 1, lsn);
}
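/*
 * Illustrative sketch, not part of the original file: the typical caller of
 * xfs_trans_ail_insert() is a log item's iop_committed handler, which moves
 * the item to the commit LSN once the transaction reaches the on-disk log.
 * The handler below is hypothetical.
 */
static inline xfs_lsn_t
xfs_example_iop_committed(
	struct xfs_log_item	*lip,
	xfs_lsn_t		commit_lsn)
{
	/* first insert, or reposition if already in the AIL */
	xfs_trans_ail_insert(lip->li_ailp, lip, commit_lsn);
	return commit_lsn;
}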
/*
 * Delete one log item from the AIL.
 *
 * If this item was at the tail of the AIL, return the LSN of the log item so
 * that we can use it to check if the LSN of the tail of the log has moved
 * when finishing up the AIL delete process in xfs_ail_update_finish().
 */
xfs_lsn_t
xfs_ail_delete_one(
	struct xfs_ail		*ailp,
	struct xfs_log_item	*lip)
{
	struct xfs_log_item	*mlip = xfs_ail_min(ailp);
	xfs_lsn_t		lsn = lip->li_lsn;

	trace_xfs_ail_delete(lip, mlip->li_lsn, lip->li_lsn);
	xfs_ail_delete(ailp, lip);
	clear_bit(XFS_LI_IN_AIL, &lip->li_flags);
	lip->li_lsn = 0;

	if (mlip == lip)
		return lsn;
	return 0;
}
void
xfs_trans_ail_delete(
	struct xfs_log_item	*lip,
	int			shutdown_type)
{
	struct xfs_ail		*ailp = lip->li_ailp;
	struct xlog		*log = ailp->ail_log;
	xfs_lsn_t		tail_lsn;

	spin_lock(&ailp->ail_lock);
	if (!test_bit(XFS_LI_IN_AIL, &lip->li_flags)) {
		spin_unlock(&ailp->ail_lock);
		if (shutdown_type && !xlog_is_shutdown(log)) {
			xfs_alert_tag(log->l_mp, XFS_PTAG_AILDELETE,
	"%s: attempting to delete a log item that is not in the AIL",
					__func__);
			xlog_force_shutdown(log, shutdown_type);
		}
		return;
	}

	/* xfs_ail_update_finish() drops the AIL lock */
	xfs_clear_li_failed(lip);
	tail_lsn = xfs_ail_delete_one(ailp, lip);
	xfs_ail_update_finish(ailp, tail_lsn);
}
int
xfs_trans_ail_init(
	xfs_mount_t		*mp)
{
	struct xfs_ail		*ailp;

	ailp = kzalloc(sizeof(struct xfs_ail),
			GFP_KERNEL | __GFP_RETRY_MAYFAIL);
	if (!ailp)
		return -ENOMEM;

	ailp->ail_log = mp->m_log;
	INIT_LIST_HEAD(&ailp->ail_head);
	INIT_LIST_HEAD(&ailp->ail_cursors);
	spin_lock_init(&ailp->ail_lock);
	INIT_LIST_HEAD(&ailp->ail_buf_list);
	init_waitqueue_head(&ailp->ail_empty);

	ailp->ail_task = kthread_run(xfsaild, ailp, "xfsaild/%s",
				mp->m_super->s_id);
	if (IS_ERR(ailp->ail_task))
		goto out_free_ailp;

	mp->m_ail = ailp;
	return 0;

out_free_ailp:
	kfree(ailp);
	return -ENOMEM;
}
void
xfs_trans_ail_destroy(
	xfs_mount_t		*mp)
{
	struct xfs_ail		*ailp = mp->m_ail;

	kthread_stop(ailp->ail_task);
	kfree(ailp);
}