/*
 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */
#include "xfs_types.h"
#include "xfs_trans.h"
#include "xfs_mount.h"
#include "xfs_bmap_btree.h"
#include "xfs_inode.h"
#include "xfs_dinode.h"
#include "xfs_error.h"
#include "xfs_filestream.h"
#include "xfs_vnodeops.h"
#include "xfs_inode_item.h"
#include "xfs_quota.h"
#include "xfs_trace.h"
#include "xfs_fsops.h"

#include <linux/kthread.h>
#include <linux/freezer.h>
/*
 * The inode lookup is done in batches to keep the amount of lock traffic and
 * radix tree lookups to a minimum. The batch size is a trade off between
 * lookup reduction and stack usage. This is in the reclaim path, so we can't
 * be too greedy.
 */
#define XFS_LOOKUP_BATCH	32
STATIC int
xfs_inode_ag_walk_grab(
	struct xfs_inode	*ip)
{
	struct inode		*inode = VFS_I(ip);

	ASSERT(rcu_read_lock_held());

	/*
	 * check for stale RCU freed inode
	 *
	 * If the inode has been reallocated, it doesn't matter if it's not in
	 * the AG we are walking - we are walking for writeback, so if it
	 * passes all the "valid inode" checks and is dirty, then we'll write
	 * it back anyway.  If it has been reallocated and still being
	 * initialised, the XFS_INEW check below will catch it.
	 */
	spin_lock(&ip->i_flags_lock);
	if (!ip->i_ino)
		goto out_unlock_noent;

	/* avoid new or reclaimable inodes. Leave for reclaim code to flush */
	if (__xfs_iflags_test(ip, XFS_INEW | XFS_IRECLAIMABLE | XFS_IRECLAIM))
		goto out_unlock_noent;
	spin_unlock(&ip->i_flags_lock);

	/* nothing to sync during shutdown */
	if (XFS_FORCED_SHUTDOWN(ip->i_mount))
		return EFSCORRUPTED;

	/* If we can't grab the inode, it must be on its way to reclaim. */
	if (!igrab(inode))
		return ENOENT;

	if (is_bad_inode(inode)) {
		IRELE(ip);
		return ENOENT;
	}

	/* inode is valid */
	return 0;

out_unlock_noent:
	spin_unlock(&ip->i_flags_lock);
	return ENOENT;
}
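/*
 * Walk all in-core inodes in a single AG and call @execute on each one we
 * can grab. Inodes are looked up in batches of XFS_LOOKUP_BATCH under RCU
 * to keep radix tree lock traffic to a minimum.
 */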
STATIC int
xfs_inode_ag_walk(
	struct xfs_mount	*mp,
	struct xfs_perag	*pag,
	int			(*execute)(struct xfs_inode *ip,
					   struct xfs_perag *pag, int flags),
	int			flags)
{
	uint32_t		first_index;
	int			last_error = 0;
	int			skipped;
	int			done;
	int			nr_found;

restart:
	done = 0;
	skipped = 0;
	first_index = 0;
	nr_found = 0;
	do {
		struct xfs_inode *batch[XFS_LOOKUP_BATCH];
		int		error = 0;
		int		i;

		rcu_read_lock();
		nr_found = radix_tree_gang_lookup(&pag->pag_ici_root,
					(void **)batch, first_index,
					XFS_LOOKUP_BATCH);
		if (!nr_found) {
			rcu_read_unlock();
			break;
		}

		/*
		 * Grab the inodes before we drop the lock. If we found
		 * nothing, nr == 0 and the loop will be skipped.
		 */
		for (i = 0; i < nr_found; i++) {
			struct xfs_inode *ip = batch[i];

			if (done || xfs_inode_ag_walk_grab(ip))
				batch[i] = NULL;

			/*
			 * Update the index for the next lookup. Catch
			 * overflows into the next AG range which can occur if
			 * we have inodes in the last block of the AG and we
			 * are currently pointing to the last inode.
			 *
			 * Because we may see inodes that are from the wrong AG
			 * due to RCU freeing and reallocation, only update the
			 * index if it lies in this AG. It was a race that led
			 * us to see this inode, so another lookup from the
			 * same index will not find it again.
			 */
			if (XFS_INO_TO_AGNO(mp, ip->i_ino) != pag->pag_agno)
				continue;
			first_index = XFS_INO_TO_AGINO(mp, ip->i_ino + 1);
			if (first_index < XFS_INO_TO_AGINO(mp, ip->i_ino))
				done = 1;
		}

		/* unlock now we've grabbed the inodes. */
		rcu_read_unlock();

		for (i = 0; i < nr_found; i++) {
			if (!batch[i])
				continue;
			error = execute(batch[i], pag, flags);
			IRELE(batch[i]);
			if (error == EAGAIN) {
				skipped++;
				continue;
			}
			if (error && last_error != EFSCORRUPTED)
				last_error = error;
		}

		/* bail out if the filesystem is corrupted. */
		if (error == EFSCORRUPTED)
			break;

	} while (nr_found && !done);

	if (skipped) {
		delay(1);
		goto restart;
	}
	return last_error;
}
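/*
 * Walk every AG in the filesystem and run @execute against the inodes in
 * each one. The last error seen is remembered, and an EFSCORRUPTED error
 * terminates the walk early.
 */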
int
xfs_inode_ag_iterator(
	struct xfs_mount	*mp,
	int			(*execute)(struct xfs_inode *ip,
					   struct xfs_perag *pag, int flags),
	int			flags)
{
	struct xfs_perag	*pag;
	int			error = 0;
	int			last_error = 0;
	xfs_agnumber_t		ag;

	ag = 0;
	while ((pag = xfs_perag_get(mp, ag))) {
		ag = pag->pag_agno + 1;
		error = xfs_inode_ag_walk(mp, pag, execute, flags);
		xfs_perag_put(pag);
		if (error) {
			last_error = error;
			if (error == EFSCORRUPTED)
				break;
		}
	}
	return XFS_ERROR(last_error);
}
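/*
 * Write back the dirty pagecache pages of a single inode. SYNC_TRYLOCK
 * skips inodes whose iolock cannot be taken immediately; SYNC_WAIT makes
 * the writeback synchronous and waits for outstanding I/O to complete.
 */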
STATIC int
xfs_sync_inode_data(
	struct xfs_inode	*ip,
	struct xfs_perag	*pag,
	int			flags)
{
	struct inode		*inode = VFS_I(ip);
	struct address_space	*mapping = inode->i_mapping;
	int			error = 0;

	if (!mapping_tagged(mapping, PAGECACHE_TAG_DIRTY))
		goto out_wait;

	if (!xfs_ilock_nowait(ip, XFS_IOLOCK_SHARED)) {
		if (flags & SYNC_TRYLOCK)
			goto out_wait;
		xfs_ilock(ip, XFS_IOLOCK_SHARED);
	}

	error = xfs_flush_pages(ip, 0, -1, (flags & SYNC_WAIT) ?
				0 : XBF_ASYNC, FI_NONE);
	xfs_iunlock(ip, XFS_IOLOCK_SHARED);

 out_wait:
	if (flags & SYNC_WAIT)
		xfs_ioend_wait(ip);
	return error;
}
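/*
 * Flush a single inode's in-core metadata out to its backing buffer if the
 * inode is dirty. This is non-blocking unless SYNC_WAIT is set.
 */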
STATIC int
xfs_sync_inode_attr(
	struct xfs_inode	*ip,
	struct xfs_perag	*pag,
	int			flags)
{
	int			error = 0;

	xfs_ilock(ip, XFS_ILOCK_SHARED);
	if (xfs_inode_clean(ip))
		goto out_unlock;
	if (!xfs_iflock_nowait(ip)) {
		if (!(flags & SYNC_WAIT))
			goto out_unlock;
		xfs_iflock(ip);
	}

	if (xfs_inode_clean(ip)) {
		xfs_ifunlock(ip);
		goto out_unlock;
	}

	error = xfs_iflush(ip, flags);

 out_unlock:
	xfs_iunlock(ip, XFS_ILOCK_SHARED);
	return error;
}
/*
 * Write out pagecache data for the whole filesystem.
 */
STATIC int
xfs_sync_data(
	struct xfs_mount	*mp,
	int			flags)
{
	int			error;

	ASSERT((flags & ~(SYNC_TRYLOCK|SYNC_WAIT)) == 0);

	error = xfs_inode_ag_iterator(mp, xfs_sync_inode_data, flags);
	if (error)
		return XFS_ERROR(error);

	xfs_log_force(mp, (flags & SYNC_WAIT) ? XFS_LOG_SYNC : 0);
	return 0;
}
/*
 * Write out inode metadata (attributes) for the whole filesystem.
 */
STATIC int
xfs_sync_attr(
	struct xfs_mount	*mp,
	int			flags)
{
	ASSERT((flags & ~SYNC_WAIT) == 0);

	return xfs_inode_ag_iterator(mp, xfs_sync_inode_attr, flags);
}
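/*
 * Write the in-core superblock buffer back to disk.
 */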
STATIC int
xfs_sync_fsdata(
	struct xfs_mount	*mp)
{
	struct xfs_buf		*bp;

	/*
	 * If the buffer is pinned then push on the log so we won't get stuck
	 * waiting in the write for someone, maybe ourselves, to flush the log.
	 *
	 * Even though we just pushed the log above, we did not have the
	 * superblock buffer locked at that point so it can become pinned in
	 * between there and here.
	 */
	bp = xfs_getsb(mp, 0);
	if (XFS_BUF_ISPINNED(bp))
		xfs_log_force(mp, 0);

	return xfs_bwrite(mp, bp);
}
/*
 * When remounting a filesystem read-only or freezing the filesystem, we have
 * two phases to execute. The first phase is syncing the data before we
 * quiesce the filesystem, and the second is flushing all the inodes out after
 * we've waited for all the transactions created by the first phase to
 * complete. The second phase ensures that the inodes are written to their
 * location on disk rather than just existing in transactions in the log. This
 * means after a quiesce there is no log replay required to write the inodes to
 * disk (this is the main difference between a sync and a quiesce).
 */

/*
 * First stage of freeze - no writers will make progress now we are here,
 * so we flush delwri and delalloc buffers here, then wait for all I/O to
 * complete. Data is frozen at that point. Metadata is not frozen,
 * transactions can still occur here so don't bother flushing the buftarg
 * because it'll just get dirty again.
 */
int
xfs_quiesce_data(
	struct xfs_mount	*mp)
{
	int			error, error2 = 0;

	/* push non-blocking */
	xfs_sync_data(mp, 0);
	xfs_qm_sync(mp, SYNC_TRYLOCK);

	/* push and block till complete */
	xfs_sync_data(mp, SYNC_WAIT);
	xfs_qm_sync(mp, SYNC_WAIT);

	/* write superblock and hoover up shutdown errors */
	error = xfs_sync_fsdata(mp);

	/* make sure all delwri buffers are written out */
	xfs_flush_buftarg(mp->m_ddev_targp, 1);

	/* mark the log as covered if needed */
	if (xfs_log_need_covered(mp))
		error2 = xfs_fs_log_dummy(mp);

	/* flush data-only devices */
	if (mp->m_rtdev_targp)
		XFS_bflush(mp->m_rtdev_targp);

	return error ? error : error2;
}
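/*
 * Flush and reclaim all dirty inodes and metadata so the filesystem can be
 * quiesced; see the loop comment below for why this needs to iterate.
 */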
STATIC void
xfs_quiesce_fs(
	struct xfs_mount	*mp)
{
	int	count = 0, pincount;

	xfs_reclaim_inodes(mp, 0);
	xfs_flush_buftarg(mp->m_ddev_targp, 0);

	/*
	 * This loop must run at least twice. The first instance of the loop
	 * will flush most metadata, but that will generate more metadata
	 * (typically directory updates), which then must be flushed and
	 * logged before we can write the unmount record. We also do sync
	 * reclaim of inodes to catch any that the above delwri flush skipped.
	 */
	do {
		xfs_reclaim_inodes(mp, SYNC_WAIT);
		xfs_sync_attr(mp, SYNC_WAIT);
		pincount = xfs_flush_buftarg(mp->m_ddev_targp, 1);
		if (!pincount) {
			delay(50);
			count++;
		}
	} while (count < 2 && pincount);
}
/*
 * Second stage of a quiesce. The data is already synced; now we have to take
 * care of the metadata. New transactions are already blocked, so we need to
 * wait for any remaining transactions to drain out before proceeding.
 */
void
xfs_quiesce_attr(
	struct xfs_mount	*mp)
{
	int	error = 0;

	/* wait for all modifications to complete */
	while (atomic_read(&mp->m_active_trans) > 0)
		delay(100);

	/* flush inodes and push all remaining buffers out to disk */
	xfs_quiesce_fs(mp);

	/*
	 * Just warn here till VFS can correctly support
	 * read-only remount without racing.
	 */
	WARN_ON(atomic_read(&mp->m_active_trans) != 0);

	/* Push the superblock and write an unmount record */
	error = xfs_log_sbcount(mp, 1);
	if (error)
		xfs_warn(mp, "xfs_attr_quiesce: failed to log sb changes. "
				"Frozen image may not be consistent.");
	xfs_log_unmount_write(mp);
	xfs_unmountfs_writesb(mp);
}
/*
 * Enqueue a work item to be picked up by the vfs xfssyncd thread.
 * Doing this has two advantages:
 * - It saves on stack space, which is tight in certain situations
 * - It can be used (with care) as a mechanism to avoid deadlocks.
 * Flushing while allocating in a full filesystem requires both.
 */
STATIC void
xfs_syncd_queue_work(
	struct xfs_mount	*mp,
	void			*data,
	void			(*syncer)(struct xfs_mount *, void *),
	struct completion	*completion)
{
	struct xfs_sync_work	*work;

	work = kmem_alloc(sizeof(struct xfs_sync_work), KM_SLEEP);
	INIT_LIST_HEAD(&work->w_list);
	work->w_syncer = syncer;
	work->w_data = data;
	work->w_mount = mp;
	work->w_completion = completion;
	spin_lock(&mp->m_sync_lock);
	list_add_tail(&work->w_list, &mp->m_sync_list);
	spin_unlock(&mp->m_sync_lock);
	wake_up_process(mp->m_sync_task);
}
/*
 * Flush delayed allocate data, attempting to free up reserved space
 * from existing allocations. At this point a new allocation attempt
 * has failed with ENOSPC and we are in the process of scratching our
 * heads, looking about for more room...
 */
STATIC void
xfs_flush_inodes_work(
	struct xfs_mount	*mp,
	void			*arg)
{
	struct inode		*inode = arg;

	xfs_sync_data(mp, SYNC_TRYLOCK);
	xfs_sync_data(mp, SYNC_TRYLOCK | SYNC_WAIT);
	iput(inode);
}

void
xfs_flush_inodes(
	struct xfs_inode	*ip)
{
	struct inode		*inode = VFS_I(ip);
	DECLARE_COMPLETION_ONSTACK(completion);

	igrab(inode);
	xfs_syncd_queue_work(ip->i_mount, inode, xfs_flush_inodes_work, &completion);
	wait_for_completion(&completion);
	xfs_log_force(ip->i_mount, XFS_LOG_SYNC);
}
/*
 * Every sync period we need to unpin all items, reclaim inodes and sync
 * disk quotas. We might need to cover the log to indicate that the
 * filesystem is idle and not frozen.
 */
STATIC void
xfs_sync_worker(
	struct xfs_mount	*mp,
	void			*unused)
{
	int		error;

	if (!(mp->m_flags & XFS_MOUNT_RDONLY)) {
		/* dgc: errors ignored here */
		if (mp->m_super->s_frozen == SB_UNFROZEN &&
		    xfs_log_need_covered(mp))
			error = xfs_fs_log_dummy(mp);
		else
			xfs_log_force(mp, 0);
		xfs_reclaim_inodes(mp, 0);
		error = xfs_qm_sync(mp, SYNC_TRYLOCK);
	}
	wake_up(&mp->m_wait_single_sync_task);
}
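/*
 * xfssyncd is the per-mount worker thread. It sleeps until the sync period
 * expires or work is queued on m_sync_list, then runs every queued w_syncer
 * callback, including the periodic xfs_sync_worker above.
 */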
STATIC int
xfssyncd(
	void			*arg)
{
	struct xfs_mount	*mp = arg;
	long			timeleft;
	xfs_sync_work_t		*work, *n;
	LIST_HEAD		(tmp);

	set_freezable();
	timeleft = xfs_syncd_centisecs * msecs_to_jiffies(10);
	for (;;) {
		if (list_empty(&mp->m_sync_list))
			timeleft = schedule_timeout_interruptible(timeleft);
		try_to_freeze();
		if (kthread_should_stop() && list_empty(&mp->m_sync_list))
			break;

		spin_lock(&mp->m_sync_lock);
		/*
		 * We can get woken by laptop mode, to do a sync -
		 * that's the (only!) case where the list would be
		 * empty with time remaining.
		 */
		if (!timeleft || list_empty(&mp->m_sync_list)) {
			if (!timeleft)
				timeleft = xfs_syncd_centisecs *
							msecs_to_jiffies(10);
			INIT_LIST_HEAD(&mp->m_sync_work.w_list);
			list_add_tail(&mp->m_sync_work.w_list,
					&mp->m_sync_list);
		}
		list_splice_init(&mp->m_sync_list, &tmp);
		spin_unlock(&mp->m_sync_lock);

		list_for_each_entry_safe(work, n, &tmp, w_list) {
			(*work->w_syncer)(mp, work->w_data);
			list_del(&work->w_list);
			if (work == &mp->m_sync_work)
				continue;
			if (work->w_completion)
				complete(work->w_completion);
			kmem_free(work);
		}
	}

	return 0;
}
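/*
 * Start the xfssyncd thread for this mount and set up the periodic sync
 * work item it runs.
 */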
int
xfs_syncd_init(
	struct xfs_mount	*mp)
{
	mp->m_sync_work.w_syncer = xfs_sync_worker;
	mp->m_sync_work.w_mount = mp;
	mp->m_sync_work.w_completion = NULL;
	mp->m_sync_task = kthread_run(xfssyncd, mp, "xfssyncd/%s", mp->m_fsname);
	if (IS_ERR(mp->m_sync_task))
		return -PTR_ERR(mp->m_sync_task);
	return 0;
}
void
xfs_syncd_stop(
	struct xfs_mount	*mp)
{
	kthread_stop(mp->m_sync_task);
}
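/*
 * Tag an inode as reclaimable in its per-AG inode radix tree. The first
 * reclaimable inode in an AG also propagates the tag up into the per-mount
 * perag radix tree so that reclaim can find busy AGs quickly.
 */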
void
__xfs_inode_set_reclaim_tag(
	struct xfs_perag	*pag,
	struct xfs_inode	*ip)
{
	radix_tree_tag_set(&pag->pag_ici_root,
			   XFS_INO_TO_AGINO(ip->i_mount, ip->i_ino),
			   XFS_ICI_RECLAIM_TAG);

	if (!pag->pag_ici_reclaimable) {
		/* propagate the reclaim tag up into the perag radix tree */
		spin_lock(&ip->i_mount->m_perag_lock);
		radix_tree_tag_set(&ip->i_mount->m_perag_tree,
				XFS_INO_TO_AGNO(ip->i_mount, ip->i_ino),
				XFS_ICI_RECLAIM_TAG);
		spin_unlock(&ip->i_mount->m_perag_lock);
		trace_xfs_perag_set_reclaim(ip->i_mount, pag->pag_agno,
							-1, _RET_IP_);
	}
	pag->pag_ici_reclaimable++;
}
/*
 * We set the inode flag atomically with the radix tree tag.
 * Once we get tag lookups on the radix tree, this inode flag
 * can go away.
 */
void
xfs_inode_set_reclaim_tag(
	struct xfs_inode	*ip)
{
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_perag	*pag;

	pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ip->i_ino));
	spin_lock(&pag->pag_ici_lock);
	spin_lock(&ip->i_flags_lock);
	__xfs_inode_set_reclaim_tag(pag, ip);
	__xfs_iflags_set(ip, XFS_IRECLAIMABLE);
	spin_unlock(&ip->i_flags_lock);
	spin_unlock(&pag->pag_ici_lock);
	xfs_perag_put(pag);
}
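/*
 * Account for an inode leaving reclaim. When the last reclaimable inode in
 * the AG goes away, clear the reclaim tag from the per-mount perag tree too.
 */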
STATIC void
__xfs_inode_clear_reclaim(
	struct xfs_perag	*pag,
	struct xfs_inode	*ip)
{
	pag->pag_ici_reclaimable--;
	if (!pag->pag_ici_reclaimable) {
		/* clear the reclaim tag from the perag radix tree */
		spin_lock(&ip->i_mount->m_perag_lock);
		radix_tree_tag_clear(&ip->i_mount->m_perag_tree,
				XFS_INO_TO_AGNO(ip->i_mount, ip->i_ino),
				XFS_ICI_RECLAIM_TAG);
		spin_unlock(&ip->i_mount->m_perag_lock);
		trace_xfs_perag_clear_reclaim(ip->i_mount, pag->pag_agno,
							-1, _RET_IP_);
	}
}
void
__xfs_inode_clear_reclaim_tag(
	struct xfs_mount	*mp,
	struct xfs_perag	*pag,
	struct xfs_inode	*ip)
{
	radix_tree_tag_clear(&pag->pag_ici_root,
			XFS_INO_TO_AGINO(mp, ip->i_ino), XFS_ICI_RECLAIM_TAG);
	__xfs_inode_clear_reclaim(pag, ip);
}
/*
 * Grab the inode for reclaim exclusively.
 * Return 0 if we grabbed it, non-zero otherwise.
 */
STATIC int
xfs_reclaim_inode_grab(
	struct xfs_inode	*ip,
	int			flags)
{
	ASSERT(rcu_read_lock_held());

	/* quick check for stale RCU freed inode */
	if (!ip->i_ino)
		return 1;

	/*
	 * Do some unlocked checks first to avoid unnecessary lock traffic.
	 * The first is a flush lock check, the second is an already-in-reclaim
	 * check. Only do these checks if we are not going to block on locks.
	 */
	if ((flags & SYNC_TRYLOCK) &&
	    (!ip->i_flush.done || __xfs_iflags_test(ip, XFS_IRECLAIM))) {
		return 1;
	}

	/*
	 * The radix tree lock here protects a thread in xfs_iget from racing
	 * with us starting reclaim on the inode. Once we have the
	 * XFS_IRECLAIM flag set it will not touch us.
	 *
	 * Due to RCU lookup, we may find inodes that have been freed and only
	 * have XFS_IRECLAIM set. Indeed, we may see reallocated inodes that
	 * aren't candidates for reclaim at all, so we must check that
	 * XFS_IRECLAIMABLE is set before proceeding to reclaim.
	 */
	spin_lock(&ip->i_flags_lock);
	if (!__xfs_iflags_test(ip, XFS_IRECLAIMABLE) ||
	    __xfs_iflags_test(ip, XFS_IRECLAIM)) {
		/* not a reclaim candidate. */
		spin_unlock(&ip->i_flags_lock);
		return 1;
	}
	__xfs_iflags_set(ip, XFS_IRECLAIM);
	spin_unlock(&ip->i_flags_lock);
	return 0;
}
/*
 * Inodes in different states need to be treated differently, and the return
 * value of xfs_iflush is not sufficient to get this right. The following table
 * lists the inode states and the reclaim actions necessary for non-blocking
 * reclaim:
 *
 *	inode state		iflush ret	required action
 *	---------------		----------	---------------
 *	bad			-		reclaim
 *	shutdown		EIO		unpin and reclaim
 *	clean, unpinned		0		reclaim
 *	stale, unpinned		0		reclaim
 *	clean, pinned(*)	0		requeue
 *	stale, pinned		EAGAIN		requeue
 *	dirty, delwri ok	0		requeue
 *	dirty, delwri blocked	EAGAIN		requeue
 *	dirty, sync flush	0		reclaim
 *
 * (*) dgc: I don't think the clean, pinned state is possible but it gets
 * handled anyway given the order of checks implemented.
 *
 * As can be seen from the table, the return value of xfs_iflush() is not
 * sufficient to correctly decide the reclaim action here. The checks in
 * xfs_iflush() might look like duplicates, but they are not.
 *
 * Also, because we get the flush lock first, we know that any inode that has
 * been flushed delwri has had the flush completed by the time we check that
 * the inode is clean. The clean inode check needs to be done before flushing
 * the inode delwri, otherwise we would loop forever requeuing clean inodes as
 * we cannot tell apart a successful delwri flush and a clean inode from the
 * return value of xfs_iflush().
 *
 * Note that because the inode is flushed delayed write by background
 * writeback, the flush lock may already be held here and waiting on it can
 * result in very long latencies. Hence for sync reclaims, where we wait on the
 * flush lock, the caller should push out delayed write inodes first before
 * trying to reclaim them to minimise the amount of time spent waiting. For
 * background reclaim, we just requeue the inode for the next pass.
 *
 * Hence the order of actions after gaining the locks should be:
 *	bad		=> reclaim
 *	shutdown	=> unpin and reclaim
 *	pinned, delwri	=> requeue
 *	pinned, sync	=> unpin
 *	stale		=> reclaim
 *	clean		=> reclaim
 *	dirty, delwri	=> flush and requeue
 *	dirty, sync	=> flush, wait and reclaim
 */
STATIC int
xfs_reclaim_inode(
	struct xfs_inode	*ip,
	struct xfs_perag	*pag,
	int			sync_mode)
{
	int	error = 0;

	xfs_ilock(ip, XFS_ILOCK_EXCL);
	if (!xfs_iflock_nowait(ip)) {
		if (!(sync_mode & SYNC_WAIT))
			goto out;
		xfs_iflock(ip);
	}

	if (is_bad_inode(VFS_I(ip)))
		goto reclaim;
	if (XFS_FORCED_SHUTDOWN(ip->i_mount)) {
		xfs_iunpin_wait(ip);
		goto reclaim;
	}
	if (xfs_ipincount(ip)) {
		if (!(sync_mode & SYNC_WAIT)) {
			xfs_ifunlock(ip);
			goto out;
		}
		xfs_iunpin_wait(ip);
	}
	if (xfs_iflags_test(ip, XFS_ISTALE))
		goto reclaim;
	if (xfs_inode_clean(ip))
		goto reclaim;

	/* Now we have an inode that needs flushing */
	error = xfs_iflush(ip, sync_mode);
	if (sync_mode & SYNC_WAIT) {
		xfs_iflock(ip);
		goto reclaim;
	}

	/*
	 * When we have to flush an inode but don't have SYNC_WAIT set, we
	 * flush the inode out using a delwri buffer and wait for the next
	 * call into reclaim to find it in a clean state instead of waiting for
	 * it now. We also don't return errors here - if the error is transient
	 * then the next reclaim pass will flush the inode, and if the error
	 * is permanent then the next sync reclaim will reclaim the inode and
	 * pass on the error.
	 */
	if (error && error != EAGAIN && !XFS_FORCED_SHUTDOWN(ip->i_mount)) {
		xfs_warn(ip->i_mount,
			"inode 0x%llx background reclaim flush failed with %d",
			(long long)ip->i_ino, error);
	}
out:
	xfs_iflags_clear(ip, XFS_IRECLAIM);
	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	/*
	 * We could return EAGAIN here to make reclaim rescan the inode tree in
	 * a short while. However, this just burns CPU time scanning the tree
	 * waiting for IO to complete and xfssyncd never goes back to the idle
	 * state. Instead, return 0 to let the next scheduled background reclaim
	 * attempt to reclaim the inode again.
	 */
	return 0;

reclaim:
	xfs_ifunlock(ip);
	xfs_iunlock(ip, XFS_ILOCK_EXCL);

	XFS_STATS_INC(xs_ig_reclaims);
	/*
	 * Remove the inode from the per-AG radix tree.
	 *
	 * Because radix_tree_delete won't complain even if the item was never
	 * added to the tree, assert that it's been there before to catch
	 * problems with the inode life time early on.
	 */
	spin_lock(&pag->pag_ici_lock);
	if (!radix_tree_delete(&pag->pag_ici_root,
				XFS_INO_TO_AGINO(ip->i_mount, ip->i_ino)))
		ASSERT(0);
	__xfs_inode_clear_reclaim(pag, ip);
	spin_unlock(&pag->pag_ici_lock);

	/*
	 * Here we do an (almost) spurious inode lock in order to coordinate
	 * with inode cache radix tree lookups. This is because the lookup
	 * can reference the inodes in the cache without taking references.
	 *
	 * We make that OK here by ensuring that we wait until the inode is
	 * unlocked after the lookup before we go ahead and free it. We get
	 * both the ilock and the iolock because the code may need to drop the
	 * ilock but will still hold the iolock.
	 */
	xfs_ilock(ip, XFS_ILOCK_EXCL | XFS_IOLOCK_EXCL);
	xfs_qm_dqdetach(ip);
	xfs_iunlock(ip, XFS_ILOCK_EXCL | XFS_IOLOCK_EXCL);

	xfs_inode_free(ip);
	return error;
}
/*
 * Walk the AGs and reclaim the inodes in them. Even if the filesystem is
 * corrupted, we still want to try to reclaim all the inodes. If we don't,
 * then a shutdown during the filesystem unmount reclaim walk would leak all
 * the unreclaimed inodes.
 */
int
xfs_reclaim_inodes_ag(
	struct xfs_mount	*mp,
	int			flags,
	int			*nr_to_scan)
{
	struct xfs_perag	*pag;
	int			error = 0;
	int			last_error = 0;
	xfs_agnumber_t		ag;
	int			trylock = flags & SYNC_TRYLOCK;
	int			skipped;

restart:
	ag = 0;
	skipped = 0;
	while ((pag = xfs_perag_get_tag(mp, ag, XFS_ICI_RECLAIM_TAG))) {
		unsigned long	first_index = 0;
		int		done = 0;
		int		nr_found = 0;

		ag = pag->pag_agno + 1;

		if (trylock) {
			if (!mutex_trylock(&pag->pag_ici_reclaim_lock)) {
				skipped++;
				xfs_perag_put(pag);
				continue;
			}
			first_index = pag->pag_ici_reclaim_cursor;
		} else
			mutex_lock(&pag->pag_ici_reclaim_lock);

		do {
			struct xfs_inode *batch[XFS_LOOKUP_BATCH];
			int	i;

			rcu_read_lock();
			nr_found = radix_tree_gang_lookup_tag(
					&pag->pag_ici_root,
					(void **)batch, first_index,
					XFS_LOOKUP_BATCH,
					XFS_ICI_RECLAIM_TAG);
			if (!nr_found) {
				done = 1;
				rcu_read_unlock();
				break;
			}

			/*
			 * Grab the inodes before we drop the lock. If we found
			 * nothing, nr == 0 and the loop will be skipped.
			 */
			for (i = 0; i < nr_found; i++) {
				struct xfs_inode *ip = batch[i];

				if (done || xfs_reclaim_inode_grab(ip, flags))
					batch[i] = NULL;

				/*
				 * Update the index for the next lookup. Catch
				 * overflows into the next AG range which can
				 * occur if we have inodes in the last block of
				 * the AG and we are currently pointing to the
				 * last inode.
				 *
				 * Because we may see inodes that are from the
				 * wrong AG due to RCU freeing and
				 * reallocation, only update the index if it
				 * lies in this AG. It was a race that led us
				 * to see this inode, so another lookup from
				 * the same index will not find it again.
				 */
				if (XFS_INO_TO_AGNO(mp, ip->i_ino) !=
								pag->pag_agno)
					continue;
				first_index = XFS_INO_TO_AGINO(mp, ip->i_ino + 1);
				if (first_index < XFS_INO_TO_AGINO(mp, ip->i_ino))
					done = 1;
			}

			/* unlock now we've grabbed the inodes. */
			rcu_read_unlock();

			for (i = 0; i < nr_found; i++) {
				if (!batch[i])
					continue;
				error = xfs_reclaim_inode(batch[i], pag, flags);
				if (error && last_error != EFSCORRUPTED)
					last_error = error;
			}

			*nr_to_scan -= XFS_LOOKUP_BATCH;

		} while (nr_found && !done && *nr_to_scan > 0);

		if (trylock && !done)
			pag->pag_ici_reclaim_cursor = first_index;
		else
			pag->pag_ici_reclaim_cursor = 0;
		mutex_unlock(&pag->pag_ici_reclaim_lock);
		xfs_perag_put(pag);
	}

	/*
	 * If we skipped any AG, and we still have scan count remaining, do
	 * another pass this time using blocking reclaim semantics (i.e.
	 * waiting on the reclaim locks and ignoring the reclaim cursors). This
	 * ensures that when we get more reclaimers than AGs we block rather
	 * than spin trying to execute reclaim.
	 */
	if (trylock && skipped && *nr_to_scan > 0) {
		trylock = 0;
		goto restart;
	}
	return XFS_ERROR(last_error);
}
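/*
 * Reclaim all reclaimable inodes in the filesystem using the blocking
 * semantics requested by @mode.
 */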
int
xfs_reclaim_inodes(
	struct xfs_mount	*mp,
	int			mode)
{
	int		nr_to_scan = INT_MAX;

	return xfs_reclaim_inodes_ag(mp, mode, &nr_to_scan);
}
/*
 * Shrinker infrastructure.
 */
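/*
 * Shrinker callback: under memory pressure, reclaim up to nr_to_scan inodes
 * with trylock semantics and report how many reclaimable inodes remain so
 * the VM can decide whether to call in again.
 */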
static int
xfs_reclaim_inode_shrink(
	struct shrinker		*shrink,
	int			nr_to_scan,
	gfp_t			gfp_mask)
{
	struct xfs_mount	*mp;
	struct xfs_perag	*pag;
	xfs_agnumber_t		ag;
	int			reclaimable;

	mp = container_of(shrink, struct xfs_mount, m_inode_shrink);
	if (nr_to_scan) {
		if (!(gfp_mask & __GFP_FS))
			return -1;

		xfs_reclaim_inodes_ag(mp, SYNC_TRYLOCK, &nr_to_scan);
		/* terminate if we don't exhaust the scan */
		if (nr_to_scan > 0)
			return -1;
	}

	reclaimable = 0;
	ag = 0;
	while ((pag = xfs_perag_get_tag(mp, ag, XFS_ICI_RECLAIM_TAG))) {
		ag = pag->pag_agno + 1;
		reclaimable += pag->pag_ici_reclaimable;
		xfs_perag_put(pag);
	}
	return reclaimable;
}
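/*
 * Hook the per-mount inode reclaim shrinker into the VM's shrinker list.
 */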
void
xfs_inode_shrinker_register(
	struct xfs_mount	*mp)
{
	mp->m_inode_shrink.shrink = xfs_reclaim_inode_shrink;
	mp->m_inode_shrink.seeks = DEFAULT_SEEKS;
	register_shrinker(&mp->m_inode_shrink);
}
void
xfs_inode_shrinker_unregister(
	struct xfs_mount	*mp)
{
	unregister_shrinker(&mp->m_inode_shrink);
}