// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2000-2006 Silicon Graphics, Inc.
 * All Rights Reserved.
 */

#include "xfs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_sb.h"
#include "xfs_mount.h"
#include "xfs_inode.h"
#include "xfs_btree.h"
#include "xfs_bmap.h"
#include "xfs_alloc.h"
#include "xfs_fsops.h"
#include "xfs_trans.h"
#include "xfs_buf_item.h"
#include "xfs_log.h"
#include "xfs_log_priv.h"
#include "xfs_dir2.h"
#include "xfs_extfree_item.h"
#include "xfs_mru_cache.h"
#include "xfs_inode_item.h"
#include "xfs_icache.h"
#include "xfs_trace.h"
#include "xfs_icreate_item.h"
#include "xfs_filestream.h"
#include "xfs_quota.h"
#include "xfs_sysfs.h"
#include "xfs_ondisk.h"
#include "xfs_rmap_item.h"
#include "xfs_refcount_item.h"
#include "xfs_bmap_item.h"
#include "xfs_reflink.h"

#include <linux/magic.h>
#include <linux/fs_context.h>
#include <linux/fs_parser.h>

static const struct super_operations xfs_super_operations;

static struct kset *xfs_kset;           /* top-level xfs sysfs dir */
static struct xfs_kobj xfs_dbg_kobj;    /* global debug sysfs attrs */
/*
 * Table driven mount option parser.
 */
enum {
        Opt_logbufs, Opt_logbsize, Opt_logdev, Opt_rtdev,
        Opt_wsync, Opt_noalign, Opt_swalloc, Opt_sunit, Opt_swidth, Opt_nouuid,
        Opt_grpid, Opt_nogrpid, Opt_bsdgroups, Opt_sysvgroups,
        Opt_allocsize, Opt_norecovery, Opt_inode64, Opt_inode32, Opt_ikeep,
        Opt_noikeep, Opt_largeio, Opt_nolargeio, Opt_attr2, Opt_noattr2,
        Opt_filestreams, Opt_quota, Opt_noquota, Opt_usrquota, Opt_grpquota,
        Opt_prjquota, Opt_uquota, Opt_gquota, Opt_pquota,
        Opt_uqnoenforce, Opt_gqnoenforce, Opt_pqnoenforce, Opt_qnoenforce,
        Opt_discard, Opt_nodiscard, Opt_dax,
};
static const struct fs_parameter_spec xfs_fs_parameters[] = {
        fsparam_u32("logbufs", Opt_logbufs),
        fsparam_string("logbsize", Opt_logbsize),
        fsparam_string("logdev", Opt_logdev),
        fsparam_string("rtdev", Opt_rtdev),
        fsparam_flag("wsync", Opt_wsync),
        fsparam_flag("noalign", Opt_noalign),
        fsparam_flag("swalloc", Opt_swalloc),
        fsparam_u32("sunit", Opt_sunit),
        fsparam_u32("swidth", Opt_swidth),
        fsparam_flag("nouuid", Opt_nouuid),
        fsparam_flag("grpid", Opt_grpid),
        fsparam_flag("nogrpid", Opt_nogrpid),
        fsparam_flag("bsdgroups", Opt_bsdgroups),
        fsparam_flag("sysvgroups", Opt_sysvgroups),
        fsparam_string("allocsize", Opt_allocsize),
        fsparam_flag("norecovery", Opt_norecovery),
        fsparam_flag("inode64", Opt_inode64),
        fsparam_flag("inode32", Opt_inode32),
        fsparam_flag("ikeep", Opt_ikeep),
        fsparam_flag("noikeep", Opt_noikeep),
        fsparam_flag("largeio", Opt_largeio),
        fsparam_flag("nolargeio", Opt_nolargeio),
        fsparam_flag("attr2", Opt_attr2),
        fsparam_flag("noattr2", Opt_noattr2),
        fsparam_flag("filestreams", Opt_filestreams),
        fsparam_flag("quota", Opt_quota),
        fsparam_flag("noquota", Opt_noquota),
        fsparam_flag("usrquota", Opt_usrquota),
        fsparam_flag("grpquota", Opt_grpquota),
        fsparam_flag("prjquota", Opt_prjquota),
        fsparam_flag("uquota", Opt_uquota),
        fsparam_flag("gquota", Opt_gquota),
        fsparam_flag("pquota", Opt_pquota),
        fsparam_flag("uqnoenforce", Opt_uqnoenforce),
        fsparam_flag("gqnoenforce", Opt_gqnoenforce),
        fsparam_flag("pqnoenforce", Opt_pqnoenforce),
        fsparam_flag("qnoenforce", Opt_qnoenforce),
        fsparam_flag("discard", Opt_discard),
        fsparam_flag("nodiscard", Opt_nodiscard),
        fsparam_flag("dax", Opt_dax),
        {}
};
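/*
 * Illustrative example (editor's sketch, not part of the original source):
 * with the table above, a mount such as
 *
 *      mount -o logbufs=8,logbsize=256k,swalloc,usrquota /dev/sdb1 /mnt
 *
 * is split by the new mount API into one fs_parameter per option; each is
 * matched against xfs_fs_parameters and then handled by xfs_fc_parse_param()
 * further down in this file.
 */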
struct proc_xfs_info {
        uint64_t        flag;
        char            *str;
};

static int
xfs_fs_show_options(
        struct seq_file         *m,
        struct dentry           *root)
{
        static struct proc_xfs_info xfs_info_set[] = {
                /* the few simple ones we can get from the mount struct */
                { XFS_MOUNT_IKEEP,              ",ikeep" },
                { XFS_MOUNT_WSYNC,              ",wsync" },
                { XFS_MOUNT_NOALIGN,            ",noalign" },
                { XFS_MOUNT_SWALLOC,            ",swalloc" },
                { XFS_MOUNT_NOUUID,             ",nouuid" },
                { XFS_MOUNT_NORECOVERY,         ",norecovery" },
                { XFS_MOUNT_ATTR2,              ",attr2" },
                { XFS_MOUNT_FILESTREAMS,        ",filestreams" },
                { XFS_MOUNT_GRPID,              ",grpid" },
                { XFS_MOUNT_DISCARD,            ",discard" },
                { XFS_MOUNT_LARGEIO,            ",largeio" },
                { XFS_MOUNT_DAX,                ",dax" },
                { 0, NULL }
        };
        struct xfs_mount        *mp = XFS_M(root->d_sb);
        struct proc_xfs_info    *xfs_infop;

        for (xfs_infop = xfs_info_set; xfs_infop->flag; xfs_infop++) {
                if (mp->m_flags & xfs_infop->flag)
                        seq_puts(m, xfs_infop->str);
        }

        seq_printf(m, ",inode%d",
                (mp->m_flags & XFS_MOUNT_SMALL_INUMS) ? 32 : 64);

        if (mp->m_flags & XFS_MOUNT_ALLOCSIZE)
                seq_printf(m, ",allocsize=%dk",
                           (1 << mp->m_allocsize_log) >> 10);

        if (mp->m_logbufs > 0)
                seq_printf(m, ",logbufs=%d", mp->m_logbufs);
        if (mp->m_logbsize > 0)
                seq_printf(m, ",logbsize=%dk", mp->m_logbsize >> 10);

        if (mp->m_logname)
                seq_show_option(m, "logdev", mp->m_logname);
        if (mp->m_rtname)
                seq_show_option(m, "rtdev", mp->m_rtname);

        if (mp->m_dalign > 0)
                seq_printf(m, ",sunit=%d",
                           (int)XFS_FSB_TO_BB(mp, mp->m_dalign));
        if (mp->m_swidth > 0)
                seq_printf(m, ",swidth=%d",
                           (int)XFS_FSB_TO_BB(mp, mp->m_swidth));

        if (mp->m_qflags & (XFS_UQUOTA_ACCT|XFS_UQUOTA_ENFD))
                seq_puts(m, ",usrquota");
        else if (mp->m_qflags & XFS_UQUOTA_ACCT)
                seq_puts(m, ",uqnoenforce");

        if (mp->m_qflags & XFS_PQUOTA_ACCT) {
                if (mp->m_qflags & XFS_PQUOTA_ENFD)
                        seq_puts(m, ",prjquota");
                else
                        seq_puts(m, ",pqnoenforce");
        }
        if (mp->m_qflags & XFS_GQUOTA_ACCT) {
                if (mp->m_qflags & XFS_GQUOTA_ENFD)
                        seq_puts(m, ",grpquota");
                else
                        seq_puts(m, ",gqnoenforce");
        }

        if (!(mp->m_qflags & XFS_ALL_QUOTA_ACCT))
                seq_puts(m, ",noquota");

        return 0;
}
/*
 * Set parameters for inode allocation heuristics, taking into account
 * filesystem size and inode32/inode64 mount options; i.e. specifically
 * whether or not XFS_MOUNT_SMALL_INUMS is set.
 *
 * Inode allocation patterns are altered only if inode32 is requested
 * (XFS_MOUNT_SMALL_INUMS), and the filesystem is sufficiently large.
 * If altered, XFS_MOUNT_32BITINODES is set as well.
 *
 * An agcount independent of that in the mount structure is provided
 * because in the growfs case, mp->m_sb.sb_agcount is not yet updated
 * to the potentially higher ag count.
 *
 * Returns the maximum AG index which may contain inodes.
 */
xfs_agnumber_t
xfs_set_inode_alloc(
        struct xfs_mount *mp,
        xfs_agnumber_t  agcount)
{
        xfs_agnumber_t  index;
        xfs_agnumber_t  maxagi = 0;
        xfs_sb_t        *sbp = &mp->m_sb;
        xfs_agnumber_t  max_metadata;
        xfs_agino_t     agino;
        xfs_ino_t       ino;

        /*
         * Calculate how much should be reserved for inodes to meet
         * the max inode percentage.  Used only for inode32.
         */
        if (M_IGEO(mp)->maxicount) {
                uint64_t        icount;

                icount = sbp->sb_dblocks * sbp->sb_imax_pct;
                do_div(icount, 100);
                icount += sbp->sb_agblocks - 1;
                do_div(icount, sbp->sb_agblocks);
                max_metadata = icount;
        } else {
                max_metadata = agcount;
        }

        /* Get the last possible inode in the filesystem */
        agino = XFS_AGB_TO_AGINO(mp, sbp->sb_agblocks - 1);
        ino = XFS_AGINO_TO_INO(mp, agcount - 1, agino);

        /*
         * If user asked for no more than 32-bit inodes, and the fs is
         * sufficiently large, set XFS_MOUNT_32BITINODES if we must alter
         * the allocator to accommodate the request.
         */
        if ((mp->m_flags & XFS_MOUNT_SMALL_INUMS) && ino > XFS_MAXINUMBER_32)
                mp->m_flags |= XFS_MOUNT_32BITINODES;
        else
                mp->m_flags &= ~XFS_MOUNT_32BITINODES;

        for (index = 0; index < agcount; index++) {
                struct xfs_perag        *pag;

                ino = XFS_AGINO_TO_INO(mp, index, agino);

                pag = xfs_perag_get(mp, index);

                if (mp->m_flags & XFS_MOUNT_32BITINODES) {
                        if (ino > XFS_MAXINUMBER_32) {
                                pag->pagi_inodeok = 0;
                                pag->pagf_metadata = 0;
                        } else {
                                pag->pagi_inodeok = 1;
                                maxagi++;
                                if (index < max_metadata)
                                        pag->pagf_metadata = 1;
                                else
                                        pag->pagf_metadata = 0;
                        }
                } else {
                        pag->pagi_inodeok = 1;
                        pag->pagf_metadata = 0;
                }

                xfs_perag_put(pag);
        }

        return (mp->m_flags & XFS_MOUNT_32BITINODES) ? maxagi : agcount;
}
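/*
 * Worked example (editor's sketch): XFS_MAXINUMBER_32 caps inode numbers
 * at 2^32 - 1, and each inode number addresses one inode-sized slot of the
 * data device (modulo per-AG rounding).  With the common 512-byte inode
 * size, the cutoff is therefore crossed once the data device grows past
 * roughly 2^32 * 512 bytes = 2 TiB; on an inode32 mount, AGs wholly beyond
 * that point get pagi_inodeok = 0 above and hold only data blocks.
 */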
STATIC int
xfs_blkdev_get(
        xfs_mount_t             *mp,
        const char              *name,
        struct block_device     **bdevp)
{
        int                     error = 0;

        *bdevp = blkdev_get_by_path(name, FMODE_READ|FMODE_WRITE|FMODE_EXCL,
                                    mp);
        if (IS_ERR(*bdevp)) {
                error = PTR_ERR(*bdevp);
                xfs_warn(mp, "Invalid device [%s], error=%d", name, error);
        }

        return error;
}

STATIC void
xfs_blkdev_put(
        struct block_device     *bdev)
{
        if (bdev)
                blkdev_put(bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL);
}
void
xfs_blkdev_issue_flush(
        xfs_buftarg_t           *buftarg)
{
        blkdev_issue_flush(buftarg->bt_bdev, GFP_NOFS, NULL);
}
STATIC void
xfs_close_devices(
        struct xfs_mount        *mp)
{
        struct dax_device *dax_ddev = mp->m_ddev_targp->bt_daxdev;

        if (mp->m_logdev_targp && mp->m_logdev_targp != mp->m_ddev_targp) {
                struct block_device *logdev = mp->m_logdev_targp->bt_bdev;
                struct dax_device *dax_logdev = mp->m_logdev_targp->bt_daxdev;

                xfs_free_buftarg(mp->m_logdev_targp);
                xfs_blkdev_put(logdev);
                fs_put_dax(dax_logdev);
        }
        if (mp->m_rtdev_targp) {
                struct block_device *rtdev = mp->m_rtdev_targp->bt_bdev;
                struct dax_device *dax_rtdev = mp->m_rtdev_targp->bt_daxdev;

                xfs_free_buftarg(mp->m_rtdev_targp);
                xfs_blkdev_put(rtdev);
                fs_put_dax(dax_rtdev);
        }
        xfs_free_buftarg(mp->m_ddev_targp);
        fs_put_dax(dax_ddev);
}
/*
 * The file system configurations are:
 *      (1) device (partition) with data and internal log
 *      (2) logical volume with data and log subvolumes.
 *      (3) logical volume with data, log, and realtime subvolumes.
 *
 * We only have to handle opening the log and realtime volumes here if
 * they are present.  The data subvolume has already been opened by
 * get_sb_bdev() and is stored in sb->s_bdev.
 */
STATIC int
xfs_open_devices(
        struct xfs_mount        *mp)
{
        struct block_device     *ddev = mp->m_super->s_bdev;
        struct dax_device       *dax_ddev = fs_dax_get_by_bdev(ddev);
        struct dax_device       *dax_logdev = NULL, *dax_rtdev = NULL;
        struct block_device     *logdev = NULL, *rtdev = NULL;
        int                     error;

        /*
         * Open real time and log devices - order is important.
         */
        if (mp->m_logname) {
                error = xfs_blkdev_get(mp, mp->m_logname, &logdev);
                if (error)
                        goto out;
                dax_logdev = fs_dax_get_by_bdev(logdev);
        }

        if (mp->m_rtname) {
                error = xfs_blkdev_get(mp, mp->m_rtname, &rtdev);
                if (error)
                        goto out_close_logdev;

                if (rtdev == ddev || rtdev == logdev) {
                        xfs_warn(mp,
        "Cannot mount filesystem with identical rtdev and ddev/logdev.");
                        error = -EINVAL;
                        goto out_close_rtdev;
                }
                dax_rtdev = fs_dax_get_by_bdev(rtdev);
        }

        /*
         * Setup xfs_mount buffer target pointers
         */
        error = -ENOMEM;
        mp->m_ddev_targp = xfs_alloc_buftarg(mp, ddev, dax_ddev);
        if (!mp->m_ddev_targp)
                goto out_close_rtdev;

        if (rtdev) {
                mp->m_rtdev_targp = xfs_alloc_buftarg(mp, rtdev, dax_rtdev);
                if (!mp->m_rtdev_targp)
                        goto out_free_ddev_targ;
        }

        if (logdev && logdev != ddev) {
                mp->m_logdev_targp = xfs_alloc_buftarg(mp, logdev, dax_logdev);
                if (!mp->m_logdev_targp)
                        goto out_free_rtdev_targ;
        } else {
                mp->m_logdev_targp = mp->m_ddev_targp;
        }

        return 0;

 out_free_rtdev_targ:
        if (mp->m_rtdev_targp)
                xfs_free_buftarg(mp->m_rtdev_targp);
 out_free_ddev_targ:
        xfs_free_buftarg(mp->m_ddev_targp);
 out_close_rtdev:
        xfs_blkdev_put(rtdev);
        fs_put_dax(dax_rtdev);
 out_close_logdev:
        if (logdev && logdev != ddev) {
                xfs_blkdev_put(logdev);
                fs_put_dax(dax_logdev);
        }
 out:
        fs_put_dax(dax_ddev);
        return error;
}
/*
 * Setup xfs_mount buffer target pointers based on superblock
 */
STATIC int
xfs_setup_devices(
        struct xfs_mount        *mp)
{
        int                     error;

        error = xfs_setsize_buftarg(mp->m_ddev_targp, mp->m_sb.sb_sectsize);
        if (error)
                return error;

        if (mp->m_logdev_targp && mp->m_logdev_targp != mp->m_ddev_targp) {
                unsigned int    log_sector_size = BBSIZE;

                if (xfs_sb_version_hassector(&mp->m_sb))
                        log_sector_size = mp->m_sb.sb_logsectsize;
                error = xfs_setsize_buftarg(mp->m_logdev_targp,
                                            log_sector_size);
                if (error)
                        return error;
        }
        if (mp->m_rtdev_targp) {
                error = xfs_setsize_buftarg(mp->m_rtdev_targp,
                                            mp->m_sb.sb_sectsize);
                if (error)
                        return error;
        }

        return 0;
}
STATIC int
xfs_init_mount_workqueues(
        struct xfs_mount        *mp)
{
        mp->m_buf_workqueue = alloc_workqueue("xfs-buf/%s",
                        WQ_MEM_RECLAIM|WQ_FREEZABLE, 1, mp->m_super->s_id);
        if (!mp->m_buf_workqueue)
                goto out;

        mp->m_unwritten_workqueue = alloc_workqueue("xfs-conv/%s",
                        WQ_MEM_RECLAIM|WQ_FREEZABLE, 0, mp->m_super->s_id);
        if (!mp->m_unwritten_workqueue)
                goto out_destroy_buf;

        mp->m_cil_workqueue = alloc_workqueue("xfs-cil/%s",
                        WQ_MEM_RECLAIM | WQ_FREEZABLE | WQ_UNBOUND,
                        0, mp->m_super->s_id);
        if (!mp->m_cil_workqueue)
                goto out_destroy_unwritten;

        mp->m_reclaim_workqueue = alloc_workqueue("xfs-reclaim/%s",
                        WQ_MEM_RECLAIM|WQ_FREEZABLE, 0, mp->m_super->s_id);
        if (!mp->m_reclaim_workqueue)
                goto out_destroy_cil;

        mp->m_eofblocks_workqueue = alloc_workqueue("xfs-eofblocks/%s",
                        WQ_MEM_RECLAIM|WQ_FREEZABLE, 0, mp->m_super->s_id);
        if (!mp->m_eofblocks_workqueue)
                goto out_destroy_reclaim;

        mp->m_sync_workqueue = alloc_workqueue("xfs-sync/%s", WQ_FREEZABLE, 0,
                                               mp->m_super->s_id);
        if (!mp->m_sync_workqueue)
                goto out_destroy_eofb;

        return 0;

out_destroy_eofb:
        destroy_workqueue(mp->m_eofblocks_workqueue);
out_destroy_reclaim:
        destroy_workqueue(mp->m_reclaim_workqueue);
out_destroy_cil:
        destroy_workqueue(mp->m_cil_workqueue);
out_destroy_unwritten:
        destroy_workqueue(mp->m_unwritten_workqueue);
out_destroy_buf:
        destroy_workqueue(mp->m_buf_workqueue);
out:
        return -ENOMEM;
}

STATIC void
xfs_destroy_mount_workqueues(
        struct xfs_mount        *mp)
{
        destroy_workqueue(mp->m_sync_workqueue);
        destroy_workqueue(mp->m_eofblocks_workqueue);
        destroy_workqueue(mp->m_reclaim_workqueue);
        destroy_workqueue(mp->m_cil_workqueue);
        destroy_workqueue(mp->m_unwritten_workqueue);
        destroy_workqueue(mp->m_buf_workqueue);
}
/*
 * Flush all dirty data to disk. Must not be called while holding an XFS_ILOCK
 * or a page lock. We use sync_inodes_sb() here to ensure we block while
 * waiting for IO to complete so that we effectively throttle multiple callers
 * to the rate at which IO is completing.
 */
void
xfs_flush_inodes(
        struct xfs_mount        *mp)
{
        struct super_block      *sb = mp->m_super;

        if (down_read_trylock(&sb->s_umount)) {
                sync_inodes_sb(sb);
                up_read(&sb->s_umount);
        }
}
/* Catch misguided souls that try to use this interface on XFS */
STATIC struct inode *
xfs_fs_alloc_inode(
        struct super_block      *sb)
{
        BUG();
        return NULL;
}
#ifdef DEBUG
static void
xfs_check_delalloc(
        struct xfs_inode        *ip,
        int                     whichfork)
{
        struct xfs_ifork        *ifp = XFS_IFORK_PTR(ip, whichfork);
        struct xfs_bmbt_irec    got;
        struct xfs_iext_cursor  icur;

        if (!ifp || !xfs_iext_lookup_extent(ip, ifp, 0, &icur, &got))
                return;
        do {
                if (isnullstartblock(got.br_startblock)) {
                        xfs_warn(ip->i_mount,
        "ino %llx %s fork has delalloc extent at [0x%llx:0x%llx]",
                                ip->i_ino,
                                whichfork == XFS_DATA_FORK ? "data" : "cow",
                                got.br_startoff, got.br_blockcount);
                }
        } while (xfs_iext_next_extent(ifp, &icur, &got));
}
#else
#define xfs_check_delalloc(ip, whichfork)       do { } while (0)
#endif
/*
 * Now that the generic code is guaranteed not to be accessing
 * the linux inode, we can inactivate and reclaim the inode.
 */
STATIC void
xfs_fs_destroy_inode(
        struct inode            *inode)
{
        struct xfs_inode        *ip = XFS_I(inode);

        trace_xfs_destroy_inode(ip);

        ASSERT(!rwsem_is_locked(&inode->i_rwsem));
        XFS_STATS_INC(ip->i_mount, vn_rele);
        XFS_STATS_INC(ip->i_mount, vn_remove);

        xfs_inactive(ip);

        if (!XFS_FORCED_SHUTDOWN(ip->i_mount) && ip->i_delayed_blks) {
                xfs_check_delalloc(ip, XFS_DATA_FORK);
                xfs_check_delalloc(ip, XFS_COW_FORK);
                ASSERT(0);
        }

        XFS_STATS_INC(ip->i_mount, vn_reclaim);

        /*
         * We should never get here with one of the reclaim flags already set.
         */
        ASSERT_ALWAYS(!xfs_iflags_test(ip, XFS_IRECLAIMABLE));
        ASSERT_ALWAYS(!xfs_iflags_test(ip, XFS_IRECLAIM));

        /*
         * We always use background reclaim here because even if the
         * inode is clean, it still may be under IO and hence we have
         * to take the flush lock. The background reclaim path handles
         * this more efficiently than we can here, so simply let background
         * reclaim tear down all inodes.
         */
        xfs_inode_set_reclaim_tag(ip);
}
static void
xfs_fs_dirty_inode(
        struct inode            *inode,
        int                     flag)
{
        struct xfs_inode        *ip = XFS_I(inode);
        struct xfs_mount        *mp = ip->i_mount;
        struct xfs_trans        *tp;

        if (!(inode->i_sb->s_flags & SB_LAZYTIME))
                return;
        if (flag != I_DIRTY_SYNC || !(inode->i_state & I_DIRTY_TIME))
                return;

        if (xfs_trans_alloc(mp, &M_RES(mp)->tr_fsyncts, 0, 0, 0, &tp))
                return;
        xfs_ilock(ip, XFS_ILOCK_EXCL);
        xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
        xfs_trans_log_inode(tp, ip, XFS_ILOG_TIMESTAMP);
        xfs_trans_commit(tp);
}
/*
 * Slab object creation initialisation for the XFS inode.
 * This covers only the idempotent fields in the XFS inode;
 * all other fields need to be initialised on allocation
 * from the slab. This avoids the need to repeatedly initialise
 * fields in the xfs inode that are left in the initialised state
 * when freeing the inode.
 */
STATIC void
xfs_fs_inode_init_once(
        void                    *inode)
{
        struct xfs_inode        *ip = inode;

        memset(ip, 0, sizeof(struct xfs_inode));

        /* vfs inode */
        inode_init_once(VFS_I(ip));

        /* xfs inode */
        atomic_set(&ip->i_pincount, 0);
        spin_lock_init(&ip->i_flags_lock);

        mrlock_init(&ip->i_mmaplock, MRLOCK_ALLOW_EQUAL_PRI|MRLOCK_BARRIER,
                    "xfsino", ip->i_ino);
        mrlock_init(&ip->i_lock, MRLOCK_ALLOW_EQUAL_PRI|MRLOCK_BARRIER,
                    "xfsino", ip->i_ino);
}
/*
 * We do an unlocked check for XFS_IDONTCACHE here because we are already
 * serialised against cache hits here via the inode->i_lock and igrab() in
 * xfs_iget_cache_hit(). Hence a lookup that might clear this flag will not be
 * racing with us, and it avoids needing to grab a spinlock here for every
 * inode we drop the final reference on.
 */
STATIC int
xfs_fs_drop_inode(
        struct inode            *inode)
{
        struct xfs_inode        *ip = XFS_I(inode);

        /*
         * If this unlinked inode is in the middle of recovery, don't
         * drop the inode just yet; log recovery will take care of
         * that.  See the comment for this inode flag.
         */
        if (ip->i_flags & XFS_IRECOVERY) {
                ASSERT(ip->i_mount->m_log->l_flags & XLOG_RECOVERY_NEEDED);
                return 0;
        }

        return generic_drop_inode(inode) || (ip->i_flags & XFS_IDONTCACHE);
}
static void
xfs_mount_free(
        struct xfs_mount        *mp)
{
        kfree(mp->m_rtname);
        kfree(mp->m_logname);
        kmem_free(mp);
}
STATIC int
xfs_fs_sync_fs(
        struct super_block      *sb,
        int                     wait)
{
        struct xfs_mount        *mp = XFS_M(sb);

        /*
         * Doing anything during the async pass would be counterproductive.
         */
        if (!wait)
                return 0;

        xfs_log_force(mp, XFS_LOG_SYNC);
        if (laptop_mode) {
                /*
                 * The disk must be active because we're syncing.
                 * We schedule log work now (now that the disk is
                 * active) instead of later (when it might not be).
                 */
                flush_delayed_work(&mp->m_log->l_work);
        }

        return 0;
}
STATIC int
xfs_fs_statfs(
        struct dentry           *dentry,
        struct kstatfs          *statp)
{
        struct xfs_mount        *mp = XFS_M(dentry->d_sb);
        xfs_sb_t                *sbp = &mp->m_sb;
        struct xfs_inode        *ip = XFS_I(d_inode(dentry));
        uint64_t                fakeinos, id;
        uint64_t                icount;
        uint64_t                ifree;
        uint64_t                fdblocks;
        xfs_extlen_t            lsize;
        int64_t                 ffree;

        statp->f_type = XFS_SUPER_MAGIC;
        statp->f_namelen = MAXNAMELEN - 1;

        id = huge_encode_dev(mp->m_ddev_targp->bt_dev);
        statp->f_fsid.val[0] = (u32)id;
        statp->f_fsid.val[1] = (u32)(id >> 32);

        icount = percpu_counter_sum(&mp->m_icount);
        ifree = percpu_counter_sum(&mp->m_ifree);
        fdblocks = percpu_counter_sum(&mp->m_fdblocks);

        spin_lock(&mp->m_sb_lock);
        statp->f_bsize = sbp->sb_blocksize;
        lsize = sbp->sb_logstart ? sbp->sb_logblocks : 0;
        statp->f_blocks = sbp->sb_dblocks - lsize;
        spin_unlock(&mp->m_sb_lock);

        statp->f_bfree = fdblocks - mp->m_alloc_set_aside;
        statp->f_bavail = statp->f_bfree;

        fakeinos = XFS_FSB_TO_INO(mp, statp->f_bfree);
        statp->f_files = min(icount + fakeinos, (uint64_t)XFS_MAXINUMBER);
        if (M_IGEO(mp)->maxicount)
                statp->f_files = min_t(typeof(statp->f_files),
                                        statp->f_files,
                                        M_IGEO(mp)->maxicount);

        /* If sb_icount overshot maxicount, report actual allocation */
        statp->f_files = max_t(typeof(statp->f_files),
                                        statp->f_files,
                                        sbp->sb_icount);

        /* make sure statp->f_ffree does not underflow */
        ffree = statp->f_files - (icount - ifree);
        statp->f_ffree = max_t(int64_t, ffree, 0);

        if ((ip->i_d.di_flags & XFS_DIFLAG_PROJINHERIT) &&
            ((mp->m_qflags & (XFS_PQUOTA_ACCT|XFS_PQUOTA_ENFD))) ==
                              (XFS_PQUOTA_ACCT|XFS_PQUOTA_ENFD))
                xfs_qm_statvfs(ip, statp);

        if (XFS_IS_REALTIME_MOUNT(mp) &&
            (ip->i_d.di_flags & (XFS_DIFLAG_RTINHERIT | XFS_DIFLAG_REALTIME))) {
                statp->f_blocks = sbp->sb_rblocks;
                statp->f_bavail = statp->f_bfree =
                        sbp->sb_frextents * sbp->sb_rextsize;
        }

        return 0;
}
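/*
 * Numeric sketch (editor's illustration): with 4 KiB blocks and 512-byte
 * inodes, XFS_FSB_TO_INO() counts 8 potential inodes per free block, so a
 * filesystem with 1M free blocks reports f_files of roughly the allocated
 * inode count plus 8M "fake" inodes, clamped to maxicount when an imaxpct
 * limit is in effect.
 */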
STATIC void
xfs_save_resvblks(struct xfs_mount *mp)
{
        uint64_t resblks = 0;

        mp->m_resblks_save = mp->m_resblks;
        xfs_reserve_blocks(mp, &resblks, NULL);
}

STATIC void
xfs_restore_resvblks(struct xfs_mount *mp)
{
        uint64_t resblks;

        if (mp->m_resblks_save) {
                resblks = mp->m_resblks_save;
                mp->m_resblks_save = 0;
        } else
                resblks = xfs_default_resblks(mp);

        xfs_reserve_blocks(mp, &resblks, NULL);
}
/*
 * Trigger writeback of all the dirty metadata in the file system.
 *
 * This ensures that the metadata is written to its location on disk rather
 * than just existing in transactions in the log. This means after a quiesce
 * there is no log replay required to write the inodes to disk - this is the
 * primary difference between a sync and a quiesce.
 *
 * Note: xfs_log_quiesce() stops background log work - the callers must ensure
 * it is started again when appropriate.
 */
void
xfs_quiesce_attr(
        struct xfs_mount        *mp)
{
        int     error = 0;

        /* wait for all modifications to complete */
        while (atomic_read(&mp->m_active_trans) > 0)
                delay(100);

        /* force the log to unpin objects from the now complete transactions */
        xfs_log_force(mp, XFS_LOG_SYNC);

        /* reclaim inodes to do any IO before the freeze completes */
        xfs_reclaim_inodes(mp, 0);
        xfs_reclaim_inodes(mp, SYNC_WAIT);

        /* Push the superblock and write an unmount record */
        error = xfs_log_sbcount(mp);
        if (error)
                xfs_warn(mp, "xfs_attr_quiesce: failed to log sb changes. "
                                "Frozen image may not be consistent.");
        /*
         * Just warn here till VFS can correctly support
         * read-only remount without racing.
         */
        WARN_ON(atomic_read(&mp->m_active_trans) != 0);

        xfs_log_quiesce(mp);
}
/*
 * Second stage of a freeze. The data is already frozen so we only
 * need to take care of the metadata. Once that's done sync the superblock
 * to the log to dirty it in case of a crash while frozen. This ensures that
 * we will recover the unlinked inode lists on the next mount.
 */
STATIC int
xfs_fs_freeze(
        struct super_block      *sb)
{
        struct xfs_mount        *mp = XFS_M(sb);

        xfs_stop_block_reaping(mp);
        xfs_save_resvblks(mp);
        xfs_quiesce_attr(mp);
        return xfs_sync_sb(mp, true);
}

STATIC int
xfs_fs_unfreeze(
        struct super_block      *sb)
{
        struct xfs_mount        *mp = XFS_M(sb);

        xfs_restore_resvblks(mp);
        xfs_log_work_queue(mp);
        xfs_start_block_reaping(mp);
        return 0;
}
/*
 * This function fills in xfs_mount_t fields based on mount args.
 * Note: the superblock _has_ now been read in.
 */
STATIC int
xfs_finish_flags(
        struct xfs_mount        *mp)
{
        int                     ronly = (mp->m_flags & XFS_MOUNT_RDONLY);

        /* Fail a mount where the logbuf is smaller than the log stripe */
        if (xfs_sb_version_haslogv2(&mp->m_sb)) {
                if (mp->m_logbsize <= 0 &&
                    mp->m_sb.sb_logsunit > XLOG_BIG_RECORD_BSIZE) {
                        mp->m_logbsize = mp->m_sb.sb_logsunit;
                } else if (mp->m_logbsize > 0 &&
                           mp->m_logbsize < mp->m_sb.sb_logsunit) {
                        xfs_warn(mp,
                "logbuf size must be greater than or equal to log stripe size");
                        return -EINVAL;
                }
        } else {
                /* Fail a mount if the logbuf is larger than 32K */
                if (mp->m_logbsize > XLOG_BIG_RECORD_BSIZE) {
                        xfs_warn(mp,
                "logbuf size for version 1 logs must be 16K or 32K");
                        return -EINVAL;
                }
        }

        /*
         * V5 filesystems always use attr2 format for attributes.
         */
        if (xfs_sb_version_hascrc(&mp->m_sb) &&
            (mp->m_flags & XFS_MOUNT_NOATTR2)) {
                xfs_warn(mp, "Cannot mount a V5 filesystem as noattr2. "
                             "attr2 is always enabled for V5 filesystems.");
                return -EINVAL;
        }

        /*
         * mkfs'ed attr2 will turn on attr2 mount unless explicitly
         * told by noattr2 to turn it off
         */
        if (xfs_sb_version_hasattr2(&mp->m_sb) &&
            !(mp->m_flags & XFS_MOUNT_NOATTR2))
                mp->m_flags |= XFS_MOUNT_ATTR2;

        /*
         * prohibit r/w mounts of read-only filesystems
         */
        if ((mp->m_sb.sb_flags & XFS_SBF_READONLY) && !ronly) {
                xfs_warn(mp,
                        "cannot mount a read-only filesystem as read-write");
                return -EROFS;
        }

        if ((mp->m_qflags & (XFS_GQUOTA_ACCT | XFS_GQUOTA_ACTIVE)) &&
            (mp->m_qflags & (XFS_PQUOTA_ACCT | XFS_PQUOTA_ACTIVE)) &&
            !xfs_sb_version_has_pquotino(&mp->m_sb)) {
                xfs_warn(mp,
                  "Super block does not support project and group quota together");
                return -EINVAL;
        }

        return 0;
}
static int
xfs_init_percpu_counters(
        struct xfs_mount        *mp)
{
        int             error;

        error = percpu_counter_init(&mp->m_icount, 0, GFP_KERNEL);
        if (error)
                return -ENOMEM;

        error = percpu_counter_init(&mp->m_ifree, 0, GFP_KERNEL);
        if (error)
                goto free_icount;

        error = percpu_counter_init(&mp->m_fdblocks, 0, GFP_KERNEL);
        if (error)
                goto free_ifree;

        error = percpu_counter_init(&mp->m_delalloc_blks, 0, GFP_KERNEL);
        if (error)
                goto free_fdblocks;

        return 0;

free_fdblocks:
        percpu_counter_destroy(&mp->m_fdblocks);
free_ifree:
        percpu_counter_destroy(&mp->m_ifree);
free_icount:
        percpu_counter_destroy(&mp->m_icount);
        return -ENOMEM;
}

void
xfs_reinit_percpu_counters(
        struct xfs_mount        *mp)
{
        percpu_counter_set(&mp->m_icount, mp->m_sb.sb_icount);
        percpu_counter_set(&mp->m_ifree, mp->m_sb.sb_ifree);
        percpu_counter_set(&mp->m_fdblocks, mp->m_sb.sb_fdblocks);
}

static void
xfs_destroy_percpu_counters(
        struct xfs_mount        *mp)
{
        percpu_counter_destroy(&mp->m_icount);
        percpu_counter_destroy(&mp->m_ifree);
        percpu_counter_destroy(&mp->m_fdblocks);
        ASSERT(XFS_FORCED_SHUTDOWN(mp) ||
               percpu_counter_sum(&mp->m_delalloc_blks) == 0);
        percpu_counter_destroy(&mp->m_delalloc_blks);
}
static void
xfs_fs_put_super(
        struct super_block      *sb)
{
        struct xfs_mount        *mp = XFS_M(sb);

        /* if ->fill_super failed, we have no mount to tear down */
        if (!sb->s_fs_info)
                return;

        xfs_notice(mp, "Unmounting Filesystem");
        xfs_filestream_unmount(mp);
        xfs_unmountfs(mp);

        xfs_freesb(mp);
        free_percpu(mp->m_stats.xs_stats);
        xfs_destroy_percpu_counters(mp);
        xfs_destroy_mount_workqueues(mp);
        xfs_close_devices(mp);

        sb->s_fs_info = NULL;
        xfs_mount_free(mp);
}
static long
xfs_fs_nr_cached_objects(
        struct super_block      *sb,
        struct shrink_control   *sc)
{
        /* Paranoia: catch incorrect calls during mount setup or teardown */
        if (WARN_ON_ONCE(!sb->s_fs_info))
                return 0;
        return xfs_reclaim_inodes_count(XFS_M(sb));
}

static long
xfs_fs_free_cached_objects(
        struct super_block      *sb,
        struct shrink_control   *sc)
{
        return xfs_reclaim_inodes_nr(XFS_M(sb), sc->nr_to_scan);
}
static const struct super_operations xfs_super_operations = {
        .alloc_inode            = xfs_fs_alloc_inode,
        .destroy_inode          = xfs_fs_destroy_inode,
        .dirty_inode            = xfs_fs_dirty_inode,
        .drop_inode             = xfs_fs_drop_inode,
        .put_super              = xfs_fs_put_super,
        .sync_fs                = xfs_fs_sync_fs,
        .freeze_fs              = xfs_fs_freeze,
        .unfreeze_fs            = xfs_fs_unfreeze,
        .statfs                 = xfs_fs_statfs,
        .show_options           = xfs_fs_show_options,
        .nr_cached_objects      = xfs_fs_nr_cached_objects,
        .free_cached_objects    = xfs_fs_free_cached_objects,
};
static int
suffix_kstrtoint(
        const char      *s,
        unsigned int    base,
        int             *res)
{
        int             last, shift_left_factor = 0, _res;
        char            *value;
        int             ret = 0;

        value = kstrdup(s, GFP_KERNEL);
        if (!value)
                return -ENOMEM;

        last = strlen(value) - 1;
        if (value[last] == 'K' || value[last] == 'k') {
                shift_left_factor = 10;
                value[last] = '\0';
        }
        if (value[last] == 'M' || value[last] == 'm') {
                shift_left_factor = 20;
                value[last] = '\0';
        }
        if (value[last] == 'G' || value[last] == 'g') {
                shift_left_factor = 30;
                value[last] = '\0';
        }

        if (kstrtoint(value, base, &_res))
                ret = -EINVAL;
        *res = _res << shift_left_factor;
        kfree(value);

        return ret;
}
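/*
 * Usage sketch (editor's illustration): the helper folds a trailing K/M/G
 * suffix into a binary shift before the integer parse, so
 *
 *      suffix_kstrtoint("64k", 10, &res)       sets res = 64 << 10 = 65536
 *      suffix_kstrtoint("1g", 10, &res)        sets res = 1 << 30
 *
 * while a bare "65536" is parsed unchanged.
 */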
/*
 * Set mount state from a mount option.
 *
 * NOTE: mp->m_super is NULL here!
 */
static int
xfs_fc_parse_param(
        struct fs_context       *fc,
        struct fs_parameter     *param)
{
        struct xfs_mount        *mp = fc->s_fs_info;
        struct fs_parse_result  result;
        int                     size = 0;
        int                     opt;

        opt = fs_parse(fc, xfs_fs_parameters, param, &result);
        if (opt < 0)
                return opt;

        switch (opt) {
        case Opt_logbufs:
                mp->m_logbufs = result.uint_32;
                return 0;
        case Opt_logbsize:
                if (suffix_kstrtoint(param->string, 10, &mp->m_logbsize))
                        return -EINVAL;
                return 0;
        case Opt_logdev:
                kfree(mp->m_logname);
                mp->m_logname = kstrdup(param->string, GFP_KERNEL);
                if (!mp->m_logname)
                        return -ENOMEM;
                return 0;
        case Opt_rtdev:
                kfree(mp->m_rtname);
                mp->m_rtname = kstrdup(param->string, GFP_KERNEL);
                if (!mp->m_rtname)
                        return -ENOMEM;
                return 0;
        case Opt_allocsize:
                if (suffix_kstrtoint(param->string, 10, &size))
                        return -EINVAL;
                mp->m_allocsize_log = ffs(size) - 1;
                mp->m_flags |= XFS_MOUNT_ALLOCSIZE;
                return 0;
        case Opt_grpid:
        case Opt_bsdgroups:
                mp->m_flags |= XFS_MOUNT_GRPID;
                return 0;
        case Opt_nogrpid:
        case Opt_sysvgroups:
                mp->m_flags &= ~XFS_MOUNT_GRPID;
                return 0;
        case Opt_wsync:
                mp->m_flags |= XFS_MOUNT_WSYNC;
                return 0;
        case Opt_norecovery:
                mp->m_flags |= XFS_MOUNT_NORECOVERY;
                return 0;
        case Opt_noalign:
                mp->m_flags |= XFS_MOUNT_NOALIGN;
                return 0;
        case Opt_swalloc:
                mp->m_flags |= XFS_MOUNT_SWALLOC;
                return 0;
        case Opt_sunit:
                mp->m_dalign = result.uint_32;
                return 0;
        case Opt_swidth:
                mp->m_swidth = result.uint_32;
                return 0;
        case Opt_inode32:
                mp->m_flags |= XFS_MOUNT_SMALL_INUMS;
                return 0;
        case Opt_inode64:
                mp->m_flags &= ~XFS_MOUNT_SMALL_INUMS;
                return 0;
        case Opt_nouuid:
                mp->m_flags |= XFS_MOUNT_NOUUID;
                return 0;
        case Opt_ikeep:
                mp->m_flags |= XFS_MOUNT_IKEEP;
                return 0;
        case Opt_noikeep:
                mp->m_flags &= ~XFS_MOUNT_IKEEP;
                return 0;
        case Opt_largeio:
                mp->m_flags |= XFS_MOUNT_LARGEIO;
                return 0;
        case Opt_nolargeio:
                mp->m_flags &= ~XFS_MOUNT_LARGEIO;
                return 0;
        case Opt_attr2:
                mp->m_flags |= XFS_MOUNT_ATTR2;
                return 0;
        case Opt_noattr2:
                mp->m_flags &= ~XFS_MOUNT_ATTR2;
                mp->m_flags |= XFS_MOUNT_NOATTR2;
                return 0;
        case Opt_filestreams:
                mp->m_flags |= XFS_MOUNT_FILESTREAMS;
                return 0;
        case Opt_noquota:
                mp->m_qflags &= ~XFS_ALL_QUOTA_ACCT;
                mp->m_qflags &= ~XFS_ALL_QUOTA_ENFD;
                mp->m_qflags &= ~XFS_ALL_QUOTA_ACTIVE;
                return 0;
        case Opt_quota:
        case Opt_uquota:
        case Opt_usrquota:
                mp->m_qflags |= (XFS_UQUOTA_ACCT | XFS_UQUOTA_ACTIVE |
                                 XFS_UQUOTA_ENFD);
                return 0;
        case Opt_qnoenforce:
        case Opt_uqnoenforce:
                mp->m_qflags |= (XFS_UQUOTA_ACCT | XFS_UQUOTA_ACTIVE);
                mp->m_qflags &= ~XFS_UQUOTA_ENFD;
                return 0;
        case Opt_pquota:
        case Opt_prjquota:
                mp->m_qflags |= (XFS_PQUOTA_ACCT | XFS_PQUOTA_ACTIVE |
                                 XFS_PQUOTA_ENFD);
                return 0;
        case Opt_pqnoenforce:
                mp->m_qflags |= (XFS_PQUOTA_ACCT | XFS_PQUOTA_ACTIVE);
                mp->m_qflags &= ~XFS_PQUOTA_ENFD;
                return 0;
        case Opt_gquota:
        case Opt_grpquota:
                mp->m_qflags |= (XFS_GQUOTA_ACCT | XFS_GQUOTA_ACTIVE |
                                 XFS_GQUOTA_ENFD);
                return 0;
        case Opt_gqnoenforce:
                mp->m_qflags |= (XFS_GQUOTA_ACCT | XFS_GQUOTA_ACTIVE);
                mp->m_qflags &= ~XFS_GQUOTA_ENFD;
                return 0;
        case Opt_discard:
                mp->m_flags |= XFS_MOUNT_DISCARD;
                return 0;
        case Opt_nodiscard:
                mp->m_flags &= ~XFS_MOUNT_DISCARD;
                return 0;
#ifdef CONFIG_FS_DAX
        case Opt_dax:
                mp->m_flags |= XFS_MOUNT_DAX;
                return 0;
#endif
        default:
                xfs_warn(mp, "unknown mount option [%s].", param->key);
                return -EINVAL;
        }

        return 0;
}
static int
xfs_fc_validate_params(
        struct xfs_mount        *mp)
{
        /*
         * no recovery flag requires a read-only mount
         */
        if ((mp->m_flags & XFS_MOUNT_NORECOVERY) &&
            !(mp->m_flags & XFS_MOUNT_RDONLY)) {
                xfs_warn(mp, "no-recovery mounts must be read-only.");
                return -EINVAL;
        }

        if ((mp->m_flags & XFS_MOUNT_NOALIGN) &&
            (mp->m_dalign || mp->m_swidth)) {
                xfs_warn(mp,
        "sunit and swidth options incompatible with the noalign option");
                return -EINVAL;
        }

        if (!IS_ENABLED(CONFIG_XFS_QUOTA) && mp->m_qflags != 0) {
                xfs_warn(mp, "quota support not available in this kernel.");
                return -EINVAL;
        }

        if ((mp->m_dalign && !mp->m_swidth) ||
            (!mp->m_dalign && mp->m_swidth)) {
                xfs_warn(mp, "sunit and swidth must be specified together");
                return -EINVAL;
        }

        if (mp->m_dalign && (mp->m_swidth % mp->m_dalign != 0)) {
                xfs_warn(mp,
        "stripe width (%d) must be a multiple of the stripe unit (%d)",
                        mp->m_swidth, mp->m_dalign);
                return -EINVAL;
        }

        if (mp->m_logbufs != -1 &&
            mp->m_logbufs != 0 &&
            (mp->m_logbufs < XLOG_MIN_ICLOGS ||
             mp->m_logbufs > XLOG_MAX_ICLOGS)) {
                xfs_warn(mp, "invalid logbufs value: %d [not %d-%d]",
                        mp->m_logbufs, XLOG_MIN_ICLOGS, XLOG_MAX_ICLOGS);
                return -EINVAL;
        }

        if (mp->m_logbsize != -1 &&
            mp->m_logbsize != 0 &&
            (mp->m_logbsize < XLOG_MIN_RECORD_BSIZE ||
             mp->m_logbsize > XLOG_MAX_RECORD_BSIZE ||
             !is_power_of_2(mp->m_logbsize))) {
                xfs_warn(mp,
                        "invalid logbufsize: %d [not 16k,32k,64k,128k or 256k]",
                        mp->m_logbsize);
                return -EINVAL;
        }

        if ((mp->m_flags & XFS_MOUNT_ALLOCSIZE) &&
            (mp->m_allocsize_log > XFS_MAX_IO_LOG ||
             mp->m_allocsize_log < XFS_MIN_IO_LOG)) {
                xfs_warn(mp, "invalid log iosize: %d [not %d-%d]",
                        mp->m_allocsize_log, XFS_MIN_IO_LOG, XFS_MAX_IO_LOG);
                return -EINVAL;
        }

        return 0;
}
static int
xfs_fc_fill_super(
        struct super_block      *sb,
        struct fs_context       *fc)
{
        struct xfs_mount        *mp = sb->s_fs_info;
        struct inode            *root;
        int                     flags = 0, error;

        mp->m_super = sb;

        error = xfs_fc_validate_params(mp);
        if (error)
                goto out_free_names;

        sb_min_blocksize(sb, BBSIZE);
        sb->s_xattr = xfs_xattr_handlers;
        sb->s_export_op = &xfs_export_operations;
#ifdef CONFIG_XFS_QUOTA
        sb->s_qcop = &xfs_quotactl_operations;
        sb->s_quota_types = QTYPE_MASK_USR | QTYPE_MASK_GRP | QTYPE_MASK_PRJ;
#endif
        sb->s_op = &xfs_super_operations;

        /*
         * Delay mount work if the debug hook is set. This is debug
         * instrumentation to coordinate simulation of xfs mount failures with
         * VFS superblock operations
         */
        if (xfs_globals.mount_delay) {
                xfs_notice(mp, "Delaying mount for %d seconds.",
                        xfs_globals.mount_delay);
                msleep(xfs_globals.mount_delay * 1000);
        }

        if (fc->sb_flags & SB_SILENT)
                flags |= XFS_MFSI_QUIET;

        error = xfs_open_devices(mp);
        if (error)
                goto out_free_names;

        error = xfs_init_mount_workqueues(mp);
        if (error)
                goto out_close_devices;

        error = xfs_init_percpu_counters(mp);
        if (error)
                goto out_destroy_workqueues;

        /* Allocate stats memory before we do operations that might use it */
        mp->m_stats.xs_stats = alloc_percpu(struct xfsstats);
        if (!mp->m_stats.xs_stats) {
                error = -ENOMEM;
                goto out_destroy_counters;
        }

        error = xfs_readsb(mp, flags);
        if (error)
                goto out_free_stats;

        error = xfs_finish_flags(mp);
        if (error)
                goto out_free_sb;

        error = xfs_setup_devices(mp);
        if (error)
                goto out_free_sb;

        /*
         * XFS block mappings use 54 bits to store the logical block offset.
         * This should suffice to handle the maximum file size that the VFS
         * supports (currently 2^63 bytes on 64-bit and ULONG_MAX << PAGE_SHIFT
         * bytes on 32-bit), but as XFS and VFS have gotten the s_maxbytes
         * calculation wrong on 32-bit kernels in the past, we'll add a WARN_ON
         * to check this assertion.
         *
         * Avoid integer overflow by comparing the maximum bmbt offset to the
         * maximum pagecache offset in units of fs blocks.
         */
        if (XFS_B_TO_FSBT(mp, MAX_LFS_FILESIZE) > XFS_MAX_FILEOFF) {
                xfs_warn(mp,
"MAX_LFS_FILESIZE block offset (%llu) exceeds extent map maximum (%llu)!",
                         XFS_B_TO_FSBT(mp, MAX_LFS_FILESIZE),
                         XFS_MAX_FILEOFF);
                error = -EINVAL;
                goto out_free_sb;
        }

        error = xfs_filestream_mount(mp);
        if (error)
                goto out_free_sb;

        /*
         * we must configure the block size in the superblock before we run the
         * full mount process as the mount process can lookup and cache inodes.
         */
        sb->s_magic = XFS_SUPER_MAGIC;
        sb->s_blocksize = mp->m_sb.sb_blocksize;
        sb->s_blocksize_bits = ffs(sb->s_blocksize) - 1;
        sb->s_maxbytes = MAX_LFS_FILESIZE;
        sb->s_max_links = XFS_MAXLINK;
        sb->s_time_gran = 1;
        sb->s_time_min = S32_MIN;
        sb->s_time_max = S32_MAX;
        sb->s_iflags |= SB_I_CGROUPWB;

        set_posix_acl_flag(sb);

        /* version 5 superblocks support inode version counters. */
        if (XFS_SB_VERSION_NUM(&mp->m_sb) == XFS_SB_VERSION_5)
                sb->s_flags |= SB_I_VERSION;

        if (mp->m_flags & XFS_MOUNT_DAX) {
                bool rtdev_is_dax = false, datadev_is_dax;

                xfs_warn(mp,
                "DAX enabled. Warning: EXPERIMENTAL, use at your own risk");

                datadev_is_dax = bdev_dax_supported(mp->m_ddev_targp->bt_bdev,
                        sb->s_blocksize);
                if (mp->m_rtdev_targp)
                        rtdev_is_dax = bdev_dax_supported(
                                mp->m_rtdev_targp->bt_bdev, sb->s_blocksize);
                if (!rtdev_is_dax && !datadev_is_dax) {
                        xfs_alert(mp,
                        "DAX unsupported by block device. Turning off DAX.");
                        mp->m_flags &= ~XFS_MOUNT_DAX;
                }
                if (xfs_sb_version_hasreflink(&mp->m_sb)) {
                        xfs_alert(mp,
                "DAX and reflink cannot be used together!");
                        error = -EINVAL;
                        goto out_filestream_unmount;
                }
        }

        if (mp->m_flags & XFS_MOUNT_DISCARD) {
                struct request_queue *q = bdev_get_queue(sb->s_bdev);

                if (!blk_queue_discard(q)) {
                        xfs_warn(mp, "mounting with \"discard\" option, but "
                                        "the device does not support discard");
                        mp->m_flags &= ~XFS_MOUNT_DISCARD;
                }
        }

        if (xfs_sb_version_hasreflink(&mp->m_sb)) {
                if (mp->m_sb.sb_rblocks) {
                        xfs_alert(mp,
        "reflink not compatible with realtime device!");
                        error = -EINVAL;
                        goto out_filestream_unmount;
                }

                if (xfs_globals.always_cow) {
                        xfs_info(mp, "using DEBUG-only always_cow mode.");
                        mp->m_always_cow = true;
                }
        }

        if (xfs_sb_version_hasrmapbt(&mp->m_sb) && mp->m_sb.sb_rblocks) {
                xfs_alert(mp,
        "reverse mapping btree not compatible with realtime device!");
                error = -EINVAL;
                goto out_filestream_unmount;
        }

        error = xfs_mountfs(mp);
        if (error)
                goto out_filestream_unmount;

        root = igrab(VFS_I(mp->m_rootip));
        if (!root) {
                error = -ENOENT;
                goto out_unmount;
        }
        sb->s_root = d_make_root(root);
        if (!sb->s_root) {
                error = -ENOMEM;
                goto out_unmount;
        }

        return 0;

 out_filestream_unmount:
        xfs_filestream_unmount(mp);
 out_free_sb:
        xfs_freesb(mp);
 out_free_stats:
        free_percpu(mp->m_stats.xs_stats);
 out_destroy_counters:
        xfs_destroy_percpu_counters(mp);
 out_destroy_workqueues:
        xfs_destroy_mount_workqueues(mp);
 out_close_devices:
        xfs_close_devices(mp);
 out_free_names:
        sb->s_fs_info = NULL;
        xfs_mount_free(mp);
        return error;

 out_unmount:
        xfs_filestream_unmount(mp);
        xfs_unmountfs(mp);
        goto out_free_sb;
}
static int
xfs_fc_get_tree(
        struct fs_context       *fc)
{
        return get_tree_bdev(fc, xfs_fc_fill_super);
}
static int
xfs_remount_rw(
        struct xfs_mount        *mp)
{
        struct xfs_sb           *sbp = &mp->m_sb;
        int error;

        if (mp->m_flags & XFS_MOUNT_NORECOVERY) {
                xfs_warn(mp,
                        "ro->rw transition prohibited on norecovery mount");
                return -EINVAL;
        }

        if (XFS_SB_VERSION_NUM(sbp) == XFS_SB_VERSION_5 &&
            xfs_sb_has_ro_compat_feature(sbp, XFS_SB_FEAT_RO_COMPAT_UNKNOWN)) {
                xfs_warn(mp,
        "ro->rw transition prohibited on unknown (0x%x) ro-compat filesystem",
                        (sbp->sb_features_ro_compat &
                                XFS_SB_FEAT_RO_COMPAT_UNKNOWN));
                return -EINVAL;
        }

        mp->m_flags &= ~XFS_MOUNT_RDONLY;

        /*
         * If this is the first remount to writeable state we might have some
         * superblock changes to update.
         */
        if (mp->m_update_sb) {
                error = xfs_sync_sb(mp, false);
                if (error) {
                        xfs_warn(mp, "failed to write sb changes");
                        return error;
                }
                mp->m_update_sb = false;
        }

        /*
         * Fill out the reserve pool if it is empty. Use the stashed value if
         * it is non-zero, otherwise go with the default.
         */
        xfs_restore_resvblks(mp);
        xfs_log_work_queue(mp);

        /* Recover any CoW blocks that never got remapped. */
        error = xfs_reflink_recover_cow(mp);
        if (error) {
                xfs_err(mp,
                        "Error %d recovering leftover CoW allocations.", error);
                xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
                return error;
        }
        xfs_start_block_reaping(mp);

        /* Create the per-AG metadata reservation pool. */
        error = xfs_fs_reserve_ag_blocks(mp);
        if (error && error != -ENOSPC)
                return error;

        return 0;
}
static int
xfs_remount_ro(
        struct xfs_mount        *mp)
{
        int error;

        /*
         * Cancel background eofb scanning so it cannot race with the final
         * log force+buftarg wait and deadlock the remount.
         */
        xfs_stop_block_reaping(mp);

        /* Get rid of any leftover CoW reservations... */
        error = xfs_icache_free_cowblocks(mp, NULL);
        if (error) {
                xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
                return error;
        }

        /* Free the per-AG metadata reservation pool. */
        error = xfs_fs_unreserve_ag_blocks(mp);
        if (error) {
                xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
                return error;
        }

        /*
         * Before we sync the metadata, we need to free up the reserve block
         * pool so that the used block count in the superblock on disk is
         * correct at the end of the remount. Stash the current reserve pool
         * size so that if we get remounted rw, we can return it to the same
         * size.
         */
        xfs_save_resvblks(mp);

        xfs_quiesce_attr(mp);
        mp->m_flags |= XFS_MOUNT_RDONLY;

        return 0;
}
/*
 * Logically we would return an error here to prevent users from believing
 * they might have changed mount options using remount which can't be changed.
 *
 * But unfortunately mount(8) adds all options from mtab and fstab to the mount
 * arguments in some cases so we can't blindly reject options, but have to
 * check for each specified option if it actually differs from the currently
 * set option and only reject it if that's the case.
 *
 * Until that is implemented we return success for every remount request, and
 * silently ignore all options that we can't actually change.
 */
static int
xfs_fc_reconfigure(
        struct fs_context *fc)
{
        struct xfs_mount        *mp = XFS_M(fc->root->d_sb);
        struct xfs_mount        *new_mp = fc->s_fs_info;
        xfs_sb_t                *sbp = &mp->m_sb;
        int                     flags = fc->sb_flags;
        int                     error;

        error = xfs_fc_validate_params(new_mp);
        if (error)
                return error;

        sync_filesystem(mp->m_super);

        /* inode32 -> inode64 */
        if ((mp->m_flags & XFS_MOUNT_SMALL_INUMS) &&
            !(new_mp->m_flags & XFS_MOUNT_SMALL_INUMS)) {
                mp->m_flags &= ~XFS_MOUNT_SMALL_INUMS;
                mp->m_maxagi = xfs_set_inode_alloc(mp, sbp->sb_agcount);
        }

        /* inode64 -> inode32 */
        if (!(mp->m_flags & XFS_MOUNT_SMALL_INUMS) &&
            (new_mp->m_flags & XFS_MOUNT_SMALL_INUMS)) {
                mp->m_flags |= XFS_MOUNT_SMALL_INUMS;
                mp->m_maxagi = xfs_set_inode_alloc(mp, sbp->sb_agcount);
        }

        /* ro -> rw */
        if ((mp->m_flags & XFS_MOUNT_RDONLY) && !(flags & SB_RDONLY)) {
                error = xfs_remount_rw(mp);
                if (error)
                        return error;
        }

        /* rw -> ro */
        if (!(mp->m_flags & XFS_MOUNT_RDONLY) && (flags & SB_RDONLY)) {
                error = xfs_remount_ro(mp);
                if (error)
                        return error;
        }

        return 0;
}
static void xfs_fc_free(
        struct fs_context       *fc)
{
        struct xfs_mount        *mp = fc->s_fs_info;

        /*
         * mp is stored in the fs_context when it is initialized.
         * mp is transferred to the superblock on a successful mount,
         * but if an error occurs before the transfer we have to free
         * it here.
         */
        if (mp)
                xfs_mount_free(mp);
}
static const struct fs_context_operations xfs_context_ops = {
        .parse_param = xfs_fc_parse_param,
        .get_tree    = xfs_fc_get_tree,
        .reconfigure = xfs_fc_reconfigure,
        .free        = xfs_fc_free,
};
static int xfs_init_fs_context(
        struct fs_context       *fc)
{
        struct xfs_mount        *mp;

        mp = kmem_alloc(sizeof(struct xfs_mount), KM_ZERO);
        if (!mp)
                return -ENOMEM;

        spin_lock_init(&mp->m_sb_lock);
        spin_lock_init(&mp->m_agirotor_lock);
        INIT_RADIX_TREE(&mp->m_perag_tree, GFP_ATOMIC);
        spin_lock_init(&mp->m_perag_lock);
        mutex_init(&mp->m_growlock);
        atomic_set(&mp->m_active_trans, 0);
        INIT_DELAYED_WORK(&mp->m_reclaim_work, xfs_reclaim_worker);
        INIT_DELAYED_WORK(&mp->m_eofblocks_work, xfs_eofblocks_worker);
        INIT_DELAYED_WORK(&mp->m_cowblocks_work, xfs_cowblocks_worker);
        mp->m_kobj.kobject.kset = xfs_kset;
        /*
         * We don't create the finobt per-ag space reservation until after log
         * recovery, so we must set this to true so that an ifree transaction
         * started during log recovery will not depend on space reservations
         * for finobt expansion.
         */
        mp->m_finobt_nores = true;

        /*
         * These can be overridden by the mount option parsing.
         */
        mp->m_logbufs = -1;
        mp->m_logbsize = -1;
        mp->m_allocsize_log = 16; /* 64k */

        /*
         * Copy binary VFS mount flags we are interested in.
         */
        if (fc->sb_flags & SB_RDONLY)
                mp->m_flags |= XFS_MOUNT_RDONLY;
        if (fc->sb_flags & SB_DIRSYNC)
                mp->m_flags |= XFS_MOUNT_DIRSYNC;
        if (fc->sb_flags & SB_SYNCHRONOUS)
                mp->m_flags |= XFS_MOUNT_WSYNC;

        fc->s_fs_info = mp;
        fc->ops = &xfs_context_ops;

        return 0;
}
static struct file_system_type xfs_fs_type = {
        .owner                  = THIS_MODULE,
        .name                   = "xfs",
        .init_fs_context        = xfs_init_fs_context,
        .parameters             = xfs_fs_parameters,
        .kill_sb                = kill_block_super,
        .fs_flags               = FS_REQUIRES_DEV,
};
MODULE_ALIAS_FS("xfs");
STATIC int __init
xfs_init_zones(void)
{
        xfs_log_ticket_zone = kmem_cache_create("xfs_log_ticket",
                                                sizeof(struct xlog_ticket),
                                                0, 0, NULL);
        if (!xfs_log_ticket_zone)
                goto out;

        xfs_bmap_free_item_zone = kmem_cache_create("xfs_bmap_free_item",
                                        sizeof(struct xfs_extent_free_item),
                                        0, 0, NULL);
        if (!xfs_bmap_free_item_zone)
                goto out_destroy_log_ticket_zone;

        xfs_btree_cur_zone = kmem_cache_create("xfs_btree_cur",
                                               sizeof(struct xfs_btree_cur),
                                               0, 0, NULL);
        if (!xfs_btree_cur_zone)
                goto out_destroy_bmap_free_item_zone;

        xfs_da_state_zone = kmem_cache_create("xfs_da_state",
                                              sizeof(struct xfs_da_state),
                                              0, 0, NULL);
        if (!xfs_da_state_zone)
                goto out_destroy_btree_cur_zone;

        xfs_ifork_zone = kmem_cache_create("xfs_ifork",
                                           sizeof(struct xfs_ifork),
                                           0, 0, NULL);
        if (!xfs_ifork_zone)
                goto out_destroy_da_state_zone;

        xfs_trans_zone = kmem_cache_create("xf_trans",
                                           sizeof(struct xfs_trans),
                                           0, 0, NULL);
        if (!xfs_trans_zone)
                goto out_destroy_ifork_zone;

        /*
         * The size of the zone allocated buf log item is the maximum
         * size possible under XFS.  This wastes a little bit of memory,
         * but it is much faster.
         */
        xfs_buf_item_zone = kmem_cache_create("xfs_buf_item",
                                              sizeof(struct xfs_buf_log_item),
                                              0, 0, NULL);
        if (!xfs_buf_item_zone)
                goto out_destroy_trans_zone;

        xfs_efd_zone = kmem_cache_create("xfs_efd_item",
                                        (sizeof(struct xfs_efd_log_item) +
                                        (XFS_EFD_MAX_FAST_EXTENTS - 1) *
                                        sizeof(struct xfs_extent)),
                                        0, 0, NULL);
        if (!xfs_efd_zone)
                goto out_destroy_buf_item_zone;

        xfs_efi_zone = kmem_cache_create("xfs_efi_item",
                                         (sizeof(struct xfs_efi_log_item) +
                                         (XFS_EFI_MAX_FAST_EXTENTS - 1) *
                                         sizeof(struct xfs_extent)),
                                         0, 0, NULL);
        if (!xfs_efi_zone)
                goto out_destroy_efd_zone;

        xfs_inode_zone = kmem_cache_create("xfs_inode",
                                           sizeof(struct xfs_inode), 0,
                                           (SLAB_HWCACHE_ALIGN |
                                            SLAB_RECLAIM_ACCOUNT |
                                            SLAB_MEM_SPREAD | SLAB_ACCOUNT),
                                           xfs_fs_inode_init_once);
        if (!xfs_inode_zone)
                goto out_destroy_efi_zone;

        xfs_ili_zone = kmem_cache_create("xfs_ili",
                                         sizeof(struct xfs_inode_log_item), 0,
                                         SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD,
                                         NULL);
        if (!xfs_ili_zone)
                goto out_destroy_inode_zone;

        xfs_icreate_zone = kmem_cache_create("xfs_icr",
                                             sizeof(struct xfs_icreate_item),
                                             0, 0, NULL);
        if (!xfs_icreate_zone)
                goto out_destroy_ili_zone;

        xfs_rud_zone = kmem_cache_create("xfs_rud_item",
                                         sizeof(struct xfs_rud_log_item),
                                         0, 0, NULL);
        if (!xfs_rud_zone)
                goto out_destroy_icreate_zone;

        xfs_rui_zone = kmem_cache_create("xfs_rui_item",
                        xfs_rui_log_item_sizeof(XFS_RUI_MAX_FAST_EXTENTS),
                        0, 0, NULL);
        if (!xfs_rui_zone)
                goto out_destroy_rud_zone;

        xfs_cud_zone = kmem_cache_create("xfs_cud_item",
                                         sizeof(struct xfs_cud_log_item),
                                         0, 0, NULL);
        if (!xfs_cud_zone)
                goto out_destroy_rui_zone;

        xfs_cui_zone = kmem_cache_create("xfs_cui_item",
                        xfs_cui_log_item_sizeof(XFS_CUI_MAX_FAST_EXTENTS),
                        0, 0, NULL);
        if (!xfs_cui_zone)
                goto out_destroy_cud_zone;

        xfs_bud_zone = kmem_cache_create("xfs_bud_item",
                                         sizeof(struct xfs_bud_log_item),
                                         0, 0, NULL);
        if (!xfs_bud_zone)
                goto out_destroy_cui_zone;

        xfs_bui_zone = kmem_cache_create("xfs_bui_item",
                        xfs_bui_log_item_sizeof(XFS_BUI_MAX_FAST_EXTENTS),
                        0, 0, NULL);
        if (!xfs_bui_zone)
                goto out_destroy_bud_zone;

        return 0;

 out_destroy_bud_zone:
        kmem_cache_destroy(xfs_bud_zone);
 out_destroy_cui_zone:
        kmem_cache_destroy(xfs_cui_zone);
 out_destroy_cud_zone:
        kmem_cache_destroy(xfs_cud_zone);
 out_destroy_rui_zone:
        kmem_cache_destroy(xfs_rui_zone);
 out_destroy_rud_zone:
        kmem_cache_destroy(xfs_rud_zone);
 out_destroy_icreate_zone:
        kmem_cache_destroy(xfs_icreate_zone);
 out_destroy_ili_zone:
        kmem_cache_destroy(xfs_ili_zone);
 out_destroy_inode_zone:
        kmem_cache_destroy(xfs_inode_zone);
 out_destroy_efi_zone:
        kmem_cache_destroy(xfs_efi_zone);
 out_destroy_efd_zone:
        kmem_cache_destroy(xfs_efd_zone);
 out_destroy_buf_item_zone:
        kmem_cache_destroy(xfs_buf_item_zone);
 out_destroy_trans_zone:
        kmem_cache_destroy(xfs_trans_zone);
 out_destroy_ifork_zone:
        kmem_cache_destroy(xfs_ifork_zone);
 out_destroy_da_state_zone:
        kmem_cache_destroy(xfs_da_state_zone);
 out_destroy_btree_cur_zone:
        kmem_cache_destroy(xfs_btree_cur_zone);
 out_destroy_bmap_free_item_zone:
        kmem_cache_destroy(xfs_bmap_free_item_zone);
 out_destroy_log_ticket_zone:
        kmem_cache_destroy(xfs_log_ticket_zone);
 out:
        return -ENOMEM;
}
STATIC void
xfs_destroy_zones(void)
{
        /*
         * Make sure all delayed rcu free are flushed before we
         * destroy caches.
         */
        rcu_barrier();
        kmem_cache_destroy(xfs_bui_zone);
        kmem_cache_destroy(xfs_bud_zone);
        kmem_cache_destroy(xfs_cui_zone);
        kmem_cache_destroy(xfs_cud_zone);
        kmem_cache_destroy(xfs_rui_zone);
        kmem_cache_destroy(xfs_rud_zone);
        kmem_cache_destroy(xfs_icreate_zone);
        kmem_cache_destroy(xfs_ili_zone);
        kmem_cache_destroy(xfs_inode_zone);
        kmem_cache_destroy(xfs_efi_zone);
        kmem_cache_destroy(xfs_efd_zone);
        kmem_cache_destroy(xfs_buf_item_zone);
        kmem_cache_destroy(xfs_trans_zone);
        kmem_cache_destroy(xfs_ifork_zone);
        kmem_cache_destroy(xfs_da_state_zone);
        kmem_cache_destroy(xfs_btree_cur_zone);
        kmem_cache_destroy(xfs_bmap_free_item_zone);
        kmem_cache_destroy(xfs_log_ticket_zone);
}
STATIC int __init
xfs_init_workqueues(void)
{
        /*
         * The allocation workqueue can be used in memory reclaim situations
         * (writepage path), and parallelism is only limited by the number of
         * AGs in all the filesystems mounted. Hence use the default large
         * max_active value for this workqueue.
         */
        xfs_alloc_wq = alloc_workqueue("xfsalloc",
                        WQ_MEM_RECLAIM|WQ_FREEZABLE, 0);
        if (!xfs_alloc_wq)
                return -ENOMEM;

        xfs_discard_wq = alloc_workqueue("xfsdiscard", WQ_UNBOUND, 0);
        if (!xfs_discard_wq)
                goto out_free_alloc_wq;

        return 0;
out_free_alloc_wq:
        destroy_workqueue(xfs_alloc_wq);
        return -ENOMEM;
}

STATIC void
xfs_destroy_workqueues(void)
{
        destroy_workqueue(xfs_discard_wq);
        destroy_workqueue(xfs_alloc_wq);
}
STATIC int __init
init_xfs_fs(void)
{
        int                     error;

        xfs_check_ondisk_structs();

        printk(KERN_INFO XFS_VERSION_STRING " with "
                         XFS_BUILD_OPTIONS " enabled\n");

        error = xfs_init_zones();
        if (error)
                goto out;

        error = xfs_init_workqueues();
        if (error)
                goto out_destroy_zones;

        error = xfs_mru_cache_init();
        if (error)
                goto out_destroy_wq;

        error = xfs_buf_init();
        if (error)
                goto out_mru_cache_uninit;

        error = xfs_init_procfs();
        if (error)
                goto out_buf_terminate;

        error = xfs_sysctl_register();
        if (error)
                goto out_cleanup_procfs;

        xfs_kset = kset_create_and_add("xfs", NULL, fs_kobj);
        if (!xfs_kset) {
                error = -ENOMEM;
                goto out_sysctl_unregister;
        }

        xfsstats.xs_kobj.kobject.kset = xfs_kset;

        xfsstats.xs_stats = alloc_percpu(struct xfsstats);
        if (!xfsstats.xs_stats) {
                error = -ENOMEM;
                goto out_kset_unregister;
        }

        error = xfs_sysfs_init(&xfsstats.xs_kobj, &xfs_stats_ktype, NULL,
                               "stats");
        if (error)
                goto out_free_stats;

        xfs_dbg_kobj.kobject.kset = xfs_kset;
        error = xfs_sysfs_init(&xfs_dbg_kobj, &xfs_dbg_ktype, NULL, "debug");
        if (error)
                goto out_remove_stats_kobj;

        error = xfs_qm_init();
        if (error)
                goto out_remove_dbg_kobj;

        error = register_filesystem(&xfs_fs_type);
        if (error)
                goto out_qm_exit;
        return 0;

 out_qm_exit:
        xfs_qm_exit();
 out_remove_dbg_kobj:
        xfs_sysfs_del(&xfs_dbg_kobj);
 out_remove_stats_kobj:
        xfs_sysfs_del(&xfsstats.xs_kobj);
 out_free_stats:
        free_percpu(xfsstats.xs_stats);
 out_kset_unregister:
        kset_unregister(xfs_kset);
 out_sysctl_unregister:
        xfs_sysctl_unregister();
 out_cleanup_procfs:
        xfs_cleanup_procfs();
 out_buf_terminate:
        xfs_buf_terminate();
 out_mru_cache_uninit:
        xfs_mru_cache_uninit();
 out_destroy_wq:
        xfs_destroy_workqueues();
 out_destroy_zones:
        xfs_destroy_zones();
 out:
        return error;
}

STATIC void __exit
exit_xfs_fs(void)
{
        xfs_qm_exit();
        unregister_filesystem(&xfs_fs_type);
        xfs_sysfs_del(&xfs_dbg_kobj);
        xfs_sysfs_del(&xfsstats.xs_kobj);
        free_percpu(xfsstats.xs_stats);
        kset_unregister(xfs_kset);
        xfs_sysctl_unregister();
        xfs_cleanup_procfs();
        xfs_buf_terminate();
        xfs_mru_cache_uninit();
        xfs_destroy_workqueues();
        xfs_destroy_zones();
        xfs_uuid_table_free();
}
module_init(init_xfs_fs);
module_exit(exit_xfs_fs);

MODULE_AUTHOR("Silicon Graphics, Inc.");
MODULE_DESCRIPTION(XFS_VERSION_STRING " with " XFS_BUILD_OPTIONS " enabled");
MODULE_LICENSE("GPL");