/*
 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 */
#include "xfs_types.h"
#include "xfs_trans.h"
#include "xfs_dmapi.h"
#include "xfs_mount.h"
#include "xfs_bmap_btree.h"
#include "xfs_alloc_btree.h"
#include "xfs_ialloc_btree.h"
#include "xfs_dir2_sf.h"
#include "xfs_attr_sf.h"
#include "xfs_dinode.h"
#include "xfs_inode.h"
#include "xfs_btree.h"
#include "xfs_ialloc.h"
#include "xfs_alloc.h"
#include "xfs_rtalloc.h"
#include "xfs_error.h"
#include "xfs_quota.h"
#include "xfs_fsops.h"
STATIC void	xfs_mount_log_sbunit(xfs_mount_t *, __int64_t);
STATIC int	xfs_uuid_mount(xfs_mount_t *);
STATIC void	xfs_uuid_unmount(xfs_mount_t *mp);
STATIC void	xfs_unmountfs_wait(xfs_mount_t *);

#ifdef HAVE_PERCPU_SB
STATIC void	xfs_icsb_destroy_counters(xfs_mount_t *);
STATIC void	xfs_icsb_balance_counter(xfs_mount_t *, xfs_sb_field_t, int, int);
STATIC void	xfs_icsb_sync_counters(xfs_mount_t *);
STATIC int	xfs_icsb_modify_counters(xfs_mount_t *, xfs_sb_field_t, int64_t, int);
STATIC int	xfs_icsb_disable_counter(xfs_mount_t *, xfs_sb_field_t);

#else

#define xfs_icsb_destroy_counters(mp)			do { } while (0)
#define xfs_icsb_balance_counter(mp, a, b, c)		do { } while (0)
#define xfs_icsb_sync_counters(mp)			do { } while (0)
#define xfs_icsb_modify_counters(mp, a, b, c)		do { } while (0)

#endif
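/*
 * When HAVE_PERCPU_SB is not configured, the do { } while (0) stubs above
 * make every xfs_icsb_*() call compile away, so the generic superblock
 * counter paths later in this file need no #ifdefs of their own.
 */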
static const struct {
	short offset;
	short type;	/* 0 = integer
			 * 1 = binary / string (no translation)
			 */
} xfs_sb_info[] = {
    { offsetof(xfs_sb_t, sb_magicnum),	 0 },
    { offsetof(xfs_sb_t, sb_blocksize),	 0 },
    { offsetof(xfs_sb_t, sb_dblocks),	 0 },
    { offsetof(xfs_sb_t, sb_rblocks),	 0 },
    { offsetof(xfs_sb_t, sb_rextents),	 0 },
    { offsetof(xfs_sb_t, sb_uuid),	 1 },
    { offsetof(xfs_sb_t, sb_logstart),	 0 },
    { offsetof(xfs_sb_t, sb_rootino),	 0 },
    { offsetof(xfs_sb_t, sb_rbmino),	 0 },
    { offsetof(xfs_sb_t, sb_rsumino),	 0 },
    { offsetof(xfs_sb_t, sb_rextsize),	 0 },
    { offsetof(xfs_sb_t, sb_agblocks),	 0 },
    { offsetof(xfs_sb_t, sb_agcount),	 0 },
    { offsetof(xfs_sb_t, sb_rbmblocks), 0 },
    { offsetof(xfs_sb_t, sb_logblocks), 0 },
    { offsetof(xfs_sb_t, sb_versionnum), 0 },
    { offsetof(xfs_sb_t, sb_sectsize),	 0 },
    { offsetof(xfs_sb_t, sb_inodesize), 0 },
    { offsetof(xfs_sb_t, sb_inopblock), 0 },
    { offsetof(xfs_sb_t, sb_fname[0]),	 1 },
    { offsetof(xfs_sb_t, sb_blocklog),	 0 },
    { offsetof(xfs_sb_t, sb_sectlog),	 0 },
    { offsetof(xfs_sb_t, sb_inodelog),	 0 },
    { offsetof(xfs_sb_t, sb_inopblog),	 0 },
    { offsetof(xfs_sb_t, sb_agblklog),	 0 },
    { offsetof(xfs_sb_t, sb_rextslog),	 0 },
    { offsetof(xfs_sb_t, sb_inprogress), 0 },
    { offsetof(xfs_sb_t, sb_imax_pct),	 0 },
    { offsetof(xfs_sb_t, sb_icount),	 0 },
    { offsetof(xfs_sb_t, sb_ifree),	 0 },
    { offsetof(xfs_sb_t, sb_fdblocks),	 0 },
    { offsetof(xfs_sb_t, sb_frextents), 0 },
    { offsetof(xfs_sb_t, sb_uquotino),	 0 },
    { offsetof(xfs_sb_t, sb_gquotino),	 0 },
    { offsetof(xfs_sb_t, sb_qflags),	 0 },
    { offsetof(xfs_sb_t, sb_flags),	 0 },
    { offsetof(xfs_sb_t, sb_shared_vn), 0 },
    { offsetof(xfs_sb_t, sb_inoalignmt), 0 },
    { offsetof(xfs_sb_t, sb_unit),	 0 },
    { offsetof(xfs_sb_t, sb_width),	 0 },
    { offsetof(xfs_sb_t, sb_dirblklog), 0 },
    { offsetof(xfs_sb_t, sb_logsectlog), 0 },
    { offsetof(xfs_sb_t, sb_logsectsize), 0 },
    { offsetof(xfs_sb_t, sb_logsunit),	 0 },
    { offsetof(xfs_sb_t, sb_features2), 0 },
    { sizeof(xfs_sb_t),			 0 }
};
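/*
 * xfs_sb_to_disk() derives each field's on-disk size from the difference
 * between consecutive offsets in this table, which is why the final entry
 * is the total size of the superblock rather than another field offset.
 */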
/*
 * Return a pointer to an initialized xfs_mount structure.
 */
	mp = kmem_zalloc(sizeof(xfs_mount_t), KM_SLEEP);

	if (xfs_icsb_init_counters(mp)) {
		mp->m_flags |= XFS_MOUNT_NO_PERCPU_SB;
	}

	spin_lock_init(&mp->m_ail_lock);
	spin_lock_init(&mp->m_sb_lock);
	mutex_init(&mp->m_ilock);
	mutex_init(&mp->m_growlock);

	/*
	 * Initialize the AIL.
	 */
	xfs_trans_ail_init(mp);

	atomic_set(&mp->m_active_trans, 0);
/*
 * Free up the resources associated with a mount structure.  Assume that
 * the structure was initially zeroed, so we can tell which fields got
 * initialized.
 */
	for (agno = 0; agno < mp->m_maxagi; agno++)
		if (mp->m_perag[agno].pagb_list)
			kmem_free(mp->m_perag[agno].pagb_list,
				  sizeof(xfs_perag_busy_t) *
					XFS_PAGB_NUM_SLOTS);
	kmem_free(mp->m_perag,
		  sizeof(xfs_perag_t) * mp->m_sb.sb_agcount);

	spinlock_destroy(&mp->m_ail_lock);
	spinlock_destroy(&mp->m_sb_lock);
	mutex_destroy(&mp->m_ilock);
	mutex_destroy(&mp->m_growlock);

	if (mp->m_fsname != NULL)
		kmem_free(mp->m_fsname, mp->m_fsname_len);
	if (mp->m_rtname != NULL)
		kmem_free(mp->m_rtname, strlen(mp->m_rtname) + 1);
	if (mp->m_logname != NULL)
		kmem_free(mp->m_logname, strlen(mp->m_logname) + 1);

	xfs_icsb_destroy_counters(mp);
/*
 * Check size of device based on the (data/realtime) block count.
 * Note: this check is used by the growfs code as well as mount.
 */
int
xfs_sb_validate_fsb_count(
	xfs_sb_t	*sbp,
	__uint64_t	nblocks)
{
	ASSERT(PAGE_SHIFT >= sbp->sb_blocklog);
	ASSERT(sbp->sb_blocklog >= BBSHIFT);

#if XFS_BIG_BLKNOS     /* Limited by ULONG_MAX of page cache index */
	if (nblocks >> (PAGE_CACHE_SHIFT - sbp->sb_blocklog) > ULONG_MAX)
		return E2BIG;
#else                  /* Limited by UINT_MAX of sectors */
	if (nblocks << (sbp->sb_blocklog - BBSHIFT) > UINT_MAX)
		return E2BIG;
#endif
	return 0;
}
/*
 * Check the validity of the SB found.
 */
STATIC int
xfs_mount_validate_sb(
	xfs_mount_t	*mp,
	xfs_sb_t	*sbp,
	int		flags)
{
	/*
	 * If the log device and data device have the
	 * same device number, the log is internal.
	 * Consequently, the sb_logstart should be non-zero.  If
	 * we have a zero sb_logstart in this case, we may be trying to mount
	 * a volume filesystem in a non-volume manner.
	 */
	if (sbp->sb_magicnum != XFS_SB_MAGIC) {
		xfs_fs_mount_cmn_err(flags, "bad magic number");
		return XFS_ERROR(EWRONGFS);
	}

	if (!XFS_SB_GOOD_VERSION(sbp)) {
		xfs_fs_mount_cmn_err(flags, "bad version");
		return XFS_ERROR(EWRONGFS);
	}

	if (unlikely(
	    sbp->sb_logstart == 0 && mp->m_logdev_targp == mp->m_ddev_targp)) {
		xfs_fs_mount_cmn_err(flags,
			"filesystem is marked as having an external log; "
			"specify logdev on the\nmount command line.");
		return XFS_ERROR(EINVAL);
	}

	if (unlikely(
	    sbp->sb_logstart != 0 && mp->m_logdev_targp != mp->m_ddev_targp)) {
		xfs_fs_mount_cmn_err(flags,
			"filesystem is marked as having an internal log; "
			"do not specify logdev on\nthe mount command line.");
		return XFS_ERROR(EINVAL);
	}

	/*
	 * More sanity checking. These were stolen directly from
	 * xfs_repair.
	 */
	if (unlikely(
	    sbp->sb_agcount <= 0					||
	    sbp->sb_sectsize < XFS_MIN_SECTORSIZE			||
	    sbp->sb_sectsize > XFS_MAX_SECTORSIZE			||
	    sbp->sb_sectlog < XFS_MIN_SECTORSIZE_LOG			||
	    sbp->sb_sectlog > XFS_MAX_SECTORSIZE_LOG			||
	    sbp->sb_blocksize < XFS_MIN_BLOCKSIZE			||
	    sbp->sb_blocksize > XFS_MAX_BLOCKSIZE			||
	    sbp->sb_blocklog < XFS_MIN_BLOCKSIZE_LOG			||
	    sbp->sb_blocklog > XFS_MAX_BLOCKSIZE_LOG			||
	    sbp->sb_inodesize < XFS_DINODE_MIN_SIZE			||
	    sbp->sb_inodesize > XFS_DINODE_MAX_SIZE			||
	    sbp->sb_inodelog < XFS_DINODE_MIN_LOG			||
	    sbp->sb_inodelog > XFS_DINODE_MAX_LOG			||
	    (sbp->sb_blocklog - sbp->sb_inodelog != sbp->sb_inopblog)	||
	    (sbp->sb_rextsize * sbp->sb_blocksize > XFS_MAX_RTEXTSIZE)	||
	    (sbp->sb_rextsize * sbp->sb_blocksize < XFS_MIN_RTEXTSIZE)	||
	    (sbp->sb_imax_pct > 100 /* zero sb_imax_pct is valid */))) {
		xfs_fs_mount_cmn_err(flags, "SB sanity check 1 failed");
		return XFS_ERROR(EFSCORRUPTED);
	}

	/*
	 * Sanity check AG count, size fields against data size field
	 */
	if (unlikely(
	    sbp->sb_dblocks == 0 ||
	    sbp->sb_dblocks >
	     (xfs_drfsbno_t)sbp->sb_agcount * sbp->sb_agblocks ||
	    sbp->sb_dblocks < (xfs_drfsbno_t)(sbp->sb_agcount - 1) *
			      sbp->sb_agblocks + XFS_MIN_AG_BLOCKS)) {
		xfs_fs_mount_cmn_err(flags, "SB sanity check 2 failed");
		return XFS_ERROR(EFSCORRUPTED);
	}

	if (xfs_sb_validate_fsb_count(sbp, sbp->sb_dblocks) ||
	    xfs_sb_validate_fsb_count(sbp, sbp->sb_rblocks)) {
		xfs_fs_mount_cmn_err(flags,
			"file system too large to be mounted on this system.");
		return XFS_ERROR(E2BIG);
	}

	if (unlikely(sbp->sb_inprogress)) {
		xfs_fs_mount_cmn_err(flags, "file system busy");
		return XFS_ERROR(EFSCORRUPTED);
	}

	/*
	 * Version 1 directory format has never worked on Linux.
	 */
	if (unlikely(!XFS_SB_VERSION_HASDIRV2(sbp))) {
		xfs_fs_mount_cmn_err(flags,
			"file system using version 1 directory format");
		return XFS_ERROR(ENOSYS);
	}

	/*
	 * Until this is fixed only page-sized or smaller data blocks work.
	 */
	if (unlikely(sbp->sb_blocksize > PAGE_SIZE)) {
		xfs_fs_mount_cmn_err(flags,
			"file system with blocksize %d bytes",
			sbp->sb_blocksize);
		xfs_fs_mount_cmn_err(flags,
			"only pagesize (%ld) or less will currently work.",
			PAGE_SIZE);
		return XFS_ERROR(ENOSYS);
	}

	return 0;
}
STATIC void
xfs_initialize_perag_icache(
	xfs_perag_t	*pag)
{
	if (!pag->pag_ici_init) {
		rwlock_init(&pag->pag_ici_lock);
		INIT_RADIX_TREE(&pag->pag_ici_root, GFP_ATOMIC);
		pag->pag_ici_init = 1;
	}
}
xfs_agnumber_t
xfs_initialize_perag(
	xfs_mount_t	*mp,
	xfs_agnumber_t	agcount)
{
	xfs_agnumber_t	index, max_metadata;
	xfs_sb_t	*sbp = &mp->m_sb;
	xfs_ino_t	max_inum = XFS_MAXINUMBER_32;

	/* Check to see if the filesystem can overflow 32 bit inodes */
	agino = XFS_OFFBNO_TO_AGINO(mp, sbp->sb_agblocks - 1, 0);
	ino = XFS_AGINO_TO_INO(mp, agcount - 1, agino);

	/* Clear the mount flag if no inode can overflow 32 bits
	 * on this filesystem, or if specifically requested..
	 */
	if ((mp->m_flags & XFS_MOUNT_SMALL_INUMS) && ino > max_inum) {
		mp->m_flags |= XFS_MOUNT_32BITINODES;
	} else {
		mp->m_flags &= ~XFS_MOUNT_32BITINODES;
	}

	/* If we can overflow then setup the ag headers accordingly */
	if (mp->m_flags & XFS_MOUNT_32BITINODES) {
		/* Calculate how much should be reserved for inodes to
		 * meet the max inode percentage.
		 */
		if (mp->m_maxicount) {
			icount = sbp->sb_dblocks * sbp->sb_imax_pct;
			icount += sbp->sb_agblocks - 1;
			do_div(icount, sbp->sb_agblocks);
			max_metadata = icount;
		} else {
			max_metadata = agcount;
		}
		for (index = 0; index < agcount; index++) {
			ino = XFS_AGINO_TO_INO(mp, index, agino);
			if (ino > max_inum) {
				index++;
				break;
			}

			/* This ag is preferred for inodes */
			pag = &mp->m_perag[index];
			pag->pagi_inodeok = 1;
			if (index < max_metadata)
				pag->pagf_metadata = 1;
			xfs_initialize_perag_icache(pag);
		}
	} else {
		/* Setup default behavior for smaller filesystems */
		for (index = 0; index < agcount; index++) {
			pag = &mp->m_perag[index];
			pag->pagi_inodeok = 1;
			xfs_initialize_perag_icache(pag);
		}
	}
	return index;
}
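/*
 * Net effect of the loops above: on an XFS_MOUNT_32BITINODES mount only
 * the low AGs whose inode numbers stay below XFS_MAXINUMBER_32 are marked
 * pagi_inodeok (with the lowest of them preferred for metadata), while on
 * smaller filesystems every AG simply allows inode allocation.
 */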
void
xfs_sb_from_disk(
	xfs_sb_t	*to,
	xfs_dsb_t	*from)
{
	to->sb_magicnum = be32_to_cpu(from->sb_magicnum);
	to->sb_blocksize = be32_to_cpu(from->sb_blocksize);
	to->sb_dblocks = be64_to_cpu(from->sb_dblocks);
	to->sb_rblocks = be64_to_cpu(from->sb_rblocks);
	to->sb_rextents = be64_to_cpu(from->sb_rextents);
	memcpy(&to->sb_uuid, &from->sb_uuid, sizeof(to->sb_uuid));
	to->sb_logstart = be64_to_cpu(from->sb_logstart);
	to->sb_rootino = be64_to_cpu(from->sb_rootino);
	to->sb_rbmino = be64_to_cpu(from->sb_rbmino);
	to->sb_rsumino = be64_to_cpu(from->sb_rsumino);
	to->sb_rextsize = be32_to_cpu(from->sb_rextsize);
	to->sb_agblocks = be32_to_cpu(from->sb_agblocks);
	to->sb_agcount = be32_to_cpu(from->sb_agcount);
	to->sb_rbmblocks = be32_to_cpu(from->sb_rbmblocks);
	to->sb_logblocks = be32_to_cpu(from->sb_logblocks);
	to->sb_versionnum = be16_to_cpu(from->sb_versionnum);
	to->sb_sectsize = be16_to_cpu(from->sb_sectsize);
	to->sb_inodesize = be16_to_cpu(from->sb_inodesize);
	to->sb_inopblock = be16_to_cpu(from->sb_inopblock);
	memcpy(&to->sb_fname, &from->sb_fname, sizeof(to->sb_fname));
	to->sb_blocklog = from->sb_blocklog;
	to->sb_sectlog = from->sb_sectlog;
	to->sb_inodelog = from->sb_inodelog;
	to->sb_inopblog = from->sb_inopblog;
	to->sb_agblklog = from->sb_agblklog;
	to->sb_rextslog = from->sb_rextslog;
	to->sb_inprogress = from->sb_inprogress;
	to->sb_imax_pct = from->sb_imax_pct;
	to->sb_icount = be64_to_cpu(from->sb_icount);
	to->sb_ifree = be64_to_cpu(from->sb_ifree);
	to->sb_fdblocks = be64_to_cpu(from->sb_fdblocks);
	to->sb_frextents = be64_to_cpu(from->sb_frextents);
	to->sb_uquotino = be64_to_cpu(from->sb_uquotino);
	to->sb_gquotino = be64_to_cpu(from->sb_gquotino);
	to->sb_qflags = be16_to_cpu(from->sb_qflags);
	to->sb_flags = from->sb_flags;
	to->sb_shared_vn = from->sb_shared_vn;
	to->sb_inoalignmt = be32_to_cpu(from->sb_inoalignmt);
	to->sb_unit = be32_to_cpu(from->sb_unit);
	to->sb_width = be32_to_cpu(from->sb_width);
	to->sb_dirblklog = from->sb_dirblklog;
	to->sb_logsectlog = from->sb_logsectlog;
	to->sb_logsectsize = be16_to_cpu(from->sb_logsectsize);
	to->sb_logsunit = be32_to_cpu(from->sb_logsunit);
	to->sb_features2 = be32_to_cpu(from->sb_features2);
}
/*
 * Copy in core superblock to ondisk one.
 *
 * The fields argument is mask of superblock fields to copy.
 */
void
xfs_sb_to_disk(
	xfs_dsb_t	*to,
	xfs_sb_t	*from,
	__int64_t	fields)
{
	xfs_caddr_t	to_ptr = (xfs_caddr_t)to;
	xfs_caddr_t	from_ptr = (xfs_caddr_t)from;
	xfs_sb_field_t	f;
	int		first;
	int		size;

	while (fields) {
		f = (xfs_sb_field_t)xfs_lowbit64((__uint64_t)fields);
		first = xfs_sb_info[f].offset;
		size = xfs_sb_info[f + 1].offset - first;

		ASSERT(xfs_sb_info[f].type == 0 || xfs_sb_info[f].type == 1);

		if (size == 1 || xfs_sb_info[f].type == 1) {
			memcpy(to_ptr + first, from_ptr + first, size);
		} else {
			switch (size) {
			case 2:
				*(__be16 *)(to_ptr + first) =
					cpu_to_be16(*(__u16 *)(from_ptr + first));
				break;
			case 4:
				*(__be32 *)(to_ptr + first) =
					cpu_to_be32(*(__u32 *)(from_ptr + first));
				break;
			case 8:
				*(__be64 *)(to_ptr + first) =
					cpu_to_be64(*(__u64 *)(from_ptr + first));
				break;
			default:
				ASSERT(0);
			}
		}

		fields &= ~(1LL << f);
	}
}
/*
 * Does the initial read of the superblock.
 */
int
xfs_readsb(xfs_mount_t *mp, int flags)
{
	unsigned int	sector_size;
	unsigned int	extra_flags;
	xfs_buf_t	*bp;
	int		error;

	ASSERT(mp->m_sb_bp == NULL);
	ASSERT(mp->m_ddev_targp != NULL);

	/*
	 * Allocate a (locked) buffer to hold the superblock.
	 * This will be kept around at all times to optimize
	 * access to the superblock.
	 */
	sector_size = xfs_getsize_buftarg(mp->m_ddev_targp);
	extra_flags = XFS_BUF_LOCK | XFS_BUF_MANAGE | XFS_BUF_MAPPED;

	bp = xfs_buf_read_flags(mp->m_ddev_targp, XFS_SB_DADDR,
				BTOBB(sector_size), extra_flags);
	if (!bp || XFS_BUF_ISERROR(bp)) {
		xfs_fs_mount_cmn_err(flags, "SB read failed");
		error = bp ? XFS_BUF_GETERROR(bp) : ENOMEM;
		goto fail;
	}
	ASSERT(XFS_BUF_ISBUSY(bp));
	ASSERT(XFS_BUF_VALUSEMA(bp) <= 0);

	/*
	 * Initialize the mount structure from the superblock.
	 * But first do some basic consistency checking.
	 */
	xfs_sb_from_disk(&mp->m_sb, XFS_BUF_TO_SBP(bp));

	error = xfs_mount_validate_sb(mp, &(mp->m_sb), flags);
	if (error) {
		xfs_fs_mount_cmn_err(flags, "SB validate failed");
		goto fail;
	}

	/*
	 * We must be able to do sector-sized and sector-aligned IO.
	 */
	if (sector_size > mp->m_sb.sb_sectsize) {
		xfs_fs_mount_cmn_err(flags,
			"device supports only %u byte sectors (not %u)",
			sector_size, mp->m_sb.sb_sectsize);
		error = ENOSYS;
		goto fail;
	}

	/*
	 * If device sector size is smaller than the superblock size,
	 * re-read the superblock so the buffer is correctly sized.
	 */
	if (sector_size < mp->m_sb.sb_sectsize) {
		XFS_BUF_UNMANAGE(bp);
		xfs_buf_relse(bp);
		sector_size = mp->m_sb.sb_sectsize;
		bp = xfs_buf_read_flags(mp->m_ddev_targp, XFS_SB_DADDR,
					BTOBB(sector_size), extra_flags);
		if (!bp || XFS_BUF_ISERROR(bp)) {
			xfs_fs_mount_cmn_err(flags, "SB re-read failed");
			error = bp ? XFS_BUF_GETERROR(bp) : ENOMEM;
			goto fail;
		}
		ASSERT(XFS_BUF_ISBUSY(bp));
		ASSERT(XFS_BUF_VALUSEMA(bp) <= 0);
	}

	/* Initialize per-cpu counters */
	xfs_icsb_reinit_counters(mp);

	mp->m_sb_bp = bp;
	xfs_buf_relse(bp);
	ASSERT(XFS_BUF_VALUSEMA(bp) > 0);
	return 0;

 fail:
	if (bp) {
		XFS_BUF_UNMANAGE(bp);
		xfs_buf_relse(bp);
	}
	return error;
}
/*
 * Mount initialization code establishing various mount
 * fields from the superblock associated with the given
 * mount structure.
 */
STATIC void
xfs_mount_common(xfs_mount_t *mp, xfs_sb_t *sbp)
{
	int	i;

	mp->m_agfrotor = mp->m_agirotor = 0;
	spin_lock_init(&mp->m_agirotor_lock);
	mp->m_maxagi = mp->m_sb.sb_agcount;
	mp->m_blkbit_log = sbp->sb_blocklog + XFS_NBBYLOG;
	mp->m_blkbb_log = sbp->sb_blocklog - BBSHIFT;
	mp->m_sectbb_log = sbp->sb_sectlog - BBSHIFT;
	mp->m_agno_log = xfs_highbit32(sbp->sb_agcount - 1) + 1;
	mp->m_agino_log = sbp->sb_inopblog + sbp->sb_agblklog;
	mp->m_litino = sbp->sb_inodesize -
		((uint)sizeof(xfs_dinode_core_t) + (uint)sizeof(xfs_agino_t));
	mp->m_blockmask = sbp->sb_blocksize - 1;
	mp->m_blockwsize = sbp->sb_blocksize >> XFS_WORDLOG;
	mp->m_blockwmask = mp->m_blockwsize - 1;
	INIT_LIST_HEAD(&mp->m_del_inodes);

	/*
	 * Setup for attributes, in case they get created.
	 * This value is for inodes getting attributes for the first time,
	 * the per-inode value is for old attribute values.
	 */
	ASSERT(sbp->sb_inodesize >= 256 && sbp->sb_inodesize <= 2048);
	switch (sbp->sb_inodesize) {
	case 256:
		mp->m_attroffset = XFS_LITINO(mp) -
				   XFS_BMDR_SPACE_CALC(MINABTPTRS);
		break;
	case 512:
	case 1024:
	case 2048:
		mp->m_attroffset = XFS_BMDR_SPACE_CALC(6 * MINABTPTRS);
		break;
	default:
		ASSERT(0);
	}
	ASSERT(mp->m_attroffset < XFS_LITINO(mp));

	for (i = 0; i < 2; i++) {
		mp->m_alloc_mxr[i] = XFS_BTREE_BLOCK_MAXRECS(sbp->sb_blocksize,
		mp->m_alloc_mnr[i] = XFS_BTREE_BLOCK_MINRECS(sbp->sb_blocksize,
	}
	for (i = 0; i < 2; i++) {
		mp->m_bmap_dmxr[i] = XFS_BTREE_BLOCK_MAXRECS(sbp->sb_blocksize,
		mp->m_bmap_dmnr[i] = XFS_BTREE_BLOCK_MINRECS(sbp->sb_blocksize,
	}
	for (i = 0; i < 2; i++) {
		mp->m_inobt_mxr[i] = XFS_BTREE_BLOCK_MAXRECS(sbp->sb_blocksize,
		mp->m_inobt_mnr[i] = XFS_BTREE_BLOCK_MINRECS(sbp->sb_blocksize,
	}

	mp->m_bsize = XFS_FSB_TO_BB(mp, 1);
	mp->m_ialloc_inos = (int)MAX((__uint16_t)XFS_INODES_PER_CHUNK,
	mp->m_ialloc_blks = mp->m_ialloc_inos >> sbp->sb_inopblog;
}
/*
 * xfs_initialize_perag_data
 *
 * Read in each per-ag structure so we can count up the number of
 * allocated inodes, free inodes and used filesystem blocks as this
 * information is no longer persistent in the superblock. Once we have
 * this information, write it into the in-core superblock structure.
 */
STATIC int
xfs_initialize_perag_data(xfs_mount_t *mp, xfs_agnumber_t agcount)
{
	xfs_agnumber_t	index;
	xfs_perag_t	*pag;
	xfs_sb_t	*sbp = &mp->m_sb;
	uint64_t	ifree = 0;
	uint64_t	ialloc = 0;
	uint64_t	bfree = 0;
	uint64_t	bfreelst = 0;
	uint64_t	btree = 0;
	int		error;

	for (index = 0; index < agcount; index++) {
		/*
		 * read the agf, then the agi. This gets us
		 * all the information we need and populates the
		 * per-ag structures for us.
		 */
		error = xfs_alloc_pagf_init(mp, NULL, index, 0);
		if (error)
			return error;

		error = xfs_ialloc_pagi_init(mp, NULL, index);
		if (error)
			return error;
		pag = &mp->m_perag[index];
		ifree += pag->pagi_freecount;
		ialloc += pag->pagi_count;
		bfree += pag->pagf_freeblks;
		bfreelst += pag->pagf_flcount;
		btree += pag->pagf_btreeblks;
	}
	/*
	 * Overwrite incore superblock counters with just-read data
	 */
	spin_lock(&mp->m_sb_lock);
	sbp->sb_ifree = ifree;
	sbp->sb_icount = ialloc;
	sbp->sb_fdblocks = bfree + bfreelst + btree;
	spin_unlock(&mp->m_sb_lock);

	/* Fixup the per-cpu counters as well. */
	xfs_icsb_reinit_counters(mp);

	return 0;
}
/*
 * Update alignment values based on mount options and sb values
 */
STATIC int
xfs_update_alignment(xfs_mount_t *mp, int mfsi_flags, __uint64_t *update_flags)
{
	xfs_sb_t	*sbp = &(mp->m_sb);

	if (mp->m_dalign && !(mfsi_flags & XFS_MFSI_SECOND)) {
		/*
		 * If stripe unit and stripe width are not multiples
		 * of the fs blocksize turn off alignment.
		 */
		if ((BBTOB(mp->m_dalign) & mp->m_blockmask) ||
		    (BBTOB(mp->m_swidth) & mp->m_blockmask)) {
			if (mp->m_flags & XFS_MOUNT_RETERR) {
				cmn_err(CE_WARN,
					"XFS: alignment check 1 failed");
				return XFS_ERROR(EINVAL);
			}
			mp->m_dalign = mp->m_swidth = 0;
		} else {
			/*
			 * Convert the stripe unit and width to FSBs.
			 */
			mp->m_dalign = XFS_BB_TO_FSBT(mp, mp->m_dalign);
			if (mp->m_dalign && (sbp->sb_agblocks % mp->m_dalign)) {
				if (mp->m_flags & XFS_MOUNT_RETERR) {
					return XFS_ERROR(EINVAL);
				}
				xfs_fs_cmn_err(CE_WARN, mp,
"stripe alignment turned off: sunit(%d)/swidth(%d) incompatible with agsize(%d)",
					mp->m_dalign, mp->m_swidth,
					sbp->sb_agblocks);

				mp->m_dalign = 0;
				mp->m_swidth = 0;
			} else if (mp->m_dalign) {
				mp->m_swidth = XFS_BB_TO_FSBT(mp, mp->m_swidth);
			} else {
				if (mp->m_flags & XFS_MOUNT_RETERR) {
					xfs_fs_cmn_err(CE_WARN, mp,
"stripe alignment turned off: sunit(%d) less than bsize(%d)",
						mp->m_dalign,
						mp->m_sb.sb_blocksize);
					return XFS_ERROR(EINVAL);
				}
				mp->m_swidth = 0;
			}
		}

		/*
		 * Update superblock with new values
		 * and log changes
		 */
		if (XFS_SB_VERSION_HASDALIGN(sbp)) {
			if (sbp->sb_unit != mp->m_dalign) {
				sbp->sb_unit = mp->m_dalign;
				*update_flags |= XFS_SB_UNIT;
			}
			if (sbp->sb_width != mp->m_swidth) {
				sbp->sb_width = mp->m_swidth;
				*update_flags |= XFS_SB_WIDTH;
			}
		}
	} else if ((mp->m_flags & XFS_MOUNT_NOALIGN) != XFS_MOUNT_NOALIGN &&
		    XFS_SB_VERSION_HASDALIGN(&mp->m_sb)) {
		mp->m_dalign = sbp->sb_unit;
		mp->m_swidth = sbp->sb_width;
	}

	return 0;
}
/*
 * Set the maximum inode count for this filesystem
 */
STATIC void
xfs_set_maxicount(xfs_mount_t *mp)
{
	xfs_sb_t	*sbp = &(mp->m_sb);
	__uint64_t	icount;

	if (sbp->sb_imax_pct) {
		/*
		 * Make sure the maximum inode count is a multiple
		 * of the units we allocate inodes in.
		 */
		icount = sbp->sb_dblocks * sbp->sb_imax_pct;
		do_div(icount, 100);
		do_div(icount, mp->m_ialloc_blks);
		mp->m_maxicount = (icount * mp->m_ialloc_blks) <<
				   sbp->sb_inopblog;
	} else {
		mp->m_maxicount = 0;
	}
}
/*
 * Set the default minimum read and write sizes unless
 * already specified in a mount option.
 * We use smaller I/O sizes when the file system
 * is being used for NFS service (wsync mount option).
 */
STATIC void
xfs_set_rw_sizes(xfs_mount_t *mp)
{
	xfs_sb_t	*sbp = &(mp->m_sb);
	int		readio_log, writeio_log;

	if (!(mp->m_flags & XFS_MOUNT_DFLT_IOSIZE)) {
		if (mp->m_flags & XFS_MOUNT_WSYNC) {
			readio_log = XFS_WSYNC_READIO_LOG;
			writeio_log = XFS_WSYNC_WRITEIO_LOG;
		} else {
			readio_log = XFS_READIO_LOG_LARGE;
			writeio_log = XFS_WRITEIO_LOG_LARGE;
		}
	} else {
		readio_log = mp->m_readio_log;
		writeio_log = mp->m_writeio_log;
	}

	if (sbp->sb_blocklog > readio_log) {
		mp->m_readio_log = sbp->sb_blocklog;
	} else {
		mp->m_readio_log = readio_log;
	}
	mp->m_readio_blocks = 1 << (mp->m_readio_log - sbp->sb_blocklog);
	if (sbp->sb_blocklog > writeio_log) {
		mp->m_writeio_log = sbp->sb_blocklog;
	} else {
		mp->m_writeio_log = writeio_log;
	}
	mp->m_writeio_blocks = 1 << (mp->m_writeio_log - sbp->sb_blocklog);
}
/*
 * Set whether we're using inode alignment.
 */
STATIC void
xfs_set_inoalignment(xfs_mount_t *mp)
{
	if (XFS_SB_VERSION_HASALIGN(&mp->m_sb) &&
	    mp->m_sb.sb_inoalignmt >=
	    XFS_B_TO_FSBT(mp, mp->m_inode_cluster_size))
		mp->m_inoalign_mask = mp->m_sb.sb_inoalignmt - 1;
	else
		mp->m_inoalign_mask = 0;
	/*
	 * If we are using stripe alignment, check whether
	 * the stripe unit is a multiple of the inode alignment
	 */
	if (mp->m_dalign && mp->m_inoalign_mask &&
	    !(mp->m_dalign & mp->m_inoalign_mask))
		mp->m_sinoalign = mp->m_dalign;
	else
		mp->m_sinoalign = 0;
}
/*
 * Check that the data (and log if separate) are an ok size.
 */
STATIC int
xfs_check_sizes(xfs_mount_t *mp, int mfsi_flags)
{
	xfs_buf_t	*bp;
	xfs_daddr_t	d;
	int		error;

	d = (xfs_daddr_t)XFS_FSB_TO_BB(mp, mp->m_sb.sb_dblocks);
	if (XFS_BB_TO_FSB(mp, d) != mp->m_sb.sb_dblocks) {
		cmn_err(CE_WARN, "XFS: size check 1 failed");
		return XFS_ERROR(E2BIG);
	}
	error = xfs_read_buf(mp, mp->m_ddev_targp,
			     d - XFS_FSS_TO_BB(mp, 1),
			     XFS_FSS_TO_BB(mp, 1), 0, &bp);
	if (!error) {
		xfs_buf_relse(bp);
	} else {
		cmn_err(CE_WARN, "XFS: size check 2 failed");
		if (error == ENOSPC)
			error = XFS_ERROR(E2BIG);
		return error;
	}

	if (((mfsi_flags & XFS_MFSI_CLIENT) == 0) &&
	    mp->m_logdev_targp != mp->m_ddev_targp) {
		d = (xfs_daddr_t)XFS_FSB_TO_BB(mp, mp->m_sb.sb_logblocks);
		if (XFS_BB_TO_FSB(mp, d) != mp->m_sb.sb_logblocks) {
			cmn_err(CE_WARN, "XFS: size check 3 failed");
			return XFS_ERROR(E2BIG);
		}
		error = xfs_read_buf(mp, mp->m_logdev_targp,
				     d - XFS_FSB_TO_BB(mp, 1),
				     XFS_FSB_TO_BB(mp, 1), 0, &bp);
		if (!error) {
			xfs_buf_relse(bp);
		} else {
			cmn_err(CE_WARN, "XFS: size check 3 failed");
			if (error == ENOSPC)
				error = XFS_ERROR(E2BIG);
			return error;
		}
	}
	return 0;
}
/*
 * This function does the following on an initial mount of a file system:
 *	- reads the superblock from disk and init the mount struct
 *	- if we're a 32-bit kernel, do a size check on the superblock
 *		so we don't mount terabyte filesystems
 *	- init mount struct realtime fields
 *	- allocate inode hash table for fs
 *	- init directory manager
 *	- perform recovery and init the log manager
 */
	xfs_sb_t	*sbp = &(mp->m_sb);
	xfs_inode_t	*rip;
	bhv_vnode_t	*rvp = NULL;
	__uint64_t	resblks;
	__int64_t	update_flags = 0LL;
	uint		quotamount, quotaflags;
	int		error = 0;
	int		uuid_mounted = 0;
	xfs_agnumber_t	agno;

	if (mp->m_sb_bp == NULL) {
		error = xfs_readsb(mp, mfsi_flags);
		if (error)
			return error;
	}
	xfs_mount_common(mp, sbp);

	/*
	 * Check if sb_agblocks is aligned at stripe boundary
	 * If sb_agblocks is NOT aligned turn off m_dalign since
	 * allocator alignment is within an ag, therefore ag has
	 * to be aligned at stripe boundary.
	 */
	error = xfs_update_alignment(mp, mfsi_flags, &update_flags);

	xfs_alloc_compute_maxlevels(mp);
	xfs_bmap_compute_maxlevels(mp, XFS_DATA_FORK);
	xfs_bmap_compute_maxlevels(mp, XFS_ATTR_FORK);
	xfs_ialloc_compute_maxlevels(mp);

	xfs_set_maxicount(mp);

	mp->m_maxioffset = xfs_max_file_offset(sbp->sb_blocklog);

	/*
	 * XFS uses the uuid from the superblock as the unique
	 * identifier for fsid.  We can not use the uuid from the volume
	 * since a single partition filesystem is identical to a single
	 * partition volume/filesystem.
	 */
	if ((mfsi_flags & XFS_MFSI_SECOND) == 0 &&
	    (mp->m_flags & XFS_MOUNT_NOUUID) == 0) {
		if (xfs_uuid_mount(mp)) {
			error = XFS_ERROR(EINVAL);
		}
	}

	/*
	 * Set the minimum read and write sizes
	 */
	xfs_set_rw_sizes(mp);

	/*
	 * Set the inode cluster size.
	 * This may still be overridden by the file system
	 * block size if it is larger than the chosen cluster size.
	 */
	mp->m_inode_cluster_size = XFS_INODE_BIG_CLUSTER_SIZE;

	/*
	 * Set inode alignment fields
	 */
	xfs_set_inoalignment(mp);

	/*
	 * Check that the data (and log if separate) are an ok size.
	 */
	error = xfs_check_sizes(mp, mfsi_flags);

	/*
	 * Initialize realtime fields in the mount structure
	 */
	error = xfs_rtmount_init(mp);
	if (error) {
		cmn_err(CE_WARN, "XFS: RT mount failed");
	}

	/*
	 * For client case we are done now
	 */
	if (mfsi_flags & XFS_MFSI_CLIENT) {
		return 0;
	}

	/*
	 *  Copies the low order bits of the timestamp and the randomly
	 *  set "sequence" number out of a UUID.
	 */
	uuid_getnodeuniq(&sbp->sb_uuid, mp->m_fixedfsid);

	mp->m_dmevmask = 0;	/* not persistent; set after each mount */

	/*
	 * Initialize the attribute manager's entries.
	 */
	mp->m_attr_magicpct = (mp->m_sb.sb_blocksize * 37) / 100;

	/*
	 * Initialize the precomputed transaction reservations values.
	 */

	/*
	 * Allocate and initialize the per-ag data.
	 */
	init_rwsem(&mp->m_peraglock);
	mp->m_perag =
		kmem_zalloc(sbp->sb_agcount * sizeof(xfs_perag_t), KM_SLEEP);

	mp->m_maxagi = xfs_initialize_perag(mp, sbp->sb_agcount);

	/*
	 * log's mount-time initialization. Perform 1st part recovery if needed
	 */
	if (likely(sbp->sb_logblocks > 0)) {	/* check for volume case */
		error = xfs_log_mount(mp, mp->m_logdev_targp,
				      XFS_FSB_TO_DADDR(mp, sbp->sb_logstart),
				      XFS_FSB_TO_BB(mp, sbp->sb_logblocks));
		if (error) {
			cmn_err(CE_WARN, "XFS: log mount failed");
		}
	} else {	/* No log has been defined */
		cmn_err(CE_WARN, "XFS: no log defined");
		XFS_ERROR_REPORT("xfs_mountfs_int(1)", XFS_ERRLEVEL_LOW, mp);
		error = XFS_ERROR(EFSCORRUPTED);
	}

	/*
	 * Now the log is mounted, we know if it was an unclean shutdown or
	 * not. If it was, with the first phase of recovery completed, we
	 * have consistent AG blocks on disk. We have not recovered EFIs yet,
	 * but they are recovered transactionally in the second recovery phase
	 * later.
	 *
	 * Hence we can safely re-initialise incore superblock counters from
	 * the per-ag data. These may not be correct if the filesystem was not
	 * cleanly unmounted, so we need to wait for recovery to finish before
	 * doing this.
	 *
	 * If the filesystem was cleanly unmounted, then we can trust the
	 * values in the superblock to be correct and we don't need to do
	 * anything here.
	 *
	 * If we are currently making the filesystem, the initialisation will
	 * fail as the perag data is in an undefined state.
	 */
	if (xfs_sb_version_haslazysbcount(&mp->m_sb) &&
	    !XFS_LAST_UNMOUNT_WAS_CLEAN(mp) &&
	    !mp->m_sb.sb_inprogress) {
		error = xfs_initialize_perag_data(mp, sbp->sb_agcount);
	}

	/*
	 * Get and sanity-check the root inode.
	 * Save the pointer to it in the mount structure.
	 */
	error = xfs_iget(mp, NULL, sbp->sb_rootino, 0, XFS_ILOCK_EXCL, &rip, 0);
	if (error) {
		cmn_err(CE_WARN, "XFS: failed to read root inode");
	}

	ASSERT(rip != NULL);
	rvp = XFS_ITOV(rip);

	if (unlikely((rip->i_d.di_mode & S_IFMT) != S_IFDIR)) {
		cmn_err(CE_WARN, "XFS: corrupted root inode");
		cmn_err(CE_WARN, "Device %s - root %llu is not a directory",
			XFS_BUFTARG_NAME(mp->m_ddev_targp),
			(unsigned long long)rip->i_ino);
		xfs_iunlock(rip, XFS_ILOCK_EXCL);
		XFS_ERROR_REPORT("xfs_mountfs_int(2)", XFS_ERRLEVEL_LOW,
				 mp);
		error = XFS_ERROR(EFSCORRUPTED);
	}
	mp->m_rootip = rip;	/* save it */

	xfs_iunlock(rip, XFS_ILOCK_EXCL);

	/*
	 * Initialize realtime inode pointers in the mount structure
	 */
	error = xfs_rtmount_inodes(mp);
	if (error) {
		/*
		 * Free up the root inode.
		 */
		cmn_err(CE_WARN, "XFS: failed to read RT inodes");
	}

	/*
	 * If fs is not mounted readonly, then update the superblock
	 * unit and width changes.
	 */
	if (update_flags && !(mp->m_flags & XFS_MOUNT_RDONLY))
		xfs_mount_log_sbunit(mp, update_flags);

	/*
	 * Initialise the XFS quota management subsystem for this mount
	 */
	error = XFS_QM_INIT(mp, &quotamount, &quotaflags);

	/*
	 * Finish recovering the file system.  This part needed to be
	 * delayed until after the root and real-time bitmap inodes
	 * were consistently read in.
	 */
	error = xfs_log_mount_finish(mp, mfsi_flags);
	if (error) {
		cmn_err(CE_WARN, "XFS: log mount finish failed");
	}

	/*
	 * Complete the quota initialisation, post-log-replay component.
	 */
	error = XFS_QM_MOUNT(mp, quotamount, quotaflags, mfsi_flags);

	/*
	 * Now we are mounted, reserve a small amount of unused space for
	 * privileged transactions. This is needed so that transaction
	 * space required for critical operations can dip into this pool
	 * when at ENOSPC. This is needed for operations like create with
	 * attr, unwritten extent conversion at ENOSPC, etc. Data allocations
	 * are not allowed to use this reserved space.
	 *
	 * We default to 5% or 1024 fsbs of space reserved, whichever is smaller.
	 * This may drive us straight to ENOSPC on mount, but that implies
	 * we were already there on the last unmount.
	 */
	resblks = mp->m_sb.sb_dblocks;
	do_div(resblks, 20);
	resblks = min_t(__uint64_t, resblks, 1024);
	xfs_reserve_blocks(mp, &resblks, NULL);

	return 0;

	/*
	 * Free up the root inode.
	 */
	xfs_log_unmount_dealloc(mp);

	for (agno = 0; agno < sbp->sb_agcount; agno++)
		if (mp->m_perag[agno].pagb_list)
			kmem_free(mp->m_perag[agno].pagb_list,
			    sizeof(xfs_perag_busy_t) * XFS_PAGB_NUM_SLOTS);
	kmem_free(mp->m_perag, sbp->sb_agcount * sizeof(xfs_perag_t));

	if (uuid_mounted)
		xfs_uuid_unmount(mp);
	return error;
}
/*
 * This flushes out the inodes, dquots and the superblock, unmounts the
 * log and makes sure that incore structures are freed.
 */
int
xfs_unmountfs(xfs_mount_t *mp, struct cred *cr)
{
	__uint64_t	resblks = 0;

	/*
	 * We can potentially deadlock here if we have an inode cluster
	 * that has been freed has its buffer still pinned in memory because
	 * the transaction is still sitting in a iclog. The stale inodes
	 * on that buffer will have their flush locks held until the
	 * transaction hits the disk and the callbacks run. The inode
	 * flush takes the flush lock unconditionally and with nothing to
	 * push out the iclog we will never get that unlocked. Hence we
	 * need to force the log first.
	 */
	xfs_log_force(mp, (xfs_lsn_t)0, XFS_LOG_FORCE | XFS_LOG_SYNC);

	XFS_QM_DQPURGEALL(mp, XFS_QMOPT_QUOTALL | XFS_QMOPT_UMOUNTING);

	/*
	 * Flush out the log synchronously so that we know for sure
	 * that nothing is pinned.  This is important because bflush()
	 * will skip pinned buffers.
	 */
	xfs_log_force(mp, (xfs_lsn_t)0, XFS_LOG_FORCE | XFS_LOG_SYNC);

	xfs_binval(mp->m_ddev_targp);
	if (mp->m_rtdev_targp) {
		xfs_binval(mp->m_rtdev_targp);
	}

	/*
	 * Unreserve any blocks we have so that when we unmount we don't account
	 * the reserved free space as used. This is really only necessary for
	 * lazy superblock counting because it trusts the incore superblock
	 * counters to be absolutely correct on clean unmount.
	 *
	 * We don't bother correcting this elsewhere for lazy superblock
	 * counting because on mount of an unclean filesystem we reconstruct the
	 * correct counter value and this is irrelevant.
	 *
	 * For non-lazy counter filesystems, this doesn't matter at all because
	 * we only ever apply deltas to the superblock and hence the incore
	 * value does not matter....
	 */
	xfs_reserve_blocks(mp, &resblks, NULL);

	xfs_log_sbcount(mp, 1);
	xfs_unmountfs_writesb(mp);
	xfs_unmountfs_wait(mp); 		/* wait for async bufs */
	xfs_log_unmount(mp);			/* Done! No more fs ops. */

	/*
	 * All inodes from this mount point should be freed.
	 */
	ASSERT(mp->m_inodes == NULL);

	xfs_unmountfs_close(mp, cr);
	if ((mp->m_flags & XFS_MOUNT_NOUUID) == 0)
		xfs_uuid_unmount(mp);

#if defined(DEBUG) || defined(INDUCE_IO_ERROR)
	xfs_errortag_clearall(mp, 0);
#endif
	return 0;
}
STATIC void
xfs_unmountfs_close(xfs_mount_t *mp, struct cred *cr)
{
	if (mp->m_logdev_targp && mp->m_logdev_targp != mp->m_ddev_targp)
		xfs_free_buftarg(mp->m_logdev_targp, 1);
	if (mp->m_rtdev_targp)
		xfs_free_buftarg(mp->m_rtdev_targp, 1);
	xfs_free_buftarg(mp->m_ddev_targp, 0);
}
STATIC void
xfs_unmountfs_wait(xfs_mount_t *mp)
{
	if (mp->m_logdev_targp != mp->m_ddev_targp)
		xfs_wait_buftarg(mp->m_logdev_targp);
	if (mp->m_rtdev_targp)
		xfs_wait_buftarg(mp->m_rtdev_targp);
	xfs_wait_buftarg(mp->m_ddev_targp);
}
int
xfs_fs_writable(xfs_mount_t *mp)
{
	return !(xfs_test_for_freeze(mp) || XFS_FORCED_SHUTDOWN(mp) ||
		(mp->m_flags & XFS_MOUNT_RDONLY));
}
/*
 * xfs_log_sbcount
 *
 * Called either periodically to keep the on disk superblock values
 * roughly up to date or from unmount to make sure the values are
 * correct on a clean unmount.
 *
 * Note this code can be called during the process of freezing, so
 * we may need to use the transaction allocator which does not
 * block when the transaction subsystem is in its frozen state.
 */
int
xfs_log_sbcount(
	xfs_mount_t	*mp,
	uint		sync)
{
	xfs_trans_t	*tp;
	int		error;

	if (!xfs_fs_writable(mp))
		return 0;

	xfs_icsb_sync_counters(mp);

	/*
	 * we don't need to do this if we are updating the superblock
	 * counters on every modification.
	 */
	if (!xfs_sb_version_haslazysbcount(&mp->m_sb))
		return 0;

	tp = _xfs_trans_alloc(mp, XFS_TRANS_SB_COUNT);
	error = xfs_trans_reserve(tp, 0, mp->m_sb.sb_sectsize + 128, 0, 0,
					XFS_DEFAULT_LOG_COUNT);
	if (error) {
		xfs_trans_cancel(tp, 0);
		return error;
	}

	xfs_mod_sb(tp, XFS_SB_IFREE | XFS_SB_ICOUNT | XFS_SB_FDBLOCKS);
	if (sync)
		xfs_trans_set_sync(tp);
	xfs_trans_commit(tp, 0);

	return 0;
}
STATIC void
xfs_mark_shared_ro(
	xfs_mount_t	*mp,
	xfs_buf_t	*bp)
{
	xfs_dsb_t	*sb = XFS_BUF_TO_SBP(bp);
	__uint16_t	version;

	if (!(sb->sb_flags & XFS_SBF_READONLY))
		sb->sb_flags |= XFS_SBF_READONLY;

	version = be16_to_cpu(sb->sb_versionnum);
	if ((version & XFS_SB_VERSION_NUMBITS) != XFS_SB_VERSION_4 ||
	    !(version & XFS_SB_VERSION_SHAREDBIT))
		version |= XFS_SB_VERSION_SHAREDBIT;
	sb->sb_versionnum = cpu_to_be16(version);
}
int
xfs_unmountfs_writesb(xfs_mount_t *mp)
{
	xfs_buf_t	*sbp;
	int		error = 0;

	/*
	 * skip superblock write if fs is read-only, or
	 * if we are doing a forced umount.
	 */
	if (!((mp->m_flags & XFS_MOUNT_RDONLY) ||
		XFS_FORCED_SHUTDOWN(mp))) {

		sbp = xfs_getsb(mp, 0);

		/*
		 * mark shared-readonly if desired
		 */
		if (mp->m_mk_sharedro)
			xfs_mark_shared_ro(mp, sbp);

		XFS_BUF_UNDONE(sbp);
		XFS_BUF_UNREAD(sbp);
		XFS_BUF_UNDELAYWRITE(sbp);
		XFS_BUF_WRITE(sbp);
		XFS_BUF_UNASYNC(sbp);
		ASSERT(XFS_BUF_TARGET(sbp) == mp->m_ddev_targp);
		xfsbdstrat(mp, sbp);
		/* Nevermind errors we might get here. */
		error = xfs_iowait(sbp);
		if (error)
			xfs_ioerror_alert("xfs_unmountfs_writesb",
					  mp, sbp, XFS_BUF_ADDR(sbp));
		if (error && mp->m_mk_sharedro)
			xfs_fs_cmn_err(CE_ALERT, mp, "Superblock write error detected while unmounting.  Filesystem may not be marked shared readonly");
	}
	return error;
}
/*
 * xfs_mod_sb() can be used to copy arbitrary changes to the
 * in-core superblock into the superblock buffer to be logged.
 * It does not provide the higher level of locking that is
 * needed to protect the in-core superblock from concurrent
 * access.
 */
void
xfs_mod_sb(xfs_trans_t *tp, __int64_t fields)
{
	xfs_buf_t	*bp;
	int		first;
	int		last;
	xfs_mount_t	*mp;
	xfs_sb_field_t	f;

	mp = tp->t_mountp;
	bp = xfs_trans_getsb(tp, mp, 0);
	first = sizeof(xfs_sb_t);
	last = 0;

	/* translate/copy */

	xfs_sb_to_disk(XFS_BUF_TO_SBP(bp), &mp->m_sb, fields);

	/* find modified range */

	f = (xfs_sb_field_t)xfs_lowbit64((__uint64_t)fields);
	ASSERT((1LL << f) & XFS_SB_MOD_BITS);
	first = xfs_sb_info[f].offset;

	f = (xfs_sb_field_t)xfs_highbit64((__uint64_t)fields);
	ASSERT((1LL << f) & XFS_SB_MOD_BITS);
	last = xfs_sb_info[f + 1].offset - 1;

	xfs_trans_log_buf(tp, bp, first, last);
}
/*
 * xfs_mod_incore_sb_unlocked() is a utility routine commonly used to apply
 * a delta to a specified field in the in-core superblock.  Simply
 * switch on the field indicated and apply the delta to that field.
 * Fields are not allowed to dip below zero, so if the delta would
 * do this do not apply it and return EINVAL.
 *
 * The m_sb_lock must be held when this routine is called.
 */
STATIC int
xfs_mod_incore_sb_unlocked(
	xfs_mount_t	*mp,
	xfs_sb_field_t	field,
	int64_t		delta,
	int		rsvd)
{
	int		scounter;	/* short counter for 32 bit fields */
	long long	lcounter;	/* long counter for 64 bit fields */
	long long	res_used, rem;

	/*
	 * With the in-core superblock spin lock held, switch
	 * on the indicated field.  Apply the delta to the
	 * proper field.  If the fields value would dip below
	 * 0, then do not apply the delta and return EINVAL.
	 */
	switch (field) {
	case XFS_SBS_ICOUNT:
		lcounter = (long long)mp->m_sb.sb_icount;
		lcounter += delta;
		if (lcounter < 0) {
			return XFS_ERROR(EINVAL);
		}
		mp->m_sb.sb_icount = lcounter;
		return 0;
	case XFS_SBS_IFREE:
		lcounter = (long long)mp->m_sb.sb_ifree;
		lcounter += delta;
		if (lcounter < 0) {
			return XFS_ERROR(EINVAL);
		}
		mp->m_sb.sb_ifree = lcounter;
		return 0;
	case XFS_SBS_FDBLOCKS:
		lcounter = (long long)
			mp->m_sb.sb_fdblocks - XFS_ALLOC_SET_ASIDE(mp);
		res_used = (long long)(mp->m_resblks - mp->m_resblks_avail);

		if (delta > 0) {		/* Putting blocks back */
			if (res_used > delta) {
				mp->m_resblks_avail += delta;
			} else {
				rem = delta - res_used;
				mp->m_resblks_avail = mp->m_resblks;
				lcounter += rem;
			}
		} else {		/* Taking blocks away */

			lcounter += delta;

		/*
		 * If were out of blocks, use any available reserved blocks if
		 * were allowed to.
		 */

			if (lcounter < 0) {
				if (rsvd) {
					lcounter = (long long)mp->m_resblks_avail + delta;
					if (lcounter < 0) {
						return XFS_ERROR(ENOSPC);
					}
					mp->m_resblks_avail = lcounter;
					return 0;
				} else {	/* not reserved */
					return XFS_ERROR(ENOSPC);
				}
			}
		}

		mp->m_sb.sb_fdblocks = lcounter + XFS_ALLOC_SET_ASIDE(mp);
		return 0;
	case XFS_SBS_FREXTENTS:
		lcounter = (long long)mp->m_sb.sb_frextents;
		lcounter += delta;
		if (lcounter < 0) {
			return XFS_ERROR(ENOSPC);
		}
		mp->m_sb.sb_frextents = lcounter;
		return 0;
	case XFS_SBS_DBLOCKS:
		lcounter = (long long)mp->m_sb.sb_dblocks;
		lcounter += delta;
		if (lcounter < 0) {
			return XFS_ERROR(EINVAL);
		}
		mp->m_sb.sb_dblocks = lcounter;
		return 0;
	case XFS_SBS_AGCOUNT:
		scounter = mp->m_sb.sb_agcount;
		scounter += delta;
		if (scounter < 0) {
			return XFS_ERROR(EINVAL);
		}
		mp->m_sb.sb_agcount = scounter;
		return 0;
	case XFS_SBS_IMAX_PCT:
		scounter = mp->m_sb.sb_imax_pct;
		scounter += delta;
		if (scounter < 0) {
			return XFS_ERROR(EINVAL);
		}
		mp->m_sb.sb_imax_pct = scounter;
		return 0;
	case XFS_SBS_REXTSIZE:
		scounter = mp->m_sb.sb_rextsize;
		scounter += delta;
		if (scounter < 0) {
			return XFS_ERROR(EINVAL);
		}
		mp->m_sb.sb_rextsize = scounter;
		return 0;
	case XFS_SBS_RBMBLOCKS:
		scounter = mp->m_sb.sb_rbmblocks;
		scounter += delta;
		if (scounter < 0) {
			return XFS_ERROR(EINVAL);
		}
		mp->m_sb.sb_rbmblocks = scounter;
		return 0;
	case XFS_SBS_RBLOCKS:
		lcounter = (long long)mp->m_sb.sb_rblocks;
		lcounter += delta;
		if (lcounter < 0) {
			return XFS_ERROR(EINVAL);
		}
		mp->m_sb.sb_rblocks = lcounter;
		return 0;
	case XFS_SBS_REXTENTS:
		lcounter = (long long)mp->m_sb.sb_rextents;
		lcounter += delta;
		if (lcounter < 0) {
			return XFS_ERROR(EINVAL);
		}
		mp->m_sb.sb_rextents = lcounter;
		return 0;
	case XFS_SBS_REXTSLOG:
		scounter = mp->m_sb.sb_rextslog;
		scounter += delta;
		if (scounter < 0) {
			return XFS_ERROR(EINVAL);
		}
		mp->m_sb.sb_rextslog = scounter;
		return 0;
	default:
		ASSERT(0);
		return XFS_ERROR(EINVAL);
	}
}
/*
 * xfs_mod_incore_sb() is used to change a field in the in-core
 * superblock structure by the specified delta.  This modification
 * is protected by the m_sb_lock.  Just use the xfs_mod_incore_sb_unlocked()
 * routine to do the work.
 */
int
xfs_mod_incore_sb(
	xfs_mount_t	*mp,
	xfs_sb_field_t	field,
	int64_t		delta,
	int		rsvd)
{
	int	status;

	/* check for per-cpu counters */
	switch (field) {
#ifdef HAVE_PERCPU_SB
	case XFS_SBS_ICOUNT:
	case XFS_SBS_IFREE:
	case XFS_SBS_FDBLOCKS:
		if (!(mp->m_flags & XFS_MOUNT_NO_PERCPU_SB)) {
			status = xfs_icsb_modify_counters(mp, field,
							delta, rsvd);
			break;
		}
		/* FALLTHROUGH */
#endif
	default:
		spin_lock(&mp->m_sb_lock);
		status = xfs_mod_incore_sb_unlocked(mp, field, delta, rsvd);
		spin_unlock(&mp->m_sb_lock);
		break;
	}

	return status;
}
/*
 * xfs_mod_incore_sb_batch() is used to change more than one field
 * in the in-core superblock structure at a time.  This modification
 * is protected by a lock internal to this module.  The fields and
 * changes to those fields are specified in the array of xfs_mod_sb
 * structures passed in.
 *
 * Either all of the specified deltas will be applied or none of
 * them will.  If any modified field dips below 0, then all modifications
 * will be backed out and EINVAL will be returned.
 */
int
xfs_mod_incore_sb_batch(xfs_mount_t *mp, xfs_mod_sb_t *msb, uint nmsb, int rsvd)
{
	int		status = 0;
	xfs_mod_sb_t	*msbp;

	/*
	 * Loop through the array of mod structures and apply each
	 * individually.  If any fail, then back out all those
	 * which have already been applied.  Do all of this within
	 * the scope of the m_sb_lock so that all of the changes will
	 * be atomic.
	 */
	spin_lock(&mp->m_sb_lock);
	msbp = &msb[0];
	for (msbp = &msbp[0]; msbp < (msb + nmsb); msbp++) {
		/*
		 * Apply the delta at index n.  If it fails, break
		 * from the loop so we'll fall into the undo loop
		 * below.
		 */
		switch (msbp->msb_field) {
#ifdef HAVE_PERCPU_SB
		case XFS_SBS_ICOUNT:
		case XFS_SBS_IFREE:
		case XFS_SBS_FDBLOCKS:
			if (!(mp->m_flags & XFS_MOUNT_NO_PERCPU_SB)) {
				spin_unlock(&mp->m_sb_lock);
				status = xfs_icsb_modify_counters(mp,
						msbp->msb_field,
						msbp->msb_delta, rsvd);
				spin_lock(&mp->m_sb_lock);
				break;
			}
			/* FALLTHROUGH */
#endif
		default:
			status = xfs_mod_incore_sb_unlocked(mp,
						msbp->msb_field,
						msbp->msb_delta, rsvd);
			break;
		}

		if (status != 0) {
			break;
		}
	}

	/*
	 * If we didn't complete the loop above, then back out
	 * any changes made to the superblock.  If you add code
	 * between the loop above and here, make sure that you
	 * preserve the value of status. Loop back until
	 * we step below the beginning of the array.  Make sure
	 * we don't touch anything back there.
	 */
	if (status != 0) {
		msbp--;
		while (msbp >= msb) {
			switch (msbp->msb_field) {
#ifdef HAVE_PERCPU_SB
			case XFS_SBS_ICOUNT:
			case XFS_SBS_IFREE:
			case XFS_SBS_FDBLOCKS:
				if (!(mp->m_flags & XFS_MOUNT_NO_PERCPU_SB)) {
					spin_unlock(&mp->m_sb_lock);
					status = xfs_icsb_modify_counters(mp,
					spin_lock(&mp->m_sb_lock);
					break;
				}
				/* FALLTHROUGH */
#endif
			default:
				status = xfs_mod_incore_sb_unlocked(mp,
				break;
			}
			ASSERT(status == 0);
			msbp--;
		}
	}
	spin_unlock(&mp->m_sb_lock);
	return status;
}
/*
 * xfs_getsb() is called to obtain the buffer for the superblock.
 * The buffer is returned locked and read in from disk.
 * The buffer should be released with a call to xfs_brelse().
 *
 * If the flags parameter is BUF_TRYLOCK, then we'll only return
 * the superblock buffer if it can be locked without sleeping.
 * If it can't then we'll return NULL.
 */
	ASSERT(mp->m_sb_bp != NULL);
	bp = mp->m_sb_bp;
	if (flags & XFS_BUF_TRYLOCK) {
		if (!XFS_BUF_CPSEMA(bp)) {
			return NULL;
		}
	} else {
		XFS_BUF_PSEMA(bp, PRIBIO);
	}
	ASSERT(XFS_BUF_ISDONE(bp));
	return bp;
}
/*
 * Used to free the superblock along various error paths.
 */
	/*
	 * Use xfs_getsb() so that the buffer will be locked
	 * when we call xfs_buf_relse().
	 */
	bp = xfs_getsb(mp, 0);
	XFS_BUF_UNMANAGE(bp);
	xfs_buf_relse(bp);
/*
 * See if the UUID is unique among mounted XFS filesystems.
 * Mount fails if UUID is nil or a FS with the same UUID is already mounted.
 */
STATIC int
xfs_uuid_mount(
	xfs_mount_t	*mp)
{
	if (uuid_is_nil(&mp->m_sb.sb_uuid)) {
		cmn_err(CE_WARN,
			"XFS: Filesystem %s has nil UUID - can't mount",
			mp->m_fsname);
		return -1;
	}
	if (!uuid_table_insert(&mp->m_sb.sb_uuid)) {
		cmn_err(CE_WARN,
			"XFS: Filesystem %s has duplicate UUID - can't mount",
			mp->m_fsname);
		return -1;
	}
	return 0;
}
1879 uuid_table_remove(&mp
->m_sb
.sb_uuid
);
/*
 * Used to log changes to the superblock unit and width fields which could
 * be altered by the mount options. Only the first superblock is updated.
 */
STATIC void
xfs_mount_log_sbunit(
	xfs_mount_t	*mp,
	__int64_t	fields)
{
	xfs_trans_t	*tp;

	ASSERT(fields & (XFS_SB_UNIT|XFS_SB_WIDTH|XFS_SB_UUID));

	tp = xfs_trans_alloc(mp, XFS_TRANS_SB_UNIT);
	if (xfs_trans_reserve(tp, 0, mp->m_sb.sb_sectsize + 128, 0, 0,
				XFS_DEFAULT_LOG_COUNT)) {
		xfs_trans_cancel(tp, 0);
		return;
	}
	xfs_mod_sb(tp, fields);
	xfs_trans_commit(tp, 0);
}
#ifdef HAVE_PERCPU_SB
/*
 * Per-cpu incore superblock counters
 *
 * Simple concept, difficult implementation
 *
 * Basically, replace the incore superblock counters with a distributed per cpu
 * counter for contended fields (e.g.  free block count).
 *
 * Difficulties arise in that the incore sb is used for ENOSPC checking, and
 * hence needs to be accurately read when we are running low on space. Hence
 * there is a method to enable and disable the per-cpu counters based on how
 * much "stuff" is available in them.
 *
 * Basically, a counter is enabled if there is enough free resource to justify
 * running a per-cpu fast-path. If the per-cpu counter runs out (i.e. a local
 * ENOSPC), then we disable the counters to synchronise all callers and
 * re-distribute the available resources.
 *
 * If, once we redistributed the available resources, we still get a failure,
 * we disable the per-cpu counter and go through the slow path.
 *
 * The slow path is the current xfs_mod_incore_sb() function.  This means that
 * when we disable a per-cpu counter, we need to drain its resources back to
 * the global superblock. We do this after disabling the counter to prevent
 * more threads from queueing up on the counter.
 *
 * Essentially, this means that we still need a lock in the fast path to enable
 * synchronisation between the global counters and the per-cpu counters. This
 * is not a problem because the lock will be local to a CPU almost all the time
 * and have little contention except when we get to ENOSPC conditions.
 *
 * Basically, this lock becomes a barrier that enables us to lock out the fast
 * path while we do things like enabling and disabling counters and
 * synchronising the counters.
 *
 * Locking rules:
 *
 *	1. m_sb_lock before picking up per-cpu locks
 *	2. per-cpu locks always picked up via for_each_online_cpu() order
 *	3. accurate counter sync requires m_sb_lock + per cpu locks
 *	4. modifying per-cpu counters requires holding per-cpu lock
 *	5. modifying global counters requires holding m_sb_lock
 *	6. enabling or disabling a counter requires holding the m_sb_lock
 *	   and _none_ of the per-cpu locks.
 *
 * Disabled counters are only ever re-enabled by a balance operation
 * that results in more free resources per CPU than a given threshold.
 * To ensure counters don't remain disabled, they are rebalanced when
 * the global resource goes above a higher threshold (i.e. some hysteresis
 * is present to prevent thrashing).
 */
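/*
 * Illustrative sketch (not a path taken verbatim by the code below): the
 * lock ordering rules above mean an accurate sync of a counter looks
 * roughly like the sequence below -- m_sb_lock first, then every per-cpu
 * lock in for_each_online_cpu() order, never the reverse:
 *
 *	spin_lock(&mp->m_sb_lock);
 *	xfs_icsb_lock_all_counters(mp);		// per-cpu locks, cpu order
 *	... fold per-cpu values into mp->m_sb ...
 *	xfs_icsb_unlock_all_counters(mp);
 *	spin_unlock(&mp->m_sb_lock);
 */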
#ifdef CONFIG_HOTPLUG_CPU
/*
 * hot-plug CPU notifier support.
 *
 * We need a notifier per filesystem as we need to be able to identify
 * the filesystem to balance the counters out. This is achieved by
 * having a notifier block embedded in the xfs_mount_t and doing pointer
 * magic to get the mount pointer from the notifier block address.
 */
STATIC int
xfs_icsb_cpu_notify(
	struct notifier_block *nfb,
	unsigned long action,
	void *hcpu)
{
	xfs_icsb_cnts_t *cntp;
	xfs_mount_t	*mp;

	mp = (xfs_mount_t *)container_of(nfb, xfs_mount_t, m_icsb_notifier);
	cntp = (xfs_icsb_cnts_t *)
			per_cpu_ptr(mp->m_sb_cnts, (unsigned long)hcpu);
	switch (action) {
	case CPU_UP_PREPARE:
	case CPU_UP_PREPARE_FROZEN:
		/* Easy Case - initialize the area and locks, and
		 * then rebalance when online does everything else for us. */
		memset(cntp, 0, sizeof(xfs_icsb_cnts_t));
		break;
	case CPU_ONLINE:
	case CPU_ONLINE_FROZEN:
		xfs_icsb_lock(mp);
		xfs_icsb_balance_counter(mp, XFS_SBS_ICOUNT, 0, 0);
		xfs_icsb_balance_counter(mp, XFS_SBS_IFREE, 0, 0);
		xfs_icsb_balance_counter(mp, XFS_SBS_FDBLOCKS, 0, 0);
		xfs_icsb_unlock(mp);
		break;
	case CPU_DEAD:
	case CPU_DEAD_FROZEN:
		/* Disable all the counters, then fold the dead cpu's
		 * count into the total on the global superblock and
		 * re-enable the counters. */
		xfs_icsb_lock(mp);
		spin_lock(&mp->m_sb_lock);
		xfs_icsb_disable_counter(mp, XFS_SBS_ICOUNT);
		xfs_icsb_disable_counter(mp, XFS_SBS_IFREE);
		xfs_icsb_disable_counter(mp, XFS_SBS_FDBLOCKS);

		mp->m_sb.sb_icount += cntp->icsb_icount;
		mp->m_sb.sb_ifree += cntp->icsb_ifree;
		mp->m_sb.sb_fdblocks += cntp->icsb_fdblocks;

		memset(cntp, 0, sizeof(xfs_icsb_cnts_t));

		xfs_icsb_balance_counter(mp, XFS_SBS_ICOUNT,
					XFS_ICSB_SB_LOCKED, 0);
		xfs_icsb_balance_counter(mp, XFS_SBS_IFREE,
					XFS_ICSB_SB_LOCKED, 0);
		xfs_icsb_balance_counter(mp, XFS_SBS_FDBLOCKS,
					XFS_ICSB_SB_LOCKED, 0);
		spin_unlock(&mp->m_sb_lock);
		xfs_icsb_unlock(mp);
		break;
	}

	return NOTIFY_OK;
}
#endif /* CONFIG_HOTPLUG_CPU */
int
xfs_icsb_init_counters(
	xfs_mount_t	*mp)
{
	xfs_icsb_cnts_t *cntp;
	int		i;

	mp->m_sb_cnts = alloc_percpu(xfs_icsb_cnts_t);
	if (mp->m_sb_cnts == NULL)
		return -ENOMEM;

#ifdef CONFIG_HOTPLUG_CPU
	mp->m_icsb_notifier.notifier_call = xfs_icsb_cpu_notify;
	mp->m_icsb_notifier.priority = 0;
	register_hotcpu_notifier(&mp->m_icsb_notifier);
#endif /* CONFIG_HOTPLUG_CPU */

	for_each_online_cpu(i) {
		cntp = (xfs_icsb_cnts_t *)per_cpu_ptr(mp->m_sb_cnts, i);
		memset(cntp, 0, sizeof(xfs_icsb_cnts_t));
	}

	mutex_init(&mp->m_icsb_mutex);

	/*
	 * start with all counters disabled so that the
	 * initial balance kicks us off correctly
	 */
	mp->m_icsb_counters = -1;
	return 0;
}
void
xfs_icsb_reinit_counters(
	xfs_mount_t	*mp)
{
	xfs_icsb_lock(mp);
	/*
	 * start with all counters disabled so that the
	 * initial balance kicks us off correctly
	 */
	mp->m_icsb_counters = -1;
	xfs_icsb_balance_counter(mp, XFS_SBS_ICOUNT, 0, 0);
	xfs_icsb_balance_counter(mp, XFS_SBS_IFREE, 0, 0);
	xfs_icsb_balance_counter(mp, XFS_SBS_FDBLOCKS, 0, 0);
	xfs_icsb_unlock(mp);
}
STATIC void
xfs_icsb_destroy_counters(
	xfs_mount_t	*mp)
{
	if (mp->m_sb_cnts) {
		unregister_hotcpu_notifier(&mp->m_icsb_notifier);
		free_percpu(mp->m_sb_cnts);
	}
	mutex_destroy(&mp->m_icsb_mutex);
}
STATIC_INLINE void
xfs_icsb_lock_cntr(
	xfs_icsb_cnts_t	*icsbp)
{
	while (test_and_set_bit(XFS_ICSB_FLAG_LOCK, &icsbp->icsb_flags)) {
		ndelay(1000);
	}
}

STATIC_INLINE void
xfs_icsb_unlock_cntr(
	xfs_icsb_cnts_t	*icsbp)
{
	clear_bit(XFS_ICSB_FLAG_LOCK, &icsbp->icsb_flags);
}
STATIC void
xfs_icsb_lock_all_counters(
	xfs_mount_t	*mp)
{
	xfs_icsb_cnts_t *cntp;
	int		i;

	for_each_online_cpu(i) {
		cntp = (xfs_icsb_cnts_t *)per_cpu_ptr(mp->m_sb_cnts, i);
		xfs_icsb_lock_cntr(cntp);
	}
}

STATIC void
xfs_icsb_unlock_all_counters(
	xfs_mount_t	*mp)
{
	xfs_icsb_cnts_t *cntp;
	int		i;

	for_each_online_cpu(i) {
		cntp = (xfs_icsb_cnts_t *)per_cpu_ptr(mp->m_sb_cnts, i);
		xfs_icsb_unlock_cntr(cntp);
	}
}
STATIC void
xfs_icsb_count(
	xfs_mount_t	*mp,
	xfs_icsb_cnts_t	*cnt,
	int		flags)
{
	xfs_icsb_cnts_t *cntp;
	int		i;

	memset(cnt, 0, sizeof(xfs_icsb_cnts_t));

	if (!(flags & XFS_ICSB_LAZY_COUNT))
		xfs_icsb_lock_all_counters(mp);

	for_each_online_cpu(i) {
		cntp = (xfs_icsb_cnts_t *)per_cpu_ptr(mp->m_sb_cnts, i);
		cnt->icsb_icount += cntp->icsb_icount;
		cnt->icsb_ifree += cntp->icsb_ifree;
		cnt->icsb_fdblocks += cntp->icsb_fdblocks;
	}

	if (!(flags & XFS_ICSB_LAZY_COUNT))
		xfs_icsb_unlock_all_counters(mp);
}
STATIC int
xfs_icsb_counter_disabled(
	xfs_mount_t	*mp,
	xfs_sb_field_t	field)
{
	ASSERT((field >= XFS_SBS_ICOUNT) && (field <= XFS_SBS_FDBLOCKS));
	return test_bit(field, &mp->m_icsb_counters);
}
STATIC int
xfs_icsb_disable_counter(
	xfs_mount_t	*mp,
	xfs_sb_field_t	field)
{
	xfs_icsb_cnts_t	cnt;

	ASSERT((field >= XFS_SBS_ICOUNT) && (field <= XFS_SBS_FDBLOCKS));

	/*
	 * If we are already disabled, then there is nothing to do
	 * here. We check before locking all the counters to avoid
	 * the expensive lock operation when being called in the
	 * slow path and the counter is already disabled. This is
	 * safe because the only time we set or clear this state is under
	 * the m_icsb_mutex.
	 */
	if (xfs_icsb_counter_disabled(mp, field))
		return 0;

	xfs_icsb_lock_all_counters(mp);
	if (!test_and_set_bit(field, &mp->m_icsb_counters)) {
		/* drain back to superblock */

		xfs_icsb_count(mp, &cnt, XFS_ICSB_SB_LOCKED|XFS_ICSB_LAZY_COUNT);
		switch (field) {
		case XFS_SBS_ICOUNT:
			mp->m_sb.sb_icount = cnt.icsb_icount;
			break;
		case XFS_SBS_IFREE:
			mp->m_sb.sb_ifree = cnt.icsb_ifree;
			break;
		case XFS_SBS_FDBLOCKS:
			mp->m_sb.sb_fdblocks = cnt.icsb_fdblocks;
			break;
		default:
			BUG();
		}
	}

	xfs_icsb_unlock_all_counters(mp);

	return 0;
}
STATIC void
xfs_icsb_enable_counter(
	xfs_mount_t	*mp,
	xfs_sb_field_t	field,
	uint64_t	count,
	uint64_t	resid)
{
	xfs_icsb_cnts_t	*cntp;
	int		i;

	ASSERT((field >= XFS_SBS_ICOUNT) && (field <= XFS_SBS_FDBLOCKS));

	xfs_icsb_lock_all_counters(mp);
	for_each_online_cpu(i) {
		cntp = per_cpu_ptr(mp->m_sb_cnts, i);
		switch (field) {
		case XFS_SBS_ICOUNT:
			cntp->icsb_icount = count + resid;
			break;
		case XFS_SBS_IFREE:
			cntp->icsb_ifree = count + resid;
			break;
		case XFS_SBS_FDBLOCKS:
			cntp->icsb_fdblocks = count + resid;
			break;
		default:
			BUG();
			break;
		}
		resid = 0;
	}
	clear_bit(field, &mp->m_icsb_counters);
	xfs_icsb_unlock_all_counters(mp);
}
STATIC void
xfs_icsb_sync_counters_flags(
	xfs_mount_t	*mp,
	int		flags)
{
	xfs_icsb_cnts_t	cnt;

	/* Pass 1: lock all counters */
	if ((flags & XFS_ICSB_SB_LOCKED) == 0)
		spin_lock(&mp->m_sb_lock);

	xfs_icsb_count(mp, &cnt, flags);

	/* Step 3: update mp->m_sb fields */
	if (!xfs_icsb_counter_disabled(mp, XFS_SBS_ICOUNT))
		mp->m_sb.sb_icount = cnt.icsb_icount;
	if (!xfs_icsb_counter_disabled(mp, XFS_SBS_IFREE))
		mp->m_sb.sb_ifree = cnt.icsb_ifree;
	if (!xfs_icsb_counter_disabled(mp, XFS_SBS_FDBLOCKS))
		mp->m_sb.sb_fdblocks = cnt.icsb_fdblocks;

	if ((flags & XFS_ICSB_SB_LOCKED) == 0)
		spin_unlock(&mp->m_sb_lock);
}
/*
 * Accurate update of per-cpu counters to incore superblock
 */
STATIC void
xfs_icsb_sync_counters(
	xfs_mount_t	*mp)
{
	xfs_icsb_sync_counters_flags(mp, 0);
}
/*
 * Balance and enable/disable counters as necessary.
 *
 * Thresholds for re-enabling counters are somewhat magic.  inode counts are
 * chosen to be the same number as single on disk allocation chunk per CPU, and
 * free blocks is something far enough zero that we aren't going thrash when we
 * get near ENOSPC. We also need to supply a minimum we require per cpu to
 * prevent looping endlessly when xfs_alloc_space asks for more than will
 * be distributed to a single CPU but each CPU has enough blocks to be
 * reenabled.
 *
 * Note that we can be called when counters are already disabled.
 * xfs_icsb_disable_counter() optimises the counter locking in this case to
 * prevent locking every per-cpu counter needlessly.
 */

#define XFS_ICSB_INO_CNTR_REENABLE	(uint64_t)64
#define XFS_ICSB_FDBLK_CNTR_REENABLE(mp) \
		(uint64_t)(512 + XFS_ALLOC_SET_ASIDE(mp))
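/*
 * With the thresholds above, xfs_icsb_balance_counter() below disables the
 * counter, divides the global value evenly across the online CPUs (the
 * first CPU also taking the division remainder), and re-enables it only if
 * each CPU's share stays above the relevant REENABLE minimum.
 */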
STATIC void
xfs_icsb_balance_counter(
	xfs_mount_t	*mp,
	xfs_sb_field_t	field,
	int		flags,
	int		min_per_cpu)
{
	uint64_t	count, resid;
	int		weight = num_online_cpus();
	uint64_t	min = (uint64_t)min_per_cpu;

	if (!(flags & XFS_ICSB_SB_LOCKED))
		spin_lock(&mp->m_sb_lock);

	/* disable counter and sync counter */
	xfs_icsb_disable_counter(mp, field);

	/* update counters  - first CPU gets residual*/
	switch (field) {
	case XFS_SBS_ICOUNT:
		count = mp->m_sb.sb_icount;
		resid = do_div(count, weight);
		if (count < max(min, XFS_ICSB_INO_CNTR_REENABLE))
			goto out;
		break;
	case XFS_SBS_IFREE:
		count = mp->m_sb.sb_ifree;
		resid = do_div(count, weight);
		if (count < max(min, XFS_ICSB_INO_CNTR_REENABLE))
			goto out;
		break;
	case XFS_SBS_FDBLOCKS:
		count = mp->m_sb.sb_fdblocks;
		resid = do_div(count, weight);
		if (count < max(min, XFS_ICSB_FDBLK_CNTR_REENABLE(mp)))
			goto out;
		break;
	default:
		BUG();
		count = resid = 0;	/* quiet, gcc */
		break;
	}

	xfs_icsb_enable_counter(mp, field, count, resid);
out:
	if (!(flags & XFS_ICSB_SB_LOCKED))
		spin_unlock(&mp->m_sb_lock);
}
int
xfs_icsb_modify_counters(
	xfs_mount_t	*mp,
	xfs_sb_field_t	field,
	int64_t		delta,
	int		rsvd)
{
	xfs_icsb_cnts_t	*icsbp;
	long long	lcounter;	/* long counter for 64 bit fields */
	int		cpu, ret;

again:
	cpu = get_cpu();
	icsbp = (xfs_icsb_cnts_t *)per_cpu_ptr(mp->m_sb_cnts, cpu);

	/*
	 * if the counter is disabled, go to slow path
	 */
	if (unlikely(xfs_icsb_counter_disabled(mp, field)))
		goto slow_path;
	xfs_icsb_lock_cntr(icsbp);
	if (unlikely(xfs_icsb_counter_disabled(mp, field))) {
		xfs_icsb_unlock_cntr(icsbp);
		goto slow_path;
	}

	switch (field) {
	case XFS_SBS_ICOUNT:
		lcounter = icsbp->icsb_icount;
		lcounter += delta;
		if (unlikely(lcounter < 0))
			goto balance_counter;
		icsbp->icsb_icount = lcounter;
		break;

	case XFS_SBS_IFREE:
		lcounter = icsbp->icsb_ifree;
		lcounter += delta;
		if (unlikely(lcounter < 0))
			goto balance_counter;
		icsbp->icsb_ifree = lcounter;
		break;

	case XFS_SBS_FDBLOCKS:
		BUG_ON((mp->m_resblks - mp->m_resblks_avail) != 0);

		lcounter = icsbp->icsb_fdblocks - XFS_ALLOC_SET_ASIDE(mp);
		lcounter += delta;
		if (unlikely(lcounter < 0))
			goto balance_counter;
		icsbp->icsb_fdblocks = lcounter + XFS_ALLOC_SET_ASIDE(mp);
		break;
	default:
		BUG();
		break;
	}
	xfs_icsb_unlock_cntr(icsbp);
	put_cpu();
	return 0;

slow_path:
	put_cpu();

	/*
	 * serialise with a mutex so we don't burn lots of cpu on
	 * the superblock lock. We still need to hold the superblock
	 * lock, however, when we modify the global structures.
	 */
	xfs_icsb_lock(mp);

	/*
	 * Now running atomically.
	 *
	 * If the counter is enabled, someone has beaten us to rebalancing.
	 * Drop the lock and try again in the fast path....
	 */
	if (!(xfs_icsb_counter_disabled(mp, field))) {
		xfs_icsb_unlock(mp);
		goto again;
	}

	/*
	 * The counter is currently disabled. Because we are
	 * running atomically here, we know a rebalance cannot
	 * be in progress. Hence we can go straight to operating
	 * on the global superblock. We do not call xfs_mod_incore_sb()
	 * here even though we need to get the m_sb_lock. Doing so
	 * will cause us to re-enter this function and deadlock.
	 * Hence we get the m_sb_lock ourselves and then call
	 * xfs_mod_incore_sb_unlocked() as the unlocked path operates
	 * directly on the global counters.
	 */
	spin_lock(&mp->m_sb_lock);
	ret = xfs_mod_incore_sb_unlocked(mp, field, delta, rsvd);
	spin_unlock(&mp->m_sb_lock);

	/*
	 * Now that we've modified the global superblock, we
	 * may be able to re-enable the distributed counters
	 * (e.g. lots of space just got freed). After that
	 * we are done.
	 */
	xfs_icsb_balance_counter(mp, field, 0, 0);
	xfs_icsb_unlock(mp);
	return ret;

balance_counter:
	xfs_icsb_unlock_cntr(icsbp);
	put_cpu();

	/*
	 * We may have multiple threads here if multiple per-cpu
	 * counters run dry at the same time. This will mean we can
	 * do more balances than strictly necessary but it is not
	 * the common slowpath case.
	 */
	xfs_icsb_lock(mp);

	/*
	 * running atomically.
	 *
	 * This will leave the counter in the correct state for future
	 * accesses. After the rebalance, we simply try again and our retry
	 * will either succeed through the fast path or slow path without
	 * another balance operation being required.
	 */
	xfs_icsb_balance_counter(mp, field, 0, delta);
	xfs_icsb_unlock(mp);
	goto again;
}