/*
 * Copyright (c) 2000-2006 Silicon Graphics, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 */
#include "xfs.h"
#include "xfs_bit.h"
#include "xfs_log.h"
#include "xfs_inum.h"
#include "xfs_clnt.h"
#include "xfs_trans.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
#include "xfs_dir2.h"
#include "xfs_alloc.h"
#include "xfs_dmapi.h"
#include "xfs_quota.h"
#include "xfs_mount.h"
#include "xfs_bmap_btree.h"
#include "xfs_alloc_btree.h"
#include "xfs_ialloc_btree.h"
#include "xfs_dir2_sf.h"
#include "xfs_attr_sf.h"
#include "xfs_dinode.h"
#include "xfs_inode.h"
#include "xfs_btree.h"
#include "xfs_ialloc.h"
#include "xfs_bmap.h"
#include "xfs_rtalloc.h"
#include "xfs_error.h"
#include "xfs_itable.h"
#include "xfs_rw.h"
#include "xfs_acl.h"
#include "xfs_attr.h"
#include "xfs_buf_item.h"
#include "xfs_utils.h"
#include "xfs_version.h"

#include <linux/namei.h>
#include <linux/init.h>
#include <linux/mount.h>
#include <linux/mempool.h>
#include <linux/writeback.h>
#include <linux/kthread.h>
#include <linux/freezer.h>
static struct quotactl_ops xfs_quotactl_operations;
static struct super_operations xfs_super_operations;
static kmem_zone_t *xfs_vnode_zone;
static kmem_zone_t *xfs_ioend_zone;
mempool_t *xfs_ioend_pool;
STATIC struct xfs_mount_args *
xfs_args_allocate(
	struct super_block	*sb,
	int			silent)
{
	struct xfs_mount_args	*args;

	args = kmem_zalloc(sizeof(struct xfs_mount_args), KM_SLEEP);
	args->logbufs = args->logbufsize = -1;
	strncpy(args->fsname, sb->s_id, MAXNAMELEN);

	/* Copy the already-parsed mount(2) flags we're interested in */
	if (sb->s_flags & MS_DIRSYNC)
		args->flags |= XFSMNT_DIRSYNC;
	if (sb->s_flags & MS_SYNCHRONOUS)
		args->flags |= XFSMNT_WSYNC;
	if (silent)
		args->flags |= XFSMNT_QUIET;
	args->flags |= XFSMNT_32BITINODES;

	return args;
}
STATIC __uint64_t
xfs_max_file_offset(
	unsigned int		blockshift)
{
	unsigned int		pagefactor = 1;
	unsigned int		bitshift = BITS_PER_LONG - 1;

	/* Figure out maximum filesize, on Linux this can depend on
	 * the filesystem blocksize (on 32 bit platforms).
	 * __block_prepare_write does this in an [unsigned] long...
	 *      page->index << (PAGE_CACHE_SHIFT - bbits)
	 * So, for page sized blocks (4K on 32 bit platforms),
	 * this wraps at around 8Tb (hence MAX_LFS_FILESIZE which is
	 *      (((u64)PAGE_CACHE_SIZE << (BITS_PER_LONG-1))-1)
	 * but for smaller blocksizes it is less (bbits = log2 bsize).
	 * Note1: get_block_t takes a long (implicit cast from above)
	 * Note2: The Large Block Device (LBD and HAVE_SECTOR_T) patch
	 * can optionally convert the [unsigned] long from above into
	 * an [unsigned] long long.
	 */

#if BITS_PER_LONG == 32
# if defined(CONFIG_LBD)
	ASSERT(sizeof(sector_t) == 8);
	pagefactor = PAGE_CACHE_SIZE;
	bitshift = BITS_PER_LONG;
# else
	pagefactor = PAGE_CACHE_SIZE >> (PAGE_CACHE_SHIFT - blockshift);
# endif
#endif

	return (((__uint64_t)pagefactor) << bitshift) - 1;
}
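
/*
 * Worked example (illustrative note, not part of the original source):
 * on a 32-bit kernel without CONFIG_LBD, 4k pages (PAGE_CACHE_SHIFT == 12)
 * and 1k filesystem blocks (blockshift == 10) give
 *	pagefactor = 4096 >> (12 - 10) = 1024,	bitshift = 31
 * so the returned limit is (1024 << 31) - 1 = 2^41 - 1, just under 2TiB,
 * while page-sized (4k) blocks give 2^43 - 1, the ~8Tb wrap noted above.
 */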
STATIC __inline__ void
xfs_set_inodeops(
	struct inode		*inode)
{
	switch (inode->i_mode & S_IFMT) {
	case S_IFREG:
		inode->i_op = &xfs_inode_operations;
		inode->i_fop = &xfs_file_operations;
		inode->i_mapping->a_ops = &xfs_address_space_operations;
		break;
	case S_IFDIR:
		inode->i_op = &xfs_dir_inode_operations;
		inode->i_fop = &xfs_dir_file_operations;
		break;
	case S_IFLNK:
		inode->i_op = &xfs_symlink_inode_operations;
		if (inode->i_blocks)
			inode->i_mapping->a_ops = &xfs_address_space_operations;
		break;
	default:
		inode->i_op = &xfs_inode_operations;
		init_special_inode(inode, inode->i_mode, inode->i_rdev);
		break;
	}
}
STATIC __inline__ void
xfs_revalidate_inode(
	xfs_mount_t		*mp,
	bhv_vnode_t		*vp,
	xfs_inode_t		*ip)
{
	struct inode		*inode = vn_to_inode(vp);

	inode->i_mode	= ip->i_d.di_mode;
	inode->i_nlink	= ip->i_d.di_nlink;
	inode->i_uid	= ip->i_d.di_uid;
	inode->i_gid	= ip->i_d.di_gid;

	switch (inode->i_mode & S_IFMT) {
	case S_IFBLK:
	case S_IFCHR:
		inode->i_rdev =
			MKDEV(sysv_major(ip->i_df.if_u2.if_rdev) & 0x1ff,
			      sysv_minor(ip->i_df.if_u2.if_rdev));
		break;
	default:
		inode->i_rdev = 0;
		break;
	}

	inode->i_generation = ip->i_d.di_gen;
	i_size_write(inode, ip->i_d.di_size);
	inode->i_blocks =
		XFS_FSB_TO_BB(mp, ip->i_d.di_nblocks + ip->i_delayed_blks);
	inode->i_atime.tv_sec	= ip->i_d.di_atime.t_sec;
	inode->i_atime.tv_nsec	= ip->i_d.di_atime.t_nsec;
	inode->i_mtime.tv_sec	= ip->i_d.di_mtime.t_sec;
	inode->i_mtime.tv_nsec	= ip->i_d.di_mtime.t_nsec;
	inode->i_ctime.tv_sec	= ip->i_d.di_ctime.t_sec;
	inode->i_ctime.tv_nsec	= ip->i_d.di_ctime.t_nsec;
	if (ip->i_d.di_flags & XFS_DIFLAG_IMMUTABLE)
		inode->i_flags |= S_IMMUTABLE;
	else
		inode->i_flags &= ~S_IMMUTABLE;
	if (ip->i_d.di_flags & XFS_DIFLAG_APPEND)
		inode->i_flags |= S_APPEND;
	else
		inode->i_flags &= ~S_APPEND;
	if (ip->i_d.di_flags & XFS_DIFLAG_SYNC)
		inode->i_flags |= S_SYNC;
	else
		inode->i_flags &= ~S_SYNC;
	if (ip->i_d.di_flags & XFS_DIFLAG_NOATIME)
		inode->i_flags |= S_NOATIME;
	else
		inode->i_flags &= ~S_NOATIME;
	vp->v_flag &= ~VMODIFIED;
}
STATIC void
xfs_initialize_vnode(
	bhv_desc_t		*bdp,
	bhv_vnode_t		*vp,
	bhv_desc_t		*inode_bhv,
	int			unlock)
{
	xfs_inode_t		*ip = XFS_BHVTOI(inode_bhv);
	struct inode		*inode = vn_to_inode(vp);

	if (!inode_bhv->bd_vobj) {
		vp->v_vfsp = bhvtovfs(bdp);
		bhv_desc_init(inode_bhv, ip, vp, &xfs_vnodeops);
		bhv_insert(VN_BHV_HEAD(vp), inode_bhv);
	}

	/*
	 * We need to set the ops vectors, and unlock the inode, but if
	 * we have been called during the new inode create process, it is
	 * too early to fill in the Linux inode.  We will get called a
	 * second time once the inode is properly set up, and then we can
	 * finish the job.
	 */
	if (ip->i_d.di_mode != 0 && unlock && (inode->i_state & I_NEW)) {
		xfs_revalidate_inode(XFS_BHVTOM(bdp), vp, ip);
		xfs_set_inodeops(inode);

		xfs_iflags_clear(ip, XFS_INEW);
		barrier();

		unlock_new_inode(inode);
	}
}
STATIC int
xfs_blkdev_get(
	xfs_mount_t		*mp,
	const char		*name,
	struct block_device	**bdevp)
{
	int			error = 0;

	*bdevp = open_bdev_excl(name, 0, mp);
	if (IS_ERR(*bdevp)) {
		error = PTR_ERR(*bdevp);
		printk("XFS: Invalid device [%s], error=%d\n", name, error);
	}

	return -error;
}

STATIC void
xfs_blkdev_put(
	struct block_device	*bdev)
{
	if (bdev)
		close_bdev_excl(bdev);
}
/*
 * Try to write out the superblock using barriers.
 */
STATIC int
xfs_barrier_test(
	xfs_mount_t	*mp)
{
	xfs_buf_t	*sbp = xfs_getsb(mp, 0);
	int		error;

	XFS_BUF_UNDONE(sbp);
	XFS_BUF_UNREAD(sbp);
	XFS_BUF_UNDELAYWRITE(sbp);
	XFS_BUF_WRITE(sbp);
	XFS_BUF_UNASYNC(sbp);
	XFS_BUF_ORDERED(sbp);

	xfsbdstrat(mp, sbp);
	error = xfs_iowait(sbp);

	/*
	 * Clear all the flags we set and possible error state in the
	 * buffer.  We only did the write to try out whether barriers
	 * worked and shouldn't leave any traces in the superblock
	 * buffer.
	 */
	XFS_BUF_DONE(sbp);
	XFS_BUF_ERROR(sbp, 0);
	XFS_BUF_UNORDERED(sbp);

	xfs_buf_relse(sbp);
	return error;
}
void
xfs_mountfs_check_barriers(xfs_mount_t *mp)
{
	int error;

	if (mp->m_logdev_targp != mp->m_ddev_targp) {
		xfs_fs_cmn_err(CE_NOTE, mp,
		  "Disabling barriers, not supported with external log device");
		mp->m_flags &= ~XFS_MOUNT_BARRIER;
		return;
	}

	if (mp->m_ddev_targp->bt_bdev->bd_disk->queue->ordered ==
					QUEUE_ORDERED_NONE) {
		xfs_fs_cmn_err(CE_NOTE, mp,
		  "Disabling barriers, not supported by the underlying device");
		mp->m_flags &= ~XFS_MOUNT_BARRIER;
		return;
	}

	if (xfs_readonly_buftarg(mp->m_ddev_targp)) {
		xfs_fs_cmn_err(CE_NOTE, mp,
		  "Disabling barriers, underlying device is readonly");
		mp->m_flags &= ~XFS_MOUNT_BARRIER;
		return;
	}

	error = xfs_barrier_test(mp);
	if (error) {
		xfs_fs_cmn_err(CE_NOTE, mp,
		  "Disabling barriers, trial barrier write failed");
		mp->m_flags &= ~XFS_MOUNT_BARRIER;
	}
}
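
/*
 * Illustrative call site (an assumption for context, not code from this
 * file; the real caller lives in the XFS mount path): once the "barrier"
 * mount option has set XFS_MOUNT_BARRIER, mount-time code probes whether
 * the device stack honours ordered writes and quietly clears the flag
 * when it does not:
 *
 *	if (mp->m_flags & XFS_MOUNT_BARRIER)
 *		xfs_mountfs_check_barriers(mp);
 */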
void
xfs_blkdev_issue_flush(
	xfs_buftarg_t		*buftarg)
{
	blkdev_issue_flush(buftarg->bt_bdev, NULL);
}
STATIC struct inode *
xfs_fs_alloc_inode(
	struct super_block	*sb)
{
	bhv_vnode_t		*vp;

	vp = kmem_zone_alloc(xfs_vnode_zone, KM_SLEEP);
	if (unlikely(!vp))
		return NULL;
	return vn_to_inode(vp);
}
STATIC void
xfs_fs_destroy_inode(
	struct inode		*inode)
{
	kmem_zone_free(xfs_vnode_zone, vn_from_inode(inode));
}
STATIC void
xfs_fs_inode_init_once(
	void			*vnode,
	kmem_zone_t		*zonep,
	unsigned long		flags)
{
	if ((flags & (SLAB_CTOR_VERIFY|SLAB_CTOR_CONSTRUCTOR)) ==
		      SLAB_CTOR_CONSTRUCTOR)
		inode_init_once(vn_to_inode((bhv_vnode_t *)vnode));
}
STATIC int
xfs_init_zones(void)
{
	xfs_vnode_zone = kmem_zone_init_flags(sizeof(bhv_vnode_t), "xfs_vnode",
					KM_ZONE_HWALIGN | KM_ZONE_RECLAIM |
					KM_ZONE_SPREAD,
					xfs_fs_inode_init_once);
	if (!xfs_vnode_zone)
		goto out;

	xfs_ioend_zone = kmem_zone_init(sizeof(xfs_ioend_t), "xfs_ioend");
	if (!xfs_ioend_zone)
		goto out_destroy_vnode_zone;

	xfs_ioend_pool = mempool_create_slab_pool(4 * MAX_BUF_PER_PAGE,
						  xfs_ioend_zone);
	if (!xfs_ioend_pool)
		goto out_free_ioend_zone;
	return 0;

 out_free_ioend_zone:
	kmem_zone_destroy(xfs_ioend_zone);
 out_destroy_vnode_zone:
	kmem_zone_destroy(xfs_vnode_zone);
 out:
	return -ENOMEM;
}
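
/*
 * Sizing note with a worked example (illustrative, not from the original
 * source): the mempool guarantees that 4 * MAX_BUF_PER_PAGE ioends stay
 * available so writeback can make forward progress under memory pressure.
 * With 4k pages and 512-byte sectors, MAX_BUF_PER_PAGE is
 * 4096 / 512 = 8, so the pool reserves 32 ioend structures.
 */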
STATIC void
xfs_destroy_zones(void)
{
	mempool_destroy(xfs_ioend_pool);
	kmem_zone_destroy(xfs_vnode_zone);
	kmem_zone_destroy(xfs_ioend_zone);
}
/*
 * Attempt to flush the inode, this will actually fail
 * if the inode is pinned, but we dirty the inode again
 * at the point when it is unpinned after a log write,
 * since this is when the inode itself becomes flushable.
 */
STATIC int
xfs_fs_write_inode(
	struct inode		*inode,
	int			sync)
{
	bhv_vnode_t		*vp = vn_from_inode(inode);
	int			error = 0, flags = FLUSH_INODE;

	if (vp) {
		vn_trace_entry(vp, __FUNCTION__, (inst_t *)__return_address);
		if (sync)
			flags |= FLUSH_SYNC;
		error = bhv_vop_iflush(vp, flags);
		if (error == EAGAIN)
			error = sync ? bhv_vop_iflush(vp, flags | FLUSH_LOG) : 0;
	}

	return -error;
}
STATIC void
xfs_fs_clear_inode(
	struct inode		*inode)
{
	bhv_vnode_t		*vp = vn_from_inode(inode);

	vn_trace_entry(vp, __FUNCTION__, (inst_t *)__return_address);

	XFS_STATS_INC(vn_rele);
	XFS_STATS_INC(vn_remove);
	XFS_STATS_INC(vn_reclaim);
	XFS_STATS_DEC(vn_active);

	/*
	 * This can happen because xfs_iget_core calls xfs_idestroy if we
	 * find an inode with di_mode == 0 but without IGET_CREATE set.
	 */
	if (VNHEAD(vp))
		bhv_vop_inactive(vp, NULL);

	VN_LOCK(vp);
	vp->v_flag &= ~VMODIFIED;
	VN_UNLOCK(vp, 0);

	if (VNHEAD(vp))
		if (bhv_vop_reclaim(vp))
			panic("%s: cannot reclaim 0x%p\n", __FUNCTION__, vp);

	ASSERT(VNHEAD(vp) == NULL);

#ifdef XFS_VNODE_TRACE
	ktrace_free(vp->v_trace);
#endif
}
/*
 * Enqueue a work item to be picked up by the vfs xfssyncd thread.
 * Doing this has two advantages:
 * - It saves on stack space, which is tight in certain situations
 * - It can be used (with care) as a mechanism to avoid deadlocks.
 * Flushing while allocating in a full filesystem requires both.
 */
STATIC void
xfs_syncd_queue_work(
	struct bhv_vfs	*vfs,
	void		*data,
	void		(*syncer)(bhv_vfs_t *, void *))
{
	struct bhv_vfs_sync_work *work;

	work = kmem_alloc(sizeof(struct bhv_vfs_sync_work), KM_SLEEP);
	INIT_LIST_HEAD(&work->w_list);
	work->w_syncer = syncer;
	work->w_data = data;
	work->w_vfs = vfs;
	spin_lock(&vfs->vfs_sync_lock);
	list_add_tail(&work->w_list, &vfs->vfs_sync_list);
	spin_unlock(&vfs->vfs_sync_lock);
	wake_up_process(vfs->vfs_sync_task);
}
/*
 * Flush delayed allocate data, attempting to free up reserved space
 * from existing allocations.  At this point a new allocation attempt
 * has failed with ENOSPC and we are in the process of scratching our
 * heads, looking about for more room...
 */
STATIC void
xfs_flush_inode_work(
	bhv_vfs_t	*vfs,
	void		*inode)
{
	filemap_flush(((struct inode *)inode)->i_mapping);
	iput((struct inode *)inode);
}

void
xfs_flush_inode(
	xfs_inode_t	*ip)
{
	struct inode	*inode = vn_to_inode(XFS_ITOV(ip));
	struct bhv_vfs	*vfs = XFS_MTOVFS(ip->i_mount);

	igrab(inode);
	xfs_syncd_queue_work(vfs, inode, xfs_flush_inode_work);
	delay(msecs_to_jiffies(500));
}
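
/*
 * Illustrative caller pattern (an assumption for context; the real retry
 * loops live in the XFS allocation paths, not in this file).  A writer
 * that hits ENOSPC kicks the flush, which blocks for ~500ms while
 * xfssyncd pushes delalloc data, then retries once:
 *
 *	error = xfs_reserve_blocks_for_write(ip, ...);	// hypothetical helper
 *	if (error == ENOSPC) {
 *		xfs_flush_inode(ip);
 *		error = xfs_reserve_blocks_for_write(ip, ...);
 *	}
 */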
/*
 * This is the "bigger hammer" version of xfs_flush_inode_work...
 * (IOW, "If at first you don't succeed, use a Bigger Hammer").
 */
STATIC void
xfs_flush_device_work(
	bhv_vfs_t	*vfs,
	void		*inode)
{
	sync_blockdev(vfs->vfs_super->s_bdev);
	iput((struct inode *)inode);
}

void
xfs_flush_device(
	xfs_inode_t	*ip)
{
	struct inode	*inode = vn_to_inode(XFS_ITOV(ip));
	struct bhv_vfs	*vfs = XFS_MTOVFS(ip->i_mount);

	igrab(inode);
	xfs_syncd_queue_work(vfs, inode, xfs_flush_device_work);
	delay(msecs_to_jiffies(500));
	xfs_log_force(ip->i_mount, (xfs_lsn_t)0, XFS_LOG_FORCE|XFS_LOG_SYNC);
}
STATIC void
vfs_sync_worker(
	bhv_vfs_t	*vfsp,
	void		*unused)
{
	int		error;

	if (!(vfsp->vfs_flag & VFS_RDONLY))
		error = bhv_vfs_sync(vfsp, SYNC_FSDATA | SYNC_BDFLUSH | \
					SYNC_ATTR | SYNC_REFCACHE, NULL);
	vfsp->vfs_sync_seq++;
	wake_up(&vfsp->vfs_wait_single_sync_task);
}
STATIC int
xfssyncd(
	void			*arg)
{
	long			timeleft;
	bhv_vfs_t		*vfsp = (bhv_vfs_t *) arg;
	bhv_vfs_sync_work_t	*work, *n;
	LIST_HEAD		(tmp);

	timeleft = xfs_syncd_centisecs * msecs_to_jiffies(10);
	for (;;) {
		timeleft = schedule_timeout_interruptible(timeleft);
		/* swsusp */
		try_to_freeze();
		if (kthread_should_stop() && list_empty(&vfsp->vfs_sync_list))
			break;

		spin_lock(&vfsp->vfs_sync_lock);
		/*
		 * We can get woken by laptop mode, to do a sync -
		 * that's the (only!) case where the list would be
		 * empty with time remaining.
		 */
		if (!timeleft || list_empty(&vfsp->vfs_sync_list)) {
			if (!timeleft)
				timeleft = xfs_syncd_centisecs *
							msecs_to_jiffies(10);
			INIT_LIST_HEAD(&vfsp->vfs_sync_work.w_list);
			list_add_tail(&vfsp->vfs_sync_work.w_list,
					&vfsp->vfs_sync_list);
		}
		list_for_each_entry_safe(work, n, &vfsp->vfs_sync_list, w_list)
			list_move(&work->w_list, &tmp);
		spin_unlock(&vfsp->vfs_sync_lock);

		list_for_each_entry_safe(work, n, &tmp, w_list) {
			(*work->w_syncer)(vfsp, work->w_data);
			list_del(&work->w_list);
			if (work == &vfsp->vfs_sync_work)
				continue;
			kmem_free(work, sizeof(struct bhv_vfs_sync_work));
		}
	}

	return 0;
}
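
/*
 * Timing note with a worked example (illustrative, not from the original
 * source): xfs_syncd_centisecs is the fs.xfs.xfssyncd_centisecs sysctl,
 * which defaults to 3000.  The sleep above is therefore
 *	3000 * msecs_to_jiffies(10) = 30 seconds
 * between periodic sync iterations, unless a queued work item wakes the
 * thread earlier via wake_up_process().
 */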
STATIC int
xfs_fs_start_syncd(
	bhv_vfs_t		*vfsp)
{
	vfsp->vfs_sync_work.w_syncer = vfs_sync_worker;
	vfsp->vfs_sync_work.w_vfs = vfsp;
	vfsp->vfs_sync_task = kthread_run(xfssyncd, vfsp, "xfssyncd");
	if (IS_ERR(vfsp->vfs_sync_task))
		return -PTR_ERR(vfsp->vfs_sync_task);
	return 0;
}

STATIC void
xfs_fs_stop_syncd(
	bhv_vfs_t		*vfsp)
{
	kthread_stop(vfsp->vfs_sync_task);
}
STATIC void
xfs_fs_put_super(
	struct super_block	*sb)
{
	bhv_vfs_t		*vfsp = vfs_from_sb(sb);
	int			error;

	xfs_fs_stop_syncd(vfsp);
	bhv_vfs_sync(vfsp, SYNC_ATTR | SYNC_DELWRI, NULL);
	error = bhv_vfs_unmount(vfsp, 0, NULL);
	if (error) {
		printk("XFS: unmount got error=%d\n", error);
		printk("%s: vfs=0x%p left dangling!\n", __FUNCTION__, vfsp);
	} else {
		vfs_deallocate(vfsp);
	}
}
STATIC void
xfs_fs_write_super(
	struct super_block	*sb)
{
	if (!(sb->s_flags & MS_RDONLY))
		bhv_vfs_sync(vfs_from_sb(sb), SYNC_FSDATA, NULL);
	sb->s_dirt = 0;
}
STATIC int
xfs_fs_sync_super(
	struct super_block	*sb,
	int			wait)
{
	bhv_vfs_t		*vfsp = vfs_from_sb(sb);
	int			error;
	int			flags;

	if (unlikely(sb->s_frozen == SB_FREEZE_WRITE)) {
		/*
		 * First stage of freeze - no more writers will make progress
		 * now we are here, so we flush delwri and delalloc buffers
		 * here, then wait for all I/O to complete.  Data is frozen at
		 * that point. Metadata is not frozen, transactions can still
		 * occur here so don't bother flushing the buftarg (i.e
		 * SYNC_QUIESCE) because it'll just get dirty again.
		 */
		flags = SYNC_FSDATA | SYNC_DELWRI | SYNC_WAIT | SYNC_IOWAIT;
	} else
		flags = SYNC_FSDATA | (wait ? SYNC_WAIT : 0);

	error = bhv_vfs_sync(vfsp, flags, NULL);
	sb->s_dirt = 0;

	if (unlikely(laptop_mode)) {
		int	prev_sync_seq = vfsp->vfs_sync_seq;

		/*
		 * The disk must be active because we're syncing.
		 * We schedule xfssyncd now (now that the disk is
		 * active) instead of later (when it might not be).
		 */
		wake_up_process(vfsp->vfs_sync_task);
		/*
		 * We have to wait for the sync iteration to complete.
		 * If we don't, the disk activity caused by the sync
		 * will come after the sync is completed, and that
		 * triggers another sync from laptop mode.
		 */
		wait_event(vfsp->vfs_wait_single_sync_task,
				vfsp->vfs_sync_seq != prev_sync_seq);
	}

	return -error;
}
STATIC int
xfs_fs_statfs(
	struct dentry		*dentry,
	struct kstatfs		*statp)
{
	return -bhv_vfs_statvfs(vfs_from_sb(dentry->d_sb), statp,
				vn_from_inode(dentry->d_inode));
}
STATIC int
xfs_fs_remount(
	struct super_block	*sb,
	int			*flags,
	char			*options)
{
	bhv_vfs_t		*vfsp = vfs_from_sb(sb);
	struct xfs_mount_args	*args = xfs_args_allocate(sb, 0);
	int			error;

	error = bhv_vfs_parseargs(vfsp, options, args, 1);
	if (!error)
		error = bhv_vfs_mntupdate(vfsp, flags, args);
	kmem_free(args, sizeof(*args));
	return -error;
}
STATIC void
xfs_fs_lockfs(
	struct super_block	*sb)
{
	bhv_vfs_freeze(vfs_from_sb(sb));
}
STATIC int
xfs_fs_show_options(
	struct seq_file		*m,
	struct vfsmount		*mnt)
{
	return -bhv_vfs_showargs(vfs_from_sb(mnt->mnt_sb), m);
}
STATIC int
xfs_fs_quotasync(
	struct super_block	*sb,
	int			type)
{
	return -bhv_vfs_quotactl(vfs_from_sb(sb), Q_XQUOTASYNC, 0, NULL);
}
STATIC int
xfs_fs_getxstate(
	struct super_block	*sb,
	struct fs_quota_stat	*fqs)
{
	return -bhv_vfs_quotactl(vfs_from_sb(sb), Q_XGETQSTAT, 0, (caddr_t)fqs);
}
STATIC int
xfs_fs_setxstate(
	struct super_block	*sb,
	unsigned int		flags,
	int			op)
{
	return -bhv_vfs_quotactl(vfs_from_sb(sb), op, 0, (caddr_t)&flags);
}
STATIC int
xfs_fs_getxquota(
	struct super_block	*sb,
	int			type,
	qid_t			id,
	struct fs_disk_quota	*fdq)
{
	return -bhv_vfs_quotactl(vfs_from_sb(sb),
				 (type == USRQUOTA) ? Q_XGETQUOTA :
				  ((type == GRPQUOTA) ? Q_XGETGQUOTA :
				   Q_XGETPQUOTA), id, (caddr_t)fdq);
}
STATIC int
xfs_fs_setxquota(
	struct super_block	*sb,
	int			type,
	qid_t			id,
	struct fs_disk_quota	*fdq)
{
	return -bhv_vfs_quotactl(vfs_from_sb(sb),
				 (type == USRQUOTA) ? Q_XSETQLIM :
				  ((type == GRPQUOTA) ? Q_XSETGQLIM :
				   Q_XSETPQLIM), id, (caddr_t)fdq);
}
STATIC int
xfs_fs_fill_super(
	struct super_block	*sb,
	void			*data,
	int			silent)
{
	struct bhv_vnode	*rootvp;
	struct bhv_vfs		*vfsp = vfs_allocate(sb);
	struct xfs_mount_args	*args = xfs_args_allocate(sb, silent);
	struct kstatfs		statvfs;
	int			error;

	bhv_insert_all_vfsops(vfsp);

	error = bhv_vfs_parseargs(vfsp, (char *)data, args, 0);
	if (error) {
		bhv_remove_all_vfsops(vfsp, 1);
		goto fail_vfsop;
	}

	sb_min_blocksize(sb, BBSIZE);
	sb->s_export_op = &xfs_export_operations;
	sb->s_qcop = &xfs_quotactl_operations;
	sb->s_op = &xfs_super_operations;

	error = bhv_vfs_mount(vfsp, args, NULL);
	if (error) {
		bhv_remove_all_vfsops(vfsp, 1);
		goto fail_vfsop;
	}

	error = bhv_vfs_statvfs(vfsp, &statvfs, NULL);
	if (error)
		goto fail_unmount;

	sb->s_dirt = 1;
	sb->s_magic = statvfs.f_type;
	sb->s_blocksize = statvfs.f_bsize;
	sb->s_blocksize_bits = ffs(statvfs.f_bsize) - 1;
	sb->s_maxbytes = xfs_max_file_offset(sb->s_blocksize_bits);
	sb->s_time_gran = 1;
	set_posix_acl_flag(sb);

	error = bhv_vfs_root(vfsp, &rootvp);
	if (error)
		goto fail_unmount;

	sb->s_root = d_alloc_root(vn_to_inode(rootvp));
	if (!sb->s_root) {
		error = ENOMEM;
		goto fail_vnrele;
	}
	if (is_bad_inode(sb->s_root->d_inode)) {
		error = EINVAL;
		goto fail_vnrele;
	}
	if ((error = xfs_fs_start_syncd(vfsp)))
		goto fail_vnrele;
	vn_trace_exit(rootvp, __FUNCTION__, (inst_t *)__return_address);

	kmem_free(args, sizeof(*args));
	return 0;

fail_vnrele:
	if (sb->s_root) {
		dput(sb->s_root);
		sb->s_root = NULL;
	} else {
		VN_RELE(rootvp);
	}

fail_unmount:
	bhv_vfs_unmount(vfsp, 0, NULL);

fail_vfsop:
	vfs_deallocate(vfsp);
	kmem_free(args, sizeof(*args));
	return -error;
}
STATIC int
xfs_fs_get_sb(
	struct file_system_type	*fs_type,
	int			flags,
	const char		*dev_name,
	void			*data,
	struct vfsmount		*mnt)
{
	return get_sb_bdev(fs_type, flags, dev_name, data, xfs_fs_fill_super,
			   mnt);
}
static struct super_operations xfs_super_operations = {
	.alloc_inode		= xfs_fs_alloc_inode,
	.destroy_inode		= xfs_fs_destroy_inode,
	.write_inode		= xfs_fs_write_inode,
	.clear_inode		= xfs_fs_clear_inode,
	.put_super		= xfs_fs_put_super,
	.write_super		= xfs_fs_write_super,
	.sync_fs		= xfs_fs_sync_super,
	.write_super_lockfs	= xfs_fs_lockfs,
	.statfs			= xfs_fs_statfs,
	.remount_fs		= xfs_fs_remount,
	.show_options		= xfs_fs_show_options,
};
static struct quotactl_ops xfs_quotactl_operations = {
	.quota_sync		= xfs_fs_quotasync,
	.get_xstate		= xfs_fs_getxstate,
	.set_xstate		= xfs_fs_setxstate,
	.get_xquota		= xfs_fs_getxquota,
	.set_xquota		= xfs_fs_setxquota,
};
STATIC struct file_system_type xfs_fs_type = {
	.owner			= THIS_MODULE,
	.name			= "xfs",
	.get_sb			= xfs_fs_get_sb,
	.kill_sb		= kill_block_super,
	.fs_flags		= FS_REQUIRES_DEV,
};
STATIC int __init
init_xfs_fs( void )
{
	int			error;
	struct sysinfo		si;
	static char		message[] __initdata = KERN_INFO \
		XFS_VERSION_STRING " with " XFS_BUILD_OPTIONS " enabled\n";

	printk(message);

	si_meminfo(&si);
	xfs_physmem = si.totalram;

	ktrace_init(64);

	error = xfs_init_zones();
	if (error < 0)
		goto undo_zones;

	error = xfs_buf_init();
	if (error < 0)
		goto undo_buffers;

	vn_init();
	xfs_mount_init();
	uuid_init();
	vfs_initquota();

	error = register_filesystem(&xfs_fs_type);
	if (error)
		goto undo_register;
	return 0;

undo_register:
	xfs_buf_terminate();

undo_buffers:
	xfs_destroy_zones();

undo_zones:
	return error;
}

STATIC void __exit
exit_xfs_fs( void )
{
	vfs_exitquota();
	unregister_filesystem(&xfs_fs_type);
	xfs_cleanup();
	xfs_buf_terminate();
	xfs_destroy_zones();
	ktrace_uninit();
}
module_init(init_xfs_fs);
module_exit(exit_xfs_fs);

MODULE_AUTHOR("Silicon Graphics, Inc.");
MODULE_DESCRIPTION(XFS_VERSION_STRING " with " XFS_BUILD_OPTIONS " enabled");
MODULE_LICENSE("GPL");