/*
 *  linux/fs/hfsplus/super.c
 *
 * Copyright (C) 2001
 * Brad Boyer (flar@allandria.com)
 * (C) 2003 Ardis Technologies <roman@ardistech.com>
 */
10 #include <linux/module.h>
11 #include <linux/init.h>
12 #include <linux/pagemap.h>
13 #include <linux/blkdev.h>
15 #include <linux/slab.h>
16 #include <linux/vfs.h>
17 #include <linux/nls.h>
19 static struct inode
*hfsplus_alloc_inode(struct super_block
*sb
);
20 static void hfsplus_destroy_inode(struct inode
*inode
);
22 #include "hfsplus_fs.h"
25 static int hfsplus_system_read_inode(struct inode
*inode
)
27 struct hfsplus_vh
*vhdr
= HFSPLUS_SB(inode
->i_sb
)->s_vhdr
;
29 switch (inode
->i_ino
) {
30 case HFSPLUS_EXT_CNID
:
31 hfsplus_inode_read_fork(inode
, &vhdr
->ext_file
);
32 inode
->i_mapping
->a_ops
= &hfsplus_btree_aops
;
34 case HFSPLUS_CAT_CNID
:
35 hfsplus_inode_read_fork(inode
, &vhdr
->cat_file
);
36 inode
->i_mapping
->a_ops
= &hfsplus_btree_aops
;
38 case HFSPLUS_ALLOC_CNID
:
39 hfsplus_inode_read_fork(inode
, &vhdr
->alloc_file
);
40 inode
->i_mapping
->a_ops
= &hfsplus_aops
;
42 case HFSPLUS_START_CNID
:
43 hfsplus_inode_read_fork(inode
, &vhdr
->start_file
);
45 case HFSPLUS_ATTR_CNID
:
46 hfsplus_inode_read_fork(inode
, &vhdr
->attr_file
);
47 inode
->i_mapping
->a_ops
= &hfsplus_btree_aops
;
56 struct inode
*hfsplus_iget(struct super_block
*sb
, unsigned long ino
)
58 struct hfs_find_data fd
;
62 inode
= iget_locked(sb
, ino
);
64 return ERR_PTR(-ENOMEM
);
65 if (!(inode
->i_state
& I_NEW
))
68 INIT_LIST_HEAD(&HFSPLUS_I(inode
)->open_dir_list
);
69 mutex_init(&HFSPLUS_I(inode
)->extents_lock
);
70 HFSPLUS_I(inode
)->flags
= 0;
71 HFSPLUS_I(inode
)->extent_state
= 0;
72 HFSPLUS_I(inode
)->rsrc_inode
= NULL
;
73 atomic_set(&HFSPLUS_I(inode
)->opencnt
, 0);
75 if (inode
->i_ino
>= HFSPLUS_FIRSTUSER_CNID
||
76 inode
->i_ino
== HFSPLUS_ROOT_CNID
) {
77 err
= hfs_find_init(HFSPLUS_SB(inode
->i_sb
)->cat_tree
, &fd
);
79 err
= hfsplus_find_cat(inode
->i_sb
, inode
->i_ino
, &fd
);
81 err
= hfsplus_cat_read_inode(inode
, &fd
);
85 err
= hfsplus_system_read_inode(inode
);
93 unlock_new_inode(inode
);
97 static int hfsplus_system_write_inode(struct inode
*inode
)
99 struct hfsplus_sb_info
*sbi
= HFSPLUS_SB(inode
->i_sb
);
100 struct hfsplus_vh
*vhdr
= sbi
->s_vhdr
;
101 struct hfsplus_fork_raw
*fork
;
102 struct hfs_btree
*tree
= NULL
;
104 switch (inode
->i_ino
) {
105 case HFSPLUS_EXT_CNID
:
106 fork
= &vhdr
->ext_file
;
107 tree
= sbi
->ext_tree
;
109 case HFSPLUS_CAT_CNID
:
110 fork
= &vhdr
->cat_file
;
111 tree
= sbi
->cat_tree
;
113 case HFSPLUS_ALLOC_CNID
:
114 fork
= &vhdr
->alloc_file
;
116 case HFSPLUS_START_CNID
:
117 fork
= &vhdr
->start_file
;
119 case HFSPLUS_ATTR_CNID
:
120 fork
= &vhdr
->attr_file
;
121 tree
= sbi
->attr_tree
;
127 if (fork
->total_size
!= cpu_to_be64(inode
->i_size
)) {
128 set_bit(HFSPLUS_SB_WRITEBACKUP
, &sbi
->flags
);
129 hfsplus_mark_mdb_dirty(inode
->i_sb
);
131 hfsplus_inode_write_fork(inode
, fork
);
133 int err
= hfs_btree_write(tree
);
136 pr_err("b-tree write err: %d, ino %lu\n",
144 static int hfsplus_write_inode(struct inode
*inode
,
145 struct writeback_control
*wbc
)
149 hfs_dbg(INODE
, "hfsplus_write_inode: %lu\n", inode
->i_ino
);
151 err
= hfsplus_ext_write_extent(inode
);
155 if (inode
->i_ino
>= HFSPLUS_FIRSTUSER_CNID
||
156 inode
->i_ino
== HFSPLUS_ROOT_CNID
)
157 return hfsplus_cat_write_inode(inode
);
159 return hfsplus_system_write_inode(inode
);
162 static void hfsplus_evict_inode(struct inode
*inode
)
164 hfs_dbg(INODE
, "hfsplus_evict_inode: %lu\n", inode
->i_ino
);
165 truncate_inode_pages_final(&inode
->i_data
);
167 if (HFSPLUS_IS_RSRC(inode
)) {
168 HFSPLUS_I(HFSPLUS_I(inode
)->rsrc_inode
)->rsrc_inode
= NULL
;
169 iput(HFSPLUS_I(inode
)->rsrc_inode
);
173 static int hfsplus_sync_fs(struct super_block
*sb
, int wait
)
175 struct hfsplus_sb_info
*sbi
= HFSPLUS_SB(sb
);
176 struct hfsplus_vh
*vhdr
= sbi
->s_vhdr
;
177 int write_backup
= 0;
183 hfs_dbg(SUPER
, "hfsplus_sync_fs\n");
186 * Explicitly write out the special metadata inodes.
188 * While these special inodes are marked as hashed and written
189 * out peridocically by the flusher threads we redirty them
190 * during writeout of normal inodes, and thus the life lock
191 * prevents us from getting the latest state to disk.
193 error
= filemap_write_and_wait(sbi
->cat_tree
->inode
->i_mapping
);
194 error2
= filemap_write_and_wait(sbi
->ext_tree
->inode
->i_mapping
);
197 if (sbi
->attr_tree
) {
199 filemap_write_and_wait(sbi
->attr_tree
->inode
->i_mapping
);
203 error2
= filemap_write_and_wait(sbi
->alloc_file
->i_mapping
);
207 mutex_lock(&sbi
->vh_mutex
);
208 mutex_lock(&sbi
->alloc_mutex
);
209 vhdr
->free_blocks
= cpu_to_be32(sbi
->free_blocks
);
210 vhdr
->next_cnid
= cpu_to_be32(sbi
->next_cnid
);
211 vhdr
->folder_count
= cpu_to_be32(sbi
->folder_count
);
212 vhdr
->file_count
= cpu_to_be32(sbi
->file_count
);
214 if (test_and_clear_bit(HFSPLUS_SB_WRITEBACKUP
, &sbi
->flags
)) {
215 memcpy(sbi
->s_backup_vhdr
, sbi
->s_vhdr
, sizeof(*sbi
->s_vhdr
));
219 error2
= hfsplus_submit_bio(sb
,
220 sbi
->part_start
+ HFSPLUS_VOLHEAD_SECTOR
,
221 sbi
->s_vhdr_buf
, NULL
, WRITE_SYNC
);
227 error2
= hfsplus_submit_bio(sb
,
228 sbi
->part_start
+ sbi
->sect_count
- 2,
229 sbi
->s_backup_vhdr_buf
, NULL
, WRITE_SYNC
);
233 mutex_unlock(&sbi
->alloc_mutex
);
234 mutex_unlock(&sbi
->vh_mutex
);
236 if (!test_bit(HFSPLUS_SB_NOBARRIER
, &sbi
->flags
))
237 blkdev_issue_flush(sb
->s_bdev
, GFP_KERNEL
, NULL
);
242 static void delayed_sync_fs(struct work_struct
*work
)
245 struct hfsplus_sb_info
*sbi
;
247 sbi
= container_of(work
, struct hfsplus_sb_info
, sync_work
.work
);
249 spin_lock(&sbi
->work_lock
);
250 sbi
->work_queued
= 0;
251 spin_unlock(&sbi
->work_lock
);
253 err
= hfsplus_sync_fs(sbi
->alloc_file
->i_sb
, 1);
255 pr_err("delayed sync fs err %d\n", err
);
258 void hfsplus_mark_mdb_dirty(struct super_block
*sb
)
260 struct hfsplus_sb_info
*sbi
= HFSPLUS_SB(sb
);
263 if (sb
->s_flags
& MS_RDONLY
)
266 spin_lock(&sbi
->work_lock
);
267 if (!sbi
->work_queued
) {
268 delay
= msecs_to_jiffies(dirty_writeback_interval
* 10);
269 queue_delayed_work(system_long_wq
, &sbi
->sync_work
, delay
);
270 sbi
->work_queued
= 1;
272 spin_unlock(&sbi
->work_lock
);
275 static void hfsplus_put_super(struct super_block
*sb
)
277 struct hfsplus_sb_info
*sbi
= HFSPLUS_SB(sb
);
279 hfs_dbg(SUPER
, "hfsplus_put_super\n");
281 cancel_delayed_work_sync(&sbi
->sync_work
);
283 if (!(sb
->s_flags
& MS_RDONLY
) && sbi
->s_vhdr
) {
284 struct hfsplus_vh
*vhdr
= sbi
->s_vhdr
;
286 vhdr
->modify_date
= hfsp_now2mt();
287 vhdr
->attributes
|= cpu_to_be32(HFSPLUS_VOL_UNMNT
);
288 vhdr
->attributes
&= cpu_to_be32(~HFSPLUS_VOL_INCNSTNT
);
290 hfsplus_sync_fs(sb
, 1);
293 hfs_btree_close(sbi
->attr_tree
);
294 hfs_btree_close(sbi
->cat_tree
);
295 hfs_btree_close(sbi
->ext_tree
);
296 iput(sbi
->alloc_file
);
297 iput(sbi
->hidden_dir
);
298 kfree(sbi
->s_vhdr_buf
);
299 kfree(sbi
->s_backup_vhdr_buf
);
300 unload_nls(sbi
->nls
);
301 kfree(sb
->s_fs_info
);
302 sb
->s_fs_info
= NULL
;
305 static int hfsplus_statfs(struct dentry
*dentry
, struct kstatfs
*buf
)
307 struct super_block
*sb
= dentry
->d_sb
;
308 struct hfsplus_sb_info
*sbi
= HFSPLUS_SB(sb
);
309 u64 id
= huge_encode_dev(sb
->s_bdev
->bd_dev
);
311 buf
->f_type
= HFSPLUS_SUPER_MAGIC
;
312 buf
->f_bsize
= sb
->s_blocksize
;
313 buf
->f_blocks
= sbi
->total_blocks
<< sbi
->fs_shift
;
314 buf
->f_bfree
= sbi
->free_blocks
<< sbi
->fs_shift
;
315 buf
->f_bavail
= buf
->f_bfree
;
316 buf
->f_files
= 0xFFFFFFFF;
317 buf
->f_ffree
= 0xFFFFFFFF - sbi
->next_cnid
;
318 buf
->f_fsid
.val
[0] = (u32
)id
;
319 buf
->f_fsid
.val
[1] = (u32
)(id
>> 32);
320 buf
->f_namelen
= HFSPLUS_MAX_STRLEN
;
325 static int hfsplus_remount(struct super_block
*sb
, int *flags
, char *data
)
328 if ((*flags
& MS_RDONLY
) == (sb
->s_flags
& MS_RDONLY
))
330 if (!(*flags
& MS_RDONLY
)) {
331 struct hfsplus_vh
*vhdr
= HFSPLUS_SB(sb
)->s_vhdr
;
334 if (!hfsplus_parse_options_remount(data
, &force
))
337 if (!(vhdr
->attributes
& cpu_to_be32(HFSPLUS_VOL_UNMNT
))) {
338 pr_warn("filesystem was not cleanly unmounted, running fsck.hfsplus is recommended. leaving read-only.\n");
339 sb
->s_flags
|= MS_RDONLY
;
343 } else if (vhdr
->attributes
&
344 cpu_to_be32(HFSPLUS_VOL_SOFTLOCK
)) {
345 pr_warn("filesystem is marked locked, leaving read-only.\n");
346 sb
->s_flags
|= MS_RDONLY
;
348 } else if (vhdr
->attributes
&
349 cpu_to_be32(HFSPLUS_VOL_JOURNALED
)) {
350 pr_warn("filesystem is marked journaled, leaving read-only.\n");
351 sb
->s_flags
|= MS_RDONLY
;
358 static const struct super_operations hfsplus_sops
= {
359 .alloc_inode
= hfsplus_alloc_inode
,
360 .destroy_inode
= hfsplus_destroy_inode
,
361 .write_inode
= hfsplus_write_inode
,
362 .evict_inode
= hfsplus_evict_inode
,
363 .put_super
= hfsplus_put_super
,
364 .sync_fs
= hfsplus_sync_fs
,
365 .statfs
= hfsplus_statfs
,
366 .remount_fs
= hfsplus_remount
,
367 .show_options
= hfsplus_show_options
,
370 static int hfsplus_fill_super(struct super_block
*sb
, void *data
, int silent
)
372 struct hfsplus_vh
*vhdr
;
373 struct hfsplus_sb_info
*sbi
;
374 hfsplus_cat_entry entry
;
375 struct hfs_find_data fd
;
376 struct inode
*root
, *inode
;
378 struct nls_table
*nls
= NULL
;
379 u64 last_fs_block
, last_fs_page
;
383 sbi
= kzalloc(sizeof(*sbi
), GFP_KERNEL
);
388 mutex_init(&sbi
->alloc_mutex
);
389 mutex_init(&sbi
->vh_mutex
);
390 spin_lock_init(&sbi
->work_lock
);
391 INIT_DELAYED_WORK(&sbi
->sync_work
, delayed_sync_fs
);
392 hfsplus_fill_defaults(sbi
);
395 if (!hfsplus_parse_options(data
, sbi
)) {
396 pr_err("unable to parse mount options\n");
400 /* temporarily use utf8 to correctly find the hidden dir below */
402 sbi
->nls
= load_nls("utf8");
404 pr_err("unable to load nls for utf8\n");
408 /* Grab the volume header */
409 if (hfsplus_read_wrapper(sb
)) {
411 pr_warn("unable to find HFS+ superblock\n");
416 /* Copy parts of the volume header into the superblock */
417 sb
->s_magic
= HFSPLUS_VOLHEAD_SIG
;
418 if (be16_to_cpu(vhdr
->version
) < HFSPLUS_MIN_VERSION
||
419 be16_to_cpu(vhdr
->version
) > HFSPLUS_CURRENT_VERSION
) {
420 pr_err("wrong filesystem version\n");
423 sbi
->total_blocks
= be32_to_cpu(vhdr
->total_blocks
);
424 sbi
->free_blocks
= be32_to_cpu(vhdr
->free_blocks
);
425 sbi
->next_cnid
= be32_to_cpu(vhdr
->next_cnid
);
426 sbi
->file_count
= be32_to_cpu(vhdr
->file_count
);
427 sbi
->folder_count
= be32_to_cpu(vhdr
->folder_count
);
428 sbi
->data_clump_blocks
=
429 be32_to_cpu(vhdr
->data_clump_sz
) >> sbi
->alloc_blksz_shift
;
430 if (!sbi
->data_clump_blocks
)
431 sbi
->data_clump_blocks
= 1;
432 sbi
->rsrc_clump_blocks
=
433 be32_to_cpu(vhdr
->rsrc_clump_sz
) >> sbi
->alloc_blksz_shift
;
434 if (!sbi
->rsrc_clump_blocks
)
435 sbi
->rsrc_clump_blocks
= 1;
438 last_fs_block
= sbi
->total_blocks
- 1;
439 last_fs_page
= (last_fs_block
<< sbi
->alloc_blksz_shift
) >>
442 if ((last_fs_block
> (sector_t
)(~0ULL) >> (sbi
->alloc_blksz_shift
- 9)) ||
443 (last_fs_page
> (pgoff_t
)(~0ULL))) {
444 pr_err("filesystem size too large\n");
448 /* Set up operations so we can load metadata */
449 sb
->s_op
= &hfsplus_sops
;
450 sb
->s_maxbytes
= MAX_LFS_FILESIZE
;
452 if (!(vhdr
->attributes
& cpu_to_be32(HFSPLUS_VOL_UNMNT
))) {
453 pr_warn("Filesystem was not cleanly unmounted, running fsck.hfsplus is recommended. mounting read-only.\n");
454 sb
->s_flags
|= MS_RDONLY
;
455 } else if (test_and_clear_bit(HFSPLUS_SB_FORCE
, &sbi
->flags
)) {
457 } else if (vhdr
->attributes
& cpu_to_be32(HFSPLUS_VOL_SOFTLOCK
)) {
458 pr_warn("Filesystem is marked locked, mounting read-only.\n");
459 sb
->s_flags
|= MS_RDONLY
;
460 } else if ((vhdr
->attributes
& cpu_to_be32(HFSPLUS_VOL_JOURNALED
)) &&
461 !(sb
->s_flags
& MS_RDONLY
)) {
462 pr_warn("write access to a journaled filesystem is not supported, use the force option at your own risk, mounting read-only.\n");
463 sb
->s_flags
|= MS_RDONLY
;
468 /* Load metadata objects (B*Trees) */
469 sbi
->ext_tree
= hfs_btree_open(sb
, HFSPLUS_EXT_CNID
);
470 if (!sbi
->ext_tree
) {
471 pr_err("failed to load extents file\n");
474 sbi
->cat_tree
= hfs_btree_open(sb
, HFSPLUS_CAT_CNID
);
475 if (!sbi
->cat_tree
) {
476 pr_err("failed to load catalog file\n");
477 goto out_close_ext_tree
;
479 atomic_set(&sbi
->attr_tree_state
, HFSPLUS_EMPTY_ATTR_TREE
);
480 if (vhdr
->attr_file
.total_blocks
!= 0) {
481 sbi
->attr_tree
= hfs_btree_open(sb
, HFSPLUS_ATTR_CNID
);
482 if (!sbi
->attr_tree
) {
483 pr_err("failed to load attributes file\n");
484 goto out_close_cat_tree
;
486 atomic_set(&sbi
->attr_tree_state
, HFSPLUS_VALID_ATTR_TREE
);
488 sb
->s_xattr
= hfsplus_xattr_handlers
;
490 inode
= hfsplus_iget(sb
, HFSPLUS_ALLOC_CNID
);
492 pr_err("failed to load allocation file\n");
493 err
= PTR_ERR(inode
);
494 goto out_close_attr_tree
;
496 sbi
->alloc_file
= inode
;
498 /* Load the root directory */
499 root
= hfsplus_iget(sb
, HFSPLUS_ROOT_CNID
);
501 pr_err("failed to load root directory\n");
503 goto out_put_alloc_file
;
506 sb
->s_d_op
= &hfsplus_dentry_operations
;
507 sb
->s_root
= d_make_root(root
);
510 goto out_put_alloc_file
;
513 str
.len
= sizeof(HFSP_HIDDENDIR_NAME
) - 1;
514 str
.name
= HFSP_HIDDENDIR_NAME
;
515 err
= hfs_find_init(sbi
->cat_tree
, &fd
);
518 hfsplus_cat_build_key(sb
, fd
.search_key
, HFSPLUS_ROOT_CNID
, &str
);
519 if (!hfs_brec_read(&fd
, &entry
, sizeof(entry
))) {
521 if (entry
.type
!= cpu_to_be16(HFSPLUS_FOLDER
))
523 inode
= hfsplus_iget(sb
, be32_to_cpu(entry
.folder
.id
));
525 err
= PTR_ERR(inode
);
528 sbi
->hidden_dir
= inode
;
532 if (!(sb
->s_flags
& MS_RDONLY
)) {
534 * H+LX == hfsplusutils, H+Lx == this driver, H+lx is unused
535 * all three are registered with Apple for our use
537 vhdr
->last_mount_vers
= cpu_to_be32(HFSP_MOUNT_VERSION
);
538 vhdr
->modify_date
= hfsp_now2mt();
539 be32_add_cpu(&vhdr
->write_count
, 1);
540 vhdr
->attributes
&= cpu_to_be32(~HFSPLUS_VOL_UNMNT
);
541 vhdr
->attributes
|= cpu_to_be32(HFSPLUS_VOL_INCNSTNT
);
542 hfsplus_sync_fs(sb
, 1);
544 if (!sbi
->hidden_dir
) {
545 mutex_lock(&sbi
->vh_mutex
);
546 sbi
->hidden_dir
= hfsplus_new_inode(sb
, S_IFDIR
);
547 if (!sbi
->hidden_dir
) {
548 mutex_unlock(&sbi
->vh_mutex
);
552 err
= hfsplus_create_cat(sbi
->hidden_dir
->i_ino
, root
,
553 &str
, sbi
->hidden_dir
);
555 mutex_unlock(&sbi
->vh_mutex
);
556 goto out_put_hidden_dir
;
559 err
= hfsplus_init_inode_security(sbi
->hidden_dir
,
561 if (err
== -EOPNOTSUPP
)
562 err
= 0; /* Operation is not supported. */
565 * Try to delete anyway without
568 hfsplus_delete_cat(sbi
->hidden_dir
->i_ino
,
570 mutex_unlock(&sbi
->vh_mutex
);
571 goto out_put_hidden_dir
;
574 mutex_unlock(&sbi
->vh_mutex
);
575 hfsplus_mark_inode_dirty(sbi
->hidden_dir
,
576 HFSPLUS_I_CAT_DIRTY
);
580 unload_nls(sbi
->nls
);
585 iput(sbi
->hidden_dir
);
590 iput(sbi
->alloc_file
);
592 hfs_btree_close(sbi
->attr_tree
);
594 hfs_btree_close(sbi
->cat_tree
);
596 hfs_btree_close(sbi
->ext_tree
);
598 kfree(sbi
->s_vhdr_buf
);
599 kfree(sbi
->s_backup_vhdr_buf
);
601 unload_nls(sbi
->nls
);
608 MODULE_AUTHOR("Brad Boyer");
609 MODULE_DESCRIPTION("Extended Macintosh Filesystem");
610 MODULE_LICENSE("GPL");
612 static struct kmem_cache
*hfsplus_inode_cachep
;
614 static struct inode
*hfsplus_alloc_inode(struct super_block
*sb
)
616 struct hfsplus_inode_info
*i
;
618 i
= kmem_cache_alloc(hfsplus_inode_cachep
, GFP_KERNEL
);
619 return i
? &i
->vfs_inode
: NULL
;
622 static void hfsplus_i_callback(struct rcu_head
*head
)
624 struct inode
*inode
= container_of(head
, struct inode
, i_rcu
);
626 kmem_cache_free(hfsplus_inode_cachep
, HFSPLUS_I(inode
));
629 static void hfsplus_destroy_inode(struct inode
*inode
)
631 call_rcu(&inode
->i_rcu
, hfsplus_i_callback
);
634 #define HFSPLUS_INODE_SIZE sizeof(struct hfsplus_inode_info)
636 static struct dentry
*hfsplus_mount(struct file_system_type
*fs_type
,
637 int flags
, const char *dev_name
, void *data
)
639 return mount_bdev(fs_type
, flags
, dev_name
, data
, hfsplus_fill_super
);
642 static struct file_system_type hfsplus_fs_type
= {
643 .owner
= THIS_MODULE
,
645 .mount
= hfsplus_mount
,
646 .kill_sb
= kill_block_super
,
647 .fs_flags
= FS_REQUIRES_DEV
,
649 MODULE_ALIAS_FS("hfsplus");
651 static void hfsplus_init_once(void *p
)
653 struct hfsplus_inode_info
*i
= p
;
655 inode_init_once(&i
->vfs_inode
);
658 static int __init
init_hfsplus_fs(void)
662 hfsplus_inode_cachep
= kmem_cache_create("hfsplus_icache",
663 HFSPLUS_INODE_SIZE
, 0, SLAB_HWCACHE_ALIGN
,
665 if (!hfsplus_inode_cachep
)
667 err
= hfsplus_create_attr_tree_cache();
669 goto destroy_inode_cache
;
670 err
= register_filesystem(&hfsplus_fs_type
);
672 goto destroy_attr_tree_cache
;
675 destroy_attr_tree_cache
:
676 hfsplus_destroy_attr_tree_cache();
679 kmem_cache_destroy(hfsplus_inode_cachep
);
684 static void __exit
exit_hfsplus_fs(void)
686 unregister_filesystem(&hfsplus_fs_type
);
689 * Make sure all delayed rcu free inodes are flushed before we
693 hfsplus_destroy_attr_tree_cache();
694 kmem_cache_destroy(hfsplus_inode_cachep
);
697 module_init(init_hfsplus_fs
)
698 module_exit(exit_hfsplus_fs
)