/*
 *  linux/fs/hfsplus/super.c
 *
 * Copyright (C) 2001
 * Brad Boyer (flar@allandria.com)
 * (C) 2003 Ardis Technologies <roman@ardistech.com>
 */
10 #include <linux/module.h>
11 #include <linux/init.h>
12 #include <linux/pagemap.h>
13 #include <linux/blkdev.h>
14 #include <linux/backing-dev.h>
16 #include <linux/slab.h>
17 #include <linux/vfs.h>
18 #include <linux/nls.h>
20 static struct inode
*hfsplus_alloc_inode(struct super_block
*sb
);
21 static void hfsplus_destroy_inode(struct inode
*inode
);
23 #include "hfsplus_fs.h"
26 static int hfsplus_system_read_inode(struct inode
*inode
)
28 struct hfsplus_vh
*vhdr
= HFSPLUS_SB(inode
->i_sb
)->s_vhdr
;
30 switch (inode
->i_ino
) {
31 case HFSPLUS_EXT_CNID
:
32 hfsplus_inode_read_fork(inode
, &vhdr
->ext_file
);
33 inode
->i_mapping
->a_ops
= &hfsplus_btree_aops
;
35 case HFSPLUS_CAT_CNID
:
36 hfsplus_inode_read_fork(inode
, &vhdr
->cat_file
);
37 inode
->i_mapping
->a_ops
= &hfsplus_btree_aops
;
39 case HFSPLUS_ALLOC_CNID
:
40 hfsplus_inode_read_fork(inode
, &vhdr
->alloc_file
);
41 inode
->i_mapping
->a_ops
= &hfsplus_aops
;
43 case HFSPLUS_START_CNID
:
44 hfsplus_inode_read_fork(inode
, &vhdr
->start_file
);
46 case HFSPLUS_ATTR_CNID
:
47 hfsplus_inode_read_fork(inode
, &vhdr
->attr_file
);
48 inode
->i_mapping
->a_ops
= &hfsplus_btree_aops
;
57 struct inode
*hfsplus_iget(struct super_block
*sb
, unsigned long ino
)
59 struct hfs_find_data fd
;
63 inode
= iget_locked(sb
, ino
);
65 return ERR_PTR(-ENOMEM
);
66 if (!(inode
->i_state
& I_NEW
))
69 INIT_LIST_HEAD(&HFSPLUS_I(inode
)->open_dir_list
);
70 mutex_init(&HFSPLUS_I(inode
)->extents_lock
);
71 HFSPLUS_I(inode
)->flags
= 0;
72 HFSPLUS_I(inode
)->extent_state
= 0;
73 HFSPLUS_I(inode
)->rsrc_inode
= NULL
;
74 atomic_set(&HFSPLUS_I(inode
)->opencnt
, 0);
76 if (inode
->i_ino
>= HFSPLUS_FIRSTUSER_CNID
||
77 inode
->i_ino
== HFSPLUS_ROOT_CNID
) {
78 err
= hfs_find_init(HFSPLUS_SB(inode
->i_sb
)->cat_tree
, &fd
);
80 err
= hfsplus_find_cat(inode
->i_sb
, inode
->i_ino
, &fd
);
82 err
= hfsplus_cat_read_inode(inode
, &fd
);
86 err
= hfsplus_system_read_inode(inode
);
94 unlock_new_inode(inode
);
98 static int hfsplus_system_write_inode(struct inode
*inode
)
100 struct hfsplus_sb_info
*sbi
= HFSPLUS_SB(inode
->i_sb
);
101 struct hfsplus_vh
*vhdr
= sbi
->s_vhdr
;
102 struct hfsplus_fork_raw
*fork
;
103 struct hfs_btree
*tree
= NULL
;
105 switch (inode
->i_ino
) {
106 case HFSPLUS_EXT_CNID
:
107 fork
= &vhdr
->ext_file
;
108 tree
= sbi
->ext_tree
;
110 case HFSPLUS_CAT_CNID
:
111 fork
= &vhdr
->cat_file
;
112 tree
= sbi
->cat_tree
;
114 case HFSPLUS_ALLOC_CNID
:
115 fork
= &vhdr
->alloc_file
;
117 case HFSPLUS_START_CNID
:
118 fork
= &vhdr
->start_file
;
120 case HFSPLUS_ATTR_CNID
:
121 fork
= &vhdr
->attr_file
;
122 tree
= sbi
->attr_tree
;
128 if (fork
->total_size
!= cpu_to_be64(inode
->i_size
)) {
129 set_bit(HFSPLUS_SB_WRITEBACKUP
, &sbi
->flags
);
130 hfsplus_mark_mdb_dirty(inode
->i_sb
);
132 hfsplus_inode_write_fork(inode
, fork
);
134 int err
= hfs_btree_write(tree
);
137 pr_err("b-tree write err: %d, ino %lu\n",
145 static int hfsplus_write_inode(struct inode
*inode
,
146 struct writeback_control
*wbc
)
150 hfs_dbg(INODE
, "hfsplus_write_inode: %lu\n", inode
->i_ino
);
152 err
= hfsplus_ext_write_extent(inode
);
156 if (inode
->i_ino
>= HFSPLUS_FIRSTUSER_CNID
||
157 inode
->i_ino
== HFSPLUS_ROOT_CNID
)
158 return hfsplus_cat_write_inode(inode
);
160 return hfsplus_system_write_inode(inode
);
163 static void hfsplus_evict_inode(struct inode
*inode
)
165 hfs_dbg(INODE
, "hfsplus_evict_inode: %lu\n", inode
->i_ino
);
166 truncate_inode_pages_final(&inode
->i_data
);
168 if (HFSPLUS_IS_RSRC(inode
)) {
169 HFSPLUS_I(HFSPLUS_I(inode
)->rsrc_inode
)->rsrc_inode
= NULL
;
170 iput(HFSPLUS_I(inode
)->rsrc_inode
);
174 static int hfsplus_sync_fs(struct super_block
*sb
, int wait
)
176 struct hfsplus_sb_info
*sbi
= HFSPLUS_SB(sb
);
177 struct hfsplus_vh
*vhdr
= sbi
->s_vhdr
;
178 int write_backup
= 0;
184 hfs_dbg(SUPER
, "hfsplus_sync_fs\n");
187 * Explicitly write out the special metadata inodes.
189 * While these special inodes are marked as hashed and written
190 * out peridocically by the flusher threads we redirty them
191 * during writeout of normal inodes, and thus the life lock
192 * prevents us from getting the latest state to disk.
194 error
= filemap_write_and_wait(sbi
->cat_tree
->inode
->i_mapping
);
195 error2
= filemap_write_and_wait(sbi
->ext_tree
->inode
->i_mapping
);
198 if (sbi
->attr_tree
) {
200 filemap_write_and_wait(sbi
->attr_tree
->inode
->i_mapping
);
204 error2
= filemap_write_and_wait(sbi
->alloc_file
->i_mapping
);
208 mutex_lock(&sbi
->vh_mutex
);
209 mutex_lock(&sbi
->alloc_mutex
);
210 vhdr
->free_blocks
= cpu_to_be32(sbi
->free_blocks
);
211 vhdr
->next_cnid
= cpu_to_be32(sbi
->next_cnid
);
212 vhdr
->folder_count
= cpu_to_be32(sbi
->folder_count
);
213 vhdr
->file_count
= cpu_to_be32(sbi
->file_count
);
215 if (test_and_clear_bit(HFSPLUS_SB_WRITEBACKUP
, &sbi
->flags
)) {
216 memcpy(sbi
->s_backup_vhdr
, sbi
->s_vhdr
, sizeof(*sbi
->s_vhdr
));
220 error2
= hfsplus_submit_bio(sb
,
221 sbi
->part_start
+ HFSPLUS_VOLHEAD_SECTOR
,
222 sbi
->s_vhdr_buf
, NULL
, WRITE_SYNC
);
228 error2
= hfsplus_submit_bio(sb
,
229 sbi
->part_start
+ sbi
->sect_count
- 2,
230 sbi
->s_backup_vhdr_buf
, NULL
, WRITE_SYNC
);
234 mutex_unlock(&sbi
->alloc_mutex
);
235 mutex_unlock(&sbi
->vh_mutex
);
237 if (!test_bit(HFSPLUS_SB_NOBARRIER
, &sbi
->flags
))
238 blkdev_issue_flush(sb
->s_bdev
, GFP_KERNEL
, NULL
);
243 static void delayed_sync_fs(struct work_struct
*work
)
246 struct hfsplus_sb_info
*sbi
;
248 sbi
= container_of(work
, struct hfsplus_sb_info
, sync_work
.work
);
250 spin_lock(&sbi
->work_lock
);
251 sbi
->work_queued
= 0;
252 spin_unlock(&sbi
->work_lock
);
254 err
= hfsplus_sync_fs(sbi
->alloc_file
->i_sb
, 1);
256 pr_err("delayed sync fs err %d\n", err
);
259 void hfsplus_mark_mdb_dirty(struct super_block
*sb
)
261 struct hfsplus_sb_info
*sbi
= HFSPLUS_SB(sb
);
264 if (sb
->s_flags
& MS_RDONLY
)
267 spin_lock(&sbi
->work_lock
);
268 if (!sbi
->work_queued
) {
269 delay
= msecs_to_jiffies(dirty_writeback_interval
* 10);
270 queue_delayed_work(system_long_wq
, &sbi
->sync_work
, delay
);
271 sbi
->work_queued
= 1;
273 spin_unlock(&sbi
->work_lock
);
276 static void hfsplus_put_super(struct super_block
*sb
)
278 struct hfsplus_sb_info
*sbi
= HFSPLUS_SB(sb
);
280 hfs_dbg(SUPER
, "hfsplus_put_super\n");
282 cancel_delayed_work_sync(&sbi
->sync_work
);
284 if (!(sb
->s_flags
& MS_RDONLY
) && sbi
->s_vhdr
) {
285 struct hfsplus_vh
*vhdr
= sbi
->s_vhdr
;
287 vhdr
->modify_date
= hfsp_now2mt();
288 vhdr
->attributes
|= cpu_to_be32(HFSPLUS_VOL_UNMNT
);
289 vhdr
->attributes
&= cpu_to_be32(~HFSPLUS_VOL_INCNSTNT
);
291 hfsplus_sync_fs(sb
, 1);
294 hfs_btree_close(sbi
->attr_tree
);
295 hfs_btree_close(sbi
->cat_tree
);
296 hfs_btree_close(sbi
->ext_tree
);
297 iput(sbi
->alloc_file
);
298 iput(sbi
->hidden_dir
);
299 kfree(sbi
->s_vhdr_buf
);
300 kfree(sbi
->s_backup_vhdr_buf
);
301 unload_nls(sbi
->nls
);
302 kfree(sb
->s_fs_info
);
303 sb
->s_fs_info
= NULL
;
306 static int hfsplus_statfs(struct dentry
*dentry
, struct kstatfs
*buf
)
308 struct super_block
*sb
= dentry
->d_sb
;
309 struct hfsplus_sb_info
*sbi
= HFSPLUS_SB(sb
);
310 u64 id
= huge_encode_dev(sb
->s_bdev
->bd_dev
);
312 buf
->f_type
= HFSPLUS_SUPER_MAGIC
;
313 buf
->f_bsize
= sb
->s_blocksize
;
314 buf
->f_blocks
= sbi
->total_blocks
<< sbi
->fs_shift
;
315 buf
->f_bfree
= sbi
->free_blocks
<< sbi
->fs_shift
;
316 buf
->f_bavail
= buf
->f_bfree
;
317 buf
->f_files
= 0xFFFFFFFF;
318 buf
->f_ffree
= 0xFFFFFFFF - sbi
->next_cnid
;
319 buf
->f_fsid
.val
[0] = (u32
)id
;
320 buf
->f_fsid
.val
[1] = (u32
)(id
>> 32);
321 buf
->f_namelen
= HFSPLUS_MAX_STRLEN
;
326 static int hfsplus_remount(struct super_block
*sb
, int *flags
, char *data
)
329 if ((*flags
& MS_RDONLY
) == (sb
->s_flags
& MS_RDONLY
))
331 if (!(*flags
& MS_RDONLY
)) {
332 struct hfsplus_vh
*vhdr
= HFSPLUS_SB(sb
)->s_vhdr
;
335 if (!hfsplus_parse_options_remount(data
, &force
))
338 if (!(vhdr
->attributes
& cpu_to_be32(HFSPLUS_VOL_UNMNT
))) {
339 pr_warn("filesystem was not cleanly unmounted, running fsck.hfsplus is recommended. leaving read-only.\n");
340 sb
->s_flags
|= MS_RDONLY
;
344 } else if (vhdr
->attributes
&
345 cpu_to_be32(HFSPLUS_VOL_SOFTLOCK
)) {
346 pr_warn("filesystem is marked locked, leaving read-only.\n");
347 sb
->s_flags
|= MS_RDONLY
;
349 } else if (vhdr
->attributes
&
350 cpu_to_be32(HFSPLUS_VOL_JOURNALED
)) {
351 pr_warn("filesystem is marked journaled, leaving read-only.\n");
352 sb
->s_flags
|= MS_RDONLY
;
359 static const struct super_operations hfsplus_sops
= {
360 .alloc_inode
= hfsplus_alloc_inode
,
361 .destroy_inode
= hfsplus_destroy_inode
,
362 .write_inode
= hfsplus_write_inode
,
363 .evict_inode
= hfsplus_evict_inode
,
364 .put_super
= hfsplus_put_super
,
365 .sync_fs
= hfsplus_sync_fs
,
366 .statfs
= hfsplus_statfs
,
367 .remount_fs
= hfsplus_remount
,
368 .show_options
= hfsplus_show_options
,
371 static int hfsplus_fill_super(struct super_block
*sb
, void *data
, int silent
)
373 struct hfsplus_vh
*vhdr
;
374 struct hfsplus_sb_info
*sbi
;
375 hfsplus_cat_entry entry
;
376 struct hfs_find_data fd
;
377 struct inode
*root
, *inode
;
379 struct nls_table
*nls
= NULL
;
380 u64 last_fs_block
, last_fs_page
;
384 sbi
= kzalloc(sizeof(*sbi
), GFP_KERNEL
);
389 mutex_init(&sbi
->alloc_mutex
);
390 mutex_init(&sbi
->vh_mutex
);
391 spin_lock_init(&sbi
->work_lock
);
392 INIT_DELAYED_WORK(&sbi
->sync_work
, delayed_sync_fs
);
393 hfsplus_fill_defaults(sbi
);
396 if (!hfsplus_parse_options(data
, sbi
)) {
397 pr_err("unable to parse mount options\n");
401 /* temporarily use utf8 to correctly find the hidden dir below */
403 sbi
->nls
= load_nls("utf8");
405 pr_err("unable to load nls for utf8\n");
409 /* Grab the volume header */
410 if (hfsplus_read_wrapper(sb
)) {
412 pr_warn("unable to find HFS+ superblock\n");
417 /* Copy parts of the volume header into the superblock */
418 sb
->s_magic
= HFSPLUS_VOLHEAD_SIG
;
419 if (be16_to_cpu(vhdr
->version
) < HFSPLUS_MIN_VERSION
||
420 be16_to_cpu(vhdr
->version
) > HFSPLUS_CURRENT_VERSION
) {
421 pr_err("wrong filesystem version\n");
424 sbi
->total_blocks
= be32_to_cpu(vhdr
->total_blocks
);
425 sbi
->free_blocks
= be32_to_cpu(vhdr
->free_blocks
);
426 sbi
->next_cnid
= be32_to_cpu(vhdr
->next_cnid
);
427 sbi
->file_count
= be32_to_cpu(vhdr
->file_count
);
428 sbi
->folder_count
= be32_to_cpu(vhdr
->folder_count
);
429 sbi
->data_clump_blocks
=
430 be32_to_cpu(vhdr
->data_clump_sz
) >> sbi
->alloc_blksz_shift
;
431 if (!sbi
->data_clump_blocks
)
432 sbi
->data_clump_blocks
= 1;
433 sbi
->rsrc_clump_blocks
=
434 be32_to_cpu(vhdr
->rsrc_clump_sz
) >> sbi
->alloc_blksz_shift
;
435 if (!sbi
->rsrc_clump_blocks
)
436 sbi
->rsrc_clump_blocks
= 1;
439 last_fs_block
= sbi
->total_blocks
- 1;
440 last_fs_page
= (last_fs_block
<< sbi
->alloc_blksz_shift
) >>
443 if ((last_fs_block
> (sector_t
)(~0ULL) >> (sbi
->alloc_blksz_shift
- 9)) ||
444 (last_fs_page
> (pgoff_t
)(~0ULL))) {
445 pr_err("filesystem size too large\n");
449 /* Set up operations so we can load metadata */
450 sb
->s_op
= &hfsplus_sops
;
451 sb
->s_maxbytes
= MAX_LFS_FILESIZE
;
453 if (!(vhdr
->attributes
& cpu_to_be32(HFSPLUS_VOL_UNMNT
))) {
454 pr_warn("Filesystem was not cleanly unmounted, running fsck.hfsplus is recommended. mounting read-only.\n");
455 sb
->s_flags
|= MS_RDONLY
;
456 } else if (test_and_clear_bit(HFSPLUS_SB_FORCE
, &sbi
->flags
)) {
458 } else if (vhdr
->attributes
& cpu_to_be32(HFSPLUS_VOL_SOFTLOCK
)) {
459 pr_warn("Filesystem is marked locked, mounting read-only.\n");
460 sb
->s_flags
|= MS_RDONLY
;
461 } else if ((vhdr
->attributes
& cpu_to_be32(HFSPLUS_VOL_JOURNALED
)) &&
462 !(sb
->s_flags
& MS_RDONLY
)) {
463 pr_warn("write access to a journaled filesystem is not supported, use the force option at your own risk, mounting read-only.\n");
464 sb
->s_flags
|= MS_RDONLY
;
469 /* Load metadata objects (B*Trees) */
470 sbi
->ext_tree
= hfs_btree_open(sb
, HFSPLUS_EXT_CNID
);
471 if (!sbi
->ext_tree
) {
472 pr_err("failed to load extents file\n");
475 sbi
->cat_tree
= hfs_btree_open(sb
, HFSPLUS_CAT_CNID
);
476 if (!sbi
->cat_tree
) {
477 pr_err("failed to load catalog file\n");
478 goto out_close_ext_tree
;
480 atomic_set(&sbi
->attr_tree_state
, HFSPLUS_EMPTY_ATTR_TREE
);
481 if (vhdr
->attr_file
.total_blocks
!= 0) {
482 sbi
->attr_tree
= hfs_btree_open(sb
, HFSPLUS_ATTR_CNID
);
483 if (!sbi
->attr_tree
) {
484 pr_err("failed to load attributes file\n");
485 goto out_close_cat_tree
;
487 atomic_set(&sbi
->attr_tree_state
, HFSPLUS_VALID_ATTR_TREE
);
489 sb
->s_xattr
= hfsplus_xattr_handlers
;
491 inode
= hfsplus_iget(sb
, HFSPLUS_ALLOC_CNID
);
493 pr_err("failed to load allocation file\n");
494 err
= PTR_ERR(inode
);
495 goto out_close_attr_tree
;
497 sbi
->alloc_file
= inode
;
499 /* Load the root directory */
500 root
= hfsplus_iget(sb
, HFSPLUS_ROOT_CNID
);
502 pr_err("failed to load root directory\n");
504 goto out_put_alloc_file
;
507 sb
->s_d_op
= &hfsplus_dentry_operations
;
508 sb
->s_root
= d_make_root(root
);
511 goto out_put_alloc_file
;
514 str
.len
= sizeof(HFSP_HIDDENDIR_NAME
) - 1;
515 str
.name
= HFSP_HIDDENDIR_NAME
;
516 err
= hfs_find_init(sbi
->cat_tree
, &fd
);
519 err
= hfsplus_cat_build_key(sb
, fd
.search_key
, HFSPLUS_ROOT_CNID
, &str
);
520 if (unlikely(err
< 0))
522 if (!hfs_brec_read(&fd
, &entry
, sizeof(entry
))) {
524 if (entry
.type
!= cpu_to_be16(HFSPLUS_FOLDER
))
526 inode
= hfsplus_iget(sb
, be32_to_cpu(entry
.folder
.id
));
528 err
= PTR_ERR(inode
);
531 sbi
->hidden_dir
= inode
;
535 if (!(sb
->s_flags
& MS_RDONLY
)) {
537 * H+LX == hfsplusutils, H+Lx == this driver, H+lx is unused
538 * all three are registered with Apple for our use
540 vhdr
->last_mount_vers
= cpu_to_be32(HFSP_MOUNT_VERSION
);
541 vhdr
->modify_date
= hfsp_now2mt();
542 be32_add_cpu(&vhdr
->write_count
, 1);
543 vhdr
->attributes
&= cpu_to_be32(~HFSPLUS_VOL_UNMNT
);
544 vhdr
->attributes
|= cpu_to_be32(HFSPLUS_VOL_INCNSTNT
);
545 hfsplus_sync_fs(sb
, 1);
547 if (!sbi
->hidden_dir
) {
548 mutex_lock(&sbi
->vh_mutex
);
549 sbi
->hidden_dir
= hfsplus_new_inode(sb
, S_IFDIR
);
550 if (!sbi
->hidden_dir
) {
551 mutex_unlock(&sbi
->vh_mutex
);
555 err
= hfsplus_create_cat(sbi
->hidden_dir
->i_ino
, root
,
556 &str
, sbi
->hidden_dir
);
558 mutex_unlock(&sbi
->vh_mutex
);
559 goto out_put_hidden_dir
;
562 err
= hfsplus_init_inode_security(sbi
->hidden_dir
,
564 if (err
== -EOPNOTSUPP
)
565 err
= 0; /* Operation is not supported. */
568 * Try to delete anyway without
571 hfsplus_delete_cat(sbi
->hidden_dir
->i_ino
,
573 mutex_unlock(&sbi
->vh_mutex
);
574 goto out_put_hidden_dir
;
577 mutex_unlock(&sbi
->vh_mutex
);
578 hfsplus_mark_inode_dirty(sbi
->hidden_dir
,
579 HFSPLUS_I_CAT_DIRTY
);
583 unload_nls(sbi
->nls
);
588 iput(sbi
->hidden_dir
);
593 iput(sbi
->alloc_file
);
595 hfs_btree_close(sbi
->attr_tree
);
597 hfs_btree_close(sbi
->cat_tree
);
599 hfs_btree_close(sbi
->ext_tree
);
601 kfree(sbi
->s_vhdr_buf
);
602 kfree(sbi
->s_backup_vhdr_buf
);
604 unload_nls(sbi
->nls
);
611 MODULE_AUTHOR("Brad Boyer");
612 MODULE_DESCRIPTION("Extended Macintosh Filesystem");
613 MODULE_LICENSE("GPL");
615 static struct kmem_cache
*hfsplus_inode_cachep
;
617 static struct inode
*hfsplus_alloc_inode(struct super_block
*sb
)
619 struct hfsplus_inode_info
*i
;
621 i
= kmem_cache_alloc(hfsplus_inode_cachep
, GFP_KERNEL
);
622 return i
? &i
->vfs_inode
: NULL
;
625 static void hfsplus_i_callback(struct rcu_head
*head
)
627 struct inode
*inode
= container_of(head
, struct inode
, i_rcu
);
629 kmem_cache_free(hfsplus_inode_cachep
, HFSPLUS_I(inode
));
632 static void hfsplus_destroy_inode(struct inode
*inode
)
634 call_rcu(&inode
->i_rcu
, hfsplus_i_callback
);
637 #define HFSPLUS_INODE_SIZE sizeof(struct hfsplus_inode_info)
639 static struct dentry
*hfsplus_mount(struct file_system_type
*fs_type
,
640 int flags
, const char *dev_name
, void *data
)
642 return mount_bdev(fs_type
, flags
, dev_name
, data
, hfsplus_fill_super
);
645 static struct file_system_type hfsplus_fs_type
= {
646 .owner
= THIS_MODULE
,
648 .mount
= hfsplus_mount
,
649 .kill_sb
= kill_block_super
,
650 .fs_flags
= FS_REQUIRES_DEV
,
652 MODULE_ALIAS_FS("hfsplus");
654 static void hfsplus_init_once(void *p
)
656 struct hfsplus_inode_info
*i
= p
;
658 inode_init_once(&i
->vfs_inode
);
661 static int __init
init_hfsplus_fs(void)
665 hfsplus_inode_cachep
= kmem_cache_create("hfsplus_icache",
666 HFSPLUS_INODE_SIZE
, 0, SLAB_HWCACHE_ALIGN
|SLAB_ACCOUNT
,
668 if (!hfsplus_inode_cachep
)
670 err
= hfsplus_create_attr_tree_cache();
672 goto destroy_inode_cache
;
673 err
= register_filesystem(&hfsplus_fs_type
);
675 goto destroy_attr_tree_cache
;
678 destroy_attr_tree_cache
:
679 hfsplus_destroy_attr_tree_cache();
682 kmem_cache_destroy(hfsplus_inode_cachep
);
687 static void __exit
exit_hfsplus_fs(void)
689 unregister_filesystem(&hfsplus_fs_type
);
692 * Make sure all delayed rcu free inodes are flushed before we
696 hfsplus_destroy_attr_tree_cache();
697 kmem_cache_destroy(hfsplus_inode_cachep
);
700 module_init(init_hfsplus_fs
)
701 module_exit(exit_hfsplus_fs
)