/*
 * linux/fs/hfsplus/super.c
 *
 * Brad Boyer (flar@allandria.com)
 * (C) 2003 Ardis Technologies <roman@ardistech.com>
 */
#include <linux/module.h>
#include <linux/init.h>
#include <linux/pagemap.h>
#include <linux/blkdev.h>
#include <linux/backing-dev.h>
#include <linux/slab.h>
#include <linux/vfs.h>
#include <linux/nls.h>
static struct inode *hfsplus_alloc_inode(struct super_block *sb);
static void hfsplus_destroy_inode(struct inode *inode);

#include "hfsplus_fs.h"
#include "xattr.h"
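/*
 * Read one of the special system inodes (extents overflow, catalog,
 * allocation bitmap, startup or attributes file) by copying its fork
 * record straight out of the in-memory volume header.
 */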
static int hfsplus_system_read_inode(struct inode *inode)
{
	struct hfsplus_vh *vhdr = HFSPLUS_SB(inode->i_sb)->s_vhdr;

	switch (inode->i_ino) {
	case HFSPLUS_EXT_CNID:
		hfsplus_inode_read_fork(inode, &vhdr->ext_file);
		inode->i_mapping->a_ops = &hfsplus_btree_aops;
		break;
	case HFSPLUS_CAT_CNID:
		hfsplus_inode_read_fork(inode, &vhdr->cat_file);
		inode->i_mapping->a_ops = &hfsplus_btree_aops;
		break;
	case HFSPLUS_ALLOC_CNID:
		hfsplus_inode_read_fork(inode, &vhdr->alloc_file);
		inode->i_mapping->a_ops = &hfsplus_aops;
		break;
	case HFSPLUS_START_CNID:
		hfsplus_inode_read_fork(inode, &vhdr->start_file);
		break;
	case HFSPLUS_ATTR_CNID:
		hfsplus_inode_read_fork(inode, &vhdr->attr_file);
		inode->i_mapping->a_ops = &hfsplus_btree_aops;
		break;
	default:
		return -EIO;
	}

	return 0;
}
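/*
 * Look up an in-core inode by catalog node ID, reading it in on first
 * use: user-visible inodes come from the catalog tree, while CNIDs
 * below HFSPLUS_FIRSTUSER_CNID are read from the volume header.
 */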
struct inode *hfsplus_iget(struct super_block *sb, unsigned long ino)
{
	struct hfs_find_data fd;
	struct inode *inode;
	int err;

	inode = iget_locked(sb, ino);
	if (!inode)
		return ERR_PTR(-ENOMEM);
	if (!(inode->i_state & I_NEW))
		return inode;

	INIT_LIST_HEAD(&HFSPLUS_I(inode)->open_dir_list);
	spin_lock_init(&HFSPLUS_I(inode)->open_dir_lock);
	mutex_init(&HFSPLUS_I(inode)->extents_lock);
	HFSPLUS_I(inode)->flags = 0;
	HFSPLUS_I(inode)->extent_state = 0;
	HFSPLUS_I(inode)->rsrc_inode = NULL;
	atomic_set(&HFSPLUS_I(inode)->opencnt, 0);

	if (inode->i_ino >= HFSPLUS_FIRSTUSER_CNID ||
	    inode->i_ino == HFSPLUS_ROOT_CNID) {
		err = hfs_find_init(HFSPLUS_SB(inode->i_sb)->cat_tree, &fd);
		if (!err) {
			err = hfsplus_find_cat(inode->i_sb, inode->i_ino, &fd);
			if (!err)
				err = hfsplus_cat_read_inode(inode, &fd);
			hfs_find_exit(&fd);
		}
	} else {
		err = hfsplus_system_read_inode(inode);
	}

	if (err) {
		iget_failed(inode);
		return ERR_PTR(err);
	}

	unlock_new_inode(inode);
	return inode;
}
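/*
 * Write the fork data of a special system inode back into the volume
 * header and, where one exists, flush the corresponding B-tree.
 */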
static int hfsplus_system_write_inode(struct inode *inode)
{
	struct hfsplus_sb_info *sbi = HFSPLUS_SB(inode->i_sb);
	struct hfsplus_vh *vhdr = sbi->s_vhdr;
	struct hfsplus_fork_raw *fork;
	struct hfs_btree *tree = NULL;

	switch (inode->i_ino) {
	case HFSPLUS_EXT_CNID:
		fork = &vhdr->ext_file;
		tree = sbi->ext_tree;
		break;
	case HFSPLUS_CAT_CNID:
		fork = &vhdr->cat_file;
		tree = sbi->cat_tree;
		break;
	case HFSPLUS_ALLOC_CNID:
		fork = &vhdr->alloc_file;
		break;
	case HFSPLUS_START_CNID:
		fork = &vhdr->start_file;
		break;
	case HFSPLUS_ATTR_CNID:
		fork = &vhdr->attr_file;
		tree = sbi->attr_tree;
		break;
	default:
		return -EIO;
	}

	if (fork->total_size != cpu_to_be64(inode->i_size)) {
		set_bit(HFSPLUS_SB_WRITEBACKUP, &sbi->flags);
		hfsplus_mark_mdb_dirty(inode->i_sb);
	}
	hfsplus_inode_write_fork(inode, fork);
	if (tree) {
		int err = hfs_btree_write(tree);

		if (err) {
			pr_err("b-tree write err: %d, ino %lu\n",
			       err, inode->i_ino);
			return err;
		}
	}
	return 0;
}
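/*
 * ->write_inode callback: flush any cached extents, then persist the
 * inode either through its catalog record or, for system inodes,
 * through the volume header.
 */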
static int hfsplus_write_inode(struct inode *inode,
		struct writeback_control *wbc)
{
	int err;

	hfs_dbg(INODE, "hfsplus_write_inode: %lu\n", inode->i_ino);

	err = hfsplus_ext_write_extent(inode);
	if (err)
		return err;

	if (inode->i_ino >= HFSPLUS_FIRSTUSER_CNID ||
	    inode->i_ino == HFSPLUS_ROOT_CNID)
		return hfsplus_cat_write_inode(inode);
	else
		return hfsplus_system_write_inode(inode);
}
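/*
 * ->evict_inode callback: drop the page cache for the inode and, for a
 * resource-fork inode, break its link back to the data-fork inode.
 */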
static void hfsplus_evict_inode(struct inode *inode)
{
	hfs_dbg(INODE, "hfsplus_evict_inode: %lu\n", inode->i_ino);
	truncate_inode_pages_final(&inode->i_data);
	clear_inode(inode);
	if (HFSPLUS_IS_RSRC(inode)) {
		HFSPLUS_I(HFSPLUS_I(inode)->rsrc_inode)->rsrc_inode = NULL;
		iput(HFSPLUS_I(inode)->rsrc_inode);
	}
}
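/*
 * Sync the volume: flush the special metadata inodes, copy the updated
 * counters into the volume header and write it (plus the backup header
 * when it changed) back to disk.
 */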
static int hfsplus_sync_fs(struct super_block *sb, int wait)
{
	struct hfsplus_sb_info *sbi = HFSPLUS_SB(sb);
	struct hfsplus_vh *vhdr = sbi->s_vhdr;
	int write_backup = 0;
	int error, error2;

	if (!wait)
		return 0;

	hfs_dbg(SUPER, "hfsplus_sync_fs\n");

	/*
	 * Explicitly write out the special metadata inodes.
	 *
	 * While these special inodes are marked as hashed and written
	 * out periodically by the flusher threads we redirty them
	 * during writeout of normal inodes, and thus the livelock
	 * prevents us from getting the latest state to disk.
	 */
	error = filemap_write_and_wait(sbi->cat_tree->inode->i_mapping);
	error2 = filemap_write_and_wait(sbi->ext_tree->inode->i_mapping);
	if (!error)
		error = error2;
	if (sbi->attr_tree) {
		error2 =
		    filemap_write_and_wait(sbi->attr_tree->inode->i_mapping);
		if (!error)
			error = error2;
	}
	error2 = filemap_write_and_wait(sbi->alloc_file->i_mapping);
	if (!error)
		error = error2;

	mutex_lock(&sbi->vh_mutex);
	mutex_lock(&sbi->alloc_mutex);
	vhdr->free_blocks = cpu_to_be32(sbi->free_blocks);
	vhdr->next_cnid = cpu_to_be32(sbi->next_cnid);
	vhdr->folder_count = cpu_to_be32(sbi->folder_count);
	vhdr->file_count = cpu_to_be32(sbi->file_count);

	if (test_and_clear_bit(HFSPLUS_SB_WRITEBACKUP, &sbi->flags)) {
		memcpy(sbi->s_backup_vhdr, sbi->s_vhdr, sizeof(*sbi->s_vhdr));
		write_backup = 1;
	}

	error2 = hfsplus_submit_bio(sb,
				    sbi->part_start + HFSPLUS_VOLHEAD_SECTOR,
				    sbi->s_vhdr_buf, NULL, REQ_OP_WRITE,
				    REQ_SYNC);
	if (!error)
		error = error2;
	if (!write_backup)
		goto out;

	error2 = hfsplus_submit_bio(sb,
				    sbi->part_start + sbi->sect_count - 2,
				    sbi->s_backup_vhdr_buf, NULL, REQ_OP_WRITE,
				    REQ_SYNC);
	if (!error)
		error = error2;
out:
	mutex_unlock(&sbi->alloc_mutex);
	mutex_unlock(&sbi->vh_mutex);

	if (!test_bit(HFSPLUS_SB_NOBARRIER, &sbi->flags))
		blkdev_issue_flush(sb->s_bdev, GFP_KERNEL, NULL);

	return error;
}
static void delayed_sync_fs(struct work_struct *work)
{
	int err;
	struct hfsplus_sb_info *sbi;

	sbi = container_of(work, struct hfsplus_sb_info, sync_work.work);

	spin_lock(&sbi->work_lock);
	sbi->work_queued = 0;
	spin_unlock(&sbi->work_lock);

	err = hfsplus_sync_fs(sbi->alloc_file->i_sb, 1);
	if (err)
		pr_err("delayed sync fs err %d\n", err);
}
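/*
 * Schedule a delayed writeback of the dirty volume header rather than
 * writing it synchronously on every change.
 */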
void hfsplus_mark_mdb_dirty(struct super_block *sb)
{
	struct hfsplus_sb_info *sbi = HFSPLUS_SB(sb);
	unsigned long delay;

	if (!(sb->s_flags & SB_ACTIVE))
		return;

	spin_lock(&sbi->work_lock);
	if (!sbi->work_queued) {
		delay = msecs_to_jiffies(dirty_writeback_interval * 10);
		queue_delayed_work(system_long_wq, &sbi->sync_work, delay);
		sbi->work_queued = 1;
	}
	spin_unlock(&sbi->work_lock);
}
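/*
 * ->put_super callback: mark the volume cleanly unmounted (when it was
 * writable), sync it, and release the B-trees, inodes and buffers held
 * by this mount.
 */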
static void hfsplus_put_super(struct super_block *sb)
{
	struct hfsplus_sb_info *sbi = HFSPLUS_SB(sb);

	hfs_dbg(SUPER, "hfsplus_put_super\n");

	cancel_delayed_work_sync(&sbi->sync_work);

	if (!sb_rdonly(sb) && sbi->s_vhdr) {
		struct hfsplus_vh *vhdr = sbi->s_vhdr;

		vhdr->modify_date = hfsp_now2mt();
		vhdr->attributes |= cpu_to_be32(HFSPLUS_VOL_UNMNT);
		vhdr->attributes &= cpu_to_be32(~HFSPLUS_VOL_INCNSTNT);

		hfsplus_sync_fs(sb, 1);
	}

	hfs_btree_close(sbi->attr_tree);
	hfs_btree_close(sbi->cat_tree);
	hfs_btree_close(sbi->ext_tree);
	iput(sbi->alloc_file);
	iput(sbi->hidden_dir);
	kfree(sbi->s_vhdr_buf);
	kfree(sbi->s_backup_vhdr_buf);
	unload_nls(sbi->nls);
	kfree(sb->s_fs_info);
	sb->s_fs_info = NULL;
}
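/*
 * ->statfs callback: report sizes in units of sb->s_blocksize by scaling
 * allocation-block counts with fs_shift, and derive the free inode count
 * from the next catalog node ID.
 */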
static int hfsplus_statfs(struct dentry *dentry, struct kstatfs *buf)
{
	struct super_block *sb = dentry->d_sb;
	struct hfsplus_sb_info *sbi = HFSPLUS_SB(sb);
	u64 id = huge_encode_dev(sb->s_bdev->bd_dev);

	buf->f_type = HFSPLUS_SUPER_MAGIC;
	buf->f_bsize = sb->s_blocksize;
	buf->f_blocks = sbi->total_blocks << sbi->fs_shift;
	buf->f_bfree = sbi->free_blocks << sbi->fs_shift;
	buf->f_bavail = buf->f_bfree;
	buf->f_files = 0xFFFFFFFF;
	buf->f_ffree = 0xFFFFFFFF - sbi->next_cnid;
	buf->f_fsid.val[0] = (u32)id;
	buf->f_fsid.val[1] = (u32)(id >> 32);
	buf->f_namelen = HFSPLUS_MAX_STRLEN;

	return 0;
}
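/*
 * ->remount_fs callback: when switching from read-only to read-write,
 * fall back to read-only if the volume is dirty, soft-locked or
 * journaled (the latter two can be overridden with the force option).
 */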
static int hfsplus_remount(struct super_block *sb, int *flags, char *data)
{
	sync_filesystem(sb);
	if ((bool)(*flags & SB_RDONLY) == sb_rdonly(sb))
		return 0;
	if (!(*flags & SB_RDONLY)) {
		struct hfsplus_vh *vhdr = HFSPLUS_SB(sb)->s_vhdr;
		int force = 0;

		if (!hfsplus_parse_options_remount(data, &force))
			return -EINVAL;

		if (!(vhdr->attributes & cpu_to_be32(HFSPLUS_VOL_UNMNT))) {
			pr_warn("filesystem was not cleanly unmounted, running fsck.hfsplus is recommended. leaving read-only.\n");
			sb->s_flags |= SB_RDONLY;
			*flags |= SB_RDONLY;
		} else if (force) {
			/* nothing */
		} else if (vhdr->attributes &
				cpu_to_be32(HFSPLUS_VOL_SOFTLOCK)) {
			pr_warn("filesystem is marked locked, leaving read-only.\n");
			sb->s_flags |= SB_RDONLY;
			*flags |= SB_RDONLY;
		} else if (vhdr->attributes &
				cpu_to_be32(HFSPLUS_VOL_JOURNALED)) {
			pr_warn("filesystem is marked journaled, leaving read-only.\n");
			sb->s_flags |= SB_RDONLY;
			*flags |= SB_RDONLY;
		}
	}
	return 0;
}
static const struct super_operations hfsplus_sops = {
	.alloc_inode	= hfsplus_alloc_inode,
	.destroy_inode	= hfsplus_destroy_inode,
	.write_inode	= hfsplus_write_inode,
	.evict_inode	= hfsplus_evict_inode,
	.put_super	= hfsplus_put_super,
	.sync_fs	= hfsplus_sync_fs,
	.statfs		= hfsplus_statfs,
	.remount_fs	= hfsplus_remount,
	.show_options	= hfsplus_show_options,
};
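/*
 * Fill in the superblock at mount time: read and validate the volume
 * header, open the extents/catalog/attributes B-trees, load the
 * allocation file and the root directory, and set up the hidden
 * metadata directory (HFSP_HIDDENDIR_NAME).
 */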
static int hfsplus_fill_super(struct super_block *sb, void *data, int silent)
{
	struct hfsplus_vh *vhdr;
	struct hfsplus_sb_info *sbi;
	hfsplus_cat_entry entry;
	struct hfs_find_data fd;
	struct inode *root, *inode;
	struct qstr str;
	struct nls_table *nls = NULL;
	u64 last_fs_block, last_fs_page;
	int err;

	err = -ENOMEM;
	sbi = kzalloc(sizeof(*sbi), GFP_KERNEL);
	if (!sbi)
		goto out;

	sb->s_fs_info = sbi;
	mutex_init(&sbi->alloc_mutex);
	mutex_init(&sbi->vh_mutex);
	spin_lock_init(&sbi->work_lock);
	INIT_DELAYED_WORK(&sbi->sync_work, delayed_sync_fs);
	hfsplus_fill_defaults(sbi);

	err = -EINVAL;
	if (!hfsplus_parse_options(data, sbi)) {
		pr_err("unable to parse mount options\n");
		goto out_unload_nls;
	}
	/* temporarily use utf8 to correctly find the hidden dir below */
	nls = sbi->nls;
	sbi->nls = load_nls("utf8");
	if (!sbi->nls) {
		pr_err("unable to load nls for utf8\n");
		goto out_unload_nls;
	}

	/* Grab the volume header */
	if (hfsplus_read_wrapper(sb)) {
		if (!silent)
			pr_warn("unable to find HFS+ superblock\n");
		goto out_unload_nls;
	}
	vhdr = sbi->s_vhdr;
	/* Copy parts of the volume header into the superblock */
	sb->s_magic = HFSPLUS_VOLHEAD_SIG;
	if (be16_to_cpu(vhdr->version) < HFSPLUS_MIN_VERSION ||
	    be16_to_cpu(vhdr->version) > HFSPLUS_CURRENT_VERSION) {
		pr_err("wrong filesystem version\n");
		goto out_free_vhdr;
	}
	sbi->total_blocks = be32_to_cpu(vhdr->total_blocks);
	sbi->free_blocks = be32_to_cpu(vhdr->free_blocks);
	sbi->next_cnid = be32_to_cpu(vhdr->next_cnid);
	sbi->file_count = be32_to_cpu(vhdr->file_count);
	sbi->folder_count = be32_to_cpu(vhdr->folder_count);
	sbi->data_clump_blocks =
		be32_to_cpu(vhdr->data_clump_sz) >> sbi->alloc_blksz_shift;
	if (!sbi->data_clump_blocks)
		sbi->data_clump_blocks = 1;
	sbi->rsrc_clump_blocks =
		be32_to_cpu(vhdr->rsrc_clump_sz) >> sbi->alloc_blksz_shift;
	if (!sbi->rsrc_clump_blocks)
		sbi->rsrc_clump_blocks = 1;
	err = -EFBIG;
	last_fs_block = sbi->total_blocks - 1;
	last_fs_page = (last_fs_block << sbi->alloc_blksz_shift) >>
			PAGE_SHIFT;

	if ((last_fs_block > (sector_t)(~0ULL) >> (sbi->alloc_blksz_shift - 9)) ||
	    (last_fs_page > (pgoff_t)(~0ULL))) {
		pr_err("filesystem size too large\n");
		goto out_free_vhdr;
	}

	/* Set up operations so we can load metadata */
	sb->s_op = &hfsplus_sops;
	sb->s_maxbytes = MAX_LFS_FILESIZE;
	if (!(vhdr->attributes & cpu_to_be32(HFSPLUS_VOL_UNMNT))) {
		pr_warn("Filesystem was not cleanly unmounted, running fsck.hfsplus is recommended. mounting read-only.\n");
		sb->s_flags |= SB_RDONLY;
	} else if (test_and_clear_bit(HFSPLUS_SB_FORCE, &sbi->flags)) {
		/* nothing */
	} else if (vhdr->attributes & cpu_to_be32(HFSPLUS_VOL_SOFTLOCK)) {
		pr_warn("Filesystem is marked locked, mounting read-only.\n");
		sb->s_flags |= SB_RDONLY;
	} else if ((vhdr->attributes & cpu_to_be32(HFSPLUS_VOL_JOURNALED)) &&
			!sb_rdonly(sb)) {
		pr_warn("write access to a journaled filesystem is not supported, use the force option at your own risk, mounting read-only.\n");
		sb->s_flags |= SB_RDONLY;
	}

	err = -EINVAL;
	/* Load metadata objects (B*Trees) */
	sbi->ext_tree = hfs_btree_open(sb, HFSPLUS_EXT_CNID);
	if (!sbi->ext_tree) {
		pr_err("failed to load extents file\n");
		goto out_free_vhdr;
	}
	sbi->cat_tree = hfs_btree_open(sb, HFSPLUS_CAT_CNID);
	if (!sbi->cat_tree) {
		pr_err("failed to load catalog file\n");
		goto out_close_ext_tree;
	}
	atomic_set(&sbi->attr_tree_state, HFSPLUS_EMPTY_ATTR_TREE);
	if (vhdr->attr_file.total_blocks != 0) {
		sbi->attr_tree = hfs_btree_open(sb, HFSPLUS_ATTR_CNID);
		if (!sbi->attr_tree) {
			pr_err("failed to load attributes file\n");
			goto out_close_cat_tree;
		}
		atomic_set(&sbi->attr_tree_state, HFSPLUS_VALID_ATTR_TREE);
	}
	sb->s_xattr = hfsplus_xattr_handlers;

	inode = hfsplus_iget(sb, HFSPLUS_ALLOC_CNID);
	if (IS_ERR(inode)) {
		pr_err("failed to load allocation file\n");
		err = PTR_ERR(inode);
		goto out_close_attr_tree;
	}
	sbi->alloc_file = inode;
	/* Load the root directory */
	root = hfsplus_iget(sb, HFSPLUS_ROOT_CNID);
	if (IS_ERR(root)) {
		pr_err("failed to load root directory\n");
		err = PTR_ERR(root);
		goto out_put_alloc_file;
	}

	sb->s_d_op = &hfsplus_dentry_operations;
	sb->s_root = d_make_root(root);
	if (!sb->s_root) {
		err = -ENOMEM;
		goto out_put_alloc_file;
	}

	str.len = sizeof(HFSP_HIDDENDIR_NAME) - 1;
	str.name = HFSP_HIDDENDIR_NAME;
	err = hfs_find_init(sbi->cat_tree, &fd);
	if (err)
		goto out_put_root;
	err = hfsplus_cat_build_key(sb, fd.search_key, HFSPLUS_ROOT_CNID, &str);
	if (unlikely(err < 0))
		goto out_put_root;
	if (!hfs_brec_read(&fd, &entry, sizeof(entry))) {
		hfs_find_exit(&fd);
		if (entry.type != cpu_to_be16(HFSPLUS_FOLDER)) {
			err = -EINVAL;
			goto out_put_root;
		}
		inode = hfsplus_iget(sb, be32_to_cpu(entry.folder.id));
		if (IS_ERR(inode)) {
			err = PTR_ERR(inode);
			goto out_put_root;
		}
		sbi->hidden_dir = inode;
	} else
		hfs_find_exit(&fd);
	if (!sb_rdonly(sb)) {
		/*
		 * H+LX == hfsplusutils, H+Lx == this driver, H+lx is unused
		 * all three are registered with Apple for our use
		 */
		vhdr->last_mount_vers = cpu_to_be32(HFSP_MOUNT_VERSION);
		vhdr->modify_date = hfsp_now2mt();
		be32_add_cpu(&vhdr->write_count, 1);
		vhdr->attributes &= cpu_to_be32(~HFSPLUS_VOL_UNMNT);
		vhdr->attributes |= cpu_to_be32(HFSPLUS_VOL_INCNSTNT);
		hfsplus_sync_fs(sb, 1);
		if (!sbi->hidden_dir) {
			mutex_lock(&sbi->vh_mutex);
			sbi->hidden_dir = hfsplus_new_inode(sb, root, S_IFDIR);
			if (!sbi->hidden_dir) {
				mutex_unlock(&sbi->vh_mutex);
				err = -ENOMEM;
				goto out_put_root;
			}
			err = hfsplus_create_cat(sbi->hidden_dir->i_ino, root,
						 &str, sbi->hidden_dir);
			if (err) {
				mutex_unlock(&sbi->vh_mutex);
				goto out_put_hidden_dir;
			}

			err = hfsplus_init_inode_security(sbi->hidden_dir,
								root, &str);
			if (err == -EOPNOTSUPP)
				err = 0; /* Operation is not supported. */
			else if (err) {
				/*
				 * Try to delete anyway without
				 * error analysis.
				 */
				hfsplus_delete_cat(sbi->hidden_dir->i_ino,
							root, &str);
				mutex_unlock(&sbi->vh_mutex);
				goto out_put_hidden_dir;
			}

			mutex_unlock(&sbi->vh_mutex);
			hfsplus_mark_inode_dirty(sbi->hidden_dir,
						 HFSPLUS_I_CAT_DIRTY);
		}
	}
	unload_nls(sbi->nls);
	sbi->nls = nls;
	return 0;

out_put_hidden_dir:
	cancel_delayed_work_sync(&sbi->sync_work);
	iput(sbi->hidden_dir);
out_put_root:
	dput(sb->s_root);
	sb->s_root = NULL;
out_put_alloc_file:
	iput(sbi->alloc_file);
out_close_attr_tree:
	hfs_btree_close(sbi->attr_tree);
out_close_cat_tree:
	hfs_btree_close(sbi->cat_tree);
out_close_ext_tree:
	hfs_btree_close(sbi->ext_tree);
out_free_vhdr:
	kfree(sbi->s_vhdr_buf);
	kfree(sbi->s_backup_vhdr_buf);
out_unload_nls:
	unload_nls(sbi->nls);
	unload_nls(nls);
	kfree(sbi);
out:
	return err;
}
MODULE_AUTHOR("Brad Boyer");
MODULE_DESCRIPTION("Extended Macintosh Filesystem");
MODULE_LICENSE("GPL");
static struct kmem_cache *hfsplus_inode_cachep;

static struct inode *hfsplus_alloc_inode(struct super_block *sb)
{
	struct hfsplus_inode_info *i;

	i = kmem_cache_alloc(hfsplus_inode_cachep, GFP_KERNEL);
	return i ? &i->vfs_inode : NULL;
}
static void hfsplus_i_callback(struct rcu_head *head)
{
	struct inode *inode = container_of(head, struct inode, i_rcu);

	kmem_cache_free(hfsplus_inode_cachep, HFSPLUS_I(inode));
}

static void hfsplus_destroy_inode(struct inode *inode)
{
	call_rcu(&inode->i_rcu, hfsplus_i_callback);
}
#define HFSPLUS_INODE_SIZE	sizeof(struct hfsplus_inode_info)

static struct dentry *hfsplus_mount(struct file_system_type *fs_type,
			int flags, const char *dev_name, void *data)
{
	return mount_bdev(fs_type, flags, dev_name, data, hfsplus_fill_super);
}
static struct file_system_type hfsplus_fs_type = {
	.owner		= THIS_MODULE,
	.name		= "hfsplus",
	.mount		= hfsplus_mount,
	.kill_sb	= kill_block_super,
	.fs_flags	= FS_REQUIRES_DEV,
};
MODULE_ALIAS_FS("hfsplus");
static void hfsplus_init_once(void *p)
{
	struct hfsplus_inode_info *i = p;

	inode_init_once(&i->vfs_inode);
}
static int __init init_hfsplus_fs(void)
{
	int err;

	hfsplus_inode_cachep = kmem_cache_create("hfsplus_icache",
		HFSPLUS_INODE_SIZE, 0, SLAB_HWCACHE_ALIGN|SLAB_ACCOUNT,
		hfsplus_init_once);
	if (!hfsplus_inode_cachep)
		return -ENOMEM;
	err = hfsplus_create_attr_tree_cache();
	if (err)
		goto destroy_inode_cache;
	err = register_filesystem(&hfsplus_fs_type);
	if (err)
		goto destroy_attr_tree_cache;
	return 0;

destroy_attr_tree_cache:
	hfsplus_destroy_attr_tree_cache();

destroy_inode_cache:
	kmem_cache_destroy(hfsplus_inode_cachep);

	return err;
}
static void __exit exit_hfsplus_fs(void)
{
	unregister_filesystem(&hfsplus_fs_type);

	/*
	 * Make sure all delayed rcu free inodes are flushed before we
	 * destroy the cache.
	 */
	rcu_barrier();
	hfsplus_destroy_attr_tree_cache();
	kmem_cache_destroy(hfsplus_inode_cachep);
}
module_init(init_hfsplus_fs)
module_exit(exit_hfsplus_fs)