/*
 *  linux/fs/hfsplus/inode.c
 *
 * Copyright (C) 2001
 * Brad Boyer (flar@allandria.com)
 * (C) 2003 Ardis Technologies <roman@ardistech.com>
 *
 * Inode handling routines
 */

#include <linux/blkdev.h>
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/pagemap.h>
#include <linux/mpage.h>
#include <linux/sched.h>

#include "hfsplus_fs.h"
#include "hfsplus_raw.h"
#include "xattr.h"

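/*
 * Buffered reads and writepage calls are delegated to the generic block
 * helpers, with hfsplus_get_block() providing the extent mapping.
 */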
static int hfsplus_readpage(struct file *file, struct page *page)
{
	return block_read_full_page(page, hfsplus_get_block);
}

static int hfsplus_writepage(struct page *page, struct writeback_control *wbc)
{
	return block_write_full_page(page, hfsplus_get_block, wbc);
}

static void hfsplus_write_failed(struct address_space *mapping, loff_t to)
{
	struct inode *inode = mapping->host;

	if (to > inode->i_size) {
		truncate_pagecache(inode, to, inode->i_size);
		hfsplus_file_truncate(inode);
	}
}

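/*
 * cont_write_begin() zero-fills the range between the on-disk size
 * (phys_size) and the write position; if it fails, any blocks
 * instantiated beyond i_size are trimmed again by hfsplus_write_failed().
 */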
static int hfsplus_write_begin(struct file *file, struct address_space *mapping,
			loff_t pos, unsigned len, unsigned flags,
			struct page **pagep, void **fsdata)
{
	int ret;

	*pagep = NULL;
	ret = cont_write_begin(file, mapping, pos, len, flags, pagep, fsdata,
				hfsplus_get_block,
				&HFSPLUS_I(mapping->host)->phys_size);
	if (unlikely(ret))
		hfsplus_write_failed(mapping, pos + len);

	return ret;
}

static sector_t hfsplus_bmap(struct address_space *mapping, sector_t block)
{
	return generic_block_bmap(mapping, block, hfsplus_get_block);
}

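/*
 * A metadata page may only be released if no cached btree node still
 * references it, so check (and, where possible, unhash and free) the
 * nodes covering this page before letting the buffers go.
 */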
static int hfsplus_releasepage(struct page *page, gfp_t mask)
{
	struct inode *inode = page->mapping->host;
	struct super_block *sb = inode->i_sb;
	struct hfs_btree *tree;
	struct hfs_bnode *node;
	u32 nidx;
	int i, res = 1;

	switch (inode->i_ino) {
	case HFSPLUS_EXT_CNID:
		tree = HFSPLUS_SB(sb)->ext_tree;
		break;
	case HFSPLUS_CAT_CNID:
		tree = HFSPLUS_SB(sb)->cat_tree;
		break;
	case HFSPLUS_ATTR_CNID:
		tree = HFSPLUS_SB(sb)->attr_tree;
		break;
	default:
		BUG();
		return 0;
	}
	if (!tree)
		return 0;
	if (tree->node_size >= PAGE_CACHE_SIZE) {
		nidx = page->index >>
			(tree->node_size_shift - PAGE_CACHE_SHIFT);
		spin_lock(&tree->hash_lock);
		node = hfs_bnode_findhash(tree, nidx);
		if (!node)
			;
		else if (atomic_read(&node->refcnt))
			res = 0;
		if (res && node) {
			hfs_bnode_unhash(node);
			hfs_bnode_free(node);
		}
		spin_unlock(&tree->hash_lock);
	} else {
		nidx = page->index <<
			(PAGE_CACHE_SHIFT - tree->node_size_shift);
		i = 1 << (PAGE_CACHE_SHIFT - tree->node_size_shift);
		spin_lock(&tree->hash_lock);
		do {
			node = hfs_bnode_findhash(tree, nidx++);
			if (!node)
				continue;
			if (atomic_read(&node->refcnt)) {
				res = 0;
				break;
			}
			hfs_bnode_unhash(node);
			hfs_bnode_free(node);
		} while (--i && nidx < tree->node_count);
		spin_unlock(&tree->hash_lock);
	}
	return res ? try_to_free_buffers(page) : 0;
}

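/* Direct I/O goes straight to the block device through the generic helper. */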
static ssize_t hfsplus_direct_IO(int rw, struct kiocb *iocb,
		const struct iovec *iov, loff_t offset, unsigned long nr_segs)
{
	struct file *file = iocb->ki_filp;
	struct address_space *mapping = file->f_mapping;
	struct inode *inode = file_inode(file)->i_mapping->host;
	ssize_t ret;

	ret = blockdev_direct_IO(rw, iocb, inode, iov, offset, nr_segs,
				 hfsplus_get_block);

	/*
	 * In case of error extending write may have instantiated a few
	 * blocks outside i_size. Trim these off again.
	 */
	if (unlikely((rw & WRITE) && ret < 0)) {
		loff_t isize = i_size_read(inode);
		loff_t end = offset + iov_length(iov, nr_segs);

		if (end > isize)
			hfsplus_write_failed(mapping, end);
	}

	return ret;
}

static int hfsplus_writepages(struct address_space *mapping,
			      struct writeback_control *wbc)
{
	return mpage_writepages(mapping, wbc, hfsplus_get_block);
}

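/*
 * hfsplus_btree_aops backs the special metadata files and therefore
 * provides ->releasepage to drop cached btree nodes; hfsplus_aops backs
 * ordinary file data and adds direct I/O and multi-page writeback.
 */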
const struct address_space_operations hfsplus_btree_aops = {
	.readpage	= hfsplus_readpage,
	.writepage	= hfsplus_writepage,
	.write_begin	= hfsplus_write_begin,
	.write_end	= generic_write_end,
	.bmap		= hfsplus_bmap,
	.releasepage	= hfsplus_releasepage,
};

const struct address_space_operations hfsplus_aops = {
	.readpage	= hfsplus_readpage,
	.writepage	= hfsplus_writepage,
	.write_begin	= hfsplus_write_begin,
	.write_end	= generic_write_end,
	.bmap		= hfsplus_bmap,
	.direct_IO	= hfsplus_direct_IO,
	.writepages	= hfsplus_writepages,
};

const struct dentry_operations hfsplus_dentry_operations = {
	.d_hash		= hfsplus_hash_dentry,
	.d_compare	= hfsplus_compare_dentry,
};

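/*
 * Lookup of the magic "rsrc" name inside a file: build a shadow inode
 * for the resource fork, read it from the catalog and cache it in the
 * owner's rsrc_inode pointer.
 */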
static struct dentry *hfsplus_file_lookup(struct inode *dir,
		struct dentry *dentry, unsigned int flags)
{
	struct hfs_find_data fd;
	struct super_block *sb = dir->i_sb;
	struct inode *inode = NULL;
	struct hfsplus_inode_info *hip;
	int err;

	if (HFSPLUS_IS_RSRC(dir) || strcmp(dentry->d_name.name, "rsrc"))
		goto out;

	inode = HFSPLUS_I(dir)->rsrc_inode;
	if (inode)
		goto out;

	inode = new_inode(sb);
	if (!inode)
		return ERR_PTR(-ENOMEM);

	hip = HFSPLUS_I(inode);
	inode->i_ino = dir->i_ino;
	INIT_LIST_HEAD(&hip->open_dir_list);
	mutex_init(&hip->extents_lock);
	hip->extent_state = 0;
	hip->flags = 0;
	hip->userflags = 0;
	set_bit(HFSPLUS_I_RSRC, &hip->flags);

	err = hfs_find_init(HFSPLUS_SB(sb)->cat_tree, &fd);
	if (!err) {
		err = hfsplus_find_cat(sb, dir->i_ino, &fd);
		if (!err)
			err = hfsplus_cat_read_inode(inode, &fd);
		hfs_find_exit(&fd);
	}
	if (err) {
		iput(inode);
		return ERR_PTR(err);
	}
	hip->rsrc_inode = dir;
	HFSPLUS_I(dir)->rsrc_inode = inode;
	igrab(dir);

	/*
	 * __mark_inode_dirty expects inodes to be hashed. Since we don't
	 * want resource fork inodes in the regular inode space, we make them
	 * appear hashed, but do not put on any lists. hlist_del()
	 * will work fine and require no locking.
	 */
	hlist_add_fake(&inode->i_hash);

	mark_inode_dirty(inode);
out:
	d_add(dentry, inode);
	return NULL;
}

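/*
 * Translate an on-disk permission structure into i_uid/i_gid, i_mode and
 * the immutable/append-only inode flags, falling back to the mount-time
 * uid/gid/umask defaults when the catalog entry carries none.
 */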
static void hfsplus_get_perms(struct inode *inode,
		struct hfsplus_perm *perms, int dir)
{
	struct hfsplus_sb_info *sbi = HFSPLUS_SB(inode->i_sb);
	u16 mode;

	mode = be16_to_cpu(perms->mode);

	i_uid_write(inode, be32_to_cpu(perms->owner));
	if (!i_uid_read(inode) && !mode)
		inode->i_uid = sbi->uid;

	i_gid_write(inode, be32_to_cpu(perms->group));
	if (!i_gid_read(inode) && !mode)
		inode->i_gid = sbi->gid;

	if (dir) {
		mode = mode ? (mode & S_IALLUGO) : (S_IRWXUGO & ~(sbi->umask));
		mode |= S_IFDIR;
	} else if (!mode)
		mode = S_IFREG | ((S_IRUGO|S_IWUGO) & ~(sbi->umask));
	inode->i_mode = mode;

	HFSPLUS_I(inode)->userflags = perms->userflags;
	if (perms->rootflags & HFSPLUS_FLG_IMMUTABLE)
		inode->i_flags |= S_IMMUTABLE;
	else
		inode->i_flags &= ~S_IMMUTABLE;
	if (perms->rootflags & HFSPLUS_FLG_APPEND)
		inode->i_flags |= S_APPEND;
	else
		inode->i_flags &= ~S_APPEND;
}

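/*
 * open/release redirect resource fork opens to the main (data fork)
 * inode and track them in opencnt; on last close the file is truncated
 * back to i_size and, if it was marked S_DEAD, its catalog entry under
 * the hidden directory and its on-disk blocks are deleted.
 */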
static int hfsplus_file_open(struct inode *inode, struct file *file)
{
	if (HFSPLUS_IS_RSRC(inode))
		inode = HFSPLUS_I(inode)->rsrc_inode;
	if (!(file->f_flags & O_LARGEFILE) && i_size_read(inode) > MAX_NON_LFS)
		return -EOVERFLOW;
	atomic_inc(&HFSPLUS_I(inode)->opencnt);
	return 0;
}

static int hfsplus_file_release(struct inode *inode, struct file *file)
{
	struct super_block *sb = inode->i_sb;

	if (HFSPLUS_IS_RSRC(inode))
		inode = HFSPLUS_I(inode)->rsrc_inode;
	if (atomic_dec_and_test(&HFSPLUS_I(inode)->opencnt)) {
		mutex_lock(&inode->i_mutex);
		hfsplus_file_truncate(inode);
		if (inode->i_flags & S_DEAD) {
			hfsplus_delete_cat(inode->i_ino,
					   HFSPLUS_SB(sb)->hidden_dir, NULL);
			hfsplus_delete_inode(inode);
		}
		mutex_unlock(&inode->i_mutex);
	}
	return 0;
}

static int hfsplus_setattr(struct dentry *dentry, struct iattr *attr)
{
	struct inode *inode = dentry->d_inode;
	int error;

	error = inode_change_ok(inode, attr);
	if (error)
		return error;

	if ((attr->ia_valid & ATTR_SIZE) &&
	    attr->ia_size != i_size_read(inode)) {
		inode_dio_wait(inode);
		truncate_setsize(inode, attr->ia_size);
		hfsplus_file_truncate(inode);
	}

	setattr_copy(inode, attr);
	mark_inode_dirty(inode);
	return 0;
}

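/*
 * fsync writes back the inode's data pages and metadata first, then any
 * btree (catalog, extents, attributes) and allocation-file pages this
 * inode dirtied, and finally issues a disk cache flush unless barriers
 * are disabled.
 */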
int hfsplus_file_fsync(struct file *file, loff_t start, loff_t end,
		       int datasync)
{
	struct inode *inode = file->f_mapping->host;
	struct hfsplus_inode_info *hip = HFSPLUS_I(inode);
	struct hfsplus_sb_info *sbi = HFSPLUS_SB(inode->i_sb);
	int error = 0, error2;

	error = filemap_write_and_wait_range(inode->i_mapping, start, end);
	if (error)
		return error;
	mutex_lock(&inode->i_mutex);

	/*
	 * Sync inode metadata into the catalog and extent trees.
	 */
	sync_inode_metadata(inode, 1);

	/*
	 * And explicitly write out the btrees.
	 */
	if (test_and_clear_bit(HFSPLUS_I_CAT_DIRTY, &hip->flags))
		error = filemap_write_and_wait(sbi->cat_tree->inode->i_mapping);

	if (test_and_clear_bit(HFSPLUS_I_EXT_DIRTY, &hip->flags)) {
		error2 =
			filemap_write_and_wait(sbi->ext_tree->inode->i_mapping);
		if (!error)
			error = error2;
	}

	if (test_and_clear_bit(HFSPLUS_I_ATTR_DIRTY, &hip->flags)) {
		if (sbi->attr_tree) {
			error2 =
				filemap_write_and_wait(
					sbi->attr_tree->inode->i_mapping);
			if (!error)
				error = error2;
		} else {
			printk(KERN_ERR "hfs: sync non-existent attributes tree\n");
		}
	}

	if (test_and_clear_bit(HFSPLUS_I_ALLOC_DIRTY, &hip->flags)) {
		error2 = filemap_write_and_wait(sbi->alloc_file->i_mapping);
		if (!error)
			error = error2;
	}

	if (!test_bit(HFSPLUS_SB_NOBARRIER, &sbi->flags))
		blkdev_issue_flush(inode->i_sb->s_bdev, GFP_KERNEL, NULL);

	mutex_unlock(&inode->i_mutex);

	return error;
}

static const struct inode_operations hfsplus_file_inode_operations = {
	.lookup		= hfsplus_file_lookup,
	.setattr	= hfsplus_setattr,
	.setxattr	= generic_setxattr,
	.getxattr	= generic_getxattr,
	.listxattr	= hfsplus_listxattr,
	.removexattr	= hfsplus_removexattr,
};

static const struct file_operations hfsplus_file_operations = {
	.llseek		= generic_file_llseek,
	.read		= do_sync_read,
	.aio_read	= generic_file_aio_read,
	.write		= do_sync_write,
	.aio_write	= generic_file_aio_write,
	.mmap		= generic_file_mmap,
	.splice_read	= generic_file_splice_read,
	.fsync		= hfsplus_file_fsync,
	.open		= hfsplus_file_open,
	.release	= hfsplus_file_release,
	.unlocked_ioctl = hfsplus_ioctl,
};

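/*
 * Allocate a new in-core inode for a freshly created object: assign the
 * next CNID, apply the caller's mode and the current fsuid/fsgid, and
 * wire up the directory, regular file or symlink operations.
 */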
struct inode *hfsplus_new_inode(struct super_block *sb, umode_t mode)
{
	struct hfsplus_sb_info *sbi = HFSPLUS_SB(sb);
	struct inode *inode = new_inode(sb);
	struct hfsplus_inode_info *hip;

	if (!inode)
		return NULL;

	inode->i_ino = sbi->next_cnid++;
	inode->i_mode = mode;
	inode->i_uid = current_fsuid();
	inode->i_gid = current_fsgid();
	set_nlink(inode, 1);
	inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME_SEC;

	hip = HFSPLUS_I(inode);
	INIT_LIST_HEAD(&hip->open_dir_list);
	mutex_init(&hip->extents_lock);
	atomic_set(&hip->opencnt, 0);
	hip->extent_state = 0;
	hip->flags = 0;
	hip->userflags = 0;
	memset(hip->first_extents, 0, sizeof(hfsplus_extent_rec));
	memset(hip->cached_extents, 0, sizeof(hfsplus_extent_rec));
	hip->alloc_blocks = 0;
	hip->first_blocks = 0;
	hip->cached_start = 0;
	hip->cached_blocks = 0;
	hip->phys_size = 0;
	hip->fs_blocks = 0;
	hip->rsrc_inode = NULL;
	if (S_ISDIR(inode->i_mode)) {
		inode->i_size = 2;
		sbi->folder_count++;
		inode->i_op = &hfsplus_dir_inode_operations;
		inode->i_fop = &hfsplus_dir_operations;
	} else if (S_ISREG(inode->i_mode)) {
		sbi->file_count++;
		inode->i_op = &hfsplus_file_inode_operations;
		inode->i_fop = &hfsplus_file_operations;
		inode->i_mapping->a_ops = &hfsplus_aops;
		hip->clump_blocks = sbi->data_clump_blocks;
	} else if (S_ISLNK(inode->i_mode)) {
		sbi->file_count++;
		inode->i_op = &page_symlink_inode_operations;
		inode->i_mapping->a_ops = &hfsplus_aops;
		hip->clump_blocks = 1;
	} else
		sbi->file_count++;
	insert_inode_hash(inode);
	mark_inode_dirty(inode);
	hfsplus_mark_mdb_dirty(sb);

	return inode;
}

void hfsplus_delete_inode(struct inode *inode)
{
	struct super_block *sb = inode->i_sb;

	if (S_ISDIR(inode->i_mode)) {
		HFSPLUS_SB(sb)->folder_count--;
		hfsplus_mark_mdb_dirty(sb);
		return;
	}
	HFSPLUS_SB(sb)->file_count--;
	if (S_ISREG(inode->i_mode)) {
		if (!inode->i_nlink) {
			inode->i_size = 0;
			hfsplus_file_truncate(inode);
		}
	} else if (S_ISLNK(inode->i_mode)) {
		inode->i_size = 0;
		hfsplus_file_truncate(inode);
	}
	hfsplus_mark_mdb_dirty(sb);
}

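/*
 * Copy a raw fork descriptor from the catalog into the in-core inode:
 * the first extent record, allocated and on-disk sizes, and the clump
 * size used when extending the fork.
 */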
void hfsplus_inode_read_fork(struct inode *inode, struct hfsplus_fork_raw *fork)
{
	struct super_block *sb = inode->i_sb;
	struct hfsplus_sb_info *sbi = HFSPLUS_SB(sb);
	struct hfsplus_inode_info *hip = HFSPLUS_I(inode);
	u32 count;
	int i;

	memcpy(&hip->first_extents, &fork->extents, sizeof(hfsplus_extent_rec));
	for (count = 0, i = 0; i < 8; i++)
		count += be32_to_cpu(fork->extents[i].block_count);
	hip->first_blocks = count;
	memset(hip->cached_extents, 0, sizeof(hfsplus_extent_rec));
	hip->cached_start = 0;
	hip->cached_blocks = 0;

	hip->alloc_blocks = be32_to_cpu(fork->total_blocks);
	hip->phys_size = inode->i_size = be64_to_cpu(fork->total_size);
	hip->fs_blocks =
		(inode->i_size + sb->s_blocksize - 1) >> sb->s_blocksize_bits;
	inode_set_bytes(inode, hip->fs_blocks << sb->s_blocksize_bits);
	hip->clump_blocks =
		be32_to_cpu(fork->clump_size) >> sbi->alloc_blksz_shift;
	if (!hip->clump_blocks) {
		hip->clump_blocks = HFSPLUS_IS_RSRC(inode) ?
			sbi->rsrc_clump_blocks :
			sbi->data_clump_blocks;
	}
}

void hfsplus_inode_write_fork(struct inode *inode,
		struct hfsplus_fork_raw *fork)
{
	memcpy(&fork->extents, &HFSPLUS_I(inode)->first_extents,
	       sizeof(hfsplus_extent_rec));
	fork->total_size = cpu_to_be64(inode->i_size);
	fork->total_blocks = cpu_to_be32(HFSPLUS_I(inode)->alloc_blocks);
}

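/*
 * Fill an in-core inode from the catalog record located by the caller's
 * hfs_find_data.  Folder and file records are handled separately; for a
 * regular file a nonzero permissions.dev value is taken as the link count.
 */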
int hfsplus_cat_read_inode(struct inode *inode, struct hfs_find_data *fd)
{
	hfsplus_cat_entry entry;
	int res = 0;
	u16 type;

	type = hfs_bnode_read_u16(fd->bnode, fd->entryoffset);

	HFSPLUS_I(inode)->linkid = 0;
	if (type == HFSPLUS_FOLDER) {
		struct hfsplus_cat_folder *folder = &entry.folder;

		if (fd->entrylength < sizeof(struct hfsplus_cat_folder))
			/* panic? */;
		hfs_bnode_read(fd->bnode, &entry, fd->entryoffset,
					sizeof(struct hfsplus_cat_folder));
		hfsplus_get_perms(inode, &folder->permissions, 1);
		set_nlink(inode, 1);
		inode->i_size = 2 + be32_to_cpu(folder->valence);
		inode->i_atime = hfsp_mt2ut(folder->access_date);
		inode->i_mtime = hfsp_mt2ut(folder->content_mod_date);
		inode->i_ctime = hfsp_mt2ut(folder->attribute_mod_date);
		HFSPLUS_I(inode)->create_date = folder->create_date;
		HFSPLUS_I(inode)->fs_blocks = 0;
		inode->i_op = &hfsplus_dir_inode_operations;
		inode->i_fop = &hfsplus_dir_operations;
	} else if (type == HFSPLUS_FILE) {
		struct hfsplus_cat_file *file = &entry.file;

		if (fd->entrylength < sizeof(struct hfsplus_cat_file))
			/* panic? */;
		hfs_bnode_read(fd->bnode, &entry, fd->entryoffset,
					sizeof(struct hfsplus_cat_file));

		hfsplus_inode_read_fork(inode, HFSPLUS_IS_RSRC(inode) ?
					&file->rsrc_fork : &file->data_fork);
		hfsplus_get_perms(inode, &file->permissions, 0);
		set_nlink(inode, 1);
		if (S_ISREG(inode->i_mode)) {
			if (file->permissions.dev)
				set_nlink(inode,
					  be32_to_cpu(file->permissions.dev));
			inode->i_op = &hfsplus_file_inode_operations;
			inode->i_fop = &hfsplus_file_operations;
			inode->i_mapping->a_ops = &hfsplus_aops;
		} else if (S_ISLNK(inode->i_mode)) {
			inode->i_op = &page_symlink_inode_operations;
			inode->i_mapping->a_ops = &hfsplus_aops;
		} else {
			init_special_inode(inode, inode->i_mode,
					   be32_to_cpu(file->permissions.dev));
		}
		inode->i_atime = hfsp_mt2ut(file->access_date);
		inode->i_mtime = hfsp_mt2ut(file->content_mod_date);
		inode->i_ctime = hfsp_mt2ut(file->attribute_mod_date);
		HFSPLUS_I(inode)->create_date = file->create_date;
	} else {
		printk(KERN_ERR "hfs: bad catalog entry used to create inode\n");
		res = -EIO;
	}
	return res;
}

int hfsplus_cat_write_inode(struct inode *inode)
{
	struct inode *main_inode = inode;
	struct hfs_find_data fd;
	hfsplus_cat_entry entry;

	if (HFSPLUS_IS_RSRC(inode))
		main_inode = HFSPLUS_I(inode)->rsrc_inode;

	if (!main_inode->i_nlink)
		return 0;

	if (hfs_find_init(HFSPLUS_SB(main_inode->i_sb)->cat_tree, &fd))
		/* panic? */
		return -EIO;

	if (hfsplus_find_cat(main_inode->i_sb, main_inode->i_ino, &fd))
		/* panic? */
		goto out;

	if (S_ISDIR(main_inode->i_mode)) {
		struct hfsplus_cat_folder *folder = &entry.folder;

		if (fd.entrylength < sizeof(struct hfsplus_cat_folder))
			/* panic? */;
		hfs_bnode_read(fd.bnode, &entry, fd.entryoffset,
					sizeof(struct hfsplus_cat_folder));
		/* simple node checks? */
		hfsplus_cat_set_perms(inode, &folder->permissions);
		folder->access_date = hfsp_ut2mt(inode->i_atime);
		folder->content_mod_date = hfsp_ut2mt(inode->i_mtime);
		folder->attribute_mod_date = hfsp_ut2mt(inode->i_ctime);
		folder->valence = cpu_to_be32(inode->i_size - 2);
		hfs_bnode_write(fd.bnode, &entry, fd.entryoffset,
					 sizeof(struct hfsplus_cat_folder));
	} else if (HFSPLUS_IS_RSRC(inode)) {
		struct hfsplus_cat_file *file = &entry.file;
		hfs_bnode_read(fd.bnode, &entry, fd.entryoffset,
			       sizeof(struct hfsplus_cat_file));
		hfsplus_inode_write_fork(inode, &file->rsrc_fork);
		hfs_bnode_write(fd.bnode, &entry, fd.entryoffset,
				sizeof(struct hfsplus_cat_file));
	} else {
		struct hfsplus_cat_file *file = &entry.file;

		if (fd.entrylength < sizeof(struct hfsplus_cat_file))
			/* panic? */;
		hfs_bnode_read(fd.bnode, &entry, fd.entryoffset,
					sizeof(struct hfsplus_cat_file));
		hfsplus_inode_write_fork(inode, &file->data_fork);
		hfsplus_cat_set_perms(inode, &file->permissions);
		if (HFSPLUS_FLG_IMMUTABLE &
				(file->permissions.rootflags |
					file->permissions.userflags))
			file->flags |= cpu_to_be16(HFSPLUS_FILE_LOCKED);
		else
			file->flags &= cpu_to_be16(~HFSPLUS_FILE_LOCKED);
		file->access_date = hfsp_ut2mt(inode->i_atime);
		file->content_mod_date = hfsp_ut2mt(inode->i_mtime);
		file->attribute_mod_date = hfsp_ut2mt(inode->i_ctime);
		hfs_bnode_write(fd.bnode, &entry, fd.entryoffset,
					 sizeof(struct hfsplus_cat_file));
	}

	set_bit(HFSPLUS_I_CAT_DIRTY, &HFSPLUS_I(inode)->flags);
out:
	hfs_find_exit(&fd);
	return 0;
}