// SPDX-License-Identifier: GPL-2.0
/*
 *  linux/fs/hfsplus/inode.c
 *
 * Copyright (C) 2001
 * Brad Boyer (flar@allandria.com)
 * (C) 2003 Ardis Technologies <roman@ardistech.com>
 *
 * Inode handling routines
 */
12 #include <linux/blkdev.h>
15 #include <linux/pagemap.h>
16 #include <linux/mpage.h>
17 #include <linux/sched.h>
18 #include <linux/cred.h>
19 #include <linux/uio.h>
20 #include <linux/fileattr.h>
22 #include "hfsplus_fs.h"
23 #include "hfsplus_raw.h"
26 static int hfsplus_read_folio(struct file
*file
, struct folio
*folio
)
28 return block_read_full_folio(folio
, hfsplus_get_block
);
31 static void hfsplus_write_failed(struct address_space
*mapping
, loff_t to
)
33 struct inode
*inode
= mapping
->host
;
35 if (to
> inode
->i_size
) {
36 truncate_pagecache(inode
, inode
->i_size
);
37 hfsplus_file_truncate(inode
);
/*
 * ->write_begin: delegate to cont_write_begin(), which zero-fills the
 * gap between the on-disk size (phys_size) and the write position.
 * On failure, trim any blocks instantiated beyond i_size.
 */
int hfsplus_write_begin(struct file *file, struct address_space *mapping,
		loff_t pos, unsigned len, struct folio **foliop, void **fsdata)
{
	int ret;

	ret = cont_write_begin(file, mapping, pos, len, foliop, fsdata,
				&HFSPLUS_I(mapping->host)->phys_size);
	if (unlikely(ret))
		/* undo speculative allocation past EOF */
		hfsplus_write_failed(mapping, pos + len);

	return ret;
}
55 static sector_t
hfsplus_bmap(struct address_space
*mapping
, sector_t block
)
57 return generic_block_bmap(mapping
, block
, hfsplus_get_block
);
/*
 * ->release_folio for the special btree inodes (extent, catalog,
 * attribute trees).  A folio caching btree nodes may only be released
 * when no node overlapping it is still referenced; unreferenced nodes
 * found in the hash are unhashed and freed first.
 */
static bool hfsplus_release_folio(struct folio *folio, gfp_t mask)
{
	struct inode *inode = folio->mapping->host;
	struct super_block *sb = inode->i_sb;
	struct hfs_btree *tree;
	struct hfs_bnode *node;
	u32 nidx;
	int i;
	bool res = true;

	/* only the three metadata btree inodes use this aops */
	switch (inode->i_ino) {
	case HFSPLUS_EXT_CNID:
		tree = HFSPLUS_SB(sb)->ext_tree;
		break;
	case HFSPLUS_CAT_CNID:
		tree = HFSPLUS_SB(sb)->cat_tree;
		break;
	case HFSPLUS_ATTR_CNID:
		tree = HFSPLUS_SB(sb)->attr_tree;
		break;
	default:
		BUG();
		return false;
	}
	if (!tree)
		return false;
	if (tree->node_size >= PAGE_SIZE) {
		/* one node spans several folios: folio index -> node index */
		nidx = folio->index >>
			(tree->node_size_shift - PAGE_SHIFT);
		spin_lock(&tree->hash_lock);
		node = hfs_bnode_findhash(tree, nidx);
		if (!node)
			;
		else if (atomic_read(&node->refcnt))
			res = false;	/* node still in use; keep folio */
		if (res && node) {
			hfs_bnode_unhash(node);
			hfs_bnode_free(node);
		}
		spin_unlock(&tree->hash_lock);
	} else {
		/* several nodes per folio: walk every node in this folio */
		nidx = folio->index <<
			(PAGE_SHIFT - tree->node_size_shift);
		i = 1 << (PAGE_SHIFT - tree->node_size_shift);
		spin_lock(&tree->hash_lock);
		do {
			node = hfs_bnode_findhash(tree, nidx++);
			if (!node)
				continue;
			if (atomic_read(&node->refcnt)) {
				res = false;	/* any live node blocks release */
				break;
			}
			hfs_bnode_unhash(node);
			hfs_bnode_free(node);
		} while (--i && nidx < tree->node_count);
		spin_unlock(&tree->hash_lock);
	}
	/* only attempt the actual buffer release when no node is busy */
	return res ? try_to_free_buffers(folio) : false;
}
/*
 * ->direct_IO: hand the iov straight to the block device, mapping
 * blocks with hfsplus_get_block().
 */
static ssize_t hfsplus_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
{
	struct file *file = iocb->ki_filp;
	struct address_space *mapping = file->f_mapping;
	struct inode *inode = mapping->host;
	size_t count = iov_iter_count(iter);
	ssize_t ret;

	ret = blockdev_direct_IO(iocb, inode, iter, hfsplus_get_block);

	/*
	 * In case of error extending write may have instantiated a few
	 * blocks outside i_size. Trim these off again.
	 */
	if (unlikely(iov_iter_rw(iter) == WRITE && ret < 0)) {
		loff_t isize = i_size_read(inode);
		loff_t end = iocb->ki_pos + count;

		if (end > isize)
			hfsplus_write_failed(mapping, end);
	}

	return ret;
}
146 static int hfsplus_writepages(struct address_space
*mapping
,
147 struct writeback_control
*wbc
)
149 return mpage_writepages(mapping
, wbc
, hfsplus_get_block
);
/*
 * Address-space operations for the metadata btree inodes (extent,
 * catalog, attribute trees).  Differs from hfsplus_aops only in the
 * release_folio hook, which must respect live btree node references.
 */
const struct address_space_operations hfsplus_btree_aops = {
	.dirty_folio	= block_dirty_folio,
	.invalidate_folio = block_invalidate_folio,
	.read_folio	= hfsplus_read_folio,
	.writepages	= hfsplus_writepages,
	.write_begin	= hfsplus_write_begin,
	.write_end	= generic_write_end,
	.migrate_folio	= buffer_migrate_folio,
	.bmap		= hfsplus_bmap,
	.release_folio	= hfsplus_release_folio,
};
/* Address-space operations for regular file and symlink inodes. */
const struct address_space_operations hfsplus_aops = {
	.dirty_folio	= block_dirty_folio,
	.invalidate_folio = block_invalidate_folio,
	.read_folio	= hfsplus_read_folio,
	.write_begin	= hfsplus_write_begin,
	.write_end	= generic_write_end,
	.bmap		= hfsplus_bmap,
	.direct_IO	= hfsplus_direct_IO,
	.writepages	= hfsplus_writepages,
	.migrate_folio	= buffer_migrate_folio,
};
/*
 * Dentry operations: HFS+ names are case-insensitive (by default), so
 * hashing and comparison must use the filesystem's own routines.
 */
const struct dentry_operations hfsplus_dentry_operations = {
	.d_hash       = hfsplus_hash_dentry,
	.d_compare    = hfsplus_compare_dentry,
};
/*
 * Translate on-disk HFS+ permissions into in-core inode attributes.
 * @dir: non-zero when the entry is a folder (affects mode synthesis).
 *
 * Mount options (uid/gid/umask in sbi) override or fill in missing
 * ownership and mode information.
 */
static void hfsplus_get_perms(struct inode *inode,
		struct hfsplus_perm *perms, int dir)
{
	struct hfsplus_sb_info *sbi = HFSPLUS_SB(inode->i_sb);
	u16 mode;

	mode = be16_to_cpu(perms->mode);

	i_uid_write(inode, be32_to_cpu(perms->owner));
	/* forced uid mount option, or entry carries no ownership at all */
	if ((test_bit(HFSPLUS_SB_UID, &sbi->flags)) || (!i_uid_read(inode) && !mode))
		inode->i_uid = sbi->uid;

	i_gid_write(inode, be32_to_cpu(perms->group));
	if ((test_bit(HFSPLUS_SB_GID, &sbi->flags)) || (!i_gid_read(inode) && !mode))
		inode->i_gid = sbi->gid;

	if (dir) {
		/* synthesize directory mode from stored bits or umask */
		mode = mode ? (mode & S_IALLUGO) : (S_IRWXUGO & ~(sbi->umask));
		mode |= S_IFDIR;
	} else if (!mode)
		/* file without stored mode: plain regular file per umask */
		mode = S_IFREG | ((S_IRUGO|S_IWUGO) & ~(sbi->umask));
	inode->i_mode = mode;

	HFSPLUS_I(inode)->userflags = perms->userflags;
	/* mirror the HFS+ root flags onto the VFS inode flags */
	if (perms->rootflags & HFSPLUS_FLG_IMMUTABLE)
		inode->i_flags |= S_IMMUTABLE;
	else
		inode->i_flags &= ~S_IMMUTABLE;
	if (perms->rootflags & HFSPLUS_FLG_APPEND)
		inode->i_flags |= S_APPEND;
	else
		inode->i_flags &= ~S_APPEND;
}
/*
 * ->open: account the open on the main (data) inode; a resource fork
 * inode redirects to its owning inode first.  Reject non-O_LARGEFILE
 * opens of files too big for a 32-bit offset.
 */
static int hfsplus_file_open(struct inode *inode, struct file *file)
{
	if (HFSPLUS_IS_RSRC(inode))
		inode = HFSPLUS_I(inode)->rsrc_inode;
	if (!(file->f_flags & O_LARGEFILE) && i_size_read(inode) > MAX_NON_LFS)
		return -EOVERFLOW;
	atomic_inc(&HFSPLUS_I(inode)->opencnt);
	return 0;
}
/*
 * ->release: on last close, trim preallocated blocks, and if the file
 * was unlinked while open (parked in the hidden directory, S_DEAD),
 * finally remove its catalog record and free its storage.
 */
static int hfsplus_file_release(struct inode *inode, struct file *file)
{
	struct super_block *sb = inode->i_sb;

	if (HFSPLUS_IS_RSRC(inode))
		inode = HFSPLUS_I(inode)->rsrc_inode;
	if (atomic_dec_and_test(&HFSPLUS_I(inode)->opencnt)) {
		inode_lock(inode);
		hfsplus_file_truncate(inode);
		if (inode->i_flags & S_DEAD) {
			hfsplus_delete_cat(inode->i_ino,
					   HFSPLUS_SB(sb)->hidden_dir, NULL);
			hfsplus_delete_inode(inode);
		}
		inode_unlock(inode);
	}
	return 0;
}
/*
 * ->setattr: validate the change, handle size changes (expand via the
 * generic cont helper, shrink via truncate), then copy the remaining
 * attributes into the inode.
 */
static int hfsplus_setattr(struct mnt_idmap *idmap,
			   struct dentry *dentry, struct iattr *attr)
{
	struct inode *inode = d_inode(dentry);
	int error;

	error = setattr_prepare(&nop_mnt_idmap, dentry, attr);
	if (error)
		return error;

	if ((attr->ia_valid & ATTR_SIZE) &&
	    attr->ia_size != i_size_read(inode)) {
		/* wait out in-flight direct I/O before resizing */
		inode_dio_wait(inode);
		if (attr->ia_size > inode->i_size) {
			error = generic_cont_expand_simple(inode,
							   attr->ia_size);
			if (error)
				return error;
		}
		truncate_setsize(inode, attr->ia_size);
		hfsplus_file_truncate(inode);
		inode_set_mtime_to_ts(inode, inode_set_ctime_current(inode));
	}

	setattr_copy(&nop_mnt_idmap, inode, attr);
	mark_inode_dirty(inode);

	return 0;
}
/*
 * ->getattr: fill generic stat data and the HFS+-specific extras —
 * creation time (btime) and the append/immutable/nodump attributes.
 */
int hfsplus_getattr(struct mnt_idmap *idmap, const struct path *path,
		    struct kstat *stat, u32 request_mask,
		    unsigned int query_flags)
{
	struct inode *inode = d_inode(path->dentry);
	struct hfsplus_inode_info *hip = HFSPLUS_I(inode);

	if (request_mask & STATX_BTIME) {
		stat->result_mask |= STATX_BTIME;
		stat->btime = hfsp_mt2ut(hip->create_date);
	}

	if (inode->i_flags & S_APPEND)
		stat->attributes |= STATX_ATTR_APPEND;
	if (inode->i_flags & S_IMMUTABLE)
		stat->attributes |= STATX_ATTR_IMMUTABLE;
	if (hip->userflags & HFSPLUS_FLG_NODUMP)
		stat->attributes |= STATX_ATTR_NODUMP;

	/* advertise which attribute bits this filesystem can report */
	stat->attributes_mask |= STATX_ATTR_APPEND | STATX_ATTR_IMMUTABLE |
				 STATX_ATTR_NODUMP;

	generic_fillattr(&nop_mnt_idmap, request_mask, inode, stat);
	return 0;
}
/*
 * ->fsync: flush file data, push inode metadata into the catalog and
 * extent trees, then write out whichever metadata btrees this inode
 * dirtied (tracked by per-inode dirty bits), and finally issue a
 * device flush unless barriers are disabled.
 *
 * The first error encountered is returned; later flushes still run.
 */
int hfsplus_file_fsync(struct file *file, loff_t start, loff_t end,
		       int datasync)
{
	struct inode *inode = file->f_mapping->host;
	struct hfsplus_inode_info *hip = HFSPLUS_I(inode);
	struct hfsplus_sb_info *sbi = HFSPLUS_SB(inode->i_sb);
	int error = 0, error2;

	error = file_write_and_wait_range(file, start, end);
	if (error)
		return error;
	inode_lock(inode);

	/*
	 * Sync inode metadata into the catalog and extent trees.
	 */
	sync_inode_metadata(inode, 1);

	/*
	 * And explicitly write out the btrees.
	 */
	if (test_and_clear_bit(HFSPLUS_I_CAT_DIRTY, &hip->flags))
		error = filemap_write_and_wait(sbi->cat_tree->inode->i_mapping);

	if (test_and_clear_bit(HFSPLUS_I_EXT_DIRTY, &hip->flags)) {
		error2 =
			filemap_write_and_wait(sbi->ext_tree->inode->i_mapping);
		if (!error)
			error = error2;
	}

	if (test_and_clear_bit(HFSPLUS_I_ATTR_DIRTY, &hip->flags)) {
		if (sbi->attr_tree) {
			error2 =
				filemap_write_and_wait(
					sbi->attr_tree->inode->i_mapping);
			if (!error)
				error = error2;
		} else {
			/* dirty bit set without an attributes tree: bug */
			pr_err("sync non-existent attributes tree\n");
		}
	}

	if (test_and_clear_bit(HFSPLUS_I_ALLOC_DIRTY, &hip->flags)) {
		error2 = filemap_write_and_wait(sbi->alloc_file->i_mapping);
		if (!error)
			error = error2;
	}

	if (!test_bit(HFSPLUS_SB_NOBARRIER, &sbi->flags))
		blkdev_issue_flush(inode->i_sb->s_bdev);

	inode_unlock(inode);

	return error;
}
/* Inode operations for regular files. */
static const struct inode_operations hfsplus_file_inode_operations = {
	.setattr	= hfsplus_setattr,
	.getattr	= hfsplus_getattr,
	.listxattr	= hfsplus_listxattr,
	.fileattr_get	= hfsplus_fileattr_get,
	.fileattr_set	= hfsplus_fileattr_set,
};
/* File operations for regular files: generic I/O plus HFS+ open/release
 * accounting, fsync and ioctl. */
static const struct file_operations hfsplus_file_operations = {
	.llseek		= generic_file_llseek,
	.read_iter	= generic_file_read_iter,
	.write_iter	= generic_file_write_iter,
	.mmap		= generic_file_mmap,
	.splice_read	= filemap_splice_read,
	.fsync		= hfsplus_file_fsync,
	.open		= hfsplus_file_open,
	.release	= hfsplus_file_release,
	.unlocked_ioctl = hfsplus_ioctl,
};
/*
 * Allocate and initialise a new in-core inode: assign the next catalog
 * node ID, set ownership/mode from the creating context, zero all
 * HFS+ private state, and wire up the per-type operations.
 *
 * Returns the hashed, dirty inode, or NULL on allocation failure.
 */
struct inode *hfsplus_new_inode(struct super_block *sb, struct inode *dir,
				umode_t mode)
{
	struct hfsplus_sb_info *sbi = HFSPLUS_SB(sb);
	struct inode *inode = new_inode(sb);
	struct hfsplus_inode_info *hip;

	if (!inode)
		return NULL;

	/* CNIDs are handed out sequentially from the volume header */
	inode->i_ino = sbi->next_cnid++;
	inode_init_owner(&nop_mnt_idmap, inode, dir, mode);
	set_nlink(inode, 1);
	simple_inode_init_ts(inode);

	hip = HFSPLUS_I(inode);
	INIT_LIST_HEAD(&hip->open_dir_list);
	spin_lock_init(&hip->open_dir_lock);
	mutex_init(&hip->extents_lock);
	atomic_set(&hip->opencnt, 0);
	hip->extent_state = 0;
	hip->flags = 0;
	hip->userflags = 0;
	hip->subfolders = 0;
	memset(hip->first_extents, 0, sizeof(hfsplus_extent_rec));
	memset(hip->cached_extents, 0, sizeof(hfsplus_extent_rec));
	hip->alloc_blocks = 0;
	hip->first_blocks = 0;
	hip->cached_start = 0;
	hip->cached_blocks = 0;
	hip->phys_size = 0;
	hip->fs_blocks = 0;
	hip->rsrc_inode = NULL;
	if (S_ISDIR(inode->i_mode)) {
		/* directory size counts "." and ".." plus valence */
		inode->i_size = 2;
		sbi->folder_count++;
		inode->i_op = &hfsplus_dir_inode_operations;
		inode->i_fop = &hfsplus_dir_operations;
	} else if (S_ISREG(inode->i_mode)) {
		sbi->file_count++;
		inode->i_op = &hfsplus_file_inode_operations;
		inode->i_fop = &hfsplus_file_operations;
		inode->i_mapping->a_ops = &hfsplus_aops;
		hip->clump_blocks = sbi->data_clump_blocks;
	} else if (S_ISLNK(inode->i_mode)) {
		sbi->file_count++;
		inode->i_op = &page_symlink_inode_operations;
		inode_nohighmem(inode);
		inode->i_mapping->a_ops = &hfsplus_aops;
		hip->clump_blocks = 1;
	} else
		sbi->file_count++;
	insert_inode_hash(inode);
	mark_inode_dirty(inode);
	hfsplus_mark_mdb_dirty(sb);

	return inode;
}
/*
 * Account removal of an inode from the volume: decrement the folder or
 * file count, free regular-file/symlink storage, and mark the volume
 * header dirty.
 */
void hfsplus_delete_inode(struct inode *inode)
{
	struct super_block *sb = inode->i_sb;

	if (S_ISDIR(inode->i_mode)) {
		HFSPLUS_SB(sb)->folder_count--;
		hfsplus_mark_mdb_dirty(sb);
		return;
	}
	HFSPLUS_SB(sb)->file_count--;
	if (S_ISREG(inode->i_mode)) {
		if (!inode->i_nlink) {
			/* last link gone: release all allocated blocks */
			inode->i_size = 0;
			hfsplus_file_truncate(inode);
		}
	} else if (S_ISLNK(inode->i_mode)) {
		inode->i_size = 0;
		hfsplus_file_truncate(inode);
	}
	hfsplus_mark_mdb_dirty(sb);
}
/*
 * Load an on-disk fork descriptor into the in-core inode: copy the
 * first extent record, derive block/size accounting, and compute the
 * clump (preallocation) size, falling back to the mount defaults.
 */
void hfsplus_inode_read_fork(struct inode *inode, struct hfsplus_fork_raw *fork)
{
	struct super_block *sb = inode->i_sb;
	struct hfsplus_sb_info *sbi = HFSPLUS_SB(sb);
	struct hfsplus_inode_info *hip = HFSPLUS_I(inode);
	u32 count;
	int i;

	memcpy(&hip->first_extents, &fork->extents, sizeof(hfsplus_extent_rec));
	/* a fork record holds 8 extents; sum their allocation blocks */
	for (count = 0, i = 0; i < 8; i++)
		count += be32_to_cpu(fork->extents[i].block_count);
	hip->first_blocks = count;
	memset(hip->cached_extents, 0, sizeof(hfsplus_extent_rec));
	hip->cached_start = 0;
	hip->cached_blocks = 0;

	hip->alloc_blocks = be32_to_cpu(fork->total_blocks);
	hip->phys_size = inode->i_size = be64_to_cpu(fork->total_size);
	hip->fs_blocks =
		(inode->i_size + sb->s_blocksize - 1) >> sb->s_blocksize_bits;
	inode_set_bytes(inode, hip->fs_blocks << sb->s_blocksize_bits);
	hip->clump_blocks =
		be32_to_cpu(fork->clump_size) >> sbi->alloc_blksz_shift;
	if (!hip->clump_blocks) {
		/* no per-fork clump recorded: use the mount-wide default */
		hip->clump_blocks = HFSPLUS_IS_RSRC(inode) ?
			sbi->rsrc_clump_blocks :
			sbi->data_clump_blocks;
	}
}
488 void hfsplus_inode_write_fork(struct inode
*inode
,
489 struct hfsplus_fork_raw
*fork
)
491 memcpy(&fork
->extents
, &HFSPLUS_I(inode
)->first_extents
,
492 sizeof(hfsplus_extent_rec
));
493 fork
->total_size
= cpu_to_be64(inode
->i_size
);
494 fork
->total_blocks
= cpu_to_be32(HFSPLUS_I(inode
)->alloc_blocks
);
/*
 * Initialise an inode from its catalog record (located by @fd).
 * Folder and file records are decoded into the corresponding inode
 * type; malformed (short) records fail with -EIO.
 */
int hfsplus_cat_read_inode(struct inode *inode, struct hfs_find_data *fd)
{
	hfsplus_cat_entry entry;
	int res = 0;
	u16 type;

	/* record type is the first 16-bit field of the catalog entry */
	type = hfs_bnode_read_u16(fd->bnode, fd->entryoffset);

	HFSPLUS_I(inode)->linkid = 0;
	if (type == HFSPLUS_FOLDER) {
		struct hfsplus_cat_folder *folder = &entry.folder;

		if (fd->entrylength < sizeof(struct hfsplus_cat_folder)) {
			pr_err("bad catalog folder entry\n");
			res = -EIO;
			goto out;
		}
		hfs_bnode_read(fd->bnode, &entry, fd->entryoffset,
					sizeof(struct hfsplus_cat_folder));
		hfsplus_get_perms(inode, &folder->permissions, 1);
		set_nlink(inode, 1);
		/* directory "size" is valence plus "." and ".." */
		inode->i_size = 2 + be32_to_cpu(folder->valence);
		inode_set_atime_to_ts(inode, hfsp_mt2ut(folder->access_date));
		inode_set_mtime_to_ts(inode,
				      hfsp_mt2ut(folder->content_mod_date));
		inode_set_ctime_to_ts(inode,
				      hfsp_mt2ut(folder->attribute_mod_date));
		HFSPLUS_I(inode)->create_date = folder->create_date;
		HFSPLUS_I(inode)->fs_blocks = 0;
		if (folder->flags & cpu_to_be16(HFSPLUS_HAS_FOLDER_COUNT)) {
			HFSPLUS_I(inode)->subfolders =
				be32_to_cpu(folder->subfolders);
		}
		inode->i_op = &hfsplus_dir_inode_operations;
		inode->i_fop = &hfsplus_dir_operations;
	} else if (type == HFSPLUS_FILE) {
		struct hfsplus_cat_file *file = &entry.file;

		if (fd->entrylength < sizeof(struct hfsplus_cat_file)) {
			pr_err("bad catalog file entry\n");
			res = -EIO;
			goto out;
		}
		hfs_bnode_read(fd->bnode, &entry, fd->entryoffset,
					sizeof(struct hfsplus_cat_file));

		/* a resource-fork inode reads the rsrc fork, not data */
		hfsplus_inode_read_fork(inode, HFSPLUS_IS_RSRC(inode) ?
					&file->rsrc_fork : &file->data_fork);
		hfsplus_get_perms(inode, &file->permissions, 0);
		set_nlink(inode, 1);
		if (S_ISREG(inode->i_mode)) {
			/* dev field doubles as link count for hardlinks */
			if (file->permissions.dev)
				set_nlink(inode,
					  be32_to_cpu(file->permissions.dev));
			inode->i_op = &hfsplus_file_inode_operations;
			inode->i_fop = &hfsplus_file_operations;
			inode->i_mapping->a_ops = &hfsplus_aops;
		} else if (S_ISLNK(inode->i_mode)) {
			inode->i_op = &page_symlink_inode_operations;
			inode_nohighmem(inode);
			inode->i_mapping->a_ops = &hfsplus_aops;
		} else {
			init_special_inode(inode, inode->i_mode,
					   be32_to_cpu(file->permissions.dev));
		}
		inode_set_atime_to_ts(inode, hfsp_mt2ut(file->access_date));
		inode_set_mtime_to_ts(inode,
				      hfsp_mt2ut(file->content_mod_date));
		inode_set_ctime_to_ts(inode,
				      hfsp_mt2ut(file->attribute_mod_date));
		HFSPLUS_I(inode)->create_date = file->create_date;
	} else {
		pr_err("bad catalog entry used to create inode\n");
		res = -EIO;
	}
out:
	return res;
}
/*
 * Write the in-core inode back to its catalog record.  For a resource
 * fork inode, only its fork data is updated in the main inode's file
 * record; otherwise the full folder/file record is refreshed.
 */
int hfsplus_cat_write_inode(struct inode *inode)
{
	struct inode *main_inode = inode;
	struct hfs_find_data fd;
	hfsplus_cat_entry entry;
	int res = 0;

	if (HFSPLUS_IS_RSRC(inode))
		main_inode = HFSPLUS_I(inode)->rsrc_inode;

	/* an unlinked inode has no catalog record to update */
	if (!main_inode->i_nlink)
		return 0;

	if (hfs_find_init(HFSPLUS_SB(main_inode->i_sb)->cat_tree, &fd))
		/* panic? */
		return -EIO;

	if (hfsplus_find_cat(main_inode->i_sb, main_inode->i_ino, &fd))
		/* panic? */
		goto out;

	if (S_ISDIR(main_inode->i_mode)) {
		struct hfsplus_cat_folder *folder = &entry.folder;

		if (fd.entrylength < sizeof(struct hfsplus_cat_folder)) {
			pr_err("bad catalog folder entry\n");
			res = -EIO;
			goto out;
		}
		hfs_bnode_read(fd.bnode, &entry, fd.entryoffset,
					sizeof(struct hfsplus_cat_folder));
		/* simple node checks? */
		hfsplus_cat_set_perms(inode, &folder->permissions);
		folder->access_date = hfsp_ut2mt(inode_get_atime(inode));
		folder->content_mod_date = hfsp_ut2mt(inode_get_mtime(inode));
		folder->attribute_mod_date = hfsp_ut2mt(inode_get_ctime(inode));
		folder->valence = cpu_to_be32(inode->i_size - 2);
		if (folder->flags & cpu_to_be16(HFSPLUS_HAS_FOLDER_COUNT)) {
			folder->subfolders =
				cpu_to_be32(HFSPLUS_I(inode)->subfolders);
		}
		hfs_bnode_write(fd.bnode, &entry, fd.entryoffset,
					 sizeof(struct hfsplus_cat_folder));
	} else if (HFSPLUS_IS_RSRC(inode)) {
		/* only refresh the resource fork inside the file record */
		struct hfsplus_cat_file *file = &entry.file;
		hfs_bnode_read(fd.bnode, &entry, fd.entryoffset,
			       sizeof(struct hfsplus_cat_file));
		hfsplus_inode_write_fork(inode, &file->rsrc_fork);
		hfs_bnode_write(fd.bnode, &entry, fd.entryoffset,
				sizeof(struct hfsplus_cat_file));
	} else {
		struct hfsplus_cat_file *file = &entry.file;

		if (fd.entrylength < sizeof(struct hfsplus_cat_file)) {
			pr_err("bad catalog file entry\n");
			res = -EIO;
			goto out;
		}
		hfs_bnode_read(fd.bnode, &entry, fd.entryoffset,
					sizeof(struct hfsplus_cat_file));
		hfsplus_inode_write_fork(inode, &file->data_fork);
		hfsplus_cat_set_perms(inode, &file->permissions);
		/* reflect immutability into the Finder "locked" flag */
		if (HFSPLUS_FLG_IMMUTABLE &
				(file->permissions.rootflags |
					file->permissions.userflags))
			file->flags |= cpu_to_be16(HFSPLUS_FILE_LOCKED);
		else
			file->flags &= cpu_to_be16(~HFSPLUS_FILE_LOCKED);
		file->access_date = hfsp_ut2mt(inode_get_atime(inode));
		file->content_mod_date = hfsp_ut2mt(inode_get_mtime(inode));
		file->attribute_mod_date = hfsp_ut2mt(inode_get_ctime(inode));
		hfs_bnode_write(fd.bnode, &entry, fd.entryoffset,
					 sizeof(struct hfsplus_cat_file));
	}

	set_bit(HFSPLUS_I_CAT_DIRTY, &HFSPLUS_I(inode)->flags);
out:
	hfs_find_exit(&fd);
	return res;
}
657 int hfsplus_fileattr_get(struct dentry
*dentry
, struct fileattr
*fa
)
659 struct inode
*inode
= d_inode(dentry
);
660 struct hfsplus_inode_info
*hip
= HFSPLUS_I(inode
);
661 unsigned int flags
= 0;
663 if (inode
->i_flags
& S_IMMUTABLE
)
664 flags
|= FS_IMMUTABLE_FL
;
665 if (inode
->i_flags
& S_APPEND
)
666 flags
|= FS_APPEND_FL
;
667 if (hip
->userflags
& HFSPLUS_FLG_NODUMP
)
668 flags
|= FS_NODUMP_FL
;
670 fileattr_fill_flags(fa
, flags
);
/*
 * ->fileattr_set: apply FS_IMMUTABLE_FL / FS_APPEND_FL / FS_NODUMP_FL.
 * Any other flag (or fsx attributes) is rejected rather than silently
 * dropped.
 */
int hfsplus_fileattr_set(struct mnt_idmap *idmap,
			 struct dentry *dentry, struct fileattr *fa)
{
	struct inode *inode = d_inode(dentry);
	struct hfsplus_inode_info *hip = HFSPLUS_I(inode);
	unsigned int new_fl = 0;

	if (fileattr_has_fsx(fa))
		return -EOPNOTSUPP;

	/* don't silently ignore unsupported ext2 flags */
	if (fa->flags & ~(FS_IMMUTABLE_FL|FS_APPEND_FL|FS_NODUMP_FL))
		return -EOPNOTSUPP;

	if (fa->flags & FS_IMMUTABLE_FL)
		new_fl |= S_IMMUTABLE;

	if (fa->flags & FS_APPEND_FL)
		new_fl |= S_APPEND;

	/* atomically replace just the immutable/append bits */
	inode_set_flags(inode, new_fl, S_IMMUTABLE | S_APPEND);

	if (fa->flags & FS_NODUMP_FL)
		hip->userflags |= HFSPLUS_FLG_NODUMP;
	else
		hip->userflags &= ~HFSPLUS_FLG_NODUMP;

	inode_set_ctime_current(inode);
	mark_inode_dirty(inode);

	return 0;
}