// SPDX-License-Identifier: GPL-2.0
/*
 *
 * Copyright (C) 2019-2021 Paragon Software GmbH, All rights reserved.
 *
 * Regular file handling primitives for NTFS-based filesystems.
 *
 */

#include <linux/backing-dev.h>
#include <linux/blkdev.h>
#include <linux/buffer_head.h>
#include <linux/compat.h>
#include <linux/falloc.h>
#include <linux/fiemap.h>
#include <linux/fileattr.h>

#include "debug.h"
#include "ntfs.h"
#include "ntfs_fs.h"

static int ntfs_ioctl_fitrim(struct ntfs_sb_info *sbi, unsigned long arg)
{
	struct fstrim_range __user *user_range;
	struct fstrim_range range;
	struct block_device *dev;
	int err;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	dev = sbi->sb->s_bdev;
	if (!bdev_max_discard_sectors(dev))
		return -EOPNOTSUPP;

	user_range = (struct fstrim_range __user *)arg;
	if (copy_from_user(&range, user_range, sizeof(range)))
		return -EFAULT;

	range.minlen = max_t(u32, range.minlen, bdev_discard_granularity(dev));

	err = ntfs_trim_fs(sbi, &range);
	if (err < 0)
		return err;

	if (copy_to_user(user_range, &range, sizeof(range)))
		return -EFAULT;

	return 0;
}
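
/*
 * Note: FITRIM is honoured only when the backing device advertises discard
 * support (bdev_max_discard_sectors() != 0), and the caller's minimum
 * extent length is clamped up to the device discard granularity before the
 * range is handed to ntfs_trim_fs().
 */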

/*
 * ntfs_fileattr_get - inode_operations::fileattr_get
 */
int ntfs_fileattr_get(struct dentry *dentry, struct fileattr *fa)
{
	struct inode *inode = d_inode(dentry);
	struct ntfs_inode *ni = ntfs_i(inode);
	u32 flags = 0;

	if (inode->i_flags & S_IMMUTABLE)
		flags |= FS_IMMUTABLE_FL;

	if (inode->i_flags & S_APPEND)
		flags |= FS_APPEND_FL;

	if (is_compressed(ni))
		flags |= FS_COMPR_FL;

	if (is_encrypted(ni))
		flags |= FS_ENCRYPT_FL;

	fileattr_fill_flags(fa, flags);

	return 0;
}

/*
 * ntfs_fileattr_set - inode_operations::fileattr_set
 */
int ntfs_fileattr_set(struct mnt_idmap *idmap, struct dentry *dentry,
		      struct fileattr *fa)
{
	struct inode *inode = d_inode(dentry);
	struct ntfs_inode *ni = ntfs_i(inode);
	u32 flags = fa->flags;
	unsigned int new_fl = 0;

	if (fileattr_has_fsx(fa))
		return -EOPNOTSUPP;

	if (flags & ~(FS_IMMUTABLE_FL | FS_APPEND_FL | FS_COMPR_FL))
		return -EOPNOTSUPP;

	if (flags & FS_IMMUTABLE_FL)
		new_fl |= S_IMMUTABLE;

	if (flags & FS_APPEND_FL)
		new_fl |= S_APPEND;

	/* Allowed to change compression for empty files and for directories only. */
	if (!is_dedup(ni) && !is_encrypted(ni) &&
	    (S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode))) {
		/* Change compress state. */
		int err = ni_set_compress(inode, flags & FS_COMPR_FL);

		if (err)
			return err;
	}

	inode_set_flags(inode, new_fl, S_IMMUTABLE | S_APPEND);

	inode_set_ctime_current(inode);
	mark_inode_dirty(inode);

	return 0;
}
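
/*
 * Note: only FS_IMMUTABLE_FL, FS_APPEND_FL and FS_COMPR_FL are accepted
 * here; fsx attributes and any other flag bits fail with -EOPNOTSUPP.
 * Per the comment above, compression state is only changeable for empty
 * files and directories, which is enforced inside ni_set_compress().
 */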

/*
 * ntfs_ioctl - file_operations::unlocked_ioctl
 */
long ntfs_ioctl(struct file *filp, u32 cmd, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	struct ntfs_sb_info *sbi = inode->i_sb->s_fs_info;

	switch (cmd) {
	case FITRIM:
		return ntfs_ioctl_fitrim(sbi, arg);
	}
	return -ENOTTY; /* Inappropriate ioctl for device. */
}

#ifdef CONFIG_COMPAT
long ntfs_compat_ioctl(struct file *filp, u32 cmd, unsigned long arg)
{
	return ntfs_ioctl(filp, cmd, (unsigned long)compat_ptr(arg));
}
#endif
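
/*
 * compat_ptr() converts a 32-bit user-space pointer from a compat task
 * into a native kernel-usable pointer, so 32-bit callers share the 64-bit
 * ioctl path above unchanged.
 */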

/*
 * ntfs_getattr - inode_operations::getattr
 */
int ntfs_getattr(struct mnt_idmap *idmap, const struct path *path,
		 struct kstat *stat, u32 request_mask, u32 flags)
{
	struct inode *inode = d_inode(path->dentry);
	struct ntfs_inode *ni = ntfs_i(inode);

	stat->result_mask |= STATX_BTIME;
	stat->btime = ni->i_crtime;
	stat->blksize = ni->mi.sbi->cluster_size; /* 512, 1K, ..., 2M */

	if (inode->i_flags & S_IMMUTABLE)
		stat->attributes |= STATX_ATTR_IMMUTABLE;

	if (inode->i_flags & S_APPEND)
		stat->attributes |= STATX_ATTR_APPEND;

	if (is_compressed(ni))
		stat->attributes |= STATX_ATTR_COMPRESSED;

	if (is_encrypted(ni))
		stat->attributes |= STATX_ATTR_ENCRYPTED;

	stat->attributes_mask |= STATX_ATTR_COMPRESSED | STATX_ATTR_ENCRYPTED |
				 STATX_ATTR_IMMUTABLE | STATX_ATTR_APPEND;

	generic_fillattr(idmap, request_mask, inode, stat);

	return 0;
}
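
/*
 * Note: creation time (btime) is native to NTFS, so STATX_BTIME is always
 * reported; blksize is set to the cluster size since clusters are the
 * allocation unit. The remaining fields are filled by generic_fillattr().
 */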

static int ntfs_extend_initialized_size(struct file *file,
					struct ntfs_inode *ni,
					const loff_t valid,
					const loff_t new_valid)
{
	struct inode *inode = &ni->vfs_inode;
	struct address_space *mapping = inode->i_mapping;
	struct ntfs_sb_info *sbi = inode->i_sb->s_fs_info;
	loff_t pos = valid;
	int err;

	if (valid >= new_valid)
		return 0;

	if (is_resident(ni)) {
		ni->i_valid = new_valid;
		return 0;
	}

	WARN_ON(is_compressed(ni));

	for (;;) {
		u32 zerofrom, len;
		struct folio *folio;
		u8 bits;
		CLST vcn, lcn, clen;

		if (is_sparsed(ni)) {
			bits = sbi->cluster_bits;
			vcn = pos >> bits;

			err = attr_data_get_block(ni, vcn, 1, &lcn, &clen, NULL,
						  false);
			if (err)
				goto out;

			if (lcn == SPARSE_LCN) {
				pos = ((loff_t)clen + vcn) << bits;
				ni->i_valid = pos;
				goto next;
			}
		}

		zerofrom = pos & (PAGE_SIZE - 1);
		len = PAGE_SIZE - zerofrom;

		if (pos + len > new_valid)
			len = new_valid - pos;

		err = ntfs_write_begin(file, mapping, pos, len, &folio, NULL);
		if (err)
			goto out;

		folio_zero_range(folio, zerofrom, folio_size(folio) - zerofrom);

		err = ntfs_write_end(file, mapping, pos, len, len, folio, NULL);
		if (err < 0)
			goto out;
		pos += len;

next:
		if (pos >= new_valid)
			break;

		balance_dirty_pages_ratelimited(mapping);
	}

	return 0;

out:
	ntfs_inode_warn(inode, "failed to extend initialized size to %llx.",
			new_valid);
	return err;
}
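
/*
 * Note: ni->i_valid is the NTFS "initialized size" - the offset up to
 * which real data has been written; reads between i_valid and i_size must
 * return zeros. The helper above advances i_valid by writing zero pages
 * through the pagecache and skips whole sparse runs reported by
 * attr_data_get_block().
 */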

/*
 * ntfs_zero_range - Helper function for punch_hole.
 *
 * It zeroes a range [vbo, vbo_to).
 */
static int ntfs_zero_range(struct inode *inode, u64 vbo, u64 vbo_to)
{
	int err = 0;
	struct address_space *mapping = inode->i_mapping;
	u32 blocksize = i_blocksize(inode);
	pgoff_t idx = vbo >> PAGE_SHIFT;
	u32 from = vbo & (PAGE_SIZE - 1);
	pgoff_t idx_end = (vbo_to + PAGE_SIZE - 1) >> PAGE_SHIFT;
	loff_t page_off;
	struct buffer_head *head, *bh;
	u32 bh_next, bh_off, to;
	sector_t iblock;
	struct folio *folio;

	for (; idx < idx_end; idx += 1, from = 0) {
		page_off = (loff_t)idx << PAGE_SHIFT;
		to = (page_off + PAGE_SIZE) > vbo_to ? (vbo_to - page_off) :
						       PAGE_SIZE;
		iblock = page_off >> inode->i_blkbits;

		folio = __filemap_get_folio(
			mapping, idx, FGP_LOCK | FGP_ACCESSED | FGP_CREAT,
			mapping_gfp_constraint(mapping, ~__GFP_FS));
		if (IS_ERR(folio))
			return PTR_ERR(folio);

		head = folio_buffers(folio);
		if (!head)
			head = create_empty_buffers(folio, blocksize, 0);

		bh = head;
		bh_off = 0;
		do {
			bh_next = bh_off + blocksize;

			if (bh_next <= from || bh_off >= to)
				continue;

			if (!buffer_mapped(bh)) {
				ntfs_get_block(inode, iblock, bh, 0);
				/* Unmapped? It's a hole - nothing to do. */
				if (!buffer_mapped(bh))
					continue;
			}

			/* Ok, it's mapped. Make sure it's up-to-date. */
			if (folio_test_uptodate(folio))
				set_buffer_uptodate(bh);
			else if (bh_read(bh, 0) < 0) {
				err = -EIO;
				folio_unlock(folio);
				folio_put(folio);
				goto out;
			}

			mark_buffer_dirty(bh);
		} while (bh_off = bh_next, iblock += 1,
			 head != (bh = bh->b_this_page));

		folio_zero_segment(folio, from, to);

		folio_unlock(folio);
		folio_put(folio);
		cond_resched();
	}
out:
	mark_inode_dirty(inode);
	return err;
}
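
/*
 * Note: unlike the pagecache-only helper above, ntfs_zero_range() works at
 * block granularity: unmapped blocks (holes) are left untouched and only
 * blocks that are actually mapped get zeroed and dirtied, which is exactly
 * what a misaligned punch_hole needs.
 */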

/*
 * ntfs_file_mmap - file_operations::mmap
 */
static int ntfs_file_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct inode *inode = file_inode(file);
	struct ntfs_inode *ni = ntfs_i(inode);
	u64 from = ((u64)vma->vm_pgoff << PAGE_SHIFT);
	bool rw = vma->vm_flags & VM_WRITE;
	int err;

	if (unlikely(ntfs3_forced_shutdown(inode->i_sb)))
		return -EIO;

	if (is_encrypted(ni)) {
		ntfs_inode_warn(inode, "mmap encrypted not supported");
		return -EOPNOTSUPP;
	}

	if (is_dedup(ni)) {
		ntfs_inode_warn(inode, "mmap deduplicated not supported");
		return -EOPNOTSUPP;
	}

	if (is_compressed(ni) && rw) {
		ntfs_inode_warn(inode, "mmap(write) compressed not supported");
		return -EOPNOTSUPP;
	}

	if (rw) {
		u64 to = min_t(loff_t, i_size_read(inode),
			       from + vma->vm_end - vma->vm_start);

		if (is_sparsed(ni)) {
			/* Allocate clusters for rw map. */
			struct ntfs_sb_info *sbi = inode->i_sb->s_fs_info;
			CLST lcn, len;
			CLST vcn = from >> sbi->cluster_bits;
			CLST end = bytes_to_cluster(sbi, to);
			bool new;

			for (; vcn < end; vcn += len) {
				err = attr_data_get_block(ni, vcn, 1, &lcn,
							  &len, &new, true);
				if (err)
					goto out;
			}
		}

		if (ni->i_valid < to) {
			inode_lock(inode);
			err = ntfs_extend_initialized_size(file, ni,
							   ni->i_valid, to);
			inode_unlock(inode);
			if (err)
				goto out;
		}
	}

	err = generic_file_mmap(file, vma);
out:
	return err;
}
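
/*
 * Note: a writable mapping of a sparse file preallocates every cluster in
 * the mapped range and extends the initialized size up front, so later
 * write faults never have to allocate on-disk space.
 */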

static int ntfs_extend(struct inode *inode, loff_t pos, size_t count,
		       struct file *file)
{
	struct ntfs_inode *ni = ntfs_i(inode);
	struct address_space *mapping = inode->i_mapping;
	loff_t end = pos + count;
	bool extend_init = file && pos > ni->i_valid;
	int err;

	if (end <= inode->i_size && !extend_init)
		return 0;

	/* Mark rw ntfs as dirty. It will be cleared at umount. */
	ntfs_set_state(ni->mi.sbi, NTFS_DIRTY_DIRTY);

	if (end > inode->i_size) {
		err = ntfs_set_size(inode, end);
		if (err)
			goto out;
	}

	if (extend_init && !is_compressed(ni)) {
		err = ntfs_extend_initialized_size(file, ni, ni->i_valid, pos);
		if (err)
			goto out;
	} else {
		err = 0;
	}

	if (file && is_sparsed(ni)) {
		/*
		 * This code optimizes large writes to sparse file.
		 * TODO: merge this fragment with fallocate fragment.
		 */
		struct ntfs_sb_info *sbi = ni->mi.sbi;
		CLST vcn = pos >> sbi->cluster_bits;
		CLST cend = bytes_to_cluster(sbi, end);
		CLST cend_v = bytes_to_cluster(sbi, ni->i_valid);
		CLST lcn, clen;
		bool new;

		if (cend_v > cend)
			cend_v = cend;

		/*
		 * Allocate and zero new clusters.
		 * Zeroing these clusters may be too long.
		 */
		for (; vcn < cend_v; vcn += clen) {
			err = attr_data_get_block(ni, vcn, cend_v - vcn, &lcn,
						  &clen, &new, true);
			if (err)
				goto out;
		}

		/*
		 * Allocate but not zero new clusters.
		 */
		for (; vcn < cend; vcn += clen) {
			err = attr_data_get_block(ni, vcn, cend - vcn, &lcn,
						  &clen, &new, false);
			if (err)
				goto out;
		}
	}

	inode_set_mtime_to_ts(inode, inode_set_ctime_current(inode));
	mark_inode_dirty(inode);

	if (IS_SYNC(inode)) {
		int err2;

		err = filemap_fdatawrite_range(mapping, pos, end - 1);
		err2 = sync_mapping_buffers(mapping);
		if (!err)
			err = err2;
		err2 = write_inode_now(inode, 1);
		if (!err)
			err = err2;
		if (!err)
			err = filemap_fdatawait_range(mapping, pos, end - 1);
	}

out:
	return err;
}
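
/*
 * Note: the sparse-file fragment above allocates in two passes: clusters
 * below the current valid size are zeroed (reads in that region come from
 * disk), while clusters beyond it are allocated without zeroing, since
 * i_valid already makes them read back as zeros.
 */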

static int ntfs_truncate(struct inode *inode, loff_t new_size)
{
	struct super_block *sb = inode->i_sb;
	struct ntfs_inode *ni = ntfs_i(inode);
	int err, dirty = 0;
	u64 new_valid;

	if (!S_ISREG(inode->i_mode))
		return 0;

	if (is_compressed(ni)) {
		if (ni->i_valid > new_size)
			ni->i_valid = new_size;
	} else {
		err = block_truncate_page(inode->i_mapping, new_size,
					  ntfs_get_block);
		if (err)
			return err;
	}

	new_valid = ntfs_up_block(sb, min_t(u64, ni->i_valid, new_size));

	truncate_setsize(inode, new_size);

	ni_lock(ni);

	down_write(&ni->file.run_lock);
	err = attr_set_size(ni, ATTR_DATA, NULL, 0, &ni->file.run, new_size,
			    &new_valid, ni->mi.sbi->options->prealloc, NULL);
	up_write(&ni->file.run_lock);

	if (new_valid < ni->i_valid)
		ni->i_valid = new_valid;

	ni_unlock(ni);

	ni->std_fa |= FILE_ATTRIBUTE_ARCHIVE;
	inode_set_mtime_to_ts(inode, inode_set_ctime_current(inode));
	if (!IS_DIRSYNC(inode)) {
		dirty = 1;
	} else {
		err = ntfs_sync_inode(inode);
		if (err)
			return err;
	}

	if (dirty)
		mark_inode_dirty(inode);

	/*ntfs_flush_inodes(inode->i_sb, inode, NULL);*/

	return 0;
}
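
/*
 * Note: shrinking keeps ni->i_valid <= new_size so no stale tail data can
 * be exposed, and attr_set_size() may keep a preallocated tail when the
 * 'prealloc' mount option is set.
 */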

/*
 * ntfs_fallocate - file_operations::ntfs_fallocate
 *
 * Preallocate space for a file. This implements ntfs's fallocate file
 * operation, which gets called from sys_fallocate system call. User
 * space requests 'len' bytes at 'vbo'. If FALLOC_FL_KEEP_SIZE is set
 * we just allocate clusters without zeroing them out. Otherwise we
 * allocate and zero out clusters via an expanding truncate.
 */
static long ntfs_fallocate(struct file *file, int mode, loff_t vbo, loff_t len)
{
	struct inode *inode = file_inode(file);
	struct address_space *mapping = inode->i_mapping;
	struct super_block *sb = inode->i_sb;
	struct ntfs_sb_info *sbi = sb->s_fs_info;
	struct ntfs_inode *ni = ntfs_i(inode);
	loff_t end = vbo + len;
	loff_t vbo_down = round_down(vbo, max_t(unsigned long,
						sbi->cluster_size, PAGE_SIZE));
	bool is_supported_holes = is_sparsed(ni) || is_compressed(ni);
	loff_t i_size, new_size;
	bool map_locked;
	int err;

	/* No support for dir. */
	if (!S_ISREG(inode->i_mode))
		return -EOPNOTSUPP;

	/*
	 * vfs_fallocate checks all possible combinations of mode.
	 * Do additional checks here before ntfs_set_state(dirty).
	 */
	if (mode & FALLOC_FL_PUNCH_HOLE) {
		if (!is_supported_holes)
			return -EOPNOTSUPP;
	} else if (mode & FALLOC_FL_COLLAPSE_RANGE) {
	} else if (mode & FALLOC_FL_INSERT_RANGE) {
		if (!is_supported_holes)
			return -EOPNOTSUPP;
	} else if (mode &
		   ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE |
		     FALLOC_FL_COLLAPSE_RANGE | FALLOC_FL_INSERT_RANGE)) {
		ntfs_inode_warn(inode, "fallocate(0x%x) is not supported",
				mode);
		return -EOPNOTSUPP;
	}

	ntfs_set_state(sbi, NTFS_DIRTY_DIRTY);

	inode_lock(inode);
	i_size = inode->i_size;
	new_size = max(end, i_size);
	map_locked = false;

	if (WARN_ON(ni->ni_flags & NI_FLAG_COMPRESSED_MASK)) {
		/* Should never be here, see ntfs_file_open. */
		err = -EOPNOTSUPP;
		goto out;
	}

	if (mode & (FALLOC_FL_PUNCH_HOLE | FALLOC_FL_COLLAPSE_RANGE |
		    FALLOC_FL_INSERT_RANGE)) {
		inode_dio_wait(inode);
		filemap_invalidate_lock(mapping);
		map_locked = true;
	}

	if (mode & FALLOC_FL_PUNCH_HOLE) {
		u32 frame_size;
		loff_t mask, vbo_a, end_a, tmp;

		err = filemap_write_and_wait_range(mapping, vbo_down,
						   LLONG_MAX);
		if (err)
			goto out;

		truncate_pagecache(inode, vbo_down);

		ni_lock(ni);
		err = attr_punch_hole(ni, vbo, len, &frame_size);
		ni_unlock(ni);
		if (!err)
			goto ok;

		if (err != E_NTFS_NOTALIGNED)
			goto out;

		/* Process not aligned punch. */
		err = 0;
		mask = frame_size - 1;
		vbo_a = (vbo + mask) & ~mask;
		end_a = end & ~mask;

		tmp = min(vbo_a, end);
		if (tmp > vbo) {
			err = ntfs_zero_range(inode, vbo, tmp);
			if (err)
				goto out;
		}

		if (vbo < end_a && end_a < end) {
			err = ntfs_zero_range(inode, end_a, end);
			if (err)
				goto out;
		}

		/* Aligned punch_hole */
		if (end_a > vbo_a) {
			ni_lock(ni);
			err = attr_punch_hole(ni, vbo_a, end_a - vbo_a, NULL);
			ni_unlock(ni);
			if (err)
				goto out;
		}
	} else if (mode & FALLOC_FL_COLLAPSE_RANGE) {
		/*
		 * Write tail of the last page before removed range since
		 * it will get removed from the page cache below.
		 */
		err = filemap_write_and_wait_range(mapping, vbo_down, vbo);
		if (err)
			goto out;

		/*
		 * Write data that will be shifted to preserve them
		 * when discarding page cache below.
		 */
		err = filemap_write_and_wait_range(mapping, end, LLONG_MAX);
		if (err)
			goto out;

		truncate_pagecache(inode, vbo_down);

		ni_lock(ni);
		err = attr_collapse_range(ni, vbo, len);
		ni_unlock(ni);
		if (err)
			goto out;
	} else if (mode & FALLOC_FL_INSERT_RANGE) {
		/* Check new size. */
		err = inode_newsize_ok(inode, new_size);
		if (err)
			goto out;

		/* Write out all dirty pages. */
		err = filemap_write_and_wait_range(mapping, vbo_down,
						   LLONG_MAX);
		if (err)
			goto out;
		truncate_pagecache(inode, vbo_down);

		ni_lock(ni);
		err = attr_insert_range(ni, vbo, len);
		ni_unlock(ni);
		if (err)
			goto out;
	} else {
		/* Check new size. */
		u8 cluster_bits = sbi->cluster_bits;

		/* Be sure file is non resident. */
		if (is_resident(ni)) {
			ni_lock(ni);
			err = attr_force_nonresident(ni);
			ni_unlock(ni);
			if (err)
				goto out;
		}

		/* generic/213: expected -ENOSPC instead of -EFBIG. */
		if (!is_supported_holes) {
			loff_t to_alloc = new_size - inode_get_bytes(inode);

			if (to_alloc > 0 &&
			    (to_alloc >> cluster_bits) >
				    wnd_zeroes(&sbi->used.bitmap)) {
				err = -ENOSPC;
				goto out;
			}
		}

		err = inode_newsize_ok(inode, new_size);
		if (err)
			goto out;

		if (new_size > i_size) {
			/*
			 * Allocate clusters, do not change 'valid' size.
			 */
			err = ntfs_set_size(inode, new_size);
			if (err)
				goto out;
		}

		if (is_supported_holes) {
			CLST vcn = vbo >> cluster_bits;
			CLST cend = bytes_to_cluster(sbi, end);
			CLST cend_v = bytes_to_cluster(sbi, ni->i_valid);
			CLST lcn, clen;
			bool new;

			if (cend_v > cend)
				cend_v = cend;

			/*
			 * Allocate and zero new clusters.
			 * Zeroing these clusters may be too long.
			 */
			for (; vcn < cend_v; vcn += clen) {
				err = attr_data_get_block(ni, vcn, cend_v - vcn,
							  &lcn, &clen, &new,
							  true);
				if (err)
					goto out;
			}

			/*
			 * Allocate but not zero new clusters.
			 */
			for (; vcn < cend; vcn += clen) {
				err = attr_data_get_block(ni, vcn, cend - vcn,
							  &lcn, &clen, &new,
							  false);
				if (err)
					goto out;
			}
		}

		if (mode & FALLOC_FL_KEEP_SIZE) {
			ni_lock(ni);
			/* True - Keep preallocated. */
			err = attr_set_size(ni, ATTR_DATA, NULL, 0,
					    &ni->file.run, i_size, &ni->i_valid,
					    true, NULL);
			ni_unlock(ni);
			if (err)
				goto out;
		} else if (new_size > i_size) {
			i_size_write(inode, new_size);
		}
	}

ok:
	err = file_modified(file);
	if (err)
		goto out;

out:
	if (map_locked)
		filemap_invalidate_unlock(mapping);

	if (!err) {
		inode_set_mtime_to_ts(inode, inode_set_ctime_current(inode));
		mark_inode_dirty(inode);
	}

	inode_unlock(inode);
	return err;
}
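
/*
 * Note on the punch-hole path above: attr_punch_hole() fails with
 * E_NTFS_NOTALIGNED when the range does not cover whole frames; the
 * misaligned head and tail are then zeroed in place with ntfs_zero_range()
 * and only the frame-aligned middle is actually deallocated.
 */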

/*
 * ntfs_setattr - inode_operations::setattr
 */
int ntfs_setattr(struct mnt_idmap *idmap, struct dentry *dentry,
		 struct iattr *attr)
{
	struct inode *inode = d_inode(dentry);
	struct ntfs_inode *ni = ntfs_i(inode);
	u32 ia_valid = attr->ia_valid;
	umode_t mode = inode->i_mode;
	int err;

	if (unlikely(ntfs3_forced_shutdown(inode->i_sb)))
		return -EIO;

	err = setattr_prepare(idmap, dentry, attr);
	if (err)
		goto out;

	if (ia_valid & ATTR_SIZE) {
		loff_t newsize, oldsize;

		if (WARN_ON(ni->ni_flags & NI_FLAG_COMPRESSED_MASK)) {
			/* Should never be here, see ntfs_file_open(). */
			err = -EOPNOTSUPP;
			goto out;
		}
		inode_dio_wait(inode);
		oldsize = i_size_read(inode);
		newsize = attr->ia_size;

		if (newsize <= oldsize)
			err = ntfs_truncate(inode, newsize);
		else
			err = ntfs_extend(inode, newsize, 0, NULL);

		if (err)
			goto out;

		ni->ni_flags |= NI_FLAG_UPDATE_PARENT;
		i_size_write(inode, newsize);
	}

	setattr_copy(idmap, inode, attr);

	if (mode != inode->i_mode) {
		err = ntfs_acl_chmod(idmap, dentry);
		if (err)
			goto out;

		/* Linux 'w' -> Windows 'ro'. */
		if (0222 & inode->i_mode)
			ni->std_fa &= ~FILE_ATTRIBUTE_READONLY;
		else
			ni->std_fa |= FILE_ATTRIBUTE_READONLY;
	}

	if (ia_valid & (ATTR_UID | ATTR_GID | ATTR_MODE))
		ntfs_save_wsl_perm(inode, NULL);
	mark_inode_dirty(inode);
out:
	return err;
}
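
/*
 * Note: mode bits are mirrored into the NTFS FILE_ATTRIBUTE_READONLY flag
 * (any 'w' bit clears the Windows read-only attribute), and uid/gid/mode
 * changes are additionally persisted via ntfs_save_wsl_perm() as WSL-style
 * extended attributes.
 */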

/*
 * check_read_restriction:
 * common code for ntfs_file_read_iter and ntfs_file_splice_read
 */
static int check_read_restriction(struct inode *inode)
{
	struct ntfs_inode *ni = ntfs_i(inode);

	if (unlikely(ntfs3_forced_shutdown(inode->i_sb)))
		return -EIO;

	if (is_encrypted(ni)) {
		ntfs_inode_warn(inode, "encrypted i/o not supported");
		return -EOPNOTSUPP;
	}

#ifndef CONFIG_NTFS3_LZX_XPRESS
	if (ni->ni_flags & NI_FLAG_COMPRESSED_MASK) {
		ntfs_inode_warn(
			inode,
			"activate CONFIG_NTFS3_LZX_XPRESS to read external compressed files");
		return -EOPNOTSUPP;
	}
#endif

	if (is_dedup(ni)) {
		ntfs_inode_warn(inode, "read deduplicated not supported");
		return -EOPNOTSUPP;
	}

	return 0;
}

/*
 * ntfs_file_read_iter - file_operations::read_iter
 */
static ssize_t ntfs_file_read_iter(struct kiocb *iocb, struct iov_iter *iter)
{
	struct file *file = iocb->ki_filp;
	struct inode *inode = file_inode(file);
	struct ntfs_inode *ni = ntfs_i(inode);
	ssize_t err;

	err = check_read_restriction(inode);
	if (err)
		return err;

	if (is_compressed(ni) && (iocb->ki_flags & IOCB_DIRECT)) {
		ntfs_inode_warn(inode, "direct i/o + compressed not supported");
		return -EOPNOTSUPP;
	}

	return generic_file_read_iter(iocb, iter);
}

/*
 * ntfs_file_splice_read - file_operations::splice_read
 */
static ssize_t ntfs_file_splice_read(struct file *in, loff_t *ppos,
				     struct pipe_inode_info *pipe, size_t len,
				     unsigned int flags)
{
	struct inode *inode = file_inode(in);
	ssize_t err;

	err = check_read_restriction(inode);
	if (err)
		return err;

	return filemap_splice_read(in, ppos, pipe, len, flags);
}

/*
 * ntfs_get_frame_pages
 *
 * Return: Array of locked pages.
 */
static int ntfs_get_frame_pages(struct address_space *mapping, pgoff_t index,
				struct page **pages, u32 pages_per_frame,
				bool *frame_uptodate)
{
	gfp_t gfp_mask = mapping_gfp_mask(mapping);
	u32 npages;

	*frame_uptodate = true;

	for (npages = 0; npages < pages_per_frame; npages++, index++) {
		struct folio *folio;

		folio = __filemap_get_folio(mapping, index,
					    FGP_LOCK | FGP_ACCESSED | FGP_CREAT,
					    gfp_mask);
		if (IS_ERR(folio)) {
			while (npages--) {
				folio = page_folio(pages[npages]);
				folio_unlock(folio);
				folio_put(folio);
			}

			return -ENOMEM;
		}

		if (!folio_test_uptodate(folio))
			*frame_uptodate = false;

		pages[npages] = &folio->page;
	}

	return 0;
}

/*
 * ntfs_compress_write - Helper for ntfs_file_write_iter() (compressed files).
 */
static ssize_t ntfs_compress_write(struct kiocb *iocb, struct iov_iter *from)
{
	int err;
	struct file *file = iocb->ki_filp;
	size_t count = iov_iter_count(from);
	loff_t pos = iocb->ki_pos;
	struct inode *inode = file_inode(file);
	loff_t i_size = i_size_read(inode);
	struct address_space *mapping = inode->i_mapping;
	struct ntfs_inode *ni = ntfs_i(inode);
	u64 valid = ni->i_valid;
	struct ntfs_sb_info *sbi = ni->mi.sbi;
	struct page *page, **pages = NULL;
	size_t written = 0;
	u8 frame_bits = NTFS_LZNT_CUNIT + sbi->cluster_bits;
	u32 frame_size = 1u << frame_bits;
	u32 pages_per_frame = frame_size >> PAGE_SHIFT;
	u32 ip, off;
	CLST frame;
	u64 frame_vbo;
	pgoff_t index;
	bool frame_uptodate;
	struct folio *folio;

	if (frame_size < PAGE_SIZE) {
		/*
		 * frame_size == 8K if cluster 512
		 * frame_size == 64K if cluster 4096
		 */
		ntfs_inode_warn(inode, "page size is bigger than frame size");
		return -EOPNOTSUPP;
	}

	pages = kmalloc_array(pages_per_frame, sizeof(struct page *), GFP_NOFS);
	if (!pages)
		return -ENOMEM;

	err = file_remove_privs(file);
	if (err)
		goto out;

	err = file_update_time(file);
	if (err)
		goto out;

	/* Zero range [valid : pos). */
	while (valid < pos) {
		CLST lcn, clen;

		frame = valid >> frame_bits;
		frame_vbo = valid & ~(frame_size - 1);
		off = valid & (frame_size - 1);

		err = attr_data_get_block(ni, frame << NTFS_LZNT_CUNIT, 1, &lcn,
					  &clen, NULL, false);
		if (err)
			goto out;

		if (lcn == SPARSE_LCN) {
			ni->i_valid = valid =
				frame_vbo + ((u64)clen << sbi->cluster_bits);
			continue;
		}

		/* Load full frame. */
		err = ntfs_get_frame_pages(mapping, frame_vbo >> PAGE_SHIFT,
					   pages, pages_per_frame,
					   &frame_uptodate);
		if (err)
			goto out;

		if (!frame_uptodate && off) {
			err = ni_read_frame(ni, frame_vbo, pages,
					    pages_per_frame);
			if (err) {
				for (ip = 0; ip < pages_per_frame; ip++) {
					page = pages[ip];
					folio = page_folio(page);
					folio_unlock(folio);
					folio_put(folio);
				}
				goto out;
			}
		}

		ip = off >> PAGE_SHIFT;
		off = offset_in_page(valid);
		for (; ip < pages_per_frame; ip++, off = 0) {
			page = pages[ip];
			folio = page_folio(page);
			zero_user_segment(page, off, PAGE_SIZE);
			flush_dcache_page(page);
			folio_mark_uptodate(folio);
		}

		ni_lock(ni);
		err = ni_write_frame(ni, pages, pages_per_frame);
		ni_unlock(ni);

		for (ip = 0; ip < pages_per_frame; ip++) {
			page = pages[ip];
			folio = page_folio(page);
			folio_mark_uptodate(folio);
			folio_unlock(folio);
			folio_put(folio);
		}

		if (err)
			goto out;

		ni->i_valid = valid = frame_vbo + frame_size;
	}

	/* Copy user data [pos : pos + count). */
	while (count) {
		size_t copied, bytes;

		off = pos & (frame_size - 1);
		bytes = frame_size - off;
		if (bytes > count)
			bytes = count;

		frame_vbo = pos & ~(frame_size - 1);
		index = frame_vbo >> PAGE_SHIFT;

		if (unlikely(fault_in_iov_iter_readable(from, bytes))) {
			err = -EFAULT;
			goto out;
		}

		/* Load full frame. */
		err = ntfs_get_frame_pages(mapping, index, pages,
					   pages_per_frame, &frame_uptodate);
		if (err)
			goto out;

		if (!frame_uptodate) {
			loff_t to = pos + bytes;

			if (off || (to < i_size && (to & (frame_size - 1)))) {
				err = ni_read_frame(ni, frame_vbo, pages,
						    pages_per_frame);
				if (err) {
					for (ip = 0; ip < pages_per_frame;
					     ip++) {
						page = pages[ip];
						folio = page_folio(page);
						folio_unlock(folio);
						folio_put(folio);
					}
					goto out;
				}
			}
		}

		WARN_ON(!bytes);
		copied = 0;
		ip = off >> PAGE_SHIFT;
		off = offset_in_page(pos);

		/* Copy user data to pages. */
		for (;;) {
			size_t cp, tail = PAGE_SIZE - off;

			page = pages[ip];
			cp = copy_page_from_iter_atomic(page, off,
							min(tail, bytes), from);
			flush_dcache_page(page);

			copied += cp;
			bytes -= cp;
			if (!bytes || !cp)
				break;

			if (cp < tail) {
				off += cp;
			} else {
				ip++;
				off = 0;
			}
		}

		ni_lock(ni);
		err = ni_write_frame(ni, pages, pages_per_frame);
		ni_unlock(ni);

		for (ip = 0; ip < pages_per_frame; ip++) {
			page = pages[ip];
			ClearPageDirty(page);
			folio = page_folio(page);
			folio_mark_uptodate(folio);
			folio_unlock(folio);
			folio_put(folio);
		}

		if (err)
			goto out;

		/*
		 * We can loop for a long time in here. Be nice and allow
		 * us to schedule out to avoid softlocking if preempt
		 * is disabled.
		 */
		cond_resched();

		pos += copied;
		written += copied;

		count = iov_iter_count(from);
	}

out:
	kfree(pages);

	if (err < 0)
		return err;

	iocb->ki_pos += written;
	if (iocb->ki_pos > ni->i_valid)
		ni->i_valid = iocb->ki_pos;
	if (iocb->ki_pos > i_size)
		i_size_write(inode, iocb->ki_pos);

	return written;
}
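
/*
 * Note: writes to compressed files bypass the generic path because LZNT
 * (de)compression works on whole frames of NTFS_LZNT_CUNIT clusters; each
 * iteration locks the frame's pages, reads the frame in first when it is
 * only partially overwritten, copies the user data, and recompresses the
 * frame via ni_write_frame().
 */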

/*
 * check_write_restriction:
 * common code for ntfs_file_write_iter and ntfs_file_splice_write
 */
static int check_write_restriction(struct inode *inode)
{
	struct ntfs_inode *ni = ntfs_i(inode);

	if (unlikely(ntfs3_forced_shutdown(inode->i_sb)))
		return -EIO;

	if (is_encrypted(ni)) {
		ntfs_inode_warn(inode, "encrypted i/o not supported");
		return -EOPNOTSUPP;
	}

	if (is_dedup(ni)) {
		ntfs_inode_warn(inode, "write into deduplicated not supported");
		return -EOPNOTSUPP;
	}

	return 0;
}

/*
 * ntfs_file_write_iter - file_operations::write_iter
 */
static ssize_t ntfs_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
	struct file *file = iocb->ki_filp;
	struct inode *inode = file_inode(file);
	struct ntfs_inode *ni = ntfs_i(inode);
	ssize_t ret;
	int err;

	err = check_write_restriction(inode);
	if (err)
		return err;

	if (is_compressed(ni) && (iocb->ki_flags & IOCB_DIRECT)) {
		ntfs_inode_warn(inode, "direct i/o + compressed not supported");
		return -EOPNOTSUPP;
	}

	if (!inode_trylock(inode)) {
		if (iocb->ki_flags & IOCB_NOWAIT)
			return -EAGAIN;
		inode_lock(inode);
	}

	ret = generic_write_checks(iocb, from);
	if (ret <= 0)
		goto out;

	err = file_modified(iocb->ki_filp);
	if (err) {
		ret = err;
		goto out;
	}

	if (WARN_ON(ni->ni_flags & NI_FLAG_COMPRESSED_MASK)) {
		/* Should never be here, see ntfs_file_open(). */
		ret = -EOPNOTSUPP;
		goto out;
	}

	ret = ntfs_extend(inode, iocb->ki_pos, ret, file);
	if (ret)
		goto out;

	ret = is_compressed(ni) ? ntfs_compress_write(iocb, from) :
				  __generic_file_write_iter(iocb, from);

out:
	inode_unlock(inode);

	if (ret > 0)
		ret = generic_write_sync(iocb, ret);

	return ret;
}
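
/*
 * Note: ntfs_extend() runs before the actual copy so that i_size and the
 * initialized size already cover [pos, pos + count); the data itself then
 * flows through __generic_file_write_iter() or, for compressed files, the
 * frame-based helper above.
 */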

/*
 * ntfs_file_open - file_operations::open
 */
int ntfs_file_open(struct inode *inode, struct file *file)
{
	struct ntfs_inode *ni = ntfs_i(inode);

	if (unlikely(ntfs3_forced_shutdown(inode->i_sb)))
		return -EIO;

	if (unlikely((is_compressed(ni) || is_encrypted(ni)) &&
		     (file->f_flags & O_DIRECT))) {
		return -EOPNOTSUPP;
	}

	/* Decompress "external compressed" file if opened for rw. */
	if ((ni->ni_flags & NI_FLAG_COMPRESSED_MASK) &&
	    (file->f_flags & (O_WRONLY | O_RDWR | O_TRUNC))) {
#ifdef CONFIG_NTFS3_LZX_XPRESS
		int err = ni_decompress_file(ni);

		if (err)
			return err;
#else
		ntfs_inode_warn(
			inode,
			"activate CONFIG_NTFS3_LZX_XPRESS to write external compressed files");
		return -EOPNOTSUPP;
#endif
	}

	return generic_file_open(inode, file);
}

/*
 * ntfs_file_release - file_operations::release
 */
static int ntfs_file_release(struct inode *inode, struct file *file)
{
	struct ntfs_inode *ni = ntfs_i(inode);
	struct ntfs_sb_info *sbi = ni->mi.sbi;
	int err = 0;

	/* If we are last writer on the inode, drop the block reservation. */
	if (sbi->options->prealloc &&
	    ((file->f_mode & FMODE_WRITE) &&
	     atomic_read(&inode->i_writecount) == 1)
	   /*
	    * The only file when inode->i_fop = &ntfs_file_operations and
	    * init_rwsem(&ni->file.run_lock) is not called explicitly is MFT.
	    *
	    * Add additional check here.
	    */
	   && inode->i_ino != MFT_REC_MFT) {
		ni_lock(ni);
		down_write(&ni->file.run_lock);

		err = attr_set_size(ni, ATTR_DATA, NULL, 0, &ni->file.run,
				    i_size_read(inode), &ni->i_valid, false,
				    NULL);

		up_write(&ni->file.run_lock);
		ni_unlock(ni);
	}
	return err;
}
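
/*
 * Note: the MFT inode shares these file operations but never initializes
 * ni->file.run_lock, hence the explicit i_ino != MFT_REC_MFT check above
 * before trimming the preallocated tail on last close.
 */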

/*
 * ntfs_fiemap - inode_operations::fiemap
 */
int ntfs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
		__u64 start, __u64 len)
{
	int err;
	struct ntfs_inode *ni = ntfs_i(inode);

	err = fiemap_prep(inode, fieinfo, start, &len, ~FIEMAP_FLAG_XATTR);
	if (err)
		return err;

	ni_lock(ni);

	err = ni_fiemap(ni, fieinfo, start, len);

	ni_unlock(ni);

	return err;
}

/*
 * ntfs_file_splice_write - file_operations::splice_write
 */
static ssize_t ntfs_file_splice_write(struct pipe_inode_info *pipe,
				      struct file *file, loff_t *ppos,
				      size_t len, unsigned int flags)
{
	ssize_t err;
	struct inode *inode = file_inode(file);

	err = check_write_restriction(inode);
	if (err)
		return err;

	return iter_file_splice_write(pipe, file, ppos, len, flags);
}

const struct inode_operations ntfs_file_inode_operations = {
	.getattr	= ntfs_getattr,
	.setattr	= ntfs_setattr,
	.listxattr	= ntfs_listxattr,
	.get_acl	= ntfs_get_acl,
	.set_acl	= ntfs_set_acl,
	.fiemap		= ntfs_fiemap,
	.fileattr_get	= ntfs_fileattr_get,
	.fileattr_set	= ntfs_fileattr_set,
};

const struct file_operations ntfs_file_operations = {
	.llseek		= generic_file_llseek,
	.read_iter	= ntfs_file_read_iter,
	.write_iter	= ntfs_file_write_iter,
	.unlocked_ioctl	= ntfs_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= ntfs_compat_ioctl,
#endif
	.splice_read	= ntfs_file_splice_read,
	.splice_write	= ntfs_file_splice_write,
	.mmap		= ntfs_file_mmap,
	.open		= ntfs_file_open,
	.fsync		= generic_file_fsync,
	.fallocate	= ntfs_fallocate,
	.release	= ntfs_file_release,
};

#if IS_ENABLED(CONFIG_NTFS_FS)
const struct file_operations ntfs_legacy_file_operations = {
	.llseek		= generic_file_llseek,
	.read_iter	= ntfs_file_read_iter,
	.splice_read	= ntfs_file_splice_read,
	.open		= ntfs_file_open,
	.release	= ntfs_file_release,
};
#endif