// SPDX-License-Identifier: GPL-2.0
/*
 *  linux/fs/ext4/file.c
 *
 * Copyright (C) 1992, 1993, 1994, 1995
 * Remy Card (card@masi.ibp.fr)
 * Laboratoire MASI - Institut Blaise Pascal
 * Universite Pierre et Marie Curie (Paris VI)
 *
 *  from
 *
 *  linux/fs/minix/file.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 *  ext4 fs regular file handling primitives
 *
 *  64-bit file support on 64-bit platforms by Jakub Jelinek
 *	(jj@sunsite.ms.mff.cuni.cz)
 */

#include <linux/time.h>
#include <linux/fs.h>
#include <linux/iomap.h>
#include <linux/mount.h>
#include <linux/path.h>
#include <linux/dax.h>
#include <linux/quotaops.h>
#include <linux/pagevec.h>
#include <linux/uio.h>
#include <linux/mman.h>
#include <linux/backing-dev.h>
#include "ext4.h"
#include "ext4_jbd2.h"
#include "xattr.h"
#include "acl.h"
#include "truncate.h"

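/*
 * Returns true if the inode can safely be used as a direct I/O target:
 * fscrypt-encrypted, fs-verity, data-journalled, and inline-data inodes
 * all have to fall back to buffered I/O instead.
 */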
static bool ext4_dio_supported(struct inode *inode)
{
	if (IS_ENABLED(CONFIG_FS_ENCRYPTION) && IS_ENCRYPTED(inode))
		return false;
	if (fsverity_active(inode))
		return false;
	if (ext4_should_journal_data(inode))
		return false;
	if (ext4_has_inline_data(inode))
		return false;
	return true;
}

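/*
 * Direct I/O read path. A shared inode lock is sufficient here; with
 * IOCB_NOWAIT we only trylock and return -EAGAIN rather than sleep.
 */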
static ssize_t ext4_dio_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
	ssize_t ret;
	struct inode *inode = file_inode(iocb->ki_filp);

	if (iocb->ki_flags & IOCB_NOWAIT) {
		if (!inode_trylock_shared(inode))
			return -EAGAIN;
	} else {
		inode_lock_shared(inode);
	}

	if (!ext4_dio_supported(inode)) {
		inode_unlock_shared(inode);
		/*
		 * Fall back to buffered I/O if the operation being performed on
		 * the inode is not supported by direct I/O. The IOCB_DIRECT
		 * flag needs to be cleared here in order to ensure that the
		 * direct I/O path within generic_file_read_iter() is not
		 * taken again.
		 */
		iocb->ki_flags &= ~IOCB_DIRECT;
		return generic_file_read_iter(iocb, to);
	}

	ret = iomap_dio_rw(iocb, to, &ext4_iomap_ops, NULL,
			   is_sync_kiocb(iocb));
	inode_unlock_shared(inode);

	file_accessed(iocb->ki_filp);
	return ret;
}

#ifdef CONFIG_FS_DAX
static ssize_t ext4_dax_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
	struct inode *inode = file_inode(iocb->ki_filp);
	ssize_t ret;

	if (iocb->ki_flags & IOCB_NOWAIT) {
		if (!inode_trylock_shared(inode))
			return -EAGAIN;
	} else {
		inode_lock_shared(inode);
	}
	/*
	 * Recheck under inode lock - at this point we are sure it cannot
	 * change anymore
	 */
	if (!IS_DAX(inode)) {
		inode_unlock_shared(inode);
		/* Fall back to buffered IO in case we cannot support DAX */
		return generic_file_read_iter(iocb, to);
	}
	ret = dax_iomap_rw(iocb, to, &ext4_iomap_ops);
	inode_unlock_shared(inode);

	file_accessed(iocb->ki_filp);
	return ret;
}
#endif

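/*
 * Top-level read entry point: dispatch to the DAX, direct I/O, or
 * buffered read path depending on the inode and kiocb flags.
 */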
static ssize_t ext4_file_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
	struct inode *inode = file_inode(iocb->ki_filp);

	if (unlikely(ext4_forced_shutdown(EXT4_SB(inode->i_sb))))
		return -EIO;

	if (!iov_iter_count(to))
		return 0; /* skip atime */

#ifdef CONFIG_FS_DAX
	if (IS_DAX(inode))
		return ext4_dax_read_iter(iocb, to);
#endif
	if (iocb->ki_flags & IOCB_DIRECT)
		return ext4_dio_read_iter(iocb, to);

	return generic_file_read_iter(iocb, to);
}

/*
 * Called when an inode is released. Note that this is different
 * from ext4_file_open: open gets called at every open, but release
 * gets called only when /all/ the files are closed.
 */
static int ext4_release_file(struct inode *inode, struct file *filp)
{
	if (ext4_test_inode_state(inode, EXT4_STATE_DA_ALLOC_CLOSE)) {
		ext4_alloc_da_blocks(inode);
		ext4_clear_inode_state(inode, EXT4_STATE_DA_ALLOC_CLOSE);
	}
	/* if we are the last writer on the inode, drop the block reservation */
	if ((filp->f_mode & FMODE_WRITE) &&
	    (atomic_read(&inode->i_writecount) == 1) &&
	    !EXT4_I(inode)->i_reserved_data_blocks) {
		down_write(&EXT4_I(inode)->i_data_sem);
		ext4_discard_preallocations(inode);
		up_write(&EXT4_I(inode)->i_data_sem);
	}
	if (is_dx(inode) && filp->private_data)
		ext4_htree_free_dir_info(filp->private_data);

	return 0;
}

/*
 * This tests whether the IO in question is block-aligned or not.
 * Ext4 utilizes unwritten extents when hole-filling during direct IO, and they
 * are converted to written only after the IO is complete. Until they are
 * mapped, these blocks appear as holes, so dio_zero_block() will assume that
 * it needs to zero out portions of the start and/or end block. If two AIO
 * threads are at work on the same unwritten block, they must be synchronized
 * or one thread will zero the other's data, causing corruption.
 */
static bool
ext4_unaligned_io(struct inode *inode, struct iov_iter *from, loff_t pos)
{
	struct super_block *sb = inode->i_sb;
	unsigned long blockmask = sb->s_blocksize - 1;

	if ((pos | iov_iter_alignment(from)) & blockmask)
		return true;

	return false;
}

/* Is the IO extending i_size or the on-disk inode size? */
static bool
ext4_extending_io(struct inode *inode, loff_t offset, size_t len)
{
	if (offset + len > i_size_read(inode) ||
	    offset + len > EXT4_I(inode)->i_disksize)
		return true;
	return false;
}

/* Is IO overwriting allocated and initialized blocks? */
static bool ext4_overwrite_io(struct inode *inode, loff_t pos, loff_t len)
{
	struct ext4_map_blocks map;
	unsigned int blkbits = inode->i_blkbits;
	int err, blklen;

	if (pos + len > i_size_read(inode))
		return false;

	map.m_lblk = pos >> blkbits;
	map.m_len = EXT4_MAX_BLOCKS(len, pos, blkbits);
	blklen = map.m_len;

	err = ext4_map_blocks(NULL, inode, &map, 0);
	/*
	 * 'err==len' means that all of the blocks have been preallocated,
	 * regardless of whether they have been initialized or not. To exclude
	 * unwritten extents, we need to check m_flags.
	 */
	return err == blklen && (map.m_flags & EXT4_MAP_MAPPED);
}

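/*
 * Common write checks: immutable inodes are rejected, and writes to
 * block-mapped (non-extent) files are capped at s_bitmap_maxbytes.
 */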
static ssize_t ext4_generic_write_checks(struct kiocb *iocb,
					 struct iov_iter *from)
{
	struct inode *inode = file_inode(iocb->ki_filp);
	ssize_t ret;

	if (unlikely(IS_IMMUTABLE(inode)))
		return -EPERM;

	ret = generic_write_checks(iocb, from);
	if (ret <= 0)
		return ret;

	/*
	 * If we have encountered a bitmap-format file, the size limit
	 * is smaller than s_maxbytes, which is for extent-mapped files.
	 */
	if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))) {
		struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);

		if (iocb->ki_pos >= sbi->s_bitmap_maxbytes)
			return -EFBIG;
		iov_iter_truncate(from, sbi->s_bitmap_maxbytes - iocb->ki_pos);
	}

	return iov_iter_count(from);
}

static ssize_t ext4_write_checks(struct kiocb *iocb, struct iov_iter *from)
{
	ssize_t ret, count;

	count = ext4_generic_write_checks(iocb, from);
	if (count <= 0)
		return count;

	ret = file_modified(iocb->ki_filp);
	if (ret)
		return ret;
	return count;
}

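/*
 * Buffered write path: takes the exclusive inode lock and copies the
 * data into the page cache via generic_perform_write().
 */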
static ssize_t ext4_buffered_write_iter(struct kiocb *iocb,
					struct iov_iter *from)
{
	ssize_t ret;
	struct inode *inode = file_inode(iocb->ki_filp);

	if (iocb->ki_flags & IOCB_NOWAIT)
		return -EOPNOTSUPP;

	inode_lock(inode);
	ret = ext4_write_checks(iocb, from);
	if (ret <= 0)
		goto out;

	current->backing_dev_info = inode_to_bdi(inode);
	ret = generic_perform_write(iocb->ki_filp, from, iocb->ki_pos);
	current->backing_dev_info = NULL;

out:
	inode_unlock(inode);
	if (likely(ret > 0)) {
		iocb->ki_pos += ret;
		ret = generic_write_sync(iocb, ret);
	}

	return ret;
}

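/*
 * Finish off a size-extending write: update i_size/i_disksize under a
 * journal handle, truncate any blocks allocated beyond what was actually
 * written, and drop the inode from the orphan list on success.
 */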
static ssize_t ext4_handle_inode_extension(struct inode *inode, loff_t offset,
					   ssize_t written, size_t count)
{
	handle_t *handle;
	bool truncate = false;
	u8 blkbits = inode->i_blkbits;
	ext4_lblk_t written_blk, end_blk;

	/*
	 * Note that EXT4_I(inode)->i_disksize can get extended up to
	 * inode->i_size while the I/O was running due to writeback of delalloc
	 * blocks. But, the code in ext4_iomap_alloc() is careful to use
	 * zeroed/unwritten extents if this is possible; thus we won't leave
	 * uninitialized blocks in a file even if we didn't succeed in writing
	 * as much as we intended.
	 */
	WARN_ON_ONCE(i_size_read(inode) < EXT4_I(inode)->i_disksize);
	if (offset + count <= EXT4_I(inode)->i_disksize) {
		/*
		 * We need to ensure that the inode is removed from the orphan
		 * list if it has been added prematurely, due to writeback of
		 * delalloc blocks.
		 */
		if (!list_empty(&EXT4_I(inode)->i_orphan) && inode->i_nlink) {
			handle = ext4_journal_start(inode, EXT4_HT_INODE, 2);

			if (IS_ERR(handle)) {
				ext4_orphan_del(NULL, inode);
				return PTR_ERR(handle);
			}

			ext4_orphan_del(handle, inode);
			ext4_journal_stop(handle);
		}

		return written;
	}

	if (written < 0)
		goto truncate;

	handle = ext4_journal_start(inode, EXT4_HT_INODE, 2);
	if (IS_ERR(handle)) {
		written = PTR_ERR(handle);
		goto truncate;
	}

	if (ext4_update_inode_size(inode, offset + written))
		ext4_mark_inode_dirty(handle, inode);

	/*
	 * We may need to truncate allocated but not written blocks beyond EOF.
	 */
	written_blk = ALIGN(offset + written, 1 << blkbits);
	end_blk = ALIGN(offset + count, 1 << blkbits);
	if (written_blk < end_blk && ext4_can_truncate(inode))
		truncate = true;

	/*
	 * Remove the inode from the orphan list if it has been extended and
	 * everything went OK.
	 */
	if (!truncate && inode->i_nlink)
		ext4_orphan_del(handle, inode);
	ext4_journal_stop(handle);

	if (truncate) {
truncate:
		ext4_truncate_failed_write(inode);
		/*
		 * If the truncate operation failed early, then the inode may
		 * still be on the orphan list. In that case, we need to try to
		 * remove the inode from the in-memory linked list.
		 */
		if (inode->i_nlink)
			ext4_orphan_del(NULL, inode);
	}

	return written;
}

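/*
 * iomap dio completion callback: convert any unwritten extents covered
 * by a successful write to written state.
 */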
static int ext4_dio_write_end_io(struct kiocb *iocb, ssize_t size,
				 int error, unsigned int flags)
{
	loff_t offset = iocb->ki_pos;
	struct inode *inode = file_inode(iocb->ki_filp);

	if (error)
		return error;

	if (size && flags & IOMAP_DIO_UNWRITTEN)
		return ext4_convert_unwritten_extents(NULL, inode,
						      offset, size);

	return 0;
}

static const struct iomap_dio_ops ext4_dio_write_ops = {
	.end_io = ext4_dio_write_end_io,
};

/*
 * The intention here is to start with a shared lock acquired, then see if any
 * condition requires an exclusive inode lock. If so, we restart the whole
 * operation by releasing the shared lock and acquiring the exclusive lock.
 *
 * - For unaligned IO we never take the shared lock, as it may cause data
 *   corruption when two unaligned IOs try to modify the same block, e.g.
 *   while zeroing.
 *
 * - For extending writes we don't take the shared lock, since they require
 *   updating inode i_disksize and/or orphan handling under the exclusive lock.
 *
 * - Shared locking will therefore mostly be used for overwrites. Otherwise we
 *   switch to the exclusive i_rwsem lock.
 */
static ssize_t ext4_dio_write_checks(struct kiocb *iocb, struct iov_iter *from,
				     bool *ilock_shared, bool *extend)
{
	struct file *file = iocb->ki_filp;
	struct inode *inode = file_inode(file);
	loff_t offset;
	size_t count;
	ssize_t ret;

restart:
	ret = ext4_generic_write_checks(iocb, from);
	if (ret <= 0)
		goto out;

	offset = iocb->ki_pos;
	count = ret;
	if (ext4_extending_io(inode, offset, count))
		*extend = true;
	/*
	 * Determine whether the IO operation will overwrite allocated
	 * and initialized blocks.
	 * We need exclusive i_rwsem for changing security info
	 * in file_modified().
	 */
	if (*ilock_shared && (!IS_NOSEC(inode) || *extend ||
	     !ext4_overwrite_io(inode, offset, count))) {
		inode_unlock_shared(inode);
		*ilock_shared = false;
		inode_lock(inode);
		goto restart;
	}

	ret = file_modified(file);
	if (ret < 0)
		goto out;

	return count;
out:
	if (*ilock_shared)
		inode_unlock_shared(inode);
	else
		inode_unlock(inode);
	return ret;
}

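/*
 * Direct I/O write path: picks shared vs exclusive i_rwsem as described
 * above, and falls back to buffered I/O where DIO is unsupported or the
 * direct write only partially completed.
 */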
static ssize_t ext4_dio_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
	ssize_t ret;
	handle_t *handle;
	struct inode *inode = file_inode(iocb->ki_filp);
	loff_t offset = iocb->ki_pos;
	size_t count = iov_iter_count(from);
	const struct iomap_ops *iomap_ops = &ext4_iomap_ops;
	bool extend = false, unaligned_io = false;
	bool ilock_shared = true;

	/*
	 * We initially start with a shared inode lock unless it is
	 * unaligned IO, which needs an exclusive lock anyway.
	 */
	if (ext4_unaligned_io(inode, from, offset)) {
		unaligned_io = true;
		ilock_shared = false;
	}
	/*
	 * Quick check here without any i_rwsem lock to see if it is extending
	 * IO. A more reliable check is done in ext4_dio_write_checks() with
	 * proper locking in place.
	 */
	if (offset + count > i_size_read(inode))
		ilock_shared = false;

	if (iocb->ki_flags & IOCB_NOWAIT) {
		if (ilock_shared) {
			if (!inode_trylock_shared(inode))
				return -EAGAIN;
		} else {
			if (!inode_trylock(inode))
				return -EAGAIN;
		}
	} else {
		if (ilock_shared)
			inode_lock_shared(inode);
		else
			inode_lock(inode);
	}

	/* Fall back to buffered I/O if the inode does not support direct I/O. */
	if (!ext4_dio_supported(inode)) {
		if (ilock_shared)
			inode_unlock_shared(inode);
		else
			inode_unlock(inode);
		return ext4_buffered_write_iter(iocb, from);
	}

	ret = ext4_dio_write_checks(iocb, from, &ilock_shared, &extend);
	if (ret <= 0)
		return ret;

	offset = iocb->ki_pos;
	count = ret;

	/*
	 * Unaligned direct IO must be serialized among each other as zeroing
	 * of partial blocks of two competing unaligned IOs can result in data
	 * corruption.
	 *
	 * So we make sure we don't allow any unaligned IO in flight.
	 * For IOs where we need not wait (like unaligned non-AIO DIO),
	 * the inode_dio_wait() below may anyway become a no-op, since we
	 * start with an exclusive lock.
	 */
	if (unaligned_io)
		inode_dio_wait(inode);

	if (extend) {
		handle = ext4_journal_start(inode, EXT4_HT_INODE, 2);
		if (IS_ERR(handle)) {
			ret = PTR_ERR(handle);
			goto out;
		}

		ret = ext4_orphan_add(handle, inode);
		if (ret) {
			ext4_journal_stop(handle);
			goto out;
		}

		ext4_journal_stop(handle);
	}

	if (ilock_shared)
		iomap_ops = &ext4_iomap_overwrite_ops;
	ret = iomap_dio_rw(iocb, from, iomap_ops, &ext4_dio_write_ops,
			   is_sync_kiocb(iocb) || unaligned_io || extend);

	if (extend)
		ret = ext4_handle_inode_extension(inode, offset, ret, count);

out:
	if (ilock_shared)
		inode_unlock_shared(inode);
	else
		inode_unlock(inode);

	if (ret >= 0 && iov_iter_count(from)) {
		ssize_t err;
		loff_t endbyte;

		offset = iocb->ki_pos;
		err = ext4_buffered_write_iter(iocb, from);
		if (err < 0)
			return err;

		/*
		 * We need to ensure that the pages within the page cache for
		 * the range covered by this I/O are written to disk and
		 * invalidated. This is in an attempt to preserve the expected
		 * direct I/O semantics in the case we fall back to buffered
		 * I/O to complete the I/O request.
		 */
		ret += err;
		endbyte = offset + err - 1;
		err = filemap_write_and_wait_range(iocb->ki_filp->f_mapping,
						   offset, endbyte);
		if (!err)
			invalidate_mapping_pages(iocb->ki_filp->f_mapping,
						 offset >> PAGE_SHIFT,
						 endbyte >> PAGE_SHIFT);
	}

	return ret;
}

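/*
 * DAX write path: always takes the exclusive inode lock, adds the inode
 * to the orphan list before a size-extending write, and finishes the
 * extension via ext4_handle_inode_extension().
 */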
#ifdef CONFIG_FS_DAX
static ssize_t
ext4_dax_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
	ssize_t ret;
	size_t count;
	loff_t offset;
	handle_t *handle;
	bool extend = false;
	struct inode *inode = file_inode(iocb->ki_filp);

	if (iocb->ki_flags & IOCB_NOWAIT) {
		if (!inode_trylock(inode))
			return -EAGAIN;
	} else {
		inode_lock(inode);
	}

	ret = ext4_write_checks(iocb, from);
	if (ret <= 0)
		goto out;

	offset = iocb->ki_pos;
	count = iov_iter_count(from);

	if (offset + count > EXT4_I(inode)->i_disksize) {
		handle = ext4_journal_start(inode, EXT4_HT_INODE, 2);
		if (IS_ERR(handle)) {
			ret = PTR_ERR(handle);
			goto out;
		}

		ret = ext4_orphan_add(handle, inode);
		if (ret) {
			ext4_journal_stop(handle);
			goto out;
		}

		extend = true;
		ext4_journal_stop(handle);
	}

	ret = dax_iomap_rw(iocb, from, &ext4_iomap_ops);

	if (extend)
		ret = ext4_handle_inode_extension(inode, offset, ret, count);
out:
	inode_unlock(inode);
	if (ret > 0)
		ret = generic_write_sync(iocb, ret);
	return ret;
}
#endif

static ssize_t
ext4_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
	struct inode *inode = file_inode(iocb->ki_filp);

	if (unlikely(ext4_forced_shutdown(EXT4_SB(inode->i_sb))))
		return -EIO;

#ifdef CONFIG_FS_DAX
	if (IS_DAX(inode))
		return ext4_dax_write_iter(iocb, from);
#endif
	if (iocb->ki_flags & IOCB_DIRECT)
		return ext4_dio_write_iter(iocb, from);

	return ext4_buffered_write_iter(iocb, from);
}

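/*
 * DAX page fault handler: writable shared faults run under a journal
 * handle and retry on ENOSPC; synchronous faults are completed with
 * dax_finish_sync_fault().
 */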
#ifdef CONFIG_FS_DAX
static vm_fault_t ext4_dax_huge_fault(struct vm_fault *vmf,
				      enum page_entry_size pe_size)
{
	int error = 0;
	vm_fault_t result;
	int retries = 0;
	handle_t *handle = NULL;
	struct inode *inode = file_inode(vmf->vma->vm_file);
	struct super_block *sb = inode->i_sb;

	/*
	 * We have to distinguish real writes from writes which will result in a
	 * COW page; COW writes should *not* poke the journal (the file will not
	 * be changed). Doing so would cause unintended failures when mounted
	 * read-only.
	 *
	 * We check for VM_SHARED rather than vmf->cow_page since the latter is
	 * unset for pe_size != PE_SIZE_PTE (i.e. only in do_cow_fault); for
	 * other sizes, dax_iomap_fault will handle splitting / fallback so that
	 * we eventually come back with a COW page.
	 */
	bool write = (vmf->flags & FAULT_FLAG_WRITE) &&
		     (vmf->vma->vm_flags & VM_SHARED);
	pfn_t pfn;

	if (write) {
		sb_start_pagefault(sb);
		file_update_time(vmf->vma->vm_file);
		down_read(&EXT4_I(inode)->i_mmap_sem);
retry:
		handle = ext4_journal_start_sb(sb, EXT4_HT_WRITE_PAGE,
					       EXT4_DATA_TRANS_BLOCKS(sb));
		if (IS_ERR(handle)) {
			up_read(&EXT4_I(inode)->i_mmap_sem);
			sb_end_pagefault(sb);
			return VM_FAULT_SIGBUS;
		}
	} else {
		down_read(&EXT4_I(inode)->i_mmap_sem);
	}
	result = dax_iomap_fault(vmf, pe_size, &pfn, &error, &ext4_iomap_ops);
	if (write) {
		ext4_journal_stop(handle);

		if ((result & VM_FAULT_ERROR) && error == -ENOSPC &&
		    ext4_should_retry_alloc(sb, &retries))
			goto retry;
		/* Handling synchronous page fault? */
		if (result & VM_FAULT_NEEDDSYNC)
			result = dax_finish_sync_fault(vmf, pe_size, pfn);
		up_read(&EXT4_I(inode)->i_mmap_sem);
		sb_end_pagefault(sb);
	} else {
		up_read(&EXT4_I(inode)->i_mmap_sem);
	}

	return result;
}

static vm_fault_t ext4_dax_fault(struct vm_fault *vmf)
{
	return ext4_dax_huge_fault(vmf, PE_SIZE_PTE);
}

static const struct vm_operations_struct ext4_dax_vm_ops = {
	.fault		= ext4_dax_fault,
	.huge_fault	= ext4_dax_huge_fault,
	.page_mkwrite	= ext4_dax_fault,
	.pfn_mkwrite	= ext4_dax_fault,
};
#else
#define ext4_dax_vm_ops	ext4_file_vm_ops
#endif

static const struct vm_operations_struct ext4_file_vm_ops = {
	.fault		= ext4_filemap_fault,
	.map_pages	= filemap_map_pages,
	.page_mkwrite	= ext4_page_mkwrite,
};

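/*
 * Choose the vm_operations at mmap time: DAX inodes get the DAX fault
 * handlers and VM_HUGEPAGE; everything else uses the page-cache ops.
 */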
static int ext4_file_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct inode *inode = file->f_mapping->host;
	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
	struct dax_device *dax_dev = sbi->s_daxdev;

	if (unlikely(ext4_forced_shutdown(sbi)))
		return -EIO;

	/*
	 * We don't support synchronous mappings for non-DAX files, nor
	 * for DAX files if the underlying dax_device is not synchronous.
	 */
	if (!daxdev_mapping_supported(vma, dax_dev))
		return -EOPNOTSUPP;

	file_accessed(file);
	if (IS_DAX(file_inode(file))) {
		vma->vm_ops = &ext4_dax_vm_ops;
		vma->vm_flags |= VM_HUGEPAGE;
	} else {
		vma->vm_ops = &ext4_file_vm_ops;
	}
	return 0;
}

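/*
 * Record where the filesystem was first mounted in the superblock's
 * s_last_mounted field; this is done only once per filesystem lifetime,
 * guarded by EXT4_MF_MNTDIR_SAMPLED.
 */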
static int ext4_sample_last_mounted(struct super_block *sb,
				    struct vfsmount *mnt)
{
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	struct path path;
	char buf[64], *cp;
	handle_t *handle;
	int err;

	if (likely(sbi->s_mount_flags & EXT4_MF_MNTDIR_SAMPLED))
		return 0;

	if (sb_rdonly(sb) || !sb_start_intwrite_trylock(sb))
		return 0;

	sbi->s_mount_flags |= EXT4_MF_MNTDIR_SAMPLED;
	/*
	 * Sample where the filesystem has been mounted and
	 * store it in the superblock for sysadmin convenience
	 * when trying to sort through large numbers of block
	 * devices or filesystem images.
	 */
	memset(buf, 0, sizeof(buf));
	path.mnt = mnt;
	path.dentry = mnt->mnt_root;
	cp = d_path(&path, buf, sizeof(buf));
	err = 0;
	if (IS_ERR(cp))
		goto out;

	handle = ext4_journal_start_sb(sb, EXT4_HT_MISC, 1);
	err = PTR_ERR(handle);
	if (IS_ERR(handle))
		goto out;
	BUFFER_TRACE(sbi->s_sbh, "get_write_access");
	err = ext4_journal_get_write_access(handle, sbi->s_sbh);
	if (err)
		goto out_journal;
	strlcpy(sbi->s_es->s_last_mounted, cp,
		sizeof(sbi->s_es->s_last_mounted));
	ext4_handle_dirty_super(handle, sb);
out_journal:
	ext4_journal_stop(handle);
out:
	sb_end_intwrite(sb);
	return err;
}

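/*
 * Per-open setup: fscrypt/fsverity open checks, jbd2_inode attachment
 * for writers, and advertising FMODE_NOWAIT support.
 */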
static int ext4_file_open(struct inode *inode, struct file *filp)
{
	int ret;

	if (unlikely(ext4_forced_shutdown(EXT4_SB(inode->i_sb))))
		return -EIO;

	ret = ext4_sample_last_mounted(inode->i_sb, filp->f_path.mnt);
	if (ret)
		return ret;

	ret = fscrypt_file_open(inode, filp);
	if (ret)
		return ret;

	ret = fsverity_file_open(inode, filp);
	if (ret)
		return ret;

	/*
	 * Set up the jbd2_inode if we are opening the inode for
	 * writing and the journal is present
	 */
	if (filp->f_mode & FMODE_WRITE) {
		ret = ext4_inode_attach_jinode(inode);
		if (ret < 0)
			return ret;
	}

	filp->f_mode |= FMODE_NOWAIT;
	return dquot_file_open(inode, filp);
}

/*
 * ext4_llseek() handles both block-mapped and extent-mapped maxbytes values
 * by calling generic_file_llseek_size() with the appropriate maxbytes value
 * for each.
 */
loff_t ext4_llseek(struct file *file, loff_t offset, int whence)
{
	struct inode *inode = file->f_mapping->host;
	loff_t maxbytes;

	if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)))
		maxbytes = EXT4_SB(inode->i_sb)->s_bitmap_maxbytes;
	else
		maxbytes = inode->i_sb->s_maxbytes;

	switch (whence) {
	default:
		return generic_file_llseek_size(file, offset, whence,
						maxbytes, i_size_read(inode));
	case SEEK_HOLE:
		inode_lock_shared(inode);
		offset = iomap_seek_hole(inode, offset,
					 &ext4_iomap_report_ops);
		inode_unlock_shared(inode);
		break;
	case SEEK_DATA:
		inode_lock_shared(inode);
		offset = iomap_seek_data(inode, offset,
					 &ext4_iomap_report_ops);
		inode_unlock_shared(inode);
		break;
	}

	if (offset < 0)
		return offset;
	return vfs_setpos(file, offset, maxbytes);
}

const struct file_operations ext4_file_operations = {
	.llseek		= ext4_llseek,
	.read_iter	= ext4_file_read_iter,
	.write_iter	= ext4_file_write_iter,
	.iopoll		= iomap_dio_iopoll,
	.unlocked_ioctl	= ext4_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= ext4_compat_ioctl,
#endif
	.mmap		= ext4_file_mmap,
	.mmap_supported_flags = MAP_SYNC,
	.open		= ext4_file_open,
	.release	= ext4_release_file,
	.fsync		= ext4_sync_file,
	.get_unmapped_area = thp_get_unmapped_area,
	.splice_read	= generic_file_splice_read,
	.splice_write	= iter_file_splice_write,
	.fallocate	= ext4_fallocate,
};

const struct inode_operations ext4_file_inode_operations = {
	.setattr	= ext4_setattr,
	.getattr	= ext4_file_getattr,
	.listxattr	= ext4_listxattr,
	.get_acl	= ext4_get_acl,
	.set_acl	= ext4_set_acl,
	.fiemap		= ext4_fiemap,
};