/*
 * fs/dax.c - Direct Access filesystem code
 * Copyright (c) 2013-2014 Intel Corporation
 * Author: Matthew Wilcox <matthew.r.wilcox@intel.com>
 * Author: Ross Zwisler <ross.zwisler@linux.intel.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 */
#include <linux/atomic.h>
#include <linux/blkdev.h>
#include <linux/buffer_head.h>
#include <linux/dax.h>
#include <linux/fs.h>
#include <linux/genhd.h>
#include <linux/highmem.h>
#include <linux/memcontrol.h>
#include <linux/mm.h>
#include <linux/mutex.h>
#include <linux/pmem.h>
#include <linux/sched.h>
#include <linux/uio.h>
#include <linux/vmstat.h>
int dax_clear_blocks(struct inode *inode, sector_t block, long size)
{
	struct block_device *bdev = inode->i_sb->s_bdev;
	sector_t sector = block << (inode->i_blkbits - 9);

	might_sleep();
	do {
		void __pmem *addr;
		unsigned long pfn;
		long count;

		count = bdev_direct_access(bdev, sector, &addr, &pfn, size);
		if (count < 0)
			return count;
		BUG_ON(size < count);
		while (count > 0) {
			unsigned pgsz = PAGE_SIZE - offset_in_page(addr);
			if (pgsz > count)
				pgsz = count;
			clear_pmem(addr, pgsz);
			addr += pgsz;
			size -= pgsz;
			count -= pgsz;
			BUG_ON(pgsz & 511);
			sector += pgsz / 512;
			cond_resched();
		}
	} while (size);

	wmb_pmem();
	return 0;
}
EXPORT_SYMBOL_GPL(dax_clear_blocks);
static long dax_get_addr(struct buffer_head *bh, void __pmem **addr,
		unsigned blkbits)
{
	unsigned long pfn;
	sector_t sector = bh->b_blocknr << (blkbits - 9);
	return bdev_direct_access(bh->b_bdev, sector, addr, &pfn, bh->b_size);
}
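/*
 * Note for the helper above: its callers only need the kernel virtual
 * address of the block, so the pfn that bdev_direct_access() also reports
 * is discarded.  The positive return value is the number of bytes
 * addressable at *addr, which dax_io() below uses to bound its copies.
 */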
/* the clear_pmem() calls are ordered by a wmb_pmem() in the caller */
static void dax_new_buf(void __pmem *addr, unsigned size, unsigned first,
		loff_t pos, loff_t end)
{
	loff_t final = end - pos + first; /* The final byte of the buffer */

	if (first > 0)
		clear_pmem(addr, first);
	if (final < size)
		clear_pmem(addr + final, size - final);
}
static bool buffer_written(struct buffer_head *bh)
{
	return buffer_mapped(bh) && !buffer_unwritten(bh);
}
/*
 * When ext4 encounters a hole, it returns without modifying the buffer_head
 * which means that we can't trust b_size.  To cope with this, we set b_state
 * to 0 before calling get_block and, if any bit is set, we know we can trust
 * b_size.  Unfortunate, really, since ext4 knows precisely how long a hole is
 * and would save us time calling get_block repeatedly.
 */
static bool buffer_size_valid(struct buffer_head *bh)
{
	return bh->b_state != 0;
}
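/*
 * The convention described above, sketched from the calling side (dax_io()
 * below follows exactly this pattern): zero b_state before the lookup, and
 * if get_block() left it untouched, fall back to probing one block at a time:
 *
 *	bh->b_state = 0;
 *	retval = get_block(inode, block, bh, iov_iter_rw(iter) == WRITE);
 *	if (!buffer_size_valid(bh))
 *		bh->b_size = 1 << blkbits;
 */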
static ssize_t dax_io(struct inode *inode, struct iov_iter *iter,
		      loff_t start, loff_t end, get_block_t get_block,
		      struct buffer_head *bh)
{
	ssize_t retval = 0;
	loff_t pos = start;
	loff_t max = start;
	loff_t bh_max = start;
	void __pmem *addr = NULL;
	bool hole = false;
	bool need_wmb = false;

	if (iov_iter_rw(iter) != WRITE)
		end = min(end, i_size_read(inode));

	while (pos < end) {
		size_t len;
		if (pos == max) {
			unsigned blkbits = inode->i_blkbits;
			long page = pos >> PAGE_SHIFT;
			sector_t block = page << (PAGE_SHIFT - blkbits);
			unsigned first = pos - (block << blkbits);
			long size;

			if (pos == bh_max) {
				bh->b_size = PAGE_ALIGN(end - pos);
				bh->b_state = 0;
				retval = get_block(inode, block, bh,
						   iov_iter_rw(iter) == WRITE);
				if (retval)
					break;
				if (!buffer_size_valid(bh))
					bh->b_size = 1 << blkbits;
				bh_max = pos - first + bh->b_size;
			} else {
				unsigned done = bh->b_size -
						(bh_max - (pos - first));
				bh->b_blocknr += done >> blkbits;
				bh->b_size -= done;
			}

			hole = iov_iter_rw(iter) != WRITE && !buffer_written(bh);
			if (hole) {
				addr = NULL;
				size = bh->b_size - first;
			} else {
				retval = dax_get_addr(bh, &addr, blkbits);
				if (retval < 0)
					break;
				if (buffer_unwritten(bh) || buffer_new(bh)) {
					dax_new_buf(addr, retval, first, pos,
								end);
					need_wmb = true;
				}
				addr += first;
				size = retval - first;
			}
			max = min(pos + size, end);
		}

		if (iov_iter_rw(iter) == WRITE) {
			len = copy_from_iter_pmem(addr, max - pos, iter);
			need_wmb = true;
		} else if (!hole)
			len = copy_to_iter((void __force *)addr, max - pos,
					iter);
		else
			len = iov_iter_zero(max - pos, iter);

		if (!len) {
			retval = -EFAULT;
			break;
		}

		pos += len;
		addr += len;
	}

	if (need_wmb)
		wmb_pmem();

	return (pos == start) ? retval : pos - start;
}
/**
 * dax_do_io - Perform I/O to a DAX file
 * @iocb: The control block for this I/O
 * @inode: The file which the I/O is directed at
 * @iter: The addresses to do I/O from or to
 * @pos: The file offset where the I/O starts
 * @get_block: The filesystem method used to translate file offsets to blocks
 * @end_io: A filesystem callback for I/O completion
 * @flags: See below
 *
 * This function uses the same locking scheme as do_blockdev_direct_IO:
 * If @flags has DIO_LOCKING set, we assume that the i_mutex is held by the
 * caller for writes.  For reads, we take and release the i_mutex ourselves.
 * If DIO_LOCKING is not set, the filesystem takes care of its own locking.
 * As with do_blockdev_direct_IO(), we increment i_dio_count while the I/O
 * is in progress.
 */
ssize_t dax_do_io(struct kiocb *iocb, struct inode *inode,
		  struct iov_iter *iter, loff_t pos, get_block_t get_block,
		  dio_iodone_t end_io, int flags)
{
	struct buffer_head bh;
	ssize_t retval = -EINVAL;
	loff_t end = pos + iov_iter_count(iter);

	memset(&bh, 0, sizeof(bh));

	if ((flags & DIO_LOCKING) && iov_iter_rw(iter) == READ) {
		struct address_space *mapping = inode->i_mapping;
		mutex_lock(&inode->i_mutex);
		retval = filemap_write_and_wait_range(mapping, pos, end - 1);
		if (retval) {
			mutex_unlock(&inode->i_mutex);
			goto out;
		}
	}

	/* Protects against truncate */
	if (!(flags & DIO_SKIP_DIO_COUNT))
		inode_dio_begin(inode);

	retval = dax_io(inode, iter, pos, end, get_block, &bh);

	if ((flags & DIO_LOCKING) && iov_iter_rw(iter) == READ)
		mutex_unlock(&inode->i_mutex);

	if ((retval > 0) && end_io)
		end_io(iocb, pos, retval, bh.b_private);

	if (!(flags & DIO_SKIP_DIO_COUNT))
		inode_dio_end(inode);
 out:
	return retval;
}
EXPORT_SYMBOL_GPL(dax_do_io);
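/*
 * A minimal caller sketch (hypothetical, not part of this file): a
 * filesystem's ->direct_IO method can forward straight to dax_do_io(),
 * passing its own get_block callback.  DIO_LOCKING asks dax_do_io() to
 * take i_mutex around reads, matching do_blockdev_direct_IO():
 */
#if 0	/* illustrative only; example_get_block is a placeholder name */
static ssize_t example_direct_IO(struct kiocb *iocb, struct iov_iter *iter,
				 loff_t offset)
{
	struct inode *inode = file_inode(iocb->ki_filp);

	return dax_do_io(iocb, inode, iter, offset, example_get_block,
			 NULL, DIO_LOCKING);
}
#endif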
/*
 * The user has performed a load from a hole in the file.  Allocating
 * a new page in the file would cause excessive storage usage for
 * workloads with sparse files.  We allocate a page cache page instead.
 * We'll kick it out of the page cache if it's ever written to,
 * otherwise it will simply fall out of the page cache under memory
 * pressure without ever having been dirtied.
 */
static int dax_load_hole(struct address_space *mapping, struct page *page,
							struct vm_fault *vmf)
{
	unsigned long size;
	struct inode *inode = mapping->host;
	if (!page)
		page = find_or_create_page(mapping, vmf->pgoff,
						GFP_KERNEL | __GFP_ZERO);
	if (!page)
		return VM_FAULT_OOM;
	/* Recheck i_size under page lock to avoid truncate race */
	size = (i_size_read(inode) + PAGE_SIZE - 1) >> PAGE_SHIFT;
	if (vmf->pgoff >= size) {
		unlock_page(page);
		page_cache_release(page);
		return VM_FAULT_SIGBUS;
	}

	vmf->page = page;
	return VM_FAULT_LOCKED;
}
static int copy_user_bh(struct page *to, struct buffer_head *bh,
			unsigned blkbits, unsigned long vaddr)
{
	void __pmem *vfrom;
	void *vto;

	if (dax_get_addr(bh, &vfrom, blkbits) < 0)
		return -EIO;
	vto = kmap_atomic(to);
	copy_user_page(vto, (void __force *)vfrom, vaddr, to);
	kunmap_atomic(vto);
	return 0;
}
static int dax_insert_mapping(struct inode *inode, struct buffer_head *bh,
			struct vm_area_struct *vma, struct vm_fault *vmf)
{
	sector_t sector = bh->b_blocknr << (inode->i_blkbits - 9);
	unsigned long vaddr = (unsigned long)vmf->virtual_address;
	void __pmem *addr;
	unsigned long pfn;
	pgoff_t size;
	int error;

	/*
	 * Check truncate didn't happen while we were allocating a block.
	 * If it did, this block may or may not be still allocated to the
	 * file.  We can't tell the filesystem to free it because we can't
	 * take i_mutex here.  In the worst case, the file still has blocks
	 * allocated past the end of the file.
	 */
	size = (i_size_read(inode) + PAGE_SIZE - 1) >> PAGE_SHIFT;
	if (unlikely(vmf->pgoff >= size)) {
		error = -EIO;
		goto out;
	}

	error = bdev_direct_access(bh->b_bdev, sector, &addr, &pfn, bh->b_size);
	if (error < 0)
		goto out;
	if (error < PAGE_SIZE) {
		error = -EIO;
		goto out;
	}

	if (buffer_unwritten(bh) || buffer_new(bh)) {
		clear_pmem(addr, PAGE_SIZE);
		wmb_pmem();
	}

	error = vm_insert_mixed(vma, vaddr, pfn);

 out:
	return error;
}
/**
 * __dax_fault - handle a page fault on a DAX file
 * @vma: The virtual memory area where the fault occurred
 * @vmf: The description of the fault
 * @get_block: The filesystem method used to translate file offsets to blocks
 * @complete_unwritten: The filesystem method used to convert unwritten blocks
 *	to written so the data written to them is exposed.  This is required
 *	by write faults for filesystems that will return unwritten extent
 *	mappings from @get_block, but it is optional for reads as
 *	dax_insert_mapping() will always zero unwritten blocks.  If the fs
 *	does not support unwritten extents, it should pass NULL.
 *
 * When a page fault occurs, filesystems may call this helper in their
 * fault handler for DAX files.  __dax_fault() assumes the caller has done all
 * the necessary locking for the page fault to proceed successfully.
 */
int __dax_fault(struct vm_area_struct *vma, struct vm_fault *vmf,
			get_block_t get_block, dax_iodone_t complete_unwritten)
{
	struct file *file = vma->vm_file;
	struct address_space *mapping = file->f_mapping;
	struct inode *inode = mapping->host;
	struct page *page;
	struct buffer_head bh;
	unsigned long vaddr = (unsigned long)vmf->virtual_address;
	unsigned blkbits = inode->i_blkbits;
	sector_t block;
	pgoff_t size;
	int error;
	int major = 0;

	size = (i_size_read(inode) + PAGE_SIZE - 1) >> PAGE_SHIFT;
	if (vmf->pgoff >= size)
		return VM_FAULT_SIGBUS;

	memset(&bh, 0, sizeof(bh));
	block = (sector_t)vmf->pgoff << (PAGE_SHIFT - blkbits);
	bh.b_size = PAGE_SIZE;

 repeat:
	page = find_get_page(mapping, vmf->pgoff);
	if (page) {
		if (!lock_page_or_retry(page, vma->vm_mm, vmf->flags)) {
			page_cache_release(page);
			return VM_FAULT_RETRY;
		}
		if (unlikely(page->mapping != mapping)) {
			unlock_page(page);
			page_cache_release(page);
			goto repeat;
		}
		size = (i_size_read(inode) + PAGE_SIZE - 1) >> PAGE_SHIFT;
		if (unlikely(vmf->pgoff >= size)) {
			/*
			 * We have a struct page covering a hole in the file
			 * from a read fault and we've raced with a truncate
			 */
			error = -EIO;
			goto unlock;
		}
	} else {
		i_mmap_lock_write(mapping);
	}

	error = get_block(inode, block, &bh, 0);
	if (!error && (bh.b_size < PAGE_SIZE))
		error = -EIO;		/* fs corruption? */
	if (error)
		goto unlock;

	if (!buffer_mapped(&bh) && !buffer_unwritten(&bh) && !vmf->cow_page) {
		if (vmf->flags & FAULT_FLAG_WRITE) {
			error = get_block(inode, block, &bh, 1);
			count_vm_event(PGMAJFAULT);
			mem_cgroup_count_vm_event(vma->vm_mm, PGMAJFAULT);
			major = VM_FAULT_MAJOR;
			if (!error && (bh.b_size < PAGE_SIZE))
				error = -EIO;
			if (error)
				goto unlock;
		} else {
			i_mmap_unlock_write(mapping);
			return dax_load_hole(mapping, page, vmf);
		}
	}

	if (vmf->cow_page) {
		struct page *new_page = vmf->cow_page;
		if (buffer_written(&bh))
			error = copy_user_bh(new_page, &bh, blkbits, vaddr);
		else
			clear_user_highpage(new_page, vaddr);
		if (error)
			goto unlock;
		vmf->page = page;
		if (!page) {
			/* Check we didn't race with truncate */
			size = (i_size_read(inode) + PAGE_SIZE - 1) >>
								PAGE_SHIFT;
			if (vmf->pgoff >= size) {
				error = -EIO;
				goto unlock;
			}
		}
		return VM_FAULT_LOCKED;
	}

	/* Check we didn't race with a read fault installing a new page */
	if (!page && major)
		page = find_lock_page(mapping, vmf->pgoff);

	if (page) {
		unmap_mapping_range(mapping, vmf->pgoff << PAGE_SHIFT,
							PAGE_CACHE_SIZE, 0);
		delete_from_page_cache(page);
		unlock_page(page);
		page_cache_release(page);
		page = NULL;
	}

	/*
	 * If we successfully insert the new mapping over an unwritten extent,
	 * we need to ensure we convert the unwritten extent. If there is an
	 * error inserting the mapping, the filesystem needs to leave it as
	 * unwritten to prevent exposure of the stale underlying data to
	 * userspace, but we still need to call the completion function so
	 * the private resources on the mapping buffer can be released. We
	 * indicate what the callback should do via the uptodate variable, same
	 * as for normal BH based IO completions.
	 */
	error = dax_insert_mapping(inode, &bh, vma, vmf);
	if (buffer_unwritten(&bh)) {
		if (complete_unwritten)
			complete_unwritten(&bh, !error);
		else
			WARN_ON_ONCE(!(vmf->flags & FAULT_FLAG_WRITE));
	}

	if (!page)
		i_mmap_unlock_write(mapping);
 out:
	if (error == -ENOMEM)
		return VM_FAULT_OOM | major;
	/* -EBUSY is fine, somebody else faulted on the same PTE */
	if ((error < 0) && (error != -EBUSY))
		return VM_FAULT_SIGBUS | major;
	return VM_FAULT_NOPAGE | major;

 unlock:
	if (page) {
		unlock_page(page);
		page_cache_release(page);
	} else {
		i_mmap_unlock_write(mapping);
	}

	goto out;
}
EXPORT_SYMBOL(__dax_fault);
/**
 * dax_fault - handle a page fault on a DAX file
 * @vma: The virtual memory area where the fault occurred
 * @vmf: The description of the fault
 * @get_block: The filesystem method used to translate file offsets to blocks
 * @complete_unwritten: The filesystem method used to convert unwritten blocks
 *	to written (see __dax_fault(); may be NULL if the fs does not support
 *	unwritten extents)
 *
 * When a page fault occurs, filesystems may call this helper in their
 * fault handler for DAX files.
 */
int dax_fault(struct vm_area_struct *vma, struct vm_fault *vmf,
	      get_block_t get_block, dax_iodone_t complete_unwritten)
{
	int result;
	struct super_block *sb = file_inode(vma->vm_file)->i_sb;

	if (vmf->flags & FAULT_FLAG_WRITE) {
		sb_start_pagefault(sb);
		file_update_time(vma->vm_file);
	}
	result = __dax_fault(vma, vmf, get_block, complete_unwritten);
	if (vmf->flags & FAULT_FLAG_WRITE)
		sb_end_pagefault(sb);

	return result;
}
EXPORT_SYMBOL_GPL(dax_fault);
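/*
 * A sketch of how a filesystem might wire this up (hypothetical names; a
 * filesystem without unwritten extents passes NULL for complete_unwritten):
 */
#if 0	/* illustrative only */
static int example_dax_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	return dax_fault(vma, vmf, example_get_block, NULL);
}

static const struct vm_operations_struct example_dax_vm_ops = {
	.fault		= example_dax_fault,
	.pfn_mkwrite	= dax_pfn_mkwrite,
};
#endif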
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
/*
 * The 'colour' (ie low bits) within a PMD of a page offset.  This comes up
 * more often than one might expect in the below function.
 */
#define PG_PMD_COLOUR	((PMD_SIZE >> PAGE_SHIFT) - 1)
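/*
 * Worked example: with 4K pages and 2M PMDs, PG_PMD_COLOUR is 511, so
 * (pgoff | PG_PMD_COLOUR) is the page offset of the last page the PMD
 * would map, and (pfn & PG_PMD_COLOUR) is non-zero when a pfn is not
 * PMD-aligned.  Both tests appear below.
 */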
int __dax_pmd_fault(struct vm_area_struct *vma, unsigned long address,
		pmd_t *pmd, unsigned int flags, get_block_t get_block,
		dax_iodone_t complete_unwritten)
{
	struct file *file = vma->vm_file;
	struct address_space *mapping = file->f_mapping;
	struct inode *inode = mapping->host;
	struct buffer_head bh;
	unsigned blkbits = inode->i_blkbits;
	unsigned long pmd_addr = address & PMD_MASK;
	bool write = flags & FAULT_FLAG_WRITE;
	long length;
	void __pmem *kaddr;
	pgoff_t size, pgoff;
	sector_t block, sector;
	unsigned long pfn;
	int result = 0;

	/* Fall back to PTEs if we're going to COW */
	if (write && !(vma->vm_flags & VM_SHARED))
		return VM_FAULT_FALLBACK;
	/* If the PMD would extend outside the VMA */
	if (pmd_addr < vma->vm_start)
		return VM_FAULT_FALLBACK;
	if ((pmd_addr + PMD_SIZE) > vma->vm_end)
		return VM_FAULT_FALLBACK;

	pgoff = linear_page_index(vma, pmd_addr);
	size = (i_size_read(inode) + PAGE_SIZE - 1) >> PAGE_SHIFT;
	if (pgoff >= size)
		return VM_FAULT_SIGBUS;
	/* If the PMD would cover blocks out of the file */
	if ((pgoff | PG_PMD_COLOUR) >= size)
		return VM_FAULT_FALLBACK;

	memset(&bh, 0, sizeof(bh));
	block = (sector_t)pgoff << (PAGE_SHIFT - blkbits);

	bh.b_size = PMD_SIZE;
	i_mmap_lock_write(mapping);
	length = get_block(inode, block, &bh, write);
	if (length)
		return VM_FAULT_SIGBUS;

	/*
	 * If the filesystem isn't willing to tell us the length of a hole,
	 * just fall back to PTEs.  Calling get_block 512 times in a loop
	 * would be silly.
	 */
	if (!buffer_size_valid(&bh) || bh.b_size < PMD_SIZE)
		goto fallback;

	sector = bh.b_blocknr << (blkbits - 9);

	if (buffer_unwritten(&bh) || buffer_new(&bh)) {
		int i;

		length = bdev_direct_access(bh.b_bdev, sector, &kaddr, &pfn,
						bh.b_size);
		if (length < 0) {
			result = VM_FAULT_SIGBUS;
			goto out;
		}
		if ((length < PMD_SIZE) || (pfn & PG_PMD_COLOUR))
			goto fallback;

		for (i = 0; i < PTRS_PER_PMD; i++)
			clear_pmem(kaddr + i * PAGE_SIZE, PAGE_SIZE);
		wmb_pmem();
		count_vm_event(PGMAJFAULT);
		mem_cgroup_count_vm_event(vma->vm_mm, PGMAJFAULT);
		result |= VM_FAULT_MAJOR;
	}

	/*
	 * If we allocated new storage, make sure no process has any
	 * zero pages covering this hole
	 */
	if (buffer_new(&bh)) {
		i_mmap_unlock_write(mapping);
		unmap_mapping_range(mapping, pgoff << PAGE_SHIFT, PMD_SIZE, 0);
		i_mmap_lock_write(mapping);
	}

	/*
	 * If a truncate happened while we were allocating blocks, we may
	 * leave blocks allocated to the file that are beyond EOF.  We can't
	 * take i_mutex here, so just leave them hanging; they'll be freed
	 * when the file is deleted.
	 */
	size = (i_size_read(inode) + PAGE_SIZE - 1) >> PAGE_SHIFT;
	if (pgoff >= size) {
		result = VM_FAULT_SIGBUS;
		goto out;
	}
	if ((pgoff | PG_PMD_COLOUR) >= size)
		goto fallback;

	if (!write && !buffer_mapped(&bh) && buffer_uptodate(&bh)) {
		spinlock_t *ptl;
		pmd_t entry;
		struct page *zero_page = get_huge_zero_page();

		if (unlikely(!zero_page))
			goto fallback;

		ptl = pmd_lock(vma->vm_mm, pmd);
		if (!pmd_none(*pmd)) {
			spin_unlock(ptl);
			goto fallback;
		}

		entry = mk_pmd(zero_page, vma->vm_page_prot);
		entry = pmd_mkhuge(entry);
		set_pmd_at(vma->vm_mm, pmd_addr, pmd, entry);
		result = VM_FAULT_NOPAGE;
		spin_unlock(ptl);
	} else {
		length = bdev_direct_access(bh.b_bdev, sector, &kaddr, &pfn,
						bh.b_size);
		if (length < 0) {
			result = VM_FAULT_SIGBUS;
			goto out;
		}
		if ((length < PMD_SIZE) || (pfn & PG_PMD_COLOUR))
			goto fallback;

		result |= vmf_insert_pfn_pmd(vma, address, pmd, pfn, write);
	}

 out:
	if (buffer_unwritten(&bh))
		complete_unwritten(&bh, !(result & VM_FAULT_ERROR));

	i_mmap_unlock_write(mapping);

	return result;

 fallback:
	count_vm_event(THP_FAULT_FALLBACK);
	result = VM_FAULT_FALLBACK;
	goto out;
}
EXPORT_SYMBOL_GPL(__dax_pmd_fault);
/**
 * dax_pmd_fault - handle a PMD fault on a DAX file
 * @vma: The virtual memory area where the fault occurred
 * @address: The virtual address that faulted
 * @pmd: The PMD entry in the page table to fill in
 * @flags: The fault flags (FAULT_FLAG_*)
 * @get_block: The filesystem method used to translate file offsets to blocks
 * @complete_unwritten: The filesystem method used to convert unwritten blocks
 *	to written (see __dax_fault())
 *
 * When a page fault occurs, filesystems may call this helper in their
 * pmd_fault handler for DAX files.
 */
int dax_pmd_fault(struct vm_area_struct *vma, unsigned long address,
			pmd_t *pmd, unsigned int flags, get_block_t get_block,
			dax_iodone_t complete_unwritten)
{
	int result;
	struct super_block *sb = file_inode(vma->vm_file)->i_sb;

	if (flags & FAULT_FLAG_WRITE) {
		sb_start_pagefault(sb);
		file_update_time(vma->vm_file);
	}
	result = __dax_pmd_fault(vma, address, pmd, flags, get_block,
				complete_unwritten);
	if (flags & FAULT_FLAG_WRITE)
		sb_end_pagefault(sb);

	return result;
}
EXPORT_SYMBOL_GPL(dax_pmd_fault);
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
/**
 * dax_pfn_mkwrite - handle first write to DAX page
 * @vma: The virtual memory area where the fault occurred
 * @vmf: The description of the fault
 */
int dax_pfn_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct super_block *sb = file_inode(vma->vm_file)->i_sb;

	sb_start_pagefault(sb);
	file_update_time(vma->vm_file);
	sb_end_pagefault(sb);
	return VM_FAULT_NOPAGE;
}
EXPORT_SYMBOL_GPL(dax_pfn_mkwrite);
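/*
 * Note on the handler above: there is no block-mapping work to do here,
 * since the PTE already points at the correct pfn.  All that is needed is
 * the mtime/ctime update under pagefault protection; the core MM then makes
 * the PTE writable after this returns.
 */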
/**
 * dax_zero_page_range - zero a range within a page of a DAX file
 * @inode: The file being truncated
 * @from: The file offset that is being truncated to
 * @length: The number of bytes to zero
 * @get_block: The filesystem method used to translate file offsets to blocks
 *
 * This function can be called by a filesystem when it is zeroing part of a
 * page in a DAX file.  This is intended for hole-punch operations.  If
 * you are truncating a file, the helper function dax_truncate_page() may be
 * more convenient.
 *
 * We work in terms of PAGE_CACHE_SIZE here for commonality with
 * block_truncate_page(), but we could go down to PAGE_SIZE if the filesystem
 * took care of disposing of the unnecessary blocks.  Even if the filesystem
 * block size is smaller than PAGE_SIZE, we have to zero the rest of the page
 * since the file might be mmapped.
 */
int dax_zero_page_range(struct inode *inode, loff_t from, unsigned length,
							get_block_t get_block)
{
	struct buffer_head bh;
	pgoff_t index = from >> PAGE_CACHE_SHIFT;
	unsigned offset = from & (PAGE_CACHE_SIZE-1);
	int err;

	/* Block boundary? Nothing to do */
	if (!length)
		return 0;
	BUG_ON((offset + length) > PAGE_CACHE_SIZE);

	memset(&bh, 0, sizeof(bh));
	bh.b_size = PAGE_CACHE_SIZE;
	err = get_block(inode, index, &bh, 0);
	if (err < 0)
		return err;
	if (buffer_written(&bh)) {
		void __pmem *addr;
		err = dax_get_addr(&bh, &addr, inode->i_blkbits);
		if (err < 0)
			return err;
		clear_pmem(addr + offset, length);
		wmb_pmem();
	}

	return 0;
}
EXPORT_SYMBOL_GPL(dax_zero_page_range);
/**
 * dax_truncate_page - handle a partial page being truncated in a DAX file
 * @inode: The file being truncated
 * @from: The file offset that is being truncated to
 * @get_block: The filesystem method used to translate file offsets to blocks
 *
 * Similar to block_truncate_page(), this function can be called by a
 * filesystem when it is truncating a DAX file to handle the partial page.
 *
 * We work in terms of PAGE_CACHE_SIZE here for commonality with
 * block_truncate_page(), but we could go down to PAGE_SIZE if the filesystem
 * took care of disposing of the unnecessary blocks.  Even if the filesystem
 * block size is smaller than PAGE_SIZE, we have to zero the rest of the page
 * since the file might be mmapped.
 */
int dax_truncate_page(struct inode *inode, loff_t from, get_block_t get_block)
{
	unsigned length = PAGE_CACHE_ALIGN(from) - from;
	return dax_zero_page_range(inode, from, length, get_block);
}
EXPORT_SYMBOL_GPL(dax_truncate_page);
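/*
 * Worked example: truncating to from = 0x11200 with 4K pages gives
 * PAGE_CACHE_ALIGN(from) = 0x12000, so length = 0xe00 and the final
 * 0xe00 bytes of the partial page are zeroed.  If from is already
 * page-aligned, length is 0 and dax_zero_page_range() returns at once.
 */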