/*
 * fs/dax.c - Direct Access filesystem code
 * Copyright (c) 2013-2014 Intel Corporation
 * Author: Matthew Wilcox <matthew.r.wilcox@intel.com>
 * Author: Ross Zwisler <ross.zwisler@linux.intel.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 */
#include <linux/atomic.h>
#include <linux/blkdev.h>
#include <linux/buffer_head.h>
#include <linux/dax.h>
#include <linux/fs.h>
#include <linux/genhd.h>
#include <linux/highmem.h>
#include <linux/memcontrol.h>
#include <linux/mm.h>
#include <linux/mutex.h>
#include <linux/pagevec.h>
#include <linux/pmem.h>
#include <linux/sched.h>
#include <linux/uio.h>
#include <linux/vmstat.h>
#include <linux/pfn_t.h>
#include <linux/sizes.h>
static long dax_map_atomic(struct block_device *bdev, struct blk_dax_ctl *dax)
{
	struct request_queue *q = bdev->bd_queue;
	long rc = -EIO;

	dax->addr = (void __pmem *) ERR_PTR(-EIO);
	if (blk_queue_enter(q, true) != 0)
		return rc;

	rc = bdev_direct_access(bdev, dax);
	if (rc < 0) {
		dax->addr = (void __pmem *) ERR_PTR(rc);
		blk_queue_exit(q);
		return rc;
	}
	return rc;
}
static void dax_unmap_atomic(struct block_device *bdev,
		const struct blk_dax_ctl *dax)
{
	if (IS_ERR(dax->addr))
		return;
	blk_queue_exit(bdev->bd_queue);
}
/*
 * dax_clear_blocks() is called from within transaction context from XFS,
 * and hence this means the stack from this point must follow GFP_NOFS
 * semantics for all operations.
 */
int dax_clear_blocks(struct inode *inode, sector_t block, long _size)
{
	struct block_device *bdev = inode->i_sb->s_bdev;
	struct blk_dax_ctl dax = {
		.sector = block << (inode->i_blkbits - 9),
		.size = _size,
	};

	might_sleep();
	do {
		long count, sz;

		count = dax_map_atomic(bdev, &dax);
		if (count < 0)
			return count;
		sz = min_t(long, count, SZ_128K);
		clear_pmem(dax.addr, sz);
		dax.size -= sz;
		dax.sector += sz / 512;
		dax_unmap_atomic(bdev, &dax);
		cond_resched();
	} while (dax.size);

	wmb_pmem();
	return 0;
}
EXPORT_SYMBOL_GPL(dax_clear_blocks);
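/*
 * Illustrative sketch (not part of the original file): a filesystem that has
 * just allocated new blocks to a DAX inode can zero them with
 * dax_clear_blocks() before exposing them to userspace.  "iblock" and "len"
 * are hypothetical variables in the caller:
 *
 *	err = dax_clear_blocks(inode, iblock, len);
 *	if (err < 0)
 *		return err;
 */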
/* the clear_pmem() calls are ordered by a wmb_pmem() in the caller */
static void dax_new_buf(void __pmem *addr, unsigned size, unsigned first,
		loff_t pos, loff_t end)
{
	loff_t final = end - pos + first; /* The final byte of the buffer */

	if (first > 0)
		clear_pmem(addr, first);
	if (final < size)
		clear_pmem(addr + final, size - final);
}
static bool buffer_written(struct buffer_head *bh)
{
	return buffer_mapped(bh) && !buffer_unwritten(bh);
}
/*
 * When ext4 encounters a hole, it returns without modifying the buffer_head
 * which means that we can't trust b_size.  To cope with this, we set b_state
 * to 0 before calling get_block and, if any bit is set, we know we can trust
 * b_size.  Unfortunate, really, since ext4 knows precisely how long a hole is
 * and would save us time calling get_block repeatedly.
 */
static bool buffer_size_valid(struct buffer_head *bh)
{
	return bh->b_state != 0;
}
static sector_t to_sector(const struct buffer_head *bh,
		const struct inode *inode)
{
	sector_t sector = bh->b_blocknr << (inode->i_blkbits - 9);

	return sector;
}
static ssize_t dax_io(struct inode *inode, struct iov_iter *iter,
		loff_t start, loff_t end, get_block_t get_block,
		struct buffer_head *bh)
{
	loff_t pos = start, max = start, bh_max = start;
	bool hole = false, need_wmb = false;
	struct block_device *bdev = NULL;
	int rw = iov_iter_rw(iter), rc;
	long map_len = 0;
	struct blk_dax_ctl dax = {
		.addr = (void __pmem *) ERR_PTR(-EIO),
	};

	if (rw == READ)
		end = min(end, i_size_read(inode));

	while (pos < end) {
		size_t len;
		if (pos == max) {
			unsigned blkbits = inode->i_blkbits;
			long page = pos >> PAGE_SHIFT;
			sector_t block = page << (PAGE_SHIFT - blkbits);
			unsigned first = pos - (block << blkbits);
			long size;

			if (pos == bh_max) {
				bh->b_size = PAGE_ALIGN(end - pos);
				bh->b_state = 0;
				rc = get_block(inode, block, bh, rw == WRITE);
				if (rc)
					break;
				if (!buffer_size_valid(bh))
					bh->b_size = 1 << blkbits;
				bh_max = pos - first + bh->b_size;
				bdev = bh->b_bdev;
			} else {
				unsigned done = bh->b_size -
						(bh_max - (pos - first));
				bh->b_blocknr += done >> blkbits;
				bh->b_size -= done;
			}

			hole = rw == READ && !buffer_written(bh);
			if (hole) {
				size = bh->b_size - first;
			} else {
				dax_unmap_atomic(bdev, &dax);
				dax.sector = to_sector(bh, inode);
				dax.size = bh->b_size;
				map_len = dax_map_atomic(bdev, &dax);
				if (map_len < 0) {
					rc = map_len;
					break;
				}
				if (buffer_unwritten(bh) || buffer_new(bh)) {
					dax_new_buf(dax.addr, map_len, first,
							pos, end);
					need_wmb = true;
				}
				dax.addr += first;
				size = map_len - first;
			}
			max = min(pos + size, end);
		}

		if (iov_iter_rw(iter) == WRITE) {
			len = copy_from_iter_pmem(dax.addr, max - pos, iter);
			need_wmb = true;
		} else if (!hole)
			len = copy_to_iter((void __force *) dax.addr, max - pos,
					iter);
		else
			len = iov_iter_zero(max - pos, iter);

		if (!len) {
			rc = -EFAULT;
			break;
		}

		pos += len;
		if (!IS_ERR(dax.addr))
			dax.addr += len;
	}

	if (need_wmb)
		wmb_pmem();
	dax_unmap_atomic(bdev, &dax);

	return (pos == start) ? rc : pos - start;
}
/**
 * dax_do_io - Perform I/O to a DAX file
 * @iocb: The control block for this I/O
 * @inode: The file which the I/O is directed at
 * @iter: The addresses to do I/O from or to
 * @pos: The file offset where the I/O starts
 * @get_block: The filesystem method used to translate file offsets to blocks
 * @end_io: A filesystem callback for I/O completion
 * @flags: See below
 *
 * This function uses the same locking scheme as do_blockdev_direct_IO:
 * If @flags has DIO_LOCKING set, we assume that the i_mutex is held by the
 * caller for writes.  For reads, we take and release the i_mutex ourselves.
 * If DIO_LOCKING is not set, the filesystem takes care of its own locking.
 * As with do_blockdev_direct_IO(), we increment i_dio_count while the I/O
 * is in progress.
 */
ssize_t dax_do_io(struct kiocb *iocb, struct inode *inode,
		struct iov_iter *iter, loff_t pos, get_block_t get_block,
		dio_iodone_t end_io, int flags)
{
	struct buffer_head bh;
	ssize_t retval = -EINVAL;
	loff_t end = pos + iov_iter_count(iter);

	memset(&bh, 0, sizeof(bh));
	bh.b_bdev = inode->i_sb->s_bdev;

	if ((flags & DIO_LOCKING) && iov_iter_rw(iter) == READ) {
		struct address_space *mapping = inode->i_mapping;
		inode_lock(inode);
		retval = filemap_write_and_wait_range(mapping, pos, end - 1);
		if (retval) {
			inode_unlock(inode);
			goto out;
		}
	}

	/* Protects against truncate */
	if (!(flags & DIO_SKIP_DIO_COUNT))
		inode_dio_begin(inode);

	retval = dax_io(inode, iter, pos, end, get_block, &bh);

	if ((flags & DIO_LOCKING) && iov_iter_rw(iter) == READ)
		inode_unlock(inode);

	if ((retval > 0) && end_io)
		end_io(iocb, pos, retval, bh.b_private);

	if (!(flags & DIO_SKIP_DIO_COUNT))
		inode_dio_end(inode);
 out:
	return retval;
}
EXPORT_SYMBOL_GPL(dax_do_io);
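/*
 * Illustrative sketch (not part of the original file): a filesystem can route
 * its ->direct_IO address_space operation through dax_do_io() for DAX inodes.
 * foo_direct_IO and foo_get_block are hypothetical placeholders for the
 * filesystem's own helpers; DIO_LOCKING requests the read-side locking
 * behaviour described above.
 *
 *	static ssize_t foo_direct_IO(struct kiocb *iocb, struct iov_iter *iter,
 *				     loff_t offset)
 *	{
 *		struct inode *inode = file_inode(iocb->ki_filp);
 *
 *		return dax_do_io(iocb, inode, iter, offset, foo_get_block,
 *				 NULL, DIO_LOCKING);
 *	}
 */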
/*
 * The user has performed a load from a hole in the file.  Allocating
 * a new page in the file would cause excessive storage usage for
 * workloads with sparse files.  We allocate a page cache page instead.
 * We'll kick it out of the page cache if it's ever written to,
 * otherwise it will simply fall out of the page cache under memory
 * pressure without ever having been dirtied.
 */
static int dax_load_hole(struct address_space *mapping, struct page *page,
							struct vm_fault *vmf)
{
	unsigned long size;
	struct inode *inode = mapping->host;
	if (!page)
		page = find_or_create_page(mapping, vmf->pgoff,
						GFP_KERNEL | __GFP_ZERO);
	if (!page)
		return VM_FAULT_OOM;
	/* Recheck i_size under page lock to avoid truncate race */
	size = (i_size_read(inode) + PAGE_SIZE - 1) >> PAGE_SHIFT;
	if (vmf->pgoff >= size) {
		unlock_page(page);
		page_cache_release(page);
		return VM_FAULT_SIGBUS;
	}

	vmf->page = page;
	return VM_FAULT_LOCKED;
}
static int copy_user_bh(struct page *to, struct inode *inode,
		struct buffer_head *bh, unsigned long vaddr)
{
	struct blk_dax_ctl dax = {
		.sector = to_sector(bh, inode),
		.size = bh->b_size,
	};
	struct block_device *bdev = bh->b_bdev;
	void *vto;

	if (dax_map_atomic(bdev, &dax) < 0)
		return PTR_ERR(dax.addr);
	vto = kmap_atomic(to);
	copy_user_page(vto, (void __force *)dax.addr, vaddr, to);
	kunmap_atomic(vto);
	dax_unmap_atomic(bdev, &dax);
	return 0;
}
#define NO_SECTOR -1
#define DAX_PMD_INDEX(page_index) (page_index & (PMD_MASK >> PAGE_CACHE_SHIFT))
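/*
 * Added note (not in the original): DAX_PMD_INDEX() rounds a page index down
 * to the start of the PMD-sized region containing it.  With 4K pages and a
 * 2M PMD, PMD_MASK >> PAGE_CACHE_SHIFT clears the low nine index bits, so
 * indices 0x200-0x3ff all map to 0x200.
 */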
static int dax_radix_entry(struct address_space *mapping, pgoff_t index,
		sector_t sector, bool pmd_entry, bool dirty)
{
	struct radix_tree_root *page_tree = &mapping->page_tree;
	pgoff_t pmd_index = DAX_PMD_INDEX(index);
	int type, error = 0;
	void *entry;

	WARN_ON_ONCE(pmd_entry && !dirty);
	__mark_inode_dirty(mapping->host, I_DIRTY_PAGES);

	spin_lock_irq(&mapping->tree_lock);

	entry = radix_tree_lookup(page_tree, pmd_index);
	if (entry && RADIX_DAX_TYPE(entry) == RADIX_DAX_PMD) {
		index = pmd_index;
		goto dirty;
	}

	entry = radix_tree_lookup(page_tree, index);
	if (entry) {
		type = RADIX_DAX_TYPE(entry);
		if (WARN_ON_ONCE(type != RADIX_DAX_PTE &&
					type != RADIX_DAX_PMD)) {
			error = -EIO;
			goto unlock;
		}

		if (!pmd_entry || type == RADIX_DAX_PMD)
			goto dirty;

		/*
		 * We only insert dirty PMD entries into the radix tree.  This
		 * means we don't need to worry about removing a dirty PTE
		 * entry and inserting a clean PMD entry, thus reducing the
		 * range we would flush with a follow-up fsync/msync call.
		 */
		radix_tree_delete(&mapping->page_tree, index);
		mapping->nrexceptional--;
	}

	if (sector == NO_SECTOR) {
		/*
		 * This can happen during correct operation if our pfn_mkwrite
		 * fault raced against a hole punch operation.  If this
		 * happens the pte that was hole punched will have been
		 * unmapped and the radix tree entry will have been removed by
		 * the time we are called, but the call will still happen.  We
		 * will return all the way up to wp_pfn_shared(), where the
		 * pte_same() check will fail, eventually causing page fault
		 * to be retried by the CPU.
		 */
		goto unlock;
	}

	error = radix_tree_insert(page_tree, index,
			RADIX_DAX_ENTRY(sector, pmd_entry));
	if (error)
		goto unlock;

	mapping->nrexceptional++;
 dirty:
	if (dirty)
		radix_tree_tag_set(page_tree, index, PAGECACHE_TAG_DIRTY);
 unlock:
	spin_unlock_irq(&mapping->tree_lock);

	return error;
}
static int dax_writeback_one(struct block_device *bdev,
		struct address_space *mapping, pgoff_t index, void *entry)
{
	struct radix_tree_root *page_tree = &mapping->page_tree;
	int type = RADIX_DAX_TYPE(entry);
	struct radix_tree_node *node;
	struct blk_dax_ctl dax;
	void **slot;
	int ret = 0;

	spin_lock_irq(&mapping->tree_lock);
	/*
	 * Regular page slots are stabilized by the page lock even
	 * without the tree itself locked.  These unlocked entries
	 * need verification under the tree lock.
	 */
	if (!__radix_tree_lookup(page_tree, index, &node, &slot))
		goto unlock;
	if (*slot != entry)
		goto unlock;

	/* another fsync thread may have already written back this entry */
	if (!radix_tree_tag_get(page_tree, index, PAGECACHE_TAG_TOWRITE))
		goto unlock;

	if (WARN_ON_ONCE(type != RADIX_DAX_PTE && type != RADIX_DAX_PMD)) {
		ret = -EIO;
		goto unlock;
	}

	dax.sector = RADIX_DAX_SECTOR(entry);
	dax.size = (type == RADIX_DAX_PMD ? PMD_SIZE : PAGE_SIZE);
	spin_unlock_irq(&mapping->tree_lock);

	/*
	 * We cannot hold tree_lock while calling dax_map_atomic() because it
	 * eventually calls cond_resched().
	 */
	ret = dax_map_atomic(bdev, &dax);
	if (ret < 0)
		return ret;

	if (WARN_ON_ONCE(ret < dax.size)) {
		ret = -EIO;
		goto unmap;
	}

	wb_cache_pmem(dax.addr, dax.size);

	spin_lock_irq(&mapping->tree_lock);
	radix_tree_tag_clear(page_tree, index, PAGECACHE_TAG_TOWRITE);
	spin_unlock_irq(&mapping->tree_lock);
 unmap:
	dax_unmap_atomic(bdev, &dax);
	return ret;

 unlock:
	spin_unlock_irq(&mapping->tree_lock);
	return ret;
}
/*
 * Flush the mapping to the persistent domain within the byte range of [start,
 * end]. This is required by data integrity operations to ensure file data is
 * on persistent storage prior to completion of the operation.
 */
int dax_writeback_mapping_range(struct address_space *mapping, loff_t start,
		loff_t end)
{
	struct inode *inode = mapping->host;
	struct block_device *bdev = inode->i_sb->s_bdev;
	pgoff_t start_index, end_index, pmd_index;
	pgoff_t indices[PAGEVEC_SIZE];
	struct pagevec pvec;
	bool done = false;
	int i, ret = 0;
	void *entry;

	if (WARN_ON_ONCE(inode->i_blkbits != PAGE_SHIFT))
		return -EIO;

	start_index = start >> PAGE_CACHE_SHIFT;
	end_index = end >> PAGE_CACHE_SHIFT;
	pmd_index = DAX_PMD_INDEX(start_index);

	rcu_read_lock();
	entry = radix_tree_lookup(&mapping->page_tree, pmd_index);
	rcu_read_unlock();

	/* see if the start of our range is covered by a PMD entry */
	if (entry && RADIX_DAX_TYPE(entry) == RADIX_DAX_PMD)
		start_index = pmd_index;

	tag_pages_for_writeback(mapping, start_index, end_index);

	pagevec_init(&pvec, 0);
	while (!done) {
		pvec.nr = find_get_entries_tag(mapping, start_index,
				PAGECACHE_TAG_TOWRITE, PAGEVEC_SIZE,
				pvec.pages, indices);

		if (pvec.nr == 0)
			break;

		for (i = 0; i < pvec.nr; i++) {
			if (indices[i] > end_index) {
				done = true;
				break;
			}

			ret = dax_writeback_one(bdev, mapping, indices[i],
					pvec.pages[i]);
			if (ret < 0)
				return ret;
		}
	}
	wmb_pmem();
	return 0;
}
EXPORT_SYMBOL_GPL(dax_writeback_mapping_range);
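/*
 * Illustrative sketch (not part of the original file): an fsync implementation
 * can use dax_writeback_mapping_range() to get file data into the persistent
 * domain before committing metadata.  foo_fsync and foo_commit_metadata are
 * hypothetical placeholders:
 *
 *	static int foo_fsync(struct file *file, loff_t start, loff_t end,
 *			     int datasync)
 *	{
 *		struct inode *inode = file_inode(file);
 *		int err = 0;
 *
 *		if (IS_DAX(inode))
 *			err = dax_writeback_mapping_range(inode->i_mapping,
 *							  start, end);
 *		if (err)
 *			return err;
 *		return foo_commit_metadata(inode, datasync);
 *	}
 */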
static int dax_insert_mapping(struct inode *inode, struct buffer_head *bh,
			struct vm_area_struct *vma, struct vm_fault *vmf)
{
	unsigned long vaddr = (unsigned long)vmf->virtual_address;
	struct address_space *mapping = inode->i_mapping;
	struct block_device *bdev = bh->b_bdev;
	struct blk_dax_ctl dax = {
		.sector = to_sector(bh, inode),
		.size = bh->b_size,
	};
	pgoff_t size;
	int error;

	i_mmap_lock_read(mapping);

	/*
	 * Check truncate didn't happen while we were allocating a block.
	 * If it did, this block may or may not be still allocated to the
	 * file.  We can't tell the filesystem to free it because we can't
	 * take i_mutex here.  In the worst case, the file still has blocks
	 * allocated past the end of the file.
	 */
	size = (i_size_read(inode) + PAGE_SIZE - 1) >> PAGE_SHIFT;
	if (unlikely(vmf->pgoff >= size)) {
		error = -EIO;
		goto out;
	}

	if (dax_map_atomic(bdev, &dax) < 0) {
		error = PTR_ERR(dax.addr);
		goto out;
	}

	if (buffer_unwritten(bh) || buffer_new(bh)) {
		clear_pmem(dax.addr, PAGE_SIZE);
		wmb_pmem();
	}
	dax_unmap_atomic(bdev, &dax);

	error = dax_radix_entry(mapping, vmf->pgoff, dax.sector, false,
			vmf->flags & FAULT_FLAG_WRITE);
	if (error)
		goto out;

	error = vm_insert_mixed(vma, vaddr, dax.pfn);

 out:
	i_mmap_unlock_read(mapping);

	return error;
}
/**
 * __dax_fault - handle a page fault on a DAX file
 * @vma: The virtual memory area where the fault occurred
 * @vmf: The description of the fault
 * @get_block: The filesystem method used to translate file offsets to blocks
 * @complete_unwritten: The filesystem method used to convert unwritten blocks
 *	to written so the data written to them is exposed.  This is required
 *	by write faults for filesystems that will return unwritten extent
 *	mappings from @get_block, but it is optional for reads as
 *	dax_insert_mapping() will always zero unwritten blocks.  If the fs
 *	does not support unwritten extents, it should pass NULL.
 *
 * When a page fault occurs, filesystems may call this helper in their
 * fault handler for DAX files. __dax_fault() assumes the caller has done all
 * the necessary locking for the page fault to proceed successfully.
 */
int __dax_fault(struct vm_area_struct *vma, struct vm_fault *vmf,
			get_block_t get_block, dax_iodone_t complete_unwritten)
{
	struct file *file = vma->vm_file;
	struct address_space *mapping = file->f_mapping;
	struct inode *inode = mapping->host;
	struct page *page;
	struct buffer_head bh;
	unsigned long vaddr = (unsigned long)vmf->virtual_address;
	unsigned blkbits = inode->i_blkbits;
	sector_t block;
	pgoff_t size;
	int error;
	int major = 0;

	size = (i_size_read(inode) + PAGE_SIZE - 1) >> PAGE_SHIFT;
	if (vmf->pgoff >= size)
		return VM_FAULT_SIGBUS;

	memset(&bh, 0, sizeof(bh));
	block = (sector_t)vmf->pgoff << (PAGE_SHIFT - blkbits);
	bh.b_bdev = inode->i_sb->s_bdev;
	bh.b_size = PAGE_SIZE;

 repeat:
	page = find_get_page(mapping, vmf->pgoff);
	if (page) {
		if (!lock_page_or_retry(page, vma->vm_mm, vmf->flags)) {
			page_cache_release(page);
			return VM_FAULT_RETRY;
		}
		if (unlikely(page->mapping != mapping)) {
			unlock_page(page);
			page_cache_release(page);
			goto repeat;
		}
		size = (i_size_read(inode) + PAGE_SIZE - 1) >> PAGE_SHIFT;
		if (unlikely(vmf->pgoff >= size)) {
			/*
			 * We have a struct page covering a hole in the file
			 * from a read fault and we've raced with a truncate
			 */
			error = -EIO;
			goto unlock_page;
		}
	}

	error = get_block(inode, block, &bh, 0);
	if (!error && (bh.b_size < PAGE_SIZE))
		error = -EIO;		/* fs corruption? */
	if (error)
		goto unlock_page;

	if (!buffer_mapped(&bh) && !buffer_unwritten(&bh) && !vmf->cow_page) {
		if (vmf->flags & FAULT_FLAG_WRITE) {
			error = get_block(inode, block, &bh, 1);
			count_vm_event(PGMAJFAULT);
			mem_cgroup_count_vm_event(vma->vm_mm, PGMAJFAULT);
			major = VM_FAULT_MAJOR;
			if (!error && (bh.b_size < PAGE_SIZE))
				error = -EIO;
			if (error)
				goto unlock_page;
		} else {
			return dax_load_hole(mapping, page, vmf);
		}
	}

	if (vmf->cow_page) {
		struct page *new_page = vmf->cow_page;
		if (buffer_written(&bh))
			error = copy_user_bh(new_page, inode, &bh, vaddr);
		else
			clear_user_highpage(new_page, vaddr);
		if (error)
			goto unlock_page;
		vmf->page = page;
		if (!page) {
			i_mmap_lock_read(mapping);
			/* Check we didn't race with truncate */
			size = (i_size_read(inode) + PAGE_SIZE - 1) >>
								PAGE_SHIFT;
			if (vmf->pgoff >= size) {
				i_mmap_unlock_read(mapping);
				error = -EIO;
				goto out;
			}
		}
		return VM_FAULT_LOCKED;
	}

	/* Check we didn't race with a read fault installing a new page */
	if (!page && major)
		page = find_lock_page(mapping, vmf->pgoff);

	if (page) {
		unmap_mapping_range(mapping, vmf->pgoff << PAGE_SHIFT,
							PAGE_CACHE_SIZE, 0);
		delete_from_page_cache(page);
		unlock_page(page);
		page_cache_release(page);
		page = NULL;
	}

	/*
	 * If we successfully insert the new mapping over an unwritten extent,
	 * we need to ensure we convert the unwritten extent. If there is an
	 * error inserting the mapping, the filesystem needs to leave it as
	 * unwritten to prevent exposure of the stale underlying data to
	 * userspace, but we still need to call the completion function so
	 * the private resources on the mapping buffer can be released. We
	 * indicate what the callback should do via the uptodate variable, same
	 * as for normal BH based IO completions.
	 */
	error = dax_insert_mapping(inode, &bh, vma, vmf);
	if (buffer_unwritten(&bh)) {
		if (complete_unwritten)
			complete_unwritten(&bh, !error);
		else
			WARN_ON_ONCE(!(vmf->flags & FAULT_FLAG_WRITE));
	}

 out:
	if (error == -ENOMEM)
		return VM_FAULT_OOM | major;
	/* -EBUSY is fine, somebody else faulted on the same PTE */
	if ((error < 0) && (error != -EBUSY))
		return VM_FAULT_SIGBUS | major;
	return VM_FAULT_NOPAGE | major;

 unlock_page:
	if (page) {
		unlock_page(page);
		page_cache_release(page);
	}
	goto out;
}
EXPORT_SYMBOL(__dax_fault);
/**
 * dax_fault - handle a page fault on a DAX file
 * @vma: The virtual memory area where the fault occurred
 * @vmf: The description of the fault
 * @get_block: The filesystem method used to translate file offsets to blocks
 *
 * When a page fault occurs, filesystems may call this helper in their
 * fault handler for DAX files.
 */
int dax_fault(struct vm_area_struct *vma, struct vm_fault *vmf,
	      get_block_t get_block, dax_iodone_t complete_unwritten)
{
	int result;
	struct super_block *sb = file_inode(vma->vm_file)->i_sb;

	if (vmf->flags & FAULT_FLAG_WRITE) {
		sb_start_pagefault(sb);
		file_update_time(vma->vm_file);
	}
	result = __dax_fault(vma, vmf, get_block, complete_unwritten);
	if (vmf->flags & FAULT_FLAG_WRITE)
		sb_end_pagefault(sb);

	return result;
}
EXPORT_SYMBOL_GPL(dax_fault);
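/*
 * Illustrative sketch (not part of the original file): a filesystem typically
 * wires the DAX fault helpers into the vm_operations_struct it installs from
 * its ->mmap method (which is also expected to set VM_MIXEDMAP on the vma).
 * foo_get_block is a hypothetical placeholder; dax_pfn_mkwrite() is defined
 * later in this file.
 *
 *	static int foo_dax_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 *	{
 *		return dax_fault(vma, vmf, foo_get_block, NULL);
 *	}
 *
 *	static const struct vm_operations_struct foo_dax_vm_ops = {
 *		.fault		= foo_dax_fault,
 *		.pfn_mkwrite	= dax_pfn_mkwrite,
 *	};
 */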
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
/*
 * The 'colour' (i.e. low bits) within a PMD of a page offset.  This comes up
 * more often than one might expect in the below function.
 */
#define PG_PMD_COLOUR	((PMD_SIZE >> PAGE_SHIFT) - 1)
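/*
 * Added note (not in the original): with 4K pages and a 2M PMD this evaluates
 * to 511 (0x1ff), the number of small pages per huge page minus one, so
 * "pgoff | PG_PMD_COLOUR" below is the index of the last small page covered
 * by a PMD that starts at a PMD-aligned pgoff.
 */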
static void __dax_dbg(struct buffer_head *bh, unsigned long address,
		const char *reason, const char *fn)
{
	if (bh) {
		char bname[BDEVNAME_SIZE];
		bdevname(bh->b_bdev, bname);
		pr_debug("%s: %s addr: %lx dev %s state %lx start %lld "
			"length %zd fallback: %s\n", fn, current->comm,
			address, bname, bh->b_state, (u64)bh->b_blocknr,
			bh->b_size, reason);
	} else {
		pr_debug("%s: %s addr: %lx fallback: %s\n", fn,
			current->comm, address, reason);
	}
}

#define dax_pmd_dbg(bh, address, reason)	__dax_dbg(bh, address, reason, "dax_pmd")
int __dax_pmd_fault(struct vm_area_struct *vma, unsigned long address,
		pmd_t *pmd, unsigned int flags, get_block_t get_block,
		dax_iodone_t complete_unwritten)
{
	struct file *file = vma->vm_file;
	struct address_space *mapping = file->f_mapping;
	struct inode *inode = mapping->host;
	struct buffer_head bh;
	unsigned blkbits = inode->i_blkbits;
	unsigned long pmd_addr = address & PMD_MASK;
	bool write = flags & FAULT_FLAG_WRITE;
	struct block_device *bdev;
	pgoff_t size, pgoff;
	sector_t block;
	int error, result = 0;
	bool alloc = false;

	/* dax pmd mappings require pfn_t_devmap() */
	if (!IS_ENABLED(CONFIG_FS_DAX_PMD))
		return VM_FAULT_FALLBACK;

	/* Fall back to PTEs if we're going to COW */
	if (write && !(vma->vm_flags & VM_SHARED)) {
		split_huge_pmd(vma, pmd, address);
		dax_pmd_dbg(NULL, address, "cow write");
		return VM_FAULT_FALLBACK;
	}
	/* If the PMD would extend outside the VMA */
	if (pmd_addr < vma->vm_start) {
		dax_pmd_dbg(NULL, address, "vma start unaligned");
		return VM_FAULT_FALLBACK;
	}
	if ((pmd_addr + PMD_SIZE) > vma->vm_end) {
		dax_pmd_dbg(NULL, address, "vma end unaligned");
		return VM_FAULT_FALLBACK;
	}

	pgoff = linear_page_index(vma, pmd_addr);
	size = (i_size_read(inode) + PAGE_SIZE - 1) >> PAGE_SHIFT;
	if (pgoff >= size)
		return VM_FAULT_SIGBUS;
	/* If the PMD would cover blocks out of the file */
	if ((pgoff | PG_PMD_COLOUR) >= size) {
		dax_pmd_dbg(NULL, address,
				"offset + huge page size > file size");
		return VM_FAULT_FALLBACK;
	}

	memset(&bh, 0, sizeof(bh));
	bh.b_bdev = inode->i_sb->s_bdev;
	block = (sector_t)pgoff << (PAGE_SHIFT - blkbits);

	bh.b_size = PMD_SIZE;

	if (get_block(inode, block, &bh, 0) != 0)
		return VM_FAULT_SIGBUS;

	if (!buffer_mapped(&bh) && write) {
		if (get_block(inode, block, &bh, 1) != 0)
			return VM_FAULT_SIGBUS;
		alloc = true;
	}

	bdev = bh.b_bdev;

	/*
	 * If the filesystem isn't willing to tell us the length of a hole,
	 * just fall back to PTEs.  Calling get_block 512 times in a loop
	 * would be silly.
	 */
	if (!buffer_size_valid(&bh) || bh.b_size < PMD_SIZE) {
		dax_pmd_dbg(&bh, address, "allocated block too small");
		return VM_FAULT_FALLBACK;
	}

	/*
	 * If we allocated new storage, make sure no process has any
	 * zero pages covering this hole
	 */
	if (alloc) {
		loff_t lstart = pgoff << PAGE_SHIFT;
		loff_t lend = lstart + PMD_SIZE - 1; /* inclusive */

		truncate_pagecache_range(inode, lstart, lend);
	}

	i_mmap_lock_read(mapping);

	/*
	 * If a truncate happened while we were allocating blocks, we may
	 * leave blocks allocated to the file that are beyond EOF.  We can't
	 * take i_mutex here, so just leave them hanging; they'll be freed
	 * when the file is deleted.
	 */
	size = (i_size_read(inode) + PAGE_SIZE - 1) >> PAGE_SHIFT;
	if (pgoff >= size) {
		result = VM_FAULT_SIGBUS;
		goto out;
	}
	if ((pgoff | PG_PMD_COLOUR) >= size) {
		dax_pmd_dbg(&bh, address,
				"offset + huge page size > file size");
		goto fallback;
	}

	if (!write && !buffer_mapped(&bh) && buffer_uptodate(&bh)) {
		spinlock_t *ptl;
		pmd_t entry;
		struct page *zero_page = get_huge_zero_page();

		if (unlikely(!zero_page)) {
			dax_pmd_dbg(&bh, address, "no zero page");
			goto fallback;
		}

		ptl = pmd_lock(vma->vm_mm, pmd);
		if (!pmd_none(*pmd)) {
			spin_unlock(ptl);
			dax_pmd_dbg(&bh, address, "pmd already present");
			goto fallback;
		}

		dev_dbg(part_to_dev(bdev->bd_part),
				"%s: %s addr: %lx pfn: <zero> sect: %llx\n",
				__func__, current->comm, address,
				(unsigned long long) to_sector(&bh, inode));

		entry = mk_pmd(zero_page, vma->vm_page_prot);
		entry = pmd_mkhuge(entry);
		set_pmd_at(vma->vm_mm, pmd_addr, pmd, entry);
		result = VM_FAULT_NOPAGE;
		spin_unlock(ptl);
	} else {
		struct blk_dax_ctl dax = {
			.sector = to_sector(&bh, inode),
			.size = PMD_SIZE,
		};
		long length = dax_map_atomic(bdev, &dax);

		if (length < 0) {
			result = VM_FAULT_SIGBUS;
			goto out;
		}
		if (length < PMD_SIZE) {
			dax_pmd_dbg(&bh, address, "dax-length too small");
			dax_unmap_atomic(bdev, &dax);
			goto fallback;
		}
		if (pfn_t_to_pfn(dax.pfn) & PG_PMD_COLOUR) {
			dax_pmd_dbg(&bh, address, "pfn unaligned");
			dax_unmap_atomic(bdev, &dax);
			goto fallback;
		}

		if (!pfn_t_devmap(dax.pfn)) {
			dax_unmap_atomic(bdev, &dax);
			dax_pmd_dbg(&bh, address, "pfn not in memmap");
			goto fallback;
		}

		if (buffer_unwritten(&bh) || buffer_new(&bh)) {
			clear_pmem(dax.addr, PMD_SIZE);
			wmb_pmem();
			count_vm_event(PGMAJFAULT);
			mem_cgroup_count_vm_event(vma->vm_mm, PGMAJFAULT);
			result |= VM_FAULT_MAJOR;
		}
		dax_unmap_atomic(bdev, &dax);

		/*
		 * For PTE faults we insert a radix tree entry for reads, and
		 * leave it clean.  Then on the first write we dirty the radix
		 * tree entry via the dax_pfn_mkwrite() path.  This sequence
		 * allows the dax_pfn_mkwrite() call to be simpler and avoid a
		 * call into get_block() to translate the pgoff to a sector in
		 * order to be able to create a new radix tree entry.
		 *
		 * The PMD path doesn't have an equivalent to
		 * dax_pfn_mkwrite(), though, so for a read followed by a
		 * write we traverse all the way through __dax_pmd_fault()
		 * twice.  This means we can just skip inserting a radix tree
		 * entry completely on the initial read and just wait until
		 * the write to insert a dirty entry.
		 */
		if (write) {
			error = dax_radix_entry(mapping, pgoff, dax.sector,
					true, true);
			if (error) {
				dax_pmd_dbg(&bh, address,
						"PMD radix insertion failed");
				goto fallback;
			}
		}

		dev_dbg(part_to_dev(bdev->bd_part),
				"%s: %s addr: %lx pfn: %lx sect: %llx\n",
				__func__, current->comm, address,
				pfn_t_to_pfn(dax.pfn),
				(unsigned long long) dax.sector);
		result |= vmf_insert_pfn_pmd(vma, address, pmd,
				dax.pfn, write);
	}

 out:
	i_mmap_unlock_read(mapping);

	if (buffer_unwritten(&bh))
		complete_unwritten(&bh, !(result & VM_FAULT_ERROR));

	return result;

 fallback:
	count_vm_event(THP_FAULT_FALLBACK);
	result = VM_FAULT_FALLBACK;
	goto out;
}
EXPORT_SYMBOL_GPL(__dax_pmd_fault);
/**
 * dax_pmd_fault - handle a PMD fault on a DAX file
 * @vma: The virtual memory area where the fault occurred
 * @vmf: The description of the fault
 * @get_block: The filesystem method used to translate file offsets to blocks
 *
 * When a page fault occurs, filesystems may call this helper in their
 * pmd_fault handler for DAX files.
 */
int dax_pmd_fault(struct vm_area_struct *vma, unsigned long address,
			pmd_t *pmd, unsigned int flags, get_block_t get_block,
			dax_iodone_t complete_unwritten)
{
	int result;
	struct super_block *sb = file_inode(vma->vm_file)->i_sb;

	if (flags & FAULT_FLAG_WRITE) {
		sb_start_pagefault(sb);
		file_update_time(vma->vm_file);
	}
	result = __dax_pmd_fault(vma, address, pmd, flags, get_block,
				complete_unwritten);
	if (flags & FAULT_FLAG_WRITE)
		sb_end_pagefault(sb);

	return result;
}
EXPORT_SYMBOL_GPL(dax_pmd_fault);
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
/**
 * dax_pfn_mkwrite - handle first write to DAX page
 * @vma: The virtual memory area where the fault occurred
 * @vmf: The description of the fault
 */
int dax_pfn_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct file *file = vma->vm_file;

	/*
	 * We pass NO_SECTOR to dax_radix_entry() because we expect that a
	 * RADIX_DAX_PTE entry already exists in the radix tree from a
	 * previous call to __dax_fault().  We just want to look up that PTE
	 * entry using vmf->pgoff and make sure the dirty tag is set.  This
	 * saves us from having to make a call to get_block() here to look
	 * up the sector.
	 */
	dax_radix_entry(file->f_mapping, vmf->pgoff, NO_SECTOR, false, true);
	return VM_FAULT_NOPAGE;
}
EXPORT_SYMBOL_GPL(dax_pfn_mkwrite);
/**
 * dax_zero_page_range - zero a range within a page of a DAX file
 * @inode: The file being truncated
 * @from: The file offset that is being truncated to
 * @length: The number of bytes to zero
 * @get_block: The filesystem method used to translate file offsets to blocks
 *
 * This function can be called by a filesystem when it is zeroing part of a
 * page in a DAX file.  This is intended for hole-punch operations.  If
 * you are truncating a file, the helper function dax_truncate_page() may be
 * more convenient.
 *
 * We work in terms of PAGE_CACHE_SIZE here for commonality with
 * block_truncate_page(), but we could go down to PAGE_SIZE if the filesystem
 * took care of disposing of the unnecessary blocks.  Even if the filesystem
 * block size is smaller than PAGE_SIZE, we have to zero the rest of the page
 * since the file might be mmapped.
 */
int dax_zero_page_range(struct inode *inode, loff_t from, unsigned length,
							get_block_t get_block)
{
	struct buffer_head bh;
	pgoff_t index = from >> PAGE_CACHE_SHIFT;
	unsigned offset = from & (PAGE_CACHE_SIZE-1);
	int err;

	/* Block boundary? Nothing to do */
	if (!length)
		return 0;
	BUG_ON((offset + length) > PAGE_CACHE_SIZE);

	memset(&bh, 0, sizeof(bh));
	bh.b_bdev = inode->i_sb->s_bdev;
	bh.b_size = PAGE_CACHE_SIZE;
	err = get_block(inode, index, &bh, 0);
	if (err < 0)
		return err;
	if (buffer_written(&bh)) {
		struct block_device *bdev = bh.b_bdev;
		struct blk_dax_ctl dax = {
			.sector = to_sector(&bh, inode),
			.size = PAGE_CACHE_SIZE,
		};

		if (dax_map_atomic(bdev, &dax) < 0)
			return PTR_ERR(dax.addr);
		clear_pmem(dax.addr + offset, length);
		wmb_pmem();
		dax_unmap_atomic(bdev, &dax);
	}

	return 0;
}
EXPORT_SYMBOL_GPL(dax_zero_page_range);
/**
 * dax_truncate_page - handle a partial page being truncated in a DAX file
 * @inode: The file being truncated
 * @from: The file offset that is being truncated to
 * @get_block: The filesystem method used to translate file offsets to blocks
 *
 * Similar to block_truncate_page(), this function can be called by a
 * filesystem when it is truncating a DAX file to handle the partial page.
 *
 * We work in terms of PAGE_CACHE_SIZE here for commonality with
 * block_truncate_page(), but we could go down to PAGE_SIZE if the filesystem
 * took care of disposing of the unnecessary blocks.  Even if the filesystem
 * block size is smaller than PAGE_SIZE, we have to zero the rest of the page
 * since the file might be mmapped.
 */
int dax_truncate_page(struct inode *inode, loff_t from, get_block_t get_block)
{
	unsigned length = PAGE_CACHE_ALIGN(from) - from;
	return dax_zero_page_range(inode, from, length, get_block);
}
EXPORT_SYMBOL_GPL(dax_truncate_page);
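/*
 * Added note (not in the original): for example, truncating to offset 0x1234
 * with 4K pages gives length = PAGE_CACHE_ALIGN(0x1234) - 0x1234
 * = 0x2000 - 0x1234 = 0xdcc bytes, i.e. the tail of the final partial page
 * beyond the new EOF is zeroed.
 */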