/*
 * fs/dax.c - Direct Access filesystem code
 * Copyright (c) 2013-2014 Intel Corporation
 * Author: Matthew Wilcox <matthew.r.wilcox@intel.com>
 * Author: Ross Zwisler <ross.zwisler@linux.intel.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 */
#include <linux/atomic.h>
#include <linux/blkdev.h>
#include <linux/buffer_head.h>
#include <linux/dax.h>
#include <linux/genhd.h>
#include <linux/highmem.h>
#include <linux/memcontrol.h>
#include <linux/mutex.h>
#include <linux/pagevec.h>
#include <linux/pmem.h>
#include <linux/sched.h>
#include <linux/uio.h>
#include <linux/vmstat.h>
#include <linux/pfn_t.h>
#include <linux/sizes.h>
#include <linux/iomap.h>
/*
 * We use the lowest available bit in an exceptional entry for locking, and
 * the other two bits to determine the entry type. In total, 3 special bits.
 */
#define RADIX_DAX_SHIFT	(RADIX_TREE_EXCEPTIONAL_SHIFT + 3)
#define RADIX_DAX_PTE	(1 << (RADIX_TREE_EXCEPTIONAL_SHIFT + 1))
#define RADIX_DAX_PMD	(1 << (RADIX_TREE_EXCEPTIONAL_SHIFT + 2))
#define RADIX_DAX_TYPE_MASK	(RADIX_DAX_PTE | RADIX_DAX_PMD)
#define RADIX_DAX_TYPE(entry)	((unsigned long)entry & RADIX_DAX_TYPE_MASK)
#define RADIX_DAX_SECTOR(entry)	(((unsigned long)entry >> RADIX_DAX_SHIFT))
#define RADIX_DAX_ENTRY(sector, pmd) ((void *)((unsigned long)sector << \
		RADIX_DAX_SHIFT | (pmd ? RADIX_DAX_PMD : RADIX_DAX_PTE) | \
		RADIX_TREE_EXCEPTIONAL_ENTRY))
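
/*
 * Illustrative layout, not part of the original source: RADIX_DAX_ENTRY()
 * packs the sector above the three special bits, i.e.
 *
 *	RADIX_DAX_ENTRY(sector, pmd) ==
 *		sector << RADIX_DAX_SHIFT | (PMD or PTE type bit) |
 *		RADIX_TREE_EXCEPTIONAL_ENTRY
 *
 * The lock bit (RADIX_DAX_ENTRY_LOCK) is ORed in separately by the users
 * below; RADIX_DAX_SECTOR() and RADIX_DAX_TYPE() undo the packing.
 */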
/* We choose 4096 entries - same as per-zone page wait tables */
#define DAX_WAIT_TABLE_BITS 12
#define DAX_WAIT_TABLE_ENTRIES (1 << DAX_WAIT_TABLE_BITS)

static wait_queue_head_t wait_table[DAX_WAIT_TABLE_ENTRIES];
static int __init init_dax_wait_table(void)
{
	int i;

	for (i = 0; i < DAX_WAIT_TABLE_ENTRIES; i++)
		init_waitqueue_head(wait_table + i);
	return 0;
}
fs_initcall(init_dax_wait_table);
static wait_queue_head_t *dax_entry_waitqueue(struct address_space *mapping,
					      pgoff_t index)
{
	unsigned long hash = hash_long((unsigned long)mapping ^ index,
				       DAX_WAIT_TABLE_BITS);

	return wait_table + hash;
}
static long dax_map_atomic(struct block_device *bdev, struct blk_dax_ctl *dax)
{
	struct request_queue *q = bdev->bd_queue;
	long rc = -EIO;

	dax->addr = ERR_PTR(-EIO);
	if (blk_queue_enter(q, true) != 0)
		return rc;

	rc = bdev_direct_access(bdev, dax);
	if (rc < 0) {
		dax->addr = ERR_PTR(rc);
		blk_queue_exit(q);
		return rc;
	}
	return rc;
}
static void dax_unmap_atomic(struct block_device *bdev,
			     const struct blk_dax_ctl *dax)
{
	if (IS_ERR(dax->addr))
		return;
	blk_queue_exit(bdev->bd_queue);
}
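
/*
 * Illustrative usage of the pair above (mirroring dax_insert_mapping()
 * below): the caller fills in dax.sector and dax.size, maps, uses dax.addr
 * or dax.pfn, then unmaps:
 *
 *	if (dax_map_atomic(bdev, &dax) < 0)
 *		return PTR_ERR(dax.addr);
 *	...use dax.addr / dax.pfn...
 *	dax_unmap_atomic(bdev, &dax);
 */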
struct page *read_dax_sector(struct block_device *bdev, sector_t n)
{
	struct page *page = alloc_pages(GFP_KERNEL, 0);
	struct blk_dax_ctl dax = {
		.size = PAGE_SIZE,
		.sector = n & ~((((int) PAGE_SIZE) / 512) - 1),
	};
	long rc;

	if (!page)
		return ERR_PTR(-ENOMEM);

	rc = dax_map_atomic(bdev, &dax);
	if (rc < 0)
		return ERR_PTR(rc);
	memcpy_from_pmem(page_address(page), dax.addr, PAGE_SIZE);
	dax_unmap_atomic(bdev, &dax);
	return page;
}
static bool buffer_written(struct buffer_head *bh)
{
	return buffer_mapped(bh) && !buffer_unwritten(bh);
}
/*
 * When ext4 encounters a hole, it returns without modifying the buffer_head
 * which means that we can't trust b_size. To cope with this, we set b_state
 * to 0 before calling get_block and, if any bit is set, we know we can trust
 * b_size. Unfortunate, really, since ext4 knows precisely how long a hole is
 * and would save us time calling get_block repeatedly.
 */
static bool buffer_size_valid(struct buffer_head *bh)
{
	return bh->b_state != 0;
}
static sector_t to_sector(const struct buffer_head *bh,
			  const struct inode *inode)
{
	sector_t sector = bh->b_blocknr << (inode->i_blkbits - 9);

	return sector;
}
static ssize_t dax_io(struct inode *inode, struct iov_iter *iter,
		      loff_t start, loff_t end, get_block_t get_block,
		      struct buffer_head *bh)
{
	loff_t pos = start, max = start, bh_max = start;
	bool hole = false;
	struct block_device *bdev = NULL;
	int rw = iov_iter_rw(iter), rc;
	long map_len = 0;
	struct blk_dax_ctl dax = {
		.addr = ERR_PTR(-EIO),
	};
	unsigned blkbits = inode->i_blkbits;
	sector_t file_blks = (i_size_read(inode) + (1 << blkbits) - 1)
								>> blkbits;

	if (rw == READ)
		end = min(end, i_size_read(inode));

	while (pos < end) {
		size_t len;
		if (pos == max) {
			long page = pos >> PAGE_SHIFT;
			sector_t block = page << (PAGE_SHIFT - blkbits);
			unsigned first = pos - (block << blkbits);
			long size;

			if (pos == bh_max) {
				bh->b_size = PAGE_ALIGN(end - pos);
				bh->b_state = 0;
				rc = get_block(inode, block, bh, rw == WRITE);
				if (rc)
					break;
				if (!buffer_size_valid(bh))
					bh->b_size = 1 << blkbits;
				bh_max = pos - first + bh->b_size;
				bdev = bh->b_bdev;
				/*
				 * We allow uninitialized buffers for writes
				 * beyond EOF as those cannot race with faults
				 */
				WARN_ON_ONCE(
					(buffer_new(bh) && block < file_blks) ||
					(rw == WRITE && buffer_unwritten(bh)));
			} else {
				unsigned done = bh->b_size -
						(bh_max - (pos - first));
				bh->b_blocknr += done >> blkbits;
				bh->b_size -= done;
			}

			hole = rw == READ && !buffer_written(bh);
			if (hole) {
				size = bh->b_size - first;
			} else {
				dax_unmap_atomic(bdev, &dax);
				dax.sector = to_sector(bh, inode);
				dax.size = bh->b_size;
				map_len = dax_map_atomic(bdev, &dax);
				if (map_len < 0) {
					rc = map_len;
					break;
				}
				dax.addr += first;
				size = map_len - first;
			}
			/*
			 * pos + size is one past the last offset for IO,
			 * so pos + size can overflow loff_t at extreme offsets.
			 * Cast to u64 to catch this and get the true minimum.
			 */
			max = min_t(u64, pos + size, end);
		}

		if (iov_iter_rw(iter) == WRITE) {
			len = copy_from_iter_pmem(dax.addr, max - pos, iter);
		} else if (!hole)
			len = copy_to_iter((void __force *) dax.addr, max - pos,
					iter);
		else
			len = iov_iter_zero(max - pos, iter);

		if (!len) {
			rc = -EFAULT;
			break;
		}

		pos += len;
		if (!IS_ERR(dax.addr))
			dax.addr += len;
	}

	if (!hole)
		dax_unmap_atomic(bdev, &dax);

	return (pos == start) ? rc : pos - start;
}
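
/*
 * Note on the return convention above: if any bytes were transferred, dax_io()
 * returns the number of bytes (pos - start) and any pending error is dropped;
 * only when no progress was made at all does it return rc.
 */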
/**
 * dax_do_io - Perform I/O to a DAX file
 * @iocb: The control block for this I/O
 * @inode: The file which the I/O is directed at
 * @iter: The addresses to do I/O from or to
 * @get_block: The filesystem method used to translate file offsets to blocks
 * @end_io: A filesystem callback for I/O completion
 * @flags: See below
 *
 * This function uses the same locking scheme as do_blockdev_direct_IO:
 * If @flags has DIO_LOCKING set, we assume that the i_mutex is held by the
 * caller for writes. For reads, we take and release the i_mutex ourselves.
 * If DIO_LOCKING is not set, the filesystem takes care of its own locking.
 * As with do_blockdev_direct_IO(), we increment i_dio_count while the I/O
 * is in progress.
 */
ssize_t dax_do_io(struct kiocb *iocb, struct inode *inode,
		  struct iov_iter *iter, get_block_t get_block,
		  dio_iodone_t end_io, int flags)
{
	struct buffer_head bh;
	ssize_t retval = -EINVAL;
	loff_t pos = iocb->ki_pos;
	loff_t end = pos + iov_iter_count(iter);

	memset(&bh, 0, sizeof(bh));
	bh.b_bdev = inode->i_sb->s_bdev;

	if ((flags & DIO_LOCKING) && iov_iter_rw(iter) == READ)
		inode_lock(inode);

	/* Protects against truncate */
	if (!(flags & DIO_SKIP_DIO_COUNT))
		inode_dio_begin(inode);

	retval = dax_io(inode, iter, pos, end, get_block, &bh);

	if ((flags & DIO_LOCKING) && iov_iter_rw(iter) == READ)
		inode_unlock(inode);

	if (end_io) {
		int err;

		err = end_io(iocb, pos, retval, bh.b_private);
		if (err)
			retval = err;
	}

	if (!(flags & DIO_SKIP_DIO_COUNT))
		inode_dio_end(inode);
	return retval;
}
EXPORT_SYMBOL_GPL(dax_do_io);
/*
 * DAX radix tree locking
 */
struct exceptional_entry_key {
	struct address_space *mapping;
	pgoff_t index;
};

struct wait_exceptional_entry_queue {
	wait_queue_t wait;
	struct exceptional_entry_key key;
};

static int wake_exceptional_entry_func(wait_queue_t *wait, unsigned int mode,
				       int sync, void *keyp)
{
	struct exceptional_entry_key *key = keyp;
	struct wait_exceptional_entry_queue *ewait =
		container_of(wait, struct wait_exceptional_entry_queue, wait);

	if (key->mapping != ewait->key.mapping ||
	    key->index != ewait->key.index)
		return 0;
	return autoremove_wake_function(wait, mode, sync, NULL);
}
/*
 * Check whether the given slot is locked. The function must be called with
 * mapping->tree_lock held.
 */
static inline int slot_locked(struct address_space *mapping, void **slot)
{
	unsigned long entry = (unsigned long)
		radix_tree_deref_slot_protected(slot, &mapping->tree_lock);
	return entry & RADIX_DAX_ENTRY_LOCK;
}
/*
 * Mark the given slot as locked. The function must be called with
 * mapping->tree_lock held.
 */
static inline void *lock_slot(struct address_space *mapping, void **slot)
{
	unsigned long entry = (unsigned long)
		radix_tree_deref_slot_protected(slot, &mapping->tree_lock);

	entry |= RADIX_DAX_ENTRY_LOCK;
	radix_tree_replace_slot(slot, (void *)entry);
	return (void *)entry;
}
/*
 * Mark the given slot as unlocked. The function must be called with
 * mapping->tree_lock held.
 */
static inline void *unlock_slot(struct address_space *mapping, void **slot)
{
	unsigned long entry = (unsigned long)
		radix_tree_deref_slot_protected(slot, &mapping->tree_lock);

	entry &= ~(unsigned long)RADIX_DAX_ENTRY_LOCK;
	radix_tree_replace_slot(slot, (void *)entry);
	return (void *)entry;
}
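
/*
 * Note on the three helpers above: slot_locked(), lock_slot() and
 * unlock_slot() all require mapping->tree_lock, and the lock bit itself
 * (RADIX_DAX_ENTRY_LOCK) is what get_unlocked_mapping_entry() below waits on.
 */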
/*
 * Look up an entry in the radix tree, wait for it to become unlocked if it
 * is an exceptional entry, and return it. The caller must call
 * put_unlocked_mapping_entry() if it decided not to lock the entry or
 * put_locked_mapping_entry() if it locked the entry and now wants to
 * unlock it.
 *
 * The function must be called with mapping->tree_lock held.
 */
static void *get_unlocked_mapping_entry(struct address_space *mapping,
					pgoff_t index, void ***slotp)
{
	void *ret, **slot;
	struct wait_exceptional_entry_queue ewait;
	wait_queue_head_t *wq = dax_entry_waitqueue(mapping, index);

	init_wait(&ewait.wait);
	ewait.wait.func = wake_exceptional_entry_func;
	ewait.key.mapping = mapping;
	ewait.key.index = index;

	for (;;) {
		ret = __radix_tree_lookup(&mapping->page_tree, index, NULL,
					  &slot);
		if (!ret || !radix_tree_exceptional_entry(ret) ||
		    !slot_locked(mapping, slot)) {
			if (slotp)
				*slotp = slot;
			return ret;
		}
		prepare_to_wait_exclusive(wq, &ewait.wait,
					  TASK_UNINTERRUPTIBLE);
		spin_unlock_irq(&mapping->tree_lock);
		schedule();
		finish_wait(wq, &ewait.wait);
		spin_lock_irq(&mapping->tree_lock);
	}
}
/*
 * Find the radix tree entry at the given index. If it points to a page,
 * return with the page locked. If it points to an exceptional entry, return
 * with the radix tree entry locked. If the radix tree doesn't contain the
 * given index, create an empty exceptional entry for the index and return
 * with it locked.
 *
 * Note: Unlike filemap_fault() we don't honor FAULT_FLAG_RETRY flags. For
 * persistent memory the benefit is doubtful. We can add that later if we can
 * show it helps.
 */
static void *grab_mapping_entry(struct address_space *mapping, pgoff_t index)
{
	void *ret, **slot;

restart:
	spin_lock_irq(&mapping->tree_lock);
	ret = get_unlocked_mapping_entry(mapping, index, &slot);
	/* No entry for given index? Make sure radix tree is big enough. */
	if (!ret) {
		int err;

		spin_unlock_irq(&mapping->tree_lock);
		err = radix_tree_preload(
				mapping_gfp_mask(mapping) & ~__GFP_HIGHMEM);
		if (err)
			return ERR_PTR(err);
		ret = (void *)(RADIX_TREE_EXCEPTIONAL_ENTRY |
			       RADIX_DAX_ENTRY_LOCK);
		spin_lock_irq(&mapping->tree_lock);
		err = radix_tree_insert(&mapping->page_tree, index, ret);
		radix_tree_preload_end();
		if (err) {
			spin_unlock_irq(&mapping->tree_lock);
			/* Someone already created the entry? */
			if (err == -EEXIST)
				goto restart;
			return ERR_PTR(err);
		}
		/* Good, we have inserted empty locked entry into the tree. */
		mapping->nrexceptional++;
		spin_unlock_irq(&mapping->tree_lock);
		return ret;
	}
	/* Normal page in radix tree? */
	if (!radix_tree_exceptional_entry(ret)) {
		struct page *page = ret;

		get_page(page);
		spin_unlock_irq(&mapping->tree_lock);
		lock_page(page);
		/* Page got truncated? Retry... */
		if (unlikely(page->mapping != mapping)) {
			unlock_page(page);
			put_page(page);
			goto restart;
		}
		return page;
	}
	ret = lock_slot(mapping, slot);
	spin_unlock_irq(&mapping->tree_lock);
	return ret;
}
void dax_wake_mapping_entry_waiter(struct address_space *mapping,
				   pgoff_t index, bool wake_all)
{
	wait_queue_head_t *wq = dax_entry_waitqueue(mapping, index);

	/*
	 * Checking for locked entry and prepare_to_wait_exclusive() happens
	 * under mapping->tree_lock, ditto for entry handling in our callers.
	 * So at this point all tasks that could have seen our entry locked
	 * must be in the waitqueue and the following check will see them.
	 */
	if (waitqueue_active(wq)) {
		struct exceptional_entry_key key;

		key.mapping = mapping;
		key.index = index;
		__wake_up(wq, TASK_NORMAL, wake_all ? 0 : 1, &key);
	}
}
void dax_unlock_mapping_entry(struct address_space *mapping, pgoff_t index)
{
	void *ret, **slot;

	spin_lock_irq(&mapping->tree_lock);
	ret = __radix_tree_lookup(&mapping->page_tree, index, NULL, &slot);
	if (WARN_ON_ONCE(!ret || !radix_tree_exceptional_entry(ret) ||
			 !slot_locked(mapping, slot))) {
		spin_unlock_irq(&mapping->tree_lock);
		return;
	}
	unlock_slot(mapping, slot);
	spin_unlock_irq(&mapping->tree_lock);
	dax_wake_mapping_entry_waiter(mapping, index, false);
}
static void put_locked_mapping_entry(struct address_space *mapping,
				     pgoff_t index, void *entry)
{
	if (!radix_tree_exceptional_entry(entry)) {
		unlock_page(entry);
		put_page(entry);
	} else {
		dax_unlock_mapping_entry(mapping, index);
	}
}
/*
 * Called when we are done with a radix tree entry we looked up via
 * get_unlocked_mapping_entry() and which we didn't lock in the end.
 */
static void put_unlocked_mapping_entry(struct address_space *mapping,
				       pgoff_t index, void *entry)
{
	if (!radix_tree_exceptional_entry(entry))
		return;

	/* We have to wake up next waiter for the radix tree entry lock */
	dax_wake_mapping_entry_waiter(mapping, index, false);
}
/*
 * Delete the exceptional DAX entry at @index from @mapping. Wait for the
 * radix tree entry to get unlocked before deleting it.
 */
int dax_delete_mapping_entry(struct address_space *mapping, pgoff_t index)
{
	void *entry;

	spin_lock_irq(&mapping->tree_lock);
	entry = get_unlocked_mapping_entry(mapping, index, NULL);
	/*
	 * This gets called from truncate / punch_hole path. As such, the caller
	 * must hold locks protecting against concurrent modifications of the
	 * radix tree (usually fs-private i_mmap_sem for writing). Since the
	 * caller has seen exceptional entry for this index, we better find it
	 * at that index as well...
	 */
	if (WARN_ON_ONCE(!entry || !radix_tree_exceptional_entry(entry))) {
		spin_unlock_irq(&mapping->tree_lock);
		return 0;
	}
	radix_tree_delete(&mapping->page_tree, index);
	mapping->nrexceptional--;
	spin_unlock_irq(&mapping->tree_lock);
	dax_wake_mapping_entry_waiter(mapping, index, true);

	return 1;
}
/*
 * The user has performed a load from a hole in the file. Allocating
 * a new page in the file would cause excessive storage usage for
 * workloads with sparse files. We allocate a page cache page instead.
 * We'll kick it out of the page cache if it's ever written to,
 * otherwise it will simply fall out of the page cache under memory
 * pressure without ever having been dirtied.
 */
static int dax_load_hole(struct address_space *mapping, void *entry,
			 struct vm_fault *vmf)
{
	struct page *page;

	/* Hole page already exists? Return it... */
	if (!radix_tree_exceptional_entry(entry)) {
		vmf->page = entry;
		return VM_FAULT_LOCKED;
	}

	/* This will replace locked radix tree entry with a hole page */
	page = find_or_create_page(mapping, vmf->pgoff,
				   vmf->gfp_mask | __GFP_ZERO);
	if (!page) {
		put_locked_mapping_entry(mapping, vmf->pgoff, entry);
		return VM_FAULT_OOM;
	}
	vmf->page = page;
	return VM_FAULT_LOCKED;
}
static int copy_user_dax(struct block_device *bdev, sector_t sector, size_t size,
		struct page *to, unsigned long vaddr)
{
	struct blk_dax_ctl dax = {
		.sector = sector,
		.size = size,
	};
	void *vto;

	if (dax_map_atomic(bdev, &dax) < 0)
		return PTR_ERR(dax.addr);
	vto = kmap_atomic(to);
	copy_user_page(vto, (void __force *)dax.addr, vaddr, to);
	kunmap_atomic(vto);
	dax_unmap_atomic(bdev, &dax);
	return 0;
}
#define DAX_PMD_INDEX(page_index) (page_index & (PMD_MASK >> PAGE_SHIFT))
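
/*
 * DAX_PMD_INDEX() masks off the low bits of a page index so that it points
 * at the first page covered by the same PMD entry; dax_writeback_mapping_range()
 * below uses it to see whether the start of a range falls inside a PMD entry.
 */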
static void *dax_insert_mapping_entry(struct address_space *mapping,
				      struct vm_fault *vmf,
				      void *entry, sector_t sector)
{
	struct radix_tree_root *page_tree = &mapping->page_tree;
	int error = 0;
	bool hole_fill = false;
	void *new_entry;
	pgoff_t index = vmf->pgoff;

	if (vmf->flags & FAULT_FLAG_WRITE)
		__mark_inode_dirty(mapping->host, I_DIRTY_PAGES);

	/* Replacing hole page with block mapping? */
	if (!radix_tree_exceptional_entry(entry)) {
		hole_fill = true;
		/*
		 * Unmap the page now before we remove it from page cache below.
		 * The page is locked so it cannot be faulted in again.
		 */
		unmap_mapping_range(mapping, vmf->pgoff << PAGE_SHIFT,
				    PAGE_SIZE, 0);
		error = radix_tree_preload(vmf->gfp_mask & ~__GFP_HIGHMEM);
		if (error)
			return ERR_PTR(error);
	}

	spin_lock_irq(&mapping->tree_lock);
	new_entry = (void *)((unsigned long)RADIX_DAX_ENTRY(sector, false) |
			     RADIX_DAX_ENTRY_LOCK);
	if (hole_fill) {
		__delete_from_page_cache(entry, NULL);
		/* Drop pagecache reference */
		put_page(entry);
		error = radix_tree_insert(page_tree, index, new_entry);
		if (error) {
			new_entry = ERR_PTR(error);
			goto unlock;
		}
		mapping->nrexceptional++;
	} else {
		void **slot;
		void *ret;

		ret = __radix_tree_lookup(page_tree, index, NULL, &slot);
		WARN_ON_ONCE(ret != entry);
		radix_tree_replace_slot(slot, new_entry);
	}
	if (vmf->flags & FAULT_FLAG_WRITE)
		radix_tree_tag_set(page_tree, index, PAGECACHE_TAG_DIRTY);
 unlock:
	spin_unlock_irq(&mapping->tree_lock);
	if (hole_fill) {
		radix_tree_preload_end();
		/*
		 * We don't need hole page anymore, it has been replaced with
		 * locked radix tree entry now.
		 */
		if (mapping->a_ops->freepage)
			mapping->a_ops->freepage(entry);
		unlock_page(entry);
		put_page(entry);
	}
	return new_entry;
}
static int dax_writeback_one(struct block_device *bdev,
		struct address_space *mapping, pgoff_t index, void *entry)
{
	struct radix_tree_root *page_tree = &mapping->page_tree;
	int type = RADIX_DAX_TYPE(entry);
	struct radix_tree_node *node;
	struct blk_dax_ctl dax;
	void **slot;
	int ret = 0;

	spin_lock_irq(&mapping->tree_lock);
	/*
	 * Regular page slots are stabilized by the page lock even
	 * without the tree itself locked. These unlocked entries
	 * need verification under the tree lock.
	 */
	if (!__radix_tree_lookup(page_tree, index, &node, &slot))
		goto unlock;

	/* another fsync thread may have already written back this entry */
	if (!radix_tree_tag_get(page_tree, index, PAGECACHE_TAG_TOWRITE))
		goto unlock;

	if (WARN_ON_ONCE(type != RADIX_DAX_PTE && type != RADIX_DAX_PMD)) {
		ret = -EIO;
		goto unlock;
	}

	dax.sector = RADIX_DAX_SECTOR(entry);
	dax.size = (type == RADIX_DAX_PMD ? PMD_SIZE : PAGE_SIZE);
	spin_unlock_irq(&mapping->tree_lock);

	/*
	 * We cannot hold tree_lock while calling dax_map_atomic() because it
	 * eventually calls cond_resched().
	 */
	ret = dax_map_atomic(bdev, &dax);
	if (ret < 0)
		return ret;

	if (WARN_ON_ONCE(ret < dax.size)) {
		ret = -EIO;
		goto unmap;
	}

	wb_cache_pmem(dax.addr, dax.size);

	spin_lock_irq(&mapping->tree_lock);
	radix_tree_tag_clear(page_tree, index, PAGECACHE_TAG_TOWRITE);
	spin_unlock_irq(&mapping->tree_lock);
 unmap:
	dax_unmap_atomic(bdev, &dax);
	return ret;

 unlock:
	spin_unlock_irq(&mapping->tree_lock);
	return ret;
}
/*
 * Flush the mapping to the persistent domain within the byte range of [start,
 * end]. This is required by data integrity operations to ensure file data is
 * on persistent storage prior to completion of the operation.
 */
int dax_writeback_mapping_range(struct address_space *mapping,
		struct block_device *bdev, struct writeback_control *wbc)
{
	struct inode *inode = mapping->host;
	pgoff_t start_index, end_index, pmd_index;
	pgoff_t indices[PAGEVEC_SIZE];
	struct pagevec pvec;
	bool done = false;
	int i, ret = 0;
	void *entry;

	if (WARN_ON_ONCE(inode->i_blkbits != PAGE_SHIFT))
		return -EIO;

	if (!mapping->nrexceptional || wbc->sync_mode != WB_SYNC_ALL)
		return 0;

	start_index = wbc->range_start >> PAGE_SHIFT;
	end_index = wbc->range_end >> PAGE_SHIFT;
	pmd_index = DAX_PMD_INDEX(start_index);

	rcu_read_lock();
	entry = radix_tree_lookup(&mapping->page_tree, pmd_index);
	rcu_read_unlock();

	/* see if the start of our range is covered by a PMD entry */
	if (entry && RADIX_DAX_TYPE(entry) == RADIX_DAX_PMD)
		start_index = pmd_index;

	tag_pages_for_writeback(mapping, start_index, end_index);

	pagevec_init(&pvec, 0);
	while (!done) {
		pvec.nr = find_get_entries_tag(mapping, start_index,
				PAGECACHE_TAG_TOWRITE, PAGEVEC_SIZE,
				pvec.pages, indices);

		if (pvec.nr == 0)
			break;

		for (i = 0; i < pvec.nr; i++) {
			if (indices[i] > end_index) {
				done = true;
				break;
			}

			ret = dax_writeback_one(bdev, mapping, indices[i],
					pvec.pages[i]);
			if (ret < 0)
				return ret;
		}
		start_index = indices[pvec.nr - 1] + 1;
	}
	return ret;
}
EXPORT_SYMBOL_GPL(dax_writeback_mapping_range);
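
/*
 * Summary of the flow above: dax_writeback_mapping_range() tags the range
 * with PAGECACHE_TAG_TOWRITE, walks the tagged entries with
 * find_get_entries_tag(), and dax_writeback_one() flushes each entry's
 * backing pmem with wb_cache_pmem() before clearing the tag.
 */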
static int dax_insert_mapping(struct address_space *mapping,
		struct block_device *bdev, sector_t sector, size_t size,
		void **entryp, struct vm_area_struct *vma, struct vm_fault *vmf)
{
	unsigned long vaddr = (unsigned long)vmf->virtual_address;
	struct blk_dax_ctl dax = {
		.sector = sector,
		.size = size,
	};
	void *ret;
	void *entry = *entryp;

	if (dax_map_atomic(bdev, &dax) < 0)
		return PTR_ERR(dax.addr);
	dax_unmap_atomic(bdev, &dax);

	ret = dax_insert_mapping_entry(mapping, vmf, entry, dax.sector);
	if (IS_ERR(ret))
		return PTR_ERR(ret);
	*entryp = ret;

	return vm_insert_mixed(vma, vaddr, dax.pfn);
}
/**
 * dax_fault - handle a page fault on a DAX file
 * @vma: The virtual memory area where the fault occurred
 * @vmf: The description of the fault
 * @get_block: The filesystem method used to translate file offsets to blocks
 *
 * When a page fault occurs, filesystems may call this helper in their
 * fault handler for DAX files. dax_fault() assumes the caller has done all
 * the necessary locking for the page fault to proceed successfully.
 */
int dax_fault(struct vm_area_struct *vma, struct vm_fault *vmf,
			get_block_t get_block)
{
	struct file *file = vma->vm_file;
	struct address_space *mapping = file->f_mapping;
	struct inode *inode = mapping->host;
	void *entry;
	struct buffer_head bh;
	unsigned long vaddr = (unsigned long)vmf->virtual_address;
	unsigned blkbits = inode->i_blkbits;
	sector_t block;
	pgoff_t size;
	int error;
	int major = 0;

	/*
	 * Check whether offset isn't beyond end of file now. Caller is supposed
	 * to hold locks serializing us with truncate / punch hole so this is
	 * a reliable test.
	 */
	size = (i_size_read(inode) + PAGE_SIZE - 1) >> PAGE_SHIFT;
	if (vmf->pgoff >= size)
		return VM_FAULT_SIGBUS;

	memset(&bh, 0, sizeof(bh));
	block = (sector_t)vmf->pgoff << (PAGE_SHIFT - blkbits);
	bh.b_bdev = inode->i_sb->s_bdev;
	bh.b_size = PAGE_SIZE;

	entry = grab_mapping_entry(mapping, vmf->pgoff);
	if (IS_ERR(entry)) {
		error = PTR_ERR(entry);
		goto out;
	}

	error = get_block(inode, block, &bh, 0);
	if (!error && (bh.b_size < PAGE_SIZE))
		error = -EIO;		/* fs corruption? */
	if (error)
		goto unlock_entry;

	if (vmf->cow_page) {
		struct page *new_page = vmf->cow_page;
		if (buffer_written(&bh))
			error = copy_user_dax(bh.b_bdev, to_sector(&bh, inode),
					bh.b_size, new_page, vaddr);
		else
			clear_user_highpage(new_page, vaddr);
		if (error)
			goto unlock_entry;
		if (!radix_tree_exceptional_entry(entry)) {
			vmf->page = entry;
			return VM_FAULT_LOCKED;
		}
		vmf->entry = entry;
		return VM_FAULT_DAX_LOCKED;
	}

	if (!buffer_mapped(&bh)) {
		if (vmf->flags & FAULT_FLAG_WRITE) {
			error = get_block(inode, block, &bh, 1);
			count_vm_event(PGMAJFAULT);
			mem_cgroup_count_vm_event(vma->vm_mm, PGMAJFAULT);
			major = VM_FAULT_MAJOR;
			if (!error && (bh.b_size < PAGE_SIZE))
				error = -EIO;
			if (error)
				goto unlock_entry;
		} else {
			return dax_load_hole(mapping, entry, vmf);
		}
	}

	/* Filesystem should not return unwritten buffers to us! */
	WARN_ON_ONCE(buffer_unwritten(&bh) || buffer_new(&bh));
	error = dax_insert_mapping(mapping, bh.b_bdev, to_sector(&bh, inode),
			bh.b_size, &entry, vma, vmf);
 unlock_entry:
	put_locked_mapping_entry(mapping, vmf->pgoff, entry);
 out:
	if (error == -ENOMEM)
		return VM_FAULT_OOM | major;
	/* -EBUSY is fine, somebody else faulted on the same PTE */
	if ((error < 0) && (error != -EBUSY))
		return VM_FAULT_SIGBUS | major;
	return VM_FAULT_NOPAGE | major;
}
EXPORT_SYMBOL_GPL(dax_fault);
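
/*
 * Note on the return values above: when dax_fault() had to allocate blocks,
 * it ORs VM_FAULT_MAJOR into whatever status it returns, so the fault is
 * accounted as a major fault by the caller.
 */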
#if defined(CONFIG_TRANSPARENT_HUGEPAGE)
/*
 * The 'colour' (ie low bits) within a PMD of a page offset. This comes up
 * more often than one might expect in the below function.
 */
#define PG_PMD_COLOUR	((PMD_SIZE >> PAGE_SHIFT) - 1)
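
/*
 * Illustrative value, assuming a typical x86-64 configuration with 2MiB PMDs
 * and 4KiB pages: PG_PMD_COLOUR == (2MiB / 4KiB) - 1 == 511, i.e. the low 9
 * bits of a page offset within a PMD.
 */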
static void __dax_dbg(struct buffer_head *bh, unsigned long address,
		const char *reason, const char *fn)
{
	if (bh) {
		char bname[BDEVNAME_SIZE];
		bdevname(bh->b_bdev, bname);
		pr_debug("%s: %s addr: %lx dev %s state %lx start %lld "
			"length %zd fallback: %s\n", fn, current->comm,
			address, bname, bh->b_state, (u64)bh->b_blocknr,
			bh->b_size, reason);
	} else {
		pr_debug("%s: %s addr: %lx fallback: %s\n", fn,
			current->comm, address, reason);
	}
}

#define dax_pmd_dbg(bh, address, reason)	__dax_dbg(bh, address, reason, "dax_pmd")
/**
 * dax_pmd_fault - handle a PMD fault on a DAX file
 * @vma: The virtual memory area where the fault occurred
 * @vmf: The description of the fault
 * @get_block: The filesystem method used to translate file offsets to blocks
 *
 * When a page fault occurs, filesystems may call this helper in their
 * pmd_fault handler for DAX files.
 */
int dax_pmd_fault(struct vm_area_struct *vma, unsigned long address,
		pmd_t *pmd, unsigned int flags, get_block_t get_block)
{
	struct file *file = vma->vm_file;
	struct address_space *mapping = file->f_mapping;
	struct inode *inode = mapping->host;
	struct buffer_head bh;
	unsigned blkbits = inode->i_blkbits;
	unsigned long pmd_addr = address & PMD_MASK;
	bool write = flags & FAULT_FLAG_WRITE;
	struct block_device *bdev;
	pgoff_t size, pgoff;
	sector_t block;
	int result = 0;
	bool alloc = false;

	/* dax pmd mappings require pfn_t_devmap() */
	if (!IS_ENABLED(CONFIG_FS_DAX_PMD))
		return VM_FAULT_FALLBACK;

	/* Fall back to PTEs if we're going to COW */
	if (write && !(vma->vm_flags & VM_SHARED)) {
		split_huge_pmd(vma, pmd, address);
		dax_pmd_dbg(NULL, address, "cow write");
		return VM_FAULT_FALLBACK;
	}
	/* If the PMD would extend outside the VMA */
	if (pmd_addr < vma->vm_start) {
		dax_pmd_dbg(NULL, address, "vma start unaligned");
		return VM_FAULT_FALLBACK;
	}
	if ((pmd_addr + PMD_SIZE) > vma->vm_end) {
		dax_pmd_dbg(NULL, address, "vma end unaligned");
		return VM_FAULT_FALLBACK;
	}

	pgoff = linear_page_index(vma, pmd_addr);
	size = (i_size_read(inode) + PAGE_SIZE - 1) >> PAGE_SHIFT;
	if (pgoff >= size)
		return VM_FAULT_SIGBUS;
	/* If the PMD would cover blocks out of the file */
	if ((pgoff | PG_PMD_COLOUR) >= size) {
		dax_pmd_dbg(NULL, address,
				"offset + huge page size > file size");
		return VM_FAULT_FALLBACK;
	}

	memset(&bh, 0, sizeof(bh));
	bh.b_bdev = inode->i_sb->s_bdev;
	block = (sector_t)pgoff << (PAGE_SHIFT - blkbits);

	bh.b_size = PMD_SIZE;

	if (get_block(inode, block, &bh, 0) != 0)
		return VM_FAULT_SIGBUS;

	if (!buffer_mapped(&bh) && write) {
		if (get_block(inode, block, &bh, 1) != 0)
			return VM_FAULT_SIGBUS;
		alloc = true;
		WARN_ON_ONCE(buffer_unwritten(&bh) || buffer_new(&bh));
	}

	bdev = bh.b_bdev;

	/*
	 * If the filesystem isn't willing to tell us the length of a hole,
	 * just fall back to PTEs. Calling get_block 512 times in a loop
	 * would be silly.
	 */
	if (!buffer_size_valid(&bh) || bh.b_size < PMD_SIZE) {
		dax_pmd_dbg(&bh, address, "allocated block too small");
		return VM_FAULT_FALLBACK;
	}

	/*
	 * If we allocated new storage, make sure no process has any
	 * zero pages covering this hole
	 */
	if (alloc) {
		loff_t lstart = pgoff << PAGE_SHIFT;
		loff_t lend = lstart + PMD_SIZE - 1; /* inclusive */

		truncate_pagecache_range(inode, lstart, lend);
	}

	if (!write && !buffer_mapped(&bh)) {
		spinlock_t *ptl;
		pmd_t entry;
		struct page *zero_page = mm_get_huge_zero_page(vma->vm_mm);

		if (unlikely(!zero_page)) {
			dax_pmd_dbg(&bh, address, "no zero page");
			goto fallback;
		}

		ptl = pmd_lock(vma->vm_mm, pmd);
		if (!pmd_none(*pmd)) {
			spin_unlock(ptl);
			dax_pmd_dbg(&bh, address, "pmd already present");
			goto fallback;
		}

		dev_dbg(part_to_dev(bdev->bd_part),
				"%s: %s addr: %lx pfn: <zero> sect: %llx\n",
				__func__, current->comm, address,
				(unsigned long long) to_sector(&bh, inode));

		entry = mk_pmd(zero_page, vma->vm_page_prot);
		entry = pmd_mkhuge(entry);
		set_pmd_at(vma->vm_mm, pmd_addr, pmd, entry);
		result = VM_FAULT_NOPAGE;
		spin_unlock(ptl);
	} else {
		struct blk_dax_ctl dax = {
			.sector = to_sector(&bh, inode),
			.size = PMD_SIZE,
		};
		long length = dax_map_atomic(bdev, &dax);

		if (length < 0) {
			dax_pmd_dbg(&bh, address, "dax-error fallback");
			goto fallback;
		}
		if (length < PMD_SIZE) {
			dax_pmd_dbg(&bh, address, "dax-length too small");
			dax_unmap_atomic(bdev, &dax);
			goto fallback;
		}
		if (pfn_t_to_pfn(dax.pfn) & PG_PMD_COLOUR) {
			dax_pmd_dbg(&bh, address, "pfn unaligned");
			dax_unmap_atomic(bdev, &dax);
			goto fallback;
		}

		if (!pfn_t_devmap(dax.pfn)) {
			dax_unmap_atomic(bdev, &dax);
			dax_pmd_dbg(&bh, address, "pfn not in memmap");
			goto fallback;
		}
		dax_unmap_atomic(bdev, &dax);

		/*
		 * For PTE faults we insert a radix tree entry for reads, and
		 * leave it clean. Then on the first write we dirty the radix
		 * tree entry via the dax_pfn_mkwrite() path. This sequence
		 * allows the dax_pfn_mkwrite() call to be simpler and avoid a
		 * call into get_block() to translate the pgoff to a sector in
		 * order to be able to create a new radix tree entry.
		 *
		 * The PMD path doesn't have an equivalent to
		 * dax_pfn_mkwrite(), though, so for a read followed by a
		 * write we traverse all the way through dax_pmd_fault()
		 * twice. This means we can just skip inserting a radix tree
		 * entry completely on the initial read and just wait until
		 * the write to insert a dirty entry.
		 */
		if (write) {
			/*
			 * We should insert radix-tree entry and dirty it here.
			 * For now this is broken...
			 */
		}

		dev_dbg(part_to_dev(bdev->bd_part),
				"%s: %s addr: %lx pfn: %lx sect: %llx\n",
				__func__, current->comm, address,
				pfn_t_to_pfn(dax.pfn),
				(unsigned long long) dax.sector);
		result |= vmf_insert_pfn_pmd(vma, address, pmd,
				dax.pfn, write);
	}

 out:
	return result;

 fallback:
	count_vm_event(THP_FAULT_FALLBACK);
	result = VM_FAULT_FALLBACK;
	goto out;
}
EXPORT_SYMBOL_GPL(dax_pmd_fault);
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
/**
 * dax_pfn_mkwrite - handle first write to DAX page
 * @vma: The virtual memory area where the fault occurred
 * @vmf: The description of the fault
 */
int dax_pfn_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct file *file = vma->vm_file;
	struct address_space *mapping = file->f_mapping;
	void *entry;
	pgoff_t index = vmf->pgoff;

	spin_lock_irq(&mapping->tree_lock);
	entry = get_unlocked_mapping_entry(mapping, index, NULL);
	if (!entry || !radix_tree_exceptional_entry(entry))
		goto out;
	radix_tree_tag_set(&mapping->page_tree, index, PAGECACHE_TAG_DIRTY);
	put_unlocked_mapping_entry(mapping, index, entry);
out:
	spin_unlock_irq(&mapping->tree_lock);
	return VM_FAULT_NOPAGE;
}
EXPORT_SYMBOL_GPL(dax_pfn_mkwrite);
static bool dax_range_is_aligned(struct block_device *bdev,
				 unsigned int offset, unsigned int length)
{
	unsigned short sector_size = bdev_logical_block_size(bdev);

	if (!IS_ALIGNED(offset, sector_size))
		return false;
	if (!IS_ALIGNED(length, sector_size))
		return false;

	return true;
}
int __dax_zero_page_range(struct block_device *bdev, sector_t sector,
		unsigned int offset, unsigned int length)
{
	struct blk_dax_ctl dax = {
		.sector		= sector,
		.size		= PAGE_SIZE,
	};

	if (dax_range_is_aligned(bdev, offset, length)) {
		sector_t start_sector = dax.sector + (offset >> 9);

		return blkdev_issue_zeroout(bdev, start_sector,
				length >> 9, GFP_NOFS, true);
	} else {
		if (dax_map_atomic(bdev, &dax) < 0)
			return PTR_ERR(dax.addr);
		clear_pmem(dax.addr + offset, length);
		dax_unmap_atomic(bdev, &dax);
	}
	return 0;
}
EXPORT_SYMBOL_GPL(__dax_zero_page_range);
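
/*
 * Note the two paths above: a range aligned to the device's logical block
 * size is zeroed through blkdev_issue_zeroout(), while an unaligned range is
 * mapped with dax_map_atomic() and cleared in place with clear_pmem().
 */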
/**
 * dax_zero_page_range - zero a range within a page of a DAX file
 * @inode: The file being truncated
 * @from: The file offset that is being truncated to
 * @length: The number of bytes to zero
 * @get_block: The filesystem method used to translate file offsets to blocks
 *
 * This function can be called by a filesystem when it is zeroing part of a
 * page in a DAX file. This is intended for hole-punch operations. If
 * you are truncating a file, the helper function dax_truncate_page() may be
 * more convenient.
 */
int dax_zero_page_range(struct inode *inode, loff_t from, unsigned length,
							get_block_t get_block)
{
	struct buffer_head bh;
	pgoff_t index = from >> PAGE_SHIFT;
	unsigned offset = from & (PAGE_SIZE-1);
	int err;

	/* Block boundary? Nothing to do */
	if (!length)
		return 0;
	BUG_ON((offset + length) > PAGE_SIZE);

	memset(&bh, 0, sizeof(bh));
	bh.b_bdev = inode->i_sb->s_bdev;
	bh.b_size = PAGE_SIZE;
	err = get_block(inode, index, &bh, 0);
	if (err < 0 || !buffer_written(&bh))
		return err;

	return __dax_zero_page_range(bh.b_bdev, to_sector(&bh, inode),
			offset, length);
}
EXPORT_SYMBOL_GPL(dax_zero_page_range);
/**
 * dax_truncate_page - handle a partial page being truncated in a DAX file
 * @inode: The file being truncated
 * @from: The file offset that is being truncated to
 * @get_block: The filesystem method used to translate file offsets to blocks
 *
 * Similar to block_truncate_page(), this function can be called by a
 * filesystem when it is truncating a DAX file to handle the partial page.
 */
int dax_truncate_page(struct inode *inode, loff_t from, get_block_t get_block)
{
	unsigned length = PAGE_ALIGN(from) - from;
	return dax_zero_page_range(inode, from, length, get_block);
}
EXPORT_SYMBOL_GPL(dax_truncate_page);
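
/*
 * Worked example for the length computation above, assuming 4KiB pages:
 * truncating to from == 0x11234 gives PAGE_ALIGN(from) == 0x12000, so
 * length == 0xdcc and the final 0xdcc bytes of that page are zeroed.
 */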
#ifdef CONFIG_FS_IOMAP
static loff_t
iomap_dax_actor(struct inode *inode, loff_t pos, loff_t length, void *data,
		struct iomap *iomap)
{
	struct iov_iter *iter = data;
	loff_t end = pos + length, done = 0;
	ssize_t ret = 0;

	if (iov_iter_rw(iter) == READ) {
		end = min(end, i_size_read(inode));
		if (pos >= end)
			return 0;

		if (iomap->type == IOMAP_HOLE || iomap->type == IOMAP_UNWRITTEN)
			return iov_iter_zero(min(length, end - pos), iter);
	}

	if (WARN_ON_ONCE(iomap->type != IOMAP_MAPPED))
		return -EIO;

	/*
	 * Write can allocate block for an area which has a hole page mapped
	 * into page tables. We have to tear down these mappings so that data
	 * written by write(2) is visible in mmap.
	 */
	if ((iomap->flags & IOMAP_F_NEW) && inode->i_mapping->nrpages) {
		invalidate_inode_pages2_range(inode->i_mapping,
					      pos >> PAGE_SHIFT,
					      (end - 1) >> PAGE_SHIFT);
	}

	while (pos < end) {
		unsigned offset = pos & (PAGE_SIZE - 1);
		struct blk_dax_ctl dax = { 0 };
		ssize_t map_len;

		if (fatal_signal_pending(current)) {
			ret = -EINTR;
			break;
		}

		dax.sector = iomap->blkno +
			(((pos & PAGE_MASK) - iomap->offset) >> 9);
		dax.size = (length + offset + PAGE_SIZE - 1) & PAGE_MASK;
		map_len = dax_map_atomic(iomap->bdev, &dax);
		if (map_len < 0) {
			ret = map_len;
			break;
		}

		dax.addr += offset;
		map_len -= offset;
		if (map_len > end - pos)
			map_len = end - pos;

		if (iov_iter_rw(iter) == WRITE)
			map_len = copy_from_iter_pmem(dax.addr, map_len, iter);
		else
			map_len = copy_to_iter(dax.addr, map_len, iter);
		dax_unmap_atomic(iomap->bdev, &dax);
		if (map_len <= 0) {
			ret = map_len ? map_len : -EFAULT;
			break;
		}

		pos += map_len;
		length -= map_len;
		done += map_len;
	}

	return done ? done : ret;
}
/**
 * iomap_dax_rw - Perform I/O to a DAX file
 * @iocb: The control block for this I/O
 * @iter: The addresses to do I/O from or to
 * @ops: iomap ops passed from the file system
 *
 * This function performs read and write operations to directly mapped
 * persistent memory. The caller needs to take care of read/write exclusion
 * and evicting any page cache pages in the region under I/O.
 */
ssize_t
iomap_dax_rw(struct kiocb *iocb, struct iov_iter *iter,
		struct iomap_ops *ops)
{
	struct address_space *mapping = iocb->ki_filp->f_mapping;
	struct inode *inode = mapping->host;
	loff_t pos = iocb->ki_pos, ret = 0, done = 0;
	unsigned flags = 0;

	if (iov_iter_rw(iter) == WRITE)
		flags |= IOMAP_WRITE;

	while (iov_iter_count(iter)) {
		ret = iomap_apply(inode, pos, iov_iter_count(iter), flags, ops,
				iter, iomap_dax_actor);
		if (ret <= 0)
			break;
		pos += ret;
		done += ret;
	}

	iocb->ki_pos += done;
	return done ? done : ret;
}
EXPORT_SYMBOL_GPL(iomap_dax_rw);
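
/*
 * Note on the loop above: each iomap_apply() call maps at most one extent and
 * returns how many bytes iomap_dax_actor() transferred; the loop advances
 * pos/done by that amount and stops on the first zero or negative return.
 */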
/**
 * iomap_dax_fault - handle a page fault on a DAX file
 * @vma: The virtual memory area where the fault occurred
 * @vmf: The description of the fault
 * @ops: iomap ops passed from the file system
 *
 * When a page fault occurs, filesystems may call this helper in their fault
 * or mkwrite handler for DAX files. Assumes the caller has done all the
 * necessary locking for the page fault to proceed successfully.
 */
int iomap_dax_fault(struct vm_area_struct *vma, struct vm_fault *vmf,
			struct iomap_ops *ops)
{
	struct address_space *mapping = vma->vm_file->f_mapping;
	struct inode *inode = mapping->host;
	unsigned long vaddr = (unsigned long)vmf->virtual_address;
	loff_t pos = (loff_t)vmf->pgoff << PAGE_SHIFT;
	sector_t sector;
	struct iomap iomap = { 0 };
	unsigned flags = 0;
	int error, major = 0;
	void *entry;

	/*
	 * Check whether offset isn't beyond end of file now. Caller is supposed
	 * to hold locks serializing us with truncate / punch hole so this is
	 * a reliable test.
	 */
	if (pos >= i_size_read(inode))
		return VM_FAULT_SIGBUS;

	entry = grab_mapping_entry(mapping, vmf->pgoff);
	if (IS_ERR(entry)) {
		error = PTR_ERR(entry);
		goto out;
	}

	if ((vmf->flags & FAULT_FLAG_WRITE) && !vmf->cow_page)
		flags |= IOMAP_WRITE;

	/*
	 * Note that we don't bother to use iomap_apply here: DAX required
	 * the file system block size to be equal the page size, which means
	 * that we never have to deal with more than a single extent here.
	 */
	error = ops->iomap_begin(inode, pos, PAGE_SIZE, flags, &iomap);
	if (error)
		goto unlock_entry;
	if (WARN_ON_ONCE(iomap.offset + iomap.length < pos + PAGE_SIZE)) {
		error = -EIO;		/* fs corruption? */
		goto unlock_entry;
	}

	sector = iomap.blkno + (((pos & PAGE_MASK) - iomap.offset) >> 9);

	if (vmf->cow_page) {
		switch (iomap.type) {
		case IOMAP_HOLE:
		case IOMAP_UNWRITTEN:
			clear_user_highpage(vmf->cow_page, vaddr);
			break;
		case IOMAP_MAPPED:
			error = copy_user_dax(iomap.bdev, sector, PAGE_SIZE,
					vmf->cow_page, vaddr);
			break;
		default:
			WARN_ON_ONCE(1);
			error = -EIO;
			break;
		}

		if (error)
			goto unlock_entry;
		if (!radix_tree_exceptional_entry(entry)) {
			vmf->page = entry;
			return VM_FAULT_LOCKED;
		}
		vmf->entry = entry;
		return VM_FAULT_DAX_LOCKED;
	}

	switch (iomap.type) {
	case IOMAP_MAPPED:
		if (iomap.flags & IOMAP_F_NEW) {
			count_vm_event(PGMAJFAULT);
			mem_cgroup_count_vm_event(vma->vm_mm, PGMAJFAULT);
			major = VM_FAULT_MAJOR;
		}
		error = dax_insert_mapping(mapping, iomap.bdev, sector,
				PAGE_SIZE, &entry, vma, vmf);
		break;
	case IOMAP_UNWRITTEN:
	case IOMAP_HOLE:
		if (!(vmf->flags & FAULT_FLAG_WRITE))
			return dax_load_hole(mapping, entry, vmf);
		/*FALLTHRU*/
	default:
		WARN_ON_ONCE(1);
		error = -EIO;
		break;
	}

 unlock_entry:
	put_locked_mapping_entry(mapping, vmf->pgoff, entry);
 out:
	if (error == -ENOMEM)
		return VM_FAULT_OOM | major;
	/* -EBUSY is fine, somebody else faulted on the same PTE */
	if (error < 0 && error != -EBUSY)
		return VM_FAULT_SIGBUS | major;
	return VM_FAULT_NOPAGE | major;
}
EXPORT_SYMBOL_GPL(iomap_dax_fault);
#endif /* CONFIG_FS_IOMAP */