// SPDX-License-Identifier: GPL-2.0-only
/*
 * fs/dax.c - Direct Access filesystem code
 * Copyright (c) 2013-2014 Intel Corporation
 * Author: Matthew Wilcox <matthew.r.wilcox@intel.com>
 * Author: Ross Zwisler <ross.zwisler@linux.intel.com>
 */

#include <linux/atomic.h>
#include <linux/blkdev.h>
#include <linux/buffer_head.h>
#include <linux/dax.h>
#include <linux/fs.h>
#include <linux/highmem.h>
#include <linux/memcontrol.h>
#include <linux/mm.h>
#include <linux/mutex.h>
#include <linux/pagevec.h>
#include <linux/sched.h>
#include <linux/sched/signal.h>
#include <linux/uio.h>
#include <linux/vmstat.h>
#include <linux/pfn_t.h>
#include <linux/sizes.h>
#include <linux/mmu_notifier.h>
#include <linux/iomap.h>
#include <linux/rmap.h>
#include <asm/pgalloc.h>

#define CREATE_TRACE_POINTS
#include <trace/events/fs_dax.h>
/* We choose 4096 entries - same as per-zone page wait tables */
#define DAX_WAIT_TABLE_BITS 12
#define DAX_WAIT_TABLE_ENTRIES (1 << DAX_WAIT_TABLE_BITS)

/* The 'colour' (ie low bits) within a PMD of a page offset. */
#define PG_PMD_COLOUR	((PMD_SIZE >> PAGE_SHIFT) - 1)
#define PG_PMD_NR	(PMD_SIZE >> PAGE_SHIFT)

static wait_queue_head_t wait_table[DAX_WAIT_TABLE_ENTRIES];

static int __init init_dax_wait_table(void)
{
	int i;

	for (i = 0; i < DAX_WAIT_TABLE_ENTRIES; i++)
		init_waitqueue_head(wait_table + i);
	return 0;
}
fs_initcall(init_dax_wait_table);
/*
 * DAX pagecache entries use XArray value entries so they can't be mistaken
 * for pages.  We use one bit for locking, one bit for the entry size (PMD)
 * and two more to tell us if the entry is a zero page or an empty entry that
 * is just used for locking.  In total four special bits.
 *
 * If the PMD bit isn't set the entry has size PAGE_SIZE, and if the ZERO_PAGE
 * and EMPTY bits aren't set the entry is a normal DAX entry with a filesystem
 * block allocation.
 */
#define DAX_SHIFT	(4)
#define DAX_LOCKED	(1UL << 0)
#define DAX_PMD		(1UL << 1)
#define DAX_ZERO_PAGE	(1UL << 2)
#define DAX_EMPTY	(1UL << 3)
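
/*
 * Worked example (illustrative only): for pfn 0x1234, a locked PMD-sized
 * zero-page entry would be encoded as the value
 *
 *	xa_mk_value((0x1234UL << DAX_SHIFT) |
 *		    DAX_LOCKED | DAX_PMD | DAX_ZERO_PAGE)
 *
 * i.e. the pfn lives above DAX_SHIFT and the four flag bits sit below it.
 */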
static unsigned long dax_to_pfn(void *entry)
{
	return xa_to_value(entry) >> DAX_SHIFT;
}

static void *dax_make_entry(pfn_t pfn, unsigned long flags)
{
	return xa_mk_value(flags | (pfn_t_to_pfn(pfn) << DAX_SHIFT));
}

static bool dax_is_locked(void *entry)
{
	return xa_to_value(entry) & DAX_LOCKED;
}

static unsigned int dax_entry_order(void *entry)
{
	if (xa_to_value(entry) & DAX_PMD)
		return PMD_ORDER;
	return 0;
}

static unsigned long dax_is_pmd_entry(void *entry)
{
	return xa_to_value(entry) & DAX_PMD;
}

static bool dax_is_pte_entry(void *entry)
{
	return !(xa_to_value(entry) & DAX_PMD);
}

static int dax_is_zero_entry(void *entry)
{
	return xa_to_value(entry) & DAX_ZERO_PAGE;
}

static int dax_is_empty_entry(void *entry)
{
	return xa_to_value(entry) & DAX_EMPTY;
}
/*
 * true if the entry that was found is of a smaller order than the entry
 * we were looking for
 */
static bool dax_is_conflict(void *entry)
{
	return entry == XA_RETRY_ENTRY;
}
/*
 * DAX page cache entry locking
 */
struct exceptional_entry_key {
	struct xarray *xa;
	pgoff_t entry_start;
};

struct wait_exceptional_entry_queue {
	wait_queue_entry_t wait;
	struct exceptional_entry_key key;
};

/**
 * enum dax_wake_mode: waitqueue wakeup behaviour
 * @WAKE_ALL: wake all waiters in the waitqueue
 * @WAKE_NEXT: wake only the first waiter in the waitqueue
 */
enum dax_wake_mode {
	WAKE_ALL,
	WAKE_NEXT,
};
static wait_queue_head_t *dax_entry_waitqueue(struct xa_state *xas,
		void *entry, struct exceptional_entry_key *key)
{
	unsigned long hash;
	unsigned long index = xas->xa_index;

	/*
	 * If 'entry' is a PMD, align the 'index' that we use for the wait
	 * queue to the start of that PMD.  This ensures that all offsets in
	 * the range covered by the PMD map to the same bit lock.
	 */
	if (dax_is_pmd_entry(entry))
		index &= ~PG_PMD_COLOUR;
	key->xa = xas->xa;
	key->entry_start = index;

	hash = hash_long((unsigned long)xas->xa ^ index, DAX_WAIT_TABLE_BITS);
	return wait_table + hash;
}
static int wake_exceptional_entry_func(wait_queue_entry_t *wait,
		unsigned int mode, int sync, void *keyp)
{
	struct exceptional_entry_key *key = keyp;
	struct wait_exceptional_entry_queue *ewait =
		container_of(wait, struct wait_exceptional_entry_queue, wait);

	if (key->xa != ewait->key.xa ||
	    key->entry_start != ewait->key.entry_start)
		return 0;
	return autoremove_wake_function(wait, mode, sync, NULL);
}
/*
 * @entry may no longer be the entry at the index in the mapping.
 * The important information it's conveying is whether the entry at
 * this index used to be a PMD entry.
 */
static void dax_wake_entry(struct xa_state *xas, void *entry,
			   enum dax_wake_mode mode)
{
	struct exceptional_entry_key key;
	wait_queue_head_t *wq;

	wq = dax_entry_waitqueue(xas, entry, &key);

	/*
	 * Checking for locked entry and prepare_to_wait_exclusive() happens
	 * under the i_pages lock, ditto for entry handling in our callers.
	 * So at this point all tasks that could have seen our entry locked
	 * must be in the waitqueue and the following check will see them.
	 */
	if (waitqueue_active(wq))
		__wake_up(wq, TASK_NORMAL, mode == WAKE_ALL ? 0 : 1, &key);
}
/*
 * Look up entry in page cache, wait for it to become unlocked if it
 * is a DAX entry and return it.  The caller must subsequently call
 * put_unlocked_entry() if it did not lock the entry or dax_unlock_entry()
 * if it did.  The entry returned may have a larger order than @order.
 * If @order is larger than the order of the entry found in i_pages, this
 * function returns a dax_is_conflict entry.
 *
 * Must be called with the i_pages lock held.
 */
static void *get_unlocked_entry(struct xa_state *xas, unsigned int order)
{
	void *entry;
	struct wait_exceptional_entry_queue ewait;
	wait_queue_head_t *wq;

	init_wait(&ewait.wait);
	ewait.wait.func = wake_exceptional_entry_func;

	for (;;) {
		entry = xas_find_conflict(xas);
		if (!entry || WARN_ON_ONCE(!xa_is_value(entry)))
			return entry;
		if (dax_entry_order(entry) < order)
			return XA_RETRY_ENTRY;
		if (!dax_is_locked(entry))
			return entry;

		wq = dax_entry_waitqueue(xas, entry, &ewait.key);
		prepare_to_wait_exclusive(wq, &ewait.wait,
					  TASK_UNINTERRUPTIBLE);
		xas_unlock_irq(xas);
		xas_reset(xas);
		schedule();
		finish_wait(wq, &ewait.wait);
		xas_lock_irq(xas);
	}
}
/*
 * The only thing keeping the address space around is the i_pages lock
 * (it's cycled in clear_inode() after removing the entries from i_pages).
 * After we call xas_unlock_irq(), we cannot touch xas->xa.
 */
static void wait_entry_unlocked(struct xa_state *xas, void *entry)
{
	struct wait_exceptional_entry_queue ewait;
	wait_queue_head_t *wq;

	init_wait(&ewait.wait);
	ewait.wait.func = wake_exceptional_entry_func;

	wq = dax_entry_waitqueue(xas, entry, &ewait.key);
	/*
	 * Unlike get_unlocked_entry() there is no guarantee that this
	 * path ever successfully retrieves an unlocked entry before an
	 * inode dies. Perform a non-exclusive wait in case this path
	 * never successfully performs its own wake up.
	 */
	prepare_to_wait(wq, &ewait.wait, TASK_UNINTERRUPTIBLE);
	xas_unlock_irq(xas);
	schedule();
	finish_wait(wq, &ewait.wait);
}
static void put_unlocked_entry(struct xa_state *xas, void *entry,
			       enum dax_wake_mode mode)
{
	if (entry && !dax_is_conflict(entry))
		dax_wake_entry(xas, entry, mode);
}
/*
 * We used the xa_state to get the entry, but then we locked the entry and
 * dropped the xa_lock, so we know the xa_state is stale and must be reset
 * before use.
 */
static void dax_unlock_entry(struct xa_state *xas, void *entry)
{
	void *old;

	BUG_ON(dax_is_locked(entry));
	xas_reset(xas);
	xas_lock_irq(xas);
	old = xas_store(xas, entry);
	xas_unlock_irq(xas);
	BUG_ON(!dax_is_locked(old));
	dax_wake_entry(xas, entry, WAKE_NEXT);
}
/*
 * Return: The entry stored at this location before it was locked.
 */
static void *dax_lock_entry(struct xa_state *xas, void *entry)
{
	unsigned long v = xa_to_value(entry);

	return xas_store(xas, xa_mk_value(v | DAX_LOCKED));
}
static unsigned long dax_entry_size(void *entry)
{
	if (dax_is_zero_entry(entry))
		return 0;
	else if (dax_is_empty_entry(entry))
		return 0;
	else if (dax_is_pmd_entry(entry))
		return PMD_SIZE;
	else
		return PAGE_SIZE;
}

static unsigned long dax_end_pfn(void *entry)
{
	return dax_to_pfn(entry) + dax_entry_size(entry) / PAGE_SIZE;
}
/*
 * Iterate through all mapped pfns represented by an entry, i.e. skip
 * 'empty' and 'zero' entries.
 */
#define for_each_mapped_pfn(entry, pfn) \
	for (pfn = dax_to_pfn(entry); \
			pfn < dax_end_pfn(entry); pfn++)
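
/*
 * Example use (illustrative): counting the pages behind an entry.
 *
 *	unsigned long pfn, nr = 0;
 *
 *	for_each_mapped_pfn(entry, pfn)
 *		nr++;
 *
 * For zero and empty entries dax_entry_size() is 0, so the loop body
 * never runs.
 */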
static inline bool dax_page_is_shared(struct page *page)
{
	return page->mapping == PAGE_MAPPING_DAX_SHARED;
}

/*
 * Set the page->mapping with PAGE_MAPPING_DAX_SHARED flag, increase the
 * refcount.
 */
static inline void dax_page_share_get(struct page *page)
{
	if (page->mapping != PAGE_MAPPING_DAX_SHARED) {
		/*
		 * Reset the index if the page was already mapped
		 * regularly before.
		 */
		if (page->mapping)
			page->share = 1;
		page->mapping = PAGE_MAPPING_DAX_SHARED;
	}
	page->share++;
}

static inline unsigned long dax_page_share_put(struct page *page)
{
	return --page->share;
}
/*
 * When it is called in dax_insert_entry(), the shared flag will indicate
 * whether this entry is shared by multiple files.  If so, set the page->mapping
 * to PAGE_MAPPING_DAX_SHARED, and use page->share as a refcount.
 */
static void dax_associate_entry(void *entry, struct address_space *mapping,
		struct vm_area_struct *vma, unsigned long address, bool shared)
{
	unsigned long size = dax_entry_size(entry), pfn, index;
	int i = 0;

	if (IS_ENABLED(CONFIG_FS_DAX_LIMITED))
		return;

	index = linear_page_index(vma, address & ~(size - 1));
	for_each_mapped_pfn(entry, pfn) {
		struct page *page = pfn_to_page(pfn);

		if (shared) {
			dax_page_share_get(page);
		} else {
			WARN_ON_ONCE(page->mapping);
			page->mapping = mapping;
			page->index = index + i++;
		}
	}
}
static void dax_disassociate_entry(void *entry, struct address_space *mapping,
		bool trunc)
{
	unsigned long pfn;

	if (IS_ENABLED(CONFIG_FS_DAX_LIMITED))
		return;

	for_each_mapped_pfn(entry, pfn) {
		struct page *page = pfn_to_page(pfn);

		WARN_ON_ONCE(trunc && page_ref_count(page) > 1);
		if (dax_page_is_shared(page)) {
			/* keep the shared flag if this page is still shared */
			if (dax_page_share_put(page) > 0)
				continue;
		} else
			WARN_ON_ONCE(page->mapping && page->mapping != mapping);
		page->mapping = NULL;
		page->index = 0;
	}
}
static struct page *dax_busy_page(void *entry)
{
	unsigned long pfn;

	for_each_mapped_pfn(entry, pfn) {
		struct page *page = pfn_to_page(pfn);

		if (page_ref_count(page) > 1)
			return page;
	}
	return NULL;
}
/**
 * dax_lock_folio - Lock the DAX entry corresponding to a folio
 * @folio: The folio whose entry we want to lock
 *
 * Context: Process context.
 * Return: A cookie to pass to dax_unlock_folio() or 0 if the entry could
 * not be locked.
 */
dax_entry_t dax_lock_folio(struct folio *folio)
{
	XA_STATE(xas, NULL, 0);
	void *entry;

	/* Ensure folio->mapping isn't freed while we look at it */
	rcu_read_lock();
	for (;;) {
		struct address_space *mapping = READ_ONCE(folio->mapping);

		entry = NULL;
		if (!mapping || !dax_mapping(mapping))
			break;

		/*
		 * In the device-dax case there's no need to lock, a
		 * struct dev_pagemap pin is sufficient to keep the
		 * inode alive, and we assume we have dev_pagemap pin
		 * otherwise we would not have a valid pfn_to_page()
		 * translation.
		 */
		entry = (void *)~0UL;
		if (S_ISCHR(mapping->host->i_mode))
			break;

		xas.xa = &mapping->i_pages;
		xas_lock_irq(&xas);
		if (mapping != folio->mapping) {
			xas_unlock_irq(&xas);
			continue;
		}
		xas_set(&xas, folio->index);
		entry = xas_load(&xas);
		if (dax_is_locked(entry)) {
			rcu_read_unlock();
			wait_entry_unlocked(&xas, entry);
			rcu_read_lock();
			continue;
		}
		dax_lock_entry(&xas, entry);
		xas_unlock_irq(&xas);
		break;
	}
	rcu_read_unlock();
	return (dax_entry_t)entry;
}
void dax_unlock_folio(struct folio *folio, dax_entry_t cookie)
{
	struct address_space *mapping = folio->mapping;
	XA_STATE(xas, &mapping->i_pages, folio->index);

	if (S_ISCHR(mapping->host->i_mode))
		return;

	dax_unlock_entry(&xas, (void *)cookie);
}
/*
 * dax_lock_mapping_entry - Lock the DAX entry corresponding to a mapping
 * @mapping: the file's mapping whose entry we want to lock
 * @index: the offset within this file
 * @page: output the dax page corresponding to this dax entry
 *
 * Return: A cookie to pass to dax_unlock_mapping_entry() or 0 if the entry
 * could not be locked.
 */
dax_entry_t dax_lock_mapping_entry(struct address_space *mapping, pgoff_t index,
		struct page **page)
{
	XA_STATE(xas, NULL, 0);
	void *entry;

	rcu_read_lock();
	for (;;) {
		entry = NULL;
		if (!dax_mapping(mapping))
			break;

		xas.xa = &mapping->i_pages;
		xas_lock_irq(&xas);
		xas_set(&xas, index);
		entry = xas_load(&xas);
		if (dax_is_locked(entry)) {
			rcu_read_unlock();
			wait_entry_unlocked(&xas, entry);
			rcu_read_lock();
			continue;
		}
		if (!entry ||
		    dax_is_zero_entry(entry) || dax_is_empty_entry(entry)) {
			/*
			 * Because we look the entry up by mapping and index,
			 * it may not have been inserted yet, or may even be
			 * a zero/empty entry.  We don't treat this as an
			 * error case.  So, return a special value and do
			 * not output @page.
			 */
			entry = (void *)~0UL;
		} else {
			*page = pfn_to_page(dax_to_pfn(entry));
			dax_lock_entry(&xas, entry);
		}
		xas_unlock_irq(&xas);
		break;
	}
	rcu_read_unlock();
	return (dax_entry_t)entry;
}
void dax_unlock_mapping_entry(struct address_space *mapping, pgoff_t index,
		dax_entry_t cookie)
{
	XA_STATE(xas, &mapping->i_pages, index);

	if (cookie == ~0UL)
		return;

	dax_unlock_entry(&xas, (void *)cookie);
}
/*
 * Find page cache entry at given index. If it is a DAX entry, return it
 * with the entry locked. If the page cache doesn't contain an entry at
 * that index, add a locked empty entry.
 *
 * When requesting an entry with size DAX_PMD, grab_mapping_entry() will
 * either return that locked entry or will return VM_FAULT_FALLBACK.
 * This will happen if there are any PTE entries within the PMD range
 * that we are requesting.
 *
 * We always favor PTE entries over PMD entries. There isn't a flow where we
 * evict PTE entries in order to 'upgrade' them to a PMD entry.  A PMD
 * insertion will fail if it finds any PTE entries already in the tree, and a
 * PTE insertion will cause an existing PMD entry to be unmapped and
 * downgraded to PTE entries.  This happens for both PMD zero pages as
 * well as PMD empty entries.
 *
 * The exception to this downgrade path is for PMD entries that have
 * real storage backing them.  We will leave these real PMD entries in
 * the tree, and PTE writes will simply dirty the entire PMD entry.
 *
 * Note: Unlike filemap_fault() we don't honor FAULT_FLAG_RETRY flags. For
 * persistent memory the benefit is doubtful. We can add that later if we can
 * show it helps.
 *
 * On error, this function does not return an ERR_PTR.  Instead it returns
 * a VM_FAULT code, encoded as an xarray internal entry.  The ERR_PTR values
 * overlap with xarray value entries.
 */
static void *grab_mapping_entry(struct xa_state *xas,
		struct address_space *mapping, unsigned int order)
{
	unsigned long index = xas->xa_index;
	bool pmd_downgrade;	/* splitting PMD entry into PTE entries? */
	void *entry;

retry:
	pmd_downgrade = false;
	xas_lock_irq(xas);
	entry = get_unlocked_entry(xas, order);

	if (entry) {
		if (dax_is_conflict(entry))
			goto fallback;
		if (!xa_is_value(entry)) {
			xas_set_err(xas, -EIO);
			goto out_unlock;
		}

		if (order == 0) {
			if (dax_is_pmd_entry(entry) &&
			    (dax_is_zero_entry(entry) ||
			     dax_is_empty_entry(entry))) {
				pmd_downgrade = true;
			}
		}
	}

	if (pmd_downgrade) {
		/*
		 * Make sure 'entry' remains valid while we drop
		 * the i_pages lock.
		 */
		dax_lock_entry(xas, entry);

		/*
		 * Besides huge zero pages the only other thing that gets
		 * downgraded are empty entries which don't need to be
		 * unmapped.
		 */
		if (dax_is_zero_entry(entry)) {
			xas_unlock_irq(xas);
			unmap_mapping_pages(mapping,
					xas->xa_index & ~PG_PMD_COLOUR,
					PG_PMD_NR, false);
			xas_reset(xas);
			xas_lock_irq(xas);
		}

		dax_disassociate_entry(entry, mapping, false);
		xas_store(xas, NULL);	/* undo the PMD join */
		dax_wake_entry(xas, entry, WAKE_ALL);
		mapping->nrpages -= PG_PMD_NR;
		entry = NULL;
		xas_set(xas, index);
	}

	if (entry) {
		dax_lock_entry(xas, entry);
	} else {
		unsigned long flags = DAX_EMPTY;

		if (order > 0)
			flags |= DAX_PMD;
		entry = dax_make_entry(pfn_to_pfn_t(0), flags);
		dax_lock_entry(xas, entry);
		if (xas_error(xas))
			goto out_unlock;
		mapping->nrpages += 1UL << order;
	}

out_unlock:
	xas_unlock_irq(xas);
	if (xas_nomem(xas, mapping_gfp_mask(mapping) & ~__GFP_HIGHMEM))
		goto retry;
	if (xas->xa_node == XA_ERROR(-ENOMEM))
		return xa_mk_internal(VM_FAULT_OOM);
	if (xas_error(xas))
		return xa_mk_internal(VM_FAULT_SIGBUS);
	return entry;
fallback:
	xas_unlock_irq(xas);
	return xa_mk_internal(VM_FAULT_FALLBACK);
}
/**
 * dax_layout_busy_page_range - find first pinned page in @mapping
 * @mapping: address space to scan for a page with ref count > 1
 * @start: Starting offset. Page containing 'start' is included.
 * @end: End offset. Page containing 'end' is included. If 'end' is LLONG_MAX,
 *       pages from 'start' till the end of file are included.
 *
 * DAX requires ZONE_DEVICE mapped pages. These pages are never
 * 'onlined' to the page allocator so they are considered idle when
 * page->count == 1. A filesystem uses this interface to determine if
 * any page in the mapping is busy, i.e. for DMA, or other
 * get_user_pages() usages.
 *
 * It is expected that the filesystem is holding locks to block the
 * establishment of new mappings in this address_space. I.e. it expects
 * to be able to run unmap_mapping_range() and subsequently not race
 * mapping_mapped() becoming true.
 */
struct page *dax_layout_busy_page_range(struct address_space *mapping,
					loff_t start, loff_t end)
{
	void *entry;
	unsigned int scanned = 0;
	struct page *page = NULL;
	pgoff_t start_idx = start >> PAGE_SHIFT;
	pgoff_t end_idx;
	XA_STATE(xas, &mapping->i_pages, start_idx);

	/*
	 * In the 'limited' case get_user_pages() for dax is disabled.
	 */
	if (IS_ENABLED(CONFIG_FS_DAX_LIMITED))
		return NULL;

	if (!dax_mapping(mapping) || !mapping_mapped(mapping))
		return NULL;

	/* If end == LLONG_MAX, all pages from start to till end of file */
	if (end == LLONG_MAX)
		end_idx = ULONG_MAX;
	else
		end_idx = end >> PAGE_SHIFT;
	/*
	 * If we race get_user_pages_fast() here either we'll see the
	 * elevated page count in the iteration and wait, or
	 * get_user_pages_fast() will see that the page it took a reference
	 * against is no longer mapped in the page tables and bail to the
	 * get_user_pages() slow path.  The slow path is protected by
	 * pte_lock() and pmd_lock(). New references are not taken without
	 * holding those locks, and unmap_mapping_pages() will not zero the
	 * pte or pmd without holding the respective lock, so we are
	 * guaranteed to either see new references or prevent new
	 * references from being established.
	 */
	unmap_mapping_pages(mapping, start_idx, end_idx - start_idx + 1, 0);

	xas_lock_irq(&xas);
	xas_for_each(&xas, entry, end_idx) {
		if (WARN_ON_ONCE(!xa_is_value(entry)))
			continue;
		if (unlikely(dax_is_locked(entry)))
			entry = get_unlocked_entry(&xas, 0);
		if (entry)
			page = dax_busy_page(entry);
		put_unlocked_entry(&xas, entry, WAKE_NEXT);
		if (page)
			break;
		if (++scanned % XA_CHECK_SCHED)
			continue;

		xas_pause(&xas);
		xas_unlock_irq(&xas);
		cond_resched();
		xas_lock_irq(&xas);
	}
	xas_unlock_irq(&xas);
	return page;
}
EXPORT_SYMBOL_GPL(dax_layout_busy_page_range);
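
/*
 * Usage sketch (illustrative, loosely modelled on how XFS breaks DAX
 * layouts before truncation; not part of this file): a filesystem looks
 * for a busy page and waits for its reference count to drop before
 * freeing the underlying blocks.
 *
 *	struct page *page = dax_layout_busy_page(inode->i_mapping);
 *
 *	if (page) {
 *		... drop locks, wait until page_ref_count(page) == 1,
 *		    then retake locks and retry the scan ...
 *	}
 */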
struct page *dax_layout_busy_page(struct address_space *mapping)
{
	return dax_layout_busy_page_range(mapping, 0, LLONG_MAX);
}
EXPORT_SYMBOL_GPL(dax_layout_busy_page);
static int __dax_invalidate_entry(struct address_space *mapping,
					  pgoff_t index, bool trunc)
{
	XA_STATE(xas, &mapping->i_pages, index);
	int ret = 0;
	void *entry;

	xas_lock_irq(&xas);
	entry = get_unlocked_entry(&xas, 0);
	if (!entry || WARN_ON_ONCE(!xa_is_value(entry)))
		goto out;
	if (!trunc &&
	    (xas_get_mark(&xas, PAGECACHE_TAG_DIRTY) ||
	     xas_get_mark(&xas, PAGECACHE_TAG_TOWRITE)))
		goto out;
	dax_disassociate_entry(entry, mapping, trunc);
	xas_store(&xas, NULL);
	mapping->nrpages -= 1UL << dax_entry_order(entry);
	ret = 1;
out:
	put_unlocked_entry(&xas, entry, WAKE_ALL);
	xas_unlock_irq(&xas);
	return ret;
}
static int __dax_clear_dirty_range(struct address_space *mapping,
		pgoff_t start, pgoff_t end)
{
	XA_STATE(xas, &mapping->i_pages, start);
	unsigned int scanned = 0;
	void *entry;

	xas_lock_irq(&xas);
	xas_for_each(&xas, entry, end) {
		entry = get_unlocked_entry(&xas, 0);
		xas_clear_mark(&xas, PAGECACHE_TAG_DIRTY);
		xas_clear_mark(&xas, PAGECACHE_TAG_TOWRITE);
		put_unlocked_entry(&xas, entry, WAKE_NEXT);

		if (++scanned % XA_CHECK_SCHED)
			continue;

		xas_pause(&xas);
		xas_unlock_irq(&xas);
		cond_resched();
		xas_lock_irq(&xas);
	}
	xas_unlock_irq(&xas);

	return 0;
}
/*
 * Delete DAX entry at @index from @mapping.  Wait for it
 * to be unlocked before deleting it.
 */
int dax_delete_mapping_entry(struct address_space *mapping, pgoff_t index)
{
	int ret = __dax_invalidate_entry(mapping, index, true);

	/*
	 * This gets called from truncate / punch_hole path. As such, the caller
	 * must hold locks protecting against concurrent modifications of the
	 * page cache (usually fs-private i_mmap_sem for writing). Since the
	 * caller has seen a DAX entry for this index, we better find it
	 * at that index as well...
	 */
	WARN_ON_ONCE(!ret);
	return ret;
}
/*
 * Invalidate DAX entry if it is clean.
 */
int dax_invalidate_mapping_entry_sync(struct address_space *mapping,
				      pgoff_t index)
{
	return __dax_invalidate_entry(mapping, index, false);
}
static pgoff_t dax_iomap_pgoff(const struct iomap *iomap, loff_t pos)
{
	return PHYS_PFN(iomap->addr + (pos & PAGE_MASK) - iomap->offset);
}
static int copy_cow_page_dax(struct vm_fault *vmf, const struct iomap_iter *iter)
{
	pgoff_t pgoff = dax_iomap_pgoff(&iter->iomap, iter->pos);
	void *vto, *kaddr;
	long rc;
	int id;

	id = dax_read_lock();
	rc = dax_direct_access(iter->iomap.dax_dev, pgoff, 1, DAX_ACCESS,
				&kaddr, NULL);
	if (rc < 0) {
		dax_read_unlock(id);
		return rc;
	}
	vto = kmap_atomic(vmf->cow_page);
	copy_user_page(vto, kaddr, vmf->address, vmf->cow_page);
	kunmap_atomic(vto);
	dax_read_unlock(id);
	return 0;
}
/*
 * MAP_SYNC on a dax mapping guarantees dirty metadata is
 * flushed on write-faults (non-cow), but not read-faults.
 */
static bool dax_fault_is_synchronous(const struct iomap_iter *iter,
		struct vm_area_struct *vma)
{
	return (iter->flags & IOMAP_WRITE) && (vma->vm_flags & VM_SYNC) &&
		(iter->iomap.flags & IOMAP_F_DIRTY);
}
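
/*
 * For reference (illustrative, userspace side): a mapping gains VM_SYNC via
 *
 *	addr = mmap(NULL, len, PROT_READ | PROT_WRITE,
 *		    MAP_SHARED_VALIDATE | MAP_SYNC, fd, 0);
 *
 * after which any metadata needed to reach data written through a write
 * fault is durable by the time the fault completes; userspace still has
 * to flush its own CPU stores (e.g. with CLWB) to persist the data itself.
 */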
/*
 * By this point grab_mapping_entry() has ensured that we have a locked entry
 * of the appropriate size so we don't have to worry about downgrading PMDs to
 * PTEs.  If we happen to be trying to insert a PTE and there is a PMD
 * already in the tree, we will skip the insertion and just dirty the PMD as
 * appropriate.
 */
static void *dax_insert_entry(struct xa_state *xas, struct vm_fault *vmf,
		const struct iomap_iter *iter, void *entry, pfn_t pfn,
		unsigned long flags)
{
	struct address_space *mapping = vmf->vma->vm_file->f_mapping;
	void *new_entry = dax_make_entry(pfn, flags);
	bool write = iter->flags & IOMAP_WRITE;
	bool dirty = write && !dax_fault_is_synchronous(iter, vmf->vma);
	bool shared = iter->iomap.flags & IOMAP_F_SHARED;

	if (dirty)
		__mark_inode_dirty(mapping->host, I_DIRTY_PAGES);

	if (shared || (dax_is_zero_entry(entry) && !(flags & DAX_ZERO_PAGE))) {
		unsigned long index = xas->xa_index;
		/* we are replacing a zero page with block mapping */
		if (dax_is_pmd_entry(entry))
			unmap_mapping_pages(mapping, index & ~PG_PMD_COLOUR,
					PG_PMD_NR, false);
		else /* pte entry */
			unmap_mapping_pages(mapping, index, 1, false);
	}

	xas_reset(xas);
	xas_lock_irq(xas);
	if (shared || dax_is_zero_entry(entry) || dax_is_empty_entry(entry)) {
		void *old;

		dax_disassociate_entry(entry, mapping, false);
		dax_associate_entry(new_entry, mapping, vmf->vma, vmf->address,
				shared);
		/*
		 * Only swap our new entry into the page cache if the current
		 * entry is a zero page or an empty entry.  If a normal PTE or
		 * PMD entry is already in the cache, we leave it alone.  This
		 * means that if we are trying to insert a PTE and the
		 * existing entry is a PMD, we will just leave the PMD in the
		 * tree and dirty it if necessary.
		 */
		old = dax_lock_entry(xas, new_entry);
		WARN_ON_ONCE(old != xa_mk_value(xa_to_value(entry) |
					DAX_LOCKED));
		entry = new_entry;
	} else {
		xas_load(xas);	/* Walk the xa_state */
	}

	if (dirty)
		xas_set_mark(xas, PAGECACHE_TAG_DIRTY);

	if (write && shared)
		xas_set_mark(xas, PAGECACHE_TAG_TOWRITE);

	xas_unlock_irq(xas);
	return entry;
}
static int dax_writeback_one(struct xa_state *xas, struct dax_device *dax_dev,
		struct address_space *mapping, void *entry)
{
	unsigned long pfn, index, count, end;
	long ret = 0;
	struct vm_area_struct *vma;

	/*
	 * A page got tagged dirty in DAX mapping? Something is seriously
	 * wrong.
	 */
	if (WARN_ON(!xa_is_value(entry)))
		return -EIO;

	if (unlikely(dax_is_locked(entry))) {
		void *old_entry = entry;

		entry = get_unlocked_entry(xas, 0);

		/* Entry got punched out / reallocated? */
		if (!entry || WARN_ON_ONCE(!xa_is_value(entry)))
			goto put_unlocked;
		/*
		 * Entry got reallocated elsewhere? No need to writeback.
		 * We have to compare pfns as we must not bail out due to
		 * difference in lockbit or entry type.
		 */
		if (dax_to_pfn(old_entry) != dax_to_pfn(entry))
			goto put_unlocked;
		if (WARN_ON_ONCE(dax_is_empty_entry(entry) ||
					dax_is_zero_entry(entry))) {
			ret = -EIO;
			goto put_unlocked;
		}

		/* Another fsync thread may have already done this entry */
		if (!xas_get_mark(xas, PAGECACHE_TAG_TOWRITE))
			goto put_unlocked;
	}

	/* Lock the entry to serialize with page faults */
	dax_lock_entry(xas, entry);

	/*
	 * We can clear the tag now but we have to be careful so that concurrent
	 * dax_writeback_one() calls for the same index cannot finish before we
	 * actually flush the caches. This is achieved as the calls will look
	 * at the entry only under the i_pages lock and once they do that
	 * they will see the entry locked and wait for it to unlock.
	 */
	xas_clear_mark(xas, PAGECACHE_TAG_TOWRITE);
	xas_unlock_irq(xas);

	/*
	 * If dax_writeback_mapping_range() was given a wbc->range_start
	 * in the middle of a PMD, the 'index' we use needs to be
	 * aligned to the start of the PMD.
	 * This allows us to flush for PMD_SIZE and not have to worry about
	 * partial PMD writebacks.
	 */
	pfn = dax_to_pfn(entry);
	count = 1UL << dax_entry_order(entry);
	index = xas->xa_index & ~(count - 1);
	end = index + count - 1;

	/* Walk all mappings of a given index of a file and writeprotect them */
	i_mmap_lock_read(mapping);
	vma_interval_tree_foreach(vma, &mapping->i_mmap, index, end) {
		pfn_mkclean_range(pfn, count, index, vma);
		cond_resched();
	}
	i_mmap_unlock_read(mapping);

	dax_flush(dax_dev, page_address(pfn_to_page(pfn)), count * PAGE_SIZE);
	/*
	 * After we have flushed the cache, we can clear the dirty tag. There
	 * cannot be new dirty data in the pfn after the flush has completed as
	 * the pfn mappings are writeprotected and fault waits for mapping
	 * entry lock.
	 */
	xas_reset(xas);
	xas_lock_irq(xas);
	xas_store(xas, entry);
	xas_clear_mark(xas, PAGECACHE_TAG_DIRTY);
	dax_wake_entry(xas, entry, WAKE_NEXT);

	trace_dax_writeback_one(mapping->host, index, count);
	return ret;

 put_unlocked:
	put_unlocked_entry(xas, entry, WAKE_NEXT);
	return ret;
}
/*
 * Flush the mapping to the persistent domain within the byte range of [start,
 * end]. This is required by data integrity operations to ensure file data is
 * on persistent storage prior to completion of the operation.
 */
int dax_writeback_mapping_range(struct address_space *mapping,
		struct dax_device *dax_dev, struct writeback_control *wbc)
{
	XA_STATE(xas, &mapping->i_pages, wbc->range_start >> PAGE_SHIFT);
	struct inode *inode = mapping->host;
	pgoff_t end_index = wbc->range_end >> PAGE_SHIFT;
	void *entry;
	int ret = 0;
	unsigned int scanned = 0;

	if (WARN_ON_ONCE(inode->i_blkbits != PAGE_SHIFT))
		return -EIO;

	if (mapping_empty(mapping) || wbc->sync_mode != WB_SYNC_ALL)
		return 0;

	trace_dax_writeback_range(inode, xas.xa_index, end_index);

	tag_pages_for_writeback(mapping, xas.xa_index, end_index);

	xas_lock_irq(&xas);
	xas_for_each_marked(&xas, entry, end_index, PAGECACHE_TAG_TOWRITE) {
		ret = dax_writeback_one(&xas, dax_dev, mapping, entry);
		if (ret < 0) {
			mapping_set_error(mapping, ret);
			break;
		}
		if (++scanned % XA_CHECK_SCHED)
			continue;

		xas_pause(&xas);
		xas_unlock_irq(&xas);
		cond_resched();
		xas_lock_irq(&xas);
	}
	xas_unlock_irq(&xas);
	trace_dax_writeback_range_done(inode, xas.xa_index, end_index);
	return ret;
}
EXPORT_SYMBOL_GPL(dax_writeback_mapping_range);
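
/*
 * Usage sketch (illustrative, modelled on ext2/ext4; the myfs_* names are
 * hypothetical): a filesystem's ->writepages for a DAX inode reduces to
 *
 *	static int myfs_dax_writepages(struct address_space *mapping,
 *				       struct writeback_control *wbc)
 *	{
 *		return dax_writeback_mapping_range(mapping,
 *				myfs_sb(mapping->host->i_sb)->s_daxdev, wbc);
 *	}
 */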
static int dax_iomap_direct_access(const struct iomap *iomap, loff_t pos,
		size_t size, void **kaddr, pfn_t *pfnp)
{
	pgoff_t pgoff = dax_iomap_pgoff(iomap, pos);
	int id, rc = 0;
	long length;

	id = dax_read_lock();
	length = dax_direct_access(iomap->dax_dev, pgoff, PHYS_PFN(size),
				   DAX_ACCESS, kaddr, pfnp);
	if (length < 0) {
		rc = length;
		goto out;
	}
	if (!pfnp)
		goto out_check_addr;
	rc = -EINVAL;
	if (PFN_PHYS(length) < size)
		goto out;
	if (pfn_t_to_pfn(*pfnp) & (PHYS_PFN(size)-1))
		goto out;
	/* For larger pages we need devmap */
	if (length > 1 && !pfn_t_devmap(*pfnp))
		goto out;
	rc = 0;

out_check_addr:
	if (!kaddr)
		goto out;
	if (!*kaddr)
		rc = -EFAULT;
out:
	dax_read_unlock(id);
	return rc;
}
/**
 * dax_iomap_copy_around - Prepare for an unaligned write to a shared/cow page
 * by copying the data before and after the range to be written.
 * @pos:	address to do copy from.
 * @length:	size of copy operation.
 * @align_size:	aligned w.r.t align_size (either PMD_SIZE or PAGE_SIZE)
 * @srcmap:	iomap srcmap
 * @daddr:	destination address to copy to.
 *
 * This can be called from two places. Either during DAX write fault (page
 * aligned), to copy the length size data to daddr. Or, while doing normal DAX
 * write operation, dax_iomap_iter() might call this to do the copy of either
 * start or end unaligned address. In the latter case the rest of the copy of
 * aligned ranges is taken care by dax_iomap_iter() itself.
 * If the srcmap contains invalid data, such as HOLE and UNWRITTEN, zero the
 * area to make sure no old data remains.
 */
static int dax_iomap_copy_around(loff_t pos, uint64_t length, size_t align_size,
		const struct iomap *srcmap, void *daddr)
{
	loff_t head_off = pos & (align_size - 1);
	size_t size = ALIGN(head_off + length, align_size);
	loff_t end = pos + length;
	loff_t pg_end = round_up(end, align_size);
	/* copy_all is usually in page fault case */
	bool copy_all = head_off == 0 && end == pg_end;
	/* zero the edges if srcmap is a HOLE or IOMAP_UNWRITTEN */
	bool zero_edge = srcmap->flags & IOMAP_F_SHARED ||
			 srcmap->type == IOMAP_UNWRITTEN;
	void *saddr = NULL;
	int ret = 0;

	if (!zero_edge) {
		ret = dax_iomap_direct_access(srcmap, pos, size, &saddr, NULL);
		if (ret)
			return dax_mem2blk_err(ret);
	}

	if (copy_all) {
		if (zero_edge)
			memset(daddr, 0, size);
		else
			ret = copy_mc_to_kernel(daddr, saddr, length);
		goto out;
	}

	/* Copy the head part of the range */
	if (head_off) {
		if (zero_edge)
			memset(daddr, 0, head_off);
		else {
			ret = copy_mc_to_kernel(daddr, saddr, head_off);
			if (ret)
				return -EIO;
		}
	}

	/* Copy the tail part of the range */
	if (end < pg_end) {
		loff_t tail_off = head_off + length;
		loff_t tail_len = pg_end - end;

		if (zero_edge)
			memset(daddr + tail_off, 0, tail_len);
		else {
			ret = copy_mc_to_kernel(daddr + tail_off,
						saddr + tail_off, tail_len);
			if (ret)
				return -EIO;
		}
	}
out:
	if (zero_edge)
		dax_flush(srcmap->dax_dev, daddr, size);
	return ret ? -EIO : 0;
}
/*
 * The user has performed a load from a hole in the file.  Allocating a new
 * page in the file would cause excessive storage usage for workloads with
 * sparse files.  Instead we insert a read-only mapping of the 4k zero page.
 * If this page is ever written to we will re-fault and change the mapping to
 * point to real DAX storage instead.
 */
static vm_fault_t dax_load_hole(struct xa_state *xas, struct vm_fault *vmf,
		const struct iomap_iter *iter, void **entry)
{
	struct inode *inode = iter->inode;
	unsigned long vaddr = vmf->address;
	pfn_t pfn = pfn_to_pfn_t(my_zero_pfn(vaddr));
	vm_fault_t ret;

	*entry = dax_insert_entry(xas, vmf, iter, *entry, pfn, DAX_ZERO_PAGE);

	ret = vmf_insert_mixed(vmf->vma, vaddr, pfn);
	trace_dax_load_hole(inode, vmf, ret);
	return ret;
}
1202 static vm_fault_t
dax_pmd_load_hole(struct xa_state
*xas
, struct vm_fault
*vmf
,
1203 const struct iomap_iter
*iter
, void **entry
)
1205 struct address_space
*mapping
= vmf
->vma
->vm_file
->f_mapping
;
1206 unsigned long pmd_addr
= vmf
->address
& PMD_MASK
;
1207 struct vm_area_struct
*vma
= vmf
->vma
;
1208 struct inode
*inode
= mapping
->host
;
1209 pgtable_t pgtable
= NULL
;
1210 struct folio
*zero_folio
;
1215 zero_folio
= mm_get_huge_zero_folio(vmf
->vma
->vm_mm
);
1217 if (unlikely(!zero_folio
))
1220 pfn
= page_to_pfn_t(&zero_folio
->page
);
1221 *entry
= dax_insert_entry(xas
, vmf
, iter
, *entry
, pfn
,
1222 DAX_PMD
| DAX_ZERO_PAGE
);
1224 if (arch_needs_pgtable_deposit()) {
1225 pgtable
= pte_alloc_one(vma
->vm_mm
);
1227 return VM_FAULT_OOM
;
1230 ptl
= pmd_lock(vmf
->vma
->vm_mm
, vmf
->pmd
);
1231 if (!pmd_none(*(vmf
->pmd
))) {
1237 pgtable_trans_huge_deposit(vma
->vm_mm
, vmf
->pmd
, pgtable
);
1238 mm_inc_nr_ptes(vma
->vm_mm
);
1240 pmd_entry
= mk_pmd(&zero_folio
->page
, vmf
->vma
->vm_page_prot
);
1241 pmd_entry
= pmd_mkhuge(pmd_entry
);
1242 set_pmd_at(vmf
->vma
->vm_mm
, pmd_addr
, vmf
->pmd
, pmd_entry
);
1244 trace_dax_pmd_load_hole(inode
, vmf
, zero_folio
, *entry
);
1245 return VM_FAULT_NOPAGE
;
1249 pte_free(vma
->vm_mm
, pgtable
);
1250 trace_dax_pmd_load_hole_fallback(inode
, vmf
, zero_folio
, *entry
);
1251 return VM_FAULT_FALLBACK
;
1254 static vm_fault_t
dax_pmd_load_hole(struct xa_state
*xas
, struct vm_fault
*vmf
,
1255 const struct iomap_iter
*iter
, void **entry
)
1257 return VM_FAULT_FALLBACK
;
1259 #endif /* CONFIG_FS_DAX_PMD */
static s64 dax_unshare_iter(struct iomap_iter *iter)
{
	struct iomap *iomap = &iter->iomap;
	const struct iomap *srcmap = iomap_iter_srcmap(iter);
	loff_t copy_pos = iter->pos;
	u64 copy_len = iomap_length(iter);
	u32 mod;
	int id = 0;
	s64 ret = 0;
	void *daddr = NULL, *saddr = NULL;

	if (!iomap_want_unshare_iter(iter))
		return iomap_length(iter);

	/*
	 * Extend the file range to be aligned to fsblock/pagesize, because
	 * we need to copy entire blocks, not just the byte range specified.
	 * Invalidate the mapping because we're about to CoW.
	 */
	mod = offset_in_page(copy_pos);
	if (mod) {
		copy_len += mod;
		copy_pos -= mod;
	}

	mod = offset_in_page(copy_pos + copy_len);
	if (mod)
		copy_len += PAGE_SIZE - mod;

	invalidate_inode_pages2_range(iter->inode->i_mapping,
				      copy_pos >> PAGE_SHIFT,
				      (copy_pos + copy_len - 1) >> PAGE_SHIFT);

	id = dax_read_lock();
	ret = dax_iomap_direct_access(iomap, copy_pos, copy_len, &daddr, NULL);
	if (ret < 0)
		goto out_unlock;

	ret = dax_iomap_direct_access(srcmap, copy_pos, copy_len, &saddr, NULL);
	if (ret < 0)
		goto out_unlock;

	if (copy_mc_to_kernel(daddr, saddr, copy_len) == 0)
		ret = iomap_length(iter);
	else
		ret = -EIO;

out_unlock:
	dax_read_unlock(id);
	return dax_mem2blk_err(ret);
}

int dax_file_unshare(struct inode *inode, loff_t pos, loff_t len,
		const struct iomap_ops *ops)
{
	struct iomap_iter iter = {
		.inode		= inode,
		.pos		= pos,
		.flags		= IOMAP_WRITE | IOMAP_UNSHARE | IOMAP_DAX,
	};
	loff_t size = i_size_read(inode);
	int ret;

	if (pos < 0 || pos >= size)
		return 0;

	iter.len = min(len, size - pos);
	while ((ret = iomap_iter(&iter, ops)) > 0)
		iter.processed = dax_unshare_iter(&iter);
	return ret;
}
EXPORT_SYMBOL_GPL(dax_file_unshare);
static int dax_memzero(struct iomap_iter *iter, loff_t pos, size_t size)
{
	const struct iomap *iomap = &iter->iomap;
	const struct iomap *srcmap = iomap_iter_srcmap(iter);
	unsigned offset = offset_in_page(pos);
	pgoff_t pgoff = dax_iomap_pgoff(iomap, pos);
	void *kaddr;
	long ret;

	ret = dax_direct_access(iomap->dax_dev, pgoff, 1, DAX_ACCESS, &kaddr,
				NULL);
	if (ret < 0)
		return dax_mem2blk_err(ret);

	memset(kaddr + offset, 0, size);
	if (iomap->flags & IOMAP_F_SHARED)
		ret = dax_iomap_copy_around(pos, size, PAGE_SIZE, srcmap,
					    kaddr);
	else
		dax_flush(iomap->dax_dev, kaddr + offset, size);
	return ret;
}
static s64 dax_zero_iter(struct iomap_iter *iter, bool *did_zero)
{
	const struct iomap *iomap = &iter->iomap;
	const struct iomap *srcmap = iomap_iter_srcmap(iter);
	loff_t pos = iter->pos;
	u64 length = iomap_length(iter);
	s64 written = 0;

	/* already zeroed?  we're done. */
	if (srcmap->type == IOMAP_HOLE || srcmap->type == IOMAP_UNWRITTEN)
		return length;

	/*
	 * invalidate the pages whose sharing state is to be changed
	 * because of CoW.
	 */
	if (iomap->flags & IOMAP_F_SHARED)
		invalidate_inode_pages2_range(iter->inode->i_mapping,
					      pos >> PAGE_SHIFT,
					      (pos + length - 1) >> PAGE_SHIFT);

	do {
		unsigned offset = offset_in_page(pos);
		unsigned size = min_t(u64, PAGE_SIZE - offset, length);
		pgoff_t pgoff = dax_iomap_pgoff(iomap, pos);
		long rc;
		int id;

		id = dax_read_lock();
		if (IS_ALIGNED(pos, PAGE_SIZE) && size == PAGE_SIZE)
			rc = dax_zero_page_range(iomap->dax_dev, pgoff, 1);
		else
			rc = dax_memzero(iter, pos, size);
		dax_read_unlock(id);

		if (rc < 0)
			return rc;
		pos += size;
		length -= size;
		written += size;
	} while (length > 0);

	if (did_zero)
		*did_zero = true;
	return written;
}
int dax_zero_range(struct inode *inode, loff_t pos, loff_t len, bool *did_zero,
		const struct iomap_ops *ops)
{
	struct iomap_iter iter = {
		.inode		= inode,
		.pos		= pos,
		.len		= len,
		.flags		= IOMAP_DAX | IOMAP_ZERO,
	};
	int ret;

	while ((ret = iomap_iter(&iter, ops)) > 0)
		iter.processed = dax_zero_iter(&iter, did_zero);
	return ret;
}
EXPORT_SYMBOL_GPL(dax_zero_range);
int dax_truncate_page(struct inode *inode, loff_t pos, bool *did_zero,
		const struct iomap_ops *ops)
{
	unsigned int blocksize = i_blocksize(inode);
	unsigned int off = pos & (blocksize - 1);

	/* Block boundary? Nothing to do */
	if (!off)
		return 0;
	return dax_zero_range(inode, pos, blocksize - off, did_zero, ops);
}
EXPORT_SYMBOL_GPL(dax_truncate_page);
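
/*
 * Usage sketch (illustrative): on truncate a filesystem zeroes the partial
 * tail block so stale data never becomes visible, e.g.
 *
 *	error = dax_truncate_page(inode, newsize, NULL, &myfs_iomap_ops);
 *
 * where myfs_iomap_ops stands in for the filesystem's own iomap_ops.
 */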
static loff_t dax_iomap_iter(const struct iomap_iter *iomi,
		struct iov_iter *iter)
{
	const struct iomap *iomap = &iomi->iomap;
	const struct iomap *srcmap = iomap_iter_srcmap(iomi);
	loff_t length = iomap_length(iomi);
	loff_t pos = iomi->pos;
	struct dax_device *dax_dev = iomap->dax_dev;
	loff_t end = pos + length, done = 0;
	bool write = iov_iter_rw(iter) == WRITE;
	bool cow = write && iomap->flags & IOMAP_F_SHARED;
	ssize_t ret = 0;
	size_t xfer;
	int id;

	if (!write) {
		end = min(end, i_size_read(iomi->inode));
		if (pos >= end)
			return 0;

		if (iomap->type == IOMAP_HOLE || iomap->type == IOMAP_UNWRITTEN)
			return iov_iter_zero(min(length, end - pos), iter);
	}

	/*
	 * In DAX mode, enforce either pure overwrites of written extents, or
	 * writes to unwritten extents as part of a copy-on-write operation.
	 */
	if (WARN_ON_ONCE(iomap->type != IOMAP_MAPPED &&
			!(iomap->flags & IOMAP_F_SHARED)))
		return -EIO;

	/*
	 * Write can allocate block for an area which has a hole page mapped
	 * into page tables. We have to tear down these mappings so that data
	 * written by write(2) is visible in mmap.
	 */
	if (iomap->flags & IOMAP_F_NEW || cow) {
		/*
		 * Filesystem allows CoW on non-shared extents. The src extents
		 * may have been mmapped with dirty mark before. To be able to
		 * invalidate its dax entries, we need to clear the dirty mark
		 * in advance.
		 */
		if (cow)
			__dax_clear_dirty_range(iomi->inode->i_mapping,
						pos >> PAGE_SHIFT,
						(end - 1) >> PAGE_SHIFT);
		invalidate_inode_pages2_range(iomi->inode->i_mapping,
					      pos >> PAGE_SHIFT,
					      (end - 1) >> PAGE_SHIFT);
	}

	id = dax_read_lock();
	while (pos < end) {
		unsigned offset = pos & (PAGE_SIZE - 1);
		const size_t size = ALIGN(length + offset, PAGE_SIZE);
		pgoff_t pgoff = dax_iomap_pgoff(iomap, pos);
		ssize_t map_len;
		bool recovery = false;
		void *kaddr;

		if (fatal_signal_pending(current)) {
			ret = -EINTR;
			break;
		}

		map_len = dax_direct_access(dax_dev, pgoff, PHYS_PFN(size),
				DAX_ACCESS, &kaddr, NULL);
		if (map_len == -EHWPOISON && iov_iter_rw(iter) == WRITE) {
			map_len = dax_direct_access(dax_dev, pgoff,
					PHYS_PFN(size), DAX_RECOVERY_WRITE,
					&kaddr, NULL);
			if (map_len > 0)
				recovery = true;
		}
		if (map_len < 0) {
			ret = dax_mem2blk_err(map_len);
			break;
		}

		if (cow) {
			ret = dax_iomap_copy_around(pos, length, PAGE_SIZE,
						    srcmap, kaddr);
			if (ret)
				break;
		}

		map_len = PFN_PHYS(map_len);
		kaddr += offset;
		map_len -= offset;
		if (map_len > end - pos)
			map_len = end - pos;

		if (recovery)
			xfer = dax_recovery_write(dax_dev, pgoff, kaddr,
					map_len, iter);
		else if (write)
			xfer = dax_copy_from_iter(dax_dev, pgoff, kaddr,
					map_len, iter);
		else
			xfer = dax_copy_to_iter(dax_dev, pgoff, kaddr,
					map_len, iter);

		pos += xfer;
		length -= xfer;
		done += xfer;

		if (xfer == 0)
			ret = -EFAULT;
		if (xfer < map_len)
			break;
	}
	dax_read_unlock(id);

	return done ? done : ret;
}
/**
 * dax_iomap_rw - Perform I/O to a DAX file
 * @iocb:	The control block for this I/O
 * @iter:	The addresses to do I/O from or to
 * @ops:	iomap ops passed from the file system
 *
 * This function performs read and write operations to directly mapped
 * persistent memory.  The caller needs to take care of read/write exclusion
 * and evicting any page cache pages in the region under I/O.
 */
ssize_t
dax_iomap_rw(struct kiocb *iocb, struct iov_iter *iter,
		const struct iomap_ops *ops)
{
	struct iomap_iter iomi = {
		.inode		= iocb->ki_filp->f_mapping->host,
		.pos		= iocb->ki_pos,
		.len		= iov_iter_count(iter),
		.flags		= IOMAP_DAX,
	};
	loff_t done = 0;
	int ret;

	if (!iomi.len)
		return 0;

	if (iov_iter_rw(iter) == WRITE) {
		lockdep_assert_held_write(&iomi.inode->i_rwsem);
		iomi.flags |= IOMAP_WRITE;
	} else {
		lockdep_assert_held(&iomi.inode->i_rwsem);
	}

	if (iocb->ki_flags & IOCB_NOWAIT)
		iomi.flags |= IOMAP_NOWAIT;

	while ((ret = iomap_iter(&iomi, ops)) > 0)
		iomi.processed = dax_iomap_iter(&iomi, iter);

	done = iomi.pos - iocb->ki_pos;
	iocb->ki_pos = iomi.pos;
	return done ? done : ret;
}
EXPORT_SYMBOL_GPL(dax_iomap_rw);
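
/*
 * Usage sketch (illustrative, not part of this file; the myfs_* names are
 * hypothetical): a filesystem's read iterator typically wraps this helper
 * under the inode lock it is required to hold.
 *
 *	static ssize_t myfs_dax_read_iter(struct kiocb *iocb, struct iov_iter *to)
 *	{
 *		struct inode *inode = file_inode(iocb->ki_filp);
 *		ssize_t ret;
 *
 *		inode_lock_shared(inode);
 *		ret = dax_iomap_rw(iocb, to, &myfs_iomap_ops);
 *		inode_unlock_shared(inode);
 *		return ret;
 *	}
 */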
static vm_fault_t dax_fault_return(int error)
{
	if (error == 0 || error == -EBUSY)
		return VM_FAULT_NOPAGE;
	return vmf_error(error);
}
/*
 * When handling a synchronous page fault and the inode needs an fsync, we can
 * insert the PTE/PMD into page tables only after that fsync happened. Skip
 * insertion for now and return the pfn so that the caller can insert it after
 * the fsync is done.
 */
static vm_fault_t dax_fault_synchronous_pfnp(pfn_t *pfnp, pfn_t pfn)
{
	if (WARN_ON_ONCE(!pfnp))
		return VM_FAULT_SIGBUS;
	*pfnp = pfn;
	return VM_FAULT_NEEDDSYNC;
}
static vm_fault_t dax_fault_cow_page(struct vm_fault *vmf,
		const struct iomap_iter *iter)
{
	vm_fault_t ret;
	int error = 0;

	switch (iter->iomap.type) {
	case IOMAP_HOLE:
	case IOMAP_UNWRITTEN:
		clear_user_highpage(vmf->cow_page, vmf->address);
		break;
	case IOMAP_MAPPED:
		error = copy_cow_page_dax(vmf, iter);
		break;
	default:
		WARN_ON_ONCE(1);
		error = -EIO;
		break;
	}

	if (error)
		return dax_fault_return(error);

	__SetPageUptodate(vmf->cow_page);
	ret = finish_fault(vmf);
	if (!ret)
		return VM_FAULT_DONE_COW;
	return ret;
}
/**
 * dax_fault_iter - Common actor to handle pfn insertion in PTE/PMD fault.
 * @vmf:	vm fault instance
 * @iter:	iomap iter
 * @pfnp:	pfn to be returned
 * @xas:	the dax mapping tree of a file
 * @entry:	an unlocked dax entry to be inserted
 * @pmd:	distinguish whether it is a pmd fault
 */
static vm_fault_t dax_fault_iter(struct vm_fault *vmf,
		const struct iomap_iter *iter, pfn_t *pfnp,
		struct xa_state *xas, void **entry, bool pmd)
{
	const struct iomap *iomap = &iter->iomap;
	const struct iomap *srcmap = iomap_iter_srcmap(iter);
	size_t size = pmd ? PMD_SIZE : PAGE_SIZE;
	loff_t pos = (loff_t)xas->xa_index << PAGE_SHIFT;
	bool write = iter->flags & IOMAP_WRITE;
	unsigned long entry_flags = pmd ? DAX_PMD : 0;
	int err = 0;
	pfn_t pfn;
	void *kaddr;

	if (!pmd && vmf->cow_page)
		return dax_fault_cow_page(vmf, iter);

	/* if we are reading UNWRITTEN and HOLE, return a hole. */
	if (!write &&
	    (iomap->type == IOMAP_UNWRITTEN || iomap->type == IOMAP_HOLE)) {
		if (!pmd)
			return dax_load_hole(xas, vmf, iter, entry);
		return dax_pmd_load_hole(xas, vmf, iter, entry);
	}

	if (iomap->type != IOMAP_MAPPED && !(iomap->flags & IOMAP_F_SHARED)) {
		WARN_ON_ONCE(1);
		return pmd ? VM_FAULT_FALLBACK : VM_FAULT_SIGBUS;
	}

	err = dax_iomap_direct_access(iomap, pos, size, &kaddr, &pfn);
	if (err)
		return pmd ? VM_FAULT_FALLBACK : dax_fault_return(err);

	*entry = dax_insert_entry(xas, vmf, iter, *entry, pfn, entry_flags);

	if (write && iomap->flags & IOMAP_F_SHARED) {
		err = dax_iomap_copy_around(pos, size, size, srcmap, kaddr);
		if (err)
			return dax_fault_return(err);
	}

	if (dax_fault_is_synchronous(iter, vmf->vma))
		return dax_fault_synchronous_pfnp(pfnp, pfn);

	/* insert PMD pfn */
	if (pmd)
		return vmf_insert_pfn_pmd(vmf, pfn, write);

	/* insert PTE pfn */
	if (write)
		return vmf_insert_mixed_mkwrite(vmf->vma, vmf->address, pfn);
	return vmf_insert_mixed(vmf->vma, vmf->address, pfn);
}
static vm_fault_t dax_iomap_pte_fault(struct vm_fault *vmf, pfn_t *pfnp,
			       int *iomap_errp, const struct iomap_ops *ops)
{
	struct address_space *mapping = vmf->vma->vm_file->f_mapping;
	XA_STATE(xas, &mapping->i_pages, vmf->pgoff);
	struct iomap_iter iter = {
		.inode		= mapping->host,
		.pos		= (loff_t)vmf->pgoff << PAGE_SHIFT,
		.len		= PAGE_SIZE,
		.flags		= IOMAP_DAX | IOMAP_FAULT,
	};
	vm_fault_t ret = 0;
	void *entry;
	int error;

	trace_dax_pte_fault(iter.inode, vmf, ret);
	/*
	 * Check whether offset isn't beyond end of file now. Caller is supposed
	 * to hold locks serializing us with truncate / punch hole so this is
	 * a reliable test.
	 */
	if (iter.pos >= i_size_read(iter.inode)) {
		ret = VM_FAULT_SIGBUS;
		goto out;
	}

	if ((vmf->flags & FAULT_FLAG_WRITE) && !vmf->cow_page)
		iter.flags |= IOMAP_WRITE;

	entry = grab_mapping_entry(&xas, mapping, 0);
	if (xa_is_internal(entry)) {
		ret = xa_to_internal(entry);
		goto out;
	}

	/*
	 * It is possible, particularly with mixed reads & writes to private
	 * mappings, that we have raced with a PMD fault that overlaps with
	 * the PTE we need to set up.  If so just return and the fault will be
	 * retried.
	 */
	if (pmd_trans_huge(*vmf->pmd) || pmd_devmap(*vmf->pmd)) {
		ret = VM_FAULT_NOPAGE;
		goto unlock_entry;
	}

	while ((error = iomap_iter(&iter, ops)) > 0) {
		if (WARN_ON_ONCE(iomap_length(&iter) < PAGE_SIZE)) {
			iter.processed = -EIO;	/* fs corruption? */
			continue;
		}

		ret = dax_fault_iter(vmf, &iter, pfnp, &xas, &entry, false);
		if (ret != VM_FAULT_SIGBUS &&
		    (iter.iomap.flags & IOMAP_F_NEW)) {
			count_vm_event(PGMAJFAULT);
			count_memcg_event_mm(vmf->vma->vm_mm, PGMAJFAULT);
			ret |= VM_FAULT_MAJOR;
		}

		if (!(ret & VM_FAULT_ERROR))
			iter.processed = PAGE_SIZE;
	}

	if (iomap_errp)
		*iomap_errp = error;
	if (!ret && error)
		ret = dax_fault_return(error);

unlock_entry:
	dax_unlock_entry(&xas, entry);
out:
	trace_dax_pte_fault_done(iter.inode, vmf, ret);
	return ret;
}
#ifdef CONFIG_FS_DAX_PMD
static bool dax_fault_check_fallback(struct vm_fault *vmf, struct xa_state *xas,
		pgoff_t max_pgoff)
{
	unsigned long pmd_addr = vmf->address & PMD_MASK;
	bool write = vmf->flags & FAULT_FLAG_WRITE;

	/*
	 * Make sure that the faulting address's PMD offset (color) matches
	 * the PMD offset from the start of the file.  This is necessary so
	 * that a PMD range in the page table overlaps exactly with a PMD
	 * range in the page cache.
	 */
	if ((vmf->pgoff & PG_PMD_COLOUR) !=
	    ((vmf->address >> PAGE_SHIFT) & PG_PMD_COLOUR))
		return true;

	/* Fall back to PTEs if we're going to COW */
	if (write && !(vmf->vma->vm_flags & VM_SHARED))
		return true;

	/* If the PMD would extend outside the VMA */
	if (pmd_addr < vmf->vma->vm_start)
		return true;
	if ((pmd_addr + PMD_SIZE) > vmf->vma->vm_end)
		return true;

	/* If the PMD would extend beyond the file size */
	if ((xas->xa_index | PG_PMD_COLOUR) >= max_pgoff)
		return true;

	return false;
}

static vm_fault_t dax_iomap_pmd_fault(struct vm_fault *vmf, pfn_t *pfnp,
			       const struct iomap_ops *ops)
{
	struct address_space *mapping = vmf->vma->vm_file->f_mapping;
	XA_STATE_ORDER(xas, &mapping->i_pages, vmf->pgoff, PMD_ORDER);
	struct iomap_iter iter = {
		.inode		= mapping->host,
		.len		= PMD_SIZE,
		.flags		= IOMAP_DAX | IOMAP_FAULT,
	};
	vm_fault_t ret = VM_FAULT_FALLBACK;
	pgoff_t max_pgoff;
	void *entry;

	if (vmf->flags & FAULT_FLAG_WRITE)
		iter.flags |= IOMAP_WRITE;

	/*
	 * Check whether offset isn't beyond end of file now. Caller is
	 * supposed to hold locks serializing us with truncate / punch hole so
	 * this is a reliable test.
	 */
	max_pgoff = DIV_ROUND_UP(i_size_read(iter.inode), PAGE_SIZE);

	trace_dax_pmd_fault(iter.inode, vmf, max_pgoff, 0);

	if (xas.xa_index >= max_pgoff) {
		ret = VM_FAULT_SIGBUS;
		goto out;
	}

	if (dax_fault_check_fallback(vmf, &xas, max_pgoff))
		goto fallback;

	/*
	 * grab_mapping_entry() will make sure we get an empty PMD entry,
	 * a zero PMD entry or a DAX PMD.  If it can't (because a PTE
	 * entry is already in the array, for instance), it will return
	 * VM_FAULT_FALLBACK.
	 */
	entry = grab_mapping_entry(&xas, mapping, PMD_ORDER);
	if (xa_is_internal(entry)) {
		ret = xa_to_internal(entry);
		goto fallback;
	}

	/*
	 * It is possible, particularly with mixed reads & writes to private
	 * mappings, that we have raced with a PTE fault that overlaps with
	 * the PMD we need to set up.  If so just return and the fault will be
	 * retried.
	 */
	if (!pmd_none(*vmf->pmd) && !pmd_trans_huge(*vmf->pmd) &&
			!pmd_devmap(*vmf->pmd)) {
		ret = 0;
		goto unlock_entry;
	}

	iter.pos = (loff_t)xas.xa_index << PAGE_SHIFT;
	while (iomap_iter(&iter, ops) > 0) {
		if (iomap_length(&iter) < PMD_SIZE)
			continue; /* actually breaks out of the loop */

		ret = dax_fault_iter(vmf, &iter, pfnp, &xas, &entry, true);
		if (ret != VM_FAULT_FALLBACK)
			iter.processed = PMD_SIZE;
	}

unlock_entry:
	dax_unlock_entry(&xas, entry);
fallback:
	if (ret == VM_FAULT_FALLBACK) {
		split_huge_pmd(vmf->vma, vmf->pmd, vmf->address);
		count_vm_event(THP_FAULT_FALLBACK);
	}
out:
	trace_dax_pmd_fault_done(iter.inode, vmf, max_pgoff, ret);
	return ret;
}
#else
static vm_fault_t dax_iomap_pmd_fault(struct vm_fault *vmf, pfn_t *pfnp,
			       const struct iomap_ops *ops)
{
	return VM_FAULT_FALLBACK;
}
#endif /* CONFIG_FS_DAX_PMD */
/**
 * dax_iomap_fault - handle a page fault on a DAX file
 * @vmf: The description of the fault
 * @order: Order of the page to fault in
 * @pfnp: PFN to insert for synchronous faults if fsync is required
 * @iomap_errp: Storage for detailed error code in case of error
 * @ops: Iomap ops passed from the file system
 *
 * When a page fault occurs, filesystems may call this helper in
 * their fault handler for DAX files. dax_iomap_fault() assumes the caller
 * has done all the necessary locking for page fault to proceed
 * successfully.
 */
vm_fault_t dax_iomap_fault(struct vm_fault *vmf, unsigned int order,
		    pfn_t *pfnp, int *iomap_errp, const struct iomap_ops *ops)
{
	if (order == 0)
		return dax_iomap_pte_fault(vmf, pfnp, iomap_errp, ops);
	else if (order == PMD_ORDER)
		return dax_iomap_pmd_fault(vmf, pfnp, ops);
	else
		return VM_FAULT_FALLBACK;
}
EXPORT_SYMBOL_GPL(dax_iomap_fault);
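
/*
 * Usage sketch (illustrative, names hypothetical): a filesystem's fault
 * handler typically forwards to this helper, taking whatever locks it
 * needs to serialize against truncate first.
 *
 *	static vm_fault_t myfs_dax_fault(struct vm_fault *vmf)
 *	{
 *		return dax_iomap_fault(vmf, 0, NULL, NULL, &myfs_iomap_ops);
 *	}
 */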
/*
 * dax_insert_pfn_mkwrite - insert PTE or PMD entry into page tables
 * @vmf: The description of the fault
 * @pfn: PFN to insert
 * @order: Order of entry to insert.
 *
 * This function inserts a writeable PTE or PMD entry into the page tables
 * for an mmaped DAX file.  It also marks the page cache entry as dirty.
 */
static vm_fault_t
dax_insert_pfn_mkwrite(struct vm_fault *vmf, pfn_t pfn, unsigned int order)
{
	struct address_space *mapping = vmf->vma->vm_file->f_mapping;
	XA_STATE_ORDER(xas, &mapping->i_pages, vmf->pgoff, order);
	void *entry;
	vm_fault_t ret;

	xas_lock_irq(&xas);
	entry = get_unlocked_entry(&xas, order);
	/* Did we race with someone splitting entry or so? */
	if (!entry || dax_is_conflict(entry) ||
	    (order == 0 && !dax_is_pte_entry(entry))) {
		put_unlocked_entry(&xas, entry, WAKE_NEXT);
		xas_unlock_irq(&xas);
		trace_dax_insert_pfn_mkwrite_no_entry(mapping->host, vmf,
						      VM_FAULT_NOPAGE);
		return VM_FAULT_NOPAGE;
	}
	xas_set_mark(&xas, PAGECACHE_TAG_DIRTY);
	dax_lock_entry(&xas, entry);
	xas_unlock_irq(&xas);
	if (order == 0)
		ret = vmf_insert_mixed_mkwrite(vmf->vma, vmf->address, pfn);
#ifdef CONFIG_FS_DAX_PMD
	else if (order == PMD_ORDER)
		ret = vmf_insert_pfn_pmd(vmf, pfn, FAULT_FLAG_WRITE);
#endif
	else
		ret = VM_FAULT_FALLBACK;
	dax_unlock_entry(&xas, entry);
	trace_dax_insert_pfn_mkwrite(mapping->host, vmf, ret);
	return ret;
}
/**
 * dax_finish_sync_fault - finish synchronous page fault
 * @vmf: The description of the fault
 * @order: Order of entry to be inserted
 * @pfn: PFN to insert
 *
 * This function ensures that the file range touched by the page fault is
 * stored persistently on the media and handles inserting of appropriate page
 * table entry.
 */
vm_fault_t dax_finish_sync_fault(struct vm_fault *vmf, unsigned int order,
		pfn_t pfn)
{
	int err;
	loff_t start = ((loff_t)vmf->pgoff) << PAGE_SHIFT;
	size_t len = PAGE_SIZE << order;

	err = vfs_fsync_range(vmf->vma->vm_file, start, start + len - 1, 1);
	if (err)
		return VM_FAULT_SIGBUS;
	return dax_insert_pfn_mkwrite(vmf, pfn, order);
}
EXPORT_SYMBOL_GPL(dax_finish_sync_fault);
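
/*
 * Usage sketch (illustrative): for MAP_SYNC mappings dax_iomap_fault()
 * hands back VM_FAULT_NEEDDSYNC together with the pfn; the filesystem
 * then persists the metadata and completes the fault, roughly:
 *
 *	ret = dax_iomap_fault(vmf, order, &pfn, NULL, &myfs_iomap_ops);
 *	if (ret & VM_FAULT_NEEDDSYNC)
 *		ret = dax_finish_sync_fault(vmf, order, pfn);
 */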
static loff_t dax_range_compare_iter(struct iomap_iter *it_src,
		struct iomap_iter *it_dest, u64 len, bool *same)
{
	const struct iomap *smap = &it_src->iomap;
	const struct iomap *dmap = &it_dest->iomap;
	loff_t pos1 = it_src->pos, pos2 = it_dest->pos;
	void *saddr, *daddr;
	int id, ret;

	len = min(len, min(smap->length, dmap->length));

	if (smap->type == IOMAP_HOLE && dmap->type == IOMAP_HOLE) {
		*same = true;
		return len;
	}

	if (smap->type == IOMAP_HOLE || dmap->type == IOMAP_HOLE) {
		*same = false;
		return 0;
	}

	id = dax_read_lock();
	ret = dax_iomap_direct_access(smap, pos1, ALIGN(pos1 + len, PAGE_SIZE),
				      &saddr, NULL);
	if (ret < 0)
		goto out_unlock;

	ret = dax_iomap_direct_access(dmap, pos2, ALIGN(pos2 + len, PAGE_SIZE),
				      &daddr, NULL);
	if (ret < 0)
		goto out_unlock;

	*same = !memcmp(saddr, daddr, len);
	if (!*same)
		len = 0;
	dax_read_unlock(id);
	return len;

out_unlock:
	dax_read_unlock(id);
	return -EIO;
}
int dax_dedupe_file_range_compare(struct inode *src, loff_t srcoff,
		struct inode *dst, loff_t dstoff, loff_t len, bool *same,
		const struct iomap_ops *ops)
{
	struct iomap_iter src_iter = {
		.inode		= src,
		.pos		= srcoff,
		.len		= len,
		.flags		= IOMAP_DAX,
	};
	struct iomap_iter dst_iter = {
		.inode		= dst,
		.pos		= dstoff,
		.len		= len,
		.flags		= IOMAP_DAX,
	};
	int ret, compared = 0;

	while ((ret = iomap_iter(&src_iter, ops)) > 0 &&
	       (ret = iomap_iter(&dst_iter, ops)) > 0) {
		compared = dax_range_compare_iter(&src_iter, &dst_iter,
				min(src_iter.len, dst_iter.len), same);
		if (compared < 0)
			return ret;
		src_iter.processed = dst_iter.processed = compared;
	}
	return ret;
}
int dax_remap_file_range_prep(struct file *file_in, loff_t pos_in,
			      struct file *file_out, loff_t pos_out,
			      loff_t *len, unsigned int remap_flags,
			      const struct iomap_ops *ops)
{
	return __generic_remap_file_range_prep(file_in, pos_in, file_out,
					       pos_out, len, remap_flags, ops);
}
EXPORT_SYMBOL_GPL(dax_remap_file_range_prep);