Linux 4.19.133 - fs/dax.c
1 /*
2 * fs/dax.c - Direct Access filesystem code
3 * Copyright (c) 2013-2014 Intel Corporation
4 * Author: Matthew Wilcox <matthew.r.wilcox@intel.com>
5 * Author: Ross Zwisler <ross.zwisler@linux.intel.com>
7 * This program is free software; you can redistribute it and/or modify it
8 * under the terms and conditions of the GNU General Public License,
9 * version 2, as published by the Free Software Foundation.
11 * This program is distributed in the hope it will be useful, but WITHOUT
12 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
14 * more details.
17 #include <linux/atomic.h>
18 #include <linux/blkdev.h>
19 #include <linux/buffer_head.h>
20 #include <linux/dax.h>
21 #include <linux/fs.h>
22 #include <linux/genhd.h>
23 #include <linux/highmem.h>
24 #include <linux/memcontrol.h>
25 #include <linux/mm.h>
26 #include <linux/mutex.h>
27 #include <linux/pagevec.h>
28 #include <linux/sched.h>
29 #include <linux/sched/signal.h>
30 #include <linux/uio.h>
31 #include <linux/vmstat.h>
32 #include <linux/pfn_t.h>
33 #include <linux/sizes.h>
34 #include <linux/mmu_notifier.h>
35 #include <linux/iomap.h>
36 #include "internal.h"
38 #define CREATE_TRACE_POINTS
39 #include <trace/events/fs_dax.h>
41 /* We choose 4096 entries - same as per-zone page wait tables */
42 #define DAX_WAIT_TABLE_BITS 12
43 #define DAX_WAIT_TABLE_ENTRIES (1 << DAX_WAIT_TABLE_BITS)
45 /* The 'colour' (ie low bits) within a PMD of a page offset. */
46 #define PG_PMD_COLOUR ((PMD_SIZE >> PAGE_SHIFT) - 1)
47 #define PG_PMD_NR (PMD_SIZE >> PAGE_SHIFT)
49 static wait_queue_head_t wait_table[DAX_WAIT_TABLE_ENTRIES];
51 static int __init init_dax_wait_table(void)
53 int i;
55 for (i = 0; i < DAX_WAIT_TABLE_ENTRIES; i++)
56 init_waitqueue_head(wait_table + i);
57 return 0;
59 fs_initcall(init_dax_wait_table);
62 * We use the lowest available bit in the exceptional entry for locking, one bit
63 * for the entry size (PMD) and two more to tell us if the entry is a zero page
64 * or an empty entry that is just used for locking.  In total four special bits.
66 * If the PMD bit isn't set the entry has size PAGE_SIZE, and if the ZERO_PAGE
67 * and EMPTY bits aren't set the entry is a normal DAX entry with a filesystem
68 * block allocation.
70 #define RADIX_DAX_SHIFT (RADIX_TREE_EXCEPTIONAL_SHIFT + 4)
71 #define RADIX_DAX_ENTRY_LOCK (1 << RADIX_TREE_EXCEPTIONAL_SHIFT)
72 #define RADIX_DAX_PMD (1 << (RADIX_TREE_EXCEPTIONAL_SHIFT + 1))
73 #define RADIX_DAX_ZERO_PAGE (1 << (RADIX_TREE_EXCEPTIONAL_SHIFT + 2))
74 #define RADIX_DAX_EMPTY (1 << (RADIX_TREE_EXCEPTIONAL_SHIFT + 3))
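/*
 * Illustrative sketch (an editorial addition, not part of the original
 * source): using the helpers defined below, a locked PTE-sized entry for
 * pfn 0x1234 would be built and decoded roughly like this:
 *
 *	void *entry = dax_radix_locked_entry(0x1234, 0);
 *		(== RADIX_TREE_EXCEPTIONAL_ENTRY | RADIX_DAX_ENTRY_LOCK |
 *		    (0x1234UL << RADIX_DAX_SHIFT))
 *	dax_radix_pfn(entry)	-> 0x1234
 *	dax_radix_order(entry)	-> 0 (a PMD entry returns PMD_SHIFT - PAGE_SHIFT)
 */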
76 static unsigned long dax_radix_pfn(void *entry)
78 return (unsigned long)entry >> RADIX_DAX_SHIFT;
81 static void *dax_radix_locked_entry(unsigned long pfn, unsigned long flags)
83 return (void *)(RADIX_TREE_EXCEPTIONAL_ENTRY | flags |
84 (pfn << RADIX_DAX_SHIFT) | RADIX_DAX_ENTRY_LOCK);
87 static unsigned int dax_radix_order(void *entry)
89 if ((unsigned long)entry & RADIX_DAX_PMD)
90 return PMD_SHIFT - PAGE_SHIFT;
91 return 0;
94 static int dax_is_pmd_entry(void *entry)
96 return (unsigned long)entry & RADIX_DAX_PMD;
99 static int dax_is_pte_entry(void *entry)
101 return !((unsigned long)entry & RADIX_DAX_PMD);
104 static int dax_is_zero_entry(void *entry)
106 return (unsigned long)entry & RADIX_DAX_ZERO_PAGE;
109 static int dax_is_empty_entry(void *entry)
111 return (unsigned long)entry & RADIX_DAX_EMPTY;
115 * DAX radix tree locking
117 struct exceptional_entry_key {
118 struct address_space *mapping;
119 pgoff_t entry_start;
122 struct wait_exceptional_entry_queue {
123 wait_queue_entry_t wait;
124 struct exceptional_entry_key key;
127 static wait_queue_head_t *dax_entry_waitqueue(struct address_space *mapping,
128 pgoff_t index, void *entry, struct exceptional_entry_key *key)
130 unsigned long hash;
133 * If 'entry' is a PMD, align the 'index' that we use for the wait
134 * queue to the start of that PMD. This ensures that all offsets in
135 * the range covered by the PMD map to the same bit lock.
137 if (dax_is_pmd_entry(entry))
138 index &= ~PG_PMD_COLOUR;
140 key->mapping = mapping;
141 key->entry_start = index;
143 hash = hash_long((unsigned long)mapping ^ index, DAX_WAIT_TABLE_BITS);
144 return wait_table + hash;
147 static int wake_exceptional_entry_func(wait_queue_entry_t *wait, unsigned int mode,
148 int sync, void *keyp)
150 struct exceptional_entry_key *key = keyp;
151 struct wait_exceptional_entry_queue *ewait =
152 container_of(wait, struct wait_exceptional_entry_queue, wait);
154 if (key->mapping != ewait->key.mapping ||
155 key->entry_start != ewait->key.entry_start)
156 return 0;
157 return autoremove_wake_function(wait, mode, sync, NULL);
161 * @entry may no longer be the entry at the index in the mapping.
162 * The important information it's conveying is whether the entry at
163 * this index used to be a PMD entry.
165 static void dax_wake_mapping_entry_waiter(struct address_space *mapping,
166 pgoff_t index, void *entry, bool wake_all)
168 struct exceptional_entry_key key;
169 wait_queue_head_t *wq;
171 wq = dax_entry_waitqueue(mapping, index, entry, &key);
174 * Checking for locked entry and prepare_to_wait_exclusive() happens
175 * under the i_pages lock, ditto for entry handling in our callers.
176 * So at this point all tasks that could have seen our entry locked
177 * must be in the waitqueue and the following check will see them.
179 if (waitqueue_active(wq))
180 __wake_up(wq, TASK_NORMAL, wake_all ? 0 : 1, &key);
184 * Check whether the given slot is locked. Must be called with the i_pages
185 * lock held.
187 static inline int slot_locked(struct address_space *mapping, void **slot)
189 unsigned long entry = (unsigned long)
190 radix_tree_deref_slot_protected(slot, &mapping->i_pages.xa_lock);
191 return entry & RADIX_DAX_ENTRY_LOCK;
195 * Mark the given slot as locked. Must be called with the i_pages lock held.
197 static inline void *lock_slot(struct address_space *mapping, void **slot)
199 unsigned long entry = (unsigned long)
200 radix_tree_deref_slot_protected(slot, &mapping->i_pages.xa_lock);
202 entry |= RADIX_DAX_ENTRY_LOCK;
203 radix_tree_replace_slot(&mapping->i_pages, slot, (void *)entry);
204 return (void *)entry;
208 * Mark the given slot as unlocked. Must be called with the i_pages lock held.
210 static inline void *unlock_slot(struct address_space *mapping, void **slot)
212 unsigned long entry = (unsigned long)
213 radix_tree_deref_slot_protected(slot, &mapping->i_pages.xa_lock);
215 entry &= ~(unsigned long)RADIX_DAX_ENTRY_LOCK;
216 radix_tree_replace_slot(&mapping->i_pages, slot, (void *)entry);
217 return (void *)entry;
220 static void put_unlocked_mapping_entry(struct address_space *mapping,
221 pgoff_t index, void *entry);
224 * Look up the entry in the radix tree, wait for it to become unlocked if it
225 * is an exceptional entry, and return it. The caller must call
226 * put_unlocked_mapping_entry() if it decides not to lock the entry, or
227 * put_locked_mapping_entry() once it has locked the entry and wants to
228 * unlock it.
230 * Must be called with the i_pages lock held.
232 static void *get_unlocked_mapping_entry(struct address_space *mapping,
233 pgoff_t index, void ***slotp)
235 void *entry, **slot;
236 struct wait_exceptional_entry_queue ewait;
237 wait_queue_head_t *wq;
239 init_wait(&ewait.wait);
240 ewait.wait.func = wake_exceptional_entry_func;
242 for (;;) {
243 entry = __radix_tree_lookup(&mapping->i_pages, index, NULL,
244 &slot);
245 if (!entry ||
246 WARN_ON_ONCE(!radix_tree_exceptional_entry(entry)) ||
247 !slot_locked(mapping, slot)) {
248 if (slotp)
249 *slotp = slot;
250 return entry;
253 wq = dax_entry_waitqueue(mapping, index, entry, &ewait.key);
254 prepare_to_wait_exclusive(wq, &ewait.wait,
255 TASK_UNINTERRUPTIBLE);
256 xa_unlock_irq(&mapping->i_pages);
257 schedule();
258 finish_wait(wq, &ewait.wait);
259 xa_lock_irq(&mapping->i_pages);
264 * The only thing keeping the address space around is the i_pages lock
265 * (it's cycled in clear_inode() after removing the entries from i_pages).
266 * After we call xa_unlock_irq(), we cannot touch the mapping again.
268 static void wait_entry_unlocked(struct address_space *mapping, pgoff_t index,
269 void ***slotp, void *entry)
271 struct wait_exceptional_entry_queue ewait;
272 wait_queue_head_t *wq;
274 init_wait(&ewait.wait);
275 ewait.wait.func = wake_exceptional_entry_func;
277 wq = dax_entry_waitqueue(mapping, index, entry, &ewait.key);
279 * Unlike get_unlocked_mapping_entry() there is no guarantee that this
280 * path ever successfully retrieves an unlocked entry before an
281 * inode dies. Perform a non-exclusive wait in case this path
282 * never successfully performs its own wake up.
284 prepare_to_wait(wq, &ewait.wait, TASK_UNINTERRUPTIBLE);
285 xa_unlock_irq(&mapping->i_pages);
286 schedule();
287 finish_wait(wq, &ewait.wait);
290 static void unlock_mapping_entry(struct address_space *mapping, pgoff_t index)
292 void *entry, **slot;
294 xa_lock_irq(&mapping->i_pages);
295 entry = __radix_tree_lookup(&mapping->i_pages, index, NULL, &slot);
296 if (WARN_ON_ONCE(!entry || !radix_tree_exceptional_entry(entry) ||
297 !slot_locked(mapping, slot))) {
298 xa_unlock_irq(&mapping->i_pages);
299 return;
301 unlock_slot(mapping, slot);
302 xa_unlock_irq(&mapping->i_pages);
303 dax_wake_mapping_entry_waiter(mapping, index, entry, false);
306 static void put_locked_mapping_entry(struct address_space *mapping,
307 pgoff_t index)
309 unlock_mapping_entry(mapping, index);
313 * Called when we are done with radix tree entry we looked up via
314 * get_unlocked_mapping_entry() and which we didn't lock in the end.
316 static void put_unlocked_mapping_entry(struct address_space *mapping,
317 pgoff_t index, void *entry)
319 if (!entry)
320 return;
322 /* We have to wake up next waiter for the radix tree entry lock */
323 dax_wake_mapping_entry_waiter(mapping, index, entry, false);
326 static unsigned long dax_entry_size(void *entry)
328 if (dax_is_zero_entry(entry))
329 return 0;
330 else if (dax_is_empty_entry(entry))
331 return 0;
332 else if (dax_is_pmd_entry(entry))
333 return PMD_SIZE;
334 else
335 return PAGE_SIZE;
338 static unsigned long dax_radix_end_pfn(void *entry)
340 return dax_radix_pfn(entry) + dax_entry_size(entry) / PAGE_SIZE;
344 * Iterate through all mapped pfns represented by an entry, i.e. skip
345 * 'empty' and 'zero' entries.
347 #define for_each_mapped_pfn(entry, pfn) \
348 for (pfn = dax_radix_pfn(entry); \
349 pfn < dax_radix_end_pfn(entry); pfn++)
352 * TODO: for reflink+dax we need a way to associate a single page with
353 * multiple address_space instances at different linear_page_index()
354 * offsets.
356 static void dax_associate_entry(void *entry, struct address_space *mapping,
357 struct vm_area_struct *vma, unsigned long address)
359 unsigned long size = dax_entry_size(entry), pfn, index;
360 int i = 0;
362 if (IS_ENABLED(CONFIG_FS_DAX_LIMITED))
363 return;
365 index = linear_page_index(vma, address & ~(size - 1));
366 for_each_mapped_pfn(entry, pfn) {
367 struct page *page = pfn_to_page(pfn);
369 WARN_ON_ONCE(page->mapping);
370 page->mapping = mapping;
371 page->index = index + i++;
375 static void dax_disassociate_entry(void *entry, struct address_space *mapping,
376 bool trunc)
378 unsigned long pfn;
380 if (IS_ENABLED(CONFIG_FS_DAX_LIMITED))
381 return;
383 for_each_mapped_pfn(entry, pfn) {
384 struct page *page = pfn_to_page(pfn);
386 WARN_ON_ONCE(trunc && page_ref_count(page) > 1);
387 WARN_ON_ONCE(page->mapping && page->mapping != mapping);
388 page->mapping = NULL;
389 page->index = 0;
393 static struct page *dax_busy_page(void *entry)
395 unsigned long pfn;
397 for_each_mapped_pfn(entry, pfn) {
398 struct page *page = pfn_to_page(pfn);
400 if (page_ref_count(page) > 1)
401 return page;
403 return NULL;
406 bool dax_lock_mapping_entry(struct page *page)
408 pgoff_t index;
409 struct inode *inode;
410 bool did_lock = false;
411 void *entry = NULL, **slot;
412 struct address_space *mapping;
414 rcu_read_lock();
415 for (;;) {
416 mapping = READ_ONCE(page->mapping);
418 if (!mapping || !dax_mapping(mapping))
419 break;
422 * In the device-dax case there's no need to lock, a
423 * struct dev_pagemap pin is sufficient to keep the
424 * inode alive, and we assume we have dev_pagemap pin
425 * otherwise we would not have a valid pfn_to_page()
426 * translation.
428 inode = mapping->host;
429 if (S_ISCHR(inode->i_mode)) {
430 did_lock = true;
431 break;
434 xa_lock_irq(&mapping->i_pages);
435 if (mapping != page->mapping) {
436 xa_unlock_irq(&mapping->i_pages);
437 continue;
439 index = page->index;
441 entry = __radix_tree_lookup(&mapping->i_pages, index,
442 NULL, &slot);
443 if (!entry) {
444 xa_unlock_irq(&mapping->i_pages);
445 break;
446 } else if (slot_locked(mapping, slot)) {
447 rcu_read_unlock();
448 wait_entry_unlocked(mapping, index, &slot, entry);
449 rcu_read_lock();
450 continue;
452 lock_slot(mapping, slot);
453 did_lock = true;
454 xa_unlock_irq(&mapping->i_pages);
455 break;
457 rcu_read_unlock();
459 return did_lock;
462 void dax_unlock_mapping_entry(struct page *page)
464 struct address_space *mapping = page->mapping;
465 struct inode *inode = mapping->host;
467 if (S_ISCHR(inode->i_mode))
468 return;
470 unlock_mapping_entry(mapping, page->index);
474 * Find the radix tree entry at the given index. If it points to an exceptional
475 * entry, return it with the radix tree entry locked. If the radix tree doesn't
476 * contain the given index, create an empty exceptional entry for the index and
477 * return with it locked.
479 * When requesting an entry with size RADIX_DAX_PMD, grab_mapping_entry() will
480 * either return that locked entry or will return an error. This error will
481 * happen if there are any 4k entries within the 2MiB range that we are
482 * requesting.
484 * We always favor 4k entries over 2MiB entries. There isn't a flow where we
485 * evict 4k entries in order to 'upgrade' them to a 2MiB entry. A 2MiB
486 * insertion will fail if it finds any 4k entries already in the tree, and a
487 * 4k insertion will cause an existing 2MiB entry to be unmapped and
488 * downgraded to 4k entries. This happens for both 2MiB huge zero pages as
489 * well as 2MiB empty entries.
491 * The exception to this downgrade path is for 2MiB DAX PMD entries that have
492 * real storage backing them. We will leave these real 2MiB DAX entries in
493 * the tree, and PTE writes will simply dirty the entire 2MiB DAX entry.
495 * Note: Unlike filemap_fault() we don't honor FAULT_FLAG_RETRY flags. For
496 * persistent memory the benefit is doubtful. We can add that later if we can
497 * show it helps.
499 static void *grab_mapping_entry(struct address_space *mapping, pgoff_t index,
500 unsigned long size_flag)
502 bool pmd_downgrade = false; /* splitting 2MiB entry into 4k entries? */
503 void *entry, **slot;
505 restart:
506 xa_lock_irq(&mapping->i_pages);
507 entry = get_unlocked_mapping_entry(mapping, index, &slot);
509 if (WARN_ON_ONCE(entry && !radix_tree_exceptional_entry(entry))) {
510 entry = ERR_PTR(-EIO);
511 goto out_unlock;
514 if (entry) {
515 if (size_flag & RADIX_DAX_PMD) {
516 if (dax_is_pte_entry(entry)) {
517 put_unlocked_mapping_entry(mapping, index,
518 entry);
519 entry = ERR_PTR(-EEXIST);
520 goto out_unlock;
522 } else { /* trying to grab a PTE entry */
523 if (dax_is_pmd_entry(entry) &&
524 (dax_is_zero_entry(entry) ||
525 dax_is_empty_entry(entry))) {
526 pmd_downgrade = true;
531 /* No entry for given index? Make sure radix tree is big enough. */
532 if (!entry || pmd_downgrade) {
533 int err;
535 if (pmd_downgrade) {
537 * Make sure 'entry' remains valid while we drop
538 * the i_pages lock.
540 entry = lock_slot(mapping, slot);
543 xa_unlock_irq(&mapping->i_pages);
545 * Besides huge zero pages the only other thing that gets
546 * downgraded are empty entries which don't need to be
547 * unmapped.
549 if (pmd_downgrade && dax_is_zero_entry(entry))
550 unmap_mapping_pages(mapping, index & ~PG_PMD_COLOUR,
551 PG_PMD_NR, false);
553 err = radix_tree_preload(
554 mapping_gfp_mask(mapping) & ~__GFP_HIGHMEM);
555 if (err) {
556 if (pmd_downgrade)
557 put_locked_mapping_entry(mapping, index);
558 return ERR_PTR(err);
560 xa_lock_irq(&mapping->i_pages);
562 if (!entry) {
564 * We needed to drop the i_pages lock while calling
565 * radix_tree_preload() and we didn't have an entry to
566 * lock. See if another thread inserted an entry at
567 * our index during this time.
569 entry = __radix_tree_lookup(&mapping->i_pages, index,
570 NULL, &slot);
571 if (entry) {
572 radix_tree_preload_end();
573 xa_unlock_irq(&mapping->i_pages);
574 goto restart;
578 if (pmd_downgrade) {
579 dax_disassociate_entry(entry, mapping, false);
580 radix_tree_delete(&mapping->i_pages, index);
581 mapping->nrexceptional--;
582 dax_wake_mapping_entry_waiter(mapping, index, entry,
583 true);
586 entry = dax_radix_locked_entry(0, size_flag | RADIX_DAX_EMPTY);
588 err = __radix_tree_insert(&mapping->i_pages, index,
589 dax_radix_order(entry), entry);
590 radix_tree_preload_end();
591 if (err) {
592 xa_unlock_irq(&mapping->i_pages);
594 * Our insertion of a DAX entry failed, most likely
595 * because we were inserting a PMD entry and it
596 * collided with a PTE sized entry at a different
597 * index in the PMD range. We haven't inserted
598 * anything into the radix tree and have no waiters to
599 * wake.
601 return ERR_PTR(err);
603 /* Good, we have inserted empty locked entry into the tree. */
604 mapping->nrexceptional++;
605 xa_unlock_irq(&mapping->i_pages);
606 return entry;
608 entry = lock_slot(mapping, slot);
609 out_unlock:
610 xa_unlock_irq(&mapping->i_pages);
611 return entry;
615 * dax_layout_busy_page - find first pinned page in @mapping
616 * @mapping: address space to scan for a page with ref count > 1
618 * DAX requires ZONE_DEVICE mapped pages. These pages are never
619 * 'onlined' to the page allocator so they are considered idle when
620 * page->count == 1. A filesystem uses this interface to determine if
621 * any page in the mapping is busy, i.e. for DMA, or other
622 * get_user_pages() usages.
624 * It is expected that the filesystem is holding locks to block the
625 * establishment of new mappings in this address_space. I.e. it expects
626 * to be able to run unmap_mapping_range() and subsequently not race with
627 * mapping_mapped() becoming true.
629 struct page *dax_layout_busy_page(struct address_space *mapping)
631 pgoff_t indices[PAGEVEC_SIZE];
632 struct page *page = NULL;
633 struct pagevec pvec;
634 pgoff_t index, end;
635 unsigned i;
638 * In the 'limited' case get_user_pages() for dax is disabled.
640 if (IS_ENABLED(CONFIG_FS_DAX_LIMITED))
641 return NULL;
643 if (!dax_mapping(mapping) || !mapping_mapped(mapping))
644 return NULL;
646 pagevec_init(&pvec);
647 index = 0;
648 end = -1;
651 * If we race get_user_pages_fast() here either we'll see the
652 * elevated page count in the pagevec_lookup and wait, or
653 * get_user_pages_fast() will see that the page it took a reference
654 * against is no longer mapped in the page tables and bail to the
655 * get_user_pages() slow path. The slow path is protected by
656 * pte_lock() and pmd_lock(). New references are not taken without
657 * holding those locks, and unmap_mapping_range() will not zero the
658 * pte or pmd without holding the respective lock, so we are
659 * guaranteed to either see new references or prevent new
660 * references from being established.
662 unmap_mapping_range(mapping, 0, 0, 0);
664 while (index < end && pagevec_lookup_entries(&pvec, mapping, index,
665 min(end - index, (pgoff_t)PAGEVEC_SIZE),
666 indices)) {
667 pgoff_t nr_pages = 1;
669 for (i = 0; i < pagevec_count(&pvec); i++) {
670 struct page *pvec_ent = pvec.pages[i];
671 void *entry;
673 index = indices[i];
674 if (index >= end)
675 break;
677 if (WARN_ON_ONCE(
678 !radix_tree_exceptional_entry(pvec_ent)))
679 continue;
681 xa_lock_irq(&mapping->i_pages);
682 entry = get_unlocked_mapping_entry(mapping, index, NULL);
683 if (entry) {
684 page = dax_busy_page(entry);
686 * Account for multi-order entries at
687 * the end of the pagevec.
689 if (i + 1 >= pagevec_count(&pvec))
690 nr_pages = 1UL << dax_radix_order(entry);
692 put_unlocked_mapping_entry(mapping, index, entry);
693 xa_unlock_irq(&mapping->i_pages);
694 if (page)
695 break;
699 * We don't expect normal struct page entries to exist in our
700 * tree, but we keep these pagevec calls so that this code is
701 * consistent with the common pattern for handling pagevecs
702 * throughout the kernel.
704 pagevec_remove_exceptionals(&pvec);
705 pagevec_release(&pvec);
706 index += nr_pages;
708 if (page)
709 break;
711 return page;
713 EXPORT_SYMBOL_GPL(dax_layout_busy_page);
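/*
 * Example caller (a sketch, not part of this file): before truncating or
 * punching a hole in a DAX file, a filesystem typically loops until no page
 * in the mapping is pinned for DMA, roughly as xfs_break_dax_layouts() does:
 *
 *	struct page *page = dax_layout_busy_page(inode->i_mapping);
 *
 *	if (page)
 *		wait for page_ref_count(page) to drop back to 1 (dropping and
 *		retaking the filesystem locks as needed), then retry.
 */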
715 static int __dax_invalidate_mapping_entry(struct address_space *mapping,
716 pgoff_t index, bool trunc)
718 int ret = 0;
719 void *entry;
720 struct radix_tree_root *pages = &mapping->i_pages;
722 xa_lock_irq(pages);
723 entry = get_unlocked_mapping_entry(mapping, index, NULL);
724 if (!entry || WARN_ON_ONCE(!radix_tree_exceptional_entry(entry)))
725 goto out;
726 if (!trunc &&
727 (radix_tree_tag_get(pages, index, PAGECACHE_TAG_DIRTY) ||
728 radix_tree_tag_get(pages, index, PAGECACHE_TAG_TOWRITE)))
729 goto out;
730 dax_disassociate_entry(entry, mapping, trunc);
731 radix_tree_delete(pages, index);
732 mapping->nrexceptional--;
733 ret = 1;
734 out:
735 put_unlocked_mapping_entry(mapping, index, entry);
736 xa_unlock_irq(pages);
737 return ret;
740 * Delete exceptional DAX entry at @index from @mapping. Wait for radix tree
741 * entry to get unlocked before deleting it.
743 int dax_delete_mapping_entry(struct address_space *mapping, pgoff_t index)
745 int ret = __dax_invalidate_mapping_entry(mapping, index, true);
748 * This gets called from the truncate / punch_hole path. As such, the caller
749 * must hold locks protecting against concurrent modifications of the
750 * radix tree (usually fs-private i_mmap_sem for writing). Since the
751 * caller has seen an exceptional entry for this index, we had better find it
752 * at that index as well...
754 WARN_ON_ONCE(!ret);
755 return ret;
759 * Invalidate exceptional DAX entry if it is clean.
761 int dax_invalidate_mapping_entry_sync(struct address_space *mapping,
762 pgoff_t index)
764 return __dax_invalidate_mapping_entry(mapping, index, false);
767 static int copy_user_dax(struct block_device *bdev, struct dax_device *dax_dev,
768 sector_t sector, size_t size, struct page *to,
769 unsigned long vaddr)
771 void *vto, *kaddr;
772 pgoff_t pgoff;
773 long rc;
774 int id;
776 rc = bdev_dax_pgoff(bdev, sector, size, &pgoff);
777 if (rc)
778 return rc;
780 id = dax_read_lock();
781 rc = dax_direct_access(dax_dev, pgoff, PHYS_PFN(size), &kaddr, NULL);
782 if (rc < 0) {
783 dax_read_unlock(id);
784 return rc;
786 vto = kmap_atomic(to);
787 copy_user_page(vto, (void __force *)kaddr, vaddr, to);
788 kunmap_atomic(vto);
789 dax_read_unlock(id);
790 return 0;
794 * By this point grab_mapping_entry() has ensured that we have a locked entry
795 * of the appropriate size so we don't have to worry about downgrading PMDs to
796 * PTEs. If we happen to be trying to insert a PTE and there is a PMD
797 * already in the tree, we will skip the insertion and just dirty the PMD as
798 * appropriate.
800 static void *dax_insert_mapping_entry(struct address_space *mapping,
801 struct vm_fault *vmf,
802 void *entry, pfn_t pfn_t,
803 unsigned long flags, bool dirty)
805 struct radix_tree_root *pages = &mapping->i_pages;
806 unsigned long pfn = pfn_t_to_pfn(pfn_t);
807 pgoff_t index = vmf->pgoff;
808 void *new_entry;
810 if (dirty)
811 __mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
813 if (dax_is_zero_entry(entry) && !(flags & RADIX_DAX_ZERO_PAGE)) {
814 /* we are replacing a zero page with block mapping */
815 if (dax_is_pmd_entry(entry))
816 unmap_mapping_pages(mapping, index & ~PG_PMD_COLOUR,
817 PG_PMD_NR, false);
818 else /* pte entry */
819 unmap_mapping_pages(mapping, vmf->pgoff, 1, false);
822 xa_lock_irq(pages);
823 new_entry = dax_radix_locked_entry(pfn, flags);
824 if (dax_entry_size(entry) != dax_entry_size(new_entry)) {
825 dax_disassociate_entry(entry, mapping, false);
826 dax_associate_entry(new_entry, mapping, vmf->vma, vmf->address);
829 if (dax_is_zero_entry(entry) || dax_is_empty_entry(entry)) {
831 * Only swap our new entry into the radix tree if the current
832 * entry is a zero page or an empty entry. If a normal PTE or
833 * PMD entry is already in the tree, we leave it alone. This
834 * means that if we are trying to insert a PTE and the
835 * existing entry is a PMD, we will just leave the PMD in the
836 * tree and dirty it if necessary.
838 struct radix_tree_node *node;
839 void **slot;
840 void *ret;
842 ret = __radix_tree_lookup(pages, index, &node, &slot);
843 WARN_ON_ONCE(ret != entry);
844 __radix_tree_replace(pages, node, slot,
845 new_entry, NULL);
846 entry = new_entry;
849 if (dirty)
850 radix_tree_tag_set(pages, index, PAGECACHE_TAG_DIRTY);
852 xa_unlock_irq(pages);
853 return entry;
856 static inline unsigned long
857 pgoff_address(pgoff_t pgoff, struct vm_area_struct *vma)
859 unsigned long address;
861 address = vma->vm_start + ((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
862 VM_BUG_ON_VMA(address < vma->vm_start || address >= vma->vm_end, vma);
863 return address;
866 /* Walk all mappings of a given index of a file and writeprotect them */
867 static void dax_mapping_entry_mkclean(struct address_space *mapping,
868 pgoff_t index, unsigned long pfn)
870 struct vm_area_struct *vma;
871 pte_t pte, *ptep = NULL;
872 pmd_t *pmdp = NULL;
873 spinlock_t *ptl;
875 i_mmap_lock_read(mapping);
876 vma_interval_tree_foreach(vma, &mapping->i_mmap, index, index) {
877 unsigned long address, start, end;
879 cond_resched();
881 if (!(vma->vm_flags & VM_SHARED))
882 continue;
884 address = pgoff_address(index, vma);
887 * Note because we provide start/end to follow_pte_pmd it will
888 * call mmu_notifier_invalidate_range_start() on our behalf
889 * before taking any lock.
891 if (follow_pte_pmd(vma->vm_mm, address, &start, &end, &ptep, &pmdp, &ptl))
892 continue;
895 * No need to call mmu_notifier_invalidate_range() as we are
896 * downgrading page table protection not changing it to point
897 * to a new page.
899 * See Documentation/vm/mmu_notifier.rst
901 if (pmdp) {
902 #ifdef CONFIG_FS_DAX_PMD
903 pmd_t pmd;
905 if (pfn != pmd_pfn(*pmdp))
906 goto unlock_pmd;
907 if (!pmd_dirty(*pmdp) && !pmd_write(*pmdp))
908 goto unlock_pmd;
910 flush_cache_page(vma, address, pfn);
911 pmd = pmdp_invalidate(vma, address, pmdp);
912 pmd = pmd_wrprotect(pmd);
913 pmd = pmd_mkclean(pmd);
914 set_pmd_at(vma->vm_mm, address, pmdp, pmd);
915 unlock_pmd:
916 #endif
917 spin_unlock(ptl);
918 } else {
919 if (pfn != pte_pfn(*ptep))
920 goto unlock_pte;
921 if (!pte_dirty(*ptep) && !pte_write(*ptep))
922 goto unlock_pte;
924 flush_cache_page(vma, address, pfn);
925 pte = ptep_clear_flush(vma, address, ptep);
926 pte = pte_wrprotect(pte);
927 pte = pte_mkclean(pte);
928 set_pte_at(vma->vm_mm, address, ptep, pte);
929 unlock_pte:
930 pte_unmap_unlock(ptep, ptl);
933 mmu_notifier_invalidate_range_end(vma->vm_mm, start, end);
935 i_mmap_unlock_read(mapping);
938 static int dax_writeback_one(struct dax_device *dax_dev,
939 struct address_space *mapping, pgoff_t index, void *entry)
941 struct radix_tree_root *pages = &mapping->i_pages;
942 void *entry2, **slot;
943 unsigned long pfn;
944 long ret = 0;
945 size_t size;
948 * A page got tagged dirty in DAX mapping? Something is seriously
949 * wrong.
951 if (WARN_ON(!radix_tree_exceptional_entry(entry)))
952 return -EIO;
954 xa_lock_irq(pages);
955 entry2 = get_unlocked_mapping_entry(mapping, index, &slot);
956 /* Entry got punched out / reallocated? */
957 if (!entry2 || WARN_ON_ONCE(!radix_tree_exceptional_entry(entry2)))
958 goto put_unlocked;
960 * Entry got reallocated elsewhere? No need to write it back. We have to
961 * compare pfns as we must not bail out due to a difference in the lock bit
962 * or the entry type.
964 if (dax_radix_pfn(entry2) != dax_radix_pfn(entry))
965 goto put_unlocked;
966 if (WARN_ON_ONCE(dax_is_empty_entry(entry) ||
967 dax_is_zero_entry(entry))) {
968 ret = -EIO;
969 goto put_unlocked;
972 /* Another fsync thread may have already written back this entry */
973 if (!radix_tree_tag_get(pages, index, PAGECACHE_TAG_TOWRITE))
974 goto put_unlocked;
975 /* Lock the entry to serialize with page faults */
976 entry = lock_slot(mapping, slot);
978 * We can clear the tag now but we have to be careful so that concurrent
979 * dax_writeback_one() calls for the same index cannot finish before we
980 * actually flush the caches. This is achieved as the calls will look
981 * at the entry only under the i_pages lock and once they do that
982 * they will see the entry locked and wait for it to unlock.
984 radix_tree_tag_clear(pages, index, PAGECACHE_TAG_TOWRITE);
985 xa_unlock_irq(pages);
988 * Even if dax_writeback_mapping_range() was given a wbc->range_start
989 * in the middle of a PMD, the 'index' we are given will be aligned to
990 * the start index of the PMD, as will the pfn we pull from 'entry'.
991 * This allows us to flush for PMD_SIZE and not have to worry about
992 * partial PMD writebacks.
994 pfn = dax_radix_pfn(entry);
995 size = PAGE_SIZE << dax_radix_order(entry);
997 dax_mapping_entry_mkclean(mapping, index, pfn);
998 dax_flush(dax_dev, page_address(pfn_to_page(pfn)), size);
1000 * After we have flushed the cache, we can clear the dirty tag. There
1001 * cannot be new dirty data in the pfn after the flush has completed as
1002 * the pfn mappings are writeprotected and fault waits for mapping
1003 * entry lock.
1005 xa_lock_irq(pages);
1006 radix_tree_tag_clear(pages, index, PAGECACHE_TAG_DIRTY);
1007 xa_unlock_irq(pages);
1008 trace_dax_writeback_one(mapping->host, index, size >> PAGE_SHIFT);
1009 put_locked_mapping_entry(mapping, index);
1010 return ret;
1012 put_unlocked:
1013 put_unlocked_mapping_entry(mapping, index, entry2);
1014 xa_unlock_irq(pages);
1015 return ret;
1019 * Flush the mapping to the persistent domain within the byte range of [start,
1020 * end]. This is required by data integrity operations to ensure file data is
1021 * on persistent storage prior to completion of the operation.
1023 int dax_writeback_mapping_range(struct address_space *mapping,
1024 struct block_device *bdev, struct writeback_control *wbc)
1026 struct inode *inode = mapping->host;
1027 pgoff_t start_index, end_index;
1028 pgoff_t indices[PAGEVEC_SIZE];
1029 struct dax_device *dax_dev;
1030 struct pagevec pvec;
1031 bool done = false;
1032 int i, ret = 0;
1034 if (WARN_ON_ONCE(inode->i_blkbits != PAGE_SHIFT))
1035 return -EIO;
1037 if (!mapping->nrexceptional || wbc->sync_mode != WB_SYNC_ALL)
1038 return 0;
1040 dax_dev = dax_get_by_host(bdev->bd_disk->disk_name);
1041 if (!dax_dev)
1042 return -EIO;
1044 start_index = wbc->range_start >> PAGE_SHIFT;
1045 end_index = wbc->range_end >> PAGE_SHIFT;
1047 trace_dax_writeback_range(inode, start_index, end_index);
1049 tag_pages_for_writeback(mapping, start_index, end_index);
1051 pagevec_init(&pvec);
1052 while (!done) {
1053 pvec.nr = find_get_entries_tag(mapping, start_index,
1054 PAGECACHE_TAG_TOWRITE, PAGEVEC_SIZE,
1055 pvec.pages, indices);
1057 if (pvec.nr == 0)
1058 break;
1060 for (i = 0; i < pvec.nr; i++) {
1061 if (indices[i] > end_index) {
1062 done = true;
1063 break;
1066 ret = dax_writeback_one(dax_dev, mapping, indices[i],
1067 pvec.pages[i]);
1068 if (ret < 0) {
1069 mapping_set_error(mapping, ret);
1070 goto out;
1073 start_index = indices[pvec.nr - 1] + 1;
1075 out:
1076 put_dax(dax_dev);
1077 trace_dax_writeback_range_done(inode, start_index, end_index);
1078 return (ret < 0 ? ret : 0);
1080 EXPORT_SYMBOL_GPL(dax_writeback_mapping_range);
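/*
 * Example caller (a sketch, not part of this file; my_dax_writepages is a
 * placeholder name): a filesystem wires this into its ->writepages() for DAX
 * inodes, much as ext4_dax_writepages() does, roughly:
 *
 *	static int my_dax_writepages(struct address_space *mapping,
 *				     struct writeback_control *wbc)
 *	{
 *		return dax_writeback_mapping_range(mapping,
 *				mapping->host->i_sb->s_bdev, wbc);
 *	}
 */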
1082 static sector_t dax_iomap_sector(struct iomap *iomap, loff_t pos)
1084 return (iomap->addr + (pos & PAGE_MASK) - iomap->offset) >> 9;
1087 static int dax_iomap_pfn(struct iomap *iomap, loff_t pos, size_t size,
1088 pfn_t *pfnp)
1090 const sector_t sector = dax_iomap_sector(iomap, pos);
1091 pgoff_t pgoff;
1092 int id, rc;
1093 long length;
1095 rc = bdev_dax_pgoff(iomap->bdev, sector, size, &pgoff);
1096 if (rc)
1097 return rc;
1098 id = dax_read_lock();
1099 length = dax_direct_access(iomap->dax_dev, pgoff, PHYS_PFN(size),
1100 NULL, pfnp);
1101 if (length < 0) {
1102 rc = length;
1103 goto out;
1105 rc = -EINVAL;
1106 if (PFN_PHYS(length) < size)
1107 goto out;
1108 if (pfn_t_to_pfn(*pfnp) & (PHYS_PFN(size)-1))
1109 goto out;
1110 /* For larger pages we need devmap */
1111 if (length > 1 && !pfn_t_devmap(*pfnp))
1112 goto out;
1113 rc = 0;
1114 out:
1115 dax_read_unlock(id);
1116 return rc;
1120 * The user has performed a load from a hole in the file. Allocating a new
1121 * page in the file would cause excessive storage usage for workloads with
1122 * sparse files. Instead we insert a read-only mapping of the 4k zero page.
1123 * If this page is ever written to we will re-fault and change the mapping to
1124 * point to real DAX storage instead.
1126 static vm_fault_t dax_load_hole(struct address_space *mapping, void *entry,
1127 struct vm_fault *vmf)
1129 struct inode *inode = mapping->host;
1130 unsigned long vaddr = vmf->address;
1131 pfn_t pfn = pfn_to_pfn_t(my_zero_pfn(vaddr));
1132 vm_fault_t ret;
1134 dax_insert_mapping_entry(mapping, vmf, entry, pfn, RADIX_DAX_ZERO_PAGE,
1135 false);
1136 ret = vmf_insert_mixed(vmf->vma, vaddr, pfn);
1137 trace_dax_load_hole(inode, vmf, ret);
1138 return ret;
1141 static bool dax_range_is_aligned(struct block_device *bdev,
1142 unsigned int offset, unsigned int length)
1144 unsigned short sector_size = bdev_logical_block_size(bdev);
1146 if (!IS_ALIGNED(offset, sector_size))
1147 return false;
1148 if (!IS_ALIGNED(length, sector_size))
1149 return false;
1151 return true;
1154 int __dax_zero_page_range(struct block_device *bdev,
1155 struct dax_device *dax_dev, sector_t sector,
1156 unsigned int offset, unsigned int size)
1158 if (dax_range_is_aligned(bdev, offset, size)) {
1159 sector_t start_sector = sector + (offset >> 9);
1161 return blkdev_issue_zeroout(bdev, start_sector,
1162 size >> 9, GFP_NOFS, 0);
1163 } else {
1164 pgoff_t pgoff;
1165 long rc, id;
1166 void *kaddr;
1168 rc = bdev_dax_pgoff(bdev, sector, PAGE_SIZE, &pgoff);
1169 if (rc)
1170 return rc;
1172 id = dax_read_lock();
1173 rc = dax_direct_access(dax_dev, pgoff, 1, &kaddr, NULL);
1174 if (rc < 0) {
1175 dax_read_unlock(id);
1176 return rc;
1178 memset(kaddr + offset, 0, size);
1179 dax_flush(dax_dev, kaddr + offset, size);
1180 dax_read_unlock(id);
1182 return 0;
1184 EXPORT_SYMBOL_GPL(__dax_zero_page_range);
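/*
 * Example caller (a sketch, not part of this file): partial-block zeroing on
 * truncate reaches this helper through iomap_zero_range(); for an IS_DAX()
 * inode, fs/iomap.c computes the sector from the iomap and ends up calling
 * roughly:
 *
 *	__dax_zero_page_range(iomap->bdev, iomap->dax_dev, sector,
 *			      offset, bytes);
 */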
1186 static loff_t
1187 dax_iomap_actor(struct inode *inode, loff_t pos, loff_t length, void *data,
1188 struct iomap *iomap)
1190 struct block_device *bdev = iomap->bdev;
1191 struct dax_device *dax_dev = iomap->dax_dev;
1192 struct iov_iter *iter = data;
1193 loff_t end = pos + length, done = 0;
1194 ssize_t ret = 0;
1195 size_t xfer;
1196 int id;
1198 if (iov_iter_rw(iter) == READ) {
1199 end = min(end, i_size_read(inode));
1200 if (pos >= end)
1201 return 0;
1203 if (iomap->type == IOMAP_HOLE || iomap->type == IOMAP_UNWRITTEN)
1204 return iov_iter_zero(min(length, end - pos), iter);
1207 if (WARN_ON_ONCE(iomap->type != IOMAP_MAPPED))
1208 return -EIO;
1211 * Write can allocate block for an area which has a hole page mapped
1212 * into page tables. We have to tear down these mappings so that data
1213 * written by write(2) is visible in mmap.
1215 if (iomap->flags & IOMAP_F_NEW) {
1216 invalidate_inode_pages2_range(inode->i_mapping,
1217 pos >> PAGE_SHIFT,
1218 (end - 1) >> PAGE_SHIFT);
1221 id = dax_read_lock();
1222 while (pos < end) {
1223 unsigned offset = pos & (PAGE_SIZE - 1);
1224 const size_t size = ALIGN(length + offset, PAGE_SIZE);
1225 const sector_t sector = dax_iomap_sector(iomap, pos);
1226 ssize_t map_len;
1227 pgoff_t pgoff;
1228 void *kaddr;
1230 if (fatal_signal_pending(current)) {
1231 ret = -EINTR;
1232 break;
1235 ret = bdev_dax_pgoff(bdev, sector, size, &pgoff);
1236 if (ret)
1237 break;
1239 map_len = dax_direct_access(dax_dev, pgoff, PHYS_PFN(size),
1240 &kaddr, NULL);
1241 if (map_len < 0) {
1242 ret = map_len;
1243 break;
1246 map_len = PFN_PHYS(map_len);
1247 kaddr += offset;
1248 map_len -= offset;
1249 if (map_len > end - pos)
1250 map_len = end - pos;
1253 * The userspace address for the memory copy has already been
1254 * validated via access_ok() in either vfs_read() or
1255 * vfs_write(), depending on which operation we are doing.
1257 if (iov_iter_rw(iter) == WRITE)
1258 xfer = dax_copy_from_iter(dax_dev, pgoff, kaddr,
1259 map_len, iter);
1260 else
1261 xfer = dax_copy_to_iter(dax_dev, pgoff, kaddr,
1262 map_len, iter);
1264 pos += xfer;
1265 length -= xfer;
1266 done += xfer;
1268 if (xfer == 0)
1269 ret = -EFAULT;
1270 if (xfer < map_len)
1271 break;
1273 dax_read_unlock(id);
1275 return done ? done : ret;
1279 * dax_iomap_rw - Perform I/O to a DAX file
1280 * @iocb: The control block for this I/O
1281 * @iter: The addresses to do I/O from or to
1282 * @ops: iomap ops passed from the file system
1284 * This function performs read and write operations to directly mapped
1285 * persistent memory. The caller needs to take care of read/write exclusion
1286 * and evicting any page cache pages in the region under I/O.
1288 ssize_t
1289 dax_iomap_rw(struct kiocb *iocb, struct iov_iter *iter,
1290 const struct iomap_ops *ops)
1292 struct address_space *mapping = iocb->ki_filp->f_mapping;
1293 struct inode *inode = mapping->host;
1294 loff_t pos = iocb->ki_pos, ret = 0, done = 0;
1295 unsigned flags = 0;
1297 if (iov_iter_rw(iter) == WRITE) {
1298 lockdep_assert_held_exclusive(&inode->i_rwsem);
1299 flags |= IOMAP_WRITE;
1300 } else {
1301 lockdep_assert_held(&inode->i_rwsem);
1304 if (iocb->ki_flags & IOCB_NOWAIT)
1305 flags |= IOMAP_NOWAIT;
1307 while (iov_iter_count(iter)) {
1308 ret = iomap_apply(inode, pos, iov_iter_count(iter), flags, ops,
1309 iter, dax_iomap_actor);
1310 if (ret <= 0)
1311 break;
1312 pos += ret;
1313 done += ret;
1316 iocb->ki_pos += done;
1317 return done ? done : ret;
1319 EXPORT_SYMBOL_GPL(dax_iomap_rw);
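/*
 * Example caller (a sketch, not part of this file; my_fs_iomap_ops is a
 * placeholder for the filesystem's iomap_ops): a DAX-aware ->read_iter()
 * looks roughly like this:
 *
 *	static ssize_t my_fs_dax_read_iter(struct kiocb *iocb, struct iov_iter *to)
 *	{
 *		struct inode *inode = file_inode(iocb->ki_filp);
 *		ssize_t ret;
 *
 *		inode_lock_shared(inode);
 *		ret = dax_iomap_rw(iocb, to, &my_fs_iomap_ops);
 *		inode_unlock_shared(inode);
 *
 *		file_accessed(iocb->ki_filp);
 *		return ret;
 *	}
 */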
1321 static vm_fault_t dax_fault_return(int error)
1323 if (error == 0)
1324 return VM_FAULT_NOPAGE;
1325 if (error == -ENOMEM)
1326 return VM_FAULT_OOM;
1327 return VM_FAULT_SIGBUS;
1331 * MAP_SYNC on a dax mapping guarantees dirty metadata is
1332 * flushed on write-faults (non-cow), but not read-faults.
1334 static bool dax_fault_is_synchronous(unsigned long flags,
1335 struct vm_area_struct *vma, struct iomap *iomap)
1337 return (flags & IOMAP_WRITE) && (vma->vm_flags & VM_SYNC)
1338 && (iomap->flags & IOMAP_F_DIRTY);
1341 static vm_fault_t dax_iomap_pte_fault(struct vm_fault *vmf, pfn_t *pfnp,
1342 int *iomap_errp, const struct iomap_ops *ops)
1344 struct vm_area_struct *vma = vmf->vma;
1345 struct address_space *mapping = vma->vm_file->f_mapping;
1346 struct inode *inode = mapping->host;
1347 unsigned long vaddr = vmf->address;
1348 loff_t pos = (loff_t)vmf->pgoff << PAGE_SHIFT;
1349 struct iomap iomap = { 0 };
1350 unsigned flags = IOMAP_FAULT;
1351 int error, major = 0;
1352 bool write = vmf->flags & FAULT_FLAG_WRITE;
1353 bool sync;
1354 vm_fault_t ret = 0;
1355 void *entry;
1356 pfn_t pfn;
1358 trace_dax_pte_fault(inode, vmf, ret);
1360 * Check whether offset isn't beyond end of file now. Caller is supposed
1361 * to hold locks serializing us with truncate / punch hole so this is
1362 * a reliable test.
1364 if (pos >= i_size_read(inode)) {
1365 ret = VM_FAULT_SIGBUS;
1366 goto out;
1369 if (write && !vmf->cow_page)
1370 flags |= IOMAP_WRITE;
1372 entry = grab_mapping_entry(mapping, vmf->pgoff, 0);
1373 if (IS_ERR(entry)) {
1374 ret = dax_fault_return(PTR_ERR(entry));
1375 goto out;
1379 * It is possible, particularly with mixed reads & writes to private
1380 * mappings, that we have raced with a PMD fault that overlaps with
1381 * the PTE we need to set up. If so just return and the fault will be
1382 * retried.
1384 if (pmd_trans_huge(*vmf->pmd) || pmd_devmap(*vmf->pmd)) {
1385 ret = VM_FAULT_NOPAGE;
1386 goto unlock_entry;
1390 * Note that we don't bother to use iomap_apply here: DAX requires
1391 * the file system block size to be equal to the page size, which means
1392 * that we never have to deal with more than a single extent here.
1394 error = ops->iomap_begin(inode, pos, PAGE_SIZE, flags, &iomap);
1395 if (iomap_errp)
1396 *iomap_errp = error;
1397 if (error) {
1398 ret = dax_fault_return(error);
1399 goto unlock_entry;
1401 if (WARN_ON_ONCE(iomap.offset + iomap.length < pos + PAGE_SIZE)) {
1402 error = -EIO; /* fs corruption? */
1403 goto error_finish_iomap;
1406 if (vmf->cow_page) {
1407 sector_t sector = dax_iomap_sector(&iomap, pos);
1409 switch (iomap.type) {
1410 case IOMAP_HOLE:
1411 case IOMAP_UNWRITTEN:
1412 clear_user_highpage(vmf->cow_page, vaddr);
1413 break;
1414 case IOMAP_MAPPED:
1415 error = copy_user_dax(iomap.bdev, iomap.dax_dev,
1416 sector, PAGE_SIZE, vmf->cow_page, vaddr);
1417 break;
1418 default:
1419 WARN_ON_ONCE(1);
1420 error = -EIO;
1421 break;
1424 if (error)
1425 goto error_finish_iomap;
1427 __SetPageUptodate(vmf->cow_page);
1428 ret = finish_fault(vmf);
1429 if (!ret)
1430 ret = VM_FAULT_DONE_COW;
1431 goto finish_iomap;
1434 sync = dax_fault_is_synchronous(flags, vma, &iomap);
1436 switch (iomap.type) {
1437 case IOMAP_MAPPED:
1438 if (iomap.flags & IOMAP_F_NEW) {
1439 count_vm_event(PGMAJFAULT);
1440 count_memcg_event_mm(vma->vm_mm, PGMAJFAULT);
1441 major = VM_FAULT_MAJOR;
1443 error = dax_iomap_pfn(&iomap, pos, PAGE_SIZE, &pfn);
1444 if (error < 0)
1445 goto error_finish_iomap;
1447 entry = dax_insert_mapping_entry(mapping, vmf, entry, pfn,
1448 0, write && !sync);
1451 * If we are doing synchronous page fault and inode needs fsync,
1452 * we can insert PTE into page tables only after that happens.
1453 * Skip insertion for now and return the pfn so that caller can
1454 * insert it after fsync is done.
1456 if (sync) {
1457 if (WARN_ON_ONCE(!pfnp)) {
1458 error = -EIO;
1459 goto error_finish_iomap;
1461 *pfnp = pfn;
1462 ret = VM_FAULT_NEEDDSYNC | major;
1463 goto finish_iomap;
1465 trace_dax_insert_mapping(inode, vmf, entry);
1466 if (write)
1467 ret = vmf_insert_mixed_mkwrite(vma, vaddr, pfn);
1468 else
1469 ret = vmf_insert_mixed(vma, vaddr, pfn);
1471 goto finish_iomap;
1472 case IOMAP_UNWRITTEN:
1473 case IOMAP_HOLE:
1474 if (!write) {
1475 ret = dax_load_hole(mapping, entry, vmf);
1476 goto finish_iomap;
1478 /*FALLTHRU*/
1479 default:
1480 WARN_ON_ONCE(1);
1481 error = -EIO;
1482 break;
1485 error_finish_iomap:
1486 ret = dax_fault_return(error);
1487 finish_iomap:
1488 if (ops->iomap_end) {
1489 int copied = PAGE_SIZE;
1491 if (ret & VM_FAULT_ERROR)
1492 copied = 0;
1494 * The fault is done by now and there's no way back (other
1495 * thread may be already happily using PTE we have installed).
1496 * Just ignore error from ->iomap_end since we cannot do much
1497 * with it.
1499 ops->iomap_end(inode, pos, PAGE_SIZE, copied, flags, &iomap);
1501 unlock_entry:
1502 put_locked_mapping_entry(mapping, vmf->pgoff);
1503 out:
1504 trace_dax_pte_fault_done(inode, vmf, ret);
1505 return ret | major;
1508 #ifdef CONFIG_FS_DAX_PMD
1509 static vm_fault_t dax_pmd_load_hole(struct vm_fault *vmf, struct iomap *iomap,
1510 void *entry)
1512 struct address_space *mapping = vmf->vma->vm_file->f_mapping;
1513 unsigned long pmd_addr = vmf->address & PMD_MASK;
1514 struct inode *inode = mapping->host;
1515 struct page *zero_page;
1516 void *ret = NULL;
1517 spinlock_t *ptl;
1518 pmd_t pmd_entry;
1519 pfn_t pfn;
1521 zero_page = mm_get_huge_zero_page(vmf->vma->vm_mm);
1523 if (unlikely(!zero_page))
1524 goto fallback;
1526 pfn = page_to_pfn_t(zero_page);
1527 ret = dax_insert_mapping_entry(mapping, vmf, entry, pfn,
1528 RADIX_DAX_PMD | RADIX_DAX_ZERO_PAGE, false);
1530 ptl = pmd_lock(vmf->vma->vm_mm, vmf->pmd);
1531 if (!pmd_none(*(vmf->pmd))) {
1532 spin_unlock(ptl);
1533 goto fallback;
1536 pmd_entry = mk_pmd(zero_page, vmf->vma->vm_page_prot);
1537 pmd_entry = pmd_mkhuge(pmd_entry);
1538 set_pmd_at(vmf->vma->vm_mm, pmd_addr, vmf->pmd, pmd_entry);
1539 spin_unlock(ptl);
1540 trace_dax_pmd_load_hole(inode, vmf, zero_page, ret);
1541 return VM_FAULT_NOPAGE;
1543 fallback:
1544 trace_dax_pmd_load_hole_fallback(inode, vmf, zero_page, ret);
1545 return VM_FAULT_FALLBACK;
1548 static vm_fault_t dax_iomap_pmd_fault(struct vm_fault *vmf, pfn_t *pfnp,
1549 const struct iomap_ops *ops)
1551 struct vm_area_struct *vma = vmf->vma;
1552 struct address_space *mapping = vma->vm_file->f_mapping;
1553 unsigned long pmd_addr = vmf->address & PMD_MASK;
1554 bool write = vmf->flags & FAULT_FLAG_WRITE;
1555 bool sync;
1556 unsigned int iomap_flags = (write ? IOMAP_WRITE : 0) | IOMAP_FAULT;
1557 struct inode *inode = mapping->host;
1558 vm_fault_t result = VM_FAULT_FALLBACK;
1559 struct iomap iomap = { 0 };
1560 pgoff_t max_pgoff, pgoff;
1561 void *entry;
1562 loff_t pos;
1563 int error;
1564 pfn_t pfn;
1567 * Check whether offset isn't beyond end of file now. Caller is
1568 * supposed to hold locks serializing us with truncate / punch hole so
1569 * this is a reliable test.
1571 pgoff = linear_page_index(vma, pmd_addr);
1572 max_pgoff = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
1574 trace_dax_pmd_fault(inode, vmf, max_pgoff, 0);
1577 * Make sure that the faulting address's PMD offset (color) matches
1578 * the PMD offset from the start of the file. This is necessary so
1579 * that a PMD range in the page table overlaps exactly with a PMD
1580 * range in the radix tree.
1582 if ((vmf->pgoff & PG_PMD_COLOUR) !=
1583 ((vmf->address >> PAGE_SHIFT) & PG_PMD_COLOUR))
1584 goto fallback;
1586 /* Fall back to PTEs if we're going to COW */
1587 if (write && !(vma->vm_flags & VM_SHARED))
1588 goto fallback;
1590 /* If the PMD would extend outside the VMA */
1591 if (pmd_addr < vma->vm_start)
1592 goto fallback;
1593 if ((pmd_addr + PMD_SIZE) > vma->vm_end)
1594 goto fallback;
1596 if (pgoff >= max_pgoff) {
1597 result = VM_FAULT_SIGBUS;
1598 goto out;
1601 /* If the PMD would extend beyond the file size */
1602 if ((pgoff | PG_PMD_COLOUR) >= max_pgoff)
1603 goto fallback;
1606 * grab_mapping_entry() will make sure we get a 2MiB empty entry, a
1607 * 2MiB zero page entry or a DAX PMD. If it can't (because a 4k page
1608 * is already in the tree, for instance), it will return -EEXIST and
1609 * we just fall back to 4k entries.
1611 entry = grab_mapping_entry(mapping, pgoff, RADIX_DAX_PMD);
1612 if (IS_ERR(entry))
1613 goto fallback;
1616 * It is possible, particularly with mixed reads & writes to private
1617 * mappings, that we have raced with a PTE fault that overlaps with
1618 * the PMD we need to set up. If so just return and the fault will be
1619 * retried.
1621 if (!pmd_none(*vmf->pmd) && !pmd_trans_huge(*vmf->pmd) &&
1622 !pmd_devmap(*vmf->pmd)) {
1623 result = 0;
1624 goto unlock_entry;
1628 * Note that we don't use iomap_apply here. We aren't doing I/O, only
1629 * setting up a mapping, so really we're using iomap_begin() as a way
1630 * to look up our filesystem block.
1632 pos = (loff_t)pgoff << PAGE_SHIFT;
1633 error = ops->iomap_begin(inode, pos, PMD_SIZE, iomap_flags, &iomap);
1634 if (error)
1635 goto unlock_entry;
1637 if (iomap.offset + iomap.length < pos + PMD_SIZE)
1638 goto finish_iomap;
1640 sync = dax_fault_is_synchronous(iomap_flags, vma, &iomap);
1642 switch (iomap.type) {
1643 case IOMAP_MAPPED:
1644 error = dax_iomap_pfn(&iomap, pos, PMD_SIZE, &pfn);
1645 if (error < 0)
1646 goto finish_iomap;
1648 entry = dax_insert_mapping_entry(mapping, vmf, entry, pfn,
1649 RADIX_DAX_PMD, write && !sync);
1652 * If we are doing synchronous page fault and inode needs fsync,
1653 * we can insert PMD into page tables only after that happens.
1654 * Skip insertion for now and return the pfn so that caller can
1655 * insert it after fsync is done.
1657 if (sync) {
1658 if (WARN_ON_ONCE(!pfnp))
1659 goto finish_iomap;
1660 *pfnp = pfn;
1661 result = VM_FAULT_NEEDDSYNC;
1662 goto finish_iomap;
1665 trace_dax_pmd_insert_mapping(inode, vmf, PMD_SIZE, pfn, entry);
1666 result = vmf_insert_pfn_pmd(vmf, pfn, write);
1667 break;
1668 case IOMAP_UNWRITTEN:
1669 case IOMAP_HOLE:
1670 if (WARN_ON_ONCE(write))
1671 break;
1672 result = dax_pmd_load_hole(vmf, &iomap, entry);
1673 break;
1674 default:
1675 WARN_ON_ONCE(1);
1676 break;
1679 finish_iomap:
1680 if (ops->iomap_end) {
1681 int copied = PMD_SIZE;
1683 if (result == VM_FAULT_FALLBACK)
1684 copied = 0;
1686 * The fault is done by now and there's no way back (other
1687 * thread may be already happily using PMD we have installed).
1688 * Just ignore error from ->iomap_end since we cannot do much
1689 * with it.
1691 ops->iomap_end(inode, pos, PMD_SIZE, copied, iomap_flags,
1692 &iomap);
1694 unlock_entry:
1695 put_locked_mapping_entry(mapping, pgoff);
1696 fallback:
1697 if (result == VM_FAULT_FALLBACK) {
1698 split_huge_pmd(vma, vmf->pmd, vmf->address);
1699 count_vm_event(THP_FAULT_FALLBACK);
1701 out:
1702 trace_dax_pmd_fault_done(inode, vmf, max_pgoff, result);
1703 return result;
1705 #else
1706 static vm_fault_t dax_iomap_pmd_fault(struct vm_fault *vmf, pfn_t *pfnp,
1707 const struct iomap_ops *ops)
1709 return VM_FAULT_FALLBACK;
1711 #endif /* CONFIG_FS_DAX_PMD */
1714 * dax_iomap_fault - handle a page fault on a DAX file
1715 * @vmf: The description of the fault
1716 * @pe_size: Size of the page to fault in
1717 * @pfnp: PFN to insert for synchronous faults if fsync is required
1718 * @iomap_errp: Storage for detailed error code in case of error
1719 * @ops: Iomap ops passed from the file system
1721 * When a page fault occurs, filesystems may call this helper in
1722 * their fault handler for DAX files. dax_iomap_fault() assumes the caller
1723 * has done all the necessary locking for page fault to proceed
1724 * successfully.
1726 vm_fault_t dax_iomap_fault(struct vm_fault *vmf, enum page_entry_size pe_size,
1727 pfn_t *pfnp, int *iomap_errp, const struct iomap_ops *ops)
1729 switch (pe_size) {
1730 case PE_SIZE_PTE:
1731 return dax_iomap_pte_fault(vmf, pfnp, iomap_errp, ops);
1732 case PE_SIZE_PMD:
1733 return dax_iomap_pmd_fault(vmf, pfnp, ops);
1734 default:
1735 return VM_FAULT_FALLBACK;
1738 EXPORT_SYMBOL_GPL(dax_iomap_fault);
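/*
 * Example caller (a sketch, not part of this file; my_fs_iomap_ops is a
 * placeholder): a filesystem's ->fault()/->huge_fault() handler typically
 * forwards to dax_iomap_fault() and, for MAP_SYNC mappings, finishes the
 * synchronous fault once the metadata has been fsynced:
 *
 *	static vm_fault_t my_fs_dax_fault(struct vm_fault *vmf,
 *					  enum page_entry_size pe_size)
 *	{
 *		pfn_t pfn;
 *		vm_fault_t ret;
 *
 *		ret = dax_iomap_fault(vmf, pe_size, &pfn, NULL, &my_fs_iomap_ops);
 *		if (ret & VM_FAULT_NEEDDSYNC)
 *			ret = dax_finish_sync_fault(vmf, pe_size, pfn);
 *		return ret;
 *	}
 */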
1741 * dax_insert_pfn_mkwrite - insert PTE or PMD entry into page tables
1742 * @vmf: The description of the fault
1743 * @pe_size: Size of entry to be inserted
1744 * @pfn: PFN to insert
1746 * This function inserts a writeable PTE or PMD entry into the page tables
1747 * for an mmapped DAX file. It takes care of marking the corresponding radix
1748 * tree entry as dirty as well.
1750 static vm_fault_t dax_insert_pfn_mkwrite(struct vm_fault *vmf,
1751 enum page_entry_size pe_size,
1752 pfn_t pfn)
1754 struct address_space *mapping = vmf->vma->vm_file->f_mapping;
1755 void *entry, **slot;
1756 pgoff_t index = vmf->pgoff;
1757 vm_fault_t ret;
1759 xa_lock_irq(&mapping->i_pages);
1760 entry = get_unlocked_mapping_entry(mapping, index, &slot);
1761 /* Did we race with someone splitting entry or so? */
1762 if (!entry ||
1763 (pe_size == PE_SIZE_PTE && !dax_is_pte_entry(entry)) ||
1764 (pe_size == PE_SIZE_PMD && !dax_is_pmd_entry(entry))) {
1765 put_unlocked_mapping_entry(mapping, index, entry);
1766 xa_unlock_irq(&mapping->i_pages);
1767 trace_dax_insert_pfn_mkwrite_no_entry(mapping->host, vmf,
1768 VM_FAULT_NOPAGE);
1769 return VM_FAULT_NOPAGE;
1771 radix_tree_tag_set(&mapping->i_pages, index, PAGECACHE_TAG_DIRTY);
1772 entry = lock_slot(mapping, slot);
1773 xa_unlock_irq(&mapping->i_pages);
1774 switch (pe_size) {
1775 case PE_SIZE_PTE:
1776 ret = vmf_insert_mixed_mkwrite(vmf->vma, vmf->address, pfn);
1777 break;
1778 #ifdef CONFIG_FS_DAX_PMD
1779 case PE_SIZE_PMD:
1780 ret = vmf_insert_pfn_pmd(vmf, pfn, FAULT_FLAG_WRITE);
1781 break;
1782 #endif
1783 default:
1784 ret = VM_FAULT_FALLBACK;
1786 put_locked_mapping_entry(mapping, index);
1787 trace_dax_insert_pfn_mkwrite(mapping->host, vmf, ret);
1788 return ret;
1792 * dax_finish_sync_fault - finish synchronous page fault
1793 * @vmf: The description of the fault
1794 * @pe_size: Size of entry to be inserted
1795 * @pfn: PFN to insert
1797 * This function ensures that the file range touched by the page fault is
1798 * stored persistently on the media and handles inserting the appropriate
1799 * page table entry.
1801 vm_fault_t dax_finish_sync_fault(struct vm_fault *vmf,
1802 enum page_entry_size pe_size, pfn_t pfn)
1804 int err;
1805 loff_t start = ((loff_t)vmf->pgoff) << PAGE_SHIFT;
1806 size_t len = 0;
1808 if (pe_size == PE_SIZE_PTE)
1809 len = PAGE_SIZE;
1810 else if (pe_size == PE_SIZE_PMD)
1811 len = PMD_SIZE;
1812 else
1813 WARN_ON_ONCE(1);
1814 err = vfs_fsync_range(vmf->vma->vm_file, start, start + len - 1, 1);
1815 if (err)
1816 return VM_FAULT_SIGBUS;
1817 return dax_insert_pfn_mkwrite(vmf, pe_size, pfn);
1819 EXPORT_SYMBOL_GPL(dax_finish_sync_fault);