1 /*
2 * fs/dax.c - Direct Access filesystem code
3 * Copyright (c) 2013-2014 Intel Corporation
4 * Author: Matthew Wilcox <matthew.r.wilcox@intel.com>
5 * Author: Ross Zwisler <ross.zwisler@linux.intel.com>
7 * This program is free software; you can redistribute it and/or modify it
8 * under the terms and conditions of the GNU General Public License,
9 * version 2, as published by the Free Software Foundation.
11 * This program is distributed in the hope it will be useful, but WITHOUT
12 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
14 * more details.
17 #include <linux/atomic.h>
18 #include <linux/blkdev.h>
19 #include <linux/buffer_head.h>
20 #include <linux/dax.h>
21 #include <linux/fs.h>
22 #include <linux/genhd.h>
23 #include <linux/highmem.h>
24 #include <linux/memcontrol.h>
25 #include <linux/mm.h>
26 #include <linux/mutex.h>
27 #include <linux/pagevec.h>
28 #include <linux/sched.h>
29 #include <linux/sched/signal.h>
30 #include <linux/uio.h>
31 #include <linux/vmstat.h>
32 #include <linux/pfn_t.h>
33 #include <linux/sizes.h>
34 #include <linux/mmu_notifier.h>
35 #include <linux/iomap.h>
36 #include "internal.h"
38 #define CREATE_TRACE_POINTS
39 #include <trace/events/fs_dax.h>
41 /* We choose 4096 entries - same as per-zone page wait tables */
42 #define DAX_WAIT_TABLE_BITS 12
43 #define DAX_WAIT_TABLE_ENTRIES (1 << DAX_WAIT_TABLE_BITS)
45 /* The 'colour' (ie low bits) within a PMD of a page offset. */
46 #define PG_PMD_COLOUR ((PMD_SIZE >> PAGE_SHIFT) - 1)
47 #define PG_PMD_NR (PMD_SIZE >> PAGE_SHIFT)
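/*
 * For example, with 4k pages and 2MiB PMDs (the usual x86-64 configuration)
 * PG_PMD_NR is 512 and PG_PMD_COLOUR is 0x1ff, so "index & ~PG_PMD_COLOUR"
 * rounds a page offset down to the start of its PMD.
 */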
49 static wait_queue_head_t wait_table[DAX_WAIT_TABLE_ENTRIES];
51 static int __init init_dax_wait_table(void)
53 int i;
55 for (i = 0; i < DAX_WAIT_TABLE_ENTRIES; i++)
56 init_waitqueue_head(wait_table + i);
57 return 0;
59 fs_initcall(init_dax_wait_table);
62 * We use the lowest available bit in an exceptional entry for locking, one bit for
63 * the entry size (PMD) and two more to tell us if the entry is a zero page or
64 * an empty entry that is just used for locking. In total four special bits.
66 * If the PMD bit isn't set the entry has size PAGE_SIZE, and if the ZERO_PAGE
67 * and EMPTY bits aren't set the entry is a normal DAX entry with a filesystem
68 * block allocation.
70 #define RADIX_DAX_SHIFT (RADIX_TREE_EXCEPTIONAL_SHIFT + 4)
71 #define RADIX_DAX_ENTRY_LOCK (1 << RADIX_TREE_EXCEPTIONAL_SHIFT)
72 #define RADIX_DAX_PMD (1 << (RADIX_TREE_EXCEPTIONAL_SHIFT + 1))
73 #define RADIX_DAX_ZERO_PAGE (1 << (RADIX_TREE_EXCEPTIONAL_SHIFT + 2))
74 #define RADIX_DAX_EMPTY (1 << (RADIX_TREE_EXCEPTIONAL_SHIFT + 3))
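/*
 * From low bits to high, an entry therefore carries: the radix tree
 * exceptional-entry marker, the ENTRY_LOCK bit, the PMD bit, the ZERO_PAGE
 * bit, the EMPTY bit, and finally the pfn starting at RADIX_DAX_SHIFT.
 * dax_radix_locked_entry() builds such an entry with the lock bit already
 * set, and dax_radix_pfn() recovers the pfn from it.
 */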
76 static unsigned long dax_radix_pfn(void *entry)
78 return (unsigned long)entry >> RADIX_DAX_SHIFT;
81 static void *dax_radix_locked_entry(unsigned long pfn, unsigned long flags)
83 return (void *)(RADIX_TREE_EXCEPTIONAL_ENTRY | flags |
84 (pfn << RADIX_DAX_SHIFT) | RADIX_DAX_ENTRY_LOCK);
87 static unsigned int dax_radix_order(void *entry)
89 if ((unsigned long)entry & RADIX_DAX_PMD)
90 return PMD_SHIFT - PAGE_SHIFT;
91 return 0;
94 static int dax_is_pmd_entry(void *entry)
96 return (unsigned long)entry & RADIX_DAX_PMD;
99 static int dax_is_pte_entry(void *entry)
101 return !((unsigned long)entry & RADIX_DAX_PMD);
104 static int dax_is_zero_entry(void *entry)
106 return (unsigned long)entry & RADIX_DAX_ZERO_PAGE;
109 static int dax_is_empty_entry(void *entry)
111 return (unsigned long)entry & RADIX_DAX_EMPTY;
115 * DAX radix tree locking
117 struct exceptional_entry_key {
118 struct address_space *mapping;
119 pgoff_t entry_start;
122 struct wait_exceptional_entry_queue {
123 wait_queue_entry_t wait;
124 struct exceptional_entry_key key;
127 static wait_queue_head_t *dax_entry_waitqueue(struct address_space *mapping,
128 pgoff_t index, void *entry, struct exceptional_entry_key *key)
130 unsigned long hash;
133 * If 'entry' is a PMD, align the 'index' that we use for the wait
134 * queue to the start of that PMD. This ensures that all offsets in
135 * the range covered by the PMD map to the same bit lock.
137 if (dax_is_pmd_entry(entry))
138 index &= ~PG_PMD_COLOUR;
140 key->mapping = mapping;
141 key->entry_start = index;
143 hash = hash_long((unsigned long)mapping ^ index, DAX_WAIT_TABLE_BITS);
144 return wait_table + hash;
147 static int wake_exceptional_entry_func(wait_queue_entry_t *wait, unsigned int mode,
148 int sync, void *keyp)
150 struct exceptional_entry_key *key = keyp;
151 struct wait_exceptional_entry_queue *ewait =
152 container_of(wait, struct wait_exceptional_entry_queue, wait);
154 if (key->mapping != ewait->key.mapping ||
155 key->entry_start != ewait->key.entry_start)
156 return 0;
157 return autoremove_wake_function(wait, mode, sync, NULL);
161 * @entry may no longer be the entry at the index in the mapping.
162 * The important information it's conveying is whether the entry at
163 * this index used to be a PMD entry.
165 static void dax_wake_mapping_entry_waiter(struct address_space *mapping,
166 pgoff_t index, void *entry, bool wake_all)
168 struct exceptional_entry_key key;
169 wait_queue_head_t *wq;
171 wq = dax_entry_waitqueue(mapping, index, entry, &key);
174 * Checking for locked entry and prepare_to_wait_exclusive() happens
175 * under the i_pages lock, ditto for entry handling in our callers.
176 * So at this point all tasks that could have seen our entry locked
177 * must be in the waitqueue and the following check will see them.
179 if (waitqueue_active(wq))
180 __wake_up(wq, TASK_NORMAL, wake_all ? 0 : 1, &key);
184 * Check whether the given slot is locked. Must be called with the i_pages
185 * lock held.
187 static inline int slot_locked(struct address_space *mapping, void **slot)
189 unsigned long entry = (unsigned long)
190 radix_tree_deref_slot_protected(slot, &mapping->i_pages.xa_lock);
191 return entry & RADIX_DAX_ENTRY_LOCK;
195 * Mark the given slot as locked. Must be called with the i_pages lock held.
197 static inline void *lock_slot(struct address_space *mapping, void **slot)
199 unsigned long entry = (unsigned long)
200 radix_tree_deref_slot_protected(slot, &mapping->i_pages.xa_lock);
202 entry |= RADIX_DAX_ENTRY_LOCK;
203 radix_tree_replace_slot(&mapping->i_pages, slot, (void *)entry);
204 return (void *)entry;
208 * Mark the given slot as unlocked. Must be called with the i_pages lock held.
210 static inline void *unlock_slot(struct address_space *mapping, void **slot)
212 unsigned long entry = (unsigned long)
213 radix_tree_deref_slot_protected(slot, &mapping->i_pages.xa_lock);
215 entry &= ~(unsigned long)RADIX_DAX_ENTRY_LOCK;
216 radix_tree_replace_slot(&mapping->i_pages, slot, (void *)entry);
217 return (void *)entry;
221 * Look up an entry in the radix tree and, if it is a locked exceptional
222 * entry, wait for it to become unlocked before returning it. The caller must
223 * call put_unlocked_mapping_entry() when they decide not to lock the entry or
224 * put_locked_mapping_entry() when they have locked the entry and now want to
225 * unlock it.
227 * Must be called with the i_pages lock held.
229 static void *__get_unlocked_mapping_entry(struct address_space *mapping,
230 pgoff_t index, void ***slotp, bool (*wait_fn)(void))
232 void *entry, **slot;
233 struct wait_exceptional_entry_queue ewait;
234 wait_queue_head_t *wq;
236 init_wait(&ewait.wait);
237 ewait.wait.func = wake_exceptional_entry_func;
239 for (;;) {
240 bool revalidate;
242 entry = __radix_tree_lookup(&mapping->i_pages, index, NULL,
243 &slot);
244 if (!entry ||
245 WARN_ON_ONCE(!radix_tree_exceptional_entry(entry)) ||
246 !slot_locked(mapping, slot)) {
247 if (slotp)
248 *slotp = slot;
249 return entry;
252 wq = dax_entry_waitqueue(mapping, index, entry, &ewait.key);
253 prepare_to_wait_exclusive(wq, &ewait.wait,
254 TASK_UNINTERRUPTIBLE);
255 xa_unlock_irq(&mapping->i_pages);
256 revalidate = wait_fn();
257 finish_wait(wq, &ewait.wait);
258 xa_lock_irq(&mapping->i_pages);
259 if (revalidate)
260 return ERR_PTR(-EAGAIN);
264 static bool entry_wait(void)
266 schedule();
268 * Never return an ERR_PTR() from
269 * __get_unlocked_mapping_entry(), just keep looping.
271 return false;
274 static void *get_unlocked_mapping_entry(struct address_space *mapping,
275 pgoff_t index, void ***slotp)
277 return __get_unlocked_mapping_entry(mapping, index, slotp, entry_wait);
280 static void unlock_mapping_entry(struct address_space *mapping, pgoff_t index)
282 void *entry, **slot;
284 xa_lock_irq(&mapping->i_pages);
285 entry = __radix_tree_lookup(&mapping->i_pages, index, NULL, &slot);
286 if (WARN_ON_ONCE(!entry || !radix_tree_exceptional_entry(entry) ||
287 !slot_locked(mapping, slot))) {
288 xa_unlock_irq(&mapping->i_pages);
289 return;
291 unlock_slot(mapping, slot);
292 xa_unlock_irq(&mapping->i_pages);
293 dax_wake_mapping_entry_waiter(mapping, index, entry, false);
296 static void put_locked_mapping_entry(struct address_space *mapping,
297 pgoff_t index)
299 unlock_mapping_entry(mapping, index);
303 * Called when we are done with the radix tree entry we looked up via
304 * get_unlocked_mapping_entry() and which we didn't lock in the end.
306 static void put_unlocked_mapping_entry(struct address_space *mapping,
307 pgoff_t index, void *entry)
309 if (!entry)
310 return;
312 /* We have to wake up next waiter for the radix tree entry lock */
313 dax_wake_mapping_entry_waiter(mapping, index, entry, false);
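/*
 * The usual pattern for these helpers in this file is, roughly:
 *
 *	xa_lock_irq(&mapping->i_pages);
 *	entry = get_unlocked_mapping_entry(mapping, index, &slot);
 *	...either bail out via put_unlocked_mapping_entry()...
 *	entry = lock_slot(mapping, slot);
 *	xa_unlock_irq(&mapping->i_pages);
 *	...work on the entry with only the entry lock held...
 *	put_locked_mapping_entry(mapping, index);
 */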
316 static unsigned long dax_entry_size(void *entry)
318 if (dax_is_zero_entry(entry))
319 return 0;
320 else if (dax_is_empty_entry(entry))
321 return 0;
322 else if (dax_is_pmd_entry(entry))
323 return PMD_SIZE;
324 else
325 return PAGE_SIZE;
328 static unsigned long dax_radix_end_pfn(void *entry)
330 return dax_radix_pfn(entry) + dax_entry_size(entry) / PAGE_SIZE;
334 * Iterate through all mapped pfns represented by an entry, i.e. skip
335 * 'empty' and 'zero' entries.
337 #define for_each_mapped_pfn(entry, pfn) \
338 for (pfn = dax_radix_pfn(entry); \
339 pfn < dax_radix_end_pfn(entry); pfn++)
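/*
 * A PMD entry thus yields PMD_SIZE / PAGE_SIZE consecutive pfns, a PTE entry
 * exactly one, and zero or empty entries none at all (their dax_entry_size()
 * is 0, so the loop body never runs).
 */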
342 * TODO: for reflink+dax we need a way to associate a single page with
343 * multiple address_space instances at different linear_page_index()
344 * offsets.
346 static void dax_associate_entry(void *entry, struct address_space *mapping,
347 struct vm_area_struct *vma, unsigned long address)
349 unsigned long size = dax_entry_size(entry), pfn, index;
350 int i = 0;
352 if (IS_ENABLED(CONFIG_FS_DAX_LIMITED))
353 return;
355 index = linear_page_index(vma, address & ~(size - 1));
356 for_each_mapped_pfn(entry, pfn) {
357 struct page *page = pfn_to_page(pfn);
359 WARN_ON_ONCE(page->mapping);
360 page->mapping = mapping;
361 page->index = index + i++;
365 static void dax_disassociate_entry(void *entry, struct address_space *mapping,
366 bool trunc)
368 unsigned long pfn;
370 if (IS_ENABLED(CONFIG_FS_DAX_LIMITED))
371 return;
373 for_each_mapped_pfn(entry, pfn) {
374 struct page *page = pfn_to_page(pfn);
376 WARN_ON_ONCE(trunc && page_ref_count(page) > 1);
377 WARN_ON_ONCE(page->mapping && page->mapping != mapping);
378 page->mapping = NULL;
379 page->index = 0;
383 static struct page *dax_busy_page(void *entry)
385 unsigned long pfn;
387 for_each_mapped_pfn(entry, pfn) {
388 struct page *page = pfn_to_page(pfn);
390 if (page_ref_count(page) > 1)
391 return page;
393 return NULL;
396 static bool entry_wait_revalidate(void)
398 rcu_read_unlock();
399 schedule();
400 rcu_read_lock();
403 * Tell __get_unlocked_mapping_entry() to take a break, we need
404 * to revalidate page->mapping after dropping locks
406 return true;
409 bool dax_lock_mapping_entry(struct page *page)
411 pgoff_t index;
412 struct inode *inode;
413 bool did_lock = false;
414 void *entry = NULL, **slot;
415 struct address_space *mapping;
417 rcu_read_lock();
418 for (;;) {
419 mapping = READ_ONCE(page->mapping);
421 if (!dax_mapping(mapping))
422 break;
425 * In the device-dax case there's no need to lock, a
426 * struct dev_pagemap pin is sufficient to keep the
427 * inode alive, and we assume we have dev_pagemap pin
428 * otherwise we would not have a valid pfn_to_page()
429 * translation.
431 inode = mapping->host;
432 if (S_ISCHR(inode->i_mode)) {
433 did_lock = true;
434 break;
437 xa_lock_irq(&mapping->i_pages);
438 if (mapping != page->mapping) {
439 xa_unlock_irq(&mapping->i_pages);
440 continue;
442 index = page->index;
444 entry = __get_unlocked_mapping_entry(mapping, index, &slot,
445 entry_wait_revalidate);
446 if (!entry) {
447 xa_unlock_irq(&mapping->i_pages);
448 break;
449 } else if (IS_ERR(entry)) {
450 xa_unlock_irq(&mapping->i_pages);
451 WARN_ON_ONCE(PTR_ERR(entry) != -EAGAIN);
452 continue;
454 lock_slot(mapping, slot);
455 did_lock = true;
456 xa_unlock_irq(&mapping->i_pages);
457 break;
459 rcu_read_unlock();
461 return did_lock;
464 void dax_unlock_mapping_entry(struct page *page)
466 struct address_space *mapping = page->mapping;
467 struct inode *inode = mapping->host;
469 if (S_ISCHR(inode->i_mode))
470 return;
472 unlock_mapping_entry(mapping, page->index);
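/*
 * dax_lock_mapping_entry() and dax_unlock_mapping_entry() give callers outside
 * the fault paths (such as memory error handling) a way to hold page->mapping
 * and page->index stable while they inspect a DAX page. For device-dax
 * (S_ISCHR) inodes the dev_pagemap pin is enough and no entry lock is taken.
 */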
476 * Find radix tree entry at given index. If it points to an exceptional entry,
477 * return it with the radix tree entry locked. If the radix tree doesn't
478 * contain given index, create an empty exceptional entry for the index and
479 * return with it locked.
481 * When requesting an entry with size RADIX_DAX_PMD, grab_mapping_entry() will
482 * either return that locked entry or will return an error. This error will
483 * happen if there are any 4k entries within the 2MiB range that we are
484 * requesting.
486 * We always favor 4k entries over 2MiB entries. There isn't a flow where we
487 * evict 4k entries in order to 'upgrade' them to a 2MiB entry. A 2MiB
488 * insertion will fail if it finds any 4k entries already in the tree, and a
489 * 4k insertion will cause an existing 2MiB entry to be unmapped and
490 * downgraded to 4k entries. This happens for both 2MiB huge zero pages as
491 * well as 2MiB empty entries.
493 * The exception to this downgrade path is for 2MiB DAX PMD entries that have
494 * real storage backing them. We will leave these real 2MiB DAX entries in
495 * the tree, and PTE writes will simply dirty the entire 2MiB DAX entry.
497 * Note: Unlike filemap_fault() we don't honor FAULT_FLAG_RETRY flags. For
498 * persistent memory the benefit is doubtful. We can add that later if we can
499 * show it helps.
501 static void *grab_mapping_entry(struct address_space *mapping, pgoff_t index,
502 unsigned long size_flag)
504 bool pmd_downgrade = false; /* splitting 2MiB entry into 4k entries? */
505 void *entry, **slot;
507 restart:
508 xa_lock_irq(&mapping->i_pages);
509 entry = get_unlocked_mapping_entry(mapping, index, &slot);
511 if (WARN_ON_ONCE(entry && !radix_tree_exceptional_entry(entry))) {
512 entry = ERR_PTR(-EIO);
513 goto out_unlock;
516 if (entry) {
517 if (size_flag & RADIX_DAX_PMD) {
518 if (dax_is_pte_entry(entry)) {
519 put_unlocked_mapping_entry(mapping, index,
520 entry);
521 entry = ERR_PTR(-EEXIST);
522 goto out_unlock;
524 } else { /* trying to grab a PTE entry */
525 if (dax_is_pmd_entry(entry) &&
526 (dax_is_zero_entry(entry) ||
527 dax_is_empty_entry(entry))) {
528 pmd_downgrade = true;
533 /* No entry for given index? Make sure radix tree is big enough. */
534 if (!entry || pmd_downgrade) {
535 int err;
537 if (pmd_downgrade) {
539 * Make sure 'entry' remains valid while we drop
540 * the i_pages lock.
542 entry = lock_slot(mapping, slot);
545 xa_unlock_irq(&mapping->i_pages);
547 * Besides huge zero pages the only other thing that gets
548 * downgraded are empty entries which don't need to be
549 * unmapped.
551 if (pmd_downgrade && dax_is_zero_entry(entry))
552 unmap_mapping_pages(mapping, index & ~PG_PMD_COLOUR,
553 PG_PMD_NR, false);
555 err = radix_tree_preload(
556 mapping_gfp_mask(mapping) & ~__GFP_HIGHMEM);
557 if (err) {
558 if (pmd_downgrade)
559 put_locked_mapping_entry(mapping, index);
560 return ERR_PTR(err);
562 xa_lock_irq(&mapping->i_pages);
564 if (!entry) {
566 * We needed to drop the i_pages lock while calling
567 * radix_tree_preload() and we didn't have an entry to
568 * lock. See if another thread inserted an entry at
569 * our index during this time.
571 entry = __radix_tree_lookup(&mapping->i_pages, index,
572 NULL, &slot);
573 if (entry) {
574 radix_tree_preload_end();
575 xa_unlock_irq(&mapping->i_pages);
576 goto restart;
580 if (pmd_downgrade) {
581 dax_disassociate_entry(entry, mapping, false);
582 radix_tree_delete(&mapping->i_pages, index);
583 mapping->nrexceptional--;
584 dax_wake_mapping_entry_waiter(mapping, index, entry,
585 true);
588 entry = dax_radix_locked_entry(0, size_flag | RADIX_DAX_EMPTY);
590 err = __radix_tree_insert(&mapping->i_pages, index,
591 dax_radix_order(entry), entry);
592 radix_tree_preload_end();
593 if (err) {
594 xa_unlock_irq(&mapping->i_pages);
596 * Our insertion of a DAX entry failed, most likely
597 * because we were inserting a PMD entry and it
598 * collided with a PTE sized entry at a different
599 * index in the PMD range. We haven't inserted
600 * anything into the radix tree and have no waiters to
601 * wake.
603 return ERR_PTR(err);
605 /* Good, we have inserted empty locked entry into the tree. */
606 mapping->nrexceptional++;
607 xa_unlock_irq(&mapping->i_pages);
608 return entry;
610 entry = lock_slot(mapping, slot);
611 out_unlock:
612 xa_unlock_irq(&mapping->i_pages);
613 return entry;
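/*
 * The fault handlers below use this as, roughly:
 *
 *	entry = grab_mapping_entry(mapping, vmf->pgoff, 0);
 *	if (IS_ERR(entry))
 *		return dax_fault_return(PTR_ERR(entry));
 *	...install the page table entry and update the radix tree entry...
 *	put_locked_mapping_entry(mapping, vmf->pgoff);
 */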
617 * dax_layout_busy_page - find first pinned page in @mapping
618 * @mapping: address space to scan for a page with ref count > 1
620 * DAX requires ZONE_DEVICE mapped pages. These pages are never
621 * 'onlined' to the page allocator so they are considered idle when
622 * page->count == 1. A filesystem uses this interface to determine if
623 * any page in the mapping is busy, i.e. for DMA, or other
624 * get_user_pages() usages.
626 * It is expected that the filesystem is holding locks to block the
627 * establishment of new mappings in this address_space. I.e. it expects
628 * to be able to run unmap_mapping_range() and subsequently not race
629 * mapping_mapped() becoming true.
631 struct page *dax_layout_busy_page(struct address_space *mapping)
633 pgoff_t indices[PAGEVEC_SIZE];
634 struct page *page = NULL;
635 struct pagevec pvec;
636 pgoff_t index, end;
637 unsigned i;
640 * In the 'limited' case get_user_pages() for dax is disabled.
642 if (IS_ENABLED(CONFIG_FS_DAX_LIMITED))
643 return NULL;
645 if (!dax_mapping(mapping) || !mapping_mapped(mapping))
646 return NULL;
648 pagevec_init(&pvec);
649 index = 0;
650 end = -1;
653 * If we race get_user_pages_fast() here either we'll see the
654 * elevated page count in the pagevec_lookup and wait, or
655 * get_user_pages_fast() will see that the page it took a reference
656 * against is no longer mapped in the page tables and bail to the
657 * get_user_pages() slow path. The slow path is protected by
658 * pte_lock() and pmd_lock(). New references are not taken without
659 * holding those locks, and unmap_mapping_range() will not zero the
660 * pte or pmd without holding the respective lock, so we are
661 * guaranteed to either see new references or prevent new
662 * references from being established.
664 unmap_mapping_range(mapping, 0, 0, 1);
666 while (index < end && pagevec_lookup_entries(&pvec, mapping, index,
667 min(end - index, (pgoff_t)PAGEVEC_SIZE),
668 indices)) {
669 for (i = 0; i < pagevec_count(&pvec); i++) {
670 struct page *pvec_ent = pvec.pages[i];
671 void *entry;
673 index = indices[i];
674 if (index >= end)
675 break;
677 if (WARN_ON_ONCE(
678 !radix_tree_exceptional_entry(pvec_ent)))
679 continue;
681 xa_lock_irq(&mapping->i_pages);
682 entry = get_unlocked_mapping_entry(mapping, index, NULL);
683 if (entry)
684 page = dax_busy_page(entry);
685 put_unlocked_mapping_entry(mapping, index, entry);
686 xa_unlock_irq(&mapping->i_pages);
687 if (page)
688 break;
692 * We don't expect normal struct page entries to exist in our
693 * tree, but we keep these pagevec calls so that this code is
694 * consistent with the common pattern for handling pagevecs
695 * throughout the kernel.
697 pagevec_remove_exceptionals(&pvec);
698 pagevec_release(&pvec);
699 index++;
701 if (page)
702 break;
704 return page;
706 EXPORT_SYMBOL_GPL(dax_layout_busy_page);
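/*
 * A filesystem typically calls dax_layout_busy_page() before truncating or
 * punching a hole in a DAX file and, if a page is returned, waits for its
 * reference count to drop back to one before freeing the underlying blocks
 * out from under an in-flight DMA operation.
 */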
708 static int __dax_invalidate_mapping_entry(struct address_space *mapping,
709 pgoff_t index, bool trunc)
711 int ret = 0;
712 void *entry;
713 struct radix_tree_root *pages = &mapping->i_pages;
715 xa_lock_irq(pages);
716 entry = get_unlocked_mapping_entry(mapping, index, NULL);
717 if (!entry || WARN_ON_ONCE(!radix_tree_exceptional_entry(entry)))
718 goto out;
719 if (!trunc &&
720 (radix_tree_tag_get(pages, index, PAGECACHE_TAG_DIRTY) ||
721 radix_tree_tag_get(pages, index, PAGECACHE_TAG_TOWRITE)))
722 goto out;
723 dax_disassociate_entry(entry, mapping, trunc);
724 radix_tree_delete(pages, index);
725 mapping->nrexceptional--;
726 ret = 1;
727 out:
728 put_unlocked_mapping_entry(mapping, index, entry);
729 xa_unlock_irq(pages);
730 return ret;
733 * Delete exceptional DAX entry at @index from @mapping. Wait for radix tree
734 * entry to get unlocked before deleting it.
736 int dax_delete_mapping_entry(struct address_space *mapping, pgoff_t index)
738 int ret = __dax_invalidate_mapping_entry(mapping, index, true);
741 * This gets called from the truncate / punch_hole path. As such, the caller
742 * must hold locks protecting against concurrent modifications of the
743 * radix tree (usually fs-private i_mmap_sem for writing). Since the
744 * caller has seen an exceptional entry for this index, we had better find it
745 * at that index as well...
747 WARN_ON_ONCE(!ret);
748 return ret;
752 * Invalidate exceptional DAX entry if it is clean.
754 int dax_invalidate_mapping_entry_sync(struct address_space *mapping,
755 pgoff_t index)
757 return __dax_invalidate_mapping_entry(mapping, index, false);
760 static int copy_user_dax(struct block_device *bdev, struct dax_device *dax_dev,
761 sector_t sector, size_t size, struct page *to,
762 unsigned long vaddr)
764 void *vto, *kaddr;
765 pgoff_t pgoff;
766 long rc;
767 int id;
769 rc = bdev_dax_pgoff(bdev, sector, size, &pgoff);
770 if (rc)
771 return rc;
773 id = dax_read_lock();
774 rc = dax_direct_access(dax_dev, pgoff, PHYS_PFN(size), &kaddr, NULL);
775 if (rc < 0) {
776 dax_read_unlock(id);
777 return rc;
779 vto = kmap_atomic(to);
780 copy_user_page(vto, (void __force *)kaddr, vaddr, to);
781 kunmap_atomic(vto);
782 dax_read_unlock(id);
783 return 0;
787 * By this point grab_mapping_entry() has ensured that we have a locked entry
788 * of the appropriate size so we don't have to worry about downgrading PMDs to
789 * PTEs. If we happen to be trying to insert a PTE and there is a PMD
790 * already in the tree, we will skip the insertion and just dirty the PMD as
791 * appropriate.
793 static void *dax_insert_mapping_entry(struct address_space *mapping,
794 struct vm_fault *vmf,
795 void *entry, pfn_t pfn_t,
796 unsigned long flags, bool dirty)
798 struct radix_tree_root *pages = &mapping->i_pages;
799 unsigned long pfn = pfn_t_to_pfn(pfn_t);
800 pgoff_t index = vmf->pgoff;
801 void *new_entry;
803 if (dirty)
804 __mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
806 if (dax_is_zero_entry(entry) && !(flags & RADIX_DAX_ZERO_PAGE)) {
807 /* we are replacing a zero page with block mapping */
808 if (dax_is_pmd_entry(entry))
809 unmap_mapping_pages(mapping, index & ~PG_PMD_COLOUR,
810 PG_PMD_NR, false);
811 else /* pte entry */
812 unmap_mapping_pages(mapping, vmf->pgoff, 1, false);
815 xa_lock_irq(pages);
816 new_entry = dax_radix_locked_entry(pfn, flags);
817 if (dax_entry_size(entry) != dax_entry_size(new_entry)) {
818 dax_disassociate_entry(entry, mapping, false);
819 dax_associate_entry(new_entry, mapping, vmf->vma, vmf->address);
822 if (dax_is_zero_entry(entry) || dax_is_empty_entry(entry)) {
824 * Only swap our new entry into the radix tree if the current
825 * entry is a zero page or an empty entry. If a normal PTE or
826 * PMD entry is already in the tree, we leave it alone. This
827 * means that if we are trying to insert a PTE and the
828 * existing entry is a PMD, we will just leave the PMD in the
829 * tree and dirty it if necessary.
831 struct radix_tree_node *node;
832 void **slot;
833 void *ret;
835 ret = __radix_tree_lookup(pages, index, &node, &slot);
836 WARN_ON_ONCE(ret != entry);
837 __radix_tree_replace(pages, node, slot,
838 new_entry, NULL);
839 entry = new_entry;
842 if (dirty)
843 radix_tree_tag_set(pages, index, PAGECACHE_TAG_DIRTY);
845 xa_unlock_irq(pages);
846 return entry;
849 static inline unsigned long
850 pgoff_address(pgoff_t pgoff, struct vm_area_struct *vma)
852 unsigned long address;
854 address = vma->vm_start + ((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
855 VM_BUG_ON_VMA(address < vma->vm_start || address >= vma->vm_end, vma);
856 return address;
859 /* Walk all mappings of a given index of a file and writeprotect them */
860 static void dax_mapping_entry_mkclean(struct address_space *mapping,
861 pgoff_t index, unsigned long pfn)
863 struct vm_area_struct *vma;
864 pte_t pte, *ptep = NULL;
865 pmd_t *pmdp = NULL;
866 spinlock_t *ptl;
868 i_mmap_lock_read(mapping);
869 vma_interval_tree_foreach(vma, &mapping->i_mmap, index, index) {
870 unsigned long address, start, end;
872 cond_resched();
874 if (!(vma->vm_flags & VM_SHARED))
875 continue;
877 address = pgoff_address(index, vma);
880 * Note because we provide start/end to follow_pte_pmd it will
881 * call mmu_notifier_invalidate_range_start() on our behalf
882 * before taking any lock.
884 if (follow_pte_pmd(vma->vm_mm, address, &start, &end, &ptep, &pmdp, &ptl))
885 continue;
888 * No need to call mmu_notifier_invalidate_range() as we are
889 * downgrading page table protection not changing it to point
890 * to a new page.
892 * See Documentation/vm/mmu_notifier.rst
894 if (pmdp) {
895 #ifdef CONFIG_FS_DAX_PMD
896 pmd_t pmd;
898 if (pfn != pmd_pfn(*pmdp))
899 goto unlock_pmd;
900 if (!pmd_dirty(*pmdp) && !pmd_write(*pmdp))
901 goto unlock_pmd;
903 flush_cache_page(vma, address, pfn);
904 pmd = pmdp_huge_clear_flush(vma, address, pmdp);
905 pmd = pmd_wrprotect(pmd);
906 pmd = pmd_mkclean(pmd);
907 set_pmd_at(vma->vm_mm, address, pmdp, pmd);
908 unlock_pmd:
909 #endif
910 spin_unlock(ptl);
911 } else {
912 if (pfn != pte_pfn(*ptep))
913 goto unlock_pte;
914 if (!pte_dirty(*ptep) && !pte_write(*ptep))
915 goto unlock_pte;
917 flush_cache_page(vma, address, pfn);
918 pte = ptep_clear_flush(vma, address, ptep);
919 pte = pte_wrprotect(pte);
920 pte = pte_mkclean(pte);
921 set_pte_at(vma->vm_mm, address, ptep, pte);
922 unlock_pte:
923 pte_unmap_unlock(ptep, ptl);
926 mmu_notifier_invalidate_range_end(vma->vm_mm, start, end);
928 i_mmap_unlock_read(mapping);
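/*
 * After this walk every mapping of the flushed pfn(s) is write-protected and
 * clean, so the next write to the range has to fault, re-dirty the radix tree
 * entry, and thereby guarantee another flush before the data is again treated
 * as persistent.
 */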
931 static int dax_writeback_one(struct dax_device *dax_dev,
932 struct address_space *mapping, pgoff_t index, void *entry)
934 struct radix_tree_root *pages = &mapping->i_pages;
935 void *entry2, **slot;
936 unsigned long pfn;
937 long ret = 0;
938 size_t size;
941 * A page got tagged dirty in DAX mapping? Something is seriously
942 * wrong.
944 if (WARN_ON(!radix_tree_exceptional_entry(entry)))
945 return -EIO;
947 xa_lock_irq(pages);
948 entry2 = get_unlocked_mapping_entry(mapping, index, &slot);
949 /* Entry got punched out / reallocated? */
950 if (!entry2 || WARN_ON_ONCE(!radix_tree_exceptional_entry(entry2)))
951 goto put_unlocked;
953 * Entry got reallocated elsewhere? No need to writeback. We have to
954 * compare pfns as we must not bail out due to difference in lockbit
955 * or entry type.
957 if (dax_radix_pfn(entry2) != dax_radix_pfn(entry))
958 goto put_unlocked;
959 if (WARN_ON_ONCE(dax_is_empty_entry(entry) ||
960 dax_is_zero_entry(entry))) {
961 ret = -EIO;
962 goto put_unlocked;
965 /* Another fsync thread may have already written back this entry */
966 if (!radix_tree_tag_get(pages, index, PAGECACHE_TAG_TOWRITE))
967 goto put_unlocked;
968 /* Lock the entry to serialize with page faults */
969 entry = lock_slot(mapping, slot);
971 * We can clear the tag now but we have to be careful so that concurrent
972 * dax_writeback_one() calls for the same index cannot finish before we
973 * actually flush the caches. This is achieved as the calls will look
974 * at the entry only under the i_pages lock and once they do that
975 * they will see the entry locked and wait for it to unlock.
977 radix_tree_tag_clear(pages, index, PAGECACHE_TAG_TOWRITE);
978 xa_unlock_irq(pages);
981 * Even if dax_writeback_mapping_range() was given a wbc->range_start
982 * in the middle of a PMD, the 'index' we are given will be aligned to
983 * the start index of the PMD, as will the pfn we pull from 'entry'.
984 * This allows us to flush for PMD_SIZE and not have to worry about
985 * partial PMD writebacks.
987 pfn = dax_radix_pfn(entry);
988 size = PAGE_SIZE << dax_radix_order(entry);
990 dax_mapping_entry_mkclean(mapping, index, pfn);
991 dax_flush(dax_dev, page_address(pfn_to_page(pfn)), size);
993 * After we have flushed the cache, we can clear the dirty tag. There
994 * cannot be new dirty data in the pfn after the flush has completed as
995 * the pfn mappings are writeprotected and fault waits for mapping
996 * entry lock.
998 xa_lock_irq(pages);
999 radix_tree_tag_clear(pages, index, PAGECACHE_TAG_DIRTY);
1000 xa_unlock_irq(pages);
1001 trace_dax_writeback_one(mapping->host, index, size >> PAGE_SHIFT);
1002 put_locked_mapping_entry(mapping, index);
1003 return ret;
1005 put_unlocked:
1006 put_unlocked_mapping_entry(mapping, index, entry2);
1007 xa_unlock_irq(pages);
1008 return ret;
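/*
 * Note the flush granularity above: for a PMD entry dax_radix_order() is
 * PMD_SHIFT - PAGE_SHIFT, so 'size' becomes PMD_SIZE and a single dax_flush()
 * covers the whole huge page.
 */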
1012 * Flush the mapping to the persistent domain within the byte range of [start,
1013 * end]. This is required by data integrity operations to ensure file data is
1014 * on persistent storage prior to completion of the operation.
1016 int dax_writeback_mapping_range(struct address_space *mapping,
1017 struct block_device *bdev, struct writeback_control *wbc)
1019 struct inode *inode = mapping->host;
1020 pgoff_t start_index, end_index;
1021 pgoff_t indices[PAGEVEC_SIZE];
1022 struct dax_device *dax_dev;
1023 struct pagevec pvec;
1024 bool done = false;
1025 int i, ret = 0;
1027 if (WARN_ON_ONCE(inode->i_blkbits != PAGE_SHIFT))
1028 return -EIO;
1030 if (!mapping->nrexceptional || wbc->sync_mode != WB_SYNC_ALL)
1031 return 0;
1033 dax_dev = dax_get_by_host(bdev->bd_disk->disk_name);
1034 if (!dax_dev)
1035 return -EIO;
1037 start_index = wbc->range_start >> PAGE_SHIFT;
1038 end_index = wbc->range_end >> PAGE_SHIFT;
1040 trace_dax_writeback_range(inode, start_index, end_index);
1042 tag_pages_for_writeback(mapping, start_index, end_index);
1044 pagevec_init(&pvec);
1045 while (!done) {
1046 pvec.nr = find_get_entries_tag(mapping, start_index,
1047 PAGECACHE_TAG_TOWRITE, PAGEVEC_SIZE,
1048 pvec.pages, indices);
1050 if (pvec.nr == 0)
1051 break;
1053 for (i = 0; i < pvec.nr; i++) {
1054 if (indices[i] > end_index) {
1055 done = true;
1056 break;
1059 ret = dax_writeback_one(dax_dev, mapping, indices[i],
1060 pvec.pages[i]);
1061 if (ret < 0) {
1062 mapping_set_error(mapping, ret);
1063 goto out;
1066 start_index = indices[pvec.nr - 1] + 1;
1068 out:
1069 put_dax(dax_dev);
1070 trace_dax_writeback_range_done(inode, start_index, end_index);
1071 return (ret < 0 ? ret : 0);
1073 EXPORT_SYMBOL_GPL(dax_writeback_mapping_range);
1075 static sector_t dax_iomap_sector(struct iomap *iomap, loff_t pos)
1077 return (iomap->addr + (pos & PAGE_MASK) - iomap->offset) >> 9;
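/*
 * iomap->addr is the byte address of this extent on the block device and
 * iomap->offset its byte offset in the file; shifting by 9 converts the
 * resulting byte address into 512-byte sectors.
 */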
1080 static int dax_iomap_pfn(struct iomap *iomap, loff_t pos, size_t size,
1081 pfn_t *pfnp)
1083 const sector_t sector = dax_iomap_sector(iomap, pos);
1084 pgoff_t pgoff;
1085 int id, rc;
1086 long length;
1088 rc = bdev_dax_pgoff(iomap->bdev, sector, size, &pgoff);
1089 if (rc)
1090 return rc;
1091 id = dax_read_lock();
1092 length = dax_direct_access(iomap->dax_dev, pgoff, PHYS_PFN(size),
1093 NULL, pfnp);
1094 if (length < 0) {
1095 rc = length;
1096 goto out;
1098 rc = -EINVAL;
1099 if (PFN_PHYS(length) < size)
1100 goto out;
1101 if (pfn_t_to_pfn(*pfnp) & (PHYS_PFN(size)-1))
1102 goto out;
1103 /* For larger pages we need devmap */
1104 if (length > 1 && !pfn_t_devmap(*pfnp))
1105 goto out;
1106 rc = 0;
1107 out:
1108 dax_read_unlock(id);
1109 return rc;
1113 * The user has performed a load from a hole in the file. Allocating a new
1114 * page in the file would cause excessive storage usage for workloads with
1115 * sparse files. Instead we insert a read-only mapping of the 4k zero page.
1116 * If this page is ever written to we will re-fault and change the mapping to
1117 * point to real DAX storage instead.
1119 static vm_fault_t dax_load_hole(struct address_space *mapping, void *entry,
1120 struct vm_fault *vmf)
1122 struct inode *inode = mapping->host;
1123 unsigned long vaddr = vmf->address;
1124 pfn_t pfn = pfn_to_pfn_t(my_zero_pfn(vaddr));
1125 vm_fault_t ret;
1127 dax_insert_mapping_entry(mapping, vmf, entry, pfn, RADIX_DAX_ZERO_PAGE,
1128 false);
1129 ret = vmf_insert_mixed(vmf->vma, vaddr, pfn);
1130 trace_dax_load_hole(inode, vmf, ret);
1131 return ret;
1134 static bool dax_range_is_aligned(struct block_device *bdev,
1135 unsigned int offset, unsigned int length)
1137 unsigned short sector_size = bdev_logical_block_size(bdev);
1139 if (!IS_ALIGNED(offset, sector_size))
1140 return false;
1141 if (!IS_ALIGNED(length, sector_size))
1142 return false;
1144 return true;
1147 int __dax_zero_page_range(struct block_device *bdev,
1148 struct dax_device *dax_dev, sector_t sector,
1149 unsigned int offset, unsigned int size)
1151 if (dax_range_is_aligned(bdev, offset, size)) {
1152 sector_t start_sector = sector + (offset >> 9);
1154 return blkdev_issue_zeroout(bdev, start_sector,
1155 size >> 9, GFP_NOFS, 0);
1156 } else {
1157 pgoff_t pgoff;
1158 long rc, id;
1159 void *kaddr;
1161 rc = bdev_dax_pgoff(bdev, sector, PAGE_SIZE, &pgoff);
1162 if (rc)
1163 return rc;
1165 id = dax_read_lock();
1166 rc = dax_direct_access(dax_dev, pgoff, 1, &kaddr, NULL);
1167 if (rc < 0) {
1168 dax_read_unlock(id);
1169 return rc;
1171 memset(kaddr + offset, 0, size);
1172 dax_flush(dax_dev, kaddr + offset, size);
1173 dax_read_unlock(id);
1175 return 0;
1177 EXPORT_SYMBOL_GPL(__dax_zero_page_range);
1179 static loff_t
1180 dax_iomap_actor(struct inode *inode, loff_t pos, loff_t length, void *data,
1181 struct iomap *iomap)
1183 struct block_device *bdev = iomap->bdev;
1184 struct dax_device *dax_dev = iomap->dax_dev;
1185 struct iov_iter *iter = data;
1186 loff_t end = pos + length, done = 0;
1187 ssize_t ret = 0;
1188 size_t xfer;
1189 int id;
1191 if (iov_iter_rw(iter) == READ) {
1192 end = min(end, i_size_read(inode));
1193 if (pos >= end)
1194 return 0;
1196 if (iomap->type == IOMAP_HOLE || iomap->type == IOMAP_UNWRITTEN)
1197 return iov_iter_zero(min(length, end - pos), iter);
1200 if (WARN_ON_ONCE(iomap->type != IOMAP_MAPPED))
1201 return -EIO;
1204 * A write can allocate a block for an area which has a hole page mapped
1205 * into page tables. We have to tear down these mappings so that data
1206 * written by write(2) is visible in mmap.
1208 if (iomap->flags & IOMAP_F_NEW) {
1209 invalidate_inode_pages2_range(inode->i_mapping,
1210 pos >> PAGE_SHIFT,
1211 (end - 1) >> PAGE_SHIFT);
1214 id = dax_read_lock();
1215 while (pos < end) {
1216 unsigned offset = pos & (PAGE_SIZE - 1);
1217 const size_t size = ALIGN(length + offset, PAGE_SIZE);
1218 const sector_t sector = dax_iomap_sector(iomap, pos);
1219 ssize_t map_len;
1220 pgoff_t pgoff;
1221 void *kaddr;
1223 if (fatal_signal_pending(current)) {
1224 ret = -EINTR;
1225 break;
1228 ret = bdev_dax_pgoff(bdev, sector, size, &pgoff);
1229 if (ret)
1230 break;
1232 map_len = dax_direct_access(dax_dev, pgoff, PHYS_PFN(size),
1233 &kaddr, NULL);
1234 if (map_len < 0) {
1235 ret = map_len;
1236 break;
1239 map_len = PFN_PHYS(map_len);
1240 kaddr += offset;
1241 map_len -= offset;
1242 if (map_len > end - pos)
1243 map_len = end - pos;
1246 * The userspace address for the memory copy has already been
1247 * validated via access_ok() in either vfs_read() or
1248 * vfs_write(), depending on which operation we are doing.
1250 if (iov_iter_rw(iter) == WRITE)
1251 xfer = dax_copy_from_iter(dax_dev, pgoff, kaddr,
1252 map_len, iter);
1253 else
1254 xfer = dax_copy_to_iter(dax_dev, pgoff, kaddr,
1255 map_len, iter);
1257 pos += xfer;
1258 length -= xfer;
1259 done += xfer;
1261 if (xfer == 0)
1262 ret = -EFAULT;
1263 if (xfer < map_len)
1264 break;
1266 dax_read_unlock(id);
1268 return done ? done : ret;
1272 * dax_iomap_rw - Perform I/O to a DAX file
1273 * @iocb: The control block for this I/O
1274 * @iter: The addresses to do I/O from or to
1275 * @ops: iomap ops passed from the file system
1277 * This function performs read and write operations to directly mapped
1278 * persistent memory. The caller needs to take care of read/write exclusion
1279 * and evicting any page cache pages in the region under I/O.
1281 ssize_t
1282 dax_iomap_rw(struct kiocb *iocb, struct iov_iter *iter,
1283 const struct iomap_ops *ops)
1285 struct address_space *mapping = iocb->ki_filp->f_mapping;
1286 struct inode *inode = mapping->host;
1287 loff_t pos = iocb->ki_pos, ret = 0, done = 0;
1288 unsigned flags = 0;
1290 if (iov_iter_rw(iter) == WRITE) {
1291 lockdep_assert_held_exclusive(&inode->i_rwsem);
1292 flags |= IOMAP_WRITE;
1293 } else {
1294 lockdep_assert_held(&inode->i_rwsem);
1297 while (iov_iter_count(iter)) {
1298 ret = iomap_apply(inode, pos, iov_iter_count(iter), flags, ops,
1299 iter, dax_iomap_actor);
1300 if (ret <= 0)
1301 break;
1302 pos += ret;
1303 done += ret;
1306 iocb->ki_pos += done;
1307 return done ? done : ret;
1309 EXPORT_SYMBOL_GPL(dax_iomap_rw);
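/*
 * A filesystem calls this from its ->read_iter/->write_iter once it has taken
 * the inode locks and decided the I/O targets a DAX inode, e.g.
 *
 *	ret = dax_iomap_rw(iocb, iter, &ext4_iomap_ops);
 *
 * where the iomap ops are the filesystem's own block-mapping callbacks.
 */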
1311 static vm_fault_t dax_fault_return(int error)
1313 if (error == 0)
1314 return VM_FAULT_NOPAGE;
1315 if (error == -ENOMEM)
1316 return VM_FAULT_OOM;
1317 return VM_FAULT_SIGBUS;
1321 * MAP_SYNC on a dax mapping guarantees dirty metadata is
1322 * flushed on write-faults (non-cow), but not read-faults.
1324 static bool dax_fault_is_synchronous(unsigned long flags,
1325 struct vm_area_struct *vma, struct iomap *iomap)
1327 return (flags & IOMAP_WRITE) && (vma->vm_flags & VM_SYNC)
1328 && (iomap->flags & IOMAP_F_DIRTY);
1331 static vm_fault_t dax_iomap_pte_fault(struct vm_fault *vmf, pfn_t *pfnp,
1332 int *iomap_errp, const struct iomap_ops *ops)
1334 struct vm_area_struct *vma = vmf->vma;
1335 struct address_space *mapping = vma->vm_file->f_mapping;
1336 struct inode *inode = mapping->host;
1337 unsigned long vaddr = vmf->address;
1338 loff_t pos = (loff_t)vmf->pgoff << PAGE_SHIFT;
1339 struct iomap iomap = { 0 };
1340 unsigned flags = IOMAP_FAULT;
1341 int error, major = 0;
1342 bool write = vmf->flags & FAULT_FLAG_WRITE;
1343 bool sync;
1344 vm_fault_t ret = 0;
1345 void *entry;
1346 pfn_t pfn;
1348 trace_dax_pte_fault(inode, vmf, ret);
1350 * Check whether the offset isn't beyond the end of the file now. The caller is supposed
1351 * to hold locks serializing us with truncate / punch hole so this is
1352 * a reliable test.
1354 if (pos >= i_size_read(inode)) {
1355 ret = VM_FAULT_SIGBUS;
1356 goto out;
1359 if (write && !vmf->cow_page)
1360 flags |= IOMAP_WRITE;
1362 entry = grab_mapping_entry(mapping, vmf->pgoff, 0);
1363 if (IS_ERR(entry)) {
1364 ret = dax_fault_return(PTR_ERR(entry));
1365 goto out;
1369 * It is possible, particularly with mixed reads & writes to private
1370 * mappings, that we have raced with a PMD fault that overlaps with
1371 * the PTE we need to set up. If so just return and the fault will be
1372 * retried.
1374 if (pmd_trans_huge(*vmf->pmd) || pmd_devmap(*vmf->pmd)) {
1375 ret = VM_FAULT_NOPAGE;
1376 goto unlock_entry;
1380 * Note that we don't bother to use iomap_apply here: DAX required
1381 * the file system block size to be equal to the page size, which means
1382 * that we never have to deal with more than a single extent here.
1384 error = ops->iomap_begin(inode, pos, PAGE_SIZE, flags, &iomap);
1385 if (iomap_errp)
1386 *iomap_errp = error;
1387 if (error) {
1388 ret = dax_fault_return(error);
1389 goto unlock_entry;
1391 if (WARN_ON_ONCE(iomap.offset + iomap.length < pos + PAGE_SIZE)) {
1392 error = -EIO; /* fs corruption? */
1393 goto error_finish_iomap;
1396 if (vmf->cow_page) {
1397 sector_t sector = dax_iomap_sector(&iomap, pos);
1399 switch (iomap.type) {
1400 case IOMAP_HOLE:
1401 case IOMAP_UNWRITTEN:
1402 clear_user_highpage(vmf->cow_page, vaddr);
1403 break;
1404 case IOMAP_MAPPED:
1405 error = copy_user_dax(iomap.bdev, iomap.dax_dev,
1406 sector, PAGE_SIZE, vmf->cow_page, vaddr);
1407 break;
1408 default:
1409 WARN_ON_ONCE(1);
1410 error = -EIO;
1411 break;
1414 if (error)
1415 goto error_finish_iomap;
1417 __SetPageUptodate(vmf->cow_page);
1418 ret = finish_fault(vmf);
1419 if (!ret)
1420 ret = VM_FAULT_DONE_COW;
1421 goto finish_iomap;
1424 sync = dax_fault_is_synchronous(flags, vma, &iomap);
1426 switch (iomap.type) {
1427 case IOMAP_MAPPED:
1428 if (iomap.flags & IOMAP_F_NEW) {
1429 count_vm_event(PGMAJFAULT);
1430 count_memcg_event_mm(vma->vm_mm, PGMAJFAULT);
1431 major = VM_FAULT_MAJOR;
1433 error = dax_iomap_pfn(&iomap, pos, PAGE_SIZE, &pfn);
1434 if (error < 0)
1435 goto error_finish_iomap;
1437 entry = dax_insert_mapping_entry(mapping, vmf, entry, pfn,
1438 0, write && !sync);
1441 * If we are doing synchronous page fault and inode needs fsync,
1442 * we can insert PTE into page tables only after that happens.
1443 * Skip insertion for now and return the pfn so that caller can
1444 * insert it after fsync is done.
1446 if (sync) {
1447 if (WARN_ON_ONCE(!pfnp)) {
1448 error = -EIO;
1449 goto error_finish_iomap;
1451 *pfnp = pfn;
1452 ret = VM_FAULT_NEEDDSYNC | major;
1453 goto finish_iomap;
1455 trace_dax_insert_mapping(inode, vmf, entry);
1456 if (write)
1457 ret = vmf_insert_mixed_mkwrite(vma, vaddr, pfn);
1458 else
1459 ret = vmf_insert_mixed(vma, vaddr, pfn);
1461 goto finish_iomap;
1462 case IOMAP_UNWRITTEN:
1463 case IOMAP_HOLE:
1464 if (!write) {
1465 ret = dax_load_hole(mapping, entry, vmf);
1466 goto finish_iomap;
1468 /*FALLTHRU*/
1469 default:
1470 WARN_ON_ONCE(1);
1471 error = -EIO;
1472 break;
1475 error_finish_iomap:
1476 ret = dax_fault_return(error);
1477 finish_iomap:
1478 if (ops->iomap_end) {
1479 int copied = PAGE_SIZE;
1481 if (ret & VM_FAULT_ERROR)
1482 copied = 0;
1484 * The fault is done by now and there's no way back (other
1485 * thread may be already happily using PTE we have installed).
1486 * Just ignore error from ->iomap_end since we cannot do much
1487 * with it.
1489 ops->iomap_end(inode, pos, PAGE_SIZE, copied, flags, &iomap);
1491 unlock_entry:
1492 put_locked_mapping_entry(mapping, vmf->pgoff);
1493 out:
1494 trace_dax_pte_fault_done(inode, vmf, ret);
1495 return ret | major;
1498 #ifdef CONFIG_FS_DAX_PMD
1499 static vm_fault_t dax_pmd_load_hole(struct vm_fault *vmf, struct iomap *iomap,
1500 void *entry)
1502 struct address_space *mapping = vmf->vma->vm_file->f_mapping;
1503 unsigned long pmd_addr = vmf->address & PMD_MASK;
1504 struct inode *inode = mapping->host;
1505 struct page *zero_page;
1506 void *ret = NULL;
1507 spinlock_t *ptl;
1508 pmd_t pmd_entry;
1509 pfn_t pfn;
1511 zero_page = mm_get_huge_zero_page(vmf->vma->vm_mm);
1513 if (unlikely(!zero_page))
1514 goto fallback;
1516 pfn = page_to_pfn_t(zero_page);
1517 ret = dax_insert_mapping_entry(mapping, vmf, entry, pfn,
1518 RADIX_DAX_PMD | RADIX_DAX_ZERO_PAGE, false);
1520 ptl = pmd_lock(vmf->vma->vm_mm, vmf->pmd);
1521 if (!pmd_none(*(vmf->pmd))) {
1522 spin_unlock(ptl);
1523 goto fallback;
1526 pmd_entry = mk_pmd(zero_page, vmf->vma->vm_page_prot);
1527 pmd_entry = pmd_mkhuge(pmd_entry);
1528 set_pmd_at(vmf->vma->vm_mm, pmd_addr, vmf->pmd, pmd_entry);
1529 spin_unlock(ptl);
1530 trace_dax_pmd_load_hole(inode, vmf, zero_page, ret);
1531 return VM_FAULT_NOPAGE;
1533 fallback:
1534 trace_dax_pmd_load_hole_fallback(inode, vmf, zero_page, ret);
1535 return VM_FAULT_FALLBACK;
1538 static vm_fault_t dax_iomap_pmd_fault(struct vm_fault *vmf, pfn_t *pfnp,
1539 const struct iomap_ops *ops)
1541 struct vm_area_struct *vma = vmf->vma;
1542 struct address_space *mapping = vma->vm_file->f_mapping;
1543 unsigned long pmd_addr = vmf->address & PMD_MASK;
1544 bool write = vmf->flags & FAULT_FLAG_WRITE;
1545 bool sync;
1546 unsigned int iomap_flags = (write ? IOMAP_WRITE : 0) | IOMAP_FAULT;
1547 struct inode *inode = mapping->host;
1548 vm_fault_t result = VM_FAULT_FALLBACK;
1549 struct iomap iomap = { 0 };
1550 pgoff_t max_pgoff, pgoff;
1551 void *entry;
1552 loff_t pos;
1553 int error;
1554 pfn_t pfn;
1557 * Check whether the offset isn't beyond the end of the file now. The caller is
1558 * supposed to hold locks serializing us with truncate / punch hole so
1559 * this is a reliable test.
1561 pgoff = linear_page_index(vma, pmd_addr);
1562 max_pgoff = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
1564 trace_dax_pmd_fault(inode, vmf, max_pgoff, 0);
1567 * Make sure that the faulting address's PMD offset (color) matches
1568 * the PMD offset from the start of the file. This is necessary so
1569 * that a PMD range in the page table overlaps exactly with a PMD
1570 * range in the radix tree.
1572 if ((vmf->pgoff & PG_PMD_COLOUR) !=
1573 ((vmf->address >> PAGE_SHIFT) & PG_PMD_COLOUR))
1574 goto fallback;
1576 /* Fall back to PTEs if we're going to COW */
1577 if (write && !(vma->vm_flags & VM_SHARED))
1578 goto fallback;
1580 /* If the PMD would extend outside the VMA */
1581 if (pmd_addr < vma->vm_start)
1582 goto fallback;
1583 if ((pmd_addr + PMD_SIZE) > vma->vm_end)
1584 goto fallback;
1586 if (pgoff >= max_pgoff) {
1587 result = VM_FAULT_SIGBUS;
1588 goto out;
1591 /* If the PMD would extend beyond the file size */
1592 if ((pgoff | PG_PMD_COLOUR) >= max_pgoff)
1593 goto fallback;
1596 * grab_mapping_entry() will make sure we get a 2MiB empty entry, a
1597 * 2MiB zero page entry or a DAX PMD. If it can't (because a 4k page
1598 * is already in the tree, for instance), it will return -EEXIST and
1599 * we just fall back to 4k entries.
1601 entry = grab_mapping_entry(mapping, pgoff, RADIX_DAX_PMD);
1602 if (IS_ERR(entry))
1603 goto fallback;
1606 * It is possible, particularly with mixed reads & writes to private
1607 * mappings, that we have raced with a PTE fault that overlaps with
1608 * the PMD we need to set up. If so just return and the fault will be
1609 * retried.
1611 if (!pmd_none(*vmf->pmd) && !pmd_trans_huge(*vmf->pmd) &&
1612 !pmd_devmap(*vmf->pmd)) {
1613 result = 0;
1614 goto unlock_entry;
1618 * Note that we don't use iomap_apply here. We aren't doing I/O, only
1619 * setting up a mapping, so really we're using iomap_begin() as a way
1620 * to look up our filesystem block.
1622 pos = (loff_t)pgoff << PAGE_SHIFT;
1623 error = ops->iomap_begin(inode, pos, PMD_SIZE, iomap_flags, &iomap);
1624 if (error)
1625 goto unlock_entry;
1627 if (iomap.offset + iomap.length < pos + PMD_SIZE)
1628 goto finish_iomap;
1630 sync = dax_fault_is_synchronous(iomap_flags, vma, &iomap);
1632 switch (iomap.type) {
1633 case IOMAP_MAPPED:
1634 error = dax_iomap_pfn(&iomap, pos, PMD_SIZE, &pfn);
1635 if (error < 0)
1636 goto finish_iomap;
1638 entry = dax_insert_mapping_entry(mapping, vmf, entry, pfn,
1639 RADIX_DAX_PMD, write && !sync);
1642 * If we are doing synchronous page fault and inode needs fsync,
1643 * we can insert PMD into page tables only after that happens.
1644 * Skip insertion for now and return the pfn so that caller can
1645 * insert it after fsync is done.
1647 if (sync) {
1648 if (WARN_ON_ONCE(!pfnp))
1649 goto finish_iomap;
1650 *pfnp = pfn;
1651 result = VM_FAULT_NEEDDSYNC;
1652 goto finish_iomap;
1655 trace_dax_pmd_insert_mapping(inode, vmf, PMD_SIZE, pfn, entry);
1656 result = vmf_insert_pfn_pmd(vma, vmf->address, vmf->pmd, pfn,
1657 write);
1658 break;
1659 case IOMAP_UNWRITTEN:
1660 case IOMAP_HOLE:
1661 if (WARN_ON_ONCE(write))
1662 break;
1663 result = dax_pmd_load_hole(vmf, &iomap, entry);
1664 break;
1665 default:
1666 WARN_ON_ONCE(1);
1667 break;
1670 finish_iomap:
1671 if (ops->iomap_end) {
1672 int copied = PMD_SIZE;
1674 if (result == VM_FAULT_FALLBACK)
1675 copied = 0;
1677 * The fault is done by now and there's no way back (other
1678 * thread may be already happily using PMD we have installed).
1679 * Just ignore error from ->iomap_end since we cannot do much
1680 * with it.
1682 ops->iomap_end(inode, pos, PMD_SIZE, copied, iomap_flags,
1683 &iomap);
1685 unlock_entry:
1686 put_locked_mapping_entry(mapping, pgoff);
1687 fallback:
1688 if (result == VM_FAULT_FALLBACK) {
1689 split_huge_pmd(vma, vmf->pmd, vmf->address);
1690 count_vm_event(THP_FAULT_FALLBACK);
1692 out:
1693 trace_dax_pmd_fault_done(inode, vmf, max_pgoff, result);
1694 return result;
1696 #else
1697 static vm_fault_t dax_iomap_pmd_fault(struct vm_fault *vmf, pfn_t *pfnp,
1698 const struct iomap_ops *ops)
1700 return VM_FAULT_FALLBACK;
1702 #endif /* CONFIG_FS_DAX_PMD */
1705 * dax_iomap_fault - handle a page fault on a DAX file
1706 * @vmf: The description of the fault
1707 * @pe_size: Size of the page to fault in
1708 * @pfnp: PFN to insert for synchronous faults if fsync is required
1709 * @iomap_errp: Storage for detailed error code in case of error
1710 * @ops: Iomap ops passed from the file system
1712 * When a page fault occurs, filesystems may call this helper in
1713 * their fault handler for DAX files. dax_iomap_fault() assumes the caller
1714 * has done all the necessary locking for page fault to proceed
1715 * successfully.
1717 vm_fault_t dax_iomap_fault(struct vm_fault *vmf, enum page_entry_size pe_size,
1718 pfn_t *pfnp, int *iomap_errp, const struct iomap_ops *ops)
1720 switch (pe_size) {
1721 case PE_SIZE_PTE:
1722 return dax_iomap_pte_fault(vmf, pfnp, iomap_errp, ops);
1723 case PE_SIZE_PMD:
1724 return dax_iomap_pmd_fault(vmf, pfnp, ops);
1725 default:
1726 return VM_FAULT_FALLBACK;
1729 EXPORT_SYMBOL_GPL(dax_iomap_fault);
1732 * dax_insert_pfn_mkwrite - insert PTE or PMD entry into page tables
1733 * @vmf: The description of the fault
1734 * @pe_size: Size of entry to be inserted
1735 * @pfn: PFN to insert
1737 * This function inserts a writeable PTE or PMD entry into the page tables for
1738 * an mmapped DAX file. It takes care of marking the corresponding radix tree
1739 * entry as dirty as well.
1741 static vm_fault_t dax_insert_pfn_mkwrite(struct vm_fault *vmf,
1742 enum page_entry_size pe_size,
1743 pfn_t pfn)
1745 struct address_space *mapping = vmf->vma->vm_file->f_mapping;
1746 void *entry, **slot;
1747 pgoff_t index = vmf->pgoff;
1748 vm_fault_t ret;
1750 xa_lock_irq(&mapping->i_pages);
1751 entry = get_unlocked_mapping_entry(mapping, index, &slot);
1752 /* Did we race with someone splitting entry or so? */
1753 if (!entry ||
1754 (pe_size == PE_SIZE_PTE && !dax_is_pte_entry(entry)) ||
1755 (pe_size == PE_SIZE_PMD && !dax_is_pmd_entry(entry))) {
1756 put_unlocked_mapping_entry(mapping, index, entry);
1757 xa_unlock_irq(&mapping->i_pages);
1758 trace_dax_insert_pfn_mkwrite_no_entry(mapping->host, vmf,
1759 VM_FAULT_NOPAGE);
1760 return VM_FAULT_NOPAGE;
1762 radix_tree_tag_set(&mapping->i_pages, index, PAGECACHE_TAG_DIRTY);
1763 entry = lock_slot(mapping, slot);
1764 xa_unlock_irq(&mapping->i_pages);
1765 switch (pe_size) {
1766 case PE_SIZE_PTE:
1767 ret = vmf_insert_mixed_mkwrite(vmf->vma, vmf->address, pfn);
1768 break;
1769 #ifdef CONFIG_FS_DAX_PMD
1770 case PE_SIZE_PMD:
1771 ret = vmf_insert_pfn_pmd(vmf->vma, vmf->address, vmf->pmd,
1772 pfn, true);
1773 break;
1774 #endif
1775 default:
1776 ret = VM_FAULT_FALLBACK;
1778 put_locked_mapping_entry(mapping, index);
1779 trace_dax_insert_pfn_mkwrite(mapping->host, vmf, ret);
1780 return ret;
1784 * dax_finish_sync_fault - finish synchronous page fault
1785 * @vmf: The description of the fault
1786 * @pe_size: Size of entry to be inserted
1787 * @pfn: PFN to insert
1789 * This function ensures that the file range touched by the page fault is
1790 * stored persistently on the media and handles inserting of appropriate page
1791 * table entry.
1793 vm_fault_t dax_finish_sync_fault(struct vm_fault *vmf,
1794 enum page_entry_size pe_size, pfn_t pfn)
1796 int err;
1797 loff_t start = ((loff_t)vmf->pgoff) << PAGE_SHIFT;
1798 size_t len = 0;
1800 if (pe_size == PE_SIZE_PTE)
1801 len = PAGE_SIZE;
1802 else if (pe_size == PE_SIZE_PMD)
1803 len = PMD_SIZE;
1804 else
1805 WARN_ON_ONCE(1);
1806 err = vfs_fsync_range(vmf->vma->vm_file, start, start + len - 1, 1);
1807 if (err)
1808 return VM_FAULT_SIGBUS;
1809 return dax_insert_pfn_mkwrite(vmf, pe_size, pfn);
1811 EXPORT_SYMBOL_GPL(dax_finish_sync_fault);
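/*
 * Together with dax_iomap_fault() this implements the MAP_SYNC fault protocol:
 * a filesystem's fault handler does, roughly,
 *
 *	result = dax_iomap_fault(vmf, pe_size, &pfn, NULL, ops);
 *	if (result & VM_FAULT_NEEDDSYNC)
 *		result = dax_finish_sync_fault(vmf, pe_size, pfn);
 *
 * so that dirty metadata is fsynced before a writeable page table entry
 * becomes visible to userspace.
 */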