// SPDX-License-Identifier: GPL-2.0
/*
 * dax: direct host memory access
 * Copyright (C) 2020 Red Hat, Inc.
 */

#include "fuse_i.h"

#include <linux/delay.h>
#include <linux/dax.h>
#include <linux/uio.h>
#include <linux/pagemap.h>
#include <linux/pfn_t.h>
#include <linux/iomap.h>
#include <linux/interval_tree.h>
/*
 * Default memory range size.  A power of 2 so it agrees with common FUSE_INIT
 * map_alignment values 4KB and 64KB.
 */
#define FUSE_DAX_SHIFT	21
#define FUSE_DAX_SZ	(1 << FUSE_DAX_SHIFT)
#define FUSE_DAX_PAGES	(FUSE_DAX_SZ / PAGE_SIZE)

/* Number of ranges reclaimer will try to free in one invocation */
#define FUSE_DAX_RECLAIM_CHUNK		(10)

/*
 * DAX memory reclaim threshold in percentage of total ranges. When the
 * number of free ranges drops below this threshold, reclaim can trigger.
 * Default is 20%.
 */
#define FUSE_DAX_RECLAIM_THRESHOLD	(20)
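/*
 * Note (summary of the constants above): with FUSE_DAX_SHIFT of 21, each
 * memory range covers 2 MiB of the DAX window. Reclaim is kicked once the
 * number of free ranges drops below FUSE_DAX_RECLAIM_THRESHOLD percent of
 * the total, see __kick_dmap_free_worker() below.
 */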
/** Translation information for file offsets to DAX window offsets */
struct fuse_dax_mapping {
	/* Pointer to inode where this memory range is mapped */
	struct inode *inode;

	/* Will connect in fcd->free_ranges to keep track of free memory */
	struct list_head list;

	/* For interval tree in file/inode */
	struct interval_tree_node itn;

	/* Will connect in fc->busy_ranges to keep track of busy memory */
	struct list_head busy_list;

	/** Position in DAX window */
	u64 window_offset;

	/** Length of mapping, in bytes */
	loff_t length;

	/* Is this mapping read-only or read-write */
	bool writable;

	/* reference count when the mapping is used by dax iomap. */
	refcount_t refcnt;
};
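/*
 * Note on refcnt: it is initialized to 1 when a range is created (see
 * fuse_dax_mem_range_init()) and incremented in fuse_fill_iomap() while an
 * iomap operation is using the range; the reclaim paths skip any dmap whose
 * refcount is above 1.
 */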
/* Per-inode dax map */
struct fuse_inode_dax {
	/* Semaphore to protect modifications to the dmap tree */
	struct rw_semaphore sem;

	/* Sorted rb tree of struct fuse_dax_mapping elements */
	struct rb_root_cached tree;
	unsigned long nr;
};
struct fuse_conn_dax {
	/* DAX device */
	struct dax_device *dev;

	/* Lock protecting accesses to members of this structure */
	spinlock_t lock;

	/* List of memory ranges which are busy */
	unsigned long nr_busy_ranges;
	struct list_head busy_ranges;

	/* Worker to free up memory ranges */
	struct delayed_work free_work;

	/* Wait queue for a dax range to become free */
	wait_queue_head_t range_waitq;

	/* DAX Window Free Ranges */
	long nr_free_ranges;
	struct list_head free_ranges;

	unsigned long nr_ranges;
};
static inline struct fuse_dax_mapping *
node_to_dmap(struct interval_tree_node *node)
{
	if (!node)
		return NULL;

	return container_of(node, struct fuse_dax_mapping, itn);
}
static struct fuse_dax_mapping *
alloc_dax_mapping_reclaim(struct fuse_conn_dax *fcd, struct inode *inode);

static void
__kick_dmap_free_worker(struct fuse_conn_dax *fcd, unsigned long delay_ms)
{
	unsigned long free_threshold;

	/* If the number of free ranges is below the threshold, start reclaim */
	free_threshold = max_t(unsigned long, fcd->nr_ranges * FUSE_DAX_RECLAIM_THRESHOLD / 100,
			       1);
	if (fcd->nr_free_ranges < free_threshold)
		queue_delayed_work(system_long_wq, &fcd->free_work,
				   msecs_to_jiffies(delay_ms));
}
static void kick_dmap_free_worker(struct fuse_conn_dax *fcd,
				  unsigned long delay_ms)
{
	spin_lock(&fcd->lock);
	__kick_dmap_free_worker(fcd, delay_ms);
	spin_unlock(&fcd->lock);
}
static struct fuse_dax_mapping *alloc_dax_mapping(struct fuse_conn_dax *fcd)
{
	struct fuse_dax_mapping *dmap;

	spin_lock(&fcd->lock);
	dmap = list_first_entry_or_null(&fcd->free_ranges,
					struct fuse_dax_mapping, list);
	if (dmap) {
		list_del_init(&dmap->list);
		WARN_ON(fcd->nr_free_ranges <= 0);
		fcd->nr_free_ranges--;
	}
	__kick_dmap_free_worker(fcd, 0);
	spin_unlock(&fcd->lock);

	return dmap;
}
/* This assumes fcd->lock is held */
static void __dmap_remove_busy_list(struct fuse_conn_dax *fcd,
				    struct fuse_dax_mapping *dmap)
{
	list_del_init(&dmap->busy_list);
	WARN_ON(fcd->nr_busy_ranges == 0);
	fcd->nr_busy_ranges--;
}
static void dmap_remove_busy_list(struct fuse_conn_dax *fcd,
				  struct fuse_dax_mapping *dmap)
{
	spin_lock(&fcd->lock);
	__dmap_remove_busy_list(fcd, dmap);
	spin_unlock(&fcd->lock);
}
/* This assumes fcd->lock is held */
static void __dmap_add_to_free_pool(struct fuse_conn_dax *fcd,
				    struct fuse_dax_mapping *dmap)
{
	list_add_tail(&dmap->list, &fcd->free_ranges);
	fcd->nr_free_ranges++;
	wake_up(&fcd->range_waitq);
}
static void dmap_add_to_free_pool(struct fuse_conn_dax *fcd,
				  struct fuse_dax_mapping *dmap)
{
	/* Return fuse_dax_mapping to free list */
	spin_lock(&fcd->lock);
	__dmap_add_to_free_pool(fcd, dmap);
	spin_unlock(&fcd->lock);
}
static int fuse_setup_one_mapping(struct inode *inode, unsigned long start_idx,
				  struct fuse_dax_mapping *dmap, bool writable,
				  bool upgrade)
{
	struct fuse_mount *fm = get_fuse_mount(inode);
	struct fuse_conn_dax *fcd = fm->fc->dax;
	struct fuse_inode *fi = get_fuse_inode(inode);
	struct fuse_setupmapping_in inarg;
	loff_t offset = start_idx << FUSE_DAX_SHIFT;
	FUSE_ARGS(args);
	ssize_t err;

	WARN_ON(fcd->nr_free_ranges < 0);

	/* Ask fuse daemon to setup mapping */
	memset(&inarg, 0, sizeof(inarg));
	inarg.foffset = offset;
	inarg.moffset = dmap->window_offset;
	inarg.len = FUSE_DAX_SZ;
	inarg.flags |= FUSE_SETUPMAPPING_FLAG_READ;
	if (writable)
		inarg.flags |= FUSE_SETUPMAPPING_FLAG_WRITE;
	args.opcode = FUSE_SETUPMAPPING;
	args.nodeid = fi->nodeid;
	args.in_numargs = 1;
	args.in_args[0].size = sizeof(inarg);
	args.in_args[0].value = &inarg;
	err = fuse_simple_request(fm, &args);
	if (err < 0)
		return err;
	dmap->writable = writable;
	if (!upgrade) {
		/*
		 * We don't take a reference on inode. inode is valid right now
		 * and when inode is going away, cleanup logic should first
		 * cleanup dmap entries.
		 */
		dmap->inode = inode;
		dmap->itn.start = dmap->itn.last = start_idx;
		/* Protected by fi->dax->sem */
		interval_tree_insert(&dmap->itn, &fi->dax->tree);
		fi->dax->nr++;
		spin_lock(&fcd->lock);
		list_add_tail(&dmap->busy_list, &fcd->busy_ranges);
		fcd->nr_busy_ranges++;
		spin_unlock(&fcd->lock);
	}
	return 0;
}
static int fuse_send_removemapping(struct inode *inode,
				   struct fuse_removemapping_in *inargp,
				   struct fuse_removemapping_one *remove_one)
{
	struct fuse_inode *fi = get_fuse_inode(inode);
	struct fuse_mount *fm = get_fuse_mount(inode);
	FUSE_ARGS(args);

	args.opcode = FUSE_REMOVEMAPPING;
	args.nodeid = fi->nodeid;
	args.in_numargs = 2;
	args.in_args[0].size = sizeof(*inargp);
	args.in_args[0].value = inargp;
	args.in_args[1].size = inargp->count * sizeof(*remove_one);
	args.in_args[1].value = remove_one;
	return fuse_simple_request(fm, &args);
}
static int dmap_removemapping_list(struct inode *inode, unsigned int num,
				   struct list_head *to_remove)
{
	struct fuse_removemapping_one *remove_one, *ptr;
	struct fuse_removemapping_in inarg;
	struct fuse_dax_mapping *dmap;
	int ret, i = 0, nr_alloc;

	nr_alloc = min_t(unsigned int, num, FUSE_REMOVEMAPPING_MAX_ENTRY);
	remove_one = kmalloc_array(nr_alloc, sizeof(*remove_one), GFP_NOFS);
	if (!remove_one)
		return -ENOMEM;

	ptr = remove_one;
	list_for_each_entry(dmap, to_remove, list) {
		ptr->moffset = dmap->window_offset;
		ptr->len = dmap->length;
		ptr++;
		i++;
		num--;
		if (i >= nr_alloc || num == 0) {
			memset(&inarg, 0, sizeof(inarg));
			inarg.count = i;
			ret = fuse_send_removemapping(inode, &inarg,
						      remove_one);
			if (ret)
				goto out;
			ptr = remove_one;
			i = 0;
		}
	}
out:
	kfree(remove_one);
	return ret;
}
/*
 * Cleanup dmap entry and add back to free list. This should be called with
 * fcd->lock held.
 */
static void dmap_reinit_add_to_free_pool(struct fuse_conn_dax *fcd,
					 struct fuse_dax_mapping *dmap)
{
	pr_debug("fuse: freeing memory range start_idx=0x%lx end_idx=0x%lx window_offset=0x%llx length=0x%llx\n",
		 dmap->itn.start, dmap->itn.last, dmap->window_offset,
		 dmap->length);
	__dmap_remove_busy_list(fcd, dmap);
	dmap->inode = NULL;
	dmap->itn.start = dmap->itn.last = 0;
	__dmap_add_to_free_pool(fcd, dmap);
}
/*
 * Free inode dmap entries whose range falls inside [start, end].
 * Does not take any locks. At this point in time it should only be
 * called from the evict_inode() path where we know all dmap entries can be
 * reclaimed.
 */
static void inode_reclaim_dmap_range(struct fuse_conn_dax *fcd,
				     struct inode *inode,
				     loff_t start, loff_t end)
{
	struct fuse_inode *fi = get_fuse_inode(inode);
	struct fuse_dax_mapping *dmap, *n;
	int err, num = 0;
	LIST_HEAD(to_remove);
	unsigned long start_idx = start >> FUSE_DAX_SHIFT;
	unsigned long end_idx = end >> FUSE_DAX_SHIFT;
	struct interval_tree_node *node;

	while (1) {
		node = interval_tree_iter_first(&fi->dax->tree, start_idx,
						end_idx);
		if (!node)
			break;
		dmap = node_to_dmap(node);
		/* inode is going away. There should not be any users of dmap */
		WARN_ON(refcount_read(&dmap->refcnt) > 1);
		interval_tree_remove(&dmap->itn, &fi->dax->tree);
		num++;
		list_add(&dmap->list, &to_remove);
	}

	/* Nothing to remove */
	if (list_empty(&to_remove))
		return;

	WARN_ON(fi->dax->nr < num);
	fi->dax->nr -= num;
	err = dmap_removemapping_list(inode, num, &to_remove);
	if (err && err != -ENOTCONN) {
		pr_warn("Failed to removemappings. start=0x%llx end=0x%llx\n",
			start, end);
	}
	spin_lock(&fcd->lock);
	list_for_each_entry_safe(dmap, n, &to_remove, list) {
		list_del_init(&dmap->list);
		dmap_reinit_add_to_free_pool(fcd, dmap);
	}
	spin_unlock(&fcd->lock);
}
static int dmap_removemapping_one(struct inode *inode,
				  struct fuse_dax_mapping *dmap)
{
	struct fuse_removemapping_one forget_one;
	struct fuse_removemapping_in inarg;

	memset(&inarg, 0, sizeof(inarg));
	inarg.count = 1;
	memset(&forget_one, 0, sizeof(forget_one));
	forget_one.moffset = dmap->window_offset;
	forget_one.len = dmap->length;

	return fuse_send_removemapping(inode, &inarg, &forget_one);
}
/*
 * It is called from evict_inode() and by that time inode is going away. So
 * this function does not take any locks like fi->dax->sem for traversing
 * that fuse inode interval tree. If that lock is taken then lock validator
 * complains of deadlock situation w.r.t fs_reclaim lock.
 */
void fuse_dax_inode_cleanup(struct inode *inode)
{
	struct fuse_conn *fc = get_fuse_conn(inode);
	struct fuse_inode *fi = get_fuse_inode(inode);

	/*
	 * fuse_evict_inode() has already called truncate_inode_pages_final()
	 * before we arrive here. So we should not have to worry about any
	 * pages/exception entries still associated with inode.
	 */
	inode_reclaim_dmap_range(fc->dax, inode, 0, -1);
	WARN_ON(fi->dax->nr);
}
static void fuse_fill_iomap_hole(struct iomap *iomap, loff_t length)
{
	iomap->addr = IOMAP_NULL_ADDR;
	iomap->length = length;
	iomap->type = IOMAP_HOLE;
}
static void fuse_fill_iomap(struct inode *inode, loff_t pos, loff_t length,
			    struct iomap *iomap, struct fuse_dax_mapping *dmap,
			    unsigned int flags)
{
	loff_t offset, len;
	loff_t i_size = i_size_read(inode);

	offset = pos - (dmap->itn.start << FUSE_DAX_SHIFT);
	len = min(length, dmap->length - offset);

	/* If length is beyond end of file, truncate further */
	if (pos + len > i_size)
		len = i_size - pos;

	if (len > 0) {
		iomap->addr = dmap->window_offset + offset;
		iomap->length = len;
		if (flags & IOMAP_FAULT)
			iomap->length = ALIGN(len, PAGE_SIZE);
		iomap->type = IOMAP_MAPPED;
		/*
		 * increase refcnt so that reclaim code knows this dmap is in
		 * use. This assumes fi->dax->sem mutex is held either
		 * shared/exclusive.
		 */
		refcount_inc(&dmap->refcnt);

		/* iomap->private should be NULL */
		WARN_ON_ONCE(iomap->private);
		iomap->private = dmap;
	} else {
		/* Mapping beyond end of file is hole */
		fuse_fill_iomap_hole(iomap, length);
	}
}
static int fuse_setup_new_dax_mapping(struct inode *inode, loff_t pos,
				      loff_t length, unsigned int flags,
				      struct iomap *iomap)
{
	struct fuse_inode *fi = get_fuse_inode(inode);
	struct fuse_conn *fc = get_fuse_conn(inode);
	struct fuse_conn_dax *fcd = fc->dax;
	struct fuse_dax_mapping *dmap, *alloc_dmap = NULL;
	int ret;
	bool writable = flags & IOMAP_WRITE;
	unsigned long start_idx = pos >> FUSE_DAX_SHIFT;
	struct interval_tree_node *node;

	/*
	 * Can't do inline reclaim in fault path. We call
	 * dax_layout_busy_page() before we free a range. And
	 * fuse_wait_dax_page() drops mapping->invalidate_lock and requires it.
	 * In fault path we enter with mapping->invalidate_lock held and can't
	 * drop it. Also in fault path we hold mapping->invalidate_lock shared
	 * and not exclusive, so that creates further issues with
	 * fuse_wait_dax_page(). Hence return -EAGAIN and fuse_dax_fault()
	 * will wait for a memory range to become free and retry.
	 */
	if (flags & IOMAP_FAULT) {
		alloc_dmap = alloc_dax_mapping(fcd);
		if (!alloc_dmap)
			return -EAGAIN;
	} else {
		alloc_dmap = alloc_dax_mapping_reclaim(fcd, inode);
		if (IS_ERR(alloc_dmap))
			return PTR_ERR(alloc_dmap);
	}

	/* If we are here, we should have memory allocated */
	if (WARN_ON(!alloc_dmap))
		return -EIO;

	/*
	 * Take write lock so that only one caller can try to setup mapping
	 * and others wait.
	 */
	down_write(&fi->dax->sem);
	/*
	 * We dropped lock. Check again if somebody else setup
	 * mapping already.
	 */
	node = interval_tree_iter_first(&fi->dax->tree, start_idx, start_idx);
	if (node) {
		dmap = node_to_dmap(node);
		fuse_fill_iomap(inode, pos, length, iomap, dmap, flags);
		dmap_add_to_free_pool(fcd, alloc_dmap);
		up_write(&fi->dax->sem);
		return 0;
	}

	/* Setup one mapping */
	ret = fuse_setup_one_mapping(inode, pos >> FUSE_DAX_SHIFT, alloc_dmap,
				     writable, false);
	if (ret < 0) {
		dmap_add_to_free_pool(fcd, alloc_dmap);
		up_write(&fi->dax->sem);
		return ret;
	}
	fuse_fill_iomap(inode, pos, length, iomap, alloc_dmap, flags);
	up_write(&fi->dax->sem);
	return 0;
}
static int fuse_upgrade_dax_mapping(struct inode *inode, loff_t pos,
				    loff_t length, unsigned int flags,
				    struct iomap *iomap)
{
	struct fuse_inode *fi = get_fuse_inode(inode);
	struct fuse_dax_mapping *dmap;
	int ret;
	unsigned long idx = pos >> FUSE_DAX_SHIFT;
	struct interval_tree_node *node;

	/*
	 * Take exclusive lock so that only one caller can try to setup
	 * mapping and others wait.
	 */
	down_write(&fi->dax->sem);
	node = interval_tree_iter_first(&fi->dax->tree, idx, idx);

	/* We are holding either inode lock or invalidate_lock, and that should
	 * ensure that dmap can't be truncated. We are holding a reference
	 * on dmap and that should make sure it can't be reclaimed. So dmap
	 * should still be there in tree despite the fact we dropped and
	 * re-acquired the fi->dax->sem lock.
	 */
	ret = -EIO;
	if (WARN_ON(!node))
		goto out_err;

	dmap = node_to_dmap(node);

	/* We took an extra reference on dmap to make sure it's not reclaimed.
	 * Now we hold fi->dax->sem lock and that reference is not needed
	 * anymore. Drop it.
	 */
	if (refcount_dec_and_test(&dmap->refcnt)) {
		/* refcount should not hit 0. This object only goes
		 * away when fuse connection goes away
		 */
		WARN_ON_ONCE(1);
	}

	/* Maybe another thread already upgraded mapping while we were not
	 * holding lock.
	 */
	if (dmap->writable) {
		ret = 0;
		goto out_fill_iomap;
	}

	ret = fuse_setup_one_mapping(inode, pos >> FUSE_DAX_SHIFT, dmap, true,
				     true);
	if (ret < 0)
		goto out_err;
out_fill_iomap:
	fuse_fill_iomap(inode, pos, length, iomap, dmap, flags);
out_err:
	up_write(&fi->dax->sem);
	return ret;
}
/* This is just for DAX and the mapping is ephemeral, do not use it for other
 * purposes since there is no block device with a permanent mapping.
 */
static int fuse_iomap_begin(struct inode *inode, loff_t pos, loff_t length,
			    unsigned int flags, struct iomap *iomap,
			    struct iomap *srcmap)
{
	struct fuse_inode *fi = get_fuse_inode(inode);
	struct fuse_conn *fc = get_fuse_conn(inode);
	struct fuse_dax_mapping *dmap;
	bool writable = flags & IOMAP_WRITE;
	unsigned long start_idx = pos >> FUSE_DAX_SHIFT;
	struct interval_tree_node *node;

	/* We don't support FIEMAP */
	if (WARN_ON(flags & IOMAP_REPORT))
		return -EIO;

	iomap->offset = pos;
	iomap->flags = 0;
	iomap->bdev = NULL;
	iomap->dax_dev = fc->dax->dev;

	/*
	 * Both read/write and mmap path can race here. So we need something
	 * to make sure if we are setting up mapping, then other path waits.
	 *
	 * For now, use a semaphore for this. It probably needs to be
	 * optimized later.
	 */
	down_read(&fi->dax->sem);
	node = interval_tree_iter_first(&fi->dax->tree, start_idx, start_idx);
	if (node) {
		dmap = node_to_dmap(node);
		if (writable && !dmap->writable) {
			/* Upgrade read-only mapping to read-write. This will
			 * require exclusive fi->dax->sem lock as we don't want
			 * two threads to be trying to do this simultaneously
			 * for same dmap. So drop shared lock and acquire
			 * exclusive lock.
			 *
			 * Before dropping fi->dax->sem lock, take reference
			 * on dmap so that it's not freed by range reclaim.
			 */
			refcount_inc(&dmap->refcnt);
			up_read(&fi->dax->sem);
			pr_debug("%s: Upgrading mapping at offset 0x%llx length 0x%llx\n",
				 __func__, pos, length);
			return fuse_upgrade_dax_mapping(inode, pos, length,
							flags, iomap);
		} else {
			fuse_fill_iomap(inode, pos, length, iomap, dmap, flags);
			up_read(&fi->dax->sem);
			return 0;
		}
	} else {
		up_read(&fi->dax->sem);
		pr_debug("%s: no mapping at offset 0x%llx length 0x%llx\n",
			 __func__, pos, length);
		if (pos >= i_size_read(inode))
			goto iomap_hole;

		return fuse_setup_new_dax_mapping(inode, pos, length, flags,
						  iomap);
	}

	/*
	 * If read beyond end of file happens, fs code seems to return
	 * it as hole.
	 */
iomap_hole:
	fuse_fill_iomap_hole(iomap, length);
	pr_debug("%s returning hole mapping. pos=0x%llx length_asked=0x%llx length_returned=0x%llx\n",
		 __func__, pos, length, iomap->length);
	return 0;
}
static int fuse_iomap_end(struct inode *inode, loff_t pos, loff_t length,
			  ssize_t written, unsigned int flags,
			  struct iomap *iomap)
{
	struct fuse_dax_mapping *dmap = iomap->private;

	if (dmap) {
		if (refcount_dec_and_test(&dmap->refcnt)) {
			/* refcount should not hit 0. This object only goes
			 * away when fuse connection goes away
			 */
			WARN_ON_ONCE(1);
		}
	}

	/* DAX writes beyond end-of-file aren't handled using iomap, so the
	 * file size is unchanged and there is nothing to do here.
	 */
	return 0;
}
static const struct iomap_ops fuse_iomap_ops = {
	.iomap_begin = fuse_iomap_begin,
	.iomap_end = fuse_iomap_end,
};
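/*
 * fuse_iomap_ops is passed to dax_iomap_rw() in the read/write paths and to
 * dax_iomap_fault() in the fault path below. ->iomap_begin() translates a
 * file offset into a DAX window offset (setting up a new mapping with the
 * server if needed) and ->iomap_end() drops the dmap reference taken in
 * fuse_fill_iomap().
 */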
static void fuse_wait_dax_page(struct inode *inode)
{
	filemap_invalidate_unlock(inode->i_mapping);
	schedule();
	filemap_invalidate_lock(inode->i_mapping);
}
/* Should be called with mapping->invalidate_lock held exclusively */
static int __fuse_dax_break_layouts(struct inode *inode, bool *retry,
				    loff_t start, loff_t end)
{
	struct page *page;

	page = dax_layout_busy_page_range(inode->i_mapping, start, end);
	if (!page)
		return 0;

	*retry = true;
	return ___wait_var_event(&page->_refcount,
			atomic_read(&page->_refcount) == 1, TASK_INTERRUPTIBLE,
			0, 0, fuse_wait_dax_page(inode));
}
/* dmap_end == 0 leads to unmapping of whole file */
int fuse_dax_break_layouts(struct inode *inode, u64 dmap_start,
			   u64 dmap_end)
{
	bool retry;
	int ret;

	do {
		retry = false;
		ret = __fuse_dax_break_layouts(inode, &retry, dmap_start,
					       dmap_end);
	} while (ret == 0 && retry);

	return ret;
}
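/*
 * fuse_dax_break_layouts() loops until dax_layout_busy_page_range() finds no
 * page with an elevated reference in the given range. Callers hold
 * mapping->invalidate_lock exclusively, which __fuse_dax_break_layouts()
 * temporarily drops via fuse_wait_dax_page() while waiting.
 */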
ssize_t fuse_dax_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
	struct inode *inode = file_inode(iocb->ki_filp);
	ssize_t ret;

	if (iocb->ki_flags & IOCB_NOWAIT) {
		if (!inode_trylock_shared(inode))
			return -EAGAIN;
	} else {
		inode_lock_shared(inode);
	}

	ret = dax_iomap_rw(iocb, to, &fuse_iomap_ops);
	inode_unlock_shared(inode);

	/* TODO file_accessed(iocb->f_filp) */
	return ret;
}
static bool file_extending_write(struct kiocb *iocb, struct iov_iter *from)
{
	struct inode *inode = file_inode(iocb->ki_filp);

	return (iov_iter_rw(from) == WRITE &&
		((iocb->ki_pos) >= i_size_read(inode) ||
		 (iocb->ki_pos + iov_iter_count(from) > i_size_read(inode))));
}
static ssize_t fuse_dax_direct_write(struct kiocb *iocb, struct iov_iter *from)
{
	struct inode *inode = file_inode(iocb->ki_filp);
	struct fuse_io_priv io = FUSE_IO_PRIV_SYNC(iocb);
	ssize_t ret;

	ret = fuse_direct_io(&io, from, &iocb->ki_pos, FUSE_DIO_WRITE);

	fuse_write_update_attr(inode, iocb->ki_pos, ret);
	return ret;
}
ssize_t fuse_dax_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
	struct inode *inode = file_inode(iocb->ki_filp);
	ssize_t ret;

	if (iocb->ki_flags & IOCB_NOWAIT) {
		if (!inode_trylock(inode))
			return -EAGAIN;
	} else {
		inode_lock(inode);
	}

	ret = generic_write_checks(iocb, from);
	if (ret <= 0)
		goto out;

	ret = file_remove_privs(iocb->ki_filp);
	if (ret)
		goto out;
	/* TODO file_update_time() but we don't want metadata I/O */

	/* Do not use dax for file extending writes as write and on
	 * disk i_size increase are not atomic otherwise.
	 */
	if (file_extending_write(iocb, from))
		ret = fuse_dax_direct_write(iocb, from);
	else
		ret = dax_iomap_rw(iocb, from, &fuse_iomap_ops);

out:
	inode_unlock(inode);

	if (ret > 0)
		ret = generic_write_sync(iocb, ret);
	return ret;
}
static vm_fault_t __fuse_dax_fault(struct vm_fault *vmf, unsigned int order,
				   bool write)
{
	vm_fault_t ret;
	struct inode *inode = file_inode(vmf->vma->vm_file);
	struct super_block *sb = inode->i_sb;
	pfn_t pfn;
	int error = 0;
	struct fuse_conn *fc = get_fuse_conn(inode);
	struct fuse_conn_dax *fcd = fc->dax;
	bool retry = false;

	if (write)
		sb_start_pagefault(sb);
retry:
	if (retry && !(fcd->nr_free_ranges > 0))
		wait_event(fcd->range_waitq, (fcd->nr_free_ranges > 0));

	/*
	 * We need to serialize against not only truncate but also against
	 * fuse dax memory range reclaim. While a range is being reclaimed,
	 * we do not want any read/write/mmap to make progress and try
	 * to populate page cache or access memory we are trying to free.
	 */
	filemap_invalidate_lock_shared(inode->i_mapping);
	ret = dax_iomap_fault(vmf, order, &pfn, &error, &fuse_iomap_ops);
	if ((ret & VM_FAULT_ERROR) && error == -EAGAIN) {
		error = 0;
		retry = true;
		filemap_invalidate_unlock_shared(inode->i_mapping);
		goto retry;
	}

	if (ret & VM_FAULT_NEEDDSYNC)
		ret = dax_finish_sync_fault(vmf, order, pfn);
	filemap_invalidate_unlock_shared(inode->i_mapping);

	if (write)
		sb_end_pagefault(sb);

	return ret;
}
static vm_fault_t fuse_dax_fault(struct vm_fault *vmf)
{
	return __fuse_dax_fault(vmf, 0, vmf->flags & FAULT_FLAG_WRITE);
}

static vm_fault_t fuse_dax_huge_fault(struct vm_fault *vmf, unsigned int order)
{
	return __fuse_dax_fault(vmf, order, vmf->flags & FAULT_FLAG_WRITE);
}

static vm_fault_t fuse_dax_page_mkwrite(struct vm_fault *vmf)
{
	return __fuse_dax_fault(vmf, 0, true);
}

static vm_fault_t fuse_dax_pfn_mkwrite(struct vm_fault *vmf)
{
	return __fuse_dax_fault(vmf, 0, true);
}

static const struct vm_operations_struct fuse_dax_vm_ops = {
	.fault		= fuse_dax_fault,
	.huge_fault	= fuse_dax_huge_fault,
	.page_mkwrite	= fuse_dax_page_mkwrite,
	.pfn_mkwrite	= fuse_dax_pfn_mkwrite,
};
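/*
 * These vm operations are installed by fuse_dax_mmap() below. Write faults
 * and the mkwrite callbacks go through __fuse_dax_fault() with write == true
 * so that they are bracketed by sb_start_pagefault()/sb_end_pagefault().
 */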
int fuse_dax_mmap(struct file *file, struct vm_area_struct *vma)
{
	file_accessed(file);
	vma->vm_ops = &fuse_dax_vm_ops;
	vm_flags_set(vma, VM_MIXEDMAP | VM_HUGEPAGE);
	return 0;
}
static int dmap_writeback_invalidate(struct inode *inode,
				     struct fuse_dax_mapping *dmap)
{
	int ret;
	loff_t start_pos = dmap->itn.start << FUSE_DAX_SHIFT;
	loff_t end_pos = (start_pos + FUSE_DAX_SZ - 1);

	ret = filemap_fdatawrite_range(inode->i_mapping, start_pos, end_pos);
	if (ret) {
		pr_debug("fuse: filemap_fdatawrite_range() failed. err=%d start_pos=0x%llx, end_pos=0x%llx\n",
			 ret, start_pos, end_pos);
		return ret;
	}

	ret = invalidate_inode_pages2_range(inode->i_mapping,
					    start_pos >> PAGE_SHIFT,
					    end_pos >> PAGE_SHIFT);
	if (ret)
		pr_debug("fuse: invalidate_inode_pages2_range() failed err=%d\n",
			 ret);

	return ret;
}
static int reclaim_one_dmap_locked(struct inode *inode,
				   struct fuse_dax_mapping *dmap)
{
	int ret;
	struct fuse_inode *fi = get_fuse_inode(inode);

	/*
	 * igrab() was done to make sure inode won't go under us, and this
	 * further avoids the race with evict().
	 */
	ret = dmap_writeback_invalidate(inode, dmap);
	if (ret)
		return ret;

	/* Remove dax mapping from inode interval tree now */
	interval_tree_remove(&dmap->itn, &fi->dax->tree);
	fi->dax->nr--;

	/* It is possible that umount/shutdown has killed the fuse connection
	 * and worker thread is trying to reclaim memory in parallel. Don't
	 * warn in that case.
	 */
	ret = dmap_removemapping_one(inode, dmap);
	if (ret && ret != -ENOTCONN) {
		pr_warn("Failed to remove mapping. offset=0x%llx len=0x%llx ret=%d\n",
			dmap->window_offset, dmap->length, ret);
	}
	return 0;
}
/*
 * Find first mapped dmap for an inode and return file offset. Caller needs
 * to hold fi->dax->sem lock either shared or exclusive.
 */
static struct fuse_dax_mapping *inode_lookup_first_dmap(struct inode *inode)
{
	struct fuse_inode *fi = get_fuse_inode(inode);
	struct fuse_dax_mapping *dmap;
	struct interval_tree_node *node;

	for (node = interval_tree_iter_first(&fi->dax->tree, 0, -1); node;
	     node = interval_tree_iter_next(node, 0, -1)) {
		dmap = node_to_dmap(node);
		/* still in use. */
		if (refcount_read(&dmap->refcnt) > 1)
			continue;

		return dmap;
	}

	return NULL;
}
/*
 * Find first mapping in the tree and free it and return it. Do not add
 * it back to free pool.
 */
static struct fuse_dax_mapping *
inode_inline_reclaim_one_dmap(struct fuse_conn_dax *fcd, struct inode *inode,
			      bool *retry)
{
	struct fuse_inode *fi = get_fuse_inode(inode);
	struct fuse_dax_mapping *dmap;
	u64 dmap_start, dmap_end;
	unsigned long start_idx;
	int ret;
	struct interval_tree_node *node;

	filemap_invalidate_lock(inode->i_mapping);

	/* Lookup a dmap and corresponding file offset to reclaim. */
	down_read(&fi->dax->sem);
	dmap = inode_lookup_first_dmap(inode);
	if (dmap) {
		start_idx = dmap->itn.start;
		dmap_start = start_idx << FUSE_DAX_SHIFT;
		dmap_end = dmap_start + FUSE_DAX_SZ - 1;
	}
	up_read(&fi->dax->sem);

	if (!dmap)
		goto out_mmap_sem;
	/*
	 * Make sure there are no references to inode pages using
	 * get_user_pages()
	 */
	ret = fuse_dax_break_layouts(inode, dmap_start, dmap_end);
	if (ret) {
		pr_debug("fuse: fuse_dax_break_layouts() failed. err=%d\n",
			 ret);
		dmap = ERR_PTR(ret);
		goto out_mmap_sem;
	}

	down_write(&fi->dax->sem);
	node = interval_tree_iter_first(&fi->dax->tree, start_idx, start_idx);
	/* Range already got reclaimed by somebody else */
	if (!node) {
		if (retry)
			*retry = true;
		goto out_write_dmap_sem;
	}

	dmap = node_to_dmap(node);
	/* still in use. */
	if (refcount_read(&dmap->refcnt) > 1) {
		dmap = NULL;
		if (retry)
			*retry = true;
		goto out_write_dmap_sem;
	}

	ret = reclaim_one_dmap_locked(inode, dmap);
	if (ret < 0) {
		dmap = ERR_PTR(ret);
		goto out_write_dmap_sem;
	}

	/* Clean up dmap. Do not add back to free list */
	dmap_remove_busy_list(fcd, dmap);
	dmap->inode = NULL;
	dmap->itn.start = dmap->itn.last = 0;

	pr_debug("fuse: %s: inline reclaimed memory range. inode=%p, window_offset=0x%llx, length=0x%llx\n",
		 __func__, inode, dmap->window_offset, dmap->length);

out_write_dmap_sem:
	up_write(&fi->dax->sem);
out_mmap_sem:
	filemap_invalidate_unlock(inode->i_mapping);
	return dmap;
}
static struct fuse_dax_mapping *
alloc_dax_mapping_reclaim(struct fuse_conn_dax *fcd, struct inode *inode)
{
	struct fuse_dax_mapping *dmap;
	struct fuse_inode *fi = get_fuse_inode(inode);

	while (1) {
		bool retry = false;

		dmap = alloc_dax_mapping(fcd);
		if (dmap)
			return dmap;

		dmap = inode_inline_reclaim_one_dmap(fcd, inode, &retry);
		/*
		 * Either we got a mapping or it is an error, return in both
		 * the cases.
		 */
		if (dmap)
			return dmap;

		/* If we could not reclaim a mapping because it
		 * had a reference or some other temporary failure,
		 * try again. We want to give up inline reclaim only
		 * if there is no range assigned to this node. Otherwise
		 * a deadlock is possible if we sleep with
		 * mapping->invalidate_lock held and worker to free memory
		 * can't make progress due to unavailability of
		 * mapping->invalidate_lock. So sleep only if fi->dax->nr=0
		 */
		if (retry)
			continue;
		/*
		 * There are no mappings which can be reclaimed. Wait for one.
		 * We are not holding fi->dax->sem. So it is possible
		 * that range gets added now. But as we are not holding
		 * mapping->invalidate_lock, worker should still be able to
		 * free up a range and wake us up.
		 */
		if (!fi->dax->nr && !(fcd->nr_free_ranges > 0)) {
			if (wait_event_killable_exclusive(fcd->range_waitq,
					(fcd->nr_free_ranges > 0))) {
				return ERR_PTR(-EINTR);
			}
		}
	}
}
static int lookup_and_reclaim_dmap_locked(struct fuse_conn_dax *fcd,
					  struct inode *inode,
					  unsigned long start_idx)
{
	int ret;
	struct fuse_inode *fi = get_fuse_inode(inode);
	struct fuse_dax_mapping *dmap;
	struct interval_tree_node *node;

	/* Find the fuse dax mapping at this file offset in the inode. */
	node = interval_tree_iter_first(&fi->dax->tree, start_idx, start_idx);

	/* Range already got cleaned up by somebody else */
	if (!node)
		return 0;
	dmap = node_to_dmap(node);

	/* still in use. */
	if (refcount_read(&dmap->refcnt) > 1)
		return 0;

	ret = reclaim_one_dmap_locked(inode, dmap);
	if (ret < 0)
		return ret;

	/* Cleanup dmap entry and add back to free list */
	spin_lock(&fcd->lock);
	dmap_reinit_add_to_free_pool(fcd, dmap);
	spin_unlock(&fcd->lock);
	return ret;
}
/*
 * Free a range of memory.
 * Locking:
 * 1. Take mapping->invalidate_lock to block dax faults.
 * 2. Take fi->dax->sem to protect interval tree and also to make sure
 *    read/write can not reuse a dmap which we might be freeing.
 */
static int lookup_and_reclaim_dmap(struct fuse_conn_dax *fcd,
				   struct inode *inode,
				   unsigned long start_idx,
				   unsigned long end_idx)
{
	int ret;
	struct fuse_inode *fi = get_fuse_inode(inode);
	loff_t dmap_start = start_idx << FUSE_DAX_SHIFT;
	loff_t dmap_end = (dmap_start + FUSE_DAX_SZ) - 1;

	filemap_invalidate_lock(inode->i_mapping);
	ret = fuse_dax_break_layouts(inode, dmap_start, dmap_end);
	if (ret) {
		pr_debug("virtio_fs: fuse_dax_break_layouts() failed. err=%d\n",
			 ret);
		goto out_mmap_sem;
	}

	down_write(&fi->dax->sem);
	ret = lookup_and_reclaim_dmap_locked(fcd, inode, start_idx);
	up_write(&fi->dax->sem);
out_mmap_sem:
	filemap_invalidate_unlock(inode->i_mapping);
	return ret;
}
static int try_to_free_dmap_chunks(struct fuse_conn_dax *fcd,
				   unsigned long nr_to_free)
{
	struct fuse_dax_mapping *dmap, *pos, *temp;
	int ret, nr_freed = 0;
	unsigned long start_idx = 0, end_idx = 0;
	struct inode *inode = NULL;

	/* Pick first busy range and free it for now */
	while (1) {
		if (nr_freed >= nr_to_free)
			break;

		dmap = NULL;
		spin_lock(&fcd->lock);

		if (!fcd->nr_busy_ranges) {
			spin_unlock(&fcd->lock);
			return 0;
		}

		list_for_each_entry_safe(pos, temp, &fcd->busy_ranges,
					 busy_list) {
			/* skip this range if it's in use. */
			if (refcount_read(&pos->refcnt) > 1)
				continue;

			inode = igrab(pos->inode);
			/*
			 * This inode is going away. That will free
			 * up all the ranges anyway, continue to
			 * next range.
			 */
			if (!inode)
				continue;
			/*
			 * Take this element off list and add it to the tail.
			 * If this element can't be freed, it will help with
			 * selecting new element in next iteration of loop.
			 */
			dmap = pos;
			list_move_tail(&dmap->busy_list, &fcd->busy_ranges);
			start_idx = end_idx = dmap->itn.start;
			break;
		}
		spin_unlock(&fcd->lock);
		if (!dmap)
			return 0;

		ret = lookup_and_reclaim_dmap(fcd, inode, start_idx, end_idx);
		iput(inode);
		if (ret)
			return ret;
		nr_freed++;
	}
	return 0;
}
static void fuse_dax_free_mem_worker(struct work_struct *work)
{
	int ret;
	struct fuse_conn_dax *fcd = container_of(work, struct fuse_conn_dax,
						 free_work.work);
	ret = try_to_free_dmap_chunks(fcd, FUSE_DAX_RECLAIM_CHUNK);
	if (ret) {
		pr_debug("fuse: try_to_free_dmap_chunks() failed with err=%d\n",
			 ret);
	}

	/* If the number of free ranges is still below the threshold, requeue */
	kick_dmap_free_worker(fcd, 1);
}
static void fuse_free_dax_mem_ranges(struct list_head *mem_list)
{
	struct fuse_dax_mapping *range, *temp;

	/* Free all allocated elements */
	list_for_each_entry_safe(range, temp, mem_list, list) {
		list_del(&range->list);
		if (!list_empty(&range->busy_list))
			list_del(&range->busy_list);
		kfree(range);
	}
}
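/*
 * fuse_free_dax_mem_ranges() is used both on connection teardown (see
 * fuse_dax_conn_free() below) and on the error path of
 * fuse_dax_mem_range_init().
 */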
void fuse_dax_conn_free(struct fuse_conn *fc)
{
	if (fc->dax) {
		fuse_free_dax_mem_ranges(&fc->dax->free_ranges);
		kfree(fc->dax);
		fc->dax = NULL;
	}
}
static int fuse_dax_mem_range_init(struct fuse_conn_dax *fcd)
{
	long nr_pages, nr_ranges;
	struct fuse_dax_mapping *range;
	int ret, id;
	size_t dax_size = -1;
	unsigned long i;

	init_waitqueue_head(&fcd->range_waitq);
	INIT_LIST_HEAD(&fcd->free_ranges);
	INIT_LIST_HEAD(&fcd->busy_ranges);
	INIT_DELAYED_WORK(&fcd->free_work, fuse_dax_free_mem_worker);

	id = dax_read_lock();
	nr_pages = dax_direct_access(fcd->dev, 0, PHYS_PFN(dax_size),
				     DAX_ACCESS, NULL, NULL);
	dax_read_unlock(id);
	if (nr_pages < 0) {
		pr_debug("dax_direct_access() returned %ld\n", nr_pages);
		return nr_pages;
	}

	nr_ranges = nr_pages / FUSE_DAX_PAGES;
	pr_debug("%s: dax mapped %ld pages. nr_ranges=%ld\n",
		 __func__, nr_pages, nr_ranges);

	for (i = 0; i < nr_ranges; i++) {
		range = kzalloc(sizeof(struct fuse_dax_mapping), GFP_KERNEL);
		ret = -ENOMEM;
		if (!range)
			goto out_err;

		/* TODO: This offset only works if virtio-fs driver is not
		 * having some memory hidden at the beginning. This needs
		 * better handling.
		 */
		range->window_offset = i * FUSE_DAX_SZ;
		range->length = FUSE_DAX_SZ;
		INIT_LIST_HEAD(&range->busy_list);
		refcount_set(&range->refcnt, 1);
		list_add_tail(&range->list, &fcd->free_ranges);
	}

	fcd->nr_free_ranges = nr_ranges;
	fcd->nr_ranges = nr_ranges;
	return 0;
out_err:
	/* Free all allocated elements */
	fuse_free_dax_mem_ranges(&fcd->free_ranges);
	return ret;
}
int fuse_dax_conn_alloc(struct fuse_conn *fc, enum fuse_dax_mode dax_mode,
			struct dax_device *dax_dev)
{
	struct fuse_conn_dax *fcd;
	int err;

	fc->dax_mode = dax_mode;

	if (!dax_dev)
		return 0;

	fcd = kzalloc(sizeof(*fcd), GFP_KERNEL);
	if (!fcd)
		return -ENOMEM;

	spin_lock_init(&fcd->lock);
	fcd->dev = dax_dev;
	err = fuse_dax_mem_range_init(fcd);
	if (err) {
		kfree(fcd);
		return err;
	}

	fc->dax = fcd;
	return 0;
}
bool fuse_dax_inode_alloc(struct super_block *sb, struct fuse_inode *fi)
{
	struct fuse_conn *fc = get_fuse_conn_super(sb);

	fi->dax = NULL;
	if (fc->dax) {
		fi->dax = kzalloc(sizeof(*fi->dax), GFP_KERNEL_ACCOUNT);
		if (!fi->dax)
			return false;

		init_rwsem(&fi->dax->sem);
		fi->dax->tree = RB_ROOT_CACHED;
	}

	return true;
}
static const struct address_space_operations fuse_dax_file_aops = {
	.direct_IO	= noop_direct_IO,
	.dirty_folio	= noop_dirty_folio,
};
static bool fuse_should_enable_dax(struct inode *inode, unsigned int flags)
{
	struct fuse_conn *fc = get_fuse_conn(inode);
	enum fuse_dax_mode dax_mode = fc->dax_mode;

	if (dax_mode == FUSE_DAX_NEVER)
		return false;

	/*
	 * fc->dax may be NULL in 'inode' mode when filesystem device doesn't
	 * support DAX, in which case it will silently fall back to 'never' mode.
	 */
	if (!fc->dax)
		return false;

	if (dax_mode == FUSE_DAX_ALWAYS)
		return true;

	/* dax_mode is FUSE_DAX_INODE* */
	return fc->inode_dax && (flags & FUSE_ATTR_DAX);
}
void fuse_dax_inode_init(struct inode *inode, unsigned int flags)
{
	if (!fuse_should_enable_dax(inode, flags))
		return;

	inode->i_flags |= S_DAX;
	inode->i_data.a_ops = &fuse_dax_file_aops;
}
void fuse_dax_dontcache(struct inode *inode, unsigned int flags)
{
	struct fuse_conn *fc = get_fuse_conn(inode);

	if (fuse_is_inode_dax_mode(fc->dax_mode) &&
	    ((bool) IS_DAX(inode) != (bool) (flags & FUSE_ATTR_DAX)))
		d_mark_dontcache(inode);
}
bool fuse_dax_check_alignment(struct fuse_conn *fc, unsigned int map_alignment)
{
	if (fc->dax && (map_alignment > FUSE_DAX_SHIFT)) {
		pr_warn("FUSE: map_alignment %u incompatible with dax mem range size %u\n",
			map_alignment, FUSE_DAX_SZ);
		return false;
	}
	return true;
}
void fuse_dax_cancel_work(struct fuse_conn *fc)
{
	struct fuse_conn_dax *fcd = fc->dax;

	if (fcd)
		cancel_delayed_work_sync(&fcd->free_work);
}
EXPORT_SYMBOL_GPL(fuse_dax_cancel_work);