// SPDX-License-Identifier: GPL-2.0
/*
 * dax: direct host memory access
 * Copyright (C) 2020 Red Hat, Inc.
 */

#include "fuse_i.h"

#include <linux/delay.h>
#include <linux/dax.h>
#include <linux/uio.h>
#include <linux/pfn_t.h>
#include <linux/iomap.h>
#include <linux/interval_tree.h>

/*
 * Default memory range size. A power of 2 so it agrees with common FUSE_INIT
 * map_alignment values 4KB and 64KB.
 */
#define FUSE_DAX_SHIFT	21
#define FUSE_DAX_SZ	(1 << FUSE_DAX_SHIFT)
#define FUSE_DAX_PAGES	(FUSE_DAX_SZ / PAGE_SIZE)

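/*
 * For example, with 4 KiB pages: FUSE_DAX_SHIFT = 21 gives FUSE_DAX_SZ =
 * 1 << 21 = 2 MiB per range, i.e. FUSE_DAX_PAGES = 512 pages per range.
 */
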
/* Number of ranges reclaimer will try to free in one invocation */
#define FUSE_DAX_RECLAIM_CHUNK		(10)

/*
 * DAX memory reclaim threshold as a percentage of total ranges. When the
 * number of free ranges drops below this threshold, reclaim can trigger.
 */
#define FUSE_DAX_RECLAIM_THRESHOLD	(20)

/** Translation information for file offsets to DAX window offsets */
struct fuse_dax_mapping {
	/* Pointer to inode where this memory range is mapped */
	struct inode *inode;

	/* Will connect in fcd->free_ranges to keep track of free memory */
	struct list_head list;

	/* For interval tree in file/inode */
	struct interval_tree_node itn;

	/* Will connect in fc->busy_ranges to keep track of busy memory */
	struct list_head busy_list;

	/** Position in DAX window */
	u64 window_offset;

	/** Length of mapping, in bytes */
	loff_t length;

	/* Is this mapping read-only or read-write */
	bool writable;

	/* Reference count when the mapping is used by dax iomap */
	refcount_t refcnt;
};

/* Per-inode dax map */
struct fuse_inode_dax {
	/* Semaphore to protect modifications to the dmap tree */
	struct rw_semaphore sem;

	/* Sorted rb tree of struct fuse_dax_mapping elements */
	struct rb_root_cached tree;
	unsigned long nr;
};

struct fuse_conn_dax {
	/* DAX device */
	struct dax_device *dev;

	/* Lock protecting accesses to members of this structure */
	spinlock_t lock;

	/* List of memory ranges which are busy */
	unsigned long nr_busy_ranges;
	struct list_head busy_ranges;

	/* Worker to free up memory ranges */
	struct delayed_work free_work;

	/* Wait queue for a dax range to become free */
	wait_queue_head_t range_waitq;

	/* DAX Window Free Ranges */
	long nr_free_ranges;
	struct list_head free_ranges;

	unsigned long nr_ranges;
};

static inline struct fuse_dax_mapping *
node_to_dmap(struct interval_tree_node *node)
{
	if (!node)
		return NULL;

	return container_of(node, struct fuse_dax_mapping, itn);
}

static struct fuse_dax_mapping *
alloc_dax_mapping_reclaim(struct fuse_conn_dax *fcd, struct inode *inode);

static void
__kick_dmap_free_worker(struct fuse_conn_dax *fcd, unsigned long delay_ms)
{
	unsigned long free_threshold;

	/* If the number of free ranges is below the threshold, start reclaim */
	free_threshold = max_t(unsigned long,
			       fcd->nr_ranges * FUSE_DAX_RECLAIM_THRESHOLD / 100,
			       1);
	if (fcd->nr_free_ranges < free_threshold)
		queue_delayed_work(system_long_wq, &fcd->free_work,
				   msecs_to_jiffies(delay_ms));
}

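/*
 * Example: with fcd->nr_ranges = 100 and FUSE_DAX_RECLAIM_THRESHOLD = 20,
 * free_threshold = max(100 * 20 / 100, 1) = 20, so the free worker is
 * queued once fewer than 20 ranges remain free.
 */
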
static void kick_dmap_free_worker(struct fuse_conn_dax *fcd,
				  unsigned long delay_ms)
{
	spin_lock(&fcd->lock);
	__kick_dmap_free_worker(fcd, delay_ms);
	spin_unlock(&fcd->lock);
}

static struct fuse_dax_mapping *alloc_dax_mapping(struct fuse_conn_dax *fcd)
{
	struct fuse_dax_mapping *dmap;

	spin_lock(&fcd->lock);
	dmap = list_first_entry_or_null(&fcd->free_ranges,
					struct fuse_dax_mapping, list);
	if (dmap) {
		list_del_init(&dmap->list);
		WARN_ON(fcd->nr_free_ranges <= 0);
		fcd->nr_free_ranges--;
	}
	spin_unlock(&fcd->lock);

	kick_dmap_free_worker(fcd, 0);
	return dmap;
}

/* This assumes fcd->lock is held */
static void __dmap_remove_busy_list(struct fuse_conn_dax *fcd,
				    struct fuse_dax_mapping *dmap)
{
	list_del_init(&dmap->busy_list);
	WARN_ON(fcd->nr_busy_ranges == 0);
	fcd->nr_busy_ranges--;
}

static void dmap_remove_busy_list(struct fuse_conn_dax *fcd,
				  struct fuse_dax_mapping *dmap)
{
	spin_lock(&fcd->lock);
	__dmap_remove_busy_list(fcd, dmap);
	spin_unlock(&fcd->lock);
}

/* This assumes fcd->lock is held */
static void __dmap_add_to_free_pool(struct fuse_conn_dax *fcd,
				    struct fuse_dax_mapping *dmap)
{
	list_add_tail(&dmap->list, &fcd->free_ranges);
	fcd->nr_free_ranges++;
	wake_up(&fcd->range_waitq);
}

static void dmap_add_to_free_pool(struct fuse_conn_dax *fcd,
				  struct fuse_dax_mapping *dmap)
{
	/* Return fuse_dax_mapping to free list */
	spin_lock(&fcd->lock);
	__dmap_add_to_free_pool(fcd, dmap);
	spin_unlock(&fcd->lock);
}

static int fuse_setup_one_mapping(struct inode *inode, unsigned long start_idx,
				  struct fuse_dax_mapping *dmap, bool writable,
				  bool upgrade)
{
	struct fuse_mount *fm = get_fuse_mount(inode);
	struct fuse_conn_dax *fcd = fm->fc->dax;
	struct fuse_inode *fi = get_fuse_inode(inode);
	struct fuse_setupmapping_in inarg;
	loff_t offset = start_idx << FUSE_DAX_SHIFT;
	FUSE_ARGS(args);
	ssize_t err;

	WARN_ON(fcd->nr_free_ranges < 0);

	/* Ask fuse daemon to setup mapping */
	memset(&inarg, 0, sizeof(inarg));
	inarg.foffset = offset;
	inarg.moffset = dmap->window_offset;
	inarg.len = FUSE_DAX_SZ;
	inarg.flags |= FUSE_SETUPMAPPING_FLAG_READ;
	if (writable)
		inarg.flags |= FUSE_SETUPMAPPING_FLAG_WRITE;
	args.opcode = FUSE_SETUPMAPPING;
	args.nodeid = fi->nodeid;
	args.in_numargs = 1;
	args.in_args[0].size = sizeof(inarg);
	args.in_args[0].value = &inarg;
	err = fuse_simple_request(fm, &args);
	if (err < 0)
		return err;
	dmap->writable = writable;
	if (!upgrade) {
		/*
		 * We don't take a reference on inode. inode is valid right now
		 * and when inode is going away, cleanup logic should first
		 * cleanup dmap entries.
		 */
		dmap->inode = inode;
		dmap->itn.start = dmap->itn.last = start_idx;
		/* Protected by fi->dax->sem */
		interval_tree_insert(&dmap->itn, &fi->dax->tree);
		fi->dax->nr++;
		spin_lock(&fcd->lock);
		list_add_tail(&dmap->busy_list, &fcd->busy_ranges);
		fcd->nr_busy_ranges++;
		spin_unlock(&fcd->lock);
	}
	return 0;
}

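/*
 * FUSE_SETUPMAPPING asks the fuse server to map FUSE_DAX_SZ bytes of the
 * file, starting at inarg.foffset, into the DAX window at inarg.moffset.
 * Once the request succeeds, I/O and faults in that file range are served
 * directly from the DAX window instead of going through regular FUSE
 * requests.
 */
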
static int fuse_send_removemapping(struct inode *inode,
				   struct fuse_removemapping_in *inargp,
				   struct fuse_removemapping_one *remove_one)
{
	struct fuse_inode *fi = get_fuse_inode(inode);
	struct fuse_mount *fm = get_fuse_mount(inode);
	FUSE_ARGS(args);

	args.opcode = FUSE_REMOVEMAPPING;
	args.nodeid = fi->nodeid;
	args.in_numargs = 2;
	args.in_args[0].size = sizeof(*inargp);
	args.in_args[0].value = inargp;
	args.in_args[1].size = inargp->count * sizeof(*remove_one);
	args.in_args[1].value = remove_one;
	return fuse_simple_request(fm, &args);
}

static int dmap_removemapping_list(struct inode *inode, unsigned int num,
				   struct list_head *to_remove)
{
	struct fuse_removemapping_one *remove_one, *ptr;
	struct fuse_removemapping_in inarg;
	struct fuse_dax_mapping *dmap;
	int ret, i = 0, nr_alloc;

	nr_alloc = min_t(unsigned int, num, FUSE_REMOVEMAPPING_MAX_ENTRY);
	remove_one = kmalloc_array(nr_alloc, sizeof(*remove_one), GFP_NOFS);
	if (!remove_one)
		return -ENOMEM;

	ptr = remove_one;
	list_for_each_entry(dmap, to_remove, list) {
		ptr->moffset = dmap->window_offset;
		ptr->len = dmap->length;
		ptr++;
		i++;
		num--;
		if (i >= nr_alloc || num == 0) {
			memset(&inarg, 0, sizeof(inarg));
			inarg.count = i;
			ret = fuse_send_removemapping(inode, &inarg,
						      remove_one);
			if (ret)
				goto out;
			ptr = remove_one;
			i = 0;
		}
	}
out:
	kfree(remove_one);
	return ret;
}

/*
 * Cleanup dmap entry and add back to free list. This should be called with
 * fcd->lock held.
 */
static void dmap_reinit_add_to_free_pool(struct fuse_conn_dax *fcd,
					 struct fuse_dax_mapping *dmap)
{
	pr_debug("fuse: freeing memory range start_idx=0x%lx end_idx=0x%lx window_offset=0x%llx length=0x%llx\n",
		 dmap->itn.start, dmap->itn.last, dmap->window_offset,
		 dmap->length);
	__dmap_remove_busy_list(fcd, dmap);
	dmap->inode = NULL;
	dmap->itn.start = dmap->itn.last = 0;
	__dmap_add_to_free_pool(fcd, dmap);
}

/*
 * Free inode dmap entries whose range falls inside [start, end].
 * Does not take any locks. At this point of time it should only be
 * called from evict_inode() path where we know all dmap entries can be
 * reclaimed.
 */
static void inode_reclaim_dmap_range(struct fuse_conn_dax *fcd,
				     struct inode *inode,
				     loff_t start, loff_t end)
{
	struct fuse_inode *fi = get_fuse_inode(inode);
	struct fuse_dax_mapping *dmap, *n;
	int err, num = 0;
	LIST_HEAD(to_remove);
	unsigned long start_idx = start >> FUSE_DAX_SHIFT;
	unsigned long end_idx = end >> FUSE_DAX_SHIFT;
	struct interval_tree_node *node;

	while (1) {
		node = interval_tree_iter_first(&fi->dax->tree, start_idx,
						end_idx);
		if (!node)
			break;
		dmap = node_to_dmap(node);
		/* inode is going away. There should not be any users of dmap */
		WARN_ON(refcount_read(&dmap->refcnt) > 1);
		interval_tree_remove(&dmap->itn, &fi->dax->tree);
		num++;
		list_add(&dmap->list, &to_remove);
	}

	/* Nothing to remove */
	if (list_empty(&to_remove))
		return;

	WARN_ON(fi->dax->nr < num);
	fi->dax->nr -= num;
	err = dmap_removemapping_list(inode, num, &to_remove);
	if (err && err != -ENOTCONN) {
		pr_warn("Failed to removemappings. start=0x%llx end=0x%llx\n",
			start, end);
	}
	spin_lock(&fcd->lock);
	list_for_each_entry_safe(dmap, n, &to_remove, list) {
		list_del_init(&dmap->list);
		dmap_reinit_add_to_free_pool(fcd, dmap);
	}
	spin_unlock(&fcd->lock);
}

static int dmap_removemapping_one(struct inode *inode,
				  struct fuse_dax_mapping *dmap)
{
	struct fuse_removemapping_one forget_one;
	struct fuse_removemapping_in inarg;

	memset(&inarg, 0, sizeof(inarg));
	inarg.count = 1;
	memset(&forget_one, 0, sizeof(forget_one));
	forget_one.moffset = dmap->window_offset;
	forget_one.len = dmap->length;

	return fuse_send_removemapping(inode, &inarg, &forget_one);
}

/*
 * It is called from evict_inode() and by that time inode is going away. So
 * this function does not take any locks like fi->dax->sem for traversing
 * that fuse inode interval tree. If that lock is taken then lock validator
 * complains of deadlock situation w.r.t fs_reclaim lock.
 */
void fuse_dax_inode_cleanup(struct inode *inode)
{
	struct fuse_conn *fc = get_fuse_conn(inode);
	struct fuse_inode *fi = get_fuse_inode(inode);

	/*
	 * fuse_evict_inode() has already called truncate_inode_pages_final()
	 * before we arrive here. So we should not have to worry about any
	 * pages/exception entries still associated with inode.
	 */
	inode_reclaim_dmap_range(fc->dax, inode, 0, -1);
	WARN_ON(fi->dax->nr);
}

static void fuse_fill_iomap_hole(struct iomap *iomap, loff_t length)
{
	iomap->addr = IOMAP_NULL_ADDR;
	iomap->length = length;
	iomap->type = IOMAP_HOLE;
}

static void fuse_fill_iomap(struct inode *inode, loff_t pos, loff_t length,
			    struct iomap *iomap, struct fuse_dax_mapping *dmap,
			    unsigned int flags)
{
	loff_t offset, len;
	loff_t i_size = i_size_read(inode);

	offset = pos - (dmap->itn.start << FUSE_DAX_SHIFT);
	len = min(length, dmap->length - offset);

	/* If length is beyond end of file, truncate further */
	if (pos + len > i_size)
		len = i_size - pos;

	if (len > 0) {
		iomap->addr = dmap->window_offset + offset;
		iomap->length = len;
		if (flags & IOMAP_FAULT)
			iomap->length = ALIGN(len, PAGE_SIZE);
		iomap->type = IOMAP_MAPPED;
		/*
		 * Increase refcnt so that reclaim code knows this dmap is in
		 * use. This assumes fi->dax->sem lock is held either
		 * shared or exclusive.
		 */
		refcount_inc(&dmap->refcnt);

		/* iomap->private should be NULL */
		WARN_ON_ONCE(iomap->private);
		iomap->private = dmap;
	} else {
		/* Mapping beyond end of file is hole */
		fuse_fill_iomap_hole(iomap, length);
	}
}

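/*
 * The dmap stashed in iomap->private above is picked up by fuse_iomap_end(),
 * which drops the reference taken here once the iomap operation completes.
 * That pairing keeps range reclaim from freeing a mapping that is still in
 * use by an in-flight read/write/fault.
 */
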
static int fuse_setup_new_dax_mapping(struct inode *inode, loff_t pos,
				      loff_t length, unsigned int flags,
				      struct iomap *iomap)
{
	struct fuse_inode *fi = get_fuse_inode(inode);
	struct fuse_conn *fc = get_fuse_conn(inode);
	struct fuse_conn_dax *fcd = fc->dax;
	struct fuse_dax_mapping *dmap, *alloc_dmap = NULL;
	int ret;
	bool writable = flags & IOMAP_WRITE;
	unsigned long start_idx = pos >> FUSE_DAX_SHIFT;
	struct interval_tree_node *node;

	/*
	 * Can't do inline reclaim in fault path. We call
	 * dax_layout_busy_page() before we free a range. And
	 * fuse_wait_dax_page() drops fi->i_mmap_sem lock and requires it.
	 * In fault path we enter with fi->i_mmap_sem held and can't drop
	 * it. Also in fault path we hold fi->i_mmap_sem shared and not
	 * exclusive, so that creates further issues with fuse_wait_dax_page().
	 * Hence return -EAGAIN and fuse_dax_fault() will wait for a memory
	 * range to become free and retry.
	 */
	if (flags & IOMAP_FAULT) {
		alloc_dmap = alloc_dax_mapping(fcd);
		if (!alloc_dmap)
			return -EAGAIN;
	} else {
		alloc_dmap = alloc_dax_mapping_reclaim(fcd, inode);
		if (IS_ERR(alloc_dmap))
			return PTR_ERR(alloc_dmap);
	}

	/* If we are here, we should have memory allocated */
	if (WARN_ON(!alloc_dmap))
		return -EIO;

	/*
	 * Take write lock so that only one caller can try to setup mapping
	 * and others wait.
	 */
	down_write(&fi->dax->sem);
	/*
	 * We dropped lock. Check again if somebody else setup
	 * mapping already.
	 */
	node = interval_tree_iter_first(&fi->dax->tree, start_idx, start_idx);
	if (node) {
		dmap = node_to_dmap(node);
		fuse_fill_iomap(inode, pos, length, iomap, dmap, flags);
		dmap_add_to_free_pool(fcd, alloc_dmap);
		up_write(&fi->dax->sem);
		return 0;
	}

	/* Setup one mapping */
	ret = fuse_setup_one_mapping(inode, pos >> FUSE_DAX_SHIFT, alloc_dmap,
				     writable, false);
	if (ret < 0) {
		dmap_add_to_free_pool(fcd, alloc_dmap);
		up_write(&fi->dax->sem);
		return ret;
	}
	fuse_fill_iomap(inode, pos, length, iomap, alloc_dmap, flags);
	up_write(&fi->dax->sem);
	return 0;
}

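/*
 * Note the split above: in the fault path alloc_dax_mapping() failure becomes
 * -EAGAIN, and __fuse_dax_fault() then sleeps on fcd->range_waitq until the
 * reclaim worker frees a range and retries, whereas in the read/write path
 * alloc_dax_mapping_reclaim() performs inline reclaim instead.
 */
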
static int fuse_upgrade_dax_mapping(struct inode *inode, loff_t pos,
				    loff_t length, unsigned int flags,
				    struct iomap *iomap)
{
	struct fuse_inode *fi = get_fuse_inode(inode);
	struct fuse_dax_mapping *dmap;
	int ret;
	unsigned long idx = pos >> FUSE_DAX_SHIFT;
	struct interval_tree_node *node;

	/*
	 * Take exclusive lock so that only one caller can try to setup
	 * mapping and others wait.
	 */
	down_write(&fi->dax->sem);
	node = interval_tree_iter_first(&fi->dax->tree, idx, idx);

	/* We are holding either inode lock or i_mmap_sem, and that should
	 * ensure that dmap can't be truncated. We are holding a reference
	 * on dmap and that should make sure it can't be reclaimed. So dmap
	 * should still be there in tree despite the fact we dropped and
	 * re-acquired the fi->dax->sem lock.
	 */
	ret = -EIO;
	if (WARN_ON(!node))
		goto out_err;

	dmap = node_to_dmap(node);

	/* We took an extra reference on dmap to make sure it's not reclaimed.
	 * Now we hold fi->dax->sem lock and that reference is not needed
	 * anymore. Drop it.
	 */
	if (refcount_dec_and_test(&dmap->refcnt)) {
		/* refcount should not hit 0. This object only goes
		 * away when fuse connection goes away
		 */
		WARN_ON_ONCE(1);
	}

	/* Maybe another thread already upgraded mapping while we were not
	 * holding lock.
	 */
	if (dmap->writable) {
		ret = 0;
		goto out_fill_iomap;
	}

	ret = fuse_setup_one_mapping(inode, pos >> FUSE_DAX_SHIFT, dmap, true,
				     true);
	if (ret < 0)
		goto out_err;
out_fill_iomap:
	fuse_fill_iomap(inode, pos, length, iomap, dmap, flags);
out_err:
	up_write(&fi->dax->sem);
	return ret;
}

/* This is just for DAX and the mapping is ephemeral, do not use it for other
 * purposes since there is no block device with a permanent mapping.
 */
static int fuse_iomap_begin(struct inode *inode, loff_t pos, loff_t length,
			    unsigned int flags, struct iomap *iomap,
			    struct iomap *srcmap)
{
	struct fuse_inode *fi = get_fuse_inode(inode);
	struct fuse_conn *fc = get_fuse_conn(inode);
	struct fuse_dax_mapping *dmap;
	bool writable = flags & IOMAP_WRITE;
	unsigned long start_idx = pos >> FUSE_DAX_SHIFT;
	struct interval_tree_node *node;

	/* We don't support FIEMAP */
	if (WARN_ON(flags & IOMAP_REPORT))
		return -EIO;

	iomap->offset = pos;
	iomap->flags = 0;
	iomap->bdev = NULL;
	iomap->dax_dev = fc->dax->dev;

	/*
	 * Both read/write and mmap path can race here. So we need something
	 * to make sure if we are setting up mapping, then other path waits.
	 *
	 * For now, use a semaphore for this. It probably needs to be
	 * optimized later.
	 */
	down_read(&fi->dax->sem);
	node = interval_tree_iter_first(&fi->dax->tree, start_idx, start_idx);
	if (node) {
		dmap = node_to_dmap(node);
		if (writable && !dmap->writable) {
			/* Upgrade read-only mapping to read-write. This will
			 * require exclusive fi->dax->sem lock as we don't want
			 * two threads to be trying to do this simultaneously
			 * for same dmap. So drop shared lock and acquire
			 * exclusive lock.
			 *
			 * Before dropping fi->dax->sem lock, take reference
			 * on dmap so that it's not freed by range reclaim.
			 */
			refcount_inc(&dmap->refcnt);
			up_read(&fi->dax->sem);
			pr_debug("%s: Upgrading mapping at offset 0x%llx length 0x%llx\n",
				 __func__, pos, length);
			return fuse_upgrade_dax_mapping(inode, pos, length,
							flags, iomap);
		} else {
			fuse_fill_iomap(inode, pos, length, iomap, dmap, flags);
			up_read(&fi->dax->sem);
			return 0;
		}
	} else {
		up_read(&fi->dax->sem);
		pr_debug("%s: no mapping at offset 0x%llx length 0x%llx\n",
			 __func__, pos, length);
		if (pos >= i_size_read(inode))
			goto iomap_hole;

		return fuse_setup_new_dax_mapping(inode, pos, length, flags,
						  iomap);
	}

	/*
	 * If a read beyond end of file happens, fs code seems to return
	 * it as hole
	 */
iomap_hole:
	fuse_fill_iomap_hole(iomap, length);
	pr_debug("%s returning hole mapping. pos=0x%llx length_asked=0x%llx length_returned=0x%llx\n",
		 __func__, pos, length, iomap->length);
	return 0;
}

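/*
 * Lock ordering in fuse_iomap_begin(): the common case only needs
 * fi->dax->sem shared; upgrading a read-only dmap to read-write requires the
 * exclusive lock, so the shared lock is dropped (after pinning the dmap with
 * a refcount) and fuse_upgrade_dax_mapping() re-takes it exclusively.
 */
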
static int fuse_iomap_end(struct inode *inode, loff_t pos, loff_t length,
			  ssize_t written, unsigned int flags,
			  struct iomap *iomap)
{
	struct fuse_dax_mapping *dmap = iomap->private;

	if (dmap) {
		if (refcount_dec_and_test(&dmap->refcnt)) {
			/* refcount should not hit 0. This object only goes
			 * away when fuse connection goes away
			 */
			WARN_ON_ONCE(1);
		}
	}

	/* DAX writes beyond end-of-file aren't handled using iomap, so the
	 * file size is unchanged and there is nothing to do here.
	 */
	return 0;
}

static const struct iomap_ops fuse_iomap_ops = {
	.iomap_begin	= fuse_iomap_begin,
	.iomap_end	= fuse_iomap_end,
};

static void fuse_wait_dax_page(struct inode *inode)
{
	struct fuse_inode *fi = get_fuse_inode(inode);

	up_write(&fi->i_mmap_sem);
	schedule();
	down_write(&fi->i_mmap_sem);
}

/* Should be called with fi->i_mmap_sem lock held exclusively */
static int __fuse_dax_break_layouts(struct inode *inode, bool *retry,
				    loff_t start, loff_t end)
{
	struct page *page;

	page = dax_layout_busy_page_range(inode->i_mapping, start, end);
	if (!page)
		return 0;

	*retry = true;
	return ___wait_var_event(&page->_refcount,
			atomic_read(&page->_refcount) == 1, TASK_INTERRUPTIBLE,
			0, 0, fuse_wait_dax_page(inode));
}

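/*
 * dax_layout_busy_page_range() returns a page in the range that still has
 * elevated references (e.g. from get_user_pages()). The wait above drops and
 * re-takes fi->i_mmap_sem via fuse_wait_dax_page() until that page's
 * _refcount falls back to 1; fuse_dax_break_layouts() then retries the scan.
 */
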
/* dmap_end == 0 leads to unmapping of whole file */
int fuse_dax_break_layouts(struct inode *inode, u64 dmap_start,
			   u64 dmap_end)
{
	bool	retry;
	int	ret;

	do {
		retry = false;
		ret = __fuse_dax_break_layouts(inode, &retry, dmap_start,
					       dmap_end);
	} while (ret == 0 && retry);

	return ret;
}

ssize_t fuse_dax_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
	struct inode *inode = file_inode(iocb->ki_filp);
	ssize_t ret;

	if (iocb->ki_flags & IOCB_NOWAIT) {
		if (!inode_trylock_shared(inode))
			return -EAGAIN;
	} else {
		inode_lock_shared(inode);
	}

	ret = dax_iomap_rw(iocb, to, &fuse_iomap_ops);
	inode_unlock_shared(inode);

	/* TODO file_accessed(iocb->f_filp) */
	return ret;
}

static bool file_extending_write(struct kiocb *iocb, struct iov_iter *from)
{
	struct inode *inode = file_inode(iocb->ki_filp);

	return (iov_iter_rw(from) == WRITE &&
		((iocb->ki_pos) >= i_size_read(inode) ||
		 (iocb->ki_pos + iov_iter_count(from) > i_size_read(inode))));
}

static ssize_t fuse_dax_direct_write(struct kiocb *iocb, struct iov_iter *from)
{
	struct inode *inode = file_inode(iocb->ki_filp);
	struct fuse_io_priv io = FUSE_IO_PRIV_SYNC(iocb);
	ssize_t ret;

	ret = fuse_direct_io(&io, from, &iocb->ki_pos, FUSE_DIO_WRITE);
	if (ret < 0)
		return ret;

	fuse_invalidate_attr(inode);
	fuse_write_update_size(inode, iocb->ki_pos);
	return ret;
}

fuse_dax_write_iter(struct kiocb
*iocb
, struct iov_iter
*from
)
746 struct inode
*inode
= file_inode(iocb
->ki_filp
);
749 if (iocb
->ki_flags
& IOCB_NOWAIT
) {
750 if (!inode_trylock(inode
))
756 ret
= generic_write_checks(iocb
, from
);
760 ret
= file_remove_privs(iocb
->ki_filp
);
763 /* TODO file_update_time() but we don't want metadata I/O */
765 /* Do not use dax for file extending writes as write and on
766 * disk i_size increase are not atomic otherwise.
768 if (file_extending_write(iocb
, from
))
769 ret
= fuse_dax_direct_write(iocb
, from
);
771 ret
= dax_iomap_rw(iocb
, from
, &fuse_iomap_ops
);
777 ret
= generic_write_sync(iocb
, ret
);
static int fuse_dax_writepages(struct address_space *mapping,
			       struct writeback_control *wbc)
{
	struct inode *inode = mapping->host;
	struct fuse_conn *fc = get_fuse_conn(inode);

	return dax_writeback_mapping_range(mapping, fc->dax->dev, wbc);
}

static vm_fault_t __fuse_dax_fault(struct vm_fault *vmf,
				   enum page_entry_size pe_size, bool write)
{
	vm_fault_t ret;
	struct inode *inode = file_inode(vmf->vma->vm_file);
	struct super_block *sb = inode->i_sb;
	pfn_t pfn;
	int error = 0;
	struct fuse_conn *fc = get_fuse_conn(inode);
	struct fuse_conn_dax *fcd = fc->dax;
	bool retry = false;

	if (write)
		sb_start_pagefault(sb);
retry:
	if (retry && !(fcd->nr_free_ranges > 0))
		wait_event(fcd->range_waitq, (fcd->nr_free_ranges > 0));

	/*
	 * We need to serialize against not only truncate but also against
	 * fuse dax memory range reclaim. While a range is being reclaimed,
	 * we do not want any read/write/mmap to make progress and try
	 * to populate page cache or access memory we are trying to free.
	 */
	down_read(&get_fuse_inode(inode)->i_mmap_sem);
	ret = dax_iomap_fault(vmf, pe_size, &pfn, &error, &fuse_iomap_ops);
	if ((ret & VM_FAULT_ERROR) && error == -EAGAIN) {
		error = 0;
		retry = true;
		up_read(&get_fuse_inode(inode)->i_mmap_sem);
		goto retry;
	}

	if (ret & VM_FAULT_NEEDDSYNC)
		ret = dax_finish_sync_fault(vmf, pe_size, pfn);
	up_read(&get_fuse_inode(inode)->i_mmap_sem);

	if (write)
		sb_end_pagefault(sb);

	return ret;
}

static vm_fault_t fuse_dax_fault(struct vm_fault *vmf)
{
	return __fuse_dax_fault(vmf, PE_SIZE_PTE,
				vmf->flags & FAULT_FLAG_WRITE);
}

static vm_fault_t fuse_dax_huge_fault(struct vm_fault *vmf,
				      enum page_entry_size pe_size)
{
	return __fuse_dax_fault(vmf, pe_size, vmf->flags & FAULT_FLAG_WRITE);
}

static vm_fault_t fuse_dax_page_mkwrite(struct vm_fault *vmf)
{
	return __fuse_dax_fault(vmf, PE_SIZE_PTE, true);
}

static vm_fault_t fuse_dax_pfn_mkwrite(struct vm_fault *vmf)
{
	return __fuse_dax_fault(vmf, PE_SIZE_PTE, true);
}

static const struct vm_operations_struct fuse_dax_vm_ops = {
	.fault		= fuse_dax_fault,
	.huge_fault	= fuse_dax_huge_fault,
	.page_mkwrite	= fuse_dax_page_mkwrite,
	.pfn_mkwrite	= fuse_dax_pfn_mkwrite,
};

int fuse_dax_mmap(struct file *file, struct vm_area_struct *vma)
{
	file_accessed(file);
	vma->vm_ops = &fuse_dax_vm_ops;
	vma->vm_flags |= VM_MIXEDMAP | VM_HUGEPAGE;
	return 0;
}

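/*
 * VM_MIXEDMAP allows the VMA to mix pfn-based DAX mappings with ordinary
 * struct-page mappings, and VM_HUGEPAGE hints that PMD-sized faults may be
 * used (handled by fuse_dax_huge_fault() above).
 */
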
static int dmap_writeback_invalidate(struct inode *inode,
				     struct fuse_dax_mapping *dmap)
{
	int ret;
	loff_t start_pos = dmap->itn.start << FUSE_DAX_SHIFT;
	loff_t end_pos = (start_pos + FUSE_DAX_SZ - 1);

	ret = filemap_fdatawrite_range(inode->i_mapping, start_pos, end_pos);
	if (ret) {
		pr_debug("fuse: filemap_fdatawrite_range() failed. err=%d start_pos=0x%llx, end_pos=0x%llx\n",
			 ret, start_pos, end_pos);
		return ret;
	}

	ret = invalidate_inode_pages2_range(inode->i_mapping,
					    start_pos >> PAGE_SHIFT,
					    end_pos >> PAGE_SHIFT);
	if (ret)
		pr_debug("fuse: invalidate_inode_pages2_range() failed err=%d\n",
			 ret);

	return ret;
}

static int reclaim_one_dmap_locked(struct inode *inode,
				   struct fuse_dax_mapping *dmap)
{
	int ret;
	struct fuse_inode *fi = get_fuse_inode(inode);

	/*
	 * igrab() was done to make sure inode won't go away under us, and this
	 * further avoids the race with evict().
	 */
	ret = dmap_writeback_invalidate(inode, dmap);
	if (ret)
		return ret;

	/* Remove dax mapping from inode interval tree now */
	interval_tree_remove(&dmap->itn, &fi->dax->tree);
	fi->dax->nr--;

	/* It is possible that umount/shutdown has killed the fuse connection
	 * and worker thread is trying to reclaim memory in parallel. Don't
	 * warn in that case.
	 */
	ret = dmap_removemapping_one(inode, dmap);
	if (ret && ret != -ENOTCONN) {
		pr_warn("Failed to remove mapping. offset=0x%llx len=0x%llx ret=%d\n",
			dmap->window_offset, dmap->length, ret);
	}
	return 0;
}

/* Find first mapped dmap for an inode and return file offset. Caller needs
 * to hold fi->dax->sem lock either shared or exclusive.
 */
static struct fuse_dax_mapping *inode_lookup_first_dmap(struct inode *inode)
{
	struct fuse_inode *fi = get_fuse_inode(inode);
	struct fuse_dax_mapping *dmap;
	struct interval_tree_node *node;

	for (node = interval_tree_iter_first(&fi->dax->tree, 0, -1); node;
	     node = interval_tree_iter_next(node, 0, -1)) {
		dmap = node_to_dmap(node);
		/* still in use. */
		if (refcount_read(&dmap->refcnt) > 1)
			continue;

		return dmap;
	}

	return NULL;
}

/*
 * Find first mapping in the tree and free it and return it. Do not add
 * it back to free pool.
 */
static struct fuse_dax_mapping *
inode_inline_reclaim_one_dmap(struct fuse_conn_dax *fcd, struct inode *inode,
			      bool *retry)
{
	struct fuse_inode *fi = get_fuse_inode(inode);
	struct fuse_dax_mapping *dmap;
	u64 dmap_start, dmap_end;
	unsigned long start_idx;
	int ret;
	struct interval_tree_node *node;

	down_write(&fi->i_mmap_sem);

	/* Lookup a dmap and corresponding file offset to reclaim. */
	down_read(&fi->dax->sem);
	dmap = inode_lookup_first_dmap(inode);
	if (dmap) {
		start_idx = dmap->itn.start;
		dmap_start = start_idx << FUSE_DAX_SHIFT;
		dmap_end = dmap_start + FUSE_DAX_SZ - 1;
	}
	up_read(&fi->dax->sem);

	if (!dmap)
		goto out_mmap_sem;
	/*
	 * Make sure there are no references to inode pages using
	 * get_user_pages()
	 */
	ret = fuse_dax_break_layouts(inode, dmap_start, dmap_end);
	if (ret) {
		pr_debug("fuse: fuse_dax_break_layouts() failed. err=%d\n",
			 ret);
		dmap = ERR_PTR(ret);
		goto out_mmap_sem;
	}

	down_write(&fi->dax->sem);
	node = interval_tree_iter_first(&fi->dax->tree, start_idx, start_idx);
	/* Range already got reclaimed by somebody else */
	if (!node) {
		if (retry)
			*retry = true;
		goto out_write_dmap_sem;
	}

	dmap = node_to_dmap(node);
	/* still in use. */
	if (refcount_read(&dmap->refcnt) > 1) {
		dmap = NULL;
		if (retry)
			*retry = true;
		goto out_write_dmap_sem;
	}

	ret = reclaim_one_dmap_locked(inode, dmap);
	if (ret < 0) {
		dmap = ERR_PTR(ret);
		goto out_write_dmap_sem;
	}

	/* Clean up dmap. Do not add back to free list */
	dmap_remove_busy_list(fcd, dmap);
	dmap->inode = NULL;
	dmap->itn.start = dmap->itn.last = 0;

	pr_debug("fuse: %s: inline reclaimed memory range. inode=%p, window_offset=0x%llx, length=0x%llx\n",
		 __func__, inode, dmap->window_offset, dmap->length);

out_write_dmap_sem:
	up_write(&fi->dax->sem);
out_mmap_sem:
	up_write(&fi->i_mmap_sem);
	return dmap;
}

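/*
 * Inline reclaim in a nutshell: take fi->i_mmap_sem exclusively to stop new
 * faults, pick the first idle dmap (refcnt == 1), break layouts so no pinned
 * pages remain, re-check the tree under fi->dax->sem, write back and
 * invalidate the page cache range, and finally detach the dmap from the
 * inode without returning it to the free pool.
 */
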
static struct fuse_dax_mapping *
alloc_dax_mapping_reclaim(struct fuse_conn_dax *fcd, struct inode *inode)
{
	struct fuse_dax_mapping *dmap;
	struct fuse_inode *fi = get_fuse_inode(inode);

	while (1) {
		bool retry = false;

		dmap = alloc_dax_mapping(fcd);
		if (dmap)
			return dmap;

		dmap = inode_inline_reclaim_one_dmap(fcd, inode, &retry);
		/*
		 * Either we got a mapping or it is an error, return in both
		 * the cases.
		 */
		if (dmap)
			return dmap;

		/* If we could not reclaim a mapping because it
		 * had a reference or some other temporary failure,
		 * try again. We want to give up inline reclaim only
		 * if there is no range assigned to this node. Otherwise
		 * a deadlock is possible if we sleep with fi->i_mmap_sem
		 * held while the worker that frees memory can't make
		 * progress because it needs fi->i_mmap_sem. So sleep
		 * only if fi->dax->nr == 0.
		 */
		if (retry)
			continue;
		/*
		 * There are no mappings which can be reclaimed. Wait for one.
		 * We are not holding fi->dax->sem. So it is possible
		 * that range gets added now. But as we are not holding
		 * fi->i_mmap_sem, worker should still be able to free up
		 * a range and wake us up.
		 */
		if (!fi->dax->nr && !(fcd->nr_free_ranges > 0)) {
			if (wait_event_killable_exclusive(fcd->range_waitq,
					(fcd->nr_free_ranges > 0))) {
				return ERR_PTR(-EINTR);
			}
		}
	}
}

static int lookup_and_reclaim_dmap_locked(struct fuse_conn_dax *fcd,
					  struct inode *inode,
					  unsigned long start_idx)
{
	int ret;
	struct fuse_inode *fi = get_fuse_inode(inode);
	struct fuse_dax_mapping *dmap;
	struct interval_tree_node *node;

	/* Find the fuse dax mapping at this file offset in the inode. */
	node = interval_tree_iter_first(&fi->dax->tree, start_idx, start_idx);

	/* Range already got cleaned up by somebody else */
	if (!node)
		return 0;

	dmap = node_to_dmap(node);

	/* still in use. */
	if (refcount_read(&dmap->refcnt) > 1)
		return 0;

	ret = reclaim_one_dmap_locked(inode, dmap);
	if (ret < 0)
		return ret;

	/* Cleanup dmap entry and add back to free list */
	spin_lock(&fcd->lock);
	dmap_reinit_add_to_free_pool(fcd, dmap);
	spin_unlock(&fcd->lock);
	return ret;
}

/*
 * Free a range of memory.
 *
 * 1. Take fi->i_mmap_sem to block dax faults.
 * 2. Take fi->dax->sem to protect interval tree and also to make sure
 *    read/write can not reuse a dmap which we might be freeing.
 */
static int lookup_and_reclaim_dmap(struct fuse_conn_dax *fcd,
				   struct inode *inode,
				   unsigned long start_idx,
				   unsigned long end_idx)
{
	int ret;
	struct fuse_inode *fi = get_fuse_inode(inode);
	loff_t dmap_start = start_idx << FUSE_DAX_SHIFT;
	loff_t dmap_end = (dmap_start + FUSE_DAX_SZ) - 1;

	down_write(&fi->i_mmap_sem);
	ret = fuse_dax_break_layouts(inode, dmap_start, dmap_end);
	if (ret) {
		pr_debug("virtio_fs: fuse_dax_break_layouts() failed. err=%d\n",
			 ret);
		goto out_mmap_sem;
	}

	down_write(&fi->dax->sem);
	ret = lookup_and_reclaim_dmap_locked(fcd, inode, start_idx);
	up_write(&fi->dax->sem);
out_mmap_sem:
	up_write(&fi->i_mmap_sem);
	return ret;
}

static int try_to_free_dmap_chunks(struct fuse_conn_dax *fcd,
				   unsigned long nr_to_free)
{
	struct fuse_dax_mapping *dmap, *pos, *temp;
	int ret, nr_freed = 0;
	unsigned long start_idx = 0, end_idx = 0;
	struct inode *inode = NULL;

	/* Pick first busy range and free it for now */
	while (1) {
		if (nr_freed >= nr_to_free)
			break;

		dmap = NULL;
		spin_lock(&fcd->lock);

		if (!fcd->nr_busy_ranges) {
			spin_unlock(&fcd->lock);
			return 0;
		}

		list_for_each_entry_safe(pos, temp, &fcd->busy_ranges,
					 busy_list) {
			/* skip this range if it's in use. */
			if (refcount_read(&pos->refcnt) > 1)
				continue;

			inode = igrab(pos->inode);
			/*
			 * This inode is going away. That will free
			 * up all the ranges anyway, continue to
			 * next range.
			 */
			if (!inode)
				continue;
			/*
			 * Take this element off list and add it tail. If
			 * this element can't be freed, it will help with
			 * selecting new element in next iteration of loop.
			 */
			dmap = pos;
			list_move_tail(&dmap->busy_list, &fcd->busy_ranges);
			start_idx = end_idx = dmap->itn.start;
			break;
		}
		spin_unlock(&fcd->lock);
		if (!dmap)
			return 0;

		ret = lookup_and_reclaim_dmap(fcd, inode, start_idx, end_idx);
		iput(inode);
		if (ret)
			return ret;
		nr_freed++;
	}
	return 0;
}

static void fuse_dax_free_mem_worker(struct work_struct *work)
{
	int ret;
	struct fuse_conn_dax *fcd = container_of(work, struct fuse_conn_dax,
						 free_work.work);

	ret = try_to_free_dmap_chunks(fcd, FUSE_DAX_RECLAIM_CHUNK);
	if (ret) {
		pr_debug("fuse: try_to_free_dmap_chunks() failed with err=%d\n",
			 ret);
	}

	/* If the number of free ranges is still below the threshold, requeue */
	kick_dmap_free_worker(fcd, 1);
}

static void fuse_free_dax_mem_ranges(struct list_head *mem_list)
{
	struct fuse_dax_mapping *range, *temp;

	/* Free all allocated elements */
	list_for_each_entry_safe(range, temp, mem_list, list) {
		list_del(&range->list);
		if (!list_empty(&range->busy_list))
			list_del(&range->busy_list);
		kfree(range);
	}
}

void fuse_dax_conn_free(struct fuse_conn *fc)
{
	if (fc->dax) {
		fuse_free_dax_mem_ranges(&fc->dax->free_ranges);
		kfree(fc->dax);
	}
}

static int fuse_dax_mem_range_init(struct fuse_conn_dax *fcd)
{
	long nr_pages, nr_ranges;
	void *kaddr;
	pfn_t pfn;
	struct fuse_dax_mapping *range;
	int ret, id;
	size_t dax_size = -1;
	unsigned long i;

	init_waitqueue_head(&fcd->range_waitq);
	INIT_LIST_HEAD(&fcd->free_ranges);
	INIT_LIST_HEAD(&fcd->busy_ranges);
	INIT_DELAYED_WORK(&fcd->free_work, fuse_dax_free_mem_worker);

	id = dax_read_lock();
	nr_pages = dax_direct_access(fcd->dev, 0, PHYS_PFN(dax_size), &kaddr,
				     &pfn);
	dax_read_unlock(id);
	if (nr_pages < 0) {
		pr_debug("dax_direct_access() returned %ld\n", nr_pages);
		return nr_pages;
	}

	nr_ranges = nr_pages / FUSE_DAX_PAGES;
	pr_debug("%s: dax mapped %ld pages. nr_ranges=%ld\n",
		 __func__, nr_pages, nr_ranges);

	for (i = 0; i < nr_ranges; i++) {
		range = kzalloc(sizeof(struct fuse_dax_mapping), GFP_KERNEL);
		if (!range) {
			ret = -ENOMEM;
			goto out_err;
		}

		/* TODO: This offset only works if virtio-fs driver is not
		 * having some memory hidden at the beginning. This needs
		 * better handling.
		 */
		range->window_offset = i * FUSE_DAX_SZ;
		range->length = FUSE_DAX_SZ;
		INIT_LIST_HEAD(&range->busy_list);
		refcount_set(&range->refcnt, 1);
		list_add_tail(&range->list, &fcd->free_ranges);
	}

	fcd->nr_free_ranges = nr_ranges;
	fcd->nr_ranges = nr_ranges;
	return 0;
out_err:
	/* Free all allocated elements */
	fuse_free_dax_mem_ranges(&fcd->free_ranges);
	return ret;
}

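/*
 * Example with illustrative numbers: a 64 GiB DAX window with 4 KiB pages
 * reports nr_pages = 16777216 from dax_direct_access(), so nr_ranges =
 * 16777216 / FUSE_DAX_PAGES (512) = 32768 ranges of 2 MiB each.
 */
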
int fuse_dax_conn_alloc(struct fuse_conn *fc, struct dax_device *dax_dev)
{
	struct fuse_conn_dax *fcd;
	int err;

	if (!dax_dev)
		return 0;

	fcd = kzalloc(sizeof(*fcd), GFP_KERNEL);
	if (!fcd)
		return -ENOMEM;

	spin_lock_init(&fcd->lock);
	fcd->dev = dax_dev;
	err = fuse_dax_mem_range_init(fcd);
	if (err) {
		kfree(fcd);
		return err;
	}

	fc->dax = fcd;
	return 0;
}

bool fuse_dax_inode_alloc(struct super_block *sb, struct fuse_inode *fi)
{
	struct fuse_conn *fc = get_fuse_conn_super(sb);

	fi->dax = NULL;
	if (fc->dax) {
		fi->dax = kzalloc(sizeof(*fi->dax), GFP_KERNEL_ACCOUNT);
		if (!fi->dax)
			return false;

		init_rwsem(&fi->dax->sem);
		fi->dax->tree = RB_ROOT_CACHED;
	}

	return true;
}

static const struct address_space_operations fuse_dax_file_aops = {
	.writepages	= fuse_dax_writepages,
	.direct_IO	= noop_direct_IO,
	.set_page_dirty	= noop_set_page_dirty,
	.invalidatepage	= noop_invalidatepage,
};

void fuse_dax_inode_init(struct inode *inode)
{
	struct fuse_conn *fc = get_fuse_conn(inode);

	if (!fc->dax)
		return;

	inode->i_flags |= S_DAX;
	inode->i_data.a_ops = &fuse_dax_file_aops;
}

bool fuse_dax_check_alignment(struct fuse_conn *fc, unsigned int map_alignment)
{
	if (fc->dax && (map_alignment > FUSE_DAX_SHIFT)) {
		pr_warn("FUSE: map_alignment %u incompatible with dax mem range size %u\n",
			map_alignment, FUSE_DAX_SZ);
		return false;
	}
	return true;
}

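/*
 * map_alignment is the alignment shift reported by the server in FUSE_INIT;
 * e.g. 12 (4 KiB) or 16 (64 KiB) both satisfy map_alignment <= FUSE_DAX_SHIFT
 * (21), while an alignment larger than the 2 MiB range size is rejected.
 */
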
void fuse_dax_cancel_work(struct fuse_conn *fc)
{
	struct fuse_conn_dax *fcd = fc->dax;

	if (fcd)
		cancel_delayed_work_sync(&fcd->free_work);
}
EXPORT_SYMBOL_GPL(fuse_dax_cancel_work);