/*
  FUSE: Filesystem in Userspace
  Copyright (C) 2001-2008  Miklos Szeredi <miklos@szeredi.hu>

  This program can be distributed under the terms of the GNU GPL.
  See the file COPYING.
*/

#include "fuse_i.h"

#include <linux/pagemap.h>
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/sched/signal.h>
#include <linux/module.h>
#include <linux/swap.h>
#include <linux/falloc.h>
#include <linux/uio.h>
#include <linux/fs.h>
#include <linux/filelock.h>
#include <linux/splice.h>
#include <linux/task_io_accounting_ops.h>
static int fuse_send_open(struct fuse_mount *fm, u64 nodeid,
			  unsigned int open_flags, int opcode,
			  struct fuse_open_out *outargp)
{
	struct fuse_open_in inarg;
	FUSE_ARGS(args);

	memset(&inarg, 0, sizeof(inarg));
	inarg.flags = open_flags & ~(O_CREAT | O_EXCL | O_NOCTTY);
	if (!fm->fc->atomic_o_trunc)
		inarg.flags &= ~O_TRUNC;

	if (fm->fc->handle_killpriv_v2 &&
	    (inarg.flags & O_TRUNC) && !capable(CAP_FSETID)) {
		inarg.open_flags |= FUSE_OPEN_KILL_SUIDGID;
	}

	args.opcode = opcode;
	args.nodeid = nodeid;
	args.in_numargs = 1;
	args.in_args[0].size = sizeof(inarg);
	args.in_args[0].value = &inarg;
	args.out_numargs = 1;
	args.out_args[0].size = sizeof(*outargp);
	args.out_args[0].value = outargp;

	return fuse_simple_request(fm, &args);
}
struct fuse_file *fuse_file_alloc(struct fuse_mount *fm, bool release)
{
	struct fuse_file *ff;

	ff = kzalloc(sizeof(struct fuse_file), GFP_KERNEL_ACCOUNT);
	if (unlikely(!ff))
		return NULL;

	ff->fm = fm;
	if (release) {
		ff->args = kzalloc(sizeof(*ff->args), GFP_KERNEL_ACCOUNT);
		if (!ff->args) {
			kfree(ff);
			return NULL;
		}
	}

	INIT_LIST_HEAD(&ff->write_entry);
	refcount_set(&ff->count, 1);
	RB_CLEAR_NODE(&ff->polled_node);
	init_waitqueue_head(&ff->poll_wait);

	ff->kh = atomic64_inc_return(&fm->fc->khctr);

	return ff;
}

void fuse_file_free(struct fuse_file *ff)
{
	kfree(ff->args);
	kfree(ff);
}

static struct fuse_file *fuse_file_get(struct fuse_file *ff)
{
	refcount_inc(&ff->count);
	return ff;
}
static void fuse_release_end(struct fuse_mount *fm, struct fuse_args *args,
			     int error)
{
	struct fuse_release_args *ra = container_of(args, typeof(*ra), args);

	iput(ra->inode);
}

static void fuse_file_put(struct fuse_file *ff, bool sync)
{
	if (refcount_dec_and_test(&ff->count)) {
		struct fuse_release_args *ra = &ff->args->release_args;
		struct fuse_args *args = (ra ? &ra->args : NULL);

		if (ra && ra->inode)
			fuse_file_io_release(ff, ra->inode);

		if (!args) {
			/* Do nothing when server does not implement 'open' */
		} else if (sync) {
			fuse_simple_request(ff->fm, args);
			fuse_release_end(ff->fm, args, 0);
		} else {
			args->end = fuse_release_end;
			if (fuse_simple_background(ff->fm, args,
						   GFP_KERNEL | __GFP_NOFAIL))
				fuse_release_end(ff->fm, args, -ENOTCONN);
		}
		kfree(ff);
	}
}
struct fuse_file *fuse_file_open(struct fuse_mount *fm, u64 nodeid,
				 unsigned int open_flags, bool isdir)
{
	struct fuse_conn *fc = fm->fc;
	struct fuse_file *ff;
	int opcode = isdir ? FUSE_OPENDIR : FUSE_OPEN;
	bool open = isdir ? !fc->no_opendir : !fc->no_open;

	ff = fuse_file_alloc(fm, open);
	if (!ff)
		return ERR_PTR(-ENOMEM);

	/* Default for no-open */
	ff->open_flags = FOPEN_KEEP_CACHE | (isdir ? FOPEN_CACHE_DIR : 0);
	if (open) {
		/* Store outarg for fuse_finish_open() */
		struct fuse_open_out *outargp = &ff->args->open_outarg;
		int err;

		err = fuse_send_open(fm, nodeid, open_flags, opcode, outargp);
		if (!err) {
			ff->fh = outargp->fh;
			ff->open_flags = outargp->open_flags;
		} else if (err != -ENOSYS) {
			fuse_file_free(ff);
			return ERR_PTR(err);
		} else {
			/* No release needed */
			kfree(ff->args);
			ff->args = NULL;
			if (isdir)
				fc->no_opendir = 1;
			else
				fc->no_open = 1;
		}
	}

	if (isdir)
		ff->open_flags &= ~FOPEN_DIRECT_IO;

	ff->nodeid = nodeid;

	return ff;
}
int fuse_do_open(struct fuse_mount *fm, u64 nodeid, struct file *file,
		 bool isdir)
{
	struct fuse_file *ff = fuse_file_open(fm, nodeid, file->f_flags, isdir);

	if (!IS_ERR(ff))
		file->private_data = ff;

	return PTR_ERR_OR_ZERO(ff);
}
EXPORT_SYMBOL_GPL(fuse_do_open);

static void fuse_link_write_file(struct file *file)
{
	struct inode *inode = file_inode(file);
	struct fuse_inode *fi = get_fuse_inode(inode);
	struct fuse_file *ff = file->private_data;
	/*
	 * file may be written through mmap, so chain it onto the
	 * inode's write_files list
	 */
	spin_lock(&fi->lock);
	if (list_empty(&ff->write_entry))
		list_add(&ff->write_entry, &fi->write_files);
	spin_unlock(&fi->lock);
}

int fuse_finish_open(struct inode *inode, struct file *file)
{
	struct fuse_file *ff = file->private_data;
	struct fuse_conn *fc = get_fuse_conn(inode);
	int err;

	err = fuse_file_io_open(file, inode);
	if (err)
		return err;

	if (ff->open_flags & FOPEN_STREAM)
		stream_open(inode, file);
	else if (ff->open_flags & FOPEN_NONSEEKABLE)
		nonseekable_open(inode, file);

	if ((file->f_mode & FMODE_WRITE) && fc->writeback_cache)
		fuse_link_write_file(file);

	return 0;
}

static void fuse_truncate_update_attr(struct inode *inode, struct file *file)
{
	struct fuse_conn *fc = get_fuse_conn(inode);
	struct fuse_inode *fi = get_fuse_inode(inode);

	spin_lock(&fi->lock);
	fi->attr_version = atomic64_inc_return(&fc->attr_version);
	i_size_write(inode, 0);
	spin_unlock(&fi->lock);
	file_update_time(file);
	fuse_invalidate_attr_mask(inode, FUSE_STATX_MODSIZE);
}
static int fuse_open(struct inode *inode, struct file *file)
{
	struct fuse_mount *fm = get_fuse_mount(inode);
	struct fuse_inode *fi = get_fuse_inode(inode);
	struct fuse_conn *fc = fm->fc;
	struct fuse_file *ff;
	int err;
	bool is_truncate = (file->f_flags & O_TRUNC) && fc->atomic_o_trunc;
	bool is_wb_truncate = is_truncate && fc->writeback_cache;
	bool dax_truncate = is_truncate && FUSE_IS_DAX(inode);

	if (fuse_is_bad(inode))
		return -EIO;

	err = generic_file_open(inode, file);
	if (err)
		return err;

	if (is_wb_truncate || dax_truncate)
		inode_lock(inode);

	if (dax_truncate) {
		filemap_invalidate_lock(inode->i_mapping);
		err = fuse_dax_break_layouts(inode, 0, 0);
		if (err)
			goto out_inode_unlock;
	}

	if (is_wb_truncate || dax_truncate)
		fuse_set_nowrite(inode);

	err = fuse_do_open(fm, get_node_id(inode), file, false);
	if (!err) {
		ff = file->private_data;
		err = fuse_finish_open(inode, file);
		if (err)
			fuse_sync_release(fi, ff, file->f_flags);
		else if (is_truncate)
			fuse_truncate_update_attr(inode, file);
	}

	if (is_wb_truncate || dax_truncate)
		fuse_release_nowrite(inode);
	if (!err) {
		if (is_truncate)
			truncate_pagecache(inode, 0);
		else if (!(ff->open_flags & FOPEN_KEEP_CACHE))
			invalidate_inode_pages2(inode->i_mapping);
	}
	if (dax_truncate)
		filemap_invalidate_unlock(inode->i_mapping);
out_inode_unlock:
	if (is_wb_truncate || dax_truncate)
		inode_unlock(inode);

	return err;
}
static void fuse_prepare_release(struct fuse_inode *fi, struct fuse_file *ff,
				 unsigned int flags, int opcode, bool sync)
{
	struct fuse_conn *fc = ff->fm->fc;
	struct fuse_release_args *ra = &ff->args->release_args;

	if (fuse_file_passthrough(ff))
		fuse_passthrough_release(ff, fuse_inode_backing(fi));

	/* Inode is NULL on error path of fuse_create_open() */
	if (likely(fi)) {
		spin_lock(&fi->lock);
		list_del(&ff->write_entry);
		spin_unlock(&fi->lock);
	}
	spin_lock(&fc->lock);
	if (!RB_EMPTY_NODE(&ff->polled_node))
		rb_erase(&ff->polled_node, &fc->polled_files);
	spin_unlock(&fc->lock);

	wake_up_interruptible_all(&ff->poll_wait);

	if (!ra)
		return;

	/* ff->args was used for open outarg */
	memset(ff->args, 0, sizeof(*ff->args));
	ra->inarg.fh = ff->fh;
	ra->inarg.flags = flags;
	ra->args.in_numargs = 1;
	ra->args.in_args[0].size = sizeof(struct fuse_release_in);
	ra->args.in_args[0].value = &ra->inarg;
	ra->args.opcode = opcode;
	ra->args.nodeid = ff->nodeid;
	ra->args.force = true;
	ra->args.nocreds = true;

	/*
	 * Hold inode until release is finished.
	 * From fuse_sync_release() the refcount is 1 and everything's
	 * synchronous, so we are fine with not doing igrab() here.
	 */
	ra->inode = sync ? NULL : igrab(&fi->inode);
}

void fuse_file_release(struct inode *inode, struct fuse_file *ff,
		       unsigned int open_flags, fl_owner_t id, bool isdir)
{
	struct fuse_inode *fi = get_fuse_inode(inode);
	struct fuse_release_args *ra = &ff->args->release_args;
	int opcode = isdir ? FUSE_RELEASEDIR : FUSE_RELEASE;

	fuse_prepare_release(fi, ff, open_flags, opcode, false);

	if (ra && ff->flock) {
		ra->inarg.release_flags |= FUSE_RELEASE_FLOCK_UNLOCK;
		ra->inarg.lock_owner = fuse_lock_owner_id(ff->fm->fc, id);
	}

	/*
	 * Normally this will send the RELEASE request, however if
	 * some asynchronous READ or WRITE requests are outstanding,
	 * the sending will be delayed.
	 *
	 * Make the release synchronous if this is a fuseblk mount,
	 * synchronous RELEASE is allowed (and desirable) in this case
	 * because the server can be trusted not to screw up.
	 */
	fuse_file_put(ff, ff->fm->fc->destroy);
}

void fuse_release_common(struct file *file, bool isdir)
{
	fuse_file_release(file_inode(file), file->private_data, file->f_flags,
			  (fl_owner_t) file, isdir);
}

static int fuse_release(struct inode *inode, struct file *file)
{
	struct fuse_conn *fc = get_fuse_conn(inode);

	/*
	 * Dirty pages might remain despite write_inode_now() call from
	 * fuse_flush() due to writes racing with the close.
	 */
	if (fc->writeback_cache)
		write_inode_now(inode, 1);

	fuse_release_common(file, false);

	/* return value is ignored by VFS */
	return 0;
}

void fuse_sync_release(struct fuse_inode *fi, struct fuse_file *ff,
		       unsigned int flags)
{
	WARN_ON(refcount_read(&ff->count) > 1);
	fuse_prepare_release(fi, ff, flags, FUSE_RELEASE, true);
	fuse_file_put(ff, true);
}
EXPORT_SYMBOL_GPL(fuse_sync_release);
/*
 * Scramble the ID space with XTEA, so that the value of the files_struct
 * pointer is not exposed to userspace.
 */
u64 fuse_lock_owner_id(struct fuse_conn *fc, fl_owner_t id)
{
	u32 *k = fc->scramble_key;
	u64 v = (unsigned long) id;
	u32 v0 = v;
	u32 v1 = v >> 32;
	u32 sum = 0;
	int i;

	for (i = 0; i < 32; i++) {
		v0 += ((v1 << 4 ^ v1 >> 5) + v1) ^ (sum + k[sum & 3]);
		sum += 0x9E3779B9;
		v1 += ((v0 << 4 ^ v0 >> 5) + v0) ^ (sum + k[sum>>11 & 3]);
	}

	return (u64) v0 + ((u64) v1 << 32);
}
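/*
 * Editor's note: the loop above is standard 32-round XTEA encipherment with
 * delta 0x9E3779B9.  A userspace sketch that computes the same scrambled id
 * (assuming the same 128-bit key bytes) for comparison:
 *
 *	uint64_t xtea_scramble(uint64_t v, const uint32_t k[4])
 *	{
 *		uint32_t v0 = v, v1 = v >> 32, sum = 0;
 *		int i;
 *
 *		for (i = 0; i < 32; i++) {
 *			v0 += ((v1 << 4 ^ v1 >> 5) + v1) ^ (sum + k[sum & 3]);
 *			sum += 0x9E3779B9;
 *			v1 += ((v0 << 4 ^ v0 >> 5) + v0) ^ (sum + k[sum >> 11 & 3]);
 *		}
 *		return (uint64_t) v0 + ((uint64_t) v1 << 32);
 *	}
 */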
struct fuse_writepage_args {
	struct fuse_io_args ia;
	struct rb_node writepages_entry;
	struct list_head queue_entry;
	struct fuse_writepage_args *next;
	struct inode *inode;
	struct fuse_sync_bucket *bucket;
};

static struct fuse_writepage_args *fuse_find_writeback(struct fuse_inode *fi,
					    pgoff_t idx_from, pgoff_t idx_to)
{
	struct rb_node *n;

	n = fi->writepages.rb_node;

	while (n) {
		struct fuse_writepage_args *wpa;
		pgoff_t curr_index;

		wpa = rb_entry(n, struct fuse_writepage_args, writepages_entry);
		WARN_ON(get_fuse_inode(wpa->inode) != fi);
		curr_index = wpa->ia.write.in.offset >> PAGE_SHIFT;
		if (idx_from >= curr_index + wpa->ia.ap.num_folios)
			n = n->rb_right;
		else if (idx_to < curr_index)
			n = n->rb_left;
		else
			return wpa;
	}
	return NULL;
}

/*
 * Check if any page in a range is under writeback
 */
static bool fuse_range_is_writeback(struct inode *inode, pgoff_t idx_from,
				    pgoff_t idx_to)
{
	struct fuse_inode *fi = get_fuse_inode(inode);
	bool found;

	if (RB_EMPTY_ROOT(&fi->writepages))
		return false;

	spin_lock(&fi->lock);
	found = fuse_find_writeback(fi, idx_from, idx_to);
	spin_unlock(&fi->lock);

	return found;
}

static inline bool fuse_page_is_writeback(struct inode *inode, pgoff_t index)
{
	return fuse_range_is_writeback(inode, index, index);
}

/*
 * Wait for page writeback to be completed.
 *
 * Since fuse doesn't rely on the VM writeback tracking, this has to
 * use some other means.
 */
static void fuse_wait_on_page_writeback(struct inode *inode, pgoff_t index)
{
	struct fuse_inode *fi = get_fuse_inode(inode);

	wait_event(fi->page_waitq, !fuse_page_is_writeback(inode, index));
}

static inline bool fuse_folio_is_writeback(struct inode *inode,
					   struct folio *folio)
{
	pgoff_t last = folio_next_index(folio) - 1;
	return fuse_range_is_writeback(inode, folio_index(folio), last);
}

static void fuse_wait_on_folio_writeback(struct inode *inode,
					 struct folio *folio)
{
	struct fuse_inode *fi = get_fuse_inode(inode);

	wait_event(fi->page_waitq, !fuse_folio_is_writeback(inode, folio));
}

/*
 * Wait for all pending writepages on the inode to finish.
 *
 * This is currently done by blocking further writes with FUSE_NOWRITE
 * and waiting for all sent writes to complete.
 *
 * This must be called under i_mutex, otherwise the FUSE_NOWRITE usage
 * could conflict with truncation.
 */
static void fuse_sync_writes(struct inode *inode)
{
	fuse_set_nowrite(inode);
	fuse_release_nowrite(inode);
}
static int fuse_flush(struct file *file, fl_owner_t id)
{
	struct inode *inode = file_inode(file);
	struct fuse_mount *fm = get_fuse_mount(inode);
	struct fuse_file *ff = file->private_data;
	struct fuse_flush_in inarg;
	FUSE_ARGS(args);
	int err;

	if (fuse_is_bad(inode))
		return -EIO;

	if (ff->open_flags & FOPEN_NOFLUSH && !fm->fc->writeback_cache)
		return 0;

	err = write_inode_now(inode, 1);
	if (err)
		return err;

	inode_lock(inode);
	fuse_sync_writes(inode);
	inode_unlock(inode);

	err = filemap_check_errors(file->f_mapping);
	if (err)
		return err;

	err = 0;
	if (fm->fc->no_flush)
		goto inval_attr_out;

	memset(&inarg, 0, sizeof(inarg));
	inarg.fh = ff->fh;
	inarg.lock_owner = fuse_lock_owner_id(fm->fc, id);
	args.opcode = FUSE_FLUSH;
	args.nodeid = get_node_id(inode);
	args.in_numargs = 1;
	args.in_args[0].size = sizeof(inarg);
	args.in_args[0].value = &inarg;
	args.force = true;

	err = fuse_simple_request(fm, &args);
	if (err == -ENOSYS) {
		fm->fc->no_flush = 1;
		err = 0;
	}

inval_attr_out:
	/*
	 * In memory i_blocks is not maintained by fuse, if writeback cache is
	 * enabled, i_blocks from cached attr may not be accurate.
	 */
	if (!err && fm->fc->writeback_cache)
		fuse_invalidate_attr_mask(inode, STATX_BLOCKS);
	return err;
}
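/*
 * Editor's illustration (an assumption about a typical server, not kernel
 * code): FUSE_FLUSH is answered by the daemon's flush handler; replying
 * ENOSYS once makes the kernel set fc->no_flush and stop sending it.
 * With the libfuse low-level API:
 *
 *	static void xmp_flush(fuse_req_t req, fuse_ino_t ino,
 *			      struct fuse_file_info *fi)
 *	{
 *		// report close(2)-time errors here, 0 if there are none
 *		fuse_reply_err(req, 0);
 *	}
 */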
int fuse_fsync_common(struct file *file, loff_t start, loff_t end,
		      int datasync, int opcode)
{
	struct inode *inode = file->f_mapping->host;
	struct fuse_mount *fm = get_fuse_mount(inode);
	struct fuse_file *ff = file->private_data;
	FUSE_ARGS(args);
	struct fuse_fsync_in inarg;

	memset(&inarg, 0, sizeof(inarg));
	inarg.fh = ff->fh;
	inarg.fsync_flags = datasync ? FUSE_FSYNC_FDATASYNC : 0;
	args.opcode = opcode;
	args.nodeid = get_node_id(inode);
	args.in_numargs = 1;
	args.in_args[0].size = sizeof(inarg);
	args.in_args[0].value = &inarg;
	return fuse_simple_request(fm, &args);
}
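/*
 * Editor's illustration (assumption, not kernel code): a server commonly
 * stores an open file descriptor in fi->fh and maps this request onto
 * fsync()/fdatasync(), e.g. with the libfuse low-level API:
 *
 *	static void xmp_fsync(fuse_req_t req, fuse_ino_t ino, int datasync,
 *			      struct fuse_file_info *fi)
 *	{
 *		int res = datasync ? fdatasync((int) fi->fh)
 *				   : fsync((int) fi->fh);
 *
 *		fuse_reply_err(req, res == -1 ? errno : 0);
 *	}
 */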
static int fuse_fsync(struct file *file, loff_t start, loff_t end,
		      int datasync)
{
	struct inode *inode = file->f_mapping->host;
	struct fuse_conn *fc = get_fuse_conn(inode);
	int err;

	if (fuse_is_bad(inode))
		return -EIO;

	inode_lock(inode);

	/*
	 * Start writeback against all dirty pages of the inode, then
	 * wait for all outstanding writes, before sending the FSYNC
	 * request.
	 */
	err = file_write_and_wait_range(file, start, end);
	if (err)
		goto out;

	fuse_sync_writes(inode);

	/*
	 * Due to implementation of fuse writeback
	 * file_write_and_wait_range() does not catch errors.
	 * We have to do this directly after fuse_sync_writes()
	 */
	err = file_check_and_advance_wb_err(file);
	if (err)
		goto out;

	err = sync_inode_metadata(inode, 1);
	if (err)
		goto out;

	if (fc->no_fsync)
		goto out;

	err = fuse_fsync_common(file, start, end, datasync, FUSE_FSYNC);
	if (err == -ENOSYS) {
		fc->no_fsync = 1;
		err = 0;
	}
out:
	inode_unlock(inode);

	return err;
}
void fuse_read_args_fill(struct fuse_io_args *ia, struct file *file, loff_t pos,
			 size_t count, int opcode)
{
	struct fuse_file *ff = file->private_data;
	struct fuse_args *args = &ia->ap.args;

	ia->read.in.fh = ff->fh;
	ia->read.in.offset = pos;
	ia->read.in.size = count;
	ia->read.in.flags = file->f_flags;
	args->opcode = opcode;
	args->nodeid = ff->nodeid;
	args->in_numargs = 1;
	args->in_args[0].size = sizeof(ia->read.in);
	args->in_args[0].value = &ia->read.in;
	args->out_argvar = true;
	args->out_numargs = 1;
	args->out_args[0].size = count;
}
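/*
 * Editor's note: on the wire the server thus sees (sizes illustrative, for
 * a 4 KiB read at offset 8192):
 *
 *	fuse_in_header | fuse_read_in { .fh, .offset = 8192, .size = 4096, ... }
 *
 * and replies with the raw data.  Setting out_argvar above allows the reply
 * to carry fewer than 'count' bytes, i.e. a short read.
 */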
static void fuse_release_user_pages(struct fuse_args_pages *ap, ssize_t nres,
				    bool should_dirty)
{
	unsigned int i;

	for (i = 0; i < ap->num_folios; i++) {
		if (should_dirty)
			folio_mark_dirty_lock(ap->folios[i]);
		if (ap->args.is_pinned)
			unpin_folio(ap->folios[i]);
	}

	if (nres > 0 && ap->args.invalidate_vmap)
		invalidate_kernel_vmap_range(ap->args.vmap_base, nres);
}

static void fuse_io_release(struct kref *kref)
{
	kfree(container_of(kref, struct fuse_io_priv, refcnt));
}

static ssize_t fuse_get_res_by_io(struct fuse_io_priv *io)
{
	if (io->err)
		return io->err;

	if (io->bytes >= 0 && io->write)
		return -EIO;

	return io->bytes < 0 ? io->size : io->bytes;
}
/*
 * In case of short read, the caller sets 'pos' to the position of
 * actual end of fuse request in IO request. Otherwise, if bytes_requested
 * == bytes_transferred or rw == WRITE, the caller sets 'pos' to -1.
 *
 * An example:
 * User requested DIO read of 64K. It was split into two 32K fuse requests,
 * both submitted asynchronously. The first of them was ACKed by userspace as
 * fully completed (req->out.args[0].size == 32K) resulting in pos == -1. The
 * second request was ACKed as short, e.g. only 1K was read, resulting in
 * pos == 33K.
 *
 * Thus, when all fuse requests are completed, the minimal non-negative 'pos'
 * will be equal to the length of the longest contiguous fragment of
 * transferred data starting from the beginning of IO request.
 */
static void fuse_aio_complete(struct fuse_io_priv *io, int err, ssize_t pos)
{
	int left;

	spin_lock(&io->lock);
	if (err)
		io->err = io->err ? : err;
	else if (pos >= 0 && (io->bytes < 0 || pos < io->bytes))
		io->bytes = pos;

	left = --io->reqs;
	if (!left && io->blocking)
		complete(io->done);
	spin_unlock(&io->lock);

	if (!left && !io->blocking) {
		ssize_t res = fuse_get_res_by_io(io);

		if (res >= 0) {
			struct inode *inode = file_inode(io->iocb->ki_filp);
			struct fuse_conn *fc = get_fuse_conn(inode);
			struct fuse_inode *fi = get_fuse_inode(inode);

			spin_lock(&fi->lock);
			fi->attr_version = atomic64_inc_return(&fc->attr_version);
			spin_unlock(&fi->lock);
		}

		io->iocb->ki_complete(io->iocb, res);
	}

	kref_put(&io->refcnt, fuse_io_release);
}
static struct fuse_io_args *fuse_io_alloc(struct fuse_io_priv *io,
					  unsigned int nfolios)
{
	struct fuse_io_args *ia;

	ia = kzalloc(sizeof(*ia), GFP_KERNEL);
	if (ia) {
		ia->io = io;
		ia->ap.folios = fuse_folios_alloc(nfolios, GFP_KERNEL,
						  &ia->ap.descs);
		if (!ia->ap.folios) {
			kfree(ia);
			ia = NULL;
		}
	}
	return ia;
}

static void fuse_io_free(struct fuse_io_args *ia)
{
	kfree(ia->ap.folios);
	kfree(ia);
}

static void fuse_aio_complete_req(struct fuse_mount *fm, struct fuse_args *args,
				  int err)
{
	struct fuse_io_args *ia = container_of(args, typeof(*ia), ap.args);
	struct fuse_io_priv *io = ia->io;
	ssize_t pos = -1;
	size_t nres;

	if (err) {
		/* Nothing */
	} else if (io->write) {
		if (ia->write.out.size > ia->write.in.size) {
			err = -EIO;
		} else {
			nres = ia->write.out.size;
			if (ia->write.in.size != ia->write.out.size)
				pos = ia->write.in.offset - io->offset +
				      ia->write.out.size;
		}
	} else {
		u32 outsize = args->out_args[0].size;

		nres = outsize;
		if (ia->read.in.size != outsize)
			pos = ia->read.in.offset - io->offset + outsize;
	}

	fuse_release_user_pages(&ia->ap, err ?: nres, io->should_dirty);

	fuse_aio_complete(io, err, pos);
	fuse_io_free(ia);
}
static ssize_t fuse_async_req_send(struct fuse_mount *fm,
				   struct fuse_io_args *ia, size_t num_bytes)
{
	ssize_t err;
	struct fuse_io_priv *io = ia->io;

	spin_lock(&io->lock);
	kref_get(&io->refcnt);
	io->size += num_bytes;
	io->reqs++;
	spin_unlock(&io->lock);

	ia->ap.args.end = fuse_aio_complete_req;
	ia->ap.args.may_block = io->should_dirty;
	err = fuse_simple_background(fm, &ia->ap.args, GFP_KERNEL);
	if (err)
		fuse_aio_complete_req(fm, &ia->ap.args, err);

	return num_bytes;
}

static ssize_t fuse_send_read(struct fuse_io_args *ia, loff_t pos, size_t count,
			      fl_owner_t owner)
{
	struct file *file = ia->io->iocb->ki_filp;
	struct fuse_file *ff = file->private_data;
	struct fuse_mount *fm = ff->fm;

	fuse_read_args_fill(ia, file, pos, count, FUSE_READ);
	if (owner != NULL) {
		ia->read.in.read_flags |= FUSE_READ_LOCKOWNER;
		ia->read.in.lock_owner = fuse_lock_owner_id(fm->fc, owner);
	}

	if (ia->io->async)
		return fuse_async_req_send(fm, ia, count);

	return fuse_simple_request(fm, &ia->ap.args);
}
static void fuse_read_update_size(struct inode *inode, loff_t size,
				  u64 attr_ver)
{
	struct fuse_conn *fc = get_fuse_conn(inode);
	struct fuse_inode *fi = get_fuse_inode(inode);

	spin_lock(&fi->lock);
	if (attr_ver >= fi->attr_version && size < inode->i_size &&
	    !test_bit(FUSE_I_SIZE_UNSTABLE, &fi->state)) {
		fi->attr_version = atomic64_inc_return(&fc->attr_version);
		i_size_write(inode, size);
	}
	spin_unlock(&fi->lock);
}

static void fuse_short_read(struct inode *inode, u64 attr_ver, size_t num_read,
			    struct fuse_args_pages *ap)
{
	struct fuse_conn *fc = get_fuse_conn(inode);

	/*
	 * If writeback_cache is enabled, a short read means there's a hole in
	 * the file.  Some data after the hole is in page cache, but has not
	 * reached the client fs yet.  So the hole is not present there.
	 */
	if (!fc->writeback_cache) {
		loff_t pos = folio_pos(ap->folios[0]) + num_read;
		fuse_read_update_size(inode, pos, attr_ver);
	}
}
static int fuse_do_readfolio(struct file *file, struct folio *folio)
{
	struct inode *inode = folio->mapping->host;
	struct fuse_mount *fm = get_fuse_mount(inode);
	loff_t pos = folio_pos(folio);
	struct fuse_folio_desc desc = { .length = PAGE_SIZE };
	struct fuse_io_args ia = {
		.ap.args.page_zeroing = true,
		.ap.args.out_pages = true,
		.ap.num_folios = 1,
		.ap.folios = &folio,
		.ap.descs = &desc,
	};
	ssize_t res;
	u64 attr_ver;

	/*
	 * With the temporary pages that are used to complete writeback, we can
	 * have writeback that extends beyond the lifetime of the folio.  So
	 * make sure we read a properly synced folio.
	 */
	fuse_wait_on_folio_writeback(inode, folio);

	attr_ver = fuse_get_attr_version(fm->fc);

	/* Don't overflow end offset */
	if (pos + (desc.length - 1) == LLONG_MAX)
		desc.length--;

	fuse_read_args_fill(&ia, file, pos, desc.length, FUSE_READ);
	res = fuse_simple_request(fm, &ia.ap.args);
	if (res < 0)
		return res;
	/*
	 * Short read means EOF.  If file size is larger, truncate it
	 */
	if (res < desc.length)
		fuse_short_read(inode, attr_ver, res, &ia.ap);

	folio_mark_uptodate(folio);

	return 0;
}

static int fuse_read_folio(struct file *file, struct folio *folio)
{
	struct inode *inode = folio->mapping->host;
	int err;

	err = -EIO;
	if (fuse_is_bad(inode))
		goto out;

	err = fuse_do_readfolio(file, folio);
	fuse_invalidate_atime(inode);
 out:
	folio_unlock(folio);
	return err;
}
static void fuse_readpages_end(struct fuse_mount *fm, struct fuse_args *args,
			       int err)
{
	int i;
	struct fuse_io_args *ia = container_of(args, typeof(*ia), ap.args);
	struct fuse_args_pages *ap = &ia->ap;
	size_t count = ia->read.in.size;
	size_t num_read = args->out_args[0].size;
	struct address_space *mapping = NULL;

	for (i = 0; mapping == NULL && i < ap->num_folios; i++)
		mapping = ap->folios[i]->mapping;

	if (mapping) {
		struct inode *inode = mapping->host;

		/*
		 * Short read means EOF. If file size is larger, truncate it
		 */
		if (!err && num_read < count)
			fuse_short_read(inode, ia->read.attr_ver, num_read, ap);

		fuse_invalidate_atime(inode);
	}

	for (i = 0; i < ap->num_folios; i++)
		folio_end_read(ap->folios[i], !err);
	if (ia->ff)
		fuse_file_put(ia->ff, false);

	fuse_io_free(ia);
}

static void fuse_send_readpages(struct fuse_io_args *ia, struct file *file)
{
	struct fuse_file *ff = file->private_data;
	struct fuse_mount *fm = ff->fm;
	struct fuse_args_pages *ap = &ia->ap;
	loff_t pos = folio_pos(ap->folios[0]);
	/* Currently, all folios in FUSE are one page */
	size_t count = ap->num_folios << PAGE_SHIFT;
	ssize_t res;
	int err;

	ap->args.out_pages = true;
	ap->args.page_zeroing = true;
	ap->args.page_replace = true;

	/* Don't overflow end offset */
	if (pos + (count - 1) == LLONG_MAX) {
		count--;
		ap->descs[ap->num_folios - 1].length--;
	}
	WARN_ON((loff_t) (pos + count) < 0);

	fuse_read_args_fill(ia, file, pos, count, FUSE_READ);
	ia->read.attr_ver = fuse_get_attr_version(fm->fc);
	if (fm->fc->async_read) {
		ia->ff = fuse_file_get(ff);
		ap->args.end = fuse_readpages_end;
		err = fuse_simple_background(fm, &ap->args, GFP_KERNEL);
		if (!err)
			return;
	} else {
		res = fuse_simple_request(fm, &ap->args);
		err = res < 0 ? res : 0;
	}
	fuse_readpages_end(fm, &ap->args, err);
}
static void fuse_readahead(struct readahead_control *rac)
{
	struct inode *inode = rac->mapping->host;
	struct fuse_inode *fi = get_fuse_inode(inode);
	struct fuse_conn *fc = get_fuse_conn(inode);
	unsigned int max_pages, nr_pages;
	pgoff_t first = readahead_index(rac);
	pgoff_t last = first + readahead_count(rac) - 1;

	if (fuse_is_bad(inode))
		return;

	wait_event(fi->page_waitq, !fuse_range_is_writeback(inode, first, last));

	max_pages = min_t(unsigned int, fc->max_pages,
			fc->max_read / PAGE_SIZE);

	/*
	 * This is only accurate the first time through, since readahead_folio()
	 * doesn't update readahead_count() from the previous folio until the
	 * next call.  Grab nr_pages here so we know how many pages we're going
	 * to have to process.  This means that we will exit here with
	 * readahead_count() == folio_nr_pages(last_folio), but we will have
	 * consumed all of the folios, and read_pages() will call
	 * readahead_folio() again which will clean up the rac.
	 */
	nr_pages = readahead_count(rac);

	while (nr_pages) {
		struct fuse_io_args *ia;
		struct fuse_args_pages *ap;
		struct folio *folio;
		unsigned cur_pages = min(max_pages, nr_pages);

		if (fc->num_background >= fc->congestion_threshold &&
		    rac->ra->async_size >= readahead_count(rac))
			/*
			 * Congested and only async pages left, so skip the
			 * rest.
			 */
			break;

		ia = fuse_io_alloc(NULL, cur_pages);
		if (!ia)
			return;
		ap = &ia->ap;

		while (ap->num_folios < cur_pages) {
			folio = readahead_folio(rac);
			ap->folios[ap->num_folios] = folio;
			ap->descs[ap->num_folios].length = folio_size(folio);
			ap->num_folios++;
		}
		fuse_send_readpages(ia, rac->file);
		nr_pages -= cur_pages;
	}
}
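/*
 * Editor's example of the splitting above: with fc->max_pages == 32 and
 * fc->max_read == 128 KiB (4 KiB pages), max_pages ends up min(32, 32) == 32,
 * so a 512 KiB readahead window goes out as four 128 KiB FUSE_READ requests,
 * or fewer if the connection becomes congested along the way.
 */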
static ssize_t fuse_cache_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
	struct inode *inode = iocb->ki_filp->f_mapping->host;
	struct fuse_conn *fc = get_fuse_conn(inode);

	/*
	 * In auto invalidate mode, always update attributes on read.
	 * Otherwise, only update if we attempt to read past EOF (to ensure
	 * i_size is up to date).
	 */
	if (fc->auto_inval_data ||
	    (iocb->ki_pos + iov_iter_count(to) > i_size_read(inode))) {
		int err;
		err = fuse_update_attributes(inode, iocb->ki_filp, STATX_SIZE);
		if (err)
			return err;
	}

	return generic_file_read_iter(iocb, to);
}
static void fuse_write_args_fill(struct fuse_io_args *ia, struct fuse_file *ff,
				 loff_t pos, size_t count)
{
	struct fuse_args *args = &ia->ap.args;

	ia->write.in.fh = ff->fh;
	ia->write.in.offset = pos;
	ia->write.in.size = count;
	args->opcode = FUSE_WRITE;
	args->nodeid = ff->nodeid;
	args->in_numargs = 2;
	if (ff->fm->fc->minor < 9)
		args->in_args[0].size = FUSE_COMPAT_WRITE_IN_SIZE;
	else
		args->in_args[0].size = sizeof(ia->write.in);
	args->in_args[0].value = &ia->write.in;
	args->in_args[1].size = count;
	args->out_numargs = 1;
	args->out_args[0].size = sizeof(ia->write.out);
	args->out_args[0].value = &ia->write.out;
}
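/*
 * Editor's note: FUSE_WRITE therefore carries two input arguments, the
 * header and the payload itself, so the server sees
 *
 *	fuse_in_header | fuse_write_in { .fh, .offset, .size = count } | data[count]
 *
 * and answers with fuse_write_out { .size = accepted }, which may be smaller
 * than count, i.e. a short write.
 */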
static unsigned int fuse_write_flags(struct kiocb *iocb)
{
	unsigned int flags = iocb->ki_filp->f_flags;

	if (iocb_is_dsync(iocb))
		flags |= O_DSYNC;
	if (iocb->ki_flags & IOCB_SYNC)
		flags |= O_SYNC;

	return flags;
}

static ssize_t fuse_send_write(struct fuse_io_args *ia, loff_t pos,
			       size_t count, fl_owner_t owner)
{
	struct kiocb *iocb = ia->io->iocb;
	struct file *file = iocb->ki_filp;
	struct fuse_file *ff = file->private_data;
	struct fuse_mount *fm = ff->fm;
	struct fuse_write_in *inarg = &ia->write.in;
	ssize_t err;

	fuse_write_args_fill(ia, ff, pos, count);
	inarg->flags = fuse_write_flags(iocb);
	if (owner != NULL) {
		inarg->write_flags |= FUSE_WRITE_LOCKOWNER;
		inarg->lock_owner = fuse_lock_owner_id(fm->fc, owner);
	}

	if (ia->io->async)
		return fuse_async_req_send(fm, ia, count);

	err = fuse_simple_request(fm, &ia->ap.args);
	if (!err && ia->write.out.size > count)
		err = -EIO;

	return err ?: ia->write.out.size;
}
bool fuse_write_update_attr(struct inode *inode, loff_t pos, ssize_t written)
{
	struct fuse_conn *fc = get_fuse_conn(inode);
	struct fuse_inode *fi = get_fuse_inode(inode);
	bool ret = false;

	spin_lock(&fi->lock);
	fi->attr_version = atomic64_inc_return(&fc->attr_version);
	if (written > 0 && pos > inode->i_size) {
		i_size_write(inode, pos);
		ret = true;
	}
	spin_unlock(&fi->lock);

	fuse_invalidate_attr_mask(inode, FUSE_STATX_MODSIZE);

	return ret;
}

static ssize_t fuse_send_write_pages(struct fuse_io_args *ia,
				     struct kiocb *iocb, struct inode *inode,
				     loff_t pos, size_t count)
{
	struct fuse_args_pages *ap = &ia->ap;
	struct file *file = iocb->ki_filp;
	struct fuse_file *ff = file->private_data;
	struct fuse_mount *fm = ff->fm;
	unsigned int offset, i;
	bool short_write;
	int err;

	for (i = 0; i < ap->num_folios; i++)
		fuse_wait_on_folio_writeback(inode, ap->folios[i]);

	fuse_write_args_fill(ia, ff, pos, count);
	ia->write.in.flags = fuse_write_flags(iocb);
	if (fm->fc->handle_killpriv_v2 && !capable(CAP_FSETID))
		ia->write.in.write_flags |= FUSE_WRITE_KILL_SUIDGID;

	err = fuse_simple_request(fm, &ap->args);
	if (!err && ia->write.out.size > count)
		err = -EIO;

	short_write = ia->write.out.size < count;
	offset = ap->descs[0].offset;
	count = ia->write.out.size;
	for (i = 0; i < ap->num_folios; i++) {
		struct folio *folio = ap->folios[i];

		if (err) {
			folio_clear_uptodate(folio);
		} else {
			if (count >= folio_size(folio) - offset) {
				count -= folio_size(folio) - offset;
			} else {
				if (short_write)
					folio_clear_uptodate(folio);
				count = 0;
			}
			offset = 0;
		}
		if (ia->write.folio_locked && (i == ap->num_folios - 1))
			folio_unlock(folio);
		folio_put(folio);
	}

	return err;
}
static ssize_t fuse_fill_write_pages(struct fuse_io_args *ia,
				     struct address_space *mapping,
				     struct iov_iter *ii, loff_t pos,
				     unsigned int max_pages)
{
	struct fuse_args_pages *ap = &ia->ap;
	struct fuse_conn *fc = get_fuse_conn(mapping->host);
	unsigned offset = pos & (PAGE_SIZE - 1);
	unsigned int nr_pages = 0;
	size_t count = 0;
	int err;

	ap->args.in_pages = true;
	ap->descs[0].offset = offset;

	do {
		size_t tmp;
		struct folio *folio;
		pgoff_t index = pos >> PAGE_SHIFT;
		size_t bytes = min_t(size_t, PAGE_SIZE - offset,
				     iov_iter_count(ii));

		bytes = min_t(size_t, bytes, fc->max_write - count);

		err = -EFAULT;
		if (fault_in_iov_iter_readable(ii, bytes))
			break;

		folio = __filemap_get_folio(mapping, index, FGP_WRITEBEGIN,
					    mapping_gfp_mask(mapping));
		if (IS_ERR(folio)) {
			err = PTR_ERR(folio);
			break;
		}

		if (mapping_writably_mapped(mapping))
			flush_dcache_folio(folio);

		tmp = copy_folio_from_iter_atomic(folio, offset, bytes, ii);
		flush_dcache_folio(folio);

		if (!tmp) {
			folio_unlock(folio);
			folio_put(folio);
			continue;
		}

		err = 0;
		ap->folios[ap->num_folios] = folio;
		ap->descs[ap->num_folios].length = tmp;
		ap->num_folios++;
		nr_pages++;

		count += tmp;
		pos += tmp;
		offset += tmp;
		if (offset == PAGE_SIZE)
			offset = 0;

		/* If we copied full page, mark it uptodate */
		if (tmp == PAGE_SIZE)
			folio_mark_uptodate(folio);

		if (folio_test_uptodate(folio)) {
			folio_unlock(folio);
		} else {
			ia->write.folio_locked = true;
			break;
		}
		if (!fc->big_writes)
			break;
	} while (iov_iter_count(ii) && count < fc->max_write &&
		 nr_pages < max_pages && offset == 0);

	return count > 0 ? count : err;
}
static inline unsigned int fuse_wr_pages(loff_t pos, size_t len,
					 unsigned int max_pages)
{
	return min_t(unsigned int,
		     ((pos + len - 1) >> PAGE_SHIFT) -
		     (pos >> PAGE_SHIFT) + 1,
		     max_pages);
}
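/*
 * Editor's example: with 4 KiB pages, pos == 4094 and len == 4 touch bytes
 * 4094..4097, i.e. the tail of page 0 and the head of page 1, so this
 * returns min((4097 >> 12) - (4094 >> 12) + 1, max_pages) == min(2, max_pages).
 */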
static ssize_t fuse_perform_write(struct kiocb *iocb, struct iov_iter *ii)
{
	struct address_space *mapping = iocb->ki_filp->f_mapping;
	struct inode *inode = mapping->host;
	struct fuse_conn *fc = get_fuse_conn(inode);
	struct fuse_inode *fi = get_fuse_inode(inode);
	loff_t pos = iocb->ki_pos;
	ssize_t res = 0;
	int err = 0;

	if (inode->i_size < pos + iov_iter_count(ii))
		set_bit(FUSE_I_SIZE_UNSTABLE, &fi->state);

	do {
		ssize_t count;
		struct fuse_io_args ia = {};
		struct fuse_args_pages *ap = &ia.ap;
		unsigned int nr_pages = fuse_wr_pages(pos, iov_iter_count(ii),
						      fc->max_pages);

		ap->folios = fuse_folios_alloc(nr_pages, GFP_KERNEL, &ap->descs);
		if (!ap->folios) {
			err = -ENOMEM;
			break;
		}

		count = fuse_fill_write_pages(&ia, mapping, ii, pos, nr_pages);
		if (count <= 0) {
			err = count;
		} else {
			err = fuse_send_write_pages(&ia, iocb, inode,
						    pos, count);
			if (!err) {
				size_t num_written = ia.write.out.size;

				res += num_written;
				pos += num_written;

				/* break out of the loop on short write */
				if (num_written != count)
					err = -EIO;
			}
		}
		kfree(ap->folios);
	} while (!err && iov_iter_count(ii));

	fuse_write_update_attr(inode, pos, res);
	clear_bit(FUSE_I_SIZE_UNSTABLE, &fi->state);

	if (!res)
		return err;
	iocb->ki_pos += res;
	return res;
}
static bool fuse_io_past_eof(struct kiocb *iocb, struct iov_iter *iter)
{
	struct inode *inode = file_inode(iocb->ki_filp);

	return iocb->ki_pos + iov_iter_count(iter) > i_size_read(inode);
}

/*
 * @return true if an exclusive lock for direct IO writes is needed
 */
static bool fuse_dio_wr_exclusive_lock(struct kiocb *iocb, struct iov_iter *from)
{
	struct file *file = iocb->ki_filp;
	struct fuse_file *ff = file->private_data;
	struct inode *inode = file_inode(iocb->ki_filp);
	struct fuse_inode *fi = get_fuse_inode(inode);

	/* Server side has to advise that it supports parallel dio writes. */
	if (!(ff->open_flags & FOPEN_PARALLEL_DIRECT_WRITES))
		return true;

	/*
	 * Append will need to know the eventual EOF - always needs an
	 * exclusive lock.
	 */
	if (iocb->ki_flags & IOCB_APPEND)
		return true;

	/* shared locks are not allowed with parallel page cache IO */
	if (test_bit(FUSE_I_CACHE_IO_MODE, &fi->state))
		return true;

	/* Parallel dio beyond EOF is not supported, at least for now. */
	if (fuse_io_past_eof(iocb, from))
		return true;

	return false;
}

static void fuse_dio_lock(struct kiocb *iocb, struct iov_iter *from,
			  bool *exclusive)
{
	struct inode *inode = file_inode(iocb->ki_filp);
	struct fuse_inode *fi = get_fuse_inode(inode);

	*exclusive = fuse_dio_wr_exclusive_lock(iocb, from);
	if (*exclusive) {
		inode_lock(inode);
	} else {
		inode_lock_shared(inode);
		/*
		 * New parallel dio is allowed only if the inode is not in
		 * caching mode and it denies new opens in caching mode. This
		 * check should be performed only after taking the shared
		 * inode lock.  The previous past-eof check was without the
		 * inode lock and might have raced, so check it again.
		 */
		if (fuse_io_past_eof(iocb, from) ||
		    fuse_inode_uncached_io_start(fi, NULL) != 0) {
			inode_unlock_shared(inode);
			inode_lock(inode);
			*exclusive = true;
		}
	}
}

static void fuse_dio_unlock(struct kiocb *iocb, bool exclusive)
{
	struct inode *inode = file_inode(iocb->ki_filp);
	struct fuse_inode *fi = get_fuse_inode(inode);

	if (exclusive) {
		inode_unlock(inode);
	} else {
		/* Allow opens in caching mode after last parallel dio end */
		fuse_inode_uncached_io_end(fi);
		inode_unlock_shared(inode);
	}
}
static ssize_t fuse_cache_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
	struct file *file = iocb->ki_filp;
	struct mnt_idmap *idmap = file_mnt_idmap(file);
	struct address_space *mapping = file->f_mapping;
	ssize_t written = 0;
	struct inode *inode = mapping->host;
	ssize_t err, count;
	struct fuse_conn *fc = get_fuse_conn(inode);

	if (fc->writeback_cache) {
		/* Update size (EOF optimization) and mode (SUID clearing) */
		err = fuse_update_attributes(mapping->host, file,
					     STATX_SIZE | STATX_MODE);
		if (err)
			return err;

		if (fc->handle_killpriv_v2 &&
		    setattr_should_drop_suidgid(idmap,
						file_inode(file))) {
			goto writethrough;
		}

		return generic_file_write_iter(iocb, from);
	}

writethrough:
	inode_lock(inode);

	err = count = generic_write_checks(iocb, from);
	if (err <= 0)
		goto out;

	task_io_account_write(count);

	err = kiocb_modified(iocb);
	if (err)
		goto out;

	if (iocb->ki_flags & IOCB_DIRECT) {
		written = generic_file_direct_write(iocb, from);
		if (written < 0 || !iov_iter_count(from))
			goto out;
		written = direct_write_fallback(iocb, from, written,
				fuse_perform_write(iocb, from));
	} else {
		written = fuse_perform_write(iocb, from);
	}
out:
	inode_unlock(inode);
	if (written > 0)
		written = generic_write_sync(iocb, written);

	return written ? written : err;
}
static inline unsigned long fuse_get_user_addr(const struct iov_iter *ii)
{
	return (unsigned long)iter_iov(ii)->iov_base + ii->iov_offset;
}

static inline size_t fuse_get_frag_size(const struct iov_iter *ii,
					size_t max_size)
{
	return min(iov_iter_single_seg_count(ii), max_size);
}

static int fuse_get_user_pages(struct fuse_args_pages *ap, struct iov_iter *ii,
			       size_t *nbytesp, int write,
			       unsigned int max_pages,
			       bool use_pages_for_kvec_io)
{
	bool flush_or_invalidate = false;
	unsigned int nr_pages = 0;
	size_t nbytes = 0;  /* # bytes already packed in req */
	ssize_t ret = 0;

	/* Special case for kernel I/O: can copy directly into the buffer.
	 * However if the implementation of fuse_conn requires pages instead of
	 * pointer (e.g., virtio-fs), use iov_iter_extract_pages() instead.
	 */
	if (iov_iter_is_kvec(ii)) {
		void *user_addr = (void *)fuse_get_user_addr(ii);

		if (!use_pages_for_kvec_io) {
			size_t frag_size = fuse_get_frag_size(ii, *nbytesp);

			if (write)
				ap->args.in_args[1].value = user_addr;
			else
				ap->args.out_args[0].value = user_addr;

			iov_iter_advance(ii, frag_size);
			*nbytesp = frag_size;
			return 0;
		}

		if (is_vmalloc_addr(user_addr)) {
			ap->args.vmap_base = user_addr;
			flush_or_invalidate = true;
		}
	}

	/*
	 * Until there is support for iov_iter_extract_folios(), we have to
	 * manually extract pages using iov_iter_extract_pages() and then
	 * copy that to a folios array.
	 */
	struct page **pages = kzalloc(max_pages * sizeof(struct page *),
				      GFP_KERNEL);
	if (!pages)
		return -ENOMEM;

	while (nbytes < *nbytesp && nr_pages < max_pages) {
		unsigned nfolios, i;
		size_t start;

		ret = iov_iter_extract_pages(ii, &pages,
					     *nbytesp - nbytes,
					     max_pages - nr_pages,
					     0, &start);
		if (ret < 0)
			break;

		nbytes += ret;

		nfolios = DIV_ROUND_UP(ret + start, PAGE_SIZE);

		for (i = 0; i < nfolios; i++) {
			struct folio *folio = page_folio(pages[i]);
			unsigned int offset = start +
				(folio_page_idx(folio, pages[i]) << PAGE_SHIFT);
			unsigned int len = min_t(unsigned int, ret, PAGE_SIZE - start);

			ap->descs[ap->num_folios].offset = offset;
			ap->descs[ap->num_folios].length = len;
			ap->folios[ap->num_folios] = folio;
			start = 0;
			ret -= len;
			ap->num_folios++;
		}

		nr_pages += nfolios;
	}
	kfree(pages);

	if (write && flush_or_invalidate)
		flush_kernel_vmap_range(ap->args.vmap_base, nbytes);

	ap->args.invalidate_vmap = !write && flush_or_invalidate;
	ap->args.is_pinned = iov_iter_extract_will_pin(ii);
	ap->args.user_pages = true;
	if (write)
		ap->args.in_pages = true;
	else
		ap->args.out_pages = true;

	*nbytesp = nbytes;

	return ret < 0 ? ret : 0;
}
ssize_t fuse_direct_io(struct fuse_io_priv *io, struct iov_iter *iter,
		       loff_t *ppos, int flags)
{
	int write = flags & FUSE_DIO_WRITE;
	int cuse = flags & FUSE_DIO_CUSE;
	struct file *file = io->iocb->ki_filp;
	struct address_space *mapping = file->f_mapping;
	struct inode *inode = mapping->host;
	struct fuse_file *ff = file->private_data;
	struct fuse_conn *fc = ff->fm->fc;
	size_t nmax = write ? fc->max_write : fc->max_read;
	loff_t pos = *ppos;
	size_t count = iov_iter_count(iter);
	pgoff_t idx_from = pos >> PAGE_SHIFT;
	pgoff_t idx_to = (pos + count - 1) >> PAGE_SHIFT;
	ssize_t res = 0;
	int err = 0;
	struct fuse_io_args *ia;
	unsigned int max_pages;
	bool fopen_direct_io = ff->open_flags & FOPEN_DIRECT_IO;

	max_pages = iov_iter_npages(iter, fc->max_pages);
	ia = fuse_io_alloc(io, max_pages);
	if (!ia)
		return -ENOMEM;

	if (fopen_direct_io && fc->direct_io_allow_mmap) {
		res = filemap_write_and_wait_range(mapping, pos, pos + count - 1);
		if (res) {
			fuse_io_free(ia);
			return res;
		}
	}
	if (!cuse && fuse_range_is_writeback(inode, idx_from, idx_to)) {
		if (!write)
			inode_lock(inode);
		fuse_sync_writes(inode);
		if (!write)
			inode_unlock(inode);
	}

	if (fopen_direct_io && write) {
		res = invalidate_inode_pages2_range(mapping, idx_from, idx_to);
		if (res) {
			fuse_io_free(ia);
			return res;
		}
	}

	io->should_dirty = !write && user_backed_iter(iter);
	while (count) {
		ssize_t nres;
		fl_owner_t owner = current->files;
		size_t nbytes = min(count, nmax);

		err = fuse_get_user_pages(&ia->ap, iter, &nbytes, write,
					  max_pages, fc->use_pages_for_kvec_io);
		if (err && !nbytes)
			break;

		if (write) {
			if (!capable(CAP_FSETID))
				ia->write.in.write_flags |= FUSE_WRITE_KILL_SUIDGID;

			nres = fuse_send_write(ia, pos, nbytes, owner);
		} else {
			nres = fuse_send_read(ia, pos, nbytes, owner);
		}

		if (!io->async || nres < 0) {
			fuse_release_user_pages(&ia->ap, nres, io->should_dirty);
			fuse_io_free(ia);
		}
		ia = NULL;
		if (nres < 0) {
			iov_iter_revert(iter, nbytes);
			err = nres;
			break;
		}
		WARN_ON(nres > nbytes);

		count -= nres;
		res += nres;
		pos += nres;
		if (nres != nbytes) {
			iov_iter_revert(iter, nbytes - nres);
			break;
		}
		if (count) {
			max_pages = iov_iter_npages(iter, fc->max_pages);
			ia = fuse_io_alloc(io, max_pages);
			if (!ia)
				break;
		}
	}
	if (ia)
		fuse_io_free(ia);
	if (res > 0)
		*ppos = pos;

	return res > 0 ? res : err;
}
EXPORT_SYMBOL_GPL(fuse_direct_io);
static ssize_t __fuse_direct_read(struct fuse_io_priv *io,
				  struct iov_iter *iter,
				  loff_t *ppos)
{
	ssize_t res;
	struct inode *inode = file_inode(io->iocb->ki_filp);

	res = fuse_direct_io(io, iter, ppos, 0);

	fuse_invalidate_atime(inode);

	return res;
}

static ssize_t fuse_direct_IO(struct kiocb *iocb, struct iov_iter *iter);

static ssize_t fuse_direct_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
	ssize_t res;

	if (!is_sync_kiocb(iocb)) {
		res = fuse_direct_IO(iocb, to);
	} else {
		struct fuse_io_priv io = FUSE_IO_PRIV_SYNC(iocb);

		res = __fuse_direct_read(&io, to, &iocb->ki_pos);
	}

	return res;
}

static ssize_t fuse_direct_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
	struct inode *inode = file_inode(iocb->ki_filp);
	ssize_t res;
	bool exclusive;

	fuse_dio_lock(iocb, from, &exclusive);
	res = generic_write_checks(iocb, from);
	if (res > 0) {
		task_io_account_write(res);
		if (!is_sync_kiocb(iocb)) {
			res = fuse_direct_IO(iocb, from);
		} else {
			struct fuse_io_priv io = FUSE_IO_PRIV_SYNC(iocb);

			res = fuse_direct_io(&io, from, &iocb->ki_pos,
					     FUSE_DIO_WRITE);
			fuse_write_update_attr(inode, iocb->ki_pos, res);
		}
	}
	fuse_dio_unlock(iocb, exclusive);

	return res;
}
static ssize_t fuse_file_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
	struct file *file = iocb->ki_filp;
	struct fuse_file *ff = file->private_data;
	struct inode *inode = file_inode(file);

	if (fuse_is_bad(inode))
		return -EIO;

	if (FUSE_IS_DAX(inode))
		return fuse_dax_read_iter(iocb, to);

	/* FOPEN_DIRECT_IO overrides FOPEN_PASSTHROUGH */
	if (ff->open_flags & FOPEN_DIRECT_IO)
		return fuse_direct_read_iter(iocb, to);
	else if (fuse_file_passthrough(ff))
		return fuse_passthrough_read_iter(iocb, to);
	else
		return fuse_cache_read_iter(iocb, to);
}

static ssize_t fuse_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
	struct file *file = iocb->ki_filp;
	struct fuse_file *ff = file->private_data;
	struct inode *inode = file_inode(file);

	if (fuse_is_bad(inode))
		return -EIO;

	if (FUSE_IS_DAX(inode))
		return fuse_dax_write_iter(iocb, from);

	/* FOPEN_DIRECT_IO overrides FOPEN_PASSTHROUGH */
	if (ff->open_flags & FOPEN_DIRECT_IO)
		return fuse_direct_write_iter(iocb, from);
	else if (fuse_file_passthrough(ff))
		return fuse_passthrough_write_iter(iocb, from);
	else
		return fuse_cache_write_iter(iocb, from);
}

static ssize_t fuse_splice_read(struct file *in, loff_t *ppos,
				struct pipe_inode_info *pipe, size_t len,
				unsigned int flags)
{
	struct fuse_file *ff = in->private_data;

	/* FOPEN_DIRECT_IO overrides FOPEN_PASSTHROUGH */
	if (fuse_file_passthrough(ff) && !(ff->open_flags & FOPEN_DIRECT_IO))
		return fuse_passthrough_splice_read(in, ppos, pipe, len, flags);
	else
		return filemap_splice_read(in, ppos, pipe, len, flags);
}

static ssize_t fuse_splice_write(struct pipe_inode_info *pipe, struct file *out,
				 loff_t *ppos, size_t len, unsigned int flags)
{
	struct fuse_file *ff = out->private_data;

	/* FOPEN_DIRECT_IO overrides FOPEN_PASSTHROUGH */
	if (fuse_file_passthrough(ff) && !(ff->open_flags & FOPEN_DIRECT_IO))
		return fuse_passthrough_splice_write(pipe, out, ppos, len, flags);
	else
		return iter_file_splice_write(pipe, out, ppos, len, flags);
}
static void fuse_writepage_free(struct fuse_writepage_args *wpa)
{
	struct fuse_args_pages *ap = &wpa->ia.ap;
	int i;

	if (wpa->bucket)
		fuse_sync_bucket_dec(wpa->bucket);

	for (i = 0; i < ap->num_folios; i++)
		folio_put(ap->folios[i]);

	fuse_file_put(wpa->ia.ff, false);

	kfree(ap->folios);
	kfree(wpa);
}

static void fuse_writepage_finish_stat(struct inode *inode, struct folio *folio)
{
	struct backing_dev_info *bdi = inode_to_bdi(inode);

	dec_wb_stat(&bdi->wb, WB_WRITEBACK);
	node_stat_sub_folio(folio, NR_WRITEBACK_TEMP);
	wb_writeout_inc(&bdi->wb);
}

static void fuse_writepage_finish(struct fuse_writepage_args *wpa)
{
	struct fuse_args_pages *ap = &wpa->ia.ap;
	struct inode *inode = wpa->inode;
	struct fuse_inode *fi = get_fuse_inode(inode);
	int i;

	for (i = 0; i < ap->num_folios; i++)
		fuse_writepage_finish_stat(inode, ap->folios[i]);

	wake_up(&fi->page_waitq);
}
/* Called under fi->lock, may release and reacquire it */
static void fuse_send_writepage(struct fuse_mount *fm,
				struct fuse_writepage_args *wpa, loff_t size)
__releases(fi->lock)
__acquires(fi->lock)
{
	struct fuse_writepage_args *aux, *next;
	struct fuse_inode *fi = get_fuse_inode(wpa->inode);
	struct fuse_write_in *inarg = &wpa->ia.write.in;
	struct fuse_args *args = &wpa->ia.ap.args;
	/* Currently, all folios in FUSE are one page */
	__u64 data_size = wpa->ia.ap.num_folios * PAGE_SIZE;
	int err;

	fi->writectr++;
	if (inarg->offset + data_size <= size) {
		inarg->size = data_size;
	} else if (inarg->offset < size) {
		inarg->size = size - inarg->offset;
	} else {
		/* Got truncated off completely */
		goto out_free;
	}

	args->in_args[1].size = inarg->size;
	args->force = true;
	args->nocreds = true;

	err = fuse_simple_background(fm, args, GFP_ATOMIC);
	if (err == -ENOMEM) {
		spin_unlock(&fi->lock);
		err = fuse_simple_background(fm, args, GFP_NOFS | __GFP_NOFAIL);
		spin_lock(&fi->lock);
	}

	/* Fails on broken connection only */
	if (unlikely(err))
		goto out_free;

	return;

 out_free:
	fi->writectr--;
	rb_erase(&wpa->writepages_entry, &fi->writepages);
	fuse_writepage_finish(wpa);
	spin_unlock(&fi->lock);

	/* After rb_erase() aux request list is private */
	for (aux = wpa->next; aux; aux = next) {
		next = aux->next;
		aux->next = NULL;
		fuse_writepage_finish_stat(aux->inode,
					   aux->ia.ap.folios[0]);
		fuse_writepage_free(aux);
	}

	fuse_writepage_free(wpa);
	spin_lock(&fi->lock);
}

/*
 * If fi->writectr is positive (no truncate or fsync going on) send
 * all queued writepage requests.
 *
 * Called with fi->lock
 */
void fuse_flush_writepages(struct inode *inode)
__releases(fi->lock)
__acquires(fi->lock)
{
	struct fuse_mount *fm = get_fuse_mount(inode);
	struct fuse_inode *fi = get_fuse_inode(inode);
	loff_t crop = i_size_read(inode);
	struct fuse_writepage_args *wpa;

	while (fi->writectr >= 0 && !list_empty(&fi->queued_writes)) {
		wpa = list_entry(fi->queued_writes.next,
				 struct fuse_writepage_args, queue_entry);
		list_del_init(&wpa->queue_entry);
		fuse_send_writepage(fm, wpa, crop);
	}
}
static struct fuse_writepage_args *fuse_insert_writeback(struct rb_root *root,
						struct fuse_writepage_args *wpa)
{
	pgoff_t idx_from = wpa->ia.write.in.offset >> PAGE_SHIFT;
	pgoff_t idx_to = idx_from + wpa->ia.ap.num_folios - 1;
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent = NULL;

	WARN_ON(!wpa->ia.ap.num_folios);
	while (*p) {
		struct fuse_writepage_args *curr;
		pgoff_t curr_index;

		parent = *p;
		curr = rb_entry(parent, struct fuse_writepage_args,
				writepages_entry);
		WARN_ON(curr->inode != wpa->inode);
		curr_index = curr->ia.write.in.offset >> PAGE_SHIFT;

		if (idx_from >= curr_index + curr->ia.ap.num_folios)
			p = &(*p)->rb_right;
		else if (idx_to < curr_index)
			p = &(*p)->rb_left;
		else
			return curr;
	}

	rb_link_node(&wpa->writepages_entry, parent, p);
	rb_insert_color(&wpa->writepages_entry, root);
	return NULL;
}

static void tree_insert(struct rb_root *root, struct fuse_writepage_args *wpa)
{
	WARN_ON(fuse_insert_writeback(root, wpa));
}
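/*
 * Editor's example: if a queued request covers folio indexes [8, 11]
 * (curr_index == 8, num_folios == 4), inserting one for [10, 12] finds
 * 10 < 8 + 4 and 12 >= 8, so neither descent branch is taken, the ranges
 * overlap, and the existing request is returned instead of inserting.
 */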
static void fuse_writepage_end(struct fuse_mount *fm, struct fuse_args *args,
			       int error)
{
	struct fuse_writepage_args *wpa =
		container_of(args, typeof(*wpa), ia.ap.args);
	struct inode *inode = wpa->inode;
	struct fuse_inode *fi = get_fuse_inode(inode);
	struct fuse_conn *fc = get_fuse_conn(inode);

	mapping_set_error(inode->i_mapping, error);
	/*
	 * A writeback finished and this might have updated mtime/ctime on
	 * server making local mtime/ctime stale.  Hence invalidate attrs.
	 * Do this only if writeback_cache is not enabled.  If writeback_cache
	 * is enabled, we trust local ctime/mtime.
	 */
	if (!fc->writeback_cache)
		fuse_invalidate_attr_mask(inode, FUSE_STATX_MODIFY);
	spin_lock(&fi->lock);
	rb_erase(&wpa->writepages_entry, &fi->writepages);
	while (wpa->next) {
		struct fuse_mount *fm = get_fuse_mount(inode);
		struct fuse_write_in *inarg = &wpa->ia.write.in;
		struct fuse_writepage_args *next = wpa->next;

		wpa->next = next->next;
		next->next = NULL;
		tree_insert(&fi->writepages, next);

		/*
		 * Skip fuse_flush_writepages() to make it easy to crop requests
		 * based on primary request size.
		 *
		 * 1st case (trivial): there are no concurrent activities using
		 * fuse_set/release_nowrite.  Then we're on safe side because
		 * fuse_flush_writepages() would call fuse_send_writepage()
		 * anyway.
		 *
		 * 2nd case: someone called fuse_set_nowrite and it is waiting
		 * now for completion of all in-flight requests.  This happens
		 * rarely and no more than once per page, so this should be
		 * okay.
		 *
		 * 3rd case: someone (e.g. fuse_do_setattr()) is in the middle
		 * of fuse_set_nowrite..fuse_release_nowrite section.  The fact
		 * that fuse_set_nowrite returned implies that all in-flight
		 * requests were completed along with all of their secondary
		 * requests.  Further primary requests are blocked by negative
		 * writectr.  Hence there cannot be any in-flight requests and
		 * no invocations of fuse_writepage_end() while we're in
		 * fuse_set_nowrite..fuse_release_nowrite section.
		 */
		fuse_send_writepage(fm, next, inarg->offset + inarg->size);
	}
	fi->writectr--;
	fuse_writepage_finish(wpa);
	spin_unlock(&fi->lock);
	fuse_writepage_free(wpa);
}
static struct fuse_file *__fuse_write_file_get(struct fuse_inode *fi)
{
	struct fuse_file *ff;

	spin_lock(&fi->lock);
	ff = list_first_entry_or_null(&fi->write_files, struct fuse_file,
				      write_entry);
	if (ff)
		fuse_file_get(ff);
	spin_unlock(&fi->lock);

	return ff;
}

static struct fuse_file *fuse_write_file_get(struct fuse_inode *fi)
{
	struct fuse_file *ff = __fuse_write_file_get(fi);

	WARN_ON(!ff);
	return ff;
}

int fuse_write_inode(struct inode *inode, struct writeback_control *wbc)
{
	struct fuse_inode *fi = get_fuse_inode(inode);
	struct fuse_file *ff;
	int err;

	/*
	 * Inode is always written before the last reference is dropped and
	 * hence this should not be reached from reclaim.
	 *
	 * Writing back the inode from reclaim can deadlock if the request
	 * processing itself needs an allocation.  Allocations triggering
	 * reclaim while serving a request can't be prevented, because it can
	 * involve any number of unrelated userspace processes.
	 */
	WARN_ON(wbc->for_reclaim);

	ff = __fuse_write_file_get(fi);
	err = fuse_flush_times(inode, ff);
	if (ff)
		fuse_file_put(ff, false);

	return err;
}

static struct fuse_writepage_args *fuse_writepage_args_alloc(void)
{
	struct fuse_writepage_args *wpa;
	struct fuse_args_pages *ap;

	wpa = kzalloc(sizeof(*wpa), GFP_NOFS);
	if (wpa) {
		ap = &wpa->ia.ap;
		ap->folios = fuse_folios_alloc(1, GFP_NOFS, &ap->descs);
		if (!ap->folios) {
			kfree(wpa);
			wpa = NULL;
		}
	}
	return wpa;
}
static void fuse_writepage_add_to_bucket(struct fuse_conn *fc,
					 struct fuse_writepage_args *wpa)
{
	if (!fc->sync_fs)
		return;

	rcu_read_lock();
	/* Prevent resurrection of dead bucket in unlikely race with syncfs */
	do {
		wpa->bucket = rcu_dereference(fc->curr_bucket);
	} while (unlikely(!atomic_inc_not_zero(&wpa->bucket->count)));
	rcu_read_unlock();
}

static void fuse_writepage_args_page_fill(struct fuse_writepage_args *wpa, struct folio *folio,
					  struct folio *tmp_folio, uint32_t folio_index)
{
	struct inode *inode = folio->mapping->host;
	struct fuse_args_pages *ap = &wpa->ia.ap;

	folio_copy(tmp_folio, folio);

	ap->folios[folio_index] = tmp_folio;
	ap->descs[folio_index].offset = 0;
	ap->descs[folio_index].length = PAGE_SIZE;

	inc_wb_stat(&inode_to_bdi(inode)->wb, WB_WRITEBACK);
	node_stat_add_folio(tmp_folio, NR_WRITEBACK_TEMP);
}

static struct fuse_writepage_args *fuse_writepage_args_setup(struct folio *folio,
							     struct fuse_file *ff)
{
	struct inode *inode = folio->mapping->host;
	struct fuse_conn *fc = get_fuse_conn(inode);
	struct fuse_writepage_args *wpa;
	struct fuse_args_pages *ap;

	wpa = fuse_writepage_args_alloc();
	if (!wpa)
		return NULL;

	fuse_writepage_add_to_bucket(fc, wpa);
	fuse_write_args_fill(&wpa->ia, ff, folio_pos(folio), 0);
	wpa->ia.write.in.write_flags |= FUSE_WRITE_CACHE;
	wpa->next = NULL;
	wpa->ia.ff = ff;
	wpa->inode = inode;

	ap = &wpa->ia.ap;
	ap->args.in_pages = true;
	ap->args.end = fuse_writepage_end;

	return wpa;
}
static int fuse_writepage_locked(struct folio *folio)
{
	struct address_space *mapping = folio->mapping;
	struct inode *inode = mapping->host;
	struct fuse_inode *fi = get_fuse_inode(inode);
	struct fuse_writepage_args *wpa;
	struct fuse_args_pages *ap;
	struct folio *tmp_folio;
	struct fuse_file *ff;
	int error = -ENOMEM;

	tmp_folio = folio_alloc(GFP_NOFS | __GFP_HIGHMEM, 0);
	if (!tmp_folio)
		goto err;

	error = -EIO;
	ff = fuse_write_file_get(fi);
	if (!ff)
		goto err_nofile;

	wpa = fuse_writepage_args_setup(folio, ff);
	error = -ENOMEM;
	if (!wpa)
		goto err_writepage_args;

	ap = &wpa->ia.ap;
	ap->num_folios = 1;

	folio_start_writeback(folio);
	fuse_writepage_args_page_fill(wpa, folio, tmp_folio, 0);

	spin_lock(&fi->lock);
	tree_insert(&fi->writepages, wpa);
	list_add_tail(&wpa->queue_entry, &fi->queued_writes);
	fuse_flush_writepages(inode);
	spin_unlock(&fi->lock);

	folio_end_writeback(folio);

	return 0;

err_writepage_args:
	fuse_file_put(ff, false);
err_nofile:
	folio_put(tmp_folio);
err:
	mapping_set_error(folio->mapping, error);
	return error;
}
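/*
 * Editor's note on the ordering above: writeback is accounted against the
 * temporary folio, so folio_end_writeback() can be called on the original
 * folio right away; the VM may reclaim or redirty it while the copy is
 * still in flight, and completion is tracked via fi->writepages instead.
 */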
struct fuse_fill_wb_data {
	struct fuse_writepage_args *wpa;
	struct fuse_file *ff;
	struct inode *inode;
	struct folio **orig_folios;
	unsigned int max_folios;
};

static bool fuse_pages_realloc(struct fuse_fill_wb_data *data)
{
	struct fuse_args_pages *ap = &data->wpa->ia.ap;
	struct fuse_conn *fc = get_fuse_conn(data->inode);
	struct folio **folios;
	struct fuse_folio_desc *descs;
	unsigned int nfolios = min_t(unsigned int,
				     max_t(unsigned int, data->max_folios * 2,
					   FUSE_DEFAULT_MAX_PAGES_PER_REQ),
				     fc->max_pages);
	WARN_ON(nfolios <= data->max_folios);

	folios = fuse_folios_alloc(nfolios, GFP_NOFS, &descs);
	if (!folios)
		return false;

	memcpy(folios, ap->folios, sizeof(struct folio *) * ap->num_folios);
	memcpy(descs, ap->descs, sizeof(struct fuse_folio_desc) * ap->num_folios);
	kfree(ap->folios);
	ap->folios = folios;
	ap->descs = descs;
	data->max_folios = nfolios;

	return true;
}

static void fuse_writepages_send(struct fuse_fill_wb_data *data)
{
	struct fuse_writepage_args *wpa = data->wpa;
	struct inode *inode = data->inode;
	struct fuse_inode *fi = get_fuse_inode(inode);
	int num_folios = wpa->ia.ap.num_folios;
	int i;

	spin_lock(&fi->lock);
	list_add_tail(&wpa->queue_entry, &fi->queued_writes);
	fuse_flush_writepages(inode);
	spin_unlock(&fi->lock);

	for (i = 0; i < num_folios; i++)
		folio_end_writeback(data->orig_folios[i]);
}
/*
 * Check under fi->lock if the page is under writeback, and insert it onto the
 * rb_tree if not. Otherwise iterate auxiliary write requests, to see if there's
 * one already added for a page at this offset.  If there's none, then insert
 * this new request onto the auxiliary list, otherwise reuse the existing one by
 * swapping the new temp page with the old one.
 */
static bool fuse_writepage_add(struct fuse_writepage_args *new_wpa,
			       struct folio *folio)
{
	struct fuse_inode *fi = get_fuse_inode(new_wpa->inode);
	struct fuse_writepage_args *tmp;
	struct fuse_writepage_args *old_wpa;
	struct fuse_args_pages *new_ap = &new_wpa->ia.ap;

	WARN_ON(new_ap->num_folios != 0);
	new_ap->num_folios = 1;

	spin_lock(&fi->lock);
	old_wpa = fuse_insert_writeback(&fi->writepages, new_wpa);
	if (!old_wpa) {
		spin_unlock(&fi->lock);
		return true;
	}

	for (tmp = old_wpa->next; tmp; tmp = tmp->next) {
		pgoff_t curr_index;

		WARN_ON(tmp->inode != new_wpa->inode);
		curr_index = tmp->ia.write.in.offset >> PAGE_SHIFT;
		if (curr_index == folio->index) {
			WARN_ON(tmp->ia.ap.num_folios != 1);
			swap(tmp->ia.ap.folios[0], new_ap->folios[0]);
			break;
		}
	}

	if (!tmp) {
		new_wpa->next = old_wpa->next;
		old_wpa->next = new_wpa;
	}

	spin_unlock(&fi->lock);

	if (tmp) {
		fuse_writepage_finish_stat(new_wpa->inode,
					   folio);
		fuse_writepage_free(new_wpa);
	}

	return false;
}

static bool fuse_writepage_need_send(struct fuse_conn *fc, struct folio *folio,
				     struct fuse_args_pages *ap,
				     struct fuse_fill_wb_data *data)
{
	WARN_ON(!ap->num_folios);

	/*
	 * Being under writeback is unlikely but possible.  For example direct
	 * read to an mmaped fuse file will set the page dirty twice; once when
	 * the pages are faulted with get_user_pages(), and then after the read
	 * completed.
	 */
	if (fuse_folio_is_writeback(data->inode, folio))
		return true;

	/* Reached max pages */
	if (ap->num_folios == fc->max_pages)
		return true;

	/* Reached max write bytes */
	if ((ap->num_folios + 1) * PAGE_SIZE > fc->max_write)
		return true;

	/* Discontinuity */
	if (data->orig_folios[ap->num_folios - 1]->index + 1 != folio_index(folio))
		return true;

	/* Need to grow the pages array?  If so, did the expansion fail? */
	if (ap->num_folios == data->max_folios && !fuse_pages_realloc(data))
		return true;

	return false;
}
static int fuse_writepages_fill(struct folio *folio,
		struct writeback_control *wbc, void *_data)
{
	struct fuse_fill_wb_data *data = _data;
	struct fuse_writepage_args *wpa = data->wpa;
	struct fuse_args_pages *ap = &wpa->ia.ap;
	struct inode *inode = data->inode;
	struct fuse_inode *fi = get_fuse_inode(inode);
	struct fuse_conn *fc = get_fuse_conn(inode);
	struct folio *tmp_folio;
	int err;

	if (!data->ff) {
		err = -EIO;
		data->ff = fuse_write_file_get(fi);
		if (!data->ff)
			goto out_unlock;
	}

	if (wpa && fuse_writepage_need_send(fc, folio, ap, data)) {
		fuse_writepages_send(data);
		data->wpa = NULL;
	}

	err = -ENOMEM;
	tmp_folio = folio_alloc(GFP_NOFS | __GFP_HIGHMEM, 0);
	if (!tmp_folio)
		goto out_unlock;

	/*
	 * The page must not be redirtied until the writeout is completed
	 * (i.e. userspace has sent a reply to the write request).  Otherwise
	 * there could be more than one temporary page instance for each real
	 * page.
	 *
	 * This is ensured by holding the page lock in page_mkwrite() while
	 * checking fuse_page_is_writeback().  We already hold the page lock
	 * since clear_page_dirty_for_io() and keep it held until we add the
	 * request to the fi->writepages list and increment ap->num_folios.
	 * After this fuse_page_is_writeback() will indicate that the page is
	 * under writeback, so we can release the page lock.
	 */
	if (data->wpa == NULL) {
		err = -ENOMEM;
		wpa = fuse_writepage_args_setup(folio, data->ff);
		if (!wpa) {
			folio_put(tmp_folio);
			goto out_unlock;
		}
		fuse_file_get(wpa->ia.ff);
		data->max_folios = 1;
		ap = &wpa->ia.ap;
	}
	folio_start_writeback(folio);

	fuse_writepage_args_page_fill(wpa, folio, tmp_folio, ap->num_folios);
	data->orig_folios[ap->num_folios] = folio;

	err = 0;
	if (data->wpa) {
		/*
		 * Protected by fi->lock against concurrent access by
		 * fuse_page_is_writeback().
		 */
		spin_lock(&fi->lock);
		ap->num_folios++;
		spin_unlock(&fi->lock);
	} else if (fuse_writepage_add(wpa, folio)) {
		data->wpa = wpa;
	} else {
		folio_end_writeback(folio);
	}
out_unlock:
	folio_unlock(folio);

	return err;
}

static int fuse_writepages(struct address_space *mapping,
			   struct writeback_control *wbc)
{
	struct inode *inode = mapping->host;
	struct fuse_conn *fc = get_fuse_conn(inode);
	struct fuse_fill_wb_data data;
	int err;

	err = -EIO;
	if (fuse_is_bad(inode))
		goto out;

	if (wbc->sync_mode == WB_SYNC_NONE &&
	    fc->num_background >= fc->congestion_threshold)
		return 0;

	data.inode = inode;
	data.wpa = NULL;
	data.ff = NULL;

	err = -ENOMEM;
	data.orig_folios = kcalloc(fc->max_pages,
				   sizeof(struct folio *),
				   GFP_NOFS);
	if (!data.orig_folios)
		goto out;

	err = write_cache_pages(mapping, wbc, fuse_writepages_fill, &data);
	if (data.wpa) {
		WARN_ON(!data.wpa->ia.ap.num_folios);
		fuse_writepages_send(&data);
	}
	if (data.ff)
		fuse_file_put(data.ff, false);

	kfree(data.orig_folios);
out:
	return err;
}

/*
 * It would be worthwhile to make sure that space is reserved on disk for the
 * write, but how to implement that without killing performance needs more
 * thought.
 */
static int fuse_write_begin(struct file *file, struct address_space *mapping,
		loff_t pos, unsigned len, struct folio **foliop, void **fsdata)
{
	pgoff_t index = pos >> PAGE_SHIFT;
	struct fuse_conn *fc = get_fuse_conn(file_inode(file));
	struct folio *folio;
	loff_t fsize;
	int err = -ENOMEM;

	WARN_ON(!fc->writeback_cache);

	folio = __filemap_get_folio(mapping, index, FGP_WRITEBEGIN,
			mapping_gfp_mask(mapping));
	if (IS_ERR(folio))
		goto error;

	fuse_wait_on_page_writeback(mapping->host, folio->index);

	if (folio_test_uptodate(folio) || len >= folio_size(folio))
		goto success;
	/*
	 * Check if the start of this folio comes after the end of file,
	 * in which case the readpage can be optimized away.
	 */
	fsize = i_size_read(mapping->host);
	if (fsize <= folio_pos(folio)) {
		size_t off = offset_in_folio(folio, pos);
		if (off)
			folio_zero_segment(folio, 0, off);
		goto success;
	}
	err = fuse_do_readfolio(file, folio);
	if (err)
		goto cleanup;
success:
	*foliop = folio;
	return 0;

cleanup:
	folio_unlock(folio);
	folio_put(folio);
error:
	return err;
}

static int fuse_write_end(struct file *file, struct address_space *mapping,
		loff_t pos, unsigned len, unsigned copied,
		struct folio *folio, void *fsdata)
{
	struct inode *inode = folio->mapping->host;

	/* Haven't copied anything?  Skip zeroing, size extending, dirtying. */
	if (!copied)
		goto unlock;

	pos += copied;
	if (!folio_test_uptodate(folio)) {
		/* Zero any unwritten bytes at the end of the page */
		size_t endoff = pos & ~PAGE_MASK;
		if (endoff)
			folio_zero_segment(folio, endoff, PAGE_SIZE);
		folio_mark_uptodate(folio);
	}

	if (pos > inode->i_size)
		i_size_write(inode, pos);

	folio_mark_dirty(folio);

unlock:
	folio_unlock(folio);
	folio_put(folio);

	return copied;
}

static int fuse_launder_folio(struct folio *folio)
{
	int err = 0;

	if (folio_clear_dirty_for_io(folio)) {
		struct inode *inode = folio->mapping->host;

		/* Serialize with pending writeback for the same page */
		fuse_wait_on_page_writeback(inode, folio->index);
		err = fuse_writepage_locked(folio);
		if (!err)
			fuse_wait_on_page_writeback(inode, folio->index);
	}
	return err;
}

/*
 * Write back dirty data/metadata now (there may not be any suitable
 * open files later for data)
 */
static void fuse_vma_close(struct vm_area_struct *vma)
{
	int err;

	err = write_inode_now(vma->vm_file->f_mapping->host, 1);
	mapping_set_error(vma->vm_file->f_mapping, err);
}

/*
 * Wait for writeback against this page to complete before allowing it
 * to be marked dirty again, and hence written back again, possibly
 * before the previous writepage completed.
 *
 * Block here, instead of in ->writepage(), so that the userspace fs
 * can only block processes actually operating on the filesystem.
 *
 * Otherwise unprivileged userspace fs would be able to block
 * unrelated:
 *
 * - page migration
 * - sync(2)
 * - try_to_free_pages() with order > PAGE_ALLOC_COSTLY_ORDER
 */
static vm_fault_t fuse_page_mkwrite(struct vm_fault *vmf)
{
	struct folio *folio = page_folio(vmf->page);
	struct inode *inode = file_inode(vmf->vma->vm_file);

	file_update_time(vmf->vma->vm_file);
	folio_lock(folio);
	if (folio->mapping != inode->i_mapping) {
		folio_unlock(folio);
		return VM_FAULT_NOPAGE;
	}

	fuse_wait_on_folio_writeback(inode, folio);
	return VM_FAULT_LOCKED;
}

static const struct vm_operations_struct fuse_file_vm_ops = {
	.close		= fuse_vma_close,
	.fault		= filemap_fault,
	.map_pages	= filemap_map_pages,
	.page_mkwrite	= fuse_page_mkwrite,
};

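/*
 * Only ->close and ->page_mkwrite above are fuse-specific; faults on mapped
 * pages go through the generic filemap helpers, which call back into this
 * file via the ->read_folio/->readahead address space operations.
 */
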
static int fuse_file_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct fuse_file *ff = file->private_data;
	struct fuse_conn *fc = ff->fm->fc;
	struct inode *inode = file_inode(file);
	int rc;

	/* DAX mmap is superior to direct_io mmap */
	if (FUSE_IS_DAX(inode))
		return fuse_dax_mmap(file, vma);

	/*
	 * If inode is in passthrough io mode, because it has some file open
	 * in passthrough mode, either mmap to backing file or fail mmap,
	 * because mixing cached mmap and passthrough io mode is not allowed.
	 */
	if (fuse_file_passthrough(ff))
		return fuse_passthrough_mmap(file, vma);
	else if (fuse_inode_backing(get_fuse_inode(inode)))
		return -ENODEV;

	/*
	 * FOPEN_DIRECT_IO handling is special compared to O_DIRECT, as it
	 * does not allow MAP_SHARED mmap without FUSE_DIRECT_IO_ALLOW_MMAP.
	 */
	if (ff->open_flags & FOPEN_DIRECT_IO) {
		/*
		 * Can't provide the coherency needed for MAP_SHARED
		 * if FUSE_DIRECT_IO_ALLOW_MMAP isn't set.
		 */
		if ((vma->vm_flags & VM_MAYSHARE) && !fc->direct_io_allow_mmap)
			return -ENODEV;

		invalidate_inode_pages2(file->f_mapping);

		if (!(vma->vm_flags & VM_MAYSHARE)) {
			/* MAP_PRIVATE */
			return generic_file_mmap(file, vma);
		}

		/*
		 * First mmap of direct_io file enters caching inode io mode.
		 * Also waits for parallel dio writers to go into serial mode
		 * (exclusive instead of shared lock).
		 * After first mmap, the inode stays in caching io mode until
		 * the direct_io file release.
		 */
		rc = fuse_file_cached_io_open(inode, ff);
		if (rc)
			return rc;
	}

	if ((vma->vm_flags & VM_SHARED) && (vma->vm_flags & VM_MAYWRITE))
		fuse_link_write_file(file);

	file_accessed(file);
	vma->vm_ops = &fuse_file_vm_ops;
	return 0;
}

static int convert_fuse_file_lock(struct fuse_conn *fc,
				  const struct fuse_file_lock *ffl,
				  struct file_lock *fl)
{
	switch (ffl->type) {
	case F_UNLCK:
		break;

	case F_RDLCK:
	case F_WRLCK:
		if (ffl->start > OFFSET_MAX || ffl->end > OFFSET_MAX ||
		    ffl->end < ffl->start)
			return -EIO;

		fl->fl_start = ffl->start;
		fl->fl_end = ffl->end;

		/*
		 * Convert pid into init's pid namespace. The locks API will
		 * translate it into the caller's pid namespace.
		 */
		rcu_read_lock();
		fl->c.flc_pid = pid_nr_ns(find_pid_ns(ffl->pid, fc->pid_ns), &init_pid_ns);
		rcu_read_unlock();
		break;

	default:
		return -EIO;
	}
	fl->c.flc_type = ffl->type;
	return 0;
}

static void fuse_lk_fill(struct fuse_args *args, struct file *file,
			 const struct file_lock *fl, int opcode, pid_t pid,
			 int flock, struct fuse_lk_in *inarg)
{
	struct inode *inode = file_inode(file);
	struct fuse_conn *fc = get_fuse_conn(inode);
	struct fuse_file *ff = file->private_data;

	memset(inarg, 0, sizeof(*inarg));
	inarg->fh = ff->fh;
	inarg->owner = fuse_lock_owner_id(fc, fl->c.flc_owner);
	inarg->lk.start = fl->fl_start;
	inarg->lk.end = fl->fl_end;
	inarg->lk.type = fl->c.flc_type;
	inarg->lk.pid = pid;
	if (flock)
		inarg->lk_flags |= FUSE_LK_FLOCK;
	args->opcode = opcode;
	args->nodeid = get_node_id(inode);
	args->in_numargs = 1;
	args->in_args[0].size = sizeof(*inarg);
	args->in_args[0].value = inarg;
}

static int fuse_getlk(struct file *file, struct file_lock *fl)
{
	struct inode *inode = file_inode(file);
	struct fuse_mount *fm = get_fuse_mount(inode);
	FUSE_ARGS(args);
	struct fuse_lk_in inarg;
	struct fuse_lk_out outarg;
	int err;

	fuse_lk_fill(&args, file, fl, FUSE_GETLK, 0, 0, &inarg);
	args.out_numargs = 1;
	args.out_args[0].size = sizeof(outarg);
	args.out_args[0].value = &outarg;
	err = fuse_simple_request(fm, &args);
	if (!err)
		err = convert_fuse_file_lock(fm->fc, &outarg.lk, fl);

	return err;
}

static int fuse_setlk(struct file *file, struct file_lock *fl, int flock)
{
	struct inode *inode = file_inode(file);
	struct fuse_mount *fm = get_fuse_mount(inode);
	FUSE_ARGS(args);
	struct fuse_lk_in inarg;
	int opcode = (fl->c.flc_flags & FL_SLEEP) ? FUSE_SETLKW : FUSE_SETLK;
	struct pid *pid = fl->c.flc_type != F_UNLCK ? task_tgid(current) : NULL;
	pid_t pid_nr = pid_nr_ns(pid, fm->fc->pid_ns);
	int err;

	if (fl->fl_lmops && fl->fl_lmops->lm_grant) {
		/* NLM needs asynchronous locks, which we don't support yet */
		return -ENOLCK;
	}

	fuse_lk_fill(&args, file, fl, opcode, pid_nr, flock, &inarg);
	err = fuse_simple_request(fm, &args);

	/* locking is restartable */
	if (err == -EINTR)
		err = -ERESTARTSYS;

	return err;
}

static int fuse_file_lock(struct file *file, int cmd, struct file_lock *fl)
{
	struct inode *inode = file_inode(file);
	struct fuse_conn *fc = get_fuse_conn(inode);
	int err;

	if (cmd == F_CANCELLK) {
		err = 0;
	} else if (cmd == F_GETLK) {
		if (fc->no_lock) {
			posix_test_lock(file, fl);
			err = 0;
		} else
			err = fuse_getlk(file, fl);
	} else {
		if (fc->no_lock)
			err = posix_lock_file(file, fl, NULL);
		else
			err = fuse_setlk(file, fl, 0);
	}
	return err;
}

static int fuse_file_flock(struct file *file, int cmd, struct file_lock *fl)
{
	struct inode *inode = file_inode(file);
	struct fuse_conn *fc = get_fuse_conn(inode);
	int err;

	if (fc->no_flock) {
		err = locks_lock_file_wait(file, fl);
	} else {
		struct fuse_file *ff = file->private_data;

		/* emulate flock with POSIX locks */
		ff->flock = true;
		err = fuse_setlk(file, fl, 1);
	}

	return err;
}

static sector_t fuse_bmap(struct address_space *mapping, sector_t block)
{
	struct inode *inode = mapping->host;
	struct fuse_mount *fm = get_fuse_mount(inode);
	FUSE_ARGS(args);
	struct fuse_bmap_in inarg;
	struct fuse_bmap_out outarg;
	int err;

	if (!inode->i_sb->s_bdev || fm->fc->no_bmap)
		return 0;

	memset(&inarg, 0, sizeof(inarg));
	inarg.block = block;
	inarg.blocksize = inode->i_sb->s_blocksize;
	args.opcode = FUSE_BMAP;
	args.nodeid = get_node_id(inode);
	args.in_numargs = 1;
	args.in_args[0].size = sizeof(inarg);
	args.in_args[0].value = &inarg;
	args.out_numargs = 1;
	args.out_args[0].size = sizeof(outarg);
	args.out_args[0].value = &outarg;
	err = fuse_simple_request(fm, &args);
	if (err == -ENOSYS)
		fm->fc->no_bmap = 1;

	return err ? 0 : outarg.block;
}

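/*
 * fuse_bmap() backs the FIBMAP ioctl.  It is only meaningful for
 * block-device-based (fuseblk) mounts, hence the i_sb->s_bdev check above.
 */
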
static loff_t fuse_lseek(struct file *file, loff_t offset, int whence)
{
	struct inode *inode = file->f_mapping->host;
	struct fuse_mount *fm = get_fuse_mount(inode);
	struct fuse_file *ff = file->private_data;
	FUSE_ARGS(args);
	struct fuse_lseek_in inarg = {
		.fh = ff->fh,
		.offset = offset,
		.whence = whence
	};
	struct fuse_lseek_out outarg;
	int err;

	if (fm->fc->no_lseek)
		goto fallback;

	args.opcode = FUSE_LSEEK;
	args.nodeid = ff->nodeid;
	args.in_numargs = 1;
	args.in_args[0].size = sizeof(inarg);
	args.in_args[0].value = &inarg;
	args.out_numargs = 1;
	args.out_args[0].size = sizeof(outarg);
	args.out_args[0].value = &outarg;
	err = fuse_simple_request(fm, &args);
	if (err) {
		if (err == -ENOSYS) {
			fm->fc->no_lseek = 1;
			goto fallback;
		}
		return err;
	}

	return vfs_setpos(file, outarg.offset, inode->i_sb->s_maxbytes);

fallback:
	err = fuse_update_attributes(inode, file, STATX_SIZE);
	if (!err)
		return generic_file_llseek(file, offset, whence);
	else
		return err;
}

static loff_t fuse_file_llseek(struct file *file, loff_t offset, int whence)
{
	loff_t retval;
	struct inode *inode = file_inode(file);

	switch (whence) {
	case SEEK_SET:
	case SEEK_CUR:
		/* No i_mutex protection necessary for SEEK_CUR and SEEK_SET */
		retval = generic_file_llseek(file, offset, whence);
		break;
	case SEEK_END:
		inode_lock(inode);
		retval = fuse_update_attributes(inode, file, STATX_SIZE);
		if (!retval)
			retval = generic_file_llseek(file, offset, whence);
		inode_unlock(inode);
		break;
	case SEEK_HOLE:
	case SEEK_DATA:
		inode_lock(inode);
		retval = fuse_lseek(file, offset, whence);
		inode_unlock(inode);
		break;
	default:
		retval = -EINVAL;
	}

	return retval;
}

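/*
 * Of the cases above, only SEEK_HOLE/SEEK_DATA require a round trip to the
 * server (FUSE_LSEEK); SEEK_END merely needs an up-to-date size, and
 * SEEK_SET/SEEK_CUR are resolved entirely locally.  For illustration (not
 * part of this file), a libfuse-based server could implement the FUSE_LSEEK
 * side by delegating to the backing file in its ->lseek handler, roughly:
 *
 *	static off_t xmp_lseek(const char *path, off_t off, int whence,
 *			       struct fuse_file_info *fi)
 *	{
 *		return lseek(fi->fh, off, whence);
 *	}
 *
 * (sketch only; a real handler must map failure to -errno)
 */
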
/*
 * All files which have been polled are linked to RB tree
 * fuse_conn->polled_files which is indexed by kh.  Walk the tree and
 * find the matching one.
 */
static struct rb_node **fuse_find_polled_node(struct fuse_conn *fc, u64 kh,
					      struct rb_node **parent_out)
{
	struct rb_node **link = &fc->polled_files.rb_node;
	struct rb_node *last = NULL;

	while (*link) {
		struct fuse_file *ff;

		last = *link;
		ff = rb_entry(last, struct fuse_file, polled_node);

		if (kh < ff->kh)
			link = &last->rb_left;
		else if (kh > ff->kh)
			link = &last->rb_right;
		else
			return link;
	}

	if (parent_out)
		*parent_out = last;
	return link;
}

/*
 * The file is about to be polled.  Make sure it's on the polled_files
 * RB tree.  Note that files once added to the polled_files tree are
 * not removed before the file is released.  This is because a file
 * polled once is likely to be polled again.
 */
static void fuse_register_polled_file(struct fuse_conn *fc,
				      struct fuse_file *ff)
{
	spin_lock(&fc->lock);
	if (RB_EMPTY_NODE(&ff->polled_node)) {
		struct rb_node **link, *parent;

		link = fuse_find_polled_node(fc, ff->kh, &parent);
		BUG_ON(*link);
		rb_link_node(&ff->polled_node, parent, link);
		rb_insert_color(&ff->polled_node, &fc->polled_files);
	}
	spin_unlock(&fc->lock);
}

__poll_t fuse_file_poll(struct file *file, poll_table *wait)
{
	struct fuse_file *ff = file->private_data;
	struct fuse_mount *fm = ff->fm;
	struct fuse_poll_in inarg = { .fh = ff->fh, .kh = ff->kh };
	struct fuse_poll_out outarg;
	FUSE_ARGS(args);
	int err;

	if (fm->fc->no_poll)
		return DEFAULT_POLLMASK;

	poll_wait(file, &ff->poll_wait, wait);
	inarg.events = mangle_poll(poll_requested_events(wait));

	/*
	 * Ask for notification iff there's someone waiting for it.
	 * The client may ignore the flag and always notify.
	 */
	if (waitqueue_active(&ff->poll_wait)) {
		inarg.flags |= FUSE_POLL_SCHEDULE_NOTIFY;
		fuse_register_polled_file(fm->fc, ff);
	}

	args.opcode = FUSE_POLL;
	args.nodeid = ff->nodeid;
	args.in_numargs = 1;
	args.in_args[0].size = sizeof(inarg);
	args.in_args[0].value = &inarg;
	args.out_numargs = 1;
	args.out_args[0].size = sizeof(outarg);
	args.out_args[0].value = &outarg;
	err = fuse_simple_request(fm, &args);

	if (!err)
		return demangle_poll(outarg.revents);
	if (err == -ENOSYS) {
		fm->fc->no_poll = 1;
		return DEFAULT_POLLMASK;
	}
	return EPOLLERR;
}
EXPORT_SYMBOL_GPL(fuse_file_poll);

/*
 * This is called from fuse_handle_notify() on FUSE_NOTIFY_POLL and
 * wakes up the poll waiters.
 */
int fuse_notify_poll_wakeup(struct fuse_conn *fc,
			    struct fuse_notify_poll_wakeup_out *outarg)
{
	u64 kh = outarg->kh;
	struct rb_node **link;

	spin_lock(&fc->lock);

	link = fuse_find_polled_node(fc, kh, NULL);
	if (*link) {
		struct fuse_file *ff;

		ff = rb_entry(*link, struct fuse_file, polled_node);
		wake_up_interruptible_sync(&ff->poll_wait);
	}

	spin_unlock(&fc->lock);
	return 0;
}

static void fuse_do_truncate(struct file *file)
{
	struct inode *inode = file->f_mapping->host;
	struct iattr attr;

	attr.ia_valid = ATTR_SIZE;
	attr.ia_size = i_size_read(inode);

	attr.ia_file = file;
	attr.ia_valid |= ATTR_FILE;

	fuse_do_setattr(file_mnt_idmap(file), file_dentry(file), &attr, file);
}

static inline loff_t fuse_round_up(struct fuse_conn *fc, loff_t off)
{
	return round_up(off, fc->max_pages << PAGE_SHIFT);
}

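/*
 * For example, with 4K pages and the default fc->max_pages of 32, shortened
 * direct reads in fuse_direct_IO() below are rounded up to 128K boundaries.
 */
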
static ssize_t
fuse_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
{
	DECLARE_COMPLETION_ONSTACK(wait);
	ssize_t ret = 0;
	struct file *file = iocb->ki_filp;
	struct fuse_file *ff = file->private_data;
	loff_t pos = 0;
	struct inode *inode;
	loff_t i_size;
	size_t count = iov_iter_count(iter), shortened = 0;
	loff_t offset = iocb->ki_pos;
	struct fuse_io_priv *io;

	pos = offset;
	inode = file->f_mapping->host;
	i_size = i_size_read(inode);

	if ((iov_iter_rw(iter) == READ) && (offset >= i_size))
		return 0;

	io = kmalloc(sizeof(struct fuse_io_priv), GFP_KERNEL);
	if (!io)
		return -ENOMEM;
	spin_lock_init(&io->lock);
	kref_init(&io->refcnt);
	io->reqs = 1;
	io->bytes = -1;
	io->size = 0;
	io->offset = offset;
	io->write = (iov_iter_rw(iter) == WRITE);
	io->err = 0;
	/*
	 * By default, we want to optimize all I/Os with async request
	 * submission to the client filesystem if supported.
	 */
	io->async = ff->fm->fc->async_dio;
	io->iocb = iocb;
	io->blocking = is_sync_kiocb(iocb);

	/* optimization for short read */
	if (io->async && !io->write && offset + count > i_size) {
		iov_iter_truncate(iter, fuse_round_up(ff->fm->fc, i_size - offset));
		shortened = count - iov_iter_count(iter);
		count -= shortened;
	}

	/*
	 * We cannot asynchronously extend the size of a file.
	 * In such case the aio will behave exactly like sync io.
	 */
	if ((offset + count > i_size) && io->write)
		io->blocking = true;

	if (io->async && io->blocking) {
		/*
		 * Additional reference to keep io around after
		 * calling fuse_aio_complete()
		 */
		kref_get(&io->refcnt);
		io->done = &wait;
	}

	if (iov_iter_rw(iter) == WRITE) {
		ret = fuse_direct_io(io, iter, &pos, FUSE_DIO_WRITE);
		fuse_invalidate_attr_mask(inode, FUSE_STATX_MODSIZE);
	} else {
		ret = __fuse_direct_read(io, iter, &pos);
	}
	iov_iter_reexpand(iter, iov_iter_count(iter) + shortened);

	if (io->async) {
		bool blocking = io->blocking;

		fuse_aio_complete(io, ret < 0 ? ret : 0, -1);

		/* we have a non-extending, async request, so return */
		if (!blocking)
			return -EIOCBQUEUED;

		wait_for_completion(&wait);
		ret = fuse_get_res_by_io(io);
	}

	kref_put(&io->refcnt, fuse_io_release);

	if (iov_iter_rw(iter) == WRITE) {
		fuse_write_update_attr(inode, pos, ret);
		/* For extending writes we already hold exclusive lock */
		if (ret < 0 && offset + count > i_size)
			fuse_do_truncate(file);
	}

	return ret;
}

static int fuse_writeback_range(struct inode *inode, loff_t start, loff_t end)
{
	int err = filemap_write_and_wait_range(inode->i_mapping, start, LLONG_MAX);

	if (!err)
		fuse_sync_writes(inode);

	return err;
}

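/*
 * Note that fuse_writeback_range() deliberately starts writeback all the way
 * up to LLONG_MAX rather than just to @end, so that writes queued beyond
 * @end (e.g. by a racing size-extending write) are flushed before
 * fuse_sync_writes() waits on them.
 */
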
static long fuse_file_fallocate(struct file *file, int mode, loff_t offset,
				loff_t length)
{
	struct fuse_file *ff = file->private_data;
	struct inode *inode = file_inode(file);
	struct fuse_inode *fi = get_fuse_inode(inode);
	struct fuse_mount *fm = ff->fm;
	FUSE_ARGS(args);
	struct fuse_fallocate_in inarg = {
		.fh = ff->fh,
		.offset = offset,
		.length = length,
		.mode = mode
	};
	int err;
	bool block_faults = FUSE_IS_DAX(inode) &&
		(!(mode & FALLOC_FL_KEEP_SIZE) ||
		 (mode & (FALLOC_FL_PUNCH_HOLE | FALLOC_FL_ZERO_RANGE)));

	if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE |
		     FALLOC_FL_ZERO_RANGE))
		return -EOPNOTSUPP;

	if (fm->fc->no_fallocate)
		return -EOPNOTSUPP;

	inode_lock(inode);
	if (block_faults) {
		filemap_invalidate_lock(inode->i_mapping);
		err = fuse_dax_break_layouts(inode, 0, 0);
		if (err)
			goto out;
	}

	if (mode & (FALLOC_FL_PUNCH_HOLE | FALLOC_FL_ZERO_RANGE)) {
		loff_t endbyte = offset + length - 1;

		err = fuse_writeback_range(inode, offset, endbyte);
		if (err)
			goto out;
	}

	if (!(mode & FALLOC_FL_KEEP_SIZE) &&
	    offset + length > i_size_read(inode)) {
		err = inode_newsize_ok(inode, offset + length);
		if (err)
			goto out;
	}

	err = file_modified(file);
	if (err)
		goto out;

	if (!(mode & FALLOC_FL_KEEP_SIZE))
		set_bit(FUSE_I_SIZE_UNSTABLE, &fi->state);

	args.opcode = FUSE_FALLOCATE;
	args.nodeid = ff->nodeid;
	args.in_numargs = 1;
	args.in_args[0].size = sizeof(inarg);
	args.in_args[0].value = &inarg;
	err = fuse_simple_request(fm, &args);
	if (err == -ENOSYS) {
		fm->fc->no_fallocate = 1;
		err = -EOPNOTSUPP;
	}
	if (err)
		goto out;

	/* we could have extended the file */
	if (!(mode & FALLOC_FL_KEEP_SIZE)) {
		if (fuse_write_update_attr(inode, offset + length, length))
			file_update_time(file);
	}

	if (mode & (FALLOC_FL_PUNCH_HOLE | FALLOC_FL_ZERO_RANGE))
		truncate_pagecache_range(inode, offset, offset + length - 1);

	fuse_invalidate_attr_mask(inode, FUSE_STATX_MODSIZE);

out:
	if (!(mode & FALLOC_FL_KEEP_SIZE))
		clear_bit(FUSE_I_SIZE_UNSTABLE, &fi->state);

	if (block_faults)
		filemap_invalidate_unlock(inode->i_mapping);

	inode_unlock(inode);

	fuse_flush_time_update(inode);

	return err;
}

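/*
 * Like most optional FUSE operations, fallocate above uses the usual
 * feature-detection pattern: the first -ENOSYS reply from the server sets
 * fc->no_fallocate, and later calls short-circuit to -EOPNOTSUPP without a
 * round trip.
 */
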
static ssize_t __fuse_copy_file_range(struct file *file_in, loff_t pos_in,
				      struct file *file_out, loff_t pos_out,
				      size_t len, unsigned int flags)
{
	struct fuse_file *ff_in = file_in->private_data;
	struct fuse_file *ff_out = file_out->private_data;
	struct inode *inode_in = file_inode(file_in);
	struct inode *inode_out = file_inode(file_out);
	struct fuse_inode *fi_out = get_fuse_inode(inode_out);
	struct fuse_mount *fm = ff_in->fm;
	struct fuse_conn *fc = fm->fc;
	FUSE_ARGS(args);
	struct fuse_copy_file_range_in inarg = {
		.fh_in = ff_in->fh,
		.off_in = pos_in,
		.nodeid_out = ff_out->nodeid,
		.fh_out = ff_out->fh,
		.off_out = pos_out,
		.len = len,
		.flags = flags
	};
	struct fuse_write_out outarg;
	ssize_t err;
	/*
	 * Mark unstable when write-back is not used, and file_out gets
	 * extended.
	 */
	bool is_unstable = (!fc->writeback_cache) &&
			   ((pos_out + len) > inode_out->i_size);

	if (fc->no_copy_file_range)
		return -EOPNOTSUPP;

	if (file_inode(file_in)->i_sb != file_inode(file_out)->i_sb)
		return -EXDEV;

	inode_lock(inode_in);
	err = fuse_writeback_range(inode_in, pos_in, pos_in + len - 1);
	inode_unlock(inode_in);
	if (err)
		return err;

	inode_lock(inode_out);

	err = file_modified(file_out);
	if (err)
		goto out;

	/*
	 * Write out dirty pages in the destination file before sending the COPY
	 * request to userspace.  After the request is completed, truncate off
	 * pages (including partial ones) from the cache that have been copied,
	 * since these contain stale data at that point.
	 *
	 * This should be mostly correct, but if the COPY writes to partial
	 * pages (at the start or end) and the parts not covered by the COPY are
	 * written through a memory map after calling fuse_writeback_range(),
	 * then these partial page modifications will be lost on truncation.
	 *
	 * It is unlikely that someone would rely on such mixed style
	 * modifications.  Yet this does give less guarantees than if the
	 * copying was performed with write(2).
	 *
	 * To fix this a mapping->invalidate_lock could be used to prevent new
	 * faults while the copy is ongoing.
	 */
	err = fuse_writeback_range(inode_out, pos_out, pos_out + len - 1);
	if (err)
		goto out;

	if (is_unstable)
		set_bit(FUSE_I_SIZE_UNSTABLE, &fi_out->state);

	args.opcode = FUSE_COPY_FILE_RANGE;
	args.nodeid = ff_in->nodeid;
	args.in_numargs = 1;
	args.in_args[0].size = sizeof(inarg);
	args.in_args[0].value = &inarg;
	args.out_numargs = 1;
	args.out_args[0].size = sizeof(outarg);
	args.out_args[0].value = &outarg;
	err = fuse_simple_request(fm, &args);
	if (err == -ENOSYS) {
		fc->no_copy_file_range = 1;
		err = -EOPNOTSUPP;
	}
	if (err)
		goto out;

	truncate_inode_pages_range(inode_out->i_mapping,
				   ALIGN_DOWN(pos_out, PAGE_SIZE),
				   ALIGN(pos_out + outarg.size, PAGE_SIZE) - 1);

	file_update_time(file_out);
	fuse_write_update_attr(inode_out, pos_out + outarg.size, outarg.size);

	err = outarg.size;
out:
	if (is_unstable)
		clear_bit(FUSE_I_SIZE_UNSTABLE, &fi_out->state);

	inode_unlock(inode_out);
	file_accessed(file_in);

	fuse_flush_time_update(inode_out);

	return err;
}

static ssize_t fuse_copy_file_range(struct file *src_file, loff_t src_off,
				    struct file *dst_file, loff_t dst_off,
				    size_t len, unsigned int flags)
{
	ssize_t ret;

	ret = __fuse_copy_file_range(src_file, src_off, dst_file, dst_off,
				     len, flags);

	if (ret == -EOPNOTSUPP || ret == -EXDEV)
		ret = splice_copy_file_range(src_file, src_off, dst_file,
					     dst_off, len);

	return ret;
}

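/*
 * If the server does not implement FUSE_COPY_FILE_RANGE, or the copy would
 * cross filesystems, fuse_copy_file_range() above falls back to a generic
 * splice-based copy in the kernel instead of failing the syscall.
 */
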
static const struct file_operations fuse_file_operations = {
	.llseek		= fuse_file_llseek,
	.read_iter	= fuse_file_read_iter,
	.write_iter	= fuse_file_write_iter,
	.mmap		= fuse_file_mmap,
	.open		= fuse_open,
	.flush		= fuse_flush,
	.release	= fuse_release,
	.fsync		= fuse_fsync,
	.lock		= fuse_file_lock,
	.get_unmapped_area = thp_get_unmapped_area,
	.flock		= fuse_file_flock,
	.splice_read	= fuse_splice_read,
	.splice_write	= fuse_splice_write,
	.unlocked_ioctl	= fuse_file_ioctl,
	.compat_ioctl	= fuse_file_compat_ioctl,
	.poll		= fuse_file_poll,
	.fallocate	= fuse_file_fallocate,
	.copy_file_range = fuse_copy_file_range,
};

static const struct address_space_operations fuse_file_aops  = {
	.read_folio	= fuse_read_folio,
	.readahead	= fuse_readahead,
	.writepages	= fuse_writepages,
	.launder_folio	= fuse_launder_folio,
	.dirty_folio	= filemap_dirty_folio,
	.migrate_folio	= filemap_migrate_folio,
	.bmap		= fuse_bmap,
	.direct_IO	= fuse_direct_IO,
	.write_begin	= fuse_write_begin,
	.write_end	= fuse_write_end,
};

void fuse_init_file_inode(struct inode *inode, unsigned int flags)
{
	struct fuse_inode *fi = get_fuse_inode(inode);

	inode->i_fop = &fuse_file_operations;
	inode->i_data.a_ops = &fuse_file_aops;

	INIT_LIST_HEAD(&fi->write_files);
	INIT_LIST_HEAD(&fi->queued_writes);
	fi->writectr = 0;
	fi->iocachectr = 0;
	init_waitqueue_head(&fi->page_waitq);
	init_waitqueue_head(&fi->direct_io_waitq);
	fi->writepages = RB_ROOT;

	if (IS_ENABLED(CONFIG_FUSE_DAX))
		fuse_dax_inode_init(inode, flags);
}