// SPDX-License-Identifier: GPL-2.0-or-later
/* Direct I/O support.
 *
 * Copyright (C) 2023 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 */

#include <linux/export.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/slab.h>
#include <linux/uio.h>
#include <linux/sched/mm.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/netfs.h>
#include "internal.h"
static void netfs_prepare_dio_read_iterator(struct netfs_io_subrequest *subreq)
{
	struct netfs_io_request *rreq = subreq->rreq;
	size_t rsize;

	/* Clamp the slice to the transport's maximum I/O size. */
	rsize = umin(subreq->len, rreq->io_streams[0].sreq_max_len);
	subreq->len = rsize;

	/* If the transport also limits the number of segments, see whether
	 * that shortens the slice further.
	 */
	if (unlikely(rreq->io_streams[0].sreq_max_segs)) {
		size_t limit = netfs_limit_iter(&rreq->iter, 0, rsize,
						rreq->io_streams[0].sreq_max_segs);

		if (limit < rsize) {
			subreq->len = limit;
			trace_netfs_sreq(subreq, netfs_sreq_trace_limited);
		}
	}

	trace_netfs_sreq(subreq, netfs_sreq_trace_prepare);

	/* Carve this subrequest's slice out of the request's iterator. */
	subreq->io_iter = rreq->iter;
	iov_iter_truncate(&subreq->io_iter, subreq->len);
	iov_iter_advance(&rreq->iter, subreq->len);
}
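
/* Worked example (illustrative numbers, not from the source): for a 1 MiB
 * subrequest with sreq_max_len of 256 KiB, rsize is clamped to 256 KiB; if
 * netfs_limit_iter() then finds that only 128 KiB fits within sreq_max_segs
 * segments, subreq->len shrinks to 128 KiB and the request iterator is
 * advanced past just that slice, leaving the remainder for later
 * subrequests.
 */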
/*
 * Perform a read to a buffer from the server, slicing up the region to be read
 * according to the network rsize.
 */
static int netfs_dispatch_unbuffered_reads(struct netfs_io_request *rreq)
{
	unsigned long long start = rreq->start;
	ssize_t size = rreq->len;
	int ret = 0;

	atomic_set(&rreq->nr_outstanding, 1);

	do {
		struct netfs_io_subrequest *subreq;
		ssize_t slice;

		subreq = netfs_alloc_subrequest(rreq);
		if (!subreq) {
			ret = -ENOMEM;
			break;
		}

		subreq->source	= NETFS_DOWNLOAD_FROM_SERVER;
		subreq->start	= start;
		subreq->len	= size;

		atomic_inc(&rreq->nr_outstanding);
		spin_lock_bh(&rreq->lock);
		list_add_tail(&subreq->rreq_link, &rreq->subrequests);
		subreq->prev_donated = rreq->prev_donated;
		rreq->prev_donated = 0;
		trace_netfs_sreq(subreq, netfs_sreq_trace_added);
		spin_unlock_bh(&rreq->lock);

		netfs_stat(&netfs_n_rh_download);
		if (rreq->netfs_ops->prepare_read) {
			ret = rreq->netfs_ops->prepare_read(subreq);
			if (ret < 0) {
				atomic_dec(&rreq->nr_outstanding);
				netfs_put_subrequest(subreq, false, netfs_sreq_trace_put_cancel);
				break;
			}
		}

		netfs_prepare_dio_read_iterator(subreq);

		slice = subreq->len;
		rreq->netfs_ops->issue_read(subreq);

		size -= slice;
		start += slice;
		rreq->submitted += slice;

		if (test_bit(NETFS_RREQ_BLOCKED, &rreq->flags) &&
		    test_bit(NETFS_RREQ_NONBLOCK, &rreq->flags))
			break;
		cond_resched();
	} while (size > 0);

	if (atomic_dec_and_test(&rreq->nr_outstanding))
		netfs_rreq_terminated(rreq, false);
	return ret;
}
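
/* Note on the pattern above: nr_outstanding is primed to 1 before the loop
 * so that subrequests completing while others are still being issued cannot
 * trigger result collection early; the final atomic_dec_and_test() drops
 * that priming count, and whichever path brings the count to zero last is
 * the one that calls netfs_rreq_terminated().
 */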
/*
 * Perform a read to an application buffer, bypassing the pagecache and the
 * local disk cache.
 */
static int netfs_unbuffered_read(struct netfs_io_request *rreq, bool sync)
{
	int ret;

	_enter("R=%x %llx-%llx",
	       rreq->debug_id, rreq->start, rreq->start + rreq->len - 1);

	if (rreq->len == 0) {
		pr_err("Zero-sized read [R=%x]\n", rreq->debug_id);
		return -EIO;
	}

	// TODO: Use bounce buffer if requested

	inode_dio_begin(rreq->inode);

	ret = netfs_dispatch_unbuffered_reads(rreq);

	if (!rreq->submitted) {
		netfs_put_request(rreq, false, netfs_rreq_trace_put_no_submit);
		inode_dio_end(rreq->inode);
		ret = 0;
		goto out;
	}

	if (sync) {
		trace_netfs_rreq(rreq, netfs_rreq_trace_wait_ip);
		wait_on_bit(&rreq->flags, NETFS_RREQ_IN_PROGRESS,
			    TASK_UNINTERRUPTIBLE);

		ret = rreq->error;
		if (ret == 0 && rreq->submitted < rreq->len &&
		    rreq->origin != NETFS_DIO_READ) {
			trace_netfs_failure(rreq, NULL, ret, netfs_fail_short_read);
			ret = -EIO;
		}
	} else {
		ret = -EIOCBQUEUED;
	}

out:
	_leave(" = %d", ret);
	return ret;
}
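
/* A note on the async path above: when sync is false, -EIOCBQUEUED is
 * returned immediately and completion is reported later through the stashed
 * kiocb (rreq->iocb) once the last subrequest terminates; only synchronous
 * callers block on NETFS_RREQ_IN_PROGRESS here and then pick up
 * rreq->transferred themselves.
 */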
/**
 * netfs_unbuffered_read_iter_locked - Perform an unbuffered or direct I/O read
 * @iocb: The I/O control descriptor describing the read
 * @iter: The output buffer (also specifies read length)
 *
 * Perform an unbuffered I/O or direct I/O from the file in @iocb to the
 * output buffer.  No use is made of the pagecache.
 *
 * The caller must hold any appropriate locks.
 */
ssize_t netfs_unbuffered_read_iter_locked(struct kiocb *iocb, struct iov_iter *iter)
{
	struct netfs_io_request *rreq;
	ssize_t ret;
	size_t orig_count = iov_iter_count(iter);
	bool sync = is_sync_kiocb(iocb);

	if (!orig_count)
		return 0; /* Don't update atime */

	/* Flush any dirty pagecache over the region so the read sees the most
	 * recent data.
	 */
	ret = kiocb_write_and_wait(iocb, orig_count);
	if (ret < 0)
		return ret;
	file_accessed(iocb->ki_filp);

	rreq = netfs_alloc_request(iocb->ki_filp->f_mapping, iocb->ki_filp,
				   iocb->ki_pos, orig_count,
				   NETFS_DIO_READ);
	if (IS_ERR(rreq))
		return PTR_ERR(rreq);

	netfs_stat(&netfs_n_rh_dio_read);
	trace_netfs_read(rreq, rreq->start, rreq->len, netfs_read_trace_dio_read);

	/* If this is an async op, we have to keep track of the destination
	 * buffer for ourselves as the caller's iterator will be trashed when
	 * we return.
	 *
	 * In such a case, extract an iterator to represent as much of the
	 * output buffer as we can manage.  Note that the extraction might not
	 * be able to allocate a sufficiently large bvec array and may shorten
	 * the request.
	 */
	if (user_backed_iter(iter)) {
		ret = netfs_extract_user_iter(iter, rreq->len, &rreq->iter, 0);
		if (ret < 0)
			goto out;
		rreq->direct_bv = (struct bio_vec *)rreq->iter.bvec;
		rreq->direct_bv_count = ret;
		rreq->direct_bv_unpin = iov_iter_extract_will_pin(iter);
		rreq->len = iov_iter_count(&rreq->iter);
	} else {
		rreq->iter = *iter;
		rreq->len = orig_count;
		rreq->direct_bv_unpin = false;
		iov_iter_advance(iter, orig_count);
	}

	// TODO: Set up bounce buffer if needed

	if (!sync)
		rreq->iocb = iocb;

	ret = netfs_unbuffered_read(rreq, sync);
	if (ret < 0)
		goto out; /* May be -EIOCBQUEUED */

	if (sync) {
		// TODO: Copy from bounce buffer
		iocb->ki_pos += rreq->transferred;
		ret = rreq->transferred;
	}

out:
	netfs_put_request(rreq, false, netfs_rreq_trace_put_return);
	return ret;
}
EXPORT_SYMBOL(netfs_unbuffered_read_iter_locked);
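
/* Illustrative use (not from this file): a filesystem that manages its own
 * inode locking for direct I/O might call the locked variant directly, e.g.:
 *
 *	inode_lock_shared(inode);
 *	ret = netfs_unbuffered_read_iter_locked(iocb, iter);
 *	inode_unlock_shared(inode);
 *
 * netfs_unbuffered_read_iter() below is the convenience form that takes the
 * lock itself via netfs_start_io_direct().
 */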
/**
 * netfs_unbuffered_read_iter - Perform an unbuffered or direct I/O read
 * @iocb: The I/O control descriptor describing the read
 * @iter: The output buffer (also specifies read length)
 *
 * Perform an unbuffered I/O or direct I/O from the file in @iocb to the
 * output buffer.  No use is made of the pagecache.
 */
ssize_t netfs_unbuffered_read_iter(struct kiocb *iocb, struct iov_iter *iter)
{
	struct inode *inode = file_inode(iocb->ki_filp);
	ssize_t ret;

	if (!iter->count)
		return 0; /* Don't update atime */

	ret = netfs_start_io_direct(inode);
	if (ret == 0) {
		ret = netfs_unbuffered_read_iter_locked(iocb, iter);
		netfs_end_io_direct(inode);
	}
	return ret;
}
EXPORT_SYMBOL(netfs_unbuffered_read_iter);
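
/* Illustrative caller (assumed, not part of this file): a netfs-based
 * filesystem's ->read_iter might route O_DIRECT reads here and fall back to
 * buffered reads otherwise; "myfs" is a hypothetical name:
 *
 *	static ssize_t myfs_file_read_iter(struct kiocb *iocb, struct iov_iter *iter)
 *	{
 *		if (iocb->ki_flags & IOCB_DIRECT)
 *			return netfs_unbuffered_read_iter(iocb, iter);
 *		return netfs_file_read_iter(iocb, iter);
 *	}
 */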