// SPDX-License-Identifier: GPL-2.0-only
/* Network filesystem read subrequest retrying.
 *
 * Copyright (C) 2024 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 */

#include <linux/fs.h>
#include <linux/slab.h>
#include "internal.h"
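
/*
 * Reissue a single read subrequest: prime the folio-queue cursor from the
 * iterator, bump the outstanding count, take a ref for the resubmission and
 * hand the subrequest back to the filesystem's ->issue_read() method.
 */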
static void netfs_reissue_read(struct netfs_io_request *rreq,
                               struct netfs_io_subrequest *subreq)
{
        struct iov_iter *io_iter = &subreq->io_iter;

        if (iov_iter_is_folioq(io_iter)) {
                subreq->curr_folioq = (struct folio_queue *)io_iter->folioq;
                subreq->curr_folioq_slot = io_iter->folioq_slot;
                subreq->curr_folio_order = subreq->curr_folioq->orders[subreq->curr_folioq_slot];
        }

        atomic_inc(&rreq->nr_outstanding);
        __set_bit(NETFS_SREQ_IN_PROGRESS, &subreq->flags);
        netfs_get_subrequest(subreq, netfs_sreq_trace_get_resubmit);
        subreq->rreq->netfs_ops->issue_read(subreq);
}
/*
 * Go through the list of failed/short reads, retrying all retryable ones.  We
 * need to switch failed cache reads to network downloads.
 */
static void netfs_retry_read_subrequests(struct netfs_io_request *rreq)
{
        struct netfs_io_subrequest *subreq;
        struct netfs_io_stream *stream0 = &rreq->io_streams[0];
        LIST_HEAD(sublist);
        LIST_HEAD(queue);
        _enter("R=%x", rreq->debug_id);

        if (list_empty(&rreq->subrequests))
                return;

        if (rreq->netfs_ops->retry_request)
                rreq->netfs_ops->retry_request(rreq, NULL);

        /* If there's no renegotiation to do, just resend each retryable subreq
         * up to the first permanently failed one.
         */
        if (!rreq->netfs_ops->prepare_read &&
            !test_bit(NETFS_RREQ_COPY_TO_CACHE, &rreq->flags)) {
                struct netfs_io_subrequest *subreq;

                list_for_each_entry(subreq, &rreq->subrequests, rreq_link) {
                        if (test_bit(NETFS_SREQ_FAILED, &subreq->flags))
                                break;
                        if (__test_and_clear_bit(NETFS_SREQ_NEED_RETRY, &subreq->flags)) {
                                netfs_reset_iter(subreq);
                                netfs_reissue_read(rreq, subreq);
                        }
                }
                return;
        }
        /* Okay, we need to renegotiate all the download requests and flip any
         * failed cache reads over to being download requests and negotiate
         * those also.  All fully successful subreqs have been removed from the
         * list and any spare data from those has been donated.
         *
         * What we do is decant the list and rebuild it one subreq at a time so
         * that we don't end up with donations jumping over a gap we're busy
         * populating with smaller subrequests.  In the event that the subreq
         * we just launched finishes before we insert the next subreq, it'll
         * fill in rreq->prev_donated instead.
         *
         * Note: Alternatively, we could split the tail subrequest right before
         * we reissue it and fix up the donations under lock.
         */
        list_splice_init(&rreq->subrequests, &queue);

        do {
                struct netfs_io_subrequest *from;
                struct iov_iter source;
                unsigned long long start, len;
                size_t part, deferred_next_donated = 0;
                bool boundary = false;

                /* Go through the subreqs and find the next span of contiguous
                 * buffer that we then rejig (cifs, for example, needs the
                 * rsize renegotiating) and reissue.
                 */
                from = list_first_entry(&queue, struct netfs_io_subrequest, rreq_link);
                list_move_tail(&from->rreq_link, &sublist);
                start = from->start + from->transferred;
                len   = from->len   - from->transferred;

                _debug("from R=%08x[%x] s=%llx ctl=%zx/%zx/%zx",
                       rreq->debug_id, from->debug_index,
                       from->start, from->consumed, from->transferred, from->len);
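
                /* If the lead subrequest failed outright or isn't marked for
                 * retry, we can't rebuild this span; hand everything that's
                 * left back to the request (see the abandon path below).
                 */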
                if (test_bit(NETFS_SREQ_FAILED, &from->flags) ||
                    !test_bit(NETFS_SREQ_NEED_RETRY, &from->flags))
                        goto abandon;

                deferred_next_donated = from->next_donated;
                while ((subreq = list_first_entry_or_null(
                                &queue, struct netfs_io_subrequest, rreq_link))) {
                        if (subreq->start != start + len ||
                            subreq->transferred > 0 ||
                            !test_bit(NETFS_SREQ_NEED_RETRY, &subreq->flags))
                                break;
                        list_move_tail(&subreq->rreq_link, &sublist);
                        len += subreq->len;
                        deferred_next_donated = subreq->next_donated;
                        if (test_bit(NETFS_SREQ_BOUNDARY, &subreq->flags))
                                break;
                }

                _debug(" - range: %llx-%llx %llx", start, start + len - 1, len);
                /* Determine the set of buffers we're going to use.  Each
                 * subreq gets a subset of a single overall contiguous buffer.
                 */
                netfs_reset_iter(from);
                source = from->io_iter;
                source.count = len;

                /* Work through the sublist. */
                while ((subreq = list_first_entry_or_null(
                                &sublist, struct netfs_io_subrequest, rreq_link))) {
                        list_del(&subreq->rreq_link);
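
                        /* Re-aim the subrequest at the remainder of the span,
                         * keeping any data already transferred at the front.
                         */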
                        subreq->source  = NETFS_DOWNLOAD_FROM_SERVER;
                        subreq->start   = start - subreq->transferred;
                        subreq->len     = len   + subreq->transferred;
                        stream0->sreq_max_len = subreq->len;

                        __clear_bit(NETFS_SREQ_NEED_RETRY, &subreq->flags);
                        __set_bit(NETFS_SREQ_RETRYING, &subreq->flags);
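
                        /* Reattach the subrequest to the request under lock
                         * and absorb any donation that landed in
                         * rreq->prev_donated while the list was being rebuilt.
                         */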
                        spin_lock_bh(&rreq->lock);
                        list_add_tail(&subreq->rreq_link, &rreq->subrequests);
                        subreq->prev_donated += rreq->prev_donated;
                        rreq->prev_donated = 0;
                        trace_netfs_sreq(subreq, netfs_sreq_trace_retry);
                        spin_unlock_bh(&rreq->lock);

                        BUG_ON(!len);

                        /* Renegotiate max_len (rsize) */
                        if (rreq->netfs_ops->prepare_read(subreq) < 0) {
                                trace_netfs_sreq(subreq, netfs_sreq_trace_reprep_failed);
                                __set_bit(NETFS_SREQ_FAILED, &subreq->flags);
                        }
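
                        /* Clip the subrequest to the renegotiated rsize and,
                         * if the transport caps the number of segments, to
                         * what the iterator can carry in one issue.
                         */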
                        part = umin(len, stream0->sreq_max_len);
                        if (unlikely(rreq->io_streams[0].sreq_max_segs))
                                part = netfs_limit_iter(&source, 0, part, stream0->sreq_max_segs);
                        subreq->len = subreq->transferred + part;
                        subreq->io_iter = source;
                        iov_iter_truncate(&subreq->io_iter, part);
                        iov_iter_advance(&source, part);
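
                        /* Consume this piece of the span.  If nothing remains,
                         * this final subrequest inherits the boundary flag and
                         * the deferred donation; interior pieces must not.
                         */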
                        len -= part;
                        start += part;
                        if (!len) {
                                if (boundary)
                                        __set_bit(NETFS_SREQ_BOUNDARY, &subreq->flags);
                                subreq->next_donated = deferred_next_donated;
                        } else {
                                __clear_bit(NETFS_SREQ_BOUNDARY, &subreq->flags);
                                subreq->next_donated = 0;
                        }

                        netfs_reissue_read(rreq, subreq);
                        if (!len)
                                break;
                        /* If we ran out of subrequests, allocate another. */
                        if (list_empty(&sublist)) {
                                subreq = netfs_alloc_subrequest(rreq);
                                if (!subreq)
                                        goto abandon;
                                subreq->source = NETFS_DOWNLOAD_FROM_SERVER;
                                subreq->start = start;

                                /* We get two refs, but need just one. */
                                netfs_put_subrequest(subreq, false, netfs_sreq_trace_new);
                                trace_netfs_sreq(subreq, netfs_sreq_trace_split);
                                list_add_tail(&subreq->rreq_link, &sublist);
                        }
                }
                /* If we managed to use fewer subreqs, we can discard the
                 * excess.
                 */
                while ((subreq = list_first_entry_or_null(
                                &sublist, struct netfs_io_subrequest, rreq_link))) {
                        trace_netfs_sreq(subreq, netfs_sreq_trace_discard);
                        list_del(&subreq->rreq_link);
                        netfs_put_subrequest(subreq, false, netfs_sreq_trace_put_done);
                }
        } while (!list_empty(&queue));

        return;
        /* If we hit ENOMEM, fail all remaining subrequests */
abandon:
        list_splice_init(&sublist, &queue);
        list_for_each_entry(subreq, &queue, rreq_link) {
                if (!subreq->error)
                        subreq->error = -ENOMEM;
                __clear_bit(NETFS_SREQ_FAILED, &subreq->flags);
                __clear_bit(NETFS_SREQ_NEED_RETRY, &subreq->flags);
                __clear_bit(NETFS_SREQ_RETRYING, &subreq->flags);
        }
        spin_lock_bh(&rreq->lock);
        list_splice_tail_init(&queue, &rreq->subrequests);
        spin_unlock_bh(&rreq->lock);
}
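
/*
 * Retry any failed or short reads.  The nr_outstanding count is raised across
 * the resubmission so that the request can't be deemed complete until every
 * reissued subrequest has been accounted for.
 */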
void netfs_retry_reads(struct netfs_io_request *rreq)
{
        trace_netfs_rreq(rreq, netfs_rreq_trace_resubmit);

        atomic_inc(&rreq->nr_outstanding);

        netfs_retry_read_subrequests(rreq);

        if (atomic_dec_and_test(&rreq->nr_outstanding))
                netfs_rreq_terminated(rreq, false);
}
/*
 * Unlock any of the pages that haven't been unlocked yet due to abandoned
 * subrequests.
 */
void netfs_unlock_abandoned_read_pages(struct netfs_io_request *rreq)
{
        struct folio_queue *p;

        for (p = rreq->buffer; p; p = p->next) {
                for (int slot = 0; slot < folioq_count(p); slot++) {
                        struct folio *folio = folioq_folio(p, slot);

                        if (folio && !folioq_is_marked2(p, slot)) {
                                trace_netfs_folio(folio, netfs_folio_trace_abandon);
                                folio_unlock(folio);
                        }
                }
        }
}