1 // SPDX-License-Identifier: GPL-2.0-only
2 /* Network filesystem write retrying.
4 * Copyright (C) 2024 Red Hat, Inc. All Rights Reserved.
5 * Written by David Howells (dhowells@redhat.com)
*/
10 #include <linux/pagemap.h>
11 #include <linux/slab.h>
/*
15 * Perform retries on the streams that need it.
*/
/*
 * Retry the subrequests of one write stream, reissuing those that were
 * marked for retry.  Two paths are visible below: a fast path that simply
 * resends each retryable subrequest when the stream has no ->prepare_write
 * hook, and a slower path that coalesces contiguous retryable spans,
 * renegotiates the write size via ->prepare_write() and reissues them,
 * allocating extra subrequests when the existing set runs out.
 *
 * NOTE(review): the embedded original line numbers in this extract jump
 * (e.g. 46 -> 52, 186 -> 192), so loop headers, braces, break/continue
 * statements and some length/pointer bookkeeping (e.g. the declaration of
 * "part" and the advancing of "to"/"len") are missing from this view.
 * Comments below describe only what is actually visible; confirm anything
 * hedged against the original file.
 */
17 static void netfs_retry_write_stream(struct netfs_io_request
*wreq
,
18 struct netfs_io_stream
*stream
)
20 struct list_head
*next
;
/* Trace entry with the request debug ID and the stream number. */
22 _enter("R=%x[%x:]", wreq
->debug_id
, stream
->stream_nr
)
;
/* Guard: nothing queued on this stream (the early-out body itself is not
 * visible in this extract).
 */
24 if (list_empty(&stream
->subrequests
))
/* Give the filesystem a chance to adjust its state before an upload is
 * retried, if it provides a ->retry_request() hook.
 */
27 if (stream
->source
== NETFS_UPLOAD_TO_SERVER
&&
28 wreq
->netfs_ops
->retry_request
)
29 wreq
->netfs_ops
->retry_request(wreq
, stream
)
;
/* A stream already marked failed is not retried (branch body not visible
 * here).
 */
31 if (unlikely(stream
->failed
))
34 /* If there's no renegotiation to do, just resend each failed subreq. */
35 if (!stream
->prepare_write
) {
36 struct netfs_io_subrequest
*subreq
;
38 list_for_each_entry(subreq
, &stream
->subrequests
, rreq_link
) {
/* Hard-failed subreqs are treated separately from NEED_RETRY ones; the
 * FAILED branch body is not visible in this extract.
 */
39 if (test_bit(NETFS_SREQ_FAILED
, &subreq
->flags
))
41 if (__test_and_clear_bit(NETFS_SREQ_NEED_RETRY
, &subreq
->flags
)) {
/* Rebuild the iterator over the unsent tail: copy the subreq's iterator
 * and wind it back by the amount not yet consumed.
 */
42 struct iov_iter source
= subreq
->io_iter
;
44 iov_iter_revert(&source
, subreq
->len
- source
.count
)
;
/* Take a ref for the resubmission, then send the subreq again. */
45 netfs_get_subrequest(subreq
, netfs_sreq_trace_get_resubmit
)
;
46 netfs_reissue_write(stream
, subreq
, &source
)
;
/* Slow path: walk the subrequest list from the front, coalescing
 * contiguous retryable spans.  (The enclosing loop header is missing
 * from this extract; its matching "} while" footer is at the bottom.)
 */
52 next
= stream
->subrequests
.next
;
55 struct netfs_io_subrequest
*subreq
= NULL
, *from
, *to
, *tmp
;
56 struct iov_iter source
;
57 unsigned long long start
, len
;
59 bool boundary
= false;
61 /* Go through the stream and find the next span of contiguous
62 * data that we then rejig (cifs, for example, needs the wsize
63 * renegotiating) and reissue.
*/
65 from
= list_entry(next
, struct netfs_io_subrequest
, rreq_link
)
;
/* The span starts at the first unsent byte of "from" and initially
 * covers its untransferred remainder.
 */
67 start
= from
->start
+ from
->transferred
;
68 len
= from
->len
- from
->transferred
;
/* Only spans beginning with a NEED_RETRY (and not FAILED) subreq get
 * rejigged; the skip body is not visible here.
 */
70 if (test_bit(NETFS_SREQ_FAILED
, &from
->flags
) ||
71 !test_bit(NETFS_SREQ_NEED_RETRY
, &from
->flags
))
/* Extend the span over following subreqs while they are byte-contiguous,
 * not boundary-marked, and also need retrying.  (The statements that
 * accumulate "len" and advance "to" are missing from this extract.)
 */
74 list_for_each_continue(next
, &stream
->subrequests
) {
75 subreq
= list_entry(next
, struct netfs_io_subrequest
, rreq_link
)
;
76 if (subreq
->start
+ subreq
->transferred
!= start
+ len
||
77 test_bit(NETFS_SREQ_BOUNDARY
, &subreq
->flags
) ||
78 !test_bit(NETFS_SREQ_NEED_RETRY
, &subreq
->flags
))
84 /* Determine the set of buffers we're going to use. Each
85 * subreq gets a subset of a single overall contiguous buffer.
*/
87 netfs_reset_iter(from
)
;
88 source
= from
->io_iter
;
91 /* Work through the sublist. */
93 list_for_each_entry_from(subreq
, &stream
->subrequests
, rreq_link
) {
/* Re-point each existing subreq at the current span offset and mark it
 * as being retried.
 */
97 subreq
->start
= start
;
99 __clear_bit(NETFS_SREQ_NEED_RETRY
, &subreq
->flags
)
;
100 subreq
->retry_count
++;
101 trace_netfs_sreq(subreq
, netfs_sreq_trace_retry
)
;
103 /* Renegotiate max_len (wsize) */
104 stream
->sreq_max_len
= len
;
105 stream
->prepare_write(subreq
)
;
/* Clamp this subreq's part to the renegotiated size, and to the backend's
 * segment limit if one is set.  NOTE(review): "part" is used here but its
 * declaration is on a line missing from this extract.
 */
107 part
= umin(len
, stream
->sreq_max_len
)
;
108 if (unlikely(stream
->sreq_max_segs
))
109 part
= netfs_limit_iter(&source
, 0, part
, stream
->sreq_max_segs
)
;
111 subreq
->transferred
= 0;
/* If the span is not yet exhausted at the old terminal subreq, take over
 * its boundary flag (the re-set on a later subreq is not visible here).
 */
114 if (len
&& subreq
== to
&&
115 __test_and_clear_bit(NETFS_SREQ_BOUNDARY
, &to
->flags
))
/* Take a ref for the resubmission and reissue the rejigged subreq. */
118 netfs_get_subrequest(subreq
, netfs_sreq_trace_get_resubmit
)
;
119 netfs_reissue_write(stream
, subreq
, &source
)
;
124 /* If we managed to use fewer subreqs, we can discard the
125 * excess; if we used the same number, then we're done.
*/
130 list_for_each_entry_safe_from(subreq
, tmp
,
131 &stream
->subrequests
, rreq_link
) {
132 trace_netfs_sreq(subreq
, netfs_sreq_trace_discard
)
;
133 list_del(&subreq
->rreq_link
)
;
134 netfs_put_subrequest(subreq
, false, netfs_sreq_trace_put_done
)
;
141 /* We ran out of subrequests, so we need to allocate some more
142 * and insert them after.
*/
145 subreq
= netfs_alloc_subrequest(wreq
)
;
/* NOTE(review): no NULL check on the allocation appears in this extract;
 * confirm netfs_alloc_subrequest()'s failure behaviour in the original.
 */
146 subreq
->source
= to
->source
;
147 subreq
->start
= start
;
148 subreq
->debug_index
= atomic_inc_return(&wreq
->subreq_counter
)
;
149 subreq
->stream_nr
= to
->stream_nr
;
150 subreq
->retry_count
= 1;
152 trace_netfs_sreq_ref(wreq
->debug_id
, subreq
->debug_index
,
153 refcount_read(&subreq
->ref
),
154 netfs_sreq_trace_new
)
;
155 netfs_get_subrequest(subreq
, netfs_sreq_trace_get_resubmit
)
;
/* Link the new subreq in after "to" and step "to" forward to it. */
157 list_add(&subreq
->rreq_link
, &to
->rreq_link
)
;
158 to
= list_next_entry(to
, rreq_link
)
;
159 trace_netfs_sreq(subreq
, netfs_sreq_trace_retry
)
;
/* Renegotiate the size limits for the freshly allocated subreq. */
161 stream
->sreq_max_len
= len
;
162 stream
->sreq_max_segs
= INT_MAX
;
163 switch (stream
->source
) {
164 case NETFS_UPLOAD_TO_SERVER
:
165 netfs_stat(&netfs_n_wh_upload
)
;
166 stream
->sreq_max_len
= umin(len
, wreq
->wsize
)
;
/* NOTE(review): no "break" is visible before the next case in this
 * extract (the intervening original line is missing) — confirm against
 * the original whether fallthrough is intended.
 */
168 case NETFS_WRITE_TO_CACHE
:
169 netfs_stat(&netfs_n_wh_write
)
;
175 stream
->prepare_write(subreq
)
;
177 part
= umin(len
, stream
->sreq_max_len
)
;
178 subreq
->len
= subreq
->transferred
+ part
;
/* Once the whole span is consumed, restore the span-terminating boundary
 * flag that was captured earlier.
 */
181 if (!len
&& boundary
) {
182 __set_bit(NETFS_SREQ_BOUNDARY
, &to
->flags
)
;
186 netfs_reissue_write(stream
, subreq
, &source
)
;
/* Keep going until the whole subrequest list has been processed. */
192 } while (!list_is_head(next
, &stream
->subrequests
))
;
/*
196 * Perform retries on the streams that need it. If we're doing content
197 * encryption and the server copy changed due to a third-party write, we may
198 * need to do an RMW cycle and also rewrite the data to the cache.
*/
200 void netfs_retry_writes(struct netfs_io_request
*wreq
)
202 struct netfs_io_subrequest
*subreq
;
203 struct netfs_io_stream
*stream
;
206 /* Wait for all outstanding I/O to quiesce before performing retries as
207 * we may need to renegotiate the I/O sizes.
209 for (s
= 0; s
< NR_IO_STREAMS
; s
++) {
210 stream
= &wreq
->io_streams
[s
];
214 list_for_each_entry(subreq
, &stream
->subrequests
, rreq_link
) {
215 wait_on_bit(&subreq
->flags
, NETFS_SREQ_IN_PROGRESS
,
216 TASK_UNINTERRUPTIBLE
);
220 // TODO: Enc: Fetch changed partial pages
221 // TODO: Enc: Reencrypt content if needed.
222 // TODO: Enc: Wind back transferred point.
223 // TODO: Enc: Mark cache pages for retry.
225 for (s
= 0; s
< NR_IO_STREAMS
; s
++) {
226 stream
= &wreq
->io_streams
[s
];
227 if (stream
->need_retry
) {
228 stream
->need_retry
= false;
229 netfs_retry_write_stream(wreq
, stream
);