// SPDX-License-Identifier: GPL-2.0-only
/* Network filesystem write subrequest result collection, assessment
 * and retrying.
 *
 * Copyright (C) 2024 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 */

#include <linux/export.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/slab.h>
#include "internal.h"

/* Notes made in the collector */
#define HIT_PENDING		0x01	/* A front op was still pending */
#define NEED_REASSESS		0x02	/* Need to loop round and reassess */
#define MADE_PROGRESS		0x04	/* Made progress cleaning up a stream or the folio set */
#define BUFFERED		0x08	/* The pagecache needs cleaning up */
#define NEED_RETRY		0x10	/* A front op requests retrying */
#define SAW_FAILURE		0x20	/* A stream hit a permanent failure */
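
/*
 * These notes are accumulated locally during one pass of
 * netfs_collect_write_results() and decide what happens next: whether
 * pagecache folios can be unlocked (BUFFERED), whether the streams need
 * another assessment pass, whether retries must be issued and whether a
 * paused issuer can be unpaused.
 */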

/*
 * Successful completion of write of a folio to the server and/or cache.  Note
 * that we are not allowed to lock the folio here on pain of deadlocking with
 * truncate.
 */
int netfs_folio_written_back(struct folio *folio)
{
	enum netfs_folio_trace why = netfs_folio_trace_clear;
	struct netfs_inode *ictx = netfs_inode(folio->mapping->host);
	struct netfs_folio *finfo;
	struct netfs_group *group = NULL;
	int gcount = 0;

	if ((finfo = netfs_folio_info(folio))) {
		/* Streaming writes cannot be redirtied whilst under writeback,
		 * so discard the streaming record.
		 */
		unsigned long long fend;

		fend = folio_pos(folio) + finfo->dirty_offset + finfo->dirty_len;
		if (fend > ictx->zero_point)
			ictx->zero_point = fend;

		folio_detach_private(folio);
		group = finfo->netfs_group;
		gcount++;
		kfree(finfo);
		why = netfs_folio_trace_clear_s;
		goto end_wb;
	}

	if ((group = netfs_folio_group(folio))) {
		if (group == NETFS_FOLIO_COPY_TO_CACHE) {
			why = netfs_folio_trace_clear_cc;
			folio_detach_private(folio);
			goto end_wb;
		}

		/* Need to detach the group pointer if the page didn't get
		 * redirtied.  If it has been redirtied, then it must be within
		 * the same group.
		 */
		why = netfs_folio_trace_redirtied;
		if (!folio_test_dirty(folio)) {
			folio_detach_private(folio);
			gcount++;
			why = netfs_folio_trace_clear_g;
		}
	}

end_wb:
	trace_netfs_folio(folio, why);
	folio_end_writeback(folio);
	return gcount;
}

/*
 * Unlock any folios we've finished with.
 */
static void netfs_writeback_unlock_folios(struct netfs_io_request *wreq,
					  unsigned int *notes)
{
	struct folio_queue *folioq = wreq->buffer;
	unsigned long long collected_to = wreq->collected_to;
	unsigned int slot = wreq->buffer_head_slot;

	if (wreq->origin == NETFS_PGPRIV2_COPY_TO_CACHE) {
		if (netfs_pgpriv2_unlock_copied_folios(wreq))
			*notes |= MADE_PROGRESS;
		return;
	}

	if (slot >= folioq_nr_slots(folioq)) {
		folioq = netfs_delete_buffer_head(wreq);
		slot = 0;
	}

	for (;;) {
		struct folio *folio;
		struct netfs_folio *finfo;
		unsigned long long fpos, fend;
		size_t fsize, flen;

		folio = folioq_folio(folioq, slot);
		if (WARN_ONCE(!folio_test_writeback(folio),
			      "R=%08x: folio %lx is not under writeback\n",
			      wreq->debug_id, folio->index))
			trace_netfs_folio(folio, netfs_folio_trace_not_under_wback);

		fpos = folio_pos(folio);
		fsize = folio_size(folio);
		finfo = netfs_folio_info(folio);
		flen = finfo ? finfo->dirty_offset + finfo->dirty_len : fsize;

		fend = min_t(unsigned long long, fpos + flen, wreq->i_size);

		trace_netfs_collect_folio(wreq, folio, fend, collected_to);

		/* Unlock any folio we've transferred all of. */
		if (collected_to < fend)
			break;

		wreq->nr_group_rel += netfs_folio_written_back(folio);
		wreq->cleaned_to = fpos + fsize;
		*notes |= MADE_PROGRESS;

		/* Clean up the head folioq.  If we clear an entire folioq, then
		 * we can get rid of it provided it's not also the tail folioq
		 * being filled by the issuer.
		 */
		folioq_clear(folioq, slot);
		slot++;
		if (slot >= folioq_nr_slots(folioq)) {
			if (READ_ONCE(wreq->buffer_tail) == folioq)
				break;
			folioq = netfs_delete_buffer_head(wreq);
			slot = 0;
		}

		if (fpos + fsize >= collected_to)
			break;
	}

	wreq->buffer = folioq;
	wreq->buffer_head_slot = slot;
}

/*
 * Perform retries on the streams that need it.
 */
static void netfs_retry_write_stream(struct netfs_io_request *wreq,
				     struct netfs_io_stream *stream)
{
	struct list_head *next;

	_enter("R=%x[%x:]", wreq->debug_id, stream->stream_nr);

	if (list_empty(&stream->subrequests))
		return;

	if (stream->source == NETFS_UPLOAD_TO_SERVER &&
	    wreq->netfs_ops->retry_request)
		wreq->netfs_ops->retry_request(wreq, stream);

	if (unlikely(stream->failed))
		return;

	/* If there's no renegotiation to do, just resend each failed subreq. */
	if (!stream->prepare_write) {
		struct netfs_io_subrequest *subreq;

		list_for_each_entry(subreq, &stream->subrequests, rreq_link) {
			if (test_bit(NETFS_SREQ_FAILED, &subreq->flags))
				break;
			if (__test_and_clear_bit(NETFS_SREQ_NEED_RETRY, &subreq->flags)) {
				struct iov_iter source = subreq->io_iter;

				iov_iter_revert(&source, subreq->len - source.count);
				__set_bit(NETFS_SREQ_RETRYING, &subreq->flags);
				netfs_get_subrequest(subreq, netfs_sreq_trace_get_resubmit);
				netfs_reissue_write(stream, subreq, &source);
			}
		}
		return;
	}

	next = stream->subrequests.next;

	do {
		struct netfs_io_subrequest *subreq = NULL, *from, *to, *tmp;
		struct iov_iter source;
		unsigned long long start, len;
		size_t part;
		bool boundary = false;

		/* Go through the stream and find the next span of contiguous
		 * data that we then rejig (cifs, for example, needs the wsize
		 * renegotiating) and reissue.
		 */
		from = list_entry(next, struct netfs_io_subrequest, rreq_link);
		to = from;
		start = from->start + from->transferred;
		len   = from->len   - from->transferred;

		if (test_bit(NETFS_SREQ_FAILED, &from->flags) ||
		    !test_bit(NETFS_SREQ_NEED_RETRY, &from->flags))
			return;

		list_for_each_continue(next, &stream->subrequests) {
			subreq = list_entry(next, struct netfs_io_subrequest, rreq_link);
			if (subreq->start + subreq->transferred != start + len ||
			    test_bit(NETFS_SREQ_BOUNDARY, &subreq->flags) ||
			    !test_bit(NETFS_SREQ_NEED_RETRY, &subreq->flags))
				break;
			to = subreq;
			len += to->len;
		}

		/* Determine the set of buffers we're going to use.  Each
		 * subreq gets a subset of a single overall contiguous buffer.
		 */
		netfs_reset_iter(from);
		source = from->io_iter;
		source.count = len;

		/* Work through the sublist. */
		subreq = from;
		list_for_each_entry_from(subreq, &stream->subrequests, rreq_link) {
			if (!len)
				break;
			/* Renegotiate max_len (wsize) */
			trace_netfs_sreq(subreq, netfs_sreq_trace_retry);
			__clear_bit(NETFS_SREQ_NEED_RETRY, &subreq->flags);
			__set_bit(NETFS_SREQ_RETRYING, &subreq->flags);
			stream->prepare_write(subreq);

			part = min(len, stream->sreq_max_len);
			subreq->len = part;
			subreq->start = start;
			subreq->transferred = 0;
			len -= part;
			start += part;
			if (len && subreq == to &&
			    __test_and_clear_bit(NETFS_SREQ_BOUNDARY, &to->flags))
				boundary = true;

			netfs_get_subrequest(subreq, netfs_sreq_trace_get_resubmit);
			netfs_reissue_write(stream, subreq, &source);
			if (subreq == to)
				break;
		}

		/* If we managed to use fewer subreqs, we can discard the
		 * excess; if we used the same number, then we're done.
		 */
		if (!len) {
			if (subreq == to)
				continue;
			list_for_each_entry_safe_from(subreq, tmp,
						      &stream->subrequests, rreq_link) {
				trace_netfs_sreq(subreq, netfs_sreq_trace_discard);
				list_del(&subreq->rreq_link);
				netfs_put_subrequest(subreq, false, netfs_sreq_trace_put_done);
				if (subreq == to)
					break;
			}
			continue;
		}

		/* We ran out of subrequests, so we need to allocate some more
		 * and insert them after.
		 */
		do {
			subreq = netfs_alloc_subrequest(wreq);
			subreq->source		= to->source;
			subreq->start		= start;
			subreq->debug_index	= atomic_inc_return(&wreq->subreq_counter);
			subreq->stream_nr	= to->stream_nr;
			__set_bit(NETFS_SREQ_RETRYING, &subreq->flags);

			trace_netfs_sreq_ref(wreq->debug_id, subreq->debug_index,
					     refcount_read(&subreq->ref),
					     netfs_sreq_trace_new);
			netfs_get_subrequest(subreq, netfs_sreq_trace_get_resubmit);

			list_add(&subreq->rreq_link, &to->rreq_link);
			to = list_next_entry(to, rreq_link);
			trace_netfs_sreq(subreq, netfs_sreq_trace_retry);

			stream->sreq_max_len	= len;
			stream->sreq_max_segs	= INT_MAX;
			switch (stream->source) {
			case NETFS_UPLOAD_TO_SERVER:
				netfs_stat(&netfs_n_wh_upload);
				stream->sreq_max_len = umin(len, wreq->wsize);
				break;
			case NETFS_WRITE_TO_CACHE:
				netfs_stat(&netfs_n_wh_write);
				break;
			default:
				WARN_ON_ONCE(1);
			}

			stream->prepare_write(subreq);

			part = umin(len, stream->sreq_max_len);
			subreq->len = subreq->transferred + part;
			len -= part;
			start += part;
			if (!len && boundary) {
				__set_bit(NETFS_SREQ_BOUNDARY, &to->flags);
				boundary = false;
			}

			netfs_reissue_write(stream, subreq, &source);
		} while (len);

	} while (!list_is_head(next, &stream->subrequests));
}
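
/*
 * The retry path above works in two modes: if the backend supplies no
 * ->prepare_write(), failed-but-retryable subrequests are simply reissued
 * as-is; otherwise contiguous retryable spans are coalesced and recut to the
 * renegotiated wsize, reusing the existing subrequests first and allocating
 * more only if the span no longer fits in them.
 */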

/*
 * Perform retries on the streams that need it.  If we're doing content
 * encryption and the server copy changed due to a third-party write, we may
 * need to do an RMW cycle and also rewrite the data to the cache.
 */
static void netfs_retry_writes(struct netfs_io_request *wreq)
{
	struct netfs_io_subrequest *subreq;
	struct netfs_io_stream *stream;
	int s;

	/* Wait for all outstanding I/O to quiesce before performing retries as
	 * we may need to renegotiate the I/O sizes.
	 */
	for (s = 0; s < NR_IO_STREAMS; s++) {
		stream = &wreq->io_streams[s];
		if (!stream->active)
			continue;

		list_for_each_entry(subreq, &stream->subrequests, rreq_link) {
			wait_on_bit(&subreq->flags, NETFS_SREQ_IN_PROGRESS,
				    TASK_UNINTERRUPTIBLE);
		}
	}

	// TODO: Enc: Fetch changed partial pages
	// TODO: Enc: Reencrypt content if needed.
	// TODO: Enc: Wind back transferred point.
	// TODO: Enc: Mark cache pages for retry.

	for (s = 0; s < NR_IO_STREAMS; s++) {
		stream = &wreq->io_streams[s];
		if (stream->need_retry) {
			stream->need_retry = false;
			netfs_retry_write_stream(wreq, stream);
		}
	}
}
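
/*
 * Note that the quiescing wait above matters: retrying rewrites subrequest
 * start/len fields and renegotiates the I/O sizes, which must not race with
 * subrequests that are still in flight.
 */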

/*
 * Collect and assess the results of various write subrequests.  We may need to
 * retry some of the results - or even do an RMW cycle for content crypto.
 *
 * Note that we have a number of parallel, overlapping lists of subrequests,
 * one to the server and one to the local cache for example, which may not be
 * the same size or starting position and may not even correspond in boundary
 * alignment.
 */
static void netfs_collect_write_results(struct netfs_io_request *wreq)
{
	struct netfs_io_subrequest *front, *remove;
	struct netfs_io_stream *stream;
	unsigned long long collected_to, issued_to;
	unsigned int notes;
	int s;

	_enter("%llx-%llx", wreq->start, wreq->start + wreq->len);
	trace_netfs_collect(wreq);
	trace_netfs_rreq(wreq, netfs_rreq_trace_collect);

reassess_streams:
	issued_to = atomic64_read(&wreq->issued_to);
	smp_rmb();
	collected_to = ULLONG_MAX;
	if (wreq->origin == NETFS_WRITEBACK ||
	    wreq->origin == NETFS_WRITETHROUGH ||
	    wreq->origin == NETFS_PGPRIV2_COPY_TO_CACHE)
		notes = BUFFERED;
	else
		notes = 0;

	/* Remove completed subrequests from the front of the streams and
	 * advance the completion point on each stream.  We stop when we hit
	 * something that's in progress.  The issuer thread may be adding stuff
	 * to the tail whilst we're doing this.
	 */
	for (s = 0; s < NR_IO_STREAMS; s++) {
		stream = &wreq->io_streams[s];
		/* Read active flag before list pointers */
		if (!smp_load_acquire(&stream->active))
			continue;

		front = stream->front;
		while (front) {
			trace_netfs_collect_sreq(wreq, front);
			//_debug("sreq [%x] %llx %zx/%zx",
			//       front->debug_index, front->start, front->transferred, front->len);

			if (stream->collected_to < front->start) {
				trace_netfs_collect_gap(wreq, stream, issued_to, 'F');
				stream->collected_to = front->start;
			}

			/* Stall if the front is still undergoing I/O. */
			if (test_bit(NETFS_SREQ_IN_PROGRESS, &front->flags)) {
				notes |= HIT_PENDING;
				break;
			}
			smp_rmb(); /* Read counters after I-P flag. */

			if (stream->failed) {
				stream->collected_to = front->start + front->len;
				notes |= MADE_PROGRESS | SAW_FAILURE;
				goto cancel;
			}
			if (front->start + front->transferred > stream->collected_to) {
				stream->collected_to = front->start + front->transferred;
				stream->transferred = stream->collected_to - wreq->start;
				notes |= MADE_PROGRESS;
			}
			if (test_bit(NETFS_SREQ_FAILED, &front->flags)) {
				stream->failed = true;
				stream->error = front->error;
				if (stream->source == NETFS_UPLOAD_TO_SERVER)
					mapping_set_error(wreq->mapping, front->error);
				notes |= NEED_REASSESS | SAW_FAILURE;
				break;
			}
			if (front->transferred < front->len) {
				stream->need_retry = true;
				notes |= NEED_RETRY | MADE_PROGRESS;
				break;
			}

		cancel:
			/* Remove if completely consumed. */
			spin_lock_bh(&wreq->lock);

			remove = front;
			list_del_init(&front->rreq_link);
			front = list_first_entry_or_null(&stream->subrequests,
							 struct netfs_io_subrequest, rreq_link);
			stream->front = front;
			spin_unlock_bh(&wreq->lock);
			netfs_put_subrequest(remove, false,
					     notes & SAW_FAILURE ?
					     netfs_sreq_trace_put_cancel :
					     netfs_sreq_trace_put_done);
		}

		/* If we have an empty stream, we need to jump it forward
		 * otherwise the collection point will never advance.
		 */
		if (!front && issued_to > stream->collected_to) {
			trace_netfs_collect_gap(wreq, stream, issued_to, 'E');
			stream->collected_to = issued_to;
		}

		if (stream->collected_to < collected_to)
			collected_to = stream->collected_to;
	}

	if (collected_to != ULLONG_MAX && collected_to > wreq->collected_to)
		wreq->collected_to = collected_to;

	for (s = 0; s < NR_IO_STREAMS; s++) {
		stream = &wreq->io_streams[s];
		if (stream->active)
			trace_netfs_collect_stream(wreq, stream);
	}

	trace_netfs_collect_state(wreq, wreq->collected_to, notes);

	/* Unlock any folios that we have now finished with. */
	if (notes & BUFFERED) {
		if (wreq->cleaned_to < wreq->collected_to)
			netfs_writeback_unlock_folios(wreq, &notes);
	} else {
		wreq->cleaned_to = wreq->collected_to;
	}

	// TODO: Discard encryption buffers

	if (notes & NEED_RETRY)
		goto need_retry;
	if ((notes & MADE_PROGRESS) && test_bit(NETFS_RREQ_PAUSE, &wreq->flags)) {
		trace_netfs_rreq(wreq, netfs_rreq_trace_unpause);
		clear_bit_unlock(NETFS_RREQ_PAUSE, &wreq->flags);
		wake_up_bit(&wreq->flags, NETFS_RREQ_PAUSE);
	}

	if (notes & NEED_REASSESS) {
		//cond_resched();
		goto reassess_streams;
	}
	if (notes & MADE_PROGRESS) {
		//cond_resched();
		goto reassess_streams;
	}

out:
	netfs_put_group_many(wreq->group, wreq->nr_group_rel);
	wreq->nr_group_rel = 0;
	_leave(" = %x", notes);
	return;

need_retry:
	/* Okay...  We're going to have to retry one or both streams.  Note
	 * that any partially completed op will have had any wholly transferred
	 * folios removed from it.
	 */
	netfs_retry_writes(wreq);
	goto out;
}
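
/*
 * In effect, the collector above repeatedly advances wreq->collected_to to
 * the lowest point reached by all active streams, unlocks any pagecache
 * folios wholly below that point, and loops (via reassess_streams) until it
 * either stalls on a still-in-progress front subrequest or runs out of
 * progress to make.
 */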

/*
 * Perform the collection of subrequests, folios and encryption buffers.
 */
void netfs_write_collection_worker(struct work_struct *work)
{
	struct netfs_io_request *wreq = container_of(work, struct netfs_io_request, work);
	struct netfs_inode *ictx = netfs_inode(wreq->inode);
	size_t transferred;
	int s;

	_enter("R=%x", wreq->debug_id);

	netfs_see_request(wreq, netfs_rreq_trace_see_work);
	if (!test_bit(NETFS_RREQ_IN_PROGRESS, &wreq->flags)) {
		netfs_put_request(wreq, false, netfs_rreq_trace_put_work);
		return;
	}

	netfs_collect_write_results(wreq);

	/* We're done when the app thread has finished posting subreqs and all
	 * the queues in all the streams are empty.
	 */
	if (!test_bit(NETFS_RREQ_ALL_QUEUED, &wreq->flags)) {
		netfs_put_request(wreq, false, netfs_rreq_trace_put_work);
		return;
	}
	smp_rmb(); /* Read ALL_QUEUED before lists. */

	transferred = LONG_MAX;
	for (s = 0; s < NR_IO_STREAMS; s++) {
		struct netfs_io_stream *stream = &wreq->io_streams[s];
		if (!stream->active)
			continue;
		if (!list_empty(&stream->subrequests)) {
			netfs_put_request(wreq, false, netfs_rreq_trace_put_work);
			return;
		}
		if (stream->transferred < transferred)
			transferred = stream->transferred;
	}

	/* Okay, declare that all I/O is complete. */
	wreq->transferred = transferred;
	trace_netfs_rreq(wreq, netfs_rreq_trace_write_done);

	if (wreq->io_streams[1].active &&
	    wreq->io_streams[1].failed) {
		/* Cache write failure doesn't prevent writeback completion
		 * unless we're in disconnected mode.
		 */
		ictx->ops->invalidate_cache(wreq);
	}

	if (wreq->origin == NETFS_DIO_WRITE &&
	    wreq->mapping->nrpages) {
		/* mmap may have got underfoot and we may now have folios
		 * locally covering the region we just wrote.  Attempt to
		 * discard the folios, but leave in place any modified locally.
		 * ->write_iter() is prevented from interfering by the DIO
		 * counter.
		 */
		pgoff_t first = wreq->start >> PAGE_SHIFT;
		pgoff_t last = (wreq->start + wreq->transferred - 1) >> PAGE_SHIFT;
		invalidate_inode_pages2_range(wreq->mapping, first, last);
	}

	if (wreq->origin == NETFS_DIO_WRITE)
		inode_dio_end(wreq->inode);

	trace_netfs_rreq(wreq, netfs_rreq_trace_wake_ip);
	clear_bit_unlock(NETFS_RREQ_IN_PROGRESS, &wreq->flags);
	wake_up_bit(&wreq->flags, NETFS_RREQ_IN_PROGRESS);

	if (wreq->iocb) {
		size_t written = min(wreq->transferred, wreq->len);
		wreq->iocb->ki_pos += written;
		if (wreq->iocb->ki_complete)
			wreq->iocb->ki_complete(
				wreq->iocb, wreq->error ? wreq->error : written);
		wreq->iocb = VFS_PTR_POISON;
	}

	netfs_clear_subrequests(wreq, false);
	netfs_put_request(wreq, false, netfs_rreq_trace_put_work_complete);
}
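
/*
 * Completion above is only declared once the issuer has set
 * NETFS_RREQ_ALL_QUEUED and every active stream's subrequest list has
 * drained; the request's overall transferred count is the minimum of the
 * per-stream counts, since every stream must have covered a byte for it to be
 * considered written.
 */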

/*
 * Wake the collection work item.
 */
void netfs_wake_write_collector(struct netfs_io_request *wreq, bool was_async)
{
	if (!work_pending(&wreq->work)) {
		netfs_get_request(wreq, netfs_rreq_trace_get_work);
		if (!queue_work(system_unbound_wq, &wreq->work))
			netfs_put_request(wreq, was_async, netfs_rreq_trace_put_work_nq);
	}
}

/**
 * netfs_write_subrequest_terminated - Note the termination of a write operation.
 * @_op: The I/O subrequest that has terminated.
 * @transferred_or_error: The amount of data transferred or an error code.
 * @was_async: The termination was asynchronous
 *
 * This tells the library that a contributory write I/O operation has
 * terminated, one way or another, and that it should collect the results.
 *
 * The caller indicates in @transferred_or_error the outcome of the operation,
 * supplying a positive value to indicate the number of bytes transferred or a
 * negative error code.  The library will look after reissuing I/O operations
 * as appropriate and writing downloaded data to the cache.
 *
 * If @was_async is true, the caller might be running in softirq or interrupt
 * context and we can't sleep.
 *
 * When this is called, ownership of the subrequest is transferred back to the
 * library, along with a ref.
 *
 * Note that %_op is a void* so that the function can be passed to
 * kiocb::term_func without the need for a casting wrapper.
 */
void netfs_write_subrequest_terminated(void *_op, ssize_t transferred_or_error,
				       bool was_async)
{
	struct netfs_io_subrequest *subreq = _op;
	struct netfs_io_request *wreq = subreq->rreq;
	struct netfs_io_stream *stream = &wreq->io_streams[subreq->stream_nr];

	_enter("%x[%x] %zd", wreq->debug_id, subreq->debug_index, transferred_or_error);

	switch (subreq->source) {
	case NETFS_UPLOAD_TO_SERVER:
		netfs_stat(&netfs_n_wh_upload_done);
		break;
	case NETFS_WRITE_TO_CACHE:
		netfs_stat(&netfs_n_wh_write_done);
		break;
	case NETFS_INVALID_WRITE:
		break;
	default:
		BUG();
	}

	if (IS_ERR_VALUE(transferred_or_error)) {
		subreq->error = transferred_or_error;
		if (subreq->error == -EAGAIN)
			set_bit(NETFS_SREQ_NEED_RETRY, &subreq->flags);
		else
			set_bit(NETFS_SREQ_FAILED, &subreq->flags);
		trace_netfs_failure(wreq, subreq, transferred_or_error, netfs_fail_write);

		switch (subreq->source) {
		case NETFS_WRITE_TO_CACHE:
			netfs_stat(&netfs_n_wh_write_failed);
			break;
		case NETFS_UPLOAD_TO_SERVER:
			netfs_stat(&netfs_n_wh_upload_failed);
			break;
		default:
			break;
		}
		trace_netfs_rreq(wreq, netfs_rreq_trace_set_pause);
		set_bit(NETFS_RREQ_PAUSE, &wreq->flags);
	} else {
		if (WARN(transferred_or_error > subreq->len - subreq->transferred,
			 "Subreq excess write: R=%x[%x] %zd > %zu - %zu",
			 wreq->debug_id, subreq->debug_index,
			 transferred_or_error, subreq->len, subreq->transferred))
			transferred_or_error = subreq->len - subreq->transferred;

		subreq->error = 0;
		subreq->transferred += transferred_or_error;

		if (subreq->transferred < subreq->len)
			set_bit(NETFS_SREQ_NEED_RETRY, &subreq->flags);
	}

	trace_netfs_sreq(subreq, netfs_sreq_trace_terminated);

	clear_bit_unlock(NETFS_SREQ_IN_PROGRESS, &subreq->flags);
	wake_up_bit(&subreq->flags, NETFS_SREQ_IN_PROGRESS);

	/* If we are at the head of the queue, wake up the collector,
	 * transferring a ref to it if we were the ones to do so.
	 */
	if (list_is_first(&subreq->rreq_link, &stream->subrequests))
		netfs_wake_write_collector(wreq, was_async);

	netfs_put_subrequest(subreq, was_async, netfs_sreq_trace_put_terminated);
}
EXPORT_SYMBOL(netfs_write_subrequest_terminated);
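
/*
 * Illustrative sketch (not part of this file): a filesystem backend's write
 * completion handler would typically report back to netfslib like this, where
 * "my_fs_write_done", "resp" and its fields are hypothetical names:
 *
 *	static void my_fs_write_done(struct my_fs_response *resp)
 *	{
 *		struct netfs_io_subrequest *subreq = resp->subreq;
 *
 *		// Pass either the byte count written or a negative errno;
 *		// ownership of the subreq (and a ref) returns to netfslib.
 *		netfs_write_subrequest_terminated(subreq,
 *						  resp->error ?: resp->bytes_written,
 *						  false);
 *	}
 */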