// SPDX-License-Identifier: GPL-2.0-only
/* Read with PG_private_2 [DEPRECATED].
 *
 * Copyright (C) 2024 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 */

#include <linux/export.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/slab.h>
#include <linux/task_io_accounting_ops.h>
#include "internal.h"

/*
 * [DEPRECATED] Mark page as requiring copy-to-cache using PG_private_2.  The
 * third mark in the folio queue is used to indicate that this folio needs
 * writing.
 */
void netfs_pgpriv2_mark_copy_to_cache(struct netfs_io_subrequest *subreq,
				      struct netfs_io_request *rreq,
				      struct folio_queue *folioq,
				      int slot)
{
	struct folio *folio = folioq_folio(folioq, slot);

	trace_netfs_folio(folio, netfs_folio_trace_copy_to_cache);
	folio_start_private_2(folio);
	folioq_mark3(folioq, slot);
}
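
/*
 * Note on lifetime: each folio_start_private_2() taken above has to be
 * balanced by a folio_end_private_2() in one of the paths below - the
 * beyond-EOF check in netfs_pgpriv2_copy_folio(), the error path in
 * netfs_pgpriv2_cancel() or the collection path in
 * netfs_pgpriv2_unlock_copied_folios().  A rough sketch of the intended
 * order (illustrative only; the actual call sites live elsewhere in
 * fs/netfs/):
 *
 *	netfs_pgpriv2_mark_copy_to_cache(subreq, rreq, folioq, slot);
 *	...
 *	netfs_pgpriv2_write_to_the_cache(rreq);
 *	...
 *	netfs_pgpriv2_unlock_copied_folios(wreq);
 */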

/*
 * [DEPRECATED] Cancel PG_private_2 on all marked folios in the event of an
 * unrecoverable error.
 */
static void netfs_pgpriv2_cancel(struct folio_queue *folioq)
{
	struct folio *folio;
	int slot;

	while (folioq) {
		if (!folioq->marks3) {
			folioq = folioq->next;
			continue;
		}

		slot = __ffs(folioq->marks3);
		folio = folioq_folio(folioq, slot);

		trace_netfs_folio(folio, netfs_folio_trace_cancel_copy);
		folio_end_private_2(folio);
		folioq_unmark3(folioq, slot);
	}
}
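
/*
 * marks3 serves here as a bitmap of marked slots within one folio_queue
 * segment, so __ffs() picks out the lowest marked slot on each pass: a
 * marks3 value of 0x14 (binary 10100), for instance, is drained as slot 2
 * and then slot 4 before the walk moves on to folioq->next.
 */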

/*
 * [DEPRECATED] Copy a folio to the cache with PG_private_2 set.
 */
static int netfs_pgpriv2_copy_folio(struct netfs_io_request *wreq, struct folio *folio)
{
	struct netfs_io_stream *cache = &wreq->io_streams[1];
	size_t fsize = folio_size(folio), flen = fsize;
	loff_t fpos = folio_pos(folio), i_size;
	bool to_eof = false;

	_enter("");

	/* netfs_perform_write() may shift i_size around the page or from out
	 * of the page to beyond it, but cannot move i_size into or through the
	 * page since we have it locked.
	 */
	i_size = i_size_read(wreq->inode);

	if (fpos >= i_size) {
		/* mmap beyond eof. */
		_debug("beyond eof");
		folio_end_private_2(folio);
		return 0;
	}

	if (fpos + fsize > wreq->i_size)
		wreq->i_size = i_size;

	if (flen > i_size - fpos) {
		flen = i_size - fpos;
		to_eof = true;
	} else if (flen == i_size - fpos) {
		to_eof = true;
	}

	_debug("folio %zx %zx", flen, fsize);

	trace_netfs_folio(folio, netfs_folio_trace_store_copy);

	/* Attach the folio to the rolling buffer. */
	if (netfs_buffer_append_folio(wreq, folio, false) < 0)
		return -ENOMEM;

	cache->submit_extendable_to = fsize;
	cache->submit_off = 0;
	cache->submit_len = flen;

	/* Attach the folio to one or more subrequests.  For a big folio, we
	 * could end up with thousands of subrequests if the wsize is small -
	 * but we might need to wait during the creation of subrequests for
	 * network resources (eg. SMB credits).
	 */
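	/* Illustration (assuming, say, a 64KiB folio and a transport that
	 * accepts at most 16KiB per subrequest): each pass of the loop below
	 * would attach 0x4000 bytes, submit_off stepping 0 -> 0x4000 ->
	 * 0x8000 -> 0xc000 while submit_len shrinks to zero, so the copy
	 * would end up spanning four subrequests for the one folio.
	 */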
	do {
		ssize_t part;

		wreq->io_iter.iov_offset = cache->submit_off;

		atomic64_set(&wreq->issued_to, fpos + cache->submit_off);
		cache->submit_extendable_to = fsize - cache->submit_off;
		part = netfs_advance_write(wreq, cache, fpos + cache->submit_off,
					   cache->submit_len, to_eof);
		cache->submit_off += part;
		if (part > cache->submit_len)
			cache->submit_len = 0;
		else
			cache->submit_len -= part;
	} while (cache->submit_len > 0);

	wreq->io_iter.iov_offset = 0;
	iov_iter_advance(&wreq->io_iter, fsize);
	atomic64_set(&wreq->issued_to, fpos + fsize);

	if (flen < fsize)
		netfs_issue_write(wreq, cache);

	_leave(" = 0");
	return 0;
}
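
/*
 * Worked example of the clamping above: for a 16KiB folio at fpos 0x4000
 * with i_size at 0x5000, flen is reduced to 0x1000 and to_eof is set, so
 * only the first 0x1000 bytes are submitted to the cache, yet the iterator
 * is still advanced by the full fsize of 0x4000 and, because flen < fsize,
 * the partially filled subrequest is flushed with netfs_issue_write().
 */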

/*
 * [DEPRECATED] Go through the buffer and write any folios that are marked with
 * the third mark to the cache.
 */
void netfs_pgpriv2_write_to_the_cache(struct netfs_io_request *rreq)
{
	struct netfs_io_request *wreq;
	struct folio_queue *folioq;
	struct folio *folio;
	int error = 0;
	int slot = 0;

	_enter("");

	if (!fscache_resources_valid(&rreq->cache_resources))
		goto couldnt_start;

	/* Need the first folio to be able to set up the op. */
	for (folioq = rreq->buffer; folioq; folioq = folioq->next) {
		if (folioq->marks3) {
			slot = __ffs(folioq->marks3);
			break;
		}
	}
	if (!folioq)
		return;
	folio = folioq_folio(folioq, slot);

	wreq = netfs_create_write_req(rreq->mapping, NULL, folio_pos(folio),
				      NETFS_PGPRIV2_COPY_TO_CACHE);
	if (IS_ERR(wreq)) {
		kleave(" [create %ld]", PTR_ERR(wreq));
		goto couldnt_start;
	}

	trace_netfs_write(wreq, netfs_write_trace_copy_to_cache);
	netfs_stat(&netfs_n_wh_copy_to_cache);

	for (;;) {
		error = netfs_pgpriv2_copy_folio(wreq, folio);
		if (error < 0)
			break;

		folioq_unmark3(folioq, slot);
		if (!folioq->marks3) {
			folioq = folioq->next;
			if (!folioq)
				break;
		}

		slot = __ffs(folioq->marks3);
		folio = folioq_folio(folioq, slot);
	}

	netfs_issue_write(wreq, &wreq->io_streams[1]);
	smp_wmb(); /* Write lists before ALL_QUEUED. */
	set_bit(NETFS_RREQ_ALL_QUEUED, &wreq->flags);

	netfs_put_request(wreq, false, netfs_rreq_trace_put_return);
	_leave(" = %d", error);
couldnt_start:
	netfs_pgpriv2_cancel(rreq->buffer);
}
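
/*
 * The smp_wmb() above orders the queuing of the final subrequests before the
 * setting of NETFS_RREQ_ALL_QUEUED, so a collector that observes the flag
 * should also see every subrequest already on the stream lists and knows no
 * more will be added.  (The matching ordering on the collector side lives
 * elsewhere in fs/netfs/.)
 */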

/*
 * [DEPRECATED] Remove the PG_private_2 mark from any folios we've finished
 * copying.
 */
bool netfs_pgpriv2_unlock_copied_folios(struct netfs_io_request *wreq)
{
	struct folio_queue *folioq = wreq->buffer;
	unsigned long long collected_to = wreq->collected_to;
	unsigned int slot = wreq->buffer_head_slot;
	bool made_progress = false;

	if (slot >= folioq_nr_slots(folioq)) {
		folioq = netfs_delete_buffer_head(wreq);
		slot = 0;
	}

	for (;;) {
		struct folio *folio;
		unsigned long long fpos, fend;
		size_t fsize, flen;

		folio = folioq_folio(folioq, slot);
		if (WARN_ONCE(!folio_test_private_2(folio),
			      "R=%08x: folio %lx is not marked private_2\n",
			      wreq->debug_id, folio->index))
			trace_netfs_folio(folio, netfs_folio_trace_not_under_wback);

		fpos = folio_pos(folio);
		fsize = folio_size(folio);
		flen = fsize;

		fend = min_t(unsigned long long, fpos + flen, wreq->i_size);

		trace_netfs_collect_folio(wreq, folio, fend, collected_to);

		/* Unlock any folio we've transferred all of. */
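		/* For example, a 4KiB folio at fpos 0x3000 has fend 0x4000
		 * (when i_size lies beyond it): with collected_to at 0x3800
		 * we stop here and leave PG_private_2 set, whereas once
		 * collected_to reaches 0x4000 the folio is unlocked below and
		 * cleaned_to advances to fpos + fsize.
		 */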
		if (collected_to < fend)
			break;

		trace_netfs_folio(folio, netfs_folio_trace_end_copy);
		folio_end_private_2(folio);
		wreq->cleaned_to = fpos + fsize;
		made_progress = true;

		/* Clean up the head folioq.  If we clear an entire folioq, then
		 * we can get rid of it provided it's not also the tail folioq
		 * being filled by the issuer.
		 */
		folioq_clear(folioq, slot);
		slot++;
		if (slot >= folioq_nr_slots(folioq)) {
			if (READ_ONCE(wreq->buffer_tail) == folioq)
				break;
			folioq = netfs_delete_buffer_head(wreq);
			slot = 0;
		}

		if (fpos + fsize >= collected_to)
			break;
	}

	wreq->buffer = folioq;
	wreq->buffer_head_slot = slot;
	return made_progress;
}