// SPDX-License-Identifier: GPL-2.0-only
/* Read with PG_private_2 [DEPRECATED].
 *
 * Copyright (C) 2024 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 */

#include <linux/export.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/slab.h>
#include <linux/task_io_accounting_ops.h>
#include "internal.h"
/*
 * [DEPRECATED] Copy a folio to the cache with PG_private_2 set.
 */
static void netfs_pgpriv2_copy_folio(struct netfs_io_request *creq, struct folio *folio)
{
	struct netfs_io_stream *cache = &creq->io_streams[1];
	size_t fsize = folio_size(folio), flen = fsize;
	loff_t fpos = folio_pos(folio), i_size;
	bool to_eof = false;

	_enter("");
	/* netfs_perform_write() may shift i_size around the page or from out
	 * of the page to beyond it, but cannot move i_size into or through the
	 * page since we have it locked.
	 */
	i_size = i_size_read(creq->inode);

	if (fpos >= i_size) {
		/* mmap beyond eof. */
		_debug("beyond eof");
		folio_end_private_2(folio);
		return;
	}
	if (fpos + fsize > creq->i_size)
		creq->i_size = i_size;

	if (flen > i_size - fpos) {
		flen = i_size - fpos;
		to_eof = true;
	} else if (flen == i_size - fpos) {
		to_eof = true;
	}

	_debug("folio %zx %zx", flen, fsize);

	trace_netfs_folio(folio, netfs_folio_trace_store_copy);
	/* Attach the folio to the rolling buffer. */
	if (rolling_buffer_append(&creq->buffer, folio, 0) < 0) {
		clear_bit(NETFS_RREQ_FOLIO_COPY_TO_CACHE, &creq->flags);
		return;
	}

	cache->submit_extendable_to = fsize;
	cache->submit_off = 0;
	cache->submit_len = flen;
	/* Attach the folio to one or more subrequests.  For a big folio, we
	 * could end up with thousands of subrequests if the wsize is small -
	 * but we might need to wait during the creation of subrequests for
	 * network resources (eg. SMB credits).
	 */
	do {
		ssize_t part;

		creq->buffer.iter.iov_offset = cache->submit_off;

		atomic64_set(&creq->issued_to, fpos + cache->submit_off);
		cache->submit_extendable_to = fsize - cache->submit_off;
		part = netfs_advance_write(creq, cache, fpos + cache->submit_off,
					   cache->submit_len, to_eof);
		cache->submit_off += part;
		if (part > cache->submit_len)
			cache->submit_len = 0;
		else
			cache->submit_len -= part;
	} while (cache->submit_len > 0);
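	/* Everything in the folio has now been issued to the cache stream, so
	 * reset the iterator offset and advance the rolling buffer past the
	 * folio.
	 */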
	creq->buffer.iter.iov_offset = 0;
	rolling_buffer_advance(&creq->buffer, fsize);
	atomic64_set(&creq->issued_to, fpos + fsize);

	if (flen < fsize)
		netfs_issue_write(creq, cache);
}
/*
 * [DEPRECATED] Set up copying to the cache.
 */
static struct netfs_io_request *netfs_pgpriv2_begin_copy_to_cache(
	struct netfs_io_request *rreq, struct folio *folio)
{
	struct netfs_io_request *creq;

	if (!fscache_resources_valid(&rreq->cache_resources))
		goto cancel;

	creq = netfs_create_write_req(rreq->mapping, NULL, folio_pos(folio),
				      NETFS_PGPRIV2_COPY_TO_CACHE);
	if (IS_ERR(creq))
		goto cancel;

	if (!creq->io_streams[1].avail)
		goto cancel_put;
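	/* The copy request is usable; note it on the read request so that
	 * further folios are copied via the same write request.
	 */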
	trace_netfs_write(creq, netfs_write_trace_copy_to_cache);
	netfs_stat(&netfs_n_wh_copy_to_cache);
	rreq->copy_to_cache = creq;
	return creq;

cancel_put:
	netfs_put_request(creq, false, netfs_rreq_trace_put_return);
cancel:
	rreq->copy_to_cache = ERR_PTR(-ENOBUFS);
	clear_bit(NETFS_RREQ_FOLIO_COPY_TO_CACHE, &rreq->flags);
	return ERR_PTR(-ENOBUFS);
}
/*
 * [DEPRECATED] Mark page as requiring copy-to-cache using PG_private_2 and add
 * it to the copy write request.
 */
void netfs_pgpriv2_copy_to_cache(struct netfs_io_request *rreq, struct folio *folio)
{
	struct netfs_io_request *creq = rreq->copy_to_cache;

	if (!creq)
		creq = netfs_pgpriv2_begin_copy_to_cache(rreq, folio);
	if (IS_ERR(creq))
		return;

	trace_netfs_folio(folio, netfs_folio_trace_copy_to_cache);
	folio_start_private_2(folio);
	netfs_pgpriv2_copy_folio(creq, folio);
}
/*
 * [DEPRECATED] End writing to the cache, flushing out any outstanding writes.
 */
void netfs_pgpriv2_end_copy_to_cache(struct netfs_io_request *rreq)
{
	struct netfs_io_request *creq = rreq->copy_to_cache;

	if (IS_ERR_OR_NULL(creq))
		return;
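	/* Push out anything still queued on the cache stream and mark the
	 * request as fully queued so that collection can complete it once the
	 * remaining subrequests finish.
	 */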
	netfs_issue_write(creq, &creq->io_streams[1]);
	smp_wmb(); /* Write lists before ALL_QUEUED. */
	set_bit(NETFS_RREQ_ALL_QUEUED, &creq->flags);

	netfs_put_request(creq, false, netfs_rreq_trace_put_return);
	creq->copy_to_cache = NULL;
}
/*
 * [DEPRECATED] Remove the PG_private_2 mark from any folios we've finished
 * copying.
 */
bool netfs_pgpriv2_unlock_copied_folios(struct netfs_io_request *creq)
{
	struct folio_queue *folioq = creq->buffer.tail;
	unsigned long long collected_to = creq->collected_to;
	unsigned int slot = creq->buffer.first_tail_slot;
	bool made_progress = false;

	if (slot >= folioq_nr_slots(folioq)) {
		folioq = rolling_buffer_delete_spent(&creq->buffer);
		slot = 0;
	}
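	/* Walk forward through the buffer, ending PG_private_2 on each folio
	 * that has been fully written to the cache (ie. lies below
	 * collected_to) and stopping at the first one that hasn't.
	 */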
	for (;;) {
		struct folio *folio;
		unsigned long long fpos, fend;
		size_t fsize, flen;

		folio = folioq_folio(folioq, slot);
		if (WARN_ONCE(!folio_test_private_2(folio),
			      "R=%08x: folio %lx is not marked private_2\n",
			      creq->debug_id, folio->index))
			trace_netfs_folio(folio, netfs_folio_trace_not_under_wback);
		fpos = folio_pos(folio);
		fsize = folio_size(folio);
		flen = fsize;

		fend = min_t(unsigned long long, fpos + flen, creq->i_size);

		trace_netfs_collect_folio(creq, folio, fend, collected_to);

		/* Unlock any folio we've transferred all of. */
		if (collected_to < fend)
			break;
		trace_netfs_folio(folio, netfs_folio_trace_end_copy);
		folio_end_private_2(folio);
		creq->cleaned_to = fpos + fsize;
		made_progress = true;

		/* Clean up the head folioq.  If we clear an entire folioq, then
		 * we can get rid of it provided it's not also the tail folioq
		 * being filled by the issuer.
		 */
		folioq_clear(folioq, slot);
		slot++;
		if (slot >= folioq_nr_slots(folioq)) {
			folioq = rolling_buffer_delete_spent(&creq->buffer);
			if (!folioq)
				goto done;
			slot = 0;
		}

		if (fpos + fsize >= collected_to)
			break;
	}
	creq->buffer.tail = folioq;
done:
	creq->buffer.first_tail_slot = slot;
	return made_progress;
}