// SPDX-License-Identifier: GPL-2.0-or-later
/* handling of writes to regular files and writing back to the server
 *
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 */

#include <linux/backing-dev.h>
#include <linux/slab.h>
#include <linux/fs.h>
#include <linux/pagemap.h>
#include <linux/writeback.h>
#include <linux/pagevec.h>
#include <linux/netfs.h>
#include <trace/events/netfs.h>
#include "internal.h"
/*
 * completion of write to server
 */
static void afs_pages_written_back(struct afs_vnode *vnode, loff_t start, unsigned int len)
{
	_enter("{%llx:%llu},{%x @%llx}",
	       vnode->fid.vid, vnode->fid.vnode, len, start);

	afs_prune_wb_keys(vnode);
	_leave("");
}
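/*
 * Note that the pruning above is best-effort: afs_prune_wb_keys() takes the
 * lock itself and only discards keys that have no remaining users.
 */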
/*
 * Find a key to use for the writeback.  We cached the keys used to author the
 * writes on the vnode.  wreq->netfs_priv2 will contain the last writeback key
 * record used or NULL and we need to start from there if it's set.
 * wreq->netfs_priv will be set to the key itself or NULL.
 */
static void afs_get_writeback_key(struct netfs_io_request *wreq)
{
	struct afs_wb_key *wbk, *old = wreq->netfs_priv2;
	struct afs_vnode *vnode = AFS_FS_I(wreq->inode);

	key_put(wreq->netfs_priv);
	wreq->netfs_priv = NULL;
	wreq->netfs_priv2 = NULL;

	spin_lock(&vnode->wb_lock);
	if (old)
		wbk = list_next_entry(old, vnode_link);
	else
		wbk = list_first_entry(&vnode->wb_keys, struct afs_wb_key, vnode_link);

	list_for_each_entry_from(wbk, &vnode->wb_keys, vnode_link) {
		_debug("wbk %u", key_serial(wbk->key));
		if (key_validate(wbk->key) == 0) {
			refcount_inc(&wbk->usage);
			wreq->netfs_priv = key_get(wbk->key);
			wreq->netfs_priv2 = wbk;
			_debug("USE WB KEY %u", key_serial(wbk->key));
			break;
		}
	}

	spin_unlock(&vnode->wb_lock);

	afs_put_wb_key(old);
}
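/*
 * Illustrative calling pattern (an assumed sketch, not a fixed API contract):
 * afs_begin_writeback() takes the first valid key; if the store then fails
 * with a key-related error, afs_retry_request() calls back in here and the
 * scan resumes from the record saved in wreq->netfs_priv2:
 *
 *	afs_get_writeback_key(wreq);	// picks up after netfs_priv2
 *	if (!wreq->netfs_priv)		// no more valid keys to try
 *		stream->failed = true;
 *
 * Each successful call leaves a ref on both the key (netfs_priv) and the
 * afs_wb_key record (netfs_priv2); the previous record's ref is dropped here.
 */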
static void afs_store_data_success(struct afs_operation *op)
{
	struct afs_vnode *vnode = op->file[0].vnode;

	op->ctime = op->file[0].scb.status.mtime_client;
	afs_vnode_commit_status(op, &op->file[0]);
	if (!afs_op_error(op)) {
		afs_pages_written_back(vnode, op->store.pos, op->store.size);
		afs_stat_v(vnode, n_stores);
		atomic_long_add(op->store.size, &afs_v2net(vnode)->n_store_bytes);
	}
}

static const struct afs_operation_ops afs_store_data_operation = {
	.issue_afs_rpc	= afs_fs_store_data,
	.issue_yfs_rpc	= yfs_fs_store_data,
	.success	= afs_store_data_success,
};
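/*
 * The operation core dispatches to ->issue_afs_rpc or ->issue_yfs_rpc
 * according to which protocol variant the fileserver speaks; ->success runs
 * on a good reply so that the returned status is folded back into the vnode
 * before the now-unused writeback keys are pruned.
 */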
/*
 * Prepare a subrequest to write to the server.  This sets the max_len
 * parameter.
 */
void afs_prepare_write(struct netfs_io_subrequest *subreq)
{
	struct netfs_io_stream *stream = &subreq->rreq->io_streams[subreq->stream_nr];

	//if (test_bit(NETFS_SREQ_RETRYING, &subreq->flags))
	//	subreq->max_len = 512 * 1024;
	//else
	stream->sreq_max_len = 256 * 1024 * 1024;
}
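/*
 * The 256MiB value caps how much data a single StoreData subrequest may
 * carry; netfslib splits anything larger into multiple subrequests.  The
 * commented-out lines appear to preserve an earlier experiment that shrank
 * retried subrequests to 512KiB.
 */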
/*
 * Issue a subrequest to write to the server.
 */
static void afs_issue_write_worker(struct work_struct *work)
{
	struct netfs_io_subrequest *subreq = container_of(work, struct netfs_io_subrequest, work);
	struct netfs_io_request *wreq = subreq->rreq;
	struct afs_operation *op;
	struct afs_vnode *vnode = AFS_FS_I(wreq->inode);
	unsigned long long pos = subreq->start + subreq->transferred;
	size_t len = subreq->len - subreq->transferred;
	int ret;

	_enter("R=%x[%x],%s{%llx:%llu.%u},%llx,%zx",
	       wreq->debug_id, subreq->debug_index,
	       subreq->rreq->origin == NETFS_WRITEBACK ? "WB" : "WBZ",
	       vnode->fid.vid,
	       vnode->fid.vnode,
	       vnode->fid.unique,
	       pos, len);

#if 0 // Error injection
	if (subreq->debug_index == 3)
		return netfs_write_subrequest_terminated(subreq, -ENOANO, false);

	if (!test_bit(NETFS_SREQ_RETRYING, &subreq->flags)) {
		set_bit(NETFS_SREQ_NEED_RETRY, &subreq->flags);
		return netfs_write_subrequest_terminated(subreq, -EAGAIN, false);
	}
#endif

	op = afs_alloc_operation(wreq->netfs_priv, vnode->volume);
	if (IS_ERR(op))
		return netfs_write_subrequest_terminated(subreq, -EAGAIN, false);

	afs_op_set_vnode(op, 0, vnode);
	op->file[0].dv_delta	= 1;
	op->file[0].modification = true;
	op->store.pos		= pos;
	op->store.size		= len;
	op->flags		|= AFS_OPERATION_UNINTR;
	op->ops			= &afs_store_data_operation;

	afs_begin_vnode_operation(op);

	op->store.write_iter	= &subreq->io_iter;
	op->store.i_size	= umax(pos + len, vnode->netfs.remote_i_size);
	op->mtime		= inode_get_mtime(&vnode->netfs.inode);

	afs_wait_for_operation(op);
	ret = afs_put_operation(op);
	switch (ret) {
	case -EACCES:
	case -EPERM:
	case -ENOKEY:
	case -EKEYEXPIRED:
	case -EKEYREJECTED:
	case -EKEYREVOKED:
		/* If there are more keys we can try, use the retry algorithm
		 * to rotate the keys.
		 */
		if (wreq->netfs_priv2)
			set_bit(NETFS_SREQ_NEED_RETRY, &subreq->flags);
		break;
	}

	netfs_write_subrequest_terminated(subreq, ret < 0 ? ret : subreq->len, false);
}
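/*
 * Note that on success the full subrequest length is reported as
 * transferred: the StoreData RPC either stores the whole span or fails, so
 * no partial progress is passed back through the length argument here.
 */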
void afs_issue_write(struct netfs_io_subrequest *subreq)
{
	subreq->work.func = afs_issue_write_worker;
	if (!queue_work(system_unbound_wq, &subreq->work))
		WARN_ON_ONCE(1);
}
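/*
 * The write is punted to a workqueue because the worker sleeps in
 * afs_wait_for_operation(); doing that directly in the caller's context
 * would block the netfs writeback machinery (rationale inferred, not stated
 * in the original).
 */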
/*
 * Writeback calls this when it finds a folio that needs uploading.  This isn't
 * called if writeback only has copy-to-cache to deal with.
 */
void afs_begin_writeback(struct netfs_io_request *wreq)
{
	afs_get_writeback_key(wreq);
	wreq->io_streams[0].avail = true;
}
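/*
 * Stream 0 is the upload-to-server stream; marking it available tells
 * netfslib that this request really transmits data to the server rather
 * than only copying folios to the local cache.
 */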
/*
 * Prepare to retry the writes in request.  Use this to try rotating the
 * available writeback keys.
 */
void afs_retry_request(struct netfs_io_request *wreq, struct netfs_io_stream *stream)
{
	struct netfs_io_subrequest *subreq =
		list_first_entry(&stream->subrequests,
				 struct netfs_io_subrequest, rreq_link);

	switch (subreq->error) {
	case -EACCES:
	case -EPERM:
	case -ENOKEY:
	case -EKEYEXPIRED:
	case -EKEYREJECTED:
	case -EKEYREVOKED:
		afs_get_writeback_key(wreq);
		if (!wreq->netfs_priv)
			stream->failed = true;
		break;
	}
}
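/*
 * Only key/permission errors trigger key rotation; anything else is left to
 * the generic netfs retry logic.  The first subrequest's error is taken as
 * representative of the whole stream.
 */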
/*
 * write some of the pending data back to the server
 */
int afs_writepages(struct address_space *mapping, struct writeback_control *wbc)
{
	struct afs_vnode *vnode = AFS_FS_I(mapping->host);
	int ret;

	/* We have to be careful as we can end up racing with setattr()
	 * truncating the pagecache since the caller doesn't take a lock here
	 * to prevent it.
	 */
	if (wbc->sync_mode == WB_SYNC_ALL)
		down_read(&vnode->validate_lock);
	else if (!down_read_trylock(&vnode->validate_lock))
		return 0;

	ret = netfs_writepages(mapping, wbc);
	up_read(&vnode->validate_lock);
	return ret;
}
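/*
 * For WB_SYNC_ALL (data-integrity) writeback we must wait for validate_lock
 * so that the flush genuinely happens; for background writeback it is
 * cheaper to skip this pass than to stall behind a truncation, hence the
 * trylock and the bare "return 0".
 */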
/*
 * flush any dirty pages for this process, and check for write errors.
 * - the return status from this call provides a reliable indication of
 *   whether any write errors occurred for this process.
 */
int afs_fsync(struct file *file, loff_t start, loff_t end, int datasync)
{
	struct afs_vnode *vnode = AFS_FS_I(file_inode(file));
	struct afs_file *af = file->private_data;
	int ret;

	_enter("{%llx:%llu},{n=%pD},%d",
	       vnode->fid.vid, vnode->fid.vnode, file,
	       datasync);

	ret = afs_validate(vnode, af->key);
	if (ret < 0)
		return ret;

	return file_write_and_wait_range(file, start, end);
}
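/*
 * Revalidating against the server first means that an error from a stale or
 * inaccessible vnode is reported to the caller rather than the flush being
 * vouched for silently (this reading of the ordering is an inference).
 */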
/*
 * notification that a previously read-only page is about to become writable
 * - if it returns an error, the caller will deliver a bus error signal
 */
vm_fault_t afs_page_mkwrite(struct vm_fault *vmf)
{
	struct file *file = vmf->vma->vm_file;

	if (afs_validate(AFS_FS_I(file_inode(file)), afs_file_key(file)) < 0)
		return VM_FAULT_SIGBUS;
	return netfs_page_mkwrite(vmf, NULL);
}
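/*
 * netfs_page_mkwrite() handles the generic folio work (locking, waiting for
 * writeback, dirtying); the NULL second argument means the dirtied folio is
 * not bound to any particular netfs group.
 */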
/*
 * Prune the keys cached for writeback.  This takes vnode->wb_lock itself, so
 * the caller must not hold it.
 */
void afs_prune_wb_keys(struct afs_vnode *vnode)
{
	LIST_HEAD(graveyard);
	struct afs_wb_key *wbk, *tmp;

	/* Discard unused keys */
	spin_lock(&vnode->wb_lock);

	if (!mapping_tagged(&vnode->netfs.inode.i_data, PAGECACHE_TAG_WRITEBACK) &&
	    !mapping_tagged(&vnode->netfs.inode.i_data, PAGECACHE_TAG_DIRTY)) {
		list_for_each_entry_safe(wbk, tmp, &vnode->wb_keys, vnode_link) {
			if (refcount_read(&wbk->usage) == 1)
				list_move(&wbk->vnode_link, &graveyard);
		}
	}

	spin_unlock(&vnode->wb_lock);

	while (!list_empty(&graveyard)) {
		wbk = list_entry(graveyard.next, struct afs_wb_key, vnode_link);
		list_del(&wbk->vnode_link);
		afs_put_wb_key(wbk);
	}
}
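/*
 * The graveyard pattern moves doomed keys off the list while wb_lock is
 * held, then drops the final references only after unlocking, keeping the
 * locked section short (rationale inferred from the structure above).
 */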