// SPDX-License-Identifier: GPL-2.0-or-later
/* handling of writes to regular files and writing back to the server
 *
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 */

#include <linux/backing-dev.h>
#include <linux/slab.h>
#include <linux/fs.h>
#include <linux/pagemap.h>
#include <linux/writeback.h>
#include <linux/pagevec.h>
#include <linux/netfs.h>
#include <trace/events/netfs.h>
#include "internal.h"

/*
 * completion of write to server
 */
static void afs_pages_written_back(struct afs_vnode *vnode, loff_t start,
                                   unsigned int len)
{
        _enter("{%llx:%llu},{%x @%llx}",
               vnode->fid.vid, vnode->fid.vnode, len, start);

        afs_prune_wb_keys(vnode);
        _leave("");
}

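/*
 * Note: this runs only when a store RPC completed successfully, so it is
 * also the point at which writeback keys that were pinned solely for the
 * sake of in-flight writes can be discarded (see afs_prune_wb_keys() below).
 */
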
/*
 * Find a key to use for the writeback.  We cached the keys used to author the
 * writes on the vnode.  wreq->netfs_priv2 will contain the last writeback key
 * record used or NULL and we need to start from there if it's set.
 * wreq->netfs_priv will be set to the key itself or NULL.
 */
static void afs_get_writeback_key(struct netfs_io_request *wreq)
{
        struct afs_wb_key *wbk, *old = wreq->netfs_priv2;
        struct afs_vnode *vnode = AFS_FS_I(wreq->inode);

        key_put(wreq->netfs_priv);
        wreq->netfs_priv = NULL;
        wreq->netfs_priv2 = NULL;

        spin_lock(&vnode->wb_lock);
        if (old)
                wbk = list_next_entry(old, vnode_link);
        else
                wbk = list_first_entry(&vnode->wb_keys,
                                       struct afs_wb_key, vnode_link);

        list_for_each_entry_from(wbk, &vnode->wb_keys, vnode_link) {
                _debug("wbk %u", key_serial(wbk->key));
                if (key_validate(wbk->key) == 0) {
                        refcount_inc(&wbk->usage);
                        wreq->netfs_priv = key_get(wbk->key);
                        wreq->netfs_priv2 = wbk;
                        _debug("USE WB KEY %u", key_serial(wbk->key));
                        break;
                }
        }

        spin_unlock(&vnode->wb_lock);

        afs_put_wb_key(old);
}

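/*
 * A successful search takes two references: one on the afs_wb_key record
 * (held in wreq->netfs_priv2) and one on the key itself (wreq->netfs_priv).
 * The previous record's reference is dropped above; presumably the final
 * pair is released when the netfs request itself is cleaned up.
 */
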
static void afs_store_data_success(struct afs_operation *op)
{
        struct afs_vnode *vnode = op->file[0].vnode;

        op->ctime = op->file[0].scb.status.mtime_client;
        afs_vnode_commit_status(op, &op->file[0]);
        if (!afs_op_error(op)) {
                afs_pages_written_back(vnode, op->store.pos, op->store.size);
                afs_stat_v(vnode, n_stores);
                atomic_long_add(op->store.size, &afs_v2net(vnode)->n_store_bytes);
        }
}

static const struct afs_operation_ops afs_store_data_operation = {
        .issue_afs_rpc  = afs_fs_store_data,
        .issue_yfs_rpc  = yfs_fs_store_data,
        .success        = afs_store_data_success,
};

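/*
 * The operation core selects ->issue_afs_rpc or ->issue_yfs_rpc according to
 * whether the fileserver speaks the YFS variant of the protocol, and invokes
 * ->success once a reply has been received and parsed.
 */
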
/*
 * Prepare a subrequest to write to the server.  This sets the max_len
 * parameter.
 */
void afs_prepare_write(struct netfs_io_subrequest *subreq)
{
        struct netfs_io_stream *stream = &subreq->rreq->io_streams[subreq->stream_nr];

        //if (test_bit(NETFS_SREQ_RETRYING, &subreq->flags))
        //      subreq->max_len = 512 * 1024;
        //else
        stream->sreq_max_len = 256 * 1024 * 1024;
}

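/*
 * The 256MiB figure caps how much a single subrequest (and thus a single
 * StoreData RPC) may carry; the netfs core splits anything larger into
 * multiple subrequests.  The commented-out branch would clamp retried
 * subrequests more tightly, but is currently disabled.
 */
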
/*
 * Issue a subrequest to write to the server.
 */
static void afs_issue_write_worker(struct work_struct *work)
{
        struct netfs_io_subrequest *subreq = container_of(work, struct netfs_io_subrequest, work);
        struct netfs_io_request *wreq = subreq->rreq;
        struct afs_operation *op;
        struct afs_vnode *vnode = AFS_FS_I(wreq->inode);
        unsigned long long pos = subreq->start + subreq->transferred;
        size_t len = subreq->len - subreq->transferred;
        int ret;

        _enter("R=%x[%x],%s{%llx:%llu.%u},%llx,%zx",
               wreq->debug_id, subreq->debug_index,
               wreq->origin == NETFS_WRITEBACK ? "" : "W",
               vnode->fid.vid,
               vnode->fid.vnode,
               vnode->fid.unique,
               pos, len);

#if 0 // Error injection
        if (subreq->debug_index == 3)
                return netfs_write_subrequest_terminated(subreq, -ENOANO, false);

        if (!subreq->retry_count) {
                set_bit(NETFS_SREQ_NEED_RETRY, &subreq->flags);
                return netfs_write_subrequest_terminated(subreq, -EAGAIN, false);
        }
#endif

        op = afs_alloc_operation(wreq->netfs_priv, vnode->volume);
        if (IS_ERR(op))
                return netfs_write_subrequest_terminated(subreq, -EAGAIN, false);

        afs_op_set_vnode(op, 0, vnode);
        op->file[0].dv_delta = 1;
        op->file[0].modification = true;
        op->store.pos = pos;
        op->store.size = len;
        op->flags |= AFS_OPERATION_UNINTR;
        op->ops = &afs_store_data_operation;

        afs_begin_vnode_operation(op);

        op->store.write_iter = &subreq->io_iter;
        op->store.i_size = umax(pos + len, vnode->netfs.remote_i_size);
        op->mtime = inode_get_mtime(&vnode->netfs.inode);

        afs_wait_for_operation(op);
        ret = afs_put_operation(op);
        switch (ret) {
        case 0:
                __set_bit(NETFS_SREQ_MADE_PROGRESS, &subreq->flags);
                break;
        case -EACCES:
        case -EPERM:
        case -ENOKEY:
        case -EKEYEXPIRED:
        case -EKEYREJECTED:
        case -EKEYREVOKED:
                /* If there are more keys we can try, use the retry algorithm
                 * to rotate the keys.
                 */
                if (wreq->netfs_priv2)
                        set_bit(NETFS_SREQ_NEED_RETRY, &subreq->flags);
                break;
        }

        netfs_write_subrequest_terminated(subreq, ret < 0 ? ret : subreq->len, false);
}

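/*
 * Note that AFS_OPERATION_UNINTR makes afs_wait_for_operation() wait
 * uninterruptibly, so a signal can't abandon a StoreData RPC part-way and
 * leave the file contents in an indeterminate state on the server.
 */
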
void afs_issue_write(struct netfs_io_subrequest *subreq)
{
        subreq->work.func = afs_issue_write_worker;
        if (!queue_work(system_unbound_wq, &subreq->work))
                WARN_ON_ONCE(1);
}

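/*
 * The issue is punted to a workqueue because afs_issue_write_worker() blocks
 * in afs_wait_for_operation() until the server replies, which the caller's
 * context may not be able to afford.
 */
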
/*
 * Writeback calls this when it finds a folio that needs uploading.  This
 * isn't called if writeback only has copy-to-cache to deal with.
 */
void afs_begin_writeback(struct netfs_io_request *wreq)
{
        if (S_ISREG(wreq->inode->i_mode))
                afs_get_writeback_key(wreq);
        wreq->io_streams[0].avail = true;
}

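/*
 * Stream 0 is the upload-to-server stream; marking it available tells the
 * netfs core that this request really does have data to send to the server
 * (as opposed to only copying to the cache).
 */
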
/*
 * Prepare to retry the writes in the request.  Use this to try rotating the
 * available writeback keys.
 */
void afs_retry_request(struct netfs_io_request *wreq, struct netfs_io_stream *stream)
{
        struct netfs_io_subrequest *subreq =
                list_first_entry(&stream->subrequests,
                                 struct netfs_io_subrequest, rreq_link);

        switch (wreq->origin) {
        case NETFS_READAHEAD:
        case NETFS_READPAGE:
        case NETFS_READ_GAPS:
        case NETFS_READ_SINGLE:
        case NETFS_READ_FOR_WRITE:
        case NETFS_DIO_READ:
                return;
        default:
                break;
        }

        switch (subreq->error) {
        case -EACCES:
        case -EPERM:
        case -ENOKEY:
        case -EKEYEXPIRED:
        case -EKEYREJECTED:
        case -EKEYREVOKED:
                afs_get_writeback_key(wreq);
                if (!wreq->netfs_priv)
                        stream->failed = true;
                break;
        }
}

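/*
 * Reads never get their keys rotated here.  For writes, a permission or key
 * error causes rotation to the next cached writeback key; if none is left,
 * the stream is marked failed and the error stands.
 */
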
/*
 * write some of the pending data back to the server
 */
int afs_writepages(struct address_space *mapping, struct writeback_control *wbc)
{
        struct afs_vnode *vnode = AFS_FS_I(mapping->host);
        int ret;

        /* We have to be careful as we can end up racing with setattr()
         * truncating the pagecache since the caller doesn't take a lock here
         * to prevent it.
         */
        if (wbc->sync_mode == WB_SYNC_ALL)
                down_read(&vnode->validate_lock);
        else if (!down_read_trylock(&vnode->validate_lock))
                return 0;

        ret = netfs_writepages(mapping, wbc);
        up_read(&vnode->validate_lock);
        return ret;
}

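/*
 * WB_SYNC_ALL writeback must make progress, so it waits for validate_lock;
 * best-effort writeback just backs off if the lock is contended, leaving the
 * pages dirty for a later pass.
 */
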
/*
 * flush any dirty pages for this process, and check for write errors.
 * - the return status from this call provides a reliable indication of
 *   whether any write errors occurred for this process.
 */
int afs_fsync(struct file *file, loff_t start, loff_t end, int datasync)
{
        struct afs_vnode *vnode = AFS_FS_I(file_inode(file));
        struct afs_file *af = file->private_data;
        int ret;

        _enter("{%llx:%llu},{n=%pD},%d",
               vnode->fid.vid, vnode->fid.vnode, file,
               datasync);

        ret = afs_validate(vnode, af->key);
        if (ret < 0)
                return ret;

        return file_write_and_wait_range(file, start, end);
}

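/*
 * Note that the vnode is revalidated first: if validation fails, the error
 * is returned without attempting the flush.
 */
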
/*
 * notification that a previously read-only page is about to become writable
 * - if it returns an error, the caller will deliver a bus error signal
 */
vm_fault_t afs_page_mkwrite(struct vm_fault *vmf)
{
        struct file *file = vmf->vma->vm_file;

        if (afs_validate(AFS_FS_I(file_inode(file)), afs_file_key(file)) < 0)
                return VM_FAULT_SIGBUS;
        return netfs_page_mkwrite(vmf, NULL);
}

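/*
 * The heavy lifting (locking the folio, waiting out any conflicting
 * writeback, marking it dirty) is left to netfs_page_mkwrite(); AFS only
 * needs to revalidate the vnode first.
 */
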
/*
 * Prune the keys cached for writeback.  Unused keys are discarded under
 * vnode->wb_lock and then released outside the lock.
 */
void afs_prune_wb_keys(struct afs_vnode *vnode)
{
        LIST_HEAD(graveyard);
        struct afs_wb_key *wbk, *tmp;

        /* Discard unused keys */
        spin_lock(&vnode->wb_lock);

        if (!mapping_tagged(&vnode->netfs.inode.i_data, PAGECACHE_TAG_WRITEBACK) &&
            !mapping_tagged(&vnode->netfs.inode.i_data, PAGECACHE_TAG_DIRTY)) {
                list_for_each_entry_safe(wbk, tmp, &vnode->wb_keys, vnode_link) {
                        if (refcount_read(&wbk->usage) == 1)
                                list_move(&wbk->vnode_link, &graveyard);
                }
        }

        spin_unlock(&vnode->wb_lock);

        while (!list_empty(&graveyard)) {
                wbk = list_entry(graveyard.next, struct afs_wb_key, vnode_link);
                list_del(&wbk->vnode_link);
                afs_put_wb_key(wbk);
        }
}