// SPDX-License-Identifier: GPL-2.0-or-later
/* Fileserver-directed operation handling.
 *
 * Copyright (C) 2020 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 */

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/fs.h>
#include "internal.h"

static atomic_t afs_operation_debug_counter;
/*
 * Create an operation against a volume.
 */
struct afs_operation *afs_alloc_operation(struct key *key, struct afs_volume *volume)
{
	struct afs_operation *op;

	_enter("");

	op = kzalloc(sizeof(*op), GFP_KERNEL);
	if (!op)
		return ERR_PTR(-ENOMEM);

	if (!key) {
		key = afs_request_key(volume->cell);
		if (IS_ERR(key)) {
			kfree(op);
			return ERR_CAST(key);
		}
	} else {
		key_get(key);
	}

	op->key		= key;
	op->volume	= afs_get_volume(volume, afs_volume_trace_get_new_op);
	op->net		= volume->cell->net;
	op->cb_v_break	= atomic_read(&volume->cb_v_break);
	op->pre_volsync.creation = volume->creation_time;
	op->pre_volsync.update	 = volume->update_time;
	op->debug_id	= atomic_inc_return(&afs_operation_debug_counter);
	op->nr_iterations = -1;
	afs_op_set_error(op, -EDESTADDRREQ);

	_leave(" = [op=%08x]", op->debug_id);
	return op;
}
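
/*
 * A waiter for a vnode's I/O lock.  Each contender queues one of these on
 * vnode->io_lock_waiters; the holder hands the lock directly to the first
 * queued waiter on unlock rather than letting waiters race to retake it.
 */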
struct afs_io_locker {
	struct list_head	link;
	struct task_struct	*task;
	unsigned long		have_lock;
};
/*
 * Unlock the I/O lock on a vnode.
 */
static void afs_unlock_for_io(struct afs_vnode *vnode)
{
	struct afs_io_locker *locker;

	spin_lock(&vnode->lock);
	locker = list_first_entry_or_null(&vnode->io_lock_waiters,
					  struct afs_io_locker, link);
	if (locker) {
		list_del(&locker->link);
		smp_store_release(&locker->have_lock, 1); /* The unlock barrier. */
		smp_mb__after_atomic(); /* Store have_lock before task state */
		wake_up_process(locker->task);
	} else {
		clear_bit(AFS_VNODE_IO_LOCK, &vnode->flags);
	}
	spin_unlock(&vnode->lock);
}
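
/*
 * Note: the smp_store_release() of have_lock above pairs with the
 * smp_load_acquire() in the two locking functions below, so a waiter that
 * sees have_lock set owns the lock without retaking the spinlock.
 */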
/*
 * Lock the I/O lock on a vnode uninterruptibly.  We can't use an ordinary
 * mutex as lockdep will complain if we unlock it in the wrong thread.
 */
static void afs_lock_for_io(struct afs_vnode *vnode)
{
	struct afs_io_locker myself = { .task = current, };

	spin_lock(&vnode->lock);

	if (!test_and_set_bit(AFS_VNODE_IO_LOCK, &vnode->flags)) {
		spin_unlock(&vnode->lock);
		return;
	}

	list_add_tail(&myself.link, &vnode->io_lock_waiters);
	spin_unlock(&vnode->lock);

	for (;;) {
		set_current_state(TASK_UNINTERRUPTIBLE);
		if (smp_load_acquire(&myself.have_lock)) /* The lock barrier */
			break;
		schedule();
	}
	__set_current_state(TASK_RUNNING);
}
/*
 * Lock the I/O lock on a vnode interruptibly.  We can't use an ordinary mutex
 * as lockdep will complain if we unlock it in the wrong thread.
 */
static int afs_lock_for_io_interruptible(struct afs_vnode *vnode)
{
	struct afs_io_locker myself = { .task = current, };
	int ret = 0;

	spin_lock(&vnode->lock);

	if (!test_and_set_bit(AFS_VNODE_IO_LOCK, &vnode->flags)) {
		spin_unlock(&vnode->lock);
		return 0;
	}

	list_add_tail(&myself.link, &vnode->io_lock_waiters);
	spin_unlock(&vnode->lock);

	for (;;) {
		set_current_state(TASK_INTERRUPTIBLE);
		if (smp_load_acquire(&myself.have_lock) || /* The lock barrier */
		    signal_pending(current))
			break;
		schedule();
	}
	__set_current_state(TASK_RUNNING);

	/* If we got a signal, try to transfer the lock onto the next
	 * waiter.
	 */
	if (unlikely(signal_pending(current))) {
		spin_lock(&vnode->lock);
		if (myself.have_lock) {
			/* We were handed the lock anyway; pass it on. */
			spin_unlock(&vnode->lock);
			afs_unlock_for_io(vnode);
		} else {
			list_del(&myself.link);
			spin_unlock(&vnode->lock);
		}
		ret = -ERESTARTSYS;
	}
	return ret;
}
/*
 * Lock the vnode(s) being operated upon.
 */
static bool afs_get_io_locks(struct afs_operation *op)
{
	struct afs_vnode *vnode = op->file[0].vnode;
	struct afs_vnode *vnode2 = op->file[1].vnode;

	_enter("");

	if (op->flags & AFS_OPERATION_UNINTR) {
		afs_lock_for_io(vnode);
		op->flags |= AFS_OPERATION_LOCK_0;
		_leave(" = t [1]");
		return true;
	}

	if (!vnode2 || !op->file[1].need_io_lock || vnode == vnode2)
		vnode2 = NULL;

	/* Take the two locks in a consistent (address-based) order to avoid
	 * ABBA deadlock between concurrent two-vnode operations.
	 */
	if (vnode2 > vnode)
		swap(vnode, vnode2);

	if (afs_lock_for_io_interruptible(vnode) < 0) {
		afs_op_set_error(op, -ERESTARTSYS);
		op->flags |= AFS_OPERATION_STOP;
		_leave(" = f [I 0]");
		return false;
	}
	op->flags |= AFS_OPERATION_LOCK_0;

	if (vnode2) {
		if (afs_lock_for_io_interruptible(vnode2) < 0) {
			afs_op_set_error(op, -ERESTARTSYS);
			op->flags |= AFS_OPERATION_STOP;
			afs_unlock_for_io(vnode);
			op->flags &= ~AFS_OPERATION_LOCK_0;
			_leave(" = f [I 1]");
			return false;
		}
		op->flags |= AFS_OPERATION_LOCK_1;
	}

	_leave(" = t [2]");
	return true;
}
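
/*
 * Drop whichever I/O locks afs_get_io_locks() managed to obtain for this
 * operation.
 */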
static void afs_drop_io_locks(struct afs_operation *op)
{
	struct afs_vnode *vnode = op->file[0].vnode;
	struct afs_vnode *vnode2 = op->file[1].vnode;

	_enter("");

	if (op->flags & AFS_OPERATION_LOCK_1)
		afs_unlock_for_io(vnode2);
	if (op->flags & AFS_OPERATION_LOCK_0)
		afs_unlock_for_io(vnode);
}
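
/*
 * Record the state of a vnode (FID, data version and callback-break counter)
 * before the operation is issued so that the reply processing can tell what
 * changed whilst the RPC was in flight.
 */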
static void afs_prepare_vnode(struct afs_operation *op, struct afs_vnode_param *vp,
			      unsigned int index)
{
	struct afs_vnode *vnode = vp->vnode;

	if (vnode) {
		vp->fid			= vnode->fid;
		vp->dv_before		= vnode->status.data_version;
		vp->cb_break_before	= afs_calc_vnode_cb_break(vnode);
		if (vnode->lock_state != AFS_VNODE_LOCK_NONE)
			op->flags	|= AFS_OPERATION_CUR_ONLY;
		if (vp->modification)
			set_bit(AFS_VNODE_MODIFYING, &vnode->flags);
	}

	if (vp->fid.vnode)
		_debug("PREP[%u] {%llx:%llu.%u}",
		       index, vp->fid.vid, vp->fid.vnode, vp->fid.unique);
}
/*
 * Begin an operation on the fileserver.
 *
 * Fileserver operations are serialised on the server by vnode, so we serialise
 * them here also using the io_lock.
 */
bool afs_begin_vnode_operation(struct afs_operation *op)
{
	struct afs_vnode *vnode = op->file[0].vnode;

	ASSERT(vnode);

	_enter("");

	if (op->file[0].need_io_lock)
		if (!afs_get_io_locks(op))
			return false;

	afs_prepare_vnode(op, &op->file[0], 0);
	afs_prepare_vnode(op, &op->file[1], 1);
	op->cb_v_break = atomic_read(&op->volume->cb_v_break);
	_leave(" = true");
	return true;
}
/*
 * Tidy up a filesystem cursor and unlock the vnode.
 */
void afs_end_vnode_operation(struct afs_operation *op)
{
	_enter("");

	switch (afs_op_error(op)) {
	case -EDESTADDRREQ:
	case -EADDRNOTAVAIL:
	case -ENETUNREACH:
	case -EHOSTUNREACH:
		afs_dump_edestaddrreq(op);
		break;
	}

	afs_drop_io_locks(op);
}
/*
 * Wait for an in-progress operation to complete.
 */
void afs_wait_for_operation(struct afs_operation *op)
{
	_enter("");

	/* Rotate through the fileservers, issuing the RPC on each pass until
	 * the operation reaches a conclusion.
	 */
	while (afs_select_fileserver(op)) {
		op->call_responded = false;
		op->call_error = 0;
		op->call_abort_code = 0;
		if (test_bit(AFS_SERVER_FL_IS_YFS, &op->server->flags) &&
		    op->ops->issue_yfs_rpc)
			op->ops->issue_yfs_rpc(op);
		else if (op->ops->issue_afs_rpc)
			op->ops->issue_afs_rpc(op);
		else
			op->call_error = -ENOTSUPP;

		if (op->call) {
			afs_wait_for_call_to_complete(op->call);
			op->call_abort_code = op->call->abort_code;
			op->call_error = op->call->error;
			op->call_responded = op->call->responded;
			afs_put_call(op->call);
		}
	}

	if (op->call_responded && op->server)
		set_bit(AFS_SERVER_FL_RESPONDING, &op->server->flags);

	if (!afs_op_error(op)) {
		_debug("success");
		op->ops->success(op);
	} else if (op->cumul_error.aborted) {
		if (op->ops->aborted)
			op->ops->aborted(op);
	} else {
		if (op->ops->failed)
			op->ops->failed(op);
	}

	afs_end_vnode_operation(op);

	if (!afs_op_error(op) && op->ops->edit_dir) {
		_debug("edit_dir");
		op->ops->edit_dir(op);
	}
	_leave("");
}
/*
 * Dispose of an operation.
 */
int afs_put_operation(struct afs_operation *op)
{
	struct afs_addr_list *alist;
	int i, ret = afs_op_error(op);

	_enter("op=%08x,%d", op->debug_id, ret);

	if (op->ops && op->ops->put)
		op->ops->put(op);
	if (op->file[0].modification)
		clear_bit(AFS_VNODE_MODIFYING, &op->file[0].vnode->flags);
	if (op->file[1].modification && op->file[1].vnode != op->file[0].vnode)
		clear_bit(AFS_VNODE_MODIFYING, &op->file[1].vnode->flags);
	if (op->file[0].put_vnode)
		iput(&op->file[0].vnode->netfs.inode);
	if (op->file[1].put_vnode)
		iput(&op->file[1].vnode->netfs.inode);

	if (op->more_files) {
		for (i = 0; i < op->nr_files - 2; i++)
			if (op->more_files[i].put_vnode)
				iput(&op->more_files[i].vnode->netfs.inode);
		kfree(op->more_files);
	}

	if (op->estate) {
		alist = op->estate->addresses;
		if (alist) {
			/* If a non-preferred address answered after the
			 * preferred one had been tried, note it as the new
			 * preferred address.
			 */
			if (op->call_responded &&
			    op->addr_index != alist->preferred &&
			    test_bit(alist->preferred, &op->addr_tried))
				WRITE_ONCE(alist->preferred, op->addr_index);
		}
	}

	afs_clear_server_states(op);
	afs_put_serverlist(op->net, op->server_list);
	afs_put_volume(op->volume, afs_volume_trace_put_put_op);
	key_put(op->key);
	kfree(op);
	return ret;
}
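
/*
 * Run an operation synchronously: begin it (taking the vnode I/O locks),
 * iterate the fileserver rotation until it concludes, then dispose of it and
 * return its final error code.
 *
 * A typical caller looks something like the sketch below.  This is
 * illustrative only: the helper and ops-table names come from elsewhere in
 * fs/afs and stand in for whichever operation is being run.
 *
 *	op = afs_alloc_operation(key, vnode->volume);
 *	if (IS_ERR(op))
 *		return PTR_ERR(op);
 *	afs_op_set_vnode(op, 0, vnode);
 *	op->ops = &afs_fetch_status_operation;
 *	return afs_do_sync_operation(op);
 */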
int afs_do_sync_operation(struct afs_operation *op)
{
	afs_begin_vnode_operation(op);
	afs_wait_for_operation(op);
	return afs_put_operation(op);
}