fs/ocfs2/dlm/dlmast.c
// SPDX-License-Identifier: GPL-2.0-or-later
/* -*- mode: c; c-basic-offset: 8; -*-
 * vim: noexpandtab sw=8 ts=8 sts=0:
 *
 * dlmast.c
 *
 * AST and BAST functionality for local and remote nodes
 *
 * Copyright (C) 2004 Oracle.  All rights reserved.
 */
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/types.h>
#include <linux/highmem.h>
#include <linux/init.h>
#include <linux/sysctl.h>
#include <linux/random.h>
#include <linux/blkdev.h>
#include <linux/socket.h>
#include <linux/inet.h>
#include <linux/spinlock.h>

#include "../cluster/heartbeat.h"
#include "../cluster/nodemanager.h"
#include "../cluster/tcp.h"

#include "dlmapi.h"
#include "dlmcommon.h"

#define MLOG_MASK_PREFIX ML_DLM
#include "../cluster/masklog.h"

static void dlm_update_lvb(struct dlm_ctxt *dlm, struct dlm_lock_resource *res,
			   struct dlm_lock *lock);
static int dlm_should_cancel_bast(struct dlm_ctxt *dlm, struct dlm_lock *lock);
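
/*
 * In this DLM an "ast" (asynchronous system trap) is the callback run
 * when a lock request or convert is granted, and a "bast" (blocking
 * ast) is the callback warning a lock holder that it is blocking
 * another request.  The functions below queue, cancel and deliver
 * both kinds, for locks held locally and on remote nodes.
 */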
/* Should be called as an ast gets queued to see if the new
 * lock level will obsolete a pending bast.
 * For example, if dlm_thread queued a bast for an EX lock that
 * was blocking another EX, but before sending the bast the
 * lock owner downconverted to NL, the bast is now obsolete.
 * Only the ast should be sent.
 * This is needed because the lock and convert paths can queue
 * asts out-of-band (not waiting for dlm_thread) in order to
 * allow for LKM_NOQUEUE to get immediate responses. */
static int dlm_should_cancel_bast(struct dlm_ctxt *dlm, struct dlm_lock *lock)
{
	assert_spin_locked(&dlm->ast_lock);
	assert_spin_locked(&lock->spinlock);

	if (lock->ml.highest_blocked == LKM_IVMODE)
		return 0;
	BUG_ON(lock->ml.highest_blocked == LKM_NLMODE);

	if (lock->bast_pending &&
	    list_empty(&lock->bast_list))
		/* old bast already sent, ok */
		return 0;

	if (lock->ml.type == LKM_EXMODE)
		/* EX blocks anything left, any bast still valid */
		return 0;
	else if (lock->ml.type == LKM_NLMODE)
		/* NL blocks nothing, no reason to send any bast, cancel it */
		return 1;
	else if (lock->ml.highest_blocked != LKM_EXMODE)
		/* PR only blocks EX */
		return 1;

	return 0;
}
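
/* Queue a lock onto the domain's pending AST list so dlm_thread can
 * deliver the callback.  The caller must hold dlm->ast_lock, and the
 * lock must not already be queued for an AST.  A reference is taken
 * on the lock for as long as it sits on the list; any now-obsolete
 * pending BAST for the same lock is cancelled here. */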
void __dlm_queue_ast(struct dlm_ctxt *dlm, struct dlm_lock *lock)
{
	struct dlm_lock_resource *res;

	BUG_ON(!dlm);
	BUG_ON(!lock);

	res = lock->lockres;

	assert_spin_locked(&dlm->ast_lock);

	if (!list_empty(&lock->ast_list)) {
		mlog(ML_ERROR, "%s: res %.*s, lock %u:%llu, "
		     "AST list not empty, pending %d, newlevel %d\n",
		     dlm->name, res->lockname.len, res->lockname.name,
		     dlm_get_lock_cookie_node(be64_to_cpu(lock->ml.cookie)),
		     dlm_get_lock_cookie_seq(be64_to_cpu(lock->ml.cookie)),
		     lock->ast_pending, lock->ml.type);
		BUG();
	}
	if (lock->ast_pending)
		mlog(0, "%s: res %.*s, lock %u:%llu, AST getting flushed\n",
		     dlm->name, res->lockname.len, res->lockname.name,
		     dlm_get_lock_cookie_node(be64_to_cpu(lock->ml.cookie)),
		     dlm_get_lock_cookie_seq(be64_to_cpu(lock->ml.cookie)));

	/* putting lock on list, add a ref */
	dlm_lock_get(lock);
	spin_lock(&lock->spinlock);

	/* check to see if this ast obsoletes the bast */
	if (dlm_should_cancel_bast(dlm, lock)) {
		mlog(0, "%s: res %.*s, lock %u:%llu, Cancelling BAST\n",
		     dlm->name, res->lockname.len, res->lockname.name,
		     dlm_get_lock_cookie_node(be64_to_cpu(lock->ml.cookie)),
		     dlm_get_lock_cookie_seq(be64_to_cpu(lock->ml.cookie)));
		lock->bast_pending = 0;
		list_del_init(&lock->bast_list);
		lock->ml.highest_blocked = LKM_IVMODE;
		/* removing lock from list, remove a ref.  guaranteed
		 * this won't be the last ref because of the get above,
		 * so res->spinlock will not be taken here */
		dlm_lock_put(lock);
		/* free up the reserved bast that we are cancelling.
		 * guaranteed that this will not be the last reserved
		 * ast because *both* an ast and a bast were reserved
		 * to get to this point.  the res->spinlock will not be
		 * taken here */
		dlm_lockres_release_ast(dlm, res);
	}
	list_add_tail(&lock->ast_list, &dlm->pending_asts);
	lock->ast_pending = 1;
	spin_unlock(&lock->spinlock);
}
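
/* Locking wrapper around __dlm_queue_ast() for callers that do not
 * already hold dlm->ast_lock. */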
void dlm_queue_ast(struct dlm_ctxt *dlm, struct dlm_lock *lock)
{
	BUG_ON(!dlm);
	BUG_ON(!lock);

	spin_lock(&dlm->ast_lock);
	__dlm_queue_ast(dlm, lock);
	spin_unlock(&dlm->ast_lock);
}
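
/* Queue a lock onto the domain's pending BAST list for dlm_thread to
 * deliver.  The caller must hold dlm->ast_lock.  As with the AST
 * path, a reference is taken on the lock while it is on the list;
 * unlike that path, there is no obsolescence check to perform. */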
void __dlm_queue_bast(struct dlm_ctxt *dlm, struct dlm_lock *lock)
{
	struct dlm_lock_resource *res;

	BUG_ON(!dlm);
	BUG_ON(!lock);

	assert_spin_locked(&dlm->ast_lock);

	res = lock->lockres;

	BUG_ON(!list_empty(&lock->bast_list));
	if (lock->bast_pending)
		mlog(0, "%s: res %.*s, lock %u:%llu, BAST getting flushed\n",
		     dlm->name, res->lockname.len, res->lockname.name,
		     dlm_get_lock_cookie_node(be64_to_cpu(lock->ml.cookie)),
		     dlm_get_lock_cookie_seq(be64_to_cpu(lock->ml.cookie)));

	/* putting lock on list, add a ref */
	dlm_lock_get(lock);
	spin_lock(&lock->spinlock);
	list_add_tail(&lock->bast_list, &dlm->pending_basts);
	lock->bast_pending = 1;
	spin_unlock(&lock->spinlock);
}
void dlm_queue_bast(struct dlm_ctxt *dlm, struct dlm_lock *lock)
{
	BUG_ON(!dlm);
	BUG_ON(!lock);

	spin_lock(&dlm->ast_lock);
	__dlm_queue_bast(dlm, lock);
	spin_unlock(&dlm->ast_lock);
}
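
/* Copy the lock value block (LVB) from the lockres into the caller's
 * lksb when a get was requested.  Only the node that masters the
 * lockres holds an authoritative LVB, so non-masters skip the copy.
 * Both the get and put flags are cleared from the lksb on the way
 * out. */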
static void dlm_update_lvb(struct dlm_ctxt *dlm, struct dlm_lock_resource *res,
			   struct dlm_lock *lock)
{
	struct dlm_lockstatus *lksb = lock->lksb;
	BUG_ON(!lksb);

	/* only updates if this node masters the lockres */
	spin_lock(&res->spinlock);
	if (res->owner == dlm->node_num) {
		/* check the lksb flags for the direction */
		if (lksb->flags & DLM_LKSB_GET_LVB) {
			mlog(0, "getting lvb from lockres for %s node\n",
			     lock->ml.node == dlm->node_num ? "master" :
			     "remote");
			memcpy(lksb->lvb, res->lvb, DLM_LVB_LEN);
		}
		/* Do nothing for lvb put requests - they should be done in
		 * place when the lock is downconverted - otherwise we risk
		 * racing gets and puts which could result in old lvb data
		 * being propagated.  We leave the put flag set and clear it
		 * here.  In the future we might want to clear it at the time
		 * the put is actually done. */
	}
	spin_unlock(&res->spinlock);

	/* reset any lvb flags on the lksb */
	lksb->flags &= ~(DLM_LKSB_PUT_LVB|DLM_LKSB_GET_LVB);
}
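
/* Deliver a granted-lock callback for a lock owned by this node:
 * refresh the LVB if one was requested, then invoke the ast function
 * registered when the lock was taken. */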
void dlm_do_local_ast(struct dlm_ctxt *dlm, struct dlm_lock_resource *res,
		      struct dlm_lock *lock)
{
	dlm_astlockfunc_t *fn;

	mlog(0, "%s: res %.*s, lock %u:%llu, Local AST\n", dlm->name,
	     res->lockname.len, res->lockname.name,
	     dlm_get_lock_cookie_node(be64_to_cpu(lock->ml.cookie)),
	     dlm_get_lock_cookie_seq(be64_to_cpu(lock->ml.cookie)));

	fn = lock->ast;
	BUG_ON(lock->ml.node != dlm->node_num);

	dlm_update_lvb(dlm, res, lock);
	(*fn)(lock->astdata);
}
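
/* The granted lock lives on another node: update the LVB while this
 * node (the lockres master) still holds it, then ship the AST to the
 * owning node as a proxy message. */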
int dlm_do_remote_ast(struct dlm_ctxt *dlm, struct dlm_lock_resource *res,
		      struct dlm_lock *lock)
{
	int ret;
	struct dlm_lockstatus *lksb;
	int lksbflags;

	mlog(0, "%s: res %.*s, lock %u:%llu, Remote AST\n", dlm->name,
	     res->lockname.len, res->lockname.name,
	     dlm_get_lock_cookie_node(be64_to_cpu(lock->ml.cookie)),
	     dlm_get_lock_cookie_seq(be64_to_cpu(lock->ml.cookie)));

	lksb = lock->lksb;
	BUG_ON(lock->ml.node == dlm->node_num);

	lksbflags = lksb->flags;
	dlm_update_lvb(dlm, res, lock);

	/* lock request came from another node
	 * go do the ast over there */
	ret = dlm_send_proxy_ast(dlm, res, lock, lksbflags);
	return ret;
}
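
/* Deliver a blocking callback for a lock owned by this node, telling
 * its holder which mode (blocked_type) it is preventing another
 * request from acquiring. */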
void dlm_do_local_bast(struct dlm_ctxt *dlm, struct dlm_lock_resource *res,
		       struct dlm_lock *lock, int blocked_type)
{
	dlm_bastlockfunc_t *fn = lock->bast;

	BUG_ON(lock->ml.node != dlm->node_num);

	mlog(0, "%s: res %.*s, lock %u:%llu, Local BAST, blocked %d\n",
	     dlm->name, res->lockname.len, res->lockname.name,
	     dlm_get_lock_cookie_node(be64_to_cpu(lock->ml.cookie)),
	     dlm_get_lock_cookie_seq(be64_to_cpu(lock->ml.cookie)),
	     blocked_type);

	(*fn)(lock->astdata, blocked_type);
}
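
/* o2net handler invoked for incoming DLM_PROXY_AST_MSG messages
 * (registration happens elsewhere in the domain setup code).  The
 * lockres master sends these so the node actually holding the lock
 * can run its local callback.  The handler validates the message,
 * finds the lock by cookie on the converting/blocked/granted queues,
 * and dispatches to the local ast/bast delivery paths above. */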
int dlm_proxy_ast_handler(struct o2net_msg *msg, u32 len, void *data,
			  void **ret_data)
{
	int ret;
	unsigned int locklen;
	struct dlm_ctxt *dlm = data;
	struct dlm_lock_resource *res = NULL;
	struct dlm_lock *lock = NULL;
	struct dlm_proxy_ast *past = (struct dlm_proxy_ast *) msg->buf;
	char *name;
	struct list_head *head = NULL;
	__be64 cookie;
	u32 flags;
	u8 node;

	if (!dlm_grab(dlm)) {
		dlm_error(DLM_REJECTED);
		return DLM_REJECTED;
	}

	mlog_bug_on_msg(!dlm_domain_fully_joined(dlm),
			"Domain %s not fully joined!\n", dlm->name);

	name = past->name;
	locklen = past->namelen;
	cookie = past->cookie;
	flags = be32_to_cpu(past->flags);
	node = past->node_idx;

	if (locklen > DLM_LOCKID_NAME_MAX) {
		ret = DLM_IVBUFLEN;
		mlog(ML_ERROR, "Invalid name length (%d) in proxy ast "
		     "handler!\n", locklen);
		goto leave;
	}

	if ((flags & (LKM_PUT_LVB|LKM_GET_LVB)) ==
	    (LKM_PUT_LVB|LKM_GET_LVB)) {
		mlog(ML_ERROR, "Both PUT and GET lvb specified, (0x%x)\n",
		     flags);
		ret = DLM_BADARGS;
		goto leave;
	}

	mlog(0, "lvb: %s\n", flags & LKM_PUT_LVB ? "put lvb" :
	     (flags & LKM_GET_LVB ? "get lvb" : "none"));

	mlog(0, "type=%d, blocked_type=%d\n", past->type, past->blocked_type);

	if (past->type != DLM_AST &&
	    past->type != DLM_BAST) {
		mlog(ML_ERROR, "Unknown ast type! %d, cookie=%u:%llu, "
		     "name=%.*s, node=%u\n", past->type,
		     dlm_get_lock_cookie_node(be64_to_cpu(cookie)),
		     dlm_get_lock_cookie_seq(be64_to_cpu(cookie)),
		     locklen, name, node);
		ret = DLM_IVLOCKID;
		goto leave;
	}

	res = dlm_lookup_lockres(dlm, name, locklen);
	if (!res) {
		mlog(0, "Got %sast for unknown lockres! cookie=%u:%llu, "
		     "name=%.*s, node=%u\n", (past->type == DLM_AST ? "" : "b"),
		     dlm_get_lock_cookie_node(be64_to_cpu(cookie)),
		     dlm_get_lock_cookie_seq(be64_to_cpu(cookie)),
		     locklen, name, node);
		ret = DLM_IVLOCKID;
		goto leave;
	}
	/* cannot get a proxy ast message if this node owns it */
	BUG_ON(res->owner == dlm->node_num);

	mlog(0, "%s: res %.*s\n", dlm->name, res->lockname.len,
	     res->lockname.name);

	spin_lock(&res->spinlock);
	if (res->state & DLM_LOCK_RES_RECOVERING) {
		mlog(0, "Responding with DLM_RECOVERING!\n");
		ret = DLM_RECOVERING;
		goto unlock_out;
	}
	if (res->state & DLM_LOCK_RES_MIGRATING) {
		mlog(0, "Responding with DLM_MIGRATING!\n");
		ret = DLM_MIGRATING;
		goto unlock_out;
	}

	/* try convert queue for both ast/bast */
	head = &res->converting;
	lock = NULL;
	list_for_each_entry(lock, head, list) {
		if (lock->ml.cookie == cookie)
			goto do_ast;
	}

	/* if not on convert, try blocked for ast, granted for bast */
	if (past->type == DLM_AST)
		head = &res->blocked;
	else
		head = &res->granted;

	list_for_each_entry(lock, head, list) {
		/* if lock is found but unlock is pending ignore the bast */
		if (lock->ml.cookie == cookie) {
			if (lock->unlock_pending)
				break;
			goto do_ast;
		}
	}

	mlog(0, "Got %sast for unknown lock! cookie=%u:%llu, name=%.*s, "
	     "node=%u\n", past->type == DLM_AST ? "" : "b",
	     dlm_get_lock_cookie_node(be64_to_cpu(cookie)),
	     dlm_get_lock_cookie_seq(be64_to_cpu(cookie)),
	     locklen, name, node);

	ret = DLM_NORMAL;
unlock_out:
	spin_unlock(&res->spinlock);
	goto leave;
do_ast:
	ret = DLM_NORMAL;
	if (past->type == DLM_AST) {
		/* do not alter lock refcount.  switching lists. */
		list_move_tail(&lock->list, &res->granted);
		mlog(0, "%s: res %.*s, lock %u:%llu, Granted type %d => %d\n",
		     dlm->name, res->lockname.len, res->lockname.name,
		     dlm_get_lock_cookie_node(be64_to_cpu(cookie)),
		     dlm_get_lock_cookie_seq(be64_to_cpu(cookie)),
		     lock->ml.type, lock->ml.convert_type);

		if (lock->ml.convert_type != LKM_IVMODE) {
			lock->ml.type = lock->ml.convert_type;
			lock->ml.convert_type = LKM_IVMODE;
		} else {
			// should already be there....
		}

		lock->lksb->status = DLM_NORMAL;

		/* if we requested the lvb, fetch it into our lksb now */
		if (flags & LKM_GET_LVB) {
			BUG_ON(!(lock->lksb->flags & DLM_LKSB_GET_LVB));
			memcpy(lock->lksb->lvb, past->lvb, DLM_LVB_LEN);
		}
	}
	spin_unlock(&res->spinlock);

	if (past->type == DLM_AST)
		dlm_do_local_ast(dlm, res, lock);
	else
		dlm_do_local_bast(dlm, res, lock, past->blocked_type);

leave:
	if (res)
		dlm_lockres_put(res);

	dlm_put(dlm);
	return ret;
}
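
/* Master-side counterpart of dlm_proxy_ast_handler(): build a
 * struct dlm_proxy_ast describing the AST or BAST, append the LVB
 * payload as a second kvec when the requester asked for it, and send
 * the message to the lock's owner over o2net. */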
int dlm_send_proxy_ast_msg(struct dlm_ctxt *dlm, struct dlm_lock_resource *res,
			   struct dlm_lock *lock, int msg_type,
			   int blocked_type, int flags)
{
	int ret = 0;
	struct dlm_proxy_ast past;
	struct kvec vec[2];
	size_t veclen = 1;
	int status;

	mlog(0, "%s: res %.*s, to %u, type %d, blocked_type %d\n", dlm->name,
	     res->lockname.len, res->lockname.name, lock->ml.node, msg_type,
	     blocked_type);

	memset(&past, 0, sizeof(struct dlm_proxy_ast));
	past.node_idx = dlm->node_num;
	past.type = msg_type;
	past.blocked_type = blocked_type;
	past.namelen = res->lockname.len;
	memcpy(past.name, res->lockname.name, past.namelen);
	past.cookie = lock->ml.cookie;

	vec[0].iov_len = sizeof(struct dlm_proxy_ast);
	vec[0].iov_base = &past;
	if (flags & DLM_LKSB_GET_LVB) {
		be32_add_cpu(&past.flags, LKM_GET_LVB);
		vec[1].iov_len = DLM_LVB_LEN;
		vec[1].iov_base = lock->lksb->lvb;
		veclen++;
	}

	ret = o2net_send_message_vec(DLM_PROXY_AST_MSG, dlm->key, vec, veclen,
				     lock->ml.node, &status);
	if (ret < 0)
		mlog(ML_ERROR, "%s: res %.*s, error %d send AST to node %u\n",
		     dlm->name, res->lockname.len, res->lockname.name, ret,
		     lock->ml.node);
	else {
		if (status == DLM_RECOVERING) {
			mlog(ML_ERROR, "sent AST to node %u, it thinks this "
			     "node is dead!\n", lock->ml.node);
			BUG();
		} else if (status == DLM_MIGRATING) {
			mlog(ML_ERROR, "sent AST to node %u, it returned "
			     "DLM_MIGRATING!\n", lock->ml.node);
			BUG();
		} else if (status != DLM_NORMAL && status != DLM_IVLOCKID) {
			mlog(ML_ERROR, "AST to node %u returned %d!\n",
			     lock->ml.node, status);
			/* ignore it */
		}
		ret = 0;
	}
	return ret;
}