fs/ocfs2/dlm/dlmast.c
/* -*- mode: c; c-basic-offset: 8; -*-
 * vim: noexpandtab sw=8 ts=8 sts=0:
 *
 * dlmast.c
 *
 * AST and BAST functionality for local and remote nodes
 *
 * Copyright (C) 2004 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 021110-1307, USA.
 */

#include <linux/module.h>
#include <linux/fs.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/highmem.h>
#include <linux/init.h>
#include <linux/sysctl.h>
#include <linux/random.h>
#include <linux/blkdev.h>
#include <linux/socket.h>
#include <linux/inet.h>
#include <linux/spinlock.h>

#include "cluster/heartbeat.h"
#include "cluster/nodemanager.h"
#include "cluster/tcp.h"

#include "dlmapi.h"
#include "dlmcommon.h"

#define MLOG_MASK_PREFIX ML_DLM
#include "cluster/masklog.h"

static void dlm_update_lvb(struct dlm_ctxt *dlm, struct dlm_lock_resource *res,
			   struct dlm_lock *lock);
static int dlm_should_cancel_bast(struct dlm_ctxt *dlm, struct dlm_lock *lock);

/* Should be called as an ast gets queued to see if the new
 * lock level will obsolete a pending bast.
 * For example, if dlm_thread queued a bast for an EX lock that
 * was blocking another EX, but before sending the bast the
 * lock owner downconverted to NL, the bast is now obsolete.
 * Only the ast should be sent.
 * This is needed because the lock and convert paths can queue
 * asts out-of-band (not waiting for dlm_thread) in order to
 * allow for LKM_NOQUEUE to get immediate responses. */
static int dlm_should_cancel_bast(struct dlm_ctxt *dlm, struct dlm_lock *lock)
{
	assert_spin_locked(&dlm->ast_lock);
	assert_spin_locked(&lock->spinlock);

	if (lock->ml.highest_blocked == LKM_IVMODE)
		return 0;
	BUG_ON(lock->ml.highest_blocked == LKM_NLMODE);

	if (lock->bast_pending &&
	    list_empty(&lock->bast_list))
		/* old bast already sent, ok */
		return 0;

	if (lock->ml.type == LKM_EXMODE)
		/* EX blocks anything left, any bast still valid */
		return 0;
	else if (lock->ml.type == LKM_NLMODE)
		/* NL blocks nothing, no reason to send any bast, cancel it */
		return 1;
	else if (lock->ml.highest_blocked != LKM_EXMODE)
		/* PR only blocks EX */
		return 1;

	return 0;
}

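/* Queue an ast for this lock on dlm->pending_asts.  The caller must
 * hold dlm->ast_lock.  An extra ref is taken on the lock while it sits
 * on the list, and any pending bast made obsolete by the new lock
 * level is cancelled (see dlm_should_cancel_bast above). */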
static void __dlm_queue_ast(struct dlm_ctxt *dlm, struct dlm_lock *lock)
{
	mlog_entry_void();

	BUG_ON(!dlm);
	BUG_ON(!lock);

	assert_spin_locked(&dlm->ast_lock);
	if (!list_empty(&lock->ast_list)) {
		mlog(ML_ERROR, "ast list not empty!!  pending=%d, newlevel=%d\n",
		     lock->ast_pending, lock->ml.type);
		BUG();
	}
	if (lock->ast_pending)
		mlog(0, "lock has an ast getting flushed right now\n");

	/* putting lock on list, add a ref */
	dlm_lock_get(lock);
	spin_lock(&lock->spinlock);

	/* check to see if this ast obsoletes the bast */
	if (dlm_should_cancel_bast(dlm, lock)) {
		struct dlm_lock_resource *res = lock->lockres;
		mlog(0, "%s: cancelling bast for %.*s\n",
		     dlm->name, res->lockname.len, res->lockname.name);
		lock->bast_pending = 0;
		list_del_init(&lock->bast_list);
		lock->ml.highest_blocked = LKM_IVMODE;
		/* removing lock from list, remove a ref.  guaranteed
		 * this won't be the last ref because of the get above,
		 * so res->spinlock will not be taken here */
		dlm_lock_put(lock);
		/* free up the reserved bast that we are cancelling.
		 * guaranteed that this will not be the last reserved
		 * ast because *both* an ast and a bast were reserved
		 * to get to this point.  the res->spinlock will not be
		 * taken here */
		dlm_lockres_release_ast(dlm, res);
	}
	list_add_tail(&lock->ast_list, &dlm->pending_asts);
	lock->ast_pending = 1;
	spin_unlock(&lock->spinlock);
}

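/* Locking wrapper around __dlm_queue_ast() for callers that do not
 * already hold dlm->ast_lock. */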
void dlm_queue_ast(struct dlm_ctxt *dlm, struct dlm_lock *lock)
{
	mlog_entry_void();

	BUG_ON(!dlm);
	BUG_ON(!lock);

	spin_lock(&dlm->ast_lock);
	__dlm_queue_ast(dlm, lock);
	spin_unlock(&dlm->ast_lock);
}

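/* Queue a bast for this lock on dlm->pending_basts.  The caller must
 * hold dlm->ast_lock.  An extra ref is taken on the lock while it sits
 * on the list. */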
static void __dlm_queue_bast(struct dlm_ctxt *dlm, struct dlm_lock *lock)
{
	mlog_entry_void();

	BUG_ON(!dlm);
	BUG_ON(!lock);
	assert_spin_locked(&dlm->ast_lock);

	BUG_ON(!list_empty(&lock->bast_list));
	if (lock->bast_pending)
		mlog(0, "lock has a bast getting flushed right now\n");

	/* putting lock on list, add a ref */
	dlm_lock_get(lock);
	spin_lock(&lock->spinlock);
	list_add_tail(&lock->bast_list, &dlm->pending_basts);
	lock->bast_pending = 1;
	spin_unlock(&lock->spinlock);
}

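/* Locking wrapper around __dlm_queue_bast() for callers that do not
 * already hold dlm->ast_lock. */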
void dlm_queue_bast(struct dlm_ctxt *dlm, struct dlm_lock *lock)
{
	mlog_entry_void();

	BUG_ON(!dlm);
	BUG_ON(!lock);

	spin_lock(&dlm->ast_lock);
	__dlm_queue_bast(dlm, lock);
	spin_unlock(&dlm->ast_lock);
}

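/* If this node masters the lockres and the lksb asked for the lvb
 * (DLM_LKSB_GET_LVB), copy the lvb from the lockres into the lksb,
 * then clear both lvb direction flags on the lksb. */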
static void dlm_update_lvb(struct dlm_ctxt *dlm, struct dlm_lock_resource *res,
			   struct dlm_lock *lock)
{
	struct dlm_lockstatus *lksb = lock->lksb;
	BUG_ON(!lksb);

	/* only updates if this node masters the lockres */
	if (res->owner == dlm->node_num) {

		spin_lock(&res->spinlock);
		/* check the lksb flags for the direction */
		if (lksb->flags & DLM_LKSB_GET_LVB) {
			mlog(0, "getting lvb from lockres for %s node\n",
			     lock->ml.node == dlm->node_num ? "master" :
			     "remote");
			memcpy(lksb->lvb, res->lvb, DLM_LVB_LEN);
		}
		/* Do nothing for lvb put requests - they should be done in
		 * place when the lock is downconverted - otherwise we risk
		 * racing gets and puts which could result in old lvb data
		 * being propagated.  We leave the put flag set and clear it
		 * here.  In the future we might want to clear it at the time
		 * the put is actually done.
		 */
		spin_unlock(&res->spinlock);
	}

	/* reset any lvb flags on the lksb */
	lksb->flags &= ~(DLM_LKSB_PUT_LVB|DLM_LKSB_GET_LVB);
}

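/* Deliver an ast for a lock held by this node: refresh the lvb if it
 * was requested, then call the lock's ast callback. */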
void dlm_do_local_ast(struct dlm_ctxt *dlm, struct dlm_lock_resource *res,
		      struct dlm_lock *lock)
{
	dlm_astlockfunc_t *fn;
	struct dlm_lockstatus *lksb;

	mlog_entry_void();

	lksb = lock->lksb;
	fn = lock->ast;
	BUG_ON(lock->ml.node != dlm->node_num);

	dlm_update_lvb(dlm, res, lock);
	(*fn)(lock->astdata);
}

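/* The lock belongs to another node: update the lvb locally (a no-op
 * unless this node masters the lockres), then forward the ast to the
 * lock's node as a proxy ast message. */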
int dlm_do_remote_ast(struct dlm_ctxt *dlm, struct dlm_lock_resource *res,
		      struct dlm_lock *lock)
{
	int ret;
	struct dlm_lockstatus *lksb;
	int lksbflags;

	mlog_entry_void();

	lksb = lock->lksb;
	BUG_ON(lock->ml.node == dlm->node_num);

	lksbflags = lksb->flags;
	dlm_update_lvb(dlm, res, lock);

	/* lock request came from another node
	 * go do the ast over there */
	ret = dlm_send_proxy_ast(dlm, res, lock, lksbflags);
	return ret;
}

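/* Deliver a bast for a lock held by this node by calling its bast
 * callback with the blocked type. */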
void dlm_do_local_bast(struct dlm_ctxt *dlm, struct dlm_lock_resource *res,
		       struct dlm_lock *lock, int blocked_type)
{
	dlm_bastlockfunc_t *fn = lock->bast;

	mlog_entry_void();
	BUG_ON(lock->ml.node != dlm->node_num);

	(*fn)(lock->astdata, blocked_type);
}

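/* Handler for proxy ast/bast messages (DLM_PROXY_AST_MSG) sent by the
 * lockres master.  Validates the message, looks the lock up by cookie
 * on the converting, blocked or granted queues, moves it to the
 * granted list for an ast, and fires the local ast or bast callback. */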
int dlm_proxy_ast_handler(struct o2net_msg *msg, u32 len, void *data,
			  void **ret_data)
{
	int ret;
	unsigned int locklen;
	struct dlm_ctxt *dlm = data;
	struct dlm_lock_resource *res = NULL;
	struct dlm_lock *lock = NULL;
	struct dlm_proxy_ast *past = (struct dlm_proxy_ast *) msg->buf;
	char *name;
	struct list_head *iter, *head = NULL;
	u64 cookie;
	u32 flags;
	u8 node;

	if (!dlm_grab(dlm)) {
		dlm_error(DLM_REJECTED);
		return DLM_REJECTED;
	}

	mlog_bug_on_msg(!dlm_domain_fully_joined(dlm),
			"Domain %s not fully joined!\n", dlm->name);

	name = past->name;
	locklen = past->namelen;
	cookie = past->cookie;
	flags = be32_to_cpu(past->flags);
	node = past->node_idx;

	if (locklen > DLM_LOCKID_NAME_MAX) {
		ret = DLM_IVBUFLEN;
		mlog(ML_ERROR, "Invalid name length (%d) in proxy ast "
		     "handler!\n", locklen);
		goto leave;
	}

	if ((flags & (LKM_PUT_LVB|LKM_GET_LVB)) ==
	    (LKM_PUT_LVB|LKM_GET_LVB)) {
		mlog(ML_ERROR, "Both PUT and GET lvb specified, (0x%x)\n",
		     flags);
		ret = DLM_BADARGS;
		goto leave;
	}

	mlog(0, "lvb: %s\n", flags & LKM_PUT_LVB ? "put lvb" :
	     (flags & LKM_GET_LVB ? "get lvb" : "none"));

	mlog(0, "type=%d, blocked_type=%d\n", past->type, past->blocked_type);

	if (past->type != DLM_AST &&
	    past->type != DLM_BAST) {
		mlog(ML_ERROR, "Unknown ast type! %d, cookie=%u:%llu, "
		     "name=%.*s, node=%u\n", past->type,
		     dlm_get_lock_cookie_node(be64_to_cpu(cookie)),
		     dlm_get_lock_cookie_seq(be64_to_cpu(cookie)),
		     locklen, name, node);
		ret = DLM_IVLOCKID;
		goto leave;
	}

	res = dlm_lookup_lockres(dlm, name, locklen);
	if (!res) {
		mlog(0, "Got %sast for unknown lockres! cookie=%u:%llu, "
		     "name=%.*s, node=%u\n", (past->type == DLM_AST ? "" : "b"),
		     dlm_get_lock_cookie_node(be64_to_cpu(cookie)),
		     dlm_get_lock_cookie_seq(be64_to_cpu(cookie)),
		     locklen, name, node);
		ret = DLM_IVLOCKID;
		goto leave;
	}

	/* cannot get a proxy ast message if this node owns it */
	BUG_ON(res->owner == dlm->node_num);

	mlog(0, "lockres %.*s\n", res->lockname.len, res->lockname.name);

	spin_lock(&res->spinlock);
	if (res->state & DLM_LOCK_RES_RECOVERING) {
		mlog(0, "Responding with DLM_RECOVERING!\n");
		ret = DLM_RECOVERING;
		goto unlock_out;
	}
	if (res->state & DLM_LOCK_RES_MIGRATING) {
		mlog(0, "Responding with DLM_MIGRATING!\n");
		ret = DLM_MIGRATING;
		goto unlock_out;
	}
	/* try convert queue for both ast/bast */
	head = &res->converting;
	lock = NULL;
	list_for_each(iter, head) {
		lock = list_entry(iter, struct dlm_lock, list);
		if (lock->ml.cookie == cookie)
			goto do_ast;
	}

	/* if not on convert, try blocked for ast, granted for bast */
	if (past->type == DLM_AST)
		head = &res->blocked;
	else
		head = &res->granted;

	list_for_each(iter, head) {
		lock = list_entry(iter, struct dlm_lock, list);
		if (lock->ml.cookie == cookie)
			goto do_ast;
	}

	mlog(0, "Got %sast for unknown lock! cookie=%u:%llu, name=%.*s, "
	     "node=%u\n", past->type == DLM_AST ? "" : "b",
	     dlm_get_lock_cookie_node(be64_to_cpu(cookie)),
	     dlm_get_lock_cookie_seq(be64_to_cpu(cookie)),
	     locklen, name, node);

	ret = DLM_NORMAL;
unlock_out:
	spin_unlock(&res->spinlock);
	goto leave;

do_ast:
	ret = DLM_NORMAL;
	if (past->type == DLM_AST) {
		/* do not alter lock refcount.  switching lists. */
		list_move_tail(&lock->list, &res->granted);
		mlog(0, "ast: Adding to granted list... type=%d, "
		     "convert_type=%d\n", lock->ml.type, lock->ml.convert_type);
		if (lock->ml.convert_type != LKM_IVMODE) {
			lock->ml.type = lock->ml.convert_type;
			lock->ml.convert_type = LKM_IVMODE;
		} else {
			// should already be there....
		}

		lock->lksb->status = DLM_NORMAL;

		/* if we requested the lvb, fetch it into our lksb now */
		if (flags & LKM_GET_LVB) {
			BUG_ON(!(lock->lksb->flags & DLM_LKSB_GET_LVB));
			memcpy(lock->lksb->lvb, past->lvb, DLM_LVB_LEN);
		}
	}
	spin_unlock(&res->spinlock);

	if (past->type == DLM_AST)
		dlm_do_local_ast(dlm, res, lock);
	else
		dlm_do_local_bast(dlm, res, lock, past->blocked_type);

leave:
	if (res)
		dlm_lockres_put(res);

	dlm_put(dlm);
	return ret;
}

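/* Send a proxy ast/bast message to the node holding the lock,
 * appending the lvb from the lksb when DLM_LKSB_GET_LVB is set in
 * flags.  BUGs if the remote node reports DLM_RECOVERING (it thinks
 * this node is dead) or DLM_MIGRATING. */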
int dlm_send_proxy_ast_msg(struct dlm_ctxt *dlm, struct dlm_lock_resource *res,
			   struct dlm_lock *lock, int msg_type,
			   int blocked_type, int flags)
{
	int ret = 0;
	struct dlm_proxy_ast past;
	struct kvec vec[2];
	size_t veclen = 1;
	int status;

	mlog_entry("res %.*s, to=%u, type=%d, blocked_type=%d\n",
		   res->lockname.len, res->lockname.name, lock->ml.node,
		   msg_type, blocked_type);

	memset(&past, 0, sizeof(struct dlm_proxy_ast));
	past.node_idx = dlm->node_num;
	past.type = msg_type;
	past.blocked_type = blocked_type;
	past.namelen = res->lockname.len;
	memcpy(past.name, res->lockname.name, past.namelen);
	past.cookie = lock->ml.cookie;

	vec[0].iov_len = sizeof(struct dlm_proxy_ast);
	vec[0].iov_base = &past;
	if (flags & DLM_LKSB_GET_LVB) {
		mlog(0, "returning requested LVB data\n");
		be32_add_cpu(&past.flags, LKM_GET_LVB);
		vec[1].iov_len = DLM_LVB_LEN;
		vec[1].iov_base = lock->lksb->lvb;
		veclen++;
	}

	ret = o2net_send_message_vec(DLM_PROXY_AST_MSG, dlm->key, vec, veclen,
				     lock->ml.node, &status);
	if (ret < 0)
		mlog_errno(ret);
	else {
		if (status == DLM_RECOVERING) {
			mlog(ML_ERROR, "sent AST to node %u, it thinks this "
			     "node is dead!\n", lock->ml.node);
			BUG();
		} else if (status == DLM_MIGRATING) {
			mlog(ML_ERROR, "sent AST to node %u, it returned "
			     "DLM_MIGRATING!\n", lock->ml.node);
			BUG();
		} else if (status != DLM_NORMAL && status != DLM_IVLOCKID) {
			mlog(ML_ERROR, "AST to node %u returned %d!\n",
			     lock->ml.node, status);
			/* ignore it */
		}
		ret = 0;
	}
	return ret;
}