/* -*- mode: c; c-basic-offset: 8; -*-
 * vim: noexpandtab sw=8 ts=8 sts=0:
 *
 * AST and BAST functionality for local and remote nodes
 *
 * Copyright (C) 2004 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/highmem.h>
#include <linux/utsname.h>
#include <linux/init.h>
#include <linux/sysctl.h>
#include <linux/random.h>
#include <linux/blkdev.h>
#include <linux/socket.h>
#include <linux/inet.h>
#include <linux/spinlock.h>

#include "cluster/heartbeat.h"
#include "cluster/nodemanager.h"
#include "cluster/tcp.h"
#include "cluster/endian.h"

#include "dlmcommon.h"

#define MLOG_MASK_PREFIX ML_DLM
#include "cluster/masklog.h"

static void dlm_update_lvb(struct dlm_ctxt *dlm, struct dlm_lock_resource *res,
			   struct dlm_lock *lock);
static int dlm_should_cancel_bast(struct dlm_ctxt *dlm, struct dlm_lock *lock);

/* Should be called as an ast gets queued to see if the new
 * lock level will obsolete a pending bast.
 * For example, if dlm_thread queued a bast for an EX lock that
 * was blocking another EX, but before sending the bast the
 * lock owner downconverted to NL, the bast is now obsolete.
 * Only the ast should be sent.
 * This is needed because the lock and convert paths can queue
 * asts out-of-band (not waiting for dlm_thread) in order to
 * allow for LKM_NOQUEUE to get immediate responses. */
static int dlm_should_cancel_bast(struct dlm_ctxt *dlm, struct dlm_lock *lock)
{
	assert_spin_locked(&dlm->ast_lock);
	assert_spin_locked(&lock->spinlock);

	if (lock->ml.highest_blocked == LKM_IVMODE)
		return 0;
	BUG_ON(lock->ml.highest_blocked == LKM_NLMODE);

	if (lock->bast_pending &&
	    list_empty(&lock->bast_list))
		/* old bast already sent, ok */
		return 0;

	if (lock->ml.type == LKM_EXMODE)
		/* EX blocks anything left, any bast still valid */
		return 0;
	else if (lock->ml.type == LKM_NLMODE)
		/* NL blocks nothing, no reason to send any bast, cancel it */
		return 1;
	else if (lock->ml.highest_blocked != LKM_EXMODE)
		/* PR only blocks EX */
		return 1;

	return 0;
}
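
/* Queue a pending ast for this lock on dlm->pending_asts so that
 * dlm_thread can deliver it.  If the new lock level makes a queued
 * bast obsolete, that bast is cancelled here first.  The caller must
 * hold dlm->ast_lock. */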
static void __dlm_queue_ast(struct dlm_ctxt *dlm, struct dlm_lock *lock)
{
	assert_spin_locked(&dlm->ast_lock);

	if (!list_empty(&lock->ast_list)) {
		mlog(ML_ERROR, "ast list not empty!! pending=%d, newlevel=%d\n",
		     lock->ast_pending, lock->ml.type);
		BUG();
	}
	BUG_ON(!list_empty(&lock->ast_list));
	if (lock->ast_pending)
		mlog(0, "lock has an ast getting flushed right now\n");

	/* putting lock on list, add a ref */
	dlm_lock_get(lock);
	spin_lock(&lock->spinlock);

	/* check to see if this ast obsoletes the bast */
	if (dlm_should_cancel_bast(dlm, lock)) {
		struct dlm_lock_resource *res = lock->lockres;
		mlog(0, "%s: cancelling bast for %.*s\n",
		     dlm->name, res->lockname.len, res->lockname.name);
		lock->bast_pending = 0;
		list_del_init(&lock->bast_list);
		lock->ml.highest_blocked = LKM_IVMODE;
		/* removing lock from list, remove a ref.  guaranteed
		 * this won't be the last ref because of the get above,
		 * so res->spinlock will not be taken here */
		dlm_lock_put(lock);
		/* free up the reserved bast that we are cancelling.
		 * guaranteed that this will not be the last reserved
		 * ast because *both* an ast and a bast were reserved
		 * to get to this point.  the res->spinlock will not be
		 * taken here */
		dlm_lockres_release_ast(dlm, res);
	}
	list_add_tail(&lock->ast_list, &dlm->pending_asts);
	lock->ast_pending = 1;
	spin_unlock(&lock->spinlock);
}
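
/* Takes dlm->ast_lock and queues the ast via __dlm_queue_ast(). */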
void dlm_queue_ast(struct dlm_ctxt *dlm, struct dlm_lock *lock)
{
	spin_lock(&dlm->ast_lock);
	__dlm_queue_ast(dlm, lock);
	spin_unlock(&dlm->ast_lock);
}
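
/* Queue a pending bast for this lock on dlm->pending_basts so that
 * dlm_thread can deliver it.  The caller must hold dlm->ast_lock. */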
static void __dlm_queue_bast(struct dlm_ctxt *dlm, struct dlm_lock *lock)
{
	assert_spin_locked(&dlm->ast_lock);

	BUG_ON(!list_empty(&lock->bast_list));
	if (lock->bast_pending)
		mlog(0, "lock has a bast getting flushed right now\n");

	/* putting lock on list, add a ref */
	dlm_lock_get(lock);
	spin_lock(&lock->spinlock);
	list_add_tail(&lock->bast_list, &dlm->pending_basts);
	lock->bast_pending = 1;
	spin_unlock(&lock->spinlock);
}
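
/* Takes dlm->ast_lock and queues the bast via __dlm_queue_bast(). */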
void dlm_queue_bast(struct dlm_ctxt *dlm, struct dlm_lock *lock)
{
	spin_lock(&dlm->ast_lock);
	__dlm_queue_bast(dlm, lock);
	spin_unlock(&dlm->ast_lock);
}
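
/* Copy the lvb between the lksb and the lockres, but only if this node
 * masters the lockres.  The lksb flags give the direction of the copy
 * and are cleared once the update is done. */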
static void dlm_update_lvb(struct dlm_ctxt *dlm, struct dlm_lock_resource *res,
			   struct dlm_lock *lock)
{
	struct dlm_lockstatus *lksb = lock->lksb;

	BUG_ON(!lksb);

	/* only updates if this node masters the lockres */
	if (res->owner == dlm->node_num) {
		spin_lock(&res->spinlock);
		/* check the lksb flags for the direction */
		if (lksb->flags & DLM_LKSB_GET_LVB) {
			mlog(0, "getting lvb from lockres for %s node\n",
			     lock->ml.node == dlm->node_num ? "master" :
			     "remote");
			memcpy(lksb->lvb, res->lvb, DLM_LVB_LEN);
		} else if (lksb->flags & DLM_LKSB_PUT_LVB) {
			mlog(0, "setting lvb from lockres for %s node\n",
			     lock->ml.node == dlm->node_num ? "master" :
			     "remote");
			memcpy(res->lvb, lksb->lvb, DLM_LVB_LEN);
		}
		spin_unlock(&res->spinlock);
	}

	/* reset any lvb flags on the lksb */
	lksb->flags &= ~(DLM_LKSB_PUT_LVB|DLM_LKSB_GET_LVB);
}
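
/* Deliver an ast for a lock held by this node: update the lvb, then
 * call the lock's ast callback directly. */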
void dlm_do_local_ast(struct dlm_ctxt *dlm, struct dlm_lock_resource *res,
		      struct dlm_lock *lock)
{
	dlm_astlockfunc_t *fn;
	struct dlm_lockstatus *lksb;

	lksb = lock->lksb;
	fn = lock->ast;
	BUG_ON(lock->ml.node != dlm->node_num);

	dlm_update_lvb(dlm, res, lock);
	(*fn)(lock->astdata);
}
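
/* Deliver an ast for a lock owned by another node: update the lvb on
 * this (master) node, then send a proxy ast message so the callback
 * runs on the lock's node.  Returns the status of the send. */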
int dlm_do_remote_ast(struct dlm_ctxt *dlm, struct dlm_lock_resource *res,
		      struct dlm_lock *lock)
{
	int ret;
	struct dlm_lockstatus *lksb;
	int lksbflags;

	lksb = lock->lksb;
	BUG_ON(lock->ml.node == dlm->node_num);

	lksbflags = lksb->flags;
	dlm_update_lvb(dlm, res, lock);

	/* lock request came from another node
	 * go do the ast over there */
	ret = dlm_send_proxy_ast(dlm, res, lock, lksbflags);
	return ret;
}
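
/* Deliver a bast for a lock held by this node by calling its bast
 * callback with the blocked type. */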
void dlm_do_local_bast(struct dlm_ctxt *dlm, struct dlm_lock_resource *res,
		       struct dlm_lock *lock, int blocked_type)
{
	dlm_bastlockfunc_t *fn = lock->bast;

	BUG_ON(lock->ml.node != dlm->node_num);
	BUG_ON(!fn);

	(*fn)(lock->astdata, blocked_type);
}
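
/* Network handler for DLM_PROXY_AST_MSG.  The lockres master sends this
 * message to the node that owns the lock; here we look the lock up by
 * cookie, move it to the granted list for an ast, and then run the
 * local ast or bast callback. */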
int dlm_proxy_ast_handler(struct o2net_msg *msg, u32 len, void *data)
{
	int ret;
	unsigned int locklen;
	struct dlm_ctxt *dlm = data;
	struct dlm_lock_resource *res = NULL;
	struct dlm_lock *lock = NULL;
	struct dlm_proxy_ast *past = (struct dlm_proxy_ast *) msg->buf;
	char *name;
	struct list_head *iter, *head = NULL;
	u64 cookie;
	u32 flags;

	if (!dlm_grab(dlm)) {
		dlm_error(DLM_REJECTED);
		return DLM_REJECTED;
	}

	mlog_bug_on_msg(!dlm_domain_fully_joined(dlm),
			"Domain %s not fully joined!\n", dlm->name);

	name = past->name;
	locklen = past->namelen;
	cookie = be64_to_cpu(past->cookie);
	flags = be32_to_cpu(past->flags);

	if (locklen > DLM_LOCKID_NAME_MAX) {
		ret = DLM_IVBUFLEN;
		mlog(ML_ERROR, "Invalid name length in proxy ast handler!\n");
		goto leave;
	}

	if ((flags & (LKM_PUT_LVB|LKM_GET_LVB)) ==
	    (LKM_PUT_LVB|LKM_GET_LVB)) {
		mlog(ML_ERROR, "both PUT and GET lvb specified\n");
		ret = DLM_BADARGS;
		goto leave;
	}

	mlog(0, "lvb: %s\n", flags & LKM_PUT_LVB ? "put lvb" :
	     (flags & LKM_GET_LVB ? "get lvb" : "none"));

	mlog(0, "type=%d, blocked_type=%d\n", past->type, past->blocked_type);

	if (past->type != DLM_AST &&
	    past->type != DLM_BAST) {
		mlog(ML_ERROR, "Unknown ast type! %d, cookie=%"MLFu64", "
		     "name=%.*s\n", past->type, cookie, locklen, name);
		ret = DLM_IVLOCKID;
		goto leave;
	}

	res = dlm_lookup_lockres(dlm, name, locklen);
	if (!res) {
		mlog(ML_ERROR, "got %sast for unknown lockres! "
		     "cookie=%"MLFu64", name=%.*s, namelen=%u\n",
		     past->type == DLM_AST ? "" : "b",
		     cookie, locklen, name, locklen);
		ret = DLM_IVLOCKID;
		goto leave;
	}

	/* cannot get a proxy ast message if this node owns it */
	BUG_ON(res->owner == dlm->node_num);

	mlog(0, "lockres %.*s\n", res->lockname.len, res->lockname.name);

	spin_lock(&res->spinlock);
	if (res->state & DLM_LOCK_RES_RECOVERING) {
		mlog(0, "responding with DLM_RECOVERING!\n");
		ret = DLM_RECOVERING;
		goto unlock_out;
	}
	if (res->state & DLM_LOCK_RES_MIGRATING) {
		mlog(0, "responding with DLM_MIGRATING!\n");
		ret = DLM_MIGRATING;
		goto unlock_out;
	}
	/* try convert queue for both ast/bast */
	head = &res->converting;
	list_for_each(iter, head) {
		lock = list_entry(iter, struct dlm_lock, list);
		if (be64_to_cpu(lock->ml.cookie) == cookie)
			goto do_ast;
	}

	/* if not on convert, try blocked for ast, granted for bast */
	if (past->type == DLM_AST)
		head = &res->blocked;
	else
		head = &res->granted;

	list_for_each(iter, head) {
		lock = list_entry(iter, struct dlm_lock, list);
		if (be64_to_cpu(lock->ml.cookie) == cookie)
			goto do_ast;
	}

	mlog(ML_ERROR, "got %sast for unknown lock! cookie=%"MLFu64", "
	     "name=%.*s, namelen=%u\n",
	     past->type == DLM_AST ? "" : "b", cookie, locklen, name, locklen);

	ret = DLM_NORMAL;
unlock_out:
	spin_unlock(&res->spinlock);
	goto leave;

do_ast:
	ret = DLM_NORMAL;
	if (past->type == DLM_AST) {
		/* do not alter lock refcount. switching lists. */
		list_del_init(&lock->list);
		list_add_tail(&lock->list, &res->granted);
		mlog(0, "ast: adding to granted list... type=%d, "
		     "convert_type=%d\n", lock->ml.type, lock->ml.convert_type);
		if (lock->ml.convert_type != LKM_IVMODE) {
			lock->ml.type = lock->ml.convert_type;
			lock->ml.convert_type = LKM_IVMODE;
		} else {
			// should already be there....
		}

		lock->lksb->status = DLM_NORMAL;

		/* if we requested the lvb, fetch it into our lksb now */
		if (flags & LKM_GET_LVB) {
			BUG_ON(!(lock->lksb->flags & DLM_LKSB_GET_LVB));
			memcpy(lock->lksb->lvb, past->lvb, DLM_LVB_LEN);
		}
	}
	spin_unlock(&res->spinlock);

	if (past->type == DLM_AST)
		dlm_do_local_ast(dlm, res, lock);
	else
		dlm_do_local_bast(dlm, res, lock, past->blocked_type);

leave:
	if (res)
		dlm_lockres_put(res);

	dlm_put(dlm);
	return ret;
}
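
/* Build and send a DLM_PROXY_AST_MSG to the node that owns the lock.
 * If the caller asked for the lvb, it is appended as a second kvec
 * element.  The remote handler's status is checked for recovery and
 * migration races. */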
int dlm_send_proxy_ast_msg(struct dlm_ctxt *dlm, struct dlm_lock_resource *res,
			   struct dlm_lock *lock, int msg_type,
			   int blocked_type, int flags)
{
	int ret = 0;
	struct dlm_proxy_ast past;
	struct kvec vec[2];
	size_t veclen = 1;
	int status;

	mlog_entry("res %.*s, to=%u, type=%d, blocked_type=%d\n",
		   res->lockname.len, res->lockname.name, lock->ml.node,
		   msg_type, blocked_type);

	memset(&past, 0, sizeof(struct dlm_proxy_ast));
	past.node_idx = dlm->node_num;
	past.type = msg_type;
	past.blocked_type = blocked_type;
	past.namelen = res->lockname.len;
	memcpy(past.name, res->lockname.name, past.namelen);
	past.cookie = lock->ml.cookie;

	vec[0].iov_len = sizeof(struct dlm_proxy_ast);
	vec[0].iov_base = &past;
	if (flags & DLM_LKSB_GET_LVB) {
		mlog(0, "returning requested LVB data\n");
		be32_add_cpu(&past.flags, LKM_GET_LVB);
		vec[1].iov_len = DLM_LVB_LEN;
		vec[1].iov_base = lock->lksb->lvb;
		veclen++;
	}

	ret = o2net_send_message_vec(DLM_PROXY_AST_MSG, dlm->key, vec, veclen,
				     lock->ml.node, &status);
	if (ret < 0)
		mlog_errno(ret);
	else {
		if (status == DLM_RECOVERING) {
			mlog(ML_ERROR, "sent AST to node %u, it thinks this "
			     "node is dead!\n", lock->ml.node);
			BUG();
		} else if (status == DLM_MIGRATING) {
			mlog(ML_ERROR, "sent AST to node %u, it returned "
			     "DLM_MIGRATING!\n", lock->ml.node);
			BUG();
		} else if (status != DLM_NORMAL) {
			mlog(ML_ERROR, "AST to node %u returned %d!\n",
			     lock->ml.node, status);
			/* ignore this */
		}
		ret = 0;
	}
	return ret;
}