/* -*- mode: c; c-basic-offset: 8; -*-
 * vim: noexpandtab sw=8 ts=8 sts=0:
 *
 * dlmconvert.c
 *
 * underlying calls for lock conversion
 *
 * Copyright (C) 2004 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 021110-1307, USA.
 *
 */

#include <linux/module.h>
#include <linux/fs.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/highmem.h>
#include <linux/utsname.h>
#include <linux/init.h>
#include <linux/sysctl.h>
#include <linux/random.h>
#include <linux/blkdev.h>
#include <linux/socket.h>
#include <linux/inet.h>
#include <linux/spinlock.h>

#include "cluster/heartbeat.h"
#include "cluster/nodemanager.h"
#include "cluster/tcp.h"

#include "dlmapi.h"
#include "dlmcommon.h"

#include "dlmconvert.h"

#define MLOG_MASK_PREFIX ML_DLM
#include "cluster/masklog.h"

/* NOTE: __dlmconvert_master is the only function in here that
 * needs a spinlock held on entry (res->spinlock) and it is the
 * only one that holds a lock on exit (res->spinlock).
 * All other functions in here need no locks and drop all of
 * the locks that they acquire. */
static enum dlm_status __dlmconvert_master(struct dlm_ctxt *dlm,
					   struct dlm_lock_resource *res,
					   struct dlm_lock *lock, int flags,
					   int type, int *call_ast,
					   int *kick_thread);
static enum dlm_status dlm_send_remote_convert_request(struct dlm_ctxt *dlm,
					   struct dlm_lock_resource *res,
					   struct dlm_lock *lock, int flags, int type);

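/*
 * Quick orientation (added commentary, not from the original file): a
 * conversion takes a lock that is already on res->granted and asks for a
 * different level.  Downconverts are granted in place; an upconvert is
 * granted in place only if dlm_lock_compatible() says the requested level
 * can coexist with every other granted and converting lock, otherwise the
 * lock is parked on res->converting until the dlm thread can grant it.
 */
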
/*
 * this is only called directly by dlmlock(), and only when the
 * local node is the owner of the lockres
 * locking:
 *   caller needs:  none
 *   taken:         takes and drops res->spinlock
 *   held on exit:  none
 * returns: see __dlmconvert_master
 */
enum dlm_status dlmconvert_master(struct dlm_ctxt *dlm,
				  struct dlm_lock_resource *res,
				  struct dlm_lock *lock, int flags, int type)
{
	int call_ast = 0, kick_thread = 0;
	enum dlm_status status;

	spin_lock(&res->spinlock);
	/* we are not in a network handler, this is fine */
	__dlm_wait_on_lockres(res);
	__dlm_lockres_reserve_ast(res);
	res->state |= DLM_LOCK_RES_IN_PROGRESS;

	status = __dlmconvert_master(dlm, res, lock, flags, type,
				     &call_ast, &kick_thread);

	res->state &= ~DLM_LOCK_RES_IN_PROGRESS;
	spin_unlock(&res->spinlock);
	wake_up(&res->wq);
	if (status != DLM_NORMAL && status != DLM_NOTQUEUED)
		dlm_error(status);

	/* either queue the ast or release it */
	if (call_ast)
		dlm_queue_ast(dlm, lock);
	else
		dlm_lockres_release_ast(dlm, res);

	if (kick_thread)
		dlm_kick_thread(dlm, res);

	return status;
}

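/*
 * For context (a rough sketch of the caller, not copied from dlmlock.c):
 * the generic convert path is expected to choose between the two variants
 * based on lockres ownership, roughly
 *
 *	if (res->owner == dlm->node_num)
 *		status = dlmconvert_master(dlm, res, lock, flags, mode);
 *	else
 *		status = dlmconvert_remote(dlm, res, lock, flags, mode);
 */
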
/* performs lock conversion at the lockres master site
 * locking:
 *   caller needs:  res->spinlock
 *   taken:         takes and drops lock->spinlock
 *   held on exit:  res->spinlock
 * returns: DLM_NORMAL, DLM_NOTQUEUED, DLM_DENIED
 *   call_ast: whether ast should be called for this lock
 *   kick_thread: whether dlm_kick_thread should be called
 */
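/*
 * Added summary of the logic below (commentary, not original): the lock must
 * sit on res->granted with no conversion pending.  LKM_VALBLK decides whether
 * the lvb is written out (converting down from EX) or re-fetched (converting
 * up from PR/NL).  A downconvert is always granted in place; an upconvert is
 * granted in place only if the new level is compatible with every other
 * granted and converting lock, otherwise the lock moves to res->converting
 * (or fails with DLM_NOTQUEUED when LKM_NOQUEUE was passed).
 */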
static enum dlm_status __dlmconvert_master(struct dlm_ctxt *dlm,
					   struct dlm_lock_resource *res,
					   struct dlm_lock *lock, int flags,
					   int type, int *call_ast,
					   int *kick_thread)
{
	enum dlm_status status = DLM_NORMAL;
	struct list_head *iter;
	struct dlm_lock *tmplock = NULL;

	assert_spin_locked(&res->spinlock);

	mlog_entry("type=%d, convert_type=%d, new convert_type=%d\n",
		   lock->ml.type, lock->ml.convert_type, type);

	spin_lock(&lock->spinlock);

	/* already converting? */
	if (lock->ml.convert_type != LKM_IVMODE) {
		mlog(ML_ERROR, "attempted to convert a lock with a lock "
		     "conversion pending\n");
		status = DLM_DENIED;
		goto unlock_exit;
	}

	/* must be on grant queue to convert */
	if (!dlm_lock_on_list(&res->granted, lock)) {
		mlog(ML_ERROR, "attempted to convert a lock not on grant "
		     "queue\n");
		status = DLM_DENIED;
		goto unlock_exit;
	}

	if (flags & LKM_VALBLK) {
		switch (lock->ml.type) {
			case LKM_EXMODE:
				/* EX + LKM_VALBLK + convert == set lvb */
				mlog(0, "will set lvb: converting %s->%s\n",
				     dlm_lock_mode_name(lock->ml.type),
				     dlm_lock_mode_name(type));
				lock->lksb->flags |= DLM_LKSB_PUT_LVB;
				break;
			case LKM_PRMODE:
			case LKM_NLMODE:
				/* refetch if new level is not NL */
				if (type > LKM_NLMODE) {
					mlog(0, "will fetch new value into "
					     "lvb: converting %s->%s\n",
					     dlm_lock_mode_name(lock->ml.type),
					     dlm_lock_mode_name(type));
					lock->lksb->flags |= DLM_LKSB_GET_LVB;
				} else {
					mlog(0, "will NOT fetch new value "
					     "into lvb: converting %s->%s\n",
					     dlm_lock_mode_name(lock->ml.type),
					     dlm_lock_mode_name(type));
					flags &= ~(LKM_VALBLK);
				}
				break;
		}
	}

	/* in-place downconvert? */
	if (type <= lock->ml.type)
		goto grant;

	/* upconvert from here on */
	status = DLM_NORMAL;
	list_for_each(iter, &res->granted) {
		tmplock = list_entry(iter, struct dlm_lock, list);
		if (tmplock == lock)
			continue;
		if (!dlm_lock_compatible(tmplock->ml.type, type))
			goto switch_queues;
	}

	list_for_each(iter, &res->converting) {
		tmplock = list_entry(iter, struct dlm_lock, list);
		if (!dlm_lock_compatible(tmplock->ml.type, type))
			goto switch_queues;
		/* existing conversion requests take precedence */
		if (!dlm_lock_compatible(tmplock->ml.convert_type, type))
			goto switch_queues;
	}

	/* fall thru to grant */

grant:
	mlog(0, "res %.*s, granting %s lock\n", res->lockname.len,
	     res->lockname.name, dlm_lock_mode_name(type));
	/* immediately grant the new lock type */
	lock->lksb->status = DLM_NORMAL;
	if (lock->ml.node == dlm->node_num)
		mlog(0, "doing in-place convert for nonlocal lock\n");
	lock->ml.type = type;
	if (lock->lksb->flags & DLM_LKSB_PUT_LVB)
		memcpy(res->lvb, lock->lksb->lvb, DLM_LVB_LEN);

	status = DLM_NORMAL;
	*call_ast = 1;
	goto unlock_exit;

switch_queues:
	if (flags & LKM_NOQUEUE) {
		mlog(0, "failed to convert NOQUEUE lock %.*s from "
		     "%d to %d...\n", res->lockname.len, res->lockname.name,
		     lock->ml.type, type);
		status = DLM_NOTQUEUED;
		goto unlock_exit;
	}
	mlog(0, "res %.*s, queueing...\n", res->lockname.len,
	     res->lockname.name);

	lock->ml.convert_type = type;
	/* do not alter lock refcount.  switching lists. */
	list_move_tail(&lock->list, &res->converting);

unlock_exit:
	spin_unlock(&lock->spinlock);
	if (status == DLM_DENIED) {
		__dlm_print_one_lock_resource(res);
	}
	if (status == DLM_NORMAL)
		*kick_thread = 1;
	return status;
}

void dlm_revert_pending_convert(struct dlm_lock_resource *res,
				struct dlm_lock *lock)
{
	/* do not alter lock refcount.  switching lists. */
	list_move_tail(&lock->list, &res->granted);
	lock->ml.convert_type = LKM_IVMODE;
	lock->lksb->flags &= ~(DLM_LKSB_GET_LVB|DLM_LKSB_PUT_LVB);
}

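/*
 * Note (added commentary): dlm_revert_pending_convert() is the undo path for
 * dlmconvert_remote() below.  If the network request fails, the lock is moved
 * back from res->converting to res->granted and any pending lvb get/put is
 * cancelled, leaving the lock exactly as it was before the attempt.
 */
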
/* messages the master site to do lock conversion
 * locking:
 *   caller needs:  none
 *   taken:         takes and drops res->spinlock, uses DLM_LOCK_RES_IN_PROGRESS
 *   held on exit:  none
 * returns: DLM_NORMAL, DLM_RECOVERING, status from remote node
 */
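/*
 * Added summary of the flow below (commentary, not original): the lock is
 * moved onto res->converting and flagged convert_pending *before* the network
 * request is sent; if the master refuses the conversion or the send fails,
 * dlm_revert_pending_convert() above puts the lock back on the granted queue.
 */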
enum dlm_status dlmconvert_remote(struct dlm_ctxt *dlm,
				  struct dlm_lock_resource *res,
				  struct dlm_lock *lock, int flags, int type)
{
	enum dlm_status status;

	mlog(0, "type=%d, convert_type=%d, busy=%d\n", lock->ml.type,
	     lock->ml.convert_type, res->state & DLM_LOCK_RES_IN_PROGRESS);

	spin_lock(&res->spinlock);
	if (res->state & DLM_LOCK_RES_RECOVERING) {
		mlog(0, "bailing out early since res is RECOVERING "
		     "on secondary queue\n");
		/* __dlm_print_one_lock_resource(res); */
		status = DLM_RECOVERING;
		goto bail;
	}
	/* will exit this call with spinlock held */
	__dlm_wait_on_lockres(res);

	if (lock->ml.convert_type != LKM_IVMODE) {
		__dlm_print_one_lock_resource(res);
		mlog(ML_ERROR, "converting a remote lock that is already "
		     "converting! (cookie=%u:%llu, conv=%d)\n",
		     dlm_get_lock_cookie_node(lock->ml.cookie),
		     dlm_get_lock_cookie_seq(lock->ml.cookie),
		     lock->ml.convert_type);
		status = DLM_DENIED;
		goto bail;
	}
	res->state |= DLM_LOCK_RES_IN_PROGRESS;
	/* move lock to local convert queue */
	/* do not alter lock refcount.  switching lists. */
	list_move_tail(&lock->list, &res->converting);
	lock->convert_pending = 1;
	lock->ml.convert_type = type;

	if (flags & LKM_VALBLK) {
		if (lock->ml.type == LKM_EXMODE) {
			flags |= LKM_PUT_LVB;
			lock->lksb->flags |= DLM_LKSB_PUT_LVB;
		} else {
			if (lock->ml.convert_type == LKM_NLMODE)
				flags &= ~LKM_VALBLK;
			else {
				flags |= LKM_GET_LVB;
				lock->lksb->flags |= DLM_LKSB_GET_LVB;
			}
		}
	}
	spin_unlock(&res->spinlock);

	/* no locks held here.
	 * need to wait for a reply as to whether it got queued or not. */
	status = dlm_send_remote_convert_request(dlm, res, lock, flags, type);

	spin_lock(&res->spinlock);
	res->state &= ~DLM_LOCK_RES_IN_PROGRESS;
	lock->convert_pending = 0;
	/* if it failed, move it back to granted queue */
	if (status != DLM_NORMAL) {
		if (status != DLM_NOTQUEUED)
			dlm_error(status);
		dlm_revert_pending_convert(res, lock);
	}
bail:
	spin_unlock(&res->spinlock);

	/* TODO: should this be a wake_one? */
	/* wake up any IN_PROGRESS waiters */
	wake_up(&res->wq);

	return status;
}

/* sends DLM_CONVERT_LOCK_MSG to master site
 * locking:
 *   caller needs:  none
 *   taken:         none
 *   held on exit:  none
 * returns: DLM_NOLOCKMGR, status from remote node
 */
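/*
 * Added note on the wire format (derived from the code below): the request is
 * a struct dlm_convert_lock carrying the requesting node, the requested mode,
 * the lock cookie and the lockres name, sent as the first kvec element; when
 * LKM_PUT_LVB is set, the DLM_LVB_LEN-byte lvb is appended as a second kvec
 * element.
 */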
static enum dlm_status dlm_send_remote_convert_request(struct dlm_ctxt *dlm,
					   struct dlm_lock_resource *res,
					   struct dlm_lock *lock, int flags, int type)
{
	struct dlm_convert_lock convert;
	int tmpret;
	enum dlm_status ret;
	int status = 0;
	struct kvec vec[2];
	size_t veclen = 1;

	mlog_entry("%.*s\n", res->lockname.len, res->lockname.name);

	memset(&convert, 0, sizeof(struct dlm_convert_lock));
	convert.node_idx = dlm->node_num;
	convert.requested_type = type;
	convert.cookie = lock->ml.cookie;
	convert.namelen = res->lockname.len;
	convert.flags = cpu_to_be32(flags);
	memcpy(convert.name, res->lockname.name, convert.namelen);

	vec[0].iov_len = sizeof(struct dlm_convert_lock);
	vec[0].iov_base = &convert;

	if (flags & LKM_PUT_LVB) {
		/* extra data to send if we are updating lvb */
		vec[1].iov_len = DLM_LVB_LEN;
		vec[1].iov_base = lock->lksb->lvb;
		veclen++;
	}

	tmpret = o2net_send_message_vec(DLM_CONVERT_LOCK_MSG, dlm->key,
					vec, veclen, res->owner, &status);
	if (tmpret >= 0) {
		// successfully sent and received
		ret = status;  // this is already a dlm_status
		if (ret == DLM_RECOVERING) {
			mlog(0, "node %u returned DLM_RECOVERING from convert "
			     "message!\n", res->owner);
		} else if (ret == DLM_MIGRATING) {
			mlog(0, "node %u returned DLM_MIGRATING from convert "
			     "message!\n", res->owner);
		} else if (ret == DLM_FORWARD) {
			mlog(0, "node %u returned DLM_FORWARD from convert "
			     "message!\n", res->owner);
		} else if (ret != DLM_NORMAL && ret != DLM_NOTQUEUED)
			dlm_error(ret);
	} else {
		mlog_errno(tmpret);
		if (dlm_is_host_down(tmpret)) {
			/* instead of logging the same network error over
			 * and over, sleep here and wait for the heartbeat
			 * to notice the node is dead.  times out after 5s. */
			dlm_wait_for_node_death(dlm, res->owner,
						DLM_NODE_DEATH_WAIT_MAX);
			ret = DLM_RECOVERING;
			mlog(0, "node %u died so returning DLM_RECOVERING "
			     "from convert message!\n", res->owner);
		} else {
			ret = dlm_err_to_dlm_status(tmpret);
		}
	}

	return ret;
}

/* handler for DLM_CONVERT_LOCK_MSG on master site
 * locking:
 *   caller needs:  none
 *   taken:         takes and drops res->spinlock
 *   held on exit:  none
 * returns: DLM_NORMAL, DLM_IVLOCKID, DLM_BADARGS,
 *          status from __dlmconvert_master
 */
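/*
 * Added summary of the handler below (commentary, not original): after
 * validating the message (name length, mutually exclusive PUT/GET lvb flags)
 * it looks up the lockres and the requesting node's lock on the granted
 * queue, copies in the lvb if one was sent, and then calls
 * __dlmconvert_master() under res->spinlock just as the local path does.
 * The resulting status is returned to the requester over the wire.
 */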
int dlm_convert_lock_handler(struct o2net_msg *msg, u32 len, void *data)
{
	struct dlm_ctxt *dlm = data;
	struct dlm_convert_lock *cnv = (struct dlm_convert_lock *)msg->buf;
	struct dlm_lock_resource *res = NULL;
	struct list_head *iter;
	struct dlm_lock *lock = NULL;
	struct dlm_lockstatus *lksb;
	enum dlm_status status = DLM_NORMAL;
	u32 flags;
	int call_ast = 0, kick_thread = 0, ast_reserved = 0;

	if (!dlm_grab(dlm)) {
		dlm_error(DLM_REJECTED);
		return DLM_REJECTED;
	}

	mlog_bug_on_msg(!dlm_domain_fully_joined(dlm),
			"Domain %s not fully joined!\n", dlm->name);

	if (cnv->namelen > DLM_LOCKID_NAME_MAX) {
		status = DLM_IVBUFLEN;
		dlm_error(status);
		goto leave;
	}

	flags = be32_to_cpu(cnv->flags);

	if ((flags & (LKM_PUT_LVB|LKM_GET_LVB)) ==
	    (LKM_PUT_LVB|LKM_GET_LVB)) {
		mlog(ML_ERROR, "both PUT and GET lvb specified\n");
		status = DLM_BADARGS;
		goto leave;
	}

	mlog(0, "lvb: %s\n", flags & LKM_PUT_LVB ? "put lvb" :
	     (flags & LKM_GET_LVB ? "get lvb" : "none"));

	status = DLM_IVLOCKID;
	res = dlm_lookup_lockres(dlm, cnv->name, cnv->namelen);
	if (!res) {
		dlm_error(status);
		goto leave;
	}

	spin_lock(&res->spinlock);
	status = __dlm_lockres_state_to_status(res);
	if (status != DLM_NORMAL) {
		spin_unlock(&res->spinlock);
		dlm_error(status);
		goto leave;
	}
	list_for_each(iter, &res->granted) {
		lock = list_entry(iter, struct dlm_lock, list);
		if (lock->ml.cookie == cnv->cookie &&
		    lock->ml.node == cnv->node_idx) {
			dlm_lock_get(lock);
			break;
		}
		lock = NULL;
	}
	if (!lock) {
		__dlm_print_one_lock_resource(res);
		list_for_each(iter, &res->granted) {
			lock = list_entry(iter, struct dlm_lock, list);
			if (lock->ml.node == cnv->node_idx) {
				mlog(ML_ERROR, "There is something here "
				     "for node %u, lock->ml.cookie=%llu, "
				     "cnv->cookie=%llu\n", cnv->node_idx,
				     (unsigned long long)lock->ml.cookie,
				     (unsigned long long)cnv->cookie);
				break;
			}
		}
		lock = NULL;
	}
	spin_unlock(&res->spinlock);
	if (!lock) {
		status = DLM_IVLOCKID;
		dlm_error(status);
		goto leave;
	}

	/* found the lock */
	lksb = lock->lksb;

	/* see if caller needed to get/put lvb */
	if (flags & LKM_PUT_LVB) {
		BUG_ON(lksb->flags & (DLM_LKSB_PUT_LVB|DLM_LKSB_GET_LVB));
		lksb->flags |= DLM_LKSB_PUT_LVB;
		memcpy(&lksb->lvb[0], &cnv->lvb[0], DLM_LVB_LEN);
	} else if (flags & LKM_GET_LVB) {
		BUG_ON(lksb->flags & (DLM_LKSB_PUT_LVB|DLM_LKSB_GET_LVB));
		lksb->flags |= DLM_LKSB_GET_LVB;
	}

	spin_lock(&res->spinlock);
	status = __dlm_lockres_state_to_status(res);
	if (status == DLM_NORMAL) {
		__dlm_lockres_reserve_ast(res);
		ast_reserved = 1;
		res->state |= DLM_LOCK_RES_IN_PROGRESS;
		status = __dlmconvert_master(dlm, res, lock, flags,
					     cnv->requested_type,
					     &call_ast, &kick_thread);
		res->state &= ~DLM_LOCK_RES_IN_PROGRESS;
	}
	spin_unlock(&res->spinlock);

	if (status != DLM_NORMAL) {
		if (status != DLM_NOTQUEUED)
			dlm_error(status);
		lksb->flags &= ~(DLM_LKSB_GET_LVB|DLM_LKSB_PUT_LVB);
	}

leave:
	if (!lock)
		mlog(ML_ERROR, "did not find lock to convert on grant queue! "
			       "cookie=%u:%llu\n",
		     dlm_get_lock_cookie_node(cnv->cookie),
		     dlm_get_lock_cookie_seq(cnv->cookie));
	else
		dlm_lock_put(lock);

	/* either queue the ast or release it, if reserved */
	if (call_ast)
		dlm_queue_ast(dlm, lock);
	else if (ast_reserved)
		dlm_lockres_release_ast(dlm, res);

	if (kick_thread)
		dlm_kick_thread(dlm, res);

	if (res)
		dlm_lockres_put(res);

	dlm_put(dlm);

	return status;
}
