/* -*- mode: c; c-basic-offset: 8; -*-
 * vim: noexpandtab sw=8 ts=8 sts=0:
 *
 * dlmunlock.c
 *
 * underlying calls for unlocking locks
 *
 * Copyright (C) 2004 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 021110-1307, USA.
 *
 */
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/highmem.h>
#include <linux/utsname.h>
#include <linux/init.h>
#include <linux/sysctl.h>
#include <linux/random.h>
#include <linux/blkdev.h>
#include <linux/socket.h>
#include <linux/inet.h>
#include <linux/spinlock.h>
#include <linux/delay.h>

#include "cluster/heartbeat.h"
#include "cluster/nodemanager.h"
#include "cluster/tcp.h"

#include "dlmapi.h"
#include "dlmcommon.h"

#define MLOG_MASK_PREFIX ML_DLM
#include "cluster/masklog.h"
#define DLM_UNLOCK_FREE_LOCK           0x00000001
#define DLM_UNLOCK_CALL_AST            0x00000002
#define DLM_UNLOCK_REMOVE_LOCK         0x00000004
#define DLM_UNLOCK_REGRANT_LOCK        0x00000008
#define DLM_UNLOCK_CLEAR_CONVERT_TYPE  0x00000010
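
/* These bits are OR'd into the "actions" out-parameter by
 * dlm_get_cancel_actions()/dlm_get_unlock_actions() below and then
 * carried out one by one in dlmunlock_common(); e.g. cancelling a
 * still-blocked lock yields (DLM_UNLOCK_CALL_AST|DLM_UNLOCK_REMOVE_LOCK). */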
static enum dlm_status dlm_get_cancel_actions(struct dlm_ctxt *dlm,
					      struct dlm_lock_resource *res,
					      struct dlm_lock *lock,
					      struct dlm_lockstatus *lksb,
					      int *actions);
static enum dlm_status dlm_get_unlock_actions(struct dlm_ctxt *dlm,
					      struct dlm_lock_resource *res,
					      struct dlm_lock *lock,
					      struct dlm_lockstatus *lksb,
					      int *actions);

static enum dlm_status dlm_send_remote_unlock_request(struct dlm_ctxt *dlm,
						 struct dlm_lock_resource *res,
						 struct dlm_lock *lock,
						 struct dlm_lockstatus *lksb,
						 int flags,
						 u8 owner);
/*
 * according to the spec:
 * http://opendlm.sourceforge.net/cvsmirror/opendlm/docs/dlmbook_final.pdf
 *
 *  flags & LKM_CANCEL != 0: must be converting or blocked
 *  flags & LKM_CANCEL == 0: must be granted
 *
 * So to unlock a converting lock, you must first cancel the
 * convert (passing LKM_CANCEL in flags), then call the unlock
 * again (with no LKM_CANCEL in flags).
 */
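
/*
 * Illustrative caller-side sketch (not part of this file; my_unlockast
 * and my_data are hypothetical): per the rule above, undoing an
 * in-flight convert takes two dlmunlock() calls:
 *
 *	status = dlmunlock(dlm, lksb, LKM_CANCEL, my_unlockast, my_data);
 *	if (status == DLM_NORMAL)
 *		status = dlmunlock(dlm, lksb, 0, my_unlockast, my_data);
 *
 * A cancel that arrives after the convert was already granted comes
 * back as DLM_CANCELGRANT internally, which dlmunlock() maps to
 * DLM_NORMAL before returning.
 */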
/*
 * locking:
 *   caller needs:  none
 *   taken:         res->spinlock and lock->spinlock taken and dropped
 *   held on exit:  none
 * returns: DLM_NORMAL, DLM_NOLOCKMGR, status from network
 * all callers should have taken an extra ref on lock coming in
 */
static enum dlm_status dlmunlock_common(struct dlm_ctxt *dlm,
					struct dlm_lock_resource *res,
					struct dlm_lock *lock,
					struct dlm_lockstatus *lksb,
					int flags, int *call_ast,
					int master_node)
{
	enum dlm_status status;
	int actions = 0;
	int in_use;
	u8 owner;

	mlog(0, "master_node = %d, valblk = %d\n", master_node,
	     flags & LKM_VALBLK);

	if (master_node)
		BUG_ON(res->owner != dlm->node_num);
	else
		BUG_ON(res->owner == dlm->node_num);
	spin_lock(&dlm->spinlock);
	/* We want to be sure that we're not freeing a lock
	 * that still has AST's pending... */
	in_use = !list_empty(&lock->ast_list);
	spin_unlock(&dlm->spinlock);
	if (in_use) {
		mlog(ML_ERROR, "lockres %.*s: Someone is calling dlmunlock "
		     "while waiting for an ast!", res->lockname.len,
		     res->lockname.name);
		return DLM_BADPARAM;
	}
	spin_lock(&res->spinlock);
	if (res->state & DLM_LOCK_RES_IN_PROGRESS) {
		if (master_node) {
			mlog(ML_ERROR, "lockres in progress!\n");
			spin_unlock(&res->spinlock);
			return DLM_FORWARD;
		}
		/* ok for this to sleep if not in a network handler */
		__dlm_wait_on_lockres(res);
		res->state |= DLM_LOCK_RES_IN_PROGRESS;
	}
	spin_lock(&lock->spinlock);
	if (res->state & DLM_LOCK_RES_RECOVERING) {
		status = DLM_RECOVERING;
		goto leave;
	}
	/* see above for what the spec says about
	 * LKM_CANCEL and the lock queue state */
	if (flags & LKM_CANCEL)
		status = dlm_get_cancel_actions(dlm, res, lock, lksb, &actions);
	else
		status = dlm_get_unlock_actions(dlm, res, lock, lksb, &actions);

	if (status != DLM_NORMAL && (status != DLM_CANCELGRANT || !master_node))
		goto leave;
	/* By now this has been masked out of cancel requests. */
	if (flags & LKM_VALBLK) {
		/* make the final update to the lvb */
		if (master_node)
			memcpy(res->lvb, lksb->lvb, DLM_LVB_LEN);
		else
			flags |= LKM_PUT_LVB; /* let the send function
					       * handle it. */
	}
	if (!master_node) {
		owner = res->owner;
		/* drop locks and send message */
		if (flags & LKM_CANCEL)
			lock->cancel_pending = 1;
		else
			lock->unlock_pending = 1;
		spin_unlock(&lock->spinlock);
		spin_unlock(&res->spinlock);
		status = dlm_send_remote_unlock_request(dlm, res, lock, lksb,
							flags, owner);
		spin_lock(&res->spinlock);
		spin_lock(&lock->spinlock);
		/* if the master told us the lock was already granted,
		 * let the ast handle all of these actions */
		if (status == DLM_CANCELGRANT) {
			actions &= ~(DLM_UNLOCK_REMOVE_LOCK|
				     DLM_UNLOCK_REGRANT_LOCK|
				     DLM_UNLOCK_CLEAR_CONVERT_TYPE);
		} else if (status == DLM_RECOVERING ||
			   status == DLM_MIGRATING ||
			   status == DLM_FORWARD) {
			/* must clear the actions because this unlock
			 * is about to be retried. cannot free or do
			 * any list manipulation. */
			mlog(0, "%s:%.*s: clearing actions, %s\n",
			     dlm->name, res->lockname.len,
			     res->lockname.name,
			     status==DLM_RECOVERING?"recovering":
			     (status==DLM_MIGRATING?"migrating":
			      "forward"));
			actions = 0;
		}
		if (flags & LKM_CANCEL)
			lock->cancel_pending = 0;
		else
			lock->unlock_pending = 0;
	}
	/* get an extra ref on lock.  if we are just switching
	 * lists here, we don't want the lock to go away. */
	dlm_lock_get(lock);

	if (actions & DLM_UNLOCK_REMOVE_LOCK) {
		list_del_init(&lock->list);
		dlm_lock_put(lock);
	}
	if (actions & DLM_UNLOCK_REGRANT_LOCK) {
		dlm_lock_get(lock);
		list_add_tail(&lock->list, &res->granted);
	}
	if (actions & DLM_UNLOCK_CLEAR_CONVERT_TYPE) {
		mlog(0, "clearing convert_type at %smaster node\n",
		     master_node ? "" : "non-");
		lock->ml.convert_type = LKM_IVMODE;
	}

	/* remove the extra ref on lock */
	dlm_lock_put(lock);
leave:
	res->state &= ~DLM_LOCK_RES_IN_PROGRESS;
	if (!dlm_lock_on_list(&res->converting, lock))
		BUG_ON(lock->ml.convert_type != LKM_IVMODE);
	else
		BUG_ON(lock->ml.convert_type == LKM_IVMODE);
	spin_unlock(&lock->spinlock);
	spin_unlock(&res->spinlock);
	wake_up(&res->wq);
	/* let the caller's final dlm_lock_put handle the actual kfree */
	if (actions & DLM_UNLOCK_FREE_LOCK) {
		/* this should always be coupled with list removal */
		BUG_ON(!(actions & DLM_UNLOCK_REMOVE_LOCK));
		mlog(0, "lock %u:%llu should be gone now! refs=%d\n",
		     dlm_get_lock_cookie_node(lock->ml.cookie),
		     dlm_get_lock_cookie_seq(lock->ml.cookie),
		     atomic_read(&lock->lock_refs.refcount)-1);
		dlm_lock_put(lock);
	}
	if (actions & DLM_UNLOCK_CALL_AST)
		*call_ast = 1;
	/* if cancel or unlock succeeded, lvb work is done */
	if (status == DLM_NORMAL)
		lksb->flags &= ~(DLM_LKSB_PUT_LVB|DLM_LKSB_GET_LVB);

	return status;
}
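
/*
 * Flow summary for dlmunlock_common(), derived from the code above:
 * validate the request and pick the action bits; for a non-master,
 * mark the cancel/unlock pending, drop the spinlocks and forward the
 * request to the owner; trim or clear the action bits based on the
 * reply (DLM_RECOVERING/DLM_MIGRATING/DLM_FORWARD clear everything so
 * the caller can simply retry); then perform the queue manipulation,
 * request the ast, and clear the lvb flags on success.
 */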
void dlm_commit_pending_unlock(struct dlm_lock_resource *res,
			       struct dlm_lock *lock)
{
	/* leave DLM_LKSB_PUT_LVB on the lksb so any final
	 * update of the lvb will be sent to the new master */
	list_del_init(&lock->list);
}

void dlm_commit_pending_cancel(struct dlm_lock_resource *res,
			       struct dlm_lock *lock)
{
	list_move_tail(&lock->list, &res->granted);
	lock->ml.convert_type = LKM_IVMODE;
}
static inline enum dlm_status dlmunlock_master(struct dlm_ctxt *dlm,
					       struct dlm_lock_resource *res,
					       struct dlm_lock *lock,
					       struct dlm_lockstatus *lksb,
					       int flags,
					       int *call_ast)
{
	return dlmunlock_common(dlm, res, lock, lksb, flags, call_ast, 1);
}

static inline enum dlm_status dlmunlock_remote(struct dlm_ctxt *dlm,
					       struct dlm_lock_resource *res,
					       struct dlm_lock *lock,
					       struct dlm_lockstatus *lksb,
					       int flags, int *call_ast)
{
	return dlmunlock_common(dlm, res, lock, lksb, flags, call_ast, 0);
}
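
/* The two wrappers above differ only in the master_node argument to
 * dlmunlock_common(): the master applies the unlock against its own
 * queues, while the non-master path forwards the request to the owner
 * via dlm_send_remote_unlock_request(). */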
/*
 * returns: DLM_NORMAL, DLM_NOLOCKMGR, status from network
 */
static enum dlm_status dlm_send_remote_unlock_request(struct dlm_ctxt *dlm,
						 struct dlm_lock_resource *res,
						 struct dlm_lock *lock,
						 struct dlm_lockstatus *lksb,
						 int flags,
						 u8 owner)
{
	struct dlm_unlock_lock unlock;
	int tmpret;
	enum dlm_status ret;
	int status = 0;
	struct kvec vec[2];
	size_t veclen = 1;

	mlog_entry("%.*s\n", res->lockname.len, res->lockname.name);
	if (owner == dlm->node_num) {
		/* ended up trying to contact ourself.  this means
		 * that the lockres had been remote but became local
		 * via a migration.  just retry it, now as local */
		mlog(0, "%s:%.*s: this node became the master due to a "
		     "migration, re-evaluate now\n", dlm->name,
		     res->lockname.len, res->lockname.name);
		return DLM_FORWARD;
	}
	memset(&unlock, 0, sizeof(unlock));
	unlock.node_idx = dlm->node_num;
	unlock.flags = cpu_to_be32(flags);
	unlock.cookie = lock->ml.cookie;
	unlock.namelen = res->lockname.len;
	memcpy(unlock.name, res->lockname.name, unlock.namelen);

	vec[0].iov_len = sizeof(struct dlm_unlock_lock);
	vec[0].iov_base = &unlock;
	if (flags & LKM_PUT_LVB) {
		/* extra data to send if we are updating lvb */
		vec[1].iov_len = DLM_LVB_LEN;
		vec[1].iov_base = lock->lksb->lvb;
		veclen++;
	}
	tmpret = o2net_send_message_vec(DLM_UNLOCK_LOCK_MSG, dlm->key,
					vec, veclen, owner, &status);
	if (tmpret >= 0) {
		// successfully sent and received
		if (status == DLM_FORWARD)
			mlog(0, "master was in-progress.  retry\n");
		ret = status;
	} else {
		mlog_errno(tmpret);
		if (dlm_is_host_down(tmpret)) {
			/* NOTE: this seems strange, but it is what we want.
			 * when the master goes down during a cancel or
			 * unlock, the recovery code completes the operation
			 * as if the master had not died, then passes the
			 * updated state to the recovery master.  this thread
			 * just needs to finish out the operation and call
			 * the unlockast. */
			ret = DLM_NORMAL;
		} else {
			/* something bad.  this will BUG in ocfs2 */
			ret = dlm_err_to_dlm_status(tmpret);
		}
	}

	return ret;
}
/*
 * locking:
 *   taken:         takes and drops res->spinlock
 * returns: DLM_NORMAL, DLM_BADARGS, DLM_IVLOCKID,
 *          return value from dlmunlock_master
 */
int dlm_unlock_lock_handler(struct o2net_msg *msg, u32 len, void *data)
{
	struct dlm_ctxt *dlm = data;
	struct dlm_unlock_lock *unlock = (struct dlm_unlock_lock *)msg->buf;
	struct dlm_lock_resource *res = NULL;
	struct list_head *iter;
	struct dlm_lock *lock = NULL;
	enum dlm_status status = DLM_NORMAL;
	int found = 0, i;
	struct dlm_lockstatus *lksb = NULL;
	int ignore;
	u32 flags;
	struct list_head *queue;
	flags = be32_to_cpu(unlock->flags);

	if (flags & LKM_GET_LVB) {
		mlog(ML_ERROR, "bad args!  GET_LVB specified on unlock!\n");
		return DLM_BADARGS;
	}

	if ((flags & (LKM_PUT_LVB|LKM_CANCEL)) == (LKM_PUT_LVB|LKM_CANCEL)) {
		mlog(ML_ERROR, "bad args!  cannot modify lvb on a CANCEL "
		     "request!\n");
		return DLM_BADARGS;
	}

	if (unlock->namelen > DLM_LOCKID_NAME_MAX) {
		mlog(ML_ERROR, "Invalid name length in unlock handler!\n");
		return DLM_IVBUFLEN;
	}
	if (!dlm_grab(dlm))
		return DLM_REJECTED;

	mlog_bug_on_msg(!dlm_domain_fully_joined(dlm),
			"Domain %s not fully joined!\n", dlm->name);

	mlog(0, "lvb: %s\n", flags & LKM_PUT_LVB ? "put lvb" : "none");
	res = dlm_lookup_lockres(dlm, unlock->name, unlock->namelen);
	if (!res) {
		/* We assume here that a no lock resource simply means
		 * it was migrated away and destroyed before the other
		 * node could detect it. */
		mlog(0, "returning DLM_FORWARD -- res no longer exists\n");
		status = DLM_FORWARD;
		goto not_found;
	}

	queue = &res->granted;
	found = 0;
	spin_lock(&res->spinlock);
	if (res->state & DLM_LOCK_RES_RECOVERING) {
		spin_unlock(&res->spinlock);
		mlog(0, "returning DLM_RECOVERING\n");
		status = DLM_RECOVERING;
		goto leave;
	}

	if (res->state & DLM_LOCK_RES_MIGRATING) {
		spin_unlock(&res->spinlock);
		mlog(0, "returning DLM_MIGRATING\n");
		status = DLM_MIGRATING;
		goto leave;
	}

	if (res->owner != dlm->node_num) {
		spin_unlock(&res->spinlock);
		mlog(0, "returning DLM_FORWARD -- not master\n");
		status = DLM_FORWARD;
		goto leave;
	}
	for (i = 0; i < 3; i++) {
		list_for_each(iter, queue) {
			lock = list_entry(iter, struct dlm_lock, list);
			if (lock->ml.cookie == unlock->cookie &&
			    lock->ml.node == unlock->node_idx) {
				dlm_lock_get(lock);
				found = 1;
				break;
			}
		}
		if (found)
			break;
		/* scan granted -> converting -> blocked queues */
		queue++;
	}
	spin_unlock(&res->spinlock);
	if (!found) {
		status = DLM_IVLOCKID;
		goto not_found;
	}
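
	/* Note: the queue++ walk above steps from res->granted to
	 * res->converting to res->blocked, which assumes those three list
	 * heads are declared consecutively in struct dlm_lock_resource
	 * (see dlmcommon.h). */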
	/* lock was found on queue */
	lksb = lock->lksb;
	if (flags & (LKM_VALBLK|LKM_PUT_LVB) &&
	    lock->ml.type != LKM_EXMODE)
		flags &= ~(LKM_VALBLK|LKM_PUT_LVB);

	/* unlockast only called on originating node */
	if (flags & LKM_PUT_LVB) {
		lksb->flags |= DLM_LKSB_PUT_LVB;
		memcpy(&lksb->lvb[0], &unlock->lvb[0], DLM_LVB_LEN);
	}
	/* if this is in-progress, propagate the DLM_FORWARD
	 * all the way back out */
	status = dlmunlock_master(dlm, res, lock, lksb, flags, &ignore);
	if (status == DLM_FORWARD)
		mlog(0, "lockres is in progress\n");

	if (flags & LKM_PUT_LVB)
		lksb->flags &= ~DLM_LKSB_PUT_LVB;

	dlm_lockres_calc_usage(dlm, res);
	dlm_kick_thread(dlm, res);
not_found:
	if (!found)
		mlog(ML_ERROR, "failed to find lock to unlock! "
			       "cookie=%u:%llu\n",
		     dlm_get_lock_cookie_node(unlock->cookie),
		     dlm_get_lock_cookie_seq(unlock->cookie));
	else
		dlm_lock_put(lock);

leave:
	if (res)
		dlm_lockres_put(res);

	dlm_put(dlm);

	return status;
}
static enum dlm_status dlm_get_cancel_actions(struct dlm_ctxt *dlm,
					      struct dlm_lock_resource *res,
					      struct dlm_lock *lock,
					      struct dlm_lockstatus *lksb,
					      int *actions)
{
	enum dlm_status status;
	if (dlm_lock_on_list(&res->blocked, lock)) {
		/* cancel this outright */
		status = DLM_NORMAL;
		*actions = (DLM_UNLOCK_CALL_AST |
			    DLM_UNLOCK_REMOVE_LOCK);
	} else if (dlm_lock_on_list(&res->converting, lock)) {
		/* cancel the request, put back on granted */
		status = DLM_NORMAL;
		*actions = (DLM_UNLOCK_CALL_AST |
			    DLM_UNLOCK_REMOVE_LOCK |
			    DLM_UNLOCK_REGRANT_LOCK |
			    DLM_UNLOCK_CLEAR_CONVERT_TYPE);
	} else if (dlm_lock_on_list(&res->granted, lock)) {
		/* too late, already granted. */
		status = DLM_CANCELGRANT;
		*actions = DLM_UNLOCK_CALL_AST;
	} else {
		mlog(ML_ERROR, "lock to cancel is not on any list!\n");
		status = DLM_IVLOCKID;
		*actions = 0;
	}
	return status;
}
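
/*
 * Cancel outcomes, summarizing dlm_get_cancel_actions() above:
 *
 *	queue		status		actions
 *	blocked		DLM_NORMAL	CALL_AST | REMOVE_LOCK
 *	converting	DLM_NORMAL	CALL_AST | REMOVE_LOCK |
 *					REGRANT_LOCK | CLEAR_CONVERT_TYPE
 *	granted		DLM_CANCELGRANT	CALL_AST
 *	none		DLM_IVLOCKID	(none)
 */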
static enum dlm_status dlm_get_unlock_actions(struct dlm_ctxt *dlm,
					      struct dlm_lock_resource *res,
					      struct dlm_lock *lock,
					      struct dlm_lockstatus *lksb,
					      int *actions)
{
	enum dlm_status status;

	/* unlock request */
	if (!dlm_lock_on_list(&res->granted, lock)) {
		status = DLM_DENIED;
		dlm_error(status);
		*actions = 0;
	} else {
		/* unlock granted lock */
		status = DLM_NORMAL;
		*actions = (DLM_UNLOCK_FREE_LOCK |
			    DLM_UNLOCK_CALL_AST |
			    DLM_UNLOCK_REMOVE_LOCK);
	}
	return status;
}
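
/* Unlock outcomes, summarizing dlm_get_unlock_actions() above: a lock
 * may only be unlocked off the granted queue (FREE_LOCK | CALL_AST |
 * REMOVE_LOCK); per the spec comment at the top of this file, anything
 * still converting or blocked must be cancelled first, and gets
 * DLM_DENIED here. */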
/* there seems to be no point in doing this async
 * since (even for the remote case) there is really
 * no work to queue up... so just do it and fire the
 * unlockast by hand when done... */
enum dlm_status dlmunlock(struct dlm_ctxt *dlm, struct dlm_lockstatus *lksb,
			  int flags, dlm_astunlockfunc_t *unlockast, void *data)
{
	enum dlm_status status;
	struct dlm_lock_resource *res;
	struct dlm_lock *lock = NULL;
	int call_ast, is_master;

	mlog_entry_void();

	if (!lksb) {
		dlm_error(DLM_BADARGS);
		return DLM_BADARGS;
	}
	if (flags & ~(LKM_CANCEL | LKM_VALBLK | LKM_INVVALBLK)) {
		dlm_error(DLM_BADPARAM);
		return DLM_BADPARAM;
	}

	if ((flags & (LKM_VALBLK | LKM_CANCEL)) == (LKM_VALBLK | LKM_CANCEL)) {
		mlog(0, "VALBLK given with CANCEL: ignoring VALBLK\n");
		flags &= ~LKM_VALBLK;
	}

	if (!lksb->lockid || !lksb->lockid->lockres) {
		dlm_error(DLM_BADPARAM);
		return DLM_BADPARAM;
	}

	lock = lksb->lockid;
	BUG_ON(!lock);
	dlm_lock_get(lock);

	res = lock->lockres;
	BUG_ON(!res);
	dlm_lockres_get(res);

retry:
	call_ast = 0;
	/* need to retry up here because owner may have changed */
	mlog(0, "lock=%p res=%p\n", lock, res);
	spin_lock(&res->spinlock);
	is_master = (res->owner == dlm->node_num);
	if (flags & LKM_VALBLK && lock->ml.type != LKM_EXMODE)
		flags &= ~LKM_VALBLK;
	spin_unlock(&res->spinlock);

	if (is_master) {
		status = dlmunlock_master(dlm, res, lock, lksb, flags,
					  &call_ast);
		mlog(0, "done calling dlmunlock_master: returned %d, "
		     "call_ast is %d\n", status, call_ast);
	} else {
		status = dlmunlock_remote(dlm, res, lock, lksb, flags,
					  &call_ast);
		mlog(0, "done calling dlmunlock_remote: returned %d, "
		     "call_ast is %d\n", status, call_ast);
	}
	if (status == DLM_RECOVERING ||
	    status == DLM_MIGRATING ||
	    status == DLM_FORWARD) {
		/* We want to go away for a tiny bit to allow recovery
		 * / migration to complete on this resource. I don't
		 * know of any wait queue we could sleep on as this
		 * may be happening on another node. Perhaps the
		 * proper solution is to queue up requests on the
		 * other end? */

		/* do we want to yield(); ?? */
		msleep(50);

		mlog(0, "retrying unlock due to pending recovery/"
		     "migration/in-progress\n");
		goto retry;
	}
	if (call_ast) {
		mlog(0, "calling unlockast(%p, %d)\n", data, status);
		if (is_master) {
			/* it is possible that there is one last bast
			 * pending.  make sure it is flushed, then
			 * call the unlockast.
			 * not an issue if this lock is mastered remotely,
			 * since it has been removed from the
			 * lockres queues and cannot be found. */
			dlm_kick_thread(dlm, NULL);
			wait_event(dlm->ast_wq,
				   dlm_lock_basts_flushed(dlm, lock));
		}
		(*unlockast)(data, status);
	}
	if (status == DLM_CANCELGRANT)
		status = DLM_NORMAL;

	if (status == DLM_NORMAL) {
		mlog(0, "kicking the thread\n");
		dlm_kick_thread(dlm, res);
	} else
		dlm_error(status);

	dlm_lockres_calc_usage(dlm, res);
	dlm_lockres_put(res);
	dlm_lock_put(lock);

	mlog(0, "returning status=%d!\n", status);
	return status;
}
EXPORT_SYMBOL_GPL(dlmunlock);