2 * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
3 * Copyright (C) 2004-2005 Red Hat, Inc. All rights reserved.
5 * This copyrighted material is made available to anyone wishing to use,
6 * modify, copy, or redistribute it subject to the terms and conditions
7 * of the GNU General Public License version 2.
12 static char junk_lvb
[GDLM_LVB_SIZE
];
14 static void queue_complete(struct gdlm_lock
*lp
)
16 struct gdlm_ls
*ls
= lp
->ls
;
18 clear_bit(LFL_ACTIVE
, &lp
->flags
);
20 spin_lock(&ls
->async_lock
);
21 list_add_tail(&lp
->clist
, &ls
->complete
);
22 spin_unlock(&ls
->async_lock
);
23 wake_up(&ls
->thread_wait
);
/* DLM completion callback (AST); astarg is the struct gdlm_lock. */
static inline void gdlm_ast(void *astarg)
{
	queue_complete(astarg);
}
31 static inline void gdlm_bast(void *astarg
, int mode
)
33 struct gdlm_lock
*lp
= astarg
;
34 struct gdlm_ls
*ls
= lp
->ls
;
37 printk(KERN_INFO
"lock_dlm: bast mode zero %x,%llx\n",
39 (unsigned long long)lp
->lockname
.ln_number
);
43 spin_lock(&ls
->async_lock
);
45 list_add_tail(&lp
->blist
, &ls
->blocking
);
47 } else if (lp
->bast_mode
< mode
)
49 spin_unlock(&ls
->async_lock
);
50 wake_up(&ls
->thread_wait
);
53 void gdlm_queue_delayed(struct gdlm_lock
*lp
)
55 struct gdlm_ls
*ls
= lp
->ls
;
57 spin_lock(&ls
->async_lock
);
58 list_add_tail(&lp
->delay_list
, &ls
->delayed
);
59 spin_unlock(&ls
->async_lock
);
62 /* convert gfs lock-state to dlm lock-mode */
64 static s16
make_mode(s16 lmstate
)
76 gdlm_assert(0, "unknown LM state %d", lmstate
);
80 /* convert dlm lock-mode to gfs lock-state */
82 s16
gdlm_make_lmstate(s16 dlmmode
)
87 return LM_ST_UNLOCKED
;
89 return LM_ST_EXCLUSIVE
;
91 return LM_ST_DEFERRED
;
95 gdlm_assert(0, "unknown DLM mode %d", dlmmode
);
99 /* verify agreement with GFS on the current lock state, NB: DLM_LOCK_NL and
100 DLM_LOCK_IV are both considered LM_ST_UNLOCKED by GFS. */
102 static void check_cur_state(struct gdlm_lock
*lp
, unsigned int cur_state
)
104 s16 cur
= make_mode(cur_state
);
105 if (lp
->cur
!= DLM_LOCK_IV
)
106 gdlm_assert(lp
->cur
== cur
, "%d, %d", lp
->cur
, cur
);
109 static inline unsigned int make_flags(struct gdlm_lock
*lp
,
110 unsigned int gfs_flags
,
113 unsigned int lkf
= 0;
115 if (gfs_flags
& LM_FLAG_TRY
)
116 lkf
|= DLM_LKF_NOQUEUE
;
118 if (gfs_flags
& LM_FLAG_TRY_1CB
) {
119 lkf
|= DLM_LKF_NOQUEUE
;
120 lkf
|= DLM_LKF_NOQUEUEBAST
;
123 if (gfs_flags
& LM_FLAG_PRIORITY
) {
124 lkf
|= DLM_LKF_NOORDER
;
125 lkf
|= DLM_LKF_HEADQUE
;
128 if (gfs_flags
& LM_FLAG_ANY
) {
129 if (req
== DLM_LOCK_PR
)
130 lkf
|= DLM_LKF_ALTCW
;
131 else if (req
== DLM_LOCK_CW
)
132 lkf
|= DLM_LKF_ALTPR
;
135 if (lp
->lksb
.sb_lkid
!= 0) {
136 lkf
|= DLM_LKF_CONVERT
;
138 /* Conversion deadlock avoidance by DLM */
140 if (!test_bit(LFL_FORCE_PROMOTE
, &lp
->flags
) &&
141 !(lkf
& DLM_LKF_NOQUEUE
) &&
142 cur
> DLM_LOCK_NL
&& req
> DLM_LOCK_NL
&& cur
!= req
)
143 lkf
|= DLM_LKF_CONVDEADLK
;
147 lkf
|= DLM_LKF_VALBLK
;
152 /* make_strname - convert GFS lock numbers to a string */
154 static inline void make_strname(const struct lm_lockname
*lockname
,
155 struct gdlm_strname
*str
)
157 sprintf(str
->name
, "%8x%16llx", lockname
->ln_type
,
158 (unsigned long long)lockname
->ln_number
);
159 str
->namelen
= GDLM_STRNAME_BYTES
;
162 static int gdlm_create_lp(struct gdlm_ls
*ls
, struct lm_lockname
*name
,
163 struct gdlm_lock
**lpp
)
165 struct gdlm_lock
*lp
;
167 lp
= kzalloc(sizeof(struct gdlm_lock
), GFP_KERNEL
);
171 lp
->lockname
= *name
;
172 make_strname(name
, &lp
->strname
);
174 lp
->cur
= DLM_LOCK_IV
;
176 lp
->hold_null
= NULL
;
177 INIT_LIST_HEAD(&lp
->clist
);
178 INIT_LIST_HEAD(&lp
->blist
);
179 INIT_LIST_HEAD(&lp
->delay_list
);
181 spin_lock(&ls
->async_lock
);
182 list_add(&lp
->all_list
, &ls
->all_locks
);
183 ls
->all_locks_count
++;
184 spin_unlock(&ls
->async_lock
);
190 void gdlm_delete_lp(struct gdlm_lock
*lp
)
192 struct gdlm_ls
*ls
= lp
->ls
;
194 spin_lock(&ls
->async_lock
);
195 if (!list_empty(&lp
->clist
))
196 list_del_init(&lp
->clist
);
197 if (!list_empty(&lp
->blist
))
198 list_del_init(&lp
->blist
);
199 if (!list_empty(&lp
->delay_list
))
200 list_del_init(&lp
->delay_list
);
201 gdlm_assert(!list_empty(&lp
->all_list
), "%x,%llx", lp
->lockname
.ln_type
,
202 (unsigned long long)lp
->lockname
.ln_number
);
203 list_del_init(&lp
->all_list
);
204 ls
->all_locks_count
--;
205 spin_unlock(&ls
->async_lock
);
210 int gdlm_get_lock(void *lockspace
, struct lm_lockname
*name
,
213 struct gdlm_lock
*lp
;
216 error
= gdlm_create_lp(lockspace
, name
, &lp
);
/* lm_lockops entry point: release a lock object created by gdlm_get_lock. */
void gdlm_put_lock(void *lock)
{
	gdlm_delete_lp(lock);
}
227 unsigned int gdlm_do_lock(struct gdlm_lock
*lp
)
229 struct gdlm_ls
*ls
= lp
->ls
;
233 * When recovery is in progress, delay lock requests for submission
234 * once recovery is done. Requests for recovery (NOEXP) and unlocks
238 if (test_bit(DFL_BLOCK_LOCKS
, &ls
->flags
) &&
239 !test_bit(LFL_NOBLOCK
, &lp
->flags
) && lp
->req
!= DLM_LOCK_NL
) {
240 gdlm_queue_delayed(lp
);
245 * Submit the actual lock request.
248 if (test_bit(LFL_NOBAST
, &lp
->flags
))
251 set_bit(LFL_ACTIVE
, &lp
->flags
);
253 log_debug("lk %x,%llx id %x %d,%d %x", lp
->lockname
.ln_type
,
254 (unsigned long long)lp
->lockname
.ln_number
, lp
->lksb
.sb_lkid
,
255 lp
->cur
, lp
->req
, lp
->lkf
);
257 error
= dlm_lock(ls
->dlm_lockspace
, lp
->req
, &lp
->lksb
, lp
->lkf
,
258 lp
->strname
.name
, lp
->strname
.namelen
, 0, gdlm_ast
,
259 lp
, bast
? gdlm_bast
: NULL
);
261 if ((error
== -EAGAIN
) && (lp
->lkf
& DLM_LKF_NOQUEUE
)) {
262 lp
->lksb
.sb_status
= -EAGAIN
;
268 log_error("%s: gdlm_lock %x,%llx err=%d cur=%d req=%d lkf=%x "
269 "flags=%lx", ls
->fsname
, lp
->lockname
.ln_type
,
270 (unsigned long long)lp
->lockname
.ln_number
, error
,
271 lp
->cur
, lp
->req
, lp
->lkf
, lp
->flags
);
277 static unsigned int gdlm_do_unlock(struct gdlm_lock
*lp
)
279 struct gdlm_ls
*ls
= lp
->ls
;
280 unsigned int lkf
= 0;
283 set_bit(LFL_DLM_UNLOCK
, &lp
->flags
);
284 set_bit(LFL_ACTIVE
, &lp
->flags
);
287 lkf
= DLM_LKF_VALBLK
;
289 log_debug("un %x,%llx %x %d %x", lp
->lockname
.ln_type
,
290 (unsigned long long)lp
->lockname
.ln_number
,
291 lp
->lksb
.sb_lkid
, lp
->cur
, lkf
);
293 error
= dlm_unlock(ls
->dlm_lockspace
, lp
->lksb
.sb_lkid
, lkf
, NULL
, lp
);
296 log_error("%s: gdlm_unlock %x,%llx err=%d cur=%d req=%d lkf=%x "
297 "flags=%lx", ls
->fsname
, lp
->lockname
.ln_type
,
298 (unsigned long long)lp
->lockname
.ln_number
, error
,
299 lp
->cur
, lp
->req
, lp
->lkf
, lp
->flags
);
305 unsigned int gdlm_lock(void *lock
, unsigned int cur_state
,
306 unsigned int req_state
, unsigned int flags
)
308 struct gdlm_lock
*lp
= lock
;
310 clear_bit(LFL_DLM_CANCEL
, &lp
->flags
);
311 if (flags
& LM_FLAG_NOEXP
)
312 set_bit(LFL_NOBLOCK
, &lp
->flags
);
314 check_cur_state(lp
, cur_state
);
315 lp
->req
= make_mode(req_state
);
316 lp
->lkf
= make_flags(lp
, flags
, lp
->cur
, lp
->req
);
318 return gdlm_do_lock(lp
);
321 unsigned int gdlm_unlock(void *lock
, unsigned int cur_state
)
323 struct gdlm_lock
*lp
= lock
;
325 clear_bit(LFL_DLM_CANCEL
, &lp
->flags
);
326 if (lp
->cur
== DLM_LOCK_IV
)
328 return gdlm_do_unlock(lp
);
331 void gdlm_cancel(void *lock
)
333 struct gdlm_lock
*lp
= lock
;
334 struct gdlm_ls
*ls
= lp
->ls
;
335 int error
, delay_list
= 0;
337 if (test_bit(LFL_DLM_CANCEL
, &lp
->flags
))
340 log_info("gdlm_cancel %x,%llx flags %lx", lp
->lockname
.ln_type
,
341 (unsigned long long)lp
->lockname
.ln_number
, lp
->flags
);
343 spin_lock(&ls
->async_lock
);
344 if (!list_empty(&lp
->delay_list
)) {
345 list_del_init(&lp
->delay_list
);
348 spin_unlock(&ls
->async_lock
);
351 set_bit(LFL_CANCEL
, &lp
->flags
);
352 set_bit(LFL_ACTIVE
, &lp
->flags
);
357 if (!test_bit(LFL_ACTIVE
, &lp
->flags
) ||
358 test_bit(LFL_DLM_UNLOCK
, &lp
->flags
)) {
359 log_info("gdlm_cancel skip %x,%llx flags %lx",
360 lp
->lockname
.ln_type
,
361 (unsigned long long)lp
->lockname
.ln_number
, lp
->flags
);
365 /* the lock is blocked in the dlm */
367 set_bit(LFL_DLM_CANCEL
, &lp
->flags
);
368 set_bit(LFL_ACTIVE
, &lp
->flags
);
370 error
= dlm_unlock(ls
->dlm_lockspace
, lp
->lksb
.sb_lkid
, DLM_LKF_CANCEL
,
373 log_info("gdlm_cancel rv %d %x,%llx flags %lx", error
,
374 lp
->lockname
.ln_type
,
375 (unsigned long long)lp
->lockname
.ln_number
, lp
->flags
);
378 clear_bit(LFL_DLM_CANCEL
, &lp
->flags
);
381 static int gdlm_add_lvb(struct gdlm_lock
*lp
)
385 lvb
= kzalloc(GDLM_LVB_SIZE
, GFP_KERNEL
);
389 lp
->lksb
.sb_lvbptr
= lvb
;
394 static void gdlm_del_lvb(struct gdlm_lock
*lp
)
398 lp
->lksb
.sb_lvbptr
= NULL
;
/* wait_on_bit action routine for LFL_AST_WAIT: just reschedule until the
 * completion AST clears the bit.  NOTE(review): body reconstructed —
 * original statements were lost in extraction; confirm against upstream. */
static int gdlm_ast_wait(void *word)
{
	schedule();
	return 0;
}
407 /* This can do a synchronous dlm request (requiring a lock_dlm thread to get
408 the completion) because gfs won't call hold_lvb() during a callback (from
409 the context of a lock_dlm thread). */
411 static int hold_null_lock(struct gdlm_lock
*lp
)
413 struct gdlm_lock
*lpn
= NULL
;
417 printk(KERN_INFO
"lock_dlm: lvb already held\n");
421 error
= gdlm_create_lp(lp
->ls
, &lp
->lockname
, &lpn
);
425 lpn
->lksb
.sb_lvbptr
= junk_lvb
;
428 lpn
->req
= DLM_LOCK_NL
;
429 lpn
->lkf
= DLM_LKF_VALBLK
| DLM_LKF_EXPEDITE
;
430 set_bit(LFL_NOBAST
, &lpn
->flags
);
431 set_bit(LFL_INLOCK
, &lpn
->flags
);
432 set_bit(LFL_AST_WAIT
, &lpn
->flags
);
435 wait_on_bit(&lpn
->flags
, LFL_AST_WAIT
, gdlm_ast_wait
, TASK_UNINTERRUPTIBLE
);
436 error
= lpn
->lksb
.sb_status
;
438 printk(KERN_INFO
"lock_dlm: hold_null_lock dlm error %d\n",
448 /* This cannot do a synchronous dlm request (requiring a lock_dlm thread to get
449 the completion) because gfs may call unhold_lvb() during a callback (from
450 the context of a lock_dlm thread) which could cause a deadlock since the
451 other lock_dlm thread could be engaged in recovery. */
453 static void unhold_null_lock(struct gdlm_lock
*lp
)
455 struct gdlm_lock
*lpn
= lp
->hold_null
;
457 gdlm_assert(lpn
, "%x,%llx", lp
->lockname
.ln_type
,
458 (unsigned long long)lp
->lockname
.ln_number
);
459 lpn
->lksb
.sb_lvbptr
= NULL
;
461 set_bit(LFL_UNLOCK_DELETE
, &lpn
->flags
);
463 lp
->hold_null
= NULL
;
466 /* Acquire a NL lock because gfs requires the value block to remain
467 intact on the resource while the lvb is "held" even if it's holding no locks
470 int gdlm_hold_lvb(void *lock
, char **lvbp
)
472 struct gdlm_lock
*lp
= lock
;
475 error
= gdlm_add_lvb(lp
);
481 error
= hold_null_lock(lp
);
/* Release the NL "hold" lock and free the lock's value block. */
void gdlm_unhold_lvb(void *lock, char *lvb)
{
	struct gdlm_lock *lp = lock;

	unhold_null_lock(lp);
	gdlm_del_lvb(lp);
}
496 void gdlm_submit_delayed(struct gdlm_ls
*ls
)
498 struct gdlm_lock
*lp
, *safe
;
500 spin_lock(&ls
->async_lock
);
501 list_for_each_entry_safe(lp
, safe
, &ls
->delayed
, delay_list
) {
502 list_del_init(&lp
->delay_list
);
503 list_add_tail(&lp
->delay_list
, &ls
->submit
);
505 spin_unlock(&ls
->async_lock
);
506 wake_up(&ls
->thread_wait
);
509 int gdlm_release_all_locks(struct gdlm_ls
*ls
)
511 struct gdlm_lock
*lp
, *safe
;
514 spin_lock(&ls
->async_lock
);
515 list_for_each_entry_safe(lp
, safe
, &ls
->all_locks
, all_list
) {
516 list_del_init(&lp
->all_list
);
518 if (lp
->lvb
&& lp
->lvb
!= junk_lvb
)
523 spin_unlock(&ls
->async_lock
);