4 * Copyright (C) 2001-2004 Sistina Software, Inc. All rights reserved.
5 * Copyright (C) 2004-2007 Red Hat, Inc. All rights reserved.
7 * This file is part of LVM2.
9 * This copyrighted material is made available to anyone wishing to use,
10 * modify, copy, or redistribute it subject to the terms and conditions
11 * of the GNU Lesser General Public License v.2.1.
13 * You should have received a copy of the GNU Lesser General Public License
14 * along with this program; if not, write to the Free Software Foundation,
15 * Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
20 #include "locking_types.h"
21 #include "lvm-string.h"
23 #include "toolcontext.h"
/* Active locking backend: function table plus LCK_* capability flags. */
static struct locking_type _locking;
/* Signal mask saved by _block_signals(), restored by _unblock_signals(). */
static sigset_t _oldset;

static int _vg_lock_count = 0;		/* Number of locks held */
static int _vg_write_lock_held = 0;	/* VG write lock held? */
static int _signals_blocked = 0;	/* Non-zero while signals are blocked */
static int _blocking_supported = 0;	/* From global/wait_for_locks config */

/* Set asynchronously by the SIGINT handler; polled via sigint_caught(). */
static volatile sig_atomic_t _sigint_caught = 0;
/* Nesting count for sigint_allow()/sigint_restore() pairs. */
static volatile sig_atomic_t _handler_installed;
/* Original SIGINT disposition saved by sigint_allow(). */
static struct sigaction _oldhandler;
/* Was SIGINT masked before sigint_allow() unmasked it? */
static int _oldmasked;
53 static void _catch_sigint(int unused
__attribute__((unused
)))
58 int sigint_caught(void) {
59 return _sigint_caught
;
62 void sigint_clear(void)
68 * Temporarily allow keyboard interrupts to be intercepted and noted;
69 * saves interrupt handler state for sigint_restore(). Users should
70 * use the sigint_caught() predicate to check whether interrupt was
71 * requested and act appropriately. Interrupt flags are never
72 * cleared automatically by this code, but the tools clear the flag
73 * before running each command in lvm_run_command(). All other places
74 * where the flag needs to be cleared need to call sigint_clear().
77 void sigint_allow(void)
79 struct sigaction handler
;
83 * Do not overwrite the backed-up handler data -
84 * just increase nesting count.
86 if (_handler_installed
) {
91 /* Grab old sigaction for SIGINT: shall not fail. */
92 sigaction(SIGINT
, NULL
, &handler
);
93 handler
.sa_flags
&= ~SA_RESTART
; /* Clear restart flag */
94 handler
.sa_handler
= _catch_sigint
;
96 _handler_installed
= 1;
98 /* Override the signal handler: shall not fail. */
99 sigaction(SIGINT
, &handler
, &_oldhandler
);
101 /* Unmask SIGINT. Remember to mask it again on restore. */
102 sigprocmask(0, NULL
, &sigs
);
103 if ((_oldmasked
= sigismember(&sigs
, SIGINT
))) {
104 sigdelset(&sigs
, SIGINT
);
105 sigprocmask(SIG_SETMASK
, &sigs
, NULL
);
109 void sigint_restore(void)
111 if (!_handler_installed
)
114 if (_handler_installed
> 1) {
115 _handler_installed
--;
119 /* Nesting count went down to 0. */
120 _handler_installed
= 0;
124 sigprocmask(0, NULL
, &sigs
);
125 sigaddset(&sigs
, SIGINT
);
126 sigprocmask(SIG_SETMASK
, &sigs
, NULL
);
129 sigaction(SIGINT
, &_oldhandler
, NULL
);
132 static void _block_signals(uint32_t flags
__attribute((unused
)))
136 if (_signals_blocked
)
139 if (sigfillset(&set
)) {
140 log_sys_error("sigfillset", "_block_signals");
144 if (sigprocmask(SIG_SETMASK
, &set
, &_oldset
)) {
145 log_sys_error("sigprocmask", "_block_signals");
149 _signals_blocked
= 1;
154 static void _unblock_signals(void)
156 /* Don't unblock signals while any locks are held */
157 if (!_signals_blocked
|| _vg_lock_count
)
160 if (sigprocmask(SIG_SETMASK
, &_oldset
, NULL
)) {
161 log_sys_error("sigprocmask", "_block_signals");
165 _signals_blocked
= 0;
170 static void _lock_memory(lv_operation_t lv_op
)
172 if (!(_locking
.flags
& LCK_PRE_MEMLOCK
))
175 if (lv_op
== LV_SUSPEND
)
179 static void _unlock_memory(lv_operation_t lv_op
)
181 if (!(_locking
.flags
& LCK_PRE_MEMLOCK
))
184 if (lv_op
== LV_RESUME
)
188 void reset_locking(void)
190 int was_locked
= _vg_lock_count
;
193 _vg_write_lock_held
= 0;
195 _locking
.reset_locking();
201 static void _update_vg_lock_count(const char *resource
, uint32_t flags
)
203 /* Ignore locks not associated with updating VG metadata */
204 if ((flags
& LCK_SCOPE_MASK
) != LCK_VG
||
205 (flags
& LCK_CACHE
) ||
206 !strcmp(resource
, VG_GLOBAL
))
209 if ((flags
& LCK_TYPE_MASK
) == LCK_UNLOCK
)
214 /* We don't bother to reset this until all VG locks are dropped */
215 if ((flags
& LCK_TYPE_MASK
) == LCK_WRITE
)
216 _vg_write_lock_held
= 1;
217 else if (!_vg_lock_count
)
218 _vg_write_lock_held
= 0;
222 * Select a locking type
223 * type: locking type; if < 0, then read config tree value
225 int init_locking(int type
, struct cmd_context
*cmd
)
228 type
= find_config_tree_int(cmd
, "global/locking_type", 1);
230 _blocking_supported
= find_config_tree_int(cmd
,
231 "global/wait_for_locks", DEFAULT_WAIT_FOR_LOCKS
);
235 init_no_locking(&_locking
, cmd
);
236 log_warn("WARNING: Locking disabled. Be careful! "
237 "This could corrupt your metadata.");
241 log_very_verbose("%sFile-based locking selected.",
242 _blocking_supported
? "" : "Non-blocking ");
244 if (!init_file_locking(&_locking
, cmd
))
251 log_very_verbose("External locking selected.");
252 if (init_external_locking(&_locking
, cmd
))
255 if (!find_config_tree_int(cmd
, "locking/fallback_to_clustered_locking",
256 find_config_tree_int(cmd
, "global/fallback_to_clustered_locking",
257 DEFAULT_FALLBACK_TO_CLUSTERED_LOCKING
)))
261 #ifdef CLUSTER_LOCKING_INTERNAL
262 log_very_verbose("Falling back to internal clustered locking.");
266 log_very_verbose("Cluster locking selected.");
267 if (!init_cluster_locking(&_locking
, cmd
))
273 log_verbose("Read-only locking selected. "
274 "Only read operations permitted.");
275 if (!init_readonly_locking(&_locking
, cmd
))
280 log_error("Unknown locking type requested.");
284 if ((type
== 2 || type
== 3) &&
285 find_config_tree_int(cmd
, "locking/fallback_to_local_locking",
286 find_config_tree_int(cmd
, "global/fallback_to_local_locking",
287 DEFAULT_FALLBACK_TO_LOCAL_LOCKING
))) {
288 log_warn("WARNING: Falling back to local file-based locking.");
289 log_warn("Volume Groups with the clustered attribute will "
291 if (init_file_locking(&_locking
, cmd
))
295 if (!ignorelockingfailure())
298 log_verbose("Locking disabled - only read operations permitted.");
299 init_readonly_locking(&_locking
, cmd
);
304 void fin_locking(void)
306 _locking
.fin_locking();
310 * Does the LVM1 driver know of this VG name?
312 int check_lvm1_vg_inactive(struct cmd_context
*cmd
, const char *vgname
)
317 /* We'll allow operations on orphans */
318 if (is_orphan_vg(vgname
))
321 /* LVM1 is only present in 2.4 kernels. */
322 if (strncmp(cmd
->kernel_vsn
, "2.4.", 4))
325 if (dm_snprintf(path
, sizeof(path
), "%s/lvm/VGs/%s", cmd
->proc_dir
,
327 log_error("LVM1 proc VG pathname too long for %s", vgname
);
331 if (stat(path
, &info
) == 0) {
332 log_error("%s exists: Is the original LVM driver using "
333 "this volume group?", path
);
335 } else if (errno
!= ENOENT
&& errno
!= ENOTDIR
) {
336 log_sys_error("stat", path
);
344 * VG locking is by VG name.
345 * FIXME This should become VG uuid.
347 static int _lock_vol(struct cmd_context
*cmd
, const char *resource
,
348 uint32_t flags
, lv_operation_t lv_op
)
352 _block_signals(flags
);
358 log_error("Internal error: Use of P_orphans is deprecated.");
362 if (*resource
== '#' && (flags
& LCK_CACHE
)) {
363 log_error("Internal error: P_%s referenced", resource
);
367 if ((ret
= _locking
.lock_resource(cmd
, resource
, flags
))) {
368 if ((flags
& LCK_SCOPE_MASK
) == LCK_VG
&&
369 !(flags
& LCK_CACHE
)) {
370 if ((flags
& LCK_TYPE_MASK
) == LCK_UNLOCK
)
371 lvmcache_unlock_vgname(resource
);
373 lvmcache_lock_vgname(resource
, (flags
& LCK_TYPE_MASK
)
377 _update_vg_lock_count(resource
, flags
);
380 _unlock_memory(lv_op
);
386 int lock_vol(struct cmd_context
*cmd
, const char *vol
, uint32_t flags
)
388 char resource
[258] __attribute((aligned(8)));
389 lv_operation_t lv_op
;
391 switch (flags
& (LCK_SCOPE_MASK
| LCK_TYPE_MASK
)) {
398 default: lv_op
= LV_NOOP
;
402 if (flags
== LCK_NONE
) {
403 log_debug("Internal error: %s: LCK_NONE lock requested", vol
);
407 switch (flags
& LCK_SCOPE_MASK
) {
410 * Automatically set LCK_NONBLOCK if one or more VGs locked.
411 * This will enforce correctness and prevent deadlocks rather
412 * than relying on the caller to set the flag properly.
414 if (!_blocking_supported
|| vgs_locked())
415 flags
|= LCK_NONBLOCK
;
418 ((flags
& LCK_TYPE_MASK
) != LCK_UNLOCK
) &&
419 (!(flags
& LCK_CACHE
)) &&
420 !lvmcache_verify_lock_order(vol
))
423 /* Lock VG to change on-disk metadata. */
424 /* If LVM1 driver knows about the VG, it can't be accessed. */
425 if (!check_lvm1_vg_inactive(cmd
, vol
))
429 /* All LV locks are non-blocking. */
430 flags
|= LCK_NONBLOCK
;
433 log_error("Unrecognised lock scope: %d",
434 flags
& LCK_SCOPE_MASK
);
438 strncpy(resource
, vol
, sizeof(resource
));
440 if (!_lock_vol(cmd
, resource
, flags
, lv_op
))
444 * If a real lock was acquired (i.e. not LCK_CACHE),
445 * perform an immediate unlock unless LCK_HOLD was requested.
447 if (!(flags
& LCK_CACHE
) && !(flags
& LCK_HOLD
) &&
448 ((flags
& LCK_TYPE_MASK
) != LCK_UNLOCK
)) {
449 if (!_lock_vol(cmd
, resource
,
450 (flags
& ~LCK_TYPE_MASK
) | LCK_UNLOCK
, lv_op
))
457 /* Unlock list of LVs */
458 int resume_lvs(struct cmd_context
*cmd
, struct dm_list
*lvs
)
462 dm_list_iterate_items(lvl
, lvs
)
463 resume_lv(cmd
, lvl
->lv
);
468 /* Lock a list of LVs */
469 int suspend_lvs(struct cmd_context
*cmd
, struct dm_list
*lvs
)
474 dm_list_iterate_items(lvl
, lvs
) {
475 if (!suspend_lv(cmd
, lvl
->lv
)) {
476 log_error("Failed to suspend %s", lvl
->lv
->name
);
477 dm_list_uniterate(lvh
, lvs
, &lvl
->list
) {
478 lvl
= dm_list_item(lvh
, struct lv_list
);
479 resume_lv(cmd
, lvl
->lv
);
489 /* Lock a list of LVs */
490 int activate_lvs(struct cmd_context
*cmd
, struct dm_list
*lvs
, unsigned exclusive
)
495 dm_list_iterate_items(lvl
, lvs
) {
497 if (!activate_lv(cmd
, lvl
->lv
)) {
498 log_error("Failed to activate %s", lvl
->lv
->name
);
501 } else if (!activate_lv_excl(cmd
, lvl
->lv
)) {
502 log_error("Failed to activate %s", lvl
->lv
->name
);
503 dm_list_uniterate(lvh
, lvs
, &lvl
->list
) {
504 lvl
= dm_list_item(lvh
, struct lv_list
);
505 activate_lv(cmd
, lvl
->lv
);
514 int vg_write_lock_held(void)
516 return _vg_write_lock_held
;
519 int locking_is_clustered(void)
521 return (_locking
.flags
& LCK_CLUSTERED
) ? 1 : 0;
524 int remote_lock_held(const char *vol
)
528 if (!locking_is_clustered())
531 if (!_locking
.query_resource
)
535 * If an error occured, expect that volume is active
537 if (!_locking
.query_resource(vol
, &mode
)) {
542 return mode
== LCK_NULL
? 0 : 1;