/*
   Unix SMB/CIFS implementation.

   trivial database library

   Copyright (C) Andrew Tridgell              1999-2005
   Copyright (C) Paul `Rusty' Russell              2000
   Copyright (C) Jeremy Allison                    2000-2003

     ** NOTE! The following LGPL license applies to the tdb
     ** library. This does NOT imply that all of Samba is released
     ** under the LGPL

   This library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 3 of the License, or (at your option) any later version.

   This library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with this library; if not, see <http://www.gnu.org/licenses/>.
*/

#include "tdb_private.h"

_PUBLIC_ void tdb_setalarm_sigptr(struct tdb_context *tdb, volatile sig_atomic_t *ptr)
{
	tdb->interrupt_sig_ptr = ptr;
}

static int fcntl_lock(struct tdb_context *tdb,
		      int rw, off_t off, off_t len, bool waitflag)
{
	struct flock fl;
	int cmd;

#ifdef USE_TDB_MUTEX_LOCKING
	{
		int ret;
		if (tdb_mutex_lock(tdb, rw, off, len, waitflag, &ret)) {
			return ret;
		}
	}
#endif

	fl.l_type = rw;
	fl.l_whence = SEEK_SET;
	fl.l_start = off;
	fl.l_len = len;
	fl.l_pid = 0;

	cmd = waitflag ? F_SETLKW : F_SETLK;

	return fcntl(tdb->fd, cmd, &fl);
}

static int fcntl_unlock(struct tdb_context *tdb, int rw, off_t off, off_t len)
{
	struct flock fl;
#if 0 /* Check they matched up locks and unlocks correctly. */
	char line[80];
	FILE *locks;
	bool found = false;

	locks = fopen("/proc/locks", "r");

	while (fgets(line, 80, locks)) {
		char *p;
		int type, start, l;

		/* eg. 1: FLOCK  ADVISORY  WRITE 2440 08:01:2180826 0 EOF */
		p = strchr(line, ':') + 1;
		if (strncmp(p, " POSIX  ADVISORY  ",
			    strlen(" POSIX  ADVISORY  ")) != 0)
			continue;

		p += strlen(" FLOCK  ADVISORY  ");
		if (strncmp(p, "READ  ", strlen("READ  ")) == 0)
			type = F_RDLCK;
		else if (strncmp(p, "WRITE ", strlen("WRITE ")) == 0)
			type = F_WRLCK;
		else
			continue;

		p += 6;
		if (atoi(p) != getpid())
			continue;

		p = strchr(strchr(p, ' ') + 1, ' ') + 1;
		start = atoi(p);
		p = strchr(p, ' ') + 1;
		if (strncmp(p, "EOF", 3) == 0)
			l = 0;
		else
			l = atoi(p) - start + 1;

		if (off == start) {
			if (len != l) {
				fprintf(stderr, "Len %u should be %u: %s",
					(int)len, l, line);
			} else if (type != rw) {
				fprintf(stderr, "Type %s wrong: %s",
					rw == F_RDLCK ? "READ" : "WRITE", line);
			} else {
				found = true;
				break;
			}
		}
	}

	if (!found) {
		fprintf(stderr, "Unlock on %u@%u not found!\n",
			(int)len, (int)off);
	}

	fclose(locks);
#endif

#ifdef USE_TDB_MUTEX_LOCKING
	{
		int ret;
		if (tdb_mutex_unlock(tdb, rw, off, len, &ret)) {
			return ret;
		}
	}
#endif

	fl.l_type = F_UNLCK;
	fl.l_whence = SEEK_SET;
	fl.l_start = off;
	fl.l_len = len;
	fl.l_pid = 0;

	return fcntl(tdb->fd, F_SETLKW, &fl);
}

/*
 * Calculate the lock offset for a list
 *
 * list -1 is the freelist, otherwise a hash chain.
 *
 * Note that we consistently (but without real reason) lock hash chains at an
 * offset that is 4 bytes below the real offset of the corresponding list head
 * in the db.
 *
 * This is the memory layout of the hashchain array:
 *
 * FREELIST_TOP + 0 = freelist
 * FREELIST_TOP + 4 = hashtable list 0
 * FREELIST_TOP + 8 = hashtable list 1
 * ...
 *
 * On the other hand, lock_offset() computes:
 *
 * freelist = FREELIST_TOP - 4
 * list 0   = FREELIST_TOP + 0
 * list 1   = FREELIST_TOP + 4
 * ...
 *
 * Unfortunately we can't change this calculation in order to align the locking
 * offset with the memory layout, as that would make the locking incompatible
 * between different tdb versions.
 */
static tdb_off_t lock_offset(int list)
{
	return FREELIST_TOP + 4*list;
}
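
/*
 * Illustrative only (editor's sketch, not upstream code): with the scheme
 * above, every lock offset sits 4 bytes below the list head it protects:
 *
 *	lock_offset(-1) == FREELIST_TOP - 4	(freelist)
 *	lock_offset(0)  == FREELIST_TOP + 0	(hash chain 0)
 *	lock_offset(1)  == FREELIST_TOP + 4	(hash chain 1)
 *
 * tdb_nest_lock() below rejects anything at or above
 * lock_offset(tdb->hash_size), the first offset past the last chain.
 */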

/* a byte range locking function - return 0 on success
   this function locks/unlocks "len" bytes at the specified offset.

   On error, errno is also set so that errors are passed back properly
   through tdb_open().

   note that a len of zero means lock to end of file
*/
int tdb_brlock(struct tdb_context *tdb,
	       int rw_type, tdb_off_t offset, size_t len,
	       enum tdb_lock_flags flags)
{
	int ret;

	if (tdb->flags & TDB_NOLOCK) {
		return 0;
	}

	if (flags & TDB_LOCK_MARK_ONLY) {
		return 0;
	}

	if ((rw_type == F_WRLCK) && (tdb->read_only || tdb->traverse_read)) {
		tdb->ecode = TDB_ERR_RDONLY;
		return -1;
	}

	do {
		ret = fcntl_lock(tdb, rw_type, offset, len,
				 flags & TDB_LOCK_WAIT);
		/* Check for a sigalarm break. */
		if (ret == -1 && errno == EINTR &&
				tdb->interrupt_sig_ptr &&
				*tdb->interrupt_sig_ptr) {
			break;
		}
	} while (ret == -1 && errno == EINTR);

	if (ret == -1) {
		tdb->ecode = TDB_ERR_LOCK;
		/* Generic lock error. errno set by fcntl.
		 * EAGAIN is an expected return from non-blocking
		 * locks. */
		if (!(flags & TDB_LOCK_PROBE) && errno != EAGAIN) {
			TDB_LOG((tdb, TDB_DEBUG_TRACE,"tdb_brlock failed (fd=%d) at offset %u rw_type=%d flags=%d len=%zu\n",
				 tdb->fd, offset, rw_type, flags, len));
		}
		return -1;
	}
	return 0;
}
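
/*
 * Usage sketch (editor's illustration, not upstream code): take and release
 * a blocking one-byte write lock on hash chain 0. Chain locks only ever
 * cover a single byte; passing len == 0 instead would lock from the offset
 * to end of file, as noted above.
 *
 *	if (tdb_brlock(tdb, F_WRLCK, lock_offset(0), 1, TDB_LOCK_WAIT) == 0) {
 *		... critical section ...
 *		tdb_brunlock(tdb, F_WRLCK, lock_offset(0), 1);
 *	}
 */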

int tdb_brunlock(struct tdb_context *tdb,
		 int rw_type, tdb_off_t offset, size_t len)
{
	int ret;

	if (tdb->flags & TDB_NOLOCK) {
		return 0;
	}

	do {
		ret = fcntl_unlock(tdb, rw_type, offset, len);
	} while (ret == -1 && errno == EINTR);

	if (ret == -1) {
		TDB_LOG((tdb, TDB_DEBUG_TRACE,"tdb_brunlock failed (fd=%d) at offset %u rw_type=%u len=%zu\n",
			 tdb->fd, offset, rw_type, len));
	}
	return ret;
}

/*
 * Do a tdb_brlock in a loop. Some OSes (such as solaris) have too
 * conservative deadlock detection and claim a deadlock when progress can be
 * made. For those OSes we may loop for a while.
 */
static int tdb_brlock_retry(struct tdb_context *tdb,
			    int rw_type, tdb_off_t offset, size_t len,
			    enum tdb_lock_flags flags)
{
	int count = 1000;

	while (count--) {
		struct timeval tv;
		int ret;

		ret = tdb_brlock(tdb, rw_type, offset, len, flags);
		if (ret == 0) {
			return 0;
		}
		if (errno != EDEADLK) {
			break;
		}
		/* sleep for as short a time as we can - more portable than usleep() */
		tv.tv_sec = 0;
		tv.tv_usec = 1;
		select(0, NULL, NULL, NULL, &tv);
	}
	return -1;
}
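
/*
 * Illustration (editor's note): callers that can hit spurious EDEADLK, such
 * as the read-to-write upgrade below, use this wrapper in place of a plain
 * tdb_brlock(), e.g.:
 *
 *	ret = tdb_brlock_retry(tdb, F_WRLCK, FREELIST_TOP, 0,
 *			       TDB_LOCK_WAIT|TDB_LOCK_PROBE);
 *
 * TDB_LOCK_PROBE suppresses the error log for these expected failures.
 */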

/*
  upgrade a read lock to a write lock.
*/
int tdb_allrecord_upgrade(struct tdb_context *tdb)
{
	int ret;

	if (tdb->allrecord_lock.count != 1) {
		TDB_LOG((tdb, TDB_DEBUG_ERROR,
			 "tdb_allrecord_upgrade failed: count %u too high\n",
			 tdb->allrecord_lock.count));
		tdb->ecode = TDB_ERR_LOCK;
		return -1;
	}

	if (tdb->allrecord_lock.off != 1) {
		TDB_LOG((tdb, TDB_DEBUG_ERROR,
			 "tdb_allrecord_upgrade failed: already upgraded?\n"));
		tdb->ecode = TDB_ERR_LOCK;
		return -1;
	}

	if (tdb_have_mutexes(tdb)) {
		ret = tdb_mutex_allrecord_upgrade(tdb);
		if (ret == -1) {
			goto fail;
		}
		ret = tdb_brlock_retry(tdb, F_WRLCK, lock_offset(tdb->hash_size),
				       0, TDB_LOCK_WAIT|TDB_LOCK_PROBE);
		if (ret == -1) {
			tdb_mutex_allrecord_downgrade(tdb);
		}
	} else {
		ret = tdb_brlock_retry(tdb, F_WRLCK, FREELIST_TOP, 0,
				       TDB_LOCK_WAIT|TDB_LOCK_PROBE);
	}

	if (ret == 0) {
		tdb->allrecord_lock.ltype = F_WRLCK;
		tdb->allrecord_lock.off = 0;
		return 0;
	}
fail:
	TDB_LOG((tdb, TDB_DEBUG_TRACE,"tdb_allrecord_upgrade failed\n"));
	return -1;
}

static struct tdb_lock_type *find_nestlock(struct tdb_context *tdb,
					   tdb_off_t offset)
{
	int i;

	for (i=0; i<tdb->num_lockrecs; i++) {
		if (tdb->lockrecs[i].off == offset) {
			return &tdb->lockrecs[i];
		}
	}
	return NULL;
}

/* lock an offset in the database. */
int tdb_nest_lock(struct tdb_context *tdb, uint32_t offset, int ltype,
		  enum tdb_lock_flags flags)
{
	struct tdb_lock_type *new_lck;

	if (offset >= lock_offset(tdb->hash_size)) {
		tdb->ecode = TDB_ERR_LOCK;
		TDB_LOG((tdb, TDB_DEBUG_ERROR,"tdb_lock: invalid offset %u for ltype=%d\n",
			 offset, ltype));
		return -1;
	}
	if (tdb->flags & TDB_NOLOCK)
		return 0;

	new_lck = find_nestlock(tdb, offset);
	if (new_lck) {
		if ((new_lck->ltype == F_RDLCK) && (ltype == F_WRLCK)) {
			if (!tdb_have_mutexes(tdb)) {
				int ret;
				/*
				 * Upgrade the underlying fcntl
				 * lock. Mutexes don't do readlocks,
				 * so this only applies to fcntl
				 * locking.
				 */
				ret = tdb_brlock(tdb, ltype, offset, 1, flags);
				if (ret != 0) {
					return ret;
				}
			}
			new_lck->ltype = F_WRLCK;
		}
		/*
		 * Just increment the in-memory struct, posix locks
		 * don't stack.
		 */
		new_lck->count++;
		return 0;
	}

	if (tdb->num_lockrecs == tdb->lockrecs_array_length) {
		new_lck = (struct tdb_lock_type *)realloc(
			tdb->lockrecs,
			sizeof(*tdb->lockrecs) * (tdb->num_lockrecs+1));
		if (new_lck == NULL) {
			errno = ENOMEM;
			return -1;
		}
		tdb->lockrecs_array_length = tdb->num_lockrecs+1;
		tdb->lockrecs = new_lck;
	}

	/* Since fcntl locks don't nest, we do a lock for the first one,
	   and simply bump the count for future ones */
	if (tdb_brlock(tdb, ltype, offset, 1, flags)) {
		return -1;
	}

	new_lck = &tdb->lockrecs[tdb->num_lockrecs];

	new_lck->off = offset;
	new_lck->count = 1;
	new_lck->ltype = ltype;
	tdb->num_lockrecs++;

	return 0;
}
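
/*
 * Illustrative only (editor's sketch): because fcntl locks don't nest,
 * repeated locks on one offset are counted in tdb->lockrecs rather than
 * re-requested from the kernel:
 *
 *	tdb_nest_lock(tdb, lock_offset(7), F_RDLCK, TDB_LOCK_WAIT);   // fcntl lock
 *	tdb_nest_lock(tdb, lock_offset(7), F_RDLCK, TDB_LOCK_WAIT);   // count -> 2
 *	tdb_nest_unlock(tdb, lock_offset(7), F_RDLCK, false);         // count -> 1
 *	tdb_nest_unlock(tdb, lock_offset(7), F_RDLCK, false);         // fcntl unlock
 *
 * Only the first and the last call reach the kernel.
 */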

static int tdb_lock_and_recover(struct tdb_context *tdb)
{
	int ret;

	/* We need to match locking order in transaction commit. */
	if (tdb_brlock(tdb, F_WRLCK, FREELIST_TOP, 0, TDB_LOCK_WAIT)) {
		return -1;
	}

	if (tdb_brlock(tdb, F_WRLCK, OPEN_LOCK, 1, TDB_LOCK_WAIT)) {
		tdb_brunlock(tdb, F_WRLCK, FREELIST_TOP, 0);
		return -1;
	}

	ret = tdb_transaction_recover(tdb);

	tdb_brunlock(tdb, F_WRLCK, OPEN_LOCK, 1);
	tdb_brunlock(tdb, F_WRLCK, FREELIST_TOP, 0);

	return ret;
}

static bool have_data_locks(const struct tdb_context *tdb)
{
	int i;

	for (i = 0; i < tdb->num_lockrecs; i++) {
		if (tdb->lockrecs[i].off >= lock_offset(-1))
			return true;
	}
	return false;
}

/*
 * An allrecord lock allows us to avoid per chain locks. Check if the
 * allrecord lock is strong enough.
 */
static int tdb_lock_covered_by_allrecord_lock(struct tdb_context *tdb,
					      int ltype)
{
	if (ltype == F_RDLCK) {
		/*
		 * The allrecord_lock is equal (F_RDLCK) or stronger
		 * (F_WRLCK). Pass.
		 */
		return 0;
	}

	if (tdb->allrecord_lock.ltype == F_RDLCK) {
		/*
		 * We ask for ltype==F_WRLCK, but the allrecord_lock
		 * is too weak. We can't upgrade here, so fail.
		 */
		tdb->ecode = TDB_ERR_LOCK;
		return -1;
	}

	/*
	 * Asking for F_WRLCK, allrecord is F_WRLCK as well. Pass.
	 */
	return 0;
}

static int tdb_lock_list(struct tdb_context *tdb, int list, int ltype,
			 enum tdb_lock_flags waitflag)
{
	int ret;
	bool check = false;

	/* an allrecord lock allows us to avoid per chain locks */
	if (tdb->allrecord_lock.count) {
		return tdb_lock_covered_by_allrecord_lock(tdb, ltype);
	}

	/*
	 * Check for recoveries: Someone might have kill -9'ed a process
	 * during a commit.
	 */
	check = !have_data_locks(tdb);
	ret = tdb_nest_lock(tdb, lock_offset(list), ltype, waitflag);

	if (ret == 0 && check && tdb_needs_recovery(tdb)) {
		tdb_nest_unlock(tdb, lock_offset(list), ltype, false);

		if (tdb_lock_and_recover(tdb) == -1) {
			return -1;
		}
		return tdb_lock_list(tdb, list, ltype, waitflag);
	}
	return ret;
}

/* lock a list in the database. list -1 is the alloc list */
int tdb_lock(struct tdb_context *tdb, int list, int ltype)
{
	int ret;

	ret = tdb_lock_list(tdb, list, ltype, TDB_LOCK_WAIT);
	if (ret) {
		TDB_LOG((tdb, TDB_DEBUG_ERROR, "tdb_lock failed on list %d "
			 "ltype=%d (%s)\n", list, ltype, strerror(errno)));
	}
	return ret;
}

/* lock a list in the database. list -1 is the alloc list. non-blocking lock */
_PUBLIC_ int tdb_lock_nonblock(struct tdb_context *tdb, int list, int ltype);
_PUBLIC_ int tdb_lock_nonblock(struct tdb_context *tdb, int list, int ltype)
{
	return tdb_lock_list(tdb, list, ltype, TDB_LOCK_NOWAIT);
}

int tdb_nest_unlock(struct tdb_context *tdb, uint32_t offset, int ltype,
		    bool mark_lock)
{
	int ret = -1;
	struct tdb_lock_type *lck;

	if (tdb->flags & TDB_NOLOCK)
		return 0;

	/* Sanity checks */
	if (offset >= lock_offset(tdb->hash_size)) {
		TDB_LOG((tdb, TDB_DEBUG_ERROR, "tdb_unlock: offset %u invalid (%d)\n", offset, tdb->hash_size));
		return ret;
	}

	lck = find_nestlock(tdb, offset);
	if ((lck == NULL) || (lck->count == 0)) {
		TDB_LOG((tdb, TDB_DEBUG_ERROR, "tdb_unlock: count is 0\n"));
		return -1;
	}

	if (lck->count > 1) {
		lck->count--;
		return 0;
	}

	/*
	 * This lock has count==1 left, so we need to unlock it in the
	 * kernel. We don't bother with decrementing the in-memory array
	 * element, we're about to overwrite it with the last array element
	 * anyway.
	 */

	if (mark_lock) {
		ret = 0;
	} else {
		ret = tdb_brunlock(tdb, ltype, offset, 1);
	}

	/*
	 * Shrink the array by overwriting the element just unlocked with the
	 * last array element.
	 */
	*lck = tdb->lockrecs[--tdb->num_lockrecs];

	/*
	 * We don't bother with realloc when the array shrinks, but if we have
	 * a completely idle tdb we should get rid of the locked array.
	 */

	if (ret)
		TDB_LOG((tdb, TDB_DEBUG_ERROR, "tdb_unlock: An error occurred unlocking!\n"));
	return ret;
}

_PUBLIC_ int tdb_unlock(struct tdb_context *tdb, int list, int ltype);
_PUBLIC_ int tdb_unlock(struct tdb_context *tdb, int list, int ltype)
{
	/* a global lock allows us to avoid per chain locks */
	if (tdb->allrecord_lock.count) {
		return tdb_lock_covered_by_allrecord_lock(tdb, ltype);
	}

	return tdb_nest_unlock(tdb, lock_offset(list), ltype, false);
}

/*
  get the transaction lock
 */
int tdb_transaction_lock(struct tdb_context *tdb, int ltype,
			 enum tdb_lock_flags lockflags)
{
	return tdb_nest_lock(tdb, TRANSACTION_LOCK, ltype, lockflags);
}

/*
  release the transaction lock
 */
int tdb_transaction_unlock(struct tdb_context *tdb, int ltype)
{
	return tdb_nest_unlock(tdb, TRANSACTION_LOCK, ltype, false);
}

/* Returns 0 if all done, -1 if error, 1 if ok. */
static int tdb_allrecord_check(struct tdb_context *tdb, int ltype,
			       enum tdb_lock_flags flags, bool upgradable)
{
	/* There are no locks on read-only dbs */
	if (tdb->read_only || tdb->traverse_read) {
		tdb->ecode = TDB_ERR_LOCK;
		return -1;
	}

	if (tdb->allrecord_lock.count &&
	    tdb->allrecord_lock.ltype == (uint32_t)ltype) {
		tdb->allrecord_lock.count++;
		return 0;
	}

	if (tdb->allrecord_lock.count) {
		/* a global lock of a different type exists */
		tdb->ecode = TDB_ERR_LOCK;
		return -1;
	}

	if (tdb_have_extra_locks(tdb)) {
		/* can't combine global and chain locks */
		tdb->ecode = TDB_ERR_LOCK;
		return -1;
	}

	if (upgradable && ltype != F_RDLCK) {
		/* tdb error: you can't upgrade a write lock! */
		tdb->ecode = TDB_ERR_LOCK;
		return -1;
	}
	return 1;
}

/* We only need to lock individual bytes, but Linux merges consecutive locks
 * so we lock in contiguous ranges. */
static int tdb_chainlock_gradual(struct tdb_context *tdb,
				 int ltype, enum tdb_lock_flags flags,
				 size_t off, size_t len)
{
	int ret;
	enum tdb_lock_flags nb_flags = (flags & ~TDB_LOCK_WAIT);

	if (len <= 4) {
		/* Single record.  Just do blocking lock. */
		return tdb_brlock(tdb, ltype, off, len, flags);
	}

	/* First we try non-blocking. */
	ret = tdb_brlock(tdb, ltype, off, len, nb_flags);
	if (ret == 0) {
		return 0;
	}

	/* Try locking first half, then second. */
	ret = tdb_chainlock_gradual(tdb, ltype, flags, off, len / 2);
	if (ret == -1)
		return -1;

	ret = tdb_chainlock_gradual(tdb, ltype, flags,
				    off + len / 2, len - len / 2);
	if (ret == -1) {
		tdb_brunlock(tdb, ltype, off, len / 2);
		return -1;
	}
	return 0;
}
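
/*
 * Illustration (editor's note): for a contended 16-byte range the recursion
 * above proceeds roughly as
 *
 *	brlock(off, 16, nonblock)	-> EAGAIN
 *	gradual(off, 8)			-> nonblock succeeds, or splits into 4+4
 *	gradual(off+8, 8)		-> likewise
 *
 * until the pieces are <= 4 bytes, which are taken with blocking locks.
 * Waiting only on small pieces means an all-record locker cannot be starved
 * indefinitely by a stream of single-chain lockers.
 */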

/* lock/unlock entire database.  It can only be upgradable if you have some
 * other way of guaranteeing exclusivity (ie. transaction write lock).
 * We do the locking gradually to avoid being starved by smaller locks. */
int tdb_allrecord_lock(struct tdb_context *tdb, int ltype,
		       enum tdb_lock_flags flags, bool upgradable)
{
	int ret;

	switch (tdb_allrecord_check(tdb, ltype, flags, upgradable)) {
	case -1:
		return -1;
	case 0:
		return 0;
	}

	/* We cover two kinds of locks:
	 * 1) Normal chain locks.  Taken for almost all operations.
	 * 2) Individual records locks.  Taken after normal or free
	 *    chain locks.
	 *
	 * It is (1) which causes the starvation problem, so we're only
	 * gradual for that. */

	if (tdb_have_mutexes(tdb)) {
		ret = tdb_mutex_allrecord_lock(tdb, ltype, flags);
	} else {
		ret = tdb_chainlock_gradual(tdb, ltype, flags, FREELIST_TOP,
					    tdb->hash_size * 4);
	}

	if (ret == -1) {
		return -1;
	}

	/* Grab individual record locks. */
	if (tdb_brlock(tdb, ltype, lock_offset(tdb->hash_size), 0,
		       flags) == -1) {
		if (tdb_have_mutexes(tdb)) {
			tdb_mutex_allrecord_unlock(tdb);
		} else {
			tdb_brunlock(tdb, ltype, FREELIST_TOP,
				     tdb->hash_size * 4);
		}
		return -1;
	}

	tdb->allrecord_lock.count = 1;
	/* If it's upgradable, it's actually exclusive so we can treat
	 * it as a write lock. */
	tdb->allrecord_lock.ltype = upgradable ? F_WRLCK : ltype;
	tdb->allrecord_lock.off = upgradable;

	if (tdb_needs_recovery(tdb)) {
		bool mark = flags & TDB_LOCK_MARK_ONLY;
		tdb_allrecord_unlock(tdb, ltype, mark);
		if (mark) {
			tdb->ecode = TDB_ERR_LOCK;
			TDB_LOG((tdb, TDB_DEBUG_ERROR,
				 "tdb_lockall_mark cannot do recovery\n"));
			return -1;
		}
		if (tdb_lock_and_recover(tdb) == -1) {
			return -1;
		}
		return tdb_allrecord_lock(tdb, ltype, flags, upgradable);
	}

	return 0;
}
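
/*
 * Usage sketch (editor's illustration; that the transaction code is the
 * caller passing upgradable=true is an assumption based on the comment
 * above): take an upgradable read lock, then promote it once exclusivity
 * is otherwise guaranteed:
 *
 *	if (tdb_allrecord_lock(tdb, F_RDLCK, TDB_LOCK_WAIT, true) == 0) {
 *		...
 *		tdb_allrecord_upgrade(tdb);	// F_RDLCK -> F_WRLCK
 *	}
 */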

/* unlock entire db */
int tdb_allrecord_unlock(struct tdb_context *tdb, int ltype, bool mark_lock)
{
	/* There are no locks on read-only dbs */
	if (tdb->read_only || tdb->traverse_read) {
		tdb->ecode = TDB_ERR_LOCK;
		return -1;
	}

	if (tdb->allrecord_lock.count == 0) {
		tdb->ecode = TDB_ERR_LOCK;
		return -1;
	}

	/* Upgradable locks are marked as write locks. */
	if (tdb->allrecord_lock.ltype != (uint32_t)ltype
	    && (!tdb->allrecord_lock.off || ltype != F_RDLCK)) {
		tdb->ecode = TDB_ERR_LOCK;
		return -1;
	}

	if (tdb->allrecord_lock.count > 1) {
		tdb->allrecord_lock.count--;
		return 0;
	}

	if (!mark_lock) {
		int ret;

		if (tdb_have_mutexes(tdb)) {
			ret = tdb_mutex_allrecord_unlock(tdb);
			if (ret == 0) {
				ret = tdb_brunlock(tdb, ltype,
						   lock_offset(tdb->hash_size),
						   0);
			}
		} else {
			ret = tdb_brunlock(tdb, ltype, FREELIST_TOP, 0);
		}

		if (ret != 0) {
			TDB_LOG((tdb, TDB_DEBUG_ERROR, "tdb_unlockall failed "
				 "(%s)\n", strerror(errno)));
			return -1;
		}
	}

	tdb->allrecord_lock.count = 0;
	tdb->allrecord_lock.ltype = 0;

	return 0;
}

/* lock entire database with write lock */
_PUBLIC_ int tdb_lockall(struct tdb_context *tdb)
{
	tdb_trace(tdb, "tdb_lockall");
	return tdb_allrecord_lock(tdb, F_WRLCK, TDB_LOCK_WAIT, false);
}

/* lock entire database with write lock - mark only */
_PUBLIC_ int tdb_lockall_mark(struct tdb_context *tdb)
{
	tdb_trace(tdb, "tdb_lockall_mark");
	return tdb_allrecord_lock(tdb, F_WRLCK, TDB_LOCK_MARK_ONLY, false);
}

/* unlock entire database with write lock - unmark only */
_PUBLIC_ int tdb_lockall_unmark(struct tdb_context *tdb)
{
	tdb_trace(tdb, "tdb_lockall_unmark");
	return tdb_allrecord_unlock(tdb, F_WRLCK, true);
}

/* lock entire database with write lock - nonblocking variant */
_PUBLIC_ int tdb_lockall_nonblock(struct tdb_context *tdb)
{
	int ret = tdb_allrecord_lock(tdb, F_WRLCK, TDB_LOCK_NOWAIT, false);
	tdb_trace_ret(tdb, "tdb_lockall_nonblock", ret);
	return ret;
}

/* unlock entire database with write lock */
_PUBLIC_ int tdb_unlockall(struct tdb_context *tdb)
{
	tdb_trace(tdb, "tdb_unlockall");
	return tdb_allrecord_unlock(tdb, F_WRLCK, false);
}

/* lock entire database with read lock */
_PUBLIC_ int tdb_lockall_read(struct tdb_context *tdb)
{
	tdb_trace(tdb, "tdb_lockall_read");
	return tdb_allrecord_lock(tdb, F_RDLCK, TDB_LOCK_WAIT, false);
}

/* lock entire database with read lock - nonblock variant */
_PUBLIC_ int tdb_lockall_read_nonblock(struct tdb_context *tdb)
{
	int ret = tdb_allrecord_lock(tdb, F_RDLCK, TDB_LOCK_NOWAIT, false);
	tdb_trace_ret(tdb, "tdb_lockall_read_nonblock", ret);
	return ret;
}

/* unlock entire database with read lock */
_PUBLIC_ int tdb_unlockall_read(struct tdb_context *tdb)
{
	tdb_trace(tdb, "tdb_unlockall_read");
	return tdb_allrecord_unlock(tdb, F_RDLCK, false);
}

/* lock/unlock one hash chain. This is meant to be used to reduce
   contention - it cannot guarantee how many records will be locked */
_PUBLIC_ int tdb_chainlock(struct tdb_context *tdb, TDB_DATA key)
{
	int ret = tdb_lock(tdb, BUCKET(tdb->hash_fn(&key)), F_WRLCK);
	tdb_trace_1rec(tdb, "tdb_chainlock", key);
	return ret;
}
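
/*
 * Usage sketch (editor's illustration, not upstream code): a
 * read-modify-write of a single record, holding the chain lock across
 * fetch and store so no other process can intervene:
 *
 *	tdb_chainlock(tdb, key);
 *	data = tdb_fetch(tdb, key);
 *	... modify data, then tdb_store(tdb, key, data, TDB_REPLACE); ...
 *	free(data.dptr);
 *	tdb_chainunlock(tdb, key);
 *
 * Unrelated keys hash to other chains and proceed in parallel.
 */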

/* lock/unlock one hash chain, non-blocking. This is meant to be used
   to reduce contention - it cannot guarantee how many records will be
   locked */
_PUBLIC_ int tdb_chainlock_nonblock(struct tdb_context *tdb, TDB_DATA key)
{
	int ret = tdb_lock_nonblock(tdb, BUCKET(tdb->hash_fn(&key)), F_WRLCK);
	tdb_trace_1rec_ret(tdb, "tdb_chainlock_nonblock", key, ret);
	return ret;
}

/* mark a chain as locked without actually locking it. Warning! use with great caution! */
_PUBLIC_ int tdb_chainlock_mark(struct tdb_context *tdb, TDB_DATA key)
{
	int ret = tdb_nest_lock(tdb, lock_offset(BUCKET(tdb->hash_fn(&key))),
				F_WRLCK, TDB_LOCK_MARK_ONLY);
	tdb_trace_1rec(tdb, "tdb_chainlock_mark", key);
	return ret;
}

/* unmark a chain as locked without actually locking it. Warning! use with great caution! */
_PUBLIC_ int tdb_chainlock_unmark(struct tdb_context *tdb, TDB_DATA key)
{
	tdb_trace_1rec(tdb, "tdb_chainlock_unmark", key);
	return tdb_nest_unlock(tdb, lock_offset(BUCKET(tdb->hash_fn(&key))),
			       F_WRLCK, true);
}

_PUBLIC_ int tdb_chainunlock(struct tdb_context *tdb, TDB_DATA key)
{
	tdb_trace_1rec(tdb, "tdb_chainunlock", key);
	return tdb_unlock(tdb, BUCKET(tdb->hash_fn(&key)), F_WRLCK);
}

_PUBLIC_ int tdb_chainlock_read(struct tdb_context *tdb, TDB_DATA key)
{
	int ret;
	ret = tdb_lock(tdb, BUCKET(tdb->hash_fn(&key)), F_RDLCK);
	tdb_trace_1rec(tdb, "tdb_chainlock_read", key);
	return ret;
}

_PUBLIC_ int tdb_chainunlock_read(struct tdb_context *tdb, TDB_DATA key)
{
	tdb_trace_1rec(tdb, "tdb_chainunlock_read", key);
	return tdb_unlock(tdb, BUCKET(tdb->hash_fn(&key)), F_RDLCK);
}

_PUBLIC_ int tdb_chainlock_read_nonblock(struct tdb_context *tdb, TDB_DATA key)
{
	int ret = tdb_lock_nonblock(tdb, BUCKET(tdb->hash_fn(&key)), F_RDLCK);
	tdb_trace_1rec_ret(tdb, "tdb_chainlock_read_nonblock", key, ret);
	return ret;
}

/* record lock stops delete underneath */
int tdb_lock_record(struct tdb_context *tdb, tdb_off_t off)
{
	if (tdb->allrecord_lock.count) {
		return 0;
	}
	return off ? tdb_brlock(tdb, F_RDLCK, off, 1, TDB_LOCK_WAIT) : 0;
}

/*
  Write locks override our own fcntl readlocks, so check it here.
  Note this is meant to be F_SETLK, *not* F_SETLKW, as it's not
  an error to fail to get the lock here.
*/
int tdb_write_lock_record(struct tdb_context *tdb, tdb_off_t off)
{
	struct tdb_traverse_lock *i;
	for (i = &tdb->travlocks; i; i = i->next)
		if (i->off == off)
			return -1;
	if (tdb->allrecord_lock.count) {
		if (tdb->allrecord_lock.ltype == F_WRLCK) {
			return 0;
		}
		return -1;
	}
	return tdb_brlock(tdb, F_WRLCK, off, 1, TDB_LOCK_NOWAIT|TDB_LOCK_PROBE);
}

int tdb_write_unlock_record(struct tdb_context *tdb, tdb_off_t off)
{
	if (tdb->allrecord_lock.count) {
		return 0;
	}
	return tdb_brunlock(tdb, F_WRLCK, off, 1);
}

/* fcntl locks don't stack: avoid unlocking someone else's */
int tdb_unlock_record(struct tdb_context *tdb, tdb_off_t off)
{
	struct tdb_traverse_lock *i;
	uint32_t count = 0;

	if (tdb->allrecord_lock.count) {
		return 0;
	}

	if (off == 0)
		return 0;
	for (i = &tdb->travlocks; i; i = i->next)
		if (i->off == off)
			count++;
	return (count == 1 ? tdb_brunlock(tdb, F_RDLCK, off, 1) : 0);
}

bool tdb_have_extra_locks(struct tdb_context *tdb)
{
	unsigned int extra = tdb->num_lockrecs;

	/* A transaction holds the lock for all records. */
	if (!tdb->transaction && tdb->allrecord_lock.count) {
		return true;
	}

	/* We always hold the active lock if CLEAR_IF_FIRST. */
	if (find_nestlock(tdb, ACTIVE_LOCK)) {
		extra--;
	}

	/* In a transaction, we expect to hold the transaction lock */
	if (tdb->transaction && find_nestlock(tdb, TRANSACTION_LOCK)) {
		extra--;
	}

	return extra;
}

/* The transaction code uses this to remove all locks. */
void tdb_release_transaction_locks(struct tdb_context *tdb)
{
	int i;
	unsigned int active = 0;

	if (tdb->allrecord_lock.count != 0) {
		tdb_allrecord_unlock(tdb, tdb->allrecord_lock.ltype, false);
		tdb->allrecord_lock.count = 0;
	}

	for (i=0;i<tdb->num_lockrecs;i++) {
		struct tdb_lock_type *lck = &tdb->lockrecs[i];

		/* Don't release the active lock! Copy it to first entry. */
		if (lck->off == ACTIVE_LOCK) {
			tdb->lockrecs[active++] = *lck;
		} else {
			tdb_brunlock(tdb, lck->ltype, lck->off, 1);
		}
	}
	tdb->num_lockrecs = active;
}

/* Following functions are added specifically to support CTDB. */

/* Don't do actual fcntl locking, just mark tdb locked */
_PUBLIC_ int tdb_transaction_write_lock_mark(struct tdb_context *tdb);
_PUBLIC_ int tdb_transaction_write_lock_mark(struct tdb_context *tdb)
{
	return tdb_transaction_lock(tdb, F_WRLCK, TDB_LOCK_MARK_ONLY);
}

/* Don't do actual fcntl unlocking, just mark tdb unlocked */
_PUBLIC_ int tdb_transaction_write_lock_unmark(struct tdb_context *tdb);
_PUBLIC_ int tdb_transaction_write_lock_unmark(struct tdb_context *tdb)
{
	return tdb_nest_unlock(tdb, TRANSACTION_LOCK, F_WRLCK, true);
}