/*
   Unix SMB/CIFS implementation.

   trivial database library

   Copyright (C) Anton Blanchard                   2001

   ** NOTE! The following LGPL license applies to the tdb
   ** library. This does NOT imply that all of Samba is released
   ** under the LGPL

   This library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2 of the License, or (at your option) any later version.

   This library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with this library; if not, write to the Free Software
   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
*/
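
/* The include block here is a minimal sketch; the exact set in the Samba
 * tree may differ, but the code below needs at least EBUSY (errno.h),
 * malloc/free, write/lseek, nanosleep, sysconf, and the spinlock_t and
 * tdb_rwlock_t definitions from the tdb headers. */
#ifdef STANDALONE
#include <stdlib.h>
#include <stdio.h>
#include <unistd.h>
#include <string.h>
#include <fcntl.h>
#include <errno.h>
#include <time.h>
#include "spinlock.h"

#define DEBUG
#else
#include "includes.h"
#endif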
#ifdef USE_SPINLOCKS

/*
 * ARCH SPECIFIC
 */

#if defined(SPARC_SPINLOCKS)
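
/* SPARC convention: 0 means unlocked. ldstub atomically fills the lock
 * byte with 0xff and hands back its previous value, so reading 0 back
 * means the lock was free and is now ours. */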
static inline int __spin_trylock(spinlock_t *lock)
{
	unsigned int result;

	asm volatile("ldstub [%1], %0"
		: "=r" (result)
		: "r" (lock)
		: "memory");

	return (result == 0) ? 0 : EBUSY;
}

static inline void __spin_unlock(spinlock_t *lock)
{
	asm volatile("":::"memory");
	*lock = 0;
}

static inline void __spin_lock_init(spinlock_t *lock)
{
	*lock = 0;
}

static inline int __spin_is_locked(spinlock_t *lock)
{
	return (*lock != 0);
}
#elif defined(POWERPC_SPINLOCKS)
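
/* PowerPC acquires the lock with a load-reserve/store-conditional
 * (lwarx/stwcx.) retry loop: result ends up 1 only when our conditional
 * store of the lock word succeeded, and isync keeps later accesses from
 * being pulled above the acquire. */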
static inline int __spin_trylock(spinlock_t *lock)
{
	unsigned int result;

	__asm__ __volatile__(
"1:	lwarx		%0,0,%1\n\
	cmpwi		0,%0,0\n\
	li		%0,0\n\
	bne-		2f\n\
	li		%0,1\n\
	stwcx.		%0,0,%1\n\
	bne-		1b\n\
	isync\n\
2:"	: "=&r"(result)
	: "r"(lock)
	: "cr0", "memory");

	return (result == 1) ? 0 : EBUSY;
}

static inline void __spin_unlock(spinlock_t *lock)
{
	asm volatile("eieio":::"memory");
	*lock = 0;
}

static inline void __spin_lock_init(spinlock_t *lock)
{
	*lock = 0;
}

static inline int __spin_is_locked(spinlock_t *lock)
{
	return (*lock != 0);
}
#elif defined(INTEL_SPINLOCKS)
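
/* Intel uses the opposite convention of the other ports: 1 means unlocked
 * and 0 means locked. xchgl atomically swaps 0 into the lock word, so a
 * positive old value means the lock was free and is now ours. */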
static inline int __spin_trylock(spinlock_t *lock)
{
	int oldval;

	asm volatile("xchgl %0,%1"
		: "=r" (oldval), "=m" (*lock)
		: "0" (0)
		: "memory");

	return oldval > 0 ? 0 : EBUSY;
}

static inline void __spin_unlock(spinlock_t *lock)
{
	asm volatile("":::"memory");
	*lock = 1;
}

static inline void __spin_lock_init(spinlock_t *lock)
{
	*lock = 1;
}

static inline int __spin_is_locked(spinlock_t *lock)
{
	return (*lock != 1);
}
#elif defined(MIPS_SPINLOCKS) && defined(sgi) && (_COMPILER_VERSION >= 730)

/* Implement spinlocks on IRIX using the MIPSPro atomic fetch operations. See
 * sync(3) for the details of the intrinsic operations.
 *
 * "sgi" and "_COMPILER_VERSION" are always defined by MIPSPro.
 */

#ifdef STANDALONE

/* MIPSPro 7.3 has "__inline" as an extension, but not "inline". */
#define inline __inline

#endif /* STANDALONE */
/* Returns 0 if the lock is acquired, EBUSY otherwise. */
static inline int __spin_trylock(spinlock_t *lock)
{
	unsigned int val;
	val = __lock_test_and_set(lock, 1);
	return val == 0 ? 0 : EBUSY;
}

static inline void __spin_unlock(spinlock_t *lock)
{
	__lock_release(lock);
}

static inline void __spin_lock_init(spinlock_t *lock)
{
	__lock_release(lock);
}

/* Returns 1 if the lock is held, 0 otherwise. */
static inline int __spin_is_locked(spinlock_t *lock)
{
	unsigned int val;
	val = __add_and_fetch(lock, 0);
	return val;
}
#elif defined(MIPS_SPINLOCKS)

static inline unsigned int load_linked(unsigned long addr)
{
	unsigned int res;

	__asm__ __volatile__("ll\t%0,(%1)"
		: "=r" (res)
		: "r" (addr));

	return res;
}

static inline unsigned int store_conditional(unsigned long addr, unsigned int value)
{
	unsigned int res;

	__asm__ __volatile__("sc\t%0,(%2)"
		: "=r" (res)
		: "0" (value), "r" (addr));
	return res;
}

static inline int __spin_trylock(spinlock_t *lock)
{
	unsigned int mw;

	do {
		mw = load_linked((unsigned long)lock);
		if (mw)
			return EBUSY;
	} while (!store_conditional((unsigned long)lock, 1));

	asm volatile("":::"memory");

	return 0;
}

static inline void __spin_unlock(spinlock_t *lock)
{
	asm volatile("":::"memory");
	*lock = 0;
}

static inline void __spin_lock_init(spinlock_t *lock)
{
	*lock = 0;
}

static inline int __spin_is_locked(spinlock_t *lock)
{
	return (*lock != 0);
}
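
/* A possible portable fallback for compilers with the GCC __sync builtins.
 * This branch is an illustrative sketch, not part of the original port
 * list, and the GCC_SYNC_SPINLOCKS macro name is invented for the example. */
#elif defined(GCC_SYNC_SPINLOCKS)

static inline int __spin_trylock(spinlock_t *lock)
{
	/* returns the previous value; 0 means the lock was free and is now ours */
	return __sync_lock_test_and_set(lock, 1) == 0 ? 0 : EBUSY;
}

static inline void __spin_unlock(spinlock_t *lock)
{
	/* release barrier plus a store of 0 */
	__sync_lock_release(lock);
}

static inline void __spin_lock_init(spinlock_t *lock)
{
	*lock = 0;
}

static inline int __spin_is_locked(spinlock_t *lock)
{
	return (*lock != 0);
}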

#else
#error Need to implement spinlock code in spinlock.c
#endif

/*
 * OS SPECIFIC
 */
static void yield_cpu(void)
{
	struct timespec tm;

#ifdef USE_SCHED_YIELD
	sched_yield();
#else
	/* Linux will busy loop for delays < 2ms on real time tasks */
	tm.tv_sec = 0;
	tm.tv_nsec = 2000000L + 1;
	nanosleep(&tm, NULL);
#endif
}
static int this_is_smp(void)
{
#if defined(HAVE_SYSCONF) && defined(SYSCONF_SC_NPROC_ONLN)
	return (sysconf(_SC_NPROC_ONLN) > 1) ? 1 : 0;
#else
	return 0;
#endif
}
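
/* Note: _SC_NPROC_ONLN is the IRIX spelling of this sysconf name; glibc
 * exposes the same count as _SC_NPROCESSORS_ONLN, which is presumably what
 * the configure-provided SYSCONF_SC_NPROC_ONLN guard is checking for. */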

/*
 * GENERIC
 */

static int smp_machine = 0;

static inline void __spin_lock(spinlock_t *lock)
{
	int ntries = 0;

	while(__spin_trylock(lock)) {
		/* spin on the read-only test so waiters don't keep bouncing
		   the cache line with atomic operations */
		while(__spin_is_locked(lock)) {
			if (smp_machine && ntries++ < MAX_BUSY_LOOPS)
				continue;
			yield_cpu();
		}
	}
}
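
/* The reader/writer locks below multiplex one counter per lock: readers
 * increment it, and a writer claims exclusivity by OR-ing in RWLOCK_BIAS,
 * a bit chosen to sit far above any realistic reader count. The spinlock
 * only guards updates to the counter; waiters poll it outside the lock. */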
static void __read_lock(tdb_rwlock_t *rwlock)
{
	int ntries = 0;

	while(1) {
		__spin_lock(&rwlock->lock);

		if (!(rwlock->count & RWLOCK_BIAS)) {
			rwlock->count++;
			__spin_unlock(&rwlock->lock);
			break;
		}

		__spin_unlock(&rwlock->lock);

		while(rwlock->count & RWLOCK_BIAS) {
			if (smp_machine && ntries++ < MAX_BUSY_LOOPS)
				continue;
			yield_cpu();
		}
	}
}
static void __write_lock(tdb_rwlock_t *rwlock)
{
	int ntries = 0;

	while(1) {
		__spin_lock(&rwlock->lock);

		if (rwlock->count == 0) {
			rwlock->count |= RWLOCK_BIAS;
			__spin_unlock(&rwlock->lock);
			break;
		}

		__spin_unlock(&rwlock->lock);

		while(rwlock->count != 0) {
			if (smp_machine && ntries++ < MAX_BUSY_LOOPS)
				continue;
			yield_cpu();
		}
	}
}
static void __write_unlock(tdb_rwlock_t *rwlock)
{
	__spin_lock(&rwlock->lock);

#ifdef DEBUG
	if (!(rwlock->count & RWLOCK_BIAS))
		fprintf(stderr, "bug: write_unlock\n");
#endif

	rwlock->count &= ~RWLOCK_BIAS;
	__spin_unlock(&rwlock->lock);
}
static void __read_unlock(tdb_rwlock_t *rwlock)
{
	__spin_lock(&rwlock->lock);

#ifdef DEBUG
	if (!rwlock->count)
		fprintf(stderr, "bug: read_unlock\n");

	if (rwlock->count & RWLOCK_BIAS)
		fprintf(stderr, "bug: read_unlock\n");
#endif

	rwlock->count--;
	__spin_unlock(&rwlock->lock);
}
/* TDB SPECIFIC */

/* lock a list in the database. list -1 is the alloc list */
int tdb_spinlock(TDB_CONTEXT *tdb, int list, int rw_type)
{
	tdb_rwlock_t *rwlocks;

	if (!tdb->map_ptr) return -1;
	rwlocks = (tdb_rwlock_t *)((char *)tdb->map_ptr + tdb->header.rwlocks);

	switch(rw_type) {
	case F_RDLCK:
		__read_lock(&rwlocks[list+1]);
		break;

	case F_WRLCK:
		__write_lock(&rwlocks[list+1]);
		break;

	default:
		return TDB_ERRCODE(TDB_ERR_LOCK, -1);
	}
	return 0;
}
/* unlock the database. */
int tdb_spinunlock(TDB_CONTEXT *tdb, int list, int rw_type)
{
	tdb_rwlock_t *rwlocks;

	if (!tdb->map_ptr) return -1;
	rwlocks = (tdb_rwlock_t *)((char *)tdb->map_ptr + tdb->header.rwlocks);

	switch(rw_type) {
	case F_RDLCK:
		__read_unlock(&rwlocks[list+1]);
		break;

	case F_WRLCK:
		__write_unlock(&rwlocks[list+1]);
		break;

	default:
		return TDB_ERRCODE(TDB_ERR_LOCK, -1);
	}

	return 0;
}
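
/* A minimal usage sketch (illustrative, not part of the original file):
 * callers pair the two entry points around access to one hash chain,
 * picking the chain from the key hash, e.g.
 *
 *	int list = hash % tdb->header.hash_size;
 *	if (tdb_spinlock(tdb, list, F_RDLCK) == 0) {
 *		... walk the chain ...
 *		tdb_spinunlock(tdb, list, F_RDLCK);
 *	}
 *
 * and list -1 addresses the allocation list. */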
int tdb_create_rwlocks(int fd, unsigned int hash_size)
{
	unsigned size, i;
	tdb_rwlock_t *rwlocks;

	size = TDB_SPINLOCK_SIZE(hash_size);
	rwlocks = malloc(size);
	if (!rwlocks)
		return -1;

	for(i = 0; i < hash_size+1; i++) {
		__spin_lock_init(&rwlocks[i].lock);
		rwlocks[i].count = 0;
	}

	/* Write it out (appending to end) */
	if (write(fd, rwlocks, size) != size) {
		free(rwlocks);
		return -1;
	}
	smp_machine = this_is_smp();
	free(rwlocks);
	return 0;
}
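
/* The loop above initialises hash_size+1 locks: slot 0 backs the allocation
 * list (list -1) and slots 1..hash_size back the hash chains, which is why
 * the lookups elsewhere index rwlocks[list+1]. TDB_SPINLOCK_SIZE presumably
 * expands to (hash_size + 1) * sizeof(tdb_rwlock_t). */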
int tdb_clear_spinlocks(TDB_CONTEXT *tdb)
{
	tdb_rwlock_t *rwlocks;
	unsigned i;

	if (tdb->header.rwlocks == 0) return 0;
	if (!tdb->map_ptr) return -1;

	/* We're mmapped here */
	rwlocks = (tdb_rwlock_t *)((char *)tdb->map_ptr + tdb->header.rwlocks);
	for(i = 0; i < tdb->header.hash_size+1; i++) {
		__spin_lock_init(&rwlocks[i].lock);
		rwlocks[i].count = 0;
	}
	return 0;
}

#else /* !USE_SPINLOCKS */

int tdb_create_rwlocks(int fd, unsigned int hash_size) { return 0; }
int tdb_spinlock(TDB_CONTEXT *tdb, int list, int rw_type) { return -1; }
int tdb_spinunlock(TDB_CONTEXT *tdb, int list, int rw_type) { return -1; }
/* Non-spinlock version: remove spinlock pointer */
int tdb_clear_spinlocks(TDB_CONTEXT *tdb)
{
	tdb_off off = (tdb_off)((char *)&tdb->header.rwlocks
				- (char *)&tdb->header);

	tdb->header.rwlocks = 0;
	if (lseek(tdb->fd, off, SEEK_SET) != off
	    || write(tdb->fd, (void *)&tdb->header.rwlocks,
		     sizeof(tdb->header.rwlocks))
	    != sizeof(tdb->header.rwlocks))
		return -1;
	return 0;
}
#endif