/* gitweb extraction residue (not part of this file's code):
 *   "Fix the rechallenge behaviour. Previously, once it sent a rechallenge,"
 *   [mpls-ppp.git] / pppd / spinlock.c
 *   blob 4df7e47592542cc126dbbda6437150e6835ddeae
 */
/*
   Unix SMB/CIFS implementation.

   trivial database library

   Copyright (C) Anton Blanchard                   2001

   ** NOTE! The following LGPL license applies to the tdb
   ** library. This does NOT imply that all of Samba is released
   ** under the LGPL

   This library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2 of the License, or (at your option) any later version.

   This library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with this library; if not, write to the Free Software
   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
26 #include <stdlib.h>
27 #include <stdio.h>
28 #include <unistd.h>
29 #include <string.h>
30 #include <fcntl.h>
31 #include <errno.h>
32 #include <sys/stat.h>
33 #include <time.h>
34 #include <signal.h>
35 #include "tdb.h"
36 #include "spinlock.h"
38 #define DEBUG
40 #ifdef USE_SPINLOCKS
43 * ARCH SPECIFIC
46 #if defined(SPARC_SPINLOCKS)
/* Try to take the lock once, without blocking.
 * ldstub atomically reads the byte at *lock and stores 0xff there; a
 * read-back of 0 means the lock was free and is now ours.
 * Returns 0 on success, EBUSY if the lock was already held. */
static inline int __spin_trylock(spinlock_t *lock)
{
	unsigned int result;

	asm volatile("ldstub    [%1], %0"
		     : "=r" (result)
		     : "r" (lock)
		     : "memory");	/* compiler barrier: no reordering across the lock */

	return (result == 0) ? 0 : EBUSY;
}
/* Release the lock.  The empty asm is a compiler barrier so that stores
 * inside the critical section are not sunk past the unlocking store. */
static inline void __spin_unlock(spinlock_t *lock)
{
	asm volatile("":::"memory");
	*lock = 0;
}
/* Initialise the lock to the unlocked state (0 == free on SPARC). */
static inline void __spin_lock_init(spinlock_t *lock)
{
	*lock = 0;
}
/* Non-atomic peek: non-zero means some thread currently holds the lock. */
static inline int __spin_is_locked(spinlock_t *lock)
{
	return (*lock != 0);
}
76 #elif defined(POWERPC_SPINLOCKS)
/* Try to take the lock once via the lwarx/stwcx. reserved-load sequence:
 * load the word; if it is non-zero someone holds the lock (result stays 0,
 * branch to 2:); otherwise try to store 1 conditionally, retrying from 1:
 * if the reservation was lost.  isync orders the critical section after
 * lock acquisition.  Returns 0 on success, EBUSY if already held. */
static inline int __spin_trylock(spinlock_t *lock)
{
	unsigned int result;

	__asm__ __volatile__(
"1:	lwarx		%0,0,%1\n\
	cmpwi		0,%0,0\n\
	li		%0,0\n\
	bne-		2f\n\
	li		%0,1\n\
	stwcx.		%0,0,%1\n\
	bne-		1b\n\
	isync\n\
2:"	: "=&r"(result)
	: "r"(lock)
	: "cr0", "memory");

	/* result == 1 only if our conditional store of 1 succeeded. */
	return (result == 1) ? 0 : EBUSY;
}
/* Release the lock.  eieio orders all prior stores before the unlocking
 * store becomes visible to other processors. */
static inline void __spin_unlock(spinlock_t *lock)
{
	asm volatile("eieio":::"memory");
	*lock = 0;
}
/* Initialise the lock to the unlocked state (0 == free on PowerPC). */
static inline void __spin_lock_init(spinlock_t *lock)
{
	*lock = 0;
}
/* Non-atomic peek: non-zero means some thread currently holds the lock. */
static inline int __spin_is_locked(spinlock_t *lock)
{
	return (*lock != 0);
}
114 #elif defined(INTEL_SPINLOCKS)
/* Try to take the lock once, without blocking.
 * NOTE: the x86 variant uses the INVERTED convention: 1 == unlocked,
 * 0 == locked (see __spin_lock_init/__spin_unlock below).  xchgl
 * atomically swaps 0 into *lock; reading back a positive old value
 * means the lock was free.  Returns 0 on success, EBUSY otherwise. */
static inline int __spin_trylock(spinlock_t *lock)
{
	int oldval;

	asm volatile("xchgl %0,%1"
		: "=r" (oldval), "=m" (*lock)
		: "0" (0)
		: "memory");	/* xchg with memory is implicitly locked on x86 */

	return oldval > 0 ? 0 : EBUSY;
}
/* Release the lock: store 1 (== unlocked in the inverted x86 convention).
 * The empty asm is a compiler barrier for the preceding critical section. */
static inline void __spin_unlock(spinlock_t *lock)
{
	asm volatile("":::"memory");
	*lock = 1;
}
/* Initialise to the unlocked state: 1 == free in the inverted x86 convention. */
static inline void __spin_lock_init(spinlock_t *lock)
{
	*lock = 1;
}
/* Non-atomic peek: anything other than 1 means the lock is held. */
static inline int __spin_is_locked(spinlock_t *lock)
{
	return (*lock != 1);
}
144 #elif defined(MIPS_SPINLOCKS) && defined(sgi) && (_COMPILER_VERSION >= 730)
146 /* Implement spinlocks on IRIX using the MIPSPro atomic fetch operations. See
147 * sync(3) for the details of the intrinsic operations.
149 * "sgi" and "_COMPILER_VERSION" are always defined by MIPSPro.
152 #ifdef STANDALONE
154 /* MIPSPro 7.3 has "__inline" as an extension, but not "inline. */
155 #define inline __inline
157 #endif /* STANDALONE */
/* Returns 0 if the lock is acquired, EBUSY otherwise.
 * __lock_test_and_set atomically swaps 1 into *lock and returns the old
 * value; 0 means the lock was free and is now ours. */
static inline int __spin_trylock(spinlock_t *lock)
{
	unsigned int val;
	val = __lock_test_and_set(lock, 1);
	return val == 0 ? 0 : EBUSY;
}
/* Release the lock via the MIPSPro atomic release intrinsic. */
static inline void __spin_unlock(spinlock_t *lock)
{
	__lock_release(lock);
}
/* Initialise the lock; releasing an unheld lock leaves it in the free state. */
static inline void __spin_lock_init(spinlock_t *lock)
{
	__lock_release(lock);
}
/* Returns 1 if the lock is held, 0 otherwise.
 * add-and-fetch of 0 is an atomic read of the lock word. */
static inline int __spin_is_locked(spinlock_t *lock)
{
	unsigned int val;
	val = __add_and_fetch(lock, 0);
	return val;
}
185 #elif defined(MIPS_SPINLOCKS)
187 static inline unsigned int load_linked(unsigned long addr)
189 unsigned int res;
191 __asm__ __volatile__("ll\t%0,(%1)"
192 : "=r" (res)
193 : "r" (addr));
195 return res;
198 static inline unsigned int store_conditional(unsigned long addr, unsigned int value)
200 unsigned int res;
202 __asm__ __volatile__("sc\t%0,(%2)"
203 : "=r" (res)
204 : "0" (value), "r" (addr));
205 return res;
/* Try to take the lock once via an ll/sc retry loop: bail out with EBUSY
 * as soon as the lock word reads non-zero; otherwise attempt to store 1,
 * repeating only if the reservation was lost to another CPU.
 * Returns 0 on success, EBUSY if the lock was already held. */
static inline int __spin_trylock(spinlock_t *lock)
{
	unsigned int mw;

	do {
		mw = load_linked(lock);
		if (mw)
			return EBUSY;
	} while (!store_conditional(lock, 1));

	/* Compiler barrier: keep the critical section after the acquire. */
	asm volatile("":::"memory");

	return 0;
}
/* Release the lock.  The empty asm is a compiler barrier so stores in
 * the critical section are not sunk past the unlocking store. */
static inline void __spin_unlock(spinlock_t *lock)
{
	asm volatile("":::"memory");
	*lock = 0;
}
/* Initialise the lock to the unlocked state (0 == free on MIPS). */
static inline void __spin_lock_init(spinlock_t *lock)
{
	*lock = 0;
}
/* Non-atomic peek: non-zero means some thread currently holds the lock. */
static inline int __spin_is_locked(spinlock_t *lock)
{
	return (*lock != 0);
}
239 #else
240 #error Need to implement spinlock code in spinlock.c
241 #endif
244 * OS SPECIFIC
/* Give up the CPU briefly so the current lock holder can make progress.
 * Uses sched_yield() where configured; otherwise sleeps just over 2ms.
 *
 * Fix: `struct timespec tm' was declared unconditionally, producing an
 * unused-variable warning when USE_SCHED_YIELD is defined; it is now
 * scoped inside the #else branch that actually uses it. */
static void yield_cpu(void)
{
#ifdef USE_SCHED_YIELD
	sched_yield();
#else
	/* Linux will busy loop for delays < 2ms on real time tasks */
	struct timespec tm;

	tm.tv_sec = 0;
	tm.tv_nsec = 2000000L + 1;
	nanosleep(&tm, NULL);
#endif
}
/* Best-effort SMP detection: returns 1 when more than one processor is
 * online (so busy-spinning can pay off), 0 otherwise.  Without sysconf()
 * support we conservatively assume a uniprocessor. */
static int this_is_smp(void)
{
#if defined(HAVE_SYSCONF) && defined(SYSCONF_SC_NPROC_ONLN)
	return (sysconf(_SC_NPROC_ONLN) > 1) ? 1 : 0;
#else
	return 0;
#endif
}
271 * GENERIC
274 static int smp_machine = 0;
/* Acquire LOCK.  After a failed trylock we wait with plain reads
 * (__spin_is_locked) rather than hammering the atomic operation: on SMP
 * machines we busy-poll up to MAX_BUSY_LOOPS times, otherwise (and once
 * the budget is spent) we yield the CPU between polls. */
static inline void __spin_lock(spinlock_t *lock)
{
	int ntries = 0;

	while(__spin_trylock(lock)) {
		while(__spin_is_locked(lock)) {
			if (smp_machine && ntries++ < MAX_BUSY_LOOPS)
				continue;
			yield_cpu();
		}
	}
}
/* Take RWLOCK for reading.  Writers mark `count' with RWLOCK_BIAS;
 * readers just increment it.  The per-rwlock spinlock only protects the
 * read-modify-write of `count', never the whole wait. */
static void __read_lock(tdb_rwlock_t *rwlock)
{
	int ntries = 0;

	while(1) {
		__spin_lock(&rwlock->lock);

		if (!(rwlock->count & RWLOCK_BIAS)) {
			/* No writer present: register ourselves as a reader. */
			rwlock->count++;
			__spin_unlock(&rwlock->lock);
			return;
		}

		__spin_unlock(&rwlock->lock);

		/* Writer active: wait (reading count without the spinlock)
		 * until the bias clears, then retry under the spinlock. */
		while(rwlock->count & RWLOCK_BIAS) {
			if (smp_machine && ntries++ < MAX_BUSY_LOOPS)
				continue;
			yield_cpu();
		}
	}
}
/* Take RWLOCK for writing: only possible when `count' is zero (no
 * readers, no writer), in which case we stamp it with RWLOCK_BIAS.
 * Otherwise wait outside the spinlock until count drains, then retry. */
static void __write_lock(tdb_rwlock_t *rwlock)
{
	int ntries = 0;

	while(1) {
		__spin_lock(&rwlock->lock);

		if (rwlock->count == 0) {
			rwlock->count |= RWLOCK_BIAS;
			__spin_unlock(&rwlock->lock);
			return;
		}

		__spin_unlock(&rwlock->lock);

		/* Readers and/or a writer present: dirty-poll until idle. */
		while(rwlock->count != 0) {
			if (smp_machine && ntries++ < MAX_BUSY_LOOPS)
				continue;
			yield_cpu();
		}
	}
}
/* Drop a write lock: clear RWLOCK_BIAS under the spinlock.  With DEBUG,
 * complain if the bias was not actually set (unbalanced unlock). */
static void __write_unlock(tdb_rwlock_t *rwlock)
{
	__spin_lock(&rwlock->lock);

#ifdef DEBUG
	if (!(rwlock->count & RWLOCK_BIAS))
		fprintf(stderr, "bug: write_unlock\n");
#endif

	rwlock->count &= ~RWLOCK_BIAS;
	__spin_unlock(&rwlock->lock);
}
/* Drop a read lock: decrement the reader count under the spinlock.
 * With DEBUG, complain about an unbalanced unlock (count already zero)
 * or a read-unlock while a writer holds the bias. */
static void __read_unlock(tdb_rwlock_t *rwlock)
{
	__spin_lock(&rwlock->lock);

#ifdef DEBUG
	if (!rwlock->count)
		fprintf(stderr, "bug: read_unlock\n");

	if (rwlock->count & RWLOCK_BIAS)
		fprintf(stderr, "bug: read_unlock\n");
#endif

	rwlock->count--;
	__spin_unlock(&rwlock->lock);
}
364 /* TDB SPECIFIC */
366 /* lock a list in the database. list -1 is the alloc list */
367 int tdb_spinlock(TDB_CONTEXT *tdb, int list, int rw_type)
369 tdb_rwlock_t *rwlocks;
371 if (!tdb->map_ptr) return -1;
372 rwlocks = (tdb_rwlock_t *)((char *)tdb->map_ptr + tdb->header.rwlocks);
374 switch(rw_type) {
375 case F_RDLCK:
376 __read_lock(&rwlocks[list+1]);
377 break;
379 case F_WRLCK:
380 __write_lock(&rwlocks[list+1]);
381 break;
383 default:
384 return TDB_ERRCODE(TDB_ERR_LOCK, -1);
386 return 0;
389 /* unlock the database. */
390 int tdb_spinunlock(TDB_CONTEXT *tdb, int list, int rw_type)
392 tdb_rwlock_t *rwlocks;
394 if (!tdb->map_ptr) return -1;
395 rwlocks = (tdb_rwlock_t *)((char *)tdb->map_ptr + tdb->header.rwlocks);
397 switch(rw_type) {
398 case F_RDLCK:
399 __read_unlock(&rwlocks[list+1]);
400 break;
402 case F_WRLCK:
403 __write_unlock(&rwlocks[list+1]);
404 break;
406 default:
407 return TDB_ERRCODE(TDB_ERR_LOCK, -1);
410 return 0;
413 int tdb_create_rwlocks(int fd, unsigned int hash_size)
415 unsigned size, i;
416 tdb_rwlock_t *rwlocks;
418 size = TDB_SPINLOCK_SIZE(hash_size);
419 rwlocks = malloc(size);
420 if (!rwlocks)
421 return -1;
423 for(i = 0; i < hash_size+1; i++) {
424 __spin_lock_init(&rwlocks[i].lock);
425 rwlocks[i].count = 0;
428 /* Write it out (appending to end) */
429 if (write(fd, rwlocks, size) != size) {
430 free(rwlocks);
431 return -1;
433 smp_machine = this_is_smp();
434 free(rwlocks);
435 return 0;
438 int tdb_clear_spinlocks(TDB_CONTEXT *tdb)
440 tdb_rwlock_t *rwlocks;
441 unsigned i;
443 if (tdb->header.rwlocks == 0) return 0;
444 if (!tdb->map_ptr) return -1;
446 /* We're mmapped here */
447 rwlocks = (tdb_rwlock_t *)((char *)tdb->map_ptr + tdb->header.rwlocks);
448 for(i = 0; i < tdb->header.hash_size+1; i++) {
449 __spin_lock_init(&rwlocks[i].lock);
450 rwlocks[i].count = 0;
452 return 0;
454 #else
/* Spinlock support compiled out: creating the lock region is a
 * successful no-op, and lock/unlock always report failure so callers
 * fall back to fcntl-based locking. */
int tdb_create_rwlocks(int fd, unsigned int hash_size) { return 0; }
int tdb_spinlock(TDB_CONTEXT *tdb, int list, int rw_type) { return -1; }
int tdb_spinunlock(TDB_CONTEXT *tdb, int list, int rw_type) { return -1; }
/* Non-spinlock version: remove spinlock pointer */
int tdb_clear_spinlocks(TDB_CONTEXT *tdb)
{
	/* File offset of the rwlocks field = its offset within the
	 * in-memory header struct (the header sits at file offset 0). */
	tdb_off off = (tdb_off)((char *)&tdb->header.rwlocks
				- (char *)&tdb->header);

	tdb->header.rwlocks = 0;
	/* Persist just the cleared field so future opens skip spinlocks. */
	if (lseek(tdb->fd, off, SEEK_SET) != off
	    || write(tdb->fd, (void *)&tdb->header.rwlocks,
		     sizeof(tdb->header.rwlocks))
	    != sizeof(tdb->header.rwlocks))
		return -1;
	return 0;
}
473 #endif