/*-
 * Copyright 1999, 2000 John D. Polstra.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * from: FreeBSD: src/libexec/rtld-elf/sparc64/lockdflt.c,v 1.3 2002/10/09
 */

/*
 * Thread locking implementation for the dynamic linker.
 *
 * We use the "simple, non-scalable reader-preference lock" from:
 *
 *   J. M. Mellor-Crummey and M. L. Scott. "Scalable Reader-Writer
 *   Synchronization for Shared-Memory Multiprocessors." 3rd ACM Symp. on
 *   Principles and Practice of Parallel Programming, April 1991.
 *
 * In this algorithm the lock is a single word.  Its low-order bit is
 * set when a writer holds the lock.  The remaining high-order bits
 * contain a count of readers desiring the lock.  The algorithm requires
 * atomic "compare_and_store" and "add" operations, which we implement
 * using assembly language sequences in "rtld_start.S".
 */

#include <signal.h>
#include <stdlib.h>
#include <string.h>

#include "debug.h"
#include "rtld.h"
#include "rtld_machdep.h"

#define WAFLAG	0x1	/* A writer holds the lock */
#define RC_INCR	0x2	/* Adjusts count of readers desiring lock */
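
/*
 * Illustrative values under this encoding (an added example, not part
 * of the original text): 0 means the lock is free, WAFLAG (0x1) means
 * a writer holds it, and 2 * RC_INCR (0x4) means two readers hold or
 * desire it.  A minimal sketch of the reader path in portable C11
 * <stdatomic.h>, standing in for the assembly-backed
 * atomic_add_acq_int()/atomic_cmpset_acq_int() primitives used below:
 *
 *	atomic_int word = 0;
 *
 *	void read_lock(void) {
 *		atomic_fetch_add_explicit(&word, RC_INCR,
 *		    memory_order_acquire);
 *		while (atomic_load(&word) & WAFLAG)
 *			;		// spin while a writer is active
 *	}
 *
 *	void read_unlock(void) {
 *		atomic_fetch_sub_explicit(&word, RC_INCR,
 *		    memory_order_release);
 *	}
 */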

typedef struct Struct_Lock {
	volatile int lock;	/* The lock word itself */
	void *base;		/* Unaligned pointer passed back to free() */
} Lock;

static sigset_t fullsigmask, oldsigmask;
static int thread_flag;

static void *
def_lock_create(void)
{
	void *base;
	char *p;
	uintptr_t r;
	Lock *l;

	/*
	 * Arrange for the lock to occupy its own cache line.  First, we
	 * optimistically allocate just a cache line, hoping that malloc
	 * will give us a well-aligned block of memory.  If that doesn't
	 * work, we allocate a larger block and take a well-aligned cache
	 * line from it.
	 */
	base = xmalloc(CACHE_LINE_SIZE);
	p = (char *)base;
	if ((uintptr_t)p % CACHE_LINE_SIZE != 0) {
		free(base);
		base = xmalloc(2 * CACHE_LINE_SIZE);
		p = (char *)base;
		if ((r = (uintptr_t)p % CACHE_LINE_SIZE) != 0)
			p += CACHE_LINE_SIZE - r;
	}
	l = (Lock *)p;
	l->base = base;
	l->lock = 0;
	return l;
}
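
/*
 * Worked example of the alignment fixup above, assuming
 * CACHE_LINE_SIZE == 64: if the second xmalloc() returns 0x1008, then
 * r = 0x1008 % 64 = 8, so p is advanced by 64 - 8 = 56 bytes to
 * 0x1040, the next cache-line boundary.  The unaligned original
 * pointer is kept in l->base so def_lock_destroy() can free() it.
 */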

static void
def_lock_destroy(void *lock)
{
	Lock *l = (Lock *)lock;

	free(l->base);
}

static void
def_rlock_acquire(void *lock)
{
	Lock *l = (Lock *)lock;

	atomic_add_acq_int(&l->lock, RC_INCR);
	while (l->lock & WAFLAG)
		;	/* Spin until the writer releases the lock */
}

static void
def_wlock_acquire(void *lock)
{
	Lock *l = (Lock *)lock;
	sigset_t tmp_oldsigmask;

	for ( ; ; ) {
		sigprocmask(SIG_BLOCK, &fullsigmask, &tmp_oldsigmask);
		if (atomic_cmpset_acq_int(&l->lock, 0, WAFLAG))
			break;
		sigprocmask(SIG_SETMASK, &tmp_oldsigmask, NULL);
	}
	oldsigmask = tmp_oldsigmask;
}
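
/*
 * The sigprocmask() dance above matters because the default locks are
 * used before any threads library takes over: a signal handler that
 * runs while the write lock is held may re-enter the dynamic linker,
 * for example through a not-yet-resolved PLT entry, and self-deadlock
 * on the same lock.  A hypothetical sketch of the hazard prevented:
 *
 *	void handler(int sig) {
 *		some_unresolved_function();	// lazy binding re-enters
 *	}					// rtld and blocks forever
 */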

static void
def_lock_release(void *lock)
{
	Lock *l = (Lock *)lock;

	if ((l->lock & WAFLAG) == 0)
		atomic_add_rel_int(&l->lock, -RC_INCR);
	else {
		atomic_add_rel_int(&l->lock, -WAFLAG);
		sigprocmask(SIG_SETMASK, &oldsigmask, NULL);
	}
}

static int
def_thread_set_flag(int mask)
{
	int old_val = thread_flag;

	thread_flag |= mask;
	return (old_val);
}

static int
def_thread_clr_flag(int mask)
{
	int old_val = thread_flag;

	thread_flag &= ~mask;
	return (old_val);
}

/*
 * Public interface exposed to the rest of the dynamic linker.
 */

static struct RtldLockInfo lockinfo;
static struct RtldLockInfo deflockinfo;

static int
thread_mask_set(int mask)
{
	return lockinfo.thread_set_flag(mask);
}

static void
thread_mask_clear(int mask)
{
	lockinfo.thread_clr_flag(mask);
}

#define RTLD_LOCK_CNT	3

static struct rtld_lock {
	void	*handle;	/* Opaque lock object from lock_create() */
	int	 mask;		/* This lock's bit in a thread's flag word */
} rtld_locks[RTLD_LOCK_CNT];

rtld_lock_t rtld_bind_lock = &rtld_locks[0];
rtld_lock_t rtld_libc_lock = &rtld_locks[1];
rtld_lock_t rtld_phdr_lock = &rtld_locks[2];
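
/*
 * Each lock is identified by a single bit in the per-thread flag word
 * (the masks are assigned as (1 << i) in the initialization below):
 * rtld_bind_lock is 0x1, rtld_libc_lock 0x2 and rtld_phdr_lock 0x4.
 * This lets thread_mask_set() report whether the current thread
 * already holds the lock it is about to take.
 */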

int
rlock_acquire(rtld_lock_t lock)
{
	if (thread_mask_set(lock->mask)) {
		dbg("rlock_acquire: recursed");
		return (0);
	}
	lockinfo.rlock_acquire(lock->handle);
	return (1);
}

int
wlock_acquire(rtld_lock_t lock)
{
	if (thread_mask_set(lock->mask)) {
		dbg("wlock_acquire: recursed");
		return (0);
	}
	lockinfo.wlock_acquire(lock->handle);
	return (1);
}

void
rlock_release(rtld_lock_t lock, int locked)
{
	if (locked == 0)
		return;
	thread_mask_clear(lock->mask);
	lockinfo.lock_release(lock->handle);
}

void
wlock_release(rtld_lock_t lock, int locked)
{
	if (locked == 0)
		return;
	thread_mask_clear(lock->mask);
	lockinfo.lock_release(lock->handle);
}
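
/*
 * Typical caller pattern elsewhere in rtld (an illustrative sketch;
 * the symbol-lookup body is hypothetical):
 *
 *	int lockstate;
 *
 *	lockstate = rlock_acquire(rtld_bind_lock);
 *	// ... perform symbol lookups ...
 *	rlock_release(rtld_bind_lock, lockstate);
 *
 * Handing the acquire's return value to the release keeps a recursive
 * (already-locked) entry from dropping its caller's lock.
 */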

void
lockdflt_init(void)
{
	int i;

	deflockinfo.rtli_version  = RTLI_VERSION;
	deflockinfo.lock_create   = def_lock_create;
	deflockinfo.lock_destroy  = def_lock_destroy;
	deflockinfo.rlock_acquire = def_rlock_acquire;
	deflockinfo.wlock_acquire = def_wlock_acquire;
	deflockinfo.lock_release  = def_lock_release;
	deflockinfo.thread_set_flag = def_thread_set_flag;
	deflockinfo.thread_clr_flag = def_thread_clr_flag;
	deflockinfo.at_fork = NULL;

	for (i = 0; i < RTLD_LOCK_CNT; i++) {
		rtld_locks[i].mask   = (1 << i);
		rtld_locks[i].handle = NULL;
	}

	memcpy(&lockinfo, &deflockinfo, sizeof(lockinfo));
	_rtld_thread_init(NULL);
	/*
	 * Construct a mask to block all signals except traps which might
	 * conceivably be generated within the dynamic linker itself.
	 */
	sigfillset(&fullsigmask);
	sigdelset(&fullsigmask, SIGILL);
	sigdelset(&fullsigmask, SIGTRAP);
	sigdelset(&fullsigmask, SIGABRT);
	sigdelset(&fullsigmask, SIGEMT);
	sigdelset(&fullsigmask, SIGFPE);
	sigdelset(&fullsigmask, SIGBUS);
	sigdelset(&fullsigmask, SIGSEGV);
	sigdelset(&fullsigmask, SIGSYS);
}

/*
 * Callback function to allow a threads implementation to register its
 * own locking primitives if the default ones are not suitable.
 * The calling context should be the only context executing at
 * invocation time.
 */
void
_rtld_thread_init(struct RtldLockInfo *pli)
{
	int flags, i;
	void *locks[RTLD_LOCK_CNT];

	/* disable all locking while this function is running */
	flags = thread_mask_set(~0);

	if (pli == NULL)
		pli = &deflockinfo;

	for (i = 0; i < RTLD_LOCK_CNT; i++)
		if ((locks[i] = pli->lock_create()) == NULL)
			break;

	if (i < RTLD_LOCK_CNT) {
		while (--i >= 0)
			pli->lock_destroy(locks[i]);
		abort();
	}

	for (i = 0; i < RTLD_LOCK_CNT; i++) {
		if (rtld_locks[i].handle == NULL)
			continue;
		if (flags & rtld_locks[i].mask)
			lockinfo.lock_release(rtld_locks[i].handle);
		lockinfo.lock_destroy(rtld_locks[i].handle);
	}

	for (i = 0; i < RTLD_LOCK_CNT; i++) {
		rtld_locks[i].handle = locks[i];
		if (flags & rtld_locks[i].mask)
			pli->wlock_acquire(rtld_locks[i].handle);
	}

	lockinfo.lock_create = pli->lock_create;
	lockinfo.lock_destroy = pli->lock_destroy;
	lockinfo.rlock_acquire = pli->rlock_acquire;
	lockinfo.wlock_acquire = pli->wlock_acquire;
	lockinfo.lock_release = pli->lock_release;
	lockinfo.thread_set_flag = pli->thread_set_flag;
	lockinfo.thread_clr_flag = pli->thread_clr_flag;
	lockinfo.at_fork = pli->at_fork;

	/* restore thread locking state, this time with new locks */
	thread_mask_clear(~0);
	thread_mask_set(flags);
	dbg("_rtld_thread_init: done");
}
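
/*
 * Illustrative registration by a threads library (the pth_* names are
 * hypothetical; a real threads package supplies its own primitives):
 *
 *	static struct RtldLockInfo li = {
 *		.rtli_version	 = RTLI_VERSION,
 *		.lock_create	 = pth_lock_create,
 *		.lock_destroy	 = pth_lock_destroy,
 *		.rlock_acquire	 = pth_rlock_acquire,
 *		.wlock_acquire	 = pth_wlock_acquire,
 *		.lock_release	 = pth_lock_release,
 *		.thread_set_flag = pth_thread_set_flag,
 *		.thread_clr_flag = pth_thread_clr_flag,
 *		.at_fork	 = pth_at_fork,
 *	};
 *
 *	_rtld_thread_init(&li);
 */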