[glibc/history.git] / nptl / pthread_mutex_trylock.c
blob 25029bedce7c6d43671a199cf52ebe1f7cc971bd
/* Copyright (C) 2002, 2003, 2005-2007, 2008 Free Software Foundation, Inc.
   This file is part of the GNU C Library.
   Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, write to the Free
   Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
   02111-1307 USA.  */
#include <assert.h>
#include <errno.h>
#include <stdlib.h>
#include "pthreadP.h"
#include <lowlevellock.h>
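
/* Try to acquire MUTEX without ever blocking.  Returns 0 on success,
   EBUSY if the mutex is already locked, EDEADLK for an error-checking
   mutex the caller already owns, EAGAIN if a recursive mutex's counter
   would overflow, EOWNERDEAD/ENOTRECOVERABLE for robust mutexes whose
   previous owner died, and EINVAL for an unknown type or a
   priority-protected mutex whose ceiling is below the caller's
   priority.  */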
int
__pthread_mutex_trylock (mutex)
     pthread_mutex_t *mutex;
{
  int oldval;
  pid_t id = THREAD_GETMEM (THREAD_SELF, tid);

  switch (__builtin_expect (PTHREAD_MUTEX_TYPE (mutex),
                            PTHREAD_MUTEX_TIMED_NP))
    {
      /* Recursive mutex.  */
    case PTHREAD_MUTEX_RECURSIVE_NP:
      /* Check whether we already hold the mutex.  */
      if (mutex->__data.__owner == id)
        {
          /* Just bump the counter.  */
          if (__builtin_expect (mutex->__data.__count + 1 == 0, 0))
            /* Overflow of the counter.  */
            return EAGAIN;

          ++mutex->__data.__count;
          return 0;
        }

      if (lll_trylock (mutex->__data.__lock) == 0)
        {
          /* Record the ownership.  */
          mutex->__data.__owner = id;
          mutex->__data.__count = 1;
          ++mutex->__data.__nusers;
          return 0;
        }
      break;
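
      /* The three plain kinds below share one trylock path:
         lll_trylock performs a single atomic compare-and-swap on the
         futex word (0 -> "locked") and returns nonzero without
         entering the kernel when the mutex is already held.  */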
    case PTHREAD_MUTEX_ERRORCHECK_NP:
    case PTHREAD_MUTEX_TIMED_NP:
    case PTHREAD_MUTEX_ADAPTIVE_NP:
      /* Normal mutex.  */
      if (lll_trylock (mutex->__data.__lock) != 0)
        break;

      /* Record the ownership.  */
      mutex->__data.__owner = id;
      ++mutex->__data.__nusers;

      return 0;
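
      /* Robust mutexes: before touching the mutex we publish a pointer
         to it in list_op_pending, part of the robust list this thread
         registered with the kernel (set_robust_list).  If the thread
         dies at any point in between, the kernel walks that list and
         sets FUTEX_OWNER_DIED in the lock word so the next acquirer
         learns about the dead owner.  */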
    case PTHREAD_MUTEX_ROBUST_RECURSIVE_NP:
    case PTHREAD_MUTEX_ROBUST_ERRORCHECK_NP:
    case PTHREAD_MUTEX_ROBUST_NORMAL_NP:
    case PTHREAD_MUTEX_ROBUST_ADAPTIVE_NP:
      THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
                     &mutex->__data.__list.__next);

      oldval = mutex->__data.__lock;
      do
        {
        again:
          if ((oldval & FUTEX_OWNER_DIED) != 0)
            {
              /* The previous owner died.  Try locking the mutex.  */
              int newval = id | (oldval & FUTEX_WAITERS);

              newval
                = atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
                                                       newval, oldval);

              if (newval != oldval)
                {
                  oldval = newval;
                  goto again;
                }

              /* We got the mutex.  */
              mutex->__data.__count = 1;
              /* But it is inconsistent unless marked otherwise.  */
              mutex->__data.__owner = PTHREAD_MUTEX_INCONSISTENT;

              ENQUEUE_MUTEX (mutex);
              THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);

              /* Note that we deliberately exit here.  If we fall
                 through to the end of the function __nusers would be
                 incremented which is not correct because the old
                 owner has to be discounted.  */
              return EOWNERDEAD;
            }
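
          /* The low bits of the lock word (FUTEX_TID_MASK) hold the
             owner's TID, so comparing them against our own TID detects
             an attempt to re-acquire the mutex recursively.  */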
          /* Check whether we already hold the mutex.  */
          if (__builtin_expect ((oldval & FUTEX_TID_MASK) == id, 0))
            {
              int kind = PTHREAD_MUTEX_TYPE (mutex);
              if (kind == PTHREAD_MUTEX_ROBUST_ERRORCHECK_NP)
                {
                  THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
                                 NULL);
                  return EDEADLK;
                }

              if (kind == PTHREAD_MUTEX_ROBUST_RECURSIVE_NP)
                {
                  THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
                                 NULL);

                  /* Just bump the counter.  */
                  if (__builtin_expect (mutex->__data.__count + 1 == 0, 0))
                    /* Overflow of the counter.  */
                    return EAGAIN;

                  ++mutex->__data.__count;

                  return 0;
                }
            }
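
          /* lll_robust_trylock attempts one atomic compare-and-swap of
             the lock word from 0 to our TID.  It yields 0 on success
             and otherwise the value it found; if that value carries
             FUTEX_OWNER_DIED we loop and reclaim the dead owner's
             mutex above.  */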
          oldval = lll_robust_trylock (mutex->__data.__lock, id);
          if (oldval != 0 && (oldval & FUTEX_OWNER_DIED) == 0)
            {
              THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);

              return EBUSY;
            }

          if (__builtin_expect (mutex->__data.__owner
                                == PTHREAD_MUTEX_NOTRECOVERABLE, 0))
            {
              /* This mutex is now not recoverable.  */
              mutex->__data.__count = 0;
              if (oldval == id)
                lll_unlock (mutex->__data.__lock,
                            PTHREAD_ROBUST_MUTEX_PSHARED (mutex));
              THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
              return ENOTRECOVERABLE;
            }
        }
      while ((oldval & FUTEX_OWNER_DIED) != 0);

      ENQUEUE_MUTEX (mutex);
      THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);

      mutex->__data.__owner = id;
      ++mutex->__data.__nusers;
      mutex->__data.__count = 1;

      return 0;
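
      /* Priority-inheritance mutexes.  The uncontended fast path is a
         plain userspace CAS of the lock word from 0 to our TID; every
         harder case (contention, dead owner) is delegated to the
         kernel, which implements the PI protocol for futexes whose
         word holds the owner's TID.  */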
    case PTHREAD_MUTEX_PI_RECURSIVE_NP:
    case PTHREAD_MUTEX_PI_ERRORCHECK_NP:
    case PTHREAD_MUTEX_PI_NORMAL_NP:
    case PTHREAD_MUTEX_PI_ADAPTIVE_NP:
    case PTHREAD_MUTEX_PI_ROBUST_RECURSIVE_NP:
    case PTHREAD_MUTEX_PI_ROBUST_ERRORCHECK_NP:
    case PTHREAD_MUTEX_PI_ROBUST_NORMAL_NP:
    case PTHREAD_MUTEX_PI_ROBUST_ADAPTIVE_NP:
      {
        int kind = mutex->__data.__kind & PTHREAD_MUTEX_KIND_MASK_NP;
        int robust = mutex->__data.__kind & PTHREAD_MUTEX_ROBUST_NORMAL_NP;

        if (robust)
          /* Note: robust PI futexes are signaled by setting bit 0.  */
          THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
                         (void *) (((uintptr_t) &mutex->__data.__list.__next)
                                   | 1));

        oldval = mutex->__data.__lock;
        /* Check whether we already hold the mutex.  */
        if (__builtin_expect ((oldval & FUTEX_TID_MASK) == id, 0))
          {
            if (kind == PTHREAD_MUTEX_ERRORCHECK_NP)
              {
                THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
                return EDEADLK;
              }

            if (kind == PTHREAD_MUTEX_RECURSIVE_NP)
              {
                THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);

                /* Just bump the counter.  */
                if (__builtin_expect (mutex->__data.__count + 1 == 0, 0))
                  /* Overflow of the counter.  */
                  return EAGAIN;

                ++mutex->__data.__count;

                return 0;
              }
          }
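
        /* Uncontended fast path: try to CAS the lock word from 0 to
           our TID.  Any nonzero old value means the mutex is held or
           its owner died.  */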
        oldval
          = atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
                                                 id, 0);

        if (oldval != 0)
          {
            if ((oldval & FUTEX_OWNER_DIED) == 0)
              {
                THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);

                return EBUSY;
              }

            assert (robust);

            /* The mutex owner died.  The kernel will now take care of
               everything.  */
            int private = (robust
                           ? PTHREAD_ROBUST_MUTEX_PSHARED (mutex)
                           : PTHREAD_MUTEX_PSHARED (mutex));
            INTERNAL_SYSCALL_DECL (__err);
            int e = INTERNAL_SYSCALL (futex, __err, 4, &mutex->__data.__lock,
                                      __lll_private_flag (FUTEX_TRYLOCK_PI,
                                                          private), 0, 0);

            if (INTERNAL_SYSCALL_ERROR_P (e, __err)
                && INTERNAL_SYSCALL_ERRNO (e, __err) == EWOULDBLOCK)
              {
                THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);

                return EBUSY;
              }

            oldval = mutex->__data.__lock;
          }
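
        /* When the kernel hands us the lock of a dead owner it leaves
           FUTEX_OWNER_DIED set in the lock word; clear the flag and
           report EOWNERDEAD so the caller can repair the protected
           state and mark the mutex consistent.  */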
        if (__builtin_expect (oldval & FUTEX_OWNER_DIED, 0))
          {
            atomic_and (&mutex->__data.__lock, ~FUTEX_OWNER_DIED);

            /* We got the mutex.  */
            mutex->__data.__count = 1;
            /* But it is inconsistent unless marked otherwise.  */
            mutex->__data.__owner = PTHREAD_MUTEX_INCONSISTENT;

            ENQUEUE_MUTEX (mutex);
            THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);

            /* Note that we deliberately exit here.  If we fall
               through to the end of the function __nusers would be
               incremented which is not correct because the old owner
               has to be discounted.  */
            return EOWNERDEAD;
          }

        if (robust
            && __builtin_expect (mutex->__data.__owner
                                 == PTHREAD_MUTEX_NOTRECOVERABLE, 0))
          {
            /* This mutex is now not recoverable.  */
            mutex->__data.__count = 0;

            INTERNAL_SYSCALL_DECL (__err);
            INTERNAL_SYSCALL (futex, __err, 4, &mutex->__data.__lock,
                              __lll_private_flag (FUTEX_UNLOCK_PI,
                                                  PTHREAD_ROBUST_MUTEX_PSHARED (mutex)),
                              0, 0);

            THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
            return ENOTRECOVERABLE;
          }

        if (robust)
          {
            ENQUEUE_MUTEX_PI (mutex);
            THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
          }

        mutex->__data.__owner = id;
        ++mutex->__data.__nusers;
        mutex->__data.__count = 1;

        return 0;
      }
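
      /* Priority-protected (priority-ceiling) mutexes.  The lock word
         stores the ceiling in PTHREAD_MUTEX_PRIO_CEILING_MASK and uses
         bit 0 as the lock flag; a thread whose priority exceeds the
         ceiling may not take the mutex (EINVAL), and a holder is
         boosted to the ceiling via __pthread_tpp_change_priority.  */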
    case PTHREAD_MUTEX_PP_RECURSIVE_NP:
    case PTHREAD_MUTEX_PP_ERRORCHECK_NP:
    case PTHREAD_MUTEX_PP_NORMAL_NP:
    case PTHREAD_MUTEX_PP_ADAPTIVE_NP:
      {
        int kind = mutex->__data.__kind & PTHREAD_MUTEX_KIND_MASK_NP;

        oldval = mutex->__data.__lock;

        /* Check whether we already hold the mutex.  */
        if (mutex->__data.__owner == id)
          {
            if (kind == PTHREAD_MUTEX_ERRORCHECK_NP)
              return EDEADLK;

            if (kind == PTHREAD_MUTEX_RECURSIVE_NP)
              {
                /* Just bump the counter.  */
                if (__builtin_expect (mutex->__data.__count + 1 == 0, 0))
                  /* Overflow of the counter.  */
                  return EAGAIN;

                ++mutex->__data.__count;

                return 0;
              }
          }
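
        /* Acquisition loop: read the current ceiling, boost this
           thread to it, then try to CAS the lock word from "unlocked
           at this ceiling" (ceilval) to "locked" (ceilval | 1).  If
           the ceiling changed concurrently, retry; on failure, undo
           the priority boost.  */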
        int oldprio = -1, ceilval;
        do
          {
            int ceiling = (oldval & PTHREAD_MUTEX_PRIO_CEILING_MASK)
                          >> PTHREAD_MUTEX_PRIO_CEILING_SHIFT;

            if (__pthread_current_priority () > ceiling)
              {
                if (oldprio != -1)
                  __pthread_tpp_change_priority (oldprio, -1);
                return EINVAL;
              }

            int retval = __pthread_tpp_change_priority (oldprio, ceiling);
            if (retval)
              return retval;

            ceilval = ceiling << PTHREAD_MUTEX_PRIO_CEILING_SHIFT;
            oldprio = ceiling;

            oldval
              = atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
                                                     ceilval | 1, ceilval);

            if (oldval == ceilval)
              break;
          }
        while ((oldval & PTHREAD_MUTEX_PRIO_CEILING_MASK) != ceilval);

        if (oldval != ceilval)
          {
            __pthread_tpp_change_priority (oldprio, -1);
            break;
          }

        assert (mutex->__data.__owner == 0);
        /* Record the ownership.  */
        mutex->__data.__owner = id;
        ++mutex->__data.__nusers;
        mutex->__data.__count = 1;

        return 0;
      }
      break;
    default:
      /* Correct code cannot set any other type.  */
      return EINVAL;
    }

  return EBUSY;
}
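
/* Export the internal symbol under its public name as well.  */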
strong_alias (__pthread_mutex_trylock, pthread_mutex_trylock)