(Pathconf): Remove _PC_SOCK_MAXBUF.
[glibc/history.git] / nptl / pthread_mutex_unlock.c
blob33919d60af129e74c26e4c58fe3842800658ae69
/* Copyright (C) 2002, 2003, 2005, 2006 Free Software Foundation, Inc.
   This file is part of the GNU C Library.
   Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, write to the Free
   Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
   02111-1307 USA.  */
20 #include <errno.h>
21 #include <stdlib.h>
22 #include "pthreadP.h"
23 #include <lowlevellock.h>
26 int
27 internal_function attribute_hidden
28 __pthread_mutex_unlock_usercnt (mutex, decr)
29 pthread_mutex_t *mutex;
30 int decr;
32 int newowner = 0;
34 switch (__builtin_expect (mutex->__data.__kind, PTHREAD_MUTEX_TIMED_NP))
36 case PTHREAD_MUTEX_RECURSIVE_NP:
37 /* Recursive mutex. */
38 if (mutex->__data.__owner != THREAD_GETMEM (THREAD_SELF, tid))
39 return EPERM;
41 if (--mutex->__data.__count != 0)
42 /* We still hold the mutex. */
43 return 0;
44 goto normal;
46 case PTHREAD_MUTEX_ERRORCHECK_NP:
47 /* Error checking mutex. */
48 if (mutex->__data.__owner != THREAD_GETMEM (THREAD_SELF, tid)
49 || ! lll_mutex_islocked (mutex->__data.__lock))
50 return EPERM;
51 /* FALLTHROUGH */
53 case PTHREAD_MUTEX_TIMED_NP:
54 case PTHREAD_MUTEX_ADAPTIVE_NP:
55 /* Always reset the owner field. */
56 normal:
57 mutex->__data.__owner = 0;
58 if (decr)
59 /* One less user. */
60 --mutex->__data.__nusers;
62 /* Unlock. */
63 lll_mutex_unlock (mutex->__data.__lock);
64 break;
66 case PTHREAD_MUTEX_ROBUST_RECURSIVE_NP:
67 /* Recursive mutex. */
68 if ((mutex->__data.__lock & FUTEX_TID_MASK)
69 == THREAD_GETMEM (THREAD_SELF, tid)
70 && __builtin_expect (mutex->__data.__owner
71 == PTHREAD_MUTEX_INCONSISTENT, 0))
73 if (--mutex->__data.__count != 0)
74 /* We still hold the mutex. */
75 return ENOTRECOVERABLE;
77 goto notrecoverable;
80 if (mutex->__data.__owner != THREAD_GETMEM (THREAD_SELF, tid))
81 return EPERM;
83 if (--mutex->__data.__count != 0)
84 /* We still hold the mutex. */
85 return 0;
87 goto robust;
89 case PTHREAD_MUTEX_ROBUST_ERRORCHECK_NP:
90 case PTHREAD_MUTEX_ROBUST_NORMAL_NP:
91 case PTHREAD_MUTEX_ROBUST_ADAPTIVE_NP:
92 if ((mutex->__data.__lock & FUTEX_TID_MASK)
93 != THREAD_GETMEM (THREAD_SELF, tid)
94 || ! lll_mutex_islocked (mutex->__data.__lock))
95 return EPERM;
97 /* If the previous owner died and the caller did not succeed in
98 making the state consistent, mark the mutex as unrecoverable
99 and make all waiters. */
100 if (__builtin_expect (mutex->__data.__owner
101 == PTHREAD_MUTEX_INCONSISTENT, 0))
102 notrecoverable:
103 newowner = PTHREAD_MUTEX_NOTRECOVERABLE;
105 robust:
106 /* Remove mutex from the list. */
107 THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
108 &mutex->__data.__list.__next);
109 DEQUEUE_MUTEX (mutex);
111 mutex->__data.__owner = newowner;
112 if (decr)
113 /* One less user. */
114 --mutex->__data.__nusers;
116 /* Unlock. */
117 lll_robust_mutex_unlock (mutex->__data.__lock);
119 THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
120 break;
122 case PTHREAD_MUTEX_PI_RECURSIVE_NP:
123 /* Recursive mutex. */
124 if (mutex->__data.__owner != THREAD_GETMEM (THREAD_SELF, tid))
125 return EPERM;
127 if (--mutex->__data.__count != 0)
128 /* We still hold the mutex. */
129 return 0;
130 goto continue_pi;
132 case PTHREAD_MUTEX_PI_ROBUST_RECURSIVE_NP:
133 /* Recursive mutex. */
134 if ((mutex->__data.__lock & FUTEX_TID_MASK)
135 == THREAD_GETMEM (THREAD_SELF, tid)
136 && __builtin_expect (mutex->__data.__owner
137 == PTHREAD_MUTEX_INCONSISTENT, 0))
139 if (--mutex->__data.__count != 0)
140 /* We still hold the mutex. */
141 return ENOTRECOVERABLE;
143 goto pi_notrecoverable;
146 if (mutex->__data.__owner != THREAD_GETMEM (THREAD_SELF, tid))
147 return EPERM;
149 if (--mutex->__data.__count != 0)
150 /* We still hold the mutex. */
151 return 0;
153 goto continue_pi;
155 case PTHREAD_MUTEX_PI_ERRORCHECK_NP:
156 case PTHREAD_MUTEX_PI_NORMAL_NP:
157 case PTHREAD_MUTEX_PI_ADAPTIVE_NP:
158 case PTHREAD_MUTEX_PI_ROBUST_ERRORCHECK_NP:
159 case PTHREAD_MUTEX_PI_ROBUST_NORMAL_NP:
160 case PTHREAD_MUTEX_PI_ROBUST_ADAPTIVE_NP:
161 if ((mutex->__data.__lock & FUTEX_TID_MASK)
162 != THREAD_GETMEM (THREAD_SELF, tid)
163 || ! lll_mutex_islocked (mutex->__data.__lock))
164 return EPERM;
166 /* If the previous owner died and the caller did not succeed in
167 making the state consistent, mark the mutex as unrecoverable
168 and make all waiters. */
169 if ((mutex->__data.__kind & PTHREAD_MUTEX_ROBUST_NORMAL_NP) != 0
170 && __builtin_expect (mutex->__data.__owner
171 == PTHREAD_MUTEX_INCONSISTENT, 0))
172 pi_notrecoverable:
173 newowner = PTHREAD_MUTEX_NOTRECOVERABLE;
175 continue_pi:
176 if ((mutex->__data.__kind & PTHREAD_MUTEX_ROBUST_NORMAL_NP) != 0)
178 /* Remove mutex from the list.
179 Note: robust PI futexes are signaled by setting bit 0. */
180 THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
181 (void *) (((uintptr_t) &mutex->__data.__list.__next)
182 | 1));
183 DEQUEUE_MUTEX (mutex);
186 mutex->__data.__owner = newowner;
187 if (decr)
188 /* One less user. */
189 --mutex->__data.__nusers;
191 /* Unlock. */
192 if ((mutex->__data.__lock & FUTEX_WAITERS) != 0
193 || atomic_compare_and_exchange_bool_acq (&mutex->__data.__lock, 0,
194 THREAD_GETMEM (THREAD_SELF,
195 tid)))
197 INTERNAL_SYSCALL_DECL (__err);
198 INTERNAL_SYSCALL (futex, __err, 2, &mutex->__data.__lock,
199 FUTEX_UNLOCK_PI);
202 THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
203 break;
205 case PTHREAD_MUTEX_PP_RECURSIVE_NP:
206 /* Recursive mutex. */
207 if (mutex->__data.__owner != THREAD_GETMEM (THREAD_SELF, tid))
208 return EPERM;
210 if (--mutex->__data.__count != 0)
211 /* We still hold the mutex. */
212 return 0;
213 goto pp;
215 case PTHREAD_MUTEX_PP_ERRORCHECK_NP:
216 /* Error checking mutex. */
217 if (mutex->__data.__owner != THREAD_GETMEM (THREAD_SELF, tid)
218 || (mutex->__data.__lock & ~ PTHREAD_MUTEX_PRIO_CEILING_MASK) == 0)
219 return EPERM;
220 /* FALLTHROUGH */
222 case PTHREAD_MUTEX_PP_NORMAL_NP:
223 case PTHREAD_MUTEX_PP_ADAPTIVE_NP:
224 /* Always reset the owner field. */
226 mutex->__data.__owner = 0;
228 if (decr)
229 /* One less user. */
230 --mutex->__data.__nusers;
232 /* Unlock. */
233 int newval, oldval;
236 oldval = mutex->__data.__lock;
237 newval = oldval & PTHREAD_MUTEX_PRIO_CEILING_MASK;
239 while (atomic_compare_and_exchange_bool_acq (&mutex->__data.__lock,
240 newval, oldval));
242 if ((oldval & ~PTHREAD_MUTEX_PRIO_CEILING_MASK) > 1)
243 lll_futex_wake (&mutex->__data.__lock, 1);
245 int oldprio = newval >> PTHREAD_MUTEX_PRIO_CEILING_SHIFT;
246 return __pthread_tpp_change_priority (oldprio, -1);
248 default:
249 /* Correct code cannot set any other type. */
250 return EINVAL;
253 return 0;
258 __pthread_mutex_unlock (mutex)
259 pthread_mutex_t *mutex;
261 return __pthread_mutex_unlock_usercnt (mutex, 1);
263 strong_alias (__pthread_mutex_unlock, pthread_mutex_unlock)
264 strong_alias (__pthread_mutex_unlock, __pthread_mutex_unlock_internal)