/* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
/* ***** BEGIN LICENSE BLOCK *****
 * Version: MPL 1.1/GPL 2.0/LGPL 2.1
 *
 * The contents of this file are subject to the Mozilla Public License Version
 * 1.1 (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 * http://www.mozilla.org/MPL/
 *
 * Software distributed under the License is distributed on an "AS IS" basis,
 * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
 * for the specific language governing rights and limitations under the
 * License.
 *
 * The Original Code is mozilla.org code.
 *
 * The Initial Developer of the Original Code is
 * Netscape Communications Corporation.
 * Portions created by the Initial Developer are Copyright (C) 1998
 * the Initial Developer. All Rights Reserved.
 *
 * Alternatively, the contents of this file may be used under the terms of
 * either of the GNU General Public License Version 2 or later (the "GPL"),
 * or the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
 * in which case the provisions of the GPL or the LGPL are applicable instead
 * of those above. If you wish to allow use of your version of this file only
 * under the terms of either the GPL or the LGPL, and not to allow others to
 * use your version of this file under the terms of the MPL, indicate your
 * decision by deleting the provisions above and replace them with the notice
 * and other provisions required by the GPL or the LGPL. If you do not delete
 * the provisions above, a recipient may use your version of this file under
 * the terms of any one of the MPL, the GPL or the LGPL.
 *
 * ***** END LICENSE BLOCK ***** */
#include "nsAutoLock.h"

#include "nsDebug.h"
#include "nsVoidArray.h"
#include "plhash.h"
#include "prcmon.h"
#include "prprf.h"
#include "prthread.h"

#ifdef NS_TRACE_MALLOC
# include <stdio.h>
# include "nsTraceMalloc.h"
#endif
static PRUintn      LockStackTPI = (PRUintn)-1;
static PLHashTable* OrderTable = 0;
static PRLock*      OrderTableLock = 0;

static const char* const LockTypeNames[] = {"Lock", "Monitor", "CMonitor"};
struct nsNamedVector : public nsVoidArray {
    const char* mName;

#ifdef NS_TRACE_MALLOC
    // Callsites for the inner locks/monitors stored in our base nsVoidArray.
    // This array parallels our base nsVoidArray.
    nsVoidArray mInnerSites;
#endif

    nsNamedVector(const char* name = 0, PRUint32 initialSize = 0)
        : nsVoidArray(initialSize),
          mName(name)
    {
    }
};
static void * PR_CALLBACK
_hash_alloc_table(void *pool, PRSize size)
{
    return operator new(size);
}
static void PR_CALLBACK
_hash_free_table(void *pool, void *item)
{
    operator delete(item);
}
static PLHashEntry * PR_CALLBACK
_hash_alloc_entry(void *pool, const void *key)
{
    return new PLHashEntry;
}
/*
 * Because monitors and locks may be associated with an nsAutoLockBase,
 * without having had their associated nsNamedVector created explicitly in
 * nsAutoMonitor::NewMonitor/DeleteMonitor, we need to provide a freeEntry
 * PLHashTable hook, to avoid leaking nsNamedVectors which are replaced by
 * nsAutoMonitor::NewMonitor.
 *
 * There is still a problem with the OrderTable containing orphaned
 * nsNamedVector entries, for manually created locks wrapped by nsAutoLocks.
 * (There should be no manually created monitors wrapped by nsAutoMonitors:
 * you should use nsAutoMonitor::NewMonitor and nsAutoMonitor::DestroyMonitor
 * instead of PR_NewMonitor and PR_DestroyMonitor.)  These lock vectors don't
 * strictly leak, as they are killed on shutdown, but there are unnecessary
 * named vectors in the hash table that outlive their associated locks.
 *
 * XXX so we should have nsLock, nsMonitor, etc. and strongly type their
 * XXX nsAutoXXX counterparts to take only the non-auto types as inputs
 */
static void PR_CALLBACK
_hash_free_entry(void *pool, PLHashEntry *entry, PRUintn flag)
{
    nsNamedVector* vec = (nsNamedVector*) entry->value;
    if (vec) {
        entry->value = 0;
        delete vec;
    }
    if (flag == HT_FREE_ENTRY)
        delete entry;
}
static const PLHashAllocOps _hash_alloc_ops = {
    _hash_alloc_table, _hash_free_table,
    _hash_alloc_entry, _hash_free_entry
};
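
// These alloc ops route PLHashTable's internal allocations through C++
// operator new/delete, and give the table a freeEntry hook that also cleans
// up the nsNamedVector values (see _hash_free_entry above).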
PR_STATIC_CALLBACK(PRIntn)
_purge_one(PLHashEntry* he, PRIntn cnt, void* arg)
{
    nsNamedVector* vec = (nsNamedVector*) he->value;

    if (he->key == arg)
        return HT_ENUMERATE_REMOVE;
    vec->RemoveElement(arg);
    return HT_ENUMERATE_NEXT;
}
PR_STATIC_CALLBACK(void)
OnSemaphoreRecycle(void* addr)
{
    if (OrderTable) {
        PR_Lock(OrderTableLock);
        PL_HashTableEnumerateEntries(OrderTable, _purge_one, addr);
        PR_Unlock(OrderTableLock);
    }
}
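
// Hash a lock/monitor address.  These addresses are at least word-aligned,
// so the low two bits carry no information; shifting them out spreads the
// keys more evenly across hash buckets.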
PR_STATIC_CALLBACK(PLHashNumber)
_hash_pointer(const void* key)
{
    return PLHashNumber(NS_PTR_TO_INT32(key)) >> 2;
}
// Must be single-threaded here, early in primordial thread.
static void InitAutoLockStatics()
{
    (void) PR_NewThreadPrivateIndex(&LockStackTPI, 0);
    OrderTable = PL_NewHashTable(64, _hash_pointer,
                                 PL_CompareValues, PL_CompareValues,
                                 &_hash_alloc_ops, 0);
    if (OrderTable && !(OrderTableLock = PR_NewLock())) {
        PL_HashTableDestroy(OrderTable);
        OrderTable = 0;
    }
    PR_CSetOnMonitorRecycle(OnSemaphoreRecycle);
}
void _FreeAutoLockStatics()
{
    PLHashTable* table = OrderTable;
    if (!table)
        return;

    // Called at shutdown, so we don't need to lock.
    PR_CSetOnMonitorRecycle(0);
    PR_DestroyLock(OrderTableLock);
    OrderTableLock = 0;
    PL_HashTableDestroy(table);
    OrderTable = 0;
}
static nsNamedVector* GetVector(PLHashTable* table, const void* key)
{
    PLHashNumber hash = _hash_pointer(key);
    PLHashEntry** hep = PL_HashTableRawLookup(table, hash, key);
    PLHashEntry* he = *hep;
    if (he)
        return (nsNamedVector*) he->value;
    nsNamedVector* vec = new nsNamedVector();
    if (vec)
        PL_HashTableRawAdd(table, hep, hash, key, vec);
    return vec;
}
static void OnSemaphoreCreated(const void* key, const char* name)
{
    if (key && OrderTable) {
        nsNamedVector* value = new nsNamedVector(name);
        if (value) {
            PR_Lock(OrderTableLock);
            PL_HashTableAdd(OrderTable, key, value);
            PR_Unlock(OrderTableLock);
        }
    }
}
// We maintain an acyclic graph in OrderTable, so recursion can't diverge.
static PRBool Reachable(PLHashTable* table, const void* goal, const void* start)
{
    nsNamedVector* vec = GetVector(table, start);
    for (PRUint32 i = 0, n = vec->Count(); i < n; i++) {
        void* addr = vec->ElementAt(i);
        if (addr == goal || Reachable(table, goal, addr))
            return PR_TRUE;
    }
    return PR_FALSE;
}
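
// WellOrdered() records the partial order "addr1 was held when addr2 was
// acquired" and reports a violation when the reverse order is already known,
// directly or transitively.  For example, if one code path enters lock A and
// then lock B, while another path enters B and then A, the second path fails
// the check and triggers the potential-deadlock report in nsAutoLockBase's
// constructor below.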
static PRBool WellOrdered(const void* addr1, const void* addr2,
                          const void *callsite2, PRUint32* index2p,
                          nsNamedVector** vec1p, nsNamedVector** vec2p)
{
    PRBool rv = PR_TRUE;
    PLHashTable* table = OrderTable;
    if (!table) return rv;
    PR_Lock(OrderTableLock);

    // Check whether we've already asserted (addr1 < addr2).
    nsNamedVector* vec1 = GetVector(table, addr1);
    if (vec1) {
        PRUint32 i, n;

        for (i = 0, n = vec1->Count(); i < n; i++)
            if (vec1->ElementAt(i) == addr2)
                break;

        if (i == n) {
            // Now check for (addr2 < addr1) and return false if so.
            nsNamedVector* vec2 = GetVector(table, addr2);
            if (vec2) {
                for (i = 0, n = vec2->Count(); i < n; i++) {
                    void* addri = vec2->ElementAt(i);
                    if (addri == addr1 || Reachable(table, addr1, addri)) {
                        *index2p = i;
                        *vec1p = vec1;
                        *vec2p = vec2;
                        rv = PR_FALSE;
                        break;
                    }
                }

                if (rv) {
                    // Assert (addr1 < addr2) into the order table.
                    // XXX fix plvector/nsVector to use const void*
                    vec1->AppendElement((void*) addr2);
#ifdef NS_TRACE_MALLOC
                    vec1->mInnerSites.AppendElement((void*) callsite2);
#endif
                }
            }
        }
    }

    PR_Unlock(OrderTableLock);
    return rv;
}
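
// Each thread keeps a stack of live nsAutoLockBase records in the
// LockStackTPI thread-private slot, linked through mDown.  The constructor
// checks the new lock against the lock currently on top of the stack and
// then pushes itself; the destructor pops it again.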
nsAutoLockBase::nsAutoLockBase(void* addr, nsAutoLockType type)
{
    if (LockStackTPI == PRUintn(-1))
        InitAutoLockStatics();

    nsAutoLockBase* stackTop =
        (nsAutoLockBase*) PR_GetThreadPrivate(LockStackTPI);
    if (stackTop) {
        if (stackTop->mAddr == addr) {
            // Ignore reentry: it's legal for monitors, and NSPR will assert
            // if you reenter a PRLock.
        } else if (!addr) {
            // Ignore null addresses: the caller promises not to use the
            // lock at all, and NSPR will assert if you enter it.
        } else {
            const void* node =
#ifdef NS_TRACE_MALLOC
                (const void*)NS_TraceMallocGetStackTrace();
#else
                nsnull;
#endif
            nsNamedVector* vec1;
            nsNamedVector* vec2;
            PRUint32 i2;

            if (!WellOrdered(stackTop->mAddr, addr, node, &i2, &vec1, &vec2)) {
                char buf[128];
                PR_snprintf(buf, sizeof buf,
                            "Potential deadlock between %s%s@%p and %s%s@%p",
                            vec1->mName ? vec1->mName : "",
                            LockTypeNames[stackTop->mType],
                            stackTop->mAddr,
                            vec2->mName ? vec2->mName : "",
                            LockTypeNames[type],
                            addr);
#ifdef NS_TRACE_MALLOC
                fprintf(stderr, "\n*** %s\n\nCurrent stack:\n", buf);
                NS_TraceMallocPrintStackTrace(stderr,
                                              NS_TraceMallocGetStackTrace());
                fputs("\nPrevious stack:\n", stderr);
                NS_TraceMallocPrintStackTrace(stderr,
                    (nsTMStackTraceIDStruct*)vec2->mInnerSites.ElementAt(i2));
#endif
                NS_ERROR(buf);
            }
        }
    }

    mAddr = addr;
    mDown = stackTop;
    mType = type;
    if (mAddr)
        (void) PR_SetThreadPrivate(LockStackTPI, this);
}
nsAutoLockBase::~nsAutoLockBase()
{
    if (mAddr)
        (void) PR_SetThreadPrivate(LockStackTPI, mDown);
}
void nsAutoLockBase::Show()
{
    if (!mAddr)
        return;
    nsAutoLockBase* curr = (nsAutoLockBase*) PR_GetThreadPrivate(LockStackTPI);
    nsAutoLockBase* prev = nsnull;
    while (curr != mDown) {
        prev = curr;
        curr = prev->mDown;
    }
    if (!prev)
        PR_SetThreadPrivate(LockStackTPI, this);
    else
        prev->mDown = this;
}
void nsAutoLockBase::Hide()
{
    if (!mAddr)
        return;
    nsAutoLockBase* curr = (nsAutoLockBase*) PR_GetThreadPrivate(LockStackTPI);
    nsAutoLockBase* prev = nsnull;
    while (curr != this) {
        prev = curr;
        curr = prev->mDown;
    }
    if (!prev)
        PR_SetThreadPrivate(LockStackTPI, mDown);
    else
        prev->mDown = mDown;
}
nsAutoUnlockBase::nsAutoUnlockBase(void* addr)
    : mLock(nsnull)
{
    if (addr) {
        nsAutoLockBase* curr =
            (nsAutoLockBase*) PR_GetThreadPrivate(LockStackTPI);
        while (curr && curr->mAddr != addr)
            curr = curr->mDown;
        mLock = curr;
    }
    if (mLock)
        mLock->Hide();
}
nsAutoUnlockBase::~nsAutoUnlockBase()
{
    if (mLock)
        mLock->Show();
}
PRLock* nsAutoLock::NewLock(const char* name)
{
    PRLock* lock = PR_NewLock();
    OnSemaphoreCreated(lock, name);
    return lock;
}
void nsAutoLock::DestroyLock(PRLock* lock)
{
    OnSemaphoreRecycle(lock);
    PR_DestroyLock(lock);
}
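
// Illustrative usage, as a sketch only: the scoped nsAutoLock guard is
// declared in nsAutoLock.h, and the names below are made up for the example.
//
//   PRLock* gFooLock = nsAutoLock::NewLock("gFooLock");
//   ...
//   {
//       nsAutoLock guard(gFooLock);   // locks here, unlocks at end of scope
//       // touch the data that gFooLock protects
//   }
//   ...
//   nsAutoLock::DestroyLock(gFooLock);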
PRMonitor* nsAutoMonitor::NewMonitor(const char* name)
{
    PRMonitor* mon = PR_NewMonitor();
    OnSemaphoreCreated(mon, name);
    return mon;
}
void nsAutoMonitor::DestroyMonitor(PRMonitor* mon)
{
    OnSemaphoreRecycle(mon);
    PR_DestroyMonitor(mon);
}
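
// Illustrative usage, as a sketch only: the scoped nsAutoMonitor guard and
// its Wait/Notify wrappers are declared in nsAutoLock.h; 'gQueueMonitor' and
// 'QueueIsEmpty' are hypothetical.
//
//   PRMonitor* gQueueMonitor = nsAutoMonitor::NewMonitor("gQueueMonitor");
//   ...
//   {
//       nsAutoMonitor mon(gQueueMonitor); // enters here, exits at scope end
//       while (QueueIsEmpty())            // hypothetical predicate
//           mon.Wait();                   // wait for a producer's Notify()
//       // consume an item
//   }
//   ...
//   nsAutoMonitor::DestroyMonitor(gQueueMonitor);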
void nsAutoMonitor::Enter()
{
    if (!mMonitor) {
        NS_ERROR("It is not legal to enter a null monitor");
        return;
    }
    nsAutoLockBase* stackTop =
        (nsAutoLockBase*) PR_GetThreadPrivate(LockStackTPI);
    NS_ASSERTION(stackTop == mDown, "non-LIFO nsAutoMonitor::Enter");
    (void) PR_SetThreadPrivate(LockStackTPI, this);
    PR_EnterMonitor(mMonitor);
}
void nsAutoMonitor::Exit()
{
    if (!mMonitor) {
        NS_ERROR("It is not legal to exit a null monitor");
        return;
    }
    (void) PR_SetThreadPrivate(LockStackTPI, mDown);
    PRStatus status = PR_ExitMonitor(mMonitor);
    NS_ASSERTION(status == PR_SUCCESS, "PR_ExitMonitor failed");
}
// XXX we don't worry about cached monitors being destroyed behind our back.
// XXX current NSPR (mozilla/nsprpub/pr/src/threads/prcmon.c) never destroys
// XXX a cached monitor! potential resource pig in conjunction with necko...
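
// Illustrative usage, as a sketch only: nsAutoCMonitor (declared in
// nsAutoLock.h) keys NSPR's cached monitor on an object's address, so no
// monitor needs to be allocated up front; 'SomeClass' is hypothetical.
//
//   void SomeClass::DoWork()
//   {
//       nsAutoCMonitor mon(this);   // PR_CEnterMonitor(this)
//       // ... guarded work ...
//   }                               // PR_CExitMonitor(this) on scope exit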
void nsAutoCMonitor::Enter()
{
    nsAutoLockBase* stackTop =
        (nsAutoLockBase*) PR_GetThreadPrivate(LockStackTPI);
    NS_ASSERTION(stackTop == mDown, "non-LIFO nsAutoCMonitor::Enter");
    (void) PR_SetThreadPrivate(LockStackTPI, this);
    PR_CEnterMonitor(mLockObject);
}
void nsAutoCMonitor::Exit()
{
    (void) PR_SetThreadPrivate(LockStackTPI, mDown);
    PRStatus status = PR_CExitMonitor(mLockObject);
    NS_ASSERTION(status == PR_SUCCESS, "PR_CExitMonitor failed");
}