// natObject.cc - Implementation of the Object class.

/* Copyright (C) 1998, 1999, 2000, 2001, 2002, 2003  Free Software Foundation

   This file is part of libgcj.

This software is copyrighted work licensed under the terms of the
Libgcj License.  Please consult the file "LIBGCJ_LICENSE" for
details.  */

#include <string.h>

#pragma implementation "Object.h"

#include <gcj/cni.h>
#include <jvm.h>
#include <java/lang/Object.h>
#include <java-threads.h>
#include <java-signal.h>
#include <java/lang/CloneNotSupportedException.h>
#include <java/lang/IllegalArgumentException.h>
#include <java/lang/IllegalMonitorStateException.h>
#include <java/lang/InterruptedException.h>
#include <java/lang/NullPointerException.h>
#include <java/lang/Class.h>
#include <java/lang/Cloneable.h>
#include <java/lang/Thread.h>
// This is used to represent synchronization information.
struct _Jv_SyncInfo
{
#if defined (_Jv_HaveCondDestroy) || defined (_Jv_HaveMutexDestroy)
  // We only need to keep track of initialization state if we can
  // possibly finalize this object.
  bool init;
#endif
  _Jv_ConditionVariable_t condition;
  _Jv_Mutex_t mutex;
};
jclass
java::lang::Object::getClass (void)
{
  _Jv_VTable **dt = (_Jv_VTable **) this;
  return (*dt)->clas;
}
jint
java::lang::Object::hashCode (void)
{
  return _Jv_HashCode (this);
}
jobject
java::lang::Object::clone (void)
{
  jclass klass = getClass ();
  jobject r;
  jint size;

  // We also clone arrays here.  If we put the array code into
  // __JArray, then we'd have to figure out a way to find the array
  // vtbl when creating a new array class.  This is easier, if uglier.
  if (klass->isArray())
    {
      __JArray *array = (__JArray *) this;
      jclass comp = getClass()->getComponentType();
      jint eltsize;
      if (comp->isPrimitive())
	{
	  r = _Jv_NewPrimArray (comp, array->length);
	  eltsize = comp->size();
	}
      else
	{
	  r = _Jv_NewObjectArray (array->length, comp, NULL);
	  eltsize = sizeof (jobject);
	}
      // We can't use sizeof on __JArray because we must account for
      // alignment of the element type.
      size = (_Jv_GetArrayElementFromElementType (array, comp) - (char *) array
	      + array->length * eltsize);
    }
  else
    {
      if (! java::lang::Cloneable::class$.isAssignableFrom(klass))
	throw new CloneNotSupportedException;
      size = klass->size();
      r = JvAllocObject (klass, size);
    }

  memcpy ((void *) r, (void *) this, size);
  return r;
}
void
_Jv_FinalizeObject (jobject obj)
{
  // Ignore exceptions.  From section 12.6 of the Java Language Spec.
  try
    {
      obj->finalize ();
    }
  catch (java::lang::Throwable *t)
    {
      // Ignore.
    }
}
// Synchronization code.

#ifndef JV_HASH_SYNCHRONIZATION
// This global is used to make sure that only one thread sets an
// object's `sync_info' field.
static _Jv_Mutex_t sync_mutex;
// This macro is used to see if synchronization initialization is
// needed.
#if defined (_Jv_HaveCondDestroy) || defined (_Jv_HaveMutexDestroy)
# define INIT_NEEDED(Obj) (! (Obj)->sync_info \
			   || ! ((_Jv_SyncInfo *) ((Obj)->sync_info))->init)
#else
# define INIT_NEEDED(Obj) (! (Obj)->sync_info)
#endif
#if defined (_Jv_HaveCondDestroy) || defined (_Jv_HaveMutexDestroy)
// If we have to run a destructor for a sync_info member, then this
// function is registered as a finalizer for the sync_info.
static void
finalize_sync_info (jobject obj)
{
  _Jv_SyncInfo *si = (_Jv_SyncInfo *) obj;
#if defined (_Jv_HaveCondDestroy)
  _Jv_CondDestroy (&si->condition);
#endif
#if defined (_Jv_HaveMutexDestroy)
  _Jv_MutexDestroy (&si->mutex);
#endif
  si->init = false;
}
#endif
// This is called to initialize the sync_info element of an object.
void
java::lang::Object::sync_init (void)
{
  _Jv_MutexLock (&sync_mutex);
  // Check again to see if initialization is needed now that we have
  // the lock.
  if (INIT_NEEDED (this))
    {
      // We assume there are no pointers in the sync_info
      // representation.
      _Jv_SyncInfo *si;
      // We always create a new sync_info, even if there is already
      // one available.  Any given object can only be finalized once.
      // If we get here and sync_info is not null, then it has already
      // been finalized.  So if we just reinitialize the old one,
      // we'll never be able to (re-)destroy the mutex and/or
      // condition variable.
      si = (_Jv_SyncInfo *) _Jv_AllocBytes (sizeof (_Jv_SyncInfo));
      _Jv_MutexInit (&si->mutex);
      _Jv_CondInit (&si->condition);
#if defined (_Jv_HaveCondDestroy) || defined (_Jv_HaveMutexDestroy)
      // Register a finalizer.
      si->init = true;
      _Jv_RegisterFinalizer (si, finalize_sync_info);
#endif
      sync_info = (jobject) si;
    }
  _Jv_MutexUnlock (&sync_mutex);
}
void
java::lang::Object::notify (void)
{
  if (__builtin_expect (INIT_NEEDED (this), false))
    sync_init ();
  _Jv_SyncInfo *si = (_Jv_SyncInfo *) sync_info;
  if (__builtin_expect (_Jv_CondNotify (&si->condition, &si->mutex), false))
    throw new IllegalMonitorStateException(JvNewStringLatin1
					   ("current thread not owner"));
}
void
java::lang::Object::notifyAll (void)
{
  if (__builtin_expect (INIT_NEEDED (this), false))
    sync_init ();
  _Jv_SyncInfo *si = (_Jv_SyncInfo *) sync_info;
  if (__builtin_expect (_Jv_CondNotifyAll (&si->condition, &si->mutex), false))
    throw new IllegalMonitorStateException(JvNewStringLatin1
					   ("current thread not owner"));
}
void
java::lang::Object::wait (jlong timeout, jint nanos)
{
  if (__builtin_expect (INIT_NEEDED (this), false))
    sync_init ();
  if (__builtin_expect (timeout < 0 || nanos < 0 || nanos > 999999, false))
    throw new IllegalArgumentException;
  _Jv_SyncInfo *si = (_Jv_SyncInfo *) sync_info;
  switch (_Jv_CondWait (&si->condition, &si->mutex, timeout, nanos))
    {
      case _JV_NOT_OWNER:
	throw new IllegalMonitorStateException (JvNewStringLatin1
						("current thread not owner"));
      case _JV_INTERRUPTED:
	if (Thread::interrupted ())
	  throw new InterruptedException;
    }
}
// Some runtime code.

// This function is called at system startup to initialize the
// `sync_mutex'.
void
_Jv_InitializeSyncMutex (void)
{
  _Jv_MutexInit (&sync_mutex);
}
void
_Jv_MonitorEnter (jobject obj)
{
  if (__builtin_expect (! obj, false))
    throw new java::lang::NullPointerException;

  if (__builtin_expect (INIT_NEEDED (obj), false))
    obj->sync_init ();
  _Jv_SyncInfo *si = (_Jv_SyncInfo *) obj->sync_info;
  _Jv_MutexLock (&si->mutex);
  // FIXME: In the Windows case, this can return a nonzero error code.
  // We should turn that into some exception ...
}
void
_Jv_MonitorExit (jobject obj)
{
  JvAssert (! INIT_NEEDED (obj));
  _Jv_SyncInfo *si = (_Jv_SyncInfo *) obj->sync_info;
  if (__builtin_expect (_Jv_MutexUnlock (&si->mutex), false))
    throw new java::lang::IllegalMonitorStateException;
}
bool
_Jv_ObjectCheckMonitor (jobject obj)
{
  _Jv_SyncInfo *si = (_Jv_SyncInfo *) obj->sync_info;
  return _Jv_MutexCheckMonitor (&si->mutex);
}
#else /* JV_HASH_SYNCHRONIZATION */

// FIXME: We shouldn't be calling GC_register_finalizer directly.
#ifndef HAVE_BOEHM_GC
# error Hash synchronization currently requires boehm-gc
// That's actually a bit of a lie: It should also work with the null GC,
// probably even better than the alternative.
// To really support alternate GCs here, we would need to widen the
// interface to finalization, since we sometimes have to register a
// second finalizer for an object that already has one.
// We might also want to move the GC interface to a .h file, since
// the number of procedure call levels involved in some of these
// operations is already ridiculous, and would become worse if we
// went through the proper intermediaries.
#endif

// What follows currently assumes a Linux-like platform.
// Some of it specifically assumes X86 or IA64 Linux, though that
// should be easily fixable.
// A Java monitor implementation based on a table of locks.
// Each entry in the table describes
// locks held for objects that hash to that location.
// This started out as a reimplementation of the technique used in SGI's JVM,
// for which we obtained permission from SGI.
// But in fact, this ended up quite different, though some ideas are
// still shared with the original.
// It was also influenced by some of the published IBM work,
// though it also differs in many ways from that.
// We could speed this up if we had a way to atomically update
// an entire cache entry, i.e. 2 contiguous words of memory.
// That would usually be the case with a 32 bit ABI on a 64 bit processor.
// But we don't currently go out of our way to target those.
// I don't know how to do much better with an N bit ABI on a processor
// that can atomically update only N bits at a time.
// Author: Hans-J. Boehm  (Hans_Boehm@hp.com, boehm@acm.org)
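
// Illustrative sketch (not part of the runtime): the scheme below keeps
// no per-object monitor pointer.  Instead an object's address is hashed
// into a fixed table of lock entries, and all objects hashing to an
// entry share its lightweight lock word and its chain of heavy locks.
// The demo_ names here are hypothetical; the real table, entry layout,
// and hash are light_locks, hash_entry, and JV_SYNC_HASH below.
static inline unsigned long
demo_lock_table_index (void *obj)
{
  unsigned long p = (unsigned long) obj;
  // Fold some high bits into the index so objects allocated close
  // together spread across the table, then mask to a power-of-2 size.
  // E.g., for p == 0x12345678: (p ^ (p >> 10)) & 2047 == 877.
  const unsigned long demo_table_size = 2048;
  return (p ^ (p >> 10)) & (demo_table_size - 1);
}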
#include <unistd.h>	// for usleep, sysconf.
#include <limits.h>	// for USHRT_MAX.
#include <gcj/javaprims.h>
#include <sysdep/locks.h>
#include <java/lang/Thread.h>
// Try to determine whether we are on a multiprocessor, i.e. whether
// spinning may be profitable.
// This should really use a suitable autoconf macro.
// False is the conservative answer, though the right one is much better.
static bool
is_mp()
{
#ifdef _SC_NPROCESSORS_ONLN
  long nprocs = sysconf(_SC_NPROCESSORS_ONLN);
  return (nprocs > 1);
#else
  return false;
#endif
}
// A call to keep_live(p) forces p to be accessible to the GC
// at this point.
static inline void
keep_live(obj_addr_t p)
{
  __asm__ __volatile__("" : : "rm"(p) : "memory");
}
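
// A sketch of the intended use of keep_live (illustrative only, not
// called anywhere): the code below often retains an object only as an
// integer address, which the compiler need not treat as keeping the
// object alive.  The empty asm above forces p to remain live, so a
// conservatively scanned register or stack slot still references it.
// The demo_ name is hypothetical.
static inline void
demo_keep_live_use (jobject obj)
{
  obj_addr_t addr = (obj_addr_t) obj;
  // ... monitor bookkeeping that touches only addr, not obj ...
  keep_live (addr);	// Object must not be collected before this point.
}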
// Each hash table entry holds a single preallocated "lightweight" lock.
// In addition, it holds a chain of "heavyweight" locks.  Lightweight
// locks do not support Object.wait(), and are converted to heavyweight
// status in response to contention.  Unlike the SGI scheme, both
// lightweight and heavyweight locks in one hash entry can be simultaneously
// in use.  (The SGI scheme requires that we be able to acquire a heavyweight
// lock on behalf of another thread, and can thus convert a lock we don't
// hold to heavyweight status.  Here we don't insist on that, and thus
// let the original holder of the lightweight lock keep it.)
struct heavy_lock {
  void * reserved_for_gc;
  struct heavy_lock *next;	// Hash chain link.
				// Traced by GC.
  // The next two fields save the object's prior finalization info,
  // which we needed to replace in order to arrange
  // for cleanup of the lock structure.
  void * old_client_data;	// The only other field traced by GC.
  GC_finalization_proc old_finalization_proc;
  obj_addr_t address;		// Object to which this lock corresponds.
				// Should not be traced by GC.
				// Cleared as heavy_lock is destroyed.
				// Together with the rest of the heavy lock
				// chain, this is protected by the lock
				// bit in the hash table entry to which
				// the chain is attached.
  _Jv_SyncInfo si;		// The mutex and condition variable
				// backing this heavyweight lock.
};
#ifdef LOCK_DEBUG
void
print_hl_list(heavy_lock *hl)
{
  heavy_lock *p = hl;
  for (; 0 != p; p = p->next)
    fprintf (stderr, "(hl = %p, addr = %p)", p, (void *)(p -> address));
}
#endif /* LOCK_DEBUG */
#if defined (_Jv_HaveCondDestroy) || defined (_Jv_HaveMutexDestroy)
// If we have to run a destructor for a sync_info member, then this
// function could be registered as a finalizer for the sync_info.
// In fact, we now only invoke it explicitly.
static inline void
heavy_lock_finalization_proc (heavy_lock *hl)
{
#if defined (_Jv_HaveCondDestroy)
  _Jv_CondDestroy (&hl->si.condition);
#endif
#if defined (_Jv_HaveMutexDestroy)
  _Jv_MutexDestroy (&hl->si.mutex);
#endif
  hl->si.init = false;
}
#endif /* defined (_Jv_HaveCondDestroy) || defined (_Jv_HaveMutexDestroy) */
// We convert the lock back to lightweight status when
// we exit, so that a single contention episode doesn't doom the lock
// forever.  But we also need to make sure that lock structures for dead
// objects are eventually reclaimed.  We do that in an additional
// finalizer on the underlying object.
// Note that if the corresponding object is dead, it is safe to drop
// the heavy_lock structure from its list.  It is not necessarily
// safe to deallocate it, since the unlock code could still be running.
struct hash_entry {
  volatile obj_addr_t address;	// Address of object for which lightweight
				// lock is held.
				// We assume the 3 low order bits are zero.
				// With the Boehm collector and bitmap
				// allocation, objects of size 4 bytes are
				// broken anyway.  Thus this is primarily
				// a constraint on statically allocated
				// objects used for synchronization.
				// This allows us to use the low order
				// bits as follows:
#   define LOCKED	1	// This hash entry is locked, and its
				// state may be invalid.
				// The lock protects both the hash_entry
				// itself (except for the light_count
				// and light_thr_id fields, which
				// are protected by the lightweight
				// lock itself), and any heavy_lock
				// structures attached to it.
#   define HEAVY	2	// There may be heavyweight locks
				// associated with this cache entry.
				// The lightweight entry is still valid,
				// if the leading bits of the address
				// field are nonzero.
				// Set if heavy_count is > 0 .
				// Stored redundantly so a single
				// compare-and-swap works in the easy case.
#   define REQUEST_CONVERSION 4 // The lightweight lock is held.  But
				// one or more other threads have tried
				// to acquire the lock, and hence request
				// conversion to heavyweight status.
#   define FLAGS (LOCKED | HEAVY | REQUEST_CONVERSION)
  volatile _Jv_ThreadId_t light_thr_id;
				// Thr_id of holder of lightweight lock.
				// Only updated by lightweight lock holder.
				// Must be recognizably invalid if the
				// lightweight lock is not held.
#   define INVALID_THREAD_ID 0	// Works for Linux?
				// If zero doesn't work, we have to
				// initialize lock table.
  volatile unsigned short light_count;
				// Number of times the lightweight lock
				// is held minus one.  Zero if lightweight
				// lock is not held.
  unsigned short heavy_count;	// Total number of times heavyweight locks
				// associated with this hash entry are held
				// or waiting to be acquired.
				// Threads in wait() are included even though
				// they have temporarily released the lock.
  struct heavy_lock * heavy_locks;
				// Chain of heavy locks.  Protected
				// by lockbit for he.  Locks may
				// remain allocated here even if HEAVY
				// is not set and heavy_count is 0.
				// If a lightweight and heavyweight lock
				// correspond to the same address, the
				// lightweight lock is the right one.
};
#ifndef JV_SYNC_TABLE_SZ
# define JV_SYNC_TABLE_SZ 2048	// Must be power of 2.
#endif

hash_entry light_locks[JV_SYNC_TABLE_SZ];

#define JV_SYNC_HASH(p) (((long)p ^ ((long)p >> 10)) & (JV_SYNC_TABLE_SZ-1))
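
// Illustrative sketch (not part of the runtime): a lock word in this
// table packs an object address with the three flag bits defined above,
// so each part is recovered with a mask.  The demo_ name is
// hypothetical.
static inline obj_addr_t
demo_lock_word_owner (hash_entry *he)
{
  obj_addr_t address = he -> address;
  if (address & LOCKED)		// Entry is mid-update; contents unstable.
    return 0;
  return address & ~FLAGS;	// Object holding the lightweight lock;
				// zero if the lightweight lock is unused.
}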
// Note that the light_locks table is scanned conservatively by the
// collector.  It is essential that the heavy_locks field is scanned.
// Currently the address field may or may not cause the associated object
// to be retained, depending on whether flag bits are set.
// This means that we can conceivably get an unexpected deadlock if
// 1) Object at address A is locked.
// 2) The client drops A without unlocking it.
// 3) Flag bits in the address entry are set, so the collector reclaims
//    the object at A.
// 4) A is reallocated, and an attempt is made to lock the result.
// This could be fixed by scanning light_locks in a more customized
// manner that ignores the flag bits.  But it can only happen with hand
// generated semi-illegal .class files, and then it doesn't present a
// security risk.
#ifdef LOCK_DEBUG
void print_he(hash_entry *he)
{
  fprintf(stderr, "lock hash entry = %p, index = %d, address = 0x%lx\n"
		  "\tlight_thr_id = 0x%lx, light_count = %d, "
		  "heavy_count = %d\n\theavy_locks:", he,
		  he - light_locks, he -> address, he -> light_thr_id,
		  he -> light_count, he -> heavy_count);
  print_hl_list(he -> heavy_locks);
  fprintf(stderr, "\n");
}
#endif /* LOCK_DEBUG */
static bool mp = false;		// Known multiprocessor.
// Wait for roughly 2^n units, touching as little memory as possible.
static void
spin(unsigned n)
{
  const unsigned MP_SPINS = 10;
  const unsigned YIELDS = 4;
  const unsigned SPINS_PER_UNIT = 30;
  const unsigned MIN_SLEEP_USECS = 2001; // Shorter times spin under Linux.
  const unsigned MAX_SLEEP_USECS = 200000;
  static unsigned spin_limit = 0;
  static unsigned yield_limit = YIELDS;
  static bool spin_initialized = false;

  if (!spin_initialized)
    {
      mp = is_mp();
      if (mp)
	{
	  spin_limit = MP_SPINS;
	  yield_limit = MP_SPINS + YIELDS;
	}
      spin_initialized = true;
    }
  if (n < spin_limit)
    {
      unsigned i = SPINS_PER_UNIT << n;
      for (; i > 0; --i)
	__asm__ __volatile__("");
    }
  else if (n < yield_limit)
    {
      _Jv_ThreadYield();
    }
  else
    {
      unsigned duration = MIN_SLEEP_USECS << (n - yield_limit);
      if (n >= 15 + yield_limit || duration > MAX_SLEEP_USECS)
	duration = MAX_SLEEP_USECS;
      _Jv_platform_usleep(duration);
    }
}
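
// Illustrative sketch (not part of the runtime) of the retry discipline
// used throughout this file: read the entry, attempt one
// compare_and_swap to set the lock bit, and back off with spin() on
// failure.  The real callers inline this pattern (see wait_unlocked and
// _Jv_MonitorEnter below); the demo_ name is hypothetical.
static inline void
demo_lock_hash_entry (hash_entry *he)
{
  unsigned spin_count = 0;
  for (;;)
    {
      obj_addr_t address = he -> address & ~LOCKED;
      if (compare_and_swap (&(he -> address), address, address | LOCKED))
	return;			// We now own the entry's lock bit.
      spin (spin_count++);	// Progressively: spin, yield, then sleep.
    }
}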
// Wait for a hash entry to become unlocked.
static void
wait_unlocked (hash_entry *he)
{
  unsigned i = 0;
  while (he -> address & LOCKED)
    spin (i++);
}
// Return the heavy lock for addr if it was already allocated.
// The client passes in the appropriate hash_entry.
// We hold the lock for he.
static inline heavy_lock *
find_heavy (obj_addr_t addr, hash_entry *he)
{
  heavy_lock *hl = he -> heavy_locks;
  while (hl != 0 && hl -> address != addr) hl = hl -> next;
  return hl;
}
// Unlink the heavy lock for the given address from its hash table chain.
// Dies miserably and conspicuously if it's not there, since that should
// be impossible.
static inline void
unlink_heavy (obj_addr_t addr, hash_entry *he)
{
  heavy_lock **currentp = &(he -> heavy_locks);
  while ((*currentp) -> address != addr)
    currentp = &((*currentp) -> next);
  *currentp = (*currentp) -> next;
}
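
// The function above uses the classic pointer-to-pointer unlink idiom:
// walking a pointer to the link field, rather than to the node, means
// removing the head of the chain needs no special case.  A generic,
// self-contained sketch of the same idiom (demo_ names hypothetical):
struct demo_node { obj_addr_t key; demo_node *next; };

static inline void
demo_unlink (demo_node **headp, obj_addr_t key)
{
  demo_node **linkp = headp;
  while ((*linkp) -> key != key)	// Like unlink_heavy, assumes the
    linkp = &((*linkp) -> next);	// node is present.
  *linkp = (*linkp) -> next;	// Rewrite whichever link pointed at it.
}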
// Finalization procedure for objects that have associated heavy-weight
// locks.  This may replace the real finalization procedure.
static void
heavy_lock_obj_finalization_proc (void *obj, void *cd)
{
  heavy_lock *hl = (heavy_lock *)cd;

// This only addresses misalignment of statics, not heap objects.  It
// works only because registering statics for finalization is a noop,
// no matter what the least significant bits are.
#ifdef JV_LINKER_CANNOT_8BYTE_ALIGN_STATICS
  obj_addr_t addr = (obj_addr_t)obj & ~((obj_addr_t)0x7);
#else
  obj_addr_t addr = (obj_addr_t)obj;
#endif
  hash_entry *he = light_locks + JV_SYNC_HASH(addr);
  obj_addr_t he_address = (he -> address & ~LOCKED);

  // Acquire lock bit immediately.  It's possible that the hl was already
  // destroyed while we were waiting for the finalizer to run.  If it
  // was, the address field was set to zero.  The address field access is
  // protected by the lock bit to ensure that we do this exactly once.
  // The lock bit also protects updates to the object's finalizer.
  while (!compare_and_swap(&(he -> address), he_address, he_address|LOCKED))
    {
      // Hash table entry is currently locked.  We can't safely
      // touch the list of heavy locks.
      wait_unlocked(he);
      he_address = (he -> address & ~LOCKED);
    }
  if (0 == hl -> address)
    {
      // remove_all_heavy destroyed hl, and took care of the real finalizer.
      release_set(&(he -> address), he_address);
      return;
    }
  JvAssert(hl -> address == addr);
  GC_finalization_proc old_finalization_proc = hl -> old_finalization_proc;
  if (old_finalization_proc != 0)
    {
      // We still need to run a real finalizer.  In an idealized
      // world, in which people write thread-safe finalizers, that is
      // likely to require synchronization.  Thus we reregister
      // ourselves as the only finalizer, and simply run the real one.
      // Thus we don't clean up the lock yet, but we're likely to do so
      // on the next GC cycle.
      // It's OK if remove_all_heavy actually destroys the heavy lock,
      // since we've updated old_finalization_proc, and thus the user's
      // finalizer won't be rerun.
      void * old_client_data = hl -> old_client_data;
      hl -> old_finalization_proc = 0;
      hl -> old_client_data = 0;
#     ifdef HAVE_BOEHM_GC
	GC_REGISTER_FINALIZER_NO_ORDER(obj, heavy_lock_obj_finalization_proc,
				       cd, 0, 0);
#     endif
      release_set(&(he -> address), he_address);
      old_finalization_proc(obj, old_client_data);
    }
  else
    {
      // The object is really dead, although it's conceivable that
      // some thread may still be in the process of releasing the
      // heavy lock.  Unlink it and, if necessary, register a finalizer
      // to destroy sync_info.
      unlink_heavy(addr, he);
      hl -> address = 0;	// Don't destroy it again.
      release_set(&(he -> address), he_address);
#     if defined (_Jv_HaveCondDestroy) || defined (_Jv_HaveMutexDestroy)
	// Make sure lock is not held and then destroy condvar and mutex.
	_Jv_MutexLock(&(hl->si.mutex));
	_Jv_MutexUnlock(&(hl->si.mutex));
	heavy_lock_finalization_proc (hl);
#     endif
    }
}
// We hold the lock on he, and heavy_count is 0.
// Release the lock by replacing the address with new_address_val.
// Remove all heavy locks on the list.  Note that the only possible way
// in which a lock may still be in use is if it's in the process of
// being unlocked.
static void
remove_all_heavy (hash_entry *he, obj_addr_t new_address_val)
{
  JvAssert(he -> heavy_count == 0);
  JvAssert(he -> address & LOCKED);
  heavy_lock *hl = he -> heavy_locks;
  he -> heavy_locks = 0;
  // We would really like to release the lock bit here.  Unfortunately, that
  // creates a race between our finalizer removal, and the potential
  // reinstallation of a new finalizer as a new heavy lock is created.
  // This may need to be revisited.
  for(; 0 != hl; hl = hl->next)
    {
      obj_addr_t obj = hl -> address;
      JvAssert(0 != obj);	// If this was previously finalized, it should no
				// longer appear on our list.
      hl -> address = 0;	// Finalization proc might still see it after we
				// finish.
      GC_finalization_proc old_finalization_proc = hl -> old_finalization_proc;
      void * old_client_data = hl -> old_client_data;
#     ifdef HAVE_BOEHM_GC
	// Remove our finalization procedure.
	// Reregister the clients if applicable.
	GC_REGISTER_FINALIZER_NO_ORDER((GC_PTR)obj, old_finalization_proc,
				       old_client_data, 0, 0);
	// Note that our old finalization procedure may have been
	// previously determined to be runnable, and may still run.
	// FIXME - direct dependency on boehm GC.
#     endif
#     if defined (_Jv_HaveCondDestroy) || defined (_Jv_HaveMutexDestroy)
	// Wait for a possible lock holder to finish unlocking it.
	// This is only an issue if we have to explicitly destroy the mutex
	// or possibly if we have to destroy a condition variable that is
	// still being notified.
	_Jv_MutexLock(&(hl->si.mutex));
	_Jv_MutexUnlock(&(hl->si.mutex));
	heavy_lock_finalization_proc (hl);
#     endif
    }
  release_set(&(he -> address), new_address_val);
}
// We hold the lock on he and heavy_count is 0.
// We release it by replacing the address field with new_address_val.
// Remove all heavy locks on the list if the list is sufficiently long.
// This is called periodically to avoid very long lists of heavy locks.
// This seems to otherwise become an issue with SPECjbb, for example.
static inline void
maybe_remove_all_heavy (hash_entry *he, obj_addr_t new_address_val)
{
  static const int max_len = 5;
  heavy_lock *hl = he -> heavy_locks;

  for (int i = 0; i < max_len; ++i)
    {
      if (0 == hl)
	{
	  release_set(&(he -> address), new_address_val);
	  return;
	}
      hl = hl -> next;
    }
  remove_all_heavy(he, new_address_val);
}
// Allocate a new heavy lock for addr, returning its address.
// Assumes we already have the hash_entry locked, and there
// is currently no lightweight or allocated lock for addr.
// We register a finalizer for addr, which is responsible for
// removing the heavy lock when addr goes away, in addition
// to the responsibilities of any prior finalizer.
// This unfortunately holds the lock bit for the hash entry while it
// allocates two objects (one for the finalizer).
// It would be nice to avoid that somehow ...
static heavy_lock *
alloc_heavy(obj_addr_t addr, hash_entry *he)
{
  heavy_lock * hl = (heavy_lock *) _Jv_AllocTraceTwo(sizeof (heavy_lock));

  hl -> address = addr;
  _Jv_MutexInit (&(hl -> si.mutex));
  _Jv_CondInit (&(hl -> si.condition));
# if defined (_Jv_HaveCondDestroy) || defined (_Jv_HaveMutexDestroy)
    hl -> si.init = true;	// needed ?
# endif
  hl -> next = he -> heavy_locks;
  he -> heavy_locks = hl;
  // FIXME: The only call that cheats and goes directly to the GC interface.
# ifdef HAVE_BOEHM_GC
    GC_REGISTER_FINALIZER_NO_ORDER(
			  (void *)addr, heavy_lock_obj_finalization_proc,
			  hl, &hl->old_finalization_proc,
			  &hl->old_client_data);
# endif /* HAVE_BOEHM_GC */
  return hl;
}
// Return the heavy lock for addr, allocating if necessary.
// Assumes we have the cache entry locked, and there is no lightweight
// lock for addr.
static heavy_lock *
get_heavy(obj_addr_t addr, hash_entry *he)
{
  heavy_lock *hl = find_heavy(addr, he);
  if (0 == hl)
    hl = alloc_heavy(addr, he);
  return hl;
}
void
_Jv_MonitorEnter (jobject obj)
{
#ifdef JV_LINKER_CANNOT_8BYTE_ALIGN_STATICS
  obj_addr_t addr = (obj_addr_t)obj & ~((obj_addr_t)FLAGS);
#else
  obj_addr_t addr = (obj_addr_t)obj;
#endif
  obj_addr_t address;
  unsigned hash = JV_SYNC_HASH(addr);
  hash_entry * he = light_locks + hash;
  _Jv_ThreadId_t self = _Jv_ThreadSelf();
  unsigned count;
  const unsigned N_SPINS = 18;

  // We need to somehow check that addr is not NULL on the fast path.
  // A very predictable
  // branch on a register value is probably cheaper than dereferencing addr.
  // We could also permanently lock the NULL entry in the hash table.
  // But it's not clear that's cheaper either.
  if (__builtin_expect(!addr, false))
    throw new java::lang::NullPointerException;

  JvAssert(!(addr & FLAGS));
retry:
  if (__builtin_expect(compare_and_swap(&(he -> address),
					0, addr), true))
    {
      JvAssert(he -> light_thr_id == INVALID_THREAD_ID);
      JvAssert(he -> light_count == 0);
      he -> light_thr_id = self;
      // Count fields are set correctly.  Heavy_count was also zero,
      // but can change asynchronously.
      // This path is hopefully both fast and the most common.
      return;
    }
  address = he -> address;
  if ((address & ~(HEAVY | REQUEST_CONVERSION)) == addr)
    {
      if (he -> light_thr_id == self)
	{
	  // We hold the lightweight lock, and it's for the right
	  // address.
	  count = he -> light_count;
	  if (count == USHRT_MAX)
	    {
	      // I think most JVMs don't check for this.
	      // But I'm not convinced I couldn't turn this into a security
	      // hole, even with a 32 bit counter.
	      throw new java::lang::IllegalMonitorStateException(
		JvNewStringLatin1("maximum monitor nesting level exceeded"));
	    }
	  he -> light_count = count + 1;
	  return;
	}
      else
	{
	  // Lightweight lock is held, but by someone else.
	  // Spin a few times.  This avoids turning this into a heavyweight
	  // lock if the current holder is about to release it.
	  for (unsigned int i = 0; i < N_SPINS; ++i)
	    {
	      if ((he -> address & ~LOCKED) != (address & ~LOCKED)) goto retry;
	      spin(i);
	    }
	  address &= ~LOCKED;
	  if (!compare_and_swap(&(he -> address), address, address | LOCKED))
	    {
	      wait_unlocked(he);
	      goto retry;
	    }
	  heavy_lock *hl = get_heavy(addr, he);
	  ++ (he -> heavy_count);
	  // The hl lock acquisition can't block for long, since it can
	  // only be held by other threads waiting for conversion, and
	  // they, like us, drop it quickly without blocking.
	  _Jv_MutexLock(&(hl->si.mutex));
	  JvAssert(he -> address == (address | LOCKED));
	  release_set(&(he -> address), (address | REQUEST_CONVERSION | HEAVY));
				// release lock on he
	  while ((he -> address & ~FLAGS) == (address & ~FLAGS))
	    {
	      // Once converted, the lock has to retain heavyweight
	      // status, since heavy_count > 0 .
	      _Jv_CondWait (&(hl->si.condition), &(hl->si.mutex), 0, 0);
	    }
	  keep_live(addr);
		// Guarantee that hl doesn't get unlinked by finalizer.
		// This is only an issue if the client fails to release
		// the lock, which is unlikely.
	  JvAssert(he -> address & HEAVY);
	  // Lock has been converted, we hold the heavyweight lock,
	  // heavy_count has been incremented.
	  return;
	}
    }
  obj_addr_t was_heavy = (address & HEAVY);
  address &= ~LOCKED;
  if (!compare_and_swap(&(he -> address), address, (address | LOCKED)))
    {
      wait_unlocked(he);
      goto retry;
    }
  if ((address & ~(HEAVY | REQUEST_CONVERSION)) == 0)
    {
      // Either was_heavy is true, or something changed out from under us,
      // since the initial test for 0 failed.
      JvAssert(!(address & REQUEST_CONVERSION));
	// Can't convert a nonexistent lightweight lock.
      heavy_lock *hl;
      hl = (was_heavy ? find_heavy(addr, he) : 0);
      if (0 == hl)
	{
	  // It is OK to use the lightweight lock, since either the
	  // heavyweight lock does not exist, or none of the
	  // heavyweight locks currently exist.  Future threads
	  // trying to acquire the lock will see the lightweight
	  // one first and use that.
	  he -> light_thr_id = self;	// OK, since nobody else can hold
					// light lock or do this at the same time.
	  JvAssert(he -> light_count == 0);
	  JvAssert(was_heavy == (he -> address & HEAVY));
	  release_set(&(he -> address), (addr | was_heavy));
	}
      else
	{
	  // Must use heavy lock.
	  ++ (he -> heavy_count);
	  JvAssert(0 == (address & ~HEAVY));
	  release_set(&(he -> address), HEAVY);
	  _Jv_MutexLock(&(hl->si.mutex));
	  keep_live(addr);
	}
      return;
    }
  // Lightweight lock is held, but does not correspond to this object.
  // We hold the lock on the hash entry, and he -> address can't
  // change from under us.  Neither can the chain of heavy locks.
    {
      JvAssert(0 == he -> heavy_count || (address & HEAVY));
      heavy_lock *hl = get_heavy(addr, he);
      ++ (he -> heavy_count);
      release_set(&(he -> address), address | HEAVY);
      _Jv_MutexLock(&(hl->si.mutex));
      keep_live(addr);
    }
}
void
_Jv_MonitorExit (jobject obj)
{
#ifdef JV_LINKER_CANNOT_8BYTE_ALIGN_STATICS
  obj_addr_t addr = (obj_addr_t)obj & ~((obj_addr_t)FLAGS);
#else
  obj_addr_t addr = (obj_addr_t)obj;
#endif
  _Jv_ThreadId_t self = _Jv_ThreadSelf();
  unsigned hash = JV_SYNC_HASH(addr);
  hash_entry * he = light_locks + hash;
  _Jv_ThreadId_t light_thr_id;
  unsigned count;
  obj_addr_t address;

retry:
  light_thr_id = he -> light_thr_id;
  // Unfortunately, it turns out we always need to read the address
  // first.  Even if we are going to update it with compare_and_swap,
  // we need to reset light_thr_id, and that's not safe unless we know
  // that we hold the lock.
  address = he -> address;
  // First the (relatively) fast cases:
  if (__builtin_expect(light_thr_id == self, true))
    // Above must fail if addr == 0 .
    {
      count = he -> light_count;
      if (__builtin_expect((address & ~HEAVY) == addr, true))
	{
	  if (count != 0)
	    {
	      // We held the lightweight lock all along.  Thus the values
	      // we saw for light_thr_id and light_count must have been valid.
	      he -> light_count = count - 1;
	      return;
	    }
	  else
	    {
	      // We hold the lightweight lock once.
	      he -> light_thr_id = INVALID_THREAD_ID;
	      if (compare_and_swap_release(&(he -> address), address,
					   address & HEAVY))
		return;
	      else
		{
		  he -> light_thr_id = light_thr_id;	// Undo prior damage.
		  goto retry;
		}
	    }
	}
      // else lock is not for this address, conversion is requested,
      // or the lock bit in the address field is set.
    }
  else
    {
      if (__builtin_expect(!addr, false))
	throw new java::lang::NullPointerException;
      if ((address & ~(HEAVY | REQUEST_CONVERSION)) == addr)
	{
#	  ifdef LOCK_DEBUG
	    fprintf(stderr, "Lightweight lock held by other thread\n\t"
			    "light_thr_id = 0x%lx, self = 0x%lx, "
			    "address = 0x%lx, pid = %d\n",
			    light_thr_id, self, address, getpid());
	    print_he(he);
	    for(;;) {}
#	  endif
	  // Someone holds the lightweight lock for this object, and
	  // it can't be us.
	  throw new java::lang::IllegalMonitorStateException(
			JvNewStringLatin1("current thread not owner"));
	}
      else
	count = he -> light_count;
    }
  if (address & LOCKED)
    {
      wait_unlocked(he);
      goto retry;
    }
  // Now the unlikely cases.
  // We do know that:
  // - Address is set, and doesn't contain the LOCKED bit.
  // - If address refers to the same object as addr, then he -> light_thr_id
  //   refers to this thread, and count is valid.
  // - The case in which we held the lightweight lock has been
  //   completely handled, except for the REQUEST_CONVERSION case.

  if ((address & ~FLAGS) == addr)
    {
      // The lightweight lock is assigned to this object.
      // Thus we must be in the REQUEST_CONVERSION case.
      if (0 != count)
	{
	  // Defer conversion until we exit completely.
	  he -> light_count = count - 1;
	  return;
	}
      JvAssert(he -> light_thr_id == self);
      JvAssert(address & REQUEST_CONVERSION);
      // Conversion requested
      if (!compare_and_swap(&(he -> address), address, address | LOCKED))
	goto retry;
      heavy_lock *hl = find_heavy(addr, he);
      JvAssert (0 != hl);	// Requestor created it.
      he -> light_count = 0;
      JvAssert(he -> heavy_count > 0);	// was incremented by requestor.
      _Jv_MutexLock(&(hl->si.mutex));
      // Release the he lock after acquiring the mutex.
      // Otherwise we can accidentally
      // notify a thread that has already seen a heavyweight
      // lock.
      he -> light_thr_id = INVALID_THREAD_ID;
      release_set(&(he -> address), HEAVY);
				// lightweight lock now unused.
      _Jv_CondNotifyAll(&(hl->si.condition), &(hl->si.mutex));
      _Jv_MutexUnlock(&(hl->si.mutex));
      // heavy_count was already incremented by original requestor.
      keep_live(addr);
      return;
    }
  // lightweight lock not for this object.
  JvAssert(!(address & LOCKED));
  JvAssert((address & ~FLAGS) != addr);
  if (!compare_and_swap(&(he -> address), address, address | LOCKED))
    goto retry;
  heavy_lock *hl = find_heavy(addr, he);
  if (0 == hl)
    {
#     ifdef LOCK_DEBUG
	fprintf(stderr, "Failed to find heavyweight lock for addr 0x%lx"
			" pid = %d\n", addr, getpid());
	print_he(he);
	for(;;) {}
#     endif
      release_set(&(he -> address), address);
      throw new java::lang::IllegalMonitorStateException(
			JvNewStringLatin1("current thread not owner"));
    }
  JvAssert(address & HEAVY);
  count = he -> heavy_count;
  JvAssert(count > 0);
  --count;
  he -> heavy_count = count;
  if (0 == count)
    {
      const unsigned test_freq = 16;	// Power of 2
      static volatile unsigned counter = 0;
      unsigned my_counter = counter;

      counter = my_counter + 1;
      if (my_counter%test_freq == 0)
	{
	  // Randomize the interval length a bit.
	  counter = my_counter + (my_counter >> 4) % (test_freq/2);
	  // Unlock mutex first, to avoid self-deadlock, or worse.
	  _Jv_MutexUnlock(&(hl->si.mutex));
	  maybe_remove_all_heavy(he, address&~HEAVY);
				// release lock bit, preserving
				// REQUEST_CONVERSION
				// and object address.
	}
      else
	{
	  release_set(&(he -> address), address&~HEAVY);
	  _Jv_MutexUnlock(&(hl->si.mutex));
			// Unlock after releasing the lock bit, so that
			// we don't switch to another thread prematurely.
	}
    }
  else
    {
      release_set(&(he -> address), address);
      _Jv_MutexUnlock(&(hl->si.mutex));
    }
  keep_live(addr);
}
// Return false if obj's monitor is held by the current thread.
bool
_Jv_ObjectCheckMonitor (jobject obj)
{
#ifdef JV_LINKER_CANNOT_8BYTE_ALIGN_STATICS
  obj_addr_t addr = (obj_addr_t)obj & ~((obj_addr_t)FLAGS);
#else
  obj_addr_t addr = (obj_addr_t)obj;
#endif
  obj_addr_t address;
  unsigned hash = JV_SYNC_HASH(addr);
  hash_entry * he = light_locks + hash;
  _Jv_ThreadId_t self = _Jv_ThreadSelf();
  bool not_mine;

  JvAssert(!(addr & FLAGS));
retry:
  // Acquire the hash table entry lock.
  address = ((he -> address) & ~LOCKED);
  if (!compare_and_swap(&(he -> address), address, address | LOCKED))
    {
      wait_unlocked(he);
      goto retry;
    }

  if (!(address & ~FLAGS))
    not_mine = true;
  else if ((address & ~FLAGS) == addr)
    not_mine = (he -> light_thr_id != self);
  else
    {
      heavy_lock * hl = find_heavy(addr, he);
      not_mine = hl ? _Jv_MutexCheckMonitor(&hl->si.mutex) : true;
    }

  release_set(&(he -> address), address);	// unlock hash entry
  return not_mine;
}
// The rest of these are moderately thin veneers on _Jv_Cond ops.
// The current version of Notify might be able to make the pthread
// call AFTER releasing the lock, thus saving some context switches??
void
java::lang::Object::wait (jlong timeout, jint nanos)
{
#ifdef JV_LINKER_CANNOT_8BYTE_ALIGN_STATICS
  obj_addr_t addr = (obj_addr_t)this & ~((obj_addr_t)FLAGS);
#else
  obj_addr_t addr = (obj_addr_t)this;
#endif
  _Jv_ThreadId_t self = _Jv_ThreadSelf();
  unsigned hash = JV_SYNC_HASH(addr);
  hash_entry * he = light_locks + hash;
  unsigned count;
  obj_addr_t address;
  heavy_lock *hl;

  if (__builtin_expect (timeout < 0 || nanos < 0 || nanos > 999999, false))
    throw new IllegalArgumentException;
retry:
  address = he -> address;
  address &= ~LOCKED;
  if (!compare_and_swap(&(he -> address), address, address | LOCKED))
    {
      wait_unlocked(he);
      goto retry;
    }
  // address does not have the lock bit set.  We hold the lock on he.
  if ((address & ~FLAGS) == addr)
    {
      // Convert to heavyweight.
      if (he -> light_thr_id != self)
	{
#	  ifdef LOCK_DEBUG
	    fprintf(stderr, "Found wrong lightweight lock owner in wait "
			    "address = 0x%lx pid = %d\n", address, getpid());
	    print_he(he);
	    for(;;) {}
#	  endif
	  release_set(&(he -> address), address);
	  throw new IllegalMonitorStateException (JvNewStringLatin1
					("current thread not owner"));
	}
      count = he -> light_count;
      hl = get_heavy(addr, he);
      he -> light_count = 0;
      he -> heavy_count += count + 1;
      for (unsigned i = 0; i <= count; ++i)
	_Jv_MutexLock(&(hl->si.mutex));
      // Again release the he lock after acquiring the mutex.
      he -> light_thr_id = INVALID_THREAD_ID;
      release_set(&(he -> address), HEAVY);	// lightweight lock now unused.
      if (address & REQUEST_CONVERSION)
	_Jv_CondNotify (&(hl->si.condition), &(hl->si.mutex));
    }
  else /* We should hold the heavyweight lock. */
    {
      hl = find_heavy(addr, he);
      release_set(&(he -> address), address);
      if (0 == hl)
	{
#	  ifdef LOCK_DEBUG
	    fprintf(stderr, "Couldn't find heavy lock in wait "
			    "addr = 0x%lx pid = %d\n", addr, getpid());
	    print_he(he);
	    for(;;) {}
#	  endif
	  throw new IllegalMonitorStateException (JvNewStringLatin1
					("current thread not owner"));
	}
      JvAssert(address & HEAVY);
    }
  switch (_Jv_CondWait (&(hl->si.condition), &(hl->si.mutex), timeout, nanos))
    {
      case _JV_NOT_OWNER:
	throw new IllegalMonitorStateException (JvNewStringLatin1
					("current thread not owner"));
      case _JV_INTERRUPTED:
	if (Thread::interrupted ())
	  throw new InterruptedException;
    }
}
void
java::lang::Object::notify (void)
{
#ifdef JV_LINKER_CANNOT_8BYTE_ALIGN_STATICS
  obj_addr_t addr = (obj_addr_t)this & ~((obj_addr_t)FLAGS);
#else
  obj_addr_t addr = (obj_addr_t)this;
#endif
  _Jv_ThreadId_t self = _Jv_ThreadSelf();
  unsigned hash = JV_SYNC_HASH(addr);
  hash_entry * he = light_locks + hash;
  heavy_lock *hl;
  obj_addr_t address;
  int result;

retry:
  address = ((he -> address) & ~LOCKED);
  if (!compare_and_swap(&(he -> address), address, address | LOCKED))
    {
      wait_unlocked(he);
      goto retry;
    }
  if ((address & ~FLAGS) == addr && he -> light_thr_id == self)
    {
      // We hold lightweight lock.  Since it has not
      // been inflated, there are no waiters.
      release_set(&(he -> address), address);	// unlock
      return;
    }
  hl = find_heavy(addr, he);
  // Hl can't disappear since we point to the underlying object.
  // It's important that we release the lock bit before the notify, since
  // otherwise we will try to wake up the target while we still hold the
  // bit.  This results in lock bit contention, which we don't handle
  // well.
  release_set(&(he -> address), address);	// unlock
  if (0 == hl)
    throw new IllegalMonitorStateException(JvNewStringLatin1
					   ("current thread not owner"));
  result = _Jv_CondNotify(&(hl->si.condition), &(hl->si.mutex));
  keep_live(addr);
  if (__builtin_expect (result, 0))
    throw new IllegalMonitorStateException(JvNewStringLatin1
					   ("current thread not owner"));
}
void
java::lang::Object::notifyAll (void)
{
#ifdef JV_LINKER_CANNOT_8BYTE_ALIGN_STATICS
  obj_addr_t addr = (obj_addr_t)this & ~((obj_addr_t)FLAGS);
#else
  obj_addr_t addr = (obj_addr_t)this;
#endif
  _Jv_ThreadId_t self = _Jv_ThreadSelf();
  unsigned hash = JV_SYNC_HASH(addr);
  hash_entry * he = light_locks + hash;
  heavy_lock *hl;
  obj_addr_t address;
  int result;

retry:
  address = (he -> address) & ~LOCKED;
  if (!compare_and_swap(&(he -> address), address, address | LOCKED))
    {
      wait_unlocked(he);
      goto retry;
    }
  hl = find_heavy(addr, he);
  if ((address & ~FLAGS) == addr && he -> light_thr_id == self)
    {
      // We hold lightweight lock.  Since it has not
      // been inflated, there are no waiters.
      release_set(&(he -> address), address);	// unlock
      return;
    }
  release_set(&(he -> address), address);	// unlock
  if (0 == hl)
    throw new IllegalMonitorStateException(JvNewStringLatin1
					   ("current thread not owner"));
  result = _Jv_CondNotifyAll(&(hl->si.condition), &(hl->si.mutex));
  keep_live(addr);
  if (__builtin_expect (result, 0))
    throw new IllegalMonitorStateException(JvNewStringLatin1
					   ("current thread not owner"));
}
// This is declared in Java code and in Object.h.
// It should never be called with JV_HASH_SYNCHRONIZATION.
void
java::lang::Object::sync_init (void)
{
  throw new IllegalMonitorStateException(JvNewStringLatin1
					 ("internal error: sync_init"));
}

// This is called on startup and declared in Object.h.
// For now we just make it a no-op.
void
_Jv_InitializeSyncMutex (void)
{
}

#endif /* JV_HASH_SYNCHRONIZATION */