/* ***** BEGIN LICENSE BLOCK *****
 * Version: MPL 1.1/GPL 2.0/LGPL 2.1
 *
 * The contents of this file are subject to the Mozilla Public License Version
 * 1.1 (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 * http://www.mozilla.org/MPL/
 *
 * Software distributed under the License is distributed on an "AS IS" basis,
 * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
 * for the specific language governing rights and limitations under the
 * License.
 *
 * The Original Code is [Open Source Virtual Machine.].
 *
 * The Initial Developer of the Original Code is
 * Adobe System Incorporated.
 * Portions created by the Initial Developer are Copyright (C) 2004-2006
 * the Initial Developer. All Rights Reserved.
 *
 * Alternatively, the contents of this file may be used under the terms of
 * either the GNU General Public License Version 2 or later (the "GPL"), or
 * the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
 * in which case the provisions of the GPL or the LGPL are applicable instead
 * of those above. If you wish to allow use of your version of this file only
 * under the terms of either the GPL or the LGPL, and not to allow others to
 * use your version of this file under the terms of the MPL, indicate your
 * decision by deleting the provisions above and replace them with the notice
 * and other provisions required by the GPL or the LGPL. If you do not delete
 * the provisions above, a recipient may use your version of this file under
 * the terms of any one of the MPL, the GPL or the LGPL.
 *
 * ***** END LICENSE BLOCK ***** */
#ifndef __vmbase_VMThread__
#define __vmbase_VMThread__
/**
 * Defines the preferred style of block-scoped locking, e.g.:
 *
 * void Foo::foo(int val)
 * {
 *     // Outside critical section
 *
 *     SCOPE_LOCK(m_monitor) {
 *         m_sharedState += val; // Critical section protected by m_monitor
 *     }
 *
 *     // Outside critical section
 * }
 *
 * See the MutexLocker class for full details.
 */
#define SCOPE_LOCK(_m_) if (vmbase::MutexLocker __locker = _m_) {} else
/**
 * Defines the preferred style of block-scoped monitor locking. The given
 * name is bound to allow lexically scoped access to wait/notify functions.
 *
 * void Foo::boo(int val)
 * {
 *     // Outside critical section
 *
 *     SCOPE_LOCK_NAMED(locker, m_monitor) {
 *         m_sharedState += val; // Critical section protected by m_monitor
 *         locker.notifyAll();   // Notify waiters on m_monitor
 *     }
 *
 *     // Outside critical section
 * }
 *
 * See the MonitorLocker class for full details.
 */
#define SCOPE_LOCK_NAMED(_name_, _m_) if (vmbase::MonitorLocker _name_ = _m_) {} else
namespace vmbase {

    /**
     * Class wrapper for a (recursive) mutex synchronization primitive.
     * Native initialization and disposal will occur during object
     * construction and destruction respectively.
     *
     * RecursiveMutex should not normally be used directly;
     * rather, the WaitNotifyMonitor class provides an implicit condition
     * paired with a mutex.
     */
    class RecursiveMutex
    {
        friend class ConditionVariable;
        friend class WaitNotifyMonitor;
        friend class MutexLocker;
        friend class MonitorLocker;

    public:
        RecursiveMutex();
        ~RecursiveMutex();

    private:
        // No copying allowed: undefined semantics
        RecursiveMutex(const RecursiveMutex& mutex);
        const RecursiveMutex& operator=(const RecursiveMutex& mutex);

    private:
        vmpi_mutex_t m_mutex;

#ifdef DEBUG
    public:
        bool isLockedByCurrentThread() const;
    private:
        vmpi_thread_t volatile m_ownerThreadID;
        int m_recursionCount;
#endif // DEBUG
    };
    /**
     * Class wrapper for a condition variable synchronization primitive.
     * Native initialization and disposal will occur during object
     * construction and destruction respectively.
     *
     * NOTE: ConditionVariables should not normally be used directly; rather,
     * the WaitNotifyMonitor class provides an implicit condition
     * paired with a mutex.
     */
    class ConditionVariable
    {
        friend class WaitNotifyMonitor;

        // ConditionVariables should only be used as part of a WaitNotifyMonitor, so keep everything private.
    private:
        ConditionVariable();
        ~ConditionVariable();

        /**
         * Signals a single thread waiting on this condition variable.
         * This function does not imply any fairness policy when selecting the thread to signal.
         * This function does not block.
         */
        void notify();

        /**
         * Signals all threads waiting on this condition variable.
         * This function does not block.
         */
        void notifyAll();

        /**
         * Blocks the calling thread on this condition variable.
         * The calling thread must own the given mutex or the results are undefined (asserted in debug builds).
         * When the thread is blocked, it releases its lock on the mutex.
         * The thread remains blocked until the condition variable is signaled
         * (either individually or via a broadcast), or the specified timeout period has expired.
         * Post-wait, the thread will attempt to re-acquire the mutex. When the mutex is re-acquired,
         * this function will return.
         *
         * Note:
         * - A waiting thread may spuriously awaken without being signaled.
         * - Waiting on a condition variable with a recursively locked mutex results in
         *   undefined behavior (asserted in debug builds).
         * - Waiting on a single condition variable with multiple mutexes results in
         *   undefined behavior.
         *
         * @param mutex The mutex to release whilst waiting and re-acquire when signaled
         * @param timeoutMillis The maximum amount of time to wait to be signaled
         * @return true if the timeout period expired
         */
        bool wait(RecursiveMutex& mutex, int32_t timeoutMillis);

        /**
         * Identical to the timeout version except the thread will wait forever to be signaled.
         */
        void wait(RecursiveMutex& mutex);

        // No copying allowed: undefined semantics
        ConditionVariable(const ConditionVariable& condVar);
        const ConditionVariable& operator=(const ConditionVariable& condVar);

    private:
        vmpi_condvar_t m_condVar;
    };
    /**
     * Base class for objects which require monitor synchronization semantics with a single
     * implicit condition for wait/notify.
     *
     * The general abstraction is that a monitor contains two thread sets: the blocked set and
     * the wait set. Threads in the blocked set compete for the monitor's mutex. The monitor's
     * 'owner' thread (i.e. the one which owns the mutex) can move itself to the wait set by calling
     * one of the wait() functions. After an owner thread has entered the wait set, the monitor's mutex
     * is released and the threads in the blocked set compete to acquire it. Threads in the wait
     * set may only continue execution by moving back to the blocked set and competing with other
     * threads to acquire the mutex. Moving from the wait set to the blocked set occurs via the
     * notify() and notifyAll() operations. Calling notify() on the monitor causes a single thread from
     * the wait set to be moved to the blocked set (any thread could be chosen). Calling notifyAll()
     * on the monitor causes all threads in the wait set to be moved to the blocked set. When a
     * thread acquires the mutex after moving from the wait set to the blocked set, it continues execution
     * by returning from its call to wait().
     *
     * Note that only the owner of the monitor is permitted to call notify() or notifyAll().
     */
    class WaitNotifyMonitor : public RecursiveMutex
    {
        friend class MutexLocker;
        friend class MonitorLocker;

    public:
        WaitNotifyMonitor();
        virtual ~WaitNotifyMonitor();

    protected:
        /**
         * Moves the calling thread (which must be the monitor owner) to the
         * monitor's wait set and releases the mutex.
         * The thread remains in the wait set until it is notified
         * or the specified timeout period has expired.
         * Following notification or timeout, the calling (waiting) thread
         * is moved to the monitor's blocked set to compete for its mutex.
         * Following re-acquisition of the mutex, this function will return.
         *
         * Note:
         * - A waiting thread may spuriously move to the blocked set without
         *   notification or timeout.
         * - Waiting on a recursively locked monitor results in
         *   undefined behavior (asserted in debug builds).
         *
         * @param timeoutMillis The maximum amount of time to wait before moving
         *                      to the blocked set without notification.
         * @return true if the timeout period expired
         */
        bool wait(int32_t timeoutMillis);

        /**
         * Identical to the timeout version except the thread will wait forever to be notified.
         */
        void wait();

        /**
         * Notifies a single thread waiting on this monitor. The notified thread will be moved
         * to the blocked set.
         * This function does not imply any fairness policy when selecting the thread to notify.
         * This function does not block.
         * Only the monitor's owner thread may call this function (asserted in debug builds).
         */
        void notify();

        /**
         * Notifies all threads waiting on this monitor. The threads will be moved
         * to the blocked set.
         * This function does not block.
         * Only the monitor's owner thread may call this function (asserted in debug builds).
         */
        void notifyAll();

    private:
        // No copying allowed: undefined semantics
        WaitNotifyMonitor(const WaitNotifyMonitor& monitor);
        const WaitNotifyMonitor& operator=(const WaitNotifyMonitor& monitor);

    private:
        ConditionVariable m_condVar;
    };
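    /*
     * Illustrative sketch (not part of this header): a minimal one-slot
     * producer/consumer handoff built on WaitNotifyMonitor via the
     * SCOPE_LOCK_NAMED macro. The MessageBox type and its members are
     * hypothetical; the wait()/notifyAll() protocol is the part being
     * demonstrated. Note the while-loop around wait(): waits may end
     * spuriously, so the predicate must be re-checked after waking.
     *
     * class MessageBox {
     * public:
     *     MessageBox() : m_value(0), m_full(false) {}
     *
     *     void put(int msg)
     *     {
     *         SCOPE_LOCK_NAMED(locker, m_monitor) {
     *             m_value = msg;
     *             m_full = true;
     *             locker.notifyAll(); // Wake consumers waiting for a value
     *         }
     *     }
     *
     *     int take()
     *     {
     *         int msg;
     *         SCOPE_LOCK_NAMED(locker, m_monitor) {
     *             while (!m_full)     // Re-check: wakeups may be spurious
     *                 locker.wait();  // Releases m_monitor while blocked
     *             m_full = false;
     *             msg = m_value;
     *         }
     *         return msg;
     *     }
     *
     * private:
     *     vmbase::WaitNotifyMonitor m_monitor;
     *     int m_value;
     *     bool m_full;
     * };
     */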
    /**
     * MutexLocker provides RAII-style locking for mutexes, i.e.
     * the lock/unlock of a mutex can only be performed by the
     * ctor/dtor of a stack-allocated MutexLocker.
     *
     * For example:
     *
     * class Foo
     * {
     *     ...
     *     RecursiveMutex m_mutex;
     * };
     *
     * void Foo::foo(int val)
     * {
     *     MutexLocker locker(m_mutex); // Lock m_mutex
     *     m_sharedState += val;        // Access shared state
     *     // locker's dtor unlocks the mutex
     * }
     *
     * MutexLockers are intended to be used with the
     * SCOPE_LOCK macro, to give synchronized-block
     * sugaring to their declaration and scoping. For
     * example, the above function Foo::foo would be:
     *
     * void Foo::foo(int val)
     * {
     *     SCOPE_LOCK(m_mutex) {
     *         m_sharedState += val;
     *     }
     * }
     */
    class MutexLocker
    {
    public:
        MutexLocker(RecursiveMutex& mutex);
        ~MutexLocker();

        operator bool () const {return false;} // For the SCOPE_LOCK* macros

    private:
        // No copying allowed: undefined semantics
        const MutexLocker& operator=(const MutexLocker& locker);
        // Force stack allocation
        void* operator new(size_t);

    private:
        RecursiveMutex& m_mutex;
    };
    /**
     * MonitorLocker provides RAII-style locking for monitors.
     * See MutexLocker for more details.
     *
     * A MonitorLocker also provides access to a locked monitor's
     * wait and notify functions, providing some guarantee that
     * only the monitor's owner thread will perform these operations.
     *
     * For example:
     *
     * class Foo
     * {
     *     ...
     *     WaitNotifyMonitor m_monitor;
     * };
     *
     * void Foo::foo(int val)
     * {
     *     MonitorLocker locker(m_monitor); // Lock m_monitor
     *     while (m_sharedState < val) {
     *         m_sharedState += val;
     *         locker.notifyAll(); // Notify all waiters on m_monitor
     *         locker.wait();      // Wait on m_monitor
     *     }
     * }
     *
     * or, using the preferred SCOPE_LOCK_NAMED macro:
     *
     * void Foo::foo(int val)
     * {
     *     SCOPE_LOCK_NAMED(locker, m_monitor) { // Must give a name if the locker will be referenced
     *         while (m_sharedState < val) {
     *             m_sharedState += val;
     *             locker.notifyAll(); // Notify all waiters on locked m_monitor
     *             locker.wait();      // Wait on locked m_monitor
     *         }
     *     }
     * }
     */
    class MonitorLocker
    {
    public:
        MonitorLocker(WaitNotifyMonitor& monitor);
        ~MonitorLocker();

        // These functions just delegate to those of the locked WaitNotifyMonitor.
        // See WaitNotifyMonitor for their documentation.
        bool wait(int32_t timeoutMillis);
        void wait();
        void notify();
        void notifyAll();

        operator bool () const {return false;} // For the SCOPE_LOCK* macros

    private:
        // No copying allowed: undefined semantics
        const MonitorLocker& operator=(const MonitorLocker& locker);
        // Force stack allocation
        void* operator new(size_t);

    private:
        WaitNotifyMonitor& m_monitor;
    };
    /**
     * A collection of memory barrier (fence) operations.
     *
     * For a memory barrier overview see Mac OS X's developer docs:
     * http://developer.apple.com/library/mac/#documentation/Cocoa/Conceptual/Multithreading/ThreadSafety/ThreadSafety.html#//apple_ref/doc/uid/10000057i-CH8-SW1
     * Note that for all platforms supported by Tamarin,
     * vmbase::MemoryBarrier::readWrite() has equivalent semantics
     * to that of OS X's OSMemoryBarrier().
     *
     * For a more hardware-oriented discussion of why memory barriers are needed, see:
     * 'Memory Barriers: a Hardware View for Software Hackers' (2009)
     * by Paul E. McKenney.
     * http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.152.5245
     *
     * An older, but seminal, report on memory models for shared-memory
     * multi-processing (and their implications for programmers and compilers) is:
     * 'Shared memory consistency models: A tutorial' (1996)
     * by Sarita V. Adve and Kourosh Gharachorloo.
     * http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.106.5742
     */
    class MemoryBarrier
    {
    public:
        /**
         * Inserts the strongest hardware read/write memory barrier provided by the platform.
         * The minimum guarantee provided by this barrier is:
         * - All load and store operations executed before the barrier will appear to
         *   complete before all load and store operations after the barrier.
         *
         * Note that this function implies an equivalent compiler memory barrier in addition
         * to the hardware barrier.
         *
         * On uniprocessor systems this will either be a no-op or be translated into
         * a compiler memory barrier.
         */
        static void readWrite();
    };
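    /*
     * Illustrative sketch (not part of this header): publishing a value to
     * another thread with an explicit barrier. The names 'g_payload' and
     * 'g_ready' are hypothetical. The barrier orders the payload store
     * before the flag store, so a reader that observes the flag (and issues
     * its own barrier before reading) will also observe the payload.
     *
     * static int32_t g_payload = 0;
     * static volatile int32_t g_ready = 0;
     *
     * void publisher()
     * {
     *     g_payload = 42;                      // Store the data first
     *     vmbase::MemoryBarrier::readWrite();  // Order payload before flag
     *     g_ready = 1;                         // Then publish the flag
     * }
     *
     * void consumer()
     * {
     *     while (!g_ready) { }                 // Spin until published
     *     vmbase::MemoryBarrier::readWrite();  // Order flag before payload
     *     int32_t value = g_payload;           // Guaranteed to see 42
     *     (void)value;
     * }
     */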
    /**
     * A collection of atomic operations.
     */
    class AtomicOps
    {
    public:
        /**
         * Performs an atomic Compare-And-Swap operation.
         * If the contents at 'address' are equal to 'oldValue', then they
         * are replaced with 'newValue'.
         *
         * The 'WithBarrier' version includes a memory barrier with
         * the ordering guarantees of MemoryBarrier::readWrite().
         *
         * @param oldValue The value to compare
         * @param newValue The value to swap in, if oldValue is the current value
         * @param address The address of the value to update
         * @return true if the update was successful
         */
        static bool compareAndSwap32(int32_t oldValue, int32_t newValue, volatile int32_t* address);
        static bool compareAndSwap32WithBarrier(int32_t oldValue, int32_t newValue, volatile int32_t* address);
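        /*
         * Illustrative sketch (not part of this header): a lock-free
         * read-modify-write implemented as a Compare-And-Swap retry loop.
         * The function name and its saturating-add behavior are hypothetical;
         * the CAS loop shape is the technique being shown. The swap fails if
         * another thread updated the value between the read and the CAS, in
         * which case we simply re-read and retry.
         *
         * int32_t saturatingAdd(volatile int32_t* counter, int32_t delta, int32_t limit)
         * {
         *     for (;;) {
         *         const int32_t oldValue = *counter;
         *         int32_t newValue = oldValue + delta;
         *         if (newValue > limit)
         *             newValue = limit;
         *         if (vmbase::AtomicOps::compareAndSwap32WithBarrier(oldValue, newValue, counter))
         *             return newValue; // Our update won the race
         *         // Lost the race: another thread changed *counter; retry
         *     }
         * }
         */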
        /**
         * Performs an atomic in-place bitwise OR.
         *
         * The 'WithBarrier' versions include a memory barrier with
         * the ordering guarantees of MemoryBarrier::readWrite().
         *
         * @param mask The bit mask to apply
         * @param address The address of the value to be OR'ed
         * @return the updated value following the OR. The 'Prev' suffixed versions return the value before the OR.
         */
        static int32_t or32(uint32_t mask, volatile int32_t* address);
        static int32_t or32WithBarrier(uint32_t mask, volatile int32_t* address);
        static int32_t or32Prev(uint32_t mask, volatile int32_t* address);
        static int32_t or32WithBarrierPrev(uint32_t mask, volatile int32_t* address);

        /**
         * Performs an atomic in-place bitwise AND.
         *
         * The 'WithBarrier' versions include a memory barrier with
         * the ordering guarantees of MemoryBarrier::readWrite().
         *
         * @param mask The bit mask to apply
         * @param address The address of the value to be AND'ed
         * @return the updated value following the AND. The 'Prev' suffixed versions return the value before the AND.
         */
        static int32_t and32(uint32_t mask, volatile int32_t* address);
        static int32_t and32WithBarrier(uint32_t mask, volatile int32_t* address);
        static int32_t and32Prev(uint32_t mask, volatile int32_t* address);
        static int32_t and32WithBarrierPrev(uint32_t mask, volatile int32_t* address);
    };
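    /*
     * Illustrative sketch (not part of this header): using the atomic
     * bitwise operations to maintain a shared flag word. The flag constant
     * and globals are hypothetical. or32WithBarrierPrev's return value (the
     * pre-OR contents) reveals whether the flag was already set, so set-once
     * work can be claimed by exactly one thread.
     *
     * static const uint32_t kInitialized = 1 << 0;
     * static volatile int32_t g_flags = 0;
     *
     * bool claimInitialization()
     * {
     *     // Returns true for exactly one caller: the one that flipped the bit
     *     return (vmbase::AtomicOps::or32WithBarrierPrev(kInitialized, &g_flags) & kInitialized) == 0;
     * }
     *
     * void clearInitialized()
     * {
     *     vmbase::AtomicOps::and32WithBarrier(~kInitialized, &g_flags);
     * }
     */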
    /**
     * An atomic 32-bit integer counter.
     * The set, inc, dec, incAndGet and decAndGet operations include a memory
     * barrier with the ordering guarantees of MemoryBarrier::readWrite().
     */
    class AtomicCounter32
    {
    public:
        AtomicCounter32(int32_t value = 0);

        void set(int32_t value);
        int32_t get() const;
        void inc();
        int32_t incAndGet();
        void dec();
        int32_t decAndGet();

        int32_t operator++() {return incAndGet();}
        int32_t operator--() {return decAndGet();}
        int32_t operator++(int32_t) {return incAndGet() - 1;}
        int32_t operator--(int32_t) {return decAndGet() + 1;}
        operator int32_t() const {return get();}

    private:
        volatile int32_t m_value;
    };
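    /*
     * Illustrative sketch (not part of this header): AtomicCounter32 as a
     * thread-safe statistics counter. The names are hypothetical. The
     * prefix/postfix operators behave as for a plain integer, but each
     * update is atomic and carries a readWrite() barrier.
     *
     * static vmbase::AtomicCounter32 g_liveObjects;
     *
     * void onObjectCreated()
     * {
     *     ++g_liveObjects;                     // Atomic increment
     * }
     *
     * void onObjectDestroyed()
     * {
     *     if (g_liveObjects.decAndGet() == 0)  // Atomic decrement; test result
     *     {
     *         // Last object gone
     *     }
     * }
     */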
    /**
     * Class wrapper for the platform thread-local storage implementation.
     * The type T must cast to and from void*.
     * The default per-thread value of a VMThreadLocal is 0.
     */
    template <typename T>
    class VMThreadLocal
    {
    public:
        VMThreadLocal()
            : m_isInitialized(false)
        {
            m_isInitialized = VMPI_tlsCreate(&m_tlsID);
            AvmAssert(m_isInitialized);
            AvmAssert(get() == 0);
        }

        ~VMThreadLocal()
        {
            if (m_isInitialized) {
                m_isInitialized = false;
                VMPI_tlsDestroy(m_tlsID);
            }
        }

        bool isInitialized() const
        {
            return m_isInitialized;
        }

        REALLY_INLINE void set(T value)
        {
            AvmAssert(m_isInitialized);
            VMPI_tlsSetValue(m_tlsID, (void*) (value));
        }

        REALLY_INLINE T get() const
        {
            AvmAssert(m_isInitialized);
            return (T) (VMPI_tlsGetValue(m_tlsID));
        }

        REALLY_INLINE T operator=(T t)
        {
            set(t);
            return t;
        }

        REALLY_INLINE operator T() const
        {
            return get();
        }

        REALLY_INLINE T operator->() const
        {
            return get();
        }

    private:
        // No copying allowed: undefined semantics
        VMThreadLocal(const VMThreadLocal& threadLocal);
        const VMThreadLocal& operator=(const VMThreadLocal& threadLocal);

    private:
        uintptr_t m_tlsID;
        bool m_isInitialized;
    };
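    /*
     * Illustrative sketch (not part of this header): a per-thread pointer
     * slot. T must fit in a void* (pointers, or integral types no wider
     * than a pointer). The RequestContext type and globals are hypothetical.
     *
     * struct RequestContext { int id; };
     * static vmbase::VMThreadLocal<RequestContext*> g_context;
     *
     * void beginRequest(RequestContext* ctx)
     * {
     *     g_context = ctx;        // operator= delegates to set()
     * }
     *
     * int currentRequestId()
     * {
     *     // operator-> delegates to get(); assumes beginRequest()
     *     // has been called on this thread
     *     return g_context->id;
     * }
     */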
    /**
     * Base class for closures/functors executed by VMThreads.
     */
    class Runnable
    {
    public:
        virtual ~Runnable() {}
        virtual void run() = 0;
    };
    /**
     * VMThread builds on VMPI to provide a simple, object-oriented
     * thread abstraction.
     *
     * Calling start() on a VMThread instance creates a new native thread.
     * The thread begins execution from the run() method of a Runnable object,
     * which can be passed to the VMThread constructor. If no Runnable is passed
     * in the constructor then the VMThread calls its own run() method, which can
     * be overridden by subclasses.
     *
     * VMThread provides an interface for joining with other VMThreads,
     * putting VMThreads to sleep, and querying the identity of the running VMThread.
     *
     * Note that a VMThread instance does not depend on the presence of, or have
     * any affinity with, any AvmCore instance.
     */
    class VMThread : public Runnable
    {
    public:
        enum ThreadState
        {
            NOT_STARTED, // The VMThread has been constructed, but it does not yet represent a running native thread.
            RUNNABLE,    // The VMThread's native thread is running, waiting, blocked or sleeping.
            TERMINATED   // The VMThread's native thread has exited.
        };

        enum ThreadPriority
        {
            LOW_PRIORITY,
            NORMAL_PRIORITY,
            HIGH_PRIORITY
        };
        /**
         * Creates a VMThread with a system-assigned name.
         * When started, the VMThread will call its own run() method.
         */
        VMThread();

        /**
         * Creates a VMThread with the given name.
         * When started, the VMThread will call its own run() method.
         *
         * @param name An identifier for the VMThread (makes own copy of the string)
         */
        VMThread(const char* name);

        /**
         * Creates a VMThread with a system-assigned name.
         * When started, the VMThread will call the given Runnable's run() method.
         *
         * @param runnable The Runnable to call when the thread starts up
         */
        VMThread(Runnable* runnable);

        /**
         * Creates a VMThread with the given name.
         * When started, the VMThread will call the given Runnable's run() method.
         *
         * @param name An identifier for the VMThread (makes own copy of the string)
         * @param runnable The Runnable to call when the thread starts up
         */
        VMThread(const char* name, Runnable* runnable);
        /**
         * Currently a VMThread's native thread cannot free the VMThread.
         * The free must be performed by another thread, which knows how
         * the thread was allocated and when it is safe to free.
         * Clearly, 'safe to free' means that the freeing thread and all
         * other threads will not reference the VMThread again.
         * Hence the dtor provides some guarantee of safety by first waiting
         * for all threads joining this VMThread to have exited the join
         * operation before continuing.
         */
        virtual ~VMThread();
        /**
         * Creates a new native thread to begin execution at this VMThread's Runnable.
         * Calling VMThread::currentThread() from the new thread will return this VMThread.
         *
         * @return true if the thread successfully began execution
         */
        bool start();
        /**
         * Identical to start(), but starts the thread with the given priority.
         * Note that if the platform does not support thread priorities then
         * the argument will be ignored.
         * As of Nov. 2010, the POSIX implementations do not honor this argument.
         *
         * @param priority The thread's priority (low/normal/high)
         * @return true if the thread successfully began execution
         */
        bool start(ThreadPriority priority);
        /**
         * Identical to start(ThreadPriority priority), but starts the thread with the given
         * stack and guard area sizes.
         * Note that if the platform does not support either parameter then any supplied
         * arguments will be ignored.
         * As of Nov. 2010, support should be:
         * POSIX, win32 (XP version APIs)
         *
         * @param priority The thread's priority (low/normal/high)
         * @param stackSize The thread's stack size
         * @param guardSize The thread's guard area size
         * @return true if the thread successfully began execution
         */
        bool start(ThreadPriority priority, size_t stackSize, size_t guardSize);
        /**
         * Causes the calling thread to block until this VMThread's native thread has exited.
         * This function is thread-safe, i.e. multiple threads can concurrently join a thread.
         *
         * If this VMThread's native thread has already exited, then the function returns immediately.
         */
        void join();
        /**
         * The default run() implementation immediately returns.
         */
        virtual void run();

        const char* getName() const;
        /**
         * Causes the calling thread to sleep for the given number of milliseconds.
         * Note that the thread may spuriously awaken before the timeout.
         *
         * @param timeout The length of time to sleep (in milliseconds)
         */
        static void sleep(int32_t timeout);
        /**
         * Returns the calling thread's VMThread.
         *
         * If the calling thread was not created via VMThread::start() then the
         * returned value will be NULL.
         *
         * @return The calling thread's VMThread, or NULL if the calling thread
         *         was not started with VMThread::start()
         */
        static VMThread* currentThread();
    private:
        void setNameFrom(const char* name);
        static vmpi_thread_rtn_t VMPI_THREAD_START_CC startInternal(vmpi_thread_arg_t args);
        bool start(vmpi_thread_attr_t* attr);
        // No copying allowed: undefined semantics
        VMThread(const VMThread& thread);
        const VMThread& operator=(const VMThread& thread);
    private:
        Runnable* m_runnable;
        vmpi_thread_t m_threadID;
        WaitNotifyMonitor m_joinMonitor;

        static AtomicCounter32 m_nextNameSuffix;
        static VMThreadLocal<VMThread*> m_currentThread; // Keep each thread's VMThread in TLS.
    };
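    /*
     * Illustrative sketch (not part of this header): running a task on a
     * new thread. The Counter class is hypothetical; the Runnable/VMThread
     * wiring and start()/join() protocol are the parts being demonstrated.
     *
     * class Counter : public vmbase::Runnable
     * {
     * public:
     *     virtual void run()
     *     {
     *         // Executes on the new native thread
     *         for (int i = 0; i < 1000; i++)
     *             ++m_total;
     *     }
     *     vmbase::AtomicCounter32 m_total;
     * };
     *
     * void example()
     * {
     *     Counter counter;
     *     vmbase::VMThread thread("counter-thread", &counter);
     *     if (thread.start()) { // Spawns the native thread
     *         thread.join();    // Block until run() returns and the thread exits
     *     }
     * }
     */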
}

#endif /* __vmbase_VMThread__ */