1 // Copyright (c) 2011 The Chromium Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
5 #include "base/synchronization/condition_variable.h"
10 #include "base/compiler_specific.h"
11 #include "base/synchronization/lock.h"
12 #include "base/threading/thread_restrictions.h"
13 #include "base/time/time.h"
16 // We can't use the linker supported delay-load for kernel32 so all this
17 // cruft here is to manually late-bind the needed functions.
18 typedef void (WINAPI
*InitializeConditionVariableFn
)(PCONDITION_VARIABLE
);
19 typedef BOOL (WINAPI
*SleepConditionVariableCSFn
)(PCONDITION_VARIABLE
,
20 PCRITICAL_SECTION
, DWORD
);
21 typedef void (WINAPI
*WakeConditionVariableFn
)(PCONDITION_VARIABLE
);
22 typedef void (WINAPI
*WakeAllConditionVariableFn
)(PCONDITION_VARIABLE
);
24 InitializeConditionVariableFn initialize_condition_variable_fn
;
25 SleepConditionVariableCSFn sleep_condition_variable_fn
;
26 WakeConditionVariableFn wake_condition_variable_fn
;
27 WakeAllConditionVariableFn wake_all_condition_variable_fn
;
29 bool BindVistaCondVarFunctions() {
30 HMODULE kernel32
= GetModuleHandleA("kernel32.dll");
31 initialize_condition_variable_fn
=
32 reinterpret_cast<InitializeConditionVariableFn
>(
33 GetProcAddress(kernel32
, "InitializeConditionVariable"));
34 if (!initialize_condition_variable_fn
)
36 sleep_condition_variable_fn
=
37 reinterpret_cast<SleepConditionVariableCSFn
>(
38 GetProcAddress(kernel32
, "SleepConditionVariableCS"));
39 if (!sleep_condition_variable_fn
)
41 wake_condition_variable_fn
=
42 reinterpret_cast<WakeConditionVariableFn
>(
43 GetProcAddress(kernel32
, "WakeConditionVariable"));
44 if (!wake_condition_variable_fn
)
46 wake_all_condition_variable_fn
=
47 reinterpret_cast<WakeAllConditionVariableFn
>(
48 GetProcAddress(kernel32
, "WakeAllConditionVariable"));
49 if (!wake_all_condition_variable_fn
)
57 // Abstract base class of the pimpl idiom.
58 class ConditionVarImpl
{
60 virtual ~ConditionVarImpl() {};
61 virtual void Wait() = 0;
62 virtual void TimedWait(const TimeDelta
& max_time
) = 0;
63 virtual void Broadcast() = 0;
64 virtual void Signal() = 0;
67 ///////////////////////////////////////////////////////////////////////////////
68 // Windows Vista and Win7 implementation.
69 ///////////////////////////////////////////////////////////////////////////////
71 class WinVistaCondVar
: public ConditionVarImpl
{
73 WinVistaCondVar(Lock
* user_lock
);
74 ~WinVistaCondVar() override
{}
75 // Overridden from ConditionVarImpl.
77 void TimedWait(const TimeDelta
& max_time
) override
;
78 void Broadcast() override
;
79 void Signal() override
;
82 base::Lock
& user_lock_
;
83 CONDITION_VARIABLE cv_
;
86 WinVistaCondVar::WinVistaCondVar(Lock
* user_lock
)
87 : user_lock_(*user_lock
) {
88 initialize_condition_variable_fn(&cv_
);
92 void WinVistaCondVar::Wait() {
93 TimedWait(TimeDelta::FromMilliseconds(INFINITE
));
96 void WinVistaCondVar::TimedWait(const TimeDelta
& max_time
) {
97 base::ThreadRestrictions::AssertWaitAllowed();
98 DWORD timeout
= static_cast<DWORD
>(max_time
.InMilliseconds());
99 CRITICAL_SECTION
* cs
= user_lock_
.lock_
.native_handle();
102 user_lock_
.CheckHeldAndUnmark();
105 if (FALSE
== sleep_condition_variable_fn(&cv_
, cs
, timeout
)) {
106 DCHECK(GetLastError() != WAIT_TIMEOUT
);
109 #if !defined(NDEBUG) || defined(DCHECK_ALWAYS_ON)
110 user_lock_
.CheckUnheldAndMark();
114 void WinVistaCondVar::Broadcast() {
115 wake_all_condition_variable_fn(&cv_
);
118 void WinVistaCondVar::Signal() {
119 wake_condition_variable_fn(&cv_
);
122 ///////////////////////////////////////////////////////////////////////////////
123 // Windows XP implementation.
124 ///////////////////////////////////////////////////////////////////////////////
126 class WinXPCondVar
: public ConditionVarImpl
{
128 WinXPCondVar(Lock
* user_lock
);
129 ~WinXPCondVar() override
;
130 // Overridden from ConditionVarImpl.
131 void Wait() override
;
132 void TimedWait(const TimeDelta
& max_time
) override
;
133 void Broadcast() override
;
134 void Signal() override
;
136 // Define Event class that is used to form circularly linked lists.
137 // The list container is an element with NULL as its handle_ value.
138 // The actual list elements have a non-zero handle_ value.
139 // All calls to methods MUST be done under protection of a lock so that links
140 // can be validated. Without the lock, some links might asynchronously
141 // change, and the assertions would fail (as would list change operations).
144 // Default constructor with no arguments creates a list container.
148 // InitListElement transitions an instance from a container, to an element.
149 void InitListElement();
151 // Methods for use on lists.
152 bool IsEmpty() const;
153 void PushBack(Event
* other
);
157 // Methods for use on list elements.
159 HANDLE
handle() const;
160 // Pull an element from a list (if it's in one).
163 // Method for use on a list element or on a list.
164 bool IsSingleton() const;
167 // Provide pre/post conditions to validate correct manipulations.
168 bool ValidateAsDistinct(Event
* other
) const;
169 bool ValidateAsItem() const;
170 bool ValidateAsList() const;
171 bool ValidateLinks() const;
176 DISALLOW_COPY_AND_ASSIGN(Event
);
179 // Note that RUNNING is an unlikely number to have in RAM by accident.
180 // This helps with defensive destructor coding in the face of user error.
181 enum RunState
{ SHUTDOWN
= 0, RUNNING
= 64213 };
183 // Internal implementation methods supporting Wait().
184 Event
* GetEventForWaiting();
185 void RecycleEvent(Event
* used_event
);
189 // Private critical section for access to member data.
190 base::Lock internal_lock_
;
192 // Lock that is acquired before calling Wait().
193 base::Lock
& user_lock_
;
195 // Events that threads are blocked on.
198 // Free list for old events.
199 Event recycling_list_
;
200 int recycling_list_size_
;
202 // The number of allocated, but not yet deleted events.
203 int allocation_counter_
;
206 WinXPCondVar::WinXPCondVar(Lock
* user_lock
)
207 : run_state_(RUNNING
),
208 user_lock_(*user_lock
),
209 recycling_list_size_(0),
210 allocation_counter_(0) {
214 WinXPCondVar::~WinXPCondVar() {
215 AutoLock
auto_lock(internal_lock_
);
216 run_state_
= SHUTDOWN
; // Prevent any more waiting.
218 DCHECK_EQ(recycling_list_size_
, allocation_counter_
);
219 if (recycling_list_size_
!= allocation_counter_
) { // Rare shutdown problem.
220 // There are threads of execution still in this->TimedWait() and yet the
221 // caller has instigated the destruction of this instance :-/.
222 // A common reason for such "overly hasty" destruction is that the caller
223 // was not willing to wait for all the threads to terminate. Such hasty
224 // actions are a violation of our usage contract, but we'll give the
225 // waiting thread(s) one last chance to exit gracefully (prior to our
227 // Note: waiting_list_ *might* be empty, but recycling is still pending.
228 AutoUnlock
auto_unlock(internal_lock_
);
229 Broadcast(); // Make sure all waiting threads have been signaled.
230 Sleep(10); // Give threads a chance to grab internal_lock_.
231 // All contained threads should be blocked on user_lock_ by now :-).
232 } // Reacquire internal_lock_.
234 DCHECK_EQ(recycling_list_size_
, allocation_counter_
);
237 void WinXPCondVar::Wait() {
238 // Default to "wait forever" timing, which means have to get a Signal()
239 // or Broadcast() to come out of this wait state.
240 TimedWait(TimeDelta::FromMilliseconds(INFINITE
));
243 void WinXPCondVar::TimedWait(const TimeDelta
& max_time
) {
244 base::ThreadRestrictions::AssertWaitAllowed();
245 Event
* waiting_event
;
248 AutoLock
auto_lock(internal_lock_
);
249 if (RUNNING
!= run_state_
) return; // Destruction in progress.
250 waiting_event
= GetEventForWaiting();
251 handle
= waiting_event
->handle();
253 } // Release internal_lock.
256 AutoUnlock
unlock(user_lock_
); // Release caller's lock
257 WaitForSingleObject(handle
, static_cast<DWORD
>(max_time
.InMilliseconds()));
258 // Minimize spurious signal creation window by recycling asap.
259 AutoLock
auto_lock(internal_lock_
);
260 RecycleEvent(waiting_event
);
261 // Release internal_lock_
262 } // Reacquire callers lock to depth at entry.
265 // Broadcast() is guaranteed to signal all threads that were waiting (i.e., had
266 // a cv_event internally allocated for them) before Broadcast() was called.
267 void WinXPCondVar::Broadcast() {
268 std::stack
<HANDLE
> handles
; // See FAQ-question-10.
270 AutoLock
auto_lock(internal_lock_
);
271 if (waiting_list_
.IsEmpty())
273 while (!waiting_list_
.IsEmpty())
274 // This is not a leak from waiting_list_. See FAQ-question 12.
275 handles
.push(waiting_list_
.PopBack()->handle());
276 } // Release internal_lock_.
277 while (!handles
.empty()) {
278 SetEvent(handles
.top());
283 // Signal() will select one of the waiting threads, and signal it (signal its
284 // cv_event). For better performance we signal the thread that went to sleep
285 // most recently (LIFO). If we want fairness, then we wake the thread that has
286 // been sleeping the longest (FIFO).
287 void WinXPCondVar::Signal() {
290 AutoLock
auto_lock(internal_lock_
);
291 if (waiting_list_
.IsEmpty())
292 return; // No one to signal.
293 // Only performance option should be used.
294 // This is not a leak from waiting_list. See FAQ-question 12.
295 handle
= waiting_list_
.PopBack()->handle(); // LIFO.
296 } // Release internal_lock_.
300 // GetEventForWaiting() provides a unique cv_event for any caller that needs to
301 // wait. This means that (worst case) we may over time create as many cv_event
302 // objects as there are threads simultaneously using this instance's Wait()
304 WinXPCondVar::Event
* WinXPCondVar::GetEventForWaiting() {
305 // We hold internal_lock, courtesy of Wait().
307 if (0 == recycling_list_size_
) {
308 DCHECK(recycling_list_
.IsEmpty());
309 cv_event
= new Event();
310 cv_event
->InitListElement();
311 allocation_counter_
++;
312 DCHECK(cv_event
->handle());
314 cv_event
= recycling_list_
.PopFront();
315 recycling_list_size_
--;
317 waiting_list_
.PushBack(cv_event
);
321 // RecycleEvent() takes a cv_event that was previously used for Wait()ing, and
322 // recycles it for use in future Wait() calls for this or other threads.
323 // Note that there is a tiny chance that the cv_event is still signaled when we
324 // obtain it, and that can cause spurious signals (if/when we re-use the
325 // cv_event), but such is quite rare (see FAQ-question-5).
326 void WinXPCondVar::RecycleEvent(Event
* used_event
) {
327 // We hold internal_lock, courtesy of Wait().
328 // If the cv_event timed out, then it is necessary to remove it from
329 // waiting_list_. If it was selected by Broadcast() or Signal(), then it is
331 used_event
->Extract(); // Possibly redundant
332 recycling_list_
.PushBack(used_event
);
333 recycling_list_size_
++;
335 //------------------------------------------------------------------------------
336 // The next section provides the implementation for the private Event class.
337 //------------------------------------------------------------------------------
339 // Event provides a doubly-linked-list of events for use exclusively by the
340 // ConditionVariable class.
342 // This custom container was crafted because no simple combination of STL
343 // classes appeared to support the functionality required. The specific
344 // unusual requirement for a linked-list-class is support for the Extract()
345 // method, which can remove an element from a list, potentially for insertion
346 // into a second list. Most critically, the Extract() method is idempotent,
347 // turning the indicated element into an extracted singleton whether it was
348 // contained in a list or not. This functionality allows one (or more) of
349 // threads to do the extraction. The iterator that identifies this extractable
350 // element (in this case, a pointer to the list element) can be used after
351 // arbitrary manipulation of the (possibly) enclosing list container. In
352 // general, STL containers do not provide iterators that can be used across
353 // modifications (insertions/extractions) of the enclosing containers, and
354 // certainly don't provide iterators that can be used if the identified
355 // element is *deleted* (removed) from the container.
357 // It is possible to use multiple redundant containers, such as an STL list,
358 // and an STL map, to achieve similar container semantics. This container has
359 // only O(1) methods, while the corresponding (multiple) STL container approach
360 // would have more complex O(log(N)) methods (yeah... N isn't that large).
361 // Multiple containers also makes correctness more difficult to assert, as
362 // data is redundantly stored and maintained, which is generally evil.
364 WinXPCondVar::Event::Event() : handle_(0) {
365 next_
= prev_
= this; // Self referencing circular.
368 WinXPCondVar::Event::~Event() {
370 // This is the list holder
372 Event
* cv_event
= PopFront();
373 DCHECK(cv_event
->ValidateAsItem());
377 DCHECK(IsSingleton());
379 int ret_val
= CloseHandle(handle_
);
384 // Change a container instance permanently into an element of a list.
385 void WinXPCondVar::Event::InitListElement() {
387 handle_
= CreateEvent(NULL
, false, false, NULL
);
391 // Methods for use on lists.
392 bool WinXPCondVar::Event::IsEmpty() const {
393 DCHECK(ValidateAsList());
394 return IsSingleton();
397 void WinXPCondVar::Event::PushBack(Event
* other
) {
398 DCHECK(ValidateAsList());
399 DCHECK(other
->ValidateAsItem());
400 DCHECK(other
->IsSingleton());
401 // Prepare other for insertion.
402 other
->prev_
= prev_
;
405 prev_
->next_
= other
;
407 DCHECK(ValidateAsDistinct(other
));
410 WinXPCondVar::Event
* WinXPCondVar::Event::PopFront() {
411 DCHECK(ValidateAsList());
412 DCHECK(!IsSingleton());
413 return next_
->Extract();
416 WinXPCondVar::Event
* WinXPCondVar::Event::PopBack() {
417 DCHECK(ValidateAsList());
418 DCHECK(!IsSingleton());
419 return prev_
->Extract();
422 // Methods for use on list elements.
424 HANDLE
WinXPCondVar::Event::handle() const {
425 DCHECK(ValidateAsItem());
429 // Pull an element from a list (if it's in one).
430 WinXPCondVar::Event
* WinXPCondVar::Event::Extract() {
431 DCHECK(ValidateAsItem());
432 if (!IsSingleton()) {
433 // Stitch neighbors together.
434 next_
->prev_
= prev_
;
435 prev_
->next_
= next_
;
436 // Make extractee into a singleton.
437 prev_
= next_
= this;
439 DCHECK(IsSingleton());
443 // Method for use on a list element or on a list.
444 bool WinXPCondVar::Event::IsSingleton() const {
445 DCHECK(ValidateLinks());
446 return next_
== this;
449 // Provide pre/post conditions to validate correct manipulations.
450 bool WinXPCondVar::Event::ValidateAsDistinct(Event
* other
) const {
451 return ValidateLinks() && other
->ValidateLinks() && (this != other
);
454 bool WinXPCondVar::Event::ValidateAsItem() const {
455 return (0 != handle_
) && ValidateLinks();
458 bool WinXPCondVar::Event::ValidateAsList() const {
459 return (0 == handle_
) && ValidateLinks();
462 bool WinXPCondVar::Event::ValidateLinks() const {
463 // Make sure both of our neighbors have links that point back to us.
464 // We don't do the O(n) check and traverse the whole loop, and instead only
465 // do a local check to (and returning from) our immediate neighbors.
466 return (next_
->prev_
== this) && (prev_
->next_
== this);
471 FAQ On WinXPCondVar subtle implementation details:
473 1) What makes this problem subtle? Please take a look at "Strategies
474 for Implementing POSIX Condition Variables on Win32" by Douglas
475 C. Schmidt and Irfan Pyarali.
476 http://www.cs.wustl.edu/~schmidt/win32-cv-1.html It includes
477 discussions of numerous flawed strategies for implementing this
478 functionality. I'm not convinced that even the final proposed
479 implementation has semantics that are as nice as this implementation
480 (especially with regard to Broadcast() and the impact on threads that
481 try to Wait() after a Broadcast() has been called, but before all the
482 original waiting threads have been signaled).
484 2) Why can't you use a single wait_event for all threads that call
485 Wait()? See FAQ-question-1, or consider the following: If a single
486 event were used, then numerous threads calling Wait() could release
487 their cs locks, and be preempted just before calling
488 WaitForSingleObject(). If a call to Broadcast() was then presented on
489 a second thread, it would be impossible to actually signal all
490 waiting(?) threads. Some number of SetEvent() calls *could* be made,
491 but there could be no guarantee that those led to more than one
492 signaled thread (SetEvent()'s may be discarded after the first!), and
493 there could be no guarantee that the SetEvent() calls didn't just
494 awaken "other" threads that hadn't even started waiting yet (oops).
495 Without any limit on the number of requisite SetEvent() calls, the
496 system would be forced to do many such calls, allowing many new waits
497 to receive spurious signals.
499 3) How does this implementation cause spurious signal events? The
500 cause in this implementation involves a race between a signal via
501 time-out and a signal via Signal() or Broadcast(). The series of
502 actions leading to this are:
504 a) Timer fires, and a waiting thread exits the line of code:
506 WaitForSingleObject(waiting_event, max_time.InMilliseconds());
508 b) That thread (in (a)) is randomly pre-empted after the above line,
509 leaving the waiting_event reset (unsignaled) and still in the
512 c) A call to Signal() (or Broadcast()) on a second thread proceeds, and
513 selects the waiting cv_event (identified in step (b)) as the event to revive
514 via a call to SetEvent().
516 d) The Signal() method (step c) calls SetEvent() on waiting_event (step b).
518 e) The waiting cv_event (step b) is now signaled, but no thread is
521 f) When that waiting_event (step b) is reused, it will immediately
522 be signaled (spuriously).
525 4) Why do you recycle events, and cause spurious signals? First off,
526 the spurious events are very rare. They can only (I think) appear
527 when the race described in FAQ-question-3 takes place. This should be
528 very rare. Most(?) uses will involve only timer expiration, or only
529 Signal/Broadcast() actions. When both are used, it will be rare that
530 the race will appear, and it would require MANY Wait() and signaling
531 activities. If this implementation did not recycle events, then it
532 would have to create and destroy events for every call to Wait().
533 That allocation/deallocation and associated construction/destruction
534 would be costly (per wait), and would only be a rare benefit (when the
535 race was "lost" and a spurious signal took place). That would be bad
536 (IMO) optimization trade-off. Finally, such spurious events are
537 allowed by the specification of condition variables (such as
538 implemented in Vista), and hence it is better if any user accommodates
539 such spurious events (see usage note in condition_variable.h).
541 5) Why don't you reset events when you are about to recycle them, or
542 about to reuse them, so that the spurious signals don't take place?
543 The thread described in FAQ-question-3 step c may be pre-empted for an
544 arbitrary length of time before proceeding to step d. As a result,
545 the wait_event may actually be re-used *before* step (e) is reached.
546 As a result, calling reset would not help significantly.
548 6) How is it that the callers lock is released atomically with the
549 entry into a wait state? We commit to the wait activity when we
550 allocate the wait_event for use in a given call to Wait(). This
551 allocation takes place before the caller's lock is released (and
552 actually before our internal_lock_ is released). That allocation is
553 the defining moment when "the wait state has been entered," as that
554 thread *can* now be signaled by a call to Broadcast() or Signal().
555 Hence we actually "commit to wait" before releasing the lock, making
556 the pair effectively atomic.
558 8) Why do you need to lock your data structures during waiting, as the
559 caller is already in possession of a lock? We need to Acquire() and
560 Release() our internal lock during Signal() and Broadcast(). If we tried
561 to use a callers lock for this purpose, we might conflict with their
562 external use of the lock. For example, the caller may use it to consistently
563 hold a lock on one thread while calling Signal() on another, and that would
566 9) Couldn't a more efficient implementation be provided if you
567 preclude using more than one external lock in conjunction with a
568 single ConditionVariable instance? Yes, at least it could be viewed
569 as a simpler API (since you don't have to reiterate the lock argument
570 in each Wait() call). One of the constructors now takes a specific
571 lock as an argument, and there are corresponding Wait() calls that
572 don't specify a lock now.  It turns out that the resulting implementation
573 can't be made more efficient, as the internal lock needs to be used by
574 Signal() and Broadcast(), to access internal data structures. As a
575 result, I was not able to utilize the user supplied lock (which is
576 being used by the user elsewhere presumably) to protect the private
579 9) Since you have a second lock, how can we be sure that there is no
580 possible deadlock scenario? Our internal_lock_ is always the last
581 lock acquired, and the first one released, and hence a deadlock (due
582 to critical section problems) is impossible as a consequence of our
585 10) When doing a Broadcast(), why did you copy all the events into
586 an STL queue, rather than making a linked-loop, and iterating over it?
587 The iterating during Broadcast() is done so outside the protection
588 of the internal lock. As a result, other threads, such as the thread
589 wherein a related event is waiting, could asynchronously manipulate
590 the links around a cv_event. As a result, the link structure cannot
591 be used outside a lock. Broadcast() could iterate over waiting
592 events by cycling in-and-out of the protection of the internal_lock,
593 but that appears more expensive than copying the list into an STL
596 11) Why did the lock.h file need to be modified so much for this
597 change? Central to a Condition Variable is the atomic release of a
598 lock during a Wait(). This places Wait() functionality exactly
599 mid-way between the two classes, Lock and Condition Variable. Given
600 that there can be nested Acquire()'s of locks, and Wait() had to
601 Release() completely a held lock, it was necessary to augment the Lock
602 class with a recursion counter. Even more subtle is the fact that the
603 recursion counter (in a Lock) must be protected, as many threads can
604 access it asynchronously. As a positive fallout of this, there are
605 now some DCHECKS to be sure no one Release()s a Lock more than they
606 Acquire()ed it, and there is ifdef'ed functionality that can detect
607 nested locks (legal under windows, but not under Posix).
609 12) Why is it that the cv_events removed from list in Broadcast() and Signal()
610 are not leaked? How are they recovered?? The cv_events that appear to leak are
611 taken from the waiting_list_. For each element in that list, there is currently
612 a thread in or around the WaitForSingleObject() call of Wait(), and those
613 threads have references to these otherwise leaked events. They are passed as
614 arguments to be recycled just after returning from WaitForSingleObject().
616 13) Why did you use a custom container class (the linked list), when STL has
617 perfectly good containers, such as an STL list? The STL list, as with any
618 container, does not guarantee the utility of an iterator across manipulation
619 (such as insertions and deletions) of the underlying container. The custom
620 double-linked-list container provided that assurance. I don't believe any
621 combination of STL containers provided the services that were needed at the same
622 O(1) efficiency as the custom linked list. The unusual requirement
623 for the container class is that a reference to an item within a container (an
624 iterator) needed to be maintained across an arbitrary manipulation of the
625 container. This requirement exposes itself in the Wait() method, where a
626 waiting_event must be selected prior to the WaitForSingleObject(), and then it
627 must be used as part of recycling to remove the related instance from the
628 waiting_list. A hash table (STL map) could be used, but I was embarrassed to
629 use a complex and relatively low efficiency container when a doubly linked list
630 provided O(1) performance in all required operations. Since other operations
631 to provide performance-and/or-fairness required queue (FIFO) and list (LIFO)
632 containers, I would also have needed to use an STL list/queue as well as an STL
633 map. In the end I decided it would be "fun" to just do it right, and I
634 put so many assertions (DCHECKs) into the container class that it is trivial to
635 code review and validate its correctness.
639 ConditionVariable::ConditionVariable(Lock
* user_lock
)
641 static bool use_vista_native_cv
= BindVistaCondVarFunctions();
642 if (use_vista_native_cv
)
643 impl_
= new WinVistaCondVar(user_lock
);
645 impl_
= new WinXPCondVar(user_lock
);
648 ConditionVariable::~ConditionVariable() {
652 void ConditionVariable::Wait() {
656 void ConditionVariable::TimedWait(const TimeDelta
& max_time
) {
657 impl_
->TimedWait(max_time
);
660 void ConditionVariable::Broadcast() {
664 void ConditionVariable::Signal() {