/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
 * vim: set sw=2 ts=8 et tw=80 :
 */

/* This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */

#ifndef mozilla_net_ChannelEventQueue_h
#define mozilla_net_ChannelEventQueue_h

#include <functional>

#include "nsTArray.h"
#include "nsIEventTarget.h"
#include "nsThreadUtils.h"
#include "nsXULAppAPI.h"
#include "mozilla/DebugOnly.h"
#include "mozilla/Mutex.h"
#include "mozilla/RecursiveMutex.h"
#include "mozilla/UniquePtr.h"
#include "mozilla/Unused.h"

namespace mozilla {
namespace net {

class ChannelEvent {
 public:
  MOZ_COUNTED_DEFAULT_CTOR(ChannelEvent)
  MOZ_COUNTED_DTOR_VIRTUAL(ChannelEvent)

  virtual void Run() = 0;
  virtual already_AddRefed<nsIEventTarget> GetEventTarget() = 0;
};

// Note that MainThreadChannelEvent should not be used in the child process,
// since GetEventTarget() directly returns an unlabeled event target.
class MainThreadChannelEvent : public ChannelEvent {
 public:
  MOZ_COUNTED_DEFAULT_CTOR(MainThreadChannelEvent)
  MOZ_COUNTED_DTOR_OVERRIDE(MainThreadChannelEvent)

  already_AddRefed<nsIEventTarget> GetEventTarget() override {
    MOZ_ASSERT(XRE_IsParentProcess());

    return do_AddRef(GetMainThreadSerialEventTarget());
  }
};

class ChannelFunctionEvent : public ChannelEvent {
 public:
  ChannelFunctionEvent(
      std::function<already_AddRefed<nsIEventTarget>()>&& aGetEventTarget,
      std::function<void()>&& aCallback)
      : mGetEventTarget(std::move(aGetEventTarget)),
        mCallback(std::move(aCallback)) {}

  void Run() override { mCallback(); }
  already_AddRefed<nsIEventTarget> GetEventTarget() override {
    return mGetEventTarget();
  }

 private:
  const std::function<already_AddRefed<nsIEventTarget>()> mGetEventTarget;
  const std::function<void()> mCallback;
};

// UnsafePtr is a work-around for our static analyzer, which requires all
// ref-counted objects to be captured in a lambda via a RefPtr.
// The ChannelEventQueue makes it safe to capture "this" by raw pointer only.
// This work-around is required to prevent reference cycles until bug 1596295
// is resolved.
template <typename T>
class UnsafePtr {
 public:
  explicit UnsafePtr(T* aPtr) : mPtr(aPtr) {}

  T& operator*() const { return *mPtr; }
  T* operator->() const {
    MOZ_ASSERT(mPtr, "dereferencing a null pointer");
    return mPtr;
  }
  operator T*() const& { return mPtr; }
  explicit operator bool() const { return mPtr != nullptr; }

 private:
  T* const mPtr;
};
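
// Illustrative sketch only (the channel type name is hypothetical): capturing
// a raw pointer in a lambda would normally be flagged by the analyzer, while
//
//   [child = UnsafePtr<MyChannelChild>(aChild)]() { child->DoWork(); }
//
// passes. No reference is taken, so this is only safe because the
// ChannelEventQueue controls when (and whether) the callback runs.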

class NeckoTargetChannelFunctionEvent : public ChannelFunctionEvent {
 public:
  template <typename T>
  NeckoTargetChannelFunctionEvent(T* aChild, std::function<void()>&& aCallback)
      : ChannelFunctionEvent(
            [child = UnsafePtr<T>(aChild)]() {
              MOZ_ASSERT(child);
              return child->GetNeckoTarget();
            },
            std::move(aCallback)) {}
};
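
// A typical construction, sketched under the assumption of a channel child
// type that implements GetNeckoTarget() (all names hypothetical):
//
//   mEventQ->RunOrEnqueue(new NeckoTargetChannelFunctionEvent(
//       this, [self = UnsafePtr<MyChannelChild>(this)]() {
//         self->DoOnStopRequest();
//       }));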

// Workaround for Necko re-entrancy dangers. We buffer IPDL messages in a
// queue if still dispatching previous one(s) to listeners/observers.
// Otherwise synchronous XMLHttpRequests and/or other code that spins the
// event loop (ex: IPDL rpc) could cause listener->OnDataAvailable (for
// instance) to be dispatched and called before mListener->OnStartRequest has
// completed.
//
// The ChannelEventQueue implementation ensures strict ordering of
// event execution across target threads.
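
// Sketch of the hazard being prevented (illustrative flow, not code in this
// module): while OnStartRequest is still on the stack, a sync XHR spins the
// event loop and a nested IPDL message is delivered:
//
//   RecvOnStartRequest
//     -> mListener->OnStartRequest         (spins the event loop)
//        -> RecvOnDataAvailable            (re-entered)
//           -> mListener->OnDataAvailable  (would run before OnStartRequest
//                                           completes)
//
// With the queue, the nested message is buffered and flushed afterwards, in
// order.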

class ChannelEventQueue final {
  NS_INLINE_DECL_THREADSAFE_REFCOUNTING(ChannelEventQueue)

 public:
  explicit ChannelEventQueue(nsISupports* owner)
      : mSuspendCount(0),
        mSuspended(false),
        mForcedCount(0),
        mFlushing(false),
        mHasCheckedForAsyncXMLHttpRequest(false),
        mForAsyncXMLHttpRequest(false),
        mOwner(owner),
        mMutex("ChannelEventQueue::mMutex"),
        mRunningMutex("ChannelEventQueue::mRunningMutex") {}

  // Puts an IPDL-generated channel event into the queue, to be run later
  // automatically when EndForcedQueueing and/or Resume is called.
  //
  // @param aCallback - the ChannelEvent
  // @param aAssertionWhenNotQueued - this optional param will be used in an
  //   assertion when the event is executed directly.
  inline void RunOrEnqueue(ChannelEvent* aCallback,
                           bool aAssertionWhenNotQueued = false);
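
  // Note: RunOrEnqueue takes ownership of aCallback (it is wrapped in a
  // UniquePtr internally). A sketch of the two outcomes, assuming the event
  // targets the current thread:
  //
  //   queue->RunOrEnqueue(event);   // queue idle: event->Run() fires now
  //   queue->Suspend();
  //   queue->RunOrEnqueue(event2);  // suspended: buffered until Resume()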

  // Prepend a ChannelEvent to the front of the event queue.
  inline void PrependEvent(UniquePtr<ChannelEvent>&& aEvent);
  inline void PrependEventInternal(UniquePtr<ChannelEvent>&& aEvent)
      MOZ_REQUIRES(mMutex);
  inline void PrependEvents(nsTArray<UniquePtr<ChannelEvent>>& aEvents);

  // After StartForcedQueueing is called, RunOrEnqueue() will start enqueuing
  // events that will be run/flushed when EndForcedQueueing is called.
  // - Note: queueing may still be required after EndForcedQueueing() (if the
  //   queue is suspended, etc): always call RunOrEnqueue() to avoid race
  //   conditions.
  inline void StartForcedQueueing();
  inline void EndForcedQueueing();
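
  // Balanced-pair sketch (AutoEventEnqueuer, below, does this automatically
  // via RAII):
  //
  //   queue->StartForcedQueueing();
  //   // ... incoming events are buffered instead of running re-entrantly ...
  //   queue->EndForcedQueueing();  // flushes, unless still suspended etc.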

  // Suspend/resume the event queue. RunOrEnqueue() will start enqueuing
  // events, and they will be run/flushed when Resume is called. These should
  // be called when the channel owning the event queue is suspended/resumed.
  void Suspend();
  // Resume flushes the queue asynchronously, i.e. items in the queue will be
  // dispatched in a new event on the current thread.
  void Resume();

  void NotifyReleasingOwner() {
    MutexAutoLock lock(mMutex);
    mOwner = nullptr;
  }

#ifdef MOZ_DIAGNOSTIC_ASSERT_ENABLED
  bool IsEmpty() {
    MutexAutoLock lock(mMutex);
    return mEventQueue.IsEmpty();
  }
#endif

 private:
  // Private destructor, to discourage deletion outside of Release():
  ~ChannelEventQueue() = default;

  void SuspendInternal() MOZ_REQUIRES(mMutex);
  void ResumeInternal() MOZ_REQUIRES(mMutex);

  bool MaybeSuspendIfEventsAreSuppressed() MOZ_REQUIRES(mMutex);

  inline void MaybeFlushQueue() MOZ_REQUIRES(mMutex);
  void FlushQueue() MOZ_REQUIRES(mMutex);
  inline void CompleteResume();

  ChannelEvent* TakeEvent();

  nsTArray<UniquePtr<ChannelEvent>> mEventQueue MOZ_GUARDED_BY(mMutex);

  uint32_t mSuspendCount MOZ_GUARDED_BY(mMutex);
  bool mSuspended MOZ_GUARDED_BY(mMutex);
  uint32_t mForcedCount  // Supports forced queueing from multiple threads.
      MOZ_GUARDED_BY(mMutex);
  bool mFlushing MOZ_GUARDED_BY(mMutex);

  // Whether the queue is associated with an async XHR.
  // This is lazily determined the first time it is needed.
  // These are MainThread-only.
  bool mHasCheckedForAsyncXMLHttpRequest;
  bool mForAsyncXMLHttpRequest;

  // Keep a raw ptr to avoid a refcount cycle: only grab a strong ref during
  // flushing.
  nsISupports* mOwner MOZ_GUARDED_BY(mMutex);

  // For atomic mEventQueue operation and state update.
  Mutex mMutex;

  // To guarantee event execution order among threads.
  RecursiveMutex mRunningMutex MOZ_ACQUIRED_BEFORE(mMutex);

  friend class AutoEventEnqueuer;
};

inline void ChannelEventQueue::RunOrEnqueue(ChannelEvent* aCallback,
                                            bool aAssertionWhenNotQueued) {
  MOZ_ASSERT(aCallback);
  // Running the event could trigger destruction of the channel (and our own
  // destructor) unless we make sure its refcount doesn't drop to 0 while this
  // method is running.
  nsCOMPtr<nsISupports> kungFuDeathGrip;
  Unused << kungFuDeathGrip;  // Not used in this function

  UniquePtr<ChannelEvent> event(aCallback);

  // To guarantee that the running event and all the events generated within
  // it will be finished before events on other threads.
  RecursiveMutexAutoLock lock(mRunningMutex);
  {
    MutexAutoLock lock(mMutex);
    kungFuDeathGrip = mOwner;  // must be under the lock

    bool enqueue = !!mForcedCount || mSuspended || mFlushing ||
                   !mEventQueue.IsEmpty() ||
                   MaybeSuspendIfEventsAreSuppressed();
    // To ensure strict ordering of events across multiple threads, we buffer
    // the events in the following cases:
    // a. event queuing is forced by AutoEventEnqueuer
    // b. the event queue is suspended
    // c. an event is currently being flushed/executed from the queue
    // d. the queue is non-empty (pending events on remote thread targets)
    if (enqueue) {
      mEventQueue.AppendElement(std::move(event));
      return;
    }

    nsCOMPtr<nsIEventTarget> target = event->GetEventTarget();
    MOZ_ASSERT(target);

    bool isCurrentThread = false;
    DebugOnly<nsresult> rv = target->IsOnCurrentThread(&isCurrentThread);
    MOZ_ASSERT(NS_SUCCEEDED(rv));

    if (!isCurrentThread) {
      // Leverage the Suspend/Resume mechanism to trigger the flush procedure
      // without creating a new one.
      // The execution of further events in the queue is blocked until the
      // target thread completes the execution of this event.
      // A callback is dispatched to the target thread to flush events from
      // the queue. This is done by ResumeInternal, which dispatches a runnable
      // (CompleteResumeRunnable) to the target thread. The target thread will
      // call CompleteResume to flush the queue. All the events are run
      // synchronously on their respective target threads.
      SuspendInternal();
      mEventQueue.AppendElement(std::move(event));
      ResumeInternal();
      return;
    }
  }

  MOZ_RELEASE_ASSERT(!aAssertionWhenNotQueued);
  // Execute the event synchronously if we are not queuing it and
  // the target thread is the current thread.
  event->Run();
}

inline void ChannelEventQueue::StartForcedQueueing() {
  MutexAutoLock lock(mMutex);
  ++mForcedCount;
}

inline void ChannelEventQueue::EndForcedQueueing() {
  MutexAutoLock lock(mMutex);
  MOZ_ASSERT(mForcedCount > 0);
  if (!--mForcedCount) {
    MaybeFlushQueue();
  }
}

inline void ChannelEventQueue::PrependEvent(UniquePtr<ChannelEvent>&& aEvent) {
  MutexAutoLock lock(mMutex);
  PrependEventInternal(std::move(aEvent));
}

inline void ChannelEventQueue::PrependEventInternal(
    UniquePtr<ChannelEvent>&& aEvent) {
  mMutex.AssertCurrentThreadOwns();

  // Prepending an event while no queue flush is foreseen might cause the
  // following channel events to never run. This assertion guarantees there
  // must be a queue flush, triggered by either Resume or EndForcedQueueing,
  // to execute the prepended event.
  MOZ_ASSERT(mSuspended || !!mForcedCount);

  mEventQueue.InsertElementAt(0, std::move(aEvent));
}

inline void ChannelEventQueue::PrependEvents(
    nsTArray<UniquePtr<ChannelEvent>>& aEvents) {
  MutexAutoLock lock(mMutex);

  // Prepending events while no queue flush is foreseen might cause the
  // following channel events to never run. This assertion guarantees there
  // must be a queue flush, triggered by either Resume or EndForcedQueueing,
  // to execute the prepended events.
  MOZ_ASSERT(mSuspended || !!mForcedCount);

  mEventQueue.InsertElementsAt(0, aEvents.Length());

  for (uint32_t i = 0; i < aEvents.Length(); i++) {
    mEventQueue[i] = std::move(aEvents[i]);
  }
}

inline void ChannelEventQueue::CompleteResume() {
  MutexAutoLock lock(mMutex);

  // The channel may have been suspended again since Resume fired the event
  // to call this.
  if (!mSuspendCount) {
    // We need to remain logically suspended (for purposes of queuing incoming
    // messages) until this point, else new incoming messages could run before
    // queued ones.
    mSuspended = false;
    MaybeFlushQueue();
  }
}

inline void ChannelEventQueue::MaybeFlushQueue() {
  mMutex.AssertCurrentThreadOwns();
  // Don't flush if forced queuing is on, we're already being flushed, we're
  // suspended, or there's nothing to flush.
  bool flushQueue = !mForcedCount && !mFlushing && !mSuspended &&
                    !mEventQueue.IsEmpty() &&
                    !MaybeSuspendIfEventsAreSuppressed();

  // Only one thread is allowed to run FlushQueue at a time.
  if (flushQueue) {
    FlushQueue();
  }
}

// Ensures that RunOrEnqueue() will be collecting events during its lifetime
// (letting the caller know incoming IPDL messages should be queued). Flushes
// the queue when it goes out of scope.
class MOZ_STACK_CLASS AutoEventEnqueuer {
 public:
  explicit AutoEventEnqueuer(ChannelEventQueue* queue) : mEventQueue(queue) {
    {
      // Probably not actually needed, since NotifyReleasingOwner should
      // only happen after this, but safer to take it in case things change.
      MutexAutoLock lock(queue->mMutex);
      mOwner = queue->mOwner;
    }
    mEventQueue->StartForcedQueueing();
  }
  ~AutoEventEnqueuer() { mEventQueue->EndForcedQueueing(); }

 private:
  RefPtr<ChannelEventQueue> mEventQueue;
  // Ensure the channel object lives longer than the ChannelEventQueue.
  nsCOMPtr<nsISupports> mOwner;
};
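
// Typical RAII use, sketched with a hypothetical IPDL handler (real handler
// and channel names vary by implementation):
//
//   mozilla::ipc::IPCResult MyChannelChild::RecvOnSomething() {
//     AutoEventEnqueuer ensureSerialDispatch(mEventQ);
//     // Any RunOrEnqueue() reached while we dispatch to listeners is
//     // buffered, then flushed when ensureSerialDispatch goes out of scope.
//     DoNotifyListener();
//     return IPC_OK();
//   }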

}  // namespace net
}  // namespace mozilla

#endif  // mozilla_net_ChannelEventQueue_h