/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
/* vim: set ts=8 sts=2 et sw=2 tw=80: */
/* This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */

#include "CanvasDrawEventRecorder.h"

#include <string.h>

#include "mozilla/dom/WorkerCommon.h"
#include "mozilla/dom/WorkerPrivate.h"
#include "mozilla/dom/WorkerRef.h"
#include "mozilla/dom/WorkerRunnable.h"
#include "mozilla/layers/TextureRecorded.h"
#include "mozilla/layers/SharedSurfacesChild.h"
#include "mozilla/StaticPrefs_gfx.h"
#include "RecordedCanvasEventImpl.h"

namespace mozilla {
namespace layers {

struct ShmemAndHandle {
  RefPtr<ipc::SharedMemory> shmem;
  Handle handle;
};

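// Creates a shared memory region of aSize, maps it into this process and
// takes its handle so that it can be sent to the translator side. Returns
// Nothing() if creation, mapping or taking the handle fails.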
static Maybe<ShmemAndHandle> CreateAndMapShmem(size_t aSize) {
  auto shmem = MakeRefPtr<ipc::SharedMemory>();
  if (!shmem->Create(aSize) || !shmem->Map(aSize)) {
    return Nothing();
  }

  auto shmemHandle = shmem->TakeHandle();
  if (!shmemHandle) {
    return Nothing();
  }

  return Some(ShmemAndHandle{shmem.forget(), std::move(shmemHandle)});
}

CanvasDrawEventRecorder::CanvasDrawEventRecorder(
    dom::ThreadSafeWorkerRef* aWorkerRef)
    : mWorkerRef(aWorkerRef), mIsOnWorker(!!aWorkerRef) {
  mDefaultBufferSize = ipc::SharedMemory::PageAlignedSize(
      StaticPrefs::gfx_canvas_remote_default_buffer_size());
  mMaxDefaultBuffers = StaticPrefs::gfx_canvas_remote_max_default_buffers();
  mMaxSpinCount = StaticPrefs::gfx_canvas_remote_max_spin_count();
  mDropBufferLimit = StaticPrefs::gfx_canvas_remote_drop_buffer_limit();
  mDropBufferOnZero = mDropBufferLimit;
}

CanvasDrawEventRecorder::~CanvasDrawEventRecorder() { MOZ_ASSERT(!mWorkerRef); }

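// Sets up the shared state between the recorder and the translator: a header
// shmem holding the writer/reader counters and states, two default-sized
// event buffers, and a pair of cross-process semaphores. All handles are
// passed to the translator via mHelpers->InitTranslator().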
bool CanvasDrawEventRecorder::Init(TextureType aTextureType,
                                   TextureType aWebglTextureType,
                                   gfx::BackendType aBackendType,
                                   UniquePtr<Helpers> aHelpers) {
  NS_ASSERT_OWNINGTHREAD(CanvasDrawEventRecorder);

  mHelpers = std::move(aHelpers);

  MOZ_ASSERT(mTextureType == TextureType::Unknown);
  auto header = CreateAndMapShmem(sizeof(Header));
  if (NS_WARN_IF(header.isNothing())) {
    return false;
  }

  mHeader = static_cast<Header*>(header->shmem->Memory());
  mHeader->eventCount = 0;
  mHeader->writerWaitCount = 0;
  mHeader->writerState = State::Processing;
  mHeader->processedCount = 0;
  mHeader->readerState = State::Paused;

  // We always keep at least two buffers. This means that when we
  // have to add a new buffer, there is at least a full buffer that requires
  // translating while the handle is sent over.
  AutoTArray<Handle, 2> bufferHandles;
  auto buffer = CreateAndMapShmem(mDefaultBufferSize);
  if (NS_WARN_IF(buffer.isNothing())) {
    return false;
  }
  mCurrentBuffer = CanvasBuffer(std::move(buffer->shmem));
  bufferHandles.AppendElement(std::move(buffer->handle));

  buffer = CreateAndMapShmem(mDefaultBufferSize);
  if (NS_WARN_IF(buffer.isNothing())) {
    return false;
  }
  mRecycledBuffers.emplace(buffer->shmem.forget(), 0);
  bufferHandles.AppendElement(std::move(buffer->handle));

  mWriterSemaphore.reset(CrossProcessSemaphore::Create("CanvasRecorder", 0));
  auto writerSem = mWriterSemaphore->CloneHandle();
  mWriterSemaphore->CloseHandle();
  if (!IsHandleValid(writerSem)) {
    return false;
  }

  mReaderSemaphore.reset(CrossProcessSemaphore::Create("CanvasTranslator", 0));
  auto readerSem = mReaderSemaphore->CloneHandle();
  mReaderSemaphore->CloseHandle();
  if (!IsHandleValid(readerSem)) {
    return false;
  }

  if (!mHelpers->InitTranslator(aTextureType, aWebglTextureType, aBackendType,
                                std::move(header->handle),
                                std::move(bufferHandles), mDefaultBufferSize,
                                std::move(readerSem), std::move(writerSem))) {
    return false;
  }

  mTextureType = aTextureType;
  mHeaderShmem = header->shmem;
  return true;
}

void CanvasDrawEventRecorder::RecordEvent(const gfx::RecordedEvent& aEvent) {
  NS_ASSERT_OWNINGTHREAD(CanvasDrawEventRecorder);
  aEvent.RecordToStream(*this);
}

int64_t CanvasDrawEventRecorder::CreateCheckpoint() {
  NS_ASSERT_OWNINGTHREAD(CanvasDrawEventRecorder);
  int64_t checkpoint = mHeader->eventCount;
  RecordEvent(RecordedCheckpoint());
  ClearProcessedExternalSurfaces();
  ClearProcessedExternalImages();
  return checkpoint;
}

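// Blocks until the translator has processed events up to aCheckpoint. We spin
// for up to mMaxSpinCount iterations first, then fall back to waiting on
// mWriterSemaphore, giving up if the reading side has closed or failed.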
bool CanvasDrawEventRecorder::WaitForCheckpoint(int64_t aCheckpoint) {
  NS_ASSERT_OWNINGTHREAD(CanvasDrawEventRecorder);

  uint32_t spinCount = mMaxSpinCount;
  do {
    if (mHeader->processedCount >= aCheckpoint) {
      return true;
    }
  } while (--spinCount != 0);

  mHeader->writerState = State::AboutToWait;
  if (mHeader->processedCount >= aCheckpoint) {
    mHeader->writerState = State::Processing;
    return true;
  }

  mHeader->writerWaitCount = aCheckpoint;
  mHeader->writerState = State::Waiting;

  // Wait unless we detect the reading side has closed.
  while (!mHelpers->ReaderClosed() && mHeader->readerState != State::Failed) {
    if (mWriterSemaphore->Wait(Some(TimeDuration::FromMilliseconds(100)))) {
      MOZ_ASSERT(mHeader->processedCount >= aCheckpoint);
      return true;
    }
  }

  // Either the reader has failed or we're stopping writing for some other
  // reason (e.g. shutdown), so mark us as failed so the reader is aware.
  mHeader->writerState = State::Failed;
  return false;
}

void CanvasDrawEventRecorder::WriteInternalEvent(EventType aEventType) {
  MOZ_ASSERT(mCurrentBuffer.SizeRemaining() > 0);

  WriteElement(mCurrentBuffer.Writer(), aEventType);
  IncrementEventCount();
}

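// Returns a buffer with at least aSize + 1 bytes of contiguous space (the
// extra byte leaves room for the end-of-buffer event). Prefers the current
// buffer, then a free recycled buffer, and only creates a new shmem when
// neither is usable. On failure the returned buffer is invalid and
// writerState is set to Failed.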
gfx::ContiguousBuffer& CanvasDrawEventRecorder::GetContiguousBuffer(
    size_t aSize) {
  if (!mCurrentBuffer.IsValid()) {
    // If the current buffer is invalid then we've already failed previously.
    MOZ_ASSERT(mHeader->writerState == State::Failed);
    return mCurrentBuffer;
  }

  // We make sure that our buffer can hold aSize + 1 to ensure we always have
  // room for the end of buffer event.

  // Check if there is enough room in our current buffer.
  if (mCurrentBuffer.SizeRemaining() > aSize) {
    return mCurrentBuffer;
  }

  bool useRecycledBuffer = false;
  if (mRecycledBuffers.front().Capacity() > aSize) {
    // The recycled buffer is big enough, check if it is free.
    if (mRecycledBuffers.front().eventCount <= mHeader->processedCount) {
      useRecycledBuffer = true;
    } else if (mRecycledBuffers.size() >= mMaxDefaultBuffers) {
      // We've hit the max number of buffers, wait for the next one to be free.
      // We wait for (eventCount - 1), as we check and signal in the translator
      // during the play event, before the processedCount has been updated.
      useRecycledBuffer = true;
      if (!WaitForCheckpoint(mRecycledBuffers.front().eventCount - 1)) {
        // The wait failed or we're shutting down, just return an empty buffer.
        mCurrentBuffer = CanvasBuffer();
        return mCurrentBuffer;
      }
    }
  }

  if (useRecycledBuffer) {
    // Only queue default size buffers for recycling.
    if (mCurrentBuffer.Capacity() == mDefaultBufferSize) {
      WriteInternalEvent(RECYCLE_BUFFER);
      mRecycledBuffers.emplace(std::move(mCurrentBuffer.shmem),
                               mHeader->eventCount);
    } else {
      WriteInternalEvent(DROP_BUFFER);
    }

    mCurrentBuffer = CanvasBuffer(std::move(mRecycledBuffers.front().shmem));
    mRecycledBuffers.pop();

    // If we have had more than one free recycled buffer a configured number
    // of times in a row, drop one.
    if (mRecycledBuffers.size() > 1 &&
        mRecycledBuffers.front().eventCount < mHeader->processedCount) {
      if (--mDropBufferOnZero == 0) {
        WriteInternalEvent(DROP_BUFFER);
        mCurrentBuffer =
            CanvasBuffer(std::move(mRecycledBuffers.front().shmem));
        mRecycledBuffers.pop();
        mDropBufferOnZero = 1;
      }
    } else {
      mDropBufferOnZero = mDropBufferLimit;
    }

    return mCurrentBuffer;
  }

  // We don't have a buffer free or it is not big enough, so create a new one.
  WriteInternalEvent(PAUSE_TRANSLATION);

  // Only queue default size buffers for recycling.
  if (mCurrentBuffer.Capacity() == mDefaultBufferSize) {
    mRecycledBuffers.emplace(std::move(mCurrentBuffer.shmem),
                             mHeader->eventCount);
  }

  size_t bufferSize = std::max(
      mDefaultBufferSize, ipc::SharedMemory::PageAlignedSize(aSize + 1));
  auto newBuffer = CreateAndMapShmem(bufferSize);
  if (NS_WARN_IF(newBuffer.isNothing())) {
    mHeader->writerState = State::Failed;
    mCurrentBuffer = CanvasBuffer();
    return mCurrentBuffer;
  }

  if (!mHelpers->AddBuffer(std::move(newBuffer->handle), bufferSize)) {
    mHeader->writerState = State::Failed;
    mCurrentBuffer = CanvasBuffer();
    return mCurrentBuffer;
  }

  mCurrentBuffer = CanvasBuffer(std::move(newBuffer->shmem));
  return mCurrentBuffer;
}

void CanvasDrawEventRecorder::DropFreeBuffers() {
  while (mRecycledBuffers.size() > 1 &&
         mRecycledBuffers.front().eventCount < mHeader->processedCount) {
    // If we encountered an error, we may have invalidated mCurrentBuffer in
    // GetContiguousBuffer. No need to write the DROP_BUFFER event.
    if (mCurrentBuffer.IsValid()) {
      WriteInternalEvent(DROP_BUFFER);
    }
    mCurrentBuffer = CanvasBuffer(std::move(mRecycledBuffers.front().shmem));
    mRecycledBuffers.pop();
  }

  ClearProcessedExternalSurfaces();
  ClearProcessedExternalImages();
}

void CanvasDrawEventRecorder::IncrementEventCount() {
  mHeader->eventCount++;
  CheckAndSignalReader();
}

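// Wakes the reader if it has unprocessed events. Loops on the shared
// readerState because the reader can change state concurrently; only a
// successful compareExchange from Waiting to Processing signals the
// semaphore, and a Stopped reader is restarted via mHelpers->RestartReader().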
void CanvasDrawEventRecorder::CheckAndSignalReader() {
  do {
    switch (mHeader->readerState) {
      case State::Processing:
      case State::Paused:
      case State::Failed:
        return;
      case State::AboutToWait:
        // The reader is making a decision about whether to wait. So, we must
        // wait until it has decided to avoid races. Check if the reader is
        // closed to avoid hangs.
        if (mHelpers->ReaderClosed()) {
          return;
        }
        continue;
      case State::Waiting:
        if (mHeader->processedCount < mHeader->eventCount) {
          // We have to use compareExchange here because the reader can change
          // from Waiting to Stopped.
          if (mHeader->readerState.compareExchange(State::Waiting,
                                                   State::Processing)) {
            mReaderSemaphore->Signal();
            return;
          }

          MOZ_ASSERT(mHeader->readerState == State::Stopped);
          continue;
        }
        return;
      case State::Stopped:
        if (mHeader->processedCount < mHeader->eventCount) {
          mHeader->readerState = State::Processing;
          if (!mHelpers->RestartReader()) {
            mHeader->writerState = State::Failed;
          }
        }
        return;
      default:
        MOZ_ASSERT_UNREACHABLE("Invalid waiting state.");
        return;
    }
  } while (true);
}

void CanvasDrawEventRecorder::DetachResources() {
  NS_ASSERT_OWNINGTHREAD(CanvasDrawEventRecorder);

  DrawEventRecorderPrivate::DetachResources();

  {
    auto lockedPendingDeletions = mPendingDeletions.Lock();
    mWorkerRef = nullptr;
  }
}

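// Dispatches ProcessPendingDeletions back to the recorder's owning thread:
// the main thread when there is no worker ref, otherwise the worker thread,
// bouncing via the main thread first if necessary so the worker runnable can
// be dispatched from there.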
void CanvasDrawEventRecorder::QueueProcessPendingDeletionsLocked(
    RefPtr<CanvasDrawEventRecorder>&& aRecorder) {
  if (!mWorkerRef) {
    MOZ_RELEASE_ASSERT(
        !mIsOnWorker,
        "QueueProcessPendingDeletionsLocked called after worker shutdown!");

    NS_DispatchToMainThread(NS_NewRunnableFunction(
        "CanvasDrawEventRecorder::QueueProcessPendingDeletionsLocked",
        [self = std::move(aRecorder)]() { self->ProcessPendingDeletions(); }));
    return;
  }

  if (!NS_IsMainThread()) {
    NS_DispatchToMainThread(NS_NewRunnableFunction(
        "CanvasDrawEventRecorder::QueueProcessPendingDeletionsLocked",
        [self = std::move(aRecorder)]() mutable {
          self->QueueProcessPendingDeletions(std::move(self));
        }));
    return;
  }

  class ProcessPendingRunnable final : public dom::MainThreadWorkerRunnable {
   public:
    explicit ProcessPendingRunnable(RefPtr<CanvasDrawEventRecorder>&& aRecorder)
        : dom::MainThreadWorkerRunnable("ProcessPendingRunnable"),
          mRecorder(std::move(aRecorder)) {}

    bool WorkerRun(JSContext*, dom::WorkerPrivate*) override {
      RefPtr<CanvasDrawEventRecorder> recorder = std::move(mRecorder);
      recorder->ProcessPendingDeletions();
      return true;
    }

   private:
    RefPtr<CanvasDrawEventRecorder> mRecorder;
  };

  auto task = MakeRefPtr<ProcessPendingRunnable>(std::move(aRecorder));
  if (NS_WARN_IF(!task->Dispatch(mWorkerRef->Private()))) {
    MOZ_CRASH("ProcessPendingRunnable leaked!");
  }
}

void CanvasDrawEventRecorder::QueueProcessPendingDeletions(
    RefPtr<CanvasDrawEventRecorder>&& aRecorder) {
  auto lockedPendingDeletions = mPendingDeletions.Lock();
  if (lockedPendingDeletions->empty()) {
    // We raced to handle the deletions, and something got there first.
    return;
  }

  QueueProcessPendingDeletionsLocked(std::move(aRecorder));
}

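// Adds a deletion callback. If we are on the owning thread the queued
// callbacks are run immediately (outside the lock); otherwise, when the queue
// transitions from empty to non-empty, a task is queued to run them on the
// owning thread.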
void CanvasDrawEventRecorder::AddPendingDeletion(
    std::function<void()>&& aPendingDeletion) {
  PendingDeletionsVector pendingDeletions;

  {
    auto lockedPendingDeletions = mPendingDeletions.Lock();
    bool wasEmpty = lockedPendingDeletions->empty();
    lockedPendingDeletions->emplace_back(std::move(aPendingDeletion));

    MOZ_RELEASE_ASSERT(!mIsOnWorker || mWorkerRef,
                       "AddPendingDeletion called after worker shutdown!");

    // If we are not on the owning thread, we must queue an event to run the
    // deletions, if we transitioned from empty to non-empty.
    if ((mWorkerRef && !mWorkerRef->Private()->IsOnCurrentThread()) ||
        (!mWorkerRef && !NS_IsMainThread())) {
      if (wasEmpty) {
        RefPtr<CanvasDrawEventRecorder> self(this);
        QueueProcessPendingDeletionsLocked(std::move(self));
      }
      return;
    }

    // Otherwise, we can just run all of them right now.
    pendingDeletions.swap(*lockedPendingDeletions);
  }

  for (const auto& pendingDeletion : pendingDeletions) {
    pendingDeletion();
  }
}

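// On the main thread, try to share the surface via SharedSurfacesChild and
// record it by its external image id; if that is not possible, fall back to
// the DrawEventRecorderPrivate implementation.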
void CanvasDrawEventRecorder::StoreSourceSurfaceRecording(
    gfx::SourceSurface* aSurface, const char* aReason) {
  NS_ASSERT_OWNINGTHREAD(CanvasDrawEventRecorder);

  if (NS_IsMainThread()) {
    wr::ExternalImageId extId{};
    nsresult rv = layers::SharedSurfacesChild::Share(aSurface, extId);
    if (NS_SUCCEEDED(rv)) {
      StoreExternalSurfaceRecording(aSurface, wr::AsUint64(extId));
      mExternalSurfaces.back().mEventCount = mHeader->eventCount;
      return;
    }
  }

  DrawEventRecorderPrivate::StoreSourceSurfaceRecording(aSurface, aReason);
}

void CanvasDrawEventRecorder::StoreImageRecording(
    const RefPtr<Image>& aImageOfSurfaceDescriptor, const char* aReason) {
  NS_ASSERT_OWNINGTHREAD(CanvasDrawEventRecorder);

  StoreExternalImageRecording(aImageOfSurfaceDescriptor);
  mExternalImages.back().mEventCount = mHeader->eventCount;

  ClearProcessedExternalImages();
}

void CanvasDrawEventRecorder::ClearProcessedExternalSurfaces() {
  while (!mExternalSurfaces.empty()) {
    if (mExternalSurfaces.front().mEventCount > mHeader->processedCount) {
      break;
    }
    mExternalSurfaces.pop_front();
  }
}

void CanvasDrawEventRecorder::ClearProcessedExternalImages() {
  while (!mExternalImages.empty()) {
    if (mExternalImages.front().mEventCount > mHeader->processedCount) {
      break;
    }
    mExternalImages.pop_front();
  }
}

}  // namespace layers
}  // namespace mozilla