From d5157ac5afec145b85746b42f2d86383483a8222 Mon Sep 17 00:00:00 2001
From: reveman
Date: Thu, 6 Aug 2015 16:58:22 -0700
Subject: [PATCH] cc: Use worker context for one-copy tile initialization.

This moves management of staging resources to OneCopyTileTaskWorkerPool
class. This makes it possible to use a worker context to issue and detect
when copy operations complete.

BUG=490295
CQ_INCLUDE_TRYBOTS=tryserver.blink:linux_blink_rel

Review URL: https://codereview.chromium.org/1230203007

Cr-Commit-Position: refs/heads/master@{#342235}
---
 cc/output/context_provider.h                       |  22 +
 cc/output/output_surface.cc                        |  21 +-
 cc/raster/gpu_tile_task_worker_pool.cc             |  13 +-
 cc/raster/one_copy_tile_task_worker_pool.cc        | 755 ++++++++++++---------
 cc/raster/one_copy_tile_task_worker_pool.h         | 130 ++--
 cc/raster/tile_task_worker_pool_perftest.cc        |   7 +-
 cc/raster/tile_task_worker_pool_unittest.cc        |   7 +-
 cc/resources/resource_format.cc                    |  19 +
 cc/resources/resource_format.h                     |   2 +
 cc/resources/resource_pool.cc                      |   5 +-
 cc/resources/resource_pool.h                       |   5 +-
 cc/resources/resource_pool_unittest.cc             |  13 +-
 cc/resources/resource_provider.cc                  | 162 +----
 cc/resources/resource_provider.h                   |  22 +-
 cc/resources/resource_provider_unittest.cc         | 131 +---
 cc/surfaces/display.cc                             |   5 +-
 cc/surfaces/surface_display_output_surface.cc      |   5 +-
 cc/surfaces/surface_display_output_surface.h       |   3 +-
 .../surface_display_output_surface_unittest.cc     |   3 +-
 cc/test/fake_output_surface.h                      |   8 +-
 cc/test/fake_resource_provider.h                   |  13 +-
 cc/test/layer_tree_pixel_resource_test.cc          |  10 +-
 cc/test/layer_tree_pixel_resource_test.h           |   3 +-
 cc/test/layer_tree_pixel_test.cc                   |   6 +
 cc/test/layer_tree_pixel_test.h                    |   2 +
 cc/test/layer_tree_test.cc                         |  10 +-
 cc/test/layer_tree_test.h                          |   3 +-
 cc/test/pixel_test.cc                              |   4 +-
 cc/test/test_in_process_context_provider.cc        |   1 +
 cc/tiles/tile_manager.cc                           |   2 +-
 cc/trees/layer_tree_host_impl.cc                   |  42 +-
 cc/trees/layer_tree_host_impl.h                    |   4 +-
 cc/trees/layer_tree_host_pixeltest_tiles.cc        |   1 +
 cc/trees/layer_tree_settings.cc                    |   3 +-
 cc/trees/layer_tree_settings.h                     |   1 +
 .../browser_compositor_output_surface.cc           |   8 +-
 .../compositor/browser_compositor_output_surface.h |   1 +
 .../gpu_browser_compositor_output_surface.cc       |   2 +
 .../gpu_browser_compositor_output_surface.h        |   1 +
 .../compositor/gpu_process_transport_factory.cc    |  35 +-
 .../compositor/gpu_process_transport_factory.h     |   1 +
 ...urfaceless_browser_compositor_output_surface.cc |   2 +
 ...surfaceless_browser_compositor_output_surface.h |   1 +
 .../offscreen_browser_compositor_output_surface.cc |   2 +
 .../offscreen_browser_compositor_output_surface.h  |   1 +
 .../browser/compositor/reflector_impl_unittest.cc  |   1 +
 content/browser/gpu/compositor_util.cc             |   8 -
 .../renderer_host/compositor_impl_android.cc       |   2 +-
 .../common/gpu/client/command_buffer_metrics.cc    |   6 +
 content/common/gpu/client/command_buffer_metrics.h |   1 +
 content/renderer/gpu/render_widget_compositor.cc   |   5 +
 gpu/command_buffer/service/query_manager.cc        |   1 +
 ui/compositor/test/in_process_context_factory.cc   |  23 +-
 ui/compositor/test/in_process_context_provider.cc  |   4 +
 ui/compositor/test/in_process_context_provider.h   |   1 +
 55 files changed, 746 insertions(+), 803 deletions(-)

diff --git a/cc/output/context_provider.h b/cc/output/context_provider.h
index aa4367d97ff6..c024e02328e9 100644
--- a/cc/output/context_provider.h
+++ b/cc/output/context_provider.h
@@ -7,6 +7,7 @@
 #include "base/callback.h"
 #include "base/memory/ref_counted.h"
+#include "base/synchronization/lock.h"
 #include "cc/base/cc_export.h"
 #include 
"gpu/command_buffer/common/capabilities.h" @@ -26,6 +27,27 @@ struct ManagedMemoryPolicy; class ContextProvider : public base::RefCountedThreadSafe { public: + class ScopedContextLock { + public: + explicit ScopedContextLock(ContextProvider* context_provider) + : context_provider_(context_provider), + context_lock_(*context_provider_->GetLock()) { + // Allow current thread to bind to |context_provider|. + context_provider_->DetachFromThread(); + } + ~ScopedContextLock() { + // Allow a different thread to bind to |context_provider|. + context_provider_->DetachFromThread(); + } + + gpu::gles2::GLES2Interface* ContextGL() { + return context_provider_->ContextGL(); + } + + private: + ContextProvider* const context_provider_; + base::AutoLock context_lock_; + }; // Bind the 3d context to the current thread. This should be called before // accessing the contexts. Calling it more than once should have no effect. // Once this function has been called, the class should only be accessed diff --git a/cc/output/output_surface.cc b/cc/output/output_surface.cc index a597a87d8456..0786d660d4a5 100644 --- a/cc/output/output_surface.cc +++ b/cc/output/output_surface.cc @@ -107,10 +107,6 @@ OutputSurface::~OutputSurface() { context_provider_->SetMemoryPolicyChangedCallback( ContextProvider::MemoryPolicyChangedCallback()); } - if (worker_context_provider_.get()) { - worker_context_provider_->SetLostContextCallback( - ContextProvider::LostContextCallback()); - } } bool OutputSurface::HasExternalStencilTest() const { @@ -134,14 +130,8 @@ bool OutputSurface::BindToClient(OutputSurfaceClient* client) { if (success && worker_context_provider_.get()) { success = worker_context_provider_->BindToCurrentThread(); - if (success) { + if (success) worker_context_provider_->SetupLock(); - // The destructor resets the context lost callback, so base::Unretained - // is safe, as long as the worker threads stop using the context before - // the output surface is destroyed. - worker_context_provider_->SetLostContextCallback(base::Bind( - &OutputSurface::DidLoseOutputSurface, base::Unretained(this))); - } } if (!success) @@ -217,11 +207,7 @@ void OutputSurface::SetWorkerContextShouldAggressivelyFreeResources( "OutputSurface::SetWorkerContextShouldAggressivelyFreeResources", "aggressively_free_resources", aggressively_free_resources); if (auto* context_provider = worker_context_provider()) { - // The context lock must be held while accessing the worker context. - base::AutoLock context_lock(*context_provider->GetLock()); - - // Allow context to bind to current thread. - context_provider->DetachFromThread(); + ContextProvider::ScopedContextLock scoped_context(context_provider); if (aggressively_free_resources) { context_provider->DeleteCachedResources(); @@ -231,9 +217,6 @@ void OutputSurface::SetWorkerContextShouldAggressivelyFreeResources( context_support->SetAggressivelyFreeResources( aggressively_free_resources); } - - // Allow context to bind to other threads. - context_provider->DetachFromThread(); } } diff --git a/cc/raster/gpu_tile_task_worker_pool.cc b/cc/raster/gpu_tile_task_worker_pool.cc index 3567308fcdc7..238045501223 100644 --- a/cc/raster/gpu_tile_task_worker_pool.cc +++ b/cc/raster/gpu_tile_task_worker_pool.cc @@ -46,13 +46,9 @@ class RasterBufferImpl : public RasterBuffer { ContextProvider* context_provider = rasterizer_->resource_provider() ->output_surface() ->worker_context_provider(); + DCHECK(context_provider); - // The context lock must be held while accessing the context on a - // worker thread. 
- base::AutoLock context_lock(*context_provider->GetLock()); - - // Allow this worker thread to bind to context_provider. - context_provider->DetachFromThread(); + ContextProvider::ScopedContextLock scoped_context(context_provider); gfx::Rect playback_rect = raster_full_rect; if (resource_has_previous_content_) { @@ -67,10 +63,7 @@ class RasterBufferImpl : public RasterBuffer { playback_rect, scale); // Barrier to sync worker context output to cc context. - context_provider->ContextGL()->OrderingBarrierCHROMIUM(); - - // Allow compositor thread to bind to context_provider. - context_provider->DetachFromThread(); + scoped_context.ContextGL()->OrderingBarrierCHROMIUM(); } private: diff --git a/cc/raster/one_copy_tile_task_worker_pool.cc b/cc/raster/one_copy_tile_task_worker_pool.cc index 64da37e91d09..3284fe9ddc6a 100644 --- a/cc/raster/one_copy_tile_task_worker_pool.cc +++ b/cc/raster/one_copy_tile_task_worker_pool.cc @@ -8,16 +8,20 @@ #include #include "base/strings/stringprintf.h" +#include "base/thread_task_runner_handle.h" +#include "base/trace_event/memory_dump_manager.h" #include "base/trace_event/trace_event.h" #include "base/trace_event/trace_event_argument.h" #include "cc/base/math_util.h" #include "cc/debug/traced_value.h" #include "cc/raster/raster_buffer.h" #include "cc/resources/platform_color.h" -#include "cc/resources/resource_pool.h" +#include "cc/resources/resource_format.h" +#include "cc/resources/resource_util.h" #include "cc/resources/scoped_resource.h" +#include "gpu/GLES2/gl2extchromium.h" #include "gpu/command_buffer/client/gles2_interface.h" -#include "ui/gfx/gpu_memory_buffer.h" +#include "gpu/command_buffer/client/gpu_memory_buffer_manager.h" namespace cc { namespace { @@ -26,49 +30,15 @@ class RasterBufferImpl : public RasterBuffer { public: RasterBufferImpl(OneCopyTileTaskWorkerPool* worker_pool, ResourceProvider* resource_provider, - ResourcePool* resource_pool, ResourceFormat resource_format, - const Resource* output_resource, + const Resource* resource, uint64_t previous_content_id) : worker_pool_(worker_pool), - resource_provider_(resource_provider), - resource_pool_(resource_pool), - output_resource_(output_resource), - raster_content_id_(0), - sequence_(0) { - if (worker_pool->have_persistent_gpu_memory_buffers() && - previous_content_id) { - raster_resource_ = - resource_pool->TryAcquireResourceWithContentId(previous_content_id); - } - if (raster_resource_) { - raster_content_id_ = previous_content_id; - DCHECK_EQ(resource_format, raster_resource_->format()); - DCHECK_EQ(output_resource->size().ToString(), - raster_resource_->size().ToString()); - } else { - raster_resource_ = resource_pool->AcquireResource(output_resource->size(), - resource_format); - } - - lock_.reset(new ResourceProvider::ScopedWriteLockGpuMemoryBuffer( - resource_provider_, raster_resource_->id())); - } + resource_(resource), + lock_(resource_provider, resource->id()), + previous_content_id_(previous_content_id) {} - ~RasterBufferImpl() override { - // Release write lock in case a copy was never scheduled. - lock_.reset(); - - // Make sure any scheduled copy operations are issued before we release the - // raster resource. - if (sequence_) - worker_pool_->AdvanceLastIssuedCopyTo(sequence_); - - // Return resources to pool so they can be used by another RasterBuffer - // instance. 
- resource_pool_->ReleaseResource(raster_resource_.Pass(), - raster_content_id_); - } + ~RasterBufferImpl() override {} // Overridden from RasterBuffer: void Playback(const RasterSource* raster_source, @@ -77,55 +47,117 @@ class RasterBufferImpl : public RasterBuffer { uint64_t new_content_id, float scale, bool include_images) override { - // If there's a raster_content_id_, we are reusing a resource with that - // content id. - bool reusing_raster_resource = raster_content_id_ != 0; - sequence_ = worker_pool_->PlaybackAndScheduleCopyOnWorkerThread( - reusing_raster_resource, lock_.Pass(), raster_resource_.get(), - output_resource_, raster_source, raster_full_rect, raster_dirty_rect, - scale, include_images); - // Store the content id of the resource to return to the pool. - raster_content_id_ = new_content_id; + worker_pool_->PlaybackAndCopyOnWorkerThread( + resource_, &lock_, raster_source, raster_full_rect, raster_dirty_rect, + scale, include_images, previous_content_id_, new_content_id); } private: OneCopyTileTaskWorkerPool* worker_pool_; - ResourceProvider* resource_provider_; - ResourcePool* resource_pool_; - const Resource* output_resource_; - uint64_t raster_content_id_; - scoped_ptr raster_resource_; - scoped_ptr lock_; - CopySequenceNumber sequence_; + const Resource* resource_; + ResourceProvider::ScopedWriteLockGL lock_; + uint64_t previous_content_id_; DISALLOW_COPY_AND_ASSIGN(RasterBufferImpl); }; -// Number of in-flight copy operations to allow. -const int kMaxCopyOperations = 32; - -// Delay been checking for copy operations to complete. -const int kCheckForCompletedCopyOperationsTickRateMs = 1; +// Delay between checking for query result to be available. +const int kCheckForQueryResultAvailableTickRateMs = 1; -// Number of failed attempts to allow before we perform a check that will -// wait for copy operations to complete if needed. -const int kFailedAttemptsBeforeWaitIfNeeded = 256; +// Number of attempts to allow before we perform a check that will wait for +// query to complete. +const int kMaxCheckForQueryResultAvailableAttempts = 256; // 4MiB is the size of 4 512x512 tiles, which has proven to be a good // default batch size for copy operations. const int kMaxBytesPerCopyOperation = 1024 * 1024 * 4; +// Delay before a staging buffer might be released. 
+const int kStagingBufferExpirationDelayMs = 1000; + +bool CheckForQueryResult(gpu::gles2::GLES2Interface* gl, unsigned query_id) { + unsigned complete = 1; + gl->GetQueryObjectuivEXT(query_id, GL_QUERY_RESULT_AVAILABLE_EXT, &complete); + return !!complete; +} + +void WaitForQueryResult(gpu::gles2::GLES2Interface* gl, unsigned query_id) { + TRACE_EVENT0("cc", "WaitForQueryResult"); + + int attempts_left = kMaxCheckForQueryResultAvailableAttempts; + while (attempts_left--) { + if (CheckForQueryResult(gl, query_id)) + break; + + base::PlatformThread::Sleep(base::TimeDelta::FromMilliseconds( + kCheckForQueryResultAvailableTickRateMs)); + } + + unsigned result = 0; + gl->GetQueryObjectuivEXT(query_id, GL_QUERY_RESULT_EXT, &result); +} + } // namespace -OneCopyTileTaskWorkerPool::CopyOperation::CopyOperation( - scoped_ptr src_write_lock, - const Resource* src, - const Resource* dst, - const gfx::Rect& rect) - : src_write_lock(src_write_lock.Pass()), src(src), dst(dst), rect(rect) { +OneCopyTileTaskWorkerPool::StagingBuffer::StagingBuffer(const gfx::Size& size) + : size(size), texture_id(0), image_id(0), query_id(0), content_id(0) {} + +OneCopyTileTaskWorkerPool::StagingBuffer::~StagingBuffer() { + DCHECK_EQ(texture_id, 0u); + DCHECK_EQ(image_id, 0u); + DCHECK_EQ(query_id, 0u); } -OneCopyTileTaskWorkerPool::CopyOperation::~CopyOperation() { +void OneCopyTileTaskWorkerPool::StagingBuffer::DestroyGLResources( + gpu::gles2::GLES2Interface* gl) { + if (query_id) { + gl->DeleteQueriesEXT(1, &query_id); + query_id = 0; + } + if (image_id) { + gl->DestroyImageCHROMIUM(image_id); + image_id = 0; + } + if (texture_id) { + gl->DeleteTextures(1, &texture_id); + texture_id = 0; + } +} + +void OneCopyTileTaskWorkerPool::StagingBuffer::OnMemoryDump( + base::trace_event::ProcessMemoryDump* pmd, + ResourceFormat format, + bool in_free_list) const { + if (!gpu_memory_buffer) + return; + + gfx::GpuMemoryBufferId buffer_id = gpu_memory_buffer->GetId(); + std::string buffer_dump_name = + base::StringPrintf("cc/one_copy/staging_memory/buffer_%d", buffer_id); + base::trace_event::MemoryAllocatorDump* buffer_dump = + pmd->CreateAllocatorDump(buffer_dump_name); + + uint64_t buffer_size_in_bytes = + ResourceUtil::UncheckedSizeInBytes(size, format); + buffer_dump->AddScalar(base::trace_event::MemoryAllocatorDump::kNameSize, + base::trace_event::MemoryAllocatorDump::kUnitsBytes, + buffer_size_in_bytes); + buffer_dump->AddScalar("free_size", + base::trace_event::MemoryAllocatorDump::kUnitsBytes, + in_free_list ? buffer_size_in_bytes : 0); + + // Emit an ownership edge towards a global allocator dump node. + const uint64 tracing_process_id = + base::trace_event::MemoryDumpManager::GetInstance() + ->GetTracingProcessId(); + base::trace_event::MemoryAllocatorDumpGuid shared_buffer_guid = + gfx::GetGpuMemoryBufferGUIDForTracing(tracing_process_id, buffer_id); + pmd->CreateSharedGlobalAllocatorDump(shared_buffer_guid); + + // By creating an edge with a higher |importance| (w.r.t. browser-side dumps) + // the tracing UI will account the effective size of the buffer to the child. 
+ const int kImportance = 2; + pmd->AddOwnershipEdge(buffer_dump->guid(), shared_buffer_guid, kImportance); } // static @@ -134,51 +166,52 @@ scoped_ptr OneCopyTileTaskWorkerPool::Create( TaskGraphRunner* task_graph_runner, ContextProvider* context_provider, ResourceProvider* resource_provider, - ResourcePool* resource_pool, int max_copy_texture_chromium_size, - bool have_persistent_gpu_memory_buffers) { + bool use_persistent_gpu_memory_buffers, + unsigned image_target, + int max_staging_buffers) { return make_scoped_ptr(new OneCopyTileTaskWorkerPool( - task_runner, task_graph_runner, context_provider, resource_provider, - resource_pool, max_copy_texture_chromium_size, - have_persistent_gpu_memory_buffers)); + task_runner, task_graph_runner, resource_provider, + max_copy_texture_chromium_size, use_persistent_gpu_memory_buffers, + image_target, max_staging_buffers)); } OneCopyTileTaskWorkerPool::OneCopyTileTaskWorkerPool( base::SequencedTaskRunner* task_runner, TaskGraphRunner* task_graph_runner, - ContextProvider* context_provider, ResourceProvider* resource_provider, - ResourcePool* resource_pool, int max_copy_texture_chromium_size, - bool have_persistent_gpu_memory_buffers) + bool use_persistent_gpu_memory_buffers, + unsigned image_target, + int max_staging_buffers) : task_runner_(task_runner), task_graph_runner_(task_graph_runner), namespace_token_(task_graph_runner->GetNamespaceToken()), - context_provider_(context_provider), resource_provider_(resource_provider), - resource_pool_(resource_pool), max_bytes_per_copy_operation_( max_copy_texture_chromium_size ? std::min(kMaxBytesPerCopyOperation, max_copy_texture_chromium_size) : kMaxBytesPerCopyOperation), - have_persistent_gpu_memory_buffers_(have_persistent_gpu_memory_buffers), - last_issued_copy_operation_(0), - last_flushed_copy_operation_(0), - lock_(), - copy_operation_count_cv_(&lock_), + use_persistent_gpu_memory_buffers_(use_persistent_gpu_memory_buffers), + image_target_(image_target), bytes_scheduled_since_last_flush_(0), - issued_copy_operation_count_(0), - next_copy_operation_sequence_(1), - check_for_completed_copy_operations_pending_(false), - shutdown_(false), + max_staging_buffers_(max_staging_buffers), + staging_buffer_expiration_delay_( + base::TimeDelta::FromMilliseconds(kStagingBufferExpirationDelayMs)), + reduce_memory_usage_pending_(false), weak_ptr_factory_(this), task_set_finished_weak_ptr_factory_(this) { - DCHECK(context_provider_); + base::trace_event::MemoryDumpManager::GetInstance()->RegisterDumpProvider( + this, base::ThreadTaskRunnerHandle::Get()); + reduce_memory_usage_callback_ = + base::Bind(&OneCopyTileTaskWorkerPool::ReduceMemoryUsage, + weak_ptr_factory_.GetWeakPtr()); } OneCopyTileTaskWorkerPool::~OneCopyTileTaskWorkerPool() { - DCHECK_EQ(pending_copy_operations_.size(), 0u); + base::trace_event::MemoryDumpManager::GetInstance()->UnregisterDumpProvider( + this); } TileTaskRunner* OneCopyTileTaskWorkerPool::AsTileTaskRunner() { @@ -192,28 +225,21 @@ void OneCopyTileTaskWorkerPool::SetClient(TileTaskRunnerClient* client) { void OneCopyTileTaskWorkerPool::Shutdown() { TRACE_EVENT0("cc", "OneCopyTileTaskWorkerPool::Shutdown"); - { - base::AutoLock lock(lock_); - - shutdown_ = true; - copy_operation_count_cv_.Signal(); - } - TaskGraph empty; task_graph_runner_->ScheduleTasks(namespace_token_, &empty); task_graph_runner_->WaitForTasksToFinishRunning(namespace_token_); + + base::AutoLock lock(lock_); + + if (buffers_.empty()) + return; + + ReleaseBuffersNotUsedSince(base::TimeTicks() + 
base::TimeDelta::Max()); } void OneCopyTileTaskWorkerPool::ScheduleTasks(TileTaskQueue* queue) { TRACE_EVENT0("cc", "OneCopyTileTaskWorkerPool::ScheduleTasks"); -#if DCHECK_IS_ON() - { - base::AutoLock lock(lock_); - DCHECK(!shutdown_); - } -#endif - if (tasks_pending_.none()) TRACE_EVENT_ASYNC_BEGIN0("cc", "ScheduledTasks", this); @@ -238,8 +264,6 @@ void OneCopyTileTaskWorkerPool::ScheduleTasks(TileTaskQueue* queue) { task_set_finished_weak_ptr_factory_.GetWeakPtr(), task_set)); } - resource_pool_->CheckBusyResources(false); - for (TileTaskQueue::Item::Vector::const_iterator it = queue->items.begin(); it != queue->items.end(); ++it) { const TileTaskQueue::Item& item = *it; @@ -266,14 +290,19 @@ void OneCopyTileTaskWorkerPool::ScheduleTasks(TileTaskQueue* queue) { } ScheduleTasksOnOriginThread(this, &graph_); + + // Barrier to sync any new resources to the worker context. + resource_provider_->output_surface() + ->context_provider() + ->ContextGL() + ->OrderingBarrierCHROMIUM(); + task_graph_runner_->ScheduleTasks(namespace_token_, &graph_); std::copy(new_task_set_finished_tasks, new_task_set_finished_tasks + kNumberOfTaskSets, task_set_finished_tasks_); - resource_pool_->ReduceResourceUsage(); - TRACE_EVENT_ASYNC_STEP_INTO1("cc", "ScheduledTasks", this, "running", "state", StateAsValue()); } @@ -312,10 +341,9 @@ scoped_ptr OneCopyTileTaskWorkerPool::AcquireBufferForRaster( // TODO(danakj): If resource_content_id != 0, we only need to copy/upload // the dirty rect. DCHECK_EQ(resource->format(), resource_provider_->best_texture_format()); - return make_scoped_ptr( - new RasterBufferImpl(this, resource_provider_, resource_pool_, - resource_provider_->best_texture_format(), resource, - previous_content_id)); + return make_scoped_ptr(new RasterBufferImpl( + this, resource_provider_, resource_provider_->best_texture_format(), + resource, previous_content_id)); } void OneCopyTileTaskWorkerPool::ReleaseBufferForRaster( @@ -323,238 +351,363 @@ void OneCopyTileTaskWorkerPool::ReleaseBufferForRaster( // Nothing to do here. RasterBufferImpl destructor cleans up after itself. } -CopySequenceNumber -OneCopyTileTaskWorkerPool::PlaybackAndScheduleCopyOnWorkerThread( - bool reusing_raster_resource, - scoped_ptr - raster_resource_write_lock, - const Resource* raster_resource, - const Resource* output_resource, +void OneCopyTileTaskWorkerPool::PlaybackAndCopyOnWorkerThread( + const Resource* resource, + const ResourceProvider::ScopedWriteLockGL* resource_lock, const RasterSource* raster_source, const gfx::Rect& raster_full_rect, const gfx::Rect& raster_dirty_rect, float scale, - bool include_images) { - gfx::GpuMemoryBuffer* gpu_memory_buffer = - raster_resource_write_lock->GetGpuMemoryBuffer(); - if (gpu_memory_buffer) { - void* data = NULL; - bool rv = gpu_memory_buffer->Map(&data); - DCHECK(rv); - int stride; - gpu_memory_buffer->GetStride(&stride); - // TileTaskWorkerPool::PlaybackToMemory only supports unsigned strides. 
- DCHECK_GE(stride, 0); + bool include_images, + uint64_t previous_content_id, + uint64_t new_content_id) { + base::AutoLock lock(lock_); - gfx::Rect playback_rect = raster_full_rect; - if (reusing_raster_resource) { - playback_rect.Intersect(raster_dirty_rect); - } - DCHECK(!playback_rect.IsEmpty()) - << "Why are we rastering a tile that's not dirty?"; - TileTaskWorkerPool::PlaybackToMemory( - data, raster_resource->format(), raster_resource->size(), - static_cast(stride), raster_source, raster_full_rect, - playback_rect, scale, include_images); - gpu_memory_buffer->Unmap(); - } + scoped_ptr staging_buffer = + AcquireStagingBuffer(resource, previous_content_id); + DCHECK(staging_buffer); - base::AutoLock lock(lock_); + { + base::AutoUnlock unlock(lock_); + + // Allocate GpuMemoryBuffer if necessary. + if (!staging_buffer->gpu_memory_buffer) { + staging_buffer->gpu_memory_buffer = + resource_provider_->gpu_memory_buffer_manager() + ->AllocateGpuMemoryBuffer( + staging_buffer->size, + BufferFormat(resource_provider_->best_texture_format()), + use_persistent_gpu_memory_buffers_ + ? gfx::BufferUsage::PERSISTENT_MAP + : gfx::BufferUsage::MAP); + } - CopySequenceNumber sequence = 0; - int bytes_per_row = (BitsPerPixel(raster_resource->format()) * - raster_resource->size().width()) / - 8; - int chunk_size_in_rows = - std::max(1, max_bytes_per_copy_operation_ / bytes_per_row); - // Align chunk size to 4. Required to support compressed texture formats. - chunk_size_in_rows = MathUtil::UncheckedRoundUp(chunk_size_in_rows, 4); - int y = 0; - int height = raster_resource->size().height(); - while (y < height) { - int failed_attempts = 0; - while ((pending_copy_operations_.size() + issued_copy_operation_count_) >= - kMaxCopyOperations) { - // Ignore limit when shutdown is set. - if (shutdown_) - break; + gfx::Rect playback_rect = raster_full_rect; + if (use_persistent_gpu_memory_buffers_ && previous_content_id) { + // Reduce playback rect to dirty region if the content id of the staging + // buffer matches the prevous content id. + if (previous_content_id == staging_buffer->content_id) + playback_rect.Intersect(raster_dirty_rect); + } - ++failed_attempts; + if (staging_buffer->gpu_memory_buffer) { + void* data = nullptr; + bool rv = staging_buffer->gpu_memory_buffer->Map(&data); + DCHECK(rv); + int stride; + staging_buffer->gpu_memory_buffer->GetStride(&stride); + // TileTaskWorkerPool::PlaybackToMemory only supports unsigned strides. + DCHECK_GE(stride, 0); + + DCHECK(!playback_rect.IsEmpty()) + << "Why are we rastering a tile that's not dirty?"; + TileTaskWorkerPool::PlaybackToMemory( + data, resource_provider_->best_texture_format(), staging_buffer->size, + static_cast(stride), raster_source, raster_full_rect, + playback_rect, scale, include_images); + staging_buffer->gpu_memory_buffer->Unmap(); + staging_buffer->content_id = new_content_id; + } + } - // Schedule a check that will also wait for operations to complete - // after too many failed attempts. - bool wait_if_needed = failed_attempts > kFailedAttemptsBeforeWaitIfNeeded; + ContextProvider* context_provider = + resource_provider_->output_surface()->worker_context_provider(); + DCHECK(context_provider); - // Schedule a check for completed copy operations if too many operations - // are currently in-flight. 
- ScheduleCheckForCompletedCopyOperationsWithLockAcquired(wait_if_needed); + { + ContextProvider::ScopedContextLock scoped_context(context_provider); + + gpu::gles2::GLES2Interface* gl = scoped_context.ContextGL(); + DCHECK(gl); + + if (!staging_buffer->texture_id) { + gl->GenTextures(1, &staging_buffer->texture_id); + gl->BindTexture(image_target_, staging_buffer->texture_id); + gl->TexParameteri(image_target_, GL_TEXTURE_MIN_FILTER, GL_NEAREST); + gl->TexParameteri(image_target_, GL_TEXTURE_MAG_FILTER, GL_NEAREST); + gl->TexParameteri(image_target_, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE); + gl->TexParameteri(image_target_, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE); + if (staging_buffer->gpu_memory_buffer) { + DCHECK(!staging_buffer->image_id); + staging_buffer->image_id = gl->CreateImageCHROMIUM( + staging_buffer->gpu_memory_buffer->AsClientBuffer(), + staging_buffer->size.width(), staging_buffer->size.height(), + GLInternalFormat(resource_provider_->best_texture_format())); + gl->BindTexImage2DCHROMIUM(image_target_, staging_buffer->image_id); + } else { + gl->BindTexture(image_target_, staging_buffer->texture_id); + gl->ReleaseTexImage2DCHROMIUM(image_target_, staging_buffer->image_id); + gl->BindTexImage2DCHROMIUM(image_target_, staging_buffer->image_id); + } + } - { - TRACE_EVENT0("cc", "WaitingForCopyOperationsToComplete"); + if (resource_provider_->use_sync_query()) { + if (!staging_buffer->query_id) + gl->GenQueriesEXT(1, &staging_buffer->query_id); + +#if defined(OS_CHROMEOS) + // TODO(reveman): This avoids a performance problem on some ChromeOS + // devices. This needs to be removed to support native GpuMemoryBuffer + // implementations. crbug.com/436314 + gl->BeginQueryEXT(GL_COMMANDS_ISSUED_CHROMIUM, staging_buffer->query_id); +#else + gl->BeginQueryEXT(GL_COMMANDS_COMPLETED_CHROMIUM, + staging_buffer->query_id); +#endif + } - // Wait for in-flight copy operations to drop below limit. - copy_operation_count_cv_.Wait(); + int bytes_per_row = + (BitsPerPixel(resource_provider_->best_texture_format()) * + resource->size().width()) / + 8; + int chunk_size_in_rows = + std::max(1, max_bytes_per_copy_operation_ / bytes_per_row); + // Align chunk size to 4. Required to support compressed texture formats. + chunk_size_in_rows = MathUtil::UncheckedRoundUp(chunk_size_in_rows, 4); + int y = 0; + int height = resource->size().height(); + while (y < height) { + // Copy at most |chunk_size_in_rows|. + int rows_to_copy = std::min(chunk_size_in_rows, height - y); + DCHECK_GT(rows_to_copy, 0); + + gl->CopySubTextureCHROMIUM(GL_TEXTURE_2D, staging_buffer->texture_id, + resource_lock->texture_id(), 0, y, 0, y, + resource->size().width(), rows_to_copy, false, + false, false); + y += rows_to_copy; + + // Increment |bytes_scheduled_since_last_flush_| by the amount of memory + // used for this copy operation. + bytes_scheduled_since_last_flush_ += rows_to_copy * bytes_per_row; + + if (bytes_scheduled_since_last_flush_ >= max_bytes_per_copy_operation_) { + gl->ShallowFlushCHROMIUM(); + bytes_scheduled_since_last_flush_ = 0; } } - // There may be more work available, so wake up another worker thread. - copy_operation_count_cv_.Signal(); - - // Copy at most |chunk_size_in_rows|. - int rows_to_copy = std::min(chunk_size_in_rows, height - y); - DCHECK_GT(rows_to_copy, 0); - - // |raster_resource_write_lock| is passed to the first copy operation as it - // needs to be released before we can issue a copy. 
- pending_copy_operations_.push_back(make_scoped_ptr(new CopyOperation( - raster_resource_write_lock.Pass(), raster_resource, output_resource, - gfx::Rect(0, y, raster_resource->size().width(), rows_to_copy)))); - y += rows_to_copy; - - // Acquire a sequence number for this copy operation. - sequence = next_copy_operation_sequence_++; - - // Increment |bytes_scheduled_since_last_flush_| by the amount of memory - // used for this copy operation. - bytes_scheduled_since_last_flush_ += rows_to_copy * bytes_per_row; - - // Post task that will advance last flushed copy operation to |sequence| - // when |bytes_scheduled_since_last_flush_| has reached - // |max_bytes_per_copy_operation_|. - if (bytes_scheduled_since_last_flush_ >= max_bytes_per_copy_operation_) { - task_runner_->PostTask( - FROM_HERE, - base::Bind(&OneCopyTileTaskWorkerPool::AdvanceLastFlushedCopyTo, - weak_ptr_factory_.GetWeakPtr(), sequence)); - bytes_scheduled_since_last_flush_ = 0; + if (resource_provider_->use_sync_query()) { +#if defined(OS_CHROMEOS) + gl->EndQueryEXT(GL_COMMANDS_ISSUED_CHROMIUM); +#else + gl->EndQueryEXT(GL_COMMANDS_COMPLETED_CHROMIUM); +#endif } + + // Barrier to sync worker context output to cc context. + gl->OrderingBarrierCHROMIUM(); } - return sequence; + staging_buffer->last_usage = base::TimeTicks::Now(); + busy_buffers_.push_back(staging_buffer.Pass()); + + ScheduleReduceMemoryUsage(); } -void OneCopyTileTaskWorkerPool::AdvanceLastIssuedCopyTo( - CopySequenceNumber sequence) { - if (last_issued_copy_operation_ >= sequence) - return; +bool OneCopyTileTaskWorkerPool::OnMemoryDump( + const base::trace_event::MemoryDumpArgs& args, + base::trace_event::ProcessMemoryDump* pmd) { + base::AutoLock lock(lock_); - IssueCopyOperations(sequence - last_issued_copy_operation_); - last_issued_copy_operation_ = sequence; + for (const auto& buffer : buffers_) { + buffer->OnMemoryDump(pmd, resource_provider_->best_texture_format(), + std::find(free_buffers_.begin(), free_buffers_.end(), + buffer) != free_buffers_.end()); + } + + return true; } -void OneCopyTileTaskWorkerPool::AdvanceLastFlushedCopyTo( - CopySequenceNumber sequence) { - if (last_flushed_copy_operation_ >= sequence) - return; +scoped_ptr +OneCopyTileTaskWorkerPool::AcquireStagingBuffer(const Resource* resource, + uint64_t previous_content_id) { + lock_.AssertAcquired(); - AdvanceLastIssuedCopyTo(sequence); + scoped_ptr staging_buffer; - // Flush all issued copy operations. 
- context_provider_->ContextGL()->ShallowFlushCHROMIUM(); - last_flushed_copy_operation_ = last_issued_copy_operation_; -} + ContextProvider* context_provider = + resource_provider_->output_surface()->worker_context_provider(); + DCHECK(context_provider); -void OneCopyTileTaskWorkerPool::OnTaskSetFinished(TaskSet task_set) { - TRACE_EVENT1("cc", "OneCopyTileTaskWorkerPool::OnTaskSetFinished", "task_set", - task_set); + ContextProvider::ScopedContextLock scoped_context(context_provider); - DCHECK(tasks_pending_[task_set]); - tasks_pending_[task_set] = false; - if (tasks_pending_.any()) { - TRACE_EVENT_ASYNC_STEP_INTO1("cc", "ScheduledTasks", this, "running", - "state", StateAsValue()); - } else { - TRACE_EVENT_ASYNC_END0("cc", "ScheduledTasks", this); - } - client_->DidFinishRunningTileTasks(task_set); -} + gpu::gles2::GLES2Interface* gl = scoped_context.ContextGL(); + DCHECK(gl); -void OneCopyTileTaskWorkerPool::IssueCopyOperations(int64 count) { - TRACE_EVENT1("cc", "OneCopyTileTaskWorkerPool::IssueCopyOperations", "count", - count); + // Check if any busy buffers have become available. + if (resource_provider_->use_sync_query()) { + while (!busy_buffers_.empty()) { + if (!CheckForQueryResult(gl, busy_buffers_.front()->query_id)) + break; - CopyOperation::Deque copy_operations; + free_buffers_.push_back(busy_buffers_.take_front()); + } + } - { - base::AutoLock lock(lock_); + // Wait for number of non-free buffers to become less than the limit. + while ((buffers_.size() - free_buffers_.size()) >= max_staging_buffers_) { + // Stop when there are no more busy buffers to wait for. + if (busy_buffers_.empty()) + break; - for (int64 i = 0; i < count; ++i) { - DCHECK(!pending_copy_operations_.empty()); - copy_operations.push_back(pending_copy_operations_.take_front()); + if (resource_provider_->use_sync_query()) { + WaitForQueryResult(gl, busy_buffers_.front()->query_id); + free_buffers_.push_back(busy_buffers_.take_front()); + } else { + // Fall-back to glFinish if CHROMIUM_sync_query is not available. + gl->Finish(); + while (!busy_buffers_.empty()) + free_buffers_.push_back(busy_buffers_.take_front()); } + } + + // Find a staging buffer that allows us to perform partial raster when + // using persistent GpuMemoryBuffers. + if (use_persistent_gpu_memory_buffers_ && previous_content_id) { + StagingBufferDeque::iterator it = + std::find_if(free_buffers_.begin(), free_buffers_.end(), + [previous_content_id](const StagingBuffer* buffer) { + return buffer->content_id == previous_content_id; + }); + if (it != free_buffers_.end()) + staging_buffer = free_buffers_.take(it); + } - // Increment |issued_copy_operation_count_| to reflect the transition of - // copy operations from "pending" to "issued" state. - issued_copy_operation_count_ += copy_operations.size(); + // Find staging buffer of correct size. + if (!staging_buffer) { + StagingBufferDeque::iterator it = + std::find_if(free_buffers_.begin(), free_buffers_.end(), + [resource](const StagingBuffer* buffer) { + return buffer->size == resource->size(); + }); + if (it != free_buffers_.end()) + staging_buffer = free_buffers_.take(it); } - while (!copy_operations.empty()) { - scoped_ptr copy_operation = copy_operations.take_front(); + // Create new staging buffer if necessary. + if (!staging_buffer) { + staging_buffer = make_scoped_ptr(new StagingBuffer(resource->size())); + buffers_.insert(staging_buffer.get()); + } - // Remove the write lock. - copy_operation->src_write_lock.reset(); + // Release enough free buffers to stay within the limit. 
+ while (buffers_.size() > max_staging_buffers_) { + if (free_buffers_.empty()) + break; - // Copy contents of source resource to destination resource. - resource_provider_->CopyResource(copy_operation->src->id(), - copy_operation->dst->id(), - copy_operation->rect); + free_buffers_.front()->DestroyGLResources(gl); + buffers_.erase(free_buffers_.front()); + free_buffers_.take_front(); } + + return staging_buffer.Pass(); } -void OneCopyTileTaskWorkerPool:: - ScheduleCheckForCompletedCopyOperationsWithLockAcquired( - bool wait_if_needed) { +base::TimeTicks OneCopyTileTaskWorkerPool::GetUsageTimeForLRUBuffer() { lock_.AssertAcquired(); - if (check_for_completed_copy_operations_pending_) - return; + if (!free_buffers_.empty()) + return free_buffers_.front()->last_usage; + + if (!busy_buffers_.empty()) + return busy_buffers_.front()->last_usage; + + return base::TimeTicks(); +} + +void OneCopyTileTaskWorkerPool::ScheduleReduceMemoryUsage() { + lock_.AssertAcquired(); - base::TimeTicks now = base::TimeTicks::Now(); + if (reduce_memory_usage_pending_) + return; - // Schedule a check for completed copy operations as soon as possible but - // don't allow two consecutive checks to be scheduled to run less than the - // tick rate apart. - base::TimeTicks next_check_for_completed_copy_operations_time = - std::max(last_check_for_completed_copy_operations_time_ + - base::TimeDelta::FromMilliseconds( - kCheckForCompletedCopyOperationsTickRateMs), - now); + reduce_memory_usage_pending_ = true; + // Schedule a call to ReduceMemoryUsage at the time when the LRU buffer + // should be released. + base::TimeTicks reduce_memory_usage_time = + GetUsageTimeForLRUBuffer() + staging_buffer_expiration_delay_; task_runner_->PostDelayedTask( - FROM_HERE, - base::Bind(&OneCopyTileTaskWorkerPool::CheckForCompletedCopyOperations, - weak_ptr_factory_.GetWeakPtr(), wait_if_needed), - next_check_for_completed_copy_operations_time - now); - - last_check_for_completed_copy_operations_time_ = - next_check_for_completed_copy_operations_time; - check_for_completed_copy_operations_pending_ = true; + FROM_HERE, reduce_memory_usage_callback_, + reduce_memory_usage_time - base::TimeTicks::Now()); } -void OneCopyTileTaskWorkerPool::CheckForCompletedCopyOperations( - bool wait_if_needed) { - TRACE_EVENT1("cc", - "OneCopyTileTaskWorkerPool::CheckForCompletedCopyOperations", - "wait_if_needed", wait_if_needed); +void OneCopyTileTaskWorkerPool::ReduceMemoryUsage() { + base::AutoLock lock(lock_); + + reduce_memory_usage_pending_ = false; + + if (free_buffers_.empty() && busy_buffers_.empty()) + return; + + base::TimeTicks current_time = base::TimeTicks::Now(); + ReleaseBuffersNotUsedSince(current_time - staging_buffer_expiration_delay_); - resource_pool_->CheckBusyResources(wait_if_needed); + if (free_buffers_.empty() && busy_buffers_.empty()) + return; + + reduce_memory_usage_pending_ = true; + + // Schedule another call to ReduceMemoryUsage at the time when the next + // buffer should be released. 
+ base::TimeTicks reduce_memory_usage_time = + GetUsageTimeForLRUBuffer() + staging_buffer_expiration_delay_; + task_runner_->PostDelayedTask(FROM_HERE, reduce_memory_usage_callback_, + reduce_memory_usage_time - current_time); +} + +void OneCopyTileTaskWorkerPool::ReleaseBuffersNotUsedSince( + base::TimeTicks time) { + lock_.AssertAcquired(); + + ContextProvider* context_provider = + resource_provider_->output_surface()->worker_context_provider(); + DCHECK(context_provider); { - base::AutoLock lock(lock_); + ContextProvider::ScopedContextLock scoped_context(context_provider); + + gpu::gles2::GLES2Interface* gl = scoped_context.ContextGL(); + DCHECK(gl); - DCHECK(check_for_completed_copy_operations_pending_); - check_for_completed_copy_operations_pending_ = false; + // Note: Front buffer is guaranteed to be LRU so we can stop releasing + // buffers as soon as we find a buffer that has been used since |time|. + while (!free_buffers_.empty()) { + if (free_buffers_.front()->last_usage > time) + return; - // The number of busy resources in the pool reflects the number of issued - // copy operations that have not yet completed. - issued_copy_operation_count_ = resource_pool_->busy_resource_count(); + free_buffers_.front()->DestroyGLResources(gl); + buffers_.erase(free_buffers_.front()); + free_buffers_.take_front(); + } + + while (!busy_buffers_.empty()) { + if (busy_buffers_.front()->last_usage > time) + return; - // There may be work blocked on too many in-flight copy operations, so wake - // up a worker thread. - copy_operation_count_cv_.Signal(); + busy_buffers_.front()->DestroyGLResources(gl); + buffers_.erase(busy_buffers_.front()); + busy_buffers_.take_front(); + } } } +void OneCopyTileTaskWorkerPool::OnTaskSetFinished(TaskSet task_set) { + TRACE_EVENT1("cc", "OneCopyTileTaskWorkerPool::OnTaskSetFinished", "task_set", + task_set); + + DCHECK(tasks_pending_[task_set]); + tasks_pending_[task_set] = false; + if (tasks_pending_.any()) { + TRACE_EVENT_ASYNC_STEP_INTO1("cc", "ScheduledTasks", this, "running", + "state", StateAsValue()); + } else { + TRACE_EVENT_ASYNC_END0("cc", "ScheduledTasks", this); + } + client_->DidFinishRunningTileTasks(task_set); +} + scoped_refptr OneCopyTileTaskWorkerPool::StateAsValue() const { scoped_refptr state = @@ -573,20 +726,14 @@ OneCopyTileTaskWorkerPool::StateAsValue() const { void OneCopyTileTaskWorkerPool::StagingStateAsValueInto( base::trace_event::TracedValue* staging_state) const { - staging_state->SetInteger( - "staging_resource_count", - static_cast(resource_pool_->total_resource_count())); - staging_state->SetInteger( - "bytes_used_for_staging_resources", - static_cast(resource_pool_->total_memory_usage_bytes())); - staging_state->SetInteger( - "pending_copy_count", - static_cast(resource_pool_->total_resource_count() - - resource_pool_->acquired_resource_count())); - staging_state->SetInteger( - "bytes_pending_copy", - static_cast(resource_pool_->total_memory_usage_bytes() - - resource_pool_->acquired_memory_usage_bytes())); + base::AutoLock lock(lock_); + + staging_state->SetInteger("staging_buffer_count", + static_cast(buffers_.size())); + staging_state->SetInteger("busy_count", + static_cast(busy_buffers_.size())); + staging_state->SetInteger("free_count", + static_cast(free_buffers_.size())); } } // namespace cc diff --git a/cc/raster/one_copy_tile_task_worker_pool.h b/cc/raster/one_copy_tile_task_worker_pool.h index 4e050b4c2023..6554fccb2569 100644 --- a/cc/raster/one_copy_tile_task_worker_pool.h +++ 
b/cc/raster/one_copy_tile_task_worker_pool.h @@ -5,8 +5,12 @@ #ifndef CC_RASTER_ONE_COPY_TILE_TASK_WORKER_POOL_H_ #define CC_RASTER_ONE_COPY_TILE_TASK_WORKER_POOL_H_ +#include + #include "base/memory/weak_ptr.h" #include "base/synchronization/lock.h" +#include "base/time/time.h" +#include "base/trace_event/memory_dump_provider.h" #include "base/values.h" #include "cc/base/scoped_ptr_deque.h" #include "cc/output/context_provider.h" @@ -21,15 +25,20 @@ class TracedValue; } } +namespace gpu { +namespace gles2 { +class GLES2Interface; +} +} + namespace cc { class ResourcePool; -class ScopedResource; - -typedef int64 CopySequenceNumber; -class CC_EXPORT OneCopyTileTaskWorkerPool : public TileTaskWorkerPool, - public TileTaskRunner, - public TileTaskClient { +class CC_EXPORT OneCopyTileTaskWorkerPool + : public TileTaskWorkerPool, + public TileTaskRunner, + public TileTaskClient, + public base::trace_event::MemoryDumpProvider { public: ~OneCopyTileTaskWorkerPool() override; @@ -38,9 +47,10 @@ class CC_EXPORT OneCopyTileTaskWorkerPool : public TileTaskWorkerPool, TaskGraphRunner* task_graph_runner, ContextProvider* context_provider, ResourceProvider* resource_provider, - ResourcePool* resource_pool, int max_copy_texture_chromium_size, - bool have_persistent_gpu_memory_buffers); + bool use_persistent_gpu_memory_buffers, + unsigned image_target, + int max_staging_buffers); // Overridden from TileTaskWorkerPool: TileTaskRunner* AsTileTaskRunner() override; @@ -60,61 +70,58 @@ class CC_EXPORT OneCopyTileTaskWorkerPool : public TileTaskWorkerPool, uint64_t previous_content_id) override; void ReleaseBufferForRaster(scoped_ptr buffer) override; - // Playback raster source and schedule copy of |raster_resource| resource to - // |output_resource|. Returns a non-zero sequence number for this copy - // operation. - CopySequenceNumber PlaybackAndScheduleCopyOnWorkerThread( - bool reusing_raster_resource, - scoped_ptr - raster_resource_write_lock, - const Resource* raster_resource, - const Resource* output_resource, + // Overridden from base::trace_event::MemoryDumpProvider: + bool OnMemoryDump(const base::trace_event::MemoryDumpArgs& args, + base::trace_event::ProcessMemoryDump* pmd) override; + + // Playback raster source and copy result into |resource|. + void PlaybackAndCopyOnWorkerThread( + const Resource* resource, + const ResourceProvider::ScopedWriteLockGL* resource_lock, const RasterSource* raster_source, const gfx::Rect& raster_full_rect, const gfx::Rect& raster_dirty_rect, float scale, - bool include_images); - - // Issues copy operations until |sequence| has been processed. This will - // return immediately if |sequence| has already been processed. 
- void AdvanceLastIssuedCopyTo(CopySequenceNumber sequence); - - bool have_persistent_gpu_memory_buffers() const { - return have_persistent_gpu_memory_buffers_; - } + bool include_images, + uint64_t resource_content_id, + uint64_t previous_content_id); protected: OneCopyTileTaskWorkerPool(base::SequencedTaskRunner* task_runner, TaskGraphRunner* task_graph_runner, - ContextProvider* context_provider, ResourceProvider* resource_provider, - ResourcePool* resource_pool, int max_copy_texture_chromium_size, - bool have_persistent_gpu_memory_buffers); + bool use_persistent_gpu_memory_buffers, + unsigned image_target, + int max_staging_buffers); private: - struct CopyOperation { - typedef ScopedPtrDeque Deque; - - CopyOperation(scoped_ptr - src_write_lock, - const Resource* src, - const Resource* dst, - const gfx::Rect& rect); - ~CopyOperation(); - - scoped_ptr src_write_lock; - const Resource* src; - const Resource* dst; - const gfx::Rect rect; + struct StagingBuffer { + explicit StagingBuffer(const gfx::Size& size); + ~StagingBuffer(); + + void DestroyGLResources(gpu::gles2::GLES2Interface* gl); + void OnMemoryDump(base::trace_event::ProcessMemoryDump* pmd, + ResourceFormat format, + bool is_free) const; + + const gfx::Size size; + scoped_ptr gpu_memory_buffer; + base::TimeTicks last_usage; + unsigned texture_id; + unsigned image_id; + unsigned query_id; + uint64_t content_id; }; + scoped_ptr AcquireStagingBuffer(const Resource* resource, + uint64_t previous_content_id); + base::TimeTicks GetUsageTimeForLRUBuffer(); + void ScheduleReduceMemoryUsage(); + void ReduceMemoryUsage(); + void ReleaseBuffersNotUsedSince(base::TimeTicks time); + void OnTaskSetFinished(TaskSet task_set); - void AdvanceLastFlushedCopyTo(CopySequenceNumber sequence); - void IssueCopyOperations(int64 count); - void ScheduleCheckForCompletedCopyOperationsWithLockAcquired( - bool wait_if_needed); - void CheckForCompletedCopyOperations(bool wait_if_needed); scoped_refptr StateAsValue() const; void StagingStateAsValueInto( @@ -124,31 +131,30 @@ class CC_EXPORT OneCopyTileTaskWorkerPool : public TileTaskWorkerPool, TaskGraphRunner* task_graph_runner_; const NamespaceToken namespace_token_; TileTaskRunnerClient* client_; - ContextProvider* context_provider_; - ResourceProvider* resource_provider_; - ResourcePool* resource_pool_; + ResourceProvider* const resource_provider_; const int max_bytes_per_copy_operation_; - const bool have_persistent_gpu_memory_buffers_; + const bool use_persistent_gpu_memory_buffers_; + const unsigned image_target_; TaskSetCollection tasks_pending_; scoped_refptr task_set_finished_tasks_[kNumberOfTaskSets]; - CopySequenceNumber last_issued_copy_operation_; - CopySequenceNumber last_flushed_copy_operation_; // Task graph used when scheduling tasks and vector used to gather // completed tasks. TaskGraph graph_; Task::Vector completed_tasks_; - base::Lock lock_; + mutable base::Lock lock_; // |lock_| must be acquired when accessing the following members. 
- base::ConditionVariable copy_operation_count_cv_; + using StagingBufferSet = std::set; + StagingBufferSet buffers_; + using StagingBufferDeque = ScopedPtrDeque; + StagingBufferDeque free_buffers_; + StagingBufferDeque busy_buffers_; int bytes_scheduled_since_last_flush_; - size_t issued_copy_operation_count_; - CopyOperation::Deque pending_copy_operations_; - CopySequenceNumber next_copy_operation_sequence_; - bool check_for_completed_copy_operations_pending_; - base::TimeTicks last_check_for_completed_copy_operations_time_; - bool shutdown_; + size_t max_staging_buffers_; + const base::TimeDelta staging_buffer_expiration_delay_; + bool reduce_memory_usage_pending_; + base::Closure reduce_memory_usage_callback_; base::WeakPtrFactory weak_ptr_factory_; // "raster finished" tasks need their own factory as they need to be diff --git a/cc/raster/tile_task_worker_pool_perftest.cc b/cc/raster/tile_task_worker_pool_perftest.cc index b49c498ac053..88322c1fd7a8 100644 --- a/cc/raster/tile_task_worker_pool_perftest.cc +++ b/cc/raster/tile_task_worker_pool_perftest.cc @@ -265,13 +265,11 @@ class TileTaskWorkerPoolPerfTest break; case TILE_TASK_WORKER_POOL_TYPE_ONE_COPY: Create3dOutputSurfaceAndResourceProvider(); - staging_resource_pool_ = ResourcePool::Create(resource_provider_.get(), - GL_TEXTURE_2D); tile_task_worker_pool_ = OneCopyTileTaskWorkerPool::Create( task_runner_.get(), task_graph_runner_.get(), context_provider_.get(), resource_provider_.get(), - staging_resource_pool_.get(), std::numeric_limits::max(), - false); + std::numeric_limits::max(), false, GL_TEXTURE_2D, + std::numeric_limits::max()); break; case TILE_TASK_WORKER_POOL_TYPE_GPU: Create3dOutputSurfaceAndResourceProvider(); @@ -431,7 +429,6 @@ class TileTaskWorkerPoolPerfTest return std::string(); } - scoped_ptr staging_resource_pool_; scoped_ptr tile_task_worker_pool_; TestGpuMemoryBufferManager gpu_memory_buffer_manager_; TestSharedBitmapManager shared_bitmap_manager_; diff --git a/cc/raster/tile_task_worker_pool_unittest.cc b/cc/raster/tile_task_worker_pool_unittest.cc index 6038b00e89bd..5e2d1afad27d 100644 --- a/cc/raster/tile_task_worker_pool_unittest.cc +++ b/cc/raster/tile_task_worker_pool_unittest.cc @@ -41,6 +41,7 @@ namespace { const size_t kMaxTransferBufferUsageBytes = 10000U; const size_t kMaxBytesPerCopyOperation = 1000U; +const size_t kMaxStagingBuffers = 32U; // A resource of this dimension^2 * 4 must be greater than the above transfer // buffer constant. 
@@ -165,12 +166,11 @@ class TileTaskWorkerPoolTest break; case TILE_TASK_WORKER_POOL_TYPE_ONE_COPY: Create3dOutputSurfaceAndResourceProvider(); - staging_resource_pool_ = ResourcePool::Create(resource_provider_.get(), - GL_TEXTURE_2D); tile_task_worker_pool_ = OneCopyTileTaskWorkerPool::Create( base::ThreadTaskRunnerHandle::Get().get(), &task_graph_runner_, context_provider_.get(), resource_provider_.get(), - staging_resource_pool_.get(), kMaxBytesPerCopyOperation, false); + kMaxBytesPerCopyOperation, false, GL_TEXTURE_2D, + kMaxStagingBuffers); break; case TILE_TASK_WORKER_POOL_TYPE_GPU: Create3dOutputSurfaceAndResourceProvider(); @@ -332,7 +332,6 @@ class TileTaskWorkerPoolTest FakeOutputSurfaceClient output_surface_client_; scoped_ptr output_surface_; scoped_ptr resource_provider_; - scoped_ptr staging_resource_pool_; scoped_ptr tile_task_worker_pool_; TestGpuMemoryBufferManager gpu_memory_buffer_manager_; TestSharedBitmapManager shared_bitmap_manager_; diff --git a/cc/resources/resource_format.cc b/cc/resources/resource_format.cc index ce9fe7d93af8..c62ae934d728 100644 --- a/cc/resources/resource_format.cc +++ b/cc/resources/resource_format.cc @@ -87,4 +87,23 @@ GLenum GLInternalFormat(ResourceFormat format) { return GLDataFormat(format); } +gfx::BufferFormat BufferFormat(ResourceFormat format) { + switch (format) { + case RGBA_8888: + return gfx::BufferFormat::RGBA_8888; + case BGRA_8888: + return gfx::BufferFormat::BGRA_8888; + case RGBA_4444: + return gfx::BufferFormat::RGBA_4444; + case ALPHA_8: + case LUMINANCE_8: + case RGB_565: + case ETC1: + case RED_8: + break; + } + NOTREACHED(); + return gfx::BufferFormat::RGBA_8888; +} + } // namespace cc diff --git a/cc/resources/resource_format.h b/cc/resources/resource_format.h index d910bb091e7e..79b815fcd297 100644 --- a/cc/resources/resource_format.h +++ b/cc/resources/resource_format.h @@ -8,6 +8,7 @@ #include "base/logging.h" #include "cc/base/cc_export.h" #include "third_party/skia/include/core/SkBitmap.h" +#include "ui/gfx/gpu_memory_buffer.h" // TODO(prashant.n): Including third_party/khronos/GLES2/gl2.h causes // redefinition errors as macros/functions defined in it conflict with @@ -35,6 +36,7 @@ CC_EXPORT int BitsPerPixel(ResourceFormat format); CC_EXPORT GLenum GLDataType(ResourceFormat format); CC_EXPORT GLenum GLDataFormat(ResourceFormat format); CC_EXPORT GLenum GLInternalFormat(ResourceFormat format); +CC_EXPORT gfx::BufferFormat BufferFormat(ResourceFormat format); } // namespace cc diff --git a/cc/resources/resource_pool.cc b/cc/resources/resource_pool.cc index f3e20903d5f1..c09fa14a632a 100644 --- a/cc/resources/resource_pool.cc +++ b/cc/resources/resource_pool.cc @@ -172,15 +172,12 @@ void ResourcePool::DeleteResource(ScopedResource* resource) { delete resource; } -void ResourcePool::CheckBusyResources(bool wait_if_needed) { +void ResourcePool::CheckBusyResources() { ResourceList::iterator it = busy_resources_.begin(); while (it != busy_resources_.end()) { ScopedResource* resource = it->resource; - if (wait_if_needed) - resource_provider_->WaitReadLockIfNeeded(resource->id()); - if (resource_provider_->CanLockForWrite(resource->id())) { DidFinishUsingResource(resource, it->content_id); it = busy_resources_.erase(it); diff --git a/cc/resources/resource_pool.h b/cc/resources/resource_pool.h index efd1e015c940..6ea9329c183b 100644 --- a/cc/resources/resource_pool.h +++ b/cc/resources/resource_pool.h @@ -37,10 +37,7 @@ class CC_EXPORT ResourcePool : public base::trace_event::MemoryDumpProvider { size_t 
max_resource_count); void ReduceResourceUsage(); - // This might block if |wait_if_needed| is true and one of the currently - // busy resources has a read lock fence that needs to be waited upon before - // it can be locked for write again. - void CheckBusyResources(bool wait_if_needed); + void CheckBusyResources(); size_t total_memory_usage_bytes() const { return memory_usage_bytes_; } size_t acquired_memory_usage_bytes() const { diff --git a/cc/resources/resource_pool_unittest.cc b/cc/resources/resource_pool_unittest.cc index 035b19e3cf7a..f280f49d1421 100644 --- a/cc/resources/resource_pool_unittest.cc +++ b/cc/resources/resource_pool_unittest.cc @@ -71,8 +71,7 @@ TEST_F(ResourcePoolTest, AccountingSingleResource) { EXPECT_EQ(1u, resource_pool_->total_resource_count()); EXPECT_EQ(1u, resource_pool_->busy_resource_count()); - bool wait_if_needed = false; - resource_pool_->CheckBusyResources(wait_if_needed); + resource_pool_->CheckBusyResources(); EXPECT_EQ(resource_bytes, resource_pool_->total_memory_usage_bytes()); EXPECT_EQ(0u, resource_pool_->acquired_memory_usage_bytes()); EXPECT_EQ(1u, resource_pool_->total_resource_count()); @@ -96,26 +95,25 @@ TEST_F(ResourcePoolTest, SimpleResourceReuse) { gfx::Size size(100, 100); ResourceFormat format = RGBA_8888; - bool wait_if_needed = false; scoped_ptr resource = resource_pool_->AcquireResource(size, format); resource_pool_->ReleaseResource(resource.Pass(), 0u); - resource_pool_->CheckBusyResources(wait_if_needed); + resource_pool_->CheckBusyResources(); EXPECT_EQ(1u, resource_provider_->num_resources()); // Same size/format should re-use resource. resource = resource_pool_->AcquireResource(size, format); EXPECT_EQ(1u, resource_provider_->num_resources()); resource_pool_->ReleaseResource(resource.Pass(), 0u); - resource_pool_->CheckBusyResources(wait_if_needed); + resource_pool_->CheckBusyResources(); EXPECT_EQ(1u, resource_provider_->num_resources()); // Different size/format should alloate new resource. 
resource = resource_pool_->AcquireResource(gfx::Size(50, 50), LUMINANCE_8); EXPECT_EQ(2u, resource_provider_->num_resources()); resource_pool_->ReleaseResource(resource.Pass(), 0u); - resource_pool_->CheckBusyResources(wait_if_needed); + resource_pool_->CheckBusyResources(); EXPECT_EQ(2u, resource_provider_->num_resources()); } @@ -127,7 +125,6 @@ TEST_F(ResourcePoolTest, LostResource) { gfx::Size size(100, 100); ResourceFormat format = RGBA_8888; - bool wait_if_needed = false; scoped_ptr resource = resource_pool_->AcquireResource(size, format); @@ -135,7 +132,7 @@ TEST_F(ResourcePoolTest, LostResource) { resource_provider_->LoseResourceForTesting(resource->id()); resource_pool_->ReleaseResource(resource.Pass(), 0u); - resource_pool_->CheckBusyResources(wait_if_needed); + resource_pool_->CheckBusyResources(); EXPECT_EQ(0u, resource_provider_->num_resources()); } diff --git a/cc/resources/resource_provider.cc b/cc/resources/resource_provider.cc index 204422c187f3..d09901e6912d 100644 --- a/cc/resources/resource_provider.cc +++ b/cc/resources/resource_provider.cc @@ -32,7 +32,6 @@ #include "third_party/skia/include/gpu/GrTextureProvider.h" #include "ui/gfx/geometry/rect.h" #include "ui/gfx/geometry/vector2d.h" -#include "ui/gfx/gpu_memory_buffer.h" #include "ui/gl/trace_util.h" using gpu::gles2::GLES2Interface; @@ -117,25 +116,6 @@ GrPixelConfig ToGrPixelConfig(ResourceFormat format) { return kSkia8888_GrPixelConfig; } -gfx::BufferFormat ToGpuMemoryBufferFormat(ResourceFormat format) { - switch (format) { - case RGBA_8888: - return gfx::BufferFormat::RGBA_8888; - case BGRA_8888: - return gfx::BufferFormat::BGRA_8888; - case RGBA_4444: - return gfx::BufferFormat::RGBA_4444; - case ALPHA_8: - case LUMINANCE_8: - case RGB_565: - case ETC1: - case RED_8: - break; - } - NOTREACHED(); - return gfx::BufferFormat::RGBA_8888; -} - class ScopedSetActiveTexture { public: ScopedSetActiveTexture(GLES2Interface* gl, GLenum unit) @@ -206,46 +186,6 @@ class BufferIdAllocator : public IdAllocator { DISALLOW_COPY_AND_ASSIGN(BufferIdAllocator); }; -// Query object based fence implementation used to detect completion of copy -// texture operations. Fence has passed when query result is available. -class CopyTextureFence : public ResourceProvider::Fence { - public: - CopyTextureFence(gpu::gles2::GLES2Interface* gl, unsigned query_id) - : gl_(gl), query_id_(query_id) {} - - // Overridden from ResourceProvider::Fence: - void Set() override {} - bool HasPassed() override { - unsigned available = 1; - gl_->GetQueryObjectuivEXT( - query_id_, GL_QUERY_RESULT_AVAILABLE_EXT, &available); - if (!available) - return false; - - ProcessResult(); - return true; - } - void Wait() override { - // ProcessResult() will wait for result to become available. 
- ProcessResult(); - } - - private: - ~CopyTextureFence() override {} - - void ProcessResult() { - unsigned time_elapsed_us = 0; - gl_->GetQueryObjectuivEXT(query_id_, GL_QUERY_RESULT_EXT, &time_elapsed_us); - UMA_HISTOGRAM_CUSTOM_COUNTS("Renderer4.CopyTextureLatency", time_elapsed_us, - 0, 256000, 50); - } - - gpu::gles2::GLES2Interface* gl_; - unsigned query_id_; - - DISALLOW_COPY_AND_ASSIGN(CopyTextureFence); -}; - } // namespace ResourceProvider::Resource::~Resource() {} @@ -396,13 +336,11 @@ scoped_ptr ResourceProvider::Create( BlockingTaskRunner* blocking_main_thread_task_runner, int highp_threshold_min, bool use_rgba_4444_texture_format, - size_t id_allocation_chunk_size, - bool use_persistent_map_for_gpu_memory_buffers) { + size_t id_allocation_chunk_size) { scoped_ptr resource_provider(new ResourceProvider( output_surface, shared_bitmap_manager, gpu_memory_buffer_manager, blocking_main_thread_task_runner, highp_threshold_min, - use_rgba_4444_texture_format, id_allocation_chunk_size, - use_persistent_map_for_gpu_memory_buffers)); + use_rgba_4444_texture_format, id_allocation_chunk_size)); resource_provider->Initialize(); return resource_provider; } @@ -993,13 +931,9 @@ gfx::GpuMemoryBuffer* ResourceProvider::ScopedWriteLockGpuMemoryBuffer::GetGpuMemoryBuffer() { if (gpu_memory_buffer_) return gpu_memory_buffer_; - gfx::BufferUsage usage = - resource_provider_->use_persistent_map_for_gpu_memory_buffers() - ? gfx::BufferUsage::PERSISTENT_MAP - : gfx::BufferUsage::MAP; scoped_ptr gpu_memory_buffer = gpu_memory_buffer_manager_->AllocateGpuMemoryBuffer( - size_, ToGpuMemoryBufferFormat(format_), usage); + size_, BufferFormat(format_), gfx::BufferUsage::MAP); gpu_memory_buffer_ = gpu_memory_buffer.release(); return gpu_memory_buffer_; } @@ -1090,8 +1024,7 @@ ResourceProvider::ResourceProvider( BlockingTaskRunner* blocking_main_thread_task_runner, int highp_threshold_min, bool use_rgba_4444_texture_format, - size_t id_allocation_chunk_size, - bool use_persistent_map_for_gpu_memory_buffers) + size_t id_allocation_chunk_size) : output_surface_(output_surface), shared_bitmap_manager_(shared_bitmap_manager), gpu_memory_buffer_manager_(gpu_memory_buffer_manager), @@ -1110,10 +1043,7 @@ ResourceProvider::ResourceProvider( best_texture_format_(RGBA_8888), best_render_buffer_format_(RGBA_8888), use_rgba_4444_texture_format_(use_rgba_4444_texture_format), - id_allocation_chunk_size_(id_allocation_chunk_size), - use_sync_query_(false), - use_persistent_map_for_gpu_memory_buffers_( - use_persistent_map_for_gpu_memory_buffers) { + id_allocation_chunk_size_(id_allocation_chunk_size) { DCHECK(output_surface_->HasClient()); DCHECK(id_allocation_chunk_size_); } @@ -1847,79 +1777,6 @@ void ResourceProvider::BindImageForSampling(Resource* resource) { resource->dirty_image = false; } -void ResourceProvider::CopyResource(ResourceId source_id, - ResourceId dest_id, - const gfx::Rect& rect) { - TRACE_EVENT0("cc", "ResourceProvider::CopyResource"); - - Resource* source_resource = GetResource(source_id); - DCHECK(!source_resource->lock_for_read_count); - DCHECK(source_resource->origin == Resource::INTERNAL); - DCHECK_EQ(source_resource->exported_count, 0); - DCHECK_EQ(RESOURCE_TYPE_GL_TEXTURE, source_resource->type); - LazyAllocate(source_resource); - - Resource* dest_resource = GetResource(dest_id); - DCHECK(!dest_resource->locked_for_write); - DCHECK(!dest_resource->lock_for_read_count); - DCHECK(dest_resource->origin == Resource::INTERNAL); - DCHECK_EQ(dest_resource->exported_count, 0); - 
DCHECK_EQ(RESOURCE_TYPE_GL_TEXTURE, dest_resource->type); - LazyAllocate(dest_resource); - - DCHECK_EQ(source_resource->type, dest_resource->type); - DCHECK_EQ(source_resource->format, dest_resource->format); - DCHECK(source_resource->size == dest_resource->size); - DCHECK(gfx::Rect(dest_resource->size).Contains(rect)); - - GLES2Interface* gl = ContextGL(); - DCHECK(gl); - if (source_resource->image_id && source_resource->dirty_image) { - gl->BindTexture(source_resource->target, source_resource->gl_id); - BindImageForSampling(source_resource); - } - if (use_sync_query_) { - if (!source_resource->gl_read_lock_query_id) - gl->GenQueriesEXT(1, &source_resource->gl_read_lock_query_id); -#if defined(OS_CHROMEOS) - // TODO(reveman): This avoids a performance problem on some ChromeOS - // devices. This needs to be removed to support native GpuMemoryBuffer - // implementations. crbug.com/436314 - gl->BeginQueryEXT(GL_COMMANDS_ISSUED_CHROMIUM, - source_resource->gl_read_lock_query_id); -#else - gl->BeginQueryEXT(GL_COMMANDS_COMPLETED_CHROMIUM, - source_resource->gl_read_lock_query_id); -#endif - } - DCHECK(!dest_resource->image_id); - dest_resource->allocated = true; - gl->CopySubTextureCHROMIUM(dest_resource->target, source_resource->gl_id, - dest_resource->gl_id, rect.x(), rect.y(), rect.x(), - rect.y(), rect.width(), rect.height(), - false, false, false); - if (source_resource->gl_read_lock_query_id) { - // End query and create a read lock fence that will prevent access to -// source resource until CopySubTextureCHROMIUM command has completed. -#if defined(OS_CHROMEOS) - gl->EndQueryEXT(GL_COMMANDS_ISSUED_CHROMIUM); -#else - gl->EndQueryEXT(GL_COMMANDS_COMPLETED_CHROMIUM); -#endif - source_resource->read_lock_fence = make_scoped_refptr( - new CopyTextureFence(gl, source_resource->gl_read_lock_query_id)); - } else { - // Create a SynchronousFence when CHROMIUM_sync_query extension is missing. - // Try to use one synchronous fence for as many CopyResource operations as - // possible as that reduce the number of times we have to synchronize with - // the GL. 
- if (!synchronous_fence_.get() || synchronous_fence_->has_synchronized()) - synchronous_fence_ = make_scoped_refptr(new SynchronousFence(gl)); - source_resource->read_lock_fence = synchronous_fence_; - source_resource->read_lock_fence->Set(); - } -} - void ResourceProvider::WaitSyncPointIfNeeded(ResourceId id) { Resource* resource = GetResource(id); DCHECK_EQ(resource->exported_count, 0); @@ -1935,15 +1792,6 @@ void ResourceProvider::WaitSyncPointIfNeeded(ResourceId id) { resource->mailbox.set_sync_point(0); } -void ResourceProvider::WaitReadLockIfNeeded(ResourceId id) { - Resource* resource = GetResource(id); - DCHECK_EQ(resource->exported_count, 0); - if (!resource->read_lock_fence.get()) - return; - - resource->read_lock_fence->Wait(); -} - GLint ResourceProvider::GetActiveTextureUnit(GLES2Interface* gl) { GLint active_unit = 0; gl->GetIntegerv(GL_ACTIVE_TEXTURE, &active_unit); diff --git a/cc/resources/resource_provider.h b/cc/resources/resource_provider.h index bee3ce1014a0..2a778d3e31ed 100644 --- a/cc/resources/resource_provider.h +++ b/cc/resources/resource_provider.h @@ -35,6 +35,7 @@ #include "third_party/skia/include/core/SkBitmap.h" #include "third_party/skia/include/core/SkCanvas.h" #include "ui/gfx/geometry/size.h" +#include "ui/gfx/gpu_memory_buffer.h" class GrContext; @@ -46,7 +47,6 @@ class GLES2Interface; } namespace gfx { -class GpuMemoryBuffer; class Rect; class Vector2d; } @@ -87,8 +87,7 @@ class CC_EXPORT ResourceProvider BlockingTaskRunner* blocking_main_thread_task_runner, int highp_threshold_min, bool use_rgba_4444_texture_format, - size_t id_allocation_chunk_size, - bool use_persistent_map_for_gpu_memory_buffers); + size_t id_allocation_chunk_size); ~ResourceProvider() override; void DidLoseOutputSurface() { lost_output_surface_ = true; } @@ -103,8 +102,8 @@ class CC_EXPORT ResourceProvider } ResourceFormat yuv_resource_format() const { return yuv_resource_format_; } bool use_sync_query() const { return use_sync_query_; } - bool use_persistent_map_for_gpu_memory_buffers() const { - return use_persistent_map_for_gpu_memory_buffers_; + gpu::GpuMemoryBufferManager* gpu_memory_buffer_manager() { + return gpu_memory_buffer_manager_; } size_t num_resources() const { return resources_.size(); } @@ -426,15 +425,8 @@ class CC_EXPORT ResourceProvider // Indicates if we can currently lock this resource for write. bool CanLockForWrite(ResourceId id); - // Copy |rect| pixels from source to destination. - void CopyResource(ResourceId source_id, - ResourceId dest_id, - const gfx::Rect& rect); - void WaitSyncPointIfNeeded(ResourceId id); - void WaitReadLockIfNeeded(ResourceId id); - static GLint GetActiveTextureUnit(gpu::gles2::GLES2Interface* gl); OutputSurface* output_surface() { return output_surface_; } @@ -452,8 +444,7 @@ class CC_EXPORT ResourceProvider BlockingTaskRunner* blocking_main_thread_task_runner, int highp_threshold_min, bool use_rgba_4444_texture_format, - size_t id_allocation_chunk_size, - bool use_persistent_map_for_gpu_memory_buffers); + size_t id_allocation_chunk_size); void Initialize(); private: @@ -608,9 +599,6 @@ class CC_EXPORT ResourceProvider scoped_ptr buffer_id_allocator_; bool use_sync_query_; - bool use_persistent_map_for_gpu_memory_buffers_; - // Fence used for CopyResource if CHROMIUM_sync_query is not supported. 
- scoped_refptr synchronous_fence_; DISALLOW_COPY_AND_ASSIGN(ResourceProvider); }; diff --git a/cc/resources/resource_provider_unittest.cc b/cc/resources/resource_provider_unittest.cc index 136855847db7..3d5adc0e7a42 100644 --- a/cc/resources/resource_provider_unittest.cc +++ b/cc/resources/resource_provider_unittest.cc @@ -421,11 +421,11 @@ class ResourceProviderTest resource_provider_ = ResourceProvider::Create( output_surface_.get(), shared_bitmap_manager_.get(), gpu_memory_buffer_manager_.get(), main_thread_task_runner_.get(), 0, - false, 1, false); + false, 1); child_resource_provider_ = ResourceProvider::Create( child_output_surface_.get(), shared_bitmap_manager_.get(), gpu_memory_buffer_manager_.get(), main_thread_task_runner_.get(), 0, - false, 1, false); + false, 1); } ResourceProviderTest() : ResourceProviderTest(true) {} @@ -1367,7 +1367,7 @@ TEST_P(ResourceProviderTest, TransferGLToSoftware) { scoped_ptr child_resource_provider(ResourceProvider::Create( child_output_surface.get(), shared_bitmap_manager_.get(), - gpu_memory_buffer_manager_.get(), NULL, 0, false, 1, false)); + gpu_memory_buffer_manager_.get(), NULL, 0, false, 1)); gfx::Size size(1, 1); ResourceFormat format = RGBA_8888; @@ -1851,7 +1851,7 @@ class ResourceProviderTestTextureFilters : public ResourceProviderTest { scoped_ptr child_resource_provider( ResourceProvider::Create(child_output_surface.get(), shared_bitmap_manager.get(), NULL, NULL, 0, - false, 1, false)); + false, 1)); scoped_ptr parent_context_owned( new TextureStateTrackingContext); @@ -1865,7 +1865,7 @@ class ResourceProviderTestTextureFilters : public ResourceProviderTest { scoped_ptr parent_resource_provider( ResourceProvider::Create(parent_output_surface.get(), shared_bitmap_manager.get(), NULL, NULL, 0, - false, 1, false)); + false, 1)); gfx::Size size(1, 1); ResourceFormat format = RGBA_8888; @@ -2497,7 +2497,7 @@ TEST_P(ResourceProviderTest, ScopedSampler) { scoped_ptr resource_provider(ResourceProvider::Create( output_surface.get(), shared_bitmap_manager_.get(), - gpu_memory_buffer_manager_.get(), NULL, 0, false, 1, false)); + gpu_memory_buffer_manager_.get(), NULL, 0, false, 1)); gfx::Size size(1, 1); ResourceFormat format = RGBA_8888; @@ -2579,7 +2579,7 @@ TEST_P(ResourceProviderTest, ManagedResource) { scoped_ptr resource_provider(ResourceProvider::Create( output_surface.get(), shared_bitmap_manager_.get(), - gpu_memory_buffer_manager_.get(), NULL, 0, false, 1, false)); + gpu_memory_buffer_manager_.get(), NULL, 0, false, 1)); gfx::Size size(1, 1); ResourceFormat format = RGBA_8888; @@ -2626,7 +2626,7 @@ TEST_P(ResourceProviderTest, TextureWrapMode) { scoped_ptr resource_provider(ResourceProvider::Create( output_surface.get(), shared_bitmap_manager_.get(), - gpu_memory_buffer_manager_.get(), NULL, 0, false, 1, false)); + gpu_memory_buffer_manager_.get(), NULL, 0, false, 1)); gfx::Size size(1, 1); ResourceFormat format = RGBA_8888; @@ -2676,7 +2676,7 @@ TEST_P(ResourceProviderTest, TextureHint) { scoped_ptr resource_provider(ResourceProvider::Create( output_surface.get(), shared_bitmap_manager_.get(), - gpu_memory_buffer_manager_.get(), NULL, 0, false, 1, false)); + gpu_memory_buffer_manager_.get(), NULL, 0, false, 1)); gfx::Size size(1, 1); ResourceFormat format = RGBA_8888; @@ -2741,7 +2741,7 @@ TEST_P(ResourceProviderTest, TextureMailbox_SharedMemory) { scoped_ptr resource_provider(ResourceProvider::Create( output_surface.get(), shared_bitmap_manager_.get(), gpu_memory_buffer_manager_.get(), main_thread_task_runner_.get(), 0, - false, 1, 
false)); + false, 1)); uint32 release_sync_point = 0; bool lost_resource = false; @@ -2790,7 +2790,7 @@ class ResourceProviderTestTextureMailboxGLFilters scoped_ptr resource_provider(ResourceProvider::Create( output_surface.get(), shared_bitmap_manager, gpu_memory_buffer_manager, - main_thread_task_runner, 0, false, 1, false)); + main_thread_task_runner, 0, false, 1)); unsigned texture_id = 1; uint32 sync_point = 30; @@ -2933,7 +2933,7 @@ TEST_P(ResourceProviderTest, TextureMailbox_GLTextureExternalOES) { scoped_ptr resource_provider(ResourceProvider::Create( output_surface.get(), shared_bitmap_manager_.get(), - gpu_memory_buffer_manager_.get(), NULL, 0, false, 1, false)); + gpu_memory_buffer_manager_.get(), NULL, 0, false, 1)); uint32 sync_point = 30; unsigned target = GL_TEXTURE_EXTERNAL_OES; @@ -3002,7 +3002,7 @@ TEST_P(ResourceProviderTest, scoped_ptr resource_provider(ResourceProvider::Create( output_surface.get(), shared_bitmap_manager_.get(), - gpu_memory_buffer_manager_.get(), NULL, 0, false, 1, false)); + gpu_memory_buffer_manager_.get(), NULL, 0, false, 1)); uint32 sync_point = 30; unsigned target = GL_TEXTURE_2D; @@ -3055,7 +3055,7 @@ TEST_P(ResourceProviderTest, TextureMailbox_WaitSyncPointIfNeeded_NoSyncPoint) { scoped_ptr resource_provider(ResourceProvider::Create( output_surface.get(), shared_bitmap_manager_.get(), - gpu_memory_buffer_manager_.get(), NULL, 0, false, 1, false)); + gpu_memory_buffer_manager_.get(), NULL, 0, false, 1)); uint32 sync_point = 0; unsigned target = GL_TEXTURE_2D; @@ -3175,7 +3175,7 @@ TEST_P(ResourceProviderTest, TextureAllocation) { scoped_ptr resource_provider(ResourceProvider::Create( output_surface.get(), shared_bitmap_manager_.get(), - gpu_memory_buffer_manager_.get(), NULL, 0, false, 1, false)); + gpu_memory_buffer_manager_.get(), NULL, 0, false, 1)); gfx::Size size(2, 2); gfx::Vector2d offset(0, 0); @@ -3249,7 +3249,7 @@ TEST_P(ResourceProviderTest, TextureAllocationHint) { scoped_ptr resource_provider(ResourceProvider::Create( output_surface.get(), shared_bitmap_manager_.get(), - gpu_memory_buffer_manager_.get(), NULL, 0, false, 1, false)); + gpu_memory_buffer_manager_.get(), NULL, 0, false, 1)); gfx::Size size(2, 2); @@ -3304,7 +3304,7 @@ TEST_P(ResourceProviderTest, TextureAllocationHint_BGRA) { scoped_ptr resource_provider(ResourceProvider::Create( output_surface.get(), shared_bitmap_manager_.get(), - gpu_memory_buffer_manager_.get(), NULL, 0, false, 1, false)); + gpu_memory_buffer_manager_.get(), NULL, 0, false, 1)); gfx::Size size(2, 2); const ResourceFormat formats[2] = {RGBA_8888, BGRA_8888}; @@ -3358,7 +3358,7 @@ TEST_P(ResourceProviderTest, PixelBuffer_GLTexture) { scoped_ptr resource_provider(ResourceProvider::Create( output_surface.get(), shared_bitmap_manager_.get(), - gpu_memory_buffer_manager_.get(), NULL, 0, false, 1, false)); + gpu_memory_buffer_manager_.get(), NULL, 0, false, 1)); id = resource_provider->CreateResource( size, GL_CLAMP_TO_EDGE, ResourceProvider::TEXTURE_HINT_IMMUTABLE, format); @@ -3400,7 +3400,7 @@ TEST_P(ResourceProviderTest, ForcingAsyncUploadToComplete) { scoped_ptr resource_provider(ResourceProvider::Create( output_surface.get(), shared_bitmap_manager_.get(), - gpu_memory_buffer_manager_.get(), NULL, 0, false, 1, false)); + gpu_memory_buffer_manager_.get(), NULL, 0, false, 1)); id = resource_provider->CreateResource( size, GL_CLAMP_TO_EDGE, ResourceProvider::TEXTURE_HINT_IMMUTABLE, format); @@ -3442,7 +3442,7 @@ TEST_P(ResourceProviderTest, PixelBufferLostContext) { scoped_ptr 
resource_provider(ResourceProvider::Create( output_surface.get(), shared_bitmap_manager_.get(), - gpu_memory_buffer_manager_.get(), NULL, 0, false, 1, false)); + gpu_memory_buffer_manager_.get(), NULL, 0, false, 1)); EXPECT_CALL(*context, NextTextureId()).WillRepeatedly(Return(texture_id)); @@ -3482,7 +3482,7 @@ TEST_P(ResourceProviderTest, Image_GLTexture) { scoped_ptr resource_provider(ResourceProvider::Create( output_surface.get(), shared_bitmap_manager_.get(), - gpu_memory_buffer_manager_.get(), NULL, 0, false, 1, false)); + gpu_memory_buffer_manager_.get(), NULL, 0, false, 1)); id = resource_provider->CreateResource( size, GL_CLAMP_TO_EDGE, ResourceProvider::TEXTURE_HINT_IMMUTABLE, format); @@ -3542,87 +3542,6 @@ TEST_P(ResourceProviderTest, Image_GLTexture) { .RetiresOnSaturation(); } -TEST_P(ResourceProviderTest, CopyResource_GLTexture) { - if (GetParam() != ResourceProvider::RESOURCE_TYPE_GL_TEXTURE) - return; - scoped_ptr context_owned( - new StrictMock); - AllocationTrackingContext3D* context = context_owned.get(); - context_owned->set_support_sync_query(true); - - FakeOutputSurfaceClient output_surface_client; - scoped_ptr output_surface( - FakeOutputSurface::Create3d(context_owned.Pass())); - ASSERT_TRUE(output_surface->BindToClient(&output_surface_client)); - - const int kWidth = 2; - const int kHeight = 2; - gfx::Size size(kWidth, kHeight); - ResourceFormat format = RGBA_8888; - ResourceId source_id = 0; - ResourceId dest_id = 0; - const unsigned kSourceTextureId = 123u; - const unsigned kDestTextureId = 321u; - const unsigned kImageId = 234u; - - scoped_ptr resource_provider(ResourceProvider::Create( - output_surface.get(), shared_bitmap_manager_.get(), - gpu_memory_buffer_manager_.get(), NULL, 0, false, 1, false)); - - source_id = resource_provider->CreateResource( - size, GL_CLAMP_TO_EDGE, ResourceProvider::TEXTURE_HINT_IMMUTABLE, format); - - EXPECT_CALL(*context, NextTextureId()) - .WillOnce(Return(kSourceTextureId)) - .RetiresOnSaturation(); - EXPECT_CALL(*context, bindTexture(GL_TEXTURE_2D, kSourceTextureId)) - .Times(1) - .RetiresOnSaturation(); - EXPECT_CALL(*context, createImageCHROMIUM(_, kWidth, kHeight, GL_RGBA)) - .WillOnce(Return(kImageId)) - .RetiresOnSaturation(); - { - ResourceProvider::ScopedWriteLockGpuMemoryBuffer lock( - resource_provider.get(), source_id); - EXPECT_TRUE(lock.GetGpuMemoryBuffer()); - } - Mock::VerifyAndClearExpectations(context); - - dest_id = resource_provider->CreateResource( - size, GL_CLAMP_TO_EDGE, ResourceProvider::TEXTURE_HINT_IMMUTABLE, format); - - EXPECT_CALL(*context, NextTextureId()) - .WillOnce(Return(kDestTextureId)) - .RetiresOnSaturation(); - EXPECT_CALL(*context, bindTexture(GL_TEXTURE_2D, kDestTextureId)) - .Times(2) - .RetiresOnSaturation(); - EXPECT_CALL(*context, texImage2D(GL_TEXTURE_2D, 0, GL_RGBA, 2, 2, 0, GL_RGBA, - GL_UNSIGNED_BYTE, nullptr)) - .Times(1) - .RetiresOnSaturation(); - EXPECT_CALL(*context, bindTexture(GL_TEXTURE_2D, kSourceTextureId)) - .Times(1) - .RetiresOnSaturation(); - EXPECT_CALL(*context, bindTexImage2DCHROMIUM(GL_TEXTURE_2D, kImageId)) - .Times(1) - .RetiresOnSaturation(); - resource_provider->CopyResource(source_id, dest_id, gfx::Rect(size)); - Mock::VerifyAndClearExpectations(context); - - EXPECT_CALL(*context, destroyImageCHROMIUM(kImageId)) - .Times(1) - .RetiresOnSaturation(); - EXPECT_CALL(*context, RetireTextureId(kSourceTextureId)) - .Times(1) - .RetiresOnSaturation(); - EXPECT_CALL(*context, RetireTextureId(kDestTextureId)) - .Times(1) - .RetiresOnSaturation(); - 
resource_provider->DeleteResource(source_id); - resource_provider->DeleteResource(dest_id); -} - TEST_P(ResourceProviderTest, CompressedTextureETC1Allocate) { if (GetParam() != ResourceProvider::RESOURCE_TYPE_GL_TEXTURE) return; @@ -3640,7 +3559,7 @@ TEST_P(ResourceProviderTest, CompressedTextureETC1Allocate) { gfx::Size size(4, 4); scoped_ptr resource_provider(ResourceProvider::Create( output_surface.get(), shared_bitmap_manager_.get(), - gpu_memory_buffer_manager_.get(), NULL, 0, false, 1, false)); + gpu_memory_buffer_manager_.get(), NULL, 0, false, 1)); int texture_id = 123; ResourceId id = resource_provider->CreateResource( @@ -3671,7 +3590,7 @@ TEST_P(ResourceProviderTest, CompressedTextureETC1Upload) { gfx::Size size(4, 4); scoped_ptr resource_provider(ResourceProvider::Create( output_surface.get(), shared_bitmap_manager_.get(), - gpu_memory_buffer_manager_.get(), NULL, 0, false, 1, false)); + gpu_memory_buffer_manager_.get(), NULL, 0, false, 1)); int texture_id = 123; uint8_t pixels[8]; @@ -3727,7 +3646,7 @@ TEST(ResourceProviderTest, TextureAllocationChunkSize) { size_t kTextureAllocationChunkSize = 1; scoped_ptr resource_provider(ResourceProvider::Create( output_surface.get(), shared_bitmap_manager.get(), NULL, NULL, 0, false, - kTextureAllocationChunkSize, false)); + kTextureAllocationChunkSize)); ResourceId id = resource_provider->CreateResource( size, GL_CLAMP_TO_EDGE, ResourceProvider::TEXTURE_HINT_IMMUTABLE, @@ -3743,7 +3662,7 @@ TEST(ResourceProviderTest, TextureAllocationChunkSize) { size_t kTextureAllocationChunkSize = 8; scoped_ptr resource_provider(ResourceProvider::Create( output_surface.get(), shared_bitmap_manager.get(), NULL, NULL, 0, false, - kTextureAllocationChunkSize, false)); + kTextureAllocationChunkSize)); ResourceId id = resource_provider->CreateResource( size, GL_CLAMP_TO_EDGE, ResourceProvider::TEXTURE_HINT_IMMUTABLE, diff --git a/cc/surfaces/display.cc b/cc/surfaces/display.cc index 7e80854a0aaf..586bfad3dc7a 100644 --- a/cc/surfaces/display.cc +++ b/cc/surfaces/display.cc @@ -101,13 +101,10 @@ void Display::InitializeRenderer() { if (resource_provider_) return; - // Display does not use GpuMemoryBuffers, so persistent map is not relevant. 
- bool use_persistent_map_for_gpu_memory_buffers = false; scoped_ptr resource_provider = ResourceProvider::Create( output_surface_.get(), bitmap_manager_, gpu_memory_buffer_manager_, nullptr, settings_.highp_threshold_min, settings_.use_rgba_4444_textures, - settings_.texture_id_allocation_chunk_size, - use_persistent_map_for_gpu_memory_buffers); + settings_.texture_id_allocation_chunk_size); if (!resource_provider) return; diff --git a/cc/surfaces/surface_display_output_surface.cc b/cc/surfaces/surface_display_output_surface.cc index e21d15d8afb3..6a2779d68929 100644 --- a/cc/surfaces/surface_display_output_surface.cc +++ b/cc/surfaces/surface_display_output_surface.cc @@ -17,8 +17,9 @@ namespace cc { SurfaceDisplayOutputSurface::SurfaceDisplayOutputSurface( SurfaceManager* surface_manager, SurfaceIdAllocator* allocator, - const scoped_refptr& context_provider) - : OutputSurface(context_provider), + const scoped_refptr& context_provider, + const scoped_refptr& worker_context_provider) + : OutputSurface(context_provider, worker_context_provider), display_client_(NULL), factory_(surface_manager, this), allocator_(allocator) { diff --git a/cc/surfaces/surface_display_output_surface.h b/cc/surfaces/surface_display_output_surface.h index c11bb86075de..2cdaadc78307 100644 --- a/cc/surfaces/surface_display_output_surface.h +++ b/cc/surfaces/surface_display_output_surface.h @@ -27,7 +27,8 @@ class CC_SURFACES_EXPORT SurfaceDisplayOutputSurface SurfaceDisplayOutputSurface( SurfaceManager* surface_manager, SurfaceIdAllocator* allocator, - const scoped_refptr& context_provider); + const scoped_refptr& context_provider, + const scoped_refptr& worker_context_provider); ~SurfaceDisplayOutputSurface() override; void set_display_client(OnscreenDisplayClient* display_client) { diff --git a/cc/surfaces/surface_display_output_surface_unittest.cc b/cc/surfaces/surface_display_output_surface_unittest.cc index 9b67f1f6f7f3..3145729115d4 100644 --- a/cc/surfaces/surface_display_output_surface_unittest.cc +++ b/cc/surfaces/surface_display_output_surface_unittest.cc @@ -60,7 +60,8 @@ class SurfaceDisplayOutputSurfaceTest : public testing::Test { context_provider_(TestContextProvider::Create()), surface_display_output_surface_(&surface_manager_, &allocator_, - context_provider_) { + context_provider_, + nullptr) { output_surface_ = display_client_.output_surface(); display_client_.set_surface_output_surface( &surface_display_output_surface_); diff --git a/cc/test/fake_output_surface.h b/cc/test/fake_output_surface.h index 7041a447f8e1..8edec08de47a 100644 --- a/cc/test/fake_output_surface.h +++ b/cc/test/fake_output_surface.h @@ -60,13 +60,15 @@ class FakeOutputSurface : public OutputSurface { static scoped_ptr CreateDelegating3d( scoped_refptr context_provider) { - return make_scoped_ptr(new FakeOutputSurface(context_provider, true)); + return make_scoped_ptr(new FakeOutputSurface( + context_provider, TestContextProvider::Create(), true)); } static scoped_ptr CreateDelegating3d( scoped_ptr context) { - return make_scoped_ptr(new FakeOutputSurface( - TestContextProvider::Create(context.Pass()), true)); + return make_scoped_ptr( + new FakeOutputSurface(TestContextProvider::Create(context.Pass()), + TestContextProvider::Create(), true)); } static scoped_ptr CreateDelegatingSoftware( diff --git a/cc/test/fake_resource_provider.h b/cc/test/fake_resource_provider.h index c96bebad6474..5570703922c0 100644 --- a/cc/test/fake_resource_provider.h +++ b/cc/test/fake_resource_provider.h @@ -14,9 +14,8 @@ class 
FakeResourceProvider : public ResourceProvider { static scoped_ptr Create( OutputSurface* output_surface, SharedBitmapManager* shared_bitmap_manager) { - scoped_ptr provider( - new FakeResourceProvider(output_surface, shared_bitmap_manager, nullptr, - nullptr, 0, false, 1, false)); + scoped_ptr provider(new FakeResourceProvider( + output_surface, shared_bitmap_manager, nullptr, nullptr, 0, false, 1)); provider->Initialize(); return provider; } @@ -27,7 +26,7 @@ class FakeResourceProvider : public ResourceProvider { gpu::GpuMemoryBufferManager* gpu_memory_buffer_manager) { scoped_ptr provider(new FakeResourceProvider( output_surface, shared_bitmap_manager, gpu_memory_buffer_manager, - nullptr, 0, false, 1, false)); + nullptr, 0, false, 1)); provider->Initialize(); return provider; } @@ -39,16 +38,14 @@ class FakeResourceProvider : public ResourceProvider { BlockingTaskRunner* blocking_main_thread_task_runner, int highp_threshold_min, bool use_rgba_4444_texture_format, - size_t id_allocation_chunk_size, - bool use_persistent_map_for_gpu_memory_buffers) + size_t id_allocation_chunk_size) : ResourceProvider(output_surface, shared_bitmap_manager, gpu_memory_buffer_manager, blocking_main_thread_task_runner, highp_threshold_min, use_rgba_4444_texture_format, - id_allocation_chunk_size, - use_persistent_map_for_gpu_memory_buffers) {} + id_allocation_chunk_size) {} }; } // namespace cc diff --git a/cc/test/layer_tree_pixel_resource_test.cc b/cc/test/layer_tree_pixel_resource_test.cc index bc323f138dc2..1bd398921b33 100644 --- a/cc/test/layer_tree_pixel_resource_test.cc +++ b/cc/test/layer_tree_pixel_resource_test.cc @@ -127,8 +127,7 @@ void LayerTreeHostPixelResourceTest::InitializeFromTestCase( void LayerTreeHostPixelResourceTest::CreateResourceAndTileTaskWorkerPool( LayerTreeHostImpl* host_impl, scoped_ptr* tile_task_worker_pool, - scoped_ptr* resource_pool, - scoped_ptr* staging_resource_pool) { + scoped_ptr* resource_pool) { base::SingleThreadTaskRunner* task_runner = proxy()->HasImplThread() ? proxy()->ImplThreadTaskRunner() : proxy()->MainThreadTaskRunner(); @@ -140,6 +139,7 @@ void LayerTreeHostPixelResourceTest::CreateResourceAndTileTaskWorkerPool( ResourceProvider* resource_provider = host_impl->resource_provider(); size_t max_transfer_buffer_usage_bytes = 1024u * 1024u * 60u; int max_bytes_per_copy_operation = 1024 * 1024; + int max_staging_buffers = 32; switch (resource_pool_option_) { case BITMAP_TILE_TASK_WORKER_POOL: @@ -177,15 +177,13 @@ void LayerTreeHostPixelResourceTest::CreateResourceAndTileTaskWorkerPool( EXPECT_TRUE(context_provider); EXPECT_EQ(PIXEL_TEST_GL, test_type_); EXPECT_TRUE(host_impl->GetRendererCapabilities().using_image); - // We need to create a staging resource pool when using copy rasterizer. 
- *staging_resource_pool = - ResourcePool::Create(resource_provider, staging_texture_target_); *resource_pool = ResourcePool::Create(resource_provider, draw_texture_target_); *tile_task_worker_pool = OneCopyTileTaskWorkerPool::Create( task_runner, task_graph_runner(), context_provider, resource_provider, - staging_resource_pool->get(), max_bytes_per_copy_operation, false); + max_bytes_per_copy_operation, false, staging_texture_target_, + max_staging_buffers); break; case PIXEL_BUFFER_TILE_TASK_WORKER_POOL: EXPECT_TRUE(context_provider); diff --git a/cc/test/layer_tree_pixel_resource_test.h b/cc/test/layer_tree_pixel_resource_test.h index 16b7ca9c7dde..405690ea9226 100644 --- a/cc/test/layer_tree_pixel_resource_test.h +++ b/cc/test/layer_tree_pixel_resource_test.h @@ -38,8 +38,7 @@ class LayerTreeHostPixelResourceTest : public LayerTreePixelTest { void CreateResourceAndTileTaskWorkerPool( LayerTreeHostImpl* host_impl, scoped_ptr* tile_task_worker_pool, - scoped_ptr* resource_pool, - scoped_ptr* staging_resource_pool) override; + scoped_ptr* resource_pool) override; void RunPixelResourceTest(scoped_refptr content_root, base::FilePath file_name); diff --git a/cc/test/layer_tree_pixel_test.cc b/cc/test/layer_tree_pixel_test.cc index c145735f4bc6..77399b9ff8de 100644 --- a/cc/test/layer_tree_pixel_test.cc +++ b/cc/test/layer_tree_pixel_test.cc @@ -381,4 +381,10 @@ void LayerTreePixelTest::CopyBitmapToTextureMailboxAsTexture( texture_id)); } +void LayerTreePixelTest::Finish() { + scoped_ptr context = CreateTestInProcessContext(); + GLES2Interface* gl = context->GetImplementation(); + gl->Finish(); +} + } // namespace cc diff --git a/cc/test/layer_tree_pixel_test.h b/cc/test/layer_tree_pixel_test.h index f2352b0bded2..fcaf1b477364 100644 --- a/cc/test/layer_tree_pixel_test.h +++ b/cc/test/layer_tree_pixel_test.h @@ -91,6 +91,8 @@ class LayerTreePixelTest : public LayerTreeTest { uint32 sync_point, bool lost_resource); + void Finish(); + void set_enlarge_texture_amount(const gfx::Vector2d& enlarge_texture_amount) { enlarge_texture_amount_ = enlarge_texture_amount; } diff --git a/cc/test/layer_tree_test.cc b/cc/test/layer_tree_test.cc index 7823ef192719..e81f1e3970c9 100644 --- a/cc/test/layer_tree_test.cc +++ b/cc/test/layer_tree_test.cc @@ -107,10 +107,9 @@ DrawResult TestHooks::PrepareToDrawOnThread( void TestHooks::CreateResourceAndTileTaskWorkerPool( LayerTreeHostImpl* host_impl, scoped_ptr* tile_task_worker_pool, - scoped_ptr* resource_pool, - scoped_ptr* staging_resource_pool) { + scoped_ptr* resource_pool) { host_impl->LayerTreeHostImpl::CreateResourceAndTileTaskWorkerPool( - tile_task_worker_pool, resource_pool, staging_resource_pool); + tile_task_worker_pool, resource_pool); } // Adapts ThreadProxy for test. Injects test hooks for testing. 
@@ -303,10 +302,9 @@ class LayerTreeHostImplForTesting : public LayerTreeHostImpl { void CreateResourceAndTileTaskWorkerPool( scoped_ptr* tile_task_worker_pool, - scoped_ptr* resource_pool, - scoped_ptr* staging_resource_pool) override { + scoped_ptr* resource_pool) override { test_hooks_->CreateResourceAndTileTaskWorkerPool( - this, tile_task_worker_pool, resource_pool, staging_resource_pool); + this, tile_task_worker_pool, resource_pool); } void WillBeginImplFrame(const BeginFrameArgs& args) override { diff --git a/cc/test/layer_tree_test.h b/cc/test/layer_tree_test.h index 828850c5f6e9..9f6d3697c076 100644 --- a/cc/test/layer_tree_test.h +++ b/cc/test/layer_tree_test.h @@ -55,8 +55,7 @@ class TestHooks : public AnimationDelegate { virtual void CreateResourceAndTileTaskWorkerPool( LayerTreeHostImpl* host_impl, scoped_ptr* tile_task_worker_pool, - scoped_ptr* resource_pool, - scoped_ptr* staging_resource_pool); + scoped_ptr* resource_pool); virtual void WillBeginImplFrameOnThread(LayerTreeHostImpl* host_impl, const BeginFrameArgs& args) {} virtual void DidFinishImplFrameOnThread(LayerTreeHostImpl* host_impl) {} diff --git a/cc/test/pixel_test.cc b/cc/test/pixel_test.cc index ae3cef1fa1ee..e7e76ff66990 100644 --- a/cc/test/pixel_test.cc +++ b/cc/test/pixel_test.cc @@ -136,7 +136,7 @@ void PixelTest::SetUpGLRenderer(bool use_skia_gpu_backend, resource_provider_ = ResourceProvider::Create( output_surface_.get(), shared_bitmap_manager_.get(), gpu_memory_buffer_manager_.get(), main_thread_task_runner_.get(), 0, - false, 1, false); + false, 1); texture_mailbox_deleter_ = make_scoped_ptr( new TextureMailboxDeleter(base::ThreadTaskRunnerHandle::Get())); @@ -177,7 +177,7 @@ void PixelTest::SetUpSoftwareRenderer() { resource_provider_ = ResourceProvider::Create( output_surface_.get(), shared_bitmap_manager_.get(), gpu_memory_buffer_manager_.get(), main_thread_task_runner_.get(), 0, - false, 1, false); + false, 1); renderer_ = SoftwareRenderer::Create(this, &settings_.renderer_settings, output_surface_.get(), resource_provider_.get()); diff --git a/cc/test/test_in_process_context_provider.cc b/cc/test/test_in_process_context_provider.cc index efbbeec88f7a..79d53d25c87d 100644 --- a/cc/test/test_in_process_context_provider.cc +++ b/cc/test/test_in_process_context_provider.cc @@ -142,6 +142,7 @@ TestInProcessContextProvider::ContextCapabilities() { ContextProvider::Capabilities capabilities; capabilities.gpu.image = true; capabilities.gpu.texture_rectangle = true; + capabilities.gpu.sync_query = true; switch (PlatformColor::Format()) { case PlatformColor::SOURCE_FORMAT_RGBA8: diff --git a/cc/tiles/tile_manager.cc b/cc/tiles/tile_manager.cc index 3ff2885006d3..6d192a14e7d3 100644 --- a/cc/tiles/tile_manager.cc +++ b/cc/tiles/tile_manager.cc @@ -494,7 +494,7 @@ void TileManager::AssignGpuMemoryToTiles( // or deleted. If this operation becomes expensive too, only do this after // some resource(s) was returned. Note that in that case, one also need to // invalidate when releasing some resource from the pool. - resource_pool_->CheckBusyResources(false); + resource_pool_->CheckBusyResources(); // Now give memory out to the tiles until we're out, and build // the needs-to-be-rasterized queue. 
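For readers tracking the ResourcePool change through the hunks above: CheckBusyResources() has lost its wait_if_needed argument at every call site, so there is no longer a blocking variant. A minimal illustrative sketch of the resulting caller pattern follows; the function name ReclaimPoolMemory and the |pool| parameter are placeholders, not code from this patch.

// Sketch only: busy resources are re-checked without a blocking option and
// usage is then trimmed, mirroring the TileManager / LayerTreeHostImpl call
// sites in the hunks above.
void ReclaimPoolMemory(cc::ResourcePool* pool) {
  pool->CheckBusyResources();   // previously: pool->CheckBusyResources(false);
  pool->ReduceResourceUsage();
}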
diff --git a/cc/trees/layer_tree_host_impl.cc b/cc/trees/layer_tree_host_impl.cc index ae0c943199de..f638bd4c1dba 100644 --- a/cc/trees/layer_tree_host_impl.cc +++ b/cc/trees/layer_tree_host_impl.cc @@ -153,11 +153,6 @@ size_t GetMaxTransferBufferUsageBytes( max_transfer_buffer_usage_bytes); } -size_t GetMaxStagingResourceCount() { - // Upper bound for number of staging resource to allow. - return 32; -} - size_t GetDefaultMemoryAllocationLimit() { // TODO(ccameron): (http://crbug.com/137094) This 64MB default is a straggler // from the old texture manager and is just to give us a default memory @@ -1235,7 +1230,7 @@ void LayerTreeHostImpl::UpdateTileManagerMemoryPolicy( 100); DCHECK(resource_pool_); - resource_pool_->CheckBusyResources(false); + resource_pool_->CheckBusyResources(); // Soft limit is used for resource pool such that memory returns to soft // limit after going over. resource_pool_->SetResourceUsageLimits( @@ -1243,15 +1238,6 @@ void LayerTreeHostImpl::UpdateTileManagerMemoryPolicy( unused_memory_limit_in_bytes, global_tile_state_.num_resources_limit); - // Release all staging resources when invisible. - if (staging_resource_pool_) { - staging_resource_pool_->CheckBusyResources(false); - staging_resource_pool_->SetResourceUsageLimits( - std::numeric_limits::max(), - std::numeric_limits::max(), - visible_ ? GetMaxStagingResourceCount() : 0); - } - DidModifyTilePriorities(); } @@ -1465,7 +1451,7 @@ void LayerTreeHostImpl::ReclaimResources(const CompositorFrameAck* ack) { // In OOM, we now might be able to release more resources that were held // because they were exported. if (resource_pool_) { - resource_pool_->CheckBusyResources(false); + resource_pool_->CheckBusyResources(); resource_pool_->ReduceResourceUsage(); } // If we're not visible, we likely released resources, so we want to @@ -2061,8 +2047,7 @@ void LayerTreeHostImpl::CreateAndSetRenderer() { } void LayerTreeHostImpl::CreateTileManagerResources() { - CreateResourceAndTileTaskWorkerPool(&tile_task_worker_pool_, &resource_pool_, - &staging_resource_pool_); + CreateResourceAndTileTaskWorkerPool(&tile_task_worker_pool_, &resource_pool_); // TODO(vmpstr): Initialize tile task limit at ctor time. tile_manager_->SetResources( resource_pool_.get(), tile_task_worker_pool_->AsTileTaskRunner(), @@ -2073,8 +2058,7 @@ void LayerTreeHostImpl::CreateTileManagerResources() { void LayerTreeHostImpl::CreateResourceAndTileTaskWorkerPool( scoped_ptr* tile_task_worker_pool, - scoped_ptr* resource_pool, - scoped_ptr* staging_resource_pool) { + scoped_ptr* resource_pool) { DCHECK(GetTaskRunner()); // TODO(vmpstr): Make this a DCHECK (or remove) when crbug.com/419086 is // resolved. @@ -2131,14 +2115,6 @@ void LayerTreeHostImpl::CreateResourceAndTileTaskWorkerPool( } if (settings_.use_one_copy) { - // Synchronous single-threaded mode depends on tiles being ready to - // draw when raster is complete. Therefore, it must use one of zero - // copy, software raster, or GPU raster. - DCHECK(!is_synchronous_single_threaded_); - - // We need to create a staging resource pool when using copy rasterizer. 
- *staging_resource_pool = - ResourcePool::Create(resource_provider_.get(), image_target); *resource_pool = ResourcePool::Create(resource_provider_.get(), GL_TEXTURE_2D); @@ -2148,9 +2124,9 @@ void LayerTreeHostImpl::CreateResourceAndTileTaskWorkerPool( *tile_task_worker_pool = OneCopyTileTaskWorkerPool::Create( GetTaskRunner(), task_graph_runner, context_provider, - resource_provider_.get(), staging_resource_pool_.get(), - max_copy_texture_chromium_size, - settings_.use_persistent_map_for_gpu_memory_buffers); + resource_provider_.get(), max_copy_texture_chromium_size, + settings_.use_persistent_map_for_gpu_memory_buffers, image_target, + settings_.max_staging_buffers); return; } @@ -2193,7 +2169,6 @@ void LayerTreeHostImpl::PostFrameTimingEvents( void LayerTreeHostImpl::CleanUpTileManager() { tile_manager_->FinishTasksAndCleanUp(); resource_pool_ = nullptr; - staging_resource_pool_ = nullptr; tile_task_worker_pool_ = nullptr; single_thread_synchronous_task_graph_runner_ = nullptr; } @@ -2226,8 +2201,7 @@ bool LayerTreeHostImpl::InitializeRenderer( proxy_->blocking_main_thread_task_runner(), settings_.renderer_settings.highp_threshold_min, settings_.renderer_settings.use_rgba_4444_textures, - settings_.renderer_settings.texture_id_allocation_chunk_size, - settings_.use_persistent_map_for_gpu_memory_buffers); + settings_.renderer_settings.texture_id_allocation_chunk_size); CreateAndSetRenderer(); diff --git a/cc/trees/layer_tree_host_impl.h b/cc/trees/layer_tree_host_impl.h index b9a07f648382..7936eb5bf756 100644 --- a/cc/trees/layer_tree_host_impl.h +++ b/cc/trees/layer_tree_host_impl.h @@ -559,8 +559,7 @@ class CC_EXPORT LayerTreeHostImpl virtual void CreateResourceAndTileTaskWorkerPool( scoped_ptr* tile_task_worker_pool, - scoped_ptr* resource_pool, - scoped_ptr* staging_resource_pool); + scoped_ptr* resource_pool); bool prepare_tiles_needed() const { return tile_priorities_dirty_; } @@ -713,7 +712,6 @@ class CC_EXPORT LayerTreeHostImpl bool tree_resources_for_gpu_rasterization_dirty_; scoped_ptr tile_task_worker_pool_; scoped_ptr resource_pool_; - scoped_ptr staging_resource_pool_; scoped_ptr renderer_; GlobalStateThatImpactsTilePriority global_tile_state_; diff --git a/cc/trees/layer_tree_host_pixeltest_tiles.cc b/cc/trees/layer_tree_host_pixeltest_tiles.cc index a07f100c31a0..ede77dfa7aff 100644 --- a/cc/trees/layer_tree_host_pixeltest_tiles.cc +++ b/cc/trees/layer_tree_host_pixeltest_tiles.cc @@ -161,6 +161,7 @@ class LayerTreeHostTilesTestPartialInvalidation // only re-raster the stuff in the rect. If it doesn't do partial raster // it would re-raster the whole thing instead. client_.set_blue_top(false); + Finish(); picture_layer_->SetNeedsDisplayRect(gfx::Rect(50, 50, 100, 100)); // Add a copy request to see what happened! 
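The layer_tree_host_impl.cc hunk above (and the matching pixel-resource test change earlier) shows the new OneCopyTileTaskWorkerPool::Create() argument list: the separate staging ResourcePool is gone, and the staging texture target plus a staging-buffer cap are passed in directly. The call shape below is an illustrative sketch with placeholder locals, not patch code.

// Illustrative only; argument order follows the call sites in this patch.
scoped_ptr<TileTaskWorkerPool> pool = OneCopyTileTaskWorkerPool::Create(
    task_runner,
    task_graph_runner,
    context_provider,
    resource_provider,
    max_bytes_per_copy_operation,
    use_persistent_map_for_gpu_memory_buffers,
    staging_texture_target,   // image_target / staging_texture_target_ above
    max_staging_buffers);     // LayerTreeSettings default is 32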
diff --git a/cc/trees/layer_tree_settings.cc b/cc/trees/layer_tree_settings.cc index 0869e92e561e..d4150c717959 100644 --- a/cc/trees/layer_tree_settings.cc +++ b/cc/trees/layer_tree_settings.cc @@ -71,7 +71,8 @@ LayerTreeSettings::LayerTreeSettings() gather_pixel_refs(false), use_compositor_animation_timelines(false), invert_viewport_scroll_order(false), - wait_for_beginframe_interval(true) {} + wait_for_beginframe_interval(true), + max_staging_buffers(32) {} LayerTreeSettings::~LayerTreeSettings() {} diff --git a/cc/trees/layer_tree_settings.h b/cc/trees/layer_tree_settings.h index 27e80afa379d..bad1d3b56488 100644 --- a/cc/trees/layer_tree_settings.h +++ b/cc/trees/layer_tree_settings.h @@ -86,6 +86,7 @@ class CC_EXPORT LayerTreeSettings { bool use_compositor_animation_timelines; bool invert_viewport_scroll_order; bool wait_for_beginframe_interval; + int max_staging_buffers; LayerTreeDebugState initial_debug_state; diff --git a/content/browser/compositor/browser_compositor_output_surface.cc b/content/browser/compositor/browser_compositor_output_surface.cc index 0e014c94a685..bdf3a1b2de66 100644 --- a/content/browser/compositor/browser_compositor_output_surface.cc +++ b/content/browser/compositor/browser_compositor_output_surface.cc @@ -17,14 +17,16 @@ namespace content { BrowserCompositorOutputSurface::BrowserCompositorOutputSurface( const scoped_refptr& context_provider, + const scoped_refptr& worker_context_provider, const scoped_refptr& vsync_manager, scoped_ptr overlay_candidate_validator) - : OutputSurface(context_provider), + : OutputSurface(context_provider, worker_context_provider), vsync_manager_(vsync_manager), reflector_(nullptr), - use_begin_frame_scheduling_(base::CommandLine::ForCurrentProcess()-> - HasSwitch(cc::switches::kEnableBeginFrameScheduling)) { + use_begin_frame_scheduling_( + base::CommandLine::ForCurrentProcess() + ->HasSwitch(cc::switches::kEnableBeginFrameScheduling)) { overlay_candidate_validator_ = overlay_candidate_validator.Pass(); Initialize(); } diff --git a/content/browser/compositor/browser_compositor_output_surface.h b/content/browser/compositor/browser_compositor_output_surface.h index f582776734fd..f5da02d56959 100644 --- a/content/browser/compositor/browser_compositor_output_surface.h +++ b/content/browser/compositor/browser_compositor_output_surface.h @@ -56,6 +56,7 @@ class CONTENT_EXPORT BrowserCompositorOutputSurface // Constructor used by the accelerated implementation. 
BrowserCompositorOutputSurface( const scoped_refptr& context, + const scoped_refptr& worker_context, const scoped_refptr& vsync_manager, scoped_ptr overlay_candidate_validator); diff --git a/content/browser/compositor/gpu_browser_compositor_output_surface.cc b/content/browser/compositor/gpu_browser_compositor_output_surface.cc index 81da36dc9fde..70725a2a8b09 100644 --- a/content/browser/compositor/gpu_browser_compositor_output_surface.cc +++ b/content/browser/compositor/gpu_browser_compositor_output_surface.cc @@ -17,10 +17,12 @@ namespace content { GpuBrowserCompositorOutputSurface::GpuBrowserCompositorOutputSurface( const scoped_refptr& context, + const scoped_refptr& worker_context, const scoped_refptr& vsync_manager, scoped_ptr overlay_candidate_validator) : BrowserCompositorOutputSurface(context, + worker_context, vsync_manager, overlay_candidate_validator.Pass()), #if defined(OS_MACOSX) diff --git a/content/browser/compositor/gpu_browser_compositor_output_surface.h b/content/browser/compositor/gpu_browser_compositor_output_surface.h index 3bc2a3a9c829..7b4f24281224 100644 --- a/content/browser/compositor/gpu_browser_compositor_output_surface.h +++ b/content/browser/compositor/gpu_browser_compositor_output_surface.h @@ -26,6 +26,7 @@ class GpuBrowserCompositorOutputSurface public: GpuBrowserCompositorOutputSurface( const scoped_refptr& context, + const scoped_refptr& worker_context, const scoped_refptr& vsync_manager, scoped_ptr overlay_candidate_validator); diff --git a/content/browser/compositor/gpu_process_transport_factory.cc b/content/browser/compositor/gpu_process_transport_factory.cc index fcb0a3e8abbb..f03eb9e52622 100644 --- a/content/browser/compositor/gpu_process_transport_factory.cc +++ b/content/browser/compositor/gpu_process_transport_factory.cc @@ -237,6 +237,13 @@ void GpuProcessTransportFactory::EstablishedGpuChannel( scoped_refptr context_provider; if (create_gpu_output_surface) { + // Try to reuse existing worker context provider. + if (shared_worker_context_provider_) { + base::AutoLock lock(*shared_worker_context_provider_->GetLock()); + if (shared_worker_context_provider_->ContextGL() + ->GetGraphicsResetStatusKHR() != GL_NO_ERROR) + shared_worker_context_provider_ = nullptr; + } scoped_refptr gpu_channel_host = BrowserGpuChannelHostFactory::instance()->GetGpuChannel(); if (gpu_channel_host.get()) { @@ -246,12 +253,24 @@ void GpuProcessTransportFactory::EstablishedGpuChannel( BROWSER_COMPOSITOR_ONSCREEN_CONTEXT); if (context_provider && !context_provider->BindToCurrentThread()) context_provider = nullptr; + if (!shared_worker_context_provider_) { + shared_worker_context_provider_ = ContextProviderCommandBuffer::Create( + GpuProcessTransportFactory::CreateContextCommon(gpu_channel_host, + 0), + BROWSER_WORKER_CONTEXT); + if (shared_worker_context_provider_ && + !shared_worker_context_provider_->BindToCurrentThread()) + shared_worker_context_provider_ = nullptr; + } } + bool created_gpu_browser_compositor = + !!context_provider && !!shared_worker_context_provider_; + UMA_HISTOGRAM_BOOLEAN("Aura.CreatedGpuBrowserCompositor", - !!context_provider.get()); + created_gpu_browser_compositor); - if (!context_provider) { + if (!created_gpu_browser_compositor) { // Try again. 
CauseForGpuLaunch cause = CAUSE_FOR_GPU_LAUNCH_WEBGRAPHICSCONTEXT3DCOMMANDBUFFERIMPL_INITIALIZE; @@ -274,7 +293,8 @@ void GpuProcessTransportFactory::EstablishedGpuChannel( context_provider->ContextCapabilities(); if (!data->surface_id) { surface = make_scoped_ptr(new OffscreenBrowserCompositorOutputSurface( - context_provider, compositor->vsync_manager(), + context_provider, shared_worker_context_provider_, + compositor->vsync_manager(), scoped_ptr())); } else if (capabilities.gpu.surfaceless) { GLenum target = GL_TEXTURE_2D; @@ -285,13 +305,15 @@ void GpuProcessTransportFactory::EstablishedGpuChannel( #endif surface = make_scoped_ptr(new GpuSurfacelessBrowserCompositorOutputSurface( - context_provider, data->surface_id, compositor->vsync_manager(), + context_provider, shared_worker_context_provider_, + data->surface_id, compositor->vsync_manager(), CreateOverlayCandidateValidator(compositor->widget()), target, format, BrowserGpuMemoryBufferManager::current())); } else { if (!surface) { surface = make_scoped_ptr(new GpuBrowserCompositorOutputSurface( - context_provider, compositor->vsync_manager(), + context_provider, shared_worker_context_provider_, + compositor->vsync_manager(), CreateOverlayCandidateValidator(compositor->widget()))); } } @@ -322,7 +344,8 @@ void GpuProcessTransportFactory::EstablishedGpuChannel( scoped_ptr output_surface( new cc::SurfaceDisplayOutputSurface( - manager, compositor->surface_id_allocator(), context_provider)); + manager, compositor->surface_id_allocator(), context_provider, + shared_worker_context_provider_)); display_client->set_surface_output_surface(output_surface.get()); output_surface->set_display_client(display_client.get()); display_client->display()->Resize(compositor->size()); diff --git a/content/browser/compositor/gpu_process_transport_factory.h b/content/browser/compositor/gpu_process_transport_factory.h index c16b36ef7d89..564b3cd97eb2 100644 --- a/content/browser/compositor/gpu_process_transport_factory.h +++ b/content/browser/compositor/gpu_process_transport_factory.h @@ -102,6 +102,7 @@ class GpuProcessTransportFactory uint32_t next_surface_id_namespace_; scoped_ptr task_graph_runner_; scoped_ptr raster_thread_; + scoped_refptr shared_worker_context_provider_; #if defined(OS_WIN) scoped_ptr software_backing_; diff --git a/content/browser/compositor/gpu_surfaceless_browser_compositor_output_surface.cc b/content/browser/compositor/gpu_surfaceless_browser_compositor_output_surface.cc index 9507cc31148d..d28e2645d06a 100644 --- a/content/browser/compositor/gpu_surfaceless_browser_compositor_output_surface.cc +++ b/content/browser/compositor/gpu_surfaceless_browser_compositor_output_surface.cc @@ -19,6 +19,7 @@ namespace content { GpuSurfacelessBrowserCompositorOutputSurface:: GpuSurfacelessBrowserCompositorOutputSurface( const scoped_refptr& context, + const scoped_refptr& worker_context, int surface_id, const scoped_refptr& vsync_manager, scoped_ptr @@ -27,6 +28,7 @@ GpuSurfacelessBrowserCompositorOutputSurface:: unsigned int internalformat, BrowserGpuMemoryBufferManager* gpu_memory_buffer_manager) : GpuBrowserCompositorOutputSurface(context, + worker_context, vsync_manager, overlay_candidate_validator.Pass()), internalformat_(internalformat), diff --git a/content/browser/compositor/gpu_surfaceless_browser_compositor_output_surface.h b/content/browser/compositor/gpu_surfaceless_browser_compositor_output_surface.h index 27bb3358fbc5..ed5618d17a45 100644 --- a/content/browser/compositor/gpu_surfaceless_browser_compositor_output_surface.h +++ 
b/content/browser/compositor/gpu_surfaceless_browser_compositor_output_surface.h @@ -18,6 +18,7 @@ class GpuSurfacelessBrowserCompositorOutputSurface public: GpuSurfacelessBrowserCompositorOutputSurface( const scoped_refptr& context, + const scoped_refptr& worker_context, int surface_id, const scoped_refptr& vsync_manager, scoped_ptr diff --git a/content/browser/compositor/offscreen_browser_compositor_output_surface.cc b/content/browser/compositor/offscreen_browser_compositor_output_surface.cc index 3a98015e5662..606b9fa16994 100644 --- a/content/browser/compositor/offscreen_browser_compositor_output_surface.cc +++ b/content/browser/compositor/offscreen_browser_compositor_output_surface.cc @@ -30,10 +30,12 @@ namespace content { OffscreenBrowserCompositorOutputSurface:: OffscreenBrowserCompositorOutputSurface( const scoped_refptr& context, + const scoped_refptr& worker_context, const scoped_refptr& vsync_manager, scoped_ptr overlay_candidate_validator) : BrowserCompositorOutputSurface(context, + worker_context, vsync_manager, overlay_candidate_validator.Pass()), fbo_(0), diff --git a/content/browser/compositor/offscreen_browser_compositor_output_surface.h b/content/browser/compositor/offscreen_browser_compositor_output_surface.h index 67fc2247c7e5..c9e32e7876dd 100644 --- a/content/browser/compositor/offscreen_browser_compositor_output_surface.h +++ b/content/browser/compositor/offscreen_browser_compositor_output_surface.h @@ -23,6 +23,7 @@ class OffscreenBrowserCompositorOutputSurface public: OffscreenBrowserCompositorOutputSurface( const scoped_refptr& context, + const scoped_refptr& worker_context, const scoped_refptr& vsync_manager, scoped_ptr overlay_candidate_validator); diff --git a/content/browser/compositor/reflector_impl_unittest.cc b/content/browser/compositor/reflector_impl_unittest.cc index ce9fbb9e6613..3377b8a917bc 100644 --- a/content/browser/compositor/reflector_impl_unittest.cc +++ b/content/browser/compositor/reflector_impl_unittest.cc @@ -74,6 +74,7 @@ class TestOutputSurface : public BrowserCompositorOutputSurface { const scoped_refptr& context_provider, const scoped_refptr& vsync_manager) : BrowserCompositorOutputSurface(context_provider, + nullptr, vsync_manager, CreateTestValidatorOzone().Pass()) {} diff --git a/content/browser/gpu/compositor_util.cc b/content/browser/gpu/compositor_util.cc index 2811a0c7e7b6..e80973bca4a5 100644 --- a/content/browser/gpu/compositor_util.cc +++ b/content/browser/gpu/compositor_util.cc @@ -242,14 +242,6 @@ bool IsOneCopyUploadEnabled() { bool IsZeroCopyUploadEnabled() { const base::CommandLine& command_line = *base::CommandLine::ForCurrentProcess(); - // Single-threaded mode in the renderer process (for layout tests) is - // synchronous, which depends on tiles being ready to draw when raster is - // complete. Therefore, it must use one of zero copy, software raster, or - // GPU raster. So we force zero-copy on for the case where software/GPU raster - // is not used. - // TODO(reveman): One-copy can work with sync compositing: crbug.com/490295. 
- if (command_line.HasSwitch(switches::kDisableThreadedCompositing)) - return true; return command_line.HasSwitch(switches::kEnableZeroCopy); } diff --git a/content/browser/renderer_host/compositor_impl_android.cc b/content/browser/renderer_host/compositor_impl_android.cc index 358ac519981e..d502f235e10f 100644 --- a/content/browser/renderer_host/compositor_impl_android.cc +++ b/content/browser/renderer_host/compositor_impl_android.cc @@ -674,7 +674,7 @@ void CompositorImpl::CreateOutputSurface() { base::ThreadTaskRunnerHandle::Get())); scoped_ptr surface_output_surface( new cc::SurfaceDisplayOutputSurface( - manager, surface_id_allocator_.get(), context_provider)); + manager, surface_id_allocator_.get(), context_provider, nullptr)); display_client_->set_surface_output_surface(surface_output_surface.get()); surface_output_surface->set_display_client(display_client_.get()); diff --git a/content/common/gpu/client/command_buffer_metrics.cc b/content/common/gpu/client/command_buffer_metrics.cc index d16f27619059..5065ca5da6c3 100644 --- a/content/common/gpu/client/command_buffer_metrics.cc +++ b/content/common/gpu/client/command_buffer_metrics.cc @@ -79,6 +79,10 @@ void RecordContextLost(CommandBufferContextType type, UMA_HISTOGRAM_ENUMERATION("GPU.ContextLost.BrowserMainThread", reason, CONTEXT_LOST_REASON_MAX_ENUM); break; + case BROWSER_WORKER_CONTEXT: + UMA_HISTOGRAM_ENUMERATION("GPU.ContextLost.BrowserWorker", reason, + CONTEXT_LOST_REASON_MAX_ENUM); + break; case RENDER_COMPOSITOR_CONTEXT: UMA_HISTOGRAM_ENUMERATION("GPU.ContextLost.RenderCompositor", reason, CONTEXT_LOST_REASON_MAX_ENUM); @@ -120,6 +124,8 @@ std::string CommandBufferContextTypeToString(CommandBufferContextType type) { return "Compositor"; case BROWSER_OFFSCREEN_MAINTHREAD_CONTEXT: return "Offscreen-MainThread"; + case BROWSER_WORKER_CONTEXT: + return "CompositorWorker"; case RENDER_COMPOSITOR_CONTEXT: return "RenderCompositor"; case RENDER_WORKER_CONTEXT: diff --git a/content/common/gpu/client/command_buffer_metrics.h b/content/common/gpu/client/command_buffer_metrics.h index e198d8579d9c..0b4790cd4b09 100644 --- a/content/common/gpu/client/command_buffer_metrics.h +++ b/content/common/gpu/client/command_buffer_metrics.h @@ -14,6 +14,7 @@ namespace content { enum CommandBufferContextType { BROWSER_COMPOSITOR_ONSCREEN_CONTEXT, BROWSER_OFFSCREEN_MAINTHREAD_CONTEXT, + BROWSER_WORKER_CONTEXT, RENDER_COMPOSITOR_CONTEXT, RENDER_WORKER_CONTEXT, RENDERER_MAINTHREAD_CONTEXT, diff --git a/content/renderer/gpu/render_widget_compositor.cc b/content/renderer/gpu/render_widget_compositor.cc index 942470b58059..aec0e5dc85bc 100644 --- a/content/renderer/gpu/render_widget_compositor.cc +++ b/content/renderer/gpu/render_widget_compositor.cc @@ -467,6 +467,11 @@ void RenderWidgetCompositor::Initialize() { settings.use_external_begin_frame_source = false; } + settings.max_staging_buffers = 32; + // Use 1/4th of staging buffers on low-end devices. 
+ if (base::SysInfo::IsLowEndDevice()) + settings.max_staging_buffers /= 4; + scoped_refptr compositor_thread_task_runner = compositor_deps_->GetCompositorImplThreadTaskRunner(); scoped_refptr diff --git a/gpu/command_buffer/service/query_manager.cc b/gpu/command_buffer/service/query_manager.cc index 49f111f83914..a269f35323d5 100644 --- a/gpu/command_buffer/service/query_manager.cc +++ b/gpu/command_buffer/service/query_manager.cc @@ -785,6 +785,7 @@ QueryManager::~QueryManager() { } void QueryManager::Destroy(bool have_context) { + active_queries_.clear(); pending_queries_.clear(); pending_transfer_queries_.clear(); active_queries_.clear(); diff --git a/ui/compositor/test/in_process_context_factory.cc b/ui/compositor/test/in_process_context_factory.cc index 13ddd9075288..942947d5b1b0 100644 --- a/ui/compositor/test/in_process_context_factory.cc +++ b/ui/compositor/test/in_process_context_factory.cc @@ -41,9 +41,11 @@ class FakeReflector : public Reflector { // GL surface. class DirectOutputSurface : public cc::OutputSurface { public: - explicit DirectOutputSurface( - const scoped_refptr& context_provider) - : cc::OutputSurface(context_provider), weak_ptr_factory_(this) {} + DirectOutputSurface( + const scoped_refptr& context_provider, + const scoped_refptr& worker_context_provider) + : cc::OutputSurface(context_provider, worker_context_provider), + weak_ptr_factory_(this) {} ~DirectOutputSurface() override {} @@ -110,16 +112,19 @@ void InProcessContextFactory::CreateOutputSurface( InProcessContextProvider::Create(attribs, &gpu_memory_buffer_manager_, &image_factory_, compositor->widget(), "UICompositor"); + scoped_refptr worker_context_provider = + InProcessContextProvider::CreateOffscreen(&gpu_memory_buffer_manager_, + &image_factory_); scoped_ptr real_output_surface; if (use_test_surface_) { bool flipped_output_surface = false; real_output_surface = make_scoped_ptr(new cc::PixelTestOutputSurface( - context_provider, flipped_output_surface)); + context_provider, worker_context_provider, flipped_output_surface)); } else { - real_output_surface = - make_scoped_ptr(new DirectOutputSurface(context_provider)); + real_output_surface = make_scoped_ptr( + new DirectOutputSurface(context_provider, worker_context_provider)); } if (surface_manager_) { @@ -129,9 +134,9 @@ void InProcessContextFactory::CreateOutputSurface( GetSharedBitmapManager(), GetGpuMemoryBufferManager(), compositor->GetRendererSettings(), compositor->task_runner())); scoped_ptr surface_output_surface( - new cc::SurfaceDisplayOutputSurface(surface_manager_, - compositor->surface_id_allocator(), - context_provider)); + new cc::SurfaceDisplayOutputSurface( + surface_manager_, compositor->surface_id_allocator(), + context_provider, worker_context_provider)); display_client->set_surface_output_surface(surface_output_surface.get()); surface_output_surface->set_display_client(display_client.get()); diff --git a/ui/compositor/test/in_process_context_provider.cc b/ui/compositor/test/in_process_context_provider.cc index c5f950f580ea..76cd19dc191f 100644 --- a/ui/compositor/test/in_process_context_provider.cc +++ b/ui/compositor/test/in_process_context_provider.cc @@ -122,6 +122,10 @@ bool InProcessContextProvider::BindToCurrentThread() { return true; } +void InProcessContextProvider::DetachFromThread() { + context_thread_checker_.DetachFromThread(); +} + cc::ContextProvider::Capabilities InProcessContextProvider::ContextCapabilities() { DCHECK(context_thread_checker_.CalledOnValidThread()); diff --git 
a/ui/compositor/test/in_process_context_provider.h b/ui/compositor/test/in_process_context_provider.h index bc3e494ebeeb..0f5f35f14123 100644 --- a/ui/compositor/test/in_process_context_provider.h +++ b/ui/compositor/test/in_process_context_provider.h @@ -49,6 +49,7 @@ class InProcessContextProvider : public cc::ContextProvider { // cc::ContextProvider: bool BindToCurrentThread() override; + void DetachFromThread() override; Capabilities ContextCapabilities() override; gpu::gles2::GLES2Interface* ContextGL() override; gpu::ContextSupport* ContextSupport() override; -- 2.11.4.GIT
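Taken together, the browser-compositor and surfaces hunks in this patch thread a worker context provider through the output-surface constructors alongside the existing compositor context provider; call sites that have none (the Android compositor and the reflector unittest) pass nullptr. A hedged usage sketch with placeholder locals:

// Sketch only; the named objects stand in for whatever the creating factory
// already owns. Passing nullptr for the worker provider is allowed, as the
// compositor_impl_android.cc and reflector_impl_unittest.cc hunks show.
scoped_ptr<cc::OutputSurface> output_surface(
    new cc::SurfaceDisplayOutputSurface(surface_manager, surface_id_allocator,
                                        context_provider,
                                        shared_worker_context_provider));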