cc/surfaces/display.cc
// Copyright 2014 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "cc/surfaces/display.h"

#include "base/thread_task_runner_handle.h"
#include "base/trace_event/trace_event.h"
#include "cc/debug/benchmark_instrumentation.h"
#include "cc/output/compositor_frame.h"
#include "cc/output/compositor_frame_ack.h"
#include "cc/output/direct_renderer.h"
#include "cc/output/gl_renderer.h"
#include "cc/output/renderer_settings.h"
#include "cc/output/software_renderer.h"
#include "cc/output/texture_mailbox_deleter.h"
#include "cc/surfaces/display_client.h"
#include "cc/surfaces/display_scheduler.h"
#include "cc/surfaces/surface.h"
#include "cc/surfaces/surface_aggregator.h"
#include "cc/surfaces/surface_manager.h"
#include "gpu/command_buffer/client/gles2_interface.h"
#include "ui/gfx/buffer_types.h"

namespace cc {

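// Display produces the final frame for a physical display: it aggregates the
// surface tree rooted at the current surface id into a single CompositorFrame,
// draws it with a GL or software renderer, and swaps it to the OutputSurface.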
Display::Display(DisplayClient* client,
                 SurfaceManager* manager,
                 SharedBitmapManager* bitmap_manager,
                 gpu::GpuMemoryBufferManager* gpu_memory_buffer_manager,
                 const RendererSettings& settings)
    : client_(client),
      manager_(manager),
      bitmap_manager_(bitmap_manager),
      gpu_memory_buffer_manager_(gpu_memory_buffer_manager),
      settings_(settings),
      device_scale_factor_(1.f),
      swapped_since_resize_(false),
      scheduler_(nullptr),
      texture_mailbox_deleter_(new TextureMailboxDeleter(nullptr)) {
  manager_->AddObserver(this);
}

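// Notify any surfaces contained in the last aggregated frame that their draw
// callbacks will not run, since this Display is going away.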
Display::~Display() {
  manager_->RemoveObserver(this);
  if (aggregator_) {
    for (const auto& id_entry : aggregator_->previous_contained_surfaces()) {
      Surface* surface = manager_->GetSurfaceForId(id_entry.first);
      if (surface)
        surface->RunDrawCallbacks(SurfaceDrawStatus::DRAW_SKIPPED);
    }
  }
}

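// Takes ownership of |output_surface| and binds this Display to it as its
// client. |scheduler| is not owned and may be null.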
bool Display::Initialize(scoped_ptr<OutputSurface> output_surface,
                         DisplayScheduler* scheduler) {
  output_surface_ = output_surface.Pass();
  scheduler_ = scheduler;
  return output_surface_->BindToClient(this);
}

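// Points the Display at a new root surface. This is a no-op if neither the
// surface id nor the device scale factor changed.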
void Display::SetSurfaceId(SurfaceId id, float device_scale_factor) {
  if (current_surface_id_ == id && device_scale_factor_ == device_scale_factor)
    return;

  TRACE_EVENT0("cc", "Display::SetSurfaceId");

  current_surface_id_ = id;
  device_scale_factor_ = device_scale_factor;

  UpdateRootSurfaceResourcesLocked();
  if (scheduler_)
    scheduler_->SetNewRootSurface(id);
}

void Display::Resize(const gfx::Size& size) {
  if (size == current_surface_size_)
    return;

  TRACE_EVENT0("cc", "Display::Resize");

  // Need to ensure all pending swaps have executed before the window is
  // resized, or D3D11 will scale the swap output.
  if (settings_.finish_rendering_on_resize) {
    if (!swapped_since_resize_ && scheduler_)
      scheduler_->ForceImmediateSwapIfPossible();
    if (swapped_since_resize_ && output_surface_ &&
        output_surface_->context_provider())
      output_surface_->context_provider()->ContextGL()->ShallowFinishCHROMIUM();
  }

  swapped_since_resize_ = false;
  current_surface_size_ = size;
  if (scheduler_)
    scheduler_->DisplayResized();
}

void Display::SetExternalClip(const gfx::Rect& clip) {
  external_clip_ = clip;
}

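// Lazily creates the ResourceProvider, the renderer (GL when a context
// provider is available, software otherwise) and the SurfaceAggregator.
// Returns early if any of these fail to initialize.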
void Display::InitializeRenderer() {
  if (resource_provider_)
    return;

  // Display does not use GpuMemoryBuffers, so persistent map is not relevant.
  bool use_persistent_map_for_gpu_memory_buffers = false;
  scoped_ptr<ResourceProvider> resource_provider = ResourceProvider::Create(
      output_surface_.get(), bitmap_manager_, gpu_memory_buffer_manager_,
      nullptr, settings_.highp_threshold_min, settings_.use_rgba_4444_textures,
      settings_.texture_id_allocation_chunk_size,
      use_persistent_map_for_gpu_memory_buffers,
      std::vector<unsigned>(static_cast<size_t>(gfx::BufferFormat::LAST) + 1,
                            GL_TEXTURE_2D));
  if (!resource_provider)
    return;

  if (output_surface_->context_provider()) {
    scoped_ptr<GLRenderer> renderer = GLRenderer::Create(
        this, &settings_, output_surface_.get(), resource_provider.get(),
        texture_mailbox_deleter_.get(), settings_.highp_threshold_min);
    if (!renderer)
      return;
    renderer_ = renderer.Pass();
  } else {
    scoped_ptr<SoftwareRenderer> renderer = SoftwareRenderer::Create(
        this, &settings_, output_surface_.get(), resource_provider.get());
    if (!renderer)
      return;
    renderer_ = renderer.Pass();
  }

  resource_provider_ = resource_provider.Pass();
  // TODO(jbauman): Outputting an incomplete quad list doesn't work when using
  // overlays.
  bool output_partial_list = renderer_->Capabilities().using_partial_swap &&
                             !output_surface_->GetOverlayCandidateValidator();
  aggregator_.reset(new SurfaceAggregator(manager_, resource_provider_.get(),
                                          output_partial_list));
}

void Display::DidLoseOutputSurface() {
  if (scheduler_)
    scheduler_->OutputSurfaceLost();
  // WARNING: The client may delete the Display in this method call. Do not
  // make any additional references to members after this call.
  client_->OutputSurfaceLost();
}

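// Tells the scheduler whether the root surface's resources must stay locked,
// which is the case while there is no root surface or no eligible frame for
// it.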
void Display::UpdateRootSurfaceResourcesLocked() {
  Surface* surface = manager_->GetSurfaceForId(current_surface_id_);
  bool root_surface_resources_locked = !surface || !surface->GetEligibleFrame();
  if (scheduler_)
    scheduler_->SetRootSurfaceResourcesLocked(root_surface_resources_locked);
}

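// Aggregates the surface tree rooted at |current_surface_id_| into a single
// CompositorFrame, draws it when it has damage, stored latency info or copy
// requests, and swaps only when the frame size matches the current display
// size. Returns false if no frame could be produced.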
bool Display::DrawAndSwap() {
  TRACE_EVENT0("cc", "Display::DrawAndSwap");

  if (current_surface_id_.is_null()) {
    TRACE_EVENT_INSTANT0("cc", "No root surface.", TRACE_EVENT_SCOPE_THREAD);
    return false;
  }

  InitializeRenderer();
  if (!output_surface_) {
    TRACE_EVENT_INSTANT0("cc", "No output surface", TRACE_EVENT_SCOPE_THREAD);
    return false;
  }

  scoped_ptr<CompositorFrame> frame =
      aggregator_->Aggregate(current_surface_id_);
  if (!frame) {
    TRACE_EVENT_INSTANT0("cc", "Empty aggregated frame.",
                         TRACE_EVENT_SCOPE_THREAD);
    return false;
  }

  // Run callbacks early to allow pipelining.
  for (const auto& id_entry : aggregator_->previous_contained_surfaces()) {
    Surface* surface = manager_->GetSurfaceForId(id_entry.first);
    if (surface)
      surface->RunDrawCallbacks(SurfaceDrawStatus::DRAWN);
  }

  DelegatedFrameData* frame_data = frame->delegated_frame_data.get();

  frame->metadata.latency_info.insert(frame->metadata.latency_info.end(),
                                      stored_latency_info_.begin(),
                                      stored_latency_info_.end());
  stored_latency_info_.clear();
  bool have_copy_requests = false;
  for (const auto* pass : frame_data->render_pass_list) {
    have_copy_requests |= !pass->copy_requests.empty();
  }

  gfx::Size surface_size;
  bool have_damage = false;
  if (!frame_data->render_pass_list.empty()) {
    surface_size = frame_data->render_pass_list.back()->output_rect.size();
    have_damage =
        !frame_data->render_pass_list.back()->damage_rect.size().IsEmpty();
  }

  bool size_matches = surface_size == current_surface_size_;
  if (!size_matches)
    TRACE_EVENT_INSTANT0("cc", "Size mismatch.", TRACE_EVENT_SCOPE_THREAD);

  bool should_draw = !frame->metadata.latency_info.empty() ||
                     have_copy_requests || (have_damage && size_matches);

  // If the surface is suspended then the resources to be used by the draw are
  // likely destroyed.
  if (output_surface_->SurfaceIsSuspendForRecycle()) {
    TRACE_EVENT_INSTANT0("cc", "Surface is suspended for recycle.",
                         TRACE_EVENT_SCOPE_THREAD);
    should_draw = false;
  }

  if (should_draw) {
    gfx::Rect device_viewport_rect = gfx::Rect(current_surface_size_);
    gfx::Rect device_clip_rect =
        external_clip_.IsEmpty() ? device_viewport_rect : external_clip_;
    bool disable_picture_quad_image_filtering = false;

    renderer_->DecideRenderPassAllocationsForFrame(
        frame_data->render_pass_list);
    renderer_->DrawFrame(&frame_data->render_pass_list, device_scale_factor_,
                         device_viewport_rect, device_clip_rect,
                         disable_picture_quad_image_filtering);
  } else {
    TRACE_EVENT_INSTANT0("cc", "Draw skipped.", TRACE_EVENT_SCOPE_THREAD);
  }

  bool should_swap = should_draw && size_matches;
  if (should_swap) {
    swapped_since_resize_ = true;
    for (auto& latency : frame->metadata.latency_info) {
      TRACE_EVENT_FLOW_STEP0("input,benchmark", "LatencyInfo.Flow",
                             TRACE_ID_DONT_MANGLE(latency.trace_id()),
                             "Display::DrawAndSwap");
    }
    benchmark_instrumentation::IssueDisplayRenderingStatsEvent();
    renderer_->SwapBuffers(frame->metadata);
  } else {
    TRACE_EVENT_INSTANT0("cc", "Swap skipped.", TRACE_EVENT_SCOPE_THREAD);
    stored_latency_info_.insert(stored_latency_info_.end(),
                                frame->metadata.latency_info.begin(),
                                frame->metadata.latency_info.end());
    DidSwapBuffers();
    DidSwapBuffersComplete();
  }

  return true;
}

void Display::DidSwapBuffers() {
  if (scheduler_)
    scheduler_->DidSwapBuffers();
}

void Display::DidSwapBuffersComplete() {
  if (scheduler_)
    scheduler_->DidSwapBuffersComplete();
}

void Display::CommitVSyncParameters(base::TimeTicks timebase,
                                    base::TimeDelta interval) {
  client_->CommitVSyncParameters(timebase, interval);
}

void Display::SetMemoryPolicy(const ManagedMemoryPolicy& policy) {
  client_->SetMemoryPolicy(policy);
}

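// The following OutputSurfaceClient methods are never expected to be called
// for a Display's output surface.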
void Display::OnDraw() {
  NOTREACHED();
}

void Display::SetNeedsRedrawRect(const gfx::Rect& damage_rect) {
  NOTREACHED();
}

void Display::ReclaimResources(const CompositorFrameAck* ack) {
  NOTREACHED();
}

void Display::SetExternalDrawConstraints(
    const gfx::Transform& transform,
    const gfx::Rect& viewport,
    const gfx::Rect& clip,
    const gfx::Rect& viewport_rect_for_tile_priority,
    const gfx::Transform& transform_for_tile_priority,
    bool resourceless_software_draw) {
  NOTREACHED();
}

void Display::SetTreeActivationCallback(const base::Closure& callback) {
  NOTREACHED();
}

void Display::SetFullRootLayerDamage() {
  if (aggregator_ && !current_surface_id_.is_null())
    aggregator_->SetFullDamageForSurface(current_surface_id_);
}

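// Called by the SurfaceManager whenever a surface is damaged. Resources that
// the aggregator holds for a surface whose eligible frame no longer carries
// resources are released, damage is forwarded to the scheduler, and the root
// surface's resource lock state is re-evaluated when the root is damaged.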
void Display::OnSurfaceDamaged(SurfaceId surface_id, bool* changed) {
  if (aggregator_ &&
      aggregator_->previous_contained_surfaces().count(surface_id)) {
    Surface* surface = manager_->GetSurfaceForId(surface_id);
    if (surface) {
      const CompositorFrame* current_frame = surface->GetEligibleFrame();
      if (!current_frame || !current_frame->delegated_frame_data ||
          !current_frame->delegated_frame_data->resource_list.size()) {
        aggregator_->ReleaseResources(surface_id);
      }
    }
    if (scheduler_)
      scheduler_->SurfaceDamaged(surface_id);
    *changed = true;
  } else if (surface_id == current_surface_id_) {
    if (scheduler_)
      scheduler_->SurfaceDamaged(surface_id);
    *changed = true;
  }

  if (surface_id == current_surface_id_)
    UpdateRootSurfaceResourcesLocked();
}

SurfaceId Display::CurrentSurfaceId() {
  return current_surface_id_;
}

}  // namespace cc