content/common/gpu/gpu_memory_manager.cc

// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "content/common/gpu/gpu_memory_manager.h"

#include <algorithm>

#include "base/bind.h"
#include "base/command_line.h"
#include "base/message_loop/message_loop.h"
#include "base/process/process_handle.h"
#include "base/strings/string_number_conversions.h"
#include "base/thread_task_runner_handle.h"  // For base::ThreadTaskRunnerHandle.
#include "base/trace_event/trace_event.h"
#include "content/common/gpu/gpu_channel_manager.h"
#include "content/common/gpu/gpu_memory_manager_client.h"
#include "content/common/gpu/gpu_memory_tracking.h"
#include "content/common/gpu/gpu_memory_uma_stats.h"
#include "content/common/gpu/gpu_messages.h"
#include "gpu/command_buffer/common/gpu_memory_allocation.h"
#include "gpu/command_buffer/service/gpu_switches.h"

using gpu::MemoryAllocation;

namespace content {
namespace {

const int kDelayedScheduleManageTimeoutMs = 67;

const uint64 kBytesAllocatedUnmanagedStep = 16 * 1024 * 1024;
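
// Adjusts |*total_size| by the delta between |new_size| and |old_size|.
// The single unsigned addition handles both growth and shrinkage: when
// |new_size| < |old_size|, (new_size - old_size) wraps around, so the
// addition is equivalent to subtracting the difference. The DCHECK guards
// against the counter itself underflowing.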
void TrackValueChanged(uint64 old_size, uint64 new_size, uint64* total_size) {
  DCHECK(new_size > old_size || *total_size >= (old_size - new_size));
  *total_size += (new_size - old_size);
}

}  // namespace

GpuMemoryManager::GpuMemoryManager(
    GpuChannelManager* channel_manager,
    uint64 max_surfaces_with_frontbuffer_soft_limit)
    : channel_manager_(channel_manager),
      manage_immediate_scheduled_(false),
      disable_schedule_manage_(false),
      max_surfaces_with_frontbuffer_soft_limit_(
          max_surfaces_with_frontbuffer_soft_limit),
      client_hard_limit_bytes_(0),
      bytes_allocated_managed_current_(0),
      bytes_allocated_unmanaged_current_(0),
      bytes_allocated_historical_max_(0)
{ }

GpuMemoryManager::~GpuMemoryManager() {
  DCHECK(tracking_groups_.empty());
  DCHECK(clients_visible_mru_.empty());
  DCHECK(clients_nonvisible_mru_.empty());
  DCHECK(clients_nonsurface_.empty());
  DCHECK(!bytes_allocated_managed_current_);
  DCHECK(!bytes_allocated_unmanaged_current_);
}
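
// Recomputes client_hard_limit_bytes_, the per-client limit handed out in
// the allocations below. A command-line override (the value is given in MB)
// wins over everything else; otherwise, on Android the limit is the minimum
// of what the visible clients report, clamped to [8 MB, 256 MB], and on
// desktop platforms a fixed 512 MB is used. For example (assuming
// kForceGpuMemAvailableMb corresponds to --force-gpu-mem-available-mb),
// passing --force-gpu-mem-available-mb=64 would pin the limit to 64 MB.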
void GpuMemoryManager::UpdateAvailableGpuMemory() {
  // If the value was overridden on the command line, use the specified value.
  static bool client_hard_limit_bytes_overridden =
      base::CommandLine::ForCurrentProcess()->HasSwitch(
          switches::kForceGpuMemAvailableMb);
  if (client_hard_limit_bytes_overridden) {
    base::StringToUint64(
        base::CommandLine::ForCurrentProcess()->GetSwitchValueASCII(
            switches::kForceGpuMemAvailableMb),
        &client_hard_limit_bytes_);
    client_hard_limit_bytes_ *= 1024 * 1024;
    return;
  }

#if defined(OS_ANDROID)
  // On Android, derive the limit from what the visible clients report as
  // total GPU memory. We do not have a reliable concept of multiple GPUs
  // existing in a system, so just be safe and go with the minimum encountered.
  uint64 bytes_min = 0;

  // Only use the clients that are visible, because otherwise the set of
  // clients we are querying could become extremely large.
  for (ClientStateList::const_iterator it = clients_visible_mru_.begin();
       it != clients_visible_mru_.end();
       ++it) {
    const GpuMemoryManagerClientState* client_state = *it;
    if (!client_state->has_surface_)
      continue;
    if (!client_state->visible_)
      continue;

    uint64 bytes = 0;
    if (client_state->client_->GetTotalGpuMemory(&bytes)) {
      if (!bytes_min || bytes < bytes_min)
        bytes_min = bytes;
    }
  }

  client_hard_limit_bytes_ = bytes_min;
  // Clamp the observed value to a specific range on Android.
  client_hard_limit_bytes_ = std::max(client_hard_limit_bytes_,
                                      static_cast<uint64>(8 * 1024 * 1024));
  client_hard_limit_bytes_ = std::min(client_hard_limit_bytes_,
                                      static_cast<uint64>(256 * 1024 * 1024));
#else
  // Ignore what the system said and give all clients the same maximum
  // allocation on desktop platforms.
  client_hard_limit_bytes_ = 512 * 1024 * 1024;
#endif
}
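
// Schedules a call to Manage() on the current thread. kScheduleManageNow
// posts an immediate task (and cancels any pending delayed one); any other
// value posts a single delayed task, throttled by
// kDelayedScheduleManageTimeoutMs, unless one is already pending.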
void GpuMemoryManager::ScheduleManage(
    ScheduleManageTime schedule_manage_time) {
  if (disable_schedule_manage_)
    return;
  if (manage_immediate_scheduled_)
    return;
  if (schedule_manage_time == kScheduleManageNow) {
    base::ThreadTaskRunnerHandle::Get()->PostTask(
        FROM_HERE, base::Bind(&GpuMemoryManager::Manage, AsWeakPtr()));
    manage_immediate_scheduled_ = true;
    if (!delayed_manage_callback_.IsCancelled())
      delayed_manage_callback_.Cancel();
  } else {
    if (!delayed_manage_callback_.IsCancelled())
      return;
    delayed_manage_callback_.Reset(base::Bind(&GpuMemoryManager::Manage,
                                              AsWeakPtr()));
    base::ThreadTaskRunnerHandle::Get()->PostDelayedTask(
        FROM_HERE, delayed_manage_callback_.callback(),
        base::TimeDelta::FromMilliseconds(kDelayedScheduleManageTimeoutMs));
  }
}
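
// Called when a tracking group's allocation changes size. Updates the
// group's byte count and the global managed/unmanaged totals, emits a trace
// counter when the size actually changed, and pushes fresh UMA stats to the
// browser whenever usage grows past the historical maximum by more than
// kBytesAllocatedUnmanagedStep.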
void GpuMemoryManager::TrackMemoryAllocatedChange(
    GpuMemoryTrackingGroup* tracking_group,
    uint64 old_size,
    uint64 new_size,
    gpu::gles2::MemoryTracker::Pool tracking_pool) {
  TrackValueChanged(old_size, new_size, &tracking_group->size_);
  switch (tracking_pool) {
    case gpu::gles2::MemoryTracker::kManaged:
      TrackValueChanged(old_size, new_size, &bytes_allocated_managed_current_);
      break;
    case gpu::gles2::MemoryTracker::kUnmanaged:
      TrackValueChanged(old_size,
                        new_size,
                        &bytes_allocated_unmanaged_current_);
      break;
    default:
      NOTREACHED();
      break;
  }

  if (new_size != old_size) {
    TRACE_COUNTER1("gpu",
                   "GpuMemoryUsage",
                   GetCurrentUsage());
  }

  if (GetCurrentUsage() > bytes_allocated_historical_max_ +
                          kBytesAllocatedUnmanagedStep) {
    bytes_allocated_historical_max_ = GetCurrentUsage();
    // If we're blowing into new memory usage territory, spam the browser
    // process with the most up-to-date information about our memory usage.
    SendUmaStatsToBrowser();
  }
}

bool GpuMemoryManager::EnsureGPUMemoryAvailable(uint64 /* size_needed */) {
  // TODO: Check if there is enough space. Lose contexts until there is.
  return true;
}
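
// Creates the bookkeeping state for a new client. The client's memory
// tracker must already have been registered via CreateTrackingGroup (the
// DCHECK below enforces this). The new client is added to the appropriate
// MRU list and an immediate Manage() pass is scheduled so it receives an
// allocation right away.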
GpuMemoryManagerClientState* GpuMemoryManager::CreateClientState(
    GpuMemoryManagerClient* client,
    bool has_surface,
    bool visible) {
  TrackingGroupMap::iterator tracking_group_it =
      tracking_groups_.find(client->GetMemoryTracker());
  DCHECK(tracking_group_it != tracking_groups_.end());
  GpuMemoryTrackingGroup* tracking_group = tracking_group_it->second;

  GpuMemoryManagerClientState* client_state = new GpuMemoryManagerClientState(
      this, client, tracking_group, has_surface, visible);
  AddClientToList(client_state);
  ScheduleManage(kScheduleManageNow);
  return client_state;
}

void GpuMemoryManager::OnDestroyClientState(
    GpuMemoryManagerClientState* client_state) {
  RemoveClientFromList(client_state);
  ScheduleManage(kScheduleManageLater);
}

void GpuMemoryManager::SetClientStateVisible(
    GpuMemoryManagerClientState* client_state, bool visible) {
  DCHECK(client_state->has_surface_);
  if (client_state->visible_ == visible)
    return;

  RemoveClientFromList(client_state);
  client_state->visible_ = visible;
  AddClientToList(client_state);
  ScheduleManage(visible ? kScheduleManageNow : kScheduleManageLater);
}

uint64 GpuMemoryManager::GetClientMemoryUsage(
    const GpuMemoryManagerClient* client) const {
  TrackingGroupMap::const_iterator tracking_group_it =
      tracking_groups_.find(client->GetMemoryTracker());
  DCHECK(tracking_group_it != tracking_groups_.end());
  return tracking_group_it->second->GetSize();
}

GpuMemoryTrackingGroup* GpuMemoryManager::CreateTrackingGroup(
    base::ProcessId pid, gpu::gles2::MemoryTracker* memory_tracker) {
  GpuMemoryTrackingGroup* tracking_group = new GpuMemoryTrackingGroup(
      pid, memory_tracker, this);
  DCHECK(!tracking_groups_.count(tracking_group->GetMemoryTracker()));
  tracking_groups_.insert(std::make_pair(tracking_group->GetMemoryTracker(),
                                         tracking_group));
  return tracking_group;
}

void GpuMemoryManager::OnDestroyTrackingGroup(
    GpuMemoryTrackingGroup* tracking_group) {
  DCHECK(tracking_groups_.count(tracking_group->GetMemoryTracker()));
  tracking_groups_.erase(tracking_group->GetMemoryTracker());
}
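
// Fills |video_memory_usage_stats| with a per-process breakdown of GPU
// memory usage. Each tracking group's bytes are attributed to the PID of the
// process that owns it; the GPU process's own entry holds the aggregate of
// everything and is marked has_duplicates because it double-counts the
// per-process rows.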
void GpuMemoryManager::GetVideoMemoryUsageStats(
    GPUVideoMemoryUsageStats* video_memory_usage_stats) const {
  // For each context group, assign its memory usage to its PID.
  video_memory_usage_stats->process_map.clear();
  for (TrackingGroupMap::const_iterator i =
       tracking_groups_.begin(); i != tracking_groups_.end(); ++i) {
    const GpuMemoryTrackingGroup* tracking_group = i->second;
    video_memory_usage_stats->process_map[
        tracking_group->GetPid()].video_memory += tracking_group->GetSize();
  }

  // Assign the total across all processes in the GPU process.
  video_memory_usage_stats->process_map[
      base::GetCurrentProcId()].video_memory = GetCurrentUsage();
  video_memory_usage_stats->process_map[
      base::GetCurrentProcId()].has_duplicates = true;

  video_memory_usage_stats->bytes_allocated = GetCurrentUsage();
  video_memory_usage_stats->bytes_allocated_historical_max =
      bytes_allocated_historical_max_;
}

void GpuMemoryManager::Manage() {
  manage_immediate_scheduled_ = false;
  delayed_manage_callback_.Cancel();

  // Update the amount of GPU memory available on the system.
  UpdateAvailableGpuMemory();

  // Determine which clients are "hibernated" (which determines the
  // distribution of frontbuffers and memory among clients that don't have
  // surfaces).
  SetClientsHibernatedState();

  // Assign memory allocations to clients that have surfaces.
  AssignSurfacesAllocations();

  // Assign memory allocations to clients that don't have surfaces.
  AssignNonSurfacesAllocations();

  SendUmaStatsToBrowser();
}
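
// Hands every client that has a surface (visible or not) the same
// bytes_limit_when_visible, with a platform-specific priority cutoff (see
// the #if below), and suggests keeping a frontbuffer only for clients that
// are not hibernated.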
void GpuMemoryManager::AssignSurfacesAllocations() {
  // Compute the allocation and send it to each client that has a surface.
  ClientStateList clients = clients_visible_mru_;
  clients.insert(clients.end(),
                 clients_nonvisible_mru_.begin(),
                 clients_nonvisible_mru_.end());
  for (ClientStateList::const_iterator it = clients.begin();
       it != clients.end();
       ++it) {
    GpuMemoryManagerClientState* client_state = *it;

    // Populate and send the allocation to the client.
    MemoryAllocation allocation;
    allocation.bytes_limit_when_visible = client_hard_limit_bytes_;
#if defined(OS_ANDROID)
    // On Android, because there is only one visible tab at any time, allow
    // that renderer to cache as much as it can.
    allocation.priority_cutoff_when_visible =
        MemoryAllocation::CUTOFF_ALLOW_EVERYTHING;
#else
    // On desktop platforms, instruct the renderers to cache only a smaller
    // set, to play nice with other renderers and other applications. If this
    // is not done, then the system can become unstable.
    // http://crbug.com/145600 (Linux)
    // http://crbug.com/141377 (Mac)
    allocation.priority_cutoff_when_visible =
        MemoryAllocation::CUTOFF_ALLOW_NICE_TO_HAVE;
#endif

    client_state->client_->SetMemoryAllocation(allocation);
    client_state->client_->SuggestHaveFrontBuffer(!client_state->hibernated_);
  }
}

void GpuMemoryManager::AssignNonSurfacesAllocations() {
  for (ClientStateList::const_iterator it = clients_nonsurface_.begin();
       it != clients_nonsurface_.end();
       ++it) {
    GpuMemoryManagerClientState* client_state = *it;
    MemoryAllocation allocation;

    if (!client_state->hibernated_) {
      allocation.bytes_limit_when_visible = client_hard_limit_bytes_;
      allocation.priority_cutoff_when_visible =
          MemoryAllocation::CUTOFF_ALLOW_EVERYTHING;
    }

    client_state->client_->SetMemoryAllocation(allocation);
  }
}
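
// Decides which clients are "hibernated". Clients with visible surfaces are
// never hibernated; nonvisible surface clients stay non-hibernated in MRU
// order until max_surfaces_with_frontbuffer_soft_limit_ non-hibernated
// clients exist; clients without surfaces simply inherit the hibernation
// state of their share group's tracking group.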
void GpuMemoryManager::SetClientsHibernatedState() const {
  // Re-set all tracking groups as being hibernated.
  for (TrackingGroupMap::const_iterator it = tracking_groups_.begin();
       it != tracking_groups_.end();
       ++it) {
    GpuMemoryTrackingGroup* tracking_group = it->second;
    tracking_group->hibernated_ = true;
  }

  // All clients with surfaces that are visible are non-hibernated.
  uint64 non_hibernated_clients = 0;
  for (ClientStateList::const_iterator it = clients_visible_mru_.begin();
       it != clients_visible_mru_.end();
       ++it) {
    GpuMemoryManagerClientState* client_state = *it;
    client_state->hibernated_ = false;
    client_state->tracking_group_->hibernated_ = false;
    non_hibernated_clients++;
  }

  // Then an additional few clients with surfaces are non-hibernated too, up to
  // a fixed limit.
  for (ClientStateList::const_iterator it = clients_nonvisible_mru_.begin();
       it != clients_nonvisible_mru_.end();
       ++it) {
    GpuMemoryManagerClientState* client_state = *it;
    if (non_hibernated_clients < max_surfaces_with_frontbuffer_soft_limit_) {
      client_state->hibernated_ = false;
      client_state->tracking_group_->hibernated_ = false;
      non_hibernated_clients++;
    } else {
      client_state->hibernated_ = true;
    }
  }

  // Clients that don't have surfaces are non-hibernated if they are
  // in a GL share group with a non-hibernated surface.
  for (ClientStateList::const_iterator it = clients_nonsurface_.begin();
       it != clients_nonsurface_.end();
       ++it) {
    GpuMemoryManagerClientState* client_state = *it;
    client_state->hibernated_ = client_state->tracking_group_->hibernated_;
  }
}

void GpuMemoryManager::SendUmaStatsToBrowser() {
  if (!channel_manager_)
    return;
  GPUMemoryUmaStats params;
  params.bytes_allocated_current = GetCurrentUsage();
  params.bytes_allocated_max = bytes_allocated_historical_max_;
  params.bytes_limit = client_hard_limit_bytes_;
  params.client_count = clients_visible_mru_.size() +
                        clients_nonvisible_mru_.size() +
                        clients_nonsurface_.size();
  params.context_group_count = tracking_groups_.size();
  channel_manager_->Send(new GpuHostMsg_GpuMemoryUmaStats(params));
}
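
// The three lists partition the clients: visible clients with surfaces,
// nonvisible clients with surfaces (both kept in MRU order), and clients
// without surfaces. GetClientList() picks the list a given client currently
// belongs to, and Add/RemoveClientFromList keep the client's cached list
// iterator in sync.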
GpuMemoryManager::ClientStateList* GpuMemoryManager::GetClientList(
    GpuMemoryManagerClientState* client_state) {
  if (client_state->has_surface_) {
    if (client_state->visible_)
      return &clients_visible_mru_;
    else
      return &clients_nonvisible_mru_;
  }
  return &clients_nonsurface_;
}

void GpuMemoryManager::AddClientToList(
    GpuMemoryManagerClientState* client_state) {
  DCHECK(!client_state->list_iterator_valid_);
  ClientStateList* client_list = GetClientList(client_state);
  client_state->list_iterator_ = client_list->insert(
      client_list->begin(), client_state);
  client_state->list_iterator_valid_ = true;
}

void GpuMemoryManager::RemoveClientFromList(
    GpuMemoryManagerClientState* client_state) {
  DCHECK(client_state->list_iterator_valid_);
  ClientStateList* client_list = GetClientList(client_state);
  client_list->erase(client_state->list_iterator_);
  client_state->list_iterator_valid_ = false;
}

}  // namespace content