// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "content/common/gpu/gpu_memory_manager.h"

#include <algorithm>
#include <limits>

#include "base/bind.h"
#include "base/command_line.h"
#include "base/debug/trace_event.h"
#include "base/message_loop/message_loop.h"
#include "base/process/process_handle.h"
#include "base/strings/string_number_conversions.h"
#include "content/common/gpu/gpu_channel_manager.h"
#include "content/common/gpu/gpu_memory_allocation.h"
#include "content/common/gpu/gpu_memory_manager_client.h"
#include "content/common/gpu/gpu_memory_tracking.h"
#include "content/common/gpu/gpu_memory_uma_stats.h"
#include "content/common/gpu/gpu_messages.h"
#include "gpu/command_buffer/service/gpu_switches.h"

namespace content {
namespace {

const int kDelayedScheduleManageTimeoutMs = 67;

const uint64 kBytesAllocatedUnmanagedStep = 16 * 1024 * 1024;

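// Applies the delta between |old_size| and |new_size| to |total_size|. For
// decreases this relies on unsigned wraparound of (new_size - old_size); the
// DCHECK guards against underflowing the running total.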
void TrackValueChanged(uint64 old_size, uint64 new_size, uint64* total_size) {
  DCHECK(new_size > old_size || *total_size >= (old_size - new_size));
  *total_size += (new_size - old_size);
}

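// RoundUp and RoundDown round |n| to a multiple of |mul|, e.g.
// RoundUp(20, 16) == 32 and RoundDown(20, 16) == 16.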
template <typename T>
T RoundUp(T n, T mul) {
  return ((n + mul - 1) / mul) * mul;
}

template <typename T>
T RoundDown(T n, T mul) {
  return (n / mul) * mul;
}

}  // namespace

GpuMemoryManager::GpuMemoryManager(
    GpuChannelManager* channel_manager,
    uint64 max_surfaces_with_frontbuffer_soft_limit)
    : channel_manager_(channel_manager),
      manage_immediate_scheduled_(false),
      max_surfaces_with_frontbuffer_soft_limit_(
          max_surfaces_with_frontbuffer_soft_limit),
      bytes_available_gpu_memory_(0),
      bytes_available_gpu_memory_overridden_(false),
      bytes_minimum_per_client_(0),
      bytes_default_per_client_(0),
      bytes_allocated_managed_current_(0),
      bytes_allocated_managed_visible_(0),
      bytes_allocated_managed_nonvisible_(0),
      bytes_allocated_unmanaged_current_(0),
      bytes_allocated_historical_max_(0),
      bytes_allocated_unmanaged_high_(0),
      bytes_allocated_unmanaged_low_(0),
      bytes_unmanaged_limit_step_(kBytesAllocatedUnmanagedStep),
      disable_schedule_manage_(false) {
  CommandLine* command_line = CommandLine::ForCurrentProcess();

#if defined(OS_ANDROID)
  bytes_default_per_client_ = 8 * 1024 * 1024;
  bytes_minimum_per_client_ = 8 * 1024 * 1024;
#elif defined(OS_CHROMEOS)
  bytes_default_per_client_ = 64 * 1024 * 1024;
  bytes_minimum_per_client_ = 4 * 1024 * 1024;
#else
  bytes_default_per_client_ = 64 * 1024 * 1024;
  bytes_minimum_per_client_ = 64 * 1024 * 1024;
#endif

  // On Android, always discard everything that is nonvisible.
  // On Linux and Mac, use as little memory as possible to avoid stability
  // problems.
  // http://crbug.com/145600 (Linux)
  // http://crbug.com/141377 (Mac)
#if defined(OS_ANDROID) || defined(OS_MACOSX) || \
    (defined(OS_LINUX) && !defined(OS_CHROMEOS))
  allow_nonvisible_memory_ = false;
#else
  allow_nonvisible_memory_ = true;
#endif

  if (command_line->HasSwitch(switches::kForceGpuMemAvailableMb)) {
    base::StringToUint64(
        command_line->GetSwitchValueASCII(switches::kForceGpuMemAvailableMb),
        &bytes_available_gpu_memory_);
    bytes_available_gpu_memory_ *= 1024 * 1024;
    bytes_available_gpu_memory_overridden_ = true;
  } else {
    bytes_available_gpu_memory_ = GetDefaultAvailableGpuMemory();
  }
}

GpuMemoryManager::~GpuMemoryManager() {
  DCHECK(tracking_groups_.empty());
  DCHECK(clients_visible_mru_.empty());
  DCHECK(clients_nonvisible_mru_.empty());
  DCHECK(clients_nonsurface_.empty());
  DCHECK(!bytes_allocated_managed_current_);
  DCHECK(!bytes_allocated_unmanaged_current_);
  DCHECK(!bytes_allocated_managed_visible_);
  DCHECK(!bytes_allocated_managed_nonvisible_);
}

uint64 GpuMemoryManager::GetAvailableGpuMemory() const {
  // Allow unmanaged allocations to over-subscribe by at most (high_ - low_)
  // before restricting managed (compositor) memory based on unmanaged usage.
  if (bytes_allocated_unmanaged_low_ > bytes_available_gpu_memory_)
    return 0;
  return bytes_available_gpu_memory_ - bytes_allocated_unmanaged_low_;
}

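// The starting estimate of available GPU memory, used until (and unless) a
// real total can be queried from a visible client in
// UpdateAvailableGpuMemory().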
uint64 GpuMemoryManager::GetDefaultAvailableGpuMemory() const {
#if defined(OS_ANDROID)
  return 16 * 1024 * 1024;
#elif defined(OS_CHROMEOS)
  return 1024 * 1024 * 1024;
#else
  return 256 * 1024 * 1024;
#endif
}

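// Hard ceiling on the available-memory estimate, regardless of what the
// clients' GPU memory queries report.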
uint64 GpuMemoryManager::GetMaximumTotalGpuMemory() const {
#if defined(OS_ANDROID)
  return 256 * 1024 * 1024;
#else
  return 1024 * 1024 * 1024;
#endif
}

uint64 GpuMemoryManager::GetMaximumClientAllocation() const {
#if defined(OS_ANDROID) || defined(OS_CHROMEOS)
  return bytes_available_gpu_memory_;
#else
  // This is to avoid allowing a single page to use a full 256MB of memory
  // (the current total limit). Long-scroll pages will hit this limit,
  // resulting in instability on some platforms (e.g., issue 141377).
  return bytes_available_gpu_memory_ / 2;
#endif
}

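// Example: for a reported total of 512MB this yields
// min(3 * 512 / 4, 512 - 64) = min(384, 448) = 384MB on non-Android.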
uint64 GpuMemoryManager::CalcAvailableFromGpuTotal(uint64 total_gpu_memory) {
#if defined(OS_ANDROID)
  // We don't need to reduce the total on Android, since
  // the total is an estimate to begin with.
  return total_gpu_memory;
#else
  // Allow Chrome to use 75% of total GPU memory, or all-but-64MB of GPU
  // memory, whichever is less.
  return std::min(3 * total_gpu_memory / 4,
                  total_gpu_memory - 64 * 1024 * 1024);
#endif
}

void GpuMemoryManager::UpdateAvailableGpuMemory() {
  // If the amount of video memory to use was specified at the command
  // line, never change it.
  if (bytes_available_gpu_memory_overridden_)
    return;

  // On non-Android, we use an operating system query when possible.
  // We do not have a reliable concept of multiple GPUs existing in
  // a system, so just be safe and go with the minimum encountered.
  uint64 bytes_min = 0;

  // Only use the clients that are visible, because otherwise the set of
  // clients we are querying could become extremely large.
  for (ClientStateList::const_iterator it = clients_visible_mru_.begin();
       it != clients_visible_mru_.end();
       ++it) {
    const GpuMemoryManagerClientState* client_state = *it;
    if (!client_state->has_surface_)
      continue;
    if (!client_state->visible_)
      continue;

    uint64 bytes = 0;
    if (client_state->client_->GetTotalGpuMemory(&bytes)) {
      if (!bytes_min || bytes < bytes_min)
        bytes_min = bytes;
    }
  }

  if (!bytes_min)
    return;

  bytes_available_gpu_memory_ = CalcAvailableFromGpuTotal(bytes_min);

  // Never go below the default allocation.
  bytes_available_gpu_memory_ = std::max(bytes_available_gpu_memory_,
                                         GetDefaultAvailableGpuMemory());

  // Never go above the maximum.
  bytes_available_gpu_memory_ = std::min(bytes_available_gpu_memory_,
                                         GetMaximumTotalGpuMemory());
}

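// Example: with the default 16MB step and 20MB of unmanaged allocations, the
// interval becomes [RoundDown(20MB, 16MB), RoundUp(20MB + 4MB, 16MB)), i.e.
// [16MB, 32MB), so small fluctuations around 20MB trigger no re-manage.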
void GpuMemoryManager::UpdateUnmanagedMemoryLimits() {
  // Set the limit to be [current_, current_ + step_ / 4), with the endpoints
  // of the intervals rounded down and up to the nearest step_, to avoid
  // thrashing the interval.
  bytes_allocated_unmanaged_high_ = RoundUp(
      bytes_allocated_unmanaged_current_ + bytes_unmanaged_limit_step_ / 4,
      bytes_unmanaged_limit_step_);
  bytes_allocated_unmanaged_low_ = RoundDown(
      bytes_allocated_unmanaged_current_,
      bytes_unmanaged_limit_step_);
}

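// Schedules a call to Manage(). kScheduleManageNow posts an immediate task
// (and cancels any pending delayed one); kScheduleManageLater posts a single
// delayed task, kDelayedScheduleManageTimeoutMs in the future, unless one is
// already pending.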
void GpuMemoryManager::ScheduleManage(
    ScheduleManageTime schedule_manage_time) {
  if (disable_schedule_manage_)
    return;
  if (manage_immediate_scheduled_)
    return;
  if (schedule_manage_time == kScheduleManageNow) {
    base::MessageLoop::current()->PostTask(
        FROM_HERE, base::Bind(&GpuMemoryManager::Manage, AsWeakPtr()));
    manage_immediate_scheduled_ = true;
    if (!delayed_manage_callback_.IsCancelled())
      delayed_manage_callback_.Cancel();
  } else {
    if (!delayed_manage_callback_.IsCancelled())
      return;
    delayed_manage_callback_.Reset(base::Bind(&GpuMemoryManager::Manage,
                                              AsWeakPtr()));
    base::MessageLoop::current()->PostDelayedTask(
        FROM_HERE,
        delayed_manage_callback_.callback(),
        base::TimeDelta::FromMilliseconds(kDelayedScheduleManageTimeoutMs));
  }
}

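// Updates the per-pool running totals when an allocation changes size, and
// triggers a re-manage when the unmanaged total leaves its current
// [low, high) interval.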
void GpuMemoryManager::TrackMemoryAllocatedChange(
    GpuMemoryTrackingGroup* tracking_group,
    uint64 old_size,
    uint64 new_size,
    gpu::gles2::MemoryTracker::Pool tracking_pool) {
  TrackValueChanged(old_size, new_size, &tracking_group->size_);
  switch (tracking_pool) {
    case gpu::gles2::MemoryTracker::kManaged:
      TrackValueChanged(old_size, new_size, &bytes_allocated_managed_current_);
      break;
    case gpu::gles2::MemoryTracker::kUnmanaged:
      TrackValueChanged(old_size,
                        new_size,
                        &bytes_allocated_unmanaged_current_);
      break;
    default:
      NOTREACHED();
      break;
  }
  if (new_size != old_size) {
    TRACE_COUNTER1("gpu",
                   "GpuMemoryUsage",
                   GetCurrentUsage());
  }

  // If we've gone past our current limit on unmanaged memory, schedule a
  // re-manage to take into account the unmanaged memory.
  if (bytes_allocated_unmanaged_current_ >= bytes_allocated_unmanaged_high_)
    ScheduleManage(kScheduleManageNow);
  if (bytes_allocated_unmanaged_current_ < bytes_allocated_unmanaged_low_)
    ScheduleManage(kScheduleManageLater);

  if (GetCurrentUsage() > bytes_allocated_historical_max_) {
    bytes_allocated_historical_max_ = GetCurrentUsage();
    // If we're blowing into new memory usage territory, spam the browser
    // process with the most up-to-date information about our memory usage.
    SendUmaStatsToBrowser();
  }
}

bool GpuMemoryManager::EnsureGPUMemoryAvailable(uint64 /* size_needed */) {
  // TODO: Check if there is enough space. Lose contexts until there is.
  return true;
}

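// Creates the bookkeeping state for a new client. The client's tracking
// group must already have been registered via CreateTrackingGroup().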
GpuMemoryManagerClientState* GpuMemoryManager::CreateClientState(
    GpuMemoryManagerClient* client,
    bool has_surface,
    bool visible) {
  TrackingGroupMap::iterator tracking_group_it =
      tracking_groups_.find(client->GetMemoryTracker());
  DCHECK(tracking_group_it != tracking_groups_.end());
  GpuMemoryTrackingGroup* tracking_group = tracking_group_it->second;

  GpuMemoryManagerClientState* client_state = new GpuMemoryManagerClientState(
      this, client, tracking_group, has_surface, visible);
  TrackValueChanged(0, client_state->managed_memory_stats_.bytes_allocated,
                    client_state->visible_ ?
                        &bytes_allocated_managed_visible_ :
                        &bytes_allocated_managed_nonvisible_);
  AddClientToList(client_state);
  ScheduleManage(kScheduleManageNow);
  return client_state;
}

void GpuMemoryManager::OnDestroyClientState(
    GpuMemoryManagerClientState* client_state) {
  RemoveClientFromList(client_state);
  TrackValueChanged(client_state->managed_memory_stats_.bytes_allocated, 0,
                    client_state->visible_ ?
                        &bytes_allocated_managed_visible_ :
                        &bytes_allocated_managed_nonvisible_);
  ScheduleManage(kScheduleManageLater);
}

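// Moves the client between the visible and nonvisible MRU lists and shifts
// its allocated bytes between the visible/nonvisible totals.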
void GpuMemoryManager::SetClientStateVisible(
    GpuMemoryManagerClientState* client_state, bool visible) {
  DCHECK(client_state->has_surface_);
  if (client_state->visible_ == visible)
    return;

  RemoveClientFromList(client_state);
  client_state->visible_ = visible;
  AddClientToList(client_state);

  TrackValueChanged(client_state->managed_memory_stats_.bytes_allocated, 0,
                    client_state->visible_ ?
                        &bytes_allocated_managed_nonvisible_ :
                        &bytes_allocated_managed_visible_);
  TrackValueChanged(0, client_state->managed_memory_stats_.bytes_allocated,
                    client_state->visible_ ?
                        &bytes_allocated_managed_visible_ :
                        &bytes_allocated_managed_nonvisible_);
  ScheduleManage(visible ? kScheduleManageNow : kScheduleManageLater);
}

void GpuMemoryManager::SetClientStateManagedMemoryStats(
    GpuMemoryManagerClientState* client_state,
    const GpuManagedMemoryStats& stats) {
  TrackValueChanged(client_state->managed_memory_stats_.bytes_allocated,
                    stats.bytes_allocated,
                    client_state->visible_ ?
                        &bytes_allocated_managed_visible_ :
                        &bytes_allocated_managed_nonvisible_);
  client_state->managed_memory_stats_ = stats;

  // If this is the first time that stats have been received for this
  // client, use them immediately.
  if (!client_state->managed_memory_stats_received_) {
    client_state->managed_memory_stats_received_ = true;
    ScheduleManage(kScheduleManageNow);
    return;
  }

  // If these statistics sit outside of the range that we used in our
  // computation of memory allocations then recompute the allocations.
  if (client_state->managed_memory_stats_.bytes_nice_to_have >
      client_state->bytes_nicetohave_limit_high_) {
    ScheduleManage(kScheduleManageNow);
  } else if (client_state->managed_memory_stats_.bytes_nice_to_have <
             client_state->bytes_nicetohave_limit_low_) {
    ScheduleManage(kScheduleManageLater);
  }
}

GpuMemoryTrackingGroup* GpuMemoryManager::CreateTrackingGroup(
    base::ProcessId pid, gpu::gles2::MemoryTracker* memory_tracker) {
  GpuMemoryTrackingGroup* tracking_group = new GpuMemoryTrackingGroup(
      pid, memory_tracker, this);
  DCHECK(!tracking_groups_.count(tracking_group->GetMemoryTracker()));
  tracking_groups_.insert(std::make_pair(tracking_group->GetMemoryTracker(),
                                         tracking_group));
  return tracking_group;
}

void GpuMemoryManager::OnDestroyTrackingGroup(
    GpuMemoryTrackingGroup* tracking_group) {
  DCHECK(tracking_groups_.count(tracking_group->GetMemoryTracker()));
  tracking_groups_.erase(tracking_group->GetMemoryTracker());
}

void GpuMemoryManager::GetVideoMemoryUsageStats(
    GPUVideoMemoryUsageStats* video_memory_usage_stats) const {
  // For each context group, assign its memory usage to its PID.
  video_memory_usage_stats->process_map.clear();
  for (TrackingGroupMap::const_iterator i =
       tracking_groups_.begin(); i != tracking_groups_.end(); ++i) {
    const GpuMemoryTrackingGroup* tracking_group = i->second;
    video_memory_usage_stats->process_map[
        tracking_group->GetPid()].video_memory += tracking_group->GetSize();
  }

  // Assign the total across all processes in the GPU process.
  video_memory_usage_stats->process_map[
      base::GetCurrentProcId()].video_memory = GetCurrentUsage();
  video_memory_usage_stats->process_map[
      base::GetCurrentProcId()].has_duplicates = true;

  video_memory_usage_stats->bytes_allocated = GetCurrentUsage();
  video_memory_usage_stats->bytes_allocated_historical_max =
      bytes_allocated_historical_max_;
}

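// The core scheduling routine: recomputes the memory limits for every client
// and sends each one its updated allocation.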
void GpuMemoryManager::Manage() {
  manage_immediate_scheduled_ = false;
  delayed_manage_callback_.Cancel();

  // Update the amount of GPU memory available on the system.
  UpdateAvailableGpuMemory();

  // Update the limit on unmanaged memory.
  UpdateUnmanagedMemoryLimits();

  // Determine which clients are "hibernated" (which determines the
  // distribution of frontbuffers and memory among clients that don't have
  // surfaces).
  SetClientsHibernatedState();

  // Assign memory allocations to clients that have surfaces.
  AssignSurfacesAllocations();

  // Assign memory allocations to clients that don't have surfaces.
  AssignNonSurfacesAllocations();

  SendUmaStatsToBrowser();
}

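// Computes the largest per-client cap such that capping every entry of
// |bytes| to it keeps the sum within |bytes_sum_limit|, distributing any
// slack evenly among the capped entries. Example: for bytes = {2, 5, 8} and
// a limit of 12, a cap of 8 needs 2 + 5 + 8 = 15 (too much), while a cap of
// 5 needs 2 + 5 + 5 = 12, so the cap is 5. Returns uint64 max if no cap is
// needed.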
uint64 GpuMemoryManager::ComputeCap(
    std::vector<uint64> bytes, uint64 bytes_sum_limit) {
  size_t bytes_size = bytes.size();
  uint64 bytes_sum = 0;

  if (bytes_size == 0)
    return std::numeric_limits<uint64>::max();

  // Sort and add up all entries.
  std::sort(bytes.begin(), bytes.end());
  for (size_t i = 0; i < bytes_size; ++i)
    bytes_sum += bytes[i];

  // As we go through the below loop, let bytes_partial_sum be the
  // sum of bytes[0] + ... + bytes[bytes_size - i - 1].
  uint64 bytes_partial_sum = bytes_sum;

  // Try using each entry as a cap, and see where we get cut off.
  for (size_t i = 0; i < bytes_size; ++i) {
    // Try limiting the cap to bytes[bytes_size - i - 1].
    uint64 test_cap = bytes[bytes_size - i - 1];
    uint64 bytes_sum_with_test_cap = i * test_cap + bytes_partial_sum;

    // If that fits, raise test_cap to give an even distribution to the
    // last i entries.
    if (bytes_sum_with_test_cap <= bytes_sum_limit) {
      if (i == 0)
        return std::numeric_limits<uint64>::max();
      else
        return test_cap + (bytes_sum_limit - bytes_sum_with_test_cap) / i;
    } else {
      bytes_partial_sum -= test_cap;
    }
  }

  // If we got here, then we can't fully accommodate any of the clients,
  // so distribute bytes_sum_limit evenly.
  return bytes_sum_limit / bytes_size;
}

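// Computes a visible client's allocation as the minimum allocation plus
// (required - minimum), capped by |bytes_above_minimum_cap|, plus
// (nicetohave - required), capped by |bytes_above_required_cap|, with the
// total capped by |bytes_overall_cap|. The required and nicetohave figures
// are padded (by 9/8 and 4/3) to absorb small fluctuations in the stats.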
uint64 GpuMemoryManager::ComputeClientAllocationWhenVisible(
    GpuMemoryManagerClientState* client_state,
    uint64 bytes_above_required_cap,
    uint64 bytes_above_minimum_cap,
    uint64 bytes_overall_cap) {
  GpuManagedMemoryStats* stats = &client_state->managed_memory_stats_;

  if (!client_state->managed_memory_stats_received_)
    return GetDefaultClientAllocation();

  uint64 bytes_required = 9 * stats->bytes_required / 8;
  bytes_required = std::min(bytes_required, GetMaximumClientAllocation());
  bytes_required = std::max(bytes_required, GetMinimumClientAllocation());

  uint64 bytes_nicetohave = 4 * stats->bytes_nice_to_have / 3;
  bytes_nicetohave = std::min(bytes_nicetohave, GetMaximumClientAllocation());
  bytes_nicetohave = std::max(bytes_nicetohave, GetMinimumClientAllocation());
  bytes_nicetohave = std::max(bytes_nicetohave, bytes_required);

  uint64 allocation = GetMinimumClientAllocation();
  allocation += std::min(bytes_required - GetMinimumClientAllocation(),
                         bytes_above_minimum_cap);
  allocation += std::min(bytes_nicetohave - bytes_required,
                         bytes_above_required_cap);
  allocation = std::min(allocation,
                        bytes_overall_cap);
  return allocation;
}

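// Nonvisible clients keep a padded copy of their required bytes, or nothing
// at all on platforms where nonvisible memory is disallowed.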
uint64 GpuMemoryManager::ComputeClientAllocationWhenNonvisible(
    GpuMemoryManagerClientState* client_state) {
  if (!client_state->managed_memory_stats_received_)
    return 0;
  if (!allow_nonvisible_memory_)
    return 0;
  return 9 * client_state->managed_memory_stats_.bytes_required / 8;
}

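// Assigns bytes_allocation_when_visible_ for all clients. First the ideal
// nicetohave/required/minimum allocations are computed per client; then
// whichever of those three levels fits in the available total determines
// the caps that are applied uniformly to every client.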
void GpuMemoryManager::ComputeVisibleSurfacesAllocations() {
  uint64 bytes_available_total = GetAvailableGpuMemory();
  uint64 bytes_above_required_cap = std::numeric_limits<uint64>::max();
  uint64 bytes_above_minimum_cap = std::numeric_limits<uint64>::max();
  uint64 bytes_overall_cap_visible = GetMaximumClientAllocation();

  // Compute memory usage at three levels
  // - painting everything that is nicetohave for visible clients
  // - painting only what is required for visible clients
  // - giving every client the minimum allocation
  uint64 bytes_nicetohave_visible = 0;
  uint64 bytes_required_visible = 0;
  uint64 bytes_minimum_visible = 0;
  for (ClientStateList::const_iterator it = clients_visible_mru_.begin();
       it != clients_visible_mru_.end();
       ++it) {
    GpuMemoryManagerClientState* client_state = *it;
    client_state->bytes_allocation_ideal_nicetohave_ =
        ComputeClientAllocationWhenVisible(
            client_state,
            bytes_above_required_cap,
            bytes_above_minimum_cap,
            bytes_overall_cap_visible);
    client_state->bytes_allocation_ideal_required_ =
        ComputeClientAllocationWhenVisible(
            client_state,
            0,
            bytes_above_minimum_cap,
            bytes_overall_cap_visible);
    client_state->bytes_allocation_ideal_minimum_ =
        ComputeClientAllocationWhenVisible(
            client_state,
            0,
            0,
            bytes_overall_cap_visible);

    bytes_nicetohave_visible +=
        client_state->bytes_allocation_ideal_nicetohave_;
    bytes_required_visible +=
        client_state->bytes_allocation_ideal_required_;
    bytes_minimum_visible +=
        client_state->bytes_allocation_ideal_minimum_;
  }

  // Determine which of those three points we can satisfy, and limit
  // bytes_above_required_cap and bytes_above_minimum_cap to not go
  // over the available total.
  if (bytes_minimum_visible > bytes_available_total) {
    bytes_above_required_cap = 0;
    bytes_above_minimum_cap = 0;
  } else if (bytes_required_visible > bytes_available_total) {
    std::vector<uint64> bytes_to_fit;
    for (ClientStateList::const_iterator it = clients_visible_mru_.begin();
         it != clients_visible_mru_.end();
         ++it) {
      GpuMemoryManagerClientState* client_state = *it;
      bytes_to_fit.push_back(client_state->bytes_allocation_ideal_required_ -
                             client_state->bytes_allocation_ideal_minimum_);
    }
    bytes_above_required_cap = 0;
    bytes_above_minimum_cap = ComputeCap(
        bytes_to_fit, bytes_available_total - bytes_minimum_visible);
  } else if (bytes_nicetohave_visible > bytes_available_total) {
    std::vector<uint64> bytes_to_fit;
    for (ClientStateList::const_iterator it = clients_visible_mru_.begin();
         it != clients_visible_mru_.end();
         ++it) {
      GpuMemoryManagerClientState* client_state = *it;
      bytes_to_fit.push_back(client_state->bytes_allocation_ideal_nicetohave_ -
                             client_state->bytes_allocation_ideal_required_);
    }
    bytes_above_required_cap = ComputeCap(
        bytes_to_fit, bytes_available_total - bytes_required_visible);
    bytes_above_minimum_cap = std::numeric_limits<uint64>::max();
  }

  // Given those computed limits, set the actual memory allocations for the
  // visible clients, tracking the largest allocation and the total allocation
  // as we go.
  uint64 bytes_allocated_visible = 0;
  uint64 bytes_allocated_max_client_allocation = 0;
  for (ClientStateList::const_iterator it = clients_visible_mru_.begin();
       it != clients_visible_mru_.end();
       ++it) {
    GpuMemoryManagerClientState* client_state = *it;
    client_state->bytes_allocation_when_visible_ =
        ComputeClientAllocationWhenVisible(
            client_state,
            bytes_above_required_cap,
            bytes_above_minimum_cap,
            bytes_overall_cap_visible);
    bytes_allocated_visible += client_state->bytes_allocation_when_visible_;
    bytes_allocated_max_client_allocation = std::max(
        bytes_allocated_max_client_allocation,
        client_state->bytes_allocation_when_visible_);
  }

  // Set the limit for nonvisible clients for when they become visible.
  // Use the same formula, with a lowered overall cap in case any of the
  // currently-nonvisible clients are much more resource-intensive than any
  // of the existing clients.
  uint64 bytes_overall_cap_nonvisible = bytes_allocated_max_client_allocation;
  if (bytes_available_total > bytes_allocated_visible) {
    bytes_overall_cap_nonvisible +=
        bytes_available_total - bytes_allocated_visible;
  }
  bytes_overall_cap_nonvisible = std::min(bytes_overall_cap_nonvisible,
                                          GetMaximumClientAllocation());
  for (ClientStateList::const_iterator it = clients_nonvisible_mru_.begin();
       it != clients_nonvisible_mru_.end();
       ++it) {
    GpuMemoryManagerClientState* client_state = *it;
    client_state->bytes_allocation_when_visible_ =
        ComputeClientAllocationWhenVisible(
            client_state,
            bytes_above_required_cap,
            bytes_above_minimum_cap,
            bytes_overall_cap_nonvisible);
  }
}

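// Assigns bytes_allocation_when_nonvisible_, letting clients keep their
// contents while nonvisible only if they fit in a budget of at most 1/4 of
// the total, preferring the most recently used clients.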
void GpuMemoryManager::ComputeNonvisibleSurfacesAllocations() {
  uint64 bytes_allocated_visible = 0;
  for (ClientStateList::const_iterator it = clients_visible_mru_.begin();
       it != clients_visible_mru_.end();
       ++it) {
    GpuMemoryManagerClientState* client_state = *it;
    bytes_allocated_visible += client_state->bytes_allocation_when_visible_;
  }

  // Allow up to 1/4 of the memory that was available for visible clients to
  // go to nonvisible clients.
  uint64 bytes_available_total = GetAvailableGpuMemory();
  uint64 bytes_available_nonvisible = 0;
  uint64 bytes_allocated_nonvisible = 0;
  if (bytes_available_total > bytes_allocated_visible) {
    bytes_available_nonvisible = std::min(
        bytes_available_total / 4,
        bytes_available_total - bytes_allocated_visible);
  }

  // Determine which now-visible clients should keep their contents when
  // they are made nonvisible.
  for (ClientStateList::const_iterator it = clients_visible_mru_.begin();
       it != clients_visible_mru_.end();
       ++it) {
    GpuMemoryManagerClientState* client_state = *it;

    // Compute the amount of space available for this renderer when it is
    // nonvisible. Do not count this client's allocation while visible against
    // the nonvisible clients' allocation total.
    uint64 bytes_available_nonvisible_adjusted = std::min(
        bytes_available_nonvisible +
            client_state->bytes_allocation_when_visible_ / 4,
        bytes_available_total / 4);

    // Allow this client to keep its contents if they fit in the allocation.
    client_state->bytes_allocation_when_nonvisible_ =
        ComputeClientAllocationWhenNonvisible(client_state);
    if (client_state->bytes_allocation_when_nonvisible_ >
        bytes_available_nonvisible_adjusted)
      client_state->bytes_allocation_when_nonvisible_ = 0;
  }

  // Compute which currently nonvisible clients should keep their contents.
  for (ClientStateList::const_iterator it = clients_nonvisible_mru_.begin();
       it != clients_nonvisible_mru_.end();
       ++it) {
    GpuMemoryManagerClientState* client_state = *it;

    // If this client is nonvisible and has already had its contents discarded,
    // don't re-generate the contents until the client becomes visible again.
    if (!client_state->bytes_allocation_when_nonvisible_)
      continue;

    client_state->bytes_allocation_when_nonvisible_ =
        ComputeClientAllocationWhenNonvisible(client_state);

    // Take into account all more recently used nonvisible clients, and only
    // if this client still fits, allow it to keep its contents.
    if (bytes_allocated_nonvisible +
        client_state->bytes_allocation_when_nonvisible_ >
        bytes_available_nonvisible) {
      client_state->bytes_allocation_when_nonvisible_ = 0;
    }
    bytes_allocated_nonvisible +=
        client_state->bytes_allocation_when_nonvisible_;
  }
}

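// If memory remains after the visible and nonvisible passes, hands the
// surplus to visible clients, capped so that no client exceeds
// GetMaximumClientAllocation().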
void GpuMemoryManager::DistributeRemainingMemoryToVisibleSurfaces() {
  uint64 bytes_available_total = GetAvailableGpuMemory();
  uint64 bytes_allocated_total = 0;

  for (ClientStateList::const_iterator it = clients_visible_mru_.begin();
       it != clients_visible_mru_.end();
       ++it) {
    GpuMemoryManagerClientState* client_state = *it;
    bytes_allocated_total += client_state->bytes_allocation_when_visible_;
  }
  for (ClientStateList::const_iterator it = clients_nonvisible_mru_.begin();
       it != clients_nonvisible_mru_.end();
       ++it) {
    GpuMemoryManagerClientState* client_state = *it;
    bytes_allocated_total += client_state->bytes_allocation_when_nonvisible_;
  }

  if (bytes_allocated_total >= bytes_available_total)
    return;

  std::vector<uint64> bytes_extra_requests;
  for (ClientStateList::const_iterator it = clients_visible_mru_.begin();
       it != clients_visible_mru_.end();
       ++it) {
    GpuMemoryManagerClientState* client_state = *it;
    CHECK(GetMaximumClientAllocation() >=
          client_state->bytes_allocation_when_visible_);
    uint64 bytes_extra = GetMaximumClientAllocation() -
                         client_state->bytes_allocation_when_visible_;
    bytes_extra_requests.push_back(bytes_extra);
  }
  uint64 bytes_extra_cap = ComputeCap(
      bytes_extra_requests, bytes_available_total - bytes_allocated_total);
  for (ClientStateList::const_iterator it = clients_visible_mru_.begin();
       it != clients_visible_mru_.end();
       ++it) {
    GpuMemoryManagerClientState* client_state = *it;
    uint64 bytes_extra = GetMaximumClientAllocation() -
                         client_state->bytes_allocation_when_visible_;
    client_state->bytes_allocation_when_visible_ += std::min(
        bytes_extra, bytes_extra_cap);
  }
}

void GpuMemoryManager::AssignSurfacesAllocations() {
  // Compute the allocations for all clients.
  ComputeVisibleSurfacesAllocations();
  ComputeNonvisibleSurfacesAllocations();

  // Distribute the remaining memory to visible clients.
  DistributeRemainingMemoryToVisibleSurfaces();

  // Send that allocation to the clients.
  ClientStateList clients = clients_visible_mru_;
  clients.insert(clients.end(),
                 clients_nonvisible_mru_.begin(),
                 clients_nonvisible_mru_.end());
  for (ClientStateList::const_iterator it = clients.begin();
       it != clients.end();
       ++it) {
    GpuMemoryManagerClientState* client_state = *it;

    // Re-assign memory limits to this client when its "nice to have" bucket
    // grows or shrinks by 1/4.
    client_state->bytes_nicetohave_limit_high_ =
        5 * client_state->managed_memory_stats_.bytes_nice_to_have / 4;
    client_state->bytes_nicetohave_limit_low_ =
        3 * client_state->managed_memory_stats_.bytes_nice_to_have / 4;

    // Populate and send the allocation to the client.
    GpuMemoryAllocation allocation;

    allocation.browser_allocation.suggest_have_frontbuffer =
        !client_state->hibernated_;

    allocation.renderer_allocation.bytes_limit_when_visible =
        client_state->bytes_allocation_when_visible_;
    // Use a more conservative memory allocation policy on Linux and Mac
    // because the platform is unstable when under memory pressure.
    // http://crbug.com/145600 (Linux)
    // http://crbug.com/141377 (Mac)
    allocation.renderer_allocation.priority_cutoff_when_visible =
#if defined(OS_MACOSX) || (defined(OS_LINUX) && !defined(OS_CHROMEOS))
        GpuMemoryAllocationForRenderer::kPriorityCutoffAllowNiceToHave;
#else
        GpuMemoryAllocationForRenderer::kPriorityCutoffAllowEverything;
#endif

    allocation.renderer_allocation.bytes_limit_when_not_visible =
        client_state->bytes_allocation_when_nonvisible_;
    allocation.renderer_allocation.priority_cutoff_when_not_visible =
        GpuMemoryAllocationForRenderer::kPriorityCutoffAllowOnlyRequired;

    client_state->client_->SetMemoryAllocation(allocation);
  }
}

void GpuMemoryManager::AssignNonSurfacesAllocations() {
  for (ClientStateList::const_iterator it = clients_nonsurface_.begin();
       it != clients_nonsurface_.end();
       ++it) {
    GpuMemoryManagerClientState* client_state = *it;
    GpuMemoryAllocation allocation;

    if (!client_state->hibernated_) {
      allocation.renderer_allocation.bytes_limit_when_visible =
          GetMinimumClientAllocation();
      allocation.renderer_allocation.priority_cutoff_when_visible =
          GpuMemoryAllocationForRenderer::kPriorityCutoffAllowEverything;
    }

    client_state->client_->SetMemoryAllocation(allocation);
  }
}

void GpuMemoryManager::SetClientsHibernatedState() const {
  // Re-set all tracking groups as being hibernated.
  for (TrackingGroupMap::const_iterator it = tracking_groups_.begin();
       it != tracking_groups_.end();
       ++it) {
    GpuMemoryTrackingGroup* tracking_group = it->second;
    tracking_group->hibernated_ = true;
  }

  // All clients with surfaces that are visible are non-hibernated.
  uint64 non_hibernated_clients = 0;
  for (ClientStateList::const_iterator it = clients_visible_mru_.begin();
       it != clients_visible_mru_.end();
       ++it) {
    GpuMemoryManagerClientState* client_state = *it;
    client_state->hibernated_ = false;
    client_state->tracking_group_->hibernated_ = false;
    non_hibernated_clients++;
  }

  // Then an additional few clients with surfaces are non-hibernated too, up
  // to max_surfaces_with_frontbuffer_soft_limit_ in total.
  for (ClientStateList::const_iterator it = clients_nonvisible_mru_.begin();
       it != clients_nonvisible_mru_.end();
       ++it) {
    GpuMemoryManagerClientState* client_state = *it;
    if (non_hibernated_clients < max_surfaces_with_frontbuffer_soft_limit_) {
      client_state->hibernated_ = false;
      client_state->tracking_group_->hibernated_ = false;
      non_hibernated_clients++;
    } else {
      client_state->hibernated_ = true;
    }
  }

  // Clients that don't have surfaces are non-hibernated if they are
  // in a GL share group with a non-hibernated surface.
  for (ClientStateList::const_iterator it = clients_nonsurface_.begin();
       it != clients_nonsurface_.end();
       ++it) {
    GpuMemoryManagerClientState* client_state = *it;
    client_state->hibernated_ = client_state->tracking_group_->hibernated_;
  }
}

void GpuMemoryManager::SendUmaStatsToBrowser() {
  if (!channel_manager_)
    return;
  GPUMemoryUmaStats params;
  params.bytes_allocated_current = GetCurrentUsage();
  params.bytes_allocated_max = bytes_allocated_historical_max_;
  params.bytes_limit = bytes_available_gpu_memory_;
  params.client_count = clients_visible_mru_.size() +
                        clients_nonvisible_mru_.size() +
                        clients_nonsurface_.size();
  params.context_group_count = tracking_groups_.size();
  channel_manager_->Send(new GpuHostMsg_GpuMemoryUmaStats(params));
}

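// Each client lives in exactly one of the three MRU lists; the helpers below
// keep the list membership and the client's cached iterator in sync.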
GpuMemoryManager::ClientStateList* GpuMemoryManager::GetClientList(
    GpuMemoryManagerClientState* client_state) {
  if (client_state->has_surface_) {
    if (client_state->visible_)
      return &clients_visible_mru_;
    else
      return &clients_nonvisible_mru_;
  }
  return &clients_nonsurface_;
}

void GpuMemoryManager::AddClientToList(
    GpuMemoryManagerClientState* client_state) {
  DCHECK(!client_state->list_iterator_valid_);
  ClientStateList* client_list = GetClientList(client_state);
  client_state->list_iterator_ = client_list->insert(
      client_list->begin(), client_state);
  client_state->list_iterator_valid_ = true;
}

void GpuMemoryManager::RemoveClientFromList(
    GpuMemoryManagerClientState* client_state) {
  DCHECK(client_state->list_iterator_valid_);
  ClientStateList* client_list = GetClientList(client_state);
  client_list->erase(client_state->list_iterator_);
  client_state->list_iterator_valid_ = false;
}

}  // namespace content