// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef CONTENT_COMMON_GPU_GPU_MEMORY_MANAGER_H_
#define CONTENT_COMMON_GPU_GPU_MEMORY_MANAGER_H_

#include <list>
#include <map>
#include <vector>

#include "base/basictypes.h"
#include "base/cancelable_callback.h"
#include "base/containers/hash_tables.h"
#include "base/gtest_prod_util.h"
#include "base/memory/weak_ptr.h"
#include "content/common/content_export.h"
#include "content/common/gpu/gpu_memory_allocation.h"
#include "content/public/common/gpu_memory_stats.h"
#include "gpu/command_buffer/service/memory_tracking.h"

namespace content {

class GpuChannelManager;
class GpuMemoryManagerClient;
class GpuMemoryManagerClientState;
class GpuMemoryTrackingGroup;

class CONTENT_EXPORT GpuMemoryManager :
    public base::SupportsWeakPtr<GpuMemoryManager> {
 public:
#if defined(OS_ANDROID) || (defined(OS_LINUX) && !defined(OS_CHROMEOS))
  enum { kDefaultMaxSurfacesWithFrontbufferSoftLimit = 1 };
#else
  enum { kDefaultMaxSurfacesWithFrontbufferSoftLimit = 8 };
#endif
  enum ScheduleManageTime {
    // Add a call to Manage to the thread's message loop immediately.
    kScheduleManageNow,
    // Add a Manage call to the thread's message loop for execution 1/60th of
    // a second from now.
    kScheduleManageLater,
  };

  GpuMemoryManager(GpuChannelManager* channel_manager,
                   uint64 max_surfaces_with_frontbuffer_soft_limit);
  ~GpuMemoryManager();

  // Schedule a Manage() call. If schedule_manage_time is kScheduleManageNow,
  // we PostTask without delay. Otherwise we PostDelayedTask using a
  // CancelableClosure and allow multiple delayed calls to "queue" up. This
  // way, we do not spam clients in certain lower priority situations. An
  // immediate schedule manage will cancel any queued delayed manage.
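  // Illustrative sequence (a reading of the comment above, not a contract):
  // repeated kScheduleManageLater requests coalesce into a single pending
  // delayed Manage(), and a subsequent kScheduleManageNow request cancels
  // that pending delayed call and posts Manage() immediately.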
  void ScheduleManage(ScheduleManageTime schedule_manage_time);

  // Retrieve GPU Resource consumption statistics for the task manager
  void GetVideoMemoryUsageStats(
      content::GPUVideoMemoryUsageStats* video_memory_usage_stats) const;

  GpuMemoryManagerClientState* CreateClientState(
      GpuMemoryManagerClient* client, bool has_surface, bool visible);

  GpuMemoryTrackingGroup* CreateTrackingGroup(
      base::ProcessId pid, gpu::gles2::MemoryTracker* memory_tracker);

 private:
  friend class GpuMemoryManagerTest;
  friend class GpuMemoryTrackingGroup;
  friend class GpuMemoryManagerClientState;

  FRIEND_TEST_ALL_PREFIXES(GpuMemoryManagerTest,
                           TestManageBasicFunctionality);
  FRIEND_TEST_ALL_PREFIXES(GpuMemoryManagerTest,
                           TestManageChangingVisibility);
  FRIEND_TEST_ALL_PREFIXES(GpuMemoryManagerTest,
                           TestManageManyVisibleStubs);
  FRIEND_TEST_ALL_PREFIXES(GpuMemoryManagerTest,
                           TestManageManyNotVisibleStubs);
  FRIEND_TEST_ALL_PREFIXES(GpuMemoryManagerTest,
                           TestManageChangingLastUsedTime);
  FRIEND_TEST_ALL_PREFIXES(GpuMemoryManagerTest,
                           TestManageChangingImportanceShareGroup);
  FRIEND_TEST_ALL_PREFIXES(GpuMemoryManagerTest,
                           TestForegroundStubsGetBonusAllocation);
  FRIEND_TEST_ALL_PREFIXES(GpuMemoryManagerTest,
                           TestUpdateAvailableGpuMemory);
  FRIEND_TEST_ALL_PREFIXES(GpuMemoryManagerTest,
                           GpuMemoryAllocationCompareTests);
  FRIEND_TEST_ALL_PREFIXES(GpuMemoryManagerTest,
                           StubMemoryStatsForLastManageTests);
  FRIEND_TEST_ALL_PREFIXES(GpuMemoryManagerTest,
                           TestManagedUsageTracking);
  FRIEND_TEST_ALL_PREFIXES(GpuMemoryManagerTest,
                           BackgroundMru);
  FRIEND_TEST_ALL_PREFIXES(GpuMemoryManagerTest,
                           AllowNonvisibleMemory);
  FRIEND_TEST_ALL_PREFIXES(GpuMemoryManagerTest,
                           BackgroundDiscardPersistent);
  FRIEND_TEST_ALL_PREFIXES(GpuMemoryManagerTest,
                           UnmanagedTracking);
  FRIEND_TEST_ALL_PREFIXES(GpuMemoryManagerTest,
                           DefaultAllocation);

  typedef std::map<gpu::gles2::MemoryTracker*, GpuMemoryTrackingGroup*>
      TrackingGroupMap;

  typedef std::list<GpuMemoryManagerClientState*> ClientStateList;

  void Manage();
  void SetClientsHibernatedState() const;
  void AssignSurfacesAllocations();
  void AssignNonSurfacesAllocations();

  // Math helper function to compute the maximum value of cap such that
  // sum_i min(bytes[i], cap) <= bytes_sum_limit
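  // Worked example (illustrative numbers): for bytes = {10, 20, 30} and
  // bytes_sum_limit = 45, the result is cap = 17, because
  // min(10, 17) + min(20, 17) + min(30, 17) = 44 <= 45, while a cap of 18
  // would give 10 + 18 + 18 = 46 > 45.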
  static uint64 ComputeCap(std::vector<uint64> bytes, uint64 bytes_sum_limit);

  // Compute the allocation for clients when visible and not visible.
  void ComputeVisibleSurfacesAllocations();
  void ComputeNonvisibleSurfacesAllocations();
  void DistributeRemainingMemoryToVisibleSurfaces();

  // Compute the budget for a client. Allow at most bytes_above_required_cap
  // bytes above client_state's required level. Allow at most
  // bytes_above_minimum_cap bytes above client_state's minimum level. Allow
  // at most bytes_overall_cap bytes total.
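  // Illustrative reading of these caps (hypothetical numbers): with a
  // required level of 40 MB, a minimum level of 10 MB,
  // bytes_above_required_cap of 16 MB, bytes_above_minimum_cap of 64 MB, and
  // bytes_overall_cap of 48 MB, the returned budget can be no larger than
  // min(40 + 16, 10 + 64, 48) = 48 MB.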
  uint64 ComputeClientAllocationWhenVisible(
      GpuMemoryManagerClientState* client_state,
      uint64 bytes_above_required_cap,
      uint64 bytes_above_minimum_cap,
      uint64 bytes_overall_cap);
  uint64 ComputeClientAllocationWhenNonvisible(
      GpuMemoryManagerClientState* client_state);

  // Update the amount of GPU memory we think we have in the system, based
  // on what the stubs' contexts report.
  void UpdateAvailableGpuMemory();
  void UpdateUnmanagedMemoryLimits();

  // The amount of video memory which is available for allocation.
  uint64 GetAvailableGpuMemory() const;

  // Minimum value of available GPU memory, no matter how little the GPU
  // reports. This is the default value.
  uint64 GetDefaultAvailableGpuMemory() const;

  // Maximum cap on total GPU memory, no matter how much the GPU reports.
  uint64 GetMaximumTotalGpuMemory() const;

  // The maximum and minimum amount of memory that a client may be assigned.
  uint64 GetMaximumClientAllocation() const;
  uint64 GetMinimumClientAllocation() const {
    return bytes_minimum_per_client_;
  }
  // The default amount of memory that a client is assigned, if it has not
  // reported any memory usage stats yet.
  uint64 GetDefaultClientAllocation() const {
    return bytes_default_per_client_;
  }

  static uint64 CalcAvailableFromGpuTotal(uint64 total_gpu_memory);

  // Send memory usage stats to the browser process.
  void SendUmaStatsToBrowser();

  // Get the current number of bytes allocated.
  uint64 GetCurrentUsage() const {
    return bytes_allocated_managed_current_ +
           bytes_allocated_unmanaged_current_;
  }

  // GpuMemoryTrackingGroup interface
  void TrackMemoryAllocatedChange(
      GpuMemoryTrackingGroup* tracking_group,
      uint64 old_size,
      uint64 new_size,
      gpu::gles2::MemoryTracker::Pool tracking_pool);
  void OnDestroyTrackingGroup(GpuMemoryTrackingGroup* tracking_group);
  bool EnsureGPUMemoryAvailable(uint64 size_needed);

  // GpuMemoryManagerClientState interface
  void SetClientStateVisible(
      GpuMemoryManagerClientState* client_state, bool visible);
  void SetClientStateManagedMemoryStats(
      GpuMemoryManagerClientState* client_state,
      const GpuManagedMemoryStats& stats);
  void OnDestroyClientState(GpuMemoryManagerClientState* client);

  // Add or remove a client from its clients list (visible, nonvisible, or
  // nonsurface). When adding the client, add it to the front of the list.
  void AddClientToList(GpuMemoryManagerClientState* client_state);
  void RemoveClientFromList(GpuMemoryManagerClientState* client_state);
  ClientStateList* GetClientList(GpuMemoryManagerClientState* client_state);

  // Interfaces for testing
  void TestingDisableScheduleManage() { disable_schedule_manage_ = true; }
  void TestingSetAvailableGpuMemory(uint64 bytes) {
    bytes_available_gpu_memory_ = bytes;
    bytes_available_gpu_memory_overridden_ = true;
  }

  void TestingSetMinimumClientAllocation(uint64 bytes) {
    bytes_minimum_per_client_ = bytes;
  }

  void TestingSetDefaultClientAllocation(uint64 bytes) {
    bytes_default_per_client_ = bytes;
  }

  void TestingSetUnmanagedLimitStep(uint64 bytes) {
    bytes_unmanaged_limit_step_ = bytes;
  }

  GpuChannelManager* channel_manager_;

  // A list of all visible and nonvisible clients, in most-recently-used
  // order (most recently used is first).
  ClientStateList clients_visible_mru_;
  ClientStateList clients_nonvisible_mru_;

  // A list of all clients that don't have a surface.
  ClientStateList clients_nonsurface_;

  // All context groups' tracking structures
  TrackingGroupMap tracking_groups_;

  base::CancelableClosure delayed_manage_callback_;
  bool manage_immediate_scheduled_;

  uint64 max_surfaces_with_frontbuffer_soft_limit_;

  // The maximum amount of memory that may be allocated for GPU resources
  uint64 bytes_available_gpu_memory_;
  bool bytes_available_gpu_memory_overridden_;

  // Whether or not clients can be allocated memory when nonvisible.
  bool allow_nonvisible_memory_;

  // The minimum and default allocations for a single client.
  uint64 bytes_minimum_per_client_;
  uint64 bytes_default_per_client_;

  // The current total memory usage, and historical maximum memory usage
  uint64 bytes_allocated_managed_current_;
  uint64 bytes_allocated_managed_visible_;
  uint64 bytes_allocated_managed_nonvisible_;
  uint64 bytes_allocated_unmanaged_current_;
  uint64 bytes_allocated_historical_max_;

  // If bytes_allocated_unmanaged_current_ leaves the interval [low_, high_),
  // then ScheduleManage to take the change into account.
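  // Illustrative example (hypothetical numbers): if the current window is
  // [8 MB, 12 MB) and unmanaged allocations push the total to 13 MB, a
  // Manage() is scheduled and the window is recomputed around the new total;
  // fluctuations that stay inside the window do not trigger a Manage().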
  uint64 bytes_allocated_unmanaged_high_;
  uint64 bytes_allocated_unmanaged_low_;

  // Update bytes_allocated_unmanaged_low/high_ in intervals of step_.
  uint64 bytes_unmanaged_limit_step_;

  // Used to disable automatic changes to Manage() in testing.
  bool disable_schedule_manage_;

  DISALLOW_COPY_AND_ASSIGN(GpuMemoryManager);
};

}  // namespace content

#endif  // CONTENT_COMMON_GPU_GPU_MEMORY_MANAGER_H_