Fix build break
[chromium-blink-merge.git] / chrome/browser/renderer_host/web_cache_manager.cc
blob 9374a5dae4e8613286aaa406371d114b3abb9a0d
// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "chrome/browser/renderer_host/web_cache_manager.h"

#include <algorithm>

#include "base/bind.h"
#include "base/compiler_specific.h"
#include "base/memory/singleton.h"
#include "base/message_loop.h"
#include "base/metrics/histogram.h"
#include "base/prefs/pref_registry_simple.h"
#include "base/prefs/pref_service.h"
#include "base/sys_info.h"
#include "base/time.h"
#include "chrome/browser/browser_process.h"
#include "chrome/common/chrome_constants.h"
#include "chrome/common/chrome_notification_types.h"
#include "chrome/common/pref_names.h"
#include "chrome/common/render_messages.h"
#include "content/public/browser/notification_service.h"
#include "content/public/browser/render_process_host.h"

using base::Time;
using base::TimeDelta;
using WebKit::WebCache;
static const int kReviseAllocationDelayMS = 200;

// The default size limit of the in-memory cache is 8 MB.
static const int kDefaultMemoryCacheSize = 8 * 1024 * 1024;

namespace {

int GetDefaultCacheSize() {
  // Start off with a modest default.
  int default_cache_size = kDefaultMemoryCacheSize;

  // Check how much physical memory the OS has.
  int mem_size_mb = base::SysInfo::AmountOfPhysicalMemoryMB();
  if (mem_size_mb >= 1000)  // If we have a GB of memory, set a larger default.
    default_cache_size *= 4;
  else if (mem_size_mb >= 512)  // With 512 MB, set a slightly larger default.
    default_cache_size *= 2;

  UMA_HISTOGRAM_MEMORY_MB("Cache.MaxCacheSizeMB",
                          default_cache_size / 1024 / 1024);

  return default_cache_size;
}

}  // anonymous namespace
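// Worked example for GetDefaultCacheSize() (illustrative, derived from the
// logic above): with 256 MB of physical RAM the default stays at 8 MB; with
// 512 MB it doubles to 16 MB; with 1 GB or more it quadruples to 32 MB. The
// histogram then records 8, 16, or 32 (MB) respectively.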
// static
void WebCacheManager::RegisterPrefs(PrefRegistrySimple* registry) {
  registry->RegisterIntegerPref(prefs::kMemoryCacheSize, GetDefaultCacheSize());
}

// static
WebCacheManager* WebCacheManager::GetInstance() {
  return Singleton<WebCacheManager>::get();
}

WebCacheManager::WebCacheManager()
    : global_size_limit_(GetDefaultGlobalSizeLimit()),
      ALLOW_THIS_IN_INITIALIZER_LIST(weak_factory_(this)) {
  registrar_.Add(this, content::NOTIFICATION_RENDERER_PROCESS_CREATED,
                 content::NotificationService::AllBrowserContextsAndSources());
  registrar_.Add(this, content::NOTIFICATION_RENDERER_PROCESS_TERMINATED,
                 content::NotificationService::AllBrowserContextsAndSources());
}

WebCacheManager::~WebCacheManager() {
}

void WebCacheManager::Add(int renderer_id) {
  DCHECK(inactive_renderers_.count(renderer_id) == 0);

  // It is tempting to make the following DCHECK here, but it fails when a new
  // tab is created as we observe activity from that tab because the
  // RenderProcessHost is recreated and adds itself.
  //
  //   DCHECK(active_renderers_.count(renderer_id) == 0);
  //
  // However, there doesn't seem to be much harm in receiving the calls in this
  // order.

  active_renderers_.insert(renderer_id);

  RendererInfo* stats = &(stats_[renderer_id]);
  memset(stats, 0, sizeof(*stats));
  stats->access = Time::Now();

  // Revise our allocation strategy to account for this new renderer.
  ReviseAllocationStrategyLater();
}

void WebCacheManager::Remove(int renderer_id) {
  // Erase all knowledge of this renderer.
  active_renderers_.erase(renderer_id);
  inactive_renderers_.erase(renderer_id);
  stats_.erase(renderer_id);

  // Reallocate the resources used by this renderer.
  ReviseAllocationStrategyLater();
}
void WebCacheManager::ObserveActivity(int renderer_id) {
  StatsMap::iterator item = stats_.find(renderer_id);
  if (item == stats_.end())
    return;  // We might see stats for a renderer that has been destroyed.

  // Record activity.
  active_renderers_.insert(renderer_id);
  item->second.access = Time::Now();

  std::set<int>::iterator elmt = inactive_renderers_.find(renderer_id);
  if (elmt != inactive_renderers_.end()) {
    inactive_renderers_.erase(elmt);

    // A renderer that was inactive just became active. We should make sure
    // it is given a fair cache allocation, but we defer this for a bit in
    // order to make this function call cheap.
    ReviseAllocationStrategyLater();
  }
}
void WebCacheManager::ObserveStats(int renderer_id,
                                   const WebCache::UsageStats& stats) {
  StatsMap::iterator entry = stats_.find(renderer_id);
  if (entry == stats_.end())
    return;  // We might see stats for a renderer that has been destroyed.

  // Record the updated stats.
  entry->second.capacity = stats.capacity;
  entry->second.deadSize = stats.deadSize;
  entry->second.liveSize = stats.liveSize;
  entry->second.maxDeadCapacity = stats.maxDeadCapacity;
  entry->second.minDeadCapacity = stats.minDeadCapacity;
}

void WebCacheManager::SetGlobalSizeLimit(size_t bytes) {
  global_size_limit_ = bytes;
  ReviseAllocationStrategyLater();
}

void WebCacheManager::ClearCache() {
  // Tell each renderer process to clear the cache.
  ClearRendererCache(active_renderers_, INSTANTLY);
  ClearRendererCache(inactive_renderers_, INSTANTLY);
}

void WebCacheManager::ClearCacheOnNavigation() {
  // Tell each renderer process to clear the cache when a tab is reloaded or
  // the user navigates to a new website.
  ClearRendererCache(active_renderers_, ON_NAVIGATION);
  ClearRendererCache(inactive_renderers_, ON_NAVIGATION);
}
void WebCacheManager::Observe(int type,
                              const content::NotificationSource& source,
                              const content::NotificationDetails& details) {
  switch (type) {
    case content::NOTIFICATION_RENDERER_PROCESS_CREATED: {
      content::RenderProcessHost* process =
          content::Source<content::RenderProcessHost>(source).ptr();
      Add(process->GetID());
      break;
    }
    case content::NOTIFICATION_RENDERER_PROCESS_TERMINATED: {
      content::RenderProcessHost* process =
          content::Source<content::RenderProcessHost>(source).ptr();
      Remove(process->GetID());
      break;
    }
    default:
      NOTREACHED();
      break;
  }
}
// static
size_t WebCacheManager::GetDefaultGlobalSizeLimit() {
  PrefService* pref_service = g_browser_process->local_state();
  if (pref_service)
    return pref_service->GetInteger(prefs::kMemoryCacheSize);

  return GetDefaultCacheSize();
}
void WebCacheManager::GatherStats(const std::set<int>& renderers,
                                  WebCache::UsageStats* stats) {
  DCHECK(stats);

  memset(stats, 0, sizeof(WebCache::UsageStats));

  std::set<int>::const_iterator iter = renderers.begin();
  while (iter != renderers.end()) {
    StatsMap::iterator elmt = stats_.find(*iter);
    if (elmt != stats_.end()) {
      stats->minDeadCapacity += elmt->second.minDeadCapacity;
      stats->maxDeadCapacity += elmt->second.maxDeadCapacity;
      stats->capacity += elmt->second.capacity;
      stats->liveSize += elmt->second.liveSize;
      stats->deadSize += elmt->second.deadSize;
    }
    ++iter;
  }
}
// static
size_t WebCacheManager::GetSize(AllocationTactic tactic,
                                const WebCache::UsageStats& stats) {
  switch (tactic) {
    case DIVIDE_EVENLY:
      // We aren't going to reserve any space for existing objects.
      return 0;
    case KEEP_CURRENT_WITH_HEADROOM:
      // We need enough space for our current objects, plus some headroom.
      return 3 * GetSize(KEEP_CURRENT, stats) / 2;
    case KEEP_CURRENT:
      // We need enough space to keep our current objects.
      return stats.liveSize + stats.deadSize;
    case KEEP_LIVE_WITH_HEADROOM:
      // We need enough space to keep our live resources, plus some headroom.
      return 3 * GetSize(KEEP_LIVE, stats) / 2;
    case KEEP_LIVE:
      // We need enough space to keep our live resources.
      return stats.liveSize;
    default:
      NOTREACHED() << "Unknown cache allocation tactic";
      return 0;
  }
}
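// Worked example for GetSize() (illustrative): with liveSize = 6 MB and
// deadSize = 2 MB, the tactics reserve
//   DIVIDE_EVENLY              -> 0
//   KEEP_LIVE                  -> 6 MB
//   KEEP_LIVE_WITH_HEADROOM    -> 9 MB   (3 * 6 MB / 2)
//   KEEP_CURRENT               -> 8 MB   (6 MB + 2 MB)
//   KEEP_CURRENT_WITH_HEADROOM -> 12 MB  (3 * 8 MB / 2)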
bool WebCacheManager::AttemptTactic(
    AllocationTactic active_tactic,
    const WebCache::UsageStats& active_stats,
    AllocationTactic inactive_tactic,
    const WebCache::UsageStats& inactive_stats,
    AllocationStrategy* strategy) {
  DCHECK(strategy);

  size_t active_size = GetSize(active_tactic, active_stats);
  size_t inactive_size = GetSize(inactive_tactic, inactive_stats);

  // Give up if we don't have enough space to use this tactic.
  if (global_size_limit_ < active_size + inactive_size)
    return false;

  // Compute the unreserved space available.
  size_t total_extra = global_size_limit_ - (active_size + inactive_size);

  // The plan for the extra space is to divide it evenly among the active
  // renderers.
  size_t shares = active_renderers_.size();

  // The inactive renderers get one share of the extra memory to be divided
  // among themselves.
  size_t inactive_extra = 0;
  if (!inactive_renderers_.empty()) {
    ++shares;
    inactive_extra = total_extra / shares;
  }

  // The remaining memory is allocated to the active renderers.
  size_t active_extra = total_extra - inactive_extra;

  // Actually compute the allocations for each renderer.
  AddToStrategy(active_renderers_, active_tactic, active_extra, strategy);
  AddToStrategy(inactive_renderers_, inactive_tactic, inactive_extra, strategy);

  // We succeeded in computing an allocation strategy.
  return true;
}
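// Worked example for AttemptTactic() (illustrative): with a 32 MB global
// limit, a reserved active_size of 12 MB and inactive_size of 8 MB, the
// unreserved space is 12 MB. With 2 active renderers and at least one
// inactive renderer there are 3 shares: the inactive group gets
// 12 MB / 3 = 4 MB to split among themselves, and the remaining 8 MB goes
// to the active renderers.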
void WebCacheManager::AddToStrategy(const std::set<int>& renderers,
                                    AllocationTactic tactic,
                                    size_t extra_bytes_to_allocate,
                                    AllocationStrategy* strategy) {
  DCHECK(strategy);

  // Nothing to do if there are no renderers. It is common for there to be no
  // inactive renderers if there is a single active tab.
  if (renderers.empty())
    return;

  // Divide the extra memory evenly among the renderers.
  size_t extra_each = extra_bytes_to_allocate / renderers.size();

  std::set<int>::const_iterator iter = renderers.begin();
  while (iter != renderers.end()) {
    size_t cache_size = extra_each;

    // Add in the space required to implement |tactic|.
    StatsMap::iterator elmt = stats_.find(*iter);
    if (elmt != stats_.end())
      cache_size += GetSize(tactic, elmt->second);

    // Record the allocation in our strategy.
    strategy->push_back(Allocation(*iter, cache_size));
    ++iter;
  }
}
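// Worked example for AddToStrategy() (illustrative, continuing the numbers
// above): the 8 MB of active_extra split between 2 active renderers gives
// each 4 MB on top of whatever GetSize(tactic, stats) reserves for it, so a
// renderer whose tactic requires 9 MB ends up with a 13 MB allocation.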
void WebCacheManager::EnactStrategy(const AllocationStrategy& strategy) {
  // Inform each render process of its cache allocation.
  AllocationStrategy::const_iterator allocation = strategy.begin();
  while (allocation != strategy.end()) {
    content::RenderProcessHost* host =
        content::RenderProcessHost::FromID(allocation->first);
    if (host) {
      // This is the capacity this renderer has been allocated.
      size_t capacity = allocation->second;

      // We don't reserve any space for dead objects in the cache. Instead, we
      // prefer to keep live objects around. There is probably some performance
      // tuning to be done here.
      size_t min_dead_capacity = 0;

      // We allow the dead objects to consume all of the cache, if the renderer
      // so desires. If we wanted this memory, we would have set the total
      // capacity lower.
      size_t max_dead_capacity = capacity;

      host->Send(new ChromeViewMsg_SetCacheCapacities(min_dead_capacity,
                                                      max_dead_capacity,
                                                      capacity));
    }
    ++allocation;
  }
}
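// Note (illustrative): for the 13 MB allocation in the example above, the
// renderer is sent ChromeViewMsg_SetCacheCapacities(0, 13 MB, 13 MB), i.e.
// no space is reserved exclusively for dead objects, but dead objects may
// fill the whole 13 MB cache if the renderer so chooses.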
void WebCacheManager::ClearRendererCache(
    const std::set<int>& renderers,
    WebCacheManager::ClearCacheOccasion occasion) {
  std::set<int>::const_iterator iter = renderers.begin();
  for (; iter != renderers.end(); ++iter) {
    content::RenderProcessHost* host =
        content::RenderProcessHost::FromID(*iter);
    if (host)
      host->Send(new ChromeViewMsg_ClearCache(occasion == ON_NAVIGATION));
  }
}
void WebCacheManager::ReviseAllocationStrategy() {
  DCHECK(stats_.size() <=
         active_renderers_.size() + inactive_renderers_.size());

  // Check if renderers have gone inactive.
  FindInactiveRenderers();

  // Gather statistics.
  WebCache::UsageStats active;
  WebCache::UsageStats inactive;
  GatherStats(active_renderers_, &active);
  GatherStats(inactive_renderers_, &inactive);

  UMA_HISTOGRAM_COUNTS_100("Cache.ActiveTabs", active_renderers_.size());
  UMA_HISTOGRAM_COUNTS_100("Cache.InactiveTabs", inactive_renderers_.size());
  UMA_HISTOGRAM_MEMORY_MB("Cache.ActiveCapacityMB",
                          active.capacity / 1024 / 1024);
  UMA_HISTOGRAM_MEMORY_MB("Cache.ActiveDeadSizeMB",
                          active.deadSize / 1024 / 1024);
  UMA_HISTOGRAM_MEMORY_MB("Cache.ActiveLiveSizeMB",
                          active.liveSize / 1024 / 1024);
  UMA_HISTOGRAM_MEMORY_MB("Cache.InactiveCapacityMB",
                          inactive.capacity / 1024 / 1024);
  UMA_HISTOGRAM_MEMORY_MB("Cache.InactiveDeadSizeMB",
                          inactive.deadSize / 1024 / 1024);
  UMA_HISTOGRAM_MEMORY_MB("Cache.InactiveLiveSizeMB",
                          inactive.liveSize / 1024 / 1024);
  // Compute an allocation strategy.
  //
  // We attempt various tactics in order of preference. Our first preference
  // is not to evict any objects. If we don't have enough resources, we'll
  // first try to evict dead data only. If that fails, we'll just divide the
  // resources we have evenly.
  //
  // We always try to give the active renderers some head room in their
  // allocations so they can take memory away from an inactive renderer with
  // a large cache allocation.
  //
  // Notice the early exit will prevent attempting less desirable tactics once
  // we've found a workable strategy.
  AllocationStrategy strategy;
  if (  // Ideally, we'd like to give the active renderers some headroom and
        // keep all our current objects.
      AttemptTactic(KEEP_CURRENT_WITH_HEADROOM, active,
                    KEEP_CURRENT, inactive, &strategy) ||
      // If we can't have that, then we first try to evict the dead objects in
      // the caches of inactive renderers.
      AttemptTactic(KEEP_CURRENT_WITH_HEADROOM, active,
                    KEEP_LIVE, inactive, &strategy) ||
      // Next, we try to keep the live objects in the active renderers (with
      // some room for new objects) and give whatever is left to the inactive
      // renderers.
      AttemptTactic(KEEP_LIVE_WITH_HEADROOM, active,
                    DIVIDE_EVENLY, inactive, &strategy) ||
      // If we've gotten this far, then we are very tight on memory. Let's try
      // to at least keep around the live objects for the active renderers.
      AttemptTactic(KEEP_LIVE, active, DIVIDE_EVENLY, inactive, &strategy) ||
      // We're basically out of memory. The best we can do is just divide up
      // what we have and soldier on.
      AttemptTactic(DIVIDE_EVENLY, active, DIVIDE_EVENLY, inactive,
                    &strategy)) {
    // Having found a workable strategy, we enact it.
    EnactStrategy(strategy);
  } else {
    // DIVIDE_EVENLY / DIVIDE_EVENLY should always succeed.
    NOTREACHED() << "Unable to find a cache allocation";
  }
}
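// Worked example of the tactic cascade (illustrative): with a 32 MB global
// limit, if KEEP_CURRENT_WITH_HEADROOM for the active renderers needs 24 MB
// and KEEP_CURRENT for the inactive ones needs 16 MB, the first tactic fails
// (40 MB > 32 MB). Dropping the inactive renderers to KEEP_LIVE might need
// only 8 MB, so the second tactic fits (24 MB + 8 MB <= 32 MB) and the
// remaining tactics are never attempted.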
void WebCacheManager::ReviseAllocationStrategyLater() {
  // Ask to be called back in a few milliseconds to actually recompute our
  // allocation.
  MessageLoop::current()->PostDelayedTask(FROM_HERE,
      base::Bind(
          &WebCacheManager::ReviseAllocationStrategy,
          weak_factory_.GetWeakPtr()),
      base::TimeDelta::FromMilliseconds(kReviseAllocationDelayMS));
}
void WebCacheManager::FindInactiveRenderers() {
  std::set<int>::const_iterator iter = active_renderers_.begin();
  while (iter != active_renderers_.end()) {
    StatsMap::iterator elmt = stats_.find(*iter);
    DCHECK(elmt != stats_.end());
    TimeDelta idle = Time::Now() - elmt->second.access;
    if (idle >= TimeDelta::FromMinutes(kRendererInactiveThresholdMinutes)) {
      // Moved to inactive status. This invalidates our iterator.
      inactive_renderers_.insert(*iter);
      active_renderers_.erase(*iter);
      iter = active_renderers_.begin();
      continue;
    }
    ++iter;
  }
}