chrome/browser/renderer_host/web_cache_manager.cc
// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
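//
// WebCacheManager keeps track of renderer processes as they are created and
// destroyed, and divides the global in-memory cache budget among them,
// favoring the renderers that have been active most recently.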
#include "chrome/browser/renderer_host/web_cache_manager.h"

#include <algorithm>

#include "base/bind.h"
#include "base/compiler_specific.h"
#include "base/memory/singleton.h"
#include "base/message_loop/message_loop.h"
#include "base/metrics/histogram.h"
#include "base/prefs/pref_registry_simple.h"
#include "base/prefs/pref_service.h"
#include "base/sys_info.h"
#include "base/time/time.h"
#include "chrome/browser/browser_process.h"
#include "chrome/browser/chrome_notification_types.h"
#include "chrome/common/chrome_constants.h"
#include "chrome/common/pref_names.h"
#include "chrome/common/render_messages.h"
#include "content/public/browser/notification_service.h"
#include "content/public/browser/render_process_host.h"

#if defined(OS_ANDROID)
#include "base/android/sys_utils.h"
#endif

using base::Time;
using base::TimeDelta;
using blink::WebCache;
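// How long to wait before recomputing the cache allocation after renderer
// activity, so that a burst of changes is coalesced into a single revision.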
static const int kReviseAllocationDelayMS = 200;

// The default size limit of the in-memory cache is 8 MB.
static const int kDefaultMemoryCacheSize = 8 * 1024 * 1024;

namespace {

int GetDefaultCacheSize() {
  // Start off with a modest default.
  int default_cache_size = kDefaultMemoryCacheSize;

  // Check how much physical memory the OS has.
  int mem_size_mb = base::SysInfo::AmountOfPhysicalMemoryMB();
  if (mem_size_mb >= 1000)  // If we have a GB of memory, set a larger default.
    default_cache_size *= 4;
  else if (mem_size_mb >= 512)  // With 512 MB, set a slightly larger default.
    default_cache_size *= 2;

  UMA_HISTOGRAM_MEMORY_MB("Cache.MaxCacheSizeMB",
                          default_cache_size / 1024 / 1024);

  return default_cache_size;
}

}  // anonymous namespace
// static
void WebCacheManager::RegisterPrefs(PrefRegistrySimple* registry) {
  registry->RegisterIntegerPref(prefs::kMemoryCacheSize, GetDefaultCacheSize());
}

// static
WebCacheManager* WebCacheManager::GetInstance() {
  return Singleton<WebCacheManager>::get();
}

WebCacheManager::WebCacheManager()
    : global_size_limit_(GetDefaultGlobalSizeLimit()),
      weak_factory_(this) {
  registrar_.Add(this, content::NOTIFICATION_RENDERER_PROCESS_CREATED,
                 content::NotificationService::AllBrowserContextsAndSources());
  registrar_.Add(this, content::NOTIFICATION_RENDERER_PROCESS_TERMINATED,
                 content::NotificationService::AllBrowserContextsAndSources());
}

WebCacheManager::~WebCacheManager() {
}

void WebCacheManager::Add(int renderer_id) {
  DCHECK(inactive_renderers_.count(renderer_id) == 0);

  // It is tempting to make the following DCHECK here, but it fails when a new
  // tab is created as we observe activity from that tab because the
  // RenderProcessHost is recreated and adds itself.

  // DCHECK(active_renderers_.count(renderer_id) == 0);

  // However, there doesn't seem to be much harm in receiving the calls in this
  // order.

  active_renderers_.insert(renderer_id);
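  // Create (or reset) the stats entry for this renderer; up-to-date usage
  // numbers will arrive later via ObserveStats().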
  RendererInfo* stats = &(stats_[renderer_id]);
  memset(stats, 0, sizeof(*stats));
  stats->access = Time::Now();

  // Revise our allocation strategy to account for this new renderer.
  ReviseAllocationStrategyLater();
}

void WebCacheManager::Remove(int renderer_id) {
  // Erase all knowledge of this renderer.
  active_renderers_.erase(renderer_id);
  inactive_renderers_.erase(renderer_id);
  stats_.erase(renderer_id);

  // Reallocate the resources used by this renderer.
  ReviseAllocationStrategyLater();
}

void WebCacheManager::ObserveActivity(int renderer_id) {
  StatsMap::iterator item = stats_.find(renderer_id);
  if (item == stats_.end())
    return;  // We might see stats for a renderer that has been destroyed.

  // Record activity.
  active_renderers_.insert(renderer_id);
  item->second.access = Time::Now();

  std::set<int>::iterator elmt = inactive_renderers_.find(renderer_id);
  if (elmt != inactive_renderers_.end()) {
    inactive_renderers_.erase(elmt);

    // A renderer that was inactive just became active. We should make sure
    // it is given a fair cache allocation, but we defer this for a bit in
    // order to make this function call cheap.
    ReviseAllocationStrategyLater();
  }
}
void WebCacheManager::ObserveStats(int renderer_id,
                                   const WebCache::UsageStats& stats) {
  StatsMap::iterator entry = stats_.find(renderer_id);
  if (entry == stats_.end())
    return;  // We might see stats for a renderer that has been destroyed.

  // Record the updated stats.
  entry->second.capacity = stats.capacity;
  entry->second.deadSize = stats.deadSize;
  entry->second.liveSize = stats.liveSize;
  entry->second.maxDeadCapacity = stats.maxDeadCapacity;
  entry->second.minDeadCapacity = stats.minDeadCapacity;
}

void WebCacheManager::SetGlobalSizeLimit(size_t bytes) {
  global_size_limit_ = bytes;
  ReviseAllocationStrategyLater();
}

void WebCacheManager::ClearCache() {
  // Tell each renderer process to clear the cache.
  ClearRendererCache(active_renderers_, INSTANTLY);
  ClearRendererCache(inactive_renderers_, INSTANTLY);
}

void WebCacheManager::ClearCacheOnNavigation() {
  // Tell each renderer process to clear the cache when a tab is reloaded or
  // the user navigates to a new website.
  ClearRendererCache(active_renderers_, ON_NAVIGATION);
  ClearRendererCache(inactive_renderers_, ON_NAVIGATION);
}
void WebCacheManager::Observe(int type,
                              const content::NotificationSource& source,
                              const content::NotificationDetails& details) {
  switch (type) {
    case content::NOTIFICATION_RENDERER_PROCESS_CREATED: {
      content::RenderProcessHost* process =
          content::Source<content::RenderProcessHost>(source).ptr();
      Add(process->GetID());
      break;
    }
    case content::NOTIFICATION_RENDERER_PROCESS_TERMINATED: {
      content::RenderProcessHost* process =
          content::Source<content::RenderProcessHost>(source).ptr();
      Remove(process->GetID());
      break;
    }
    default:
      NOTREACHED();
      break;
  }
}

// static
size_t WebCacheManager::GetDefaultGlobalSizeLimit() {
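  // The preference stores the limit in bytes; if local state is not available
  // yet, fall back to a default based on the amount of physical memory.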
  PrefService* pref_service = g_browser_process->local_state();
  if (pref_service)
    return pref_service->GetInteger(prefs::kMemoryCacheSize);

  return GetDefaultCacheSize();
}

void WebCacheManager::GatherStats(const std::set<int>& renderers,
                                  WebCache::UsageStats* stats) {
  DCHECK(stats);

  memset(stats, 0, sizeof(WebCache::UsageStats));

  std::set<int>::const_iterator iter = renderers.begin();
  while (iter != renderers.end()) {
    StatsMap::iterator elmt = stats_.find(*iter);
    if (elmt != stats_.end()) {
      stats->minDeadCapacity += elmt->second.minDeadCapacity;
      stats->maxDeadCapacity += elmt->second.maxDeadCapacity;
      stats->capacity += elmt->second.capacity;
      stats->liveSize += elmt->second.liveSize;
      stats->deadSize += elmt->second.deadSize;
    }
    ++iter;
  }
}

// static
size_t WebCacheManager::GetSize(AllocationTactic tactic,
                                const WebCache::UsageStats& stats) {
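  // Returns the number of bytes that must be reserved for |stats| under
  // |tactic|; the *_WITH_HEADROOM tactics reserve an extra 50% on top of the
  // corresponding base size.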
  switch (tactic) {
    case DIVIDE_EVENLY:
      // We aren't going to reserve any space for existing objects.
      return 0;
    case KEEP_CURRENT_WITH_HEADROOM:
      // We need enough space for our current objects, plus some headroom.
      return 3 * GetSize(KEEP_CURRENT, stats) / 2;
    case KEEP_CURRENT:
      // We need enough space to keep our current objects.
      return stats.liveSize + stats.deadSize;
    case KEEP_LIVE_WITH_HEADROOM:
      // We need enough space to keep our live resources, plus some headroom.
      return 3 * GetSize(KEEP_LIVE, stats) / 2;
    case KEEP_LIVE:
      // We need enough space to keep our live resources.
      return stats.liveSize;
    default:
      NOTREACHED() << "Unknown cache allocation tactic";
      return 0;
  }
}

bool WebCacheManager::AttemptTactic(
    AllocationTactic active_tactic,
    const WebCache::UsageStats& active_stats,
    AllocationTactic inactive_tactic,
    const WebCache::UsageStats& inactive_stats,
    AllocationStrategy* strategy) {
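  // Returns false if |global_size_limit_| cannot cover the space required by
  // both tactics; otherwise fills |strategy| with a per-renderer allocation.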
  DCHECK(strategy);

  size_t active_size = GetSize(active_tactic, active_stats);
  size_t inactive_size = GetSize(inactive_tactic, inactive_stats);

  // Give up if we don't have enough space to use this tactic.
  if (global_size_limit_ < active_size + inactive_size)
    return false;

  // Compute the unreserved space available.
  size_t total_extra = global_size_limit_ - (active_size + inactive_size);

  // The plan for the extra space is to divide it evenly among the active
  // renderers.
  size_t shares = active_renderers_.size();

  // The inactive renderers get one share of the extra memory to be divided
  // among themselves.
  size_t inactive_extra = 0;
  if (!inactive_renderers_.empty()) {
    ++shares;
    inactive_extra = total_extra / shares;
  }

  // The remaining memory is allocated to the active renderers.
  size_t active_extra = total_extra - inactive_extra;

  // Actually compute the allocations for each renderer.
  AddToStrategy(active_renderers_, active_tactic, active_extra, strategy);
  AddToStrategy(inactive_renderers_, inactive_tactic, inactive_extra, strategy);

  // We succeeded in computing an allocation strategy.
  return true;
}

void WebCacheManager::AddToStrategy(const std::set<int>& renderers,
                                    AllocationTactic tactic,
                                    size_t extra_bytes_to_allocate,
                                    AllocationStrategy* strategy) {
  DCHECK(strategy);

  // Nothing to do if there are no renderers. It is common for there to be no
  // inactive renderers if there is a single active tab.
  if (renderers.empty())
    return;

  // Divide the extra memory evenly among the renderers.
  size_t extra_each = extra_bytes_to_allocate / renderers.size();
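  // Note that this is integer division: any remainder bytes are simply not
  // handed out.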
  std::set<int>::const_iterator iter = renderers.begin();
  while (iter != renderers.end()) {
    size_t cache_size = extra_each;

    // Add in the space required to implement |tactic|.
    StatsMap::iterator elmt = stats_.find(*iter);
    if (elmt != stats_.end())
      cache_size += GetSize(tactic, elmt->second);

    // Record the allocation in our strategy.
    strategy->push_back(Allocation(*iter, cache_size));
    ++iter;
  }
}

void WebCacheManager::EnactStrategy(const AllocationStrategy& strategy) {
  // Inform each render process of its cache allocation.
  AllocationStrategy::const_iterator allocation = strategy.begin();
  while (allocation != strategy.end()) {
    content::RenderProcessHost* host =
        content::RenderProcessHost::FromID(allocation->first);
    if (host) {
      // This is the capacity this renderer has been allocated.
      size_t capacity = allocation->second;

      // We don't reserve any space for dead objects in the cache. Instead, we
      // prefer to keep live objects around. There is probably some performance
      // tuning to be done here.
      size_t min_dead_capacity = 0;

      // We allow the dead objects to consume up to half of the cache capacity.
      size_t max_dead_capacity = capacity / 2;
#if defined(OS_ANDROID)
      if (base::android::SysUtils::IsLowEndDevice())
        max_dead_capacity = std::min(512 * 1024U, max_dead_capacity);
#endif

      host->Send(new ChromeViewMsg_SetCacheCapacities(min_dead_capacity,
                                                      max_dead_capacity,
                                                      capacity));
    }
    ++allocation;
  }
}

void WebCacheManager::ClearRendererCache(
    const std::set<int>& renderers,
    WebCacheManager::ClearCacheOccasion occasion) {
  std::set<int>::const_iterator iter = renderers.begin();
  for (; iter != renderers.end(); ++iter) {
    content::RenderProcessHost* host =
        content::RenderProcessHost::FromID(*iter);
    if (host)
      host->Send(new ChromeViewMsg_ClearCache(occasion == ON_NAVIGATION));
  }
}

void WebCacheManager::ReviseAllocationStrategy() {
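  // We should not be holding stats for renderers that are in neither the
  // active nor the inactive set.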
  DCHECK(stats_.size() <=
         active_renderers_.size() + inactive_renderers_.size());

  // Check if renderers have gone inactive.
  FindInactiveRenderers();

  // Gather statistics.
  WebCache::UsageStats active;
  WebCache::UsageStats inactive;
  GatherStats(active_renderers_, &active);
  GatherStats(inactive_renderers_, &inactive);

  UMA_HISTOGRAM_COUNTS_100("Cache.ActiveTabs", active_renderers_.size());
  UMA_HISTOGRAM_COUNTS_100("Cache.InactiveTabs", inactive_renderers_.size());
  UMA_HISTOGRAM_MEMORY_MB("Cache.ActiveCapacityMB",
                          active.capacity / 1024 / 1024);
  UMA_HISTOGRAM_MEMORY_MB("Cache.ActiveDeadSizeMB",
                          active.deadSize / 1024 / 1024);
  UMA_HISTOGRAM_MEMORY_MB("Cache.ActiveLiveSizeMB",
                          active.liveSize / 1024 / 1024);
  UMA_HISTOGRAM_MEMORY_MB("Cache.InactiveCapacityMB",
                          inactive.capacity / 1024 / 1024);
  UMA_HISTOGRAM_MEMORY_MB("Cache.InactiveDeadSizeMB",
                          inactive.deadSize / 1024 / 1024);
  UMA_HISTOGRAM_MEMORY_MB("Cache.InactiveLiveSizeMB",
                          inactive.liveSize / 1024 / 1024);
  // Compute an allocation strategy.

  // We attempt various tactics in order of preference. Our first preference
  // is not to evict any objects. If we don't have enough resources, we'll
  // first try to evict dead data only. If that fails, we'll just divide the
  // resources we have evenly.

  // We always try to give the active renderers some head room in their
  // allocations so they can take memory away from an inactive renderer with
  // a large cache allocation.

  // Notice the early exit will prevent attempting less desirable tactics once
  // we've found a workable strategy.
  AllocationStrategy strategy;
  if (  // Ideally, we'd like to give the active renderers some headroom and
        // keep all our current objects.
      AttemptTactic(KEEP_CURRENT_WITH_HEADROOM, active,
                    KEEP_CURRENT, inactive, &strategy) ||
      // If we can't have that, then we first try to evict the dead objects in
      // the caches of inactive renderers.
      AttemptTactic(KEEP_CURRENT_WITH_HEADROOM, active,
                    KEEP_LIVE, inactive, &strategy) ||
      // Next, we try to keep the live objects in the active renderers (with
      // some room for new objects) and give whatever is left to the inactive
      // renderers.
      AttemptTactic(KEEP_LIVE_WITH_HEADROOM, active,
                    DIVIDE_EVENLY, inactive, &strategy) ||
      // If we've gotten this far, then we are very tight on memory. Let's try
      // to at least keep around the live objects for the active renderers.
      AttemptTactic(KEEP_LIVE, active, DIVIDE_EVENLY, inactive, &strategy) ||
      // We're basically out of memory. The best we can do is just divide up
      // what we have and soldier on.
      AttemptTactic(DIVIDE_EVENLY, active, DIVIDE_EVENLY, inactive,
                    &strategy)) {
    // Having found a workable strategy, we enact it.
    EnactStrategy(strategy);
  } else {
    // DIVIDE_EVENLY / DIVIDE_EVENLY should always succeed.
    NOTREACHED() << "Unable to find a cache allocation";
  }
}
void WebCacheManager::ReviseAllocationStrategyLater() {
  // Ask to be called back in a few milliseconds to actually recompute our
  // allocation.
  base::MessageLoop::current()->PostDelayedTask(FROM_HERE,
      base::Bind(
          &WebCacheManager::ReviseAllocationStrategy,
          weak_factory_.GetWeakPtr()),
      base::TimeDelta::FromMilliseconds(kReviseAllocationDelayMS));
}

void WebCacheManager::FindInactiveRenderers() {
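  // Demote renderers that have been idle for at least
  // kRendererInactiveThresholdMinutes from the active set to the inactive set.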
  std::set<int>::const_iterator iter = active_renderers_.begin();
  while (iter != active_renderers_.end()) {
    StatsMap::iterator elmt = stats_.find(*iter);
    DCHECK(elmt != stats_.end());
    TimeDelta idle = Time::Now() - elmt->second.access;
    if (idle >= TimeDelta::FromMinutes(kRendererInactiveThresholdMinutes)) {
      // Moved to inactive status. This invalidates our iterator.
      inactive_renderers_.insert(*iter);
      active_renderers_.erase(*iter);
      iter = active_renderers_.begin();
      continue;
    }
    ++iter;
  }
}