// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "components/web_cache/browser/web_cache_manager.h"

#include <algorithm>

#include "base/bind.h"
#include "base/compiler_specific.h"
#include "base/location.h"
#include "base/memory/singleton.h"
#include "base/metrics/histogram_macros.h"
#include "base/prefs/pref_registry_simple.h"
#include "base/prefs/pref_service.h"
#include "base/single_thread_task_runner.h"
#include "base/sys_info.h"
#include "base/thread_task_runner_handle.h"
#include "base/time/time.h"
#include "components/web_cache/common/web_cache_messages.h"
#include "content/public/browser/notification_service.h"
#include "content/public/browser/notification_types.h"
#include "content/public/browser/render_process_host.h"

using base::Time;
using base::TimeDelta;
using blink::WebCache;

namespace web_cache {

static const int kReviseAllocationDelayMS = 200;

// The default size limit of the in-memory cache is 8 MB.
static const int kDefaultMemoryCacheSize = 8 * 1024 * 1024;

namespace {

int GetDefaultCacheSize() {
  // Start off with a modest default.
  int default_cache_size = kDefaultMemoryCacheSize;

  // Check how much physical memory the OS has.
  int mem_size_mb = base::SysInfo::AmountOfPhysicalMemoryMB();
  if (mem_size_mb >= 1000)  // If we have a GB of memory, set a larger default.
    default_cache_size *= 4;
  else if (mem_size_mb >= 512)  // With 512 MB, set a slightly larger default.
    default_cache_size *= 2;

  UMA_HISTOGRAM_MEMORY_MB("Cache.MaxCacheSizeMB",
                          default_cache_size / 1024 / 1024);

  return default_cache_size;
}

}  // anonymous namespace

// static
WebCacheManager* WebCacheManager::GetInstance() {
  return base::Singleton<WebCacheManager>::get();
}

WebCacheManager::WebCacheManager()
    : global_size_limit_(GetDefaultGlobalSizeLimit()),
      weak_factory_(this) {
  registrar_.Add(this, content::NOTIFICATION_RENDERER_PROCESS_CREATED,
                 content::NotificationService::AllBrowserContextsAndSources());
  registrar_.Add(this, content::NOTIFICATION_RENDERER_PROCESS_TERMINATED,
                 content::NotificationService::AllBrowserContextsAndSources());
}

WebCacheManager::~WebCacheManager() {
}

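// Registers a newly created renderer process: it starts out in the active set
// with zeroed usage stats, and the cache budget is rebalanced shortly after.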
void WebCacheManager::Add(int renderer_id) {
  DCHECK(inactive_renderers_.count(renderer_id) == 0);

  // It is tempting to make the following DCHECK here, but it fails when a new
  // tab is created as we observe activity from that tab because the
  // RenderProcessHost is recreated and adds itself.
  //
  //   DCHECK(active_renderers_.count(renderer_id) == 0);
  //
  // However, there doesn't seem to be much harm in receiving the calls in this
  // order.

  active_renderers_.insert(renderer_id);

  RendererInfo* stats = &(stats_[renderer_id]);
  memset(stats, 0, sizeof(*stats));
  stats->access = Time::Now();

  // Revise our allocation strategy to account for this new renderer.
  ReviseAllocationStrategyLater();
}

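// Forgets everything known about a terminated renderer and frees up its share
// of the cache budget for the remaining renderers.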
void WebCacheManager::Remove(int renderer_id) {
  // Erase all knowledge of this renderer.
  active_renderers_.erase(renderer_id);
  inactive_renderers_.erase(renderer_id);
  stats_.erase(renderer_id);

  // Reallocate the resources used by this renderer.
  ReviseAllocationStrategyLater();
}

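// Notes that |renderer_id| was just used, promoting it back to the active set
// if it had previously been demoted to inactive.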
void WebCacheManager::ObserveActivity(int renderer_id) {
  StatsMap::iterator item = stats_.find(renderer_id);
  if (item == stats_.end())
    return;  // We might see stats for a renderer that has been destroyed.

  // Record activity.
  active_renderers_.insert(renderer_id);
  item->second.access = Time::Now();

  std::set<int>::iterator elmt = inactive_renderers_.find(renderer_id);
  if (elmt != inactive_renderers_.end()) {
    inactive_renderers_.erase(elmt);

    // A renderer that was inactive just became active. We should make sure
    // it is given a fair cache allocation, but we defer this for a bit in
    // order to make this function call cheap.
    ReviseAllocationStrategyLater();
  }
}

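// Records the cache usage numbers most recently reported by a renderer.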
void WebCacheManager::ObserveStats(int renderer_id,
                                   const WebCache::UsageStats& stats) {
  StatsMap::iterator entry = stats_.find(renderer_id);
  if (entry == stats_.end())
    return;  // We might see stats for a renderer that has been destroyed.

  // Record the updated stats.
  entry->second.capacity = stats.capacity;
  entry->second.deadSize = stats.deadSize;
  entry->second.liveSize = stats.liveSize;
  entry->second.maxDeadCapacity = stats.maxDeadCapacity;
  entry->second.minDeadCapacity = stats.minDeadCapacity;
}

void WebCacheManager::SetGlobalSizeLimit(size_t bytes) {
  global_size_limit_ = bytes;
  ReviseAllocationStrategyLater();
}

void WebCacheManager::ClearCache() {
  // Tell each renderer process to clear the cache.
  ClearRendererCache(active_renderers_, INSTANTLY);
  ClearRendererCache(inactive_renderers_, INSTANTLY);
}

void WebCacheManager::ClearCacheOnNavigation() {
  // Tell each renderer process to clear the cache when a tab is reloaded or
  // the user navigates to a new website.
  ClearRendererCache(active_renderers_, ON_NAVIGATION);
  ClearRendererCache(inactive_renderers_, ON_NAVIGATION);
}

void WebCacheManager::Observe(int type,
                              const content::NotificationSource& source,
                              const content::NotificationDetails& details) {
  switch (type) {
    case content::NOTIFICATION_RENDERER_PROCESS_CREATED: {
      content::RenderProcessHost* process =
          content::Source<content::RenderProcessHost>(source).ptr();
      Add(process->GetID());
      break;
    }
    case content::NOTIFICATION_RENDERER_PROCESS_TERMINATED: {
      content::RenderProcessHost* process =
          content::Source<content::RenderProcessHost>(source).ptr();
      Remove(process->GetID());
      break;
    }
    default:
      NOTREACHED();
      break;
  }
}

// static
size_t WebCacheManager::GetDefaultGlobalSizeLimit() {
  return GetDefaultCacheSize();
}

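// Sums the usage stats of every renderer in |renderers| into |stats|, skipping
// any renderer we no longer have an entry for.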
void WebCacheManager::GatherStats(const std::set<int>& renderers,
                                  WebCache::UsageStats* stats) {
  DCHECK(stats);

  memset(stats, 0, sizeof(WebCache::UsageStats));

  std::set<int>::const_iterator iter = renderers.begin();
  while (iter != renderers.end()) {
    StatsMap::iterator elmt = stats_.find(*iter);
    if (elmt != stats_.end()) {
      stats->minDeadCapacity += elmt->second.minDeadCapacity;
      stats->maxDeadCapacity += elmt->second.maxDeadCapacity;
      stats->capacity += elmt->second.capacity;
      stats->liveSize += elmt->second.liveSize;
      stats->deadSize += elmt->second.deadSize;
    }
    ++iter;
  }
}

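// Returns the number of bytes a renderer must be given to implement |tactic|
// for its current usage |stats|. The *_WITH_HEADROOM tactics reserve an extra
// 50%; for example, with liveSize = 4 MB and deadSize = 2 MB, KEEP_CURRENT
// needs 6 MB and KEEP_CURRENT_WITH_HEADROOM needs 9 MB.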
// static
size_t WebCacheManager::GetSize(AllocationTactic tactic,
                                const WebCache::UsageStats& stats) {
  switch (tactic) {
    case DIVIDE_EVENLY:
      // We aren't going to reserve any space for existing objects.
      return 0;
    case KEEP_CURRENT_WITH_HEADROOM:
      // We need enough space for our current objects, plus some headroom.
      return 3 * GetSize(KEEP_CURRENT, stats) / 2;
    case KEEP_CURRENT:
      // We need enough space to keep our current objects.
      return stats.liveSize + stats.deadSize;
    case KEEP_LIVE_WITH_HEADROOM:
      // We need enough space to keep our live resources, plus some headroom.
      return 3 * GetSize(KEEP_LIVE, stats) / 2;
    case KEEP_LIVE:
      // We need enough space to keep our live resources.
      return stats.liveSize;
    default:
      NOTREACHED() << "Unknown cache allocation tactic";
      return 0;
  }
}

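// Tries to carve global_size_limit_ into per-renderer allocations, using
// |active_tactic| for active renderers and |inactive_tactic| for inactive
// ones. Returns false if the limit cannot cover the space the two tactics
// require; otherwise fills |strategy| and returns true.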
bool WebCacheManager::AttemptTactic(
    AllocationTactic active_tactic,
    const WebCache::UsageStats& active_stats,
    AllocationTactic inactive_tactic,
    const WebCache::UsageStats& inactive_stats,
    AllocationStrategy* strategy) {
  DCHECK(strategy);

  size_t active_size = GetSize(active_tactic, active_stats);
  size_t inactive_size = GetSize(inactive_tactic, inactive_stats);

  // Give up if we don't have enough space to use this tactic.
  if (global_size_limit_ < active_size + inactive_size)
    return false;

  // Compute the unreserved space available.
  size_t total_extra = global_size_limit_ - (active_size + inactive_size);

  // The plan for the extra space is to divide it evenly among the active
  // renderers.
  size_t shares = active_renderers_.size();

  // The inactive renderers get one share of the extra memory to be divided
  // among themselves.
  size_t inactive_extra = 0;
  if (!inactive_renderers_.empty()) {
    ++shares;
    inactive_extra = total_extra / shares;
  }

  // The remaining memory is allocated to the active renderers.
  size_t active_extra = total_extra - inactive_extra;

  // Actually compute the allocations for each renderer.
  AddToStrategy(active_renderers_, active_tactic, active_extra, strategy);
  AddToStrategy(inactive_renderers_, inactive_tactic, inactive_extra, strategy);

  // We succeeded in computing an allocation strategy.
  return true;
}

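// Appends one allocation per renderer in |renderers| to |strategy|: the bytes
// required to implement |tactic| for that renderer plus an even share of
// |extra_bytes_to_allocate|.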
void WebCacheManager::AddToStrategy(const std::set<int>& renderers,
                                    AllocationTactic tactic,
                                    size_t extra_bytes_to_allocate,
                                    AllocationStrategy* strategy) {
  DCHECK(strategy);

  // Nothing to do if there are no renderers. It is common for there to be no
  // inactive renderers if there is a single active tab.
  if (renderers.empty())
    return;

  // Divide the extra memory evenly among the renderers.
  size_t extra_each = extra_bytes_to_allocate / renderers.size();

  std::set<int>::const_iterator iter = renderers.begin();
  while (iter != renderers.end()) {
    size_t cache_size = extra_each;

    // Add in the space required to implement |tactic|.
    StatsMap::iterator elmt = stats_.find(*iter);
    if (elmt != stats_.end())
      cache_size += GetSize(tactic, elmt->second);

    // Record the allocation in our strategy.
    strategy->push_back(Allocation(*iter, cache_size));
    ++iter;
  }
}

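// Pushes the computed capacities to the renderers over IPC. Dead objects may
// use at most half of a renderer's capacity (capped at 512 KB on low-end
// devices); no space is reserved exclusively for them.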
void WebCacheManager::EnactStrategy(const AllocationStrategy& strategy) {
  // Inform each render process of its cache allocation.
  AllocationStrategy::const_iterator allocation = strategy.begin();
  while (allocation != strategy.end()) {
    content::RenderProcessHost* host =
        content::RenderProcessHost::FromID(allocation->first);
    if (host) {
      // This is the capacity this renderer has been allocated.
      size_t capacity = allocation->second;

      // We don't reserve any space for dead objects in the cache. Instead, we
      // prefer to keep live objects around. There is probably some performance
      // tuning to be done here.
      size_t min_dead_capacity = 0;

      // We allow the dead objects to consume up to half of the cache capacity.
      size_t max_dead_capacity = capacity / 2;
      if (base::SysInfo::IsLowEndDevice()) {
        max_dead_capacity = std::min(static_cast<size_t>(512 * 1024),
                                     max_dead_capacity);
      }
      host->Send(new WebCacheMsg_SetCacheCapacities(min_dead_capacity,
                                                    max_dead_capacity,
                                                    capacity));
    }
    ++allocation;
  }
}

void WebCacheManager::ClearCacheForProcess(int render_process_id) {
  std::set<int> renderers;
  renderers.insert(render_process_id);
  ClearRendererCache(renderers, INSTANTLY);
}

void WebCacheManager::ClearRendererCache(
    const std::set<int>& renderers,
    WebCacheManager::ClearCacheOccasion occasion) {
  std::set<int>::const_iterator iter = renderers.begin();
  for (; iter != renderers.end(); ++iter) {
    content::RenderProcessHost* host =
        content::RenderProcessHost::FromID(*iter);
    if (host)
      host->Send(new WebCacheMsg_ClearCache(occasion == ON_NAVIGATION));
  }
}

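// Recomputes the cache budget for every renderer: demotes idle renderers,
// gathers their usage stats, records histograms, and then tries allocation
// tactics from least to most aggressive, enacting the first one that fits
// within global_size_limit_.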
void WebCacheManager::ReviseAllocationStrategy() {
  DCHECK(stats_.size() <=
         active_renderers_.size() + inactive_renderers_.size());

  // Check if renderers have gone inactive.
  FindInactiveRenderers();

  // Gather statistics
  WebCache::UsageStats active;
  WebCache::UsageStats inactive;
  GatherStats(active_renderers_, &active);
  GatherStats(inactive_renderers_, &inactive);

  UMA_HISTOGRAM_COUNTS_100("Cache.ActiveTabs", active_renderers_.size());
  UMA_HISTOGRAM_COUNTS_100("Cache.InactiveTabs", inactive_renderers_.size());
  UMA_HISTOGRAM_MEMORY_MB("Cache.ActiveCapacityMB",
                          active.capacity / 1024 / 1024);
  UMA_HISTOGRAM_MEMORY_MB("Cache.ActiveDeadSizeMB",
                          active.deadSize / 1024 / 1024);
  UMA_HISTOGRAM_MEMORY_MB("Cache.ActiveLiveSizeMB",
                          active.liveSize / 1024 / 1024);
  UMA_HISTOGRAM_MEMORY_MB("Cache.InactiveCapacityMB",
                          inactive.capacity / 1024 / 1024);
  UMA_HISTOGRAM_MEMORY_MB("Cache.InactiveDeadSizeMB",
                          inactive.deadSize / 1024 / 1024);
  UMA_HISTOGRAM_MEMORY_MB("Cache.InactiveLiveSizeMB",
                          inactive.liveSize / 1024 / 1024);

  // Compute an allocation strategy.
  //
  // We attempt various tactics in order of preference. Our first preference
  // is not to evict any objects. If we don't have enough resources, we'll
  // first try to evict dead data only. If that fails, we'll just divide the
  // resources we have evenly.
  //
  // We always try to give the active renderers some headroom in their
  // allocations so they can take memory away from an inactive renderer with
  // a large cache allocation.
  //
  // Notice the early exit will prevent attempting less desirable tactics once
  // we've found a workable strategy.
  AllocationStrategy strategy;
  if (  // Ideally, we'd like to give the active renderers some headroom and
        // keep all our current objects.
      AttemptTactic(KEEP_CURRENT_WITH_HEADROOM, active,
                    KEEP_CURRENT, inactive, &strategy) ||
      // If we can't have that, then we first try to evict the dead objects in
      // the caches of inactive renderers.
      AttemptTactic(KEEP_CURRENT_WITH_HEADROOM, active,
                    KEEP_LIVE, inactive, &strategy) ||
      // Next, we try to keep the live objects in the active renderers (with
      // some room for new objects) and give whatever is left to the inactive
      // renderers.
      AttemptTactic(KEEP_LIVE_WITH_HEADROOM, active,
                    DIVIDE_EVENLY, inactive, &strategy) ||
      // If we've gotten this far, then we are very tight on memory. Let's try
      // to at least keep around the live objects for the active renderers.
      AttemptTactic(KEEP_LIVE, active, DIVIDE_EVENLY, inactive, &strategy) ||
      // We're basically out of memory. The best we can do is just divide up
      // what we have and soldier on.
      AttemptTactic(DIVIDE_EVENLY, active, DIVIDE_EVENLY, inactive,
                    &strategy)) {
    // Having found a workable strategy, we enact it.
    EnactStrategy(strategy);
  } else {
    // DIVIDE_EVENLY / DIVIDE_EVENLY should always succeed.
    NOTREACHED() << "Unable to find a cache allocation";
  }
}

void WebCacheManager::ReviseAllocationStrategyLater() {
  // Ask to be called back in a few milliseconds to actually recompute our
  // allocation.
  base::ThreadTaskRunnerHandle::Get()->PostDelayedTask(
      FROM_HERE, base::Bind(&WebCacheManager::ReviseAllocationStrategy,
                            weak_factory_.GetWeakPtr()),
      base::TimeDelta::FromMilliseconds(kReviseAllocationDelayMS));
}

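// Demotes any renderer that has not shown activity for at least
// kRendererInactiveThresholdMinutes from the active set to the inactive set.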
void WebCacheManager::FindInactiveRenderers() {
  std::set<int>::const_iterator iter = active_renderers_.begin();
  while (iter != active_renderers_.end()) {
    StatsMap::iterator elmt = stats_.find(*iter);
    DCHECK(elmt != stats_.end());
    TimeDelta idle = Time::Now() - elmt->second.access;
    if (idle >= TimeDelta::FromMinutes(kRendererInactiveThresholdMinutes)) {
      // Moved to inactive status. This invalidates our iterator.
      inactive_renderers_.insert(*iter);
      active_renderers_.erase(*iter);
      iter = active_renderers_.begin();
      continue;
    }
    ++iter;
  }
}

}  // namespace web_cache