Revert "Reland c91b178b07b0d - Delete dead signin code (SigninGlobalError)"
net/disk_cache/memory/mem_backend_impl.cc
// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "net/disk_cache/memory/mem_backend_impl.h"

#include "base/logging.h"
#include "base/sys_info.h"
#include "net/base/net_errors.h"
#include "net/disk_cache/cache_util.h"
#include "net/disk_cache/memory/mem_entry_impl.h"

using base::Time;

namespace {

const int kDefaultInMemoryCacheSize = 10 * 1024 * 1024;
const int kCleanUpMargin = 1024 * 1024;

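// Eviction (see TrimCache() below) does not stop exactly at |max_size_|; it
// trims down to LowWaterAdjust(max_size_), i.e. about kCleanUpMargin (1 MB)
// below the limit, leaving some headroom before the next trim is needed.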
int LowWaterAdjust(int high_water) {
  if (high_water < kCleanUpMargin)
    return 0;

  return high_water - kCleanUpMargin;
}

}  // namespace

namespace disk_cache {

MemBackendImpl::MemBackendImpl(net::NetLog* net_log)
    : max_size_(0), current_size_(0), net_log_(net_log), weak_factory_(this) {
}

MemBackendImpl::~MemBackendImpl() {
  EntryMap::iterator it = entries_.begin();
  while (it != entries_.end()) {
    it->second->Doom();
    it = entries_.begin();
  }
  DCHECK(!current_size_);
}

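// A minimal usage sketch (assumed caller pattern, not part of this file):
//
//   scoped_ptr<disk_cache::Backend> cache =
//       disk_cache::MemBackendImpl::CreateBackend(0, NULL);
//
// Passing 0 for |max_bytes| keeps the default sizing computed in Init();
// CreateBackend() returns NULL if initialization fails.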
// Static.
scoped_ptr<Backend> MemBackendImpl::CreateBackend(int max_bytes,
                                                  net::NetLog* net_log) {
  scoped_ptr<MemBackendImpl> cache(new MemBackendImpl(net_log));
  cache->SetMaxSize(max_bytes);
  if (cache->Init())
    return cache.Pass();

  LOG(ERROR) << "Unable to create cache";
  return nullptr;
}

bool MemBackendImpl::Init() {
  if (max_size_)
    return true;

  int64 total_memory = base::SysInfo::AmountOfPhysicalMemory();

  if (total_memory <= 0) {
    max_size_ = kDefaultInMemoryCacheSize;
    return true;
  }

  // We want to use up to 2% of the computer's memory, with a limit of 50 MB
  // (5 * kDefaultInMemoryCacheSize), reached on systems with more than
  // 2.5 GB of RAM.
  total_memory = total_memory * 2 / 100;
  if (total_memory > kDefaultInMemoryCacheSize * 5)
    max_size_ = kDefaultInMemoryCacheSize * 5;
  else
    max_size_ = static_cast<int32>(total_memory);

  return true;
}

bool MemBackendImpl::SetMaxSize(int max_bytes) {
  static_assert(sizeof(max_bytes) == sizeof(max_size_),
                "unsupported int model");
  if (max_bytes < 0)
    return false;

  // Zero size means use the default.
  if (!max_bytes)
    return true;

  max_size_ = max_bytes;
  return true;
}

void MemBackendImpl::InternalDoomEntry(MemEntryImpl* entry) {
  // Only parent entries can be passed into this method.
  DCHECK(entry->type() == MemEntryImpl::kParentEntry);

  rankings_.Remove(entry);
  EntryMap::iterator it = entries_.find(entry->GetKey());
  if (it != entries_.end())
    entries_.erase(it);
  else
    NOTREACHED();

  entry->InternalDoom();
}

void MemBackendImpl::UpdateRank(MemEntryImpl* node) {
  rankings_.UpdateRank(node);
}

void MemBackendImpl::ModifyStorageSize(int32 old_size, int32 new_size) {
  if (old_size >= new_size)
    SubstractStorageSize(old_size - new_size);
  else
    AddStorageSize(new_size - old_size);
}

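// The per-entry size limit exposed by this backend: 1/8 of the total cache
// size (e.g. 6.25 MB under the 50 MB ceiling, 1.25 MB with the 10 MB
// default). Enforcement is presumably done by the entry code, not here.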
int MemBackendImpl::MaxFileSize() const {
  return max_size_ / 8;
}

void MemBackendImpl::InsertIntoRankingList(MemEntryImpl* entry) {
  rankings_.Insert(entry);
}

void MemBackendImpl::RemoveFromRankingList(MemEntryImpl* entry) {
  rankings_.Remove(entry);
}

net::CacheType MemBackendImpl::GetCacheType() const {
  return net::MEMORY_CACHE;
}

int32 MemBackendImpl::GetEntryCount() const {
  return static_cast<int32>(entries_.size());
}

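// The asynchronous Backend interface methods below complete synchronously:
// each one forwards to its private synchronous counterpart, ignores
// |callback|, and returns net::OK or net::ERR_FAILED directly (never
// net::ERR_IO_PENDING).
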
int MemBackendImpl::OpenEntry(const std::string& key, Entry** entry,
                              const CompletionCallback& callback) {
  if (OpenEntry(key, entry))
    return net::OK;

  return net::ERR_FAILED;
}

int MemBackendImpl::CreateEntry(const std::string& key, Entry** entry,
                                const CompletionCallback& callback) {
  if (CreateEntry(key, entry))
    return net::OK;

  return net::ERR_FAILED;
}

int MemBackendImpl::DoomEntry(const std::string& key,
                              const CompletionCallback& callback) {
  if (DoomEntry(key))
    return net::OK;

  return net::ERR_FAILED;
}

int MemBackendImpl::DoomAllEntries(const CompletionCallback& callback) {
  if (DoomAllEntries())
    return net::OK;

  return net::ERR_FAILED;
}

int MemBackendImpl::DoomEntriesBetween(const base::Time initial_time,
                                       const base::Time end_time,
                                       const CompletionCallback& callback) {
  if (DoomEntriesBetween(initial_time, end_time))
    return net::OK;

  return net::ERR_FAILED;
}

int MemBackendImpl::DoomEntriesSince(const base::Time initial_time,
                                     const CompletionCallback& callback) {
  if (DoomEntriesSince(initial_time))
    return net::OK;

  return net::ERR_FAILED;
}

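// Iteration sketch (assumed caller pattern, not part of this file):
//
//   scoped_ptr<Backend::Iterator> iter = backend->CreateIterator();
//   Entry* entry = NULL;
//   while (iter->OpenNextEntry(&entry, CompletionCallback()) == net::OK) {
//     // ... use |entry| ...
//     entry->Close();
//   }
//
// MemIterator only holds a WeakPtr to the backend, so if the backend is
// destroyed mid-iteration the next OpenNextEntry() call simply returns
// net::ERR_FAILED.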
class MemBackendImpl::MemIterator : public Backend::Iterator {
 public:
  explicit MemIterator(base::WeakPtr<MemBackendImpl> backend)
      : backend_(backend), current_(NULL) {
  }

  int OpenNextEntry(Entry** next_entry,
                    const CompletionCallback& callback) override {
    if (!backend_)
      return net::ERR_FAILED;

    MemEntryImpl* node = backend_->rankings_.GetNext(current_);
    // We should never return a child entry, so iterate until we hit a parent
    // entry.
    while (node && node->type() != MemEntryImpl::kParentEntry)
      node = backend_->rankings_.GetNext(node);
    *next_entry = node;
    current_ = node;

    if (node) {
      node->Open();
      return net::OK;
    }

    return net::ERR_FAILED;
  }

 private:
  base::WeakPtr<MemBackendImpl> backend_;
  MemEntryImpl* current_;
};

scoped_ptr<Backend::Iterator> MemBackendImpl::CreateIterator() {
  return scoped_ptr<Backend::Iterator>(
      new MemIterator(weak_factory_.GetWeakPtr()));
}

void MemBackendImpl::OnExternalCacheHit(const std::string& key) {
  EntryMap::iterator it = entries_.find(key);
  if (it != entries_.end()) {
    UpdateRank(it->second);
  }
}

bool MemBackendImpl::OpenEntry(const std::string& key, Entry** entry) {
  EntryMap::iterator it = entries_.find(key);
  if (it == entries_.end())
    return false;

  it->second->Open();

  *entry = it->second;
  return true;
}

bool MemBackendImpl::CreateEntry(const std::string& key, Entry** entry) {
  EntryMap::iterator it = entries_.find(key);
  if (it != entries_.end())
    return false;

  MemEntryImpl* cache_entry = new MemEntryImpl(this);
  if (!cache_entry->CreateEntry(key, net_log_)) {
    delete cache_entry;
    return false;
  }

  rankings_.Insert(cache_entry);
  entries_[key] = cache_entry;

  *entry = cache_entry;
  return true;
}

bool MemBackendImpl::DoomEntry(const std::string& key) {
  Entry* entry;
  if (!OpenEntry(key, &entry))
    return false;

  entry->Doom();
  entry->Close();
  return true;
}

bool MemBackendImpl::DoomAllEntries() {
  TrimCache(true);
  return true;
}

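// Dooms every parent entry whose last-used time lies in the half-open range
// [initial_time, end_time). A null |end_time| means "no upper bound" and is
// handled by DoomEntriesSince().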
bool MemBackendImpl::DoomEntriesBetween(const Time initial_time,
                                        const Time end_time) {
  if (end_time.is_null())
    return DoomEntriesSince(initial_time);

  DCHECK(end_time >= initial_time);

  MemEntryImpl* node = rankings_.GetNext(NULL);
  // Last valid entry before |node|.
  // Note that entries after |node| may become invalid while |node| is being
  // doomed, if they are its child entries. A parent node is guaranteed to
  // appear before its children in the ranking list (see
  // InternalReadSparseData and InternalWriteSparseData).
  MemEntryImpl* last_valid = NULL;

  // |rankings_| is ordered by last-used time, so this descends through the
  // cache, dooming items last used before |end_time|, and stops once it
  // reaches an item last used before |initial_time|.
  while (node) {
    if (node->GetLastUsed() < initial_time)
      break;

    if (node->GetLastUsed() < end_time)
      node->Doom();
    else
      last_valid = node;
    node = rankings_.GetNext(last_valid);
  }

  return true;
}

bool MemBackendImpl::DoomEntriesSince(const Time initial_time) {
  for (;;) {
    // Get the entry in the front.
    Entry* entry = rankings_.GetNext(NULL);

    // Break the loop when there are no more entries or the entry is too old.
    if (!entry || entry->GetLastUsed() < initial_time)
      return true;
    entry->Doom();
  }
}

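// Evicts entries from the least-recently-used end of |rankings_| until
// |current_size_| drops to the target: zero when |empty| is true, otherwise
// LowWaterAdjust(max_size_). Entries that are currently in use are skipped
// unless |empty| is set.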
void MemBackendImpl::TrimCache(bool empty) {
  MemEntryImpl* next = rankings_.GetPrev(NULL);
  if (!next)
    return;

  int target_size = empty ? 0 : LowWaterAdjust(max_size_);
  while (current_size_ > target_size && next) {
    MemEntryImpl* node = next;
    next = rankings_.GetPrev(next);
    if (!node->InUse() || empty) {
      node->Doom();
    }
  }

  return;
}

void MemBackendImpl::AddStorageSize(int32 bytes) {
  current_size_ += bytes;
  DCHECK_GE(current_size_, 0);

  if (current_size_ > max_size_)
    TrimCache(false);
}

void MemBackendImpl::SubstractStorageSize(int32 bytes) {
  current_size_ -= bytes;
  DCHECK_GE(current_size_, 0);
}

}  // namespace disk_cache