// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "net/disk_cache/memory/mem_backend_impl.h"

#include "base/logging.h"
#include "base/sys_info.h"
#include "net/base/net_errors.h"
#include "net/disk_cache/cache_util.h"
#include "net/disk_cache/memory/mem_entry_impl.h"
namespace {

// Default capacity of the in-memory cache when physical-memory sizing is
// unavailable: 10 MB.
const int kDefaultInMemoryCacheSize = 10 * 1024 * 1024;
// How far below the high-water mark eviction drives the cache size, so that
// trimming does not run on every single insertion.
const int kCleanUpMargin = 1024 * 1024;

// Returns the eviction target for a cache limited to |high_water| bytes:
// kCleanUpMargin below the limit, clamped at zero for very small limits.
int LowWaterAdjust(int high_water) {
  if (high_water < kCleanUpMargin)
    return 0;
  return high_water - kCleanUpMargin;
}

}  // namespace
29 namespace disk_cache
{
31 MemBackendImpl::MemBackendImpl(net::NetLog
* net_log
)
32 : max_size_(0), current_size_(0), net_log_(net_log
), weak_factory_(this) {
35 MemBackendImpl::~MemBackendImpl() {
36 EntryMap::iterator it
= entries_
.begin();
37 while (it
!= entries_
.end()) {
39 it
= entries_
.begin();
41 DCHECK(!current_size_
);
45 scoped_ptr
<Backend
> MemBackendImpl::CreateBackend(int max_bytes
,
46 net::NetLog
* net_log
) {
47 scoped_ptr
<MemBackendImpl
> cache(new MemBackendImpl(net_log
));
48 cache
->SetMaxSize(max_bytes
);
52 LOG(ERROR
) << "Unable to create cache";
56 bool MemBackendImpl::Init() {
60 int64 total_memory
= base::SysInfo::AmountOfPhysicalMemory();
62 if (total_memory
<= 0) {
63 max_size_
= kDefaultInMemoryCacheSize
;
67 // We want to use up to 2% of the computer's memory, with a limit of 50 MB,
68 // reached on systemd with more than 2.5 GB of RAM.
69 total_memory
= total_memory
* 2 / 100;
70 if (total_memory
> kDefaultInMemoryCacheSize
* 5)
71 max_size_
= kDefaultInMemoryCacheSize
* 5;
73 max_size_
= static_cast<int32
>(total_memory
);
78 bool MemBackendImpl::SetMaxSize(int max_bytes
) {
79 COMPILE_ASSERT(sizeof(max_bytes
) == sizeof(max_size_
), unsupported_int_model
);
83 // Zero size means use the default.
87 max_size_
= max_bytes
;
91 void MemBackendImpl::InternalDoomEntry(MemEntryImpl
* entry
) {
92 // Only parent entries can be passed into this method.
93 DCHECK(entry
->type() == MemEntryImpl::kParentEntry
);
95 rankings_
.Remove(entry
);
96 EntryMap::iterator it
= entries_
.find(entry
->GetKey());
97 if (it
!= entries_
.end())
102 entry
->InternalDoom();
105 void MemBackendImpl::UpdateRank(MemEntryImpl
* node
) {
106 rankings_
.UpdateRank(node
);
109 void MemBackendImpl::ModifyStorageSize(int32 old_size
, int32 new_size
) {
110 if (old_size
>= new_size
)
111 SubstractStorageSize(old_size
- new_size
);
113 AddStorageSize(new_size
- old_size
);
116 int MemBackendImpl::MaxFileSize() const {
117 return max_size_
/ 8;
120 void MemBackendImpl::InsertIntoRankingList(MemEntryImpl
* entry
) {
121 rankings_
.Insert(entry
);
124 void MemBackendImpl::RemoveFromRankingList(MemEntryImpl
* entry
) {
125 rankings_
.Remove(entry
);
128 net::CacheType
MemBackendImpl::GetCacheType() const {
129 return net::MEMORY_CACHE
;
132 int32
MemBackendImpl::GetEntryCount() const {
133 return static_cast<int32
>(entries_
.size());
136 int MemBackendImpl::OpenEntry(const std::string
& key
, Entry
** entry
,
137 const CompletionCallback
& callback
) {
138 if (OpenEntry(key
, entry
))
141 return net::ERR_FAILED
;
144 int MemBackendImpl::CreateEntry(const std::string
& key
, Entry
** entry
,
145 const CompletionCallback
& callback
) {
146 if (CreateEntry(key
, entry
))
149 return net::ERR_FAILED
;
152 int MemBackendImpl::DoomEntry(const std::string
& key
,
153 const CompletionCallback
& callback
) {
157 return net::ERR_FAILED
;
160 int MemBackendImpl::DoomAllEntries(const CompletionCallback
& callback
) {
161 if (DoomAllEntries())
164 return net::ERR_FAILED
;
167 int MemBackendImpl::DoomEntriesBetween(const base::Time initial_time
,
168 const base::Time end_time
,
169 const CompletionCallback
& callback
) {
170 if (DoomEntriesBetween(initial_time
, end_time
))
173 return net::ERR_FAILED
;
176 int MemBackendImpl::DoomEntriesSince(const base::Time initial_time
,
177 const CompletionCallback
& callback
) {
178 if (DoomEntriesSince(initial_time
))
181 return net::ERR_FAILED
;
184 class MemBackendImpl::MemIterator
: public Backend::Iterator
{
186 explicit MemIterator(base::WeakPtr
<MemBackendImpl
> backend
)
187 : backend_(backend
), current_(NULL
) {
190 int OpenNextEntry(Entry
** next_entry
,
191 const CompletionCallback
& callback
) override
{
193 return net::ERR_FAILED
;
195 MemEntryImpl
* node
= backend_
->rankings_
.GetNext(current_
);
196 // We should never return a child entry so iterate until we hit a parent
198 while (node
&& node
->type() != MemEntryImpl::kParentEntry
)
199 node
= backend_
->rankings_
.GetNext(node
);
207 return net::ERR_FAILED
;
211 base::WeakPtr
<MemBackendImpl
> backend_
;
212 MemEntryImpl
* current_
;
215 scoped_ptr
<Backend::Iterator
> MemBackendImpl::CreateIterator() {
216 return scoped_ptr
<Backend::Iterator
>(
217 new MemIterator(weak_factory_
.GetWeakPtr()));
220 void MemBackendImpl::OnExternalCacheHit(const std::string
& key
) {
221 EntryMap::iterator it
= entries_
.find(key
);
222 if (it
!= entries_
.end()) {
223 UpdateRank(it
->second
);
227 bool MemBackendImpl::OpenEntry(const std::string
& key
, Entry
** entry
) {
228 EntryMap::iterator it
= entries_
.find(key
);
229 if (it
== entries_
.end())
238 bool MemBackendImpl::CreateEntry(const std::string
& key
, Entry
** entry
) {
239 EntryMap::iterator it
= entries_
.find(key
);
240 if (it
!= entries_
.end())
243 MemEntryImpl
* cache_entry
= new MemEntryImpl(this);
244 if (!cache_entry
->CreateEntry(key
, net_log_
)) {
249 rankings_
.Insert(cache_entry
);
250 entries_
[key
] = cache_entry
;
252 *entry
= cache_entry
;
256 bool MemBackendImpl::DoomEntry(const std::string
& key
) {
258 if (!OpenEntry(key
, &entry
))
266 bool MemBackendImpl::DoomAllEntries() {
271 bool MemBackendImpl::DoomEntriesBetween(const Time initial_time
,
272 const Time end_time
) {
273 if (end_time
.is_null())
274 return DoomEntriesSince(initial_time
);
276 DCHECK(end_time
>= initial_time
);
278 MemEntryImpl
* node
= rankings_
.GetNext(NULL
);
279 // Last valid entry before |node|.
280 // Note, that entries after |node| may become invalid during |node| doom in
281 // case when they are child entries of it. It is guaranteed that
282 // parent node will go prior to it childs in ranking list (see
283 // InternalReadSparseData and InternalWriteSparseData).
284 MemEntryImpl
* last_valid
= NULL
;
286 // rankings_ is ordered by last used, this will descend through the cache
287 // and start dooming items before the end_time, and will stop once it reaches
288 // an item used before the initial time.
290 if (node
->GetLastUsed() < initial_time
)
293 if (node
->GetLastUsed() < end_time
)
297 node
= rankings_
.GetNext(last_valid
);
303 bool MemBackendImpl::DoomEntriesSince(const Time initial_time
) {
305 // Get the entry in the front.
306 Entry
* entry
= rankings_
.GetNext(NULL
);
308 // Break the loop when there are no more entries or the entry is too old.
309 if (!entry
|| entry
->GetLastUsed() < initial_time
)
315 void MemBackendImpl::TrimCache(bool empty
) {
316 MemEntryImpl
* next
= rankings_
.GetPrev(NULL
);
320 int target_size
= empty
? 0 : LowWaterAdjust(max_size_
);
321 while (current_size_
> target_size
&& next
) {
322 MemEntryImpl
* node
= next
;
323 next
= rankings_
.GetPrev(next
);
324 if (!node
->InUse() || empty
) {
332 void MemBackendImpl::AddStorageSize(int32 bytes
) {
333 current_size_
+= bytes
;
334 DCHECK_GE(current_size_
, 0);
336 if (current_size_
> max_size_
)
340 void MemBackendImpl::SubstractStorageSize(int32 bytes
) {
341 current_size_
-= bytes
;
342 DCHECK_GE(current_size_
, 0);
345 } // namespace disk_cache