// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "net/disk_cache/blockfile/rankings.h"

#include "net/disk_cache/blockfile/backend_impl.h"
#include "net/disk_cache/blockfile/disk_format.h"
#include "net/disk_cache/blockfile/entry_impl.h"
#include "net/disk_cache/blockfile/errors.h"
#include "net/disk_cache/blockfile/histogram_macros.h"
#include "net/disk_cache/blockfile/stress_support.h"
// Provide a BackendImpl object to macros from histogram_macros.h.
#define CACHE_UMA_BACKEND_IMPL_OBJ backend_

using base::TimeTicks;

namespace disk_cache {

// This is used by crash_cache.exe to generate unit test files.
NET_EXPORT_PRIVATE RankCrashes g_rankings_crash = NO_CRASH;

}  // namespace disk_cache

namespace {

// Operations that can be recorded in the transaction slot of the file header.
// Zero means "no operation in progress", so INSERT starts at 1.
enum Operation {
  INSERT = 1,
  REMOVE
};

// This class provides a simple lock for the LRU list of rankings. Whenever an
// entry is to be inserted or removed from the list, a transaction object
// should be created to keep track of the operation. If the process crashes
// before finishing the operation, the transaction record (stored as part of
// the user data on the file header) can be used to finish the operation.
class Transaction {
 public:
  // addr is the cache address of the node being inserted or removed. We want
  // to avoid having the compiler optimize when user_data is read or written,
  // because it is the basis of the crash detection. Maybe volatile is not
  // enough for that, but it should be a good hint.
  Transaction(volatile disk_cache::LruData* data, disk_cache::Addr addr,
              Operation op, int list);
  ~Transaction();

 private:
  volatile disk_cache::LruData* data_;

  DISALLOW_COPY_AND_ASSIGN(Transaction);
};
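
// A minimal usage sketch (this is what Insert() and Remove() below do): the
// transaction is created on the stack just before the list links are touched,
// and destroying it marks the operation as complete:
//
//   Transaction lock(control_data_, node->address(), INSERT, list);
//   ... update and store the list links ...
//   // |lock| goes out of scope here and clears the transaction record.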

Transaction::Transaction(volatile disk_cache::LruData* data,
                         disk_cache::Addr addr, Operation op, int list)
    : data_(data) {
  DCHECK(!data_->transaction);
  DCHECK(addr.is_initialized());
  data_->operation = op;
  data_->operation_list = list;
  data_->transaction = addr.value();
}

Transaction::~Transaction() {
  DCHECK(data_->transaction);
  data_->transaction = 0;
  data_->operation = 0;
  data_->operation_list = 0;
}
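
// If the process dies between the constructor and the destructor, the file
// header keeps a non-zero |transaction| value. Rankings::Init() checks for
// that and calls CompleteTransaction() to finish or undo the interrupted
// operation.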

// Code locations that can generate crashes.
enum CrashLocation {
  ON_INSERT_1, ON_INSERT_2, ON_INSERT_3, ON_INSERT_4, ON_REMOVE_1, ON_REMOVE_2,
  ON_REMOVE_3, ON_REMOVE_4, ON_REMOVE_5, ON_REMOVE_6, ON_REMOVE_7, ON_REMOVE_8
};
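
// Each value maps to a GenerateCrash(ON_*) call placed at the corresponding
// step of Insert() and Remove() below.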

void TerminateSelf() {
#if defined(OS_WIN)
  // Windows does more work on _exit() than we would like, so we force exit.
  TerminateProcess(GetCurrentProcess(), 0);
#elif defined(OS_POSIX)
  // On POSIX, _exit() will terminate the process with minimal cleanup, and it
  // is cleaner than killing.
  _exit(0);
#endif
}

// Generates a crash on debug builds, according to the value of
// g_rankings_crash. This is used by crash_cache.exe to generate unit-test
// files.
void GenerateCrash(CrashLocation location) {
#ifndef NDEBUG
  if (disk_cache::NO_CRASH == disk_cache::g_rankings_crash)
    return;
  switch (location) {
    case ON_INSERT_1:
      switch (disk_cache::g_rankings_crash) {
        case disk_cache::INSERT_ONE_1:
        case disk_cache::INSERT_LOAD_1:
          TerminateSelf();
        default:
          break;
      }
      break;
    case ON_INSERT_2:
      if (disk_cache::INSERT_EMPTY_1 == disk_cache::g_rankings_crash)
        TerminateSelf();
      break;
    case ON_INSERT_3:
      switch (disk_cache::g_rankings_crash) {
        case disk_cache::INSERT_EMPTY_2:
        case disk_cache::INSERT_ONE_2:
        case disk_cache::INSERT_LOAD_2:
          TerminateSelf();
        default:
          break;
      }
      break;
    case ON_INSERT_4:
      switch (disk_cache::g_rankings_crash) {
        case disk_cache::INSERT_EMPTY_3:
        case disk_cache::INSERT_ONE_3:
          TerminateSelf();
        default:
          break;
      }
      break;
    case ON_REMOVE_1:
      switch (disk_cache::g_rankings_crash) {
        case disk_cache::REMOVE_ONE_1:
        case disk_cache::REMOVE_HEAD_1:
        case disk_cache::REMOVE_TAIL_1:
        case disk_cache::REMOVE_LOAD_1:
          TerminateSelf();
        default:
          break;
      }
      break;
    case ON_REMOVE_2:
      if (disk_cache::REMOVE_ONE_2 == disk_cache::g_rankings_crash)
        TerminateSelf();
      break;
    case ON_REMOVE_3:
      if (disk_cache::REMOVE_ONE_3 == disk_cache::g_rankings_crash)
        TerminateSelf();
      break;
    case ON_REMOVE_4:
      if (disk_cache::REMOVE_HEAD_2 == disk_cache::g_rankings_crash)
        TerminateSelf();
      break;
    case ON_REMOVE_5:
      if (disk_cache::REMOVE_TAIL_2 == disk_cache::g_rankings_crash)
        TerminateSelf();
      break;
    case ON_REMOVE_6:
      if (disk_cache::REMOVE_TAIL_3 == disk_cache::g_rankings_crash)
        TerminateSelf();
      break;
    case ON_REMOVE_7:
      switch (disk_cache::g_rankings_crash) {
        case disk_cache::REMOVE_ONE_4:
        case disk_cache::REMOVE_LOAD_2:
        case disk_cache::REMOVE_HEAD_3:
          TerminateSelf();
        default:
          break;
      }
      break;
    case ON_REMOVE_8:
      switch (disk_cache::g_rankings_crash) {
        case disk_cache::REMOVE_HEAD_4:
        case disk_cache::REMOVE_LOAD_3:
          TerminateSelf();
        default:
          break;
      }
      break;
  }
#endif  // NDEBUG
}

// Update the timestamp fields of |node|.
void UpdateTimes(disk_cache::CacheRankingsBlock* node, bool modified) {
  base::Time now = base::Time::Now();
  node->Data()->last_used = now.ToInternalValue();
  if (modified)
    node->Data()->last_modified = now.ToInternalValue();
}

}  // namespace

namespace disk_cache {

Rankings::ScopedRankingsBlock::ScopedRankingsBlock() : rankings_(NULL) {}

Rankings::ScopedRankingsBlock::ScopedRankingsBlock(Rankings* rankings)
    : rankings_(rankings) {}

Rankings::ScopedRankingsBlock::ScopedRankingsBlock(
    Rankings* rankings, CacheRankingsBlock* node)
    : scoped_ptr<CacheRankingsBlock>(node), rankings_(rankings) {}

Rankings::Iterator::Iterator() {
  memset(this, 0, sizeof(Iterator));
}

void Rankings::Iterator::Reset() {
  // Release any rankings blocks still tracked by this iterator before
  // clearing the struct.
  for (int i = 0; i < 3; i++)
    ScopedRankingsBlock(my_rankings, nodes[i]);
  memset(this, 0, sizeof(Iterator));
}

Rankings::Rankings() : init_(false) {}

Rankings::~Rankings() {}

bool Rankings::Init(BackendImpl* backend, bool count_lists) {
  backend_ = backend;
  control_data_ = backend_->GetLruData();
  count_lists_ = count_lists;

  ReadHeads();
  ReadTails();

  // Finish any operation interrupted by a crash.
  if (control_data_->transaction)
    CompleteTransaction();

  init_ = true;
  return true;
}

void Rankings::Reset() {
  init_ = false;
  for (int i = 0; i < LAST_ELEMENT; i++) {
    heads_[i].set_value(0);
    tails_[i].set_value(0);
  }
  control_data_ = NULL;
}

void Rankings::Insert(CacheRankingsBlock* node, bool modified, List list) {
  Trace("Insert 0x%x l %d", node->address().value(), list);
  DCHECK(node->HasData());
  Addr& my_head = heads_[list];
  Addr& my_tail = tails_[list];
  Transaction lock(control_data_, node->address(), INSERT, list);
  CacheRankingsBlock head(backend_->File(my_head), my_head);
  if (my_head.is_initialized()) {
    if (!GetRanking(&head))
      return;

    if (head.Data()->prev != my_head.value() &&  // Normal path.
        head.Data()->prev != node->address().value()) {  // FinishInsert().
      backend_->CriticalError(ERR_INVALID_LINKS);
      return;
    }

    head.Data()->prev = node->address().value();
    head.Store();
    GenerateCrash(ON_INSERT_1);
    UpdateIterators(&head);
  }

  node->Data()->next = my_head.value();
  node->Data()->prev = node->address().value();
  my_head.set_value(node->address().value());

  if (!my_tail.is_initialized() ||
      my_tail.value() == node->address().value()) {
    my_tail.set_value(node->address().value());
    node->Data()->next = my_tail.value();
    WriteTail(list);
    GenerateCrash(ON_INSERT_2);
  }

  UpdateTimes(node, modified);
  node->Store();
  GenerateCrash(ON_INSERT_3);

  // The last thing to do is move our head to point to a node already stored.
  WriteHead(list);
  IncrementCounter(list);
  GenerateCrash(ON_INSERT_4);
  backend_->FlushIndex();
}

// If a, b and r are elements on the list, and we want to remove r, the
// possible states for the objects if a crash happens are (where y(x, z) means
// for object y, prev is x and next is z):
//
// A. Remove the only element:
//    1. r(r, r), head(r), tail(r)                    initial state
//    2. r(r, r), head(0), tail(r)                    WriteHead()
//    3. r(r, r), head(0), tail(0)                    WriteTail()
//    4. r(0, 0), head(0), tail(0)                    next.Store()
//
// B. Remove a random element:
//    1. a(x, r), r(a, b), b(r, y), head(x), tail(y)  initial state
//    2. a(x, r), r(a, b), b(a, y), head(x), tail(y)  next.Store()
//    3. a(x, b), r(a, b), b(a, y), head(x), tail(y)  prev.Store()
//    4. a(x, b), r(0, 0), b(a, y), head(x), tail(y)  node.Store()
//
// C. Remove the head:
//    1. r(r, b), b(r, y), head(r), tail(y)           initial state
//    2. r(r, b), b(r, y), head(b), tail(y)           WriteHead()
//    3. r(r, b), b(b, y), head(b), tail(y)           next.Store()
//    4. r(0, 0), b(b, y), head(b), tail(y)           prev.Store()
//
// D. Remove the tail:
//    1. a(x, r), r(a, r), head(x), tail(r)           initial state
//    2. a(x, r), r(a, r), head(x), tail(a)           WriteTail()
//    3. a(x, a), r(a, r), head(x), tail(a)           prev.Store()
//    4. a(x, a), r(0, 0), head(x), tail(a)           next.Store()
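//
// After a crash the list is left in one of the intermediate states above;
// CompleteTransaction() reads the recorded operation and dispatches to
// RevertRemove(), which restores the links to a consistent state.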

void Rankings::Remove(CacheRankingsBlock* node, List list, bool strict) {
  Trace("Remove 0x%x (0x%x 0x%x) l %d", node->address().value(),
        node->Data()->next, node->Data()->prev, list);
  DCHECK(node->HasData());
  if (strict)
    InvalidateIterators(node);

  Addr next_addr(node->Data()->next);
  Addr prev_addr(node->Data()->prev);
  if (!next_addr.is_initialized() || next_addr.is_separate_file() ||
      !prev_addr.is_initialized() || prev_addr.is_separate_file()) {
    if (next_addr.is_initialized() || prev_addr.is_initialized()) {
      LOG(ERROR) << "Invalid rankings info.";
    }
    return;
  }

  CacheRankingsBlock next(backend_->File(next_addr), next_addr);
  CacheRankingsBlock prev(backend_->File(prev_addr), prev_addr);
  if (!GetRanking(&next) || !GetRanking(&prev))
    return;

  if (!CheckLinks(node, &prev, &next, &list))
    return;

  Transaction lock(control_data_, node->address(), REMOVE, list);
  prev.Data()->next = next.address().value();
  next.Data()->prev = prev.address().value();
  GenerateCrash(ON_REMOVE_1);

  CacheAddr node_value = node->address().value();
  Addr& my_head = heads_[list];
  Addr& my_tail = tails_[list];
  if (node_value == my_head.value() || node_value == my_tail.value()) {
    if (my_head.value() == my_tail.value()) {
      my_head.set_value(0);
      my_tail.set_value(0);

      WriteHead(list);
      GenerateCrash(ON_REMOVE_2);
      WriteTail(list);
      GenerateCrash(ON_REMOVE_3);
    } else if (node_value == my_head.value()) {
      my_head.set_value(next.address().value());
      next.Data()->prev = next.address().value();

      WriteHead(list);
      GenerateCrash(ON_REMOVE_4);
    } else if (node_value == my_tail.value()) {
      my_tail.set_value(prev.address().value());
      prev.Data()->next = prev.address().value();

      WriteTail(list);
      GenerateCrash(ON_REMOVE_5);

      // Store the new tail to make sure we can undo the operation if we crash.
      prev.Store();
      GenerateCrash(ON_REMOVE_6);
    }
  }

  // Nodes out of the list can be identified by invalid pointers.
  node->Data()->next = 0;
  node->Data()->prev = 0;

  // The last thing to get to disk is the node itself, so before that there is
  // enough info to recover.
  next.Store();
  GenerateCrash(ON_REMOVE_7);
  prev.Store();
  GenerateCrash(ON_REMOVE_8);
  node->Store();
  DecrementCounter(list);
  UpdateIterators(&next);
  UpdateIterators(&prev);
  backend_->FlushIndex();
}

// A crash in between Remove and Insert will lead to a dirty entry not on the
// list. We want to avoid that case as much as we can (as while waiting for
// IO), but the net effect is just an assert on debug when attempting to
// remove the entry. Otherwise we'll need reentrant transactions, which would
// be overkill.
void Rankings::UpdateRank(CacheRankingsBlock* node, bool modified, List list) {
  Addr& my_head = heads_[list];
  if (my_head.value() == node->address().value()) {
    UpdateTimes(node, modified);
    node->set_modified();
    return;
  }

  TimeTicks start = TimeTicks::Now();
  Remove(node, list, true);
  Insert(node, modified, list);
  CACHE_UMA(AGE_MS, "UpdateRank", 0, start);
}

CacheRankingsBlock* Rankings::GetNext(CacheRankingsBlock* node, List list) {
  ScopedRankingsBlock next(this);
  if (!node) {
    Addr& my_head = heads_[list];
    if (!my_head.is_initialized())
      return NULL;
    next.reset(new CacheRankingsBlock(backend_->File(my_head), my_head));
  } else {
    if (!node->HasData())
      return NULL;
    Addr& my_tail = tails_[list];
    if (!my_tail.is_initialized())
      return NULL;
    if (my_tail.value() == node->address().value())
      return NULL;
    Addr address(node->Data()->next);
    if (address.value() == node->address().value())
      return NULL;  // Another tail? fail it.
    next.reset(new CacheRankingsBlock(backend_->File(address), address));
  }

  TrackRankingsBlock(next.get(), true);

  if (!GetRanking(next.get()))
    return NULL;

  ConvertToLongLived(next.get());
  if (node && !CheckSingleLink(node, next.get()))
    return NULL;

  return next.release();
}

CacheRankingsBlock* Rankings::GetPrev(CacheRankingsBlock* node, List list) {
  ScopedRankingsBlock prev(this);
  if (!node) {
    Addr& my_tail = tails_[list];
    if (!my_tail.is_initialized())
      return NULL;
    prev.reset(new CacheRankingsBlock(backend_->File(my_tail), my_tail));
  } else {
    if (!node->HasData())
      return NULL;
    Addr& my_head = heads_[list];
    if (!my_head.is_initialized())
      return NULL;
    if (my_head.value() == node->address().value())
      return NULL;
    Addr address(node->Data()->prev);
    if (address.value() == node->address().value())
      return NULL;  // Another head? fail it.
    prev.reset(new CacheRankingsBlock(backend_->File(address), address));
  }

  TrackRankingsBlock(prev.get(), true);

  if (!GetRanking(prev.get()))
    return NULL;

  ConvertToLongLived(prev.get());
  if (node && !CheckSingleLink(prev.get(), node))
    return NULL;

  return prev.release();
}

void Rankings::FreeRankingsBlock(CacheRankingsBlock* node) {
  TrackRankingsBlock(node, false);
}

void Rankings::TrackRankingsBlock(CacheRankingsBlock* node,
                                  bool start_tracking) {
  IteratorPair current(node->address().value(), node);

  if (start_tracking)
    iterators_.push_back(current);
  else
    iterators_.remove(current);
}

int Rankings::SelfCheck() {
  int total = 0;
  int error = 0;
  for (int i = 0; i < LAST_ELEMENT; i++) {
    int partial = CheckList(static_cast<List>(i));
    if (partial < 0 && !error)
      error = partial;
    else if (partial > 0)
      total += partial;
  }

  // Return the first error found, or the total number of items otherwise.
  return error ? error : total;
}

bool Rankings::SanityCheck(CacheRankingsBlock* node, bool from_list) const {
  if (!node->VerifyHash())
    return false;

  const RankingsNode* data = node->Data();

  if ((!data->next && data->prev) || (data->next && !data->prev))
    return false;

  // Both pointers on zero is a node out of the list.
  if (!data->next && !data->prev && from_list)
    return false;

  List list = NO_USE;  // Initialize it to something.
  if ((node->address().value() == data->prev) && !IsHead(data->prev, &list))
    return false;

  if ((node->address().value() == data->next) && !IsTail(data->next, &list))
    return false;

  if (!data->next && !data->prev)
    return true;

  Addr next_addr(data->next);
  Addr prev_addr(data->prev);
  if (!next_addr.SanityCheckV2() || next_addr.file_type() != RANKINGS ||
      !prev_addr.SanityCheckV2() || prev_addr.file_type() != RANKINGS)
    return false;

  return true;
}

bool Rankings::DataSanityCheck(CacheRankingsBlock* node, bool from_list) const {
  const RankingsNode* data = node->Data();

  // It may have never been inserted.
  if (from_list && (!data->last_used || !data->last_modified))
    return false;

  return true;
}

void Rankings::SetContents(CacheRankingsBlock* node, CacheAddr address) {
  node->Data()->contents = address;
  node->Store();
}

void Rankings::ReadHeads() {
  for (int i = 0; i < LAST_ELEMENT; i++)
    heads_[i] = Addr(control_data_->heads[i]);
}

void Rankings::ReadTails() {
  for (int i = 0; i < LAST_ELEMENT; i++)
    tails_[i] = Addr(control_data_->tails[i]);
}

void Rankings::WriteHead(List list) {
  control_data_->heads[list] = heads_[list].value();
}

void Rankings::WriteTail(List list) {
  control_data_->tails[list] = tails_[list].value();
}

bool Rankings::GetRanking(CacheRankingsBlock* rankings) {
  if (!rankings->address().is_initialized())
    return false;

  TimeTicks start = TimeTicks::Now();
  if (!rankings->Load())
    return false;

  if (!SanityCheck(rankings, true)) {
    backend_->CriticalError(ERR_INVALID_LINKS);
    return false;
  }

  backend_->OnEvent(Stats::OPEN_RANKINGS);

  // Note that if the cache is in read_only mode, open entries are not marked
  // as dirty, except when an entry is doomed. We have to look for open entries.
  if (!backend_->read_only() && !rankings->Data()->dirty)
    return true;

  EntryImpl* entry = backend_->GetOpenEntry(rankings);
  if (!entry) {
    if (backend_->read_only())
      return true;

    // We cannot trust this entry, but we cannot initiate a cleanup from this
    // point (we may be in the middle of a cleanup already). The entry will be
    // deleted when detected from a regular open/create path.
    rankings->Data()->dirty = backend_->GetCurrentEntryId() - 1;
    if (!rankings->Data()->dirty)
      rankings->Data()->dirty--;
    return true;
  }

  // Note that we should not leave this module without deleting rankings first.
  rankings->SetData(entry->rankings()->Data());

  CACHE_UMA(AGE_MS, "GetRankings", 0, start);
  return true;
}

void Rankings::ConvertToLongLived(CacheRankingsBlock* rankings) {
  if (rankings->own_data())
    return;

  // We cannot return a shared node because we are not keeping a reference
  // to the entry that owns the buffer. Make this node a copy of the one that
  // we have, and let the iterator logic update it when the entry changes.
  CacheRankingsBlock temp(NULL, Addr(0));
  *temp.Data() = *rankings->Data();
  rankings->StopSharingData();
  *rankings->Data() = *temp.Data();
}

void Rankings::CompleteTransaction() {
  Addr node_addr(static_cast<CacheAddr>(control_data_->transaction));
  if (!node_addr.is_initialized() || node_addr.is_separate_file()) {
    LOG(ERROR) << "Invalid rankings info.";
    return;
  }

  Trace("CompleteTransaction 0x%x", node_addr.value());

  CacheRankingsBlock node(backend_->File(node_addr), node_addr);
  if (!node.Load())
    return;

  Addr& my_head = heads_[control_data_->operation_list];
  Addr& my_tail = tails_[control_data_->operation_list];

  // We want to leave the node inside the list. The entry must be marked as
  // dirty, and will be removed later. Otherwise, we'll get assertions when
  // attempting to remove the dirty entry.
  if (INSERT == control_data_->operation) {
    Trace("FinishInsert h:0x%x t:0x%x", my_head.value(), my_tail.value());
    FinishInsert(&node);
  } else if (REMOVE == control_data_->operation) {
    Trace("RevertRemove h:0x%x t:0x%x", my_head.value(), my_tail.value());
    RevertRemove(&node);
  } else {
    LOG(ERROR) << "Invalid operation to recover.";
  }
}

void Rankings::FinishInsert(CacheRankingsBlock* node) {
  control_data_->transaction = 0;
  control_data_->operation = 0;
  Addr& my_head = heads_[control_data_->operation_list];
  Addr& my_tail = tails_[control_data_->operation_list];
  if (my_head.value() != node->address().value()) {
    if (my_tail.value() == node->address().value()) {
      // This part will be skipped by the logic of Insert.
      node->Data()->next = my_tail.value();
    }
    Insert(node, true, static_cast<List>(control_data_->operation_list));
  }

  // Tell the backend about this entry.
  backend_->RecoveredEntry(node);
}

void Rankings::RevertRemove(CacheRankingsBlock* node) {
  Addr next_addr(node->Data()->next);
  Addr prev_addr(node->Data()->prev);
  if (!next_addr.is_initialized() || !prev_addr.is_initialized()) {
    // The operation actually finished. Nothing to do.
    control_data_->transaction = 0;
    return;
  }
  if (next_addr.is_separate_file() || prev_addr.is_separate_file()) {
    LOG(WARNING) << "Invalid rankings info.";
    control_data_->transaction = 0;
    return;
  }

  CacheRankingsBlock next(backend_->File(next_addr), next_addr);
  CacheRankingsBlock prev(backend_->File(prev_addr), prev_addr);
  if (!next.Load() || !prev.Load())
    return;

  CacheAddr node_value = node->address().value();
  DCHECK(prev.Data()->next == node_value ||
         prev.Data()->next == prev_addr.value() ||
         prev.Data()->next == next.address().value());
  DCHECK(next.Data()->prev == node_value ||
         next.Data()->prev == next_addr.value() ||
         next.Data()->prev == prev.address().value());

  if (node_value != prev_addr.value())
    prev.Data()->next = node_value;
  if (node_value != next_addr.value())
    next.Data()->prev = node_value;

  List my_list = static_cast<List>(control_data_->operation_list);
  Addr& my_head = heads_[my_list];
  Addr& my_tail = tails_[my_list];
  if (!my_head.is_initialized() || !my_tail.is_initialized()) {
    my_head.set_value(node_value);
    my_tail.set_value(node_value);
    WriteHead(my_list);
    WriteTail(my_list);
  } else if (my_head.value() == next.address().value()) {
    my_head.set_value(node_value);
    prev.Data()->next = next.address().value();
    WriteHead(my_list);
  } else if (my_tail.value() == prev.address().value()) {
    my_tail.set_value(node_value);
    next.Data()->prev = prev.address().value();
    WriteTail(my_list);
  }

  next.Store();
  prev.Store();
  control_data_->transaction = 0;
  control_data_->operation = 0;
  backend_->FlushIndex();
}

bool Rankings::CheckLinks(CacheRankingsBlock* node, CacheRankingsBlock* prev,
                          CacheRankingsBlock* next, List* list) {
  CacheAddr node_addr = node->address().value();
  if (prev->Data()->next == node_addr &&
      next->Data()->prev == node_addr) {
    // A regular linked node.
    return true;
  }

  Trace("CheckLinks 0x%x (0x%x 0x%x)", node_addr,
        prev->Data()->next, next->Data()->prev);

  if (node_addr != prev->address().value() &&
      node_addr != next->address().value() &&
      prev->Data()->next == next->address().value() &&
      next->Data()->prev == prev->address().value()) {
    // The list is actually ok, node is wrong.
    Trace("node 0x%x out of list %d", node_addr, *list);
    node->Data()->next = 0;
    node->Data()->prev = 0;
    node->Store();
    return false;
  }

  if (prev->Data()->next == node_addr ||
      next->Data()->prev == node_addr) {
    // Only one link is weird, let's double check.
    if (prev->Data()->next != node_addr && IsHead(node_addr, list))
      return true;

    if (next->Data()->prev != node_addr && IsTail(node_addr, list))
      return true;
  }

  LOG(ERROR) << "Inconsistent LRU.";
  backend_->CriticalError(ERR_INVALID_LINKS);
  return false;
}

bool Rankings::CheckSingleLink(CacheRankingsBlock* prev,
                               CacheRankingsBlock* next) {
  if (prev->Data()->next != next->address().value() ||
      next->Data()->prev != prev->address().value()) {
    LOG(ERROR) << "Inconsistent LRU.";
    backend_->CriticalError(ERR_INVALID_LINKS);
    return false;
  }

  return true;
}

int Rankings::CheckList(List list) {
  Addr last1, last2;
  int head_items;
  int rv = CheckListSection(list, last1, last2, true,  // Head to tail.
                            &last1, &last2, &head_items);
  if (rv == ERR_NO_ERROR)
    return head_items;

  return rv;
}

// Note that the returned error codes assume a forward walk (from head to tail)
// so they have to be adjusted accordingly by the caller. We use two stop values
// to be able to detect a corrupt node at the end that is not linked going back.
int Rankings::CheckListSection(List list, Addr end1, Addr end2, bool forward,
                               Addr* last, Addr* second_last, int* num_items) {
  Addr current = forward ? heads_[list] : tails_[list];
  *last = *second_last = current;
  *num_items = 0;
  if (!current.is_initialized())
    return ERR_NO_ERROR;

  if (!current.SanityCheckForRankings())
    return ERR_INVALID_HEAD;

  scoped_ptr<CacheRankingsBlock> node;
  Addr prev_addr(current);
  do {
    node.reset(new CacheRankingsBlock(backend_->File(current), current));
    node->Load();
    if (!SanityCheck(node.get(), true))
      return ERR_INVALID_ENTRY;

    CacheAddr next = forward ? node->Data()->next : node->Data()->prev;
    CacheAddr prev = forward ? node->Data()->prev : node->Data()->next;

    if (prev != prev_addr.value())
      return ERR_INVALID_PREV;

    Addr next_addr(next);
    if (!next_addr.SanityCheckForRankings())
      return ERR_INVALID_NEXT;

    prev_addr = current;
    current = next_addr;
    *second_last = *last;
    *last = current;
    (*num_items)++;

    // A node whose next pointer refers back to itself should be the real end
    // of this walk.
    if (next_addr == prev_addr) {
      Addr last_addr = forward ? tails_[list] : heads_[list];
      if (next_addr == last_addr)
        return ERR_NO_ERROR;
      return ERR_INVALID_TAIL;
    }
  } while (current != end1 && current != end2);
  return ERR_NO_ERROR;
}

bool Rankings::IsHead(CacheAddr addr, List* list) const {
  for (int i = 0; i < LAST_ELEMENT; i++) {
    if (addr == heads_[i].value()) {
      if (*list != i)
        Trace("Changing list %d to %d", *list, i);
      *list = static_cast<List>(i);
      return true;
    }
  }
  return false;
}

bool Rankings::IsTail(CacheAddr addr, List* list) const {
  for (int i = 0; i < LAST_ELEMENT; i++) {
    if (addr == tails_[i].value()) {
      if (*list != i)
        Trace("Changing list %d to %d", *list, i);
      *list = static_cast<List>(i);
      return true;
    }
  }
  return false;
}

// We expect to have just a few iterators at any given time, maybe two or
// three, but we could have more than one pointing at the same node. We walk
// the list of cache iterators and update all that are pointing to the given
// node.
void Rankings::UpdateIterators(CacheRankingsBlock* node) {
  CacheAddr address = node->address().value();
  for (IteratorList::iterator it = iterators_.begin(); it != iterators_.end();
       ++it) {
    if (it->first == address && it->second->HasData()) {
      CacheRankingsBlock* other = it->second;
      *other->Data() = *node->Data();
    }
  }
}

void Rankings::InvalidateIterators(CacheRankingsBlock* node) {
  CacheAddr address = node->address().value();
  for (IteratorList::iterator it = iterators_.begin(); it != iterators_.end();
       ++it) {
    if (it->first == address)
      it->second->Discard();
  }
}

void Rankings::IncrementCounter(List list) {
  if (!count_lists_)
    return;

  DCHECK(control_data_->sizes[list] < kint32max);
  if (control_data_->sizes[list] < kint32max)
    control_data_->sizes[list]++;
}

void Rankings::DecrementCounter(List list) {
  if (!count_lists_)
    return;

  DCHECK(control_data_->sizes[list] > 0);
  if (control_data_->sizes[list] > 0)
    control_data_->sizes[list]--;
}

}  // namespace disk_cache