net/disk_cache/blockfile/rankings.cc
// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "net/disk_cache/blockfile/rankings.h"

#include "base/metrics/histogram.h"
#include "net/disk_cache/blockfile/backend_impl.h"
#include "net/disk_cache/blockfile/disk_format.h"
#include "net/disk_cache/blockfile/entry_impl.h"
#include "net/disk_cache/blockfile/errors.h"
#include "net/disk_cache/blockfile/histogram_macros.h"
#include "net/disk_cache/blockfile/stress_support.h"

// Provide a BackendImpl object to macros from histogram_macros.h.
#define CACHE_UMA_BACKEND_IMPL_OBJ backend_

using base::Time;
using base::TimeTicks;

namespace disk_cache {
// This is used by crash_cache.exe to generate unit test files.
NET_EXPORT_PRIVATE RankCrashes g_rankings_crash = NO_CRASH;
}  // namespace disk_cache

namespace {

enum Operation {
  INSERT = 1,
  REMOVE
};

// This class provides a simple lock for the LRU list of rankings. Whenever an
// entry is to be inserted or removed from the list, a transaction object should
// be created to keep track of the operation. If the process crashes before
// finishing the operation, the transaction record (stored as part of the user
// data on the file header) can be used to finish the operation.
class Transaction {
 public:
  // addr is the cache address of the node being inserted or removed. We want
  // to avoid having the compiler optimize when to read or write user_data,
  // because it is the basis of the crash detection. Maybe volatile is not
  // enough for that, but it should be a good hint.
  Transaction(volatile disk_cache::LruData* data, disk_cache::Addr addr,
              Operation op, int list);
  ~Transaction();
 private:
  volatile disk_cache::LruData* data_;
  DISALLOW_COPY_AND_ASSIGN(Transaction);
};

Transaction::Transaction(volatile disk_cache::LruData* data,
                         disk_cache::Addr addr, Operation op, int list)
    : data_(data) {
  DCHECK(!data_->transaction);
  DCHECK(addr.is_initialized());
  data_->operation = op;
  data_->operation_list = list;
  data_->transaction = addr.value();
}

Transaction::~Transaction() {
  DCHECK(data_->transaction);
  data_->transaction = 0;
  data_->operation = 0;
  data_->operation_list = 0;
}

// Code locations that can generate crashes.
enum CrashLocation {
  ON_INSERT_1, ON_INSERT_2, ON_INSERT_3, ON_INSERT_4, ON_REMOVE_1, ON_REMOVE_2,
  ON_REMOVE_3, ON_REMOVE_4, ON_REMOVE_5, ON_REMOVE_6, ON_REMOVE_7, ON_REMOVE_8
};

#ifndef NDEBUG
void TerminateSelf() {
#if defined(OS_WIN)
  // Windows does more work on _exit() than we would like, so we force exit.
  TerminateProcess(GetCurrentProcess(), 0);
#elif defined(OS_POSIX)
  // On POSIX, _exit() will terminate the process with minimal cleanup,
  // and it is cleaner than killing.
  _exit(0);
#endif
}
#endif  // NDEBUG

// Generates a crash on debug builds, according to the value of
// g_rankings_crash. This is used by crash_cache.exe to generate unit-test
// files.
void GenerateCrash(CrashLocation location) {
#ifndef NDEBUG
  if (disk_cache::NO_CRASH == disk_cache::g_rankings_crash)
    return;
  switch (location) {
    case ON_INSERT_1:
      switch (disk_cache::g_rankings_crash) {
        case disk_cache::INSERT_ONE_1:
        case disk_cache::INSERT_LOAD_1:
          TerminateSelf();
        default:
          break;
      }
      break;
    case ON_INSERT_2:
      if (disk_cache::INSERT_EMPTY_1 == disk_cache::g_rankings_crash)
        TerminateSelf();
      break;
    case ON_INSERT_3:
      switch (disk_cache::g_rankings_crash) {
        case disk_cache::INSERT_EMPTY_2:
        case disk_cache::INSERT_ONE_2:
        case disk_cache::INSERT_LOAD_2:
          TerminateSelf();
        default:
          break;
      }
      break;
    case ON_INSERT_4:
      switch (disk_cache::g_rankings_crash) {
        case disk_cache::INSERT_EMPTY_3:
        case disk_cache::INSERT_ONE_3:
          TerminateSelf();
        default:
          break;
      }
      break;
    case ON_REMOVE_1:
      switch (disk_cache::g_rankings_crash) {
        case disk_cache::REMOVE_ONE_1:
        case disk_cache::REMOVE_HEAD_1:
        case disk_cache::REMOVE_TAIL_1:
        case disk_cache::REMOVE_LOAD_1:
          TerminateSelf();
        default:
          break;
      }
      break;
    case ON_REMOVE_2:
      if (disk_cache::REMOVE_ONE_2 == disk_cache::g_rankings_crash)
        TerminateSelf();
      break;
    case ON_REMOVE_3:
      if (disk_cache::REMOVE_ONE_3 == disk_cache::g_rankings_crash)
        TerminateSelf();
      break;
    case ON_REMOVE_4:
      if (disk_cache::REMOVE_HEAD_2 == disk_cache::g_rankings_crash)
        TerminateSelf();
      break;
    case ON_REMOVE_5:
      if (disk_cache::REMOVE_TAIL_2 == disk_cache::g_rankings_crash)
        TerminateSelf();
      break;
    case ON_REMOVE_6:
      if (disk_cache::REMOVE_TAIL_3 == disk_cache::g_rankings_crash)
        TerminateSelf();
      break;
    case ON_REMOVE_7:
      switch (disk_cache::g_rankings_crash) {
        case disk_cache::REMOVE_ONE_4:
        case disk_cache::REMOVE_LOAD_2:
        case disk_cache::REMOVE_HEAD_3:
          TerminateSelf();
        default:
          break;
      }
      break;
    case ON_REMOVE_8:
      switch (disk_cache::g_rankings_crash) {
        case disk_cache::REMOVE_HEAD_4:
        case disk_cache::REMOVE_LOAD_3:
          TerminateSelf();
        default:
          break;
      }
      break;
    default:
      NOTREACHED();
      return;
  }
#endif  // NDEBUG
}

// Update the timestamp fields of |node|.
void UpdateTimes(disk_cache::CacheRankingsBlock* node, bool modified) {
  base::Time now = base::Time::Now();
  node->Data()->last_used = now.ToInternalValue();
  if (modified)
    node->Data()->last_modified = now.ToInternalValue();
}

}  // namespace

namespace disk_cache {

Rankings::ScopedRankingsBlock::ScopedRankingsBlock() : rankings_(NULL) {}

Rankings::ScopedRankingsBlock::ScopedRankingsBlock(Rankings* rankings)
    : rankings_(rankings) {}

Rankings::ScopedRankingsBlock::ScopedRankingsBlock(
    Rankings* rankings, CacheRankingsBlock* node)
    : scoped_ptr<CacheRankingsBlock>(node), rankings_(rankings) {}

Rankings::Iterator::Iterator() {
  memset(this, 0, sizeof(Iterator));
}

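// Reset() below hands each cached node to a temporary ScopedRankingsBlock so
// that it is released back to |my_rankings| before the iterator state is
// wiped; the exact cleanup is defined by ScopedRankingsBlock in rankings.h.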
void Rankings::Iterator::Reset() {
  if (my_rankings) {
    for (int i = 0; i < 3; i++)
      ScopedRankingsBlock(my_rankings, nodes[i]);
  }
  memset(this, 0, sizeof(Iterator));
}

Rankings::Rankings() : init_(false) {}

Rankings::~Rankings() {}

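// Wires this Rankings instance to |backend|: caches a pointer to the LRU
// control data, reads the per-list heads and tails, and completes any
// transaction that a previous crash may have left behind.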
bool Rankings::Init(BackendImpl* backend, bool count_lists) {
  DCHECK(!init_);
  if (init_)
    return false;

  backend_ = backend;
  control_data_ = backend_->GetLruData();
  count_lists_ = count_lists;

  ReadHeads();
  ReadTails();

  if (control_data_->transaction)
    CompleteTransaction();

  init_ = true;
  return true;
}

void Rankings::Reset() {
  init_ = false;
  for (int i = 0; i < LAST_ELEMENT; i++) {
    heads_[i].set_value(0);
    tails_[i].set_value(0);
  }
  control_data_ = NULL;
}

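// The insertion below is made crash-safe by the Transaction record created at
// the top of Insert(): the new node and the updated old head are stored before
// the on-disk head pointer is written, and the GenerateCrash() calls are the
// crash-injection points used by crash_cache.exe to exercise the recovery path
// (CompleteTransaction / FinishInsert).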
void Rankings::Insert(CacheRankingsBlock* node, bool modified, List list) {
  Trace("Insert 0x%x l %d", node->address().value(), list);
  DCHECK(node->HasData());
  Addr& my_head = heads_[list];
  Addr& my_tail = tails_[list];
  Transaction lock(control_data_, node->address(), INSERT, list);
  CacheRankingsBlock head(backend_->File(my_head), my_head);
  if (my_head.is_initialized()) {
    if (!GetRanking(&head))
      return;

    if (head.Data()->prev != my_head.value() &&  // Normal path.
        head.Data()->prev != node->address().value()) {  // FinishInsert().
      backend_->CriticalError(ERR_INVALID_LINKS);
      return;
    }

    head.Data()->prev = node->address().value();
    head.Store();
    GenerateCrash(ON_INSERT_1);
    UpdateIterators(&head);
  }

  node->Data()->next = my_head.value();
  node->Data()->prev = node->address().value();
  my_head.set_value(node->address().value());

  if (!my_tail.is_initialized() || my_tail.value() == node->address().value()) {
    my_tail.set_value(node->address().value());
    node->Data()->next = my_tail.value();
    WriteTail(list);
    GenerateCrash(ON_INSERT_2);
  }

  UpdateTimes(node, modified);
  node->Store();
  GenerateCrash(ON_INSERT_3);

  // The last thing to do is move our head to point to a node already stored.
  WriteHead(list);
  IncrementCounter(list);
  GenerateCrash(ON_INSERT_4);
  backend_->FlushIndex();
}

// If a, b and r are elements on the list, and we want to remove r, the possible
// states for the objects if a crash happens are (where y(x, z) means for object
// y, prev is x and next is z):
// A. One element:
//    1. r(r, r), head(r), tail(r)                    initial state
//    2. r(r, r), head(0), tail(r)                    WriteHead()
//    3. r(r, r), head(0), tail(0)                    WriteTail()
//    4. r(0, 0), head(0), tail(0)                    next.Store()
//
// B. Remove a random element:
//    1. a(x, r), r(a, b), b(r, y), head(x), tail(y)  initial state
//    2. a(x, r), r(a, b), b(a, y), head(x), tail(y)  next.Store()
//    3. a(x, b), r(a, b), b(a, y), head(x), tail(y)  prev.Store()
//    4. a(x, b), r(0, 0), b(a, y), head(x), tail(y)  node.Store()
//
// C. Remove head:
//    1. r(r, b), b(r, y), head(r), tail(y)           initial state
//    2. r(r, b), b(r, y), head(b), tail(y)           WriteHead()
//    3. r(r, b), b(b, y), head(b), tail(y)           next.Store()
//    4. r(0, 0), b(b, y), head(b), tail(y)           prev.Store()
//
// D. Remove tail:
//    1. a(x, r), r(a, r), head(x), tail(r)           initial state
//    2. a(x, r), r(a, r), head(x), tail(a)           WriteTail()
//    3. a(x, a), r(a, r), head(x), tail(a)           prev.Store()
//    4. a(x, a), r(0, 0), head(x), tail(a)           next.Store()
void Rankings::Remove(CacheRankingsBlock* node, List list, bool strict) {
  Trace("Remove 0x%x (0x%x 0x%x) l %d", node->address().value(),
        node->Data()->next, node->Data()->prev, list);
  DCHECK(node->HasData());
  if (strict)
    InvalidateIterators(node);

  Addr next_addr(node->Data()->next);
  Addr prev_addr(node->Data()->prev);
  if (!next_addr.is_initialized() || next_addr.is_separate_file() ||
      !prev_addr.is_initialized() || prev_addr.is_separate_file()) {
    if (next_addr.is_initialized() || prev_addr.is_initialized()) {
      LOG(ERROR) << "Invalid rankings info.";
      STRESS_NOTREACHED();
    }
    return;
  }

  CacheRankingsBlock next(backend_->File(next_addr), next_addr);
  CacheRankingsBlock prev(backend_->File(prev_addr), prev_addr);
  if (!GetRanking(&next) || !GetRanking(&prev)) {
    STRESS_NOTREACHED();
    return;
  }

  if (!CheckLinks(node, &prev, &next, &list))
    return;

  Transaction lock(control_data_, node->address(), REMOVE, list);
  prev.Data()->next = next.address().value();
  next.Data()->prev = prev.address().value();
  GenerateCrash(ON_REMOVE_1);

  CacheAddr node_value = node->address().value();
  Addr& my_head = heads_[list];
  Addr& my_tail = tails_[list];
  if (node_value == my_head.value() || node_value == my_tail.value()) {
    if (my_head.value() == my_tail.value()) {
      my_head.set_value(0);
      my_tail.set_value(0);

      WriteHead(list);
      GenerateCrash(ON_REMOVE_2);
      WriteTail(list);
      GenerateCrash(ON_REMOVE_3);
    } else if (node_value == my_head.value()) {
      my_head.set_value(next.address().value());
      next.Data()->prev = next.address().value();

      WriteHead(list);
      GenerateCrash(ON_REMOVE_4);
    } else if (node_value == my_tail.value()) {
      my_tail.set_value(prev.address().value());
      prev.Data()->next = prev.address().value();

      WriteTail(list);
      GenerateCrash(ON_REMOVE_5);

      // Store the new tail to make sure we can undo the operation if we crash.
      prev.Store();
      GenerateCrash(ON_REMOVE_6);
    }
  }

  // Nodes out of the list can be identified by invalid pointers.
  node->Data()->next = 0;
  node->Data()->prev = 0;

  // The last thing to get to disk is the node itself, so before that there is
  // enough info to recover.
  next.Store();
  GenerateCrash(ON_REMOVE_7);
  prev.Store();
  GenerateCrash(ON_REMOVE_8);
  node->Store();
  DecrementCounter(list);
  UpdateIterators(&next);
  UpdateIterators(&prev);
  backend_->FlushIndex();
}

// A crash in between Remove and Insert will lead to a dirty entry not on the
// list. We want to avoid that case as much as we can (for example, while
// waiting for IO), but the net effect is just an assert on debug builds when
// attempting to remove the entry. Otherwise we'd need reentrant transactions,
// which would be overkill.
void Rankings::UpdateRank(CacheRankingsBlock* node, bool modified, List list) {
  Addr& my_head = heads_[list];
  if (my_head.value() == node->address().value()) {
    UpdateTimes(node, modified);
    node->set_modified();
    return;
  }

  TimeTicks start = TimeTicks::Now();
  Remove(node, list, true);
  Insert(node, modified, list);
  CACHE_UMA(AGE_MS, "UpdateRank", 0, start);
}

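// GetNext()/GetPrev() walk a list one node at a time, starting from the head
// (or tail) when |node| is NULL. The returned block is registered as an
// iterator so that later Insert()/Remove() calls keep it consistent, and the
// caller releases it through FreeRankingsBlock() (or a ScopedRankingsBlock).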
CacheRankingsBlock* Rankings::GetNext(CacheRankingsBlock* node, List list) {
  ScopedRankingsBlock next(this);
  if (!node) {
    Addr& my_head = heads_[list];
    if (!my_head.is_initialized())
      return NULL;
    next.reset(new CacheRankingsBlock(backend_->File(my_head), my_head));
  } else {
    if (!node->HasData())
      node->Load();
    Addr& my_tail = tails_[list];
    if (!my_tail.is_initialized())
      return NULL;
    if (my_tail.value() == node->address().value())
      return NULL;
    Addr address(node->Data()->next);
    if (address.value() == node->address().value())
      return NULL;  // Another tail? fail it.
    next.reset(new CacheRankingsBlock(backend_->File(address), address));
  }

  TrackRankingsBlock(next.get(), true);

  if (!GetRanking(next.get()))
    return NULL;

  ConvertToLongLived(next.get());
  if (node && !CheckSingleLink(node, next.get()))
    return NULL;

  return next.release();
}

CacheRankingsBlock* Rankings::GetPrev(CacheRankingsBlock* node, List list) {
  ScopedRankingsBlock prev(this);
  if (!node) {
    Addr& my_tail = tails_[list];
    if (!my_tail.is_initialized())
      return NULL;
    prev.reset(new CacheRankingsBlock(backend_->File(my_tail), my_tail));
  } else {
    if (!node->HasData())
      node->Load();
    Addr& my_head = heads_[list];
    if (!my_head.is_initialized())
      return NULL;
    if (my_head.value() == node->address().value())
      return NULL;
    Addr address(node->Data()->prev);
    if (address.value() == node->address().value())
      return NULL;  // Another head? fail it.
    prev.reset(new CacheRankingsBlock(backend_->File(address), address));
  }

  TrackRankingsBlock(prev.get(), true);

  if (!GetRanking(prev.get()))
    return NULL;

  ConvertToLongLived(prev.get());
  if (node && !CheckSingleLink(prev.get(), node))
    return NULL;

  return prev.release();
}

void Rankings::FreeRankingsBlock(CacheRankingsBlock* node) {
  TrackRankingsBlock(node, false);
}

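// Adds or removes |node| from the list of live iterators (iterators_), keyed
// by its cache address, so UpdateIterators() and InvalidateIterators() can
// find every block that currently points at a given node.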
void Rankings::TrackRankingsBlock(CacheRankingsBlock* node,
                                  bool start_tracking) {
  if (!node)
    return;

  IteratorPair current(node->address().value(), node);

  if (start_tracking)
    iterators_.push_back(current);
  else
    iterators_.remove(current);
}

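// Walks every list and returns the total number of items found, or the first
// (forward-walk) error code if any list is corrupt.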
int Rankings::SelfCheck() {
  int total = 0;
  int error = 0;
  for (int i = 0; i < LAST_ELEMENT; i++) {
    int partial = CheckList(static_cast<List>(i));
    if (partial < 0 && !error)
      error = partial;
    else if (partial > 0)
      total += partial;
  }

  return error ? error : total;
}

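// Returns true if |node| looks like a valid list member: the hash checks out,
// prev/next are either both set or both zero, self-pointers only occur at a
// list head or tail, and both neighbours live in a RANKINGS file.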
bool Rankings::SanityCheck(CacheRankingsBlock* node, bool from_list) const {
  if (!node->VerifyHash())
    return false;

  const RankingsNode* data = node->Data();

  if ((!data->next && data->prev) || (data->next && !data->prev))
    return false;

  // Both pointers on zero is a node out of the list.
  if (!data->next && !data->prev && from_list)
    return false;

  List list = NO_USE;  // Initialize it to something.
  if ((node->address().value() == data->prev) && !IsHead(data->prev, &list))
    return false;

  if ((node->address().value() == data->next) && !IsTail(data->next, &list))
    return false;

  if (!data->next && !data->prev)
    return true;

  Addr next_addr(data->next);
  Addr prev_addr(data->prev);
  if (!next_addr.SanityCheckV2() || next_addr.file_type() != RANKINGS ||
      !prev_addr.SanityCheckV2() || prev_addr.file_type() != RANKINGS)
    return false;

  return true;
}

bool Rankings::DataSanityCheck(CacheRankingsBlock* node, bool from_list) const {
  const RankingsNode* data = node->Data();
  if (!data->contents)
    return false;

  // It may have never been inserted.
  if (from_list && (!data->last_used || !data->last_modified))
    return false;

  return true;
}

void Rankings::SetContents(CacheRankingsBlock* node, CacheAddr address) {
  node->Data()->contents = address;
  node->Store();
}

void Rankings::ReadHeads() {
  for (int i = 0; i < LAST_ELEMENT; i++)
    heads_[i] = Addr(control_data_->heads[i]);
}

void Rankings::ReadTails() {
  for (int i = 0; i < LAST_ELEMENT; i++)
    tails_[i] = Addr(control_data_->tails[i]);
}

void Rankings::WriteHead(List list) {
  control_data_->heads[list] = heads_[list].value();
}

void Rankings::WriteTail(List list) {
  control_data_->tails[list] = tails_[list].value();
}

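// Loads |rankings| from disk and sanity-checks it. If the block belongs to a
// currently open entry, the open entry's in-memory data is shared instead; if
// it is dirty but no open entry claims it, it is left marked as dirty so that
// a regular open/create path deletes it later.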
bool Rankings::GetRanking(CacheRankingsBlock* rankings) {
  if (!rankings->address().is_initialized())
    return false;

  TimeTicks start = TimeTicks::Now();
  if (!rankings->Load())
    return false;

  if (!SanityCheck(rankings, true)) {
    backend_->CriticalError(ERR_INVALID_LINKS);
    return false;
  }

  backend_->OnEvent(Stats::OPEN_RANKINGS);

  // Note that if the cache is in read_only mode, open entries are not marked
  // as dirty, except when an entry is doomed. We have to look for open entries.
  if (!backend_->read_only() && !rankings->Data()->dirty)
    return true;

  EntryImpl* entry = backend_->GetOpenEntry(rankings);
  if (!entry) {
    if (backend_->read_only())
      return true;

    // We cannot trust this entry, but we cannot initiate a cleanup from this
    // point (we may be in the middle of a cleanup already). The entry will be
    // deleted when detected from a regular open/create path.
    rankings->Data()->dirty = backend_->GetCurrentEntryId() - 1;
    if (!rankings->Data()->dirty)
      rankings->Data()->dirty--;
    return true;
  }

  // Note that we should not leave this module without deleting rankings first.
  rankings->SetData(entry->rankings()->Data());

  CACHE_UMA(AGE_MS, "GetRankings", 0, start);
  return true;
}

void Rankings::ConvertToLongLived(CacheRankingsBlock* rankings) {
  if (rankings->own_data())
    return;

  // We cannot return a shared node because we are not keeping a reference
  // to the entry that owns the buffer. Make this node a copy of the one that
  // we have, and let the iterator logic update it when the entry changes.
  CacheRankingsBlock temp(NULL, Addr(0));
  *temp.Data() = *rankings->Data();
  rankings->StopSharingData();
  *rankings->Data() = *temp.Data();
}

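// Called from Init() when the stored LruData says that a crash interrupted an
// Insert() or Remove(); it replays the insertion (FinishInsert) or undoes the
// removal (RevertRemove) so the lists are consistent again.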
void Rankings::CompleteTransaction() {
  Addr node_addr(static_cast<CacheAddr>(control_data_->transaction));
  if (!node_addr.is_initialized() || node_addr.is_separate_file()) {
    NOTREACHED();
    LOG(ERROR) << "Invalid rankings info.";
    return;
  }

  Trace("CompleteTransaction 0x%x", node_addr.value());

  CacheRankingsBlock node(backend_->File(node_addr), node_addr);
  if (!node.Load())
    return;

  node.Store();

  Addr& my_head = heads_[control_data_->operation_list];
  Addr& my_tail = tails_[control_data_->operation_list];

  // We want to leave the node inside the list. The entry must be marked as
  // dirty, and will be removed later. Otherwise, we'll get assertions when
  // attempting to remove the dirty entry.
  if (INSERT == control_data_->operation) {
    Trace("FinishInsert h:0x%x t:0x%x", my_head.value(), my_tail.value());
    FinishInsert(&node);
  } else if (REMOVE == control_data_->operation) {
    Trace("RevertRemove h:0x%x t:0x%x", my_head.value(), my_tail.value());
    RevertRemove(&node);
  } else {
    NOTREACHED();
    LOG(ERROR) << "Invalid operation to recover.";
  }
}

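// Finishes an interrupted Insert(): unless the head already points at the
// node, the insertion is simply replayed, and the backend is then told about
// the recovered entry so it can be marked dirty and cleaned up later.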
void Rankings::FinishInsert(CacheRankingsBlock* node) {
  control_data_->transaction = 0;
  control_data_->operation = 0;
  Addr& my_head = heads_[control_data_->operation_list];
  Addr& my_tail = tails_[control_data_->operation_list];
  if (my_head.value() != node->address().value()) {
    if (my_tail.value() == node->address().value()) {
      // This part will be skipped by the logic of Insert.
      node->Data()->next = my_tail.value();
    }

    Insert(node, true, static_cast<List>(control_data_->operation_list));
  }

  // Tell the backend about this entry.
  backend_->RecoveredEntry(node);
}

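// Undoes an interrupted Remove(): the node is linked back between its old
// neighbours and, if the crash happened after the head or tail had already
// been rewritten, those pointers are restored to reference the node again.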
void Rankings::RevertRemove(CacheRankingsBlock* node) {
  Addr next_addr(node->Data()->next);
  Addr prev_addr(node->Data()->prev);
  if (!next_addr.is_initialized() || !prev_addr.is_initialized()) {
    // The operation actually finished. Nothing to do.
    control_data_->transaction = 0;
    return;
  }
  if (next_addr.is_separate_file() || prev_addr.is_separate_file()) {
    NOTREACHED();
    LOG(WARNING) << "Invalid rankings info.";
    control_data_->transaction = 0;
    return;
  }

  CacheRankingsBlock next(backend_->File(next_addr), next_addr);
  CacheRankingsBlock prev(backend_->File(prev_addr), prev_addr);
  if (!next.Load() || !prev.Load())
    return;

  CacheAddr node_value = node->address().value();
  DCHECK(prev.Data()->next == node_value ||
         prev.Data()->next == prev_addr.value() ||
         prev.Data()->next == next.address().value());
  DCHECK(next.Data()->prev == node_value ||
         next.Data()->prev == next_addr.value() ||
         next.Data()->prev == prev.address().value());

  if (node_value != prev_addr.value())
    prev.Data()->next = node_value;
  if (node_value != next_addr.value())
    next.Data()->prev = node_value;

  List my_list = static_cast<List>(control_data_->operation_list);
  Addr& my_head = heads_[my_list];
  Addr& my_tail = tails_[my_list];
  if (!my_head.is_initialized() || !my_tail.is_initialized()) {
    my_head.set_value(node_value);
    my_tail.set_value(node_value);
    WriteHead(my_list);
    WriteTail(my_list);
  } else if (my_head.value() == next.address().value()) {
    my_head.set_value(node_value);
    prev.Data()->next = next.address().value();
    WriteHead(my_list);
  } else if (my_tail.value() == prev.address().value()) {
    my_tail.set_value(node_value);
    next.Data()->prev = prev.address().value();
    WriteTail(my_list);
  }

  next.Store();
  prev.Store();
  control_data_->transaction = 0;
  control_data_->operation = 0;
  backend_->FlushIndex();
}

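// Verifies that |prev| and |next| really are the neighbours of |node|. If the
// list itself is consistent and only |node| is wrong, its pointers are cleared
// and the caller gets false; any other mismatch (beyond |node| being the head
// or tail) is reported as a critical error.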
bool Rankings::CheckLinks(CacheRankingsBlock* node, CacheRankingsBlock* prev,
                          CacheRankingsBlock* next, List* list) {
  CacheAddr node_addr = node->address().value();
  if (prev->Data()->next == node_addr &&
      next->Data()->prev == node_addr) {
    // A regular linked node.
    return true;
  }

  Trace("CheckLinks 0x%x (0x%x 0x%x)", node_addr,
        prev->Data()->next, next->Data()->prev);

  if (node_addr != prev->address().value() &&
      node_addr != next->address().value() &&
      prev->Data()->next == next->address().value() &&
      next->Data()->prev == prev->address().value()) {
    // The list is actually ok, node is wrong.
    Trace("node 0x%x out of list %d", node_addr, list);
    node->Data()->next = 0;
    node->Data()->prev = 0;
    node->Store();
    return false;
  }

  if (prev->Data()->next == node_addr ||
      next->Data()->prev == node_addr) {
    // Only one link is weird, let's double-check.
    if (prev->Data()->next != node_addr && IsHead(node_addr, list))
      return true;

    if (next->Data()->prev != node_addr && IsTail(node_addr, list))
      return true;
  }

  LOG(ERROR) << "Inconsistent LRU.";
  STRESS_NOTREACHED();

  backend_->CriticalError(ERR_INVALID_LINKS);
  return false;
}

bool Rankings::CheckSingleLink(CacheRankingsBlock* prev,
                               CacheRankingsBlock* next) {
  if (prev->Data()->next != next->address().value() ||
      next->Data()->prev != prev->address().value()) {
    LOG(ERROR) << "Inconsistent LRU.";

    backend_->CriticalError(ERR_INVALID_LINKS);
    return false;
  }

  return true;
}

int Rankings::CheckList(List list) {
  Addr last1, last2;
  int head_items;
  int rv = CheckListSection(list, last1, last2, true,  // Head to tail.
                            &last1, &last2, &head_items);
  if (rv == ERR_NO_ERROR)
    return head_items;

  return rv;
}

// Note that the returned error codes assume a forward walk (from head to tail)
// so they have to be adjusted accordingly by the caller. We use two stop values
// to be able to detect a corrupt node at the end that is not linked going back.
int Rankings::CheckListSection(List list, Addr end1, Addr end2, bool forward,
                               Addr* last, Addr* second_last, int* num_items) {
  Addr current = forward ? heads_[list] : tails_[list];
  *last = *second_last = current;
  *num_items = 0;
  if (!current.is_initialized())
    return ERR_NO_ERROR;

  if (!current.SanityCheckForRankings())
    return ERR_INVALID_HEAD;

  scoped_ptr<CacheRankingsBlock> node;
  Addr prev_addr(current);
  do {
    node.reset(new CacheRankingsBlock(backend_->File(current), current));
    node->Load();
    if (!SanityCheck(node.get(), true))
      return ERR_INVALID_ENTRY;

    CacheAddr next = forward ? node->Data()->next : node->Data()->prev;
    CacheAddr prev = forward ? node->Data()->prev : node->Data()->next;

    if (prev != prev_addr.value())
      return ERR_INVALID_PREV;

    Addr next_addr(next);
    if (!next_addr.SanityCheckForRankings())
      return ERR_INVALID_NEXT;

    prev_addr = current;
    current = next_addr;
    *second_last = *last;
    *last = current;
    (*num_items)++;

    if (next_addr == prev_addr) {
      Addr last = forward ? tails_[list] : heads_[list];
      if (next_addr == last)
        return ERR_NO_ERROR;
      return ERR_INVALID_TAIL;
    }
  } while (current != end1 && current != end2);
  return ERR_NO_ERROR;
}

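// IsHead() and IsTail() scan every list; besides answering the question, they
// update |list| to the list that actually owns |addr| when it differs from the
// caller's guess.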
bool Rankings::IsHead(CacheAddr addr, List* list) const {
  for (int i = 0; i < LAST_ELEMENT; i++) {
    if (addr == heads_[i].value()) {
      if (*list != i)
        Trace("Changing list %d to %d", *list, i);
      *list = static_cast<List>(i);
      return true;
    }
  }
  return false;
}

bool Rankings::IsTail(CacheAddr addr, List* list) const {
  for (int i = 0; i < LAST_ELEMENT; i++) {
    if (addr == tails_[i].value()) {
      if (*list != i)
        Trace("Changing list %d to %d", *list, i);
      *list = static_cast<List>(i);
      return true;
    }
  }
  return false;
}

// We expect to have just a few iterators at any given time, maybe two or
// three, but we could have more than one pointing at the same node. We walk
// the list of cache iterators and update all that are pointing to the given
// node.
void Rankings::UpdateIterators(CacheRankingsBlock* node) {
  CacheAddr address = node->address().value();
  for (IteratorList::iterator it = iterators_.begin(); it != iterators_.end();
       ++it) {
    if (it->first == address && it->second->HasData()) {
      CacheRankingsBlock* other = it->second;
      *other->Data() = *node->Data();
    }
  }
}

void Rankings::InvalidateIterators(CacheRankingsBlock* node) {
  CacheAddr address = node->address().value();
  for (IteratorList::iterator it = iterators_.begin(); it != iterators_.end();
       ++it) {
    if (it->first == address)
      it->second->Discard();
  }
}

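// The per-list size counters are only maintained when Init() was called with
// count_lists == true; the DCHECKs guard against overflow and underflow, and
// the runtime checks keep a corrupt counter from wrapping.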
void Rankings::IncrementCounter(List list) {
  if (!count_lists_)
    return;

  DCHECK(control_data_->sizes[list] < kint32max);
  if (control_data_->sizes[list] < kint32max)
    control_data_->sizes[list]++;
}

void Rankings::DecrementCounter(List list) {
  if (!count_lists_)
    return;

  DCHECK(control_data_->sizes[list] > 0);
  if (control_data_->sizes[list] > 0)
    control_data_->sizes[list]--;
}

}  // namespace disk_cache