// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "net/disk_cache/blockfile/rankings.h"

#include "net/disk_cache/blockfile/backend_impl.h"
#include "net/disk_cache/blockfile/disk_format.h"
#include "net/disk_cache/blockfile/entry_impl.h"
#include "net/disk_cache/blockfile/errors.h"
#include "net/disk_cache/blockfile/histogram_macros.h"
#include "net/disk_cache/blockfile/stress_support.h"

// Provide a BackendImpl object to macros from histogram_macros.h.
#define CACHE_UMA_BACKEND_IMPL_OBJ backend_

using base::Time;
using base::TimeTicks;

namespace disk_cache {
// This is used by crash_cache.exe to generate unit test files.
NET_EXPORT_PRIVATE RankCrashes g_rankings_crash = NO_CRASH;
}

namespace {

enum Operation {
  INSERT = 1,
  REMOVE
};

// This class provides a simple lock for the LRU list of rankings. Whenever an
// entry is to be inserted or removed from the list, a transaction object should
// be created to keep track of the operation. If the process crashes before
// finishing the operation, the transaction record (stored as part of the user
// data on the file header) can be used to finish the operation.
class Transaction {
 public:
  // addr is the cache address of the node being inserted or removed. We want
  // to avoid having the compiler doing optimizations on when to read or write
  // from user_data because it is the basis of the crash detection. Maybe
  // volatile is not enough for that, but it should be a good hint.
  Transaction(volatile disk_cache::LruData* data, disk_cache::Addr addr,
              Operation op, int list);
  ~Transaction();
 private:
  volatile disk_cache::LruData* data_;
  DISALLOW_COPY_AND_ASSIGN(Transaction);
};

Transaction::Transaction(volatile disk_cache::LruData* data,
                         disk_cache::Addr addr, Operation op, int list)
    : data_(data) {
  DCHECK(!data_->transaction);
  DCHECK(addr.is_initialized());
  data_->operation = op;
  data_->operation_list = list;
  data_->transaction = addr.value();
}

Transaction::~Transaction() {
  DCHECK(data_->transaction);
  data_->transaction = 0;
  data_->operation = 0;
  data_->operation_list = 0;
}

// Code locations that can generate crashes.
enum CrashLocation {
  ON_INSERT_1, ON_INSERT_2, ON_INSERT_3, ON_INSERT_4, ON_REMOVE_1, ON_REMOVE_2,
  ON_REMOVE_3, ON_REMOVE_4, ON_REMOVE_5, ON_REMOVE_6, ON_REMOVE_7, ON_REMOVE_8
};

#ifndef NDEBUG
void TerminateSelf() {
#if defined(OS_WIN)
  // Windows does more work on _exit() than we would like, so we force exit.
  TerminateProcess(GetCurrentProcess(), 0);
#elif defined(OS_POSIX)
  // On POSIX, _exit() will terminate the process with minimal cleanup,
  // and it is cleaner than killing.
  _exit(0);
#endif
}
#endif  // NDEBUG

// Generates a crash on debug builds, according to the value of
// g_rankings_crash. This is used by crash_cache.exe to generate unit-test
// files.
void GenerateCrash(CrashLocation location) {
#ifndef NDEBUG
  if (disk_cache::NO_CRASH == disk_cache::g_rankings_crash)
    return;
  switch (location) {
    case ON_INSERT_1:
      switch (disk_cache::g_rankings_crash) {
        case disk_cache::INSERT_ONE_1:
        case disk_cache::INSERT_LOAD_1:
          TerminateSelf();
        default:
          break;
      }
      break;
    case ON_INSERT_2:
      if (disk_cache::INSERT_EMPTY_1 == disk_cache::g_rankings_crash)
        TerminateSelf();
      break;
    case ON_INSERT_3:
      switch (disk_cache::g_rankings_crash) {
        case disk_cache::INSERT_EMPTY_2:
        case disk_cache::INSERT_ONE_2:
        case disk_cache::INSERT_LOAD_2:
          TerminateSelf();
        default:
          break;
      }
      break;
    case ON_INSERT_4:
      switch (disk_cache::g_rankings_crash) {
        case disk_cache::INSERT_EMPTY_3:
        case disk_cache::INSERT_ONE_3:
          TerminateSelf();
        default:
          break;
      }
      break;
    case ON_REMOVE_1:
      switch (disk_cache::g_rankings_crash) {
        case disk_cache::REMOVE_ONE_1:
        case disk_cache::REMOVE_HEAD_1:
        case disk_cache::REMOVE_TAIL_1:
        case disk_cache::REMOVE_LOAD_1:
          TerminateSelf();
        default:
          break;
      }
      break;
    case ON_REMOVE_2:
      if (disk_cache::REMOVE_ONE_2 == disk_cache::g_rankings_crash)
        TerminateSelf();
      break;
    case ON_REMOVE_3:
      if (disk_cache::REMOVE_ONE_3 == disk_cache::g_rankings_crash)
        TerminateSelf();
      break;
    case ON_REMOVE_4:
      if (disk_cache::REMOVE_HEAD_2 == disk_cache::g_rankings_crash)
        TerminateSelf();
      break;
    case ON_REMOVE_5:
      if (disk_cache::REMOVE_TAIL_2 == disk_cache::g_rankings_crash)
        TerminateSelf();
      break;
    case ON_REMOVE_6:
      if (disk_cache::REMOVE_TAIL_3 == disk_cache::g_rankings_crash)
        TerminateSelf();
      break;
    case ON_REMOVE_7:
      switch (disk_cache::g_rankings_crash) {
        case disk_cache::REMOVE_ONE_4:
        case disk_cache::REMOVE_LOAD_2:
        case disk_cache::REMOVE_HEAD_3:
          TerminateSelf();
        default:
          break;
      }
      break;
    case ON_REMOVE_8:
      switch (disk_cache::g_rankings_crash) {
        case disk_cache::REMOVE_HEAD_4:
        case disk_cache::REMOVE_LOAD_3:
          TerminateSelf();
        default:
          break;
      }
      break;
    default:
      NOTREACHED();
      return;
  }
#endif  // NDEBUG
}

// Update the timestamp fields of |node|.
void UpdateTimes(disk_cache::CacheRankingsBlock* node, bool modified) {
  base::Time now = base::Time::Now();
  node->Data()->last_used = now.ToInternalValue();
  if (modified)
    node->Data()->last_modified = now.ToInternalValue();
}

}  // namespace

namespace disk_cache {

Rankings::ScopedRankingsBlock::ScopedRankingsBlock() : rankings_(NULL) {}

Rankings::ScopedRankingsBlock::ScopedRankingsBlock(Rankings* rankings)
    : rankings_(rankings) {}

Rankings::ScopedRankingsBlock::ScopedRankingsBlock(
    Rankings* rankings, CacheRankingsBlock* node)
    : scoped_ptr<CacheRankingsBlock>(node), rankings_(rankings) {}

Rankings::Iterator::Iterator() {
  memset(this, 0, sizeof(Iterator));
}

void Rankings::Iterator::Reset() {
  if (my_rankings) {
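    // Each temporary ScopedRankingsBlock releases (untracks and deletes) the
    // corresponding node as soon as it goes out of scope.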
    for (int i = 0; i < 3; i++)
      ScopedRankingsBlock(my_rankings, nodes[i]);
  }
  memset(this, 0, sizeof(Iterator));
}

Rankings::Rankings() : init_(false) {}

Rankings::~Rankings() {}

bool Rankings::Init(BackendImpl* backend, bool count_lists) {
  DCHECK(!init_);
  if (init_)
    return false;

  backend_ = backend;
  control_data_ = backend_->GetLruData();
  count_lists_ = count_lists;

  ReadHeads();
  ReadTails();

  if (control_data_->transaction)
    CompleteTransaction();

  init_ = true;
  return true;
}

void Rankings::Reset() {
  init_ = false;
  for (int i = 0; i < LAST_ELEMENT; i++) {
    heads_[i].set_value(0);
    tails_[i].set_value(0);
  }
  control_data_ = NULL;
}
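
// Inserts |node| at the head of |list|. The Transaction object records the
// operation in the control data so that CompleteTransaction() can finish the
// job after a crash.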
void Rankings::Insert(CacheRankingsBlock* node, bool modified, List list) {
  Trace("Insert 0x%x l %d", node->address().value(), list);
  DCHECK(node->HasData());
  Addr& my_head = heads_[list];
  Addr& my_tail = tails_[list];
  Transaction lock(control_data_, node->address(), INSERT, list);
  CacheRankingsBlock head(backend_->File(my_head), my_head);
  if (my_head.is_initialized()) {
    if (!GetRanking(&head))
      return;

    if (head.Data()->prev != my_head.value() &&  // Normal path.
        head.Data()->prev != node->address().value()) {  // FinishInsert().
      backend_->CriticalError(ERR_INVALID_LINKS);
      return;
    }

    head.Data()->prev = node->address().value();
    head.Store();
    GenerateCrash(ON_INSERT_1);
    UpdateIterators(&head);
  }
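
  // By convention the head's prev and the tail's next point to the node
  // itself, so the new node becomes its own prev until it is written out as
  // the list head.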
  node->Data()->next = my_head.value();
  node->Data()->prev = node->address().value();
  my_head.set_value(node->address().value());

  if (!my_tail.is_initialized() || my_tail.value() == node->address().value()) {
    my_tail.set_value(node->address().value());
    node->Data()->next = my_tail.value();
    WriteTail(list);
    GenerateCrash(ON_INSERT_2);
  }

  UpdateTimes(node, modified);
  node->Store();
  GenerateCrash(ON_INSERT_3);

  // The last thing to do is move our head to point to a node already stored.
  WriteHead(list);
  IncrementCounter(list);
  GenerateCrash(ON_INSERT_4);
  backend_->FlushIndex();
}

// If a, b and r are elements on the list, and we want to remove r, the possible
// states for the objects if a crash happens are (where y(x, z) means for object
// y, prev is x and next is z):
// A. One element:
//    1. r(r, r), head(r), tail(r)                    initial state
//    2. r(r, r), head(0), tail(r)                    WriteHead()
//    3. r(r, r), head(0), tail(0)                    WriteTail()
//    4. r(0, 0), head(0), tail(0)                    next.Store()
//
// B. Remove a random element:
//    1. a(x, r), r(a, b), b(r, y), head(x), tail(y)  initial state
//    2. a(x, r), r(a, b), b(a, y), head(x), tail(y)  next.Store()
//    3. a(x, b), r(a, b), b(a, y), head(x), tail(y)  prev.Store()
//    4. a(x, b), r(0, 0), b(a, y), head(x), tail(y)  node.Store()
//
// C. Remove head:
//    1. r(r, b), b(r, y), head(r), tail(y)           initial state
//    2. r(r, b), b(r, y), head(b), tail(y)           WriteHead()
//    3. r(r, b), b(b, y), head(b), tail(y)           next.Store()
//    4. r(0, 0), b(b, y), head(b), tail(y)           prev.Store()
//
// D. Remove tail:
//    1. a(x, r), r(a, r), head(x), tail(r)           initial state
//    2. a(x, r), r(a, r), head(x), tail(a)           WriteTail()
//    3. a(x, a), r(a, r), head(x), tail(a)           prev.Store()
//    4. a(x, a), r(0, 0), head(x), tail(a)           next.Store()
void Rankings::Remove(CacheRankingsBlock* node, List list, bool strict) {
  Trace("Remove 0x%x (0x%x 0x%x) l %d", node->address().value(),
        node->Data()->next, node->Data()->prev, list);
  DCHECK(node->HasData());
  if (strict)
    InvalidateIterators(node);

  Addr next_addr(node->Data()->next);
  Addr prev_addr(node->Data()->prev);
  if (!next_addr.is_initialized() || next_addr.is_separate_file() ||
      !prev_addr.is_initialized() || prev_addr.is_separate_file()) {
    if (next_addr.is_initialized() || prev_addr.is_initialized()) {
      LOG(ERROR) << "Invalid rankings info.";
      STRESS_NOTREACHED();
    }
    return;
  }

  CacheRankingsBlock next(backend_->File(next_addr), next_addr);
  CacheRankingsBlock prev(backend_->File(prev_addr), prev_addr);
  if (!GetRanking(&next) || !GetRanking(&prev)) {
    STRESS_NOTREACHED();
    return;
  }

  if (!CheckLinks(node, &prev, &next, &list))
    return;

  Transaction lock(control_data_, node->address(), REMOVE, list);
  prev.Data()->next = next.address().value();
  next.Data()->prev = prev.address().value();
  GenerateCrash(ON_REMOVE_1);

  CacheAddr node_value = node->address().value();
  Addr& my_head = heads_[list];
  Addr& my_tail = tails_[list];
  if (node_value == my_head.value() || node_value == my_tail.value()) {
    if (my_head.value() == my_tail.value()) {
      my_head.set_value(0);
      my_tail.set_value(0);

      WriteHead(list);
      GenerateCrash(ON_REMOVE_2);
      WriteTail(list);
      GenerateCrash(ON_REMOVE_3);
    } else if (node_value == my_head.value()) {
      my_head.set_value(next.address().value());
      next.Data()->prev = next.address().value();

      WriteHead(list);
      GenerateCrash(ON_REMOVE_4);
    } else if (node_value == my_tail.value()) {
      my_tail.set_value(prev.address().value());
      prev.Data()->next = prev.address().value();

      WriteTail(list);
      GenerateCrash(ON_REMOVE_5);

      // Store the new tail to make sure we can undo the operation if we crash.
      prev.Store();
      GenerateCrash(ON_REMOVE_6);
    }
  }

  // Nodes out of the list can be identified by invalid pointers.
  node->Data()->next = 0;
  node->Data()->prev = 0;

  // The last thing to get to disk is the node itself, so before that there is
  // enough info to recover.
  next.Store();
  GenerateCrash(ON_REMOVE_7);
  prev.Store();
  GenerateCrash(ON_REMOVE_8);
  node->Store();
  DecrementCounter(list);
  UpdateIterators(&next);
  UpdateIterators(&prev);
  backend_->FlushIndex();
}

// A crash in between Remove and Insert will leave a dirty entry that is not on
// the list. We want to avoid that case as much as we can (for example, while
// waiting for IO), but the net effect is just an assert on debug builds when
// attempting to remove the entry. Otherwise we would need reentrant
// transactions, which would be overkill.
void Rankings::UpdateRank(CacheRankingsBlock* node, bool modified, List list) {
  Addr& my_head = heads_[list];
  if (my_head.value() == node->address().value()) {
    UpdateTimes(node, modified);
    node->set_modified();
    return;
  }

  TimeTicks start = TimeTicks::Now();
  Remove(node, list, true);
  Insert(node, modified, list);
  CACHE_UMA(AGE_MS, "UpdateRank", 0, start);
}
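
// Returns the node that follows |node| on |list|, walking from head to tail;
// a NULL |node| starts the walk at the head. The returned block is tracked so
// that live iterators stay in sync with later list updates.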
CacheRankingsBlock* Rankings::GetNext(CacheRankingsBlock* node, List list) {
  ScopedRankingsBlock next(this);
  if (!node) {
    Addr& my_head = heads_[list];
    if (!my_head.is_initialized())
      return NULL;
    next.reset(new CacheRankingsBlock(backend_->File(my_head), my_head));
  } else {
    if (!node->HasData())
      node->Load();
    Addr& my_tail = tails_[list];
    if (!my_tail.is_initialized())
      return NULL;
    if (my_tail.value() == node->address().value())
      return NULL;
    Addr address(node->Data()->next);
    if (address.value() == node->address().value())
      return NULL;  // Another tail? fail it.
    next.reset(new CacheRankingsBlock(backend_->File(address), address));
  }

  TrackRankingsBlock(next.get(), true);

  if (!GetRanking(next.get()))
    return NULL;

  ConvertToLongLived(next.get());
  if (node && !CheckSingleLink(node, next.get()))
    return NULL;

  return next.release();
}
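
// Same as GetNext(), but walks the list from tail to head.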
CacheRankingsBlock* Rankings::GetPrev(CacheRankingsBlock* node, List list) {
  ScopedRankingsBlock prev(this);
  if (!node) {
    Addr& my_tail = tails_[list];
    if (!my_tail.is_initialized())
      return NULL;
    prev.reset(new CacheRankingsBlock(backend_->File(my_tail), my_tail));
  } else {
    if (!node->HasData())
      node->Load();
    Addr& my_head = heads_[list];
    if (!my_head.is_initialized())
      return NULL;
    if (my_head.value() == node->address().value())
      return NULL;
    Addr address(node->Data()->prev);
    if (address.value() == node->address().value())
      return NULL;  // Another head? fail it.
    prev.reset(new CacheRankingsBlock(backend_->File(address), address));
  }

  TrackRankingsBlock(prev.get(), true);

  if (!GetRanking(prev.get()))
    return NULL;

  ConvertToLongLived(prev.get());
  if (node && !CheckSingleLink(prev.get(), node))
    return NULL;

  return prev.release();
}

void Rankings::FreeRankingsBlock(CacheRankingsBlock* node) {
  TrackRankingsBlock(node, false);
}
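
// Registers or unregisters |node| on the list of blocks referenced by cache
// iterators, keyed by its cache address.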
void Rankings::TrackRankingsBlock(CacheRankingsBlock* node,
                                  bool start_tracking) {
  if (!node)
    return;

  IteratorPair current(node->address().value(), node);

  if (start_tracking)
    iterators_.push_back(current);
  else
    iterators_.remove(current);
}
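
// Returns the total number of items across all lists, or the first negative
// error code reported by CheckList().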
int Rankings::SelfCheck() {
  int total = 0;
  int error = 0;
  for (int i = 0; i < LAST_ELEMENT; i++) {
    int partial = CheckList(static_cast<List>(i));
    if (partial < 0 && !error)
      error = partial;
    else if (partial > 0)
      total += partial;
  }

  return error ? error : total;
}

bool Rankings::SanityCheck(CacheRankingsBlock* node, bool from_list) const {
  if (!node->VerifyHash())
    return false;

  const RankingsNode* data = node->Data();

  if ((!data->next && data->prev) || (data->next && !data->prev))
    return false;

  // Both pointers on zero is a node out of the list.
  if (!data->next && !data->prev && from_list)
    return false;

  List list = NO_USE;  // Initialize it to something.
  if ((node->address().value() == data->prev) && !IsHead(data->prev, &list))
    return false;

  if ((node->address().value() == data->next) && !IsTail(data->next, &list))
    return false;

  if (!data->next && !data->prev)
    return true;

  Addr next_addr(data->next);
  Addr prev_addr(data->prev);
  if (!next_addr.SanityCheckV2() || next_addr.file_type() != RANKINGS ||
      !prev_addr.SanityCheckV2() || prev_addr.file_type() != RANKINGS)
    return false;

  return true;
}

bool Rankings::DataSanityCheck(CacheRankingsBlock* node, bool from_list) const {
  const RankingsNode* data = node->Data();
  if (!data->contents)
    return false;

  // It may have never been inserted.
  if (from_list && (!data->last_used || !data->last_modified))
    return false;

  return true;
}

void Rankings::SetContents(CacheRankingsBlock* node, CacheAddr address) {
  node->Data()->contents = address;
  node->Store();
}

void Rankings::ReadHeads() {
  for (int i = 0; i < LAST_ELEMENT; i++)
    heads_[i] = Addr(control_data_->heads[i]);
}

void Rankings::ReadTails() {
  for (int i = 0; i < LAST_ELEMENT; i++)
    tails_[i] = Addr(control_data_->tails[i]);
}

void Rankings::WriteHead(List list) {
  control_data_->heads[list] = heads_[list].value();
}

void Rankings::WriteTail(List list) {
  control_data_->tails[list] = tails_[list].value();
}
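
// Loads and validates |rankings| from disk. If the entry that owns this node
// is already open, the block is redirected to share that entry's data.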
bool Rankings::GetRanking(CacheRankingsBlock* rankings) {
  if (!rankings->address().is_initialized())
    return false;

  TimeTicks start = TimeTicks::Now();
  if (!rankings->Load())
    return false;

  if (!SanityCheck(rankings, true)) {
    backend_->CriticalError(ERR_INVALID_LINKS);
    return false;
  }

  backend_->OnEvent(Stats::OPEN_RANKINGS);

  // Note that if the cache is in read_only mode, open entries are not marked
  // as dirty, except when an entry is doomed. We have to look for open entries.
  if (!backend_->read_only() && !rankings->Data()->dirty)
    return true;

  EntryImpl* entry = backend_->GetOpenEntry(rankings);
  if (!entry) {
    if (backend_->read_only())
      return true;

    // We cannot trust this entry, but we cannot initiate a cleanup from this
    // point (we may be in the middle of a cleanup already). The entry will be
    // deleted when detected from a regular open/create path.
    rankings->Data()->dirty = backend_->GetCurrentEntryId() - 1;
    if (!rankings->Data()->dirty)
      rankings->Data()->dirty--;
    return true;
  }

  // Note that we should not leave this module without deleting rankings first.
  rankings->SetData(entry->rankings()->Data());

  CACHE_UMA(AGE_MS, "GetRankings", 0, start);
  return true;
}

void Rankings::ConvertToLongLived(CacheRankingsBlock* rankings) {
  if (rankings->own_data())
    return;

  // We cannot return a shared node because we are not keeping a reference
  // to the entry that owns the buffer. Make this node a copy of the one that
  // we have, and let the iterator logic update it when the entry changes.
  CacheRankingsBlock temp(NULL, Addr(0));
  *temp.Data() = *rankings->Data();
  rankings->StopSharingData();
  *rankings->Data() = *temp.Data();
}

void Rankings::CompleteTransaction() {
  Addr node_addr(static_cast<CacheAddr>(control_data_->transaction));
  if (!node_addr.is_initialized() || node_addr.is_separate_file()) {
    NOTREACHED();
    LOG(ERROR) << "Invalid rankings info.";
    return;
  }

  Trace("CompleteTransaction 0x%x", node_addr.value());

  CacheRankingsBlock node(backend_->File(node_addr), node_addr);
  if (!node.Load())
    return;

  node.Store();

  Addr& my_head = heads_[control_data_->operation_list];
  Addr& my_tail = tails_[control_data_->operation_list];

  // We want to leave the node inside the list. The entry must be marked as
  // dirty, and will be removed later. Otherwise, we'll get assertions when
  // attempting to remove the dirty entry.
  if (INSERT == control_data_->operation) {
    Trace("FinishInsert h:0x%x t:0x%x", my_head.value(), my_tail.value());
    FinishInsert(&node);
  } else if (REMOVE == control_data_->operation) {
    Trace("RevertRemove h:0x%x t:0x%x", my_head.value(), my_tail.value());
    RevertRemove(&node);
  } else {
    NOTREACHED();
    LOG(ERROR) << "Invalid operation to recover.";
  }
}

void Rankings::FinishInsert(CacheRankingsBlock* node) {
  control_data_->transaction = 0;
  control_data_->operation = 0;
  Addr& my_head = heads_[control_data_->operation_list];
  Addr& my_tail = tails_[control_data_->operation_list];
  if (my_head.value() != node->address().value()) {
    if (my_tail.value() == node->address().value()) {
      // This part will be skipped by the logic of Insert.
      node->Data()->next = my_tail.value();
    }

    Insert(node, true, static_cast<List>(control_data_->operation_list));
  }

  // Tell the backend about this entry.
  backend_->RecoveredEntry(node);
}

void Rankings::RevertRemove(CacheRankingsBlock* node) {
  Addr next_addr(node->Data()->next);
  Addr prev_addr(node->Data()->prev);
  if (!next_addr.is_initialized() || !prev_addr.is_initialized()) {
    // The operation actually finished. Nothing to do.
    control_data_->transaction = 0;
    return;
  }
  if (next_addr.is_separate_file() || prev_addr.is_separate_file()) {
    NOTREACHED();
    LOG(WARNING) << "Invalid rankings info.";
    control_data_->transaction = 0;
    return;
  }

  CacheRankingsBlock next(backend_->File(next_addr), next_addr);
  CacheRankingsBlock prev(backend_->File(prev_addr), prev_addr);
  if (!next.Load() || !prev.Load())
    return;

  CacheAddr node_value = node->address().value();
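  // Depending on where the crash happened, each neighbour may still point to
  // |node|, to itself (the old head/tail convention), or already to the other
  // neighbour; all of these states are recoverable.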
  DCHECK(prev.Data()->next == node_value ||
         prev.Data()->next == prev_addr.value() ||
         prev.Data()->next == next.address().value());
  DCHECK(next.Data()->prev == node_value ||
         next.Data()->prev == next_addr.value() ||
         next.Data()->prev == prev.address().value());

  if (node_value != prev_addr.value())
    prev.Data()->next = node_value;
  if (node_value != next_addr.value())
    next.Data()->prev = node_value;

  List my_list = static_cast<List>(control_data_->operation_list);
  Addr& my_head = heads_[my_list];
  Addr& my_tail = tails_[my_list];
  if (!my_head.is_initialized() || !my_tail.is_initialized()) {
    my_head.set_value(node_value);
    my_tail.set_value(node_value);
    WriteHead(my_list);
    WriteTail(my_list);
  } else if (my_head.value() == next.address().value()) {
    my_head.set_value(node_value);
    prev.Data()->next = next.address().value();
    WriteHead(my_list);
  } else if (my_tail.value() == prev.address().value()) {
    my_tail.set_value(node_value);
    next.Data()->prev = prev.address().value();
    WriteTail(my_list);
  }

  next.Store();
  prev.Store();
  control_data_->transaction = 0;
  control_data_->operation = 0;
  backend_->FlushIndex();
}
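
// Verifies that |prev| and |next| actually link to |node| before it is
// removed. The head/tail self-reference cases are tolerated, and a node that
// is not really on the list is detached and reported as a failure.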
bool Rankings::CheckLinks(CacheRankingsBlock* node, CacheRankingsBlock* prev,
                          CacheRankingsBlock* next, List* list) {
  CacheAddr node_addr = node->address().value();
  if (prev->Data()->next == node_addr &&
      next->Data()->prev == node_addr) {
    // A regular linked node.
    return true;
  }

  Trace("CheckLinks 0x%x (0x%x 0x%x)", node_addr,
        prev->Data()->next, next->Data()->prev);

  if (node_addr != prev->address().value() &&
      node_addr != next->address().value() &&
      prev->Data()->next == next->address().value() &&
      next->Data()->prev == prev->address().value()) {
    // The list is actually ok, node is wrong.
    Trace("node 0x%x out of list %d", node_addr, *list);
    node->Data()->next = 0;
    node->Data()->prev = 0;
    node->Store();
    return false;
  }

  if (prev->Data()->next == node_addr ||
      next->Data()->prev == node_addr) {
    // Only one link is weird, let's double check.
    if (prev->Data()->next != node_addr && IsHead(node_addr, list))
      return true;

    if (next->Data()->prev != node_addr && IsTail(node_addr, list))
      return true;
  }

  LOG(ERROR) << "Inconsistent LRU.";
  STRESS_NOTREACHED();

  backend_->CriticalError(ERR_INVALID_LINKS);
  return false;
}

bool Rankings::CheckSingleLink(CacheRankingsBlock* prev,
                               CacheRankingsBlock* next) {
  if (prev->Data()->next != next->address().value() ||
      next->Data()->prev != prev->address().value()) {
    LOG(ERROR) << "Inconsistent LRU.";

    backend_->CriticalError(ERR_INVALID_LINKS);
    return false;
  }

  return true;
}

int Rankings::CheckList(List list) {
  Addr last1, last2;
  int head_items;
  int rv = CheckListSection(list, last1, last2, true,  // Head to tail.
                            &last1, &last2, &head_items);
  if (rv == ERR_NO_ERROR)
    return head_items;

  return rv;
}

// Note that the returned error codes assume a forward walk (from head to tail)
// so they have to be adjusted accordingly by the caller. We use two stop values
// to be able to detect a corrupt node at the end that is not linked going back.
int Rankings::CheckListSection(List list, Addr end1, Addr end2, bool forward,
                               Addr* last, Addr* second_last, int* num_items) {
  Addr current = forward ? heads_[list] : tails_[list];
  *last = *second_last = current;
  *num_items = 0;
  if (!current.is_initialized())
    return ERR_NO_ERROR;

  if (!current.SanityCheckForRankings())
    return ERR_INVALID_HEAD;

  scoped_ptr<CacheRankingsBlock> node;
  Addr prev_addr(current);
  do {
    node.reset(new CacheRankingsBlock(backend_->File(current), current));
    node->Load();
    if (!SanityCheck(node.get(), true))
      return ERR_INVALID_ENTRY;

    CacheAddr next = forward ? node->Data()->next : node->Data()->prev;
    CacheAddr prev = forward ? node->Data()->prev : node->Data()->next;

    if (prev != prev_addr.value())
      return ERR_INVALID_PREV;

    Addr next_addr(next);
    if (!next_addr.SanityCheckForRankings())
      return ERR_INVALID_NEXT;

    prev_addr = current;
    current = next_addr;
    *second_last = *last;
    *last = current;
    (*num_items)++;
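
    // A node that points back at itself marks the end of the walk; that is
    // only valid if it is the expected tail (or head, when walking backwards).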
    if (next_addr == prev_addr) {
      Addr last = forward ? tails_[list] : heads_[list];
      if (next_addr == last)
        return ERR_NO_ERROR;
      return ERR_INVALID_TAIL;
    }
  } while (current != end1 && current != end2);
  return ERR_NO_ERROR;
}

bool Rankings::IsHead(CacheAddr addr, List* list) const {
  for (int i = 0; i < LAST_ELEMENT; i++) {
    if (addr == heads_[i].value()) {
      if (*list != i)
        Trace("Changing list %d to %d", *list, i);
      *list = static_cast<List>(i);
      return true;
    }
  }
  return false;
}

bool Rankings::IsTail(CacheAddr addr, List* list) const {
  for (int i = 0; i < LAST_ELEMENT; i++) {
    if (addr == tails_[i].value()) {
      if (*list != i)
        Trace("Changing list %d to %d", *list, i);
      *list = static_cast<List>(i);
      return true;
    }
  }
  return false;
}

// We expect to have just a few iterators at any given time, maybe two or
// three, but we could have more than one pointing at the same node. We walk
// the list of cache iterators and update all that are pointing to the given
// node.
void Rankings::UpdateIterators(CacheRankingsBlock* node) {
  CacheAddr address = node->address().value();
  for (IteratorList::iterator it = iterators_.begin(); it != iterators_.end();
       ++it) {
    if (it->first == address && it->second->HasData()) {
      CacheRankingsBlock* other = it->second;
      *other->Data() = *node->Data();
    }
  }
}
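
// Discards the local copy of the data held by any iterator that points to
// |node| (used when the node is being removed from its list).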
void Rankings::InvalidateIterators(CacheRankingsBlock* node) {
  CacheAddr address = node->address().value();
  for (IteratorList::iterator it = iterators_.begin(); it != iterators_.end();
       ++it) {
    if (it->first == address)
      it->second->Discard();
  }
}

void Rankings::IncrementCounter(List list) {
  if (!count_lists_)
    return;

  DCHECK(control_data_->sizes[list] < kint32max);
  if (control_data_->sizes[list] < kint32max)
    control_data_->sizes[list]++;
}

void Rankings::DecrementCounter(List list) {
  if (!count_lists_)
    return;

  DCHECK(control_data_->sizes[list] > 0);
  if (control_data_->sizes[list] > 0)
    control_data_->sizes[list]--;
}

}  // namespace disk_cache