Fix build break
sync/syncable/directory.cc
// Copyright 2013 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "sync/syncable/directory.h"

#include "base/base64.h"
#include "base/debug/trace_event.h"
#include "base/stl_util.h"
#include "base/string_number_conversions.h"
#include "sync/internal_api/public/base/unique_position.h"
#include "sync/internal_api/public/util/unrecoverable_error_handler.h"
#include "sync/syncable/entry.h"
#include "sync/syncable/entry_kernel.h"
#include "sync/syncable/in_memory_directory_backing_store.h"
#include "sync/syncable/on_disk_directory_backing_store.h"
#include "sync/syncable/scoped_index_updater.h"
#include "sync/syncable/scoped_parent_child_index_updater.h"
#include "sync/syncable/syncable-inl.h"
#include "sync/syncable/syncable_base_transaction.h"
#include "sync/syncable/syncable_changes_version.h"
#include "sync/syncable/syncable_read_transaction.h"
#include "sync/syncable/syncable_util.h"
#include "sync/syncable/syncable_write_transaction.h"

using std::string;

namespace syncer {
namespace syncable {

namespace {
// Helper function to add an item to the index, if it ought to be added.
template<typename Indexer>
void InitializeIndexEntry(EntryKernel* entry,
                          typename Index<Indexer>::Set* index) {
  if (Indexer::ShouldInclude(entry)) {
    index->insert(entry);
  }
}
}  // namespace

// static
bool ClientTagIndexer::ShouldInclude(const EntryKernel* a) {
  return !a->ref(UNIQUE_CLIENT_TAG).empty();
}

// static
const base::FilePath::CharType Directory::kSyncDatabaseFilename[] =
    FILE_PATH_LITERAL("SyncData.sqlite3");

void Directory::InitKernelForTest(
    const std::string& name,
    DirectoryChangeDelegate* delegate,
    const WeakHandle<TransactionObserver>& transaction_observer) {
  DCHECK(!kernel_);
  kernel_ = new Kernel(name, KernelLoadInfo(), delegate, transaction_observer);
}

Directory::PersistedKernelInfo::PersistedKernelInfo()
    : next_id(0) {
  ModelTypeSet protocol_types = ProtocolTypes();
  for (ModelTypeSet::Iterator iter = protocol_types.First(); iter.Good();
       iter.Inc()) {
    reset_download_progress(iter.Get());
    transaction_version[iter.Get()] = 0;
  }
}

Directory::PersistedKernelInfo::~PersistedKernelInfo() {}

void Directory::PersistedKernelInfo::reset_download_progress(
    ModelType model_type) {
  download_progress[model_type].set_data_type_id(
      GetSpecificsFieldNumberFromModelType(model_type));
  // An empty-string token indicates no prior knowledge.
  download_progress[model_type].set_token(std::string());
}

Directory::SaveChangesSnapshot::SaveChangesSnapshot()
    : kernel_info_status(KERNEL_SHARE_INFO_INVALID) {
}

Directory::SaveChangesSnapshot::~SaveChangesSnapshot() {
  STLDeleteElements(&dirty_metas);
  STLDeleteElements(&delete_journals);
}

Directory::Kernel::Kernel(
    const std::string& name,
    const KernelLoadInfo& info, DirectoryChangeDelegate* delegate,
    const WeakHandle<TransactionObserver>& transaction_observer)
    : next_write_transaction_id(0),
      name(name),
      metahandles_index(new Directory::MetahandlesIndex),
      ids_index(new Directory::IdsIndex),
      parent_child_index(new ParentChildIndex),
      client_tag_index(new Directory::ClientTagIndex),
      unsynced_metahandles(new MetahandleSet),
      dirty_metahandles(new MetahandleSet),
      metahandles_to_purge(new MetahandleSet),
      info_status(Directory::KERNEL_SHARE_INFO_VALID),
      persisted_info(info.kernel_info),
      cache_guid(info.cache_guid),
      next_metahandle(info.max_metahandle + 1),
      delegate(delegate),
      transaction_observer(transaction_observer) {
  DCHECK(delegate);
  DCHECK(transaction_observer.IsInitialized());
}

Directory::Kernel::~Kernel() {
  delete unsynced_metahandles;
  delete dirty_metahandles;
  delete metahandles_to_purge;
  delete parent_child_index;
  delete client_tag_index;
  delete ids_index;
  STLDeleteElements(metahandles_index);
  delete metahandles_index;
}

Directory::Directory(
    DirectoryBackingStore* store,
    UnrecoverableErrorHandler* unrecoverable_error_handler,
    ReportUnrecoverableErrorFunction report_unrecoverable_error_function,
    NigoriHandler* nigori_handler,
    Cryptographer* cryptographer)
    : kernel_(NULL),
      store_(store),
      unrecoverable_error_handler_(unrecoverable_error_handler),
      report_unrecoverable_error_function_(
          report_unrecoverable_error_function),
      unrecoverable_error_set_(false),
      nigori_handler_(nigori_handler),
      cryptographer_(cryptographer),
      invariant_check_level_(VERIFY_CHANGES) {
}

Directory::~Directory() {
  Close();
}

DirOpenResult Directory::Open(
    const string& name,
    DirectoryChangeDelegate* delegate,
    const WeakHandle<TransactionObserver>& transaction_observer) {
  TRACE_EVENT0("sync", "SyncDatabaseOpen");

  const DirOpenResult result =
      OpenImpl(name, delegate, transaction_observer);

  if (OPENED != result)
    Close();
  return result;
}

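// Rebuilds the in-memory indices (parent/child, IDs, client tags, and the
// unsynced/unapplied-update metahandle sets) from the freshly loaded
// metahandles index. Called once from OpenImpl(), after the backing store
// has been read.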
void Directory::InitializeIndices() {
  MetahandlesIndex::iterator it = kernel_->metahandles_index->begin();
  for (; it != kernel_->metahandles_index->end(); ++it) {
    EntryKernel* entry = *it;
    if (ParentChildIndex::ShouldInclude(entry))
      kernel_->parent_child_index->Insert(entry);
    InitializeIndexEntry<IdIndexer>(entry, kernel_->ids_index);
    InitializeIndexEntry<ClientTagIndexer>(entry, kernel_->client_tag_index);
    const int64 metahandle = entry->ref(META_HANDLE);
    if (entry->ref(IS_UNSYNCED))
      kernel_->unsynced_metahandles->insert(metahandle);
    if (entry->ref(IS_UNAPPLIED_UPDATE)) {
      const ModelType type = entry->GetServerModelType();
      kernel_->unapplied_update_metahandles[type].insert(metahandle);
    }
    DCHECK(!entry->is_dirty());
  }
}

DirOpenResult Directory::OpenImpl(
    const string& name,
    DirectoryChangeDelegate* delegate,
    const WeakHandle<TransactionObserver>&
        transaction_observer) {
  KernelLoadInfo info;
  // Temporary indices, populated before kernel_ is initialized in case Load
  // fails. We swap these into the kernel in O(1) later.
  MetahandlesIndex metas_bucket;
  JournalIndex delete_journals;

  DirOpenResult result = store_->Load(&metas_bucket, &delete_journals, &info);
  if (OPENED != result)
    return result;

  kernel_ = new Kernel(name, info, delegate, transaction_observer);
  kernel_->metahandles_index->swap(metas_bucket);
  delete_journal_.reset(new DeleteJournal(&delete_journals));
  InitializeIndices();

  // Write back the share info to reserve some space in 'next_id'. This will
  // prevent local ID reuse in the case of an early crash. See the comments in
  // TakeSnapshotForSaveChanges() or crbug.com/142987 for more information.
  kernel_->info_status = KERNEL_SHARE_INFO_DIRTY;
  if (!SaveChanges())
    return FAILED_INITIAL_WRITE;

  return OPENED;
}

DeleteJournal* Directory::delete_journal() {
  DCHECK(delete_journal_.get());
  return delete_journal_.get();
}

void Directory::Close() {
  store_.reset();
  if (kernel_) {
    delete kernel_;
    kernel_ = NULL;
  }
}

void Directory::OnUnrecoverableError(const BaseTransaction* trans,
                                     const tracked_objects::Location& location,
                                     const std::string& message) {
  DCHECK(trans != NULL);
  unrecoverable_error_set_ = true;
  unrecoverable_error_handler_->OnUnrecoverableError(location,
                                                     message);
}

EntryKernel* Directory::GetEntryById(const Id& id) {
  ScopedKernelLock lock(this);
  return GetEntryById(id, &lock);
}

EntryKernel* Directory::GetEntryById(const Id& id,
                                     ScopedKernelLock* const lock) {
  DCHECK(kernel_);
  // Find it in the in-memory ID index.
  kernel_->needle.put(ID, id);
  IdsIndex::iterator id_found = kernel_->ids_index->find(&kernel_->needle);
  if (id_found != kernel_->ids_index->end()) {
    return *id_found;
  }
  return NULL;
}

EntryKernel* Directory::GetEntryByClientTag(const string& tag) {
  ScopedKernelLock lock(this);
  DCHECK(kernel_);
  // Find it in the ClientTagIndex.
  kernel_->needle.put(UNIQUE_CLIENT_TAG, tag);
  ClientTagIndex::iterator found = kernel_->client_tag_index->find(
      &kernel_->needle);
  if (found != kernel_->client_tag_index->end()) {
    return *found;
  }
  return NULL;
}

EntryKernel* Directory::GetEntryByServerTag(const string& tag) {
  ScopedKernelLock lock(this);
  DCHECK(kernel_);
  // We don't currently keep a separate index for the tags. Since tags
  // only exist for server created items that are the first items
  // to be created in a store, they should have small metahandles.
  // So, we just iterate over the items in sorted metahandle order,
  // looking for a match.
  MetahandlesIndex& set = *kernel_->metahandles_index;
  for (MetahandlesIndex::iterator i = set.begin(); i != set.end(); ++i) {
    if ((*i)->ref(UNIQUE_SERVER_TAG) == tag) {
      return *i;
    }
  }
  return NULL;
}

EntryKernel* Directory::GetEntryByHandle(int64 metahandle) {
  ScopedKernelLock lock(this);
  return GetEntryByHandle(metahandle, &lock);
}

EntryKernel* Directory::GetEntryByHandle(int64 metahandle,
                                         ScopedKernelLock* lock) {
  // Look up in memory.
  kernel_->needle.put(META_HANDLE, metahandle);
  MetahandlesIndex::iterator found =
      kernel_->metahandles_index->find(&kernel_->needle);
  if (found != kernel_->metahandles_index->end()) {
    // Found it in memory. Easy.
    return *found;
  }
  return NULL;
}

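// Fills |result| with the metahandles of |parent_id|'s children. Returns
// false only when |trans| belongs to a different directory; an unknown or
// childless parent yields an empty result and returns true.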
bool Directory::GetChildHandlesById(
    BaseTransaction* trans, const Id& parent_id,
    Directory::ChildHandles* result) {
  if (!SyncAssert(this == trans->directory(), FROM_HERE,
                  "Directories don't match", trans))
    return false;
  result->clear();

  ScopedKernelLock lock(this);
  AppendChildHandles(lock, parent_id, result);
  return true;
}

bool Directory::GetChildHandlesByHandle(
    BaseTransaction* trans, int64 handle,
    Directory::ChildHandles* result) {
  if (!SyncAssert(this == trans->directory(), FROM_HERE,
                  "Directories don't match", trans))
    return false;

  result->clear();

  ScopedKernelLock lock(this);
  EntryKernel* kernel = GetEntryByHandle(handle, &lock);
  if (!kernel)
    return true;

  AppendChildHandles(lock, kernel->ref(ID), result);
  return true;
}

EntryKernel* Directory::GetRootEntry() {
  return GetEntryById(Id());
}

bool Directory::InsertEntry(WriteTransaction* trans, EntryKernel* entry) {
  ScopedKernelLock lock(this);
  return InsertEntry(trans, entry, &lock);
}

bool Directory::InsertEntry(WriteTransaction* trans,
                            EntryKernel* entry,
                            ScopedKernelLock* lock) {
  DCHECK(NULL != lock);
  if (!SyncAssert(NULL != entry, FROM_HERE, "Entry is null", trans))
    return false;

  static const char error[] = "Entry already in memory index.";

  if (!SyncAssert(kernel_->metahandles_index->insert(entry).second,
                  FROM_HERE,
                  error,
                  trans))
    return false;

  if (ParentChildIndex::ShouldInclude(entry)) {
    if (!SyncAssert(kernel_->parent_child_index->Insert(entry),
                    FROM_HERE,
                    error,
                    trans)) {
      return false;
    }
  }
  if (!SyncAssert(kernel_->ids_index->insert(entry).second,
                  FROM_HERE,
                  error,
                  trans))
    return false;

  // Should NEVER be created with a client tag.
  if (!SyncAssert(entry->ref(UNIQUE_CLIENT_TAG).empty(), FROM_HERE,
                  "Client should be empty", trans))
    return false;

  return true;
}

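// ReindexId() and ReindexParentId() below rely on the scoped updater
// pattern: the updaters take the entry out of the affected indices for the
// duration of the inner scope, the key field is mutated, and the entry is
// re-inserted under its new key when the scope exits.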
bool Directory::ReindexId(WriteTransaction* trans,
                          EntryKernel* const entry,
                          const Id& new_id) {
  ScopedKernelLock lock(this);
  if (NULL != GetEntryById(new_id, &lock))
    return false;

  {
    // Update the indices that depend on the ID field.
    ScopedIndexUpdater<IdIndexer> updater_a(lock, entry, kernel_->ids_index);
    ScopedParentChildIndexUpdater updater_b(lock, entry,
        kernel_->parent_child_index);
    entry->put(ID, new_id);
  }

  return true;
}

bool Directory::ReindexParentId(WriteTransaction* trans,
                                EntryKernel* const entry,
                                const Id& new_parent_id) {
  ScopedKernelLock lock(this);

  {
    // Update the indices that depend on the PARENT_ID field.
    ScopedParentChildIndexUpdater index_updater(lock, entry,
        kernel_->parent_child_index);
    entry->put(PARENT_ID, new_parent_id);
  }

  return true;
}

bool Directory::unrecoverable_error_set(const BaseTransaction* trans) const {
  DCHECK(trans != NULL);
  return unrecoverable_error_set_;
}

void Directory::ClearDirtyMetahandles() {
  kernel_->transaction_mutex.AssertAcquired();
  kernel_->dirty_metahandles->clear();
}

bool Directory::SafeToPurgeFromMemory(WriteTransaction* trans,
                                      const EntryKernel* const entry) const {
  bool safe = entry->ref(IS_DEL) && !entry->is_dirty() &&
      !entry->ref(SYNCING) && !entry->ref(IS_UNAPPLIED_UPDATE) &&
      !entry->ref(IS_UNSYNCED);

  if (safe) {
    int64 handle = entry->ref(META_HANDLE);
    const ModelType type = entry->GetServerModelType();
    if (!SyncAssert(kernel_->dirty_metahandles->count(handle) == 0U,
                    FROM_HERE,
                    "Dirty metahandles should be empty", trans))
      return false;
    // TODO(tim): Bug 49278.
    if (!SyncAssert(!kernel_->unsynced_metahandles->count(handle),
                    FROM_HERE,
                    "Unsynced handles should be empty",
                    trans))
      return false;
    if (!SyncAssert(!kernel_->unapplied_update_metahandles[type].count(handle),
                    FROM_HERE,
                    "Unapplied metahandles should be empty",
                    trans))
      return false;
  }

  return safe;
}

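// Copies all dirty state into |snapshot| under a read transaction and clears
// the in-memory dirty bits. If the subsequent store write fails, the bits
// are restored by HandleSaveChangesFailure() below.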
void Directory::TakeSnapshotForSaveChanges(SaveChangesSnapshot* snapshot) {
  ReadTransaction trans(FROM_HERE, this);
  ScopedKernelLock lock(this);

  // If there is an unrecoverable error then just bail out.
  if (unrecoverable_error_set(&trans))
    return;

  // Deep copy dirty entries from kernel_->metahandles_index into snapshot and
  // clear dirty flags.
  for (MetahandleSet::const_iterator i = kernel_->dirty_metahandles->begin();
       i != kernel_->dirty_metahandles->end(); ++i) {
    EntryKernel* entry = GetEntryByHandle(*i, &lock);
    if (!entry)
      continue;
    // Skip over false positives; it happens relatively infrequently.
    if (!entry->is_dirty())
      continue;
    snapshot->dirty_metas.insert(snapshot->dirty_metas.end(),
                                 new EntryKernel(*entry));
    DCHECK_EQ(1U, kernel_->dirty_metahandles->count(*i));
    // We don't bother removing from the index here as we blow the entire thing
    // in a moment, and it unnecessarily complicates iteration.
    entry->clear_dirty(NULL);
  }
  ClearDirtyMetahandles();

  // Set purged handles.
  DCHECK(snapshot->metahandles_to_purge.empty());
  snapshot->metahandles_to_purge.swap(*(kernel_->metahandles_to_purge));

  // Fill kernel_info_status and kernel_info.
  snapshot->kernel_info = kernel_->persisted_info;
  // To avoid duplicates when the process crashes, we record a next_id of
  // greater magnitude than could possibly be reached before the next save
  // changes. In other words, it's effectively impossible for the user to
  // generate 65536 new bookmarks in 3 seconds.
  snapshot->kernel_info.next_id -= 65536;
  snapshot->kernel_info_status = kernel_->info_status;
  // This one we reset on failure.
  kernel_->info_status = KERNEL_SHARE_INFO_VALID;

  delete_journal_->TakeSnapshotAndClear(
      &trans, &snapshot->delete_journals, &snapshot->delete_journals_to_purge);
}

bool Directory::SaveChanges() {
  bool success = false;

  base::AutoLock scoped_lock(kernel_->save_changes_mutex);

  // Snapshot and save.
  SaveChangesSnapshot snapshot;
  TakeSnapshotForSaveChanges(&snapshot);
  success = store_->SaveChanges(snapshot);

  // Handle success or failure.
  if (success)
    success = VacuumAfterSaveChanges(snapshot);
  else
    HandleSaveChangesFailure(snapshot);
  return success;
}

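// After a successful save, entries that are deleted and fully synced no
// longer need to stay in memory; this drops them from every index and frees
// their kernels.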
bool Directory::VacuumAfterSaveChanges(const SaveChangesSnapshot& snapshot) {
  if (snapshot.dirty_metas.empty())
    return true;

  // Need a write transaction as we are about to permanently purge entries.
  WriteTransaction trans(FROM_HERE, VACUUM_AFTER_SAVE, this);
  ScopedKernelLock lock(this);
  // Now drop everything we can out of memory.
  for (EntryKernelSet::const_iterator i = snapshot.dirty_metas.begin();
       i != snapshot.dirty_metas.end(); ++i) {
    kernel_->needle.put(META_HANDLE, (*i)->ref(META_HANDLE));
    MetahandlesIndex::iterator found =
        kernel_->metahandles_index->find(&kernel_->needle);
    EntryKernel* entry = (found == kernel_->metahandles_index->end() ?
                          NULL : *found);
    if (entry && SafeToPurgeFromMemory(&trans, entry)) {
      // We now drop deleted metahandles that are up to date on both the client
      // and the server.
      size_t num_erased = 0;
      num_erased = kernel_->ids_index->erase(entry);
      DCHECK_EQ(1u, num_erased);
      num_erased = kernel_->metahandles_index->erase(entry);
      DCHECK_EQ(1u, num_erased);

      // Might not be in it.
      num_erased = kernel_->client_tag_index->erase(entry);
      DCHECK_EQ(entry->ref(UNIQUE_CLIENT_TAG).empty(), !num_erased);
      if (!SyncAssert(!kernel_->parent_child_index->Contains(entry),
                      FROM_HERE,
                      "Deleted entry still present",
                      (&trans)))
        return false;
      delete entry;
    }
    if (trans.unrecoverable_error_set())
      return false;
  }
  return true;
}

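// Removes every entry whose local or server specifics belong to one of
// |types|, recording the purged metahandles so the next SaveChanges() can
// delete the corresponding rows from the backing store. Entries of types
// listed in |types_to_journal| are handed to the delete journal instead of
// being freed outright.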
bool Directory::PurgeEntriesWithTypeIn(ModelTypeSet types,
                                       ModelTypeSet types_to_journal) {
  types.RemoveAll(ProxyTypes());

  if (types.Empty())
    return true;

  {
    WriteTransaction trans(FROM_HERE, PURGE_ENTRIES, this);

    EntryKernelSet entries_to_journal;
    STLElementDeleter<EntryKernelSet> journal_deleter(&entries_to_journal);

    {
      ScopedKernelLock lock(this);
      MetahandlesIndex::iterator it = kernel_->metahandles_index->begin();
      while (it != kernel_->metahandles_index->end()) {
        const sync_pb::EntitySpecifics& local_specifics = (*it)->ref(SPECIFICS);
        const sync_pb::EntitySpecifics& server_specifics =
            (*it)->ref(SERVER_SPECIFICS);
        ModelType local_type = GetModelTypeFromSpecifics(local_specifics);
        ModelType server_type = GetModelTypeFromSpecifics(server_specifics);

        // Note the dance around incrementing |it|, since we sometimes erase().
        if ((IsRealDataType(local_type) && types.Has(local_type)) ||
            (IsRealDataType(server_type) && types.Has(server_type))) {
          int64 handle = (*it)->ref(META_HANDLE);
          kernel_->metahandles_to_purge->insert(handle);

          size_t num_erased = 0;
          EntryKernel* entry = *it;
          num_erased = kernel_->ids_index->erase(entry);
          DCHECK_EQ(1u, num_erased);
          num_erased = kernel_->client_tag_index->erase(entry);
          DCHECK_EQ(entry->ref(UNIQUE_CLIENT_TAG).empty(), !num_erased);
          num_erased = kernel_->unsynced_metahandles->erase(handle);
          DCHECK_EQ(entry->ref(IS_UNSYNCED), num_erased > 0);
          num_erased =
              kernel_->unapplied_update_metahandles[server_type].erase(handle);
          DCHECK_EQ(entry->ref(IS_UNAPPLIED_UPDATE), num_erased > 0);
          if (kernel_->parent_child_index->Contains(entry))
            kernel_->parent_child_index->Remove(entry);
          kernel_->metahandles_index->erase(it++);

          if ((types_to_journal.Has(local_type) ||
               types_to_journal.Has(server_type)) &&
              (delete_journal_->IsDeleteJournalEnabled(local_type) ||
               delete_journal_->IsDeleteJournalEnabled(server_type))) {
            entries_to_journal.insert(entry);
          } else {
            delete entry;
          }
        } else {
          ++it;
        }
      }

      delete_journal_->AddJournalBatch(&trans, entries_to_journal);

      // Ensure meta tracking for these data types reflects the deleted state.
      for (ModelTypeSet::Iterator it = types.First();
           it.Good(); it.Inc()) {
        kernel_->persisted_info.reset_download_progress(it.Get());
        kernel_->persisted_info.transaction_version[it.Get()] = 0;
      }
    }
  }
  return true;
}

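// Invoked when the backing store rejects a SaveChanges() write. Everything
// captured in |snapshot| is marked dirty again so that a later SaveChanges()
// will retry the same data.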
void Directory::HandleSaveChangesFailure(const SaveChangesSnapshot& snapshot) {
  WriteTransaction trans(FROM_HERE, HANDLE_SAVE_FAILURE, this);
  ScopedKernelLock lock(this);
  kernel_->info_status = KERNEL_SHARE_INFO_DIRTY;

  // Because we optimistically cleared the dirty bit on the real entries when
  // taking the snapshot, we must restore it on failure. Not doing this could
  // cause lost data, if no other changes are made to the in-memory entries
  // that would cause the dirty bit to get set again. Setting the bit ensures
  // that SaveChanges will at least try again later.
  for (EntryKernelSet::const_iterator i = snapshot.dirty_metas.begin();
       i != snapshot.dirty_metas.end(); ++i) {
    kernel_->needle.put(META_HANDLE, (*i)->ref(META_HANDLE));
    MetahandlesIndex::iterator found =
        kernel_->metahandles_index->find(&kernel_->needle);
    if (found != kernel_->metahandles_index->end()) {
      (*found)->mark_dirty(kernel_->dirty_metahandles);
    }
  }

  kernel_->metahandles_to_purge->insert(snapshot.metahandles_to_purge.begin(),
                                        snapshot.metahandles_to_purge.end());

  // Restore delete journals.
  delete_journal_->AddJournalBatch(&trans, snapshot.delete_journals);
  delete_journal_->PurgeDeleteJournals(&trans,
                                       snapshot.delete_journals_to_purge);
}

void Directory::GetDownloadProgress(
    ModelType model_type,
    sync_pb::DataTypeProgressMarker* value_out) const {
  ScopedKernelLock lock(this);
  return value_out->CopyFrom(
      kernel_->persisted_info.download_progress[model_type]);
}

void Directory::GetDownloadProgressAsString(
    ModelType model_type,
    std::string* value_out) const {
  ScopedKernelLock lock(this);
  kernel_->persisted_info.download_progress[model_type].SerializeToString(
      value_out);
}

size_t Directory::GetEntriesCount() const {
  ScopedKernelLock lock(this);
  return kernel_->metahandles_index ? kernel_->metahandles_index->size() : 0;
}

void Directory::SetDownloadProgress(
    ModelType model_type,
    const sync_pb::DataTypeProgressMarker& new_progress) {
  ScopedKernelLock lock(this);
  kernel_->persisted_info.download_progress[model_type].CopyFrom(new_progress);
  kernel_->info_status = KERNEL_SHARE_INFO_DIRTY;
}

int64 Directory::GetTransactionVersion(ModelType type) const {
  kernel_->transaction_mutex.AssertAcquired();
  return kernel_->persisted_info.transaction_version[type];
}

void Directory::IncrementTransactionVersion(ModelType type) {
  kernel_->transaction_mutex.AssertAcquired();
  kernel_->persisted_info.transaction_version[type]++;
}

ModelTypeSet Directory::InitialSyncEndedTypes() {
  syncable::ReadTransaction trans(FROM_HERE, this);
  ModelTypeSet protocol_types = ProtocolTypes();
  ModelTypeSet initial_sync_ended_types;
  for (ModelTypeSet::Iterator i = protocol_types.First(); i.Good(); i.Inc()) {
    if (InitialSyncEndedForType(&trans, i.Get())) {
      initial_sync_ended_types.Put(i.Get());
    }
  }
  return initial_sync_ended_types;
}

bool Directory::InitialSyncEndedForType(ModelType type) {
  syncable::ReadTransaction trans(FROM_HERE, this);
  return InitialSyncEndedForType(&trans, type);
}

bool Directory::InitialSyncEndedForType(
    BaseTransaction* trans, ModelType type) {
  // True iff the type's root node has been received and applied.
  syncable::Entry entry(trans,
                        syncable::GET_BY_SERVER_TAG,
                        ModelTypeToRootTag(type));
  return entry.good() && entry.Get(syncable::BASE_VERSION) != CHANGES_VERSION;
}

string Directory::store_birthday() const {
  ScopedKernelLock lock(this);
  return kernel_->persisted_info.store_birthday;
}

void Directory::set_store_birthday(const string& store_birthday) {
  ScopedKernelLock lock(this);
  if (kernel_->persisted_info.store_birthday == store_birthday)
    return;
  kernel_->persisted_info.store_birthday = store_birthday;
  kernel_->info_status = KERNEL_SHARE_INFO_DIRTY;
}

string Directory::bag_of_chips() const {
  ScopedKernelLock lock(this);
  return kernel_->persisted_info.bag_of_chips;
}

void Directory::set_bag_of_chips(const string& bag_of_chips) {
  ScopedKernelLock lock(this);
  if (kernel_->persisted_info.bag_of_chips == bag_of_chips)
    return;
  kernel_->persisted_info.bag_of_chips = bag_of_chips;
  kernel_->info_status = KERNEL_SHARE_INFO_DIRTY;
}

string Directory::cache_guid() const {
  // No need to lock since nothing ever writes to it after load.
  return kernel_->cache_guid;
}

NigoriHandler* Directory::GetNigoriHandler() {
  return nigori_handler_;
}

Cryptographer* Directory::GetCryptographer(const BaseTransaction* trans) {
  DCHECK_EQ(this, trans->directory());
  return cryptographer_;
}

void Directory::GetAllMetaHandles(BaseTransaction* trans,
                                  MetahandleSet* result) {
  result->clear();
  ScopedKernelLock lock(this);
  MetahandlesIndex::iterator i;
  for (i = kernel_->metahandles_index->begin();
       i != kernel_->metahandles_index->end();
       ++i) {
    result->insert((*i)->ref(META_HANDLE));
  }
}

void Directory::GetAllEntryKernels(BaseTransaction* trans,
                                   std::vector<const EntryKernel*>* result) {
  result->clear();
  ScopedKernelLock lock(this);
  result->insert(result->end(),
                 kernel_->metahandles_index->begin(),
                 kernel_->metahandles_index->end());
}

void Directory::GetUnsyncedMetaHandles(BaseTransaction* trans,
                                       UnsyncedMetaHandles* result) {
  result->clear();
  ScopedKernelLock lock(this);
  copy(kernel_->unsynced_metahandles->begin(),
       kernel_->unsynced_metahandles->end(), back_inserter(*result));
}

int64 Directory::unsynced_entity_count() const {
  ScopedKernelLock lock(this);
  return kernel_->unsynced_metahandles->size();
}

FullModelTypeSet Directory::GetServerTypesWithUnappliedUpdates(
    BaseTransaction* trans) const {
  FullModelTypeSet server_types;
  ScopedKernelLock lock(this);
  for (int i = UNSPECIFIED; i < MODEL_TYPE_COUNT; ++i) {
    const ModelType type = ModelTypeFromInt(i);
    if (!kernel_->unapplied_update_metahandles[type].empty()) {
      server_types.Put(type);
    }
  }
  return server_types;
}

void Directory::GetUnappliedUpdateMetaHandles(
    BaseTransaction* trans,
    FullModelTypeSet server_types,
    std::vector<int64>* result) {
  result->clear();
  ScopedKernelLock lock(this);
  for (int i = UNSPECIFIED; i < MODEL_TYPE_COUNT; ++i) {
    const ModelType type = ModelTypeFromInt(i);
    if (server_types.Has(type)) {
      std::copy(kernel_->unapplied_update_metahandles[type].begin(),
                kernel_->unapplied_update_metahandles[type].end(),
                back_inserter(*result));
    }
  }
}

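// Tallies entries (and entries marked for deletion) per model type. The
// output vectors are indexed by ModelType, so callers are expected to size
// them to hold MODEL_TYPE_COUNT elements before calling.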
void Directory::CollectMetaHandleCounts(
    std::vector<int>* num_entries_by_type,
    std::vector<int>* num_to_delete_entries_by_type) {
  syncable::ReadTransaction trans(FROM_HERE, this);
  ScopedKernelLock lock(this);

  MetahandlesIndex::iterator it = kernel_->metahandles_index->begin();
  for (; it != kernel_->metahandles_index->end(); ++it) {
    EntryKernel* entry = *it;
    const ModelType type = GetModelTypeFromSpecifics(entry->ref(SPECIFICS));
    (*num_entries_by_type)[type]++;
    if (entry->ref(IS_DEL))
      (*num_to_delete_entries_by_type)[type]++;
  }
}

bool Directory::CheckInvariantsOnTransactionClose(
    syncable::BaseTransaction* trans,
    const EntryKernelMutationMap& mutations) {
  // NOTE: The trans may be in the process of being destructed. Be careful if
  // you wish to call any of its virtual methods.
  MetahandleSet handles;

  switch (invariant_check_level_) {
    case FULL_DB_VERIFICATION:
      GetAllMetaHandles(trans, &handles);
      break;
    case VERIFY_CHANGES:
      for (EntryKernelMutationMap::const_iterator i = mutations.begin();
           i != mutations.end(); ++i) {
        handles.insert(i->first);
      }
      break;
    case OFF:
      break;
  }

  return CheckTreeInvariants(trans, handles);
}

bool Directory::FullyCheckTreeInvariants(syncable::BaseTransaction* trans) {
  MetahandleSet handles;
  GetAllMetaHandles(trans, &handles);
  return CheckTreeInvariants(trans, handles);
}

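// Validates the entries named by |handles|: the root must be a synced
// directory, non-deleted entries must have a name and a well-formed ancestor
// chain, and version/ID combinations must be consistent (for example, a zero
// or unapplied base version implies particular ID and deletion states unless
// a unique client tag is in play). Returns false on the first violation.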
bool Directory::CheckTreeInvariants(syncable::BaseTransaction* trans,
                                    const MetahandleSet& handles) {
  MetahandleSet::const_iterator i;
  for (i = handles.begin(); i != handles.end(); ++i) {
    int64 metahandle = *i;
    Entry e(trans, GET_BY_HANDLE, metahandle);
    if (!SyncAssert(e.good(), FROM_HERE, "Entry is bad", trans))
      return false;
    syncable::Id id = e.Get(ID);
    syncable::Id parentid = e.Get(PARENT_ID);

    if (id.IsRoot()) {
      if (!SyncAssert(e.Get(IS_DIR), FROM_HERE,
                      "Entry should be a directory",
                      trans))
        return false;
      if (!SyncAssert(parentid.IsRoot(), FROM_HERE,
                      "Entry should be root",
                      trans))
        return false;
      if (!SyncAssert(!e.Get(IS_UNSYNCED), FROM_HERE,
                      "Entry should be synced",
                      trans))
        return false;
      continue;
    }

    if (!e.Get(IS_DEL)) {
      if (!SyncAssert(id != parentid, FROM_HERE,
                      "Id should be different from parent id.",
                      trans))
        return false;
      if (!SyncAssert(!e.Get(NON_UNIQUE_NAME).empty(), FROM_HERE,
                      "Non unique name should not be empty.",
                      trans))
        return false;
      int safety_count = handles.size() + 1;
      while (!parentid.IsRoot()) {
        Entry parent(trans, GET_BY_ID, parentid);
        if (!SyncAssert(parent.good(), FROM_HERE,
                        "Parent entry is not valid.",
                        trans))
          return false;
        if (handles.end() == handles.find(parent.Get(META_HANDLE)))
          break;  // Skip further checking if parent was unmodified.
        if (!SyncAssert(parent.Get(IS_DIR), FROM_HERE,
                        "Parent should be a directory",
                        trans))
          return false;
        if (!SyncAssert(!parent.Get(IS_DEL), FROM_HERE,
                        "Parent should not have been marked for deletion.",
                        trans))
          return false;
        if (!SyncAssert(handles.end() != handles.find(parent.Get(META_HANDLE)),
                        FROM_HERE,
                        "Parent should be in the index.",
                        trans))
          return false;
        parentid = parent.Get(PARENT_ID);
        if (!SyncAssert(--safety_count > 0, FROM_HERE,
                        "Count should be greater than zero.",
                        trans))
          return false;
      }
    }
    int64 base_version = e.Get(BASE_VERSION);
    int64 server_version = e.Get(SERVER_VERSION);
    bool using_unique_client_tag = !e.Get(UNIQUE_CLIENT_TAG).empty();
    if (CHANGES_VERSION == base_version || 0 == base_version) {
      if (e.Get(IS_UNAPPLIED_UPDATE)) {
        // Must be a new item, or a de-duplicated unique client tag
        // that was created both locally and remotely.
        if (!using_unique_client_tag) {
          if (!SyncAssert(e.Get(IS_DEL), FROM_HERE,
                          "The entry should not have been deleted.",
                          trans))
            return false;
        }
        // It came from the server, so it must have a server ID.
        if (!SyncAssert(id.ServerKnows(), FROM_HERE,
                        "The id should be from a server.",
                        trans))
          return false;
      } else {
        if (e.Get(IS_DIR)) {
          // TODO(chron): Implement this mode if clients ever need it.
          // For now, you can't combine a client tag and a directory.
          if (!SyncAssert(!using_unique_client_tag, FROM_HERE,
                          "Directory cannot have a client tag.",
                          trans))
            return false;
        }
        // Should be an uncommitted item, or a successfully deleted one.
        if (!e.Get(IS_DEL)) {
          if (!SyncAssert(e.Get(IS_UNSYNCED), FROM_HERE,
                          "The item should be unsynced.",
                          trans))
            return false;
        }
        // If the next check failed, it would imply that an item exists
        // on the server, isn't waiting for application locally, but either
        // is an unsynced create or a successful delete in the local copy.
        // Either way, that's a mismatch.
        if (!SyncAssert(0 == server_version, FROM_HERE,
                        "Server version should be zero.",
                        trans))
          return false;
        // Items that aren't using the unique client tag should have a zero
        // base version only if they have a local ID. Items with unique client
        // tags are allowed to use the zero base version for undeletion and
        // de-duplication; the unique client tag trumps the server ID.
        if (!using_unique_client_tag) {
          if (!SyncAssert(!id.ServerKnows(), FROM_HERE,
                          "Should be a client only id.",
                          trans))
            return false;
        }
      }
    } else {
      if (!SyncAssert(id.ServerKnows(),
                      FROM_HERE,
                      "Should be a server id.",
                      trans))
        return false;
    }
    // Server-unknown items that are locally deleted should not be sent up to
    // the server. They must be !IS_UNSYNCED.
    if (!SyncAssert(!(!id.ServerKnows() &&
                      e.Get(IS_DEL) &&
                      e.Get(IS_UNSYNCED)), FROM_HERE,
                    "Locally deleted item must not be unsynced.",
                    trans)) {
      return false;
    }
  }
  return true;
}

void Directory::SetInvariantCheckLevel(InvariantCheckLevel check_level) {
  invariant_check_level_ = check_level;
}

int64 Directory::NextMetahandle() {
  ScopedKernelLock lock(this);
  int64 metahandle = (kernel_->next_metahandle)++;
  return metahandle;
}

// Always returns a client ID that is the string representation of a negative
// number.
Id Directory::NextId() {
  int64 result;
  {
    ScopedKernelLock lock(this);
    result = (kernel_->persisted_info.next_id)--;
    kernel_->info_status = KERNEL_SHARE_INFO_DIRTY;
  }
  DCHECK_LT(result, 0);
  return Id::CreateFromClientString(base::Int64ToString(result));
}

bool Directory::HasChildren(BaseTransaction* trans, const Id& id) {
  ScopedKernelLock lock(this);
  return kernel_->parent_child_index->GetChildren(id) != NULL;
}

Id Directory::GetFirstChildId(BaseTransaction* trans,
                              const EntryKernel* parent) {
  DCHECK(parent);
  DCHECK(parent->ref(IS_DIR));

  ScopedKernelLock lock(this);
  const OrderedChildSet* children =
      kernel_->parent_child_index->GetChildren(parent->ref(ID));

  // We're expected to return root if there are no children.
  if (!children)
    return Id();

  return (*children->begin())->ref(ID);
}

syncable::Id Directory::GetPredecessorId(EntryKernel* e) {
  ScopedKernelLock lock(this);

  DCHECK(ParentChildIndex::ShouldInclude(e));
  const OrderedChildSet* children =
      kernel_->parent_child_index->GetChildren(e->ref(PARENT_ID));
  DCHECK(children && !children->empty());
  OrderedChildSet::const_iterator i = children->find(e);
  DCHECK(i != children->end());

  if (i == children->begin()) {
    return Id();
  } else {
    i--;
    return (*i)->ref(ID);
  }
}

syncable::Id Directory::GetSuccessorId(EntryKernel* e) {
  ScopedKernelLock lock(this);

  DCHECK(ParentChildIndex::ShouldInclude(e));
  const OrderedChildSet* children =
      kernel_->parent_child_index->GetChildren(e->ref(PARENT_ID));
  DCHECK(children && !children->empty());
  OrderedChildSet::const_iterator i = children->find(e);
  DCHECK(i != children->end());

  i++;
  if (i == children->end()) {
    return Id();
  } else {
    return (*i)->ref(ID);
  }
}

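// Assigns |e| a UNIQUE_POSITION that places it immediately after
// |predecessor| among its siblings (a root-ID predecessor means "insert
// first"). The four cases handled below are: no siblings at all, inserting
// at the front, inserting at the end, and inserting between two positioned
// siblings.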
// TODO(rlarocque): Remove all support for placing ShouldMaintainPosition()
// items as siblings of items that do not maintain positions. It is required
// only for tests. See crbug.com/178282.
void Directory::PutPredecessor(EntryKernel* e, EntryKernel* predecessor) {
  DCHECK(!e->ref(IS_DEL));
  if (!e->ShouldMaintainPosition()) {
    DCHECK(!e->ref(UNIQUE_POSITION).IsValid());
    return;
  }
  std::string suffix = e->ref(UNIQUE_BOOKMARK_TAG);
  DCHECK(!suffix.empty());

  // Remove our item from the ParentChildIndex and remember to re-add it later.
  ScopedKernelLock lock(this);
  ScopedParentChildIndexUpdater updater(lock, e, kernel_->parent_child_index);

  // Note: The ScopedParentChildIndexUpdater will update this set for us as we
  // leave this function.
  const OrderedChildSet* siblings =
      kernel_->parent_child_index->GetChildren(e->ref(PARENT_ID));

  if (!siblings) {
    // This parent currently has no other children.
    DCHECK(predecessor->ref(ID).IsRoot());
    UniquePosition pos = UniquePosition::InitialPosition(suffix);
    e->put(UNIQUE_POSITION, pos);
    return;
  }

  if (predecessor->ref(ID).IsRoot()) {
    // We have at least one sibling, and we're inserting to the left of them.
    UniquePosition successor_pos = (*siblings->begin())->ref(UNIQUE_POSITION);

    UniquePosition pos;
    if (!successor_pos.IsValid()) {
      // If all our successors are of non-positionable types, just create an
      // initial position. We arbitrarily choose to sort invalid positions to
      // the right of the valid positions.
      //
      // We really shouldn't need to support this. See TODO above.
      pos = UniquePosition::InitialPosition(suffix);
    } else {
      DCHECK(!siblings->empty());
      pos = UniquePosition::Before(successor_pos, suffix);
    }

    e->put(UNIQUE_POSITION, pos);
    return;
  }

  // We can't support placing an item after an invalid position. Fortunately,
  // the tests don't exercise this particular case. We should not support
  // siblings with invalid positions at all. See TODO above.
  DCHECK(predecessor->ref(UNIQUE_POSITION).IsValid());

  OrderedChildSet::const_iterator neighbour = siblings->find(predecessor);
  DCHECK(neighbour != siblings->end());

  ++neighbour;
  if (neighbour == siblings->end()) {
    // Inserting at the end of the list.
    UniquePosition pos = UniquePosition::After(
        predecessor->ref(UNIQUE_POSITION),
        suffix);
    e->put(UNIQUE_POSITION, pos);
    return;
  }

  EntryKernel* successor = *neighbour;

  // Another mixed valid and invalid position case. This one could be supported
  // in theory, but we're trying to deprecate support for siblings with and
  // without valid positions. See TODO above.
  DCHECK(successor->ref(UNIQUE_POSITION).IsValid());

  // Finally, the normal case: inserting between two elements.
  UniquePosition pos = UniquePosition::Between(
      predecessor->ref(UNIQUE_POSITION),
      successor->ref(UNIQUE_POSITION),
      suffix);
  e->put(UNIQUE_POSITION, pos);
  return;
}

// TODO(rlarocque): Avoid this indirection. Just return the set.
void Directory::AppendChildHandles(const ScopedKernelLock& lock,
                                   const Id& parent_id,
                                   Directory::ChildHandles* result) {
  const OrderedChildSet* children =
      kernel_->parent_child_index->GetChildren(parent_id);
  if (!children)
    return;

  for (OrderedChildSet::const_iterator i = children->begin();
       i != children->end(); ++i) {
    DCHECK_EQ(parent_id, (*i)->ref(PARENT_ID));
    result->push_back((*i)->ref(META_HANDLE));
  }
}

ScopedKernelLock::ScopedKernelLock(const Directory* dir)
    : scoped_lock_(dir->kernel_->mutex), dir_(const_cast<Directory*>(dir)) {
}

}  // namespace syncable
}  // namespace syncer