// Copyright 2013 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "sync/syncable/directory.h"

#include <algorithm>
#include <iterator>

#include "base/base64.h"
#include "base/guid.h"
#include "base/metrics/histogram.h"
#include "base/stl_util.h"
#include "base/strings/string_number_conversions.h"
#include "base/trace_event/trace_event.h"
#include "sync/internal_api/public/base/attachment_id_proto.h"
#include "sync/internal_api/public/base/unique_position.h"
#include "sync/internal_api/public/util/unrecoverable_error_handler.h"
#include "sync/syncable/entry.h"
#include "sync/syncable/entry_kernel.h"
#include "sync/syncable/in_memory_directory_backing_store.h"
#include "sync/syncable/on_disk_directory_backing_store.h"
#include "sync/syncable/scoped_kernel_lock.h"
#include "sync/syncable/scoped_parent_child_index_updater.h"
#include "sync/syncable/syncable-inl.h"
#include "sync/syncable/syncable_base_transaction.h"
#include "sync/syncable/syncable_changes_version.h"
#include "sync/syncable/syncable_read_transaction.h"
#include "sync/syncable/syncable_util.h"
#include "sync/syncable/syncable_write_transaction.h"

using std::string;
namespace syncer {
namespace syncable {

// static
const base::FilePath::CharType Directory::kSyncDatabaseFilename[] =
    FILE_PATH_LITERAL("SyncData.sqlite3");
Directory::PersistedKernelInfo::PersistedKernelInfo() {
  ModelTypeSet protocol_types = ProtocolTypes();
  for (ModelTypeSet::Iterator iter = protocol_types.First(); iter.Good();
       iter.Inc()) {
    ResetDownloadProgress(iter.Get());
    transaction_version[iter.Get()] = 0;
  }
}

Directory::PersistedKernelInfo::~PersistedKernelInfo() {}
void Directory::PersistedKernelInfo::ResetDownloadProgress(
    ModelType model_type) {
  // Clear everything except the data type id field.
  download_progress[model_type].Clear();
  download_progress[model_type].set_data_type_id(
      GetSpecificsFieldNumberFromModelType(model_type));

  // Explicitly set an empty token field to denote no progress.
  download_progress[model_type].set_token("");
}

bool Directory::PersistedKernelInfo::HasEmptyDownloadProgress(
    ModelType model_type) {
  const sync_pb::DataTypeProgressMarker& progress_marker =
      download_progress[model_type];
  return progress_marker.token().empty();
}
Directory::SaveChangesSnapshot::SaveChangesSnapshot()
    : kernel_info_status(KERNEL_SHARE_INFO_INVALID) {
}

Directory::SaveChangesSnapshot::~SaveChangesSnapshot() {
  STLDeleteElements(&dirty_metas);
  STLDeleteElements(&delete_journals);
}

bool Directory::SaveChangesSnapshot::HasUnsavedMetahandleChanges() const {
  return !dirty_metas.empty() || !metahandles_to_purge.empty() ||
         !delete_journals.empty() || !delete_journals_to_purge.empty();
}
Directory::Kernel::Kernel(
    const std::string& name,
    const KernelLoadInfo& info,
    DirectoryChangeDelegate* delegate,
    const WeakHandle<TransactionObserver>& transaction_observer)
    : next_write_transaction_id(0),
      name(name),
      info_status(Directory::KERNEL_SHARE_INFO_VALID),
      persisted_info(info.kernel_info),
      cache_guid(info.cache_guid),
      next_metahandle(info.max_metahandle + 1),
      delegate(delegate),
      transaction_observer(transaction_observer) {
  DCHECK(delegate);
  DCHECK(transaction_observer.IsInitialized());
}

Directory::Kernel::~Kernel() {
  STLDeleteContainerPairSecondPointers(metahandles_map.begin(),
                                       metahandles_map.end());
}
Directory::Directory(DirectoryBackingStore* store,
                     UnrecoverableErrorHandler* unrecoverable_error_handler,
                     const base::Closure& report_unrecoverable_error_function,
                     NigoriHandler* nigori_handler,
                     Cryptographer* cryptographer)
    : kernel_(NULL),
      store_(store),
      unrecoverable_error_handler_(unrecoverable_error_handler),
      report_unrecoverable_error_function_(report_unrecoverable_error_function),
      unrecoverable_error_set_(false),
      nigori_handler_(nigori_handler),
      cryptographer_(cryptographer),
      invariant_check_level_(VERIFY_CHANGES),
      weak_ptr_factory_(this) {
}

Directory::~Directory() {
  Close();
}
DirOpenResult Directory::Open(
    const string& name,
    DirectoryChangeDelegate* delegate,
    const WeakHandle<TransactionObserver>& transaction_observer) {
  TRACE_EVENT0("sync", "SyncDatabaseOpen");

  const DirOpenResult result =
      OpenImpl(name, delegate, transaction_observer);

  if (OPENED != result)
    Close();
  return result;
}
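// Rebuilds every in-memory index (parent/child index, ids map, server and
// client tag maps, unsynced and unapplied-update sets, and the attachment
// index) from the metahandles map that was just loaded from the backing
// store.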
void Directory::InitializeIndices(MetahandlesMap* handles_map) {
  ScopedKernelLock lock(this);
  kernel_->metahandles_map.swap(*handles_map);
  for (MetahandlesMap::const_iterator it = kernel_->metahandles_map.begin();
       it != kernel_->metahandles_map.end(); ++it) {
    EntryKernel* entry = it->second;
    if (ParentChildIndex::ShouldInclude(entry))
      kernel_->parent_child_index.Insert(entry);
    const int64 metahandle = entry->ref(META_HANDLE);
    if (entry->ref(IS_UNSYNCED))
      kernel_->unsynced_metahandles.insert(metahandle);
    if (entry->ref(IS_UNAPPLIED_UPDATE)) {
      const ModelType type = entry->GetServerModelType();
      kernel_->unapplied_update_metahandles[type].insert(metahandle);
    }
    if (!entry->ref(UNIQUE_SERVER_TAG).empty()) {
      DCHECK(kernel_->server_tags_map.find(entry->ref(UNIQUE_SERVER_TAG)) ==
             kernel_->server_tags_map.end())
          << "Unexpected duplicate use of server tag";
      kernel_->server_tags_map[entry->ref(UNIQUE_SERVER_TAG)] = entry;
    }
    if (!entry->ref(UNIQUE_CLIENT_TAG).empty()) {
      DCHECK(kernel_->client_tags_map.find(entry->ref(UNIQUE_CLIENT_TAG)) ==
             kernel_->client_tags_map.end())
          << "Unexpected duplicate use of client tag";
      kernel_->client_tags_map[entry->ref(UNIQUE_CLIENT_TAG)] = entry;
    }
    DCHECK(kernel_->ids_map.find(entry->ref(ID).value()) ==
           kernel_->ids_map.end()) << "Unexpected duplicate use of ID";
    kernel_->ids_map[entry->ref(ID).value()] = entry;
    DCHECK(!entry->is_dirty());
    AddToAttachmentIndex(lock, metahandle, entry->ref(ATTACHMENT_METADATA));
  }
}
DirOpenResult Directory::OpenImpl(
    const string& name,
    DirectoryChangeDelegate* delegate,
    const WeakHandle<TransactionObserver>&
        transaction_observer) {
  KernelLoadInfo info;
  // Temporary indices before kernel_ initialized in case Load fails. We O(1)
  // swap these later.
  Directory::MetahandlesMap tmp_handles_map;

  // Avoids mem leaks on failure. Harmlessly deletes the empty hash map after
  // the swap in the success case.
  STLValueDeleter<MetahandlesMap> deleter(&tmp_handles_map);

  JournalIndex delete_journals;
  MetahandleSet metahandles_to_purge;

  DirOpenResult result = store_->Load(&tmp_handles_map, &delete_journals,
                                      &metahandles_to_purge, &info);
  if (OPENED != result)
    return result;

  DCHECK(!kernel_);
  kernel_ = new Kernel(name, info, delegate, transaction_observer);
  kernel_->metahandles_to_purge.swap(metahandles_to_purge);
  delete_journal_.reset(new DeleteJournal(&delete_journals));
  InitializeIndices(&tmp_handles_map);

  // Save changes back in case there are any metahandles to purge.
  if (!SaveChanges())
    return FAILED_INITIAL_WRITE;

  // Now that we've successfully opened the store, install an error handler to
  // deal with catastrophic errors that may occur later on. Use a weak pointer
  // because we cannot guarantee that this Directory will outlive the Closure.
  store_->SetCatastrophicErrorHandler(base::Bind(
      &Directory::OnCatastrophicError, weak_ptr_factory_.GetWeakPtr()));

  return OPENED;
}
DeleteJournal* Directory::delete_journal() {
  DCHECK(delete_journal_.get());
  return delete_journal_.get();
}

void Directory::Close() {
  store_.reset();
  if (kernel_) {
    delete kernel_;
    kernel_ = NULL;
  }
}
void Directory::OnUnrecoverableError(const BaseTransaction* trans,
                                     const tracked_objects::Location& location,
                                     const std::string& message) {
  DCHECK(trans != NULL);
  unrecoverable_error_set_ = true;
  unrecoverable_error_handler_->OnUnrecoverableError(location,
                                                     message);
}
EntryKernel* Directory::GetEntryById(const Id& id) {
  ScopedKernelLock lock(this);
  return GetEntryById(lock, id);
}

EntryKernel* Directory::GetEntryById(const ScopedKernelLock& lock,
                                     const Id& id) {
  DCHECK(kernel_);
  // Find it in the in memory ID index.
  IdsMap::iterator id_found = kernel_->ids_map.find(id.value());
  if (id_found != kernel_->ids_map.end()) {
    return id_found->second;
  }
  return NULL;
}

EntryKernel* Directory::GetEntryByClientTag(const string& tag) {
  ScopedKernelLock lock(this);
  DCHECK(kernel_);

  TagsMap::iterator it = kernel_->client_tags_map.find(tag);
  if (it != kernel_->client_tags_map.end()) {
    return it->second;
  }
  return NULL;
}

EntryKernel* Directory::GetEntryByServerTag(const string& tag) {
  ScopedKernelLock lock(this);
  DCHECK(kernel_);
  TagsMap::iterator it = kernel_->server_tags_map.find(tag);
  if (it != kernel_->server_tags_map.end()) {
    return it->second;
  }
  return NULL;
}
EntryKernel* Directory::GetEntryByHandle(int64 metahandle) {
  ScopedKernelLock lock(this);
  return GetEntryByHandle(lock, metahandle);
}

EntryKernel* Directory::GetEntryByHandle(const ScopedKernelLock& lock,
                                         int64 metahandle) {
  // Look up in memory.
  MetahandlesMap::iterator found =
      kernel_->metahandles_map.find(metahandle);
  if (found != kernel_->metahandles_map.end()) {
    // Found it in memory. Easy.
    return found->second;
  }
  return NULL;
}
bool Directory::GetChildHandlesById(
    BaseTransaction* trans, const Id& parent_id,
    Directory::Metahandles* result) {
  if (!SyncAssert(this == trans->directory(), FROM_HERE,
                  "Directories don't match", trans))
    return false;
  result->clear();

  ScopedKernelLock lock(this);
  AppendChildHandles(lock, parent_id, result);
  return true;
}
int Directory::GetTotalNodeCount(
    BaseTransaction* trans,
    EntryKernel* kernel) const {
  if (!SyncAssert(this == trans->directory(), FROM_HERE,
                  "Directories don't match", trans))
    return false;

  int count = 1;
  std::deque<const OrderedChildSet*> child_sets;

  GetChildSetForKernel(trans, kernel, &child_sets);
  while (!child_sets.empty()) {
    const OrderedChildSet* set = child_sets.front();
    child_sets.pop_front();
    for (OrderedChildSet::const_iterator it = set->begin();
         it != set->end(); ++it) {
      count++;
      GetChildSetForKernel(trans, *it, &child_sets);
    }
  }

  return count;
}
void Directory::GetChildSetForKernel(
    BaseTransaction* trans,
    EntryKernel* kernel,
    std::deque<const OrderedChildSet*>* child_sets) const {
  if (!kernel->ref(IS_DIR))
    return;  // Not a directory => no children.

  const OrderedChildSet* descendants =
      kernel_->parent_child_index.GetChildren(kernel->ref(ID));
  if (!descendants)
    return;  // This directory has no children.

  // Add our children to the list of items to be traversed.
  child_sets->push_back(descendants);
}
int Directory::GetPositionIndex(
    BaseTransaction* trans,
    EntryKernel* kernel) const {
  const OrderedChildSet* siblings =
      kernel_->parent_child_index.GetSiblings(kernel);

  OrderedChildSet::const_iterator it = siblings->find(kernel);
  return std::distance(siblings->begin(), it);
}
bool Directory::InsertEntry(BaseWriteTransaction* trans, EntryKernel* entry) {
  ScopedKernelLock lock(this);
  return InsertEntry(lock, trans, entry);
}

bool Directory::InsertEntry(const ScopedKernelLock& lock,
                            BaseWriteTransaction* trans,
                            EntryKernel* entry) {
  if (!SyncAssert(NULL != entry, FROM_HERE, "Entry is null", trans))
    return false;

  static const char error[] = "Entry already in memory index.";

  if (!SyncAssert(
          kernel_->metahandles_map.insert(
              std::make_pair(entry->ref(META_HANDLE), entry)).second,
          FROM_HERE,
          error,
          trans)) {
    return false;
  }
  if (!SyncAssert(
          kernel_->ids_map.insert(
              std::make_pair(entry->ref(ID).value(), entry)).second,
          FROM_HERE,
          error,
          trans)) {
    return false;
  }
  if (ParentChildIndex::ShouldInclude(entry)) {
    if (!SyncAssert(kernel_->parent_child_index.Insert(entry),
                    FROM_HERE,
                    error,
                    trans)) {
      return false;
    }
  }
  AddToAttachmentIndex(
      lock, entry->ref(META_HANDLE), entry->ref(ATTACHMENT_METADATA));

  // Should NEVER be created with a client tag or server tag.
  if (!SyncAssert(entry->ref(UNIQUE_SERVER_TAG).empty(), FROM_HERE,
                  "Server tag should be empty", trans)) {
    return false;
  }
  if (!SyncAssert(entry->ref(UNIQUE_CLIENT_TAG).empty(), FROM_HERE,
                  "Client tag should be empty", trans))
    return false;

  return true;
}
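// Changes an entry's ID in place and updates the indices that are keyed on
// the ID field. Fails if the new ID is already in use.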
bool Directory::ReindexId(BaseWriteTransaction* trans,
                          EntryKernel* const entry,
                          const Id& new_id) {
  ScopedKernelLock lock(this);
  if (NULL != GetEntryById(lock, new_id))
    return false;

  {
    // Update the indices that depend on the ID field.
    ScopedParentChildIndexUpdater updater_b(lock, entry,
        &kernel_->parent_child_index);
    size_t num_erased = kernel_->ids_map.erase(entry->ref(ID).value());
    DCHECK_EQ(1U, num_erased);
    entry->put(ID, new_id);
    kernel_->ids_map[entry->ref(ID).value()] = entry;
  }

  return true;
}

bool Directory::ReindexParentId(BaseWriteTransaction* trans,
                                EntryKernel* const entry,
                                const Id& new_parent_id) {
  ScopedKernelLock lock(this);

  {
    // Update the indices that depend on the PARENT_ID field.
    ScopedParentChildIndexUpdater index_updater(lock, entry,
        &kernel_->parent_child_index);
    entry->put(PARENT_ID, new_parent_id);
  }

  return true;
}
void Directory::RemoveFromAttachmentIndex(
    const ScopedKernelLock& lock,
    const int64 metahandle,
    const sync_pb::AttachmentMetadata& attachment_metadata) {
  for (int i = 0; i < attachment_metadata.record_size(); ++i) {
    AttachmentIdUniqueId unique_id =
        attachment_metadata.record(i).id().unique_id();
    IndexByAttachmentId::iterator iter =
        kernel_->index_by_attachment_id.find(unique_id);
    if (iter != kernel_->index_by_attachment_id.end()) {
      iter->second.erase(metahandle);
      if (iter->second.empty()) {
        kernel_->index_by_attachment_id.erase(iter);
      }
    }
  }
}

void Directory::AddToAttachmentIndex(
    const ScopedKernelLock& lock,
    const int64 metahandle,
    const sync_pb::AttachmentMetadata& attachment_metadata) {
  for (int i = 0; i < attachment_metadata.record_size(); ++i) {
    AttachmentIdUniqueId unique_id =
        attachment_metadata.record(i).id().unique_id();
    IndexByAttachmentId::iterator iter =
        kernel_->index_by_attachment_id.find(unique_id);
    if (iter == kernel_->index_by_attachment_id.end()) {
      iter = kernel_->index_by_attachment_id.insert(std::make_pair(
          unique_id,
          MetahandleSet())).first;
    }
    iter->second.insert(metahandle);
  }
}
void Directory::UpdateAttachmentIndex(
    const int64 metahandle,
    const sync_pb::AttachmentMetadata& old_metadata,
    const sync_pb::AttachmentMetadata& new_metadata) {
  ScopedKernelLock lock(this);
  RemoveFromAttachmentIndex(lock, metahandle, old_metadata);
  AddToAttachmentIndex(lock, metahandle, new_metadata);
}

void Directory::GetMetahandlesByAttachmentId(
    BaseTransaction* trans,
    const sync_pb::AttachmentIdProto& attachment_id_proto,
    Metahandles* result) {
  DCHECK(result);
  result->clear();
  ScopedKernelLock lock(this);
  IndexByAttachmentId::const_iterator index_iter =
      kernel_->index_by_attachment_id.find(attachment_id_proto.unique_id());
  if (index_iter == kernel_->index_by_attachment_id.end())
    return;
  const MetahandleSet& metahandle_set = index_iter->second;
  std::copy(
      metahandle_set.begin(), metahandle_set.end(), back_inserter(*result));
}
bool Directory::unrecoverable_error_set(const BaseTransaction* trans) const {
  DCHECK(trans != NULL);
  return unrecoverable_error_set_;
}

void Directory::ClearDirtyMetahandles(const ScopedKernelLock& lock) {
  kernel_->transaction_mutex.AssertAcquired();
  kernel_->dirty_metahandles.clear();
}
bool Directory::SafeToPurgeFromMemory(WriteTransaction* trans,
                                      const EntryKernel* const entry) const {
  bool safe = entry->ref(IS_DEL) && !entry->is_dirty() &&
      !entry->ref(SYNCING) && !entry->ref(IS_UNAPPLIED_UPDATE) &&
      !entry->ref(IS_UNSYNCED);

  if (safe) {
    int64 handle = entry->ref(META_HANDLE);
    const ModelType type = entry->GetServerModelType();
    if (!SyncAssert(kernel_->dirty_metahandles.count(handle) == 0U,
                    FROM_HERE,
                    "Dirty metahandles should be empty", trans))
      return false;
    // TODO(tim): Bug 49278.
    if (!SyncAssert(!kernel_->unsynced_metahandles.count(handle),
                    FROM_HERE,
                    "Unsynced handles should be empty",
                    trans))
      return false;
    if (!SyncAssert(!kernel_->unapplied_update_metahandles[type].count(handle),
                    FROM_HERE,
                    "Unapplied metahandles should be empty",
                    trans))
      return false;
  }

  return safe;
}
void Directory::TakeSnapshotForSaveChanges(SaveChangesSnapshot* snapshot) {
  ReadTransaction trans(FROM_HERE, this);
  ScopedKernelLock lock(this);

  // If there is an unrecoverable error then just bail out.
  if (unrecoverable_error_set(&trans))
    return;

  // Deep copy dirty entries from kernel_->metahandles_index into snapshot and
  // clear dirty flags.
  for (MetahandleSet::const_iterator i = kernel_->dirty_metahandles.begin();
       i != kernel_->dirty_metahandles.end(); ++i) {
    EntryKernel* entry = GetEntryByHandle(lock, *i);
    if (!entry)
      continue;
    // Skip over false positives; it happens relatively infrequently.
    if (!entry->is_dirty())
      continue;
    snapshot->dirty_metas.insert(snapshot->dirty_metas.end(),
                                 new EntryKernel(*entry));
    DCHECK_EQ(1U, kernel_->dirty_metahandles.count(*i));
    // We don't bother removing from the index here as we blow the entire thing
    // in a moment, and it unnecessarily complicates iteration.
    entry->clear_dirty(NULL);
  }
  ClearDirtyMetahandles(lock);

  // Set purged handles.
  DCHECK(snapshot->metahandles_to_purge.empty());
  snapshot->metahandles_to_purge.swap(kernel_->metahandles_to_purge);

  // Fill kernel_info_status and kernel_info.
  snapshot->kernel_info = kernel_->persisted_info;
  snapshot->kernel_info_status = kernel_->info_status;
  // This one we reset on failure.
  kernel_->info_status = KERNEL_SHARE_INFO_VALID;

  delete_journal_->TakeSnapshotAndClear(
      &trans, &snapshot->delete_journals, &snapshot->delete_journals_to_purge);
}
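// Persists all dirty state to the backing store. A snapshot of the dirty
// entries is taken under the kernel lock and written out; on success,
// safely-purgeable entries are vacuumed from memory, and on failure the
// snapshotted entries are marked dirty again so a later SaveChanges retries.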
bool Directory::SaveChanges() {
  bool success = false;

  base::AutoLock scoped_lock(kernel_->save_changes_mutex);

  // Snapshot and save.
  SaveChangesSnapshot snapshot;
  TakeSnapshotForSaveChanges(&snapshot);
  success = store_->SaveChanges(snapshot);

  // Handle success or failure.
  if (success)
    success = VacuumAfterSaveChanges(snapshot);
  else
    HandleSaveChangesFailure(snapshot);
  return success;
}
bool Directory::VacuumAfterSaveChanges(const SaveChangesSnapshot& snapshot) {
  if (snapshot.dirty_metas.empty())
    return true;

  // Need a write transaction as we are about to permanently purge entries.
  WriteTransaction trans(FROM_HERE, VACUUM_AFTER_SAVE, this);
  ScopedKernelLock lock(this);
  // Now drop everything we can out of memory.
  for (EntryKernelSet::const_iterator i = snapshot.dirty_metas.begin();
       i != snapshot.dirty_metas.end(); ++i) {
    MetahandlesMap::iterator found =
        kernel_->metahandles_map.find((*i)->ref(META_HANDLE));
    EntryKernel* entry = (found == kernel_->metahandles_map.end() ?
                          NULL : found->second);
    if (entry && SafeToPurgeFromMemory(&trans, entry)) {
      // We now drop deleted metahandles that are up to date on both the client
      // and the server.
      size_t num_erased = 0;
      num_erased = kernel_->metahandles_map.erase(entry->ref(META_HANDLE));
      DCHECK_EQ(1u, num_erased);
      num_erased = kernel_->ids_map.erase(entry->ref(ID).value());
      DCHECK_EQ(1u, num_erased);
      if (!entry->ref(UNIQUE_SERVER_TAG).empty()) {
        num_erased =
            kernel_->server_tags_map.erase(entry->ref(UNIQUE_SERVER_TAG));
        DCHECK_EQ(1u, num_erased);
      }
      if (!entry->ref(UNIQUE_CLIENT_TAG).empty()) {
        num_erased =
            kernel_->client_tags_map.erase(entry->ref(UNIQUE_CLIENT_TAG));
        DCHECK_EQ(1u, num_erased);
      }
      if (!SyncAssert(!kernel_->parent_child_index.Contains(entry),
                      FROM_HERE,
                      "Deleted entry still present",
                      (&trans)))
        return false;
      RemoveFromAttachmentIndex(
          lock, entry->ref(META_HANDLE), entry->ref(ATTACHMENT_METADATA));

      delete entry;
    }
    if (trans.unrecoverable_error_set())
      return false;
  }
  return true;
}
void Directory::UnapplyEntry(EntryKernel* entry) {
  int64 handle = entry->ref(META_HANDLE);
  ModelType server_type = GetModelTypeFromSpecifics(
      entry->ref(SERVER_SPECIFICS));

  // Clear enough so that on the next sync cycle all local data will
  // be overwritten.
  // Note: do not modify the root node in order to preserve the
  // initial sync ended bit for this type (else on the next restart
  // this type will be treated as disabled and therefore fully purged).
  if (IsRealDataType(server_type) &&
      ModelTypeToRootTag(server_type) == entry->ref(UNIQUE_SERVER_TAG)) {
    return;
  }

  // Set the unapplied bit if this item has server data.
  if (IsRealDataType(server_type) && !entry->ref(IS_UNAPPLIED_UPDATE)) {
    entry->put(IS_UNAPPLIED_UPDATE, true);
    kernel_->unapplied_update_metahandles[server_type].insert(handle);
    entry->mark_dirty(&kernel_->dirty_metahandles);
  }

  // Unset the unsynced bit.
  if (entry->ref(IS_UNSYNCED)) {
    kernel_->unsynced_metahandles.erase(handle);
    entry->put(IS_UNSYNCED, false);
    entry->mark_dirty(&kernel_->dirty_metahandles);
  }

  // Mark the item as locally deleted. No deleted items are allowed in the
  // parent child index.
  if (!entry->ref(IS_DEL)) {
    kernel_->parent_child_index.Remove(entry);
    entry->put(IS_DEL, true);
    entry->mark_dirty(&kernel_->dirty_metahandles);
  }

  // Set the version to the "newly created" version.
  if (entry->ref(BASE_VERSION) != CHANGES_VERSION) {
    entry->put(BASE_VERSION, CHANGES_VERSION);
    entry->mark_dirty(&kernel_->dirty_metahandles);
  }

  // At this point locally created items that aren't synced will become locally
  // deleted items, and purged on the next snapshot. All other items will match
  // the state they would have had if they were just created via a server
  // update. See MutableEntry::MutableEntry(.., CreateNewUpdateItem, ..).
}
void Directory::DeleteEntry(const ScopedKernelLock& lock,
                            bool save_to_journal,
                            EntryKernel* entry,
                            EntryKernelSet* entries_to_journal) {
  int64 handle = entry->ref(META_HANDLE);
  ModelType server_type = GetModelTypeFromSpecifics(
      entry->ref(SERVER_SPECIFICS));

  kernel_->metahandles_to_purge.insert(handle);

  size_t num_erased = 0;
  num_erased = kernel_->metahandles_map.erase(entry->ref(META_HANDLE));
  DCHECK_EQ(1u, num_erased);
  num_erased = kernel_->ids_map.erase(entry->ref(ID).value());
  DCHECK_EQ(1u, num_erased);
  num_erased = kernel_->unsynced_metahandles.erase(handle);
  DCHECK_EQ(entry->ref(IS_UNSYNCED), num_erased > 0);
  num_erased =
      kernel_->unapplied_update_metahandles[server_type].erase(handle);
  DCHECK_EQ(entry->ref(IS_UNAPPLIED_UPDATE), num_erased > 0);
  if (kernel_->parent_child_index.Contains(entry))
    kernel_->parent_child_index.Remove(entry);

  if (!entry->ref(UNIQUE_CLIENT_TAG).empty()) {
    num_erased =
        kernel_->client_tags_map.erase(entry->ref(UNIQUE_CLIENT_TAG));
    DCHECK_EQ(1u, num_erased);
  }
  if (!entry->ref(UNIQUE_SERVER_TAG).empty()) {
    num_erased =
        kernel_->server_tags_map.erase(entry->ref(UNIQUE_SERVER_TAG));
    DCHECK_EQ(1u, num_erased);
  }
  RemoveFromAttachmentIndex(lock, handle, entry->ref(ATTACHMENT_METADATA));

  if (save_to_journal) {
    entries_to_journal->insert(entry);
  } else {
    delete entry;
  }
}
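// Purges every entry whose local or server type is in |disabled_types|.
// Entries of types in |types_to_unapply| are unapplied rather than deleted;
// deletions of types in |types_to_journal| are recorded in the delete journal
// when journaling is enabled for that type.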
bool Directory::PurgeEntriesWithTypeIn(ModelTypeSet disabled_types,
                                       ModelTypeSet types_to_journal,
                                       ModelTypeSet types_to_unapply) {
  disabled_types.RemoveAll(ProxyTypes());

  if (disabled_types.Empty())
    return true;

  {
    WriteTransaction trans(FROM_HERE, PURGE_ENTRIES, this);

    EntryKernelSet entries_to_journal;
    STLElementDeleter<EntryKernelSet> journal_deleter(&entries_to_journal);

    {
      ScopedKernelLock lock(this);

      bool found_progress = false;
      for (ModelTypeSet::Iterator iter = disabled_types.First(); iter.Good();
           iter.Inc()) {
        if (!kernel_->persisted_info.HasEmptyDownloadProgress(iter.Get()))
          found_progress = true;
      }

      // If none of the disabled types have progress markers, there's nothing to
      // purge.
      if (!found_progress)
        return true;

      // We iterate in two passes to avoid a bug in STLport (which is used in
      // the Android build). There are some versions of that library where a
      // hash_map's iterators can be invalidated when an item is erased from the
      // hash_map.
      // See http://sourceforge.net/p/stlport/bugs/239/.

      std::set<EntryKernel*> to_purge;
      for (MetahandlesMap::iterator it = kernel_->metahandles_map.begin();
           it != kernel_->metahandles_map.end(); ++it) {
        const sync_pb::EntitySpecifics& local_specifics =
            it->second->ref(SPECIFICS);
        const sync_pb::EntitySpecifics& server_specifics =
            it->second->ref(SERVER_SPECIFICS);
        ModelType local_type = GetModelTypeFromSpecifics(local_specifics);
        ModelType server_type = GetModelTypeFromSpecifics(server_specifics);

        if ((IsRealDataType(local_type) && disabled_types.Has(local_type)) ||
            (IsRealDataType(server_type) && disabled_types.Has(server_type))) {
          to_purge.insert(it->second);
        }
      }

      for (std::set<EntryKernel*>::iterator it = to_purge.begin();
           it != to_purge.end(); ++it) {
        EntryKernel* entry = *it;

        const sync_pb::EntitySpecifics& local_specifics =
            (*it)->ref(SPECIFICS);
        const sync_pb::EntitySpecifics& server_specifics =
            (*it)->ref(SERVER_SPECIFICS);
        ModelType local_type = GetModelTypeFromSpecifics(local_specifics);
        ModelType server_type = GetModelTypeFromSpecifics(server_specifics);

        if (types_to_unapply.Has(local_type) ||
            types_to_unapply.Has(server_type)) {
          UnapplyEntry(entry);
        } else {
          bool save_to_journal =
              (types_to_journal.Has(local_type) ||
               types_to_journal.Has(server_type)) &&
              (delete_journal_->IsDeleteJournalEnabled(local_type) ||
               delete_journal_->IsDeleteJournalEnabled(server_type));
          DeleteEntry(lock, save_to_journal, entry, &entries_to_journal);
        }
      }

      delete_journal_->AddJournalBatch(&trans, entries_to_journal);

      // Ensure meta tracking for these data types reflects the purged state.
      for (ModelTypeSet::Iterator it = disabled_types.First();
           it.Good(); it.Inc()) {
        kernel_->persisted_info.transaction_version[it.Get()] = 0;

        // Don't discard progress markers or context for unapplied types.
        if (!types_to_unapply.Has(it.Get())) {
          kernel_->persisted_info.ResetDownloadProgress(it.Get());
          kernel_->persisted_info.datatype_context[it.Get()].Clear();
        }
      }

      kernel_->info_status = KERNEL_SHARE_INFO_DIRTY;
    }
  }
  return true;
}
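// Clamps BASE_VERSION and SERVER_VERSION to 1 for every child of |type|'s
// root node. IS_UNSYNCED and IS_UNAPPLIED_UPDATE are left untouched so that
// no in-transit data is lost.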
bool Directory::ResetVersionsForType(BaseWriteTransaction* trans,
                                     ModelType type) {
  if (!ProtocolTypes().Has(type))
    return false;
  DCHECK_NE(type, BOOKMARKS) << "Only non-hierarchical types are supported";

  EntryKernel* type_root = GetEntryByServerTag(ModelTypeToRootTag(type));
  if (!type_root)
    return false;

  ScopedKernelLock lock(this);
  const Id& type_root_id = type_root->ref(ID);
  Directory::Metahandles children;
  AppendChildHandles(lock, type_root_id, &children);

  for (Metahandles::iterator it = children.begin(); it != children.end();
       ++it) {
    EntryKernel* entry = GetEntryByHandle(lock, *it);
    if (!entry)
      continue;
    if (entry->ref(BASE_VERSION) > 1)
      entry->put(BASE_VERSION, 1);
    if (entry->ref(SERVER_VERSION) > 1)
      entry->put(SERVER_VERSION, 1);

    // Note that we do not unset IS_UNSYNCED or IS_UNAPPLIED_UPDATE in order
    // to ensure no in-transit data is lost.

    entry->mark_dirty(&kernel_->dirty_metahandles);
  }

  return true;
}
bool Directory::IsAttachmentLinked(
    const sync_pb::AttachmentIdProto& attachment_id_proto) const {
  ScopedKernelLock lock(this);
  IndexByAttachmentId::const_iterator iter =
      kernel_->index_by_attachment_id.find(attachment_id_proto.unique_id());
  if (iter != kernel_->index_by_attachment_id.end() && !iter->second.empty()) {
    return true;
  }
  return false;
}
void Directory::HandleSaveChangesFailure(const SaveChangesSnapshot& snapshot) {
  WriteTransaction trans(FROM_HERE, HANDLE_SAVE_FAILURE, this);
  ScopedKernelLock lock(this);
  kernel_->info_status = KERNEL_SHARE_INFO_DIRTY;

  // Because we optimistically cleared the dirty bit on the real entries when
  // taking the snapshot, we must restore it on failure. Not doing this could
  // cause lost data, if no other changes are made to the in-memory entries
  // that would cause the dirty bit to get set again. Setting the bit ensures
  // that SaveChanges will at least try again later.
  for (EntryKernelSet::const_iterator i = snapshot.dirty_metas.begin();
       i != snapshot.dirty_metas.end(); ++i) {
    MetahandlesMap::iterator found =
        kernel_->metahandles_map.find((*i)->ref(META_HANDLE));
    if (found != kernel_->metahandles_map.end()) {
      found->second->mark_dirty(&kernel_->dirty_metahandles);
    }
  }

  kernel_->metahandles_to_purge.insert(snapshot.metahandles_to_purge.begin(),
                                       snapshot.metahandles_to_purge.end());

  // Restore delete journals.
  delete_journal_->AddJournalBatch(&trans, snapshot.delete_journals);
  delete_journal_->PurgeDeleteJournals(&trans,
                                       snapshot.delete_journals_to_purge);
}
void Directory::GetDownloadProgress(
    ModelType model_type,
    sync_pb::DataTypeProgressMarker* value_out) const {
  ScopedKernelLock lock(this);
  return value_out->CopyFrom(
      kernel_->persisted_info.download_progress[model_type]);
}

void Directory::GetDownloadProgressAsString(
    ModelType model_type,
    std::string* value_out) const {
  ScopedKernelLock lock(this);
  kernel_->persisted_info.download_progress[model_type].SerializeToString(
      value_out);
}

size_t Directory::GetEntriesCount() const {
  ScopedKernelLock lock(this);
  return kernel_->metahandles_map.size();
}

void Directory::SetDownloadProgress(
    ModelType model_type,
    const sync_pb::DataTypeProgressMarker& new_progress) {
  ScopedKernelLock lock(this);
  kernel_->persisted_info.download_progress[model_type].CopyFrom(new_progress);
  kernel_->info_status = KERNEL_SHARE_INFO_DIRTY;
}

bool Directory::HasEmptyDownloadProgress(ModelType type) const {
  ScopedKernelLock lock(this);
  return kernel_->persisted_info.HasEmptyDownloadProgress(type);
}
int64 Directory::GetTransactionVersion(ModelType type) const {
  kernel_->transaction_mutex.AssertAcquired();
  return kernel_->persisted_info.transaction_version[type];
}

void Directory::IncrementTransactionVersion(ModelType type) {
  kernel_->transaction_mutex.AssertAcquired();
  kernel_->persisted_info.transaction_version[type]++;
  kernel_->info_status = KERNEL_SHARE_INFO_DIRTY;
}

void Directory::GetDataTypeContext(BaseTransaction* trans,
                                   ModelType type,
                                   sync_pb::DataTypeContext* context) const {
  ScopedKernelLock lock(this);
  context->CopyFrom(kernel_->persisted_info.datatype_context[type]);
}

void Directory::SetDataTypeContext(
    BaseWriteTransaction* trans,
    ModelType type,
    const sync_pb::DataTypeContext& context) {
  ScopedKernelLock lock(this);
  kernel_->persisted_info.datatype_context[type].CopyFrom(context);
  kernel_->info_status = KERNEL_SHARE_INFO_DIRTY;
}
// TODO(stanisc): crbug.com/438313: change these to not rely on the folders.
ModelTypeSet Directory::InitialSyncEndedTypes() {
  syncable::ReadTransaction trans(FROM_HERE, this);
  ModelTypeSet protocol_types = ProtocolTypes();
  ModelTypeSet initial_sync_ended_types;
  for (ModelTypeSet::Iterator i = protocol_types.First(); i.Good(); i.Inc()) {
    if (InitialSyncEndedForType(&trans, i.Get())) {
      initial_sync_ended_types.Put(i.Get());
    }
  }
  return initial_sync_ended_types;
}

bool Directory::InitialSyncEndedForType(ModelType type) {
  syncable::ReadTransaction trans(FROM_HERE, this);
  return InitialSyncEndedForType(&trans, type);
}

bool Directory::InitialSyncEndedForType(
    BaseTransaction* trans, ModelType type) {
  // True iff the type's root node has been created.
  syncable::Entry entry(trans, syncable::GET_TYPE_ROOT, type);
  return entry.good();
}
string Directory::store_birthday() const {
  ScopedKernelLock lock(this);
  return kernel_->persisted_info.store_birthday;
}

void Directory::set_store_birthday(const string& store_birthday) {
  ScopedKernelLock lock(this);
  if (kernel_->persisted_info.store_birthday == store_birthday)
    return;
  kernel_->persisted_info.store_birthday = store_birthday;
  kernel_->info_status = KERNEL_SHARE_INFO_DIRTY;
}

string Directory::bag_of_chips() const {
  ScopedKernelLock lock(this);
  return kernel_->persisted_info.bag_of_chips;
}

void Directory::set_bag_of_chips(const string& bag_of_chips) {
  ScopedKernelLock lock(this);
  if (kernel_->persisted_info.bag_of_chips == bag_of_chips)
    return;
  kernel_->persisted_info.bag_of_chips = bag_of_chips;
  kernel_->info_status = KERNEL_SHARE_INFO_DIRTY;
}

string Directory::cache_guid() const {
  // No need to lock since nothing ever writes to it after load.
  return kernel_->cache_guid;
}

NigoriHandler* Directory::GetNigoriHandler() {
  return nigori_handler_;
}

Cryptographer* Directory::GetCryptographer(const BaseTransaction* trans) {
  DCHECK_EQ(this, trans->directory());
  return cryptographer_;
}
void Directory::ReportUnrecoverableError() {
  if (!report_unrecoverable_error_function_.is_null()) {
    report_unrecoverable_error_function_.Run();
  }
}

void Directory::GetAllMetaHandles(BaseTransaction* trans,
                                  MetahandleSet* result) {
  result->clear();
  ScopedKernelLock lock(this);
  for (MetahandlesMap::iterator i = kernel_->metahandles_map.begin();
       i != kernel_->metahandles_map.end(); ++i) {
    result->insert(i->first);
  }
}

void Directory::GetUnsyncedMetaHandles(BaseTransaction* trans,
                                       Metahandles* result) {
  result->clear();
  ScopedKernelLock lock(this);
  copy(kernel_->unsynced_metahandles.begin(),
       kernel_->unsynced_metahandles.end(), back_inserter(*result));
}

int64 Directory::unsynced_entity_count() const {
  ScopedKernelLock lock(this);
  return kernel_->unsynced_metahandles.size();
}

bool Directory::TypeHasUnappliedUpdates(ModelType type) {
  ScopedKernelLock lock(this);
  return !kernel_->unapplied_update_metahandles[type].empty();
}
void Directory::GetUnappliedUpdateMetaHandles(
    BaseTransaction* trans,
    FullModelTypeSet server_types,
    std::vector<int64>* result) {
  result->clear();
  ScopedKernelLock lock(this);
  for (int i = UNSPECIFIED; i < MODEL_TYPE_COUNT; ++i) {
    const ModelType type = ModelTypeFromInt(i);
    if (server_types.Has(type)) {
      std::copy(kernel_->unapplied_update_metahandles[type].begin(),
                kernel_->unapplied_update_metahandles[type].end(),
                back_inserter(*result));
    }
  }
}

void Directory::GetMetaHandlesOfType(BaseTransaction* trans,
                                     ModelType type,
                                     std::vector<int64>* result) {
  ScopedKernelLock lock(this);
  GetMetaHandlesOfType(lock, trans, type, result);
}

void Directory::GetMetaHandlesOfType(const ScopedKernelLock& lock,
                                     BaseTransaction* trans,
                                     ModelType type,
                                     std::vector<int64>* result) {
  result->clear();
  for (MetahandlesMap::iterator it = kernel_->metahandles_map.begin();
       it != kernel_->metahandles_map.end(); ++it) {
    EntryKernel* entry = it->second;
    const ModelType entry_type =
        GetModelTypeFromSpecifics(entry->ref(SPECIFICS));
    if (entry_type == type)
      result->push_back(it->first);
  }
}
void Directory::CollectMetaHandleCounts(
    std::vector<int>* num_entries_by_type,
    std::vector<int>* num_to_delete_entries_by_type) {
  syncable::ReadTransaction trans(FROM_HERE, this);
  ScopedKernelLock lock(this);

  for (MetahandlesMap::iterator it = kernel_->metahandles_map.begin();
       it != kernel_->metahandles_map.end(); ++it) {
    EntryKernel* entry = it->second;
    const ModelType type = GetModelTypeFromSpecifics(entry->ref(SPECIFICS));
    (*num_entries_by_type)[type]++;
    if (entry->ref(IS_DEL))
      (*num_to_delete_entries_by_type)[type]++;
  }
}
scoped_ptr<base::ListValue> Directory::GetNodeDetailsForType(
    BaseTransaction* trans,
    ModelType type) {
  scoped_ptr<base::ListValue> nodes(new base::ListValue());

  ScopedKernelLock lock(this);
  for (MetahandlesMap::iterator it = kernel_->metahandles_map.begin();
       it != kernel_->metahandles_map.end(); ++it) {
    if (GetModelTypeFromSpecifics(it->second->ref(SPECIFICS)) != type) {
      continue;
    }

    EntryKernel* kernel = it->second;
    scoped_ptr<base::DictionaryValue> node(
        kernel->ToValue(GetCryptographer(trans)));

    // Add the position index if appropriate. This must be done here (and not
    // in EntryKernel) because the EntryKernel does not have access to its
    // siblings.
    if (kernel->ShouldMaintainPosition() && !kernel->ref(IS_DEL)) {
      node->SetInteger("positionIndex", GetPositionIndex(trans, kernel));
    }

    nodes->Append(node.release());
  }

  return nodes.Pass();
}
bool Directory::CheckInvariantsOnTransactionClose(
    syncable::BaseTransaction* trans,
    const MetahandleSet& modified_handles) {
  // NOTE: The trans may be in the process of being destructed. Be careful if
  // you wish to call any of its virtual methods.
  switch (invariant_check_level_) {
    case FULL_DB_VERIFICATION: {
      MetahandleSet all_handles;
      GetAllMetaHandles(trans, &all_handles);
      return CheckTreeInvariants(trans, all_handles);
    }
    case VERIFY_CHANGES: {
      return CheckTreeInvariants(trans, modified_handles);
    }
    case OFF: {
      return true;
    }
  }
  NOTREACHED();
  return false;
}
bool Directory::FullyCheckTreeInvariants(syncable::BaseTransaction* trans) {
  MetahandleSet handles;
  GetAllMetaHandles(trans, &handles);
  return CheckTreeInvariants(trans, handles);
}
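// Verifies structural invariants for every handle in |handles|: each handle
// resolves to a valid entry, the root is a synced directory parented to the
// root, non-deleted entries have names and a parent chain that reaches the
// root, and base/server versions are consistent with whether the entry's ID
// is known to the server.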
bool Directory::CheckTreeInvariants(syncable::BaseTransaction* trans,
                                    const MetahandleSet& handles) {
  MetahandleSet::const_iterator i;
  for (i = handles.begin() ; i != handles.end() ; ++i) {
    int64 metahandle = *i;
    Entry e(trans, GET_BY_HANDLE, metahandle);
    if (!SyncAssert(e.good(), FROM_HERE, "Entry is bad", trans))
      return false;
    syncable::Id id = e.GetId();
    syncable::Id parentid = e.GetParentId();

    if (id.IsRoot()) {
      if (!SyncAssert(e.GetIsDir(), FROM_HERE,
                      "Entry should be a directory",
                      trans))
        return false;
      if (!SyncAssert(parentid.IsRoot(), FROM_HERE,
                      "Entry should be root",
                      trans))
        return false;
      if (!SyncAssert(!e.GetIsUnsynced(), FROM_HERE, "Entry should be synced",
                      trans))
        return false;
      continue;
    }

    if (!e.GetIsDel()) {
      if (!SyncAssert(id != parentid, FROM_HERE,
                      "Id should be different from parent id.",
                      trans))
        return false;
      if (!SyncAssert(!e.GetNonUniqueName().empty(), FROM_HERE,
                      "Non unique name should not be empty.",
                      trans))
        return false;

      if (!parentid.IsNull()) {
        int safety_count = handles.size() + 1;
        while (!parentid.IsRoot()) {
          Entry parent(trans, GET_BY_ID, parentid);
          if (!SyncAssert(parent.good(), FROM_HERE,
                          "Parent entry is not valid.", trans))
            return false;
          if (handles.end() == handles.find(parent.GetMetahandle()))
            break;  // Skip further checking if parent was unmodified.
          if (!SyncAssert(parent.GetIsDir(), FROM_HERE,
                          "Parent should be a directory", trans))
            return false;
          if (!SyncAssert(!parent.GetIsDel(), FROM_HERE,
                          "Parent should not have been marked for deletion.",
                          trans))
            return false;
          if (!SyncAssert(handles.end() != handles.find(parent.GetMetahandle()),
                          FROM_HERE, "Parent should be in the index.", trans))
            return false;
          parentid = parent.GetParentId();
          if (!SyncAssert(--safety_count > 0, FROM_HERE,
                          "Count should be greater than zero.", trans))
            return false;
        }
      }
    }
    int64 base_version = e.GetBaseVersion();
    int64 server_version = e.GetServerVersion();
    bool using_unique_client_tag = !e.GetUniqueClientTag().empty();
    if (CHANGES_VERSION == base_version || 0 == base_version) {
      ModelType model_type = e.GetModelType();
      bool is_client_creatable_type_root_folder =
          parentid.IsRoot() &&
          IsTypeWithClientGeneratedRoot(model_type) &&
          e.GetUniqueServerTag() == ModelTypeToRootTag(model_type);
      if (e.GetIsUnappliedUpdate()) {
        // Must be a new item, or a de-duplicated unique client tag
        // that was created both locally and remotely, or a type root folder
        // that was created both locally and remotely.
        if (!(using_unique_client_tag ||
              is_client_creatable_type_root_folder)) {
          if (!SyncAssert(e.GetIsDel(), FROM_HERE,
                          "The entry should have been deleted.", trans))
            return false;
        }
        // It came from the server, so it must have a server ID.
        if (!SyncAssert(id.ServerKnows(), FROM_HERE,
                        "The id should be from a server.",
                        trans))
          return false;
      } else {
        if (e.GetIsDir()) {
          // TODO(chron): Implement this mode if clients ever need it.
          // For now, you can't combine a client tag and a directory.
          if (!SyncAssert(!using_unique_client_tag, FROM_HERE,
                          "Directory cannot have a client tag.",
                          trans))
            return false;
        }
        if (is_client_creatable_type_root_folder) {
          // This must be a locally created type root folder.
          if (!SyncAssert(
                  !e.GetIsUnsynced(), FROM_HERE,
                  "Locally created type root folders should not be unsynced.",
                  trans))
            return false;

          if (!SyncAssert(
                  !e.GetIsDel(), FROM_HERE,
                  "Locally created type root folders should not be deleted.",
                  trans))
            return false;
        } else {
          // Should be an uncommitted item, or a successfully deleted one.
          if (!e.GetIsDel()) {
            if (!SyncAssert(e.GetIsUnsynced(), FROM_HERE,
                            "The item should be unsynced.", trans))
              return false;
          }
        }
        // If the next check failed, it would imply that an item exists
        // on the server, isn't waiting for application locally, but either
        // is an unsynced create or a successful delete in the local copy.
        // Either way, that's a mismatch.
        if (!SyncAssert(0 == server_version, FROM_HERE,
                        "Server version should be zero.",
                        trans))
          return false;
        // Items that aren't using the unique client tag should have a zero
        // base version only if they have a local ID. Items with unique client
        // tags are allowed to use the zero base version for undeletion and
        // de-duplication; the unique client tag trumps the server ID.
        if (!using_unique_client_tag) {
          if (!SyncAssert(!id.ServerKnows(), FROM_HERE,
                          "Should be a client only id.",
                          trans))
            return false;
        }
      }
    } else {
      if (!SyncAssert(id.ServerKnows(),
                      FROM_HERE,
                      "Should be a server id.",
                      trans))
        return false;
    }

    // Previously we would assert that locally deleted items that have never
    // been synced must not be sent to the server (IS_UNSYNCED must be false).
    // This is not always true in the case that an item is deleted while the
    // initial commit is in flight. See crbug.com/426865.
  }
  return true;
}
void Directory::SetInvariantCheckLevel(InvariantCheckLevel check_level) {
  invariant_check_level_ = check_level;
}

int64 Directory::NextMetahandle() {
  ScopedKernelLock lock(this);
  int64 metahandle = (kernel_->next_metahandle)++;
  return metahandle;
}
// Generates next client ID based on a randomly generated GUID.
Id Directory::NextId() {
  return Id::CreateFromClientString(base::GenerateGUID());
}

bool Directory::HasChildren(BaseTransaction* trans, const Id& id) {
  ScopedKernelLock lock(this);
  return kernel_->parent_child_index.GetChildren(id) != NULL;
}
Id Directory::GetFirstChildId(BaseTransaction* trans,
                              const EntryKernel* parent) {
  DCHECK(parent);
  DCHECK(parent->ref(IS_DIR));

  ScopedKernelLock lock(this);
  const OrderedChildSet* children =
      kernel_->parent_child_index.GetChildren(parent->ref(ID));

  // We're expected to return root if there are no children.
  if (!children)
    return Id();

  return (*children->begin())->ref(ID);
}
syncable::Id Directory::GetPredecessorId(EntryKernel* e) {
  ScopedKernelLock lock(this);

  DCHECK(ParentChildIndex::ShouldInclude(e));
  const OrderedChildSet* siblings = kernel_->parent_child_index.GetSiblings(e);
  OrderedChildSet::const_iterator i = siblings->find(e);
  DCHECK(i != siblings->end());

  if (i == siblings->begin()) {
    return Id();
  } else {
    i--;
    return (*i)->ref(ID);
  }
}

syncable::Id Directory::GetSuccessorId(EntryKernel* e) {
  ScopedKernelLock lock(this);

  DCHECK(ParentChildIndex::ShouldInclude(e));
  const OrderedChildSet* siblings = kernel_->parent_child_index.GetSiblings(e);
  OrderedChildSet::const_iterator i = siblings->find(e);
  DCHECK(i != siblings->end());

  i++;
  if (i == siblings->end()) {
    return Id();
  } else {
    return (*i)->ref(ID);
  }
}
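// Assigns |e| a UniquePosition that places it immediately after |predecessor|
// among its siblings. A NULL |predecessor| means |e| becomes the first child
// of its parent.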
// TODO(rlarocque): Remove all support for placing ShouldMaintainPosition()
// items as siblings of items that do not maintain positions. It is required
// only for tests. See crbug.com/178282.
void Directory::PutPredecessor(EntryKernel* e, EntryKernel* predecessor) {
  DCHECK(!e->ref(IS_DEL));
  if (!e->ShouldMaintainPosition()) {
    DCHECK(!e->ref(UNIQUE_POSITION).IsValid());
    return;
  }
  std::string suffix = e->ref(UNIQUE_BOOKMARK_TAG);
  DCHECK(!suffix.empty());

  // Remove our item from the ParentChildIndex and remember to re-add it later.
  ScopedKernelLock lock(this);
  ScopedParentChildIndexUpdater updater(lock, e, &kernel_->parent_child_index);

  // Note: The ScopedParentChildIndexUpdater will update this set for us as we
  // leave this function.
  const OrderedChildSet* siblings =
      kernel_->parent_child_index.GetChildren(e->ref(PARENT_ID));

  if (!siblings) {
    // This parent currently has no other children.
    DCHECK(predecessor == NULL);
    UniquePosition pos = UniquePosition::InitialPosition(suffix);
    e->put(UNIQUE_POSITION, pos);
    return;
  }

  if (predecessor == NULL) {
    // We have at least one sibling, and we're inserting to the left of them.
    UniquePosition successor_pos = (*siblings->begin())->ref(UNIQUE_POSITION);

    UniquePosition pos;
    if (!successor_pos.IsValid()) {
      // If all our successors are of non-positionable types, just create an
      // initial position. We arbitrarily choose to sort invalid positions to
      // the right of the valid positions.
      //
      // We really shouldn't need to support this. See TODO above.
      pos = UniquePosition::InitialPosition(suffix);
    } else {
      DCHECK(!siblings->empty());
      pos = UniquePosition::Before(successor_pos, suffix);
    }

    e->put(UNIQUE_POSITION, pos);
    return;
  }

  // We can't support placing an item after an invalid position. Fortunately,
  // the tests don't exercise this particular case. We should not support
  // siblings with invalid positions at all. See TODO above.
  DCHECK(predecessor->ref(UNIQUE_POSITION).IsValid());

  OrderedChildSet::const_iterator neighbour = siblings->find(predecessor);
  DCHECK(neighbour != siblings->end());

  ++neighbour;
  if (neighbour == siblings->end()) {
    // Inserting at the end of the list.
    UniquePosition pos = UniquePosition::After(
        predecessor->ref(UNIQUE_POSITION),
        suffix);
    e->put(UNIQUE_POSITION, pos);
    return;
  }

  EntryKernel* successor = *neighbour;

  // Another mixed valid and invalid position case. This one could be supported
  // in theory, but we're trying to deprecate support for siblings with and
  // without valid positions. See TODO above.
  //
  // Using a release CHECK here because the following UniquePosition::Between
  // call crashes anyway when the position string is empty (see crbug/332371).
  CHECK(successor->ref(UNIQUE_POSITION).IsValid());

  // Finally, the normal case: inserting between two elements.
  UniquePosition pos = UniquePosition::Between(
      predecessor->ref(UNIQUE_POSITION),
      successor->ref(UNIQUE_POSITION),
      suffix);
  e->put(UNIQUE_POSITION, pos);
  return;
}
// TODO(rlarocque): Avoid this indirection. Just return the set.
void Directory::AppendChildHandles(const ScopedKernelLock& lock,
                                   const Id& parent_id,
                                   Directory::Metahandles* result) {
  const OrderedChildSet* children =
      kernel_->parent_child_index.GetChildren(parent_id);
  if (!children)
    return;

  for (OrderedChildSet::const_iterator i = children->begin();
       i != children->end(); ++i) {
    result->push_back((*i)->ref(META_HANDLE));
  }
}

void Directory::UnmarkDirtyEntry(WriteTransaction* trans, Entry* entry) {
  CHECK(trans);
  entry->kernel_->clear_dirty(&kernel_->dirty_metahandles);
}
void Directory::GetAttachmentIdsToUpload(BaseTransaction* trans,
                                         ModelType type,
                                         AttachmentIdList* ids) {
  // TODO(maniscalco): Maintain an index by ModelType and rewrite this method to
  // use it. The approach below is likely very expensive because it iterates
  // all entries (bug 415199).
  DCHECK(trans);
  DCHECK(ids);
  ids->clear();
  AttachmentIdSet on_server_id_set;
  AttachmentIdSet not_on_server_id_set;
  std::vector<int64> metahandles;
  {
    ScopedKernelLock lock(this);
    GetMetaHandlesOfType(lock, trans, type, &metahandles);
    std::vector<int64>::const_iterator iter = metahandles.begin();
    const std::vector<int64>::const_iterator end = metahandles.end();
    // For all of this type's entries...
    for (; iter != end; ++iter) {
      EntryKernel* entry = GetEntryByHandle(lock, *iter);
      DCHECK(entry);
      const sync_pb::AttachmentMetadata metadata =
          entry->ref(ATTACHMENT_METADATA);
      // for each of this entry's attachments...
      for (int i = 0; i < metadata.record_size(); ++i) {
        AttachmentId id =
            AttachmentId::CreateFromProto(metadata.record(i).id());
        // if this attachment is known to be on the server, remember it for
        // later,
        if (metadata.record(i).is_on_server()) {
          on_server_id_set.insert(id);
        } else {
          // otherwise, add it to id_set.
          not_on_server_id_set.insert(id);
        }
      }
    }
  }
  // Why did we bother keeping a set of ids known to be on the server? The
  // is_on_server flag is stored denormalized so we can end up with two entries
  // with the same attachment id where one says it's on the server and the other
  // says it's not. When this happens, we trust the one that says it's on the
  // server. To avoid re-uploading the same attachment multiple times, we
  // remove any ids known to be on the server from the id_set we are about to
  // return.
  //
  // TODO(maniscalco): Eliminate redundant metadata storage (bug 415203).
  std::set_difference(not_on_server_id_set.begin(), not_on_server_id_set.end(),
                      on_server_id_set.begin(), on_server_id_set.end(),
                      std::back_inserter(*ids));
}
void Directory::OnCatastrophicError() {
  UMA_HISTOGRAM_BOOLEAN("Sync.DirectoryCatastrophicError", true);
  ReadTransaction trans(FROM_HERE, this);
  OnUnrecoverableError(&trans, FROM_HERE,
                       "Catastrophic error detected, Sync DB is unrecoverable");
}
Directory::Kernel* Directory::kernel() {
  return kernel_;
}

const Directory::Kernel* Directory::kernel() const {
  return kernel_;
}

}  // namespace syncable
}  // namespace syncer