Include all dupe types (even when value is zero) in scan stats.
[chromium-blink-merge.git] / sync/syncable/directory.cc
blob 2fbceec3729d9f3fd4b23369275662ab658210ef
// Copyright 2013 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "sync/syncable/directory.h"

#include <algorithm>
#include <iterator>

#include "base/base64.h"
#include "base/guid.h"
#include "base/metrics/histogram.h"
#include "base/stl_util.h"
#include "base/strings/string_number_conversions.h"
#include "base/trace_event/trace_event.h"
#include "sync/internal_api/public/base/attachment_id_proto.h"
#include "sync/internal_api/public/base/unique_position.h"
#include "sync/internal_api/public/util/unrecoverable_error_handler.h"
#include "sync/syncable/entry.h"
#include "sync/syncable/entry_kernel.h"
#include "sync/syncable/in_memory_directory_backing_store.h"
#include "sync/syncable/on_disk_directory_backing_store.h"
#include "sync/syncable/scoped_kernel_lock.h"
#include "sync/syncable/scoped_parent_child_index_updater.h"
#include "sync/syncable/syncable-inl.h"
#include "sync/syncable/syncable_base_transaction.h"
#include "sync/syncable/syncable_changes_version.h"
#include "sync/syncable/syncable_read_transaction.h"
#include "sync/syncable/syncable_util.h"
#include "sync/syncable/syncable_write_transaction.h"

using std::string;

namespace syncer {
namespace syncable {

// static
const base::FilePath::CharType Directory::kSyncDatabaseFilename[] =
    FILE_PATH_LITERAL("SyncData.sqlite3");

Directory::PersistedKernelInfo::PersistedKernelInfo() {
  ModelTypeSet protocol_types = ProtocolTypes();
  for (ModelTypeSet::Iterator iter = protocol_types.First(); iter.Good();
       iter.Inc()) {
    ResetDownloadProgress(iter.Get());
    transaction_version[iter.Get()] = 0;
  }
}

Directory::PersistedKernelInfo::~PersistedKernelInfo() {}

void Directory::PersistedKernelInfo::ResetDownloadProgress(
    ModelType model_type) {
  // Clear everything except the data type id field.
  download_progress[model_type].Clear();
  download_progress[model_type].set_data_type_id(
      GetSpecificsFieldNumberFromModelType(model_type));

  // Explicitly set an empty token field to denote no progress.
  download_progress[model_type].set_token("");
}

bool Directory::PersistedKernelInfo::HasEmptyDownloadProgress(
    ModelType model_type) {
  const sync_pb::DataTypeProgressMarker& progress_marker =
      download_progress[model_type];
  return progress_marker.token().empty();
}

Directory::SaveChangesSnapshot::SaveChangesSnapshot()
    : kernel_info_status(KERNEL_SHARE_INFO_INVALID) {
}

Directory::SaveChangesSnapshot::~SaveChangesSnapshot() {
  STLDeleteElements(&dirty_metas);
  STLDeleteElements(&delete_journals);
}

bool Directory::SaveChangesSnapshot::HasUnsavedMetahandleChanges() const {
  return !dirty_metas.empty() || !metahandles_to_purge.empty() ||
         !delete_journals.empty() || !delete_journals_to_purge.empty();
}

Directory::Kernel::Kernel(
    const std::string& name,
    const KernelLoadInfo& info,
    DirectoryChangeDelegate* delegate,
    const WeakHandle<TransactionObserver>& transaction_observer)
    : next_write_transaction_id(0),
      name(name),
      info_status(Directory::KERNEL_SHARE_INFO_VALID),
      persisted_info(info.kernel_info),
      cache_guid(info.cache_guid),
      next_metahandle(info.max_metahandle + 1),
      delegate(delegate),
      transaction_observer(transaction_observer) {
  DCHECK(delegate);
  DCHECK(transaction_observer.IsInitialized());
}

Directory::Kernel::~Kernel() {
  STLDeleteContainerPairSecondPointers(metahandles_map.begin(),
                                       metahandles_map.end());
}

Directory::Directory(
    DirectoryBackingStore* store,
    UnrecoverableErrorHandler* unrecoverable_error_handler,
    ReportUnrecoverableErrorFunction report_unrecoverable_error_function,
    NigoriHandler* nigori_handler,
    Cryptographer* cryptographer)
    : kernel_(NULL),
      store_(store),
      unrecoverable_error_handler_(unrecoverable_error_handler),
      report_unrecoverable_error_function_(report_unrecoverable_error_function),
      unrecoverable_error_set_(false),
      nigori_handler_(nigori_handler),
      cryptographer_(cryptographer),
      invariant_check_level_(VERIFY_CHANGES),
      weak_ptr_factory_(this) {
}

Directory::~Directory() {
  Close();
}

DirOpenResult Directory::Open(
    const string& name,
    DirectoryChangeDelegate* delegate,
    const WeakHandle<TransactionObserver>& transaction_observer) {
  TRACE_EVENT0("sync", "SyncDatabaseOpen");

  const DirOpenResult result =
      OpenImpl(name, delegate, transaction_observer);

  if (OPENED != result)
    Close();
  return result;
}

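// Rebuilds the in-memory indices (parent-child, unsynced, unapplied-update,
// server/client tag, id, and attachment indices) from a freshly loaded
// |handles_map|, taking ownership of its entries via swap.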
void Directory::InitializeIndices(MetahandlesMap* handles_map) {
  ScopedKernelLock lock(this);
  kernel_->metahandles_map.swap(*handles_map);
  for (MetahandlesMap::const_iterator it = kernel_->metahandles_map.begin();
       it != kernel_->metahandles_map.end(); ++it) {
    EntryKernel* entry = it->second;
    if (ParentChildIndex::ShouldInclude(entry))
      kernel_->parent_child_index.Insert(entry);
    const int64 metahandle = entry->ref(META_HANDLE);
    if (entry->ref(IS_UNSYNCED))
      kernel_->unsynced_metahandles.insert(metahandle);
    if (entry->ref(IS_UNAPPLIED_UPDATE)) {
      const ModelType type = entry->GetServerModelType();
      kernel_->unapplied_update_metahandles[type].insert(metahandle);
    }
    if (!entry->ref(UNIQUE_SERVER_TAG).empty()) {
      DCHECK(kernel_->server_tags_map.find(entry->ref(UNIQUE_SERVER_TAG)) ==
             kernel_->server_tags_map.end())
          << "Unexpected duplicate use of server tag";
      kernel_->server_tags_map[entry->ref(UNIQUE_SERVER_TAG)] = entry;
    }
    if (!entry->ref(UNIQUE_CLIENT_TAG).empty()) {
      DCHECK(kernel_->client_tags_map.find(entry->ref(UNIQUE_CLIENT_TAG)) ==
             kernel_->client_tags_map.end())
          << "Unexpected duplicate use of client tag";
      kernel_->client_tags_map[entry->ref(UNIQUE_CLIENT_TAG)] = entry;
    }
    DCHECK(kernel_->ids_map.find(entry->ref(ID).value()) ==
           kernel_->ids_map.end()) << "Unexpected duplicate use of ID";
    kernel_->ids_map[entry->ref(ID).value()] = entry;
    DCHECK(!entry->is_dirty());
    AddToAttachmentIndex(lock, metahandle, entry->ref(ATTACHMENT_METADATA));
  }
}

DirOpenResult Directory::OpenImpl(
    const string& name,
    DirectoryChangeDelegate* delegate,
    const WeakHandle<TransactionObserver>&
        transaction_observer) {
  KernelLoadInfo info;
  // Temporary indices before kernel_ is initialized, in case Load fails. We
  // swap these in later; the swap is O(1).
  Directory::MetahandlesMap tmp_handles_map;

  // Avoids mem leaks on failure. Harmlessly deletes the empty hash map after
  // the swap in the success case.
  STLValueDeleter<MetahandlesMap> deleter(&tmp_handles_map);

  JournalIndex delete_journals;
  MetahandleSet metahandles_to_purge;

  DirOpenResult result = store_->Load(&tmp_handles_map, &delete_journals,
                                      &metahandles_to_purge, &info);
  if (OPENED != result)
    return result;

  DCHECK(!kernel_);
  kernel_ = new Kernel(name, info, delegate, transaction_observer);
  kernel_->metahandles_to_purge.swap(metahandles_to_purge);
  delete_journal_.reset(new DeleteJournal(&delete_journals));
  InitializeIndices(&tmp_handles_map);

  // Save changes back in case there are any metahandles to purge.
  if (!SaveChanges())
    return FAILED_INITIAL_WRITE;

  // Now that we've successfully opened the store, install an error handler to
  // deal with catastrophic errors that may occur later on. Use a weak pointer
  // because we cannot guarantee that this Directory will outlive the Closure.
  store_->SetCatastrophicErrorHandler(base::Bind(
      &Directory::OnCatastrophicError, weak_ptr_factory_.GetWeakPtr()));

  return OPENED;
}

DeleteJournal* Directory::delete_journal() {
  DCHECK(delete_journal_.get());
  return delete_journal_.get();
}

void Directory::Close() {
  store_.reset();
  if (kernel_) {
    delete kernel_;
    kernel_ = NULL;
  }
}

void Directory::OnUnrecoverableError(const BaseTransaction* trans,
                                     const tracked_objects::Location& location,
                                     const std::string& message) {
  DCHECK(trans != NULL);
  unrecoverable_error_set_ = true;
  unrecoverable_error_handler_->OnUnrecoverableError(location,
                                                     message);
}

EntryKernel* Directory::GetEntryById(const Id& id) {
  ScopedKernelLock lock(this);
  return GetEntryById(lock, id);
}

EntryKernel* Directory::GetEntryById(const ScopedKernelLock& lock,
                                     const Id& id) {
  DCHECK(kernel_);
  // Find it in the in memory ID index.
  IdsMap::iterator id_found = kernel_->ids_map.find(id.value());
  if (id_found != kernel_->ids_map.end()) {
    return id_found->second;
  }
  return NULL;
}

EntryKernel* Directory::GetEntryByClientTag(const string& tag) {
  ScopedKernelLock lock(this);
  DCHECK(kernel_);

  TagsMap::iterator it = kernel_->client_tags_map.find(tag);
  if (it != kernel_->client_tags_map.end()) {
    return it->second;
  }
  return NULL;
}

EntryKernel* Directory::GetEntryByServerTag(const string& tag) {
  ScopedKernelLock lock(this);
  DCHECK(kernel_);
  TagsMap::iterator it = kernel_->server_tags_map.find(tag);
  if (it != kernel_->server_tags_map.end()) {
    return it->second;
  }
  return NULL;
}

EntryKernel* Directory::GetEntryByHandle(int64 metahandle) {
  ScopedKernelLock lock(this);
  return GetEntryByHandle(lock, metahandle);
}

EntryKernel* Directory::GetEntryByHandle(const ScopedKernelLock& lock,
                                         int64 metahandle) {
  // Look up in memory.
  MetahandlesMap::iterator found =
      kernel_->metahandles_map.find(metahandle);
  if (found != kernel_->metahandles_map.end()) {
    // Found it in memory. Easy.
    return found->second;
  }
  return NULL;
}

bool Directory::GetChildHandlesById(
    BaseTransaction* trans, const Id& parent_id,
    Directory::Metahandles* result) {
  if (!SyncAssert(this == trans->directory(), FROM_HERE,
                  "Directories don't match", trans))
    return false;
  result->clear();

  ScopedKernelLock lock(this);
  AppendChildHandles(lock, parent_id, result);
  return true;
}

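// Counts |kernel| plus all of its descendants by expanding child sets
// iteratively with a work queue rather than recursing.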
int Directory::GetTotalNodeCount(
    BaseTransaction* trans,
    EntryKernel* kernel) const {
  if (!SyncAssert(this == trans->directory(), FROM_HERE,
                  "Directories don't match", trans))
    return false;

  int count = 1;
  std::deque<const OrderedChildSet*> child_sets;

  GetChildSetForKernel(trans, kernel, &child_sets);
  while (!child_sets.empty()) {
    const OrderedChildSet* set = child_sets.front();
    child_sets.pop_front();
    for (OrderedChildSet::const_iterator it = set->begin();
         it != set->end(); ++it) {
      count++;
      GetChildSetForKernel(trans, *it, &child_sets);
    }
  }

  return count;
}

void Directory::GetChildSetForKernel(
    BaseTransaction* trans,
    EntryKernel* kernel,
    std::deque<const OrderedChildSet*>* child_sets) const {
  if (!kernel->ref(IS_DIR))
    return;  // Not a directory => no children.

  const OrderedChildSet* descendants =
      kernel_->parent_child_index.GetChildren(kernel->ref(ID));
  if (!descendants)
    return;  // This directory has no children.

  // Add our children to the list of items to be traversed.
  child_sets->push_back(descendants);
}

int Directory::GetPositionIndex(
    BaseTransaction* trans,
    EntryKernel* kernel) const {
  const OrderedChildSet* siblings =
      kernel_->parent_child_index.GetSiblings(kernel);

  OrderedChildSet::const_iterator it = siblings->find(kernel);
  return std::distance(siblings->begin(), it);
}

bool Directory::InsertEntry(BaseWriteTransaction* trans, EntryKernel* entry) {
  ScopedKernelLock lock(this);
  return InsertEntry(lock, trans, entry);
}

bool Directory::InsertEntry(const ScopedKernelLock& lock,
                            BaseWriteTransaction* trans,
                            EntryKernel* entry) {
  if (!SyncAssert(NULL != entry, FROM_HERE, "Entry is null", trans))
    return false;

  static const char error[] = "Entry already in memory index.";

  if (!SyncAssert(
          kernel_->metahandles_map.insert(
              std::make_pair(entry->ref(META_HANDLE), entry)).second,
          FROM_HERE,
          error,
          trans)) {
    return false;
  }
  if (!SyncAssert(
          kernel_->ids_map.insert(
              std::make_pair(entry->ref(ID).value(), entry)).second,
          FROM_HERE,
          error,
          trans)) {
    return false;
  }
  if (ParentChildIndex::ShouldInclude(entry)) {
    if (!SyncAssert(kernel_->parent_child_index.Insert(entry),
                    FROM_HERE,
                    error,
                    trans)) {
      return false;
    }
  }
  AddToAttachmentIndex(
      lock, entry->ref(META_HANDLE), entry->ref(ATTACHMENT_METADATA));

  // Should NEVER be created with a client tag or server tag.
  if (!SyncAssert(entry->ref(UNIQUE_SERVER_TAG).empty(), FROM_HERE,
                  "Server tag should be empty", trans)) {
    return false;
  }
  if (!SyncAssert(entry->ref(UNIQUE_CLIENT_TAG).empty(), FROM_HERE,
                  "Client tag should be empty", trans))
    return false;

  return true;
}

bool Directory::ReindexId(BaseWriteTransaction* trans,
                          EntryKernel* const entry,
                          const Id& new_id) {
  ScopedKernelLock lock(this);
  if (NULL != GetEntryById(lock, new_id))
    return false;

  {
    // Update the indices that depend on the ID field.
    ScopedParentChildIndexUpdater updater_b(lock, entry,
        &kernel_->parent_child_index);
    size_t num_erased = kernel_->ids_map.erase(entry->ref(ID).value());
    DCHECK_EQ(1U, num_erased);
    entry->put(ID, new_id);
    kernel_->ids_map[entry->ref(ID).value()] = entry;
  }
  return true;
}

bool Directory::ReindexParentId(BaseWriteTransaction* trans,
                                EntryKernel* const entry,
                                const Id& new_parent_id) {
  ScopedKernelLock lock(this);

  {
    // Update the indices that depend on the PARENT_ID field.
    ScopedParentChildIndexUpdater index_updater(lock, entry,
        &kernel_->parent_child_index);
    entry->put(PARENT_ID, new_parent_id);
  }
  return true;
}

void Directory::RemoveFromAttachmentIndex(
    const ScopedKernelLock& lock,
    const int64 metahandle,
    const sync_pb::AttachmentMetadata& attachment_metadata) {
  for (int i = 0; i < attachment_metadata.record_size(); ++i) {
    AttachmentIdUniqueId unique_id =
        attachment_metadata.record(i).id().unique_id();
    IndexByAttachmentId::iterator iter =
        kernel_->index_by_attachment_id.find(unique_id);
    if (iter != kernel_->index_by_attachment_id.end()) {
      iter->second.erase(metahandle);
      if (iter->second.empty()) {
        kernel_->index_by_attachment_id.erase(iter);
      }
    }
  }
}

void Directory::AddToAttachmentIndex(
    const ScopedKernelLock& lock,
    const int64 metahandle,
    const sync_pb::AttachmentMetadata& attachment_metadata) {
  for (int i = 0; i < attachment_metadata.record_size(); ++i) {
    AttachmentIdUniqueId unique_id =
        attachment_metadata.record(i).id().unique_id();
    IndexByAttachmentId::iterator iter =
        kernel_->index_by_attachment_id.find(unique_id);
    if (iter == kernel_->index_by_attachment_id.end()) {
      iter = kernel_->index_by_attachment_id.insert(std::make_pair(
          unique_id,
          MetahandleSet())).first;
    }
    iter->second.insert(metahandle);
  }
}

void Directory::UpdateAttachmentIndex(
    const int64 metahandle,
    const sync_pb::AttachmentMetadata& old_metadata,
    const sync_pb::AttachmentMetadata& new_metadata) {
  ScopedKernelLock lock(this);
  RemoveFromAttachmentIndex(lock, metahandle, old_metadata);
  AddToAttachmentIndex(lock, metahandle, new_metadata);
}

void Directory::GetMetahandlesByAttachmentId(
    BaseTransaction* trans,
    const sync_pb::AttachmentIdProto& attachment_id_proto,
    Metahandles* result) {
  DCHECK(result);
  result->clear();
  ScopedKernelLock lock(this);
  IndexByAttachmentId::const_iterator index_iter =
      kernel_->index_by_attachment_id.find(attachment_id_proto.unique_id());
  if (index_iter == kernel_->index_by_attachment_id.end())
    return;
  const MetahandleSet& metahandle_set = index_iter->second;
  std::copy(
      metahandle_set.begin(), metahandle_set.end(), back_inserter(*result));
}

bool Directory::unrecoverable_error_set(const BaseTransaction* trans) const {
  DCHECK(trans != NULL);
  return unrecoverable_error_set_;
}

void Directory::ClearDirtyMetahandles(const ScopedKernelLock& lock) {
  kernel_->transaction_mutex.AssertAcquired();
  kernel_->dirty_metahandles.clear();
}

bool Directory::SafeToPurgeFromMemory(WriteTransaction* trans,
                                      const EntryKernel* const entry) const {
  bool safe = entry->ref(IS_DEL) && !entry->is_dirty() &&
              !entry->ref(SYNCING) && !entry->ref(IS_UNAPPLIED_UPDATE) &&
              !entry->ref(IS_UNSYNCED);

  if (safe) {
    int64 handle = entry->ref(META_HANDLE);
    const ModelType type = entry->GetServerModelType();
    if (!SyncAssert(kernel_->dirty_metahandles.count(handle) == 0U,
                    FROM_HERE,
                    "Dirty metahandles should be empty", trans))
      return false;
    // TODO(tim): Bug 49278.
    if (!SyncAssert(!kernel_->unsynced_metahandles.count(handle),
                    FROM_HERE,
                    "Unsynced handles should be empty",
                    trans))
      return false;
    if (!SyncAssert(!kernel_->unapplied_update_metahandles[type].count(handle),
                    FROM_HERE,
                    "Unapplied metahandles should be empty",
                    trans))
      return false;
  }

  return safe;
}

void Directory::TakeSnapshotForSaveChanges(SaveChangesSnapshot* snapshot) {
  ReadTransaction trans(FROM_HERE, this);
  ScopedKernelLock lock(this);

  // If there is an unrecoverable error then just bail out.
  if (unrecoverable_error_set(&trans))
    return;

  // Deep copy dirty entries from kernel_->metahandles_index into snapshot and
  // clear dirty flags.
  for (MetahandleSet::const_iterator i = kernel_->dirty_metahandles.begin();
       i != kernel_->dirty_metahandles.end(); ++i) {
    EntryKernel* entry = GetEntryByHandle(lock, *i);
    if (!entry)
      continue;
    // Skip over false positives; it happens relatively infrequently.
    if (!entry->is_dirty())
      continue;
    snapshot->dirty_metas.insert(snapshot->dirty_metas.end(),
                                 new EntryKernel(*entry));
    DCHECK_EQ(1U, kernel_->dirty_metahandles.count(*i));
    // We don't bother removing from the index here as we blow the entire thing
    // in a moment, and it unnecessarily complicates iteration.
    entry->clear_dirty(NULL);
  }
  ClearDirtyMetahandles(lock);

  // Set purged handles.
  DCHECK(snapshot->metahandles_to_purge.empty());
  snapshot->metahandles_to_purge.swap(kernel_->metahandles_to_purge);

  // Fill kernel_info_status and kernel_info.
  snapshot->kernel_info = kernel_->persisted_info;
  snapshot->kernel_info_status = kernel_->info_status;
  // This one we reset on failure.
  kernel_->info_status = KERNEL_SHARE_INFO_VALID;

  delete_journal_->TakeSnapshotAndClear(
      &trans, &snapshot->delete_journals, &snapshot->delete_journals_to_purge);
}

bool Directory::SaveChanges() {
  bool success = false;

  base::AutoLock scoped_lock(kernel_->save_changes_mutex);

  // Snapshot and save.
  SaveChangesSnapshot snapshot;
  TakeSnapshotForSaveChanges(&snapshot);
  success = store_->SaveChanges(snapshot);

  // Handle success or failure.
  if (success)
    success = VacuumAfterSaveChanges(snapshot);
  else
    HandleSaveChangesFailure(snapshot);
  return success;
}

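// After a successful save, drops entries that are safe to purge (deleted,
// clean, and neither unsynced nor awaiting an update) from all in-memory
// indices and frees them.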
bool Directory::VacuumAfterSaveChanges(const SaveChangesSnapshot& snapshot) {
  if (snapshot.dirty_metas.empty())
    return true;

  // Need a write transaction as we are about to permanently purge entries.
  WriteTransaction trans(FROM_HERE, VACUUM_AFTER_SAVE, this);
  ScopedKernelLock lock(this);
  // Now drop everything we can out of memory.
  for (EntryKernelSet::const_iterator i = snapshot.dirty_metas.begin();
       i != snapshot.dirty_metas.end(); ++i) {
    MetahandlesMap::iterator found =
        kernel_->metahandles_map.find((*i)->ref(META_HANDLE));
    EntryKernel* entry = (found == kernel_->metahandles_map.end() ?
                          NULL : found->second);
    if (entry && SafeToPurgeFromMemory(&trans, entry)) {
      // We now drop deleted metahandles that are up to date on both the client
      // and the server.
      size_t num_erased = 0;
      num_erased = kernel_->metahandles_map.erase(entry->ref(META_HANDLE));
      DCHECK_EQ(1u, num_erased);
      num_erased = kernel_->ids_map.erase(entry->ref(ID).value());
      DCHECK_EQ(1u, num_erased);
      if (!entry->ref(UNIQUE_SERVER_TAG).empty()) {
        num_erased =
            kernel_->server_tags_map.erase(entry->ref(UNIQUE_SERVER_TAG));
        DCHECK_EQ(1u, num_erased);
      }
      if (!entry->ref(UNIQUE_CLIENT_TAG).empty()) {
        num_erased =
            kernel_->client_tags_map.erase(entry->ref(UNIQUE_CLIENT_TAG));
        DCHECK_EQ(1u, num_erased);
      }
      if (!SyncAssert(!kernel_->parent_child_index.Contains(entry),
                      FROM_HERE,
                      "Deleted entry still present",
                      (&trans)))
        return false;
      RemoveFromAttachmentIndex(
          lock, entry->ref(META_HANDLE), entry->ref(ATTACHMENT_METADATA));

      delete entry;
    }
    if (trans.unrecoverable_error_set())
      return false;
  }
  return true;
}

void Directory::UnapplyEntry(EntryKernel* entry) {
  int64 handle = entry->ref(META_HANDLE);
  ModelType server_type = GetModelTypeFromSpecifics(
      entry->ref(SERVER_SPECIFICS));

  // Clear enough so that on the next sync cycle all local data will
  // be overwritten.
  // Note: do not modify the root node in order to preserve the
  // initial sync ended bit for this type (else on the next restart
  // this type will be treated as disabled and therefore fully purged).
  if (IsRealDataType(server_type) &&
      ModelTypeToRootTag(server_type) == entry->ref(UNIQUE_SERVER_TAG)) {
    return;
  }

  // Set the unapplied bit if this item has server data.
  if (IsRealDataType(server_type) && !entry->ref(IS_UNAPPLIED_UPDATE)) {
    entry->put(IS_UNAPPLIED_UPDATE, true);
    kernel_->unapplied_update_metahandles[server_type].insert(handle);
    entry->mark_dirty(&kernel_->dirty_metahandles);
  }

  // Unset the unsynced bit.
  if (entry->ref(IS_UNSYNCED)) {
    kernel_->unsynced_metahandles.erase(handle);
    entry->put(IS_UNSYNCED, false);
    entry->mark_dirty(&kernel_->dirty_metahandles);
  }

  // Mark the item as locally deleted. No deleted items are allowed in the
  // parent child index.
  if (!entry->ref(IS_DEL)) {
    kernel_->parent_child_index.Remove(entry);
    entry->put(IS_DEL, true);
    entry->mark_dirty(&kernel_->dirty_metahandles);
  }

  // Set the version to the "newly created" version.
  if (entry->ref(BASE_VERSION) != CHANGES_VERSION) {
    entry->put(BASE_VERSION, CHANGES_VERSION);
    entry->mark_dirty(&kernel_->dirty_metahandles);
  }

  // At this point locally created items that aren't synced will become locally
  // deleted items, and purged on the next snapshot. All other items will match
  // the state they would have had if they were just created via a server
  // update. See MutableEntry::MutableEntry(.., CreateNewUpdateItem, ..).
}

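// Removes |entry| from every in-memory index and records its handle in
// metahandles_to_purge. Ownership passes to |entries_to_journal| when
// |save_to_journal| is true; otherwise the entry is freed here.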
void Directory::DeleteEntry(const ScopedKernelLock& lock,
                            bool save_to_journal,
                            EntryKernel* entry,
                            EntryKernelSet* entries_to_journal) {
  int64 handle = entry->ref(META_HANDLE);
  ModelType server_type = GetModelTypeFromSpecifics(
      entry->ref(SERVER_SPECIFICS));

  kernel_->metahandles_to_purge.insert(handle);

  size_t num_erased = 0;
  num_erased = kernel_->metahandles_map.erase(entry->ref(META_HANDLE));
  DCHECK_EQ(1u, num_erased);
  num_erased = kernel_->ids_map.erase(entry->ref(ID).value());
  DCHECK_EQ(1u, num_erased);
  num_erased = kernel_->unsynced_metahandles.erase(handle);
  DCHECK_EQ(entry->ref(IS_UNSYNCED), num_erased > 0);
  num_erased =
      kernel_->unapplied_update_metahandles[server_type].erase(handle);
  DCHECK_EQ(entry->ref(IS_UNAPPLIED_UPDATE), num_erased > 0);
  if (kernel_->parent_child_index.Contains(entry))
    kernel_->parent_child_index.Remove(entry);

  if (!entry->ref(UNIQUE_CLIENT_TAG).empty()) {
    num_erased =
        kernel_->client_tags_map.erase(entry->ref(UNIQUE_CLIENT_TAG));
    DCHECK_EQ(1u, num_erased);
  }
  if (!entry->ref(UNIQUE_SERVER_TAG).empty()) {
    num_erased =
        kernel_->server_tags_map.erase(entry->ref(UNIQUE_SERVER_TAG));
    DCHECK_EQ(1u, num_erased);
  }
  RemoveFromAttachmentIndex(lock, handle, entry->ref(ATTACHMENT_METADATA));

  if (save_to_journal) {
    entries_to_journal->insert(entry);
  } else {
    delete entry;
  }
}

bool Directory::PurgeEntriesWithTypeIn(ModelTypeSet disabled_types,
                                       ModelTypeSet types_to_journal,
                                       ModelTypeSet types_to_unapply) {
  disabled_types.RemoveAll(ProxyTypes());

  if (disabled_types.Empty())
    return true;

  {
    WriteTransaction trans(FROM_HERE, PURGE_ENTRIES, this);

    EntryKernelSet entries_to_journal;
    STLElementDeleter<EntryKernelSet> journal_deleter(&entries_to_journal);

    {
      ScopedKernelLock lock(this);

      bool found_progress = false;
      for (ModelTypeSet::Iterator iter = disabled_types.First(); iter.Good();
           iter.Inc()) {
        if (!kernel_->persisted_info.HasEmptyDownloadProgress(iter.Get()))
          found_progress = true;
      }

      // If none of the disabled types have progress markers, there's nothing
      // to purge.
      if (!found_progress)
        return true;

      // We iterate in two passes to avoid a bug in STLport (which is used in
      // the Android build). There are some versions of that library where a
      // hash_map's iterators can be invalidated when an item is erased from
      // the hash_map.
      // See http://sourceforge.net/p/stlport/bugs/239/.
      std::set<EntryKernel*> to_purge;
      for (MetahandlesMap::iterator it = kernel_->metahandles_map.begin();
           it != kernel_->metahandles_map.end(); ++it) {
        const sync_pb::EntitySpecifics& local_specifics =
            it->second->ref(SPECIFICS);
        const sync_pb::EntitySpecifics& server_specifics =
            it->second->ref(SERVER_SPECIFICS);
        ModelType local_type = GetModelTypeFromSpecifics(local_specifics);
        ModelType server_type = GetModelTypeFromSpecifics(server_specifics);

        if ((IsRealDataType(local_type) && disabled_types.Has(local_type)) ||
            (IsRealDataType(server_type) && disabled_types.Has(server_type))) {
          to_purge.insert(it->second);
        }
      }

      for (std::set<EntryKernel*>::iterator it = to_purge.begin();
           it != to_purge.end(); ++it) {
        EntryKernel* entry = *it;

        const sync_pb::EntitySpecifics& local_specifics =
            (*it)->ref(SPECIFICS);
        const sync_pb::EntitySpecifics& server_specifics =
            (*it)->ref(SERVER_SPECIFICS);
        ModelType local_type = GetModelTypeFromSpecifics(local_specifics);
        ModelType server_type = GetModelTypeFromSpecifics(server_specifics);

        if (types_to_unapply.Has(local_type) ||
            types_to_unapply.Has(server_type)) {
          UnapplyEntry(entry);
        } else {
          bool save_to_journal =
              (types_to_journal.Has(local_type) ||
               types_to_journal.Has(server_type)) &&
              (delete_journal_->IsDeleteJournalEnabled(local_type) ||
               delete_journal_->IsDeleteJournalEnabled(server_type));
          DeleteEntry(lock, save_to_journal, entry, &entries_to_journal);
        }
      }

      delete_journal_->AddJournalBatch(&trans, entries_to_journal);

      // Ensure meta tracking for these data types reflects the purged state.
      for (ModelTypeSet::Iterator it = disabled_types.First();
           it.Good(); it.Inc()) {
        kernel_->persisted_info.transaction_version[it.Get()] = 0;

        // Don't discard progress markers or context for unapplied types.
        if (!types_to_unapply.Has(it.Get())) {
          kernel_->persisted_info.ResetDownloadProgress(it.Get());
          kernel_->persisted_info.datatype_context[it.Get()].Clear();
        }
      }

      kernel_->info_status = KERNEL_SHARE_INFO_DIRTY;
    }
  }
  return true;
}

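// Clamps BASE_VERSION and SERVER_VERSION down to 1 for every entry of |type|
// under the type's root node. IS_UNSYNCED and IS_UNAPPLIED_UPDATE are
// deliberately left untouched (see the comment inside the loop).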
bool Directory::ResetVersionsForType(BaseWriteTransaction* trans,
                                     ModelType type) {
  if (!ProtocolTypes().Has(type))
    return false;
  DCHECK_NE(type, BOOKMARKS) << "Only non-hierarchical types are supported";

  EntryKernel* type_root = GetEntryByServerTag(ModelTypeToRootTag(type));
  if (!type_root)
    return false;

  ScopedKernelLock lock(this);
  const Id& type_root_id = type_root->ref(ID);
  Directory::Metahandles children;
  AppendChildHandles(lock, type_root_id, &children);

  for (Metahandles::iterator it = children.begin(); it != children.end();
       ++it) {
    EntryKernel* entry = GetEntryByHandle(lock, *it);
    if (!entry)
      continue;
    if (entry->ref(BASE_VERSION) > 1)
      entry->put(BASE_VERSION, 1);
    if (entry->ref(SERVER_VERSION) > 1)
      entry->put(SERVER_VERSION, 1);

    // Note that we do not unset IS_UNSYNCED or IS_UNAPPLIED_UPDATE in order
    // to ensure no in-transit data is lost.

    entry->mark_dirty(&kernel_->dirty_metahandles);
  }

  return true;
}

bool Directory::IsAttachmentLinked(
    const sync_pb::AttachmentIdProto& attachment_id_proto) const {
  ScopedKernelLock lock(this);
  IndexByAttachmentId::const_iterator iter =
      kernel_->index_by_attachment_id.find(attachment_id_proto.unique_id());
  if (iter != kernel_->index_by_attachment_id.end() && !iter->second.empty()) {
    return true;
  }
  return false;
}

void Directory::HandleSaveChangesFailure(const SaveChangesSnapshot& snapshot) {
  WriteTransaction trans(FROM_HERE, HANDLE_SAVE_FAILURE, this);
  ScopedKernelLock lock(this);
  kernel_->info_status = KERNEL_SHARE_INFO_DIRTY;

  // Because we optimistically cleared the dirty bit on the real entries when
  // taking the snapshot, we must restore it on failure. Not doing this could
  // cause lost data, if no other changes are made to the in-memory entries
  // that would cause the dirty bit to get set again. Setting the bit ensures
  // that SaveChanges will at least try again later.
  for (EntryKernelSet::const_iterator i = snapshot.dirty_metas.begin();
       i != snapshot.dirty_metas.end(); ++i) {
    MetahandlesMap::iterator found =
        kernel_->metahandles_map.find((*i)->ref(META_HANDLE));
    if (found != kernel_->metahandles_map.end()) {
      found->second->mark_dirty(&kernel_->dirty_metahandles);
    }
  }

  kernel_->metahandles_to_purge.insert(snapshot.metahandles_to_purge.begin(),
                                       snapshot.metahandles_to_purge.end());

  // Restore delete journals.
  delete_journal_->AddJournalBatch(&trans, snapshot.delete_journals);
  delete_journal_->PurgeDeleteJournals(&trans,
                                       snapshot.delete_journals_to_purge);
}

void Directory::GetDownloadProgress(
    ModelType model_type,
    sync_pb::DataTypeProgressMarker* value_out) const {
  ScopedKernelLock lock(this);
  return value_out->CopyFrom(
      kernel_->persisted_info.download_progress[model_type]);
}

void Directory::GetDownloadProgressAsString(
    ModelType model_type,
    std::string* value_out) const {
  ScopedKernelLock lock(this);
  kernel_->persisted_info.download_progress[model_type].SerializeToString(
      value_out);
}

size_t Directory::GetEntriesCount() const {
  ScopedKernelLock lock(this);
  return kernel_->metahandles_map.size();
}

void Directory::SetDownloadProgress(
    ModelType model_type,
    const sync_pb::DataTypeProgressMarker& new_progress) {
  ScopedKernelLock lock(this);
  kernel_->persisted_info.download_progress[model_type].CopyFrom(new_progress);
  kernel_->info_status = KERNEL_SHARE_INFO_DIRTY;
}

bool Directory::HasEmptyDownloadProgress(ModelType type) const {
  ScopedKernelLock lock(this);
  return kernel_->persisted_info.HasEmptyDownloadProgress(type);
}

int64 Directory::GetTransactionVersion(ModelType type) const {
  kernel_->transaction_mutex.AssertAcquired();
  return kernel_->persisted_info.transaction_version[type];
}

void Directory::IncrementTransactionVersion(ModelType type) {
  kernel_->transaction_mutex.AssertAcquired();
  kernel_->persisted_info.transaction_version[type]++;
  kernel_->info_status = KERNEL_SHARE_INFO_DIRTY;
}

void Directory::GetDataTypeContext(BaseTransaction* trans,
                                   ModelType type,
                                   sync_pb::DataTypeContext* context) const {
  ScopedKernelLock lock(this);
  context->CopyFrom(kernel_->persisted_info.datatype_context[type]);
}

void Directory::SetDataTypeContext(
    BaseWriteTransaction* trans,
    ModelType type,
    const sync_pb::DataTypeContext& context) {
  ScopedKernelLock lock(this);
  kernel_->persisted_info.datatype_context[type].CopyFrom(context);
  kernel_->info_status = KERNEL_SHARE_INFO_DIRTY;
}

// TODO(stanisc): crbug.com/438313: change these to not rely on the folders.
ModelTypeSet Directory::InitialSyncEndedTypes() {
  syncable::ReadTransaction trans(FROM_HERE, this);
  ModelTypeSet protocol_types = ProtocolTypes();
  ModelTypeSet initial_sync_ended_types;
  for (ModelTypeSet::Iterator i = protocol_types.First(); i.Good(); i.Inc()) {
    if (InitialSyncEndedForType(&trans, i.Get())) {
      initial_sync_ended_types.Put(i.Get());
    }
  }
  return initial_sync_ended_types;
}

bool Directory::InitialSyncEndedForType(ModelType type) {
  syncable::ReadTransaction trans(FROM_HERE, this);
  return InitialSyncEndedForType(&trans, type);
}

bool Directory::InitialSyncEndedForType(
    BaseTransaction* trans, ModelType type) {
  // True iff the type's root node has been created.
  syncable::Entry entry(trans, syncable::GET_TYPE_ROOT, type);
  return entry.good();
}

string Directory::store_birthday() const {
  ScopedKernelLock lock(this);
  return kernel_->persisted_info.store_birthday;
}

void Directory::set_store_birthday(const string& store_birthday) {
  ScopedKernelLock lock(this);
  if (kernel_->persisted_info.store_birthday == store_birthday)
    return;
  kernel_->persisted_info.store_birthday = store_birthday;
  kernel_->info_status = KERNEL_SHARE_INFO_DIRTY;
}

string Directory::bag_of_chips() const {
  ScopedKernelLock lock(this);
  return kernel_->persisted_info.bag_of_chips;
}

void Directory::set_bag_of_chips(const string& bag_of_chips) {
  ScopedKernelLock lock(this);
  if (kernel_->persisted_info.bag_of_chips == bag_of_chips)
    return;
  kernel_->persisted_info.bag_of_chips = bag_of_chips;
  kernel_->info_status = KERNEL_SHARE_INFO_DIRTY;
}

string Directory::cache_guid() const {
  // No need to lock since nothing ever writes to it after load.
  return kernel_->cache_guid;
}

NigoriHandler* Directory::GetNigoriHandler() {
  return nigori_handler_;
}

Cryptographer* Directory::GetCryptographer(const BaseTransaction* trans) {
  DCHECK_EQ(this, trans->directory());
  return cryptographer_;
}

void Directory::GetAllMetaHandles(BaseTransaction* trans,
                                  MetahandleSet* result) {
  result->clear();
  ScopedKernelLock lock(this);
  for (MetahandlesMap::iterator i = kernel_->metahandles_map.begin();
       i != kernel_->metahandles_map.end(); ++i) {
    result->insert(i->first);
  }
}

void Directory::GetUnsyncedMetaHandles(BaseTransaction* trans,
                                       Metahandles* result) {
  result->clear();
  ScopedKernelLock lock(this);
  copy(kernel_->unsynced_metahandles.begin(),
       kernel_->unsynced_metahandles.end(), back_inserter(*result));
}

int64 Directory::unsynced_entity_count() const {
  ScopedKernelLock lock(this);
  return kernel_->unsynced_metahandles.size();
}

bool Directory::TypeHasUnappliedUpdates(ModelType type) {
  ScopedKernelLock lock(this);
  return !kernel_->unapplied_update_metahandles[type].empty();
}

void Directory::GetUnappliedUpdateMetaHandles(
    BaseTransaction* trans,
    FullModelTypeSet server_types,
    std::vector<int64>* result) {
  result->clear();
  ScopedKernelLock lock(this);
  for (int i = UNSPECIFIED; i < MODEL_TYPE_COUNT; ++i) {
    const ModelType type = ModelTypeFromInt(i);
    if (server_types.Has(type)) {
      std::copy(kernel_->unapplied_update_metahandles[type].begin(),
                kernel_->unapplied_update_metahandles[type].end(),
                back_inserter(*result));
    }
  }
}

void Directory::GetMetaHandlesOfType(BaseTransaction* trans,
                                     ModelType type,
                                     std::vector<int64>* result) {
  ScopedKernelLock lock(this);
  GetMetaHandlesOfType(lock, trans, type, result);
}

void Directory::GetMetaHandlesOfType(const ScopedKernelLock& lock,
                                     BaseTransaction* trans,
                                     ModelType type,
                                     std::vector<int64>* result) {
  result->clear();
  for (MetahandlesMap::iterator it = kernel_->metahandles_map.begin();
       it != kernel_->metahandles_map.end(); ++it) {
    EntryKernel* entry = it->second;
    const ModelType entry_type =
        GetModelTypeFromSpecifics(entry->ref(SPECIFICS));
    if (entry_type == type)
      result->push_back(it->first);
  }
}

void Directory::CollectMetaHandleCounts(
    std::vector<int>* num_entries_by_type,
    std::vector<int>* num_to_delete_entries_by_type) {
  syncable::ReadTransaction trans(FROM_HERE, this);
  ScopedKernelLock lock(this);

  for (MetahandlesMap::iterator it = kernel_->metahandles_map.begin();
       it != kernel_->metahandles_map.end(); ++it) {
    EntryKernel* entry = it->second;
    const ModelType type = GetModelTypeFromSpecifics(entry->ref(SPECIFICS));
    (*num_entries_by_type)[type]++;
    if (entry->ref(IS_DEL))
      (*num_to_delete_entries_by_type)[type]++;
  }
}

scoped_ptr<base::ListValue> Directory::GetNodeDetailsForType(
    BaseTransaction* trans,
    ModelType type) {
  scoped_ptr<base::ListValue> nodes(new base::ListValue());

  ScopedKernelLock lock(this);
  for (MetahandlesMap::iterator it = kernel_->metahandles_map.begin();
       it != kernel_->metahandles_map.end(); ++it) {
    if (GetModelTypeFromSpecifics(it->second->ref(SPECIFICS)) != type) {
      continue;
    }

    EntryKernel* kernel = it->second;
    scoped_ptr<base::DictionaryValue> node(
        kernel->ToValue(GetCryptographer(trans)));

    // Add the position index if appropriate. This must be done here (and not
    // in EntryKernel) because the EntryKernel does not have access to its
    // siblings.
    if (kernel->ShouldMaintainPosition() && !kernel->ref(IS_DEL)) {
      node->SetInteger("positionIndex", GetPositionIndex(trans, kernel));
    }

    nodes->Append(node.release());
  }

  return nodes.Pass();
}

bool Directory::CheckInvariantsOnTransactionClose(
    syncable::BaseTransaction* trans,
    const MetahandleSet& modified_handles) {
  // NOTE: The trans may be in the process of being destructed. Be careful if
  // you wish to call any of its virtual methods.
  switch (invariant_check_level_) {
    case FULL_DB_VERIFICATION: {
      MetahandleSet all_handles;
      GetAllMetaHandles(trans, &all_handles);
      return CheckTreeInvariants(trans, all_handles);
    }
    case VERIFY_CHANGES: {
      return CheckTreeInvariants(trans, modified_handles);
    }
    case OFF: {
      return true;
    }
  }
  NOTREACHED();
  return false;
}

bool Directory::FullyCheckTreeInvariants(syncable::BaseTransaction* trans) {
  MetahandleSet handles;
  GetAllMetaHandles(trans, &handles);
  return CheckTreeInvariants(trans, handles);
}

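// Walks the given |handles| and verifies structural invariants: root entry
// properties, parent chains for undeleted entries, and the version/id/tag
// rules below. Returns false on the first violated assertion.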
bool Directory::CheckTreeInvariants(syncable::BaseTransaction* trans,
                                    const MetahandleSet& handles) {
  MetahandleSet::const_iterator i;
  for (i = handles.begin() ; i != handles.end() ; ++i) {
    int64 metahandle = *i;
    Entry e(trans, GET_BY_HANDLE, metahandle);
    if (!SyncAssert(e.good(), FROM_HERE, "Entry is bad", trans))
      return false;
    syncable::Id id = e.GetId();
    syncable::Id parentid = e.GetParentId();

    if (id.IsRoot()) {
      if (!SyncAssert(e.GetIsDir(), FROM_HERE,
                      "Entry should be a directory",
                      trans))
        return false;
      if (!SyncAssert(parentid.IsRoot(), FROM_HERE,
                      "Entry should be root",
                      trans))
        return false;
      if (!SyncAssert(!e.GetIsUnsynced(), FROM_HERE, "Entry should be synced",
                      trans))
        return false;
      continue;
    }

    if (!e.GetIsDel()) {
      if (!SyncAssert(id != parentid, FROM_HERE,
                      "Id should be different from parent id.",
                      trans))
        return false;
      if (!SyncAssert(!e.GetNonUniqueName().empty(), FROM_HERE,
                      "Non unique name should not be empty.",
                      trans))
        return false;

      if (!parentid.IsNull()) {
        int safety_count = handles.size() + 1;
        while (!parentid.IsRoot()) {
          Entry parent(trans, GET_BY_ID, parentid);
          if (!SyncAssert(parent.good(), FROM_HERE,
                          "Parent entry is not valid.", trans))
            return false;
          if (handles.end() == handles.find(parent.GetMetahandle()))
            break;  // Skip further checking if parent was unmodified.
          if (!SyncAssert(parent.GetIsDir(), FROM_HERE,
                          "Parent should be a directory", trans))
            return false;
          if (!SyncAssert(!parent.GetIsDel(), FROM_HERE,
                          "Parent should not have been marked for deletion.",
                          trans))
            return false;
          if (!SyncAssert(handles.end() != handles.find(parent.GetMetahandle()),
                          FROM_HERE, "Parent should be in the index.", trans))
            return false;
          parentid = parent.GetParentId();
          if (!SyncAssert(--safety_count > 0, FROM_HERE,
                          "Count should be greater than zero.", trans))
            return false;
        }
      }
    }
    int64 base_version = e.GetBaseVersion();
    int64 server_version = e.GetServerVersion();
    bool using_unique_client_tag = !e.GetUniqueClientTag().empty();
    if (CHANGES_VERSION == base_version || 0 == base_version) {
      ModelType model_type = e.GetModelType();
      bool is_client_creatable_type_root_folder =
          parentid.IsRoot() &&
          IsTypeWithClientGeneratedRoot(model_type) &&
          e.GetUniqueServerTag() == ModelTypeToRootTag(model_type);
      if (e.GetIsUnappliedUpdate()) {
        // Must be a new item, or a de-duplicated unique client tag
        // that was created both locally and remotely, or a type root folder
        // that was created both locally and remotely.
        if (!(using_unique_client_tag ||
              is_client_creatable_type_root_folder)) {
          if (!SyncAssert(e.GetIsDel(), FROM_HERE,
                          "The entry should have been deleted.", trans))
            return false;
        }
        // It came from the server, so it must have a server ID.
        if (!SyncAssert(id.ServerKnows(), FROM_HERE,
                        "The id should be from a server.",
                        trans))
          return false;
      } else {
        if (e.GetIsDir()) {
          // TODO(chron): Implement this mode if clients ever need it.
          // For now, you can't combine a client tag and a directory.
          if (!SyncAssert(!using_unique_client_tag, FROM_HERE,
                          "Directory cannot have a client tag.",
                          trans))
            return false;
        }
        if (is_client_creatable_type_root_folder) {
          // This must be a locally created type root folder.
          if (!SyncAssert(
                  !e.GetIsUnsynced(), FROM_HERE,
                  "Locally created type root folders should not be unsynced.",
                  trans))
            return false;

          if (!SyncAssert(
                  !e.GetIsDel(), FROM_HERE,
                  "Locally created type root folders should not be deleted.",
                  trans))
            return false;
        } else {
          // Should be an uncommitted item, or a successfully deleted one.
          if (!e.GetIsDel()) {
            if (!SyncAssert(e.GetIsUnsynced(), FROM_HERE,
                            "The item should be unsynced.", trans))
              return false;
          }
        }
        // If the next check failed, it would imply that an item exists
        // on the server, isn't waiting for application locally, but either
        // is an unsynced create or a successful delete in the local copy.
        // Either way, that's a mismatch.
        if (!SyncAssert(0 == server_version, FROM_HERE,
                        "Server version should be zero.",
                        trans))
          return false;
        // Items that aren't using the unique client tag should have a zero
        // base version only if they have a local ID. Items with unique client
        // tags are allowed to use the zero base version for undeletion and
        // de-duplication; the unique client tag trumps the server ID.
        if (!using_unique_client_tag) {
          if (!SyncAssert(!id.ServerKnows(), FROM_HERE,
                          "Should be a client only id.",
                          trans))
            return false;
        }
      }
    } else {
      if (!SyncAssert(id.ServerKnows(),
                      FROM_HERE,
                      "Should be a server id.",
                      trans))
        return false;
    }

    // Previously we would assert that locally deleted items that have never
    // been synced must not be sent to the server (IS_UNSYNCED must be false).
    // This is not always true in the case that an item is deleted while the
    // initial commit is in flight. See crbug.com/426865.
  }
  return true;
}

void Directory::SetInvariantCheckLevel(InvariantCheckLevel check_level) {
  invariant_check_level_ = check_level;
}

int64 Directory::NextMetahandle() {
  ScopedKernelLock lock(this);
  int64 metahandle = (kernel_->next_metahandle)++;
  return metahandle;
}

// Generates next client ID based on a randomly generated GUID.
Id Directory::NextId() {
  return Id::CreateFromClientString(base::GenerateGUID());
}

bool Directory::HasChildren(BaseTransaction* trans, const Id& id) {
  ScopedKernelLock lock(this);
  return kernel_->parent_child_index.GetChildren(id) != NULL;
}

Id Directory::GetFirstChildId(BaseTransaction* trans,
                              const EntryKernel* parent) {
  DCHECK(parent);
  DCHECK(parent->ref(IS_DIR));

  ScopedKernelLock lock(this);
  const OrderedChildSet* children =
      kernel_->parent_child_index.GetChildren(parent->ref(ID));

  // We're expected to return root if there are no children.
  if (!children)
    return Id();

  return (*children->begin())->ref(ID);
}

syncable::Id Directory::GetPredecessorId(EntryKernel* e) {
  ScopedKernelLock lock(this);

  DCHECK(ParentChildIndex::ShouldInclude(e));
  const OrderedChildSet* siblings = kernel_->parent_child_index.GetSiblings(e);
  OrderedChildSet::const_iterator i = siblings->find(e);
  DCHECK(i != siblings->end());

  if (i == siblings->begin()) {
    return Id();
  } else {
    i--;
    return (*i)->ref(ID);
  }
}

syncable::Id Directory::GetSuccessorId(EntryKernel* e) {
  ScopedKernelLock lock(this);

  DCHECK(ParentChildIndex::ShouldInclude(e));
  const OrderedChildSet* siblings = kernel_->parent_child_index.GetSiblings(e);
  OrderedChildSet::const_iterator i = siblings->find(e);
  DCHECK(i != siblings->end());

  i++;
  if (i == siblings->end()) {
    return Id();
  } else {
    return (*i)->ref(ID);
  }
}

// TODO(rlarocque): Remove all support for placing ShouldMaintainPosition()
// items as siblings of items that do not maintain positions. It is required
// only for tests. See crbug.com/178282.
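//
// Assigns a UNIQUE_POSITION to |e| so that it sorts immediately after
// |predecessor| among its siblings; a NULL |predecessor| places |e| first.
// Entries that do not maintain positions are left untouched.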
void Directory::PutPredecessor(EntryKernel* e, EntryKernel* predecessor) {
  DCHECK(!e->ref(IS_DEL));
  if (!e->ShouldMaintainPosition()) {
    DCHECK(!e->ref(UNIQUE_POSITION).IsValid());
    return;
  }
  std::string suffix = e->ref(UNIQUE_BOOKMARK_TAG);
  DCHECK(!suffix.empty());

  // Remove our item from the ParentChildIndex and remember to re-add it later.
  ScopedKernelLock lock(this);
  ScopedParentChildIndexUpdater updater(lock, e, &kernel_->parent_child_index);

  // Note: The ScopedParentChildIndexUpdater will update this set for us as we
  // leave this function.
  const OrderedChildSet* siblings =
      kernel_->parent_child_index.GetChildren(e->ref(PARENT_ID));

  if (!siblings) {
    // This parent currently has no other children.
    DCHECK(predecessor == NULL);
    UniquePosition pos = UniquePosition::InitialPosition(suffix);
    e->put(UNIQUE_POSITION, pos);
    return;
  }

  if (predecessor == NULL) {
    // We have at least one sibling, and we're inserting to the left of them.
    UniquePosition successor_pos = (*siblings->begin())->ref(UNIQUE_POSITION);

    UniquePosition pos;
    if (!successor_pos.IsValid()) {
      // If all our successors are of non-positionable types, just create an
      // initial position. We arbitrarily choose to sort invalid positions to
      // the right of the valid positions.
      //
      // We really shouldn't need to support this. See TODO above.
      pos = UniquePosition::InitialPosition(suffix);
    } else {
      DCHECK(!siblings->empty());
      pos = UniquePosition::Before(successor_pos, suffix);
    }

    e->put(UNIQUE_POSITION, pos);
    return;
  }

  // We can't support placing an item after an invalid position. Fortunately,
  // the tests don't exercise this particular case. We should not support
  // siblings with invalid positions at all. See TODO above.
  DCHECK(predecessor->ref(UNIQUE_POSITION).IsValid());

  OrderedChildSet::const_iterator neighbour = siblings->find(predecessor);
  DCHECK(neighbour != siblings->end());

  ++neighbour;
  if (neighbour == siblings->end()) {
    // Inserting at the end of the list.
    UniquePosition pos = UniquePosition::After(
        predecessor->ref(UNIQUE_POSITION),
        suffix);
    e->put(UNIQUE_POSITION, pos);
    return;
  }

  EntryKernel* successor = *neighbour;

  // Another mixed valid and invalid position case. This one could be supported
  // in theory, but we're trying to deprecate support for siblings with and
  // without valid positions. See TODO above.
  DCHECK(successor->ref(UNIQUE_POSITION).IsValid());

  // Finally, the normal case: inserting between two elements.
  UniquePosition pos = UniquePosition::Between(
      predecessor->ref(UNIQUE_POSITION),
      successor->ref(UNIQUE_POSITION),
      suffix);
  e->put(UNIQUE_POSITION, pos);
  return;
}

// TODO(rlarocque): Avoid this indirection. Just return the set.
void Directory::AppendChildHandles(const ScopedKernelLock& lock,
                                   const Id& parent_id,
                                   Directory::Metahandles* result) {
  const OrderedChildSet* children =
      kernel_->parent_child_index.GetChildren(parent_id);
  if (!children)
    return;

  for (OrderedChildSet::const_iterator i = children->begin();
       i != children->end(); ++i) {
    result->push_back((*i)->ref(META_HANDLE));
  }
}

void Directory::UnmarkDirtyEntry(WriteTransaction* trans, Entry* entry) {
  CHECK(trans);
  entry->kernel_->clear_dirty(&kernel_->dirty_metahandles);
}

void Directory::GetAttachmentIdsToUpload(BaseTransaction* trans,
                                         ModelType type,
                                         AttachmentIdList* ids) {
  // TODO(maniscalco): Maintain an index by ModelType and rewrite this method to
  // use it. The approach below is likely very expensive because it iterates
  // all entries (bug 415199).
  DCHECK(trans);
  DCHECK(ids);
  ids->clear();
  AttachmentIdSet on_server_id_set;
  AttachmentIdSet not_on_server_id_set;
  std::vector<int64> metahandles;
  {
    ScopedKernelLock lock(this);
    GetMetaHandlesOfType(lock, trans, type, &metahandles);
    std::vector<int64>::const_iterator iter = metahandles.begin();
    const std::vector<int64>::const_iterator end = metahandles.end();
    // For all of this type's entries...
    for (; iter != end; ++iter) {
      EntryKernel* entry = GetEntryByHandle(lock, *iter);
      DCHECK(entry);
      const sync_pb::AttachmentMetadata metadata =
          entry->ref(ATTACHMENT_METADATA);
      // ...for each of this entry's attachments...
      for (int i = 0; i < metadata.record_size(); ++i) {
        AttachmentId id =
            AttachmentId::CreateFromProto(metadata.record(i).id());
        // ...if this attachment is known to be on the server, remember it for
        // later,
        if (metadata.record(i).is_on_server()) {
          on_server_id_set.insert(id);
        } else {
          // otherwise, add it to id_set.
          not_on_server_id_set.insert(id);
        }
      }
    }
  }
  // Why did we bother keeping a set of ids known to be on the server? The
  // is_on_server flag is stored denormalized so we can end up with two entries
  // with the same attachment id where one says it's on the server and the other
  // says it's not. When this happens, we trust the one that says it's on the
  // server. To avoid re-uploading the same attachment multiple times, we
  // remove any ids known to be on the server from the id_set we are about to
  // return.
  //
  // TODO(maniscalco): Eliminate redundant metadata storage (bug 415203).
  std::set_difference(not_on_server_id_set.begin(), not_on_server_id_set.end(),
                      on_server_id_set.begin(), on_server_id_set.end(),
                      std::back_inserter(*ids));
}

void Directory::OnCatastrophicError() {
  UMA_HISTOGRAM_BOOLEAN("Sync.DirectoryCatastrophicError", true);
  ReadTransaction trans(FROM_HERE, this);
  OnUnrecoverableError(&trans, FROM_HERE,
                       "Catastrophic error detected, Sync DB is unrecoverable");
}

Directory::Kernel* Directory::kernel() {
  return kernel_;
}

const Directory::Kernel* Directory::kernel() const {
  return kernel_;
}

}  // namespace syncable
}  // namespace syncer