// Copyright 2013 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
5 #include "sync/syncable/directory.h"
10 #include "base/base64.h"
11 #include "base/guid.h"
12 #include "base/metrics/histogram.h"
13 #include "base/stl_util.h"
14 #include "base/strings/string_number_conversions.h"
15 #include "base/trace_event/trace_event.h"
16 #include "sync/internal_api/public/base/attachment_id_proto.h"
17 #include "sync/internal_api/public/base/unique_position.h"
18 #include "sync/internal_api/public/util/unrecoverable_error_handler.h"
19 #include "sync/syncable/entry.h"
20 #include "sync/syncable/entry_kernel.h"
21 #include "sync/syncable/in_memory_directory_backing_store.h"
22 #include "sync/syncable/on_disk_directory_backing_store.h"
23 #include "sync/syncable/scoped_kernel_lock.h"
24 #include "sync/syncable/scoped_parent_child_index_updater.h"
25 #include "sync/syncable/syncable-inl.h"
26 #include "sync/syncable/syncable_base_transaction.h"
27 #include "sync/syncable/syncable_changes_version.h"
28 #include "sync/syncable/syncable_read_transaction.h"
29 #include "sync/syncable/syncable_util.h"
30 #include "sync/syncable/syncable_write_transaction.h"
namespace syncer {
namespace syncable {

using std::string;

const base::FilePath::CharType Directory::kSyncDatabaseFilename[] =
    FILE_PATH_LITERAL("SyncData.sqlite3");
Directory::PersistedKernelInfo::PersistedKernelInfo() {
  ModelTypeSet protocol_types = ProtocolTypes();
  for (ModelTypeSet::Iterator iter = protocol_types.First(); iter.Good();
       iter.Inc()) {
    ResetDownloadProgress(iter.Get());
    transaction_version[iter.Get()] = 0;
  }
}

Directory::PersistedKernelInfo::~PersistedKernelInfo() {}
void Directory::PersistedKernelInfo::ResetDownloadProgress(
    ModelType model_type) {
  // Clear everything except the data type id field.
  download_progress[model_type].Clear();
  download_progress[model_type].set_data_type_id(
      GetSpecificsFieldNumberFromModelType(model_type));

  // Explicitly set an empty token field to denote no progress.
  download_progress[model_type].set_token("");
}
bool Directory::PersistedKernelInfo::HasEmptyDownloadProgress(
    ModelType model_type) {
  const sync_pb::DataTypeProgressMarker& progress_marker =
      download_progress[model_type];
  return progress_marker.token().empty();
}
Directory::SaveChangesSnapshot::SaveChangesSnapshot()
    : kernel_info_status(KERNEL_SHARE_INFO_INVALID) {
}
Directory::SaveChangesSnapshot::~SaveChangesSnapshot() {
  STLDeleteElements(&dirty_metas);
  STLDeleteElements(&delete_journals);
}
bool Directory::SaveChangesSnapshot::HasUnsavedMetahandleChanges() const {
  return !dirty_metas.empty() || !metahandles_to_purge.empty() ||
         !delete_journals.empty() || !delete_journals_to_purge.empty();
}
Directory::Kernel::Kernel(
    const std::string& name,
    const KernelLoadInfo& info,
    DirectoryChangeDelegate* delegate,
    const WeakHandle<TransactionObserver>& transaction_observer)
    : next_write_transaction_id(0),
      name(name),
      info_status(Directory::KERNEL_SHARE_INFO_VALID),
      persisted_info(info.kernel_info),
      cache_guid(info.cache_guid),
      next_metahandle(info.max_metahandle + 1),
      delegate(delegate),
      transaction_observer(transaction_observer) {
  DCHECK(transaction_observer.IsInitialized());
}
Directory::Kernel::~Kernel() {
  STLDeleteContainerPairSecondPointers(metahandles_map.begin(),
                                       metahandles_map.end());
}
Directory::Directory(DirectoryBackingStore* store,
                     UnrecoverableErrorHandler* unrecoverable_error_handler,
                     const base::Closure& report_unrecoverable_error_function,
                     NigoriHandler* nigori_handler,
                     Cryptographer* cryptographer)
    : kernel_(NULL),
      store_(store),
      unrecoverable_error_handler_(unrecoverable_error_handler),
      report_unrecoverable_error_function_(report_unrecoverable_error_function),
      unrecoverable_error_set_(false),
      nigori_handler_(nigori_handler),
      cryptographer_(cryptographer),
      invariant_check_level_(VERIFY_CHANGES),
      weak_ptr_factory_(this) {
}
Directory::~Directory() {
  Close();
}
DirOpenResult Directory::Open(
    const string& name,
    DirectoryChangeDelegate* delegate,
    const WeakHandle<TransactionObserver>& transaction_observer) {
  TRACE_EVENT0("sync", "SyncDatabaseOpen");

  const DirOpenResult result =
      OpenImpl(name, delegate, transaction_observer);

  if (OPENED != result)
    Close();
  return result;
}
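
// Builds the in-memory indices (parent/child index, unsynced and unapplied
// sets, server/client tag maps, ID map and attachment index) from the freshly
// loaded metahandles map, which is taken over via swap.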
void Directory::InitializeIndices(MetahandlesMap* handles_map) {
  ScopedKernelLock lock(this);
  kernel_->metahandles_map.swap(*handles_map);
  for (MetahandlesMap::const_iterator it = kernel_->metahandles_map.begin();
       it != kernel_->metahandles_map.end(); ++it) {
    EntryKernel* entry = it->second;
    if (ParentChildIndex::ShouldInclude(entry))
      kernel_->parent_child_index.Insert(entry);
    const int64 metahandle = entry->ref(META_HANDLE);
    if (entry->ref(IS_UNSYNCED))
      kernel_->unsynced_metahandles.insert(metahandle);
    if (entry->ref(IS_UNAPPLIED_UPDATE)) {
      const ModelType type = entry->GetServerModelType();
      kernel_->unapplied_update_metahandles[type].insert(metahandle);
    }
    if (!entry->ref(UNIQUE_SERVER_TAG).empty()) {
      DCHECK(kernel_->server_tags_map.find(entry->ref(UNIQUE_SERVER_TAG)) ==
             kernel_->server_tags_map.end())
          << "Unexpected duplicate use of server tag";
      kernel_->server_tags_map[entry->ref(UNIQUE_SERVER_TAG)] = entry;
    }
    if (!entry->ref(UNIQUE_CLIENT_TAG).empty()) {
      DCHECK(kernel_->client_tags_map.find(entry->ref(UNIQUE_CLIENT_TAG)) ==
             kernel_->client_tags_map.end())
          << "Unexpected duplicate use of client tag";
      kernel_->client_tags_map[entry->ref(UNIQUE_CLIENT_TAG)] = entry;
    }
    DCHECK(kernel_->ids_map.find(entry->ref(ID).value()) ==
           kernel_->ids_map.end()) << "Unexpected duplicate use of ID";
    kernel_->ids_map[entry->ref(ID).value()] = entry;
    DCHECK(!entry->is_dirty());
    AddToAttachmentIndex(lock, metahandle, entry->ref(ATTACHMENT_METADATA));
  }
}
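
// Loads the persisted state through the backing store, constructs the Kernel
// and in-memory indices from it, and installs the catastrophic error handler
// once the store has been opened successfully.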
DirOpenResult Directory::OpenImpl(
    const string& name,
    DirectoryChangeDelegate* delegate,
    const WeakHandle<TransactionObserver>& transaction_observer) {
  KernelLoadInfo info;
  // Temporary indices before kernel_ initialized in case Load fails. We O(1)
  // swap these later.
  Directory::MetahandlesMap tmp_handles_map;

  // Avoids mem leaks on failure. Harmlessly deletes the empty hash map after
  // the swap in the success case.
  STLValueDeleter<MetahandlesMap> deleter(&tmp_handles_map);

  JournalIndex delete_journals;
  MetahandleSet metahandles_to_purge;

  DirOpenResult result = store_->Load(&tmp_handles_map, &delete_journals,
                                      &metahandles_to_purge, &info);
  if (OPENED != result)
    return result;

  kernel_ = new Kernel(name, info, delegate, transaction_observer);
  kernel_->metahandles_to_purge.swap(metahandles_to_purge);
  delete_journal_.reset(new DeleteJournal(&delete_journals));
  InitializeIndices(&tmp_handles_map);

  // Save changes back in case there are any metahandles to purge.
  if (!SaveChanges())
    return FAILED_INITIAL_WRITE;

  // Now that we've successfully opened the store, install an error handler to
  // deal with catastrophic errors that may occur later on. Use a weak pointer
  // because we cannot guarantee that this Directory will outlive the Closure.
  store_->SetCatastrophicErrorHandler(base::Bind(
      &Directory::OnCatastrophicError, weak_ptr_factory_.GetWeakPtr()));

  return OPENED;
}
DeleteJournal* Directory::delete_journal() {
  DCHECK(delete_journal_.get());
  return delete_journal_.get();
}
void Directory::Close() {
  store_.reset();
  if (kernel_) {
    delete kernel_;
    kernel_ = NULL;
  }
}
void Directory::OnUnrecoverableError(const BaseTransaction* trans,
                                     const tracked_objects::Location& location,
                                     const std::string& message) {
  DCHECK(trans != NULL);
  unrecoverable_error_set_ = true;
  unrecoverable_error_handler_->OnUnrecoverableError(location, message);
}
EntryKernel* Directory::GetEntryById(const Id& id) {
  ScopedKernelLock lock(this);
  return GetEntryById(lock, id);
}
EntryKernel* Directory::GetEntryById(const ScopedKernelLock& lock,
                                     const Id& id) {
  // Find it in the in memory ID index.
  IdsMap::iterator id_found = kernel_->ids_map.find(id.value());
  if (id_found != kernel_->ids_map.end()) {
    return id_found->second;
  }
  return NULL;
}
EntryKernel* Directory::GetEntryByClientTag(const string& tag) {
  ScopedKernelLock lock(this);
  TagsMap::iterator it = kernel_->client_tags_map.find(tag);
  if (it != kernel_->client_tags_map.end()) {
    return it->second;
  }
  return NULL;
}
EntryKernel* Directory::GetEntryByServerTag(const string& tag) {
  ScopedKernelLock lock(this);
  TagsMap::iterator it = kernel_->server_tags_map.find(tag);
  if (it != kernel_->server_tags_map.end()) {
    return it->second;
  }
  return NULL;
}
EntryKernel* Directory::GetEntryByHandle(int64 metahandle) {
  ScopedKernelLock lock(this);
  return GetEntryByHandle(lock, metahandle);
}
EntryKernel* Directory::GetEntryByHandle(const ScopedKernelLock& lock,
                                         int64 metahandle) {
  MetahandlesMap::iterator found =
      kernel_->metahandles_map.find(metahandle);
  if (found != kernel_->metahandles_map.end()) {
    // Found it in memory. Easy.
    return found->second;
  }
  return NULL;
}
bool Directory::GetChildHandlesById(
    BaseTransaction* trans, const Id& parent_id,
    Directory::Metahandles* result) {
  if (!SyncAssert(this == trans->directory(), FROM_HERE,
                  "Directories don't match", trans))
    return false;

  ScopedKernelLock lock(this);
  AppendChildHandles(lock, parent_id, result);
  return true;
}
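
// Counts |kernel| plus all of its descendants by walking the parent/child
// index breadth-first.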
int Directory::GetTotalNodeCount(
    BaseTransaction* trans,
    EntryKernel* kernel) const {
  if (!SyncAssert(this == trans->directory(), FROM_HERE,
                  "Directories don't match", trans))
    return 0;

  int count = 1;
  std::deque<const OrderedChildSet*> child_sets;

  GetChildSetForKernel(trans, kernel, &child_sets);
  while (!child_sets.empty()) {
    const OrderedChildSet* set = child_sets.front();
    child_sets.pop_front();
    for (OrderedChildSet::const_iterator it = set->begin();
         it != set->end(); ++it) {
      ++count;
      GetChildSetForKernel(trans, *it, &child_sets);
    }
  }

  return count;
}
void Directory::GetChildSetForKernel(
    BaseTransaction* trans,
    EntryKernel* kernel,
    std::deque<const OrderedChildSet*>* child_sets) const {
  if (!kernel->ref(IS_DIR))
    return;  // Not a directory => no children.

  const OrderedChildSet* descendants =
      kernel_->parent_child_index.GetChildren(kernel->ref(ID));
  if (!descendants)
    return;  // This directory has no children.

  // Add our children to the list of items to be traversed.
  child_sets->push_back(descendants);
}
int Directory::GetPositionIndex(
    BaseTransaction* trans,
    EntryKernel* kernel) const {
  const OrderedChildSet* siblings =
      kernel_->parent_child_index.GetSiblings(kernel);

  OrderedChildSet::const_iterator it = siblings->find(kernel);
  return std::distance(siblings->begin(), it);
}
bool Directory::InsertEntry(BaseWriteTransaction* trans, EntryKernel* entry) {
  ScopedKernelLock lock(this);
  return InsertEntry(lock, trans, entry);
}
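
// Adds |entry| to every relevant in-memory index; asserts (and fails) if any
// index already contains it or if it unexpectedly carries a tag.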
bool Directory::InsertEntry(const ScopedKernelLock& lock,
                            BaseWriteTransaction* trans,
                            EntryKernel* entry) {
  if (!SyncAssert(NULL != entry, FROM_HERE, "Entry is null", trans))
    return false;

  static const char error[] = "Entry already in memory index.";

  if (!SyncAssert(
          kernel_->metahandles_map.insert(
              std::make_pair(entry->ref(META_HANDLE), entry)).second,
          FROM_HERE, error, trans)) {
    return false;
  }
  if (!SyncAssert(
          kernel_->ids_map.insert(
              std::make_pair(entry->ref(ID).value(), entry)).second,
          FROM_HERE, error, trans)) {
    return false;
  }
  if (ParentChildIndex::ShouldInclude(entry)) {
    if (!SyncAssert(kernel_->parent_child_index.Insert(entry),
                    FROM_HERE, error, trans)) {
      return false;
    }
  }
  AddToAttachmentIndex(
      lock, entry->ref(META_HANDLE), entry->ref(ATTACHMENT_METADATA));

  // Should NEVER be created with a client tag or server tag.
  if (!SyncAssert(entry->ref(UNIQUE_SERVER_TAG).empty(), FROM_HERE,
                  "Server tag should be empty", trans)) {
    return false;
  }
  if (!SyncAssert(entry->ref(UNIQUE_CLIENT_TAG).empty(), FROM_HERE,
                  "Client tag should be empty", trans))
    return false;

  return true;
}
bool Directory::ReindexId(BaseWriteTransaction* trans,
                          EntryKernel* const entry,
                          const Id& new_id) {
  ScopedKernelLock lock(this);
  if (NULL != GetEntryById(lock, new_id))
    return false;

  {
    // Update the indices that depend on the ID field.
    ScopedParentChildIndexUpdater updater_b(lock, entry,
                                            &kernel_->parent_child_index);
    size_t num_erased = kernel_->ids_map.erase(entry->ref(ID).value());
    DCHECK_EQ(1U, num_erased);
    entry->put(ID, new_id);
    kernel_->ids_map[entry->ref(ID).value()] = entry;
  }
  return true;
}
bool Directory::ReindexParentId(BaseWriteTransaction* trans,
                                EntryKernel* const entry,
                                const Id& new_parent_id) {
  ScopedKernelLock lock(this);

  {
    // Update the indices that depend on the PARENT_ID field.
    ScopedParentChildIndexUpdater index_updater(lock, entry,
                                                &kernel_->parent_child_index);
    entry->put(PARENT_ID, new_parent_id);
  }
  return true;
}
void Directory::RemoveFromAttachmentIndex(
    const ScopedKernelLock& lock,
    const int64 metahandle,
    const sync_pb::AttachmentMetadata& attachment_metadata) {
  for (int i = 0; i < attachment_metadata.record_size(); ++i) {
    AttachmentIdUniqueId unique_id =
        attachment_metadata.record(i).id().unique_id();
    IndexByAttachmentId::iterator iter =
        kernel_->index_by_attachment_id.find(unique_id);
    if (iter != kernel_->index_by_attachment_id.end()) {
      iter->second.erase(metahandle);
      if (iter->second.empty()) {
        kernel_->index_by_attachment_id.erase(iter);
      }
    }
  }
}
void Directory::AddToAttachmentIndex(
    const ScopedKernelLock& lock,
    const int64 metahandle,
    const sync_pb::AttachmentMetadata& attachment_metadata) {
  for (int i = 0; i < attachment_metadata.record_size(); ++i) {
    AttachmentIdUniqueId unique_id =
        attachment_metadata.record(i).id().unique_id();
    IndexByAttachmentId::iterator iter =
        kernel_->index_by_attachment_id.find(unique_id);
    if (iter == kernel_->index_by_attachment_id.end()) {
      iter = kernel_->index_by_attachment_id.insert(std::make_pair(
          unique_id, MetahandleSet())).first;
    }
    iter->second.insert(metahandle);
  }
}
void Directory::UpdateAttachmentIndex(
    const int64 metahandle,
    const sync_pb::AttachmentMetadata& old_metadata,
    const sync_pb::AttachmentMetadata& new_metadata) {
  ScopedKernelLock lock(this);
  RemoveFromAttachmentIndex(lock, metahandle, old_metadata);
  AddToAttachmentIndex(lock, metahandle, new_metadata);
}
void Directory::GetMetahandlesByAttachmentId(
    BaseTransaction* trans,
    const sync_pb::AttachmentIdProto& attachment_id_proto,
    Metahandles* result) {
  ScopedKernelLock lock(this);
  IndexByAttachmentId::const_iterator index_iter =
      kernel_->index_by_attachment_id.find(attachment_id_proto.unique_id());
  if (index_iter == kernel_->index_by_attachment_id.end())
    return;
  const MetahandleSet& metahandle_set = index_iter->second;
  std::copy(
      metahandle_set.begin(), metahandle_set.end(), back_inserter(*result));
}
bool Directory::unrecoverable_error_set(const BaseTransaction* trans) const {
  DCHECK(trans != NULL);
  return unrecoverable_error_set_;
}
void Directory::ClearDirtyMetahandles(const ScopedKernelLock& lock) {
  kernel_->transaction_mutex.AssertAcquired();
  kernel_->dirty_metahandles.clear();
}
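
// An entry may only be dropped from memory if it is deleted, clean, and not
// involved in any pending sync work; the asserts double-check that the
// bookkeeping sets agree with those flags.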
bool Directory::SafeToPurgeFromMemory(WriteTransaction* trans,
                                      const EntryKernel* const entry) const {
  bool safe = entry->ref(IS_DEL) && !entry->is_dirty() &&
              !entry->ref(SYNCING) && !entry->ref(IS_UNAPPLIED_UPDATE) &&
              !entry->ref(IS_UNSYNCED);

  if (safe) {
    int64 handle = entry->ref(META_HANDLE);
    const ModelType type = entry->GetServerModelType();
    if (!SyncAssert(kernel_->dirty_metahandles.count(handle) == 0U,
                    FROM_HERE,
                    "Dirty metahandles should be empty", trans))
      return false;
    // TODO(tim): Bug 49278.
    if (!SyncAssert(!kernel_->unsynced_metahandles.count(handle),
                    FROM_HERE,
                    "Unsynced handles should be empty",
                    trans))
      return false;
    if (!SyncAssert(!kernel_->unapplied_update_metahandles[type].count(handle),
                    FROM_HERE,
                    "Unapplied metahandles should be empty",
                    trans))
      return false;
  }

  return safe;
}
void Directory::TakeSnapshotForSaveChanges(SaveChangesSnapshot* snapshot) {
  ReadTransaction trans(FROM_HERE, this);
  ScopedKernelLock lock(this);

  // If there is an unrecoverable error then just bail out.
  if (unrecoverable_error_set(&trans))
    return;

  // Deep copy dirty entries from kernel_->metahandles_index into snapshot and
  // clear dirty flags.
  for (MetahandleSet::const_iterator i = kernel_->dirty_metahandles.begin();
       i != kernel_->dirty_metahandles.end(); ++i) {
    EntryKernel* entry = GetEntryByHandle(lock, *i);
    // Skip over false positives; it happens relatively infrequently.
    if (!entry->is_dirty())
      continue;
    snapshot->dirty_metas.insert(snapshot->dirty_metas.end(),
                                 new EntryKernel(*entry));
    DCHECK_EQ(1U, kernel_->dirty_metahandles.count(*i));
    // We don't bother removing from the index here as we blow the entire thing
    // in a moment, and it unnecessarily complicates iteration.
    entry->clear_dirty(NULL);
  }
  ClearDirtyMetahandles(lock);

  // Set purged handles.
  DCHECK(snapshot->metahandles_to_purge.empty());
  snapshot->metahandles_to_purge.swap(kernel_->metahandles_to_purge);

  // Fill kernel_info_status and kernel_info.
  snapshot->kernel_info = kernel_->persisted_info;
  snapshot->kernel_info_status = kernel_->info_status;
  // This one we reset on failure.
  kernel_->info_status = KERNEL_SHARE_INFO_VALID;

  delete_journal_->TakeSnapshotAndClear(
      &trans, &snapshot->delete_journals, &snapshot->delete_journals_to_purge);
}
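
// Persists a snapshot of the current state through the backing store, then
// either vacuums purgeable entries on success or rolls the in-memory
// bookkeeping back on failure.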
bool Directory::SaveChanges() {
  bool success = false;

  base::AutoLock scoped_lock(kernel_->save_changes_mutex);

  // Snapshot and save.
  SaveChangesSnapshot snapshot;
  TakeSnapshotForSaveChanges(&snapshot);
  success = store_->SaveChanges(snapshot);

  // Handle success or failure.
  if (success)
    success = VacuumAfterSaveChanges(snapshot);
  else
    HandleSaveChangesFailure(snapshot);
  return success;
}
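
// After a successful save, drops from memory any snapshotted entries that are
// deleted and fully synced, since they no longer need to be tracked.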
bool Directory::VacuumAfterSaveChanges(const SaveChangesSnapshot& snapshot) {
  if (snapshot.dirty_metas.empty())
    return true;

  // Need a write transaction as we are about to permanently purge entries.
  WriteTransaction trans(FROM_HERE, VACUUM_AFTER_SAVE, this);
  ScopedKernelLock lock(this);
  // Now drop everything we can out of memory.
  for (EntryKernelSet::const_iterator i = snapshot.dirty_metas.begin();
       i != snapshot.dirty_metas.end(); ++i) {
    MetahandlesMap::iterator found =
        kernel_->metahandles_map.find((*i)->ref(META_HANDLE));
    EntryKernel* entry = (found == kernel_->metahandles_map.end() ?
                          NULL : found->second);
    if (entry && SafeToPurgeFromMemory(&trans, entry)) {
      // We now drop deleted metahandles that are up to date on both the client
      // and the server.
      size_t num_erased = 0;
      num_erased = kernel_->metahandles_map.erase(entry->ref(META_HANDLE));
      DCHECK_EQ(1u, num_erased);
      num_erased = kernel_->ids_map.erase(entry->ref(ID).value());
      DCHECK_EQ(1u, num_erased);
      if (!entry->ref(UNIQUE_SERVER_TAG).empty()) {
        num_erased =
            kernel_->server_tags_map.erase(entry->ref(UNIQUE_SERVER_TAG));
        DCHECK_EQ(1u, num_erased);
      }
      if (!entry->ref(UNIQUE_CLIENT_TAG).empty()) {
        num_erased =
            kernel_->client_tags_map.erase(entry->ref(UNIQUE_CLIENT_TAG));
        DCHECK_EQ(1u, num_erased);
      }
      if (!SyncAssert(!kernel_->parent_child_index.Contains(entry),
                      FROM_HERE,
                      "Deleted entry still present",
                      &trans))
        return false;
      RemoveFromAttachmentIndex(
          lock, entry->ref(META_HANDLE), entry->ref(ATTACHMENT_METADATA));

      delete entry;
    }
    if (trans.unrecoverable_error_set())
      return false;
  }
  return true;
}
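
// Reverts a locally modified entry so it looks like a never-applied server
// update: sets the unapplied bit, clears the unsynced bit, marks the entry
// locally deleted and resets its base version.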
void Directory::UnapplyEntry(EntryKernel* entry) {
  int64 handle = entry->ref(META_HANDLE);
  ModelType server_type = GetModelTypeFromSpecifics(
      entry->ref(SERVER_SPECIFICS));

  // Clear enough so that on the next sync cycle all local data will
  // be overwritten.
  // Note: do not modify the root node in order to preserve the
  // initial sync ended bit for this type (else on the next restart
  // this type will be treated as disabled and therefore fully purged).
  if (IsRealDataType(server_type) &&
      ModelTypeToRootTag(server_type) == entry->ref(UNIQUE_SERVER_TAG)) {
    return;
  }

  // Set the unapplied bit if this item has server data.
  if (IsRealDataType(server_type) && !entry->ref(IS_UNAPPLIED_UPDATE)) {
    entry->put(IS_UNAPPLIED_UPDATE, true);
    kernel_->unapplied_update_metahandles[server_type].insert(handle);
    entry->mark_dirty(&kernel_->dirty_metahandles);
  }

  // Unset the unsynced bit.
  if (entry->ref(IS_UNSYNCED)) {
    kernel_->unsynced_metahandles.erase(handle);
    entry->put(IS_UNSYNCED, false);
    entry->mark_dirty(&kernel_->dirty_metahandles);
  }

  // Mark the item as locally deleted. No deleted items are allowed in the
  // parent child index.
  if (!entry->ref(IS_DEL)) {
    kernel_->parent_child_index.Remove(entry);
    entry->put(IS_DEL, true);
    entry->mark_dirty(&kernel_->dirty_metahandles);
  }

  // Set the version to the "newly created" version.
  if (entry->ref(BASE_VERSION) != CHANGES_VERSION) {
    entry->put(BASE_VERSION, CHANGES_VERSION);
    entry->mark_dirty(&kernel_->dirty_metahandles);
  }

  // At this point locally created items that aren't synced will become locally
  // deleted items, and purged on the next snapshot. All other items will match
  // the state they would have had if they were just created via a server
  // update. See MutableEntry::MutableEntry(.., CreateNewUpdateItem, ..).
}
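
// Removes |entry| from every in-memory index and schedules it for purging
// from the backing store; ownership passes to |entries_to_journal| when
// journaling was requested, otherwise the kernel is freed here.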
void Directory::DeleteEntry(const ScopedKernelLock& lock,
                            bool save_to_journal,
                            EntryKernel* entry,
                            EntryKernelSet* entries_to_journal) {
  int64 handle = entry->ref(META_HANDLE);
  ModelType server_type = GetModelTypeFromSpecifics(
      entry->ref(SERVER_SPECIFICS));

  kernel_->metahandles_to_purge.insert(handle);

  size_t num_erased = 0;
  num_erased = kernel_->metahandles_map.erase(entry->ref(META_HANDLE));
  DCHECK_EQ(1u, num_erased);
  num_erased = kernel_->ids_map.erase(entry->ref(ID).value());
  DCHECK_EQ(1u, num_erased);
  num_erased = kernel_->unsynced_metahandles.erase(handle);
  DCHECK_EQ(entry->ref(IS_UNSYNCED), num_erased > 0);
  num_erased =
      kernel_->unapplied_update_metahandles[server_type].erase(handle);
  DCHECK_EQ(entry->ref(IS_UNAPPLIED_UPDATE), num_erased > 0);
  if (kernel_->parent_child_index.Contains(entry))
    kernel_->parent_child_index.Remove(entry);

  if (!entry->ref(UNIQUE_CLIENT_TAG).empty()) {
    num_erased =
        kernel_->client_tags_map.erase(entry->ref(UNIQUE_CLIENT_TAG));
    DCHECK_EQ(1u, num_erased);
  }
  if (!entry->ref(UNIQUE_SERVER_TAG).empty()) {
    num_erased =
        kernel_->server_tags_map.erase(entry->ref(UNIQUE_SERVER_TAG));
    DCHECK_EQ(1u, num_erased);
  }
  RemoveFromAttachmentIndex(lock, handle, entry->ref(ATTACHMENT_METADATA));

  if (save_to_journal) {
    entries_to_journal->insert(entry);
  } else {
    delete entry;
  }
}
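
// Purges all entries belonging to the disabled types, journaling or
// unapplying them as requested, and resets the per-type progress markers and
// contexts so purged types start from scratch if they are re-enabled.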
bool Directory::PurgeEntriesWithTypeIn(ModelTypeSet disabled_types,
                                       ModelTypeSet types_to_journal,
                                       ModelTypeSet types_to_unapply) {
  disabled_types.RemoveAll(ProxyTypes());

  if (disabled_types.Empty())
    return true;

  {
    WriteTransaction trans(FROM_HERE, PURGE_ENTRIES, this);

    EntryKernelSet entries_to_journal;
    STLElementDeleter<EntryKernelSet> journal_deleter(&entries_to_journal);

    {
      ScopedKernelLock lock(this);

      bool found_progress = false;
      for (ModelTypeSet::Iterator iter = disabled_types.First(); iter.Good();
           iter.Inc()) {
        if (!kernel_->persisted_info.HasEmptyDownloadProgress(iter.Get()))
          found_progress = true;
      }

      // If none of the disabled types have progress markers, there's nothing
      // to purge.
      if (!found_progress)
        return true;

      // We iterate in two passes to avoid a bug in STLport (which is used in
      // the Android build). There are some versions of that library where a
      // hash_map's iterators can be invalidated when an item is erased from
      // the hash_map.
      // See http://sourceforge.net/p/stlport/bugs/239/.
      std::set<EntryKernel*> to_purge;
      for (MetahandlesMap::iterator it = kernel_->metahandles_map.begin();
           it != kernel_->metahandles_map.end(); ++it) {
        const sync_pb::EntitySpecifics& local_specifics =
            it->second->ref(SPECIFICS);
        const sync_pb::EntitySpecifics& server_specifics =
            it->second->ref(SERVER_SPECIFICS);
        ModelType local_type = GetModelTypeFromSpecifics(local_specifics);
        ModelType server_type = GetModelTypeFromSpecifics(server_specifics);

        if ((IsRealDataType(local_type) && disabled_types.Has(local_type)) ||
            (IsRealDataType(server_type) && disabled_types.Has(server_type))) {
          to_purge.insert(it->second);
        }
      }

      for (std::set<EntryKernel*>::iterator it = to_purge.begin();
           it != to_purge.end(); ++it) {
        EntryKernel* entry = *it;

        const sync_pb::EntitySpecifics& local_specifics =
            (*it)->ref(SPECIFICS);
        const sync_pb::EntitySpecifics& server_specifics =
            (*it)->ref(SERVER_SPECIFICS);
        ModelType local_type = GetModelTypeFromSpecifics(local_specifics);
        ModelType server_type = GetModelTypeFromSpecifics(server_specifics);

        if (types_to_unapply.Has(local_type) ||
            types_to_unapply.Has(server_type)) {
          UnapplyEntry(entry);
        } else {
          bool save_to_journal =
              (types_to_journal.Has(local_type) ||
               types_to_journal.Has(server_type)) &&
              (delete_journal_->IsDeleteJournalEnabled(local_type) ||
               delete_journal_->IsDeleteJournalEnabled(server_type));
          DeleteEntry(lock, save_to_journal, entry, &entries_to_journal);
        }
      }

      delete_journal_->AddJournalBatch(&trans, entries_to_journal);

      // Ensure meta tracking for these data types reflects the purged state.
      for (ModelTypeSet::Iterator it = disabled_types.First();
           it.Good(); it.Inc()) {
        kernel_->persisted_info.transaction_version[it.Get()] = 0;

        // Don't discard progress markers or context for unapplied types.
        if (!types_to_unapply.Has(it.Get())) {
          kernel_->persisted_info.ResetDownloadProgress(it.Get());
          kernel_->persisted_info.datatype_context[it.Get()].Clear();
        }
      }

      kernel_->info_status = KERNEL_SHARE_INFO_DIRTY;
    }
  }
  return true;
}
bool Directory::ResetVersionsForType(BaseWriteTransaction* trans,
                                     ModelType type) {
  if (!ProtocolTypes().Has(type))
    return false;
  DCHECK_NE(type, BOOKMARKS) << "Only non-hierarchical types are supported";

  EntryKernel* type_root = GetEntryByServerTag(ModelTypeToRootTag(type));
  if (!type_root)
    return false;

  ScopedKernelLock lock(this);
  const Id& type_root_id = type_root->ref(ID);
  Directory::Metahandles children;
  AppendChildHandles(lock, type_root_id, &children);

  for (Metahandles::iterator it = children.begin(); it != children.end();
       ++it) {
    EntryKernel* entry = GetEntryByHandle(lock, *it);
    if (!entry)
      continue;
    if (entry->ref(BASE_VERSION) > 1)
      entry->put(BASE_VERSION, 1);
    if (entry->ref(SERVER_VERSION) > 1)
      entry->put(SERVER_VERSION, 1);

    // Note that we do not unset IS_UNSYNCED or IS_UNAPPLIED_UPDATE in order
    // to ensure no in-transit data is lost.

    entry->mark_dirty(&kernel_->dirty_metahandles);
  }

  return true;
}
bool Directory::IsAttachmentLinked(
    const sync_pb::AttachmentIdProto& attachment_id_proto) const {
  ScopedKernelLock lock(this);
  IndexByAttachmentId::const_iterator iter =
      kernel_->index_by_attachment_id.find(attachment_id_proto.unique_id());
  if (iter != kernel_->index_by_attachment_id.end() && !iter->second.empty()) {
    return true;
  }
  return false;
}
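
// Rolls back the optimistic bookkeeping done by TakeSnapshotForSaveChanges:
// re-marks the snapshotted entries as dirty, restores the purge set and puts
// the delete journals back.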
void Directory::HandleSaveChangesFailure(const SaveChangesSnapshot& snapshot) {
  WriteTransaction trans(FROM_HERE, HANDLE_SAVE_FAILURE, this);
  ScopedKernelLock lock(this);
  kernel_->info_status = KERNEL_SHARE_INFO_DIRTY;

  // Because we optimistically cleared the dirty bit on the real entries when
  // taking the snapshot, we must restore it on failure. Not doing this could
  // cause lost data, if no other changes are made to the in-memory entries
  // that would cause the dirty bit to get set again. Setting the bit ensures
  // that SaveChanges will at least try again later.
  for (EntryKernelSet::const_iterator i = snapshot.dirty_metas.begin();
       i != snapshot.dirty_metas.end(); ++i) {
    MetahandlesMap::iterator found =
        kernel_->metahandles_map.find((*i)->ref(META_HANDLE));
    if (found != kernel_->metahandles_map.end()) {
      found->second->mark_dirty(&kernel_->dirty_metahandles);
    }
  }

  kernel_->metahandles_to_purge.insert(snapshot.metahandles_to_purge.begin(),
                                       snapshot.metahandles_to_purge.end());

  // Restore delete journals.
  delete_journal_->AddJournalBatch(&trans, snapshot.delete_journals);
  delete_journal_->PurgeDeleteJournals(&trans,
                                       snapshot.delete_journals_to_purge);
}
void Directory::GetDownloadProgress(
    ModelType model_type,
    sync_pb::DataTypeProgressMarker* value_out) const {
  ScopedKernelLock lock(this);
  return value_out->CopyFrom(
      kernel_->persisted_info.download_progress[model_type]);
}
void Directory::GetDownloadProgressAsString(
    ModelType model_type,
    std::string* value_out) const {
  ScopedKernelLock lock(this);
  kernel_->persisted_info.download_progress[model_type].SerializeToString(
      value_out);
}
size_t Directory::GetEntriesCount() const {
  ScopedKernelLock lock(this);
  return kernel_->metahandles_map.size();
}
void Directory::SetDownloadProgress(
    ModelType model_type,
    const sync_pb::DataTypeProgressMarker& new_progress) {
  ScopedKernelLock lock(this);
  kernel_->persisted_info.download_progress[model_type].CopyFrom(new_progress);
  kernel_->info_status = KERNEL_SHARE_INFO_DIRTY;
}
bool Directory::HasEmptyDownloadProgress(ModelType type) const {
  ScopedKernelLock lock(this);
  return kernel_->persisted_info.HasEmptyDownloadProgress(type);
}
int64 Directory::GetTransactionVersion(ModelType type) const {
  kernel_->transaction_mutex.AssertAcquired();
  return kernel_->persisted_info.transaction_version[type];
}
void Directory::IncrementTransactionVersion(ModelType type) {
  kernel_->transaction_mutex.AssertAcquired();
  kernel_->persisted_info.transaction_version[type]++;
  kernel_->info_status = KERNEL_SHARE_INFO_DIRTY;
}
void Directory::GetDataTypeContext(BaseTransaction* trans,
                                   ModelType type,
                                   sync_pb::DataTypeContext* context) const {
  ScopedKernelLock lock(this);
  context->CopyFrom(kernel_->persisted_info.datatype_context[type]);
}
void Directory::SetDataTypeContext(
    BaseWriteTransaction* trans,
    ModelType type,
    const sync_pb::DataTypeContext& context) {
  ScopedKernelLock lock(this);
  kernel_->persisted_info.datatype_context[type].CopyFrom(context);
  kernel_->info_status = KERNEL_SHARE_INFO_DIRTY;
}
// TODO(stanisc): crbug.com/438313: change these to not rely on the folders.
ModelTypeSet Directory::InitialSyncEndedTypes() {
  syncable::ReadTransaction trans(FROM_HERE, this);
  ModelTypeSet protocol_types = ProtocolTypes();
  ModelTypeSet initial_sync_ended_types;
  for (ModelTypeSet::Iterator i = protocol_types.First(); i.Good(); i.Inc()) {
    if (InitialSyncEndedForType(&trans, i.Get())) {
      initial_sync_ended_types.Put(i.Get());
    }
  }
  return initial_sync_ended_types;
}
bool Directory::InitialSyncEndedForType(ModelType type) {
  syncable::ReadTransaction trans(FROM_HERE, this);
  return InitialSyncEndedForType(&trans, type);
}
bool Directory::InitialSyncEndedForType(
    BaseTransaction* trans, ModelType type) {
  // True iff the type's root node has been created.
  syncable::Entry entry(trans, syncable::GET_TYPE_ROOT, type);
  return entry.good();
}
string Directory::store_birthday() const {
  ScopedKernelLock lock(this);
  return kernel_->persisted_info.store_birthday;
}
void Directory::set_store_birthday(const string& store_birthday) {
  ScopedKernelLock lock(this);
  if (kernel_->persisted_info.store_birthday == store_birthday)
    return;
  kernel_->persisted_info.store_birthday = store_birthday;
  kernel_->info_status = KERNEL_SHARE_INFO_DIRTY;
}
string Directory::bag_of_chips() const {
  ScopedKernelLock lock(this);
  return kernel_->persisted_info.bag_of_chips;
}
void Directory::set_bag_of_chips(const string& bag_of_chips) {
  ScopedKernelLock lock(this);
  if (kernel_->persisted_info.bag_of_chips == bag_of_chips)
    return;
  kernel_->persisted_info.bag_of_chips = bag_of_chips;
  kernel_->info_status = KERNEL_SHARE_INFO_DIRTY;
}
string Directory::cache_guid() const {
  // No need to lock since nothing ever writes to it after load.
  return kernel_->cache_guid;
}
NigoriHandler* Directory::GetNigoriHandler() {
  return nigori_handler_;
}
Cryptographer* Directory::GetCryptographer(const BaseTransaction* trans) {
  DCHECK_EQ(this, trans->directory());
  return cryptographer_;
}
void Directory::ReportUnrecoverableError() {
  if (!report_unrecoverable_error_function_.is_null()) {
    report_unrecoverable_error_function_.Run();
  }
}
void Directory::GetAllMetaHandles(BaseTransaction* trans,
                                  MetahandleSet* result) {
  ScopedKernelLock lock(this);
  for (MetahandlesMap::iterator i = kernel_->metahandles_map.begin();
       i != kernel_->metahandles_map.end(); ++i) {
    result->insert(i->first);
  }
}
void Directory::GetUnsyncedMetaHandles(BaseTransaction* trans,
                                       Metahandles* result) {
  ScopedKernelLock lock(this);
  copy(kernel_->unsynced_metahandles.begin(),
       kernel_->unsynced_metahandles.end(), back_inserter(*result));
}
int64 Directory::unsynced_entity_count() const {
  ScopedKernelLock lock(this);
  return kernel_->unsynced_metahandles.size();
}
bool Directory::TypeHasUnappliedUpdates(ModelType type) {
  ScopedKernelLock lock(this);
  return !kernel_->unapplied_update_metahandles[type].empty();
}
void Directory::GetUnappliedUpdateMetaHandles(
    BaseTransaction* trans,
    FullModelTypeSet server_types,
    std::vector<int64>* result) {
  ScopedKernelLock lock(this);
  for (int i = UNSPECIFIED; i < MODEL_TYPE_COUNT; ++i) {
    const ModelType type = ModelTypeFromInt(i);
    if (server_types.Has(type)) {
      std::copy(kernel_->unapplied_update_metahandles[type].begin(),
                kernel_->unapplied_update_metahandles[type].end(),
                back_inserter(*result));
    }
  }
}
void Directory::GetMetaHandlesOfType(BaseTransaction* trans,
                                     ModelType type,
                                     std::vector<int64>* result) {
  ScopedKernelLock lock(this);
  GetMetaHandlesOfType(lock, trans, type, result);
}
void Directory::GetMetaHandlesOfType(const ScopedKernelLock& lock,
                                     BaseTransaction* trans,
                                     ModelType type,
                                     std::vector<int64>* result) {
  for (MetahandlesMap::iterator it = kernel_->metahandles_map.begin();
       it != kernel_->metahandles_map.end(); ++it) {
    EntryKernel* entry = it->second;
    const ModelType entry_type =
        GetModelTypeFromSpecifics(entry->ref(SPECIFICS));
    if (entry_type == type)
      result->push_back(it->first);
  }
}
void Directory::CollectMetaHandleCounts(
    std::vector<int>* num_entries_by_type,
    std::vector<int>* num_to_delete_entries_by_type) {
  syncable::ReadTransaction trans(FROM_HERE, this);
  ScopedKernelLock lock(this);

  for (MetahandlesMap::iterator it = kernel_->metahandles_map.begin();
       it != kernel_->metahandles_map.end(); ++it) {
    EntryKernel* entry = it->second;
    const ModelType type = GetModelTypeFromSpecifics(entry->ref(SPECIFICS));
    (*num_entries_by_type)[type]++;
    if (entry->ref(IS_DEL))
      (*num_to_delete_entries_by_type)[type]++;
  }
}
scoped_ptr<base::ListValue> Directory::GetNodeDetailsForType(
    BaseTransaction* trans,
    ModelType type) {
  scoped_ptr<base::ListValue> nodes(new base::ListValue());

  ScopedKernelLock lock(this);
  for (MetahandlesMap::iterator it = kernel_->metahandles_map.begin();
       it != kernel_->metahandles_map.end(); ++it) {
    if (GetModelTypeFromSpecifics(it->second->ref(SPECIFICS)) != type) {
      continue;
    }

    EntryKernel* kernel = it->second;
    scoped_ptr<base::DictionaryValue> node(
        kernel->ToValue(GetCryptographer(trans)));

    // Add the position index if appropriate. This must be done here (and not
    // in EntryKernel) because the EntryKernel does not have access to its
    // siblings.
    if (kernel->ShouldMaintainPosition() && !kernel->ref(IS_DEL)) {
      node->SetInteger("positionIndex", GetPositionIndex(trans, kernel));
    }

    nodes->Append(node.release());
  }

  return nodes.Pass();
}
bool Directory::CheckInvariantsOnTransactionClose(
    syncable::BaseTransaction* trans,
    const MetahandleSet& modified_handles) {
  // NOTE: The trans may be in the process of being destructed. Be careful if
  // you wish to call any of its virtual methods.
  switch (invariant_check_level_) {
    case FULL_DB_VERIFICATION: {
      MetahandleSet all_handles;
      GetAllMetaHandles(trans, &all_handles);
      return CheckTreeInvariants(trans, all_handles);
    }
    case VERIFY_CHANGES: {
      return CheckTreeInvariants(trans, modified_handles);
    }
    default:
      return true;
  }
}
bool Directory::FullyCheckTreeInvariants(syncable::BaseTransaction* trans) {
  MetahandleSet handles;
  GetAllMetaHandles(trans, &handles);
  return CheckTreeInvariants(trans, handles);
}
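
// Validates structural invariants (root shape, parent chain, name, version
// and id consistency) for every entry named in |handles|; returns false on
// the first violation.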
bool Directory::CheckTreeInvariants(syncable::BaseTransaction* trans,
                                    const MetahandleSet& handles) {
  MetahandleSet::const_iterator i;
  for (i = handles.begin(); i != handles.end(); ++i) {
    int64 metahandle = *i;
    Entry e(trans, GET_BY_HANDLE, metahandle);
    if (!SyncAssert(e.good(), FROM_HERE, "Entry is bad", trans))
      return false;
    syncable::Id id = e.GetId();
    syncable::Id parentid = e.GetParentId();

    if (id.IsRoot()) {
      if (!SyncAssert(e.GetIsDir(), FROM_HERE,
                      "Entry should be a directory",
                      trans))
        return false;
      if (!SyncAssert(parentid.IsRoot(), FROM_HERE,
                      "Entry should be root",
                      trans))
        return false;
      if (!SyncAssert(!e.GetIsUnsynced(), FROM_HERE, "Entry should be synced",
                      trans))
        return false;
      continue;
    }

    if (!e.GetIsDel()) {
      if (!SyncAssert(id != parentid, FROM_HERE,
                      "Id should be different from parent id.",
                      trans))
        return false;
      if (!SyncAssert(!e.GetNonUniqueName().empty(), FROM_HERE,
                      "Non unique name should not be empty.",
                      trans))
        return false;

      if (!parentid.IsNull()) {
        int safety_count = handles.size() + 1;
        while (!parentid.IsRoot()) {
          Entry parent(trans, GET_BY_ID, parentid);
          if (!SyncAssert(parent.good(), FROM_HERE,
                          "Parent entry is not valid.", trans))
            return false;
          if (handles.end() == handles.find(parent.GetMetahandle()))
            break;  // Skip further checking if parent was unmodified.
          if (!SyncAssert(parent.GetIsDir(), FROM_HERE,
                          "Parent should be a directory", trans))
            return false;
          if (!SyncAssert(!parent.GetIsDel(), FROM_HERE,
                          "Parent should not have been marked for deletion.",
                          trans))
            return false;
          if (!SyncAssert(handles.end() != handles.find(parent.GetMetahandle()),
                          FROM_HERE, "Parent should be in the index.", trans))
            return false;
          parentid = parent.GetParentId();
          if (!SyncAssert(--safety_count > 0, FROM_HERE,
                          "Count should be greater than zero.", trans))
            return false;
        }
      }
    }

    int64 base_version = e.GetBaseVersion();
    int64 server_version = e.GetServerVersion();
    bool using_unique_client_tag = !e.GetUniqueClientTag().empty();
    if (CHANGES_VERSION == base_version || 0 == base_version) {
      ModelType model_type = e.GetModelType();
      bool is_client_creatable_type_root_folder =
          parentid.IsRoot() &&
          IsTypeWithClientGeneratedRoot(model_type) &&
          e.GetUniqueServerTag() == ModelTypeToRootTag(model_type);
      if (e.GetIsUnappliedUpdate()) {
        // Must be a new item, or a de-duplicated unique client tag
        // that was created both locally and remotely, or a type root folder
        // that was created both locally and remotely.
        if (!(using_unique_client_tag ||
              is_client_creatable_type_root_folder)) {
          if (!SyncAssert(e.GetIsDel(), FROM_HERE,
                          "The entry should have been deleted.", trans))
            return false;
        }
        // It came from the server, so it must have a server ID.
        if (!SyncAssert(id.ServerKnows(), FROM_HERE,
                        "The id should be from a server.",
                        trans))
          return false;
      } else {
        if (e.GetIsDir()) {
          // TODO(chron): Implement this mode if clients ever need it.
          // For now, you can't combine a client tag and a directory.
          if (!SyncAssert(!using_unique_client_tag, FROM_HERE,
                          "Directory cannot have a client tag.",
                          trans))
            return false;
        }

        if (is_client_creatable_type_root_folder) {
          // This must be a locally created type root folder.
          if (!SyncAssert(
                  !e.GetIsUnsynced(), FROM_HERE,
                  "Locally created type root folders should not be unsynced.",
                  trans))
            return false;

          if (!SyncAssert(
                  !e.GetIsDel(), FROM_HERE,
                  "Locally created type root folders should not be deleted.",
                  trans))
            return false;
        } else {
          // Should be an uncommitted item, or a successfully deleted one.
          if (!e.GetIsDel()) {
            if (!SyncAssert(e.GetIsUnsynced(), FROM_HERE,
                            "The item should be unsynced.", trans))
              return false;
          }
        }

        // If the next check failed, it would imply that an item exists
        // on the server, isn't waiting for application locally, but either
        // is an unsynced create or a successful delete in the local copy.
        // Either way, that's a mismatch.
        if (!SyncAssert(0 == server_version, FROM_HERE,
                        "Server version should be zero.",
                        trans))
          return false;
        // Items that aren't using the unique client tag should have a zero
        // base version only if they have a local ID. Items with unique client
        // tags are allowed to use the zero base version for undeletion and
        // de-duplication; the unique client tag trumps the server ID.
        if (!using_unique_client_tag) {
          if (!SyncAssert(!id.ServerKnows(), FROM_HERE,
                          "Should be a client only id.",
                          trans))
            return false;
        }
      }
    } else {
      if (!SyncAssert(id.ServerKnows(),
                      FROM_HERE,
                      "Should be a server id.",
                      trans))
        return false;
    }

    // Previously we would assert that locally deleted items that have never
    // been synced must not be sent to the server (IS_UNSYNCED must be false).
    // This is not always true in the case that an item is deleted while the
    // initial commit is in flight. See crbug.com/426865.
  }
  return true;
}
void Directory::SetInvariantCheckLevel(InvariantCheckLevel check_level) {
  invariant_check_level_ = check_level;
}
int64 Directory::NextMetahandle() {
  ScopedKernelLock lock(this);
  int64 metahandle = (kernel_->next_metahandle)++;
  return metahandle;
}
// Generates next client ID based on a randomly generated GUID.
Id Directory::NextId() {
  return Id::CreateFromClientString(base::GenerateGUID());
}
bool Directory::HasChildren(BaseTransaction* trans, const Id& id) {
  ScopedKernelLock lock(this);
  return kernel_->parent_child_index.GetChildren(id) != NULL;
}
Id Directory::GetFirstChildId(BaseTransaction* trans,
                              const EntryKernel* parent) {
  DCHECK(parent->ref(IS_DIR));

  ScopedKernelLock lock(this);
  const OrderedChildSet* children =
      kernel_->parent_child_index.GetChildren(parent->ref(ID));

  // We're expected to return root if there are no children.
  if (!children)
    return Id();

  return (*children->begin())->ref(ID);
}
syncable::Id Directory::GetPredecessorId(EntryKernel* e) {
  ScopedKernelLock lock(this);

  DCHECK(ParentChildIndex::ShouldInclude(e));
  const OrderedChildSet* siblings = kernel_->parent_child_index.GetSiblings(e);
  OrderedChildSet::const_iterator i = siblings->find(e);
  DCHECK(i != siblings->end());

  if (i == siblings->begin()) {
    return Id();
  } else {
    i--;
    return (*i)->ref(ID);
  }
}
syncable::Id Directory::GetSuccessorId(EntryKernel* e) {
  ScopedKernelLock lock(this);

  DCHECK(ParentChildIndex::ShouldInclude(e));
  const OrderedChildSet* siblings = kernel_->parent_child_index.GetSiblings(e);
  OrderedChildSet::const_iterator i = siblings->find(e);
  DCHECK(i != siblings->end());

  i++;
  if (i == siblings->end()) {
    return Id();
  } else {
    return (*i)->ref(ID);
  }
}
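
// Assigns |e| a unique position immediately after |predecessor|, or first
// among its siblings when |predecessor| is NULL, creating an initial position
// when the parent has no positionable children.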
// TODO(rlarocque): Remove all support for placing ShouldMaintainPosition()
// items as siblings of items that do not maintain positions. It is required
// only for tests. See crbug.com/178282.
void Directory::PutPredecessor(EntryKernel* e, EntryKernel* predecessor) {
  DCHECK(!e->ref(IS_DEL));
  if (!e->ShouldMaintainPosition()) {
    DCHECK(!e->ref(UNIQUE_POSITION).IsValid());
    return;
  }
  std::string suffix = e->ref(UNIQUE_BOOKMARK_TAG);
  DCHECK(!suffix.empty());

  // Remove our item from the ParentChildIndex and remember to re-add it later.
  ScopedKernelLock lock(this);
  ScopedParentChildIndexUpdater updater(lock, e, &kernel_->parent_child_index);

  // Note: The ScopedParentChildIndexUpdater will update this set for us as we
  // leave this function.
  const OrderedChildSet* siblings =
      kernel_->parent_child_index.GetChildren(e->ref(PARENT_ID));

  if (!siblings) {
    // This parent currently has no other children.
    DCHECK(predecessor == NULL);
    UniquePosition pos = UniquePosition::InitialPosition(suffix);
    e->put(UNIQUE_POSITION, pos);
    return;
  }

  if (predecessor == NULL) {
    // We have at least one sibling, and we're inserting to the left of them.
    UniquePosition successor_pos = (*siblings->begin())->ref(UNIQUE_POSITION);

    UniquePosition pos;
    if (!successor_pos.IsValid()) {
      // If all our successors are of non-positionable types, just create an
      // initial position. We arbitrarily choose to sort invalid positions to
      // the right of the valid positions.
      //
      // We really shouldn't need to support this. See TODO above.
      pos = UniquePosition::InitialPosition(suffix);
    } else {
      DCHECK(!siblings->empty());
      pos = UniquePosition::Before(successor_pos, suffix);
    }

    e->put(UNIQUE_POSITION, pos);
    return;
  }

  // We can't support placing an item after an invalid position. Fortunately,
  // the tests don't exercise this particular case. We should not support
  // siblings with invalid positions at all. See TODO above.
  DCHECK(predecessor->ref(UNIQUE_POSITION).IsValid());

  OrderedChildSet::const_iterator neighbour = siblings->find(predecessor);
  DCHECK(neighbour != siblings->end());

  ++neighbour;
  if (neighbour == siblings->end()) {
    // Inserting at the end of the list.
    UniquePosition pos = UniquePosition::After(
        predecessor->ref(UNIQUE_POSITION), suffix);
    e->put(UNIQUE_POSITION, pos);
    return;
  }

  EntryKernel* successor = *neighbour;

  // Another mixed valid and invalid position case. This one could be supported
  // in theory, but we're trying to deprecate support for siblings with and
  // without valid positions. See TODO above.
  // Using a release CHECK here because the following UniquePosition::Between
  // call crashes anyway when the position string is empty (see crbug/332371).
  CHECK(successor->ref(UNIQUE_POSITION).IsValid());

  // Finally, the normal case: inserting between two elements.
  UniquePosition pos = UniquePosition::Between(
      predecessor->ref(UNIQUE_POSITION),
      successor->ref(UNIQUE_POSITION),
      suffix);
  e->put(UNIQUE_POSITION, pos);
}
// TODO(rlarocque): Avoid this indirection. Just return the set.
void Directory::AppendChildHandles(const ScopedKernelLock& lock,
                                   const Id& parent_id,
                                   Directory::Metahandles* result) {
  const OrderedChildSet* children =
      kernel_->parent_child_index.GetChildren(parent_id);
  if (!children)
    return;

  for (OrderedChildSet::const_iterator i = children->begin();
       i != children->end(); ++i) {
    result->push_back((*i)->ref(META_HANDLE));
  }
}
void Directory::UnmarkDirtyEntry(WriteTransaction* trans, Entry* entry) {
  entry->kernel_->clear_dirty(&kernel_->dirty_metahandles);
}
void Directory::GetAttachmentIdsToUpload(BaseTransaction* trans,
                                         ModelType type,
                                         AttachmentIdList* ids) {
  // TODO(maniscalco): Maintain an index by ModelType and rewrite this method
  // to use it. The approach below is likely very expensive because it
  // iterates all entries (bug 415199).
  AttachmentIdSet on_server_id_set;
  AttachmentIdSet not_on_server_id_set;
  std::vector<int64> metahandles;
  {
    ScopedKernelLock lock(this);
    GetMetaHandlesOfType(lock, trans, type, &metahandles);
    std::vector<int64>::const_iterator iter = metahandles.begin();
    const std::vector<int64>::const_iterator end = metahandles.end();
    // For all of this type's entries...
    for (; iter != end; ++iter) {
      EntryKernel* entry = GetEntryByHandle(lock, *iter);
      const sync_pb::AttachmentMetadata metadata =
          entry->ref(ATTACHMENT_METADATA);
      // for each of this entry's attachments...
      for (int i = 0; i < metadata.record_size(); ++i) {
        AttachmentId id =
            AttachmentId::CreateFromProto(metadata.record(i).id());
        // if this attachment is known to be on the server, remember it for
        // later...
        if (metadata.record(i).is_on_server()) {
          on_server_id_set.insert(id);
        } else {
          // otherwise, add it to id_set.
          not_on_server_id_set.insert(id);
        }
      }
    }
  }
  // Why did we bother keeping a set of ids known to be on the server? The
  // is_on_server flag is stored denormalized so we can end up with two entries
  // with the same attachment id where one says it's on the server and the
  // other says it's not. When this happens, we trust the one that says it's on
  // the server. To avoid re-uploading the same attachment multiple times, we
  // remove any ids known to be on the server from the id_set we are about to
  // return.
  //
  // TODO(maniscalco): Eliminate redundant metadata storage (bug 415203).
  std::set_difference(not_on_server_id_set.begin(), not_on_server_id_set.end(),
                      on_server_id_set.begin(), on_server_id_set.end(),
                      std::back_inserter(*ids));
}
void Directory::OnCatastrophicError() {
  UMA_HISTOGRAM_BOOLEAN("Sync.DirectoryCatastrophicError", true);
  ReadTransaction trans(FROM_HERE, this);
  OnUnrecoverableError(&trans, FROM_HERE,
                       "Catastrophic error detected, Sync DB is unrecoverable");
}
Directory::Kernel* Directory::kernel() {
  return kernel_;
}

const Directory::Kernel* Directory::kernel() const {
  return kernel_;
}
}  // namespace syncable
}  // namespace syncer