// Copyright 2013 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "sync/syncable/directory.h"

#include <algorithm>
#include <iterator>

#include "base/base64.h"
#include "base/guid.h"
#include "base/metrics/histogram.h"
#include "base/stl_util.h"
#include "base/strings/string_number_conversions.h"
#include "base/trace_event/trace_event.h"
#include "sync/internal_api/public/base/attachment_id_proto.h"
#include "sync/internal_api/public/base/unique_position.h"
#include "sync/internal_api/public/util/unrecoverable_error_handler.h"
#include "sync/syncable/entry.h"
#include "sync/syncable/entry_kernel.h"
#include "sync/syncable/in_memory_directory_backing_store.h"
#include "sync/syncable/on_disk_directory_backing_store.h"
#include "sync/syncable/scoped_kernel_lock.h"
#include "sync/syncable/scoped_parent_child_index_updater.h"
#include "sync/syncable/syncable-inl.h"
#include "sync/syncable/syncable_base_transaction.h"
#include "sync/syncable/syncable_changes_version.h"
#include "sync/syncable/syncable_read_transaction.h"
#include "sync/syncable/syncable_util.h"
#include "sync/syncable/syncable_write_transaction.h"

using std::string;

namespace syncer {
namespace syncable {

// static
const base::FilePath::CharType Directory::kSyncDatabaseFilename[] =
    FILE_PATH_LITERAL("SyncData.sqlite3");

Directory::PersistedKernelInfo::PersistedKernelInfo() {
  ModelTypeSet protocol_types = ProtocolTypes();
  for (ModelTypeSet::Iterator iter = protocol_types.First(); iter.Good();
       iter.Inc()) {
    ResetDownloadProgress(iter.Get());
    transaction_version[iter.Get()] = 0;
  }
}

Directory::PersistedKernelInfo::~PersistedKernelInfo() {}

void Directory::PersistedKernelInfo::ResetDownloadProgress(
    ModelType model_type) {
  // Clear everything except the data type id field.
  download_progress[model_type].Clear();
  download_progress[model_type].set_data_type_id(
      GetSpecificsFieldNumberFromModelType(model_type));

  // Explicitly set an empty token field to denote no progress.
  download_progress[model_type].set_token("");
}

bool Directory::PersistedKernelInfo::HasEmptyDownloadProgress(
    ModelType model_type) {
  const sync_pb::DataTypeProgressMarker& progress_marker =
      download_progress[model_type];
  return progress_marker.token().empty();
}

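// An empty progress token is this class's convention for "this type has never
// completed a download cycle". A minimal sketch of how the two helpers above
// interact (illustrative only):
//
//   Directory::PersistedKernelInfo info;
//   info.ResetDownloadProgress(BOOKMARKS);
//   DCHECK(info.HasEmptyDownloadProgress(BOOKMARKS));  // token == ""
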
70 Directory::SaveChangesSnapshot::SaveChangesSnapshot()
71 : kernel_info_status(KERNEL_SHARE_INFO_INVALID
) {
74 Directory::SaveChangesSnapshot::~SaveChangesSnapshot() {
75 STLDeleteElements(&dirty_metas
);
76 STLDeleteElements(&delete_journals
);
79 bool Directory::SaveChangesSnapshot::HasUnsavedMetahandleChanges() const {
80 return !dirty_metas
.empty() || !metahandles_to_purge
.empty() ||
81 !delete_journals
.empty() || !delete_journals_to_purge
.empty();
Directory::Kernel::Kernel(
    const std::string& name,
    const KernelLoadInfo& info,
    DirectoryChangeDelegate* delegate,
    const WeakHandle<TransactionObserver>& transaction_observer)
    : next_write_transaction_id(0),
      name(name),
      info_status(Directory::KERNEL_SHARE_INFO_VALID),
      persisted_info(info.kernel_info),
      cache_guid(info.cache_guid),
      next_metahandle(info.max_metahandle + 1),
      delegate(delegate),
      transaction_observer(transaction_observer) {
  DCHECK(delegate);
  DCHECK(transaction_observer.IsInitialized());
}

Directory::Kernel::~Kernel() {
  STLDeleteContainerPairSecondPointers(metahandles_map.begin(),
                                       metahandles_map.end());
}

Directory::Directory(
    DirectoryBackingStore* store,
    const WeakHandle<UnrecoverableErrorHandler>& unrecoverable_error_handler,
    const base::Closure& report_unrecoverable_error_function,
    NigoriHandler* nigori_handler,
    Cryptographer* cryptographer)
    : kernel_(NULL),
      store_(store),
      unrecoverable_error_handler_(unrecoverable_error_handler),
      report_unrecoverable_error_function_(
          report_unrecoverable_error_function),
      unrecoverable_error_set_(false),
      nigori_handler_(nigori_handler),
      cryptographer_(cryptographer),
      invariant_check_level_(VERIFY_CHANGES),
      weak_ptr_factory_(this) {}

Directory::~Directory() {
  Close();
}

DirOpenResult Directory::Open(
    const string& name,
    DirectoryChangeDelegate* delegate,
    const WeakHandle<TransactionObserver>& transaction_observer) {
  TRACE_EVENT0("sync", "SyncDatabaseOpen");

  const DirOpenResult result =
      OpenImpl(name, delegate, transaction_observer);

  if (OPENED != result)
    Close();

  return result;
}

void Directory::InitializeIndices(MetahandlesMap* handles_map) {
  ScopedKernelLock lock(this);
  kernel_->metahandles_map.swap(*handles_map);
  for (MetahandlesMap::const_iterator it = kernel_->metahandles_map.begin();
       it != kernel_->metahandles_map.end(); ++it) {
    EntryKernel* entry = it->second;
    if (ParentChildIndex::ShouldInclude(entry))
      kernel_->parent_child_index.Insert(entry);
    const int64 metahandle = entry->ref(META_HANDLE);
    if (entry->ref(IS_UNSYNCED))
      kernel_->unsynced_metahandles.insert(metahandle);
    if (entry->ref(IS_UNAPPLIED_UPDATE)) {
      const ModelType type = entry->GetServerModelType();
      kernel_->unapplied_update_metahandles[type].insert(metahandle);
    }
    if (!entry->ref(UNIQUE_SERVER_TAG).empty()) {
      DCHECK(kernel_->server_tags_map.find(entry->ref(UNIQUE_SERVER_TAG)) ==
             kernel_->server_tags_map.end())
          << "Unexpected duplicate use of server tag";
      kernel_->server_tags_map[entry->ref(UNIQUE_SERVER_TAG)] = entry;
    }
    if (!entry->ref(UNIQUE_CLIENT_TAG).empty()) {
      DCHECK(kernel_->client_tags_map.find(entry->ref(UNIQUE_CLIENT_TAG)) ==
             kernel_->client_tags_map.end())
          << "Unexpected duplicate use of client tag";
      kernel_->client_tags_map[entry->ref(UNIQUE_CLIENT_TAG)] = entry;
    }
    DCHECK(kernel_->ids_map.find(entry->ref(ID).value()) ==
           kernel_->ids_map.end()) << "Unexpected duplicate use of ID";
    kernel_->ids_map[entry->ref(ID).value()] = entry;
    DCHECK(!entry->is_dirty());
    AddToAttachmentIndex(lock, metahandle, entry->ref(ATTACHMENT_METADATA));
  }
}

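// InitializeIndices() is the one place where every in-memory index
// (metahandles_map, ids_map, parent_child_index, the unsynced/unapplied sets,
// the tag maps, and the attachment index) is rebuilt from a freshly loaded
// MetahandlesMap; all later mutations only maintain them incrementally. A
// rough sketch of the invariant it establishes (commentary only):
//
//   for each loaded EntryKernel* e:
//     ids_map[e->ref(ID).value()] == e
//     metahandles_map[e->ref(META_HANDLE)] == e
//     e's handle in unsynced_metahandles        iff e->ref(IS_UNSYNCED)
//     e's handle in unapplied_update_metahandles[server type]
//                                               iff e->ref(IS_UNAPPLIED_UPDATE)
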
DirOpenResult Directory::OpenImpl(
    const string& name,
    DirectoryChangeDelegate* delegate,
    const WeakHandle<TransactionObserver>&
        transaction_observer) {
  KernelLoadInfo info;
  // Temporary indices before kernel_ is initialized in case Load fails. We
  // O(1) swap these later.
  Directory::MetahandlesMap tmp_handles_map;

  // Avoids mem leaks on failure. Harmlessly deletes the empty hash map after
  // the swap in the success case.
  STLValueDeleter<MetahandlesMap> deleter(&tmp_handles_map);

  JournalIndex delete_journals;
  MetahandleSet metahandles_to_purge;

  DirOpenResult result = store_->Load(&tmp_handles_map, &delete_journals,
                                      &metahandles_to_purge, &info);
  if (OPENED != result)
    return result;

  kernel_ = new Kernel(name, info, delegate, transaction_observer);
  kernel_->metahandles_to_purge.swap(metahandles_to_purge);
  delete_journal_.reset(new DeleteJournal(&delete_journals));
  InitializeIndices(&tmp_handles_map);

  // Save changes back in case there are any metahandles to purge.
  if (!SaveChanges())
    return FAILED_INITIAL_WRITE;

  // Now that we've successfully opened the store, install an error handler to
  // deal with catastrophic errors that may occur later on. Use a weak pointer
  // because we cannot guarantee that this Directory will outlive the Closure.
  store_->SetCatastrophicErrorHandler(base::Bind(
      &Directory::OnCatastrophicError, weak_ptr_factory_.GetWeakPtr()));

  return OPENED;
}

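// A minimal sketch of the open lifecycle as OpenImpl() implements it
// (hypothetical caller, shown for orientation only):
//
//   Directory dir(store, error_handler, report_error_closure,
//                 nigori_handler, cryptographer);
//   if (dir.Open("user_share", delegate, transaction_observer) != OPENED) {
//     // Either the backing store failed to Load(), or the initial
//     // SaveChanges() could not persist the purge set; Open() has already
//     // called Close() on the failure path.
//   }
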
DeleteJournal* Directory::delete_journal() {
  DCHECK(delete_journal_.get());
  return delete_journal_.get();
}

void Directory::Close() {
  store_.reset();
  if (kernel_) {
    delete kernel_;
    kernel_ = NULL;
  }
}

void Directory::OnUnrecoverableError(const BaseTransaction* trans,
                                     const tracked_objects::Location& location,
                                     const std::string& message) {
  DCHECK(trans != NULL);
  unrecoverable_error_set_ = true;
  unrecoverable_error_handler_.Call(
      FROM_HERE, &UnrecoverableErrorHandler::OnUnrecoverableError, location,
      message);
}

EntryKernel* Directory::GetEntryById(const Id& id) {
  ScopedKernelLock lock(this);
  return GetEntryById(lock, id);
}

EntryKernel* Directory::GetEntryById(const ScopedKernelLock& lock,
                                     const Id& id) {
  DCHECK(kernel_);
  // Find it in the in memory ID index.
  IdsMap::iterator id_found = kernel_->ids_map.find(id.value());
  if (id_found != kernel_->ids_map.end()) {
    return id_found->second;
  }
  return NULL;
}

EntryKernel* Directory::GetEntryByClientTag(const string& tag) {
  ScopedKernelLock lock(this);
  DCHECK(kernel_);

  TagsMap::iterator it = kernel_->client_tags_map.find(tag);
  if (it != kernel_->client_tags_map.end()) {
    return it->second;
  }
  return NULL;
}

EntryKernel* Directory::GetEntryByServerTag(const string& tag) {
  ScopedKernelLock lock(this);
  DCHECK(kernel_);
  TagsMap::iterator it = kernel_->server_tags_map.find(tag);
  if (it != kernel_->server_tags_map.end()) {
    return it->second;
  }
  return NULL;
}

EntryKernel* Directory::GetEntryByHandle(int64 metahandle) {
  ScopedKernelLock lock(this);
  return GetEntryByHandle(lock, metahandle);
}

EntryKernel* Directory::GetEntryByHandle(const ScopedKernelLock& lock,
                                         int64 metahandle) {
  // Look up in memory.
  MetahandlesMap::iterator found =
      kernel_->metahandles_map.find(metahandle);
  if (found != kernel_->metahandles_map.end()) {
    // Found it in memory. Easy.
    return found->second;
  }
  return NULL;
}

bool Directory::GetChildHandlesById(
    BaseTransaction* trans, const Id& parent_id,
    Directory::Metahandles* result) {
  if (!SyncAssert(this == trans->directory(), FROM_HERE,
                  "Directories don't match", trans))
    return false;
  result->clear();

  ScopedKernelLock lock(this);
  AppendChildHandles(lock, parent_id, result);
  return true;
}

int Directory::GetTotalNodeCount(
    BaseTransaction* trans,
    EntryKernel* kernel) const {
  if (!SyncAssert(this == trans->directory(), FROM_HERE,
                  "Directories don't match", trans))
    return 0;

  int count = 1;
  std::deque<const OrderedChildSet*> child_sets;

  GetChildSetForKernel(trans, kernel, &child_sets);
  while (!child_sets.empty()) {
    const OrderedChildSet* set = child_sets.front();
    child_sets.pop_front();
    for (OrderedChildSet::const_iterator it = set->begin();
         it != set->end(); ++it) {
      count++;
      GetChildSetForKernel(trans, *it, &child_sets);
    }
  }

  return count;
}

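// GetTotalNodeCount() walks the subtree iteratively with a deque of pending
// child sets instead of recursing, presumably to keep stack depth bounded on
// deep hierarchies (bookmark trees are user-controlled). The count starts at
// 1 so the starting node itself is included in the total.
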
void Directory::GetChildSetForKernel(
    BaseTransaction* trans,
    EntryKernel* kernel,
    std::deque<const OrderedChildSet*>* child_sets) const {
  if (!kernel->ref(IS_DIR))
    return;  // Not a directory => no children.

  const OrderedChildSet* descendants =
      kernel_->parent_child_index.GetChildren(kernel->ref(ID));
  if (!descendants)
    return;  // This directory has no children.

  // Add our children to the list of items to be traversed.
  child_sets->push_back(descendants);
}

int Directory::GetPositionIndex(
    BaseTransaction* trans,
    EntryKernel* kernel) const {
  const OrderedChildSet* siblings =
      kernel_->parent_child_index.GetSiblings(kernel);

  OrderedChildSet::const_iterator it = siblings->find(kernel);
  return std::distance(siblings->begin(), it);
}

bool Directory::InsertEntry(BaseWriteTransaction* trans, EntryKernel* entry) {
  ScopedKernelLock lock(this);
  return InsertEntry(lock, trans, entry);
}

bool Directory::InsertEntry(const ScopedKernelLock& lock,
                            BaseWriteTransaction* trans,
                            EntryKernel* entry) {
  if (!SyncAssert(NULL != entry, FROM_HERE, "Entry is null", trans))
    return false;

  static const char error[] = "Entry already in memory index.";

  if (!SyncAssert(
          kernel_->metahandles_map.insert(
              std::make_pair(entry->ref(META_HANDLE), entry)).second,
          FROM_HERE,
          error,
          trans)) {
    return false;
  }

  if (!SyncAssert(
          kernel_->ids_map.insert(
              std::make_pair(entry->ref(ID).value(), entry)).second,
          FROM_HERE,
          error,
          trans)) {
    return false;
  }
  if (ParentChildIndex::ShouldInclude(entry)) {
    if (!SyncAssert(kernel_->parent_child_index.Insert(entry),
                    FROM_HERE,
                    error,
                    trans)) {
      return false;
    }
  }
  AddToAttachmentIndex(
      lock, entry->ref(META_HANDLE), entry->ref(ATTACHMENT_METADATA));

  // Should NEVER be created with a client tag or server tag.
  if (!SyncAssert(entry->ref(UNIQUE_SERVER_TAG).empty(), FROM_HERE,
                  "Server tag should be empty", trans)) {
    return false;
  }
  if (!SyncAssert(entry->ref(UNIQUE_CLIENT_TAG).empty(), FROM_HERE,
                  "Client tag should be empty", trans))
    return false;

  return true;
}

bool Directory::ReindexId(BaseWriteTransaction* trans,
                          EntryKernel* const entry,
                          const Id& new_id) {
  ScopedKernelLock lock(this);
  if (NULL != GetEntryById(lock, new_id))
    return false;

  {
    // Update the indices that depend on the ID field.
    ScopedParentChildIndexUpdater updater_b(lock, entry,
                                            &kernel_->parent_child_index);
    size_t num_erased = kernel_->ids_map.erase(entry->ref(ID).value());
    DCHECK_EQ(1U, num_erased);
    entry->put(ID, new_id);
    kernel_->ids_map[entry->ref(ID).value()] = entry;
  }
  return true;
}

bool Directory::ReindexParentId(BaseWriteTransaction* trans,
                                EntryKernel* const entry,
                                const Id& new_parent_id) {
  ScopedKernelLock lock(this);

  {
    // Update the indices that depend on the PARENT_ID field.
    ScopedParentChildIndexUpdater index_updater(lock, entry,
                                                &kernel_->parent_child_index);
    entry->put(PARENT_ID, new_parent_id);
  }
  return true;
}

void Directory::RemoveFromAttachmentIndex(
    const ScopedKernelLock& lock,
    const int64 metahandle,
    const sync_pb::AttachmentMetadata& attachment_metadata) {
  for (int i = 0; i < attachment_metadata.record_size(); ++i) {
    AttachmentIdUniqueId unique_id =
        attachment_metadata.record(i).id().unique_id();
    IndexByAttachmentId::iterator iter =
        kernel_->index_by_attachment_id.find(unique_id);
    if (iter != kernel_->index_by_attachment_id.end()) {
      iter->second.erase(metahandle);
      if (iter->second.empty()) {
        kernel_->index_by_attachment_id.erase(iter);
      }
    }
  }
}

void Directory::AddToAttachmentIndex(
    const ScopedKernelLock& lock,
    const int64 metahandle,
    const sync_pb::AttachmentMetadata& attachment_metadata) {
  for (int i = 0; i < attachment_metadata.record_size(); ++i) {
    AttachmentIdUniqueId unique_id =
        attachment_metadata.record(i).id().unique_id();
    IndexByAttachmentId::iterator iter =
        kernel_->index_by_attachment_id.find(unique_id);
    if (iter == kernel_->index_by_attachment_id.end()) {
      iter = kernel_->index_by_attachment_id.insert(std::make_pair(
          unique_id,
          MetahandleSet())).first;
    }
    iter->second.insert(metahandle);
  }
}

void Directory::UpdateAttachmentIndex(
    const int64 metahandle,
    const sync_pb::AttachmentMetadata& old_metadata,
    const sync_pb::AttachmentMetadata& new_metadata) {
  ScopedKernelLock lock(this);
  RemoveFromAttachmentIndex(lock, metahandle, old_metadata);
  AddToAttachmentIndex(lock, metahandle, new_metadata);
}

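// The attachment index maps an attachment's unique id to the set of
// metahandles whose ATTACHMENT_METADATA references it, i.e. roughly:
//
//   typedef std::map<AttachmentIdUniqueId, MetahandleSet> IndexByAttachmentId;
//
// (The real typedef lives in the Directory header; it is sketched here only
// to make the Add/Remove/Update trio above easier to follow.)
// UpdateAttachmentIndex() is simply remove-old-then-add-new under one kernel
// lock, so readers never observe a partially updated mapping.
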
void Directory::GetMetahandlesByAttachmentId(
    BaseTransaction* trans,
    const sync_pb::AttachmentIdProto& attachment_id_proto,
    Metahandles* result) {
  DCHECK(result);
  result->clear();
  ScopedKernelLock lock(this);
  IndexByAttachmentId::const_iterator index_iter =
      kernel_->index_by_attachment_id.find(attachment_id_proto.unique_id());
  if (index_iter == kernel_->index_by_attachment_id.end())
    return;
  const MetahandleSet& metahandle_set = index_iter->second;
  std::copy(
      metahandle_set.begin(), metahandle_set.end(), back_inserter(*result));
}

bool Directory::unrecoverable_error_set(const BaseTransaction* trans) const {
  DCHECK(trans != NULL);
  return unrecoverable_error_set_;
}

void Directory::ClearDirtyMetahandles(const ScopedKernelLock& lock) {
  kernel_->transaction_mutex.AssertAcquired();
  kernel_->dirty_metahandles.clear();
}

bool Directory::SafeToPurgeFromMemory(WriteTransaction* trans,
                                      const EntryKernel* const entry) const {
  bool safe = entry->ref(IS_DEL) && !entry->is_dirty() &&
              !entry->ref(SYNCING) && !entry->ref(IS_UNAPPLIED_UPDATE) &&
              !entry->ref(IS_UNSYNCED);

  if (safe) {
    int64 handle = entry->ref(META_HANDLE);
    const ModelType type = entry->GetServerModelType();
    if (!SyncAssert(kernel_->dirty_metahandles.count(handle) == 0U,
                    FROM_HERE,
                    "Dirty metahandles should be empty", trans))
      return false;
    // TODO(tim): Bug 49278.
    if (!SyncAssert(!kernel_->unsynced_metahandles.count(handle),
                    FROM_HERE,
                    "Unsynced handles should be empty",
                    trans))
      return false;
    if (!SyncAssert(!kernel_->unapplied_update_metahandles[type].count(handle),
                    FROM_HERE,
                    "Unapplied metahandles should be empty",
                    trans))
      return false;
  }

  return safe;
}

void Directory::TakeSnapshotForSaveChanges(SaveChangesSnapshot* snapshot) {
  ReadTransaction trans(FROM_HERE, this);
  ScopedKernelLock lock(this);

  // If there is an unrecoverable error then just bail out.
  if (unrecoverable_error_set(&trans))
    return;

  // Deep copy dirty entries from kernel_->metahandles_index into snapshot and
  // clear dirty flags.
  for (MetahandleSet::const_iterator i = kernel_->dirty_metahandles.begin();
       i != kernel_->dirty_metahandles.end(); ++i) {
    EntryKernel* entry = GetEntryByHandle(lock, *i);
    if (!entry)
      continue;
    // Skip over false positives; it happens relatively infrequently.
    if (!entry->is_dirty())
      continue;
    snapshot->dirty_metas.insert(snapshot->dirty_metas.end(),
                                 new EntryKernel(*entry));
    DCHECK_EQ(1U, kernel_->dirty_metahandles.count(*i));
    // We don't bother removing from the index here as we blow the entire thing
    // in a moment, and it unnecessarily complicates iteration.
    entry->clear_dirty(NULL);
  }
  ClearDirtyMetahandles(lock);

  // Set purged handles.
  DCHECK(snapshot->metahandles_to_purge.empty());
  snapshot->metahandles_to_purge.swap(kernel_->metahandles_to_purge);

  // Fill kernel_info_status and kernel_info.
  snapshot->kernel_info = kernel_->persisted_info;
  snapshot->kernel_info_status = kernel_->info_status;
  // This one we reset on failure.
  kernel_->info_status = KERNEL_SHARE_INFO_VALID;

  delete_journal_->TakeSnapshotAndClear(
      &trans, &snapshot->delete_journals, &snapshot->delete_journals_to_purge);
}

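// TakeSnapshotForSaveChanges() optimistically clears every dirty bit while
// deep-copying the dirty entries into the snapshot. If the subsequent disk
// write fails, HandleSaveChangesFailure() below restores the bits from that
// same snapshot, so no dirtiness is ever lost; if it succeeds,
// VacuumAfterSaveChanges() uses the snapshot to drop purgeable entries from
// memory.
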
bool Directory::SaveChanges() {
  bool success = false;

  base::AutoLock scoped_lock(kernel_->save_changes_mutex);

  // Snapshot and save.
  SaveChangesSnapshot snapshot;
  TakeSnapshotForSaveChanges(&snapshot);
  success = store_->SaveChanges(snapshot);

  // Handle success or failure.
  if (success)
    success = VacuumAfterSaveChanges(snapshot);
  else
    HandleSaveChangesFailure(snapshot);
  return success;
}

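// A minimal caller-side sketch of the save protocol (hypothetical caller;
// in practice SaveChanges() is driven periodically by the sync engine):
//
//   if (!directory->SaveChanges()) {
//     // The snapshot was rolled back: dirty bits were restored and kernel
//     // info was re-marked KERNEL_SHARE_INFO_DIRTY, so a later
//     // SaveChanges() call will retry exactly the same data.
//   }
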
bool Directory::VacuumAfterSaveChanges(const SaveChangesSnapshot& snapshot) {
  if (snapshot.dirty_metas.empty())
    return true;

  // Need a write transaction as we are about to permanently purge entries.
  WriteTransaction trans(FROM_HERE, VACUUM_AFTER_SAVE, this);
  ScopedKernelLock lock(this);
  // Now drop everything we can out of memory.
  for (EntryKernelSet::const_iterator i = snapshot.dirty_metas.begin();
       i != snapshot.dirty_metas.end(); ++i) {
    MetahandlesMap::iterator found =
        kernel_->metahandles_map.find((*i)->ref(META_HANDLE));
    EntryKernel* entry = (found == kernel_->metahandles_map.end() ?
                          NULL : found->second);
    if (entry && SafeToPurgeFromMemory(&trans, entry)) {
      // We now drop deleted metahandles that are up to date on both the client
      // and the server.
      size_t num_erased = 0;
      num_erased = kernel_->metahandles_map.erase(entry->ref(META_HANDLE));
      DCHECK_EQ(1u, num_erased);
      num_erased = kernel_->ids_map.erase(entry->ref(ID).value());
      DCHECK_EQ(1u, num_erased);
      if (!entry->ref(UNIQUE_SERVER_TAG).empty()) {
        num_erased =
            kernel_->server_tags_map.erase(entry->ref(UNIQUE_SERVER_TAG));
        DCHECK_EQ(1u, num_erased);
      }
      if (!entry->ref(UNIQUE_CLIENT_TAG).empty()) {
        num_erased =
            kernel_->client_tags_map.erase(entry->ref(UNIQUE_CLIENT_TAG));
        DCHECK_EQ(1u, num_erased);
      }
      if (!SyncAssert(!kernel_->parent_child_index.Contains(entry),
                      FROM_HERE,
                      "Deleted entry still present",
                      &trans))
        return false;
      RemoveFromAttachmentIndex(
          lock, entry->ref(META_HANDLE), entry->ref(ATTACHMENT_METADATA));

      delete entry;
    }
    if (trans.unrecoverable_error_set())
      return false;
  }
  return true;
}

void Directory::UnapplyEntry(EntryKernel* entry) {
  int64 handle = entry->ref(META_HANDLE);
  ModelType server_type = GetModelTypeFromSpecifics(
      entry->ref(SERVER_SPECIFICS));

  // Clear enough so that on the next sync cycle all local data will
  // be overwritten.
  // Note: do not modify the root node in order to preserve the
  // initial sync ended bit for this type (else on the next restart
  // this type will be treated as disabled and therefore fully purged).
  if (IsRealDataType(server_type) &&
      ModelTypeToRootTag(server_type) == entry->ref(UNIQUE_SERVER_TAG)) {
    return;
  }

  // Set the unapplied bit if this item has server data.
  if (IsRealDataType(server_type) && !entry->ref(IS_UNAPPLIED_UPDATE)) {
    entry->put(IS_UNAPPLIED_UPDATE, true);
    kernel_->unapplied_update_metahandles[server_type].insert(handle);
    entry->mark_dirty(&kernel_->dirty_metahandles);
  }

  // Unset the unsynced bit.
  if (entry->ref(IS_UNSYNCED)) {
    kernel_->unsynced_metahandles.erase(handle);
    entry->put(IS_UNSYNCED, false);
    entry->mark_dirty(&kernel_->dirty_metahandles);
  }

  // Mark the item as locally deleted. No deleted items are allowed in the
  // parent child index.
  if (!entry->ref(IS_DEL)) {
    kernel_->parent_child_index.Remove(entry);
    entry->put(IS_DEL, true);
    entry->mark_dirty(&kernel_->dirty_metahandles);
  }

  // Set the version to the "newly created" version.
  if (entry->ref(BASE_VERSION) != CHANGES_VERSION) {
    entry->put(BASE_VERSION, CHANGES_VERSION);
    entry->mark_dirty(&kernel_->dirty_metahandles);
  }

  // At this point locally created items that aren't synced will become locally
  // deleted items, and purged on the next snapshot. All other items will match
  // the state they would have had if they were just created via a server
  // update. See MutableEntry::MutableEntry(.., CreateNewUpdateItem, ..).
}

void Directory::DeleteEntry(const ScopedKernelLock& lock,
                            bool save_to_journal,
                            EntryKernel* entry,
                            EntryKernelSet* entries_to_journal) {
  int64 handle = entry->ref(META_HANDLE);
  ModelType server_type = GetModelTypeFromSpecifics(
      entry->ref(SERVER_SPECIFICS));

  kernel_->metahandles_to_purge.insert(handle);

  size_t num_erased = 0;
  num_erased = kernel_->metahandles_map.erase(entry->ref(META_HANDLE));
  DCHECK_EQ(1u, num_erased);
  num_erased = kernel_->ids_map.erase(entry->ref(ID).value());
  DCHECK_EQ(1u, num_erased);
  num_erased = kernel_->unsynced_metahandles.erase(handle);
  DCHECK_EQ(entry->ref(IS_UNSYNCED), num_erased > 0);
  num_erased =
      kernel_->unapplied_update_metahandles[server_type].erase(handle);
  DCHECK_EQ(entry->ref(IS_UNAPPLIED_UPDATE), num_erased > 0);
  if (kernel_->parent_child_index.Contains(entry))
    kernel_->parent_child_index.Remove(entry);

  if (!entry->ref(UNIQUE_CLIENT_TAG).empty()) {
    num_erased =
        kernel_->client_tags_map.erase(entry->ref(UNIQUE_CLIENT_TAG));
    DCHECK_EQ(1u, num_erased);
  }
  if (!entry->ref(UNIQUE_SERVER_TAG).empty()) {
    num_erased =
        kernel_->server_tags_map.erase(entry->ref(UNIQUE_SERVER_TAG));
    DCHECK_EQ(1u, num_erased);
  }
  RemoveFromAttachmentIndex(lock, handle, entry->ref(ATTACHMENT_METADATA));

  if (save_to_journal) {
    entries_to_journal->insert(entry);
  } else {
    delete entry;
  }
}

bool Directory::PurgeEntriesWithTypeIn(ModelTypeSet disabled_types,
                                       ModelTypeSet types_to_journal,
                                       ModelTypeSet types_to_unapply) {
  disabled_types.RemoveAll(ProxyTypes());

  if (disabled_types.Empty())
    return true;

  {
    WriteTransaction trans(FROM_HERE, PURGE_ENTRIES, this);

    EntryKernelSet entries_to_journal;
    STLElementDeleter<EntryKernelSet> journal_deleter(&entries_to_journal);

    {
      ScopedKernelLock lock(this);

      bool found_progress = false;
      for (ModelTypeSet::Iterator iter = disabled_types.First(); iter.Good();
           iter.Inc()) {
        if (!kernel_->persisted_info.HasEmptyDownloadProgress(iter.Get()))
          found_progress = true;
      }

      // If none of the disabled types have progress markers, there's nothing
      // to purge.
      if (!found_progress)
        return true;

      // We iterate in two passes to avoid a bug in STLport (which is used in
      // the Android build). There are some versions of that library where a
      // hash_map's iterators can be invalidated when an item is erased from
      // the hash_map.
      // See http://sourceforge.net/p/stlport/bugs/239/.
      std::set<EntryKernel*> to_purge;
      for (MetahandlesMap::iterator it = kernel_->metahandles_map.begin();
           it != kernel_->metahandles_map.end(); ++it) {
        const sync_pb::EntitySpecifics& local_specifics =
            it->second->ref(SPECIFICS);
        const sync_pb::EntitySpecifics& server_specifics =
            it->second->ref(SERVER_SPECIFICS);
        ModelType local_type = GetModelTypeFromSpecifics(local_specifics);
        ModelType server_type = GetModelTypeFromSpecifics(server_specifics);

        if ((IsRealDataType(local_type) && disabled_types.Has(local_type)) ||
            (IsRealDataType(server_type) && disabled_types.Has(server_type))) {
          to_purge.insert(it->second);
        }
      }

      for (std::set<EntryKernel*>::iterator it = to_purge.begin();
           it != to_purge.end(); ++it) {
        EntryKernel* entry = *it;

        const sync_pb::EntitySpecifics& local_specifics =
            (*it)->ref(SPECIFICS);
        const sync_pb::EntitySpecifics& server_specifics =
            (*it)->ref(SERVER_SPECIFICS);
        ModelType local_type = GetModelTypeFromSpecifics(local_specifics);
        ModelType server_type = GetModelTypeFromSpecifics(server_specifics);

        if (types_to_unapply.Has(local_type) ||
            types_to_unapply.Has(server_type)) {
          UnapplyEntry(entry);
        } else {
          bool save_to_journal =
              (types_to_journal.Has(local_type) ||
               types_to_journal.Has(server_type)) &&
              (delete_journal_->IsDeleteJournalEnabled(local_type) ||
               delete_journal_->IsDeleteJournalEnabled(server_type));
          DeleteEntry(lock, save_to_journal, entry, &entries_to_journal);
        }
      }

      delete_journal_->AddJournalBatch(&trans, entries_to_journal);

      // Ensure meta tracking for these data types reflects the purged state.
      for (ModelTypeSet::Iterator it = disabled_types.First();
           it.Good(); it.Inc()) {
        kernel_->persisted_info.transaction_version[it.Get()] = 0;

        // Don't discard progress markers or context for unapplied types.
        if (!types_to_unapply.Has(it.Get())) {
          kernel_->persisted_info.ResetDownloadProgress(it.Get());
          kernel_->persisted_info.datatype_context[it.Get()].Clear();
        }
      }

      kernel_->info_status = KERNEL_SHARE_INFO_DIRTY;
    }
  }
  return true;
}

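// Decision matrix for each purged entry, as implemented above: types in
// |types_to_unapply| are unapplied (local data cleared, server data kept as
// an unapplied update); types in |types_to_journal| whose delete journal is
// enabled are deleted and journaled; everything else is simply deleted.
// Progress markers and datatype context survive only for unapplied types,
// since their server-side data remains usable.
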
bool Directory::ResetVersionsForType(BaseWriteTransaction* trans,
                                     ModelType type) {
  if (!ProtocolTypes().Has(type))
    return false;
  DCHECK_NE(type, BOOKMARKS) << "Only non-hierarchical types are supported";

  EntryKernel* type_root = GetEntryByServerTag(ModelTypeToRootTag(type));
  if (!type_root)
    return false;

  ScopedKernelLock lock(this);
  const Id& type_root_id = type_root->ref(ID);
  Directory::Metahandles children;
  AppendChildHandles(lock, type_root_id, &children);

  for (Metahandles::iterator it = children.begin(); it != children.end();
       ++it) {
    EntryKernel* entry = GetEntryByHandle(lock, *it);
    if (!entry)
      continue;
    if (entry->ref(BASE_VERSION) > 1)
      entry->put(BASE_VERSION, 1);
    if (entry->ref(SERVER_VERSION) > 1)
      entry->put(SERVER_VERSION, 1);

    // Note that we do not unset IS_UNSYNCED or IS_UNAPPLIED_UPDATE in order
    // to ensure no in-transit data is lost.

    entry->mark_dirty(&kernel_->dirty_metahandles);
  }

  return true;
}

bool Directory::IsAttachmentLinked(
    const sync_pb::AttachmentIdProto& attachment_id_proto) const {
  ScopedKernelLock lock(this);
  IndexByAttachmentId::const_iterator iter =
      kernel_->index_by_attachment_id.find(attachment_id_proto.unique_id());
  if (iter != kernel_->index_by_attachment_id.end() &&
      !iter->second.empty()) {
    return true;
  }
  return false;
}

void Directory::HandleSaveChangesFailure(const SaveChangesSnapshot& snapshot) {
  WriteTransaction trans(FROM_HERE, HANDLE_SAVE_FAILURE, this);
  ScopedKernelLock lock(this);
  kernel_->info_status = KERNEL_SHARE_INFO_DIRTY;

  // Because we optimistically cleared the dirty bit on the real entries when
  // taking the snapshot, we must restore it on failure. Not doing this could
  // cause lost data, if no other changes are made to the in-memory entries
  // that would cause the dirty bit to get set again. Setting the bit ensures
  // that SaveChanges will at least try again later.
  for (EntryKernelSet::const_iterator i = snapshot.dirty_metas.begin();
       i != snapshot.dirty_metas.end(); ++i) {
    MetahandlesMap::iterator found =
        kernel_->metahandles_map.find((*i)->ref(META_HANDLE));
    if (found != kernel_->metahandles_map.end()) {
      found->second->mark_dirty(&kernel_->dirty_metahandles);
    }
  }

  kernel_->metahandles_to_purge.insert(snapshot.metahandles_to_purge.begin(),
                                       snapshot.metahandles_to_purge.end());

  // Restore delete journals.
  delete_journal_->AddJournalBatch(&trans, snapshot.delete_journals);
  delete_journal_->PurgeDeleteJournals(&trans,
                                       snapshot.delete_journals_to_purge);
}

void Directory::GetDownloadProgress(
    ModelType model_type,
    sync_pb::DataTypeProgressMarker* value_out) const {
  ScopedKernelLock lock(this);
  return value_out->CopyFrom(
      kernel_->persisted_info.download_progress[model_type]);
}

void Directory::GetDownloadProgressAsString(
    ModelType model_type,
    std::string* value_out) const {
  ScopedKernelLock lock(this);
  kernel_->persisted_info.download_progress[model_type].SerializeToString(
      value_out);
}

size_t Directory::GetEntriesCount() const {
  ScopedKernelLock lock(this);
  return kernel_->metahandles_map.size();
}

void Directory::SetDownloadProgress(
    ModelType model_type,
    const sync_pb::DataTypeProgressMarker& new_progress) {
  ScopedKernelLock lock(this);
  kernel_->persisted_info.download_progress[model_type].CopyFrom(new_progress);
  kernel_->info_status = KERNEL_SHARE_INFO_DIRTY;
}

bool Directory::HasEmptyDownloadProgress(ModelType type) const {
  ScopedKernelLock lock(this);
  return kernel_->persisted_info.HasEmptyDownloadProgress(type);
}

int64 Directory::GetTransactionVersion(ModelType type) const {
  kernel_->transaction_mutex.AssertAcquired();
  return kernel_->persisted_info.transaction_version[type];
}

void Directory::IncrementTransactionVersion(ModelType type) {
  kernel_->transaction_mutex.AssertAcquired();
  kernel_->persisted_info.transaction_version[type]++;
  kernel_->info_status = KERNEL_SHARE_INFO_DIRTY;
}

void Directory::GetDataTypeContext(BaseTransaction* trans,
                                   ModelType type,
                                   sync_pb::DataTypeContext* context) const {
  ScopedKernelLock lock(this);
  context->CopyFrom(kernel_->persisted_info.datatype_context[type]);
}

void Directory::SetDataTypeContext(
    BaseWriteTransaction* trans,
    ModelType type,
    const sync_pb::DataTypeContext& context) {
  ScopedKernelLock lock(this);
  kernel_->persisted_info.datatype_context[type].CopyFrom(context);
  kernel_->info_status = KERNEL_SHARE_INFO_DIRTY;
}

// TODO(stanisc): crbug.com/438313: change these to not rely on the folders.
ModelTypeSet Directory::InitialSyncEndedTypes() {
  syncable::ReadTransaction trans(FROM_HERE, this);
  ModelTypeSet protocol_types = ProtocolTypes();
  ModelTypeSet initial_sync_ended_types;
  for (ModelTypeSet::Iterator i = protocol_types.First(); i.Good(); i.Inc()) {
    if (InitialSyncEndedForType(&trans, i.Get())) {
      initial_sync_ended_types.Put(i.Get());
    }
  }
  return initial_sync_ended_types;
}

bool Directory::InitialSyncEndedForType(ModelType type) {
  syncable::ReadTransaction trans(FROM_HERE, this);
  return InitialSyncEndedForType(&trans, type);
}

bool Directory::InitialSyncEndedForType(
    BaseTransaction* trans, ModelType type) {
  // True iff the type's root node has been created.
  syncable::Entry entry(trans, syncable::GET_TYPE_ROOT, type);
  return entry.good();
}

string Directory::store_birthday() const {
  ScopedKernelLock lock(this);
  return kernel_->persisted_info.store_birthday;
}

void Directory::set_store_birthday(const string& store_birthday) {
  ScopedKernelLock lock(this);
  if (kernel_->persisted_info.store_birthday == store_birthday)
    return;
  kernel_->persisted_info.store_birthday = store_birthday;
  kernel_->info_status = KERNEL_SHARE_INFO_DIRTY;
}

string Directory::bag_of_chips() const {
  ScopedKernelLock lock(this);
  return kernel_->persisted_info.bag_of_chips;
}

void Directory::set_bag_of_chips(const string& bag_of_chips) {
  ScopedKernelLock lock(this);
  if (kernel_->persisted_info.bag_of_chips == bag_of_chips)
    return;
  kernel_->persisted_info.bag_of_chips = bag_of_chips;
  kernel_->info_status = KERNEL_SHARE_INFO_DIRTY;
}

string Directory::cache_guid() const {
  // No need to lock since nothing ever writes to it after load.
  return kernel_->cache_guid;
}

NigoriHandler* Directory::GetNigoriHandler() {
  return nigori_handler_;
}

Cryptographer* Directory::GetCryptographer(const BaseTransaction* trans) {
  DCHECK_EQ(this, trans->directory());
  return cryptographer_;
}

void Directory::ReportUnrecoverableError() {
  if (!report_unrecoverable_error_function_.is_null()) {
    report_unrecoverable_error_function_.Run();
  }
}

void Directory::GetAllMetaHandles(BaseTransaction* trans,
                                  MetahandleSet* result) {
  result->clear();
  ScopedKernelLock lock(this);
  for (MetahandlesMap::iterator i = kernel_->metahandles_map.begin();
       i != kernel_->metahandles_map.end(); ++i) {
    result->insert(i->first);
  }
}

void Directory::GetUnsyncedMetaHandles(BaseTransaction* trans,
                                       Metahandles* result) {
  result->clear();
  ScopedKernelLock lock(this);
  copy(kernel_->unsynced_metahandles.begin(),
       kernel_->unsynced_metahandles.end(), back_inserter(*result));
}

int64 Directory::unsynced_entity_count() const {
  ScopedKernelLock lock(this);
  return kernel_->unsynced_metahandles.size();
}

bool Directory::TypeHasUnappliedUpdates(ModelType type) {
  ScopedKernelLock lock(this);
  return !kernel_->unapplied_update_metahandles[type].empty();
}

void Directory::GetUnappliedUpdateMetaHandles(
    BaseTransaction* trans,
    FullModelTypeSet server_types,
    std::vector<int64>* result) {
  result->clear();
  ScopedKernelLock lock(this);
  for (int i = UNSPECIFIED; i < MODEL_TYPE_COUNT; ++i) {
    const ModelType type = ModelTypeFromInt(i);
    if (server_types.Has(type)) {
      std::copy(kernel_->unapplied_update_metahandles[type].begin(),
                kernel_->unapplied_update_metahandles[type].end(),
                back_inserter(*result));
    }
  }
}

void Directory::GetMetaHandlesOfType(BaseTransaction* trans,
                                     ModelType type,
                                     std::vector<int64>* result) {
  ScopedKernelLock lock(this);
  GetMetaHandlesOfType(lock, trans, type, result);
}

void Directory::GetMetaHandlesOfType(const ScopedKernelLock& lock,
                                     BaseTransaction* trans,
                                     ModelType type,
                                     std::vector<int64>* result) {
  result->clear();
  for (MetahandlesMap::iterator it = kernel_->metahandles_map.begin();
       it != kernel_->metahandles_map.end(); ++it) {
    EntryKernel* entry = it->second;
    const ModelType entry_type =
        GetModelTypeFromSpecifics(entry->ref(SPECIFICS));
    if (entry_type == type)
      result->push_back(it->first);
  }
}

void Directory::CollectMetaHandleCounts(
    std::vector<int>* num_entries_by_type,
    std::vector<int>* num_to_delete_entries_by_type) {
  syncable::ReadTransaction trans(FROM_HERE, this);
  ScopedKernelLock lock(this);

  for (MetahandlesMap::iterator it = kernel_->metahandles_map.begin();
       it != kernel_->metahandles_map.end(); ++it) {
    EntryKernel* entry = it->second;
    const ModelType type = GetModelTypeFromSpecifics(entry->ref(SPECIFICS));
    (*num_entries_by_type)[type]++;
    if (entry->ref(IS_DEL))
      (*num_to_delete_entries_by_type)[type]++;
  }
}

scoped_ptr<base::ListValue> Directory::GetNodeDetailsForType(
    BaseTransaction* trans,
    ModelType type) {
  scoped_ptr<base::ListValue> nodes(new base::ListValue());

  ScopedKernelLock lock(this);
  for (MetahandlesMap::iterator it = kernel_->metahandles_map.begin();
       it != kernel_->metahandles_map.end(); ++it) {
    if (GetModelTypeFromSpecifics(it->second->ref(SPECIFICS)) != type) {
      continue;
    }

    EntryKernel* kernel = it->second;
    scoped_ptr<base::DictionaryValue> node(
        kernel->ToValue(GetCryptographer(trans)));

    // Add the position index if appropriate. This must be done here (and not
    // in EntryKernel) because the EntryKernel does not have access to its
    // siblings.
    if (kernel->ShouldMaintainPosition() && !kernel->ref(IS_DEL)) {
      node->SetInteger("positionIndex", GetPositionIndex(trans, kernel));
    }

    nodes->Append(node.release());
  }

  return nodes.Pass();
}

bool Directory::CheckInvariantsOnTransactionClose(
    syncable::BaseTransaction* trans,
    const MetahandleSet& modified_handles) {
  // NOTE: The trans may be in the process of being destructed. Be careful if
  // you wish to call any of its virtual methods.
  switch (invariant_check_level_) {
    case FULL_DB_VERIFICATION: {
      MetahandleSet all_handles;
      GetAllMetaHandles(trans, &all_handles);
      return CheckTreeInvariants(trans, all_handles);
    }
    case VERIFY_CHANGES: {
      return CheckTreeInvariants(trans, modified_handles);
    }
    case OFF: {
      return true;
    }
  }
  NOTREACHED();
  return false;
}

bool Directory::FullyCheckTreeInvariants(syncable::BaseTransaction* trans) {
  MetahandleSet handles;
  GetAllMetaHandles(trans, &handles);
  return CheckTreeInvariants(trans, handles);
}

bool Directory::CheckTreeInvariants(syncable::BaseTransaction* trans,
                                    const MetahandleSet& handles) {
  MetahandleSet::const_iterator i;
  for (i = handles.begin(); i != handles.end(); ++i) {
    int64 metahandle = *i;
    Entry e(trans, GET_BY_HANDLE, metahandle);
    if (!SyncAssert(e.good(), FROM_HERE, "Entry is bad", trans))
      return false;
    syncable::Id id = e.GetId();
    syncable::Id parentid = e.GetParentId();

    if (id.IsRoot()) {
      if (!SyncAssert(e.GetIsDir(), FROM_HERE,
                      "Entry should be a directory",
                      trans))
        return false;
      if (!SyncAssert(parentid.IsRoot(), FROM_HERE,
                      "Entry should be root",
                      trans))
        return false;
      if (!SyncAssert(!e.GetIsUnsynced(), FROM_HERE, "Entry should be synced",
                      trans))
        return false;
      continue;
    }

    if (!e.GetIsDel()) {
      if (!SyncAssert(id != parentid, FROM_HERE,
                      "Id should be different from parent id.",
                      trans))
        return false;
      if (!SyncAssert(!e.GetNonUniqueName().empty(), FROM_HERE,
                      "Non unique name should not be empty.",
                      trans))
        return false;

      if (!parentid.IsNull()) {
        int safety_count = handles.size() + 1;
        while (!parentid.IsRoot()) {
          Entry parent(trans, GET_BY_ID, parentid);
          if (!SyncAssert(parent.good(), FROM_HERE,
                          "Parent entry is not valid.", trans))
            return false;
          if (handles.end() == handles.find(parent.GetMetahandle()))
            break;  // Skip further checking if parent was unmodified.
          if (!SyncAssert(parent.GetIsDir(), FROM_HERE,
                          "Parent should be a directory", trans))
            return false;
          if (!SyncAssert(!parent.GetIsDel(), FROM_HERE,
                          "Parent should not have been marked for deletion.",
                          trans))
            return false;
          if (!SyncAssert(handles.end() != handles.find(parent.GetMetahandle()),
                          FROM_HERE, "Parent should be in the index.", trans))
            return false;
          parentid = parent.GetParentId();
          if (!SyncAssert(--safety_count > 0, FROM_HERE,
                          "Count should be greater than zero.", trans))
            return false;
        }
      }
    }
    int64 base_version = e.GetBaseVersion();
    int64 server_version = e.GetServerVersion();
    bool using_unique_client_tag = !e.GetUniqueClientTag().empty();
    if (CHANGES_VERSION == base_version || 0 == base_version) {
      ModelType model_type = e.GetModelType();
      bool is_client_creatable_type_root_folder =
          parentid.IsRoot() &&
          IsTypeWithClientGeneratedRoot(model_type) &&
          e.GetUniqueServerTag() == ModelTypeToRootTag(model_type);
      if (e.GetIsUnappliedUpdate()) {
        // Must be a new item, or a de-duplicated unique client tag
        // that was created both locally and remotely, or a type root folder
        // that was created both locally and remotely.
        if (!(using_unique_client_tag ||
              is_client_creatable_type_root_folder)) {
          if (!SyncAssert(e.GetIsDel(), FROM_HERE,
                          "The entry should have been deleted.", trans))
            return false;
        }
        // It came from the server, so it must have a server ID.
        if (!SyncAssert(id.ServerKnows(), FROM_HERE,
                        "The id should be from a server.",
                        trans))
          return false;
      } else {
        if (e.GetIsDir()) {
          // TODO(chron): Implement this mode if clients ever need it.
          // For now, you can't combine a client tag and a directory.
          if (!SyncAssert(!using_unique_client_tag, FROM_HERE,
                          "Directory cannot have a client tag.",
                          trans))
            return false;
        }
        if (is_client_creatable_type_root_folder) {
          // This must be a locally created type root folder.
          if (!SyncAssert(
                  !e.GetIsUnsynced(), FROM_HERE,
                  "Locally created type root folders should not be unsynced.",
                  trans))
            return false;

          if (!SyncAssert(
                  !e.GetIsDel(), FROM_HERE,
                  "Locally created type root folders should not be deleted.",
                  trans))
            return false;
        } else {
          // Should be an uncommitted item, or a successfully deleted one.
          if (!e.GetIsDel()) {
            if (!SyncAssert(e.GetIsUnsynced(), FROM_HERE,
                            "The item should be unsynced.", trans))
              return false;
          }
        }
        // If the next check failed, it would imply that an item exists
        // on the server, isn't waiting for application locally, but either
        // is an unsynced create or a successful delete in the local copy.
        // Either way, that's a mismatch.
        if (!SyncAssert(0 == server_version, FROM_HERE,
                        "Server version should be zero.",
                        trans))
          return false;
        // Items that aren't using the unique client tag should have a zero
        // base version only if they have a local ID. Items with unique client
        // tags are allowed to use the zero base version for undeletion and
        // de-duplication; the unique client tag trumps the server ID.
        if (!using_unique_client_tag) {
          if (!SyncAssert(!id.ServerKnows(), FROM_HERE,
                          "Should be a client only id.",
                          trans))
            return false;
        }
      }
    } else {
      if (!SyncAssert(id.ServerKnows(),
                      FROM_HERE,
                      "Should be a server id.",
                      trans))
        return false;
    }

    // Previously we would assert that locally deleted items that have never
    // been synced must not be sent to the server (IS_UNSYNCED must be false).
    // This is not always true in the case that an item is deleted while the
    // initial commit is in flight. See crbug.com/426865.
  }
  return true;
}

void Directory::SetInvariantCheckLevel(InvariantCheckLevel check_level) {
  invariant_check_level_ = check_level;
}

int64 Directory::NextMetahandle() {
  ScopedKernelLock lock(this);
  int64 metahandle = (kernel_->next_metahandle)++;
  return metahandle;
}

// Generates next client ID based on a randomly generated GUID.
Id Directory::NextId() {
  return Id::CreateFromClientString(base::GenerateGUID());
}

bool Directory::HasChildren(BaseTransaction* trans, const Id& id) {
  ScopedKernelLock lock(this);
  return kernel_->parent_child_index.GetChildren(id) != NULL;
}

Id Directory::GetFirstChildId(BaseTransaction* trans,
                              const EntryKernel* parent) {
  DCHECK(parent);
  DCHECK(parent->ref(IS_DIR));

  ScopedKernelLock lock(this);
  const OrderedChildSet* children =
      kernel_->parent_child_index.GetChildren(parent->ref(ID));

  // We're expected to return root if there are no children.
  if (!children)
    return Id();

  return (*children->begin())->ref(ID);
}

syncable::Id Directory::GetPredecessorId(EntryKernel* e) {
  ScopedKernelLock lock(this);

  DCHECK(ParentChildIndex::ShouldInclude(e));
  const OrderedChildSet* siblings = kernel_->parent_child_index.GetSiblings(e);
  OrderedChildSet::const_iterator i = siblings->find(e);
  DCHECK(i != siblings->end());

  if (i == siblings->begin()) {
    return Id();
  } else {
    i--;
    return (*i)->ref(ID);
  }
}

syncable::Id Directory::GetSuccessorId(EntryKernel* e) {
  ScopedKernelLock lock(this);

  DCHECK(ParentChildIndex::ShouldInclude(e));
  const OrderedChildSet* siblings = kernel_->parent_child_index.GetSiblings(e);
  OrderedChildSet::const_iterator i = siblings->find(e);
  DCHECK(i != siblings->end());

  i++;
  if (i == siblings->end()) {
    return Id();
  } else {
    return (*i)->ref(ID);
  }
}

// TODO(rlarocque): Remove all support for placing ShouldMaintainPosition()
// items as siblings of items that do not maintain positions. It is required
// only for tests. See crbug.com/178282.
void Directory::PutPredecessor(EntryKernel* e, EntryKernel* predecessor) {
  DCHECK(!e->ref(IS_DEL));
  if (!e->ShouldMaintainPosition()) {
    DCHECK(!e->ref(UNIQUE_POSITION).IsValid());
    return;
  }
  std::string suffix = e->ref(UNIQUE_BOOKMARK_TAG);
  DCHECK(!suffix.empty());

  // Remove our item from the ParentChildIndex and remember to re-add it later.
  ScopedKernelLock lock(this);
  ScopedParentChildIndexUpdater updater(lock, e, &kernel_->parent_child_index);

  // Note: The ScopedParentChildIndexUpdater will update this set for us as we
  // leave this function.
  const OrderedChildSet* siblings =
      kernel_->parent_child_index.GetChildren(e->ref(PARENT_ID));

  if (!siblings) {
    // This parent currently has no other children.
    DCHECK(predecessor == NULL);
    UniquePosition pos = UniquePosition::InitialPosition(suffix);
    e->put(UNIQUE_POSITION, pos);
    return;
  }

  if (predecessor == NULL) {
    // We have at least one sibling, and we're inserting to the left of them.
    UniquePosition successor_pos = (*siblings->begin())->ref(UNIQUE_POSITION);

    UniquePosition pos;
    if (!successor_pos.IsValid()) {
      // If all our successors are of non-positionable types, just create an
      // initial position. We arbitrarily choose to sort invalid positions to
      // the right of the valid positions.
      //
      // We really shouldn't need to support this. See TODO above.
      pos = UniquePosition::InitialPosition(suffix);
    } else {
      DCHECK(!siblings->empty());
      pos = UniquePosition::Before(successor_pos, suffix);
    }

    e->put(UNIQUE_POSITION, pos);
    return;
  }

  // We can't support placing an item after an invalid position. Fortunately,
  // the tests don't exercise this particular case. We should not support
  // siblings with invalid positions at all. See TODO above.
  DCHECK(predecessor->ref(UNIQUE_POSITION).IsValid());

  OrderedChildSet::const_iterator neighbour = siblings->find(predecessor);
  DCHECK(neighbour != siblings->end());

  ++neighbour;
  if (neighbour == siblings->end()) {
    // Inserting at the end of the list.
    UniquePosition pos = UniquePosition::After(
        predecessor->ref(UNIQUE_POSITION),
        suffix);
    e->put(UNIQUE_POSITION, pos);
    return;
  }

  EntryKernel* successor = *neighbour;

  // Another mixed valid and invalid position case. This one could be supported
  // in theory, but we're trying to deprecate support for siblings with and
  // without valid positions. See TODO above.
  // Using a release CHECK here because the following UniquePosition::Between
  // call crashes anyway when the position string is empty (see crbug/332371).
  CHECK(successor->ref(UNIQUE_POSITION).IsValid()) << *successor;

  // Finally, the normal case: inserting between two elements.
  UniquePosition pos = UniquePosition::Between(
      predecessor->ref(UNIQUE_POSITION),
      successor->ref(UNIQUE_POSITION),
      suffix);
  e->put(UNIQUE_POSITION, pos);
  return;
}

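// PutPredecessor() is the only writer of UNIQUE_POSITION in this file, and it
// leans entirely on UniquePosition's ordering primitives. A condensed sketch
// of the four cases handled above (illustrative only):
//
//   no siblings          -> InitialPosition(suffix)
//   predecessor == NULL  -> Before(first_sibling_pos, suffix)
//   predecessor is last  -> After(predecessor_pos, suffix)
//   otherwise            -> Between(predecessor_pos, successor_pos, suffix)
//
// where |suffix| is the entry's UNIQUE_BOOKMARK_TAG, which keeps positions
// unique even when two clients independently pick the same neighbors.
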
// TODO(rlarocque): Avoid this indirection. Just return the set.
void Directory::AppendChildHandles(const ScopedKernelLock& lock,
                                   const Id& parent_id,
                                   Directory::Metahandles* result) {
  const OrderedChildSet* children =
      kernel_->parent_child_index.GetChildren(parent_id);
  if (!children)
    return;

  for (OrderedChildSet::const_iterator i = children->begin();
       i != children->end(); ++i) {
    result->push_back((*i)->ref(META_HANDLE));
  }
}

void Directory::UnmarkDirtyEntry(WriteTransaction* trans, Entry* entry) {
  CHECK(trans);
  entry->kernel_->clear_dirty(&kernel_->dirty_metahandles);
}

void Directory::GetAttachmentIdsToUpload(BaseTransaction* trans,
                                         ModelType type,
                                         AttachmentIdList* ids) {
  // TODO(maniscalco): Maintain an index by ModelType and rewrite this method
  // to use it. The approach below is likely very expensive because it
  // iterates all entries (bug 415199).
  DCHECK(trans);
  DCHECK(ids);
  ids->clear();
  AttachmentIdSet on_server_id_set;
  AttachmentIdSet not_on_server_id_set;
  std::vector<int64> metahandles;
  {
    ScopedKernelLock lock(this);
    GetMetaHandlesOfType(lock, trans, type, &metahandles);
    std::vector<int64>::const_iterator iter = metahandles.begin();
    const std::vector<int64>::const_iterator end = metahandles.end();
    // For all of this type's entries...
    for (; iter != end; ++iter) {
      EntryKernel* entry = GetEntryByHandle(lock, *iter);
      DCHECK(entry);
      const sync_pb::AttachmentMetadata metadata =
          entry->ref(ATTACHMENT_METADATA);
      // for each of this entry's attachments...
      for (int i = 0; i < metadata.record_size(); ++i) {
        AttachmentId id =
            AttachmentId::CreateFromProto(metadata.record(i).id());
        // if this attachment is known to be on the server, remember it for
        // later,
        if (metadata.record(i).is_on_server()) {
          on_server_id_set.insert(id);
        } else {
          // otherwise, add it to id_set.
          not_on_server_id_set.insert(id);
        }
      }
    }
  }
  // Why did we bother keeping a set of ids known to be on the server? The
  // is_on_server flag is stored denormalized so we can end up with two entries
  // with the same attachment id where one says it's on the server and the
  // other says it's not. When this happens, we trust the one that says it's on
  // the server. To avoid re-uploading the same attachment multiple times, we
  // remove any ids known to be on the server from the id_set we are about to
  // return.
  //
  // TODO(maniscalco): Eliminate redundant metadata storage (bug 415203).
  std::set_difference(not_on_server_id_set.begin(), not_on_server_id_set.end(),
                      on_server_id_set.begin(), on_server_id_set.end(),
                      std::back_inserter(*ids));
}

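// Example of the denormalization problem the set_difference above guards
// against (illustrative): two entries may reference the same attachment id,
// one recorded with is_on_server() == true and one with false. The id then
// lands in both sets, and subtracting |on_server_id_set| keeps it out of the
// upload list, trusting the copy that says the server already has it.
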
void Directory::OnCatastrophicError() {
  UMA_HISTOGRAM_BOOLEAN("Sync.DirectoryCatastrophicError", true);
  ReadTransaction trans(FROM_HERE, this);
  OnUnrecoverableError(&trans, FROM_HERE,
                       "Catastrophic error detected, Sync DB is unrecoverable");
}

Directory::Kernel* Directory::kernel() {
  return kernel_;
}

const Directory::Kernel* Directory::kernel() const {
  return kernel_;
}

}  // namespace syncable
}  // namespace syncer