// Copyright 2013 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "sync/syncable/directory.h"

#include <algorithm>
#include <deque>
#include <iterator>
#include <set>
#include <string>
#include "base/base64.h"
#include "base/stl_util.h"
#include "base/strings/string_number_conversions.h"
#include "base/trace_event/trace_event.h"
#include "sync/internal_api/public/base/attachment_id_proto.h"
#include "sync/internal_api/public/base/unique_position.h"
#include "sync/internal_api/public/util/unrecoverable_error_handler.h"
#include "sync/syncable/entry.h"
#include "sync/syncable/entry_kernel.h"
#include "sync/syncable/in_memory_directory_backing_store.h"
#include "sync/syncable/on_disk_directory_backing_store.h"
#include "sync/syncable/scoped_kernel_lock.h"
#include "sync/syncable/scoped_parent_child_index_updater.h"
#include "sync/syncable/syncable-inl.h"
#include "sync/syncable/syncable_base_transaction.h"
#include "sync/syncable/syncable_changes_version.h"
#include "sync/syncable/syncable_read_transaction.h"
#include "sync/syncable/syncable_util.h"
#include "sync/syncable/syncable_write_transaction.h"
using std::string;

namespace syncer {
namespace syncable {

// static
const base::FilePath::CharType Directory::kSyncDatabaseFilename[] =
    FILE_PATH_LITERAL("SyncData.sqlite3");
Directory::PersistedKernelInfo::PersistedKernelInfo() : next_id(0) {
  ModelTypeSet protocol_types = ProtocolTypes();
  for (ModelTypeSet::Iterator iter = protocol_types.First(); iter.Good();
       iter.Inc()) {
    ResetDownloadProgress(iter.Get());
    transaction_version[iter.Get()] = 0;
  }
}
Directory::PersistedKernelInfo::~PersistedKernelInfo() {}
void Directory::PersistedKernelInfo::ResetDownloadProgress(
    ModelType model_type) {
  // Clear everything except the data type id field.
  download_progress[model_type].Clear();
  download_progress[model_type].set_data_type_id(
      GetSpecificsFieldNumberFromModelType(model_type));

  // Explicitly set an empty token field to denote no progress.
  download_progress[model_type].set_token("");
}
bool Directory::PersistedKernelInfo::HasEmptyDownloadProgress(
    ModelType model_type) {
  const sync_pb::DataTypeProgressMarker& progress_marker =
      download_progress[model_type];
  return progress_marker.token().empty();
}
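
// Illustrative sketch (not part of the original file): the empty-token
// convention above means "no download progress yet" for a type. A freshly
// reset marker still carries its data type id, so the server knows which
// type is being requested from scratch:
//
//   sync_pb::DataTypeProgressMarker marker;
//   marker.set_data_type_id(
//       GetSpecificsFieldNumberFromModelType(BOOKMARKS));
//   marker.set_token("");  // Empty token == start from the beginning.
//   DCHECK(marker.token().empty());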
Directory::SaveChangesSnapshot::SaveChangesSnapshot()
    : kernel_info_status(KERNEL_SHARE_INFO_INVALID) {
}
Directory::SaveChangesSnapshot::~SaveChangesSnapshot() {
  STLDeleteElements(&dirty_metas);
  STLDeleteElements(&delete_journals);
}
bool Directory::SaveChangesSnapshot::HasUnsavedMetahandleChanges() const {
  return !dirty_metas.empty() || !metahandles_to_purge.empty() ||
         !delete_journals.empty() || !delete_journals_to_purge.empty();
}
Directory::Kernel::Kernel(
    const std::string& name,
    const KernelLoadInfo& info,
    DirectoryChangeDelegate* delegate,
    const WeakHandle<TransactionObserver>& transaction_observer)
    : next_write_transaction_id(0),
      name(name),
      info_status(Directory::KERNEL_SHARE_INFO_VALID),
      persisted_info(info.kernel_info),
      cache_guid(info.cache_guid),
      next_metahandle(info.max_metahandle + 1),
      delegate(delegate),
      transaction_observer(transaction_observer) {
  DCHECK(delegate);
  DCHECK(transaction_observer.IsInitialized());
}
Directory::Kernel::~Kernel() {
  STLDeleteContainerPairSecondPointers(metahandles_map.begin(),
                                       metahandles_map.end());
}
Directory::Directory(
    DirectoryBackingStore* store,
    UnrecoverableErrorHandler* unrecoverable_error_handler,
    ReportUnrecoverableErrorFunction report_unrecoverable_error_function,
    NigoriHandler* nigori_handler,
    Cryptographer* cryptographer)
    : kernel_(NULL),
      store_(store),
      unrecoverable_error_handler_(unrecoverable_error_handler),
      report_unrecoverable_error_function_(
          report_unrecoverable_error_function),
      unrecoverable_error_set_(false),
      nigori_handler_(nigori_handler),
      cryptographer_(cryptographer),
      invariant_check_level_(VERIFY_CHANGES) {
}
Directory::~Directory() {
  Close();
}
DirOpenResult Directory::Open(
    const string& name,
    DirectoryChangeDelegate* delegate,
    const WeakHandle<TransactionObserver>& transaction_observer) {
  TRACE_EVENT0("sync", "SyncDatabaseOpen");

  const DirOpenResult result =
      OpenImpl(name, delegate, transaction_observer);

  if (OPENED != result)
    Close();

  return result;
}
void Directory::InitializeIndices(MetahandlesMap* handles_map) {
  ScopedKernelLock lock(this);
  kernel_->metahandles_map.swap(*handles_map);
  for (MetahandlesMap::const_iterator it = kernel_->metahandles_map.begin();
       it != kernel_->metahandles_map.end(); ++it) {
    EntryKernel* entry = it->second;
    if (ParentChildIndex::ShouldInclude(entry))
      kernel_->parent_child_index.Insert(entry);
    const int64 metahandle = entry->ref(META_HANDLE);
    if (entry->ref(IS_UNSYNCED))
      kernel_->unsynced_metahandles.insert(metahandle);
    if (entry->ref(IS_UNAPPLIED_UPDATE)) {
      const ModelType type = entry->GetServerModelType();
      kernel_->unapplied_update_metahandles[type].insert(metahandle);
    }
    if (!entry->ref(UNIQUE_SERVER_TAG).empty()) {
      DCHECK(kernel_->server_tags_map.find(entry->ref(UNIQUE_SERVER_TAG)) ==
             kernel_->server_tags_map.end())
          << "Unexpected duplicate use of server tag";
      kernel_->server_tags_map[entry->ref(UNIQUE_SERVER_TAG)] = entry;
    }
    if (!entry->ref(UNIQUE_CLIENT_TAG).empty()) {
      DCHECK(kernel_->client_tags_map.find(entry->ref(UNIQUE_CLIENT_TAG)) ==
             kernel_->client_tags_map.end())
          << "Unexpected duplicate use of client tag";
      kernel_->client_tags_map[entry->ref(UNIQUE_CLIENT_TAG)] = entry;
    }
    DCHECK(kernel_->ids_map.find(entry->ref(ID).value()) ==
           kernel_->ids_map.end()) << "Unexpected duplicate use of ID";
    kernel_->ids_map[entry->ref(ID).value()] = entry;
    DCHECK(!entry->is_dirty());
    AddToAttachmentIndex(lock, metahandle, entry->ref(ATTACHMENT_METADATA));
  }
}
DirOpenResult Directory::OpenImpl(
    const string& name,
    DirectoryChangeDelegate* delegate,
    const WeakHandle<TransactionObserver>&
        transaction_observer) {
  KernelLoadInfo info;
  // Temporary indices before kernel_ initialized in case Load fails. We O(1)
  // swap these later.
  Directory::MetahandlesMap tmp_handles_map;

  // Avoids mem leaks on failure. Harmlessly deletes the empty hash map after
  // the swap in the success case.
  STLValueDeleter<MetahandlesMap> deleter(&tmp_handles_map);

  JournalIndex delete_journals;
  MetahandleSet metahandles_to_purge;

  DirOpenResult result = store_->Load(&tmp_handles_map, &delete_journals,
                                      &metahandles_to_purge, &info);
  if (OPENED != result)
    return result;

  kernel_ = new Kernel(name, info, delegate, transaction_observer);
  delete_journal_.reset(new DeleteJournal(&delete_journals));
  InitializeIndices(&tmp_handles_map);

  // Write back the share info to reserve some space in 'next_id'. This will
  // prevent local ID reuse in the case of an early crash. See the comments in
  // TakeSnapshotForSaveChanges() or crbug.com/142987 for more information.
  kernel_->info_status = KERNEL_SHARE_INFO_DIRTY;

  kernel_->metahandles_to_purge.swap(metahandles_to_purge);

  if (!SaveChanges())
    return FAILED_INITIAL_WRITE;

  return OPENED;
}
DeleteJournal* Directory::delete_journal() {
  DCHECK(delete_journal_.get());
  return delete_journal_.get();
}
void Directory::Close() {
  store_.reset();
  if (kernel_) {
    delete kernel_;
    kernel_ = NULL;
  }
}
void Directory::OnUnrecoverableError(const BaseTransaction* trans,
                                     const tracked_objects::Location& location,
                                     const std::string& message) {
  DCHECK(trans != NULL);
  unrecoverable_error_set_ = true;
  unrecoverable_error_handler_->OnUnrecoverableError(location,
                                                     message);
}
EntryKernel* Directory::GetEntryById(const Id& id) {
  ScopedKernelLock lock(this);
  return GetEntryById(lock, id);
}
EntryKernel* Directory::GetEntryById(const ScopedKernelLock& lock,
                                     const Id& id) {
  DCHECK(kernel_);
  // Find it in the in memory ID index.
  IdsMap::iterator id_found = kernel_->ids_map.find(id.value());
  if (id_found != kernel_->ids_map.end()) {
    return id_found->second;
  }
  return NULL;
}
EntryKernel* Directory::GetEntryByClientTag(const string& tag) {
  ScopedKernelLock lock(this);
  DCHECK(kernel_);

  TagsMap::iterator it = kernel_->client_tags_map.find(tag);
  if (it != kernel_->client_tags_map.end()) {
    return it->second;
  }
  return NULL;
}
EntryKernel* Directory::GetEntryByServerTag(const string& tag) {
  ScopedKernelLock lock(this);
  DCHECK(kernel_);

  TagsMap::iterator it = kernel_->server_tags_map.find(tag);
  if (it != kernel_->server_tags_map.end()) {
    return it->second;
  }
  return NULL;
}
EntryKernel* Directory::GetEntryByHandle(int64 metahandle) {
  ScopedKernelLock lock(this);
  return GetEntryByHandle(lock, metahandle);
}
EntryKernel* Directory::GetEntryByHandle(const ScopedKernelLock& lock,
                                         int64 metahandle) {
  // Look up in memory.
  MetahandlesMap::iterator found =
      kernel_->metahandles_map.find(metahandle);
  if (found != kernel_->metahandles_map.end()) {
    // Found it in memory. Easy.
    return found->second;
  }
  return NULL;
}
bool Directory::GetChildHandlesById(
    BaseTransaction* trans, const Id& parent_id,
    Directory::Metahandles* result) {
  if (!SyncAssert(this == trans->directory(), FROM_HERE,
                  "Directories don't match", trans))
    return false;
  result->clear();

  ScopedKernelLock lock(this);
  AppendChildHandles(lock, parent_id, result);
  return true;
}
int Directory::GetTotalNodeCount(
    BaseTransaction* trans,
    EntryKernel* kernel) const {
  if (!SyncAssert(this == trans->directory(), FROM_HERE,
                  "Directories don't match", trans))
    return 0;

  int count = 1;
  std::deque<const OrderedChildSet*> child_sets;

  GetChildSetForKernel(trans, kernel, &child_sets);
  while (!child_sets.empty()) {
    const OrderedChildSet* set = child_sets.front();
    child_sets.pop_front();
    for (OrderedChildSet::const_iterator it = set->begin();
         it != set->end(); ++it) {
      count++;
      GetChildSetForKernel(trans, *it, &child_sets);
    }
  }

  return count;
}
void Directory::GetChildSetForKernel(
    BaseTransaction* trans,
    EntryKernel* kernel,
    std::deque<const OrderedChildSet*>* child_sets) const {
  if (!kernel->ref(IS_DIR))
    return;  // Not a directory => no children.

  const OrderedChildSet* descendants =
      kernel_->parent_child_index.GetChildren(kernel->ref(ID));
  if (!descendants)
    return;  // This directory has no children.

  // Add our children to the list of items to be traversed.
  child_sets->push_back(descendants);
}
int Directory::GetPositionIndex(
    BaseTransaction* trans,
    EntryKernel* kernel) const {
  const OrderedChildSet* siblings =
      kernel_->parent_child_index.GetSiblings(kernel);

  OrderedChildSet::const_iterator it = siblings->find(kernel);
  return std::distance(siblings->begin(), it);
}
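
// Illustrative note (not part of the original file): assuming, as its name
// suggests, that OrderedChildSet iterates siblings in their positional
// order, std::distance from begin() to the entry's iterator yields the
// entry's zero-based index among its siblings, e.g. a hypothetical call for
// the first child:
//
//   int index = dir->GetPositionIndex(trans, first_child_kernel);  // == 0
//
// This walk is O(n) in the number of siblings, which is acceptable for the
// debug/UI paths that call it.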
bool Directory::InsertEntry(BaseWriteTransaction* trans, EntryKernel* entry) {
  ScopedKernelLock lock(this);
  return InsertEntry(lock, trans, entry);
}
bool Directory::InsertEntry(const ScopedKernelLock& lock,
                            BaseWriteTransaction* trans,
                            EntryKernel* entry) {
  if (!SyncAssert(NULL != entry, FROM_HERE, "Entry is null", trans))
    return false;

  static const char error[] = "Entry already in memory index.";

  if (!SyncAssert(
          kernel_->metahandles_map.insert(
              std::make_pair(entry->ref(META_HANDLE), entry)).second,
          FROM_HERE,
          error,
          trans)) {
    return false;
  }
  if (!SyncAssert(
          kernel_->ids_map.insert(
              std::make_pair(entry->ref(ID).value(), entry)).second,
          FROM_HERE,
          error,
          trans)) {
    return false;
  }
  if (ParentChildIndex::ShouldInclude(entry)) {
    if (!SyncAssert(kernel_->parent_child_index.Insert(entry),
                    FROM_HERE,
                    error,
                    trans)) {
      return false;
    }
  }
  AddToAttachmentIndex(
      lock, entry->ref(META_HANDLE), entry->ref(ATTACHMENT_METADATA));

  // Should NEVER be created with a client tag or server tag.
  if (!SyncAssert(entry->ref(UNIQUE_SERVER_TAG).empty(), FROM_HERE,
                  "Server tag should be empty", trans)) {
    return false;
  }
  if (!SyncAssert(entry->ref(UNIQUE_CLIENT_TAG).empty(), FROM_HERE,
                  "Client tag should be empty", trans))
    return false;

  return true;
}
bool Directory::ReindexId(BaseWriteTransaction* trans,
                          EntryKernel* const entry,
                          const Id& new_id) {
  ScopedKernelLock lock(this);
  if (NULL != GetEntryById(lock, new_id))
    return false;

  {
    // Update the indices that depend on the ID field.
    ScopedParentChildIndexUpdater updater_b(lock, entry,
        &kernel_->parent_child_index);
    size_t num_erased = kernel_->ids_map.erase(entry->ref(ID).value());
    DCHECK_EQ(1U, num_erased);
    entry->put(ID, new_id);
    kernel_->ids_map[entry->ref(ID).value()] = entry;
  }
  return true;
}
bool Directory::ReindexParentId(BaseWriteTransaction* trans,
                                EntryKernel* const entry,
                                const Id& new_parent_id) {
  ScopedKernelLock lock(this);

  {
    // Update the indices that depend on the PARENT_ID field.
    ScopedParentChildIndexUpdater index_updater(lock, entry,
        &kernel_->parent_child_index);
    entry->put(PARENT_ID, new_parent_id);
  }
  return true;
}
void Directory::RemoveFromAttachmentIndex(
    const ScopedKernelLock& lock,
    const int64 metahandle,
    const sync_pb::AttachmentMetadata& attachment_metadata) {
  for (int i = 0; i < attachment_metadata.record_size(); ++i) {
    AttachmentIdUniqueId unique_id =
        attachment_metadata.record(i).id().unique_id();
    IndexByAttachmentId::iterator iter =
        kernel_->index_by_attachment_id.find(unique_id);
    if (iter != kernel_->index_by_attachment_id.end()) {
      iter->second.erase(metahandle);
      if (iter->second.empty()) {
        kernel_->index_by_attachment_id.erase(iter);
      }
    }
  }
}
void Directory::AddToAttachmentIndex(
    const ScopedKernelLock& lock,
    const int64 metahandle,
    const sync_pb::AttachmentMetadata& attachment_metadata) {
  for (int i = 0; i < attachment_metadata.record_size(); ++i) {
    AttachmentIdUniqueId unique_id =
        attachment_metadata.record(i).id().unique_id();
    IndexByAttachmentId::iterator iter =
        kernel_->index_by_attachment_id.find(unique_id);
    if (iter == kernel_->index_by_attachment_id.end()) {
      iter = kernel_->index_by_attachment_id.insert(std::make_pair(
          unique_id,
          MetahandleSet())).first;
    }
    iter->second.insert(metahandle);
  }
}
void Directory::UpdateAttachmentIndex(
    const int64 metahandle,
    const sync_pb::AttachmentMetadata& old_metadata,
    const sync_pb::AttachmentMetadata& new_metadata) {
  ScopedKernelLock lock(this);
  RemoveFromAttachmentIndex(lock, metahandle, old_metadata);
  AddToAttachmentIndex(lock, metahandle, new_metadata);
}
void Directory::GetMetahandlesByAttachmentId(
    BaseTransaction* trans,
    const sync_pb::AttachmentIdProto& attachment_id_proto,
    Metahandles* result) {
  DCHECK(result);
  result->clear();
  ScopedKernelLock lock(this);
  IndexByAttachmentId::const_iterator index_iter =
      kernel_->index_by_attachment_id.find(attachment_id_proto.unique_id());
  if (index_iter == kernel_->index_by_attachment_id.end())
    return;
  const MetahandleSet& metahandle_set = index_iter->second;
  std::copy(
      metahandle_set.begin(), metahandle_set.end(), back_inserter(*result));
}
bool Directory::unrecoverable_error_set(const BaseTransaction* trans) const {
  DCHECK(trans != NULL);
  return unrecoverable_error_set_;
}
void Directory::ClearDirtyMetahandles(const ScopedKernelLock& lock) {
  kernel_->transaction_mutex.AssertAcquired();
  kernel_->dirty_metahandles.clear();
}
bool Directory::SafeToPurgeFromMemory(WriteTransaction* trans,
                                      const EntryKernel* const entry) const {
  bool safe = entry->ref(IS_DEL) && !entry->is_dirty() &&
              !entry->ref(SYNCING) && !entry->ref(IS_UNAPPLIED_UPDATE) &&
              !entry->ref(IS_UNSYNCED);

  if (safe) {
    int64 handle = entry->ref(META_HANDLE);
    const ModelType type = entry->GetServerModelType();
    if (!SyncAssert(kernel_->dirty_metahandles.count(handle) == 0U,
                    FROM_HERE,
                    "Dirty metahandles should be empty", trans))
      return false;
    // TODO(tim): Bug 49278.
    if (!SyncAssert(!kernel_->unsynced_metahandles.count(handle),
                    FROM_HERE,
                    "Unsynced handles should be empty",
                    trans))
      return false;
    if (!SyncAssert(!kernel_->unapplied_update_metahandles[type].count(handle),
                    FROM_HERE,
                    "Unapplied metahandles should be empty",
                    trans))
      return false;
  }

  return safe;
}
void Directory::TakeSnapshotForSaveChanges(SaveChangesSnapshot* snapshot) {
  ReadTransaction trans(FROM_HERE, this);
  ScopedKernelLock lock(this);

  // If there is an unrecoverable error then just bail out.
  if (unrecoverable_error_set(&trans))
    return;

  // Deep copy dirty entries from kernel_->metahandles_index into snapshot and
  // clear dirty flags.
  for (MetahandleSet::const_iterator i = kernel_->dirty_metahandles.begin();
       i != kernel_->dirty_metahandles.end(); ++i) {
    EntryKernel* entry = GetEntryByHandle(lock, *i);
    if (!entry)
      continue;
    // Skip over false positives; it happens relatively infrequently.
    if (!entry->is_dirty())
      continue;
    snapshot->dirty_metas.insert(snapshot->dirty_metas.end(),
                                 new EntryKernel(*entry));
    DCHECK_EQ(1U, kernel_->dirty_metahandles.count(*i));
    // We don't bother removing from the index here as we blow the entire thing
    // in a moment, and it unnecessarily complicates iteration.
    entry->clear_dirty(NULL);
  }
  ClearDirtyMetahandles(lock);

  // Set purged handles.
  DCHECK(snapshot->metahandles_to_purge.empty());
  snapshot->metahandles_to_purge.swap(kernel_->metahandles_to_purge);

  // Fill kernel_info_status and kernel_info.
  snapshot->kernel_info = kernel_->persisted_info;
  // To avoid duplicates when the process crashes, we record the next_id to be
  // greater magnitude than could possibly be reached before the next save
  // changes. In other words, it's effectively impossible for the user to
  // generate 65536 new bookmarks in 3 seconds.
  snapshot->kernel_info.next_id -= 65536;
  snapshot->kernel_info_status = kernel_->info_status;
  // This one we reset on failure.
  kernel_->info_status = KERNEL_SHARE_INFO_VALID;

  delete_journal_->TakeSnapshotAndClear(
      &trans, &snapshot->delete_journals, &snapshot->delete_journals_to_purge);
}
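
// Illustrative arithmetic (not part of the original file) for the next_id
// reservation above: locally generated ids grow in negative magnitude, so if
// the in-memory next_id is -1000, the persisted snapshot records -66536.
// After a crash the directory reloads -66536, and the ids -1000 through
// -66535 that might already have been handed out can never be reissued:
//
//   int64 in_memory_next_id = -1000;
//   int64 persisted_next_id = in_memory_next_id - 65536;  // == -66536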
bool Directory::SaveChanges() {
  bool success = false;

  base::AutoLock scoped_lock(kernel_->save_changes_mutex);

  // Snapshot and save.
  SaveChangesSnapshot snapshot;
  TakeSnapshotForSaveChanges(&snapshot);
  success = store_->SaveChanges(snapshot);

  // Handle success or failure.
  if (success)
    success = VacuumAfterSaveChanges(snapshot);
  else
    HandleSaveChangesFailure(snapshot);
  return success;
}
bool Directory::VacuumAfterSaveChanges(const SaveChangesSnapshot& snapshot) {
  if (snapshot.dirty_metas.empty())
    return true;

  // Need a write transaction as we are about to permanently purge entries.
  WriteTransaction trans(FROM_HERE, VACUUM_AFTER_SAVE, this);
  ScopedKernelLock lock(this);
  // Now drop everything we can out of memory.
  for (EntryKernelSet::const_iterator i = snapshot.dirty_metas.begin();
       i != snapshot.dirty_metas.end(); ++i) {
    MetahandlesMap::iterator found =
        kernel_->metahandles_map.find((*i)->ref(META_HANDLE));
    EntryKernel* entry = (found == kernel_->metahandles_map.end() ?
                          NULL : found->second);
    if (entry && SafeToPurgeFromMemory(&trans, entry)) {
      // We now drop deleted metahandles that are up to date on both the client
      // and the server.
      size_t num_erased = 0;
      num_erased = kernel_->metahandles_map.erase(entry->ref(META_HANDLE));
      DCHECK_EQ(1u, num_erased);
      num_erased = kernel_->ids_map.erase(entry->ref(ID).value());
      DCHECK_EQ(1u, num_erased);
      if (!entry->ref(UNIQUE_SERVER_TAG).empty()) {
        num_erased =
            kernel_->server_tags_map.erase(entry->ref(UNIQUE_SERVER_TAG));
        DCHECK_EQ(1u, num_erased);
      }
      if (!entry->ref(UNIQUE_CLIENT_TAG).empty()) {
        num_erased =
            kernel_->client_tags_map.erase(entry->ref(UNIQUE_CLIENT_TAG));
        DCHECK_EQ(1u, num_erased);
      }
      if (!SyncAssert(!kernel_->parent_child_index.Contains(entry),
                      FROM_HERE,
                      "Deleted entry still present",
                      &trans))
        return false;
      RemoveFromAttachmentIndex(
          lock, entry->ref(META_HANDLE), entry->ref(ATTACHMENT_METADATA));

      delete entry;
    }
    if (trans.unrecoverable_error_set())
      return false;
  }
  return true;
}
void Directory::UnapplyEntry(EntryKernel* entry) {
  int64 handle = entry->ref(META_HANDLE);
  ModelType server_type = GetModelTypeFromSpecifics(
      entry->ref(SERVER_SPECIFICS));

  // Clear enough so that on the next sync cycle all local data will
  // be overwritten.
  // Note: do not modify the root node in order to preserve the
  // initial sync ended bit for this type (else on the next restart
  // this type will be treated as disabled and therefore fully purged).
  if (IsRealDataType(server_type) &&
      ModelTypeToRootTag(server_type) == entry->ref(UNIQUE_SERVER_TAG)) {
    return;
  }

  // Set the unapplied bit if this item has server data.
  if (IsRealDataType(server_type) && !entry->ref(IS_UNAPPLIED_UPDATE)) {
    entry->put(IS_UNAPPLIED_UPDATE, true);
    kernel_->unapplied_update_metahandles[server_type].insert(handle);
    entry->mark_dirty(&kernel_->dirty_metahandles);
  }

  // Unset the unsynced bit.
  if (entry->ref(IS_UNSYNCED)) {
    kernel_->unsynced_metahandles.erase(handle);
    entry->put(IS_UNSYNCED, false);
    entry->mark_dirty(&kernel_->dirty_metahandles);
  }

  // Mark the item as locally deleted. No deleted items are allowed in the
  // parent child index.
  if (!entry->ref(IS_DEL)) {
    kernel_->parent_child_index.Remove(entry);
    entry->put(IS_DEL, true);
    entry->mark_dirty(&kernel_->dirty_metahandles);
  }

  // Set the version to the "newly created" version.
  if (entry->ref(BASE_VERSION) != CHANGES_VERSION) {
    entry->put(BASE_VERSION, CHANGES_VERSION);
    entry->mark_dirty(&kernel_->dirty_metahandles);
  }

  // At this point locally created items that aren't synced will become locally
  // deleted items, and purged on the next snapshot. All other items will match
  // the state they would have had if they were just created via a server
  // update. See MutableEntry::MutableEntry(.., CreateNewUpdateItem, ..).
}
void Directory::DeleteEntry(const ScopedKernelLock& lock,
                            bool save_to_journal,
                            EntryKernel* entry,
                            EntryKernelSet* entries_to_journal) {
  int64 handle = entry->ref(META_HANDLE);
  ModelType server_type = GetModelTypeFromSpecifics(
      entry->ref(SERVER_SPECIFICS));

  kernel_->metahandles_to_purge.insert(handle);

  size_t num_erased = 0;
  num_erased = kernel_->metahandles_map.erase(entry->ref(META_HANDLE));
  DCHECK_EQ(1u, num_erased);
  num_erased = kernel_->ids_map.erase(entry->ref(ID).value());
  DCHECK_EQ(1u, num_erased);
  num_erased = kernel_->unsynced_metahandles.erase(handle);
  DCHECK_EQ(entry->ref(IS_UNSYNCED), num_erased > 0);
  num_erased =
      kernel_->unapplied_update_metahandles[server_type].erase(handle);
  DCHECK_EQ(entry->ref(IS_UNAPPLIED_UPDATE), num_erased > 0);
  if (kernel_->parent_child_index.Contains(entry))
    kernel_->parent_child_index.Remove(entry);

  if (!entry->ref(UNIQUE_CLIENT_TAG).empty()) {
    num_erased =
        kernel_->client_tags_map.erase(entry->ref(UNIQUE_CLIENT_TAG));
    DCHECK_EQ(1u, num_erased);
  }
  if (!entry->ref(UNIQUE_SERVER_TAG).empty()) {
    num_erased =
        kernel_->server_tags_map.erase(entry->ref(UNIQUE_SERVER_TAG));
    DCHECK_EQ(1u, num_erased);
  }
  RemoveFromAttachmentIndex(lock, handle, entry->ref(ATTACHMENT_METADATA));

  if (save_to_journal) {
    entries_to_journal->insert(entry);
  } else {
    delete entry;
  }
}
bool Directory::PurgeEntriesWithTypeIn(ModelTypeSet disabled_types,
                                       ModelTypeSet types_to_journal,
                                       ModelTypeSet types_to_unapply) {
  disabled_types.RemoveAll(ProxyTypes());

  if (disabled_types.Empty())
    return true;

  {
    WriteTransaction trans(FROM_HERE, PURGE_ENTRIES, this);

    EntryKernelSet entries_to_journal;
    STLElementDeleter<EntryKernelSet> journal_deleter(&entries_to_journal);

    {
      ScopedKernelLock lock(this);

      bool found_progress = false;
      for (ModelTypeSet::Iterator iter = disabled_types.First(); iter.Good();
           iter.Inc()) {
        if (!kernel_->persisted_info.HasEmptyDownloadProgress(iter.Get()))
          found_progress = true;
      }

      // If none of the disabled types have progress markers, there's nothing
      // to purge.
      if (!found_progress)
        return true;

      // We iterate in two passes to avoid a bug in STLport (which is used in
      // the Android build). There are some versions of that library where a
      // hash_map's iterators can be invalidated when an item is erased from
      // the hash_map.
      // See http://sourceforge.net/p/stlport/bugs/239/.
      std::set<EntryKernel*> to_purge;
      for (MetahandlesMap::iterator it = kernel_->metahandles_map.begin();
           it != kernel_->metahandles_map.end(); ++it) {
        const sync_pb::EntitySpecifics& local_specifics =
            it->second->ref(SPECIFICS);
        const sync_pb::EntitySpecifics& server_specifics =
            it->second->ref(SERVER_SPECIFICS);
        ModelType local_type = GetModelTypeFromSpecifics(local_specifics);
        ModelType server_type = GetModelTypeFromSpecifics(server_specifics);

        if ((IsRealDataType(local_type) && disabled_types.Has(local_type)) ||
            (IsRealDataType(server_type) && disabled_types.Has(server_type))) {
          to_purge.insert(it->second);
        }
      }

      for (std::set<EntryKernel*>::iterator it = to_purge.begin();
           it != to_purge.end(); ++it) {
        EntryKernel* entry = *it;

        const sync_pb::EntitySpecifics& local_specifics =
            (*it)->ref(SPECIFICS);
        const sync_pb::EntitySpecifics& server_specifics =
            (*it)->ref(SERVER_SPECIFICS);
        ModelType local_type = GetModelTypeFromSpecifics(local_specifics);
        ModelType server_type = GetModelTypeFromSpecifics(server_specifics);

        if (types_to_unapply.Has(local_type) ||
            types_to_unapply.Has(server_type)) {
          UnapplyEntry(entry);
        } else {
          bool save_to_journal =
              (types_to_journal.Has(local_type) ||
               types_to_journal.Has(server_type)) &&
              (delete_journal_->IsDeleteJournalEnabled(local_type) ||
               delete_journal_->IsDeleteJournalEnabled(server_type));
          DeleteEntry(lock, save_to_journal, entry, &entries_to_journal);
        }
      }

      delete_journal_->AddJournalBatch(&trans, entries_to_journal);

      // Ensure meta tracking for these data types reflects the purged state.
      for (ModelTypeSet::Iterator it = disabled_types.First();
           it.Good(); it.Inc()) {
        kernel_->persisted_info.transaction_version[it.Get()] = 0;

        // Don't discard progress markers or context for unapplied types.
        if (!types_to_unapply.Has(it.Get())) {
          kernel_->persisted_info.ResetDownloadProgress(it.Get());
          kernel_->persisted_info.datatype_context[it.Get()].Clear();
        }
      }

      kernel_->info_status = KERNEL_SHARE_INFO_DIRTY;
    }
  }
  return true;
}
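
// Illustrative sketch (not part of the original file) of the two-pass erase
// pattern used above: collect the victims first, then erase, so no erase()
// call can invalidate the iterator still walking the map. ShouldPurge is a
// hypothetical predicate standing in for the type checks above:
//
//   std::set<EntryKernel*> to_purge;
//   for (MetahandlesMap::iterator it = map.begin(); it != map.end(); ++it) {
//     if (ShouldPurge(it->second))
//       to_purge.insert(it->second);  // First pass: read-only collection.
//   }
//   for (std::set<EntryKernel*>::iterator it = to_purge.begin();
//        it != to_purge.end(); ++it) {
//     DeleteEntry(lock, false, *it, NULL);  // Second pass: mutation.
//   }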
bool Directory::ResetVersionsForType(BaseWriteTransaction* trans,
                                     ModelType type) {
  if (!ProtocolTypes().Has(type))
    return false;
  DCHECK_NE(type, BOOKMARKS) << "Only non-hierarchical types are supported";

  EntryKernel* type_root = GetEntryByServerTag(ModelTypeToRootTag(type));
  if (!type_root)
    return false;

  ScopedKernelLock lock(this);
  const Id& type_root_id = type_root->ref(ID);
  Directory::Metahandles children;
  AppendChildHandles(lock, type_root_id, &children);

  for (Metahandles::iterator it = children.begin(); it != children.end();
       ++it) {
    EntryKernel* entry = GetEntryByHandle(lock, *it);
    if (!entry)
      continue;
    if (entry->ref(BASE_VERSION) > 1)
      entry->put(BASE_VERSION, 1);
    if (entry->ref(SERVER_VERSION) > 1)
      entry->put(SERVER_VERSION, 1);

    // Note that we do not unset IS_UNSYNCED or IS_UNAPPLIED_UPDATE in order
    // to ensure no in-transit data is lost.

    entry->mark_dirty(&kernel_->dirty_metahandles);
  }

  return true;
}
bool Directory::IsAttachmentLinked(
    const sync_pb::AttachmentIdProto& attachment_id_proto) const {
  ScopedKernelLock lock(this);
  IndexByAttachmentId::const_iterator iter =
      kernel_->index_by_attachment_id.find(attachment_id_proto.unique_id());
  if (iter != kernel_->index_by_attachment_id.end() && !iter->second.empty()) {
    return true;
  }
  return false;
}
void Directory::HandleSaveChangesFailure(const SaveChangesSnapshot& snapshot) {
  WriteTransaction trans(FROM_HERE, HANDLE_SAVE_FAILURE, this);
  ScopedKernelLock lock(this);
  kernel_->info_status = KERNEL_SHARE_INFO_DIRTY;

  // Because we optimistically cleared the dirty bit on the real entries when
  // taking the snapshot, we must restore it on failure. Not doing this could
  // cause lost data, if no other changes are made to the in-memory entries
  // that would cause the dirty bit to get set again. Setting the bit ensures
  // that SaveChanges will at least try again later.
  for (EntryKernelSet::const_iterator i = snapshot.dirty_metas.begin();
       i != snapshot.dirty_metas.end(); ++i) {
    MetahandlesMap::iterator found =
        kernel_->metahandles_map.find((*i)->ref(META_HANDLE));
    if (found != kernel_->metahandles_map.end()) {
      found->second->mark_dirty(&kernel_->dirty_metahandles);
    }
  }

  kernel_->metahandles_to_purge.insert(snapshot.metahandles_to_purge.begin(),
                                       snapshot.metahandles_to_purge.end());

  // Restore delete journals.
  delete_journal_->AddJournalBatch(&trans, snapshot.delete_journals);
  delete_journal_->PurgeDeleteJournals(&trans,
                                       snapshot.delete_journals_to_purge);
}
void Directory::GetDownloadProgress(
    ModelType model_type,
    sync_pb::DataTypeProgressMarker* value_out) const {
  ScopedKernelLock lock(this);
  return value_out->CopyFrom(
      kernel_->persisted_info.download_progress[model_type]);
}
void Directory::GetDownloadProgressAsString(
    ModelType model_type,
    std::string* value_out) const {
  ScopedKernelLock lock(this);
  kernel_->persisted_info.download_progress[model_type].SerializeToString(
      value_out);
}
size_t Directory::GetEntriesCount() const {
  ScopedKernelLock lock(this);
  return kernel_->metahandles_map.size();
}
void Directory::SetDownloadProgress(
    ModelType model_type,
    const sync_pb::DataTypeProgressMarker& new_progress) {
  ScopedKernelLock lock(this);
  kernel_->persisted_info.download_progress[model_type].CopyFrom(new_progress);
  kernel_->info_status = KERNEL_SHARE_INFO_DIRTY;
}
bool Directory::HasEmptyDownloadProgress(ModelType type) const {
  ScopedKernelLock lock(this);
  return kernel_->persisted_info.HasEmptyDownloadProgress(type);
}
int64 Directory::GetTransactionVersion(ModelType type) const {
  kernel_->transaction_mutex.AssertAcquired();
  return kernel_->persisted_info.transaction_version[type];
}
void Directory::IncrementTransactionVersion(ModelType type) {
  kernel_->transaction_mutex.AssertAcquired();
  kernel_->persisted_info.transaction_version[type]++;
}
void Directory::GetDataTypeContext(BaseTransaction* trans,
                                   ModelType type,
                                   sync_pb::DataTypeContext* context) const {
  ScopedKernelLock lock(this);
  context->CopyFrom(kernel_->persisted_info.datatype_context[type]);
}
void Directory::SetDataTypeContext(
    BaseWriteTransaction* trans,
    ModelType type,
    const sync_pb::DataTypeContext& context) {
  ScopedKernelLock lock(this);
  kernel_->persisted_info.datatype_context[type].CopyFrom(context);
  kernel_->info_status = KERNEL_SHARE_INFO_DIRTY;
}
// TODO(stanisc): crbug.com/438313: change these to not rely on the folders.
ModelTypeSet Directory::InitialSyncEndedTypes() {
  syncable::ReadTransaction trans(FROM_HERE, this);
  ModelTypeSet protocol_types = ProtocolTypes();
  ModelTypeSet initial_sync_ended_types;
  for (ModelTypeSet::Iterator i = protocol_types.First(); i.Good(); i.Inc()) {
    if (InitialSyncEndedForType(&trans, i.Get())) {
      initial_sync_ended_types.Put(i.Get());
    }
  }
  return initial_sync_ended_types;
}
bool Directory::InitialSyncEndedForType(ModelType type) {
  syncable::ReadTransaction trans(FROM_HERE, this);
  return InitialSyncEndedForType(&trans, type);
}
bool Directory::InitialSyncEndedForType(
    BaseTransaction* trans, ModelType type) {
  // True iff the type's root node has been received and applied.
  syncable::Entry entry(trans, syncable::GET_TYPE_ROOT, type);
  return entry.good() && entry.GetBaseVersion() != CHANGES_VERSION;
}
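
// Illustrative note (not part of the original file): CHANGES_VERSION is the
// sentinel base version given to entries that have never been committed or
// applied, so "root exists and its base version differs from the sentinel"
// is exactly "this type's initial download completed". A hypothetical check
// for bookmarks would look like:
//
//   syncable::Entry root(trans, syncable::GET_TYPE_ROOT, BOOKMARKS);
//   bool done = root.good() && root.GetBaseVersion() != CHANGES_VERSION;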
string Directory::store_birthday() const {
  ScopedKernelLock lock(this);
  return kernel_->persisted_info.store_birthday;
}
void Directory::set_store_birthday(const string& store_birthday) {
  ScopedKernelLock lock(this);
  if (kernel_->persisted_info.store_birthday == store_birthday)
    return;
  kernel_->persisted_info.store_birthday = store_birthday;
  kernel_->info_status = KERNEL_SHARE_INFO_DIRTY;
}
string Directory::bag_of_chips() const {
  ScopedKernelLock lock(this);
  return kernel_->persisted_info.bag_of_chips;
}
void Directory::set_bag_of_chips(const string& bag_of_chips) {
  ScopedKernelLock lock(this);
  if (kernel_->persisted_info.bag_of_chips == bag_of_chips)
    return;
  kernel_->persisted_info.bag_of_chips = bag_of_chips;
  kernel_->info_status = KERNEL_SHARE_INFO_DIRTY;
}
string Directory::cache_guid() const {
  // No need to lock since nothing ever writes to it after load.
  return kernel_->cache_guid;
}
NigoriHandler* Directory::GetNigoriHandler() {
  return nigori_handler_;
}
Cryptographer* Directory::GetCryptographer(const BaseTransaction* trans) {
  DCHECK_EQ(this, trans->directory());
  return cryptographer_;
}
void Directory::GetAllMetaHandles(BaseTransaction* trans,
                                  MetahandleSet* result) {
  result->clear();
  ScopedKernelLock lock(this);
  for (MetahandlesMap::iterator i = kernel_->metahandles_map.begin();
       i != kernel_->metahandles_map.end(); ++i) {
    result->insert(i->first);
  }
}
void Directory::GetUnsyncedMetaHandles(BaseTransaction* trans,
                                       Metahandles* result) {
  result->clear();
  ScopedKernelLock lock(this);
  copy(kernel_->unsynced_metahandles.begin(),
       kernel_->unsynced_metahandles.end(), back_inserter(*result));
}
int64 Directory::unsynced_entity_count() const {
  ScopedKernelLock lock(this);
  return kernel_->unsynced_metahandles.size();
}
bool Directory::TypeHasUnappliedUpdates(ModelType type) {
  ScopedKernelLock lock(this);
  return !kernel_->unapplied_update_metahandles[type].empty();
}
void Directory::GetUnappliedUpdateMetaHandles(
    BaseTransaction* trans,
    FullModelTypeSet server_types,
    std::vector<int64>* result) {
  result->clear();
  ScopedKernelLock lock(this);
  for (int i = UNSPECIFIED; i < MODEL_TYPE_COUNT; ++i) {
    const ModelType type = ModelTypeFromInt(i);
    if (server_types.Has(type)) {
      std::copy(kernel_->unapplied_update_metahandles[type].begin(),
                kernel_->unapplied_update_metahandles[type].end(),
                back_inserter(*result));
    }
  }
}
void Directory::GetMetaHandlesOfType(BaseTransaction* trans,
                                     ModelType type,
                                     std::vector<int64>* result) {
  ScopedKernelLock lock(this);
  GetMetaHandlesOfType(lock, trans, type, result);
}
void Directory::GetMetaHandlesOfType(const ScopedKernelLock& lock,
                                     BaseTransaction* trans,
                                     ModelType type,
                                     std::vector<int64>* result) {
  result->clear();
  for (MetahandlesMap::iterator it = kernel_->metahandles_map.begin();
       it != kernel_->metahandles_map.end(); ++it) {
    EntryKernel* entry = it->second;
    const ModelType entry_type =
        GetModelTypeFromSpecifics(entry->ref(SPECIFICS));
    if (entry_type == type)
      result->push_back(it->first);
  }
}
void Directory::CollectMetaHandleCounts(
    std::vector<int>* num_entries_by_type,
    std::vector<int>* num_to_delete_entries_by_type) {
  syncable::ReadTransaction trans(FROM_HERE, this);
  ScopedKernelLock lock(this);

  for (MetahandlesMap::iterator it = kernel_->metahandles_map.begin();
       it != kernel_->metahandles_map.end(); ++it) {
    EntryKernel* entry = it->second;
    const ModelType type = GetModelTypeFromSpecifics(entry->ref(SPECIFICS));
    (*num_entries_by_type)[type]++;
    if (entry->ref(IS_DEL))
      (*num_to_delete_entries_by_type)[type]++;
  }
}
scoped_ptr<base::ListValue> Directory::GetNodeDetailsForType(
    BaseTransaction* trans,
    ModelType type) {
  scoped_ptr<base::ListValue> nodes(new base::ListValue());

  ScopedKernelLock lock(this);
  for (MetahandlesMap::iterator it = kernel_->metahandles_map.begin();
       it != kernel_->metahandles_map.end(); ++it) {
    if (GetModelTypeFromSpecifics(it->second->ref(SPECIFICS)) != type) {
      continue;
    }

    EntryKernel* kernel = it->second;
    scoped_ptr<base::DictionaryValue> node(
        kernel->ToValue(GetCryptographer(trans)));

    // Add the position index if appropriate. This must be done here (and not
    // in EntryKernel) because the EntryKernel does not have access to its
    // siblings.
    if (kernel->ShouldMaintainPosition() && !kernel->ref(IS_DEL)) {
      node->SetInteger("positionIndex", GetPositionIndex(trans, kernel));
    }

    nodes->Append(node.release());
  }

  return nodes.Pass();
}
bool Directory::CheckInvariantsOnTransactionClose(
    syncable::BaseTransaction* trans,
    const MetahandleSet& modified_handles) {
  // NOTE: The trans may be in the process of being destructed. Be careful if
  // you wish to call any of its virtual methods.
  switch (invariant_check_level_) {
    case FULL_DB_VERIFICATION: {
      MetahandleSet all_handles;
      GetAllMetaHandles(trans, &all_handles);
      return CheckTreeInvariants(trans, all_handles);
    }
    case VERIFY_CHANGES: {
      return CheckTreeInvariants(trans, modified_handles);
    }
    case OFF: {
      return true;
    }
  }
  NOTREACHED();
  return false;
}
bool Directory::FullyCheckTreeInvariants(syncable::BaseTransaction* trans) {
  MetahandleSet handles;
  GetAllMetaHandles(trans, &handles);
  return CheckTreeInvariants(trans, handles);
}
bool Directory::CheckTreeInvariants(syncable::BaseTransaction* trans,
                                    const MetahandleSet& handles) {
  MetahandleSet::const_iterator i;
  for (i = handles.begin() ; i != handles.end() ; ++i) {
    int64 metahandle = *i;
    Entry e(trans, GET_BY_HANDLE, metahandle);
    if (!SyncAssert(e.good(), FROM_HERE, "Entry is bad", trans))
      return false;
    syncable::Id id = e.GetId();
    syncable::Id parentid = e.GetParentId();

    if (id.IsRoot()) {
      if (!SyncAssert(e.GetIsDir(), FROM_HERE,
                      "Entry should be a directory",
                      trans))
        return false;
      if (!SyncAssert(parentid.IsRoot(), FROM_HERE,
                      "Entry should be root",
                      trans))
        return false;
      if (!SyncAssert(!e.GetIsUnsynced(), FROM_HERE, "Entry should be synced",
                      trans))
        return false;
      continue;
    }

    if (!e.GetIsDel()) {
      if (!SyncAssert(id != parentid, FROM_HERE,
                      "Id should be different from parent id.",
                      trans))
        return false;
      if (!SyncAssert(!e.GetNonUniqueName().empty(), FROM_HERE,
                      "Non unique name should not be empty.",
                      trans))
        return false;

      if (!parentid.IsNull()) {
        int safety_count = handles.size() + 1;
        while (!parentid.IsRoot()) {
          Entry parent(trans, GET_BY_ID, parentid);
          if (!SyncAssert(parent.good(), FROM_HERE,
                          "Parent entry is not valid.", trans))
            return false;
          if (handles.end() == handles.find(parent.GetMetahandle()))
            break;  // Skip further checking if parent was unmodified.
          if (!SyncAssert(parent.GetIsDir(), FROM_HERE,
                          "Parent should be a directory", trans))
            return false;
          if (!SyncAssert(!parent.GetIsDel(), FROM_HERE,
                          "Parent should not have been marked for deletion.",
                          trans))
            return false;
          if (!SyncAssert(handles.end() != handles.find(parent.GetMetahandle()),
                          FROM_HERE, "Parent should be in the index.", trans))
            return false;
          parentid = parent.GetParentId();
          if (!SyncAssert(--safety_count > 0, FROM_HERE,
                          "Count should be greater than zero.", trans))
            return false;
        }
      }
    }

    int64 base_version = e.GetBaseVersion();
    int64 server_version = e.GetServerVersion();
    bool using_unique_client_tag = !e.GetUniqueClientTag().empty();
    if (CHANGES_VERSION == base_version || 0 == base_version) {
      ModelType model_type = e.GetModelType();
      bool is_client_creatable_type_root_folder =
          parentid.IsRoot() &&
          IsTypeWithClientGeneratedRoot(model_type) &&
          e.GetUniqueServerTag() == ModelTypeToRootTag(model_type);
      if (e.GetIsUnappliedUpdate()) {
        // Must be a new item, or a de-duplicated unique client tag
        // that was created both locally and remotely, or a type root folder
        // that was created both locally and remotely.
        if (!(using_unique_client_tag ||
              is_client_creatable_type_root_folder)) {
          if (!SyncAssert(e.GetIsDel(), FROM_HERE,
                          "The entry should have been deleted.", trans))
            return false;
        }
        // It came from the server, so it must have a server ID.
        if (!SyncAssert(id.ServerKnows(), FROM_HERE,
                        "The id should be from a server.",
                        trans))
          return false;
      } else {
        if (e.GetIsDir()) {
          // TODO(chron): Implement this mode if clients ever need it.
          // For now, you can't combine a client tag and a directory.
          if (!SyncAssert(!using_unique_client_tag, FROM_HERE,
                          "Directory cannot have a client tag.",
                          trans))
            return false;
        }

        if (is_client_creatable_type_root_folder) {
          // This must be a locally created type root folder.
          if (!SyncAssert(
                  !e.GetIsUnsynced(), FROM_HERE,
                  "Locally created type root folders should not be unsynced.",
                  trans))
            return false;

          if (!SyncAssert(
                  !e.GetIsDel(), FROM_HERE,
                  "Locally created type root folders should not be deleted.",
                  trans))
            return false;
        } else {
          // Should be an uncommitted item, or a successfully deleted one.
          if (!e.GetIsDel()) {
            if (!SyncAssert(e.GetIsUnsynced(), FROM_HERE,
                            "The item should be unsynced.", trans))
              return false;
          }
        }

        // If the next check failed, it would imply that an item exists
        // on the server, isn't waiting for application locally, but either
        // is an unsynced create or a successful delete in the local copy.
        // Either way, that's a mismatch.
        if (!SyncAssert(0 == server_version, FROM_HERE,
                        "Server version should be zero.",
                        trans))
          return false;
        // Items that aren't using the unique client tag should have a zero
        // base version only if they have a local ID. Items with unique client
        // tags are allowed to use the zero base version for undeletion and
        // de-duplication; the unique client tag trumps the server ID.
        if (!using_unique_client_tag) {
          if (!SyncAssert(!id.ServerKnows(), FROM_HERE,
                          "Should be a client only id.",
                          trans))
            return false;
        }
      }
    } else {
      if (!SyncAssert(id.ServerKnows(),
                      FROM_HERE,
                      "Should be a server id.",
                      trans))
        return false;
    }

    // Previously we would assert that locally deleted items that have never
    // been synced must not be sent to the server (IS_UNSYNCED must be false).
    // This is not always true in the case that an item is deleted while the
    // initial commit is in flight. See crbug.com/426865.
  }
  return true;
}
void Directory::SetInvariantCheckLevel(InvariantCheckLevel check_level) {
  invariant_check_level_ = check_level;
}
int64 Directory::NextMetahandle() {
  ScopedKernelLock lock(this);
  int64 metahandle = (kernel_->next_metahandle)++;
  return metahandle;
}
// Always returns a client ID that is the string representation of a negative
// number.
Id Directory::NextId() {
  int64 result;
  {
    ScopedKernelLock lock(this);
    result = (kernel_->persisted_info.next_id)--;
    kernel_->info_status = KERNEL_SHARE_INFO_DIRTY;
  }
  DCHECK_LT(result, 0);
  return Id::CreateFromClientString(base::Int64ToString(result));
}
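
// Illustrative example (not part of the original file): if
// persisted_info.next_id is -5, a hypothetical call to NextId() returns the
// client-only id built from "-5" and leaves next_id at -6, so locally
// created ids are always negative strings until the server assigns real ids:
//
//   Id id = dir->NextId();  // e.g. Id::CreateFromClientString("-5")
//   DCHECK(!id.ServerKnows());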
bool Directory::HasChildren(BaseTransaction* trans, const Id& id) {
  ScopedKernelLock lock(this);
  return kernel_->parent_child_index.GetChildren(id) != NULL;
}
Id Directory::GetFirstChildId(BaseTransaction* trans,
                              const EntryKernel* parent) {
  DCHECK(parent);
  DCHECK(parent->ref(IS_DIR));

  ScopedKernelLock lock(this);
  const OrderedChildSet* children =
      kernel_->parent_child_index.GetChildren(parent->ref(ID));

  // We're expected to return root if there are no children.
  if (!children)
    return Id();

  return (*children->begin())->ref(ID);
}
syncable::Id Directory::GetPredecessorId(EntryKernel* e) {
  ScopedKernelLock lock(this);

  DCHECK(ParentChildIndex::ShouldInclude(e));
  const OrderedChildSet* siblings = kernel_->parent_child_index.GetSiblings(e);
  OrderedChildSet::const_iterator i = siblings->find(e);
  DCHECK(i != siblings->end());

  if (i == siblings->begin()) {
    return Id();
  } else {
    i--;
    return (*i)->ref(ID);
  }
}
syncable::Id Directory::GetSuccessorId(EntryKernel* e) {
  ScopedKernelLock lock(this);

  DCHECK(ParentChildIndex::ShouldInclude(e));
  const OrderedChildSet* siblings = kernel_->parent_child_index.GetSiblings(e);
  OrderedChildSet::const_iterator i = siblings->find(e);
  DCHECK(i != siblings->end());

  i++;
  if (i == siblings->end()) {
    return Id();
  } else {
    return (*i)->ref(ID);
  }
}
// TODO(rlarocque): Remove all support for placing ShouldMaintainPosition()
// items as siblings of items that do not maintain positions. It is required
// only for tests. See crbug.com/178282.
void Directory::PutPredecessor(EntryKernel* e, EntryKernel* predecessor) {
  DCHECK(!e->ref(IS_DEL));
  if (!e->ShouldMaintainPosition()) {
    DCHECK(!e->ref(UNIQUE_POSITION).IsValid());
    return;
  }
  std::string suffix = e->ref(UNIQUE_BOOKMARK_TAG);
  DCHECK(!suffix.empty());

  // Remove our item from the ParentChildIndex and remember to re-add it later.
  ScopedKernelLock lock(this);
  ScopedParentChildIndexUpdater updater(lock, e, &kernel_->parent_child_index);

  // Note: The ScopedParentChildIndexUpdater will update this set for us as we
  // leave this function.
  const OrderedChildSet* siblings =
      kernel_->parent_child_index.GetChildren(e->ref(PARENT_ID));

  if (!siblings) {
    // This parent currently has no other children.
    DCHECK(predecessor == NULL);
    UniquePosition pos = UniquePosition::InitialPosition(suffix);
    e->put(UNIQUE_POSITION, pos);
    return;
  }

  if (predecessor == NULL) {
    // We have at least one sibling, and we're inserting to the left of them.
    UniquePosition successor_pos = (*siblings->begin())->ref(UNIQUE_POSITION);

    UniquePosition pos;
    if (!successor_pos.IsValid()) {
      // If all our successors are of non-positionable types, just create an
      // initial position. We arbitrarily choose to sort invalid positions to
      // the right of the valid positions.
      //
      // We really shouldn't need to support this. See TODO above.
      pos = UniquePosition::InitialPosition(suffix);
    } else {
      DCHECK(!siblings->empty());
      pos = UniquePosition::Before(successor_pos, suffix);
    }

    e->put(UNIQUE_POSITION, pos);
    return;
  }

  // We can't support placing an item after an invalid position. Fortunately,
  // the tests don't exercise this particular case. We should not support
  // siblings with invalid positions at all. See TODO above.
  DCHECK(predecessor->ref(UNIQUE_POSITION).IsValid());

  OrderedChildSet::const_iterator neighbour = siblings->find(predecessor);
  DCHECK(neighbour != siblings->end());

  ++neighbour;
  if (neighbour == siblings->end()) {
    // Inserting at the end of the list.
    UniquePosition pos = UniquePosition::After(
        predecessor->ref(UNIQUE_POSITION),
        suffix);
    e->put(UNIQUE_POSITION, pos);
    return;
  }

  EntryKernel* successor = *neighbour;

  // Another mixed valid and invalid position case. This one could be supported
  // in theory, but we're trying to deprecate support for siblings with and
  // without valid positions. See TODO above.
  DCHECK(successor->ref(UNIQUE_POSITION).IsValid());

  // Finally, the normal case: inserting between two elements.
  UniquePosition pos = UniquePosition::Between(
      predecessor->ref(UNIQUE_POSITION),
      successor->ref(UNIQUE_POSITION),
      suffix);
  e->put(UNIQUE_POSITION, pos);
}
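
// Illustrative summary (not part of the original file) of the four cases
// above, writing pos(x) for x->ref(UNIQUE_POSITION):
//
//   no siblings            -> InitialPosition(suffix)
//   predecessor == NULL    -> Before(pos(first_sibling), suffix)
//   predecessor is last    -> After(pos(predecessor), suffix)
//   otherwise              -> Between(pos(predecessor), pos(successor), suffix)
//
// Every case feeds the entry's own UNIQUE_BOOKMARK_TAG as the suffix, which
// keeps generated positions unique per entry.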
// TODO(rlarocque): Avoid this indirection. Just return the set.
void Directory::AppendChildHandles(const ScopedKernelLock& lock,
                                   const Id& parent_id,
                                   Directory::Metahandles* result) {
  const OrderedChildSet* children =
      kernel_->parent_child_index.GetChildren(parent_id);
  if (!children)
    return;

  for (OrderedChildSet::const_iterator i = children->begin();
       i != children->end(); ++i) {
    result->push_back((*i)->ref(META_HANDLE));
  }
}
void Directory::UnmarkDirtyEntry(WriteTransaction* trans, Entry* entry) {
  CHECK(trans);
  entry->kernel_->clear_dirty(&kernel_->dirty_metahandles);
}
void Directory::GetAttachmentIdsToUpload(BaseTransaction* trans,
                                         ModelType type,
                                         AttachmentIdList* ids) {
  // TODO(maniscalco): Maintain an index by ModelType and rewrite this method
  // to use it. The approach below is likely very expensive because it
  // iterates all entries (bug 415199).
  DCHECK(trans);
  DCHECK(ids);
  ids->clear();
  AttachmentIdSet on_server_id_set;
  AttachmentIdSet not_on_server_id_set;
  std::vector<int64> metahandles;
  {
    ScopedKernelLock lock(this);
    GetMetaHandlesOfType(lock, trans, type, &metahandles);
    std::vector<int64>::const_iterator iter = metahandles.begin();
    const std::vector<int64>::const_iterator end = metahandles.end();
    // For all of this type's entries...
    for (; iter != end; ++iter) {
      EntryKernel* entry = GetEntryByHandle(lock, *iter);
      DCHECK(entry);
      const sync_pb::AttachmentMetadata metadata =
          entry->ref(ATTACHMENT_METADATA);
      // for each of this entry's attachments...
      for (int i = 0; i < metadata.record_size(); ++i) {
        AttachmentId id =
            AttachmentId::CreateFromProto(metadata.record(i).id());
        // if this attachment is known to be on the server, remember it for
        // later,
        if (metadata.record(i).is_on_server()) {
          on_server_id_set.insert(id);
        } else {
          // otherwise, add it to id_set.
          not_on_server_id_set.insert(id);
        }
      }
    }
  }
  // Why did we bother keeping a set of ids known to be on the server? The
  // is_on_server flag is stored denormalized so we can end up with two entries
  // with the same attachment id where one says it's on the server and the
  // other says it's not. When this happens, we trust the one that says it's
  // on the server. To avoid re-uploading the same attachment multiple times,
  // we remove any ids known to be on the server from the id_set we are about
  // to upload.
  //
  // TODO(maniscalco): Eliminate redundant metadata storage (bug 415203).
  std::set_difference(not_on_server_id_set.begin(), not_on_server_id_set.end(),
                      on_server_id_set.begin(), on_server_id_set.end(),
                      std::back_inserter(*ids));
}
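
// Illustrative sketch (not part of the original file) of the dedupe step
// above: std::set_difference keeps only the ids never marked is_on_server,
// so an id that appears in both sets is dropped from the upload list:
//
//   not_on_server = {A, B, C}, on_server = {B}
//   set_difference(not_on_server, on_server) -> ids = {A, C}
//
// Both inputs are std::sets of AttachmentId, which satisfies the sorted-range
// precondition of std::set_difference.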
Directory::Kernel* Directory::kernel() {
  return kernel_;
}

const Directory::Kernel* Directory::kernel() const {
  return kernel_;
}
}  // namespace syncable
}  // namespace syncer