// Copyright 2013 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "sync/syncable/directory.h"

#include <algorithm>
#include <deque>
#include <iterator>
#include <set>

#include "base/base64.h"
#include "base/debug/trace_event.h"
#include "base/stl_util.h"
#include "base/strings/string_number_conversions.h"
#include "sync/internal_api/public/base/unique_position.h"
#include "sync/internal_api/public/util/unrecoverable_error_handler.h"
#include "sync/syncable/entry.h"
#include "sync/syncable/entry_kernel.h"
#include "sync/syncable/in_memory_directory_backing_store.h"
#include "sync/syncable/on_disk_directory_backing_store.h"
#include "sync/syncable/scoped_kernel_lock.h"
#include "sync/syncable/scoped_parent_child_index_updater.h"
#include "sync/syncable/syncable-inl.h"
#include "sync/syncable/syncable_base_transaction.h"
#include "sync/syncable/syncable_changes_version.h"
#include "sync/syncable/syncable_read_transaction.h"
#include "sync/syncable/syncable_util.h"
#include "sync/syncable/syncable_write_transaction.h"

using std::string;

namespace syncer {
namespace syncable {

const base::FilePath::CharType Directory::kSyncDatabaseFilename[] =
    FILE_PATH_LITERAL("SyncData.sqlite3");

Directory::PersistedKernelInfo::PersistedKernelInfo()
    : next_id(0) {
  ModelTypeSet protocol_types = ProtocolTypes();
  for (ModelTypeSet::Iterator iter = protocol_types.First(); iter.Good();
       iter.Inc()) {
    reset_download_progress(iter.Get());
    transaction_version[iter.Get()] = 0;
  }
}

Directory::PersistedKernelInfo::~PersistedKernelInfo() {}

void Directory::PersistedKernelInfo::reset_download_progress(
    ModelType model_type) {
  download_progress[model_type].set_data_type_id(
      GetSpecificsFieldNumberFromModelType(model_type));
  // An empty-string token indicates no prior knowledge.
  download_progress[model_type].set_token(std::string());
}

Directory::SaveChangesSnapshot::SaveChangesSnapshot()
    : kernel_info_status(KERNEL_SHARE_INFO_INVALID) {
}

Directory::SaveChangesSnapshot::~SaveChangesSnapshot() {
  STLDeleteElements(&dirty_metas);
  STLDeleteElements(&delete_journals);
}
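
// The dirty_metas and delete_journals sets own deep copies of the entries
// (see TakeSnapshotForSaveChanges(), which inserts new EntryKernel(*entry)
// copies), which is why the destructor above must delete their elements
// explicitly.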

Directory::Kernel::Kernel(
    const std::string& name,
    const KernelLoadInfo& info, DirectoryChangeDelegate* delegate,
    const WeakHandle<TransactionObserver>& transaction_observer)
    : next_write_transaction_id(0),
      name(name),
      info_status(Directory::KERNEL_SHARE_INFO_VALID),
      persisted_info(info.kernel_info),
      cache_guid(info.cache_guid),
      next_metahandle(info.max_metahandle + 1),
      delegate(delegate),
      transaction_observer(transaction_observer) {
  DCHECK(delegate);
  DCHECK(transaction_observer.IsInitialized());
}

Directory::Kernel::~Kernel() {
  STLDeleteContainerPairSecondPointers(metahandles_map.begin(),
                                       metahandles_map.end());
}

Directory::Directory(
    DirectoryBackingStore* store,
    UnrecoverableErrorHandler* unrecoverable_error_handler,
    ReportUnrecoverableErrorFunction report_unrecoverable_error_function,
    NigoriHandler* nigori_handler,
    Cryptographer* cryptographer)
    : kernel_(NULL),
      store_(store),
      unrecoverable_error_handler_(unrecoverable_error_handler),
      report_unrecoverable_error_function_(
          report_unrecoverable_error_function),
      unrecoverable_error_set_(false),
      nigori_handler_(nigori_handler),
      cryptographer_(cryptographer),
      invariant_check_level_(VERIFY_CHANGES) {
}

Directory::~Directory() {
}

DirOpenResult Directory::Open(
    const string& name,
    DirectoryChangeDelegate* delegate,
    const WeakHandle<TransactionObserver>& transaction_observer) {
  TRACE_EVENT0("sync", "SyncDatabaseOpen");

  const DirOpenResult result =
      OpenImpl(name, delegate, transaction_observer);

  if (OPENED != result)
    Close();

  return result;
}

void Directory::InitializeIndices(MetahandlesMap* handles_map) {
  kernel_->metahandles_map.swap(*handles_map);
  for (MetahandlesMap::const_iterator it = kernel_->metahandles_map.begin();
       it != kernel_->metahandles_map.end(); ++it) {
    EntryKernel* entry = it->second;
    if (ParentChildIndex::ShouldInclude(entry))
      kernel_->parent_child_index.Insert(entry);
    const int64 metahandle = entry->ref(META_HANDLE);
    if (entry->ref(IS_UNSYNCED))
      kernel_->unsynced_metahandles.insert(metahandle);
    if (entry->ref(IS_UNAPPLIED_UPDATE)) {
      const ModelType type = entry->GetServerModelType();
      kernel_->unapplied_update_metahandles[type].insert(metahandle);
    }
    if (!entry->ref(UNIQUE_SERVER_TAG).empty()) {
      DCHECK(kernel_->server_tags_map.find(entry->ref(UNIQUE_SERVER_TAG)) ==
             kernel_->server_tags_map.end())
          << "Unexpected duplicate use of server tag";
      kernel_->server_tags_map[entry->ref(UNIQUE_SERVER_TAG)] = entry;
    }
    if (!entry->ref(UNIQUE_CLIENT_TAG).empty()) {
      DCHECK(kernel_->client_tags_map.find(entry->ref(UNIQUE_CLIENT_TAG)) ==
             kernel_->client_tags_map.end())
          << "Unexpected duplicate use of client tag";
      kernel_->client_tags_map[entry->ref(UNIQUE_CLIENT_TAG)] = entry;
    }
    DCHECK(kernel_->ids_map.find(entry->ref(ID).value()) ==
           kernel_->ids_map.end()) << "Unexpected duplicate use of ID";
    kernel_->ids_map[entry->ref(ID).value()] = entry;
    DCHECK(!entry->is_dirty());
  }
}

DirOpenResult Directory::OpenImpl(
    const string& name,
    DirectoryChangeDelegate* delegate,
    const WeakHandle<TransactionObserver>&
        transaction_observer) {
  KernelLoadInfo info;
  // Temporary indices before kernel_ initialized in case Load fails. We O(1)
  // swap these later.
  Directory::MetahandlesMap tmp_handles_map;
  JournalIndex delete_journals;

  DirOpenResult result =
      store_->Load(&tmp_handles_map, &delete_journals, &info);
  if (OPENED != result)
    return result;

  kernel_ = new Kernel(name, info, delegate, transaction_observer);
  delete_journal_.reset(new DeleteJournal(&delete_journals));
  InitializeIndices(&tmp_handles_map);

  // Write back the share info to reserve some space in 'next_id'. This will
  // prevent local ID reuse in the case of an early crash. See the comments in
  // TakeSnapshotForSaveChanges() or crbug.com/142987 for more information.
  kernel_->info_status = KERNEL_SHARE_INFO_DIRTY;
  if (!SaveChanges())
    return FAILED_INITIAL_WRITE;

  return OPENED;
}

DeleteJournal* Directory::delete_journal() {
  DCHECK(delete_journal_.get());
  return delete_journal_.get();
}

void Directory::Close() {
  store_.reset();
  if (kernel_) {
    delete kernel_;
    kernel_ = NULL;
  }
}

void Directory::OnUnrecoverableError(const BaseTransaction* trans,
                                     const tracked_objects::Location& location,
                                     const std::string& message) {
  DCHECK(trans != NULL);
  unrecoverable_error_set_ = true;
  unrecoverable_error_handler_->OnUnrecoverableError(location,
                                                     message);
}

EntryKernel* Directory::GetEntryById(const Id& id) {
  ScopedKernelLock lock(this);
  return GetEntryById(id, &lock);
}

EntryKernel* Directory::GetEntryById(const Id& id,
                                     ScopedKernelLock* const lock) {
  DCHECK(kernel_);
  // Find it in the in memory ID index.
  IdsMap::iterator id_found = kernel_->ids_map.find(id.value());
  if (id_found != kernel_->ids_map.end()) {
    return id_found->second;
  }
  return NULL;
}

EntryKernel* Directory::GetEntryByClientTag(const string& tag) {
  ScopedKernelLock lock(this);
  DCHECK(kernel_);

  TagsMap::iterator it = kernel_->client_tags_map.find(tag);
  if (it != kernel_->client_tags_map.end()) {
    return it->second;
  }
  return NULL;
}

EntryKernel* Directory::GetEntryByServerTag(const string& tag) {
  ScopedKernelLock lock(this);
  DCHECK(kernel_);
  TagsMap::iterator it = kernel_->server_tags_map.find(tag);
  if (it != kernel_->server_tags_map.end()) {
    return it->second;
  }
  return NULL;
}

EntryKernel* Directory::GetEntryByHandle(int64 metahandle) {
  ScopedKernelLock lock(this);
  return GetEntryByHandle(metahandle, &lock);
}

EntryKernel* Directory::GetEntryByHandle(int64 metahandle,
                                         ScopedKernelLock* lock) {
  // Look up in memory.
  MetahandlesMap::iterator found =
      kernel_->metahandles_map.find(metahandle);
  if (found != kernel_->metahandles_map.end()) {
    // Found it in memory. Easy.
    return found->second;
  }
  return NULL;
}

bool Directory::GetChildHandlesById(
    BaseTransaction* trans, const Id& parent_id,
    Directory::Metahandles* result) {
  if (!SyncAssert(this == trans->directory(), FROM_HERE,
                  "Directories don't match", trans))
    return false;
  result->clear();

  ScopedKernelLock lock(this);
  AppendChildHandles(lock, parent_id, result);
  return true;
}

bool Directory::GetChildHandlesByHandle(
    BaseTransaction* trans, int64 handle,
    Directory::Metahandles* result) {
  if (!SyncAssert(this == trans->directory(), FROM_HERE,
                  "Directories don't match", trans))
    return false;

  result->clear();

  ScopedKernelLock lock(this);
  EntryKernel* kernel = GetEntryByHandle(handle, &lock);
  if (!kernel)
    return true;

  AppendChildHandles(lock, kernel->ref(ID), result);
  return true;
}

int Directory::GetTotalNodeCount(
    BaseTransaction* trans,
    EntryKernel* kernel) const {
  if (!SyncAssert(this == trans->directory(), FROM_HERE,
                  "Directories don't match", trans))
    return false;

  int count = 1;
  std::deque<const OrderedChildSet*> child_sets;

  GetChildSetForKernel(trans, kernel, &child_sets);
  while (!child_sets.empty()) {
    const OrderedChildSet* set = child_sets.front();
    child_sets.pop_front();
    for (OrderedChildSet::const_iterator it = set->begin();
         it != set->end(); ++it) {
      count++;
      GetChildSetForKernel(trans, *it, &child_sets);
    }
  }

  return count;
}
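
// Note on the traversal above: the deque of OrderedChildSets acts as a FIFO
// work list, so the subtree is walked breadth-first without recursion, and
// the returned count includes the starting node itself (count starts at 1).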

void Directory::GetChildSetForKernel(
    BaseTransaction* trans,
    EntryKernel* kernel,
    std::deque<const OrderedChildSet*>* child_sets) const {
  if (!kernel->ref(IS_DIR))
    return;  // Not a directory => no children.

  const OrderedChildSet* descendants =
      kernel_->parent_child_index.GetChildren(kernel->ref(ID));
  if (!descendants)
    return;  // This directory has no children.

  // Add our children to the list of items to be traversed.
  child_sets->push_back(descendants);
}

int Directory::GetPositionIndex(
    BaseTransaction* trans,
    EntryKernel* kernel) const {
  const OrderedChildSet* siblings =
      kernel_->parent_child_index.GetChildren(kernel->ref(PARENT_ID));

  OrderedChildSet::const_iterator it = siblings->find(kernel);
  return std::distance(siblings->begin(), it);
}
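
// Note: assuming OrderedChildSet exposes set-like (bidirectional) iterators,
// the std::distance() call above is linear in the number of siblings, not
// constant time.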

EntryKernel* Directory::GetRootEntry() {
  return GetEntryById(Id());
}

bool Directory::InsertEntry(BaseWriteTransaction* trans, EntryKernel* entry) {
  ScopedKernelLock lock(this);
  return InsertEntry(trans, entry, &lock);
}

bool Directory::InsertEntry(BaseWriteTransaction* trans,
                            EntryKernel* entry,
                            ScopedKernelLock* lock) {
  DCHECK(NULL != lock);
  if (!SyncAssert(NULL != entry, FROM_HERE, "Entry is null", trans))
    return false;

  static const char error[] = "Entry already in memory index.";

  if (!SyncAssert(
          kernel_->metahandles_map.insert(
              std::make_pair(entry->ref(META_HANDLE), entry)).second,
          FROM_HERE,
          error,
          trans)) {
    return false;
  }

  if (!SyncAssert(
          kernel_->ids_map.insert(
              std::make_pair(entry->ref(ID).value(), entry)).second,
          FROM_HERE,
          error,
          trans)) {
    return false;
  }
  if (ParentChildIndex::ShouldInclude(entry)) {
    if (!SyncAssert(kernel_->parent_child_index.Insert(entry),
                    FROM_HERE,
                    error,
                    trans)) {
      return false;
    }
  }

  // Should NEVER be created with a client tag or server tag.
  if (!SyncAssert(entry->ref(UNIQUE_SERVER_TAG).empty(), FROM_HERE,
                  "Server tag should be empty", trans)) {
    return false;
  }
  if (!SyncAssert(entry->ref(UNIQUE_CLIENT_TAG).empty(), FROM_HERE,
                  "Client tag should be empty", trans))
    return false;

  return true;
}

bool Directory::ReindexId(BaseWriteTransaction* trans,
                          EntryKernel* const entry,
                          const Id& new_id) {
  ScopedKernelLock lock(this);
  if (NULL != GetEntryById(new_id, &lock))
    return false;

  {
    // Update the indices that depend on the ID field.
    ScopedParentChildIndexUpdater updater_b(lock, entry,
        &kernel_->parent_child_index);
    size_t num_erased = kernel_->ids_map.erase(entry->ref(ID).value());
    DCHECK_EQ(1U, num_erased);
    entry->put(ID, new_id);
    kernel_->ids_map[entry->ref(ID).value()] = entry;
  }
  return true;
}

bool Directory::ReindexParentId(BaseWriteTransaction* trans,
                                EntryKernel* const entry,
                                const Id& new_parent_id) {
  ScopedKernelLock lock(this);

  {
    // Update the indices that depend on the PARENT_ID field.
    ScopedParentChildIndexUpdater index_updater(lock, entry,
        &kernel_->parent_child_index);
    entry->put(PARENT_ID, new_parent_id);
  }
  return true;
}

bool Directory::unrecoverable_error_set(const BaseTransaction* trans) const {
  DCHECK(trans != NULL);
  return unrecoverable_error_set_;
}

void Directory::ClearDirtyMetahandles() {
  kernel_->transaction_mutex.AssertAcquired();
  kernel_->dirty_metahandles.clear();
}

bool Directory::SafeToPurgeFromMemory(WriteTransaction* trans,
                                      const EntryKernel* const entry) const {
  bool safe = entry->ref(IS_DEL) && !entry->is_dirty() &&
      !entry->ref(SYNCING) && !entry->ref(IS_UNAPPLIED_UPDATE) &&
      !entry->ref(IS_UNSYNCED);

  if (safe) {
    int64 handle = entry->ref(META_HANDLE);
    const ModelType type = entry->GetServerModelType();
    if (!SyncAssert(kernel_->dirty_metahandles.count(handle) == 0U,
                    FROM_HERE,
                    "Dirty metahandles should be empty", trans))
      return false;
    // TODO(tim): Bug 49278.
    if (!SyncAssert(!kernel_->unsynced_metahandles.count(handle),
                    FROM_HERE,
                    "Unsynced handles should be empty",
                    trans))
      return false;
    if (!SyncAssert(!kernel_->unapplied_update_metahandles[type].count(handle),
                    FROM_HERE,
                    "Unapplied metahandles should be empty",
                    trans))
      return false;
  }

  return safe;
}

void Directory::TakeSnapshotForSaveChanges(SaveChangesSnapshot* snapshot) {
  ReadTransaction trans(FROM_HERE, this);
  ScopedKernelLock lock(this);

  // If there is an unrecoverable error then just bail out.
  if (unrecoverable_error_set(&trans))
    return;

  // Deep copy dirty entries from kernel_->metahandles_index into snapshot and
  // clear dirty flags.
  for (MetahandleSet::const_iterator i = kernel_->dirty_metahandles.begin();
       i != kernel_->dirty_metahandles.end(); ++i) {
    EntryKernel* entry = GetEntryByHandle(*i, &lock);
    if (!entry)
      continue;
    // Skip over false positives; it happens relatively infrequently.
    if (!entry->is_dirty())
      continue;
    snapshot->dirty_metas.insert(snapshot->dirty_metas.end(),
                                 new EntryKernel(*entry));
    DCHECK_EQ(1U, kernel_->dirty_metahandles.count(*i));
    // We don't bother removing from the index here as we blow the entire thing
    // in a moment, and it unnecessarily complicates iteration.
    entry->clear_dirty(NULL);
  }
  ClearDirtyMetahandles();

  // Set purged handles.
  DCHECK(snapshot->metahandles_to_purge.empty());
  snapshot->metahandles_to_purge.swap(kernel_->metahandles_to_purge);

  // Fill kernel_info_status and kernel_info.
  snapshot->kernel_info = kernel_->persisted_info;
  // To avoid duplicates when the process crashes, we record the next_id to be
  // of greater magnitude than could possibly be reached before the next save
  // changes. In other words, it's effectively impossible for the user to
  // generate 65536 new bookmarks in 3 seconds.
  snapshot->kernel_info.next_id -= 65536;
  snapshot->kernel_info_status = kernel_->info_status;
  // This one we reset on failure.
  kernel_->info_status = KERNEL_SHARE_INFO_VALID;

  delete_journal_->TakeSnapshotAndClear(
      &trans, &snapshot->delete_journals, &snapshot->delete_journals_to_purge);
}
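
// Worked example of the next_id reservation above (illustrative values): if
// the in-memory next_id is -1000 when the snapshot is taken, -66536 is what
// gets persisted. Local IDs keep decrementing in memory (-1001, -1002, ...),
// so even if we crash before the next successful save, a reloaded directory
// resumes from -66536 and cannot re-issue an ID handed out in the lost
// window, provided fewer than 65536 IDs were assigned in that window.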

bool Directory::SaveChanges() {
  bool success = false;

  base::AutoLock scoped_lock(kernel_->save_changes_mutex);

  // Snapshot and save.
  SaveChangesSnapshot snapshot;
  TakeSnapshotForSaveChanges(&snapshot);
  success = store_->SaveChanges(snapshot);

  // Handle success or failure.
  if (success)
    success = VacuumAfterSaveChanges(snapshot);
  else
    HandleSaveChangesFailure(snapshot);
  return success;
}

bool Directory::VacuumAfterSaveChanges(const SaveChangesSnapshot& snapshot) {
  if (snapshot.dirty_metas.empty())
    return true;

  // Need a write transaction as we are about to permanently purge entries.
  WriteTransaction trans(FROM_HERE, VACUUM_AFTER_SAVE, this);
  ScopedKernelLock lock(this);
  // Now drop everything we can out of memory.
  for (EntryKernelSet::const_iterator i = snapshot.dirty_metas.begin();
       i != snapshot.dirty_metas.end(); ++i) {
    MetahandlesMap::iterator found =
        kernel_->metahandles_map.find((*i)->ref(META_HANDLE));
    EntryKernel* entry = (found == kernel_->metahandles_map.end() ?
                          NULL : found->second);
    if (entry && SafeToPurgeFromMemory(&trans, entry)) {
      // We now drop deleted metahandles that are up to date on both the client
      // and the server.
      size_t num_erased = 0;
      num_erased = kernel_->metahandles_map.erase(entry->ref(META_HANDLE));
      DCHECK_EQ(1u, num_erased);
      num_erased = kernel_->ids_map.erase(entry->ref(ID).value());
      DCHECK_EQ(1u, num_erased);
      if (!entry->ref(UNIQUE_SERVER_TAG).empty()) {
        num_erased =
            kernel_->server_tags_map.erase(entry->ref(UNIQUE_SERVER_TAG));
        DCHECK_EQ(1u, num_erased);
      }
      if (!entry->ref(UNIQUE_CLIENT_TAG).empty()) {
        num_erased =
            kernel_->client_tags_map.erase(entry->ref(UNIQUE_CLIENT_TAG));
        DCHECK_EQ(1u, num_erased);
      }
      if (!SyncAssert(!kernel_->parent_child_index.Contains(entry),
                      FROM_HERE,
                      "Deleted entry still present",
                      &trans))
        return false;
      delete entry;
    }
    if (trans.unrecoverable_error_set())
      return false;
  }
  return true;
}

void Directory::UnapplyEntry(EntryKernel* entry) {
  int64 handle = entry->ref(META_HANDLE);
  ModelType server_type = GetModelTypeFromSpecifics(
      entry->ref(SERVER_SPECIFICS));

  // Clear enough so that on the next sync cycle all local data will
  // be overwritten with server data.
  // Note: do not modify the root node in order to preserve the
  // initial sync ended bit for this type (else on the next restart
  // this type will be treated as disabled and therefore fully purged).
  if (IsRealDataType(server_type) &&
      ModelTypeToRootTag(server_type) == entry->ref(UNIQUE_SERVER_TAG)) {
    return;
  }

  // Set the unapplied bit if this item has server data.
  if (IsRealDataType(server_type) && !entry->ref(IS_UNAPPLIED_UPDATE)) {
    entry->put(IS_UNAPPLIED_UPDATE, true);
    kernel_->unapplied_update_metahandles[server_type].insert(handle);
    entry->mark_dirty(&kernel_->dirty_metahandles);
  }

  // Unset the unsynced bit.
  if (entry->ref(IS_UNSYNCED)) {
    kernel_->unsynced_metahandles.erase(handle);
    entry->put(IS_UNSYNCED, false);
    entry->mark_dirty(&kernel_->dirty_metahandles);
  }

  // Mark the item as locally deleted. No deleted items are allowed in the
  // parent child index.
  if (!entry->ref(IS_DEL)) {
    kernel_->parent_child_index.Remove(entry);
    entry->put(IS_DEL, true);
    entry->mark_dirty(&kernel_->dirty_metahandles);
  }

  // Set the version to the "newly created" version.
  if (entry->ref(BASE_VERSION) != CHANGES_VERSION) {
    entry->put(BASE_VERSION, CHANGES_VERSION);
    entry->mark_dirty(&kernel_->dirty_metahandles);
  }

  // At this point locally created items that aren't synced will become locally
  // deleted items, and purged on the next snapshot. All other items will match
  // the state they would have had if they were just created via a server
  // update. See MutableEntry::MutableEntry(.., CreateNewUpdateItem, ..).
}

void Directory::DeleteEntry(bool save_to_journal,
                            EntryKernel* entry,
                            EntryKernelSet* entries_to_journal) {
  int64 handle = entry->ref(META_HANDLE);
  ModelType server_type = GetModelTypeFromSpecifics(
      entry->ref(SERVER_SPECIFICS));

  kernel_->metahandles_to_purge.insert(handle);

  size_t num_erased = 0;
  num_erased = kernel_->metahandles_map.erase(entry->ref(META_HANDLE));
  DCHECK_EQ(1u, num_erased);
  num_erased = kernel_->ids_map.erase(entry->ref(ID).value());
  DCHECK_EQ(1u, num_erased);
  num_erased = kernel_->unsynced_metahandles.erase(handle);
  DCHECK_EQ(entry->ref(IS_UNSYNCED), num_erased > 0);
  num_erased =
      kernel_->unapplied_update_metahandles[server_type].erase(handle);
  DCHECK_EQ(entry->ref(IS_UNAPPLIED_UPDATE), num_erased > 0);
  if (kernel_->parent_child_index.Contains(entry))
    kernel_->parent_child_index.Remove(entry);

  if (!entry->ref(UNIQUE_CLIENT_TAG).empty()) {
    num_erased =
        kernel_->client_tags_map.erase(entry->ref(UNIQUE_CLIENT_TAG));
    DCHECK_EQ(1u, num_erased);
  }
  if (!entry->ref(UNIQUE_SERVER_TAG).empty()) {
    num_erased =
        kernel_->server_tags_map.erase(entry->ref(UNIQUE_SERVER_TAG));
    DCHECK_EQ(1u, num_erased);
  }

  if (save_to_journal) {
    entries_to_journal->insert(entry);
  } else {
    delete entry;
  }
}

bool Directory::PurgeEntriesWithTypeIn(ModelTypeSet disabled_types,
                                       ModelTypeSet types_to_journal,
                                       ModelTypeSet types_to_unapply) {
  disabled_types.RemoveAll(ProxyTypes());

  if (disabled_types.Empty())
    return true;

  {
    WriteTransaction trans(FROM_HERE, PURGE_ENTRIES, this);

    EntryKernelSet entries_to_journal;
    STLElementDeleter<EntryKernelSet> journal_deleter(&entries_to_journal);

    {
      ScopedKernelLock lock(this);

      // We iterate in two passes to avoid a bug in STLport (which is used in
      // the Android build). There are some versions of that library where a
      // hash_map's iterators can be invalidated when an item is erased from
      // the hash_map.
      // See http://sourceforge.net/p/stlport/bugs/239/.
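      //
      // The workaround, in outline: pass one collects the doomed entries into
      // a separate container while the map is only read; pass two mutates the
      // map using those collected pointers, so no hash_map iterator is live
      // across an erase. The two loops below follow this exact shape.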

      std::set<EntryKernel*> to_purge;
      for (MetahandlesMap::iterator it = kernel_->metahandles_map.begin();
           it != kernel_->metahandles_map.end(); ++it) {
        const sync_pb::EntitySpecifics& local_specifics =
            it->second->ref(SPECIFICS);
        const sync_pb::EntitySpecifics& server_specifics =
            it->second->ref(SERVER_SPECIFICS);
        ModelType local_type = GetModelTypeFromSpecifics(local_specifics);
        ModelType server_type = GetModelTypeFromSpecifics(server_specifics);

        if ((IsRealDataType(local_type) && disabled_types.Has(local_type)) ||
            (IsRealDataType(server_type) && disabled_types.Has(server_type))) {
          to_purge.insert(it->second);
        }
      }

      for (std::set<EntryKernel*>::iterator it = to_purge.begin();
           it != to_purge.end(); ++it) {
        EntryKernel* entry = *it;

        const sync_pb::EntitySpecifics& local_specifics =
            (*it)->ref(SPECIFICS);
        const sync_pb::EntitySpecifics& server_specifics =
            (*it)->ref(SERVER_SPECIFICS);
        ModelType local_type = GetModelTypeFromSpecifics(local_specifics);
        ModelType server_type = GetModelTypeFromSpecifics(server_specifics);

        if (types_to_unapply.Has(local_type) ||
            types_to_unapply.Has(server_type)) {
          UnapplyEntry(entry);
        } else {
          bool save_to_journal =
              (types_to_journal.Has(local_type) ||
               types_to_journal.Has(server_type)) &&
              (delete_journal_->IsDeleteJournalEnabled(local_type) ||
               delete_journal_->IsDeleteJournalEnabled(server_type));
          DeleteEntry(save_to_journal, entry, &entries_to_journal);
        }
      }

      delete_journal_->AddJournalBatch(&trans, entries_to_journal);

      // Ensure meta tracking for these data types reflects the purged state.
      for (ModelTypeSet::Iterator it = disabled_types.First();
           it.Good(); it.Inc()) {
        kernel_->persisted_info.transaction_version[it.Get()] = 0;

        // Don't discard progress markers for unapplied types.
        if (!types_to_unapply.Has(it.Get()))
          kernel_->persisted_info.reset_download_progress(it.Get());
      }
    }
  }
  return true;
}

void Directory::HandleSaveChangesFailure(const SaveChangesSnapshot& snapshot) {
  WriteTransaction trans(FROM_HERE, HANDLE_SAVE_FAILURE, this);
  ScopedKernelLock lock(this);
  kernel_->info_status = KERNEL_SHARE_INFO_DIRTY;

  // Because we optimistically cleared the dirty bit on the real entries when
  // taking the snapshot, we must restore it on failure. Not doing this could
  // cause lost data, if no other changes are made to the in-memory entries
  // that would cause the dirty bit to get set again. Setting the bit ensures
  // that SaveChanges will at least try again later.
  for (EntryKernelSet::const_iterator i = snapshot.dirty_metas.begin();
       i != snapshot.dirty_metas.end(); ++i) {
    MetahandlesMap::iterator found =
        kernel_->metahandles_map.find((*i)->ref(META_HANDLE));
    if (found != kernel_->metahandles_map.end()) {
      found->second->mark_dirty(&kernel_->dirty_metahandles);
    }
  }

  kernel_->metahandles_to_purge.insert(snapshot.metahandles_to_purge.begin(),
                                       snapshot.metahandles_to_purge.end());

  // Restore delete journals.
  delete_journal_->AddJournalBatch(&trans, snapshot.delete_journals);
  delete_journal_->PurgeDeleteJournals(&trans,
                                       snapshot.delete_journals_to_purge);
}

void Directory::GetDownloadProgress(
    ModelType model_type,
    sync_pb::DataTypeProgressMarker* value_out) const {
  ScopedKernelLock lock(this);
  return value_out->CopyFrom(
      kernel_->persisted_info.download_progress[model_type]);
}

void Directory::GetDownloadProgressAsString(
    ModelType model_type,
    std::string* value_out) const {
  ScopedKernelLock lock(this);
  kernel_->persisted_info.download_progress[model_type].SerializeToString(
      value_out);
}

size_t Directory::GetEntriesCount() const {
  ScopedKernelLock lock(this);
  return kernel_->metahandles_map.size();
}

void Directory::SetDownloadProgress(
    ModelType model_type,
    const sync_pb::DataTypeProgressMarker& new_progress) {
  ScopedKernelLock lock(this);
  kernel_->persisted_info.download_progress[model_type].CopyFrom(new_progress);
  kernel_->info_status = KERNEL_SHARE_INFO_DIRTY;
}

int64 Directory::GetTransactionVersion(ModelType type) const {
  kernel_->transaction_mutex.AssertAcquired();
  return kernel_->persisted_info.transaction_version[type];
}

void Directory::IncrementTransactionVersion(ModelType type) {
  kernel_->transaction_mutex.AssertAcquired();
  kernel_->persisted_info.transaction_version[type]++;
}

ModelTypeSet Directory::InitialSyncEndedTypes() {
  syncable::ReadTransaction trans(FROM_HERE, this);
  ModelTypeSet protocol_types = ProtocolTypes();
  ModelTypeSet initial_sync_ended_types;
  for (ModelTypeSet::Iterator i = protocol_types.First(); i.Good(); i.Inc()) {
    if (InitialSyncEndedForType(&trans, i.Get())) {
      initial_sync_ended_types.Put(i.Get());
    }
  }
  return initial_sync_ended_types;
}

bool Directory::InitialSyncEndedForType(ModelType type) {
  syncable::ReadTransaction trans(FROM_HERE, this);
  return InitialSyncEndedForType(&trans, type);
}

bool Directory::InitialSyncEndedForType(
    BaseTransaction* trans, ModelType type) {
  // True iff the type's root node has been received and applied.
  syncable::Entry entry(trans,
                        syncable::GET_BY_SERVER_TAG,
                        ModelTypeToRootTag(type));
  return entry.good() && entry.GetBaseVersion() != CHANGES_VERSION;
}
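
// CHANGES_VERSION is the sentinel base version for items created locally and
// never applied from the server (see UnapplyEntry(), which resets BASE_VERSION
// to CHANGES_VERSION). A type root still carrying it is only a local
// placeholder, so initial sync has not ended for that type.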

string Directory::store_birthday() const {
  ScopedKernelLock lock(this);
  return kernel_->persisted_info.store_birthday;
}

void Directory::set_store_birthday(const string& store_birthday) {
  ScopedKernelLock lock(this);
  if (kernel_->persisted_info.store_birthday == store_birthday)
    return;
  kernel_->persisted_info.store_birthday = store_birthday;
  kernel_->info_status = KERNEL_SHARE_INFO_DIRTY;
}

string Directory::bag_of_chips() const {
  ScopedKernelLock lock(this);
  return kernel_->persisted_info.bag_of_chips;
}

void Directory::set_bag_of_chips(const string& bag_of_chips) {
  ScopedKernelLock lock(this);
  if (kernel_->persisted_info.bag_of_chips == bag_of_chips)
    return;
  kernel_->persisted_info.bag_of_chips = bag_of_chips;
  kernel_->info_status = KERNEL_SHARE_INFO_DIRTY;
}

string Directory::cache_guid() const {
  // No need to lock since nothing ever writes to it after load.
  return kernel_->cache_guid;
}

NigoriHandler* Directory::GetNigoriHandler() {
  return nigori_handler_;
}

Cryptographer* Directory::GetCryptographer(const BaseTransaction* trans) {
  DCHECK_EQ(this, trans->directory());
  return cryptographer_;
}

void Directory::GetAllMetaHandles(BaseTransaction* trans,
                                  MetahandleSet* result) {
  result->clear();
  ScopedKernelLock lock(this);
  for (MetahandlesMap::iterator i = kernel_->metahandles_map.begin();
       i != kernel_->metahandles_map.end(); ++i) {
    result->insert(i->first);
  }
}

void Directory::GetAllEntryKernels(BaseTransaction* trans,
                                   std::vector<const EntryKernel*>* result) {
  result->clear();
  ScopedKernelLock lock(this);
  for (MetahandlesMap::iterator i = kernel_->metahandles_map.begin();
       i != kernel_->metahandles_map.end(); ++i) {
    result->push_back(i->second);
  }
}

void Directory::GetUnsyncedMetaHandles(BaseTransaction* trans,
                                       Metahandles* result) {
  result->clear();
  ScopedKernelLock lock(this);
  copy(kernel_->unsynced_metahandles.begin(),
       kernel_->unsynced_metahandles.end(), back_inserter(*result));
}

int64 Directory::unsynced_entity_count() const {
  ScopedKernelLock lock(this);
  return kernel_->unsynced_metahandles.size();
}

FullModelTypeSet Directory::GetServerTypesWithUnappliedUpdates(
    BaseTransaction* trans) const {
  FullModelTypeSet server_types;
  ScopedKernelLock lock(this);
  for (int i = UNSPECIFIED; i < MODEL_TYPE_COUNT; ++i) {
    const ModelType type = ModelTypeFromInt(i);
    if (!kernel_->unapplied_update_metahandles[type].empty()) {
      server_types.Put(type);
    }
  }
  return server_types;
}

void Directory::GetUnappliedUpdateMetaHandles(
    BaseTransaction* trans,
    FullModelTypeSet server_types,
    std::vector<int64>* result) {
  result->clear();
  ScopedKernelLock lock(this);
  for (int i = UNSPECIFIED; i < MODEL_TYPE_COUNT; ++i) {
    const ModelType type = ModelTypeFromInt(i);
    if (server_types.Has(type)) {
      std::copy(kernel_->unapplied_update_metahandles[type].begin(),
                kernel_->unapplied_update_metahandles[type].end(),
                back_inserter(*result));
    }
  }
}

void Directory::CollectMetaHandleCounts(
    std::vector<int>* num_entries_by_type,
    std::vector<int>* num_to_delete_entries_by_type) {
  syncable::ReadTransaction trans(FROM_HERE, this);
  ScopedKernelLock lock(this);

  for (MetahandlesMap::iterator it = kernel_->metahandles_map.begin();
       it != kernel_->metahandles_map.end(); ++it) {
    EntryKernel* entry = it->second;
    const ModelType type = GetModelTypeFromSpecifics(entry->ref(SPECIFICS));
    (*num_entries_by_type)[type]++;
    if (entry->ref(IS_DEL))
      (*num_to_delete_entries_by_type)[type]++;
  }
}

bool Directory::CheckInvariantsOnTransactionClose(
    syncable::BaseTransaction* trans,
    const MetahandleSet& modified_handles) {
  // NOTE: The trans may be in the process of being destructed. Be careful if
  // you wish to call any of its virtual methods.
  switch (invariant_check_level_) {
    case FULL_DB_VERIFICATION: {
      MetahandleSet all_handles;
      GetAllMetaHandles(trans, &all_handles);
      return CheckTreeInvariants(trans, all_handles);
    }
    case VERIFY_CHANGES: {
      return CheckTreeInvariants(trans, modified_handles);
    }
    case OFF: {
      return true;
    }
  }
  NOTREACHED();
  return false;
}

bool Directory::FullyCheckTreeInvariants(syncable::BaseTransaction* trans) {
  MetahandleSet handles;
  GetAllMetaHandles(trans, &handles);
  return CheckTreeInvariants(trans, handles);
}

bool Directory::CheckTreeInvariants(syncable::BaseTransaction* trans,
                                    const MetahandleSet& handles) {
  MetahandleSet::const_iterator i;
  for (i = handles.begin(); i != handles.end(); ++i) {
    int64 metahandle = *i;
    Entry e(trans, GET_BY_HANDLE, metahandle);
    if (!SyncAssert(e.good(), FROM_HERE, "Entry is bad", trans))
      return false;
    syncable::Id id = e.GetId();
    syncable::Id parentid = e.GetParentId();

    if (id.IsRoot()) {
      if (!SyncAssert(e.GetIsDir(), FROM_HERE,
                      "Entry should be a directory",
                      trans))
        return false;
      if (!SyncAssert(parentid.IsRoot(), FROM_HERE,
                      "Entry should be root",
                      trans))
        return false;
      if (!SyncAssert(!e.GetIsUnsynced(), FROM_HERE,
                      "Entry should be synced",
                      trans))
        return false;
      continue;
    }

    if (!e.GetIsDel()) {
      if (!SyncAssert(id != parentid, FROM_HERE,
                      "Id should be different from parent id.",
                      trans))
        return false;
      if (!SyncAssert(!e.GetNonUniqueName().empty(), FROM_HERE,
                      "Non unique name should not be empty.",
                      trans))
        return false;
      int safety_count = handles.size() + 1;
      while (!parentid.IsRoot()) {
        Entry parent(trans, GET_BY_ID, parentid);
        if (!SyncAssert(parent.good(), FROM_HERE,
                        "Parent entry is not valid.",
                        trans))
          return false;
        if (handles.end() == handles.find(parent.GetMetahandle()))
          break;  // Skip further checking if parent was unmodified.
        if (!SyncAssert(parent.GetIsDir(), FROM_HERE,
                        "Parent should be a directory",
                        trans))
          return false;
        if (!SyncAssert(!parent.GetIsDel(), FROM_HERE,
                        "Parent should not have been marked for deletion.",
                        trans))
          return false;
        if (!SyncAssert(handles.end() != handles.find(parent.GetMetahandle()),
                        FROM_HERE,
                        "Parent should be in the index.",
                        trans))
          return false;
        parentid = parent.GetParentId();
        if (!SyncAssert(--safety_count > 0, FROM_HERE,
                        "Count should be greater than zero.",
                        trans))
          return false;
      }
    }
    int64 base_version = e.GetBaseVersion();
    int64 server_version = e.GetServerVersion();
    bool using_unique_client_tag = !e.GetUniqueClientTag().empty();
    if (CHANGES_VERSION == base_version || 0 == base_version) {
      if (e.GetIsUnappliedUpdate()) {
        // Must be a new item, or a de-duplicated unique client tag
        // that was created both locally and remotely.
        if (!using_unique_client_tag) {
          if (!SyncAssert(e.GetIsDel(), FROM_HERE,
                          "The entry should have been deleted.",
                          trans))
            return false;
        }
        // It came from the server, so it must have a server ID.
        if (!SyncAssert(id.ServerKnows(), FROM_HERE,
                        "The id should be from a server.",
                        trans))
          return false;
      } else {
        if (e.GetIsDir()) {
          // TODO(chron): Implement this mode if clients ever need it.
          // For now, you can't combine a client tag and a directory.
          if (!SyncAssert(!using_unique_client_tag, FROM_HERE,
                          "Directory cannot have a client tag.",
                          trans))
            return false;
        }
        // Should be an uncommitted item, or a successfully deleted one.
        if (!e.GetIsDel()) {
          if (!SyncAssert(e.GetIsUnsynced(), FROM_HERE,
                          "The item should be unsynced.",
                          trans))
            return false;
        }
      }
      // If the next check failed, it would imply that an item exists
      // on the server, isn't waiting for application locally, but either
      // is an unsynced create or a successful delete in the local copy.
      // Either way, that's a mismatch.
      if (!SyncAssert(0 == server_version, FROM_HERE,
                      "Server version should be zero.",
                      trans))
        return false;
      // Items that aren't using the unique client tag should have a zero
      // base version only if they have a local ID. Items with unique client
      // tags are allowed to use the zero base version for undeletion and
      // de-duplication; the unique client tag trumps the server ID.
      if (!using_unique_client_tag) {
        if (!SyncAssert(!id.ServerKnows(), FROM_HERE,
                        "Should be a client only id.",
                        trans))
          return false;
      }
    } else {
      if (!SyncAssert(id.ServerKnows(),
                      FROM_HERE,
                      "Should be a server id.",
                      trans))
        return false;
    }
    // Server-unknown items that are locally deleted should not be sent up to
    // the server. They must be !IS_UNSYNCED.
    if (!SyncAssert(!(!id.ServerKnows() && e.GetIsDel() && e.GetIsUnsynced()),
                    FROM_HERE,
                    "Locally deleted item must not be unsynced.",
                    trans)) {
      return false;
    }
  }
  return true;
}

void Directory::SetInvariantCheckLevel(InvariantCheckLevel check_level) {
  invariant_check_level_ = check_level;
}

int64 Directory::NextMetahandle() {
  ScopedKernelLock lock(this);
  int64 metahandle = (kernel_->next_metahandle)++;
  return metahandle;
}

// Always returns a client ID that is the string representation of a negative
// number.
Id Directory::NextId() {
  int64 result;
  {
    ScopedKernelLock lock(this);
    result = (kernel_->persisted_info.next_id)--;
    kernel_->info_status = KERNEL_SHARE_INFO_DIRTY;
  }
  DCHECK_LT(result, 0);
  return Id::CreateFromClientString(base::Int64ToString(result));
}
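
// For instance (illustrative values): with the persisted next_id at -4, the
// first NextId() call hands out the client string "-4" and stores -5, the
// next call hands out "-5", and so on. Together with the 65536-unit
// reservation in TakeSnapshotForSaveChanges(), this keeps locally generated
// IDs unique even across a crash.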

bool Directory::HasChildren(BaseTransaction* trans, const Id& id) {
  ScopedKernelLock lock(this);
  return kernel_->parent_child_index.GetChildren(id) != NULL;
}

Id Directory::GetFirstChildId(BaseTransaction* trans,
                              const EntryKernel* parent) {
  DCHECK(parent);
  DCHECK(parent->ref(IS_DIR));

  ScopedKernelLock lock(this);
  const OrderedChildSet* children =
      kernel_->parent_child_index.GetChildren(parent->ref(ID));

  // We're expected to return root if there are no children.
  if (!children)
    return Id();

  return (*children->begin())->ref(ID);
}

syncable::Id Directory::GetPredecessorId(EntryKernel* e) {
  ScopedKernelLock lock(this);

  DCHECK(ParentChildIndex::ShouldInclude(e));
  const OrderedChildSet* children =
      kernel_->parent_child_index.GetChildren(e->ref(PARENT_ID));
  DCHECK(children && !children->empty());
  OrderedChildSet::const_iterator i = children->find(e);
  DCHECK(i != children->end());

  if (i == children->begin()) {
    return Id();
  } else {
    i--;
    return (*i)->ref(ID);
  }
}

syncable::Id Directory::GetSuccessorId(EntryKernel* e) {
  ScopedKernelLock lock(this);

  DCHECK(ParentChildIndex::ShouldInclude(e));
  const OrderedChildSet* children =
      kernel_->parent_child_index.GetChildren(e->ref(PARENT_ID));
  DCHECK(children && !children->empty());
  OrderedChildSet::const_iterator i = children->find(e);
  DCHECK(i != children->end());

  i++;
  if (i == children->end()) {
    return Id();
  } else {
    return (*i)->ref(ID);
  }
}

// TODO(rlarocque): Remove all support for placing ShouldMaintainPosition()
// items as siblings of items that do not maintain positions. It is required
// only for tests. See crbug.com/178282.
void Directory::PutPredecessor(EntryKernel* e, EntryKernel* predecessor) {
  DCHECK(!e->ref(IS_DEL));
  if (!e->ShouldMaintainPosition()) {
    DCHECK(!e->ref(UNIQUE_POSITION).IsValid());
    return;
  }
  std::string suffix = e->ref(UNIQUE_BOOKMARK_TAG);
  DCHECK(!suffix.empty());

  // Remove our item from the ParentChildIndex and remember to re-add it later.
  ScopedKernelLock lock(this);
  ScopedParentChildIndexUpdater updater(lock, e, &kernel_->parent_child_index);

  // Note: The ScopedParentChildIndexUpdater will update this set for us as we
  // leave this function.
  const OrderedChildSet* siblings =
      kernel_->parent_child_index.GetChildren(e->ref(PARENT_ID));

  if (!siblings) {
    // This parent currently has no other children.
    DCHECK(predecessor->ref(ID).IsRoot());
    UniquePosition pos = UniquePosition::InitialPosition(suffix);
    e->put(UNIQUE_POSITION, pos);
    return;
  }

  if (predecessor->ref(ID).IsRoot()) {
    // We have at least one sibling, and we're inserting to the left of them.
    UniquePosition successor_pos = (*siblings->begin())->ref(UNIQUE_POSITION);

    UniquePosition pos;
    if (!successor_pos.IsValid()) {
      // If all our successors are of non-positionable types, just create an
      // initial position. We arbitrarily choose to sort invalid positions to
      // the right of the valid positions.
      //
      // We really shouldn't need to support this. See TODO above.
      pos = UniquePosition::InitialPosition(suffix);
    } else {
      DCHECK(!siblings->empty());
      pos = UniquePosition::Before(successor_pos, suffix);
    }

    e->put(UNIQUE_POSITION, pos);
    return;
  }

  // We can't support placing an item after an invalid position. Fortunately,
  // the tests don't exercise this particular case. We should not support
  // siblings with invalid positions at all. See TODO above.
  DCHECK(predecessor->ref(UNIQUE_POSITION).IsValid());

  OrderedChildSet::const_iterator neighbour = siblings->find(predecessor);
  DCHECK(neighbour != siblings->end());

  ++neighbour;
  if (neighbour == siblings->end()) {
    // Inserting at the end of the list.
    UniquePosition pos = UniquePosition::After(
        predecessor->ref(UNIQUE_POSITION),
        suffix);
    e->put(UNIQUE_POSITION, pos);
    return;
  }

  EntryKernel* successor = *neighbour;

  // Another mixed valid and invalid position case. This one could be supported
  // in theory, but we're trying to deprecate support for siblings with and
  // without valid positions. See TODO above.
  DCHECK(successor->ref(UNIQUE_POSITION).IsValid());

  // Finally, the normal case: inserting between two elements.
  UniquePosition pos = UniquePosition::Between(
      predecessor->ref(UNIQUE_POSITION),
      successor->ref(UNIQUE_POSITION),
      suffix);
  e->put(UNIQUE_POSITION, pos);
}
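
// In summary, PutPredecessor() chooses the UNIQUE_POSITION case by case: no
// siblings => InitialPosition(); root predecessor (insert first) => Before()
// the current first sibling; predecessor is last => After() it; otherwise =>
// Between() predecessor and successor. Every variant derives the position
// from the entry's unique bookmark tag suffix.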

// TODO(rlarocque): Avoid this indirection. Just return the set.
void Directory::AppendChildHandles(const ScopedKernelLock& lock,
                                   const Id& parent_id,
                                   Directory::Metahandles* result) {
  const OrderedChildSet* children =
      kernel_->parent_child_index.GetChildren(parent_id);
  if (!children)
    return;

  for (OrderedChildSet::const_iterator i = children->begin();
       i != children->end(); ++i) {
    DCHECK_EQ(parent_id, (*i)->ref(PARENT_ID));
    result->push_back((*i)->ref(META_HANDLE));
  }
}

}  // namespace syncable
}  // namespace syncer