// Copyright 2013 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "sync/syncable/directory.h"

#include <algorithm>
#include <iterator>

#include "base/base64.h"
#include "base/debug/trace_event.h"
#include "base/stl_util.h"
#include "base/strings/string_number_conversions.h"
#include "sync/internal_api/public/base/attachment_id_proto.h"
#include "sync/internal_api/public/base/unique_position.h"
#include "sync/internal_api/public/util/unrecoverable_error_handler.h"
#include "sync/syncable/entry.h"
#include "sync/syncable/entry_kernel.h"
#include "sync/syncable/in_memory_directory_backing_store.h"
#include "sync/syncable/on_disk_directory_backing_store.h"
#include "sync/syncable/scoped_kernel_lock.h"
#include "sync/syncable/scoped_parent_child_index_updater.h"
#include "sync/syncable/syncable-inl.h"
#include "sync/syncable/syncable_base_transaction.h"
#include "sync/syncable/syncable_changes_version.h"
#include "sync/syncable/syncable_read_transaction.h"
#include "sync/syncable/syncable_util.h"
#include "sync/syncable/syncable_write_transaction.h"

using std::string;

namespace syncer {
namespace syncable {

// static
const base::FilePath::CharType Directory::kSyncDatabaseFilename[] =
    FILE_PATH_LITERAL("SyncData.sqlite3");
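// Initializes the persisted directory state with an empty progress marker and
// a zero transaction version for every protocol type.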
Directory::PersistedKernelInfo::PersistedKernelInfo()
    : next_id(0) {
  ModelTypeSet protocol_types = ProtocolTypes();
  for (ModelTypeSet::Iterator iter = protocol_types.First(); iter.Good();
       iter.Inc()) {
    ResetDownloadProgress(iter.Get());
    transaction_version[iter.Get()] = 0;
  }
}
Directory::PersistedKernelInfo::~PersistedKernelInfo() {}
void Directory::PersistedKernelInfo::ResetDownloadProgress(
    ModelType model_type) {
  // Clear everything except the data type id field.
  download_progress[model_type].Clear();
  download_progress[model_type].set_data_type_id(
      GetSpecificsFieldNumberFromModelType(model_type));

  // Explicitly set an empty token field to denote no progress.
  download_progress[model_type].set_token("");
}
bool Directory::PersistedKernelInfo::HasEmptyDownloadProgress(
    ModelType model_type) {
  const sync_pb::DataTypeProgressMarker& progress_marker =
      download_progress[model_type];
  return progress_marker.token().empty();
}
Directory::SaveChangesSnapshot::SaveChangesSnapshot()
    : kernel_info_status(KERNEL_SHARE_INFO_INVALID) {
}
Directory::SaveChangesSnapshot::~SaveChangesSnapshot() {
  STLDeleteElements(&dirty_metas);
  STLDeleteElements(&delete_journals);
}
Directory::Kernel::Kernel(
    const std::string& name,
    const KernelLoadInfo& info, DirectoryChangeDelegate* delegate,
    const WeakHandle<TransactionObserver>& transaction_observer)
    : next_write_transaction_id(0),
      name(name),
      info_status(Directory::KERNEL_SHARE_INFO_VALID),
      persisted_info(info.kernel_info),
      cache_guid(info.cache_guid),
      next_metahandle(info.max_metahandle + 1),
      delegate(delegate),
      transaction_observer(transaction_observer) {
  DCHECK(transaction_observer.IsInitialized());
}
Directory::Kernel::~Kernel() {
  STLDeleteContainerPairSecondPointers(metahandles_map.begin(),
                                       metahandles_map.end());
}
Directory::Directory(
    DirectoryBackingStore* store,
    UnrecoverableErrorHandler* unrecoverable_error_handler,
    ReportUnrecoverableErrorFunction report_unrecoverable_error_function,
    NigoriHandler* nigori_handler,
    Cryptographer* cryptographer)
    : kernel_(NULL),
      store_(store),
      unrecoverable_error_handler_(unrecoverable_error_handler),
      report_unrecoverable_error_function_(
          report_unrecoverable_error_function),
      unrecoverable_error_set_(false),
      nigori_handler_(nigori_handler),
      cryptographer_(cryptographer),
      invariant_check_level_(VERIFY_CHANGES) {
}
Directory::~Directory() {
  Close();
}
DirOpenResult Directory::Open(
    const string& name,
    DirectoryChangeDelegate* delegate,
    const WeakHandle<TransactionObserver>& transaction_observer) {
  TRACE_EVENT0("sync", "SyncDatabaseOpen");

  const DirOpenResult result =
      OpenImpl(name, delegate, transaction_observer);

  if (OPENED != result)
    Close();

  return result;
}
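// Rebuilds the in-memory indices (ids, client/server tags, parent-child and
// attachment indices) from the metahandles map produced by the backing store.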
void Directory::InitializeIndices(MetahandlesMap* handles_map) {
  ScopedKernelLock lock(this);
  kernel_->metahandles_map.swap(*handles_map);
  for (MetahandlesMap::const_iterator it = kernel_->metahandles_map.begin();
       it != kernel_->metahandles_map.end(); ++it) {
    EntryKernel* entry = it->second;
    if (ParentChildIndex::ShouldInclude(entry))
      kernel_->parent_child_index.Insert(entry);
    const int64 metahandle = entry->ref(META_HANDLE);
    if (entry->ref(IS_UNSYNCED))
      kernel_->unsynced_metahandles.insert(metahandle);
    if (entry->ref(IS_UNAPPLIED_UPDATE)) {
      const ModelType type = entry->GetServerModelType();
      kernel_->unapplied_update_metahandles[type].insert(metahandle);
    }
    if (!entry->ref(UNIQUE_SERVER_TAG).empty()) {
      DCHECK(kernel_->server_tags_map.find(entry->ref(UNIQUE_SERVER_TAG)) ==
             kernel_->server_tags_map.end())
          << "Unexpected duplicate use of server tag";
      kernel_->server_tags_map[entry->ref(UNIQUE_SERVER_TAG)] = entry;
    }
    if (!entry->ref(UNIQUE_CLIENT_TAG).empty()) {
      DCHECK(kernel_->client_tags_map.find(entry->ref(UNIQUE_CLIENT_TAG)) ==
             kernel_->client_tags_map.end())
          << "Unexpected duplicate use of client tag";
      kernel_->client_tags_map[entry->ref(UNIQUE_CLIENT_TAG)] = entry;
    }
    DCHECK(kernel_->ids_map.find(entry->ref(ID).value()) ==
           kernel_->ids_map.end()) << "Unexpected duplicate use of ID";
    kernel_->ids_map[entry->ref(ID).value()] = entry;
    DCHECK(!entry->is_dirty());
    AddToAttachmentIndex(metahandle, entry->ref(ATTACHMENT_METADATA), lock);
  }
}
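// Loads the persisted directory through the backing store, then builds the
// kernel, the delete journal and the in-memory indices from the result.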
DirOpenResult Directory::OpenImpl(
    const string& name,
    DirectoryChangeDelegate* delegate,
    const WeakHandle<TransactionObserver>&
        transaction_observer) {
  KernelLoadInfo info;
  // Temporary indices before kernel_ initialized in case Load fails.  We O(1)
  // swap these later.
  Directory::MetahandlesMap tmp_handles_map;

  // Avoids mem leaks on failure.  Harmlessly deletes the empty hash map after
  // the swap in the success case.
  STLValueDeleter<Directory::MetahandlesMap> deleter(&tmp_handles_map);

  JournalIndex delete_journals;

  DirOpenResult result =
      store_->Load(&tmp_handles_map, &delete_journals, &info);
  if (OPENED != result)
    return result;

  kernel_ = new Kernel(name, info, delegate, transaction_observer);
  delete_journal_.reset(new DeleteJournal(&delete_journals));
  InitializeIndices(&tmp_handles_map);

  // Write back the share info to reserve some space in 'next_id'.  This will
  // prevent local ID reuse in the case of an early crash.  See the comments in
  // TakeSnapshotForSaveChanges() or crbug.com/142987 for more information.
  kernel_->info_status = KERNEL_SHARE_INFO_DIRTY;
  if (!SaveChanges())
    return FAILED_INITIAL_WRITE;

  return OPENED;
}
DeleteJournal* Directory::delete_journal() {
  DCHECK(delete_journal_.get());
  return delete_journal_.get();
}
void Directory::Close() {
  store_.reset();
  if (kernel_) {
    delete kernel_;
    kernel_ = NULL;
  }
}
void Directory::OnUnrecoverableError(const BaseTransaction* trans,
                                     const tracked_objects::Location& location,
                                     const std::string& message) {
  DCHECK(trans != NULL);
  unrecoverable_error_set_ = true;
  unrecoverable_error_handler_->OnUnrecoverableError(location,
                                                     message);
}
EntryKernel* Directory::GetEntryById(const Id& id) {
  ScopedKernelLock lock(this);
  return GetEntryById(id, &lock);
}
EntryKernel* Directory::GetEntryById(const Id& id,
                                     ScopedKernelLock* const lock) {
  // Find it in the in memory ID index.
  IdsMap::iterator id_found = kernel_->ids_map.find(id.value());
  if (id_found != kernel_->ids_map.end()) {
    return id_found->second;
  }
  return NULL;
}
EntryKernel* Directory::GetEntryByClientTag(const string& tag) {
  ScopedKernelLock lock(this);

  TagsMap::iterator it = kernel_->client_tags_map.find(tag);
  if (it != kernel_->client_tags_map.end()) {
    return it->second;
  }
  return NULL;
}
EntryKernel* Directory::GetEntryByServerTag(const string& tag) {
  ScopedKernelLock lock(this);

  TagsMap::iterator it = kernel_->server_tags_map.find(tag);
  if (it != kernel_->server_tags_map.end()) {
    return it->second;
  }
  return NULL;
}
EntryKernel* Directory::GetEntryByHandle(int64 metahandle) {
  ScopedKernelLock lock(this);
  return GetEntryByHandle(metahandle, &lock);
}
EntryKernel* Directory::GetEntryByHandle(int64 metahandle,
                                         ScopedKernelLock* lock) {
  MetahandlesMap::iterator found =
      kernel_->metahandles_map.find(metahandle);
  if (found != kernel_->metahandles_map.end()) {
    // Found it in memory.  Easy.
    return found->second;
  }
  return NULL;
}
bool Directory::GetChildHandlesById(
    BaseTransaction* trans, const Id& parent_id,
    Directory::Metahandles* result) {
  if (!SyncAssert(this == trans->directory(), FROM_HERE,
                  "Directories don't match", trans))
    return false;

  ScopedKernelLock lock(this);
  AppendChildHandles(lock, parent_id, result);
  return true;
}
int Directory::GetTotalNodeCount(
    BaseTransaction* trans,
    EntryKernel* kernel) const {
  if (!SyncAssert(this == trans->directory(), FROM_HERE,
                  "Directories don't match", trans))
    return 0;

  int count = 1;
  std::deque<const OrderedChildSet*> child_sets;

  GetChildSetForKernel(trans, kernel, &child_sets);
  while (!child_sets.empty()) {
    const OrderedChildSet* set = child_sets.front();
    child_sets.pop_front();
    for (OrderedChildSet::const_iterator it = set->begin();
         it != set->end(); ++it) {
      count++;
      GetChildSetForKernel(trans, *it, &child_sets);
    }
  }

  return count;
}
void Directory::GetChildSetForKernel(
    BaseTransaction* trans,
    EntryKernel* kernel,
    std::deque<const OrderedChildSet*>* child_sets) const {
  if (!kernel->ref(IS_DIR))
    return;  // Not a directory => no children.

  const OrderedChildSet* descendants =
      kernel_->parent_child_index.GetChildren(kernel->ref(ID));
  if (!descendants)
    return;  // This directory has no children.

  // Add our children to the list of items to be traversed.
  child_sets->push_back(descendants);
}
int Directory::GetPositionIndex(
    BaseTransaction* trans,
    EntryKernel* kernel) const {
  const OrderedChildSet* siblings =
      kernel_->parent_child_index.GetChildren(kernel->ref(PARENT_ID));

  OrderedChildSet::const_iterator it = siblings->find(kernel);
  return std::distance(siblings->begin(), it);
}
bool Directory::InsertEntry(BaseWriteTransaction* trans, EntryKernel* entry) {
  ScopedKernelLock lock(this);
  return InsertEntry(trans, entry, &lock);
}
bool Directory::InsertEntry(BaseWriteTransaction* trans,
                            EntryKernel* entry,
                            ScopedKernelLock* lock) {
  DCHECK(NULL != lock);
  if (!SyncAssert(NULL != entry, FROM_HERE, "Entry is null", trans))
    return false;

  static const char error[] = "Entry already in memory index.";

  if (!SyncAssert(
          kernel_->metahandles_map.insert(
              std::make_pair(entry->ref(META_HANDLE), entry)).second,
          FROM_HERE,
          error,
          trans)) {
    return false;
  }
  if (!SyncAssert(
          kernel_->ids_map.insert(
              std::make_pair(entry->ref(ID).value(), entry)).second,
          FROM_HERE,
          error,
          trans)) {
    return false;
  }
  if (ParentChildIndex::ShouldInclude(entry)) {
    if (!SyncAssert(kernel_->parent_child_index.Insert(entry),
                    FROM_HERE,
                    error,
                    trans)) {
      return false;
    }
  }
  AddToAttachmentIndex(
      entry->ref(META_HANDLE), entry->ref(ATTACHMENT_METADATA), *lock);

  // Should NEVER be created with a client tag or server tag.
  if (!SyncAssert(entry->ref(UNIQUE_SERVER_TAG).empty(), FROM_HERE,
                  "Server tag should be empty", trans)) {
    return false;
  }
  if (!SyncAssert(entry->ref(UNIQUE_CLIENT_TAG).empty(), FROM_HERE,
                  "Client tag should be empty", trans))
    return false;

  return true;
}
bool Directory::ReindexId(BaseWriteTransaction* trans,
                          EntryKernel* const entry,
                          const Id& new_id) {
  ScopedKernelLock lock(this);
  if (NULL != GetEntryById(new_id, &lock))
    return false;

  {
    // Update the indices that depend on the ID field.
    ScopedParentChildIndexUpdater updater_b(lock, entry,
        &kernel_->parent_child_index);
    size_t num_erased = kernel_->ids_map.erase(entry->ref(ID).value());
    DCHECK_EQ(1U, num_erased);
    entry->put(ID, new_id);
    kernel_->ids_map[entry->ref(ID).value()] = entry;
  }
  return true;
}
bool Directory::ReindexParentId(BaseWriteTransaction* trans,
                                EntryKernel* const entry,
                                const Id& new_parent_id) {
  ScopedKernelLock lock(this);

  {
    // Update the indices that depend on the PARENT_ID field.
    ScopedParentChildIndexUpdater index_updater(lock, entry,
        &kernel_->parent_child_index);
    entry->put(PARENT_ID, new_parent_id);
  }
  return true;
}
void Directory::RemoveFromAttachmentIndex(
    const int64 metahandle,
    const sync_pb::AttachmentMetadata& attachment_metadata,
    const ScopedKernelLock& lock) {
  for (int i = 0; i < attachment_metadata.record_size(); ++i) {
    AttachmentIdUniqueId unique_id =
        attachment_metadata.record(i).id().unique_id();
    IndexByAttachmentId::iterator iter =
        kernel_->index_by_attachment_id.find(unique_id);
    if (iter != kernel_->index_by_attachment_id.end()) {
      iter->second.erase(metahandle);
      if (iter->second.empty()) {
        kernel_->index_by_attachment_id.erase(iter);
      }
    }
  }
}
void Directory::AddToAttachmentIndex(
    const int64 metahandle,
    const sync_pb::AttachmentMetadata& attachment_metadata,
    const ScopedKernelLock& lock) {
  for (int i = 0; i < attachment_metadata.record_size(); ++i) {
    AttachmentIdUniqueId unique_id =
        attachment_metadata.record(i).id().unique_id();
    IndexByAttachmentId::iterator iter =
        kernel_->index_by_attachment_id.find(unique_id);
    if (iter == kernel_->index_by_attachment_id.end()) {
      iter = kernel_->index_by_attachment_id.insert(std::make_pair(
          unique_id,
          MetahandleSet())).first;
    }
    iter->second.insert(metahandle);
  }
}
void Directory::UpdateAttachmentIndex(
    const int64 metahandle,
    const sync_pb::AttachmentMetadata& old_metadata,
    const sync_pb::AttachmentMetadata& new_metadata) {
  ScopedKernelLock lock(this);
  RemoveFromAttachmentIndex(metahandle, old_metadata, lock);
  AddToAttachmentIndex(metahandle, new_metadata, lock);
}
void Directory::GetMetahandlesByAttachmentId(
    BaseTransaction* trans,
    const sync_pb::AttachmentIdProto& attachment_id_proto,
    Metahandles* result) {
  ScopedKernelLock lock(this);
  IndexByAttachmentId::const_iterator index_iter =
      kernel_->index_by_attachment_id.find(attachment_id_proto.unique_id());
  if (index_iter == kernel_->index_by_attachment_id.end())
    return;
  const MetahandleSet& metahandle_set = index_iter->second;
  std::copy(
      metahandle_set.begin(), metahandle_set.end(), back_inserter(*result));
}
bool Directory::unrecoverable_error_set(const BaseTransaction* trans) const {
  DCHECK(trans != NULL);
  return unrecoverable_error_set_;
}
void Directory::ClearDirtyMetahandles() {
  kernel_->transaction_mutex.AssertAcquired();
  kernel_->dirty_metahandles.clear();
}
bool Directory::SafeToPurgeFromMemory(WriteTransaction* trans,
                                      const EntryKernel* const entry) const {
  bool safe = entry->ref(IS_DEL) && !entry->is_dirty() &&
      !entry->ref(SYNCING) && !entry->ref(IS_UNAPPLIED_UPDATE) &&
      !entry->ref(IS_UNSYNCED);

  if (safe) {
    int64 handle = entry->ref(META_HANDLE);
    const ModelType type = entry->GetServerModelType();
    if (!SyncAssert(kernel_->dirty_metahandles.count(handle) == 0U,
                    FROM_HERE,
                    "Dirty metahandles should be empty", trans))
      return false;
    // TODO(tim): Bug 49278.
    if (!SyncAssert(!kernel_->unsynced_metahandles.count(handle),
                    FROM_HERE,
                    "Unsynced handles should be empty",
                    trans))
      return false;
    if (!SyncAssert(!kernel_->unapplied_update_metahandles[type].count(handle),
                    FROM_HERE,
                    "Unapplied metahandles should be empty",
                    trans))
      return false;
  }

  return safe;
}
void Directory::TakeSnapshotForSaveChanges(SaveChangesSnapshot* snapshot) {
  ReadTransaction trans(FROM_HERE, this);
  ScopedKernelLock lock(this);

  // If there is an unrecoverable error then just bail out.
  if (unrecoverable_error_set(&trans))
    return;

  // Deep copy dirty entries from kernel_->metahandles_index into snapshot and
  // clear dirty flags.
  for (MetahandleSet::const_iterator i = kernel_->dirty_metahandles.begin();
       i != kernel_->dirty_metahandles.end(); ++i) {
    EntryKernel* entry = GetEntryByHandle(*i, &lock);
    if (!entry)
      continue;
    // Skip over false positives; it happens relatively infrequently.
    if (!entry->is_dirty())
      continue;
    snapshot->dirty_metas.insert(snapshot->dirty_metas.end(),
                                 new EntryKernel(*entry));
    DCHECK_EQ(1U, kernel_->dirty_metahandles.count(*i));
    // We don't bother removing from the index here as we blow the entire thing
    // in a moment, and it unnecessarily complicates iteration.
    entry->clear_dirty(NULL);
  }
  ClearDirtyMetahandles();

  // Set purged handles.
  DCHECK(snapshot->metahandles_to_purge.empty());
  snapshot->metahandles_to_purge.swap(kernel_->metahandles_to_purge);

  // Fill kernel_info_status and kernel_info.
  snapshot->kernel_info = kernel_->persisted_info;
  // To avoid duplicates when the process crashes, we record the next_id to be
  // greater magnitude than could possibly be reached before the next save
  // changes.  In other words, it's effectively impossible for the user to
  // generate 65536 new bookmarks in 3 seconds.
  snapshot->kernel_info.next_id -= 65536;
  snapshot->kernel_info_status = kernel_->info_status;
  // This one we reset on failure.
  kernel_->info_status = KERNEL_SHARE_INFO_VALID;

  delete_journal_->TakeSnapshotAndClear(
      &trans, &snapshot->delete_journals, &snapshot->delete_journals_to_purge);
}
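// Takes a snapshot, hands it to the backing store, and then either vacuums
// purgeable entries (on success) or restores the dirty state (on failure).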
bool Directory::SaveChanges() {
  bool success = false;

  base::AutoLock scoped_lock(kernel_->save_changes_mutex);

  // Snapshot and save.
  SaveChangesSnapshot snapshot;
  TakeSnapshotForSaveChanges(&snapshot);
  success = store_->SaveChanges(snapshot);

  // Handle success or failure.
  if (success)
    success = VacuumAfterSaveChanges(snapshot);
  else
    HandleSaveChangesFailure(snapshot);
  return success;
}
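// Drops from memory the deleted entries in |snapshot| that are fully synced
// and therefore safe to purge (see SafeToPurgeFromMemory()).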
bool Directory::VacuumAfterSaveChanges(const SaveChangesSnapshot& snapshot) {
  if (snapshot.dirty_metas.empty())
    return true;

  // Need a write transaction as we are about to permanently purge entries.
  WriteTransaction trans(FROM_HERE, VACUUM_AFTER_SAVE, this);
  ScopedKernelLock lock(this);
  // Now drop everything we can out of memory.
  for (EntryKernelSet::const_iterator i = snapshot.dirty_metas.begin();
       i != snapshot.dirty_metas.end(); ++i) {
    MetahandlesMap::iterator found =
        kernel_->metahandles_map.find((*i)->ref(META_HANDLE));
    EntryKernel* entry = (found == kernel_->metahandles_map.end() ?
                          NULL : found->second);
    if (entry && SafeToPurgeFromMemory(&trans, entry)) {
      // We now drop deleted metahandles that are up to date on both the client
      // and the server.
      size_t num_erased = 0;
      num_erased = kernel_->metahandles_map.erase(entry->ref(META_HANDLE));
      DCHECK_EQ(1u, num_erased);
      num_erased = kernel_->ids_map.erase(entry->ref(ID).value());
      DCHECK_EQ(1u, num_erased);
      if (!entry->ref(UNIQUE_SERVER_TAG).empty()) {
        num_erased =
            kernel_->server_tags_map.erase(entry->ref(UNIQUE_SERVER_TAG));
        DCHECK_EQ(1u, num_erased);
      }
      if (!entry->ref(UNIQUE_CLIENT_TAG).empty()) {
        num_erased =
            kernel_->client_tags_map.erase(entry->ref(UNIQUE_CLIENT_TAG));
        DCHECK_EQ(1u, num_erased);
      }
      if (!SyncAssert(!kernel_->parent_child_index.Contains(entry),
                      FROM_HERE,
                      "Deleted entry still present",
                      (&trans)))
        return false;
      RemoveFromAttachmentIndex(
          entry->ref(META_HANDLE), entry->ref(ATTACHMENT_METADATA), lock);

      delete entry;
    }
    if (trans.unrecoverable_error_set())
      return false;
  }
  return true;
}
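// Reverts |entry| to the state it would have had if it had just arrived as a
// server update, so the next sync cycle re-downloads and re-applies its data.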
void Directory::UnapplyEntry(EntryKernel* entry) {
  int64 handle = entry->ref(META_HANDLE);
  ModelType server_type = GetModelTypeFromSpecifics(
      entry->ref(SERVER_SPECIFICS));

  // Clear enough so that on the next sync cycle all local data will
  // be overwritten.
  // Note: do not modify the root node in order to preserve the
  // initial sync ended bit for this type (else on the next restart
  // this type will be treated as disabled and therefore fully purged).
  if (IsRealDataType(server_type) &&
      ModelTypeToRootTag(server_type) == entry->ref(UNIQUE_SERVER_TAG)) {
    return;
  }

  // Set the unapplied bit if this item has server data.
  if (IsRealDataType(server_type) && !entry->ref(IS_UNAPPLIED_UPDATE)) {
    entry->put(IS_UNAPPLIED_UPDATE, true);
    kernel_->unapplied_update_metahandles[server_type].insert(handle);
    entry->mark_dirty(&kernel_->dirty_metahandles);
  }

  // Unset the unsynced bit.
  if (entry->ref(IS_UNSYNCED)) {
    kernel_->unsynced_metahandles.erase(handle);
    entry->put(IS_UNSYNCED, false);
    entry->mark_dirty(&kernel_->dirty_metahandles);
  }

  // Mark the item as locally deleted. No deleted items are allowed in the
  // parent child index.
  if (!entry->ref(IS_DEL)) {
    kernel_->parent_child_index.Remove(entry);
    entry->put(IS_DEL, true);
    entry->mark_dirty(&kernel_->dirty_metahandles);
  }

  // Set the version to the "newly created" version.
  if (entry->ref(BASE_VERSION) != CHANGES_VERSION) {
    entry->put(BASE_VERSION, CHANGES_VERSION);
    entry->mark_dirty(&kernel_->dirty_metahandles);
  }

  // At this point locally created items that aren't synced will become locally
  // deleted items, and purged on the next snapshot. All other items will match
  // the state they would have had if they were just created via a server
  // update. See MutableEntry::MutableEntry(.., CreateNewUpdateItem, ..).
}
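// Removes |entry| from every in-memory index and, when |save_to_journal| is
// set, adds it to |entries_to_journal| so it can be recorded in the delete
// journal.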
void Directory::DeleteEntry(bool save_to_journal,
                            EntryKernel* entry,
                            EntryKernelSet* entries_to_journal,
                            const ScopedKernelLock& lock) {
  int64 handle = entry->ref(META_HANDLE);
  ModelType server_type = GetModelTypeFromSpecifics(
      entry->ref(SERVER_SPECIFICS));

  kernel_->metahandles_to_purge.insert(handle);

  size_t num_erased = 0;
  num_erased = kernel_->metahandles_map.erase(entry->ref(META_HANDLE));
  DCHECK_EQ(1u, num_erased);
  num_erased = kernel_->ids_map.erase(entry->ref(ID).value());
  DCHECK_EQ(1u, num_erased);
  num_erased = kernel_->unsynced_metahandles.erase(handle);
  DCHECK_EQ(entry->ref(IS_UNSYNCED), num_erased > 0);
  num_erased =
      kernel_->unapplied_update_metahandles[server_type].erase(handle);
  DCHECK_EQ(entry->ref(IS_UNAPPLIED_UPDATE), num_erased > 0);
  if (kernel_->parent_child_index.Contains(entry))
    kernel_->parent_child_index.Remove(entry);

  if (!entry->ref(UNIQUE_CLIENT_TAG).empty()) {
    num_erased =
        kernel_->client_tags_map.erase(entry->ref(UNIQUE_CLIENT_TAG));
    DCHECK_EQ(1u, num_erased);
  }
  if (!entry->ref(UNIQUE_SERVER_TAG).empty()) {
    num_erased =
        kernel_->server_tags_map.erase(entry->ref(UNIQUE_SERVER_TAG));
    DCHECK_EQ(1u, num_erased);
  }
  RemoveFromAttachmentIndex(handle, entry->ref(ATTACHMENT_METADATA), lock);

  if (save_to_journal) {
    entries_to_journal->insert(entry);
  } else {
    delete entry;
  }
}
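// Purges all entries belonging to |disabled_types|: entries of types in
// |types_to_unapply| are unapplied instead of deleted, and deletions of types
// in |types_to_journal| are recorded in the delete journal.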
bool Directory::PurgeEntriesWithTypeIn(ModelTypeSet disabled_types,
                                       ModelTypeSet types_to_journal,
                                       ModelTypeSet types_to_unapply) {
  disabled_types.RemoveAll(ProxyTypes());

  if (disabled_types.Empty())
    return true;

  {
    WriteTransaction trans(FROM_HERE, PURGE_ENTRIES, this);

    EntryKernelSet entries_to_journal;
    STLElementDeleter<EntryKernelSet> journal_deleter(&entries_to_journal);

    {
      ScopedKernelLock lock(this);

      bool found_progress = false;
      for (ModelTypeSet::Iterator iter = disabled_types.First(); iter.Good();
           iter.Inc()) {
        if (!kernel_->persisted_info.HasEmptyDownloadProgress(iter.Get()))
          found_progress = true;
      }

      // If none of the disabled types have progress markers, there's nothing
      // to purge.
      if (!found_progress)
        return true;

      // We iterate in two passes to avoid a bug in STLport (which is used in
      // the Android build).  There are some versions of that library where a
      // hash_map's iterators can be invalidated when an item is erased from
      // the hash_map.
      // See http://sourceforge.net/p/stlport/bugs/239/.
      std::set<EntryKernel*> to_purge;
      for (MetahandlesMap::iterator it = kernel_->metahandles_map.begin();
           it != kernel_->metahandles_map.end(); ++it) {
        const sync_pb::EntitySpecifics& local_specifics =
            it->second->ref(SPECIFICS);
        const sync_pb::EntitySpecifics& server_specifics =
            it->second->ref(SERVER_SPECIFICS);
        ModelType local_type = GetModelTypeFromSpecifics(local_specifics);
        ModelType server_type = GetModelTypeFromSpecifics(server_specifics);

        if ((IsRealDataType(local_type) && disabled_types.Has(local_type)) ||
            (IsRealDataType(server_type) && disabled_types.Has(server_type))) {
          to_purge.insert(it->second);
        }
      }

      for (std::set<EntryKernel*>::iterator it = to_purge.begin();
           it != to_purge.end(); ++it) {
        EntryKernel* entry = *it;

        const sync_pb::EntitySpecifics& local_specifics =
            (*it)->ref(SPECIFICS);
        const sync_pb::EntitySpecifics& server_specifics =
            (*it)->ref(SERVER_SPECIFICS);
        ModelType local_type = GetModelTypeFromSpecifics(local_specifics);
        ModelType server_type = GetModelTypeFromSpecifics(server_specifics);

        if (types_to_unapply.Has(local_type) ||
            types_to_unapply.Has(server_type)) {
          UnapplyEntry(entry);
        } else {
          bool save_to_journal =
              (types_to_journal.Has(local_type) ||
               types_to_journal.Has(server_type)) &&
              (delete_journal_->IsDeleteJournalEnabled(local_type) ||
               delete_journal_->IsDeleteJournalEnabled(server_type));
          DeleteEntry(save_to_journal, entry, &entries_to_journal, lock);
        }
      }

      delete_journal_->AddJournalBatch(&trans, entries_to_journal);

      // Ensure meta tracking for these data types reflects the purged state.
      for (ModelTypeSet::Iterator it = disabled_types.First();
           it.Good(); it.Inc()) {
        kernel_->persisted_info.transaction_version[it.Get()] = 0;

        // Don't discard progress markers or context for unapplied types.
        if (!types_to_unapply.Has(it.Get())) {
          kernel_->persisted_info.ResetDownloadProgress(it.Get());
          kernel_->persisted_info.datatype_context[it.Get()].Clear();
        }
      }

      kernel_->info_status = KERNEL_SHARE_INFO_DIRTY;
    }
  }
  return true;
}
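// Forces BASE_VERSION and SERVER_VERSION of every child of |type|'s root back
// to 1 so the data is re-downloaded, while keeping unsynced local changes.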
bool Directory::ResetVersionsForType(BaseWriteTransaction* trans,
                                     ModelType type) {
  if (!ProtocolTypes().Has(type))
    return false;
  DCHECK_NE(type, BOOKMARKS) << "Only non-hierarchical types are supported";

  EntryKernel* type_root = GetEntryByServerTag(ModelTypeToRootTag(type));
  if (!type_root)
    return false;

  ScopedKernelLock lock(this);
  const Id& type_root_id = type_root->ref(ID);
  Directory::Metahandles children;
  AppendChildHandles(lock, type_root_id, &children);

  for (Metahandles::iterator it = children.begin(); it != children.end();
       ++it) {
    EntryKernel* entry = GetEntryByHandle(*it, &lock);
    if (!entry)
      continue;
    if (entry->ref(BASE_VERSION) > 1)
      entry->put(BASE_VERSION, 1);
    if (entry->ref(SERVER_VERSION) > 1)
      entry->put(SERVER_VERSION, 1);

    // Note that we do not unset IS_UNSYNCED or IS_UNAPPLIED_UPDATE in order
    // to ensure no in-transit data is lost.

    entry->mark_dirty(&kernel_->dirty_metahandles);
  }

  return true;
}
bool Directory::IsAttachmentLinked(
    const sync_pb::AttachmentIdProto& attachment_id_proto) const {
  ScopedKernelLock lock(this);
  IndexByAttachmentId::const_iterator iter =
      kernel_->index_by_attachment_id.find(attachment_id_proto.unique_id());
  if (iter != kernel_->index_by_attachment_id.end() && !iter->second.empty()) {
    return true;
  }
  return false;
}
void Directory::HandleSaveChangesFailure(const SaveChangesSnapshot& snapshot) {
  WriteTransaction trans(FROM_HERE, HANDLE_SAVE_FAILURE, this);
  ScopedKernelLock lock(this);
  kernel_->info_status = KERNEL_SHARE_INFO_DIRTY;

  // Because we optimistically cleared the dirty bit on the real entries when
  // taking the snapshot, we must restore it on failure.  Not doing this could
  // cause lost data, if no other changes are made to the in-memory entries
  // that would cause the dirty bit to get set again. Setting the bit ensures
  // that SaveChanges will at least try again later.
  for (EntryKernelSet::const_iterator i = snapshot.dirty_metas.begin();
       i != snapshot.dirty_metas.end(); ++i) {
    MetahandlesMap::iterator found =
        kernel_->metahandles_map.find((*i)->ref(META_HANDLE));
    if (found != kernel_->metahandles_map.end()) {
      found->second->mark_dirty(&kernel_->dirty_metahandles);
    }
  }

  kernel_->metahandles_to_purge.insert(snapshot.metahandles_to_purge.begin(),
                                       snapshot.metahandles_to_purge.end());

  // Restore delete journals.
  delete_journal_->AddJournalBatch(&trans, snapshot.delete_journals);
  delete_journal_->PurgeDeleteJournals(&trans,
                                       snapshot.delete_journals_to_purge);
}
void Directory::GetDownloadProgress(
    ModelType model_type,
    sync_pb::DataTypeProgressMarker* value_out) const {
  ScopedKernelLock lock(this);
  return value_out->CopyFrom(
      kernel_->persisted_info.download_progress[model_type]);
}
void Directory::GetDownloadProgressAsString(
    ModelType model_type,
    std::string* value_out) const {
  ScopedKernelLock lock(this);
  kernel_->persisted_info.download_progress[model_type].SerializeToString(
      value_out);
}
size_t Directory::GetEntriesCount() const {
  ScopedKernelLock lock(this);
  return kernel_->metahandles_map.size();
}
void Directory::SetDownloadProgress(
    ModelType model_type,
    const sync_pb::DataTypeProgressMarker& new_progress) {
  ScopedKernelLock lock(this);
  kernel_->persisted_info.download_progress[model_type].CopyFrom(new_progress);
  kernel_->info_status = KERNEL_SHARE_INFO_DIRTY;
}
int64 Directory::GetTransactionVersion(ModelType type) const {
  kernel_->transaction_mutex.AssertAcquired();
  return kernel_->persisted_info.transaction_version[type];
}
void Directory::IncrementTransactionVersion(ModelType type) {
  kernel_->transaction_mutex.AssertAcquired();
  kernel_->persisted_info.transaction_version[type]++;
}
void Directory::GetDataTypeContext(BaseTransaction* trans,
                                   ModelType type,
                                   sync_pb::DataTypeContext* context) const {
  ScopedKernelLock lock(this);
  context->CopyFrom(kernel_->persisted_info.datatype_context[type]);
}
void Directory::SetDataTypeContext(
    BaseWriteTransaction* trans,
    ModelType type,
    const sync_pb::DataTypeContext& context) {
  ScopedKernelLock lock(this);
  kernel_->persisted_info.datatype_context[type].CopyFrom(context);
  kernel_->info_status = KERNEL_SHARE_INFO_DIRTY;
}
ModelTypeSet Directory::InitialSyncEndedTypes() {
  syncable::ReadTransaction trans(FROM_HERE, this);
  ModelTypeSet protocol_types = ProtocolTypes();
  ModelTypeSet initial_sync_ended_types;
  for (ModelTypeSet::Iterator i = protocol_types.First(); i.Good(); i.Inc()) {
    if (InitialSyncEndedForType(&trans, i.Get())) {
      initial_sync_ended_types.Put(i.Get());
    }
  }
  return initial_sync_ended_types;
}
bool Directory::InitialSyncEndedForType(ModelType type) {
  syncable::ReadTransaction trans(FROM_HERE, this);
  return InitialSyncEndedForType(&trans, type);
}
bool Directory::InitialSyncEndedForType(
    BaseTransaction* trans, ModelType type) {
  // True iff the type's root node has been received and applied.
  syncable::Entry entry(trans, syncable::GET_TYPE_ROOT, type);
  return entry.good() && entry.GetBaseVersion() != CHANGES_VERSION;
}
string Directory::store_birthday() const {
  ScopedKernelLock lock(this);
  return kernel_->persisted_info.store_birthday;
}
void Directory::set_store_birthday(const string& store_birthday) {
  ScopedKernelLock lock(this);
  if (kernel_->persisted_info.store_birthday == store_birthday)
    return;
  kernel_->persisted_info.store_birthday = store_birthday;
  kernel_->info_status = KERNEL_SHARE_INFO_DIRTY;
}
string Directory::bag_of_chips() const {
  ScopedKernelLock lock(this);
  return kernel_->persisted_info.bag_of_chips;
}
void Directory::set_bag_of_chips(const string& bag_of_chips) {
  ScopedKernelLock lock(this);
  if (kernel_->persisted_info.bag_of_chips == bag_of_chips)
    return;
  kernel_->persisted_info.bag_of_chips = bag_of_chips;
  kernel_->info_status = KERNEL_SHARE_INFO_DIRTY;
}
string Directory::cache_guid() const {
  // No need to lock since nothing ever writes to it after load.
  return kernel_->cache_guid;
}
NigoriHandler* Directory::GetNigoriHandler() {
  return nigori_handler_;
}
Cryptographer* Directory::GetCryptographer(const BaseTransaction* trans) {
  DCHECK_EQ(this, trans->directory());
  return cryptographer_;
}
void Directory::GetAllMetaHandles(BaseTransaction* trans,
                                  MetahandleSet* result) {
  ScopedKernelLock lock(this);
  for (MetahandlesMap::iterator i = kernel_->metahandles_map.begin();
       i != kernel_->metahandles_map.end(); ++i) {
    result->insert(i->first);
  }
}
void Directory::GetUnsyncedMetaHandles(BaseTransaction* trans,
                                       Metahandles* result) {
  ScopedKernelLock lock(this);
  copy(kernel_->unsynced_metahandles.begin(),
       kernel_->unsynced_metahandles.end(), back_inserter(*result));
}
int64 Directory::unsynced_entity_count() const {
  ScopedKernelLock lock(this);
  return kernel_->unsynced_metahandles.size();
}
bool Directory::TypeHasUnappliedUpdates(ModelType type) {
  ScopedKernelLock lock(this);
  return !kernel_->unapplied_update_metahandles[type].empty();
}
void Directory::GetUnappliedUpdateMetaHandles(
    BaseTransaction* trans,
    FullModelTypeSet server_types,
    std::vector<int64>* result) {
  ScopedKernelLock lock(this);
  for (int i = UNSPECIFIED; i < MODEL_TYPE_COUNT; ++i) {
    const ModelType type = ModelTypeFromInt(i);
    if (server_types.Has(type)) {
      std::copy(kernel_->unapplied_update_metahandles[type].begin(),
                kernel_->unapplied_update_metahandles[type].end(),
                back_inserter(*result));
    }
  }
}
void Directory::GetMetaHandlesOfType(BaseTransaction* trans,
                                     ModelType type,
                                     std::vector<int64>* result) {
  ScopedKernelLock lock(this);
  for (MetahandlesMap::iterator it = kernel_->metahandles_map.begin();
       it != kernel_->metahandles_map.end(); ++it) {
    EntryKernel* entry = it->second;
    const ModelType entry_type =
        GetModelTypeFromSpecifics(entry->ref(SPECIFICS));
    if (entry_type == type)
      result->push_back(it->first);
  }
}
void Directory::CollectMetaHandleCounts(
    std::vector<int>* num_entries_by_type,
    std::vector<int>* num_to_delete_entries_by_type) {
  syncable::ReadTransaction trans(FROM_HERE, this);
  ScopedKernelLock lock(this);

  for (MetahandlesMap::iterator it = kernel_->metahandles_map.begin();
       it != kernel_->metahandles_map.end(); ++it) {
    EntryKernel* entry = it->second;
    const ModelType type = GetModelTypeFromSpecifics(entry->ref(SPECIFICS));
    (*num_entries_by_type)[type]++;
    if (entry->ref(IS_DEL))
      (*num_to_delete_entries_by_type)[type]++;
  }
}
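// Builds a list of DictionaryValues describing every entry of |type|,
// including each entry's position index when it maintains one.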
scoped_ptr<base::ListValue> Directory::GetNodeDetailsForType(
    BaseTransaction* trans,
    ModelType type) {
  scoped_ptr<base::ListValue> nodes(new base::ListValue());

  ScopedKernelLock lock(this);
  for (MetahandlesMap::iterator it = kernel_->metahandles_map.begin();
       it != kernel_->metahandles_map.end(); ++it) {
    if (GetModelTypeFromSpecifics(it->second->ref(SPECIFICS)) != type) {
      continue;
    }

    EntryKernel* kernel = it->second;
    scoped_ptr<base::DictionaryValue> node(
        kernel->ToValue(GetCryptographer(trans)));

    // Add the position index if appropriate.  This must be done here (and not
    // in EntryKernel) because the EntryKernel does not have access to its
    // siblings.
    if (kernel->ShouldMaintainPosition() && !kernel->ref(IS_DEL)) {
      node->SetInteger("positionIndex", GetPositionIndex(trans, kernel));
    }

    nodes->Append(node.release());
  }

  return nodes.Pass();
}
bool Directory::CheckInvariantsOnTransactionClose(
    syncable::BaseTransaction* trans,
    const MetahandleSet& modified_handles) {
  // NOTE: The trans may be in the process of being destructed.  Be careful if
  // you wish to call any of its virtual methods.
  switch (invariant_check_level_) {
    case FULL_DB_VERIFICATION: {
      MetahandleSet all_handles;
      GetAllMetaHandles(trans, &all_handles);
      return CheckTreeInvariants(trans, all_handles);
    }
    case VERIFY_CHANGES: {
      return CheckTreeInvariants(trans, modified_handles);
    }
    case OFF: {
      return true;
    }
  }
  NOTREACHED();
  return false;
}
bool Directory::FullyCheckTreeInvariants(syncable::BaseTransaction* trans) {
  MetahandleSet handles;
  GetAllMetaHandles(trans, &handles);
  return CheckTreeInvariants(trans, handles);
}
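// Walks the entries named in |handles| and verifies the structural invariants
// of the tree (parent links, versions, IDs and tags), returning false on the
// first violation.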
bool Directory::CheckTreeInvariants(syncable::BaseTransaction* trans,
                                    const MetahandleSet& handles) {
  MetahandleSet::const_iterator i;
  for (i = handles.begin() ; i != handles.end() ; ++i) {
    int64 metahandle = *i;
    Entry e(trans, GET_BY_HANDLE, metahandle);
    if (!SyncAssert(e.good(), FROM_HERE, "Entry is bad", trans))
      return false;
    syncable::Id id = e.GetId();
    syncable::Id parentid = e.GetParentId();

    if (id.IsRoot()) {
      if (!SyncAssert(e.GetIsDir(), FROM_HERE,
                      "Entry should be a directory",
                      trans))
        return false;
      if (!SyncAssert(parentid.IsRoot(), FROM_HERE,
                      "Entry should be root",
                      trans))
        return false;
      if (!SyncAssert(!e.GetIsUnsynced(), FROM_HERE,
                      "Entry should be synced",
                      trans))
        return false;
      continue;
    }

    if (!e.GetIsDel()) {
      if (!SyncAssert(id != parentid, FROM_HERE,
                      "Id should be different from parent id.",
                      trans))
        return false;
      if (!SyncAssert(!e.GetNonUniqueName().empty(), FROM_HERE,
                      "Non unique name should not be empty.",
                      trans))
        return false;
      int safety_count = handles.size() + 1;
      while (!parentid.IsRoot()) {
        Entry parent(trans, GET_BY_ID, parentid);
        if (!SyncAssert(parent.good(), FROM_HERE,
                        "Parent entry is not valid.",
                        trans))
          return false;
        if (handles.end() == handles.find(parent.GetMetahandle()))
          break;  // Skip further checking if parent was unmodified.
        if (!SyncAssert(parent.GetIsDir(), FROM_HERE,
                        "Parent should be a directory",
                        trans))
          return false;
        if (!SyncAssert(!parent.GetIsDel(), FROM_HERE,
                        "Parent should not have been marked for deletion.",
                        trans))
          return false;
        if (!SyncAssert(handles.end() != handles.find(parent.GetMetahandle()),
                        FROM_HERE,
                        "Parent should be in the index.",
                        trans))
          return false;
        parentid = parent.GetParentId();
        if (!SyncAssert(--safety_count > 0, FROM_HERE,
                        "Count should be greater than zero.",
                        trans))
          return false;
      }
    }

    int64 base_version = e.GetBaseVersion();
    int64 server_version = e.GetServerVersion();
    bool using_unique_client_tag = !e.GetUniqueClientTag().empty();
    if (CHANGES_VERSION == base_version || 0 == base_version) {
      if (e.GetIsUnappliedUpdate()) {
        // Must be a new item, or a de-duplicated unique client tag
        // that was created both locally and remotely.
        if (!using_unique_client_tag) {
          if (!SyncAssert(e.GetIsDel(), FROM_HERE,
                          "The entry should not have been deleted.",
                          trans))
            return false;
        }
        // It came from the server, so it must have a server ID.
        if (!SyncAssert(id.ServerKnows(), FROM_HERE,
                        "The id should be from a server.",
                        trans))
          return false;
      } else {
        if (e.GetIsDir()) {
          // TODO(chron): Implement this mode if clients ever need it.
          // For now, you can't combine a client tag and a directory.
          if (!SyncAssert(!using_unique_client_tag, FROM_HERE,
                          "Directory cannot have a client tag.",
                          trans))
            return false;
        }
        // Should be an uncommitted item, or a successfully deleted one.
        if (!e.GetIsDel()) {
          if (!SyncAssert(e.GetIsUnsynced(), FROM_HERE,
                          "The item should be unsynced.",
                          trans))
            return false;
        }
      }
      // If the next check failed, it would imply that an item exists
      // on the server, isn't waiting for application locally, but either
      // is an unsynced create or a successful delete in the local copy.
      // Either way, that's a mismatch.
      if (!SyncAssert(0 == server_version, FROM_HERE,
                      "Server version should be zero.",
                      trans))
        return false;
      // Items that aren't using the unique client tag should have a zero
      // base version only if they have a local ID.  Items with unique client
      // tags are allowed to use the zero base version for undeletion and
      // de-duplication; the unique client tag trumps the server ID.
      if (!using_unique_client_tag) {
        if (!SyncAssert(!id.ServerKnows(), FROM_HERE,
                        "Should be a client only id.",
                        trans))
          return false;
      }
    } else {
      if (!SyncAssert(id.ServerKnows(),
                      FROM_HERE,
                      "Should be a server id.",
                      trans))
        return false;
    }

    // Server-unknown items that are locally deleted should not be sent up to
    // the server.  They must be !IS_UNSYNCED.
    if (!SyncAssert(!(!id.ServerKnows() && e.GetIsDel() && e.GetIsUnsynced()),
                    FROM_HERE,
                    "Locally deleted item must not be unsynced.",
                    trans))
      return false;
  }
  return true;
}
void Directory::SetInvariantCheckLevel(InvariantCheckLevel check_level) {
  invariant_check_level_ = check_level;
}
int64 Directory::NextMetahandle() {
  ScopedKernelLock lock(this);
  int64 metahandle = (kernel_->next_metahandle)++;
  return metahandle;
}
// Always returns a client ID that is the string representation of a negative
// number.
Id Directory::NextId() {
  int64 result;
  {
    ScopedKernelLock lock(this);
    result = (kernel_->persisted_info.next_id)--;
    kernel_->info_status = KERNEL_SHARE_INFO_DIRTY;
  }
  DCHECK_LT(result, 0);
  return Id::CreateFromClientString(base::Int64ToString(result));
}
bool Directory::HasChildren(BaseTransaction* trans, const Id& id) {
  ScopedKernelLock lock(this);
  return kernel_->parent_child_index.GetChildren(id) != NULL;
}
Id Directory::GetFirstChildId(BaseTransaction* trans,
                              const EntryKernel* parent) {
  DCHECK(parent->ref(IS_DIR));

  ScopedKernelLock lock(this);
  const OrderedChildSet* children =
      kernel_->parent_child_index.GetChildren(parent->ref(ID));

  // We're expected to return root if there are no children.
  if (!children)
    return Id();

  return (*children->begin())->ref(ID);
}
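// Returns the ID of the sibling that orders immediately before |e| under the
// same parent, or the root ID if |e| is the first child.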
syncable::Id Directory::GetPredecessorId(EntryKernel* e) {
  ScopedKernelLock lock(this);

  DCHECK(ParentChildIndex::ShouldInclude(e));
  const OrderedChildSet* children =
      kernel_->parent_child_index.GetChildren(e->ref(PARENT_ID));
  DCHECK(children && !children->empty());
  OrderedChildSet::const_iterator i = children->find(e);
  DCHECK(i != children->end());

  if (i == children->begin()) {
    return Id();
  } else {
    i--;
    return (*i)->ref(ID);
  }
}
syncable::Id Directory::GetSuccessorId(EntryKernel* e) {
  ScopedKernelLock lock(this);

  DCHECK(ParentChildIndex::ShouldInclude(e));
  const OrderedChildSet* children =
      kernel_->parent_child_index.GetChildren(e->ref(PARENT_ID));
  DCHECK(children && !children->empty());
  OrderedChildSet::const_iterator i = children->find(e);
  DCHECK(i != children->end());

  i++;
  if (i == children->end()) {
    return Id();
  } else {
    return (*i)->ref(ID);
  }
}
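// Assigns |e| a UniquePosition that places it immediately after |predecessor|
// among its siblings (or first, when |predecessor| is the root).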
// TODO(rlarocque): Remove all support for placing ShouldMaintainPosition()
// items as siblings of items that do not maintain positions.  It is required
// only for tests.  See crbug.com/178282.
void Directory::PutPredecessor(EntryKernel* e, EntryKernel* predecessor) {
  DCHECK(!e->ref(IS_DEL));
  if (!e->ShouldMaintainPosition()) {
    DCHECK(!e->ref(UNIQUE_POSITION).IsValid());
    return;
  }
  std::string suffix = e->ref(UNIQUE_BOOKMARK_TAG);
  DCHECK(!suffix.empty());

  // Remove our item from the ParentChildIndex and remember to re-add it later.
  ScopedKernelLock lock(this);
  ScopedParentChildIndexUpdater updater(lock, e, &kernel_->parent_child_index);

  // Note: The ScopedParentChildIndexUpdater will update this set for us as we
  // leave this function.
  const OrderedChildSet* siblings =
      kernel_->parent_child_index.GetChildren(e->ref(PARENT_ID));

  if (!siblings) {
    // This parent currently has no other children.
    DCHECK(predecessor->ref(ID).IsRoot());
    UniquePosition pos = UniquePosition::InitialPosition(suffix);
    e->put(UNIQUE_POSITION, pos);
    return;
  }

  if (predecessor->ref(ID).IsRoot()) {
    // We have at least one sibling, and we're inserting to the left of them.
    UniquePosition successor_pos = (*siblings->begin())->ref(UNIQUE_POSITION);

    UniquePosition pos;
    if (!successor_pos.IsValid()) {
      // If all our successors are of non-positionable types, just create an
      // initial position.  We arbitrarily choose to sort invalid positions to
      // the right of the valid positions.
      //
      // We really shouldn't need to support this.  See TODO above.
      pos = UniquePosition::InitialPosition(suffix);
    } else {
      DCHECK(!siblings->empty());
      pos = UniquePosition::Before(successor_pos, suffix);
    }

    e->put(UNIQUE_POSITION, pos);
    return;
  }

  // We can't support placing an item after an invalid position.  Fortunately,
  // the tests don't exercise this particular case.  We should not support
  // siblings with invalid positions at all.  See TODO above.
  DCHECK(predecessor->ref(UNIQUE_POSITION).IsValid());

  OrderedChildSet::const_iterator neighbour = siblings->find(predecessor);
  DCHECK(neighbour != siblings->end());

  ++neighbour;
  if (neighbour == siblings->end()) {
    // Inserting at the end of the list.
    UniquePosition pos = UniquePosition::After(
        predecessor->ref(UNIQUE_POSITION),
        suffix);
    e->put(UNIQUE_POSITION, pos);
    return;
  }

  EntryKernel* successor = *neighbour;

  // Another mixed valid and invalid position case.  This one could be
  // supported in theory, but we're trying to deprecate support for siblings
  // with and without valid positions.  See TODO above.
  DCHECK(successor->ref(UNIQUE_POSITION).IsValid());

  // Finally, the normal case: inserting between two elements.
  UniquePosition pos = UniquePosition::Between(
      predecessor->ref(UNIQUE_POSITION),
      successor->ref(UNIQUE_POSITION),
      suffix);
  e->put(UNIQUE_POSITION, pos);
}
// TODO(rlarocque): Avoid this indirection.  Just return the set.
void Directory::AppendChildHandles(const ScopedKernelLock& lock,
                                   const Id& parent_id,
                                   Directory::Metahandles* result) {
  const OrderedChildSet* children =
      kernel_->parent_child_index.GetChildren(parent_id);
  if (!children)
    return;

  for (OrderedChildSet::const_iterator i = children->begin();
       i != children->end(); ++i) {
    DCHECK_EQ(parent_id, (*i)->ref(PARENT_ID));
    result->push_back((*i)->ref(META_HANDLE));
  }
}
void Directory::UnmarkDirtyEntry(WriteTransaction* trans, Entry* entry) {
  entry->kernel_->clear_dirty(&kernel_->dirty_metahandles);
}
}  // namespace syncable
}  // namespace syncer