// Copyright 2013 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "sync/syncable/directory.h"

#include "base/base64.h"
#include "base/stl_util.h"
#include "base/strings/string_number_conversions.h"
#include "base/trace_event/trace_event.h"
#include "sync/internal_api/public/base/attachment_id_proto.h"
#include "sync/internal_api/public/base/unique_position.h"
#include "sync/internal_api/public/util/unrecoverable_error_handler.h"
#include "sync/syncable/entry.h"
#include "sync/syncable/entry_kernel.h"
#include "sync/syncable/in_memory_directory_backing_store.h"
#include "sync/syncable/on_disk_directory_backing_store.h"
#include "sync/syncable/scoped_kernel_lock.h"
#include "sync/syncable/scoped_parent_child_index_updater.h"
#include "sync/syncable/syncable-inl.h"
#include "sync/syncable/syncable_base_transaction.h"
#include "sync/syncable/syncable_changes_version.h"
#include "sync/syncable/syncable_read_transaction.h"
#include "sync/syncable/syncable_util.h"
#include "sync/syncable/syncable_write_transaction.h"

using std::string;

namespace syncer {
namespace syncable {

const base::FilePath::CharType Directory::kSyncDatabaseFilename[] =
    FILE_PATH_LITERAL("SyncData.sqlite3");

Directory::PersistedKernelInfo::PersistedKernelInfo()
    : next_id(0) {
  ModelTypeSet protocol_types = ProtocolTypes();
  for (ModelTypeSet::Iterator iter = protocol_types.First(); iter.Good();
       iter.Inc()) {
    ResetDownloadProgress(iter.Get());
    transaction_version[iter.Get()] = 0;
  }
}

Directory::PersistedKernelInfo::~PersistedKernelInfo() {}

void Directory::PersistedKernelInfo::ResetDownloadProgress(
    ModelType model_type) {
  // Clear everything except the data type id field.
  download_progress[model_type].Clear();
  download_progress[model_type].set_data_type_id(
      GetSpecificsFieldNumberFromModelType(model_type));

  // Explicitly set an empty token field to denote no progress.
  download_progress[model_type].set_token("");
}

bool Directory::PersistedKernelInfo::HasEmptyDownloadProgress(
    ModelType model_type) {
  const sync_pb::DataTypeProgressMarker& progress_marker =
      download_progress[model_type];
  return progress_marker.token().empty();
}

Directory::SaveChangesSnapshot::SaveChangesSnapshot()
    : kernel_info_status(KERNEL_SHARE_INFO_INVALID) {
}

Directory::SaveChangesSnapshot::~SaveChangesSnapshot() {
  STLDeleteElements(&dirty_metas);
  STLDeleteElements(&delete_journals);
}

Directory::Kernel::Kernel(
    const std::string& name,
    const KernelLoadInfo& info,
    DirectoryChangeDelegate* delegate,
    const WeakHandle<TransactionObserver>& transaction_observer)
    : next_write_transaction_id(0),
      name(name),
      info_status(Directory::KERNEL_SHARE_INFO_VALID),
      persisted_info(info.kernel_info),
      cache_guid(info.cache_guid),
      next_metahandle(info.max_metahandle + 1),
      delegate(delegate),
      transaction_observer(transaction_observer) {
  DCHECK(transaction_observer.IsInitialized());
}

Directory::Kernel::~Kernel() {
  STLDeleteContainerPairSecondPointers(metahandles_map.begin(),
                                       metahandles_map.end());
}

Directory::Directory(
    DirectoryBackingStore* store,
    UnrecoverableErrorHandler* unrecoverable_error_handler,
    ReportUnrecoverableErrorFunction report_unrecoverable_error_function,
    NigoriHandler* nigori_handler,
    Cryptographer* cryptographer)
    : kernel_(NULL),
      store_(store),
      unrecoverable_error_handler_(unrecoverable_error_handler),
      report_unrecoverable_error_function_(
          report_unrecoverable_error_function),
      unrecoverable_error_set_(false),
      nigori_handler_(nigori_handler),
      cryptographer_(cryptographer),
      invariant_check_level_(VERIFY_CHANGES) {
}

Directory::~Directory() {
  Close();
}

DirOpenResult Directory::Open(
    const string& name,
    DirectoryChangeDelegate* delegate,
    const WeakHandle<TransactionObserver>& transaction_observer) {
  TRACE_EVENT0("sync", "SyncDatabaseOpen");

  const DirOpenResult result =
      OpenImpl(name, delegate, transaction_observer);

  if (OPENED != result)
    Close();

  return result;
}

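// Takes ownership of the freshly loaded entries in |handles_map| and rebuilds
// the in-memory indices (parent/child, unsynced, unapplied-update, tag, ID and
// attachment indices) from them.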
void Directory::InitializeIndices(MetahandlesMap* handles_map) {
  ScopedKernelLock lock(this);
  kernel_->metahandles_map.swap(*handles_map);
  for (MetahandlesMap::const_iterator it = kernel_->metahandles_map.begin();
       it != kernel_->metahandles_map.end(); ++it) {
    EntryKernel* entry = it->second;
    if (ParentChildIndex::ShouldInclude(entry))
      kernel_->parent_child_index.Insert(entry);
    const int64 metahandle = entry->ref(META_HANDLE);
    if (entry->ref(IS_UNSYNCED))
      kernel_->unsynced_metahandles.insert(metahandle);
    if (entry->ref(IS_UNAPPLIED_UPDATE)) {
      const ModelType type = entry->GetServerModelType();
      kernel_->unapplied_update_metahandles[type].insert(metahandle);
    }
    if (!entry->ref(UNIQUE_SERVER_TAG).empty()) {
      DCHECK(kernel_->server_tags_map.find(entry->ref(UNIQUE_SERVER_TAG)) ==
             kernel_->server_tags_map.end())
          << "Unexpected duplicate use of server tag";
      kernel_->server_tags_map[entry->ref(UNIQUE_SERVER_TAG)] = entry;
    }
    if (!entry->ref(UNIQUE_CLIENT_TAG).empty()) {
      DCHECK(kernel_->client_tags_map.find(entry->ref(UNIQUE_CLIENT_TAG)) ==
             kernel_->client_tags_map.end())
          << "Unexpected duplicate use of client tag";
      kernel_->client_tags_map[entry->ref(UNIQUE_CLIENT_TAG)] = entry;
    }
    DCHECK(kernel_->ids_map.find(entry->ref(ID).value()) ==
           kernel_->ids_map.end()) << "Unexpected duplicate use of ID";
    kernel_->ids_map[entry->ref(ID).value()] = entry;
    DCHECK(!entry->is_dirty());
    AddToAttachmentIndex(lock, metahandle, entry->ref(ATTACHMENT_METADATA));
  }
}

DirOpenResult Directory::OpenImpl(
    const string& name,
    DirectoryChangeDelegate* delegate,
    const WeakHandle<TransactionObserver>&
        transaction_observer) {
  KernelLoadInfo info;
  // Temporary indices before kernel_ initialized in case Load fails. We O(1)
  // swap these later.
  Directory::MetahandlesMap tmp_handles_map;

  // Avoids mem leaks on failure. Harmlessly deletes the empty hash map after
  // the swap in the success case.
  STLValueDeleter<MetahandlesMap> deleter(&tmp_handles_map);

  JournalIndex delete_journals;
  MetahandleSet metahandles_to_purge;

  DirOpenResult result = store_->Load(&tmp_handles_map, &delete_journals,
                                      &metahandles_to_purge, &info);
  if (OPENED != result)
    return result;

  kernel_ = new Kernel(name, info, delegate, transaction_observer);
  delete_journal_.reset(new DeleteJournal(&delete_journals));
  InitializeIndices(&tmp_handles_map);

  // Write back the share info to reserve some space in 'next_id'. This will
  // prevent local ID reuse in the case of an early crash. See the comments in
  // TakeSnapshotForSaveChanges() or crbug.com/142987 for more information.
  kernel_->info_status = KERNEL_SHARE_INFO_DIRTY;

  kernel_->metahandles_to_purge.swap(metahandles_to_purge);

  if (!SaveChanges())
    return FAILED_INITIAL_WRITE;

  return OPENED;
}

DeleteJournal* Directory::delete_journal() {
  DCHECK(delete_journal_.get());
  return delete_journal_.get();
}

void Directory::Close() {
  store_ = NULL;
  if (kernel_) {
    delete kernel_;
    kernel_ = NULL;
  }
}

void Directory::OnUnrecoverableError(const BaseTransaction* trans,
                                     const tracked_objects::Location& location,
                                     const std::string& message) {
  DCHECK(trans != NULL);
  unrecoverable_error_set_ = true;
  unrecoverable_error_handler_->OnUnrecoverableError(location,
                                                     message);
}

EntryKernel* Directory::GetEntryById(const Id& id) {
  ScopedKernelLock lock(this);
  return GetEntryById(lock, id);
}

EntryKernel* Directory::GetEntryById(const ScopedKernelLock& lock,
                                     const Id& id) {
  // Find it in the in memory ID index.
  IdsMap::iterator id_found = kernel_->ids_map.find(id.value());
  if (id_found != kernel_->ids_map.end()) {
    return id_found->second;
  }
  return NULL;
}

EntryKernel* Directory::GetEntryByClientTag(const string& tag) {
  ScopedKernelLock lock(this);

  TagsMap::iterator it = kernel_->client_tags_map.find(tag);
  if (it != kernel_->client_tags_map.end()) {
    return it->second;
  }
  return NULL;
}

EntryKernel* Directory::GetEntryByServerTag(const string& tag) {
  ScopedKernelLock lock(this);

  TagsMap::iterator it = kernel_->server_tags_map.find(tag);
  if (it != kernel_->server_tags_map.end()) {
    return it->second;
  }
  return NULL;
}

EntryKernel* Directory::GetEntryByHandle(int64 metahandle) {
  ScopedKernelLock lock(this);
  return GetEntryByHandle(lock, metahandle);
}

EntryKernel* Directory::GetEntryByHandle(const ScopedKernelLock& lock,
                                         int64 metahandle) {
  MetahandlesMap::iterator found =
      kernel_->metahandles_map.find(metahandle);
  if (found != kernel_->metahandles_map.end()) {
    // Found it in memory. Easy.
    return found->second;
  }
  return NULL;
}

bool Directory::GetChildHandlesById(
    BaseTransaction* trans, const Id& parent_id,
    Directory::Metahandles* result) {
  if (!SyncAssert(this == trans->directory(), FROM_HERE,
                  "Directories don't match", trans))
    return false;
  result->clear();

  ScopedKernelLock lock(this);
  AppendChildHandles(lock, parent_id, result);
  return true;
}

int Directory::GetTotalNodeCount(
    BaseTransaction* trans,
    EntryKernel* kernel) const {
  if (!SyncAssert(this == trans->directory(), FROM_HERE,
                  "Directories don't match", trans))
    return 0;

  int count = 1;
  std::deque<const OrderedChildSet*> child_sets;

  GetChildSetForKernel(trans, kernel, &child_sets);
  while (!child_sets.empty()) {
    const OrderedChildSet* set = child_sets.front();
    child_sets.pop_front();
    for (OrderedChildSet::const_iterator it = set->begin();
         it != set->end(); ++it) {
      count++;
      GetChildSetForKernel(trans, *it, &child_sets);
    }
  }

  return count;
}

void Directory::GetChildSetForKernel(
    BaseTransaction* trans,
    EntryKernel* kernel,
    std::deque<const OrderedChildSet*>* child_sets) const {
  if (!kernel->ref(IS_DIR))
    return;  // Not a directory => no children.

  const OrderedChildSet* descendants =
      kernel_->parent_child_index.GetChildren(kernel->ref(ID));
  if (!descendants)
    return;  // This directory has no children.

  // Add our children to the list of items to be traversed.
  child_sets->push_back(descendants);
}

int Directory::GetPositionIndex(
    BaseTransaction* trans,
    EntryKernel* kernel) const {
  const OrderedChildSet* siblings =
      kernel_->parent_child_index.GetSiblings(kernel);

  OrderedChildSet::const_iterator it = siblings->find(kernel);
  return std::distance(siblings->begin(), it);
}

bool Directory::InsertEntry(BaseWriteTransaction* trans, EntryKernel* entry) {
  ScopedKernelLock lock(this);
  return InsertEntry(lock, trans, entry);
}

bool Directory::InsertEntry(const ScopedKernelLock& lock,
                            BaseWriteTransaction* trans,
                            EntryKernel* entry) {
  if (!SyncAssert(NULL != entry, FROM_HERE, "Entry is null", trans))
    return false;

  static const char error[] = "Entry already in memory index.";

  if (!SyncAssert(
          kernel_->metahandles_map.insert(
              std::make_pair(entry->ref(META_HANDLE), entry)).second,
          FROM_HERE,
          error,
          trans)) {
    return false;
  }
  if (!SyncAssert(
          kernel_->ids_map.insert(
              std::make_pair(entry->ref(ID).value(), entry)).second,
          FROM_HERE,
          error,
          trans)) {
    return false;
  }
  if (ParentChildIndex::ShouldInclude(entry)) {
    if (!SyncAssert(kernel_->parent_child_index.Insert(entry),
                    FROM_HERE,
                    error,
                    trans)) {
      return false;
    }
  }
  AddToAttachmentIndex(
      lock, entry->ref(META_HANDLE), entry->ref(ATTACHMENT_METADATA));

  // Should NEVER be created with a client tag or server tag.
  if (!SyncAssert(entry->ref(UNIQUE_SERVER_TAG).empty(), FROM_HERE,
                  "Server tag should be empty", trans)) {
    return false;
  }
  if (!SyncAssert(entry->ref(UNIQUE_CLIENT_TAG).empty(), FROM_HERE,
                  "Client tag should be empty", trans))
    return false;

  return true;
}

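// Changes the entry's ID and updates the ID-keyed index; fails if |new_id| is
// already in use.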
bool Directory::ReindexId(BaseWriteTransaction* trans,
                          EntryKernel* const entry,
                          const Id& new_id) {
  ScopedKernelLock lock(this);
  if (NULL != GetEntryById(lock, new_id))
    return false;

  // Update the indices that depend on the ID field.
  ScopedParentChildIndexUpdater updater_b(lock, entry,
      &kernel_->parent_child_index);
  size_t num_erased = kernel_->ids_map.erase(entry->ref(ID).value());
  DCHECK_EQ(1U, num_erased);
  entry->put(ID, new_id);
  kernel_->ids_map[entry->ref(ID).value()] = entry;
  return true;
}

bool Directory::ReindexParentId(BaseWriteTransaction* trans,
                                EntryKernel* const entry,
                                const Id& new_parent_id) {
  ScopedKernelLock lock(this);

  // Update the indices that depend on the PARENT_ID field.
  ScopedParentChildIndexUpdater index_updater(lock, entry,
      &kernel_->parent_child_index);
  entry->put(PARENT_ID, new_parent_id);
  return true;
}

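// Removes |metahandle| from the attachment-id index entry of every attachment
// referenced by |attachment_metadata|, dropping index entries that become
// empty.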
void Directory::RemoveFromAttachmentIndex(
    const ScopedKernelLock& lock,
    const int64 metahandle,
    const sync_pb::AttachmentMetadata& attachment_metadata) {
  for (int i = 0; i < attachment_metadata.record_size(); ++i) {
    AttachmentIdUniqueId unique_id =
        attachment_metadata.record(i).id().unique_id();
    IndexByAttachmentId::iterator iter =
        kernel_->index_by_attachment_id.find(unique_id);
    if (iter != kernel_->index_by_attachment_id.end()) {
      iter->second.erase(metahandle);
      if (iter->second.empty()) {
        kernel_->index_by_attachment_id.erase(iter);
      }
    }
  }
}

void Directory::AddToAttachmentIndex(
    const ScopedKernelLock& lock,
    const int64 metahandle,
    const sync_pb::AttachmentMetadata& attachment_metadata) {
  for (int i = 0; i < attachment_metadata.record_size(); ++i) {
    AttachmentIdUniqueId unique_id =
        attachment_metadata.record(i).id().unique_id();
    IndexByAttachmentId::iterator iter =
        kernel_->index_by_attachment_id.find(unique_id);
    if (iter == kernel_->index_by_attachment_id.end()) {
      iter = kernel_->index_by_attachment_id.insert(std::make_pair(
          unique_id,
          MetahandleSet())).first;
    }
    iter->second.insert(metahandle);
  }
}

void Directory::UpdateAttachmentIndex(
    const int64 metahandle,
    const sync_pb::AttachmentMetadata& old_metadata,
    const sync_pb::AttachmentMetadata& new_metadata) {
  ScopedKernelLock lock(this);
  RemoveFromAttachmentIndex(lock, metahandle, old_metadata);
  AddToAttachmentIndex(lock, metahandle, new_metadata);
}

void Directory::GetMetahandlesByAttachmentId(
    BaseTransaction* trans,
    const sync_pb::AttachmentIdProto& attachment_id_proto,
    Metahandles* result) {
  ScopedKernelLock lock(this);
  IndexByAttachmentId::const_iterator index_iter =
      kernel_->index_by_attachment_id.find(attachment_id_proto.unique_id());
  if (index_iter == kernel_->index_by_attachment_id.end())
    return;
  const MetahandleSet& metahandle_set = index_iter->second;
  std::copy(
      metahandle_set.begin(), metahandle_set.end(), back_inserter(*result));
}

bool Directory::unrecoverable_error_set(const BaseTransaction* trans) const {
  DCHECK(trans != NULL);
  return unrecoverable_error_set_;
}

void Directory::ClearDirtyMetahandles(const ScopedKernelLock& lock) {
  kernel_->transaction_mutex.AssertAcquired();
  kernel_->dirty_metahandles.clear();
}

bool Directory::SafeToPurgeFromMemory(WriteTransaction* trans,
                                      const EntryKernel* const entry) const {
  bool safe = entry->ref(IS_DEL) && !entry->is_dirty() &&
      !entry->ref(SYNCING) && !entry->ref(IS_UNAPPLIED_UPDATE) &&
      !entry->ref(IS_UNSYNCED);

  if (safe) {
    int64 handle = entry->ref(META_HANDLE);
    const ModelType type = entry->GetServerModelType();
    if (!SyncAssert(kernel_->dirty_metahandles.count(handle) == 0U,
                    FROM_HERE,
                    "Dirty metahandles should be empty", trans))
      return false;
    // TODO(tim): Bug 49278.
    if (!SyncAssert(!kernel_->unsynced_metahandles.count(handle),
                    FROM_HERE,
                    "Unsynced handles should be empty",
                    trans))
      return false;
    if (!SyncAssert(!kernel_->unapplied_update_metahandles[type].count(handle),
                    FROM_HERE,
                    "Unapplied metahandles should be empty",
                    trans))
      return false;
  }

  return safe;
}

void Directory::TakeSnapshotForSaveChanges(SaveChangesSnapshot* snapshot) {
  ReadTransaction trans(FROM_HERE, this);
  ScopedKernelLock lock(this);

  // If there is an unrecoverable error then just bail out.
  if (unrecoverable_error_set(&trans))
    return;

  // Deep copy dirty entries from kernel_->metahandles_index into snapshot and
  // clear dirty flags.
  for (MetahandleSet::const_iterator i = kernel_->dirty_metahandles.begin();
       i != kernel_->dirty_metahandles.end(); ++i) {
    EntryKernel* entry = GetEntryByHandle(lock, *i);
    if (!entry)
      continue;
    // Skip over false positives; it happens relatively infrequently.
    if (!entry->is_dirty())
      continue;
    snapshot->dirty_metas.insert(snapshot->dirty_metas.end(),
                                 new EntryKernel(*entry));
    DCHECK_EQ(1U, kernel_->dirty_metahandles.count(*i));
    // We don't bother removing from the index here as we blow the entire thing
    // in a moment, and it unnecessarily complicates iteration.
    entry->clear_dirty(NULL);
  }
  ClearDirtyMetahandles(lock);

  // Set purged handles.
  DCHECK(snapshot->metahandles_to_purge.empty());
  snapshot->metahandles_to_purge.swap(kernel_->metahandles_to_purge);

  // Fill kernel_info_status and kernel_info.
  snapshot->kernel_info = kernel_->persisted_info;
  // To avoid duplicates when the process crashes, we record the next_id to be
  // greater in magnitude than could possibly be reached before the next save
  // changes. In other words, it's effectively impossible for the user to
  // generate 65536 new bookmarks in 3 seconds.
  snapshot->kernel_info.next_id -= 65536;
  snapshot->kernel_info_status = kernel_->info_status;
  // This one we reset on failure.
  kernel_->info_status = KERNEL_SHARE_INFO_VALID;

  delete_journal_->TakeSnapshotAndClear(
      &trans, &snapshot->delete_journals, &snapshot->delete_journals_to_purge);
}

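// Persists the current snapshot to the backing store. On success, purgeable
// entries are vacuumed from memory; on failure, the dirty state is restored
// so a later SaveChanges() can retry.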
bool Directory::SaveChanges() {
  bool success = false;

  base::AutoLock scoped_lock(kernel_->save_changes_mutex);

  // Snapshot and save.
  SaveChangesSnapshot snapshot;
  TakeSnapshotForSaveChanges(&snapshot);
  success = store_->SaveChanges(snapshot);

  // Handle success or failure.
  if (success)
    success = VacuumAfterSaveChanges(snapshot);
  else
    HandleSaveChangesFailure(snapshot);
  return success;
}

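// Drops from memory the just-saved entries that are deleted, clean and fully
// synced (see SafeToPurgeFromMemory()).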
bool Directory::VacuumAfterSaveChanges(const SaveChangesSnapshot& snapshot) {
  if (snapshot.dirty_metas.empty())
    return true;

  // Need a write transaction as we are about to permanently purge entries.
  WriteTransaction trans(FROM_HERE, VACUUM_AFTER_SAVE, this);
  ScopedKernelLock lock(this);
  // Now drop everything we can out of memory.
  for (EntryKernelSet::const_iterator i = snapshot.dirty_metas.begin();
       i != snapshot.dirty_metas.end(); ++i) {
    MetahandlesMap::iterator found =
        kernel_->metahandles_map.find((*i)->ref(META_HANDLE));
    EntryKernel* entry = (found == kernel_->metahandles_map.end() ?
                          NULL : found->second);
    if (entry && SafeToPurgeFromMemory(&trans, entry)) {
      // We now drop deleted metahandles that are up to date on both the client
      // and the server.
      size_t num_erased = 0;
      num_erased = kernel_->metahandles_map.erase(entry->ref(META_HANDLE));
      DCHECK_EQ(1u, num_erased);
      num_erased = kernel_->ids_map.erase(entry->ref(ID).value());
      DCHECK_EQ(1u, num_erased);
      if (!entry->ref(UNIQUE_SERVER_TAG).empty()) {
        num_erased =
            kernel_->server_tags_map.erase(entry->ref(UNIQUE_SERVER_TAG));
        DCHECK_EQ(1u, num_erased);
      }
      if (!entry->ref(UNIQUE_CLIENT_TAG).empty()) {
        num_erased =
            kernel_->client_tags_map.erase(entry->ref(UNIQUE_CLIENT_TAG));
        DCHECK_EQ(1u, num_erased);
      }
      if (!SyncAssert(!kernel_->parent_child_index.Contains(entry),
                      FROM_HERE,
                      "Deleted entry still present",
                      (&trans)))
        return false;
      RemoveFromAttachmentIndex(
          lock, entry->ref(META_HANDLE), entry->ref(ATTACHMENT_METADATA));

      delete entry;
    }
    if (trans.unrecoverable_error_set())
      return false;
  }
  return true;
}

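// Resets |entry| so that it looks as if it had just been received from the
// server and never applied locally (see the comment at the end of this
// function).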
void Directory::UnapplyEntry(EntryKernel* entry) {
  int64 handle = entry->ref(META_HANDLE);
  ModelType server_type = GetModelTypeFromSpecifics(
      entry->ref(SERVER_SPECIFICS));

  // Clear enough so that on the next sync cycle all local data will
  // be overwritten.
  // Note: do not modify the root node in order to preserve the
  // initial sync ended bit for this type (else on the next restart
  // this type will be treated as disabled and therefore fully purged).
  if (IsRealDataType(server_type) &&
      ModelTypeToRootTag(server_type) == entry->ref(UNIQUE_SERVER_TAG)) {
    return;
  }

  // Set the unapplied bit if this item has server data.
  if (IsRealDataType(server_type) && !entry->ref(IS_UNAPPLIED_UPDATE)) {
    entry->put(IS_UNAPPLIED_UPDATE, true);
    kernel_->unapplied_update_metahandles[server_type].insert(handle);
    entry->mark_dirty(&kernel_->dirty_metahandles);
  }

  // Unset the unsynced bit.
  if (entry->ref(IS_UNSYNCED)) {
    kernel_->unsynced_metahandles.erase(handle);
    entry->put(IS_UNSYNCED, false);
    entry->mark_dirty(&kernel_->dirty_metahandles);
  }

  // Mark the item as locally deleted. No deleted items are allowed in the
  // parent child index.
  if (!entry->ref(IS_DEL)) {
    kernel_->parent_child_index.Remove(entry);
    entry->put(IS_DEL, true);
    entry->mark_dirty(&kernel_->dirty_metahandles);
  }

  // Set the version to the "newly created" version.
  if (entry->ref(BASE_VERSION) != CHANGES_VERSION) {
    entry->put(BASE_VERSION, CHANGES_VERSION);
    entry->mark_dirty(&kernel_->dirty_metahandles);
  }

  // At this point locally created items that aren't synced will become locally
  // deleted items, and purged on the next snapshot. All other items will match
  // the state they would have had if they were just created via a server
  // update. See MutableEntry::MutableEntry(.., CreateNewUpdateItem, ..).
}

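// Removes |entry| from all in-memory indices and schedules its metahandle for
// purging from the backing store. When |save_to_journal| is set the kernel is
// handed to |entries_to_journal| instead of being freed.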
void Directory::DeleteEntry(const ScopedKernelLock& lock,
                            bool save_to_journal,
                            EntryKernel* entry,
                            EntryKernelSet* entries_to_journal) {
  int64 handle = entry->ref(META_HANDLE);
  ModelType server_type = GetModelTypeFromSpecifics(
      entry->ref(SERVER_SPECIFICS));

  kernel_->metahandles_to_purge.insert(handle);

  size_t num_erased = 0;
  num_erased = kernel_->metahandles_map.erase(entry->ref(META_HANDLE));
  DCHECK_EQ(1u, num_erased);
  num_erased = kernel_->ids_map.erase(entry->ref(ID).value());
  DCHECK_EQ(1u, num_erased);
  num_erased = kernel_->unsynced_metahandles.erase(handle);
  DCHECK_EQ(entry->ref(IS_UNSYNCED), num_erased > 0);
  num_erased =
      kernel_->unapplied_update_metahandles[server_type].erase(handle);
  DCHECK_EQ(entry->ref(IS_UNAPPLIED_UPDATE), num_erased > 0);
  if (kernel_->parent_child_index.Contains(entry))
    kernel_->parent_child_index.Remove(entry);

  if (!entry->ref(UNIQUE_CLIENT_TAG).empty()) {
    num_erased =
        kernel_->client_tags_map.erase(entry->ref(UNIQUE_CLIENT_TAG));
    DCHECK_EQ(1u, num_erased);
  }
  if (!entry->ref(UNIQUE_SERVER_TAG).empty()) {
    num_erased =
        kernel_->server_tags_map.erase(entry->ref(UNIQUE_SERVER_TAG));
    DCHECK_EQ(1u, num_erased);
  }
  RemoveFromAttachmentIndex(lock, handle, entry->ref(ATTACHMENT_METADATA));

  if (save_to_journal) {
    entries_to_journal->insert(entry);
  } else {
    delete entry;
  }
}

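// Removes all entries whose local or server type is in |disabled_types|.
// Types in |types_to_unapply| are unapplied rather than deleted, and deletions
// of types in |types_to_journal| are recorded in the delete journal.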
bool Directory::PurgeEntriesWithTypeIn(ModelTypeSet disabled_types,
                                       ModelTypeSet types_to_journal,
                                       ModelTypeSet types_to_unapply) {
  disabled_types.RemoveAll(ProxyTypes());

  if (disabled_types.Empty())
    return true;

  WriteTransaction trans(FROM_HERE, PURGE_ENTRIES, this);

  EntryKernelSet entries_to_journal;
  STLElementDeleter<EntryKernelSet> journal_deleter(&entries_to_journal);

  ScopedKernelLock lock(this);

  bool found_progress = false;
  for (ModelTypeSet::Iterator iter = disabled_types.First(); iter.Good();
       iter.Inc()) {
    if (!kernel_->persisted_info.HasEmptyDownloadProgress(iter.Get()))
      found_progress = true;
  }

  // If none of the disabled types have progress markers, there's nothing to
  // purge.
  if (!found_progress)
    return true;

  // We iterate in two passes to avoid a bug in STLport (which is used in
  // the Android build). There are some versions of that library where a
  // hash_map's iterators can be invalidated when an item is erased from the
  // hash_map.
  // See http://sourceforge.net/p/stlport/bugs/239/.
  std::set<EntryKernel*> to_purge;
  for (MetahandlesMap::iterator it = kernel_->metahandles_map.begin();
       it != kernel_->metahandles_map.end(); ++it) {
    const sync_pb::EntitySpecifics& local_specifics =
        it->second->ref(SPECIFICS);
    const sync_pb::EntitySpecifics& server_specifics =
        it->second->ref(SERVER_SPECIFICS);
    ModelType local_type = GetModelTypeFromSpecifics(local_specifics);
    ModelType server_type = GetModelTypeFromSpecifics(server_specifics);

    if ((IsRealDataType(local_type) && disabled_types.Has(local_type)) ||
        (IsRealDataType(server_type) && disabled_types.Has(server_type))) {
      to_purge.insert(it->second);
    }
  }

  for (std::set<EntryKernel*>::iterator it = to_purge.begin();
       it != to_purge.end(); ++it) {
    EntryKernel* entry = *it;

    const sync_pb::EntitySpecifics& local_specifics =
        (*it)->ref(SPECIFICS);
    const sync_pb::EntitySpecifics& server_specifics =
        (*it)->ref(SERVER_SPECIFICS);
    ModelType local_type = GetModelTypeFromSpecifics(local_specifics);
    ModelType server_type = GetModelTypeFromSpecifics(server_specifics);

    if (types_to_unapply.Has(local_type) ||
        types_to_unapply.Has(server_type)) {
      UnapplyEntry(entry);
    } else {
      bool save_to_journal =
          (types_to_journal.Has(local_type) ||
           types_to_journal.Has(server_type)) &&
          (delete_journal_->IsDeleteJournalEnabled(local_type) ||
           delete_journal_->IsDeleteJournalEnabled(server_type));
      DeleteEntry(lock, save_to_journal, entry, &entries_to_journal);
    }
  }

  delete_journal_->AddJournalBatch(&trans, entries_to_journal);

  // Ensure meta tracking for these data types reflects the purged state.
  for (ModelTypeSet::Iterator it = disabled_types.First();
       it.Good(); it.Inc()) {
    kernel_->persisted_info.transaction_version[it.Get()] = 0;

    // Don't discard progress markers or context for unapplied types.
    if (!types_to_unapply.Has(it.Get())) {
      kernel_->persisted_info.ResetDownloadProgress(it.Get());
      kernel_->persisted_info.datatype_context[it.Get()].Clear();
    }
  }

  kernel_->info_status = KERNEL_SHARE_INFO_DIRTY;

  return true;
}

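// Clamps BASE_VERSION and SERVER_VERSION of every child of |type|'s root to 1
// while preserving IS_UNSYNCED and IS_UNAPPLIED_UPDATE, so no in-transit data
// is lost.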
bool Directory::ResetVersionsForType(BaseWriteTransaction* trans,
                                     ModelType type) {
  if (!ProtocolTypes().Has(type))
    return false;
  DCHECK_NE(type, BOOKMARKS) << "Only non-hierarchical types are supported";

  EntryKernel* type_root = GetEntryByServerTag(ModelTypeToRootTag(type));
  if (!type_root)
    return false;

  ScopedKernelLock lock(this);
  const Id& type_root_id = type_root->ref(ID);
  Directory::Metahandles children;
  AppendChildHandles(lock, type_root_id, &children);

  for (Metahandles::iterator it = children.begin(); it != children.end();
       ++it) {
    EntryKernel* entry = GetEntryByHandle(lock, *it);
    if (!entry)
      continue;
    if (entry->ref(BASE_VERSION) > 1)
      entry->put(BASE_VERSION, 1);
    if (entry->ref(SERVER_VERSION) > 1)
      entry->put(SERVER_VERSION, 1);

    // Note that we do not unset IS_UNSYNCED or IS_UNAPPLIED_UPDATE in order
    // to ensure no in-transit data is lost.

    entry->mark_dirty(&kernel_->dirty_metahandles);
  }

  return true;
}

bool Directory::IsAttachmentLinked(
    const sync_pb::AttachmentIdProto& attachment_id_proto) const {
  ScopedKernelLock lock(this);
  IndexByAttachmentId::const_iterator iter =
      kernel_->index_by_attachment_id.find(attachment_id_proto.unique_id());
  if (iter != kernel_->index_by_attachment_id.end() && !iter->second.empty()) {
    return true;
  }
  return false;
}

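// Rolls back the bookkeeping done by TakeSnapshotForSaveChanges() after a
// failed save: dirty bits, the purge set and the delete journals are restored.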
void Directory::HandleSaveChangesFailure(const SaveChangesSnapshot& snapshot) {
  WriteTransaction trans(FROM_HERE, HANDLE_SAVE_FAILURE, this);
  ScopedKernelLock lock(this);
  kernel_->info_status = KERNEL_SHARE_INFO_DIRTY;

  // Because we optimistically cleared the dirty bit on the real entries when
  // taking the snapshot, we must restore it on failure. Not doing this could
  // cause lost data, if no other changes are made to the in-memory entries
  // that would cause the dirty bit to get set again. Setting the bit ensures
  // that SaveChanges will at least try again later.
  for (EntryKernelSet::const_iterator i = snapshot.dirty_metas.begin();
       i != snapshot.dirty_metas.end(); ++i) {
    MetahandlesMap::iterator found =
        kernel_->metahandles_map.find((*i)->ref(META_HANDLE));
    if (found != kernel_->metahandles_map.end()) {
      found->second->mark_dirty(&kernel_->dirty_metahandles);
    }
  }

  kernel_->metahandles_to_purge.insert(snapshot.metahandles_to_purge.begin(),
                                       snapshot.metahandles_to_purge.end());

  // Restore delete journals.
  delete_journal_->AddJournalBatch(&trans, snapshot.delete_journals);
  delete_journal_->PurgeDeleteJournals(&trans,
                                       snapshot.delete_journals_to_purge);
}

void Directory::GetDownloadProgress(
    ModelType model_type,
    sync_pb::DataTypeProgressMarker* value_out) const {
  ScopedKernelLock lock(this);
  return value_out->CopyFrom(
      kernel_->persisted_info.download_progress[model_type]);
}

void Directory::GetDownloadProgressAsString(
    ModelType model_type,
    std::string* value_out) const {
  ScopedKernelLock lock(this);
  kernel_->persisted_info.download_progress[model_type].SerializeToString(
      value_out);
}

size_t Directory::GetEntriesCount() const {
  ScopedKernelLock lock(this);
  return kernel_->metahandles_map.size();
}

void Directory::SetDownloadProgress(
    ModelType model_type,
    const sync_pb::DataTypeProgressMarker& new_progress) {
  ScopedKernelLock lock(this);
  kernel_->persisted_info.download_progress[model_type].CopyFrom(new_progress);
  kernel_->info_status = KERNEL_SHARE_INFO_DIRTY;
}

bool Directory::HasEmptyDownloadProgress(ModelType type) const {
  ScopedKernelLock lock(this);
  return kernel_->persisted_info.HasEmptyDownloadProgress(type);
}

int64 Directory::GetTransactionVersion(ModelType type) const {
  kernel_->transaction_mutex.AssertAcquired();
  return kernel_->persisted_info.transaction_version[type];
}

void Directory::IncrementTransactionVersion(ModelType type) {
  kernel_->transaction_mutex.AssertAcquired();
  kernel_->persisted_info.transaction_version[type]++;
}

void Directory::GetDataTypeContext(BaseTransaction* trans,
                                   ModelType type,
                                   sync_pb::DataTypeContext* context) const {
  ScopedKernelLock lock(this);
  context->CopyFrom(kernel_->persisted_info.datatype_context[type]);
}

void Directory::SetDataTypeContext(
    BaseWriteTransaction* trans,
    ModelType type,
    const sync_pb::DataTypeContext& context) {
  ScopedKernelLock lock(this);
  kernel_->persisted_info.datatype_context[type].CopyFrom(context);
  kernel_->info_status = KERNEL_SHARE_INFO_DIRTY;
}

// TODO(stanisc): crbug.com/438313: change these to not rely on the folders.
ModelTypeSet Directory::InitialSyncEndedTypes() {
  syncable::ReadTransaction trans(FROM_HERE, this);
  ModelTypeSet protocol_types = ProtocolTypes();
  ModelTypeSet initial_sync_ended_types;
  for (ModelTypeSet::Iterator i = protocol_types.First(); i.Good(); i.Inc()) {
    if (InitialSyncEndedForType(&trans, i.Get())) {
      initial_sync_ended_types.Put(i.Get());
    }
  }
  return initial_sync_ended_types;
}

bool Directory::InitialSyncEndedForType(ModelType type) {
  syncable::ReadTransaction trans(FROM_HERE, this);
  return InitialSyncEndedForType(&trans, type);
}

bool Directory::InitialSyncEndedForType(
    BaseTransaction* trans, ModelType type) {
  // True iff the type's root node has been received and applied.
  syncable::Entry entry(trans, syncable::GET_TYPE_ROOT, type);
  return entry.good() && entry.GetBaseVersion() != CHANGES_VERSION;
}

string Directory::store_birthday() const {
  ScopedKernelLock lock(this);
  return kernel_->persisted_info.store_birthday;
}

void Directory::set_store_birthday(const string& store_birthday) {
  ScopedKernelLock lock(this);
  if (kernel_->persisted_info.store_birthday == store_birthday)
    return;
  kernel_->persisted_info.store_birthday = store_birthday;
  kernel_->info_status = KERNEL_SHARE_INFO_DIRTY;
}

string Directory::bag_of_chips() const {
  ScopedKernelLock lock(this);
  return kernel_->persisted_info.bag_of_chips;
}

void Directory::set_bag_of_chips(const string& bag_of_chips) {
  ScopedKernelLock lock(this);
  if (kernel_->persisted_info.bag_of_chips == bag_of_chips)
    return;
  kernel_->persisted_info.bag_of_chips = bag_of_chips;
  kernel_->info_status = KERNEL_SHARE_INFO_DIRTY;
}

string Directory::cache_guid() const {
  // No need to lock since nothing ever writes to it after load.
  return kernel_->cache_guid;
}

NigoriHandler* Directory::GetNigoriHandler() {
  return nigori_handler_;
}

Cryptographer* Directory::GetCryptographer(const BaseTransaction* trans) {
  DCHECK_EQ(this, trans->directory());
  return cryptographer_;
}

void Directory::GetAllMetaHandles(BaseTransaction* trans,
                                  MetahandleSet* result) {
  ScopedKernelLock lock(this);
  for (MetahandlesMap::iterator i = kernel_->metahandles_map.begin();
       i != kernel_->metahandles_map.end(); ++i) {
    result->insert(i->first);
  }
}

void Directory::GetUnsyncedMetaHandles(BaseTransaction* trans,
                                       Metahandles* result) {
  ScopedKernelLock lock(this);
  copy(kernel_->unsynced_metahandles.begin(),
       kernel_->unsynced_metahandles.end(), back_inserter(*result));
}

int64 Directory::unsynced_entity_count() const {
  ScopedKernelLock lock(this);
  return kernel_->unsynced_metahandles.size();
}

bool Directory::TypeHasUnappliedUpdates(ModelType type) {
  ScopedKernelLock lock(this);
  return !kernel_->unapplied_update_metahandles[type].empty();
}

void Directory::GetUnappliedUpdateMetaHandles(
    BaseTransaction* trans,
    FullModelTypeSet server_types,
    std::vector<int64>* result) {
  ScopedKernelLock lock(this);
  for (int i = UNSPECIFIED; i < MODEL_TYPE_COUNT; ++i) {
    const ModelType type = ModelTypeFromInt(i);
    if (server_types.Has(type)) {
      std::copy(kernel_->unapplied_update_metahandles[type].begin(),
                kernel_->unapplied_update_metahandles[type].end(),
                back_inserter(*result));
    }
  }
}

void Directory::GetMetaHandlesOfType(BaseTransaction* trans,
                                     ModelType type,
                                     std::vector<int64>* result) {
  ScopedKernelLock lock(this);
  GetMetaHandlesOfType(lock, trans, type, result);
}

void Directory::GetMetaHandlesOfType(const ScopedKernelLock& lock,
                                     BaseTransaction* trans,
                                     ModelType type,
                                     std::vector<int64>* result) {
  for (MetahandlesMap::iterator it = kernel_->metahandles_map.begin();
       it != kernel_->metahandles_map.end(); ++it) {
    EntryKernel* entry = it->second;
    const ModelType entry_type =
        GetModelTypeFromSpecifics(entry->ref(SPECIFICS));
    if (entry_type == type)
      result->push_back(it->first);
  }
}

void Directory::CollectMetaHandleCounts(
    std::vector<int>* num_entries_by_type,
    std::vector<int>* num_to_delete_entries_by_type) {
  syncable::ReadTransaction trans(FROM_HERE, this);
  ScopedKernelLock lock(this);

  for (MetahandlesMap::iterator it = kernel_->metahandles_map.begin();
       it != kernel_->metahandles_map.end(); ++it) {
    EntryKernel* entry = it->second;
    const ModelType type = GetModelTypeFromSpecifics(entry->ref(SPECIFICS));
    (*num_entries_by_type)[type]++;
    if (entry->ref(IS_DEL))
      (*num_to_delete_entries_by_type)[type]++;
  }
}

scoped_ptr<base::ListValue> Directory::GetNodeDetailsForType(
    BaseTransaction* trans,
    ModelType type) {
  scoped_ptr<base::ListValue> nodes(new base::ListValue());

  ScopedKernelLock lock(this);
  for (MetahandlesMap::iterator it = kernel_->metahandles_map.begin();
       it != kernel_->metahandles_map.end(); ++it) {
    if (GetModelTypeFromSpecifics(it->second->ref(SPECIFICS)) != type) {
      continue;
    }

    EntryKernel* kernel = it->second;
    scoped_ptr<base::DictionaryValue> node(
        kernel->ToValue(GetCryptographer(trans)));

    // Add the position index if appropriate. This must be done here (and not
    // in EntryKernel) because the EntryKernel does not have access to its
    // siblings.
    if (kernel->ShouldMaintainPosition() && !kernel->ref(IS_DEL)) {
      node->SetInteger("positionIndex", GetPositionIndex(trans, kernel));
    }

    nodes->Append(node.release());
  }

  return nodes.Pass();
}

bool Directory::CheckInvariantsOnTransactionClose(
    syncable::BaseTransaction* trans,
    const MetahandleSet& modified_handles) {
  // NOTE: The trans may be in the process of being destructed. Be careful if
  // you wish to call any of its virtual methods.
  switch (invariant_check_level_) {
    case FULL_DB_VERIFICATION: {
      MetahandleSet all_handles;
      GetAllMetaHandles(trans, &all_handles);
      return CheckTreeInvariants(trans, all_handles);
    }
    case VERIFY_CHANGES: {
      return CheckTreeInvariants(trans, modified_handles);
    }
    case OFF: {
      return true;
    }
  }
  NOTREACHED();
  return false;
}

bool Directory::FullyCheckTreeInvariants(syncable::BaseTransaction* trans) {
  MetahandleSet handles;
  GetAllMetaHandles(trans, &handles);
  return CheckTreeInvariants(trans, handles);
}

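// Verifies structural invariants (valid parents, version/ID consistency,
// client-tag rules) for every entry in |handles|; returns false on the first
// violation.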
bool Directory::CheckTreeInvariants(syncable::BaseTransaction* trans,
                                    const MetahandleSet& handles) {
  MetahandleSet::const_iterator i;
  for (i = handles.begin(); i != handles.end(); ++i) {
    int64 metahandle = *i;
    Entry e(trans, GET_BY_HANDLE, metahandle);
    if (!SyncAssert(e.good(), FROM_HERE, "Entry is bad", trans))
      return false;
    syncable::Id id = e.GetId();
    syncable::Id parentid = e.GetParentId();

    if (id.IsRoot()) {
      if (!SyncAssert(e.GetIsDir(), FROM_HERE,
                      "Entry should be a directory",
                      trans))
        return false;
      if (!SyncAssert(parentid.IsRoot(), FROM_HERE,
                      "Entry should be root",
                      trans))
        return false;
      if (!SyncAssert(!e.GetIsUnsynced(), FROM_HERE, "Entry should be synced",
                      trans))
        return false;
      continue;
    }

    if (!e.GetIsDel()) {
      if (!SyncAssert(id != parentid, FROM_HERE,
                      "Id should be different from parent id.",
                      trans))
        return false;
      if (!SyncAssert(!e.GetNonUniqueName().empty(), FROM_HERE,
                      "Non unique name should not be empty.",
                      trans))
        return false;

      if (!parentid.IsNull()) {
        int safety_count = handles.size() + 1;
        while (!parentid.IsRoot()) {
          Entry parent(trans, GET_BY_ID, parentid);
          if (!SyncAssert(parent.good(), FROM_HERE,
                          "Parent entry is not valid.", trans))
            return false;
          if (handles.end() == handles.find(parent.GetMetahandle()))
            break;  // Skip further checking if parent was unmodified.
          if (!SyncAssert(parent.GetIsDir(), FROM_HERE,
                          "Parent should be a directory", trans))
            return false;
          if (!SyncAssert(!parent.GetIsDel(), FROM_HERE,
                          "Parent should not have been marked for deletion.",
                          trans))
            return false;
          if (!SyncAssert(handles.end() != handles.find(parent.GetMetahandle()),
                          FROM_HERE, "Parent should be in the index.", trans))
            return false;
          parentid = parent.GetParentId();
          if (!SyncAssert(--safety_count > 0, FROM_HERE,
                          "Count should be greater than zero.", trans))
            return false;
        }
      }
    }

    int64 base_version = e.GetBaseVersion();
    int64 server_version = e.GetServerVersion();
    bool using_unique_client_tag = !e.GetUniqueClientTag().empty();
    if (CHANGES_VERSION == base_version || 0 == base_version) {
      ModelType model_type = e.GetModelType();
      bool is_client_creatable_type_root_folder =
          parentid.IsRoot() &&
          IsTypeWithClientGeneratedRoot(model_type) &&
          e.GetUniqueServerTag() == ModelTypeToRootTag(model_type);
      if (e.GetIsUnappliedUpdate()) {
        // Must be a new item, or a de-duplicated unique client tag
        // that was created both locally and remotely, or a type root folder
        // that was created both locally and remotely.
        if (!(using_unique_client_tag ||
              is_client_creatable_type_root_folder)) {
          if (!SyncAssert(e.GetIsDel(), FROM_HERE,
                          "The entry should have been deleted.", trans))
            return false;
        }
        // It came from the server, so it must have a server ID.
        if (!SyncAssert(id.ServerKnows(), FROM_HERE,
                        "The id should be from a server.",
                        trans))
          return false;
      } else {
        if (e.GetIsDir()) {
          // TODO(chron): Implement this mode if clients ever need it.
          // For now, you can't combine a client tag and a directory.
          if (!SyncAssert(!using_unique_client_tag, FROM_HERE,
                          "Directory cannot have a client tag.",
                          trans))
            return false;
        }

        if (is_client_creatable_type_root_folder) {
          // This must be a locally created type root folder.
          if (!SyncAssert(
                  !e.GetIsUnsynced(), FROM_HERE,
                  "Locally created type root folders should not be unsynced.",
                  trans))
            return false;

          if (!SyncAssert(
                  !e.GetIsDel(), FROM_HERE,
                  "Locally created type root folders should not be deleted.",
                  trans))
            return false;
        } else {
          // Should be an uncommitted item, or a successfully deleted one.
          if (!e.GetIsDel()) {
            if (!SyncAssert(e.GetIsUnsynced(), FROM_HERE,
                            "The item should be unsynced.", trans))
              return false;
          }
        }
        // If the next check failed, it would imply that an item exists
        // on the server, isn't waiting for application locally, but either
        // is an unsynced create or a successful delete in the local copy.
        // Either way, that's a mismatch.
        if (!SyncAssert(0 == server_version, FROM_HERE,
                        "Server version should be zero.",
                        trans))
          return false;
        // Items that aren't using the unique client tag should have a zero
        // base version only if they have a local ID. Items with unique client
        // tags are allowed to use the zero base version for undeletion and
        // de-duplication; the unique client tag trumps the server ID.
        if (!using_unique_client_tag) {
          if (!SyncAssert(!id.ServerKnows(), FROM_HERE,
                          "Should be a client only id.",
                          trans))
            return false;
        }
      }
    } else {
      if (!SyncAssert(id.ServerKnows(),
                      FROM_HERE,
                      "Should be a server id.",
                      trans))
        return false;
    }

    // Previously we would assert that locally deleted items that have never
    // been synced must not be sent to the server (IS_UNSYNCED must be false).
    // This is not always true in the case that an item is deleted while the
    // initial commit is in flight. See crbug.com/426865.
  }
  return true;
}

void Directory::SetInvariantCheckLevel(InvariantCheckLevel check_level) {
  invariant_check_level_ = check_level;
}

int64 Directory::NextMetahandle() {
  ScopedKernelLock lock(this);
  int64 metahandle = (kernel_->next_metahandle)++;
  return metahandle;
}

// Always returns a client ID that is the string representation of a negative
// number.
Id Directory::NextId() {
  int64 result;
  {
    ScopedKernelLock lock(this);
    result = (kernel_->persisted_info.next_id)--;
    kernel_->info_status = KERNEL_SHARE_INFO_DIRTY;
  }
  DCHECK_LT(result, 0);
  return Id::CreateFromClientString(base::Int64ToString(result));
}

bool Directory::HasChildren(BaseTransaction* trans, const Id& id) {
  ScopedKernelLock lock(this);
  return kernel_->parent_child_index.GetChildren(id) != NULL;
}

Id Directory::GetFirstChildId(BaseTransaction* trans,
                              const EntryKernel* parent) {
  DCHECK(parent->ref(IS_DIR));

  ScopedKernelLock lock(this);
  const OrderedChildSet* children =
      kernel_->parent_child_index.GetChildren(parent->ref(ID));

  // We're expected to return root if there are no children.
  if (!children)
    return Id();

  return (*children->begin())->ref(ID);
}

syncable::Id Directory::GetPredecessorId(EntryKernel* e) {
  ScopedKernelLock lock(this);

  DCHECK(ParentChildIndex::ShouldInclude(e));
  const OrderedChildSet* siblings = kernel_->parent_child_index.GetSiblings(e);
  OrderedChildSet::const_iterator i = siblings->find(e);
  DCHECK(i != siblings->end());

  if (i == siblings->begin()) {
    return Id();
  } else {
    i--;
    return (*i)->ref(ID);
  }
}

syncable::Id Directory::GetSuccessorId(EntryKernel* e) {
  ScopedKernelLock lock(this);

  DCHECK(ParentChildIndex::ShouldInclude(e));
  const OrderedChildSet* siblings = kernel_->parent_child_index.GetSiblings(e);
  OrderedChildSet::const_iterator i = siblings->find(e);
  DCHECK(i != siblings->end());

  i++;
  if (i == siblings->end()) {
    return Id();
  } else {
    return (*i)->ref(ID);
  }
}

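// Positions |e| immediately after |predecessor| among its siblings by
// assigning a new UNIQUE_POSITION; a NULL predecessor means insert first.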
// TODO(rlarocque): Remove all support for placing ShouldMaintainPosition()
// items as siblings of items that do not maintain positions. It is required
// only for tests. See crbug.com/178282.
void Directory::PutPredecessor(EntryKernel* e, EntryKernel* predecessor) {
  DCHECK(!e->ref(IS_DEL));
  if (!e->ShouldMaintainPosition()) {
    DCHECK(!e->ref(UNIQUE_POSITION).IsValid());
    return;
  }
  std::string suffix = e->ref(UNIQUE_BOOKMARK_TAG);
  DCHECK(!suffix.empty());

  // Remove our item from the ParentChildIndex and remember to re-add it later.
  ScopedKernelLock lock(this);
  ScopedParentChildIndexUpdater updater(lock, e, &kernel_->parent_child_index);

  // Note: The ScopedParentChildIndexUpdater will update this set for us as we
  // leave this function.
  const OrderedChildSet* siblings =
      kernel_->parent_child_index.GetChildren(e->ref(PARENT_ID));

  if (!siblings) {
    // This parent currently has no other children.
    DCHECK(predecessor == NULL);
    UniquePosition pos = UniquePosition::InitialPosition(suffix);
    e->put(UNIQUE_POSITION, pos);
    return;
  }

  if (predecessor == NULL) {
    // We have at least one sibling, and we're inserting to the left of them.
    UniquePosition successor_pos = (*siblings->begin())->ref(UNIQUE_POSITION);

    UniquePosition pos;
    if (!successor_pos.IsValid()) {
      // If all our successors are of non-positionable types, just create an
      // initial position. We arbitrarily choose to sort invalid positions to
      // the right of the valid positions.
      //
      // We really shouldn't need to support this. See TODO above.
      pos = UniquePosition::InitialPosition(suffix);
    } else {
      DCHECK(!siblings->empty());
      pos = UniquePosition::Before(successor_pos, suffix);
    }

    e->put(UNIQUE_POSITION, pos);
    return;
  }

  // We can't support placing an item after an invalid position. Fortunately,
  // the tests don't exercise this particular case. We should not support
  // siblings with invalid positions at all. See TODO above.
  DCHECK(predecessor->ref(UNIQUE_POSITION).IsValid());

  OrderedChildSet::const_iterator neighbour = siblings->find(predecessor);
  DCHECK(neighbour != siblings->end());

  ++neighbour;
  if (neighbour == siblings->end()) {
    // Inserting at the end of the list.
    UniquePosition pos = UniquePosition::After(
        predecessor->ref(UNIQUE_POSITION),
        suffix);
    e->put(UNIQUE_POSITION, pos);
    return;
  }

  EntryKernel* successor = *neighbour;

  // Another mixed valid and invalid position case. This one could be
  // supported in theory, but we're trying to deprecate support for siblings
  // with and without valid positions. See TODO above.
  DCHECK(successor->ref(UNIQUE_POSITION).IsValid());

  // Finally, the normal case: inserting between two elements.
  UniquePosition pos = UniquePosition::Between(
      predecessor->ref(UNIQUE_POSITION),
      successor->ref(UNIQUE_POSITION),
      suffix);
  e->put(UNIQUE_POSITION, pos);
}

// TODO(rlarocque): Avoid this indirection. Just return the set.
void Directory::AppendChildHandles(const ScopedKernelLock& lock,
                                   const Id& parent_id,
                                   Directory::Metahandles* result) {
  const OrderedChildSet* children =
      kernel_->parent_child_index.GetChildren(parent_id);
  if (!children)
    return;

  for (OrderedChildSet::const_iterator i = children->begin();
       i != children->end(); ++i) {
    result->push_back((*i)->ref(META_HANDLE));
  }
}

void Directory::UnmarkDirtyEntry(WriteTransaction* trans, Entry* entry) {
  entry->kernel_->clear_dirty(&kernel_->dirty_metahandles);
}

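// Fills |id_set| with the ids of attachments, referenced by entries of |type|,
// that are not yet known to be on the server.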
void Directory::GetAttachmentIdsToUpload(BaseTransaction* trans,
                                         ModelType type,
                                         AttachmentIdSet* id_set) {
  // TODO(maniscalco): Maintain an index by ModelType and rewrite this method to
  // use it. The approach below is likely very expensive because it iterates
  // all entries (bug 415199).
  AttachmentIdSet on_server_id_set;
  AttachmentIdSet not_on_server_id_set;
  std::vector<int64> metahandles;
  {
    ScopedKernelLock lock(this);
    GetMetaHandlesOfType(lock, trans, type, &metahandles);
    std::vector<int64>::const_iterator iter = metahandles.begin();
    const std::vector<int64>::const_iterator end = metahandles.end();
    // For all of this type's entries...
    for (; iter != end; ++iter) {
      EntryKernel* entry = GetEntryByHandle(lock, *iter);
      const sync_pb::AttachmentMetadata metadata =
          entry->ref(ATTACHMENT_METADATA);
      // for each of this entry's attachments...
      for (int i = 0; i < metadata.record_size(); ++i) {
        AttachmentId id =
            AttachmentId::CreateFromProto(metadata.record(i).id());
        // if this attachment is known to be on the server, remember it for
        // later,
        if (metadata.record(i).is_on_server()) {
          on_server_id_set.insert(id);
        } else {
          // otherwise, add it to id_set.
          not_on_server_id_set.insert(id);
        }
      }
    }
  }

  // Why did we bother keeping a set of ids known to be on the server? The
  // is_on_server flag is stored denormalized so we can end up with two entries
  // with the same attachment id where one says it's on the server and the other
  // says it's not. When this happens, we trust the one that says it's on the
  // server. To avoid re-uploading the same attachment multiple times, we
  // remove any ids known to be on the server from the id_set we are about to
  // return.
  //
  // TODO(maniscalco): Eliminate redundant metadata storage (bug 415203).
  std::set_difference(not_on_server_id_set.begin(),
                      not_on_server_id_set.end(),
                      on_server_id_set.begin(),
                      on_server_id_set.end(),
                      std::inserter(*id_set, id_set->end()));
}

}  // namespace syncable
}  // namespace syncer