// Copyright 2013 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "sync/syncable/directory.h"

#include <algorithm>
#include <iterator>

#include "base/base64.h"
#include "base/stl_util.h"
#include "base/strings/string_number_conversions.h"
#include "base/trace_event/trace_event.h"
#include "sync/internal_api/public/base/attachment_id_proto.h"
#include "sync/internal_api/public/base/unique_position.h"
#include "sync/internal_api/public/util/unrecoverable_error_handler.h"
#include "sync/syncable/entry.h"
#include "sync/syncable/entry_kernel.h"
#include "sync/syncable/in_memory_directory_backing_store.h"
#include "sync/syncable/on_disk_directory_backing_store.h"
#include "sync/syncable/scoped_kernel_lock.h"
#include "sync/syncable/scoped_parent_child_index_updater.h"
#include "sync/syncable/syncable-inl.h"
#include "sync/syncable/syncable_base_transaction.h"
#include "sync/syncable/syncable_changes_version.h"
#include "sync/syncable/syncable_read_transaction.h"
#include "sync/syncable/syncable_util.h"
#include "sync/syncable/syncable_write_transaction.h"

using std::string;

namespace syncer {
namespace syncable {

// static
const base::FilePath::CharType Directory::kSyncDatabaseFilename[] =
    FILE_PATH_LITERAL("SyncData.sqlite3");
Directory::PersistedKernelInfo::PersistedKernelInfo()
    : next_id(0) {
  ModelTypeSet protocol_types = ProtocolTypes();
  for (ModelTypeSet::Iterator iter = protocol_types.First(); iter.Good();
       iter.Inc()) {
    ResetDownloadProgress(iter.Get());
    transaction_version[iter.Get()] = 0;
  }
}

Directory::PersistedKernelInfo::~PersistedKernelInfo() {}

void Directory::PersistedKernelInfo::ResetDownloadProgress(
    ModelType model_type) {
  // Clear everything except the data type id field.
  download_progress[model_type].Clear();
  download_progress[model_type].set_data_type_id(
      GetSpecificsFieldNumberFromModelType(model_type));

  // Explicitly set an empty token field to denote no progress.
  download_progress[model_type].set_token("");
}

bool Directory::PersistedKernelInfo::HasEmptyDownloadProgress(
    ModelType model_type) {
  const sync_pb::DataTypeProgressMarker& progress_marker =
      download_progress[model_type];
  return progress_marker.token().empty();
}

Directory::SaveChangesSnapshot::SaveChangesSnapshot()
    : kernel_info_status(KERNEL_SHARE_INFO_INVALID) {
}

Directory::SaveChangesSnapshot::~SaveChangesSnapshot() {
  STLDeleteElements(&dirty_metas);
  STLDeleteElements(&delete_journals);
}
Directory::Kernel::Kernel(
    const std::string& name,
    const KernelLoadInfo& info,
    DirectoryChangeDelegate* delegate,
    const WeakHandle<TransactionObserver>& transaction_observer)
    : next_write_transaction_id(0),
      name(name),
      info_status(Directory::KERNEL_SHARE_INFO_VALID),
      persisted_info(info.kernel_info),
      cache_guid(info.cache_guid),
      next_metahandle(info.max_metahandle + 1),
      delegate(delegate),
      transaction_observer(transaction_observer) {
  DCHECK(delegate);
  DCHECK(transaction_observer.IsInitialized());
}

Directory::Kernel::~Kernel() {
  STLDeleteContainerPairSecondPointers(metahandles_map.begin(),
                                       metahandles_map.end());
}
Directory::Directory(
    DirectoryBackingStore* store,
    UnrecoverableErrorHandler* unrecoverable_error_handler,
    ReportUnrecoverableErrorFunction report_unrecoverable_error_function,
    NigoriHandler* nigori_handler,
    Cryptographer* cryptographer)
    : kernel_(NULL),
      store_(store),
      unrecoverable_error_handler_(unrecoverable_error_handler),
      report_unrecoverable_error_function_(
          report_unrecoverable_error_function),
      unrecoverable_error_set_(false),
      nigori_handler_(nigori_handler),
      cryptographer_(cryptographer),
      invariant_check_level_(VERIFY_CHANGES) {
}

Directory::~Directory() {
  Close();
}
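
// Typical usage (sketch): construct a Directory with a backing store and the
// various handlers, then Open() it before use. On any result other than
// OPENED, Open() has already called Close().
//
//   Directory dir(store, error_handler, report_fn, nigori_handler, crypto);
//   if (dir.Open(name, delegate, transaction_observer) != OPENED) {
//     // Handle the failure; the Directory is closed.
//   }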
DirOpenResult Directory::Open(
    const string& name,
    DirectoryChangeDelegate* delegate,
    const WeakHandle<TransactionObserver>& transaction_observer) {
  TRACE_EVENT0("sync", "SyncDatabaseOpen");

  const DirOpenResult result =
      OpenImpl(name, delegate, transaction_observer);

  if (OPENED != result)
    Close();
  return result;
}
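
// Rebuilds every in-memory index (parent-child, unsynced, unapplied-update,
// server/client tag, ID, and attachment indices) from the metahandles map
// that was just loaded from the backing store.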
void Directory::InitializeIndices(MetahandlesMap* handles_map) {
  ScopedKernelLock lock(this);
  kernel_->metahandles_map.swap(*handles_map);
  for (MetahandlesMap::const_iterator it = kernel_->metahandles_map.begin();
       it != kernel_->metahandles_map.end(); ++it) {
    EntryKernel* entry = it->second;
    if (ParentChildIndex::ShouldInclude(entry))
      kernel_->parent_child_index.Insert(entry);
    const int64 metahandle = entry->ref(META_HANDLE);
    if (entry->ref(IS_UNSYNCED))
      kernel_->unsynced_metahandles.insert(metahandle);
    if (entry->ref(IS_UNAPPLIED_UPDATE)) {
      const ModelType type = entry->GetServerModelType();
      kernel_->unapplied_update_metahandles[type].insert(metahandle);
    }
    if (!entry->ref(UNIQUE_SERVER_TAG).empty()) {
      DCHECK(kernel_->server_tags_map.find(entry->ref(UNIQUE_SERVER_TAG)) ==
             kernel_->server_tags_map.end())
          << "Unexpected duplicate use of server tag";
      kernel_->server_tags_map[entry->ref(UNIQUE_SERVER_TAG)] = entry;
    }
    if (!entry->ref(UNIQUE_CLIENT_TAG).empty()) {
      DCHECK(kernel_->client_tags_map.find(entry->ref(UNIQUE_CLIENT_TAG)) ==
             kernel_->client_tags_map.end())
          << "Unexpected duplicate use of client tag";
      kernel_->client_tags_map[entry->ref(UNIQUE_CLIENT_TAG)] = entry;
    }
    DCHECK(kernel_->ids_map.find(entry->ref(ID).value()) ==
           kernel_->ids_map.end()) << "Unexpected duplicate use of ID";
    kernel_->ids_map[entry->ref(ID).value()] = entry;
    DCHECK(!entry->is_dirty());
    AddToAttachmentIndex(lock, metahandle, entry->ref(ATTACHMENT_METADATA));
  }
}
DirOpenResult Directory::OpenImpl(
    const string& name,
    DirectoryChangeDelegate* delegate,
    const WeakHandle<TransactionObserver>& transaction_observer) {
  KernelLoadInfo info;
  // Temporary indices, used before kernel_ is initialized in case Load fails.
  // We O(1)-swap these into place later.
  Directory::MetahandlesMap tmp_handles_map;

  // Avoids mem leaks on failure. Harmlessly deletes the empty hash map after
  // the swap in the success case.
  STLValueDeleter<MetahandlesMap> deleter(&tmp_handles_map);

  JournalIndex delete_journals;
  MetahandleSet metahandles_to_purge;

  DirOpenResult result = store_->Load(&tmp_handles_map, &delete_journals,
                                      &metahandles_to_purge, &info);
  if (OPENED != result)
    return result;

  DCHECK(!kernel_);
  kernel_ = new Kernel(name, info, delegate, transaction_observer);
  delete_journal_.reset(new DeleteJournal(&delete_journals));
  InitializeIndices(&tmp_handles_map);

  // Write back the share info to reserve some space in 'next_id'. This will
  // prevent local ID reuse in the case of an early crash. See the comments in
  // TakeSnapshotForSaveChanges() or crbug.com/142987 for more information.
  kernel_->info_status = KERNEL_SHARE_INFO_DIRTY;

  kernel_->metahandles_to_purge.swap(metahandles_to_purge);
  if (!SaveChanges())
    return FAILED_INITIAL_WRITE;

  return OPENED;
}
209 DeleteJournal* Directory::delete_journal() {
210 DCHECK(delete_journal_.get());
211 return delete_journal_.get();
214 void Directory::Close() {
215 store_.reset();
216 if (kernel_) {
217 delete kernel_;
218 kernel_ = NULL;
void Directory::OnUnrecoverableError(const BaseTransaction* trans,
                                     const tracked_objects::Location& location,
                                     const std::string& message) {
  DCHECK(trans != NULL);
  unrecoverable_error_set_ = true;
  unrecoverable_error_handler_->OnUnrecoverableError(location,
                                                     message);
}
231 EntryKernel* Directory::GetEntryById(const Id& id) {
232 ScopedKernelLock lock(this);
233 return GetEntryById(lock, id);
236 EntryKernel* Directory::GetEntryById(const ScopedKernelLock& lock,
237 const Id& id) {
238 DCHECK(kernel_);
239 // Find it in the in memory ID index.
240 IdsMap::iterator id_found = kernel_->ids_map.find(id.value());
241 if (id_found != kernel_->ids_map.end()) {
242 return id_found->second;
244 return NULL;
247 EntryKernel* Directory::GetEntryByClientTag(const string& tag) {
248 ScopedKernelLock lock(this);
249 DCHECK(kernel_);
251 TagsMap::iterator it = kernel_->client_tags_map.find(tag);
252 if (it != kernel_->client_tags_map.end()) {
253 return it->second;
255 return NULL;
258 EntryKernel* Directory::GetEntryByServerTag(const string& tag) {
259 ScopedKernelLock lock(this);
260 DCHECK(kernel_);
261 TagsMap::iterator it = kernel_->server_tags_map.find(tag);
262 if (it != kernel_->server_tags_map.end()) {
263 return it->second;
265 return NULL;
268 EntryKernel* Directory::GetEntryByHandle(int64 metahandle) {
269 ScopedKernelLock lock(this);
270 return GetEntryByHandle(lock, metahandle);
273 EntryKernel* Directory::GetEntryByHandle(const ScopedKernelLock& lock,
274 int64 metahandle) {
275 // Look up in memory
276 MetahandlesMap::iterator found =
277 kernel_->metahandles_map.find(metahandle);
278 if (found != kernel_->metahandles_map.end()) {
279 // Found it in memory. Easy.
280 return found->second;
282 return NULL;
285 bool Directory::GetChildHandlesById(
286 BaseTransaction* trans, const Id& parent_id,
287 Directory::Metahandles* result) {
288 if (!SyncAssert(this == trans->directory(), FROM_HERE,
289 "Directories don't match", trans))
290 return false;
291 result->clear();
293 ScopedKernelLock lock(this);
294 AppendChildHandles(lock, parent_id, result);
295 return true;
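
// Counts |kernel| itself plus all of its descendants by walking the
// parent-child index breadth-first.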
int Directory::GetTotalNodeCount(
    BaseTransaction* trans,
    EntryKernel* kernel) const {
  if (!SyncAssert(this == trans->directory(), FROM_HERE,
                  "Directories don't match", trans))
    return 0;

  int count = 1;
  std::deque<const OrderedChildSet*> child_sets;

  GetChildSetForKernel(trans, kernel, &child_sets);
  while (!child_sets.empty()) {
    const OrderedChildSet* set = child_sets.front();
    child_sets.pop_front();
    for (OrderedChildSet::const_iterator it = set->begin();
         it != set->end(); ++it) {
      count++;
      GetChildSetForKernel(trans, *it, &child_sets);
    }
  }

  return count;
}
322 void Directory::GetChildSetForKernel(
323 BaseTransaction* trans,
324 EntryKernel* kernel,
325 std::deque<const OrderedChildSet*>* child_sets) const {
326 if (!kernel->ref(IS_DIR))
327 return; // Not a directory => no children.
329 const OrderedChildSet* descendants =
330 kernel_->parent_child_index.GetChildren(kernel->ref(ID));
331 if (!descendants)
332 return; // This directory has no children.
334 // Add our children to the list of items to be traversed.
335 child_sets->push_back(descendants);
338 int Directory::GetPositionIndex(
339 BaseTransaction* trans,
340 EntryKernel* kernel) const {
341 const OrderedChildSet* siblings =
342 kernel_->parent_child_index.GetSiblings(kernel);
344 OrderedChildSet::const_iterator it = siblings->find(kernel);
345 return std::distance(siblings->begin(), it);
348 bool Directory::InsertEntry(BaseWriteTransaction* trans, EntryKernel* entry) {
349 ScopedKernelLock lock(this);
350 return InsertEntry(lock, trans, entry);
353 bool Directory::InsertEntry(const ScopedKernelLock& lock,
354 BaseWriteTransaction* trans,
355 EntryKernel* entry) {
356 if (!SyncAssert(NULL != entry, FROM_HERE, "Entry is null", trans))
357 return false;
359 static const char error[] = "Entry already in memory index.";
361 if (!SyncAssert(
362 kernel_->metahandles_map.insert(
363 std::make_pair(entry->ref(META_HANDLE), entry)).second,
364 FROM_HERE,
365 error,
366 trans)) {
367 return false;
369 if (!SyncAssert(
370 kernel_->ids_map.insert(
371 std::make_pair(entry->ref(ID).value(), entry)).second,
372 FROM_HERE,
373 error,
374 trans)) {
375 return false;
377 if (ParentChildIndex::ShouldInclude(entry)) {
378 if (!SyncAssert(kernel_->parent_child_index.Insert(entry),
379 FROM_HERE,
380 error,
381 trans)) {
382 return false;
385 AddToAttachmentIndex(
386 lock, entry->ref(META_HANDLE), entry->ref(ATTACHMENT_METADATA));
388 // Should NEVER be created with a client tag or server tag.
389 if (!SyncAssert(entry->ref(UNIQUE_SERVER_TAG).empty(), FROM_HERE,
390 "Server tag should be empty", trans)) {
391 return false;
393 if (!SyncAssert(entry->ref(UNIQUE_CLIENT_TAG).empty(), FROM_HERE,
394 "Client tag should be empty", trans))
395 return false;
397 return true;
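
// Rekeys |entry| to |new_id|, updating the ID-keyed map and the parent-child
// index. Returns false (and changes nothing) if |new_id| is already in use.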
400 bool Directory::ReindexId(BaseWriteTransaction* trans,
401 EntryKernel* const entry,
402 const Id& new_id) {
403 ScopedKernelLock lock(this);
404 if (NULL != GetEntryById(lock, new_id))
405 return false;
408 // Update the indices that depend on the ID field.
409 ScopedParentChildIndexUpdater updater_b(lock, entry,
410 &kernel_->parent_child_index);
411 size_t num_erased = kernel_->ids_map.erase(entry->ref(ID).value());
412 DCHECK_EQ(1U, num_erased);
413 entry->put(ID, new_id);
414 kernel_->ids_map[entry->ref(ID).value()] = entry;
416 return true;
419 bool Directory::ReindexParentId(BaseWriteTransaction* trans,
420 EntryKernel* const entry,
421 const Id& new_parent_id) {
422 ScopedKernelLock lock(this);
425 // Update the indices that depend on the PARENT_ID field.
426 ScopedParentChildIndexUpdater index_updater(lock, entry,
427 &kernel_->parent_child_index);
428 entry->put(PARENT_ID, new_parent_id);
430 return true;
433 void Directory::RemoveFromAttachmentIndex(
434 const ScopedKernelLock& lock,
435 const int64 metahandle,
436 const sync_pb::AttachmentMetadata& attachment_metadata) {
437 for (int i = 0; i < attachment_metadata.record_size(); ++i) {
438 AttachmentIdUniqueId unique_id =
439 attachment_metadata.record(i).id().unique_id();
440 IndexByAttachmentId::iterator iter =
441 kernel_->index_by_attachment_id.find(unique_id);
442 if (iter != kernel_->index_by_attachment_id.end()) {
443 iter->second.erase(metahandle);
444 if (iter->second.empty()) {
445 kernel_->index_by_attachment_id.erase(iter);
451 void Directory::AddToAttachmentIndex(
452 const ScopedKernelLock& lock,
453 const int64 metahandle,
454 const sync_pb::AttachmentMetadata& attachment_metadata) {
455 for (int i = 0; i < attachment_metadata.record_size(); ++i) {
456 AttachmentIdUniqueId unique_id =
457 attachment_metadata.record(i).id().unique_id();
458 IndexByAttachmentId::iterator iter =
459 kernel_->index_by_attachment_id.find(unique_id);
460 if (iter == kernel_->index_by_attachment_id.end()) {
461 iter = kernel_->index_by_attachment_id.insert(std::make_pair(
462 unique_id,
463 MetahandleSet())).first;
465 iter->second.insert(metahandle);
469 void Directory::UpdateAttachmentIndex(
470 const int64 metahandle,
471 const sync_pb::AttachmentMetadata& old_metadata,
472 const sync_pb::AttachmentMetadata& new_metadata) {
473 ScopedKernelLock lock(this);
474 RemoveFromAttachmentIndex(lock, metahandle, old_metadata);
475 AddToAttachmentIndex(lock, metahandle, new_metadata);
478 void Directory::GetMetahandlesByAttachmentId(
479 BaseTransaction* trans,
480 const sync_pb::AttachmentIdProto& attachment_id_proto,
481 Metahandles* result) {
482 DCHECK(result);
483 result->clear();
484 ScopedKernelLock lock(this);
485 IndexByAttachmentId::const_iterator index_iter =
486 kernel_->index_by_attachment_id.find(attachment_id_proto.unique_id());
487 if (index_iter == kernel_->index_by_attachment_id.end())
488 return;
489 const MetahandleSet& metahandle_set = index_iter->second;
490 std::copy(
491 metahandle_set.begin(), metahandle_set.end(), back_inserter(*result));
494 bool Directory::unrecoverable_error_set(const BaseTransaction* trans) const {
495 DCHECK(trans != NULL);
496 return unrecoverable_error_set_;
499 void Directory::ClearDirtyMetahandles(const ScopedKernelLock& lock) {
500 kernel_->transaction_mutex.AssertAcquired();
501 kernel_->dirty_metahandles.clear();
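
// An entry may be dropped from memory only once it is deleted, clean (not
// dirty), not mid-commit (SYNCING), and carries no unsynced or unapplied
// state.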
504 bool Directory::SafeToPurgeFromMemory(WriteTransaction* trans,
505 const EntryKernel* const entry) const {
506 bool safe = entry->ref(IS_DEL) && !entry->is_dirty() &&
507 !entry->ref(SYNCING) && !entry->ref(IS_UNAPPLIED_UPDATE) &&
508 !entry->ref(IS_UNSYNCED);
510 if (safe) {
511 int64 handle = entry->ref(META_HANDLE);
512 const ModelType type = entry->GetServerModelType();
513 if (!SyncAssert(kernel_->dirty_metahandles.count(handle) == 0U,
514 FROM_HERE,
515 "Dirty metahandles should be empty", trans))
516 return false;
517 // TODO(tim): Bug 49278.
518 if (!SyncAssert(!kernel_->unsynced_metahandles.count(handle),
519 FROM_HERE,
520 "Unsynced handles should be empty",
521 trans))
522 return false;
523 if (!SyncAssert(!kernel_->unapplied_update_metahandles[type].count(handle),
524 FROM_HERE,
525 "Unapplied metahandles should be empty",
526 trans))
527 return false;
530 return safe;
533 void Directory::TakeSnapshotForSaveChanges(SaveChangesSnapshot* snapshot) {
534 ReadTransaction trans(FROM_HERE, this);
535 ScopedKernelLock lock(this);
537 // If there is an unrecoverable error then just bail out.
538 if (unrecoverable_error_set(&trans))
539 return;
541 // Deep copy dirty entries from kernel_->metahandles_index into snapshot and
542 // clear dirty flags.
543 for (MetahandleSet::const_iterator i = kernel_->dirty_metahandles.begin();
544 i != kernel_->dirty_metahandles.end(); ++i) {
545 EntryKernel* entry = GetEntryByHandle(lock, *i);
546 if (!entry)
547 continue;
548 // Skip over false positives; it happens relatively infrequently.
549 if (!entry->is_dirty())
550 continue;
551 snapshot->dirty_metas.insert(snapshot->dirty_metas.end(),
552 new EntryKernel(*entry));
553 DCHECK_EQ(1U, kernel_->dirty_metahandles.count(*i));
    // We don't bother removing from the index here as we blow the entire
    // thing away in a moment, and it unnecessarily complicates iteration.
556 entry->clear_dirty(NULL);
558 ClearDirtyMetahandles(lock);
560 // Set purged handles.
561 DCHECK(snapshot->metahandles_to_purge.empty());
562 snapshot->metahandles_to_purge.swap(kernel_->metahandles_to_purge);
564 // Fill kernel_info_status and kernel_info.
565 snapshot->kernel_info = kernel_->persisted_info;
  // To avoid duplicates when the process crashes, we record a next_id of
  // greater magnitude than could possibly be reached before the next save
  // changes. In other words, it's effectively impossible for the user to
  // generate 65536 new bookmarks in 3 seconds.
  snapshot->kernel_info.next_id -= 65536;
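  // (next_id counts downward, since client IDs are negative, so this reserves
  // a block of 65536 IDs: if we crash before the next save, IDs handed out
  // after this snapshot cannot be re-issued on restart as long as fewer than
  // 65536 of them were generated.)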
571 snapshot->kernel_info_status = kernel_->info_status;
572 // This one we reset on failure.
573 kernel_->info_status = KERNEL_SHARE_INFO_VALID;
575 delete_journal_->TakeSnapshotAndClear(
576 &trans, &snapshot->delete_journals, &snapshot->delete_journals_to_purge);
579 bool Directory::SaveChanges() {
580 bool success = false;
582 base::AutoLock scoped_lock(kernel_->save_changes_mutex);
584 // Snapshot and save.
585 SaveChangesSnapshot snapshot;
586 TakeSnapshotForSaveChanges(&snapshot);
587 success = store_->SaveChanges(snapshot);
589 // Handle success or failure.
590 if (success)
591 success = VacuumAfterSaveChanges(snapshot);
592 else
593 HandleSaveChangesFailure(snapshot);
594 return success;
597 bool Directory::VacuumAfterSaveChanges(const SaveChangesSnapshot& snapshot) {
598 if (snapshot.dirty_metas.empty())
599 return true;
601 // Need a write transaction as we are about to permanently purge entries.
602 WriteTransaction trans(FROM_HERE, VACUUM_AFTER_SAVE, this);
603 ScopedKernelLock lock(this);
604 // Now drop everything we can out of memory.
605 for (EntryKernelSet::const_iterator i = snapshot.dirty_metas.begin();
606 i != snapshot.dirty_metas.end(); ++i) {
607 MetahandlesMap::iterator found =
608 kernel_->metahandles_map.find((*i)->ref(META_HANDLE));
609 EntryKernel* entry = (found == kernel_->metahandles_map.end() ?
610 NULL : found->second);
611 if (entry && SafeToPurgeFromMemory(&trans, entry)) {
612 // We now drop deleted metahandles that are up to date on both the client
613 // and the server.
614 size_t num_erased = 0;
615 num_erased = kernel_->metahandles_map.erase(entry->ref(META_HANDLE));
616 DCHECK_EQ(1u, num_erased);
617 num_erased = kernel_->ids_map.erase(entry->ref(ID).value());
618 DCHECK_EQ(1u, num_erased);
619 if (!entry->ref(UNIQUE_SERVER_TAG).empty()) {
620 num_erased =
621 kernel_->server_tags_map.erase(entry->ref(UNIQUE_SERVER_TAG));
622 DCHECK_EQ(1u, num_erased);
624 if (!entry->ref(UNIQUE_CLIENT_TAG).empty()) {
625 num_erased =
626 kernel_->client_tags_map.erase(entry->ref(UNIQUE_CLIENT_TAG));
627 DCHECK_EQ(1u, num_erased);
629 if (!SyncAssert(!kernel_->parent_child_index.Contains(entry),
630 FROM_HERE,
631 "Deleted entry still present",
632 (&trans)))
633 return false;
634 RemoveFromAttachmentIndex(
635 lock, entry->ref(META_HANDLE), entry->ref(ATTACHMENT_METADATA));
637 delete entry;
639 if (trans.unrecoverable_error_set())
640 return false;
642 return true;
645 void Directory::UnapplyEntry(EntryKernel* entry) {
646 int64 handle = entry->ref(META_HANDLE);
647 ModelType server_type = GetModelTypeFromSpecifics(
648 entry->ref(SERVER_SPECIFICS));
650 // Clear enough so that on the next sync cycle all local data will
651 // be overwritten.
652 // Note: do not modify the root node in order to preserve the
653 // initial sync ended bit for this type (else on the next restart
654 // this type will be treated as disabled and therefore fully purged).
655 if (IsRealDataType(server_type) &&
656 ModelTypeToRootTag(server_type) == entry->ref(UNIQUE_SERVER_TAG)) {
657 return;
660 // Set the unapplied bit if this item has server data.
661 if (IsRealDataType(server_type) && !entry->ref(IS_UNAPPLIED_UPDATE)) {
662 entry->put(IS_UNAPPLIED_UPDATE, true);
663 kernel_->unapplied_update_metahandles[server_type].insert(handle);
664 entry->mark_dirty(&kernel_->dirty_metahandles);
667 // Unset the unsynced bit.
668 if (entry->ref(IS_UNSYNCED)) {
669 kernel_->unsynced_metahandles.erase(handle);
670 entry->put(IS_UNSYNCED, false);
671 entry->mark_dirty(&kernel_->dirty_metahandles);
674 // Mark the item as locally deleted. No deleted items are allowed in the
675 // parent child index.
676 if (!entry->ref(IS_DEL)) {
677 kernel_->parent_child_index.Remove(entry);
678 entry->put(IS_DEL, true);
679 entry->mark_dirty(&kernel_->dirty_metahandles);
682 // Set the version to the "newly created" version.
683 if (entry->ref(BASE_VERSION) != CHANGES_VERSION) {
684 entry->put(BASE_VERSION, CHANGES_VERSION);
685 entry->mark_dirty(&kernel_->dirty_metahandles);
688 // At this point locally created items that aren't synced will become locally
689 // deleted items, and purged on the next snapshot. All other items will match
690 // the state they would have had if they were just created via a server
691 // update. See MutableEntry::MutableEntry(.., CreateNewUpdateItem, ..).
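
// Removes |entry| from every in-memory index. Ownership is transferred to
// |entries_to_journal| when |save_to_journal| is true; otherwise the entry is
// deleted immediately.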
694 void Directory::DeleteEntry(const ScopedKernelLock& lock,
695 bool save_to_journal,
696 EntryKernel* entry,
697 EntryKernelSet* entries_to_journal) {
698 int64 handle = entry->ref(META_HANDLE);
699 ModelType server_type = GetModelTypeFromSpecifics(
700 entry->ref(SERVER_SPECIFICS));
702 kernel_->metahandles_to_purge.insert(handle);
704 size_t num_erased = 0;
705 num_erased = kernel_->metahandles_map.erase(entry->ref(META_HANDLE));
706 DCHECK_EQ(1u, num_erased);
707 num_erased = kernel_->ids_map.erase(entry->ref(ID).value());
708 DCHECK_EQ(1u, num_erased);
709 num_erased = kernel_->unsynced_metahandles.erase(handle);
710 DCHECK_EQ(entry->ref(IS_UNSYNCED), num_erased > 0);
711 num_erased =
712 kernel_->unapplied_update_metahandles[server_type].erase(handle);
713 DCHECK_EQ(entry->ref(IS_UNAPPLIED_UPDATE), num_erased > 0);
714 if (kernel_->parent_child_index.Contains(entry))
715 kernel_->parent_child_index.Remove(entry);
717 if (!entry->ref(UNIQUE_CLIENT_TAG).empty()) {
718 num_erased =
719 kernel_->client_tags_map.erase(entry->ref(UNIQUE_CLIENT_TAG));
720 DCHECK_EQ(1u, num_erased);
722 if (!entry->ref(UNIQUE_SERVER_TAG).empty()) {
723 num_erased =
724 kernel_->server_tags_map.erase(entry->ref(UNIQUE_SERVER_TAG));
725 DCHECK_EQ(1u, num_erased);
727 RemoveFromAttachmentIndex(lock, handle, entry->ref(ATTACHMENT_METADATA));
729 if (save_to_journal) {
730 entries_to_journal->insert(entry);
731 } else {
732 delete entry;
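
// Purges all entries belonging to |disabled_types|. Deletions for types in
// |types_to_journal| are recorded in the delete journal (when the journal is
// enabled for that type); entries of types in |types_to_unapply| are reverted
// to an unapplied server-update state instead of being removed.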
736 bool Directory::PurgeEntriesWithTypeIn(ModelTypeSet disabled_types,
737 ModelTypeSet types_to_journal,
738 ModelTypeSet types_to_unapply) {
739 disabled_types.RemoveAll(ProxyTypes());
741 if (disabled_types.Empty())
742 return true;
745 WriteTransaction trans(FROM_HERE, PURGE_ENTRIES, this);
747 EntryKernelSet entries_to_journal;
748 STLElementDeleter<EntryKernelSet> journal_deleter(&entries_to_journal);
751 ScopedKernelLock lock(this);
753 bool found_progress = false;
754 for (ModelTypeSet::Iterator iter = disabled_types.First(); iter.Good();
755 iter.Inc()) {
756 if (!kernel_->persisted_info.HasEmptyDownloadProgress(iter.Get()))
757 found_progress = true;
760 // If none of the disabled types have progress markers, there's nothing to
761 // purge.
762 if (!found_progress)
763 return true;
765 // We iterate in two passes to avoid a bug in STLport (which is used in
766 // the Android build). There are some versions of that library where a
767 // hash_map's iterators can be invalidated when an item is erased from the
768 // hash_map.
769 // See http://sourceforge.net/p/stlport/bugs/239/.
771 std::set<EntryKernel*> to_purge;
772 for (MetahandlesMap::iterator it = kernel_->metahandles_map.begin();
773 it != kernel_->metahandles_map.end(); ++it) {
774 const sync_pb::EntitySpecifics& local_specifics =
775 it->second->ref(SPECIFICS);
776 const sync_pb::EntitySpecifics& server_specifics =
777 it->second->ref(SERVER_SPECIFICS);
778 ModelType local_type = GetModelTypeFromSpecifics(local_specifics);
779 ModelType server_type = GetModelTypeFromSpecifics(server_specifics);
781 if ((IsRealDataType(local_type) && disabled_types.Has(local_type)) ||
782 (IsRealDataType(server_type) && disabled_types.Has(server_type))) {
783 to_purge.insert(it->second);
787 for (std::set<EntryKernel*>::iterator it = to_purge.begin();
788 it != to_purge.end(); ++it) {
789 EntryKernel* entry = *it;
791 const sync_pb::EntitySpecifics& local_specifics =
792 (*it)->ref(SPECIFICS);
793 const sync_pb::EntitySpecifics& server_specifics =
794 (*it)->ref(SERVER_SPECIFICS);
795 ModelType local_type = GetModelTypeFromSpecifics(local_specifics);
796 ModelType server_type = GetModelTypeFromSpecifics(server_specifics);
798 if (types_to_unapply.Has(local_type) ||
799 types_to_unapply.Has(server_type)) {
800 UnapplyEntry(entry);
801 } else {
802 bool save_to_journal =
803 (types_to_journal.Has(local_type) ||
804 types_to_journal.Has(server_type)) &&
805 (delete_journal_->IsDeleteJournalEnabled(local_type) ||
806 delete_journal_->IsDeleteJournalEnabled(server_type));
807 DeleteEntry(lock, save_to_journal, entry, &entries_to_journal);
811 delete_journal_->AddJournalBatch(&trans, entries_to_journal);
813 // Ensure meta tracking for these data types reflects the purged state.
814 for (ModelTypeSet::Iterator it = disabled_types.First();
815 it.Good(); it.Inc()) {
816 kernel_->persisted_info.transaction_version[it.Get()] = 0;
818 // Don't discard progress markers or context for unapplied types.
819 if (!types_to_unapply.Has(it.Get())) {
820 kernel_->persisted_info.ResetDownloadProgress(it.Get());
821 kernel_->persisted_info.datatype_context[it.Get()].Clear();
825 kernel_->info_status = KERNEL_SHARE_INFO_DIRTY;
828 return true;
831 bool Directory::ResetVersionsForType(BaseWriteTransaction* trans,
832 ModelType type) {
833 if (!ProtocolTypes().Has(type))
834 return false;
835 DCHECK_NE(type, BOOKMARKS) << "Only non-hierarchical types are supported";
837 EntryKernel* type_root = GetEntryByServerTag(ModelTypeToRootTag(type));
838 if (!type_root)
839 return false;
841 ScopedKernelLock lock(this);
842 const Id& type_root_id = type_root->ref(ID);
843 Directory::Metahandles children;
844 AppendChildHandles(lock, type_root_id, &children);
846 for (Metahandles::iterator it = children.begin(); it != children.end();
847 ++it) {
848 EntryKernel* entry = GetEntryByHandle(lock, *it);
849 if (!entry)
850 continue;
851 if (entry->ref(BASE_VERSION) > 1)
852 entry->put(BASE_VERSION, 1);
853 if (entry->ref(SERVER_VERSION) > 1)
854 entry->put(SERVER_VERSION, 1);
856 // Note that we do not unset IS_UNSYNCED or IS_UNAPPLIED_UPDATE in order
857 // to ensure no in-transit data is lost.
859 entry->mark_dirty(&kernel_->dirty_metahandles);
862 return true;
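
// Returns true if at least one entry still references the given attachment.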
865 bool Directory::IsAttachmentLinked(
866 const sync_pb::AttachmentIdProto& attachment_id_proto) const {
867 ScopedKernelLock lock(this);
868 IndexByAttachmentId::const_iterator iter =
869 kernel_->index_by_attachment_id.find(attachment_id_proto.unique_id());
870 if (iter != kernel_->index_by_attachment_id.end() && !iter->second.empty()) {
871 return true;
873 return false;
876 void Directory::HandleSaveChangesFailure(const SaveChangesSnapshot& snapshot) {
877 WriteTransaction trans(FROM_HERE, HANDLE_SAVE_FAILURE, this);
878 ScopedKernelLock lock(this);
879 kernel_->info_status = KERNEL_SHARE_INFO_DIRTY;
881 // Because we optimistically cleared the dirty bit on the real entries when
882 // taking the snapshot, we must restore it on failure. Not doing this could
883 // cause lost data, if no other changes are made to the in-memory entries
884 // that would cause the dirty bit to get set again. Setting the bit ensures
885 // that SaveChanges will at least try again later.
886 for (EntryKernelSet::const_iterator i = snapshot.dirty_metas.begin();
887 i != snapshot.dirty_metas.end(); ++i) {
888 MetahandlesMap::iterator found =
889 kernel_->metahandles_map.find((*i)->ref(META_HANDLE));
890 if (found != kernel_->metahandles_map.end()) {
891 found->second->mark_dirty(&kernel_->dirty_metahandles);
895 kernel_->metahandles_to_purge.insert(snapshot.metahandles_to_purge.begin(),
896 snapshot.metahandles_to_purge.end());
898 // Restore delete journals.
899 delete_journal_->AddJournalBatch(&trans, snapshot.delete_journals);
900 delete_journal_->PurgeDeleteJournals(&trans,
901 snapshot.delete_journals_to_purge);
904 void Directory::GetDownloadProgress(
905 ModelType model_type,
906 sync_pb::DataTypeProgressMarker* value_out) const {
907 ScopedKernelLock lock(this);
908 return value_out->CopyFrom(
909 kernel_->persisted_info.download_progress[model_type]);
912 void Directory::GetDownloadProgressAsString(
913 ModelType model_type,
914 std::string* value_out) const {
915 ScopedKernelLock lock(this);
916 kernel_->persisted_info.download_progress[model_type].SerializeToString(
917 value_out);
920 size_t Directory::GetEntriesCount() const {
921 ScopedKernelLock lock(this);
922 return kernel_->metahandles_map.size();
925 void Directory::SetDownloadProgress(
926 ModelType model_type,
927 const sync_pb::DataTypeProgressMarker& new_progress) {
928 ScopedKernelLock lock(this);
929 kernel_->persisted_info.download_progress[model_type].CopyFrom(new_progress);
930 kernel_->info_status = KERNEL_SHARE_INFO_DIRTY;
933 bool Directory::HasEmptyDownloadProgress(ModelType type) const {
934 ScopedKernelLock lock(this);
935 return kernel_->persisted_info.HasEmptyDownloadProgress(type);
938 int64 Directory::GetTransactionVersion(ModelType type) const {
939 kernel_->transaction_mutex.AssertAcquired();
940 return kernel_->persisted_info.transaction_version[type];
943 void Directory::IncrementTransactionVersion(ModelType type) {
944 kernel_->transaction_mutex.AssertAcquired();
945 kernel_->persisted_info.transaction_version[type]++;
948 void Directory::GetDataTypeContext(BaseTransaction* trans,
949 ModelType type,
950 sync_pb::DataTypeContext* context) const {
951 ScopedKernelLock lock(this);
952 context->CopyFrom(kernel_->persisted_info.datatype_context[type]);
955 void Directory::SetDataTypeContext(
956 BaseWriteTransaction* trans,
957 ModelType type,
958 const sync_pb::DataTypeContext& context) {
959 ScopedKernelLock lock(this);
960 kernel_->persisted_info.datatype_context[type].CopyFrom(context);
961 kernel_->info_status = KERNEL_SHARE_INFO_DIRTY;
964 // TODO(stanisc): crbug.com/438313: change these to not rely on the folders.
965 ModelTypeSet Directory::InitialSyncEndedTypes() {
966 syncable::ReadTransaction trans(FROM_HERE, this);
967 ModelTypeSet protocol_types = ProtocolTypes();
968 ModelTypeSet initial_sync_ended_types;
969 for (ModelTypeSet::Iterator i = protocol_types.First(); i.Good(); i.Inc()) {
970 if (InitialSyncEndedForType(&trans, i.Get())) {
971 initial_sync_ended_types.Put(i.Get());
974 return initial_sync_ended_types;
977 bool Directory::InitialSyncEndedForType(ModelType type) {
978 syncable::ReadTransaction trans(FROM_HERE, this);
979 return InitialSyncEndedForType(&trans, type);
982 bool Directory::InitialSyncEndedForType(
983 BaseTransaction* trans, ModelType type) {
984 // True iff the type's root node has been received and applied.
985 syncable::Entry entry(trans, syncable::GET_TYPE_ROOT, type);
986 return entry.good() && entry.GetBaseVersion() != CHANGES_VERSION;
989 string Directory::store_birthday() const {
990 ScopedKernelLock lock(this);
991 return kernel_->persisted_info.store_birthday;
994 void Directory::set_store_birthday(const string& store_birthday) {
995 ScopedKernelLock lock(this);
996 if (kernel_->persisted_info.store_birthday == store_birthday)
997 return;
998 kernel_->persisted_info.store_birthday = store_birthday;
999 kernel_->info_status = KERNEL_SHARE_INFO_DIRTY;
1002 string Directory::bag_of_chips() const {
1003 ScopedKernelLock lock(this);
1004 return kernel_->persisted_info.bag_of_chips;
1007 void Directory::set_bag_of_chips(const string& bag_of_chips) {
1008 ScopedKernelLock lock(this);
1009 if (kernel_->persisted_info.bag_of_chips == bag_of_chips)
1010 return;
1011 kernel_->persisted_info.bag_of_chips = bag_of_chips;
1012 kernel_->info_status = KERNEL_SHARE_INFO_DIRTY;
1016 string Directory::cache_guid() const {
1017 // No need to lock since nothing ever writes to it after load.
1018 return kernel_->cache_guid;
1021 NigoriHandler* Directory::GetNigoriHandler() {
1022 return nigori_handler_;
1025 Cryptographer* Directory::GetCryptographer(const BaseTransaction* trans) {
1026 DCHECK_EQ(this, trans->directory());
1027 return cryptographer_;
1030 void Directory::GetAllMetaHandles(BaseTransaction* trans,
1031 MetahandleSet* result) {
1032 result->clear();
1033 ScopedKernelLock lock(this);
1034 for (MetahandlesMap::iterator i = kernel_->metahandles_map.begin();
1035 i != kernel_->metahandles_map.end(); ++i) {
1036 result->insert(i->first);
1040 void Directory::GetUnsyncedMetaHandles(BaseTransaction* trans,
1041 Metahandles* result) {
1042 result->clear();
1043 ScopedKernelLock lock(this);
1044 copy(kernel_->unsynced_metahandles.begin(),
1045 kernel_->unsynced_metahandles.end(), back_inserter(*result));
1048 int64 Directory::unsynced_entity_count() const {
1049 ScopedKernelLock lock(this);
1050 return kernel_->unsynced_metahandles.size();
1053 bool Directory::TypeHasUnappliedUpdates(ModelType type) {
1054 ScopedKernelLock lock(this);
1055 return !kernel_->unapplied_update_metahandles[type].empty();
1058 void Directory::GetUnappliedUpdateMetaHandles(
1059 BaseTransaction* trans,
1060 FullModelTypeSet server_types,
1061 std::vector<int64>* result) {
1062 result->clear();
1063 ScopedKernelLock lock(this);
1064 for (int i = UNSPECIFIED; i < MODEL_TYPE_COUNT; ++i) {
1065 const ModelType type = ModelTypeFromInt(i);
1066 if (server_types.Has(type)) {
1067 std::copy(kernel_->unapplied_update_metahandles[type].begin(),
1068 kernel_->unapplied_update_metahandles[type].end(),
1069 back_inserter(*result));
1074 void Directory::GetMetaHandlesOfType(BaseTransaction* trans,
1075 ModelType type,
1076 std::vector<int64>* result) {
1077 ScopedKernelLock lock(this);
1078 GetMetaHandlesOfType(lock, trans, type, result);
1081 void Directory::GetMetaHandlesOfType(const ScopedKernelLock& lock,
1082 BaseTransaction* trans,
1083 ModelType type,
1084 std::vector<int64>* result) {
1085 result->clear();
1086 for (MetahandlesMap::iterator it = kernel_->metahandles_map.begin();
1087 it != kernel_->metahandles_map.end(); ++it) {
1088 EntryKernel* entry = it->second;
1089 const ModelType entry_type =
1090 GetModelTypeFromSpecifics(entry->ref(SPECIFICS));
1091 if (entry_type == type)
1092 result->push_back(it->first);
1096 void Directory::CollectMetaHandleCounts(
1097 std::vector<int>* num_entries_by_type,
1098 std::vector<int>* num_to_delete_entries_by_type) {
1099 syncable::ReadTransaction trans(FROM_HERE, this);
1100 ScopedKernelLock lock(this);
1102 for (MetahandlesMap::iterator it = kernel_->metahandles_map.begin();
1103 it != kernel_->metahandles_map.end(); ++it) {
1104 EntryKernel* entry = it->second;
1105 const ModelType type = GetModelTypeFromSpecifics(entry->ref(SPECIFICS));
1106 (*num_entries_by_type)[type]++;
1107 if (entry->ref(IS_DEL))
1108 (*num_to_delete_entries_by_type)[type]++;
1112 scoped_ptr<base::ListValue> Directory::GetNodeDetailsForType(
1113 BaseTransaction* trans,
1114 ModelType type) {
1115 scoped_ptr<base::ListValue> nodes(new base::ListValue());
1117 ScopedKernelLock lock(this);
1118 for (MetahandlesMap::iterator it = kernel_->metahandles_map.begin();
1119 it != kernel_->metahandles_map.end(); ++it) {
1120 if (GetModelTypeFromSpecifics(it->second->ref(SPECIFICS)) != type) {
1121 continue;
1124 EntryKernel* kernel = it->second;
1125 scoped_ptr<base::DictionaryValue> node(
1126 kernel->ToValue(GetCryptographer(trans)));
1128 // Add the position index if appropriate. This must be done here (and not
1129 // in EntryKernel) because the EntryKernel does not have access to its
1130 // siblings.
1131 if (kernel->ShouldMaintainPosition() && !kernel->ref(IS_DEL)) {
1132 node->SetInteger("positionIndex", GetPositionIndex(trans, kernel));
1135 nodes->Append(node.release());
1138 return nodes.Pass();
1141 bool Directory::CheckInvariantsOnTransactionClose(
1142 syncable::BaseTransaction* trans,
1143 const MetahandleSet& modified_handles) {
1144 // NOTE: The trans may be in the process of being destructed. Be careful if
1145 // you wish to call any of its virtual methods.
1146 switch (invariant_check_level_) {
1147 case FULL_DB_VERIFICATION: {
1148 MetahandleSet all_handles;
1149 GetAllMetaHandles(trans, &all_handles);
1150 return CheckTreeInvariants(trans, all_handles);
1152 case VERIFY_CHANGES: {
1153 return CheckTreeInvariants(trans, modified_handles);
1155 case OFF: {
1156 return true;
1159 NOTREACHED();
1160 return false;
1163 bool Directory::FullyCheckTreeInvariants(syncable::BaseTransaction* trans) {
1164 MetahandleSet handles;
1165 GetAllMetaHandles(trans, &handles);
1166 return CheckTreeInvariants(trans, handles);
1169 bool Directory::CheckTreeInvariants(syncable::BaseTransaction* trans,
1170 const MetahandleSet& handles) {
1171 MetahandleSet::const_iterator i;
1172 for (i = handles.begin() ; i != handles.end() ; ++i) {
1173 int64 metahandle = *i;
1174 Entry e(trans, GET_BY_HANDLE, metahandle);
1175 if (!SyncAssert(e.good(), FROM_HERE, "Entry is bad", trans))
1176 return false;
1177 syncable::Id id = e.GetId();
1178 syncable::Id parentid = e.GetParentId();
1180 if (id.IsRoot()) {
1181 if (!SyncAssert(e.GetIsDir(), FROM_HERE,
1182 "Entry should be a directory",
1183 trans))
1184 return false;
1185 if (!SyncAssert(parentid.IsRoot(), FROM_HERE,
1186 "Entry should be root",
1187 trans))
1188 return false;
1189 if (!SyncAssert(!e.GetIsUnsynced(), FROM_HERE, "Entry should be synced",
1190 trans))
1191 return false;
1192 continue;
1195 if (!e.GetIsDel()) {
1196 if (!SyncAssert(id != parentid, FROM_HERE,
1197 "Id should be different from parent id.",
1198 trans))
1199 return false;
1200 if (!SyncAssert(!e.GetNonUniqueName().empty(), FROM_HERE,
1201 "Non unique name should not be empty.",
1202 trans))
1203 return false;
1205 if (!parentid.IsNull()) {
1206 int safety_count = handles.size() + 1;
1207 while (!parentid.IsRoot()) {
1208 Entry parent(trans, GET_BY_ID, parentid);
1209 if (!SyncAssert(parent.good(), FROM_HERE,
1210 "Parent entry is not valid.", trans))
1211 return false;
1212 if (handles.end() == handles.find(parent.GetMetahandle()))
1213 break; // Skip further checking if parent was unmodified.
1214 if (!SyncAssert(parent.GetIsDir(), FROM_HERE,
1215 "Parent should be a directory", trans))
1216 return false;
1217 if (!SyncAssert(!parent.GetIsDel(), FROM_HERE,
1218 "Parent should not have been marked for deletion.",
1219 trans))
1220 return false;
1221 if (!SyncAssert(handles.end() != handles.find(parent.GetMetahandle()),
1222 FROM_HERE, "Parent should be in the index.", trans))
1223 return false;
1224 parentid = parent.GetParentId();
1225 if (!SyncAssert(--safety_count > 0, FROM_HERE,
1226 "Count should be greater than zero.", trans))
1227 return false;
1231 int64 base_version = e.GetBaseVersion();
1232 int64 server_version = e.GetServerVersion();
1233 bool using_unique_client_tag = !e.GetUniqueClientTag().empty();
1234 if (CHANGES_VERSION == base_version || 0 == base_version) {
1235 ModelType model_type = e.GetModelType();
1236 bool is_client_creatable_type_root_folder =
1237 parentid.IsRoot() &&
1238 IsTypeWithClientGeneratedRoot(model_type) &&
1239 e.GetUniqueServerTag() == ModelTypeToRootTag(model_type);
1240 if (e.GetIsUnappliedUpdate()) {
1241 // Must be a new item, or a de-duplicated unique client tag
1242 // that was created both locally and remotely, or a type root folder
1243 // that was created both locally and remotely.
1244 if (!(using_unique_client_tag ||
1245 is_client_creatable_type_root_folder)) {
1246 if (!SyncAssert(e.GetIsDel(), FROM_HERE,
1247 "The entry should have been deleted.", trans))
1248 return false;
1250 // It came from the server, so it must have a server ID.
1251 if (!SyncAssert(id.ServerKnows(), FROM_HERE,
1252 "The id should be from a server.",
1253 trans))
1254 return false;
1255 } else {
1256 if (e.GetIsDir()) {
1257 // TODO(chron): Implement this mode if clients ever need it.
1258 // For now, you can't combine a client tag and a directory.
1259 if (!SyncAssert(!using_unique_client_tag, FROM_HERE,
1260 "Directory cannot have a client tag.",
1261 trans))
1262 return false;
1264 if (is_client_creatable_type_root_folder) {
1265 // This must be a locally created type root folder.
1266 if (!SyncAssert(
1267 !e.GetIsUnsynced(), FROM_HERE,
1268 "Locally created type root folders should not be unsynced.",
1269 trans))
1270 return false;
1272 if (!SyncAssert(
1273 !e.GetIsDel(), FROM_HERE,
1274 "Locally created type root folders should not be deleted.",
1275 trans))
1276 return false;
1277 } else {
          // Should be an uncommitted item, or a successfully deleted one.
1279 if (!e.GetIsDel()) {
1280 if (!SyncAssert(e.GetIsUnsynced(), FROM_HERE,
1281 "The item should be unsynced.", trans))
1282 return false;
        // If the next check failed, it would imply that an item exists
        // on the server, isn't waiting for application locally, but either
        // is an unsynced create or a successful delete in the local copy.
        // Either way, that's a mismatch.
1289 if (!SyncAssert(0 == server_version, FROM_HERE,
1290 "Server version should be zero.",
1291 trans))
1292 return false;
1293 // Items that aren't using the unique client tag should have a zero
1294 // base version only if they have a local ID. Items with unique client
1295 // tags are allowed to use the zero base version for undeletion and
1296 // de-duplication; the unique client tag trumps the server ID.
1297 if (!using_unique_client_tag) {
1298 if (!SyncAssert(!id.ServerKnows(), FROM_HERE,
1299 "Should be a client only id.",
1300 trans))
1301 return false;
1304 } else {
1305 if (!SyncAssert(id.ServerKnows(),
1306 FROM_HERE,
1307 "Should be a server id.",
1308 trans))
1309 return false;
1312 // Previously we would assert that locally deleted items that have never
1313 // been synced must not be sent to the server (IS_UNSYNCED must be false).
1314 // This is not always true in the case that an item is deleted while the
1315 // initial commit is in flight. See crbug.com/426865.
1317 return true;
1320 void Directory::SetInvariantCheckLevel(InvariantCheckLevel check_level) {
1321 invariant_check_level_ = check_level;
1324 int64 Directory::NextMetahandle() {
1325 ScopedKernelLock lock(this);
1326 int64 metahandle = (kernel_->next_metahandle)++;
1327 return metahandle;
1330 // Always returns a client ID that is the string representation of a negative
1331 // number.
1332 Id Directory::NextId() {
1333 int64 result;
1335 ScopedKernelLock lock(this);
1336 result = (kernel_->persisted_info.next_id)--;
1337 kernel_->info_status = KERNEL_SHARE_INFO_DIRTY;
1339 DCHECK_LT(result, 0);
1340 return Id::CreateFromClientString(base::Int64ToString(result));
1343 bool Directory::HasChildren(BaseTransaction* trans, const Id& id) {
1344 ScopedKernelLock lock(this);
1345 return kernel_->parent_child_index.GetChildren(id) != NULL;
1348 Id Directory::GetFirstChildId(BaseTransaction* trans,
1349 const EntryKernel* parent) {
1350 DCHECK(parent);
1351 DCHECK(parent->ref(IS_DIR));
1353 ScopedKernelLock lock(this);
1354 const OrderedChildSet* children =
1355 kernel_->parent_child_index.GetChildren(parent->ref(ID));
1357 // We're expected to return root if there are no children.
1358 if (!children)
1359 return Id();
1361 return (*children->begin())->ref(ID);
1364 syncable::Id Directory::GetPredecessorId(EntryKernel* e) {
1365 ScopedKernelLock lock(this);
1367 DCHECK(ParentChildIndex::ShouldInclude(e));
1368 const OrderedChildSet* siblings = kernel_->parent_child_index.GetSiblings(e);
1369 OrderedChildSet::const_iterator i = siblings->find(e);
1370 DCHECK(i != siblings->end());
1372 if (i == siblings->begin()) {
1373 return Id();
1374 } else {
1375 i--;
1376 return (*i)->ref(ID);
1380 syncable::Id Directory::GetSuccessorId(EntryKernel* e) {
1381 ScopedKernelLock lock(this);
1383 DCHECK(ParentChildIndex::ShouldInclude(e));
1384 const OrderedChildSet* siblings = kernel_->parent_child_index.GetSiblings(e);
1385 OrderedChildSet::const_iterator i = siblings->find(e);
1386 DCHECK(i != siblings->end());
1388 i++;
1389 if (i == siblings->end()) {
1390 return Id();
1391 } else {
1392 return (*i)->ref(ID);
// TODO(rlarocque): Remove all support for placing ShouldMaintainPosition()
// items as siblings of items that do not maintain positions. It is required
// only for tests. See crbug.com/178282.
1399 void Directory::PutPredecessor(EntryKernel* e, EntryKernel* predecessor) {
1400 DCHECK(!e->ref(IS_DEL));
1401 if (!e->ShouldMaintainPosition()) {
1402 DCHECK(!e->ref(UNIQUE_POSITION).IsValid());
1403 return;
1405 std::string suffix = e->ref(UNIQUE_BOOKMARK_TAG);
1406 DCHECK(!suffix.empty());
1408 // Remove our item from the ParentChildIndex and remember to re-add it later.
1409 ScopedKernelLock lock(this);
1410 ScopedParentChildIndexUpdater updater(lock, e, &kernel_->parent_child_index);
1412 // Note: The ScopedParentChildIndexUpdater will update this set for us as we
1413 // leave this function.
1414 const OrderedChildSet* siblings =
1415 kernel_->parent_child_index.GetChildren(e->ref(PARENT_ID));
1417 if (!siblings) {
1418 // This parent currently has no other children.
1419 DCHECK(predecessor == NULL);
1420 UniquePosition pos = UniquePosition::InitialPosition(suffix);
1421 e->put(UNIQUE_POSITION, pos);
1422 return;
1425 if (predecessor == NULL) {
1426 // We have at least one sibling, and we're inserting to the left of them.
1427 UniquePosition successor_pos = (*siblings->begin())->ref(UNIQUE_POSITION);
1429 UniquePosition pos;
1430 if (!successor_pos.IsValid()) {
1431 // If all our successors are of non-positionable types, just create an
1432 // initial position. We arbitrarily choose to sort invalid positions to
1433 // the right of the valid positions.
1435 // We really shouldn't need to support this. See TODO above.
1436 pos = UniquePosition::InitialPosition(suffix);
1437 } else {
1438 DCHECK(!siblings->empty());
1439 pos = UniquePosition::Before(successor_pos, suffix);
1442 e->put(UNIQUE_POSITION, pos);
1443 return;
1446 // We can't support placing an item after an invalid position. Fortunately,
1447 // the tests don't exercise this particular case. We should not support
1448 // siblings with invalid positions at all. See TODO above.
1449 DCHECK(predecessor->ref(UNIQUE_POSITION).IsValid());
1451 OrderedChildSet::const_iterator neighbour = siblings->find(predecessor);
1452 DCHECK(neighbour != siblings->end());
1454 ++neighbour;
1455 if (neighbour == siblings->end()) {
1456 // Inserting at the end of the list.
1457 UniquePosition pos = UniquePosition::After(
1458 predecessor->ref(UNIQUE_POSITION),
1459 suffix);
1460 e->put(UNIQUE_POSITION, pos);
1461 return;
1464 EntryKernel* successor = *neighbour;
1466 // Another mixed valid and invalid position case. This one could be supported
1467 // in theory, but we're trying to deprecate support for siblings with and
1468 // without valid positions. See TODO above.
1469 DCHECK(successor->ref(UNIQUE_POSITION).IsValid());
1471 // Finally, the normal case: inserting between two elements.
1472 UniquePosition pos = UniquePosition::Between(
1473 predecessor->ref(UNIQUE_POSITION),
1474 successor->ref(UNIQUE_POSITION),
1475 suffix);
1476 e->put(UNIQUE_POSITION, pos);
1477 return;
1480 // TODO(rlarocque): Avoid this indirection. Just return the set.
1481 void Directory::AppendChildHandles(const ScopedKernelLock& lock,
1482 const Id& parent_id,
1483 Directory::Metahandles* result) {
1484 const OrderedChildSet* children =
1485 kernel_->parent_child_index.GetChildren(parent_id);
1486 if (!children)
1487 return;
1489 for (OrderedChildSet::const_iterator i = children->begin();
1490 i != children->end(); ++i) {
1491 result->push_back((*i)->ref(META_HANDLE));
1495 void Directory::UnmarkDirtyEntry(WriteTransaction* trans, Entry* entry) {
1496 CHECK(trans);
1497 entry->kernel_->clear_dirty(&kernel_->dirty_metahandles);
1500 void Directory::GetAttachmentIdsToUpload(BaseTransaction* trans,
1501 ModelType type,
1502 AttachmentIdList* ids) {
1503 // TODO(maniscalco): Maintain an index by ModelType and rewrite this method to
1504 // use it. The approach below is likely very expensive because it iterates
1505 // all entries (bug 415199).
1506 DCHECK(trans);
1507 DCHECK(ids);
1508 ids->clear();
1509 AttachmentIdSet on_server_id_set;
1510 AttachmentIdSet not_on_server_id_set;
1511 std::vector<int64> metahandles;
1513 ScopedKernelLock lock(this);
1514 GetMetaHandlesOfType(lock, trans, type, &metahandles);
1515 std::vector<int64>::const_iterator iter = metahandles.begin();
1516 const std::vector<int64>::const_iterator end = metahandles.end();
1517 // For all of this type's entries...
1518 for (; iter != end; ++iter) {
1519 EntryKernel* entry = GetEntryByHandle(lock, *iter);
1520 DCHECK(entry);
1521 const sync_pb::AttachmentMetadata metadata =
1522 entry->ref(ATTACHMENT_METADATA);
1523 // for each of this entry's attachments...
1524 for (int i = 0; i < metadata.record_size(); ++i) {
1525 AttachmentId id =
1526 AttachmentId::CreateFromProto(metadata.record(i).id());
1527 // if this attachment is known to be on the server, remember it for
1528 // later,
1529 if (metadata.record(i).is_on_server()) {
1530 on_server_id_set.insert(id);
1531 } else {
1532 // otherwise, add it to id_set.
1533 not_on_server_id_set.insert(id);
  // Why did we bother keeping a set of ids known to be on the server? The
  // is_on_server flag is stored denormalized so we can end up with two
  // entries with the same attachment id where one says it's on the server
  // and the other says it's not. When this happens, we trust the one that
  // says it's on the server. To avoid re-uploading the same attachment
  // multiple times, we remove any ids known to be on the server from the
  // id_set we are about to return.
1546 // TODO(maniscalco): Eliminate redundant metadata storage (bug 415203).
1547 std::set_difference(not_on_server_id_set.begin(), not_on_server_id_set.end(),
1548 on_server_id_set.begin(), on_server_id_set.end(),
1549 std::back_inserter(*ids));
1552 Directory::Kernel* Directory::kernel() {
1553 return kernel_;
1556 const Directory::Kernel* Directory::kernel() const {
1557 return kernel_;
1560 } // namespace syncable
1561 } // namespace syncer