// Copyright 2013 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "sync/syncable/directory.h"

#include <iterator>

#include "base/base64.h"
#include "base/debug/trace_event.h"
#include "base/stl_util.h"
#include "base/strings/string_number_conversions.h"
#include "sync/internal_api/public/base/unique_position.h"
#include "sync/internal_api/public/util/unrecoverable_error_handler.h"
#include "sync/syncable/entry.h"
#include "sync/syncable/entry_kernel.h"
#include "sync/syncable/in_memory_directory_backing_store.h"
#include "sync/syncable/on_disk_directory_backing_store.h"
#include "sync/syncable/scoped_kernel_lock.h"
#include "sync/syncable/scoped_parent_child_index_updater.h"
#include "sync/syncable/syncable-inl.h"
#include "sync/syncable/syncable_base_transaction.h"
#include "sync/syncable/syncable_changes_version.h"
#include "sync/syncable/syncable_read_transaction.h"
#include "sync/syncable/syncable_util.h"
#include "sync/syncable/syncable_write_transaction.h"

using std::string;

namespace syncer {
namespace syncable {

// static
const base::FilePath::CharType Directory::kSyncDatabaseFilename[] =
    FILE_PATH_LITERAL("SyncData.sqlite3");

Directory::PersistedKernelInfo::PersistedKernelInfo()
    : next_id(0) {
  ModelTypeSet protocol_types = ProtocolTypes();
  for (ModelTypeSet::Iterator iter = protocol_types.First(); iter.Good();
       iter.Inc()) {
    reset_download_progress(iter.Get());
    transaction_version[iter.Get()] = 0;
  }
}

Directory::PersistedKernelInfo::~PersistedKernelInfo() {}

void Directory::PersistedKernelInfo::reset_download_progress(
    ModelType model_type) {
  download_progress[model_type].set_data_type_id(
      GetSpecificsFieldNumberFromModelType(model_type));
  // An empty-string token indicates no prior knowledge.
  download_progress[model_type].set_token(std::string());
}

Directory::SaveChangesSnapshot::SaveChangesSnapshot()
    : kernel_info_status(KERNEL_SHARE_INFO_INVALID) {
}

Directory::SaveChangesSnapshot::~SaveChangesSnapshot() {
  STLDeleteElements(&dirty_metas);
  STLDeleteElements(&delete_journals);
}

Directory::Kernel::Kernel(
    const std::string& name,
    const KernelLoadInfo& info, DirectoryChangeDelegate* delegate,
    const WeakHandle<TransactionObserver>& transaction_observer)
    : next_write_transaction_id(0),
      name(name),
      info_status(Directory::KERNEL_SHARE_INFO_VALID),
      persisted_info(info.kernel_info),
      cache_guid(info.cache_guid),
      next_metahandle(info.max_metahandle + 1),
      delegate(delegate),
      transaction_observer(transaction_observer) {
  DCHECK(delegate);
  DCHECK(transaction_observer.IsInitialized());
}

Directory::Kernel::~Kernel() {
  STLDeleteContainerPairSecondPointers(metahandles_map.begin(),
                                       metahandles_map.end());
}

Directory::Directory(
    DirectoryBackingStore* store,
    UnrecoverableErrorHandler* unrecoverable_error_handler,
    ReportUnrecoverableErrorFunction report_unrecoverable_error_function,
    NigoriHandler* nigori_handler,
    Cryptographer* cryptographer)
    : kernel_(NULL),
      store_(store),
      unrecoverable_error_handler_(unrecoverable_error_handler),
      report_unrecoverable_error_function_(
          report_unrecoverable_error_function),
      unrecoverable_error_set_(false),
      nigori_handler_(nigori_handler),
      cryptographer_(cryptographer),
      invariant_check_level_(VERIFY_CHANGES) {
}

Directory::~Directory() {
  Close();
}

DirOpenResult Directory::Open(
    const string& name,
    DirectoryChangeDelegate* delegate,
    const WeakHandle<TransactionObserver>& transaction_observer) {
  TRACE_EVENT0("sync", "SyncDatabaseOpen");

  const DirOpenResult result =
      OpenImpl(name, delegate, transaction_observer);

  if (OPENED != result)
    Close();
  return result;
}

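// Illustrative sketch only (not part of the original file): one plausible
// open sequence. All variable names here are hypothetical; the real call
// sites live in the sync internal API layer.
//
//   Directory dir(backing_store, error_handler, report_function,
//                 nigori_handler, cryptographer);
//   if (dir.Open("user_share_name", change_delegate,
//                transaction_observer_handle) != OPENED) {
//     // Open() already called Close() on failure; abandon this directory.
//   }
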
void Directory::InitializeIndices(MetahandlesMap* handles_map) {
  kernel_->metahandles_map.swap(*handles_map);
  for (MetahandlesMap::const_iterator it = kernel_->metahandles_map.begin();
       it != kernel_->metahandles_map.end(); ++it) {
    EntryKernel* entry = it->second;
    if (ParentChildIndex::ShouldInclude(entry))
      kernel_->parent_child_index.Insert(entry);
    const int64 metahandle = entry->ref(META_HANDLE);
    if (entry->ref(IS_UNSYNCED))
      kernel_->unsynced_metahandles.insert(metahandle);
    if (entry->ref(IS_UNAPPLIED_UPDATE)) {
      const ModelType type = entry->GetServerModelType();
      kernel_->unapplied_update_metahandles[type].insert(metahandle);
    }
    if (!entry->ref(UNIQUE_SERVER_TAG).empty()) {
      DCHECK(kernel_->server_tags_map.find(entry->ref(UNIQUE_SERVER_TAG)) ==
             kernel_->server_tags_map.end())
          << "Unexpected duplicate use of server tag";
      kernel_->server_tags_map[entry->ref(UNIQUE_SERVER_TAG)] = entry;
    }
    if (!entry->ref(UNIQUE_CLIENT_TAG).empty()) {
      DCHECK(kernel_->client_tags_map.find(entry->ref(UNIQUE_CLIENT_TAG)) ==
             kernel_->client_tags_map.end())
          << "Unexpected duplicate use of client tag";
      kernel_->client_tags_map[entry->ref(UNIQUE_CLIENT_TAG)] = entry;
    }
    DCHECK(kernel_->ids_map.find(entry->ref(ID).value()) ==
           kernel_->ids_map.end()) << "Unexpected duplicate use of ID";
    kernel_->ids_map[entry->ref(ID).value()] = entry;
    DCHECK(!entry->is_dirty());
  }
}

DirOpenResult Directory::OpenImpl(
    const string& name,
    DirectoryChangeDelegate* delegate,
    const WeakHandle<TransactionObserver>&
        transaction_observer) {
  KernelLoadInfo info;
  // Temporary indices, used before kernel_ is initialized in case Load fails.
  // We swap these into the kernel in O(1) later.
  Directory::MetahandlesMap tmp_handles_map;
  JournalIndex delete_journals;

  DirOpenResult result =
      store_->Load(&tmp_handles_map, &delete_journals, &info);
  if (OPENED != result)
    return result;

  kernel_ = new Kernel(name, info, delegate, transaction_observer);
  delete_journal_.reset(new DeleteJournal(&delete_journals));
  InitializeIndices(&tmp_handles_map);

  // Write back the share info to reserve some space in 'next_id'. This will
  // prevent local ID reuse in the case of an early crash. See the comments in
  // TakeSnapshotForSaveChanges() or crbug.com/142987 for more information.
  kernel_->info_status = KERNEL_SHARE_INFO_DIRTY;
  if (!SaveChanges())
    return FAILED_INITIAL_WRITE;

  return OPENED;
}

DeleteJournal* Directory::delete_journal() {
  DCHECK(delete_journal_.get());
  return delete_journal_.get();
}

void Directory::Close() {
  store_.reset();
  if (kernel_) {
    delete kernel_;
    kernel_ = NULL;
  }
}

void Directory::OnUnrecoverableError(const BaseTransaction* trans,
                                     const tracked_objects::Location& location,
                                     const std::string& message) {
  DCHECK(trans != NULL);
  unrecoverable_error_set_ = true;
  unrecoverable_error_handler_->OnUnrecoverableError(location,
                                                     message);
}

EntryKernel* Directory::GetEntryById(const Id& id) {
  ScopedKernelLock lock(this);
  return GetEntryById(id, &lock);
}

EntryKernel* Directory::GetEntryById(const Id& id,
                                     ScopedKernelLock* const lock) {
  DCHECK(kernel_);
  // Find it in the in-memory ID index.
  IdsMap::iterator id_found = kernel_->ids_map.find(id.value());
  if (id_found != kernel_->ids_map.end()) {
    return id_found->second;
  }
  return NULL;
}

EntryKernel* Directory::GetEntryByClientTag(const string& tag) {
  ScopedKernelLock lock(this);
  DCHECK(kernel_);

  TagsMap::iterator it = kernel_->client_tags_map.find(tag);
  if (it != kernel_->client_tags_map.end()) {
    return it->second;
  }
  return NULL;
}

EntryKernel* Directory::GetEntryByServerTag(const string& tag) {
  ScopedKernelLock lock(this);
  DCHECK(kernel_);
  TagsMap::iterator it = kernel_->server_tags_map.find(tag);
  if (it != kernel_->server_tags_map.end()) {
    return it->second;
  }
  return NULL;
}

EntryKernel* Directory::GetEntryByHandle(int64 metahandle) {
  ScopedKernelLock lock(this);
  return GetEntryByHandle(metahandle, &lock);
}

EntryKernel* Directory::GetEntryByHandle(int64 metahandle,
                                         ScopedKernelLock* lock) {
  // Look up in memory.
  MetahandlesMap::iterator found =
      kernel_->metahandles_map.find(metahandle);
  if (found != kernel_->metahandles_map.end()) {
    // Found it in memory. Easy.
    return found->second;
  }
  return NULL;
}

bool Directory::GetChildHandlesById(
    BaseTransaction* trans, const Id& parent_id,
    Directory::Metahandles* result) {
  if (!SyncAssert(this == trans->directory(), FROM_HERE,
                  "Directories don't match", trans))
    return false;
  result->clear();

  ScopedKernelLock lock(this);
  AppendChildHandles(lock, parent_id, result);
  return true;
}

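// Illustrative sketch only: enumerating the children of a node under an open
// transaction. Variable names are hypothetical.
//
//   Directory::Metahandles children;
//   if (dir->GetChildHandlesById(&trans, parent_id, &children)) {
//     for (size_t i = 0; i < children.size(); ++i) {
//       Entry child(&trans, GET_BY_HANDLE, children[i]);
//       // ... inspect child ...
//     }
//   }
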
int Directory::GetTotalNodeCount(
    BaseTransaction* trans,
    EntryKernel* kernel) const {
  if (!SyncAssert(this == trans->directory(), FROM_HERE,
                  "Directories don't match", trans))
    return 0;

  int count = 1;
  std::deque<const OrderedChildSet*> child_sets;

  GetChildSetForKernel(trans, kernel, &child_sets);
  while (!child_sets.empty()) {
    const OrderedChildSet* set = child_sets.front();
    child_sets.pop_front();
    for (OrderedChildSet::const_iterator it = set->begin();
         it != set->end(); ++it) {
      count++;
      GetChildSetForKernel(trans, *it, &child_sets);
    }
  }

  return count;
}

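// Note (editorial, derived from the code above): the count includes the
// starting node itself, so a directory with two children and one grandchild
// yields 4. The traversal is a breadth-first walk over queued
// OrderedChildSets.
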
void Directory::GetChildSetForKernel(
    BaseTransaction* trans,
    EntryKernel* kernel,
    std::deque<const OrderedChildSet*>* child_sets) const {
  if (!kernel->ref(IS_DIR))
    return;  // Not a directory => no children.

  const OrderedChildSet* descendants =
      kernel_->parent_child_index.GetChildren(kernel->ref(ID));
  if (!descendants)
    return;  // This directory has no children.

  // Add our children to the list of items to be traversed.
  child_sets->push_back(descendants);
}

int Directory::GetPositionIndex(
    BaseTransaction* trans,
    EntryKernel* kernel) const {
  const OrderedChildSet* siblings =
      kernel_->parent_child_index.GetChildren(kernel->ref(PARENT_ID));

  OrderedChildSet::const_iterator it = siblings->find(kernel);
  return std::distance(siblings->begin(), it);
}

EntryKernel* Directory::GetRootEntry() {
  return GetEntryById(Id());
}

bool Directory::InsertEntry(BaseWriteTransaction* trans, EntryKernel* entry) {
  ScopedKernelLock lock(this);
  return InsertEntry(trans, entry, &lock);
}

bool Directory::InsertEntry(BaseWriteTransaction* trans,
                            EntryKernel* entry,
                            ScopedKernelLock* lock) {
  DCHECK(NULL != lock);
  if (!SyncAssert(NULL != entry, FROM_HERE, "Entry is null", trans))
    return false;

  static const char error[] = "Entry already in memory index.";

  if (!SyncAssert(
          kernel_->metahandles_map.insert(
              std::make_pair(entry->ref(META_HANDLE), entry)).second,
          FROM_HERE,
          error,
          trans)) {
    return false;
  }
  if (!SyncAssert(
          kernel_->ids_map.insert(
              std::make_pair(entry->ref(ID).value(), entry)).second,
          FROM_HERE,
          error,
          trans)) {
    return false;
  }
  if (ParentChildIndex::ShouldInclude(entry)) {
    if (!SyncAssert(kernel_->parent_child_index.Insert(entry),
                    FROM_HERE,
                    error,
                    trans)) {
      return false;
    }
  }

  // Entries should NEVER be created with a client tag or server tag.
  if (!SyncAssert(entry->ref(UNIQUE_SERVER_TAG).empty(), FROM_HERE,
                  "Server tag should be empty", trans)) {
    return false;
  }
  if (!SyncAssert(entry->ref(UNIQUE_CLIENT_TAG).empty(), FROM_HERE,
                  "Client tag should be empty", trans))
    return false;

  return true;
}

bool Directory::ReindexId(BaseWriteTransaction* trans,
                          EntryKernel* const entry,
                          const Id& new_id) {
  ScopedKernelLock lock(this);
  if (NULL != GetEntryById(new_id, &lock))
    return false;

  {
    // Update the indices that depend on the ID field.
    ScopedParentChildIndexUpdater updater_b(lock, entry,
        &kernel_->parent_child_index);
    size_t num_erased = kernel_->ids_map.erase(entry->ref(ID).value());
    DCHECK_EQ(1U, num_erased);
    entry->put(ID, new_id);
    kernel_->ids_map[entry->ref(ID).value()] = entry;
  }

  return true;
}

bool Directory::ReindexParentId(BaseWriteTransaction* trans,
                                EntryKernel* const entry,
                                const Id& new_parent_id) {
  ScopedKernelLock lock(this);

  {
    // Update the indices that depend on the PARENT_ID field.
    ScopedParentChildIndexUpdater index_updater(lock, entry,
        &kernel_->parent_child_index);
    entry->put(PARENT_ID, new_parent_id);
  }

  return true;
}

bool Directory::unrecoverable_error_set(const BaseTransaction* trans) const {
  DCHECK(trans != NULL);
  return unrecoverable_error_set_;
}

void Directory::ClearDirtyMetahandles() {
  kernel_->transaction_mutex.AssertAcquired();
  kernel_->dirty_metahandles.clear();
}

bool Directory::SafeToPurgeFromMemory(WriteTransaction* trans,
                                      const EntryKernel* const entry) const {
  bool safe = entry->ref(IS_DEL) && !entry->is_dirty() &&
      !entry->ref(SYNCING) && !entry->ref(IS_UNAPPLIED_UPDATE) &&
      !entry->ref(IS_UNSYNCED);

  if (safe) {
    int64 handle = entry->ref(META_HANDLE);
    const ModelType type = entry->GetServerModelType();
    if (!SyncAssert(kernel_->dirty_metahandles.count(handle) == 0U,
                    FROM_HERE,
                    "Dirty metahandles should be empty", trans))
      return false;
    // TODO(tim): Bug 49278.
    if (!SyncAssert(!kernel_->unsynced_metahandles.count(handle),
                    FROM_HERE,
                    "Unsynced handles should be empty",
                    trans))
      return false;
    if (!SyncAssert(!kernel_->unapplied_update_metahandles[type].count(handle),
                    FROM_HERE,
                    "Unapplied metahandles should be empty",
                    trans))
      return false;
  }

  return safe;
}

void Directory::TakeSnapshotForSaveChanges(SaveChangesSnapshot* snapshot) {
  ReadTransaction trans(FROM_HERE, this);
  ScopedKernelLock lock(this);

  // If there is an unrecoverable error then just bail out.
  if (unrecoverable_error_set(&trans))
    return;

  // Deep copy dirty entries from kernel_->metahandles_map into snapshot and
  // clear dirty flags.
  for (MetahandleSet::const_iterator i = kernel_->dirty_metahandles.begin();
       i != kernel_->dirty_metahandles.end(); ++i) {
    EntryKernel* entry = GetEntryByHandle(*i, &lock);
    if (!entry)
      continue;
    // Skip over false positives; it happens relatively infrequently.
    if (!entry->is_dirty())
      continue;
    snapshot->dirty_metas.insert(snapshot->dirty_metas.end(),
                                 new EntryKernel(*entry));
    DCHECK_EQ(1U, kernel_->dirty_metahandles.count(*i));
    // We don't bother removing from the index here as we blow the entire thing
    // in a moment, and it unnecessarily complicates iteration.
    entry->clear_dirty(NULL);
  }
  ClearDirtyMetahandles();

  // Set purged handles.
  DCHECK(snapshot->metahandles_to_purge.empty());
  snapshot->metahandles_to_purge.swap(kernel_->metahandles_to_purge);

  // Fill kernel_info_status and kernel_info.
  snapshot->kernel_info = kernel_->persisted_info;
  // To avoid reusing IDs if the process crashes, persist a next_id of greater
  // magnitude than any ID that could plausibly be handed out before the next
  // save. In other words, it's effectively impossible for the user to
  // generate 65536 new bookmarks in 3 seconds.
  snapshot->kernel_info.next_id -= 65536;
  snapshot->kernel_info_status = kernel_->info_status;
  // This one we reset on failure.
  kernel_->info_status = KERNEL_SHARE_INFO_VALID;

  delete_journal_->TakeSnapshotAndClear(
      &trans, &snapshot->delete_journals, &snapshot->delete_journals_to_purge);
}

bool Directory::SaveChanges() {
  bool success = false;

  base::AutoLock scoped_lock(kernel_->save_changes_mutex);

  // Snapshot and save.
  SaveChangesSnapshot snapshot;
  TakeSnapshotForSaveChanges(&snapshot);
  success = store_->SaveChanges(snapshot);

  // Handle success or failure.
  if (success)
    success = VacuumAfterSaveChanges(snapshot);
  else
    HandleSaveChangesFailure(snapshot);
  return success;
}

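// Editorial sketch of the save pipeline driven by the function above:
//
//   TakeSnapshotForSaveChanges()  // copy dirty entries, clear dirty bits
//   store_->SaveChanges()         // persist the snapshot
//   success: VacuumAfterSaveChanges()    // drop purgeable entries from memory
//   failure: HandleSaveChangesFailure()  // re-mark entries dirty, retry later
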
bool Directory::VacuumAfterSaveChanges(const SaveChangesSnapshot& snapshot) {
  if (snapshot.dirty_metas.empty())
    return true;

  // Need a write transaction as we are about to permanently purge entries.
  WriteTransaction trans(FROM_HERE, VACUUM_AFTER_SAVE, this);
  ScopedKernelLock lock(this);
  // Now drop everything we can out of memory.
  for (EntryKernelSet::const_iterator i = snapshot.dirty_metas.begin();
       i != snapshot.dirty_metas.end(); ++i) {
    MetahandlesMap::iterator found =
        kernel_->metahandles_map.find((*i)->ref(META_HANDLE));
    EntryKernel* entry = (found == kernel_->metahandles_map.end() ?
                          NULL : found->second);
    if (entry && SafeToPurgeFromMemory(&trans, entry)) {
      // We now drop deleted metahandles that are up to date on both the client
      // and the server.
      size_t num_erased = 0;
      num_erased = kernel_->metahandles_map.erase(entry->ref(META_HANDLE));
      DCHECK_EQ(1u, num_erased);
      num_erased = kernel_->ids_map.erase(entry->ref(ID).value());
      DCHECK_EQ(1u, num_erased);
      if (!entry->ref(UNIQUE_SERVER_TAG).empty()) {
        num_erased =
            kernel_->server_tags_map.erase(entry->ref(UNIQUE_SERVER_TAG));
        DCHECK_EQ(1u, num_erased);
      }
      if (!entry->ref(UNIQUE_CLIENT_TAG).empty()) {
        num_erased =
            kernel_->client_tags_map.erase(entry->ref(UNIQUE_CLIENT_TAG));
        DCHECK_EQ(1u, num_erased);
      }
      if (!SyncAssert(!kernel_->parent_child_index.Contains(entry),
                      FROM_HERE,
                      "Deleted entry still present",
                      (&trans)))
        return false;
      delete entry;
    }
    if (trans.unrecoverable_error_set())
      return false;
  }
  return true;
}

void Directory::UnapplyEntry(EntryKernel* entry) {
  int64 handle = entry->ref(META_HANDLE);
  ModelType server_type = GetModelTypeFromSpecifics(
      entry->ref(SERVER_SPECIFICS));

  // Clear enough so that on the next sync cycle all local data will
  // be overwritten.
  // Note: do not modify the root node in order to preserve the
  // initial sync ended bit for this type (else on the next restart
  // this type will be treated as disabled and therefore fully purged).
  if (IsRealDataType(server_type) &&
      ModelTypeToRootTag(server_type) == entry->ref(UNIQUE_SERVER_TAG)) {
    return;
  }

  // Set the unapplied bit if this item has server data.
  if (IsRealDataType(server_type) && !entry->ref(IS_UNAPPLIED_UPDATE)) {
    entry->put(IS_UNAPPLIED_UPDATE, true);
    kernel_->unapplied_update_metahandles[server_type].insert(handle);
    entry->mark_dirty(&kernel_->dirty_metahandles);
  }

  // Unset the unsynced bit.
  if (entry->ref(IS_UNSYNCED)) {
    kernel_->unsynced_metahandles.erase(handle);
    entry->put(IS_UNSYNCED, false);
    entry->mark_dirty(&kernel_->dirty_metahandles);
  }

  // Mark the item as locally deleted. No deleted items are allowed in the
  // parent child index.
  if (!entry->ref(IS_DEL)) {
    kernel_->parent_child_index.Remove(entry);
    entry->put(IS_DEL, true);
    entry->mark_dirty(&kernel_->dirty_metahandles);
  }

  // Set the version to the "newly created" version.
  if (entry->ref(BASE_VERSION) != CHANGES_VERSION) {
    entry->put(BASE_VERSION, CHANGES_VERSION);
    entry->mark_dirty(&kernel_->dirty_metahandles);
  }

  // At this point locally created items that aren't synced will become locally
  // deleted items, and purged on the next snapshot. All other items will match
  // the state they would have had if they were just created via a server
  // update. See MutableEntry::MutableEntry(.., CreateNewUpdateItem, ..).
}

void Directory::DeleteEntry(bool save_to_journal,
                            EntryKernel* entry,
                            EntryKernelSet* entries_to_journal) {
  int64 handle = entry->ref(META_HANDLE);
  ModelType server_type = GetModelTypeFromSpecifics(
      entry->ref(SERVER_SPECIFICS));

  kernel_->metahandles_to_purge.insert(handle);

  size_t num_erased = 0;
  num_erased = kernel_->metahandles_map.erase(entry->ref(META_HANDLE));
  DCHECK_EQ(1u, num_erased);
  num_erased = kernel_->ids_map.erase(entry->ref(ID).value());
  DCHECK_EQ(1u, num_erased);
  num_erased = kernel_->unsynced_metahandles.erase(handle);
  DCHECK_EQ(entry->ref(IS_UNSYNCED), num_erased > 0);
  num_erased =
      kernel_->unapplied_update_metahandles[server_type].erase(handle);
  DCHECK_EQ(entry->ref(IS_UNAPPLIED_UPDATE), num_erased > 0);
  if (kernel_->parent_child_index.Contains(entry))
    kernel_->parent_child_index.Remove(entry);

  if (!entry->ref(UNIQUE_CLIENT_TAG).empty()) {
    num_erased =
        kernel_->client_tags_map.erase(entry->ref(UNIQUE_CLIENT_TAG));
    DCHECK_EQ(1u, num_erased);
  }
  if (!entry->ref(UNIQUE_SERVER_TAG).empty()) {
    num_erased =
        kernel_->server_tags_map.erase(entry->ref(UNIQUE_SERVER_TAG));
    DCHECK_EQ(1u, num_erased);
  }

  if (save_to_journal) {
    entries_to_journal->insert(entry);
  } else {
    delete entry;
  }
}

bool Directory::PurgeEntriesWithTypeIn(ModelTypeSet disabled_types,
                                       ModelTypeSet types_to_journal,
                                       ModelTypeSet types_to_unapply) {
  disabled_types.RemoveAll(ProxyTypes());

  if (disabled_types.Empty())
    return true;

  {
    WriteTransaction trans(FROM_HERE, PURGE_ENTRIES, this);

    EntryKernelSet entries_to_journal;
    STLElementDeleter<EntryKernelSet> journal_deleter(&entries_to_journal);

    {
      ScopedKernelLock lock(this);

      // We iterate in two passes to avoid a bug in STLport (which is used in
      // the Android build). There are some versions of that library where a
      // hash_map's iterators can be invalidated when an item is erased from
      // the hash_map.
      // See http://sourceforge.net/p/stlport/bugs/239/.

      std::set<EntryKernel*> to_purge;
      for (MetahandlesMap::iterator it = kernel_->metahandles_map.begin();
           it != kernel_->metahandles_map.end(); ++it) {
        const sync_pb::EntitySpecifics& local_specifics =
            it->second->ref(SPECIFICS);
        const sync_pb::EntitySpecifics& server_specifics =
            it->second->ref(SERVER_SPECIFICS);
        ModelType local_type = GetModelTypeFromSpecifics(local_specifics);
        ModelType server_type = GetModelTypeFromSpecifics(server_specifics);

        if ((IsRealDataType(local_type) && disabled_types.Has(local_type)) ||
            (IsRealDataType(server_type) && disabled_types.Has(server_type))) {
          to_purge.insert(it->second);
        }
      }

      for (std::set<EntryKernel*>::iterator it = to_purge.begin();
           it != to_purge.end(); ++it) {
        EntryKernel* entry = *it;

        const sync_pb::EntitySpecifics& local_specifics =
            (*it)->ref(SPECIFICS);
        const sync_pb::EntitySpecifics& server_specifics =
            (*it)->ref(SERVER_SPECIFICS);
        ModelType local_type = GetModelTypeFromSpecifics(local_specifics);
        ModelType server_type = GetModelTypeFromSpecifics(server_specifics);

        if (types_to_unapply.Has(local_type) ||
            types_to_unapply.Has(server_type)) {
          UnapplyEntry(entry);
        } else {
          bool save_to_journal =
              (types_to_journal.Has(local_type) ||
               types_to_journal.Has(server_type)) &&
              (delete_journal_->IsDeleteJournalEnabled(local_type) ||
               delete_journal_->IsDeleteJournalEnabled(server_type));
          DeleteEntry(save_to_journal, entry, &entries_to_journal);
        }
      }

      delete_journal_->AddJournalBatch(&trans, entries_to_journal);

      // Ensure meta tracking for these data types reflects the purged state.
      for (ModelTypeSet::Iterator it = disabled_types.First();
           it.Good(); it.Inc()) {
        kernel_->persisted_info.transaction_version[it.Get()] = 0;

        // Don't discard progress markers for unapplied types.
        if (!types_to_unapply.Has(it.Get()))
          kernel_->persisted_info.reset_download_progress(it.Get());
      }
    }
  }
  return true;
}

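// Illustrative sketch only: a hypothetical call disabling BOOKMARKS and
// PREFERENCES, journaling the bookmark deletions while merely unapplying
// preferences (exact type choices are invented for illustration).
//
//   dir->PurgeEntriesWithTypeIn(ModelTypeSet(BOOKMARKS, PREFERENCES),
//                               ModelTypeSet(BOOKMARKS),
//                               ModelTypeSet(PREFERENCES));
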
void Directory::HandleSaveChangesFailure(const SaveChangesSnapshot& snapshot) {
  WriteTransaction trans(FROM_HERE, HANDLE_SAVE_FAILURE, this);
  ScopedKernelLock lock(this);
  kernel_->info_status = KERNEL_SHARE_INFO_DIRTY;

  // Because we optimistically cleared the dirty bit on the real entries when
  // taking the snapshot, we must restore it on failure. Not doing this could
  // cause lost data, if no other changes are made to the in-memory entries
  // that would cause the dirty bit to get set again. Setting the bit ensures
  // that SaveChanges will at least try again later.
  for (EntryKernelSet::const_iterator i = snapshot.dirty_metas.begin();
       i != snapshot.dirty_metas.end(); ++i) {
    MetahandlesMap::iterator found =
        kernel_->metahandles_map.find((*i)->ref(META_HANDLE));
    if (found != kernel_->metahandles_map.end()) {
      found->second->mark_dirty(&kernel_->dirty_metahandles);
    }
  }

  kernel_->metahandles_to_purge.insert(snapshot.metahandles_to_purge.begin(),
                                       snapshot.metahandles_to_purge.end());

  // Restore delete journals.
  delete_journal_->AddJournalBatch(&trans, snapshot.delete_journals);
  delete_journal_->PurgeDeleteJournals(&trans,
                                       snapshot.delete_journals_to_purge);
}

void Directory::GetDownloadProgress(
    ModelType model_type,
    sync_pb::DataTypeProgressMarker* value_out) const {
  ScopedKernelLock lock(this);
  return value_out->CopyFrom(
      kernel_->persisted_info.download_progress[model_type]);
}

void Directory::GetDownloadProgressAsString(
    ModelType model_type,
    std::string* value_out) const {
  ScopedKernelLock lock(this);
  kernel_->persisted_info.download_progress[model_type].SerializeToString(
      value_out);
}

size_t Directory::GetEntriesCount() const {
  ScopedKernelLock lock(this);
  return kernel_->metahandles_map.size();
}

void Directory::SetDownloadProgress(
    ModelType model_type,
    const sync_pb::DataTypeProgressMarker& new_progress) {
  ScopedKernelLock lock(this);
  kernel_->persisted_info.download_progress[model_type].CopyFrom(new_progress);
  kernel_->info_status = KERNEL_SHARE_INFO_DIRTY;
}

int64 Directory::GetTransactionVersion(ModelType type) const {
  kernel_->transaction_mutex.AssertAcquired();
  return kernel_->persisted_info.transaction_version[type];
}

void Directory::IncrementTransactionVersion(ModelType type) {
  kernel_->transaction_mutex.AssertAcquired();
  kernel_->persisted_info.transaction_version[type]++;
}

ModelTypeSet Directory::InitialSyncEndedTypes() {
  syncable::ReadTransaction trans(FROM_HERE, this);
  ModelTypeSet protocol_types = ProtocolTypes();
  ModelTypeSet initial_sync_ended_types;
  for (ModelTypeSet::Iterator i = protocol_types.First(); i.Good(); i.Inc()) {
    if (InitialSyncEndedForType(&trans, i.Get())) {
      initial_sync_ended_types.Put(i.Get());
    }
  }
  return initial_sync_ended_types;
}

bool Directory::InitialSyncEndedForType(ModelType type) {
  syncable::ReadTransaction trans(FROM_HERE, this);
  return InitialSyncEndedForType(&trans, type);
}

bool Directory::InitialSyncEndedForType(
    BaseTransaction* trans, ModelType type) {
  // True iff the type's root node has been received and applied.
  syncable::Entry entry(trans,
                        syncable::GET_BY_SERVER_TAG,
                        ModelTypeToRootTag(type));
  return entry.good() && entry.GetBaseVersion() != CHANGES_VERSION;
}

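// Illustrative sketch only: checking whether a type has finished its initial
// download. BOOKMARKS here is just an example type.
//
//   if (!dir->InitialSyncEndedForType(BOOKMARKS)) {
//     // The BOOKMARKS root node hasn't been received and applied yet, so a
//     // from-scratch download for the type is still outstanding.
//   }
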
string Directory::store_birthday() const {
  ScopedKernelLock lock(this);
  return kernel_->persisted_info.store_birthday;
}

void Directory::set_store_birthday(const string& store_birthday) {
  ScopedKernelLock lock(this);
  if (kernel_->persisted_info.store_birthday == store_birthday)
    return;
  kernel_->persisted_info.store_birthday = store_birthday;
  kernel_->info_status = KERNEL_SHARE_INFO_DIRTY;
}

string Directory::bag_of_chips() const {
  ScopedKernelLock lock(this);
  return kernel_->persisted_info.bag_of_chips;
}

void Directory::set_bag_of_chips(const string& bag_of_chips) {
  ScopedKernelLock lock(this);
  if (kernel_->persisted_info.bag_of_chips == bag_of_chips)
    return;
  kernel_->persisted_info.bag_of_chips = bag_of_chips;
  kernel_->info_status = KERNEL_SHARE_INFO_DIRTY;
}

string Directory::cache_guid() const {
  // No need to lock since nothing ever writes to it after load.
  return kernel_->cache_guid;
}

NigoriHandler* Directory::GetNigoriHandler() {
  return nigori_handler_;
}

Cryptographer* Directory::GetCryptographer(const BaseTransaction* trans) {
  DCHECK_EQ(this, trans->directory());
  return cryptographer_;
}

void Directory::GetAllMetaHandles(BaseTransaction* trans,
                                  MetahandleSet* result) {
  result->clear();
  ScopedKernelLock lock(this);
  for (MetahandlesMap::iterator i = kernel_->metahandles_map.begin();
       i != kernel_->metahandles_map.end(); ++i) {
    result->insert(i->first);
  }
}

void Directory::GetUnsyncedMetaHandles(BaseTransaction* trans,
                                       Metahandles* result) {
  result->clear();
  ScopedKernelLock lock(this);
  copy(kernel_->unsynced_metahandles.begin(),
       kernel_->unsynced_metahandles.end(), back_inserter(*result));
}

int64 Directory::unsynced_entity_count() const {
  ScopedKernelLock lock(this);
  return kernel_->unsynced_metahandles.size();
}

bool Directory::TypeHasUnappliedUpdates(ModelType type) {
  ScopedKernelLock lock(this);
  return !kernel_->unapplied_update_metahandles[type].empty();
}

void Directory::GetUnappliedUpdateMetaHandles(
    BaseTransaction* trans,
    FullModelTypeSet server_types,
    std::vector<int64>* result) {
  result->clear();
  ScopedKernelLock lock(this);
  for (int i = UNSPECIFIED; i < MODEL_TYPE_COUNT; ++i) {
    const ModelType type = ModelTypeFromInt(i);
    if (server_types.Has(type)) {
      std::copy(kernel_->unapplied_update_metahandles[type].begin(),
                kernel_->unapplied_update_metahandles[type].end(),
                back_inserter(*result));
    }
  }
}

void Directory::CollectMetaHandleCounts(
    std::vector<int>* num_entries_by_type,
    std::vector<int>* num_to_delete_entries_by_type) {
  syncable::ReadTransaction trans(FROM_HERE, this);
  ScopedKernelLock lock(this);

  for (MetahandlesMap::iterator it = kernel_->metahandles_map.begin();
       it != kernel_->metahandles_map.end(); ++it) {
    EntryKernel* entry = it->second;
    const ModelType type = GetModelTypeFromSpecifics(entry->ref(SPECIFICS));
    (*num_entries_by_type)[type]++;
    if (entry->ref(IS_DEL))
      (*num_to_delete_entries_by_type)[type]++;
  }
}

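// Note for callers (inferred from the unchecked indexing above, not stated
// elsewhere in this file): both output vectors must already be sized to
// MODEL_TYPE_COUNT, e.g.:
//
//   std::vector<int> num_entries_by_type(MODEL_TYPE_COUNT, 0);
//   std::vector<int> num_to_delete_entries_by_type(MODEL_TYPE_COUNT, 0);
//   dir->CollectMetaHandleCounts(&num_entries_by_type,
//                                &num_to_delete_entries_by_type);
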
scoped_ptr<base::ListValue> Directory::GetAllNodeDetails(
    BaseTransaction* trans) {
  scoped_ptr<base::ListValue> nodes(new base::ListValue());

  ScopedKernelLock lock(this);
  for (MetahandlesMap::iterator it = kernel_->metahandles_map.begin();
       it != kernel_->metahandles_map.end(); ++it) {
    EntryKernel* kernel = it->second;
    scoped_ptr<base::DictionaryValue> node(
        kernel->ToValue(GetCryptographer(trans)));

    // Add the position index if appropriate. This must be done here (and not
    // in EntryKernel) because the EntryKernel does not have access to its
    // siblings.
    if (kernel->ShouldMaintainPosition() && !kernel->ref(IS_DEL)) {
      node->SetInteger("positionIndex", GetPositionIndex(trans, kernel));
    }

    nodes->Append(node.release());
  }

  return nodes.Pass();
}

bool Directory::CheckInvariantsOnTransactionClose(
    syncable::BaseTransaction* trans,
    const MetahandleSet& modified_handles) {
  // NOTE: The trans may be in the process of being destructed. Be careful if
  // you wish to call any of its virtual methods.
  switch (invariant_check_level_) {
    case FULL_DB_VERIFICATION: {
      MetahandleSet all_handles;
      GetAllMetaHandles(trans, &all_handles);
      return CheckTreeInvariants(trans, all_handles);
    }
    case VERIFY_CHANGES: {
      return CheckTreeInvariants(trans, modified_handles);
    }
    case OFF: {
      return true;
    }
  }
  NOTREACHED();
  return false;
}

bool Directory::FullyCheckTreeInvariants(syncable::BaseTransaction* trans) {
  MetahandleSet handles;
  GetAllMetaHandles(trans, &handles);
  return CheckTreeInvariants(trans, handles);
}

bool Directory::CheckTreeInvariants(syncable::BaseTransaction* trans,
                                    const MetahandleSet& handles) {
  MetahandleSet::const_iterator i;
  for (i = handles.begin(); i != handles.end(); ++i) {
    int64 metahandle = *i;
    Entry e(trans, GET_BY_HANDLE, metahandle);
    if (!SyncAssert(e.good(), FROM_HERE, "Entry is bad", trans))
      return false;
    syncable::Id id = e.GetId();
    syncable::Id parentid = e.GetParentId();

    if (id.IsRoot()) {
      if (!SyncAssert(e.GetIsDir(), FROM_HERE,
                      "Entry should be a directory",
                      trans))
        return false;
      if (!SyncAssert(parentid.IsRoot(), FROM_HERE,
                      "Entry should be root",
                      trans))
        return false;
      if (!SyncAssert(!e.GetIsUnsynced(), FROM_HERE,
                      "Entry should be synced",
                      trans))
        return false;
      continue;
    }

    if (!e.GetIsDel()) {
      if (!SyncAssert(id != parentid, FROM_HERE,
                      "Id should be different from parent id.",
                      trans))
        return false;
      if (!SyncAssert(!e.GetNonUniqueName().empty(), FROM_HERE,
                      "Non unique name should not be empty.",
                      trans))
        return false;
      int safety_count = handles.size() + 1;
      while (!parentid.IsRoot()) {
        Entry parent(trans, GET_BY_ID, parentid);
        if (!SyncAssert(parent.good(), FROM_HERE,
                        "Parent entry is not valid.",
                        trans))
          return false;
        if (handles.end() == handles.find(parent.GetMetahandle()))
          break;  // Skip further checking if parent was unmodified.
        if (!SyncAssert(parent.GetIsDir(), FROM_HERE,
                        "Parent should be a directory",
                        trans))
          return false;
        if (!SyncAssert(!parent.GetIsDel(), FROM_HERE,
                        "Parent should not have been marked for deletion.",
                        trans))
          return false;
        if (!SyncAssert(handles.end() != handles.find(parent.GetMetahandle()),
                        FROM_HERE,
                        "Parent should be in the index.",
                        trans))
          return false;
        parentid = parent.GetParentId();
        if (!SyncAssert(--safety_count > 0, FROM_HERE,
                        "Count should be greater than zero.",
                        trans))
          return false;
      }
    }
    int64 base_version = e.GetBaseVersion();
    int64 server_version = e.GetServerVersion();
    bool using_unique_client_tag = !e.GetUniqueClientTag().empty();
    if (CHANGES_VERSION == base_version || 0 == base_version) {
      if (e.GetIsUnappliedUpdate()) {
        // Must be a new item, or a de-duplicated unique client tag
        // that was created both locally and remotely.
        if (!using_unique_client_tag) {
          if (!SyncAssert(e.GetIsDel(), FROM_HERE,
                          "The entry should have been deleted.",
                          trans))
            return false;
        }
        // It came from the server, so it must have a server ID.
        if (!SyncAssert(id.ServerKnows(), FROM_HERE,
                        "The id should be from a server.",
                        trans))
          return false;
      } else {
        if (e.GetIsDir()) {
          // TODO(chron): Implement this mode if clients ever need it.
          // For now, you can't combine a client tag and a directory.
          if (!SyncAssert(!using_unique_client_tag, FROM_HERE,
                          "Directory cannot have a client tag.",
                          trans))
            return false;
        }
        // Should be an uncommitted item, or a successfully deleted one.
        if (!e.GetIsDel()) {
          if (!SyncAssert(e.GetIsUnsynced(), FROM_HERE,
                          "The item should be unsynced.",
                          trans))
            return false;
        }
        // If the next check failed, it would imply that an item exists
        // on the server, isn't waiting for application locally, but either
        // is an unsynced create or a successful delete in the local copy.
        // Either way, that's a mismatch.
        if (!SyncAssert(0 == server_version, FROM_HERE,
                        "Server version should be zero.",
                        trans))
          return false;
        // Items that aren't using the unique client tag should have a zero
        // base version only if they have a local ID. Items with unique client
        // tags are allowed to use the zero base version for undeletion and
        // de-duplication; the unique client tag trumps the server ID.
        if (!using_unique_client_tag) {
          if (!SyncAssert(!id.ServerKnows(), FROM_HERE,
                          "Should be a client only id.",
                          trans))
            return false;
        }
      }
    } else {
      if (!SyncAssert(id.ServerKnows(),
                      FROM_HERE,
                      "Should be a server id.",
                      trans))
        return false;
    }
    // Server-unknown items that are locally deleted should not be sent up to
    // the server. They must be !IS_UNSYNCED.
    if (!SyncAssert(!(!id.ServerKnows() && e.GetIsDel() && e.GetIsUnsynced()),
                    FROM_HERE,
                    "Locally deleted item must not be unsynced.",
                    trans)) {
      return false;
    }
  }
  return true;
}

void Directory::SetInvariantCheckLevel(InvariantCheckLevel check_level) {
  invariant_check_level_ = check_level;
}

int64 Directory::NextMetahandle() {
  ScopedKernelLock lock(this);
  int64 metahandle = (kernel_->next_metahandle)++;
  return metahandle;
}

// Always returns a client ID that is the string representation of a negative
// number.
Id Directory::NextId() {
  int64 result;
  {
    ScopedKernelLock lock(this);
    result = (kernel_->persisted_info.next_id)--;
    kernel_->info_status = KERNEL_SHARE_INFO_DIRTY;
  }
  DCHECK_LT(result, 0);
  return Id::CreateFromClientString(base::Int64ToString(result));
}

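// For example, with persisted_info.next_id at -5, successive NextId() calls
// hand out -5, -6, ... to Id::CreateFromClientString. The exact string form
// is whatever that factory produces; the invariant this function guarantees
// is only that the underlying numbers are negative and strictly decreasing.
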
bool Directory::HasChildren(BaseTransaction* trans, const Id& id) {
  ScopedKernelLock lock(this);
  return kernel_->parent_child_index.GetChildren(id) != NULL;
}

Id Directory::GetFirstChildId(BaseTransaction* trans,
                              const EntryKernel* parent) {
  DCHECK(parent);
  DCHECK(parent->ref(IS_DIR));

  ScopedKernelLock lock(this);
  const OrderedChildSet* children =
      kernel_->parent_child_index.GetChildren(parent->ref(ID));

  // We're expected to return root if there are no children.
  if (!children)
    return Id();

  return (*children->begin())->ref(ID);
}

syncable::Id Directory::GetPredecessorId(EntryKernel* e) {
  ScopedKernelLock lock(this);

  DCHECK(ParentChildIndex::ShouldInclude(e));
  const OrderedChildSet* children =
      kernel_->parent_child_index.GetChildren(e->ref(PARENT_ID));
  DCHECK(children && !children->empty());
  OrderedChildSet::const_iterator i = children->find(e);
  DCHECK(i != children->end());

  if (i == children->begin()) {
    return Id();
  } else {
    i--;
    return (*i)->ref(ID);
  }
}

syncable::Id Directory::GetSuccessorId(EntryKernel* e) {
  ScopedKernelLock lock(this);

  DCHECK(ParentChildIndex::ShouldInclude(e));
  const OrderedChildSet* children =
      kernel_->parent_child_index.GetChildren(e->ref(PARENT_ID));
  DCHECK(children && !children->empty());
  OrderedChildSet::const_iterator i = children->find(e);
  DCHECK(i != children->end());

  i++;
  if (i == children->end()) {
    return Id();
  } else {
    return (*i)->ref(ID);
  }
}

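// Illustrative sketch only: walking a sibling chain left-to-right with the
// accessors above. Both return the root Id() at the end of the chain, which
// is what terminates the loop. Variable names are hypothetical.
//
//   syncable::Id child_id = dir->GetFirstChildId(&trans, parent_kernel);
//   while (!child_id.IsRoot()) {
//     EntryKernel* child = dir->GetEntryById(child_id);
//     // ... visit child ...
//     child_id = dir->GetSuccessorId(child);
//   }
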
// TODO(rlarocque): Remove all support for placing ShouldMaintainPosition()
// items as siblings of items that do not maintain positions. It is required
// only for tests. See crbug.com/178282.
void Directory::PutPredecessor(EntryKernel* e, EntryKernel* predecessor) {
  DCHECK(!e->ref(IS_DEL));
  if (!e->ShouldMaintainPosition()) {
    DCHECK(!e->ref(UNIQUE_POSITION).IsValid());
    return;
  }
  std::string suffix = e->ref(UNIQUE_BOOKMARK_TAG);
  DCHECK(!suffix.empty());

  // Remove our item from the ParentChildIndex and remember to re-add it later.
  ScopedKernelLock lock(this);
  ScopedParentChildIndexUpdater updater(lock, e, &kernel_->parent_child_index);

  // Note: The ScopedParentChildIndexUpdater will update this set for us as we
  // leave this function.
  const OrderedChildSet* siblings =
      kernel_->parent_child_index.GetChildren(e->ref(PARENT_ID));

  if (!siblings) {
    // This parent currently has no other children.
    DCHECK(predecessor->ref(ID).IsRoot());
    UniquePosition pos = UniquePosition::InitialPosition(suffix);
    e->put(UNIQUE_POSITION, pos);
    return;
  }

  if (predecessor->ref(ID).IsRoot()) {
    // We have at least one sibling, and we're inserting to the left of them.
    UniquePosition successor_pos = (*siblings->begin())->ref(UNIQUE_POSITION);

    UniquePosition pos;
    if (!successor_pos.IsValid()) {
      // If all our successors are of non-positionable types, just create an
      // initial position. We arbitrarily choose to sort invalid positions to
      // the right of the valid positions.
      //
      // We really shouldn't need to support this. See TODO above.
      pos = UniquePosition::InitialPosition(suffix);
    } else {
      DCHECK(!siblings->empty());
      pos = UniquePosition::Before(successor_pos, suffix);
    }

    e->put(UNIQUE_POSITION, pos);
    return;
  }

  // We can't support placing an item after an invalid position. Fortunately,
  // the tests don't exercise this particular case. We should not support
  // siblings with invalid positions at all. See TODO above.
  DCHECK(predecessor->ref(UNIQUE_POSITION).IsValid());

  OrderedChildSet::const_iterator neighbour = siblings->find(predecessor);
  DCHECK(neighbour != siblings->end());

  ++neighbour;
  if (neighbour == siblings->end()) {
    // Inserting at the end of the list.
    UniquePosition pos = UniquePosition::After(
        predecessor->ref(UNIQUE_POSITION),
        suffix);
    e->put(UNIQUE_POSITION, pos);
    return;
  }

  EntryKernel* successor = *neighbour;

  // Another mixed valid and invalid position case. This one could be supported
  // in theory, but we're trying to deprecate support for siblings with and
  // without valid positions. See TODO above.
  DCHECK(successor->ref(UNIQUE_POSITION).IsValid());

  // Finally, the normal case: inserting between two elements.
  UniquePosition pos = UniquePosition::Between(
      predecessor->ref(UNIQUE_POSITION),
      successor->ref(UNIQUE_POSITION),
      suffix);
  e->put(UNIQUE_POSITION, pos);
  return;
}

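// Illustrative sketch only: the positioning semantics of PutPredecessor.
// Passing an EntryKernel whose ID is root means "make e the leftmost child";
// any other predecessor places e immediately after it:
//
//   dir->PutPredecessor(e, root_entry);   // e becomes the first sibling
//   dir->PutPredecessor(e, predecessor);  // e goes right after predecessor
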
// TODO(rlarocque): Avoid this indirection. Just return the set.
void Directory::AppendChildHandles(const ScopedKernelLock& lock,
                                   const Id& parent_id,
                                   Directory::Metahandles* result) {
  const OrderedChildSet* children =
      kernel_->parent_child_index.GetChildren(parent_id);
  if (!children)
    return;

  for (OrderedChildSet::const_iterator i = children->begin();
       i != children->end(); ++i) {
    DCHECK_EQ(parent_id, (*i)->ref(PARENT_ID));
    result->push_back((*i)->ref(META_HANDLE));
  }
}

}  // namespace syncable
}  // namespace syncer