sync/engine/get_commit_ids.cc
// Copyright 2013 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "sync/engine/get_commit_ids.h"

#include <set>
#include <vector>

#include "base/basictypes.h"
#include "sync/engine/syncer_util.h"
#include "sync/syncable/directory.h"
#include "sync/syncable/entry.h"
#include "sync/syncable/nigori_handler.h"
#include "sync/syncable/nigori_util.h"
#include "sync/syncable/syncable_base_transaction.h"
#include "sync/syncable/syncable_util.h"
#include "sync/util/cryptographer.h"

using std::set;
using std::vector;

namespace syncer {

namespace {

// Forward-declare some helper functions. This gives us more options for
// ordering the function definitions within this file.

// Filters |unsynced_handles| to remove all entries that do not belong to the
// specified |requested_types|, or are not eligible for a commit at this time.
void FilterUnreadyEntries(
    syncable::BaseTransaction* trans,
    ModelTypeSet requested_types,
    ModelTypeSet encrypted_types,
    bool passphrase_missing,
    const syncable::Directory::Metahandles& unsynced_handles,
    std::set<int64>* ready_unsynced_set);

// Given a set of commit metahandles that are ready for commit
// (|ready_unsynced_set|), sorts these into commit order and places up to
// |max_entries| of them in the output parameter |out|.
//
// See the header file for an explanation of commit ordering.
void OrderCommitIds(
    syncable::BaseTransaction* trans,
    size_t max_entries,
    const std::set<int64>& ready_unsynced_set,
    std::vector<int64>* out);

}  // namespace
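
// Collects up to |max_entries| unsynced entries of |type| that are currently
// eligible for commit and writes their metahandles, in commit order, to
// |out|.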
void GetCommitIdsForType(
    syncable::BaseTransaction* trans,
    ModelType type,
    size_t max_entries,
    syncable::Directory::Metahandles* out) {
  syncable::Directory* dir = trans->directory();

  // Gather the full set of unsynced items and store it in the session. They
  // are not in the correct order for commit.
  std::set<int64> ready_unsynced_set;
  syncable::Directory::Metahandles all_unsynced_handles;
  GetUnsyncedEntries(trans, &all_unsynced_handles);

  ModelTypeSet encrypted_types;
  bool passphrase_missing = false;
  Cryptographer* cryptographer = dir->GetCryptographer(trans);
  if (cryptographer) {
    encrypted_types = dir->GetNigoriHandler()->GetEncryptedTypes(trans);
    passphrase_missing = cryptographer->has_pending_keys();
  }

  // We filter out all unready entries from the set of unsynced handles. This
  // new set of ready and unsynced items is then what we use to determine what
  // is a candidate for commit. The caller of this SyncerCommand is responsible
  // for ensuring that no throttled types are included among the
  // requested_types.
  FilterUnreadyEntries(trans,
                       ModelTypeSet(type),
                       encrypted_types,
                       passphrase_missing,
                       all_unsynced_handles,
                       &ready_unsynced_set);

  OrderCommitIds(trans, max_entries, ready_unsynced_set, out);

  for (size_t i = 0; i < out->size(); i++) {
    DVLOG(1) << "Debug commit batch result:" << (*out)[i];
  }
}

namespace {
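
// Returns true if |entry| is unsynced but the server already has a newer
// version than the one this local change is based on; such an entry is in
// conflict and should not be committed until the conflict is resolved.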
bool IsEntryInConflict(const syncable::Entry& entry) {
  if (entry.GetIsUnsynced() &&
      entry.GetServerVersion() > 0 &&
      (entry.GetServerVersion() > entry.GetBaseVersion())) {
    // The local and server versions don't match. The item must be in
    // conflict, so there's no point in attempting to commit.
    DCHECK(entry.GetIsUnappliedUpdate());
    DVLOG(1) << "Excluding entry from commit due to version mismatch "
             << entry;
    return true;
  }
  return false;
}

// An entry is not considered ready for commit if any of the following are
// true:
// 1. It's in conflict.
// 2. It requires encryption (either the type is encrypted but a passphrase
//    is missing from the cryptographer, or the entry itself wasn't properly
//    encrypted).
// 3. Its type is currently throttled.
// 4. It's a delete of an item that was never committed to the server.
bool IsEntryReadyForCommit(ModelTypeSet requested_types,
                           ModelTypeSet encrypted_types,
                           bool passphrase_missing,
                           const syncable::Entry& entry) {
  DCHECK(entry.GetIsUnsynced());
  if (IsEntryInConflict(entry))
    return false;

  const ModelType type = entry.GetModelType();
  // We special case the nigori node because even though it is considered an
  // "encrypted type", not all nigori node changes require valid encryption
  // (ex: sync_tabs).
  if ((type != NIGORI) && encrypted_types.Has(type) &&
      (passphrase_missing ||
       syncable::EntryNeedsEncryption(encrypted_types, entry))) {
    // This entry requires encryption but is not properly encrypted (possibly
    // due to the cryptographer not being initialized or the user not having
    // provided the most recent passphrase).
    DVLOG(1) << "Excluding entry from commit due to lack of encryption "
             << entry;
    return false;
  }

  // Ignore it if it's not in our set of requested types.
  if (!requested_types.Has(type))
    return false;

  if (entry.GetIsDel() && !entry.GetId().ServerKnows()) {
    // New clients (following the resolution of crbug.com/125381) should not
    // create such items. Old clients may have left some in the database
    // (crbug.com/132905), but we should now be cleaning them on startup.
    NOTREACHED() << "Found deleted and unsynced local item: " << entry;
    return false;
  }

  // Extra validity checks.
  syncable::Id id = entry.GetId();
  if (id == entry.GetParentId()) {
    CHECK(id.IsRoot()) << "Non-root item is self parenting." << entry;
    // If the root becomes unsynced it can cause us problems.
    NOTREACHED() << "Root item became unsynced " << entry;
    return false;
  }

  if (entry.IsRoot()) {
    NOTREACHED() << "Permanent item became unsynced " << entry;
    return false;
  }

  DVLOG(2) << "Entry is ready for commit: " << entry;
  return true;
}

// Filters |unsynced_handles| to remove all entries that do not belong to the
// specified |requested_types|, or are not eligible for a commit at this time.
void FilterUnreadyEntries(
    syncable::BaseTransaction* trans,
    ModelTypeSet requested_types,
    ModelTypeSet encrypted_types,
    bool passphrase_missing,
    const syncable::Directory::Metahandles& unsynced_handles,
    std::set<int64>* ready_unsynced_set) {
  for (syncable::Directory::Metahandles::const_iterator iter =
           unsynced_handles.begin(); iter != unsynced_handles.end(); ++iter) {
    syncable::Entry entry(trans, syncable::GET_BY_HANDLE, *iter);
    if (IsEntryReadyForCommit(requested_types,
                              encrypted_types,
                              passphrase_missing,
                              entry)) {
      ready_unsynced_set->insert(*iter);
    }
  }
}

// This class helps to implement OrderCommitIds(). Its members track the
// progress of a traversal while its methods extend it. It can return early
// if the traversal reaches the desired size before the full traversal is
// complete.
class Traversal {
 public:
  Traversal(
      syncable::BaseTransaction* trans,
      int64 max_entries,
      syncable::Directory::Metahandles* out);
  ~Traversal();

  // First step of traversal building. Adds non-deleted items in order.
  void AddCreatesAndMoves(const std::set<int64>& ready_unsynced_set);

  // Second step of traversal building. Appends deleted items.
  void AddDeletes(const std::set<int64>& ready_unsynced_set);

 private:
  // The following functions do not modify the traversal directly. They return
  // their results in the |result| vector instead.
  bool AddUncommittedParentsAndTheirPredecessors(
      const std::set<int64>& ready_unsynced_set,
      const syncable::Entry& item,
      syncable::Directory::Metahandles* result) const;

  void TryAddItem(const std::set<int64>& ready_unsynced_set,
                  const syncable::Entry& item,
                  syncable::Directory::Metahandles* result) const;

  void AddItemThenPredecessors(
      const std::set<int64>& ready_unsynced_set,
      const syncable::Entry& item,
      syncable::Directory::Metahandles* result) const;

  void AddPredecessorsThenItem(
      const std::set<int64>& ready_unsynced_set,
      const syncable::Entry& item,
      syncable::Directory::Metahandles* result) const;

  // Returns true if we've collected enough items.
  bool IsFull() const;

  // Returns true if the specified handle is already in the traversal.
  bool HaveItem(int64 handle) const;

  // Adds the specified handles to the traversal.
  void AppendManyToTraversal(const syncable::Directory::Metahandles& handles);

  // Adds the specified handle to the traversal.
  void AppendToTraversal(int64 handle);

  syncable::Directory::Metahandles* out_;
  std::set<int64> added_handles_;
  const size_t max_entries_;
  syncable::BaseTransaction* trans_;

  DISALLOW_COPY_AND_ASSIGN(Traversal);
};

Traversal::Traversal(
    syncable::BaseTransaction* trans,
    int64 max_entries,
    syncable::Directory::Metahandles* out)
    : out_(out),
      max_entries_(max_entries),
      trans_(trans) { }

Traversal::~Traversal() {}
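
// Adds |item|'s chain of uncommitted ancestors, along with each ancestor's
// run of unsynced predecessors, to |result| in root-to-leaf order. Returns
// false if any of those ancestors is in conflict, in which case the caller
// should discard the traversal built up for |item|.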
bool Traversal::AddUncommittedParentsAndTheirPredecessors(
    const std::set<int64>& ready_unsynced_set,
    const syncable::Entry& item,
    syncable::Directory::Metahandles* result) const {
  syncable::Directory::Metahandles dependencies;
  syncable::Id parent_id = item.GetParentId();

  // Climb the tree adding entries leaf -> root.
  while (!parent_id.ServerKnows()) {
    syncable::Entry parent(trans_, syncable::GET_BY_ID, parent_id);
    CHECK(parent.good()) << "Bad user-only parent in item path.";
    int64 handle = parent.GetMetahandle();
    if (HaveItem(handle)) {
      // We've already added this parent (and therefore all of its parents).
      // We can return early.
      break;
    }
    if (IsEntryInConflict(parent)) {
      // We ignore all entries that are children of a conflicting item. Return
      // false immediately to forget the traversal we've built up so far.
      DVLOG(1) << "Parent was in conflict, omitting " << item;
      return false;
    }
    AddItemThenPredecessors(ready_unsynced_set,
                            parent,
                            &dependencies);
    parent_id = parent.GetParentId();
  }

  // Reverse what we added to get the correct order.
  result->insert(result->end(), dependencies.rbegin(), dependencies.rend());
  return true;
}

// Adds the given item to the list if it is unsynced and ready for commit.
void Traversal::TryAddItem(const std::set<int64>& ready_unsynced_set,
                           const syncable::Entry& item,
                           syncable::Directory::Metahandles* result) const {
  DCHECK(item.GetIsUnsynced());
  int64 item_handle = item.GetMetahandle();
  if (ready_unsynced_set.count(item_handle) != 0) {
    result->push_back(item_handle);
  }
}

// Adds the given item, and all its unsynced predecessors. The traversal will
// be cut short if any item along the traversal is not IS_UNSYNCED, or if we
// detect that this area of the tree has already been traversed. Items that
// are not 'ready' for commit (see IsEntryReadyForCommit()) will not be added
// to the list, though they will not stop the traversal.
void Traversal::AddItemThenPredecessors(
    const std::set<int64>& ready_unsynced_set,
    const syncable::Entry& item,
    syncable::Directory::Metahandles* result) const {
  int64 item_handle = item.GetMetahandle();
  if (HaveItem(item_handle)) {
    // We've already added this item to the commit set, and so must have
    // already added the predecessors as well.
    return;
  }
  TryAddItem(ready_unsynced_set, item, result);
  if (item.GetIsDel())
    return;  // Deleted items have no predecessors.

  syncable::Id prev_id = item.GetPredecessorId();
  while (!prev_id.IsRoot()) {
    syncable::Entry prev(trans_, syncable::GET_BY_ID, prev_id);
    CHECK(prev.good()) << "Bad id when walking predecessors.";
    if (!prev.GetIsUnsynced()) {
      // We're interested in "runs" of unsynced items. This item breaks
      // the streak, so we stop traversing.
      return;
    }
    int64 handle = prev.GetMetahandle();
    if (HaveItem(handle)) {
      // We've already added this item to the commit set, and so must have
      // already added the predecessors as well.
      return;
    }
    TryAddItem(ready_unsynced_set, prev, result);
    prev_id = prev.GetPredecessorId();
  }
}

// Same as AddItemThenPredecessors, but the traversal order will be reversed.
void Traversal::AddPredecessorsThenItem(
    const std::set<int64>& ready_unsynced_set,
    const syncable::Entry& item,
    syncable::Directory::Metahandles* result) const {
  syncable::Directory::Metahandles dependencies;
  AddItemThenPredecessors(ready_unsynced_set, item, &dependencies);

  // Reverse what we added to get the correct order.
  result->insert(result->end(), dependencies.rbegin(), dependencies.rend());
}

bool Traversal::IsFull() const {
  return out_->size() >= max_entries_;
}

bool Traversal::HaveItem(int64 handle) const {
  return added_handles_.find(handle) != added_handles_.end();
}

void Traversal::AppendManyToTraversal(
    const syncable::Directory::Metahandles& handles) {
  out_->insert(out_->end(), handles.begin(), handles.end());
  added_handles_.insert(handles.begin(), handles.end());
}

void Traversal::AppendToTraversal(int64 metahandle) {
  out_->push_back(metahandle);
  added_handles_.insert(metahandle);
}
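
// Walks |ready_unsynced_set| and adds each non-deleted item to the traversal,
// prepending the item's uncommitted parents and its unsynced predecessors so
// that ordering dependencies are committed before the item itself.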
void Traversal::AddCreatesAndMoves(
    const std::set<int64>& ready_unsynced_set) {
  // Add moves and creates, and prepend their uncommitted parents.
  for (std::set<int64>::const_iterator iter = ready_unsynced_set.begin();
       !IsFull() && iter != ready_unsynced_set.end(); ++iter) {
    int64 metahandle = *iter;
    if (HaveItem(metahandle))
      continue;

    syncable::Entry entry(trans_,
                          syncable::GET_BY_HANDLE,
                          metahandle);
    if (!entry.GetIsDel()) {
      // We only commit an item + its dependencies if it and all its
      // dependencies are not in conflict.
      syncable::Directory::Metahandles item_dependencies;
      if (AddUncommittedParentsAndTheirPredecessors(
              ready_unsynced_set,
              entry,
              &item_dependencies)) {
        AddPredecessorsThenItem(ready_unsynced_set,
                                entry,
                                &item_dependencies);
        AppendManyToTraversal(item_dependencies);
      }
    }
  }

  // It's possible that we overcommitted while trying to expand dependent
  // items. If so, truncate the set down to the allowed size.
  if (out_->size() > max_entries_)
    out_->resize(max_entries_);
}
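
// Appends deletions to the traversal in two passes. The first pass skips
// deletions that the server will perform implicitly when their deleted parent
// is committed (re-adding only moved-then-deleted entries) and records which
// parents may legally receive individual child deletions; the second pass
// appends the remaining deletions whose parents are in that set.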
void Traversal::AddDeletes(
    const std::set<int64>& ready_unsynced_set) {
  set<syncable::Id> legal_delete_parents;

  for (std::set<int64>::const_iterator iter = ready_unsynced_set.begin();
       !IsFull() && iter != ready_unsynced_set.end(); ++iter) {
    int64 metahandle = *iter;
    if (HaveItem(metahandle))
      continue;

    syncable::Entry entry(trans_, syncable::GET_BY_HANDLE,
                          metahandle);

    if (entry.GetIsDel()) {
      syncable::Entry parent(trans_, syncable::GET_BY_ID,
                             entry.GetParentId());
      // If the parent is deleted and unsynced, then any children of that
      // parent don't need to be added to the delete queue.
      //
      // Note: the parent could be synced if there was an update deleting a
      // folder when we had deleted all the items in it.
      // We may get more updates, or we may want to delete the entry.
      if (parent.good() && parent.GetIsDel() && parent.GetIsUnsynced()) {
        // However, if an entry is moved, these rules can apply differently.
        //
        // If the entry was moved and the destination parent was then deleted,
        // we'll miss it in the roll up. We have to add it in manually.
        // TODO(chron): Unit test for move / delete cases:
        // Case 1: Locally moved, then parent deleted
        // Case 2: Server moved, then locally issue recursive delete.
        if (entry.GetId().ServerKnows() &&
            entry.GetParentId() != entry.GetServerParentId()) {
          DVLOG(1) << "Inserting moved and deleted entry, will be missed by "
                   << "delete roll." << entry.GetId();

          AppendToTraversal(metahandle);
        }

        // Skip this entry since it's a child of a parent that will be
        // deleted. The server will unroll the delete and delete the
        // child as well.
        continue;
      }

      legal_delete_parents.insert(entry.GetParentId());
    }
  }

  // We could store all the potential entries with a particular parent during
  // the above scan, but instead we rescan here. This is less efficient, but
  // we're dropping memory alloc/dealloc in favor of linear scans of recently
  // examined entries.
  //
  // Scan through the UnsyncedMetaHandles again. If we have a deleted
  // entry, then check if the parent is in legal_delete_parents.
  //
  // The parent being in legal_delete_parents means, for the child:
  //   - a recursive delete is not currently happening (no recent deletes in
  //     the same folder),
  //   - the parent did expect at least one old deleted child, and
  //   - the parent was not deleted.
  for (std::set<int64>::const_iterator iter = ready_unsynced_set.begin();
       !IsFull() && iter != ready_unsynced_set.end(); ++iter) {
    int64 metahandle = *iter;
    if (HaveItem(metahandle))
      continue;
    syncable::Entry entry(trans_, syncable::GET_BY_HANDLE, metahandle);
    if (entry.GetIsDel()) {
      syncable::Id parent_id = entry.GetParentId();
      if (legal_delete_parents.count(parent_id)) {
        AppendToTraversal(metahandle);
      }
    }
  }
}

void OrderCommitIds(
    syncable::BaseTransaction* trans,
    size_t max_entries,
    const std::set<int64>& ready_unsynced_set,
    syncable::Directory::Metahandles* out) {
  // Commits follow these rules:
  // 1. Moves or creates are preceded by needed folder creates, from
  //    root to leaf. For folders whose contents are ordered, moves
  //    and creates appear in order.
  // 2. Moves/Creates before deletes.
  // 3. Deletes, collapsed.
  // We commit deleted moves under deleted items as moves when collapsing
  // delete trees.

  Traversal traversal(trans, max_entries, out);

  // Add moves and creates, and prepend their uncommitted parents.
  traversal.AddCreatesAndMoves(ready_unsynced_set);

  // Add all deletes.
  traversal.AddDeletes(ready_unsynced_set);
}

}  // namespace

}  // namespace syncer