// Copyright 2013 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef SYNC_SYNCABLE_DIRECTORY_H_
#define SYNC_SYNCABLE_DIRECTORY_H_

#include <deque>
#include <set>
#include <string>
#include <vector>

#include "base/basictypes.h"
#include "base/containers/hash_tables.h"
#include "base/file_util.h"
#include "base/gtest_prod_util.h"
#include "base/values.h"
#include "sync/base/sync_export.h"
#include "sync/internal_api/public/util/report_unrecoverable_error_function.h"
#include "sync/internal_api/public/util/weak_handle.h"
#include "sync/syncable/dir_open_result.h"
#include "sync/syncable/entry.h"
#include "sync/syncable/entry_kernel.h"
#include "sync/syncable/metahandle_set.h"
#include "sync/syncable/parent_child_index.h"
#include "sync/syncable/syncable_delete_journal.h"
namespace syncer {

class Cryptographer;
class TestUserShare;
class UnrecoverableErrorHandler;

namespace syncable {

class BaseTransaction;
class BaseWriteTransaction;
class DirectoryChangeDelegate;
class DirectoryBackingStore;
class NigoriHandler;
class ScopedKernelLock;
class TransactionObserver;
class WriteTransaction;
enum InvariantCheckLevel {
  OFF = 0,                  // No checking.
  VERIFY_CHANGES = 1,       // Checks only mutated entries.  Does not check hierarchy.
  FULL_DB_VERIFICATION = 2  // Check every entry.  This can be expensive.
};
// Directory stores and manages EntryKernels.
//
// This class is tightly coupled to several other classes (see friends).
class SYNC_EXPORT Directory {
  friend class BaseTransaction;
  friend class Entry;
  friend class ModelNeutralMutableEntry;
  friend class MutableEntry;
  friend class ReadTransaction;
  friend class ScopedKernelLock;
  friend class WriteTransaction;
  friend class SyncableDirectoryTest;
  friend class syncer::TestUserShare;
  FRIEND_TEST_ALL_PREFIXES(SyncableDirectoryTest, ManageDeleteJournals);
  FRIEND_TEST_ALL_PREFIXES(SyncableDirectoryTest,
                           TakeSnapshotGetsAllDirtyHandlesTest);
  FRIEND_TEST_ALL_PREFIXES(SyncableDirectoryTest,
                           TakeSnapshotGetsOnlyDirtyHandlesTest);
  FRIEND_TEST_ALL_PREFIXES(SyncableDirectoryTest,
                           TakeSnapshotGetsMetahandlesToPurge);
 public:
  typedef std::vector<int64> Metahandles;

  // Be careful when using these hash_map containers.  According to the spec,
  // inserting into them may invalidate all iterators.
  //
  // It gets worse, though.  The Android STL library has a bug that means it
  // may invalidate all iterators when you erase from the map, too.  That
  // means you can't iterate while erasing.  STLDeleteElements(),
  // std::remove_if(), and other similar functions are off-limits too, until
  // this bug is fixed.
  //
  // See http://sourceforge.net/p/stlport/bugs/239/.
  typedef base::hash_map<int64, EntryKernel*> MetahandlesMap;
  typedef base::hash_map<std::string, EntryKernel*> IdsMap;
  typedef base::hash_map<std::string, EntryKernel*> TagsMap;
  typedef std::string AttachmentIdUniqueId;
  typedef base::hash_map<AttachmentIdUniqueId, MetahandleSet>
      IndexByAttachmentId;
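
  // Illustrative sketch (not part of the original header): given the
  // iterator-invalidation caveat above, erasing from one of these maps while
  // iterating should be done by collecting the doomed keys first and erasing
  // them in a second pass.  ShouldRemove() below is a hypothetical predicate.
  //
  //   std::vector<int64> doomed;
  //   for (MetahandlesMap::iterator it = map.begin(); it != map.end(); ++it) {
  //     if (ShouldRemove(it->second))
  //       doomed.push_back(it->first);
  //   }
  //   for (size_t i = 0; i < doomed.size(); ++i)
  //     map.erase(doomed[i]);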
  static const base::FilePath::CharType kSyncDatabaseFilename[];

  // The dirty/clean state of kernel fields backed by the share_info table.
  // This is public so it can be used in SaveChangesSnapshot for persistence.
  enum KernelShareInfoStatus {
    KERNEL_SHARE_INFO_INVALID,
    KERNEL_SHARE_INFO_VALID,
    KERNEL_SHARE_INFO_DIRTY
  };
  // Various data that the Directory::Kernel we are backing (persisting data
  // for) needs saved across runs of the application.
  struct SYNC_EXPORT_PRIVATE PersistedKernelInfo {
    PersistedKernelInfo();
    ~PersistedKernelInfo();

    // Set the |download_progress| entry for the given model to a
    // "first sync" start point.  When such a value is sent to the server,
    // a full download of all objects of the model will be initiated.
    void ResetDownloadProgress(ModelType model_type);

    // Last sync timestamp fetched from the server.
    sync_pb::DataTypeProgressMarker download_progress[MODEL_TYPE_COUNT];
    // Sync-side transaction version per data type.  Monotonically incremented
    // when updating the native model.  A copy is also saved in the native
    // model.  Out-of-sync models can later be detected and fixed by comparing
    // the transaction versions of the sync model and native model.
    // TODO(hatiaol): implement detection and fixing of out-of-sync models.
    //                Bug 154858.
    int64 transaction_version[MODEL_TYPE_COUNT];
    // The store birthday we were given by the server.  Contents are opaque to
    // the client.
    std::string store_birthday;
    // The next local ID that has not been used with this cache-GUID.
    int64 next_id;
    // The serialized bag of chips we were given by the server.  Contents are
    // opaque to the client.  This is the serialization of a message of type
    // ChipBag defined in sync.proto.  It can contain NULL characters.
    std::string bag_of_chips;
    // The per-datatype context.
    sync_pb::DataTypeContext datatype_context[MODEL_TYPE_COUNT];
  };
  // What the Directory needs on initialization to create itself and its
  // Kernel.  Filled by DirectoryBackingStore::Load.
  struct KernelLoadInfo {
    PersistedKernelInfo kernel_info;
    std::string cache_guid;  // Created on first initialization, never changes.
    int64 max_metahandle;    // Computed (using SQL MAX aggregate) on init.
    KernelLoadInfo() : max_metahandle(0) {
    }
  };
  // When the Directory is told to SaveChanges, a SaveChangesSnapshot is
  // constructed and forms a consistent snapshot of what needs to be sent to
  // the backing store.
  struct SYNC_EXPORT_PRIVATE SaveChangesSnapshot {
    SaveChangesSnapshot();
    ~SaveChangesSnapshot();

    KernelShareInfoStatus kernel_info_status;
    PersistedKernelInfo kernel_info;
    EntryKernelSet dirty_metas;
    MetahandleSet metahandles_to_purge;
    EntryKernelSet delete_journals;
    MetahandleSet delete_journals_to_purge;
  };
  // Does not take ownership of |encryptor|.
  // |report_unrecoverable_error_function| may be NULL.
  // Takes ownership of |store|.
  Directory(
      DirectoryBackingStore* store,
      UnrecoverableErrorHandler* unrecoverable_error_handler,
      ReportUnrecoverableErrorFunction report_unrecoverable_error_function,
      NigoriHandler* nigori_handler,
      Cryptographer* cryptographer);
  virtual ~Directory();
  // Does not take ownership of |delegate|, which must not be NULL.
  // Starts sending events to |delegate| if the returned result is
  // OPENED.  Note that events to |delegate| may be sent from *any*
  // thread.  |transaction_observer| must be initialized.
  DirOpenResult Open(const std::string& name,
                     DirectoryChangeDelegate* delegate,
                     const WeakHandle<TransactionObserver>&
                         transaction_observer);
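
  // Illustrative usage sketch (not part of the original header); the names
  // backing_store, error_handler, nigori_handler, crypto, my_delegate and
  // observer_handle below are hypothetical placeholders:
  //
  //   Directory dir(backing_store, error_handler, NULL, nigori_handler,
  //                 crypto);
  //   if (dir.Open("profile_name", my_delegate, observer_handle) == OPENED) {
  //     // ... read/write entries under transactions ...
  //     dir.SaveChanges();
  //   }
  //   dir.Close();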
  // Stops sending events to the delegate and the transaction
  // observer.
  void Close();

  int64 NextMetahandle();
  // Returns a negative integer unique to this client.
  syncable::Id NextId();

  bool good() const { return NULL != kernel_; }
  // The download progress is an opaque token provided by the sync server
  // to indicate the continuation state of the next GetUpdates operation.
  void GetDownloadProgress(
      ModelType type,
      sync_pb::DataTypeProgressMarker* value_out) const;
  void GetDownloadProgressAsString(
      ModelType type,
      std::string* value_out) const;
  size_t GetEntriesCount() const;
  void SetDownloadProgress(
      ModelType type,
      const sync_pb::DataTypeProgressMarker& value);
  // Gets/Increments the transaction version of a model type.  Must be called
  // while holding the kernel mutex.
  int64 GetTransactionVersion(ModelType type) const;
  void IncrementTransactionVersion(ModelType type);

  // Getter/setters for the per-datatype context.
  void GetDataTypeContext(BaseTransaction* trans,
                          ModelType type,
                          sync_pb::DataTypeContext* context) const;
  void SetDataTypeContext(BaseWriteTransaction* trans,
                          ModelType type,
                          const sync_pb::DataTypeContext& context);

  ModelTypeSet InitialSyncEndedTypes();
  bool InitialSyncEndedForType(ModelType type);
  bool InitialSyncEndedForType(BaseTransaction* trans, ModelType type);
  const std::string& name() const { return kernel_->name; }

  // (Account) Store birthday is opaque to the client, so we keep it in the
  // format it is in the proto buffer in case we switch to a binary birthday
  // later.
  std::string store_birthday() const;
  void set_store_birthday(const std::string& store_birthday);

  // (Account) Bag of chips is an opaque state used by the server to track the
  // client.
  std::string bag_of_chips() const;
  void set_bag_of_chips(const std::string& bag_of_chips);

  // Unique to each account / client pair.
  std::string cache_guid() const;
  // Returns a pointer to our Nigori node handler.
  NigoriHandler* GetNigoriHandler();

  // Returns a pointer to our cryptographer.  Does not transfer ownership.
  // Not thread safe, so should only be accessed while holding a transaction.
  Cryptographer* GetCryptographer(const BaseTransaction* trans);

  // Returns true if the directory has encountered an unrecoverable error.
  // Note: Any function in |Directory| that can be called without holding a
  // transaction needs to check whether the Directory already has an
  // unrecoverable error on it.
  bool unrecoverable_error_set(const BaseTransaction* trans) const;
  // Called to immediately report an unrecoverable error (but don't
  // propagate it up).
  void ReportUnrecoverableError() {
    if (report_unrecoverable_error_function_) {
      report_unrecoverable_error_function_();
    }
  }

  // Called to set the unrecoverable error on the directory and to propagate
  // the error to upper layers.
  void OnUnrecoverableError(const BaseTransaction* trans,
                            const tracked_objects::Location& location,
                            const std::string& message);
  DeleteJournal* delete_journal();

  // Returns the child meta handles (even those for deleted/unlinked
  // nodes) for the given parent id.  Clears |result| if there are no
  // children.
  bool GetChildHandlesById(BaseTransaction*, const Id& parent_id,
                           Metahandles* result);

  // Counts all items under the given node, including the node itself.
  int GetTotalNodeCount(BaseTransaction*, EntryKernel* kernel_) const;

  // Returns this item's position within its parent folder.
  // The left-most item is 0, second left-most is 1, etc.
  int GetPositionIndex(BaseTransaction*, EntryKernel* kernel_) const;

  // Returns true iff |id| has children.
  bool HasChildren(BaseTransaction* trans, const Id& id);
  // Finds the first child in the positional ordering under |parent| and
  // returns its ID, or the root ID if |parent| has no children.
  Id GetFirstChildId(BaseTransaction* trans, const EntryKernel* parent);

  // These functions allow one to fetch the next or previous item under
  // the same folder.  Returns the "root" ID if there is no predecessor
  // or successor.
  //
  // TODO(rlarocque): These functions are used mainly for tree traversal.
  // We should replace these with an iterator API.  See crbug.com/178275.
  syncable::Id GetPredecessorId(EntryKernel*);
  syncable::Id GetSuccessorId(EntryKernel*);
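
  // Illustrative traversal sketch (not part of the original header): sibling
  // iteration under a parent can be expressed with GetFirstChildId and
  // GetSuccessorId.  This assumes caller code with access to the protected
  // GetEntryById accessor (e.g. a friend class).
  //
  //   syncable::Id child_id = dir->GetFirstChildId(trans, parent_kernel);
  //   while (!child_id.IsRoot()) {
  //     EntryKernel* child = dir->GetEntryById(child_id);
  //     // ... visit |child| ...
  //     child_id = dir->GetSuccessorId(child);
  //   }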
  // Places |e| as a successor to |predecessor|.  If |predecessor| is NULL,
  // |e| will be placed as the left-most item in its folder.
  //
  // Both |e| and |predecessor| must be valid entries under the same parent.
  //
  // TODO(rlarocque): This function includes limited support for placing items
  // with valid positions (ie. Bookmarks) as siblings of items that have no
  // set ordering (ie. Autofill items).  This support is required only for
  // tests, and should be removed.  See crbug.com/178282.
  void PutPredecessor(EntryKernel* e, EntryKernel* predecessor);
  // SaveChanges works by taking a consistent snapshot of the current Directory
  // state and indices (by deep copy) under a ReadTransaction, passing this
  // snapshot to the backing store under no transaction, and finally cleaning
  // up by either purging entries no longer needed (this part done under a
  // WriteTransaction) or rolling back the dirty bits.  It also uses
  // internal locking to enforce that SaveChanges operations are mutually
  // exclusive.
  //
  // WARNING: THIS METHOD PERFORMS SYNCHRONOUS I/O VIA SQLITE.
  bool SaveChanges();
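
  // Illustrative sketch of the SaveChanges sequence described above (not part
  // of the original header).  The helper names are the private methods
  // declared later in this file; the store_->SaveChanges(snapshot) call is an
  // assumption about the backing-store interface.
  //
  //   SaveChangesSnapshot snapshot;
  //   TakeSnapshotForSaveChanges(&snapshot);   // under a ReadTransaction
  //   if (store_->SaveChanges(snapshot))       // synchronous SQLite I/O
  //     VacuumAfterSaveChanges(snapshot);      // purge under a WriteTransaction
  //   else
  //     HandleSaveChangesFailure(snapshot);    // roll back the dirty bits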
  // Returns the number of entities with the unsynced bit set.
  int64 unsynced_entity_count() const;

  // GetUnsyncedMetaHandles should only be called after SaveChanges and
  // before any new entries have been created.  The intention is that the
  // syncer should call it from its PerformSyncQueries member.
  void GetUnsyncedMetaHandles(BaseTransaction* trans,
                              Metahandles* result);

  // Returns whether or not this |type| has unapplied updates.
  bool TypeHasUnappliedUpdates(ModelType type);

  // Get all the metahandles for unapplied updates for a given set of
  // server types.
  void GetUnappliedUpdateMetaHandles(BaseTransaction* trans,
                                     FullModelTypeSet server_types,
                                     std::vector<int64>* result);

  // Get all the metahandles of entries of |type|.
  void GetMetaHandlesOfType(BaseTransaction* trans,
                            ModelType type,
                            Metahandles* result);
  // Get metahandle counts for various criteria to show on the
  // about:sync page.  The information is computed on the fly
  // each time.  If this results in a significant performance hit,
  // additional data structures can be added to cache results.
  void CollectMetaHandleCounts(std::vector<int>* num_entries_by_type,
                               std::vector<int>* num_to_delete_entries_by_type);

  // Returns a ListValue serialization of all nodes for the given type.
  scoped_ptr<base::ListValue> GetNodeDetailsForType(
      BaseTransaction* trans,
      ModelType type);
  // Sets the level of invariant checking performed after transactions.
  void SetInvariantCheckLevel(InvariantCheckLevel check_level);

  // Checks tree metadata consistency following a transaction.  It is intended
  // to provide a reasonable tradeoff between performance and comprehensiveness
  // and may be used in release code.
  bool CheckInvariantsOnTransactionClose(
      syncable::BaseTransaction* trans,
      const MetahandleSet& modified_handles);

  // Forces a full check of the directory.  This operation may be slow and
  // should not be invoked outside of tests.
  bool FullyCheckTreeInvariants(BaseTransaction* trans);
  // Purges data associated with any entries whose ModelType or ServerModelType
  // is found in |disabled_types|, from the sync directory _both_ in memory and
  // on disk.  Only valid, "real" model types are allowed in |disabled_types|
  // (see model_type.h for definitions).
  // 1. Data associated with |types_to_journal| is saved in the delete journal
  //    to help prevent the back-from-dead problem caused by offline deletes in
  //    the next sync session.  |types_to_journal| must be a subset of
  //    |disabled_types|.
  // 2. Data associated with |types_to_unapply| is reset to an "unapplied"
  //    state, wherein all local data is deleted and IS_UNAPPLIED is set to
  //    true.  This is useful when there's no benefit in discarding the
  //    currently downloaded state, such as when there are cryptographer
  //    errors.  |types_to_unapply| must be a subset of |disabled_types|.
  // 3. All other data is purged entirely.
  // Note: "Purge" is just meant to distinguish from "deleting" entries, which
  // means something different in the syncable namespace.
  // WARNING! This can be really slow, as it iterates over all entries.
  // WARNING! Performs synchronous I/O.
  // Returns: true on success, false if an error was encountered.
  virtual bool PurgeEntriesWithTypeIn(ModelTypeSet disabled_types,
                                      ModelTypeSet types_to_journal,
                                      ModelTypeSet types_to_unapply);
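
  // Illustrative call sketch (not part of the original header); the concrete
  // model types below are examples only:
  //
  //   ModelTypeSet disabled(BOOKMARKS, PREFERENCES, PASSWORDS);
  //   ModelTypeSet journal(BOOKMARKS);    // must be a subset of |disabled|
  //   ModelTypeSet unapply(PASSWORDS);    // must be a subset of |disabled|
  //   directory->PurgeEntriesWithTypeIn(disabled, journal, unapply);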
  // Resets the base_versions and server_versions of all synced entities
  // associated with |type| to 1.
  // WARNING! This can be slow, as it iterates over all entries for a type.
  bool ResetVersionsForType(BaseWriteTransaction* trans, ModelType type);

  // Returns true iff the attachment identified by |attachment_id_proto| is
  // linked to an entry.
  //
  // An attachment linked to a deleted entry is still considered linked if the
  // entry hasn't yet been purged.
  bool IsAttachmentLinked(
      const sync_pb::AttachmentIdProto& attachment_id_proto) const;

  // Given an attachment id, returns the metahandles of all entries that
  // reference this attachment.
  void GetMetahandlesByAttachmentId(
      BaseTransaction* trans,
      const sync_pb::AttachmentIdProto& attachment_id_proto,
      Metahandles* result);

  // Changes an entry to not dirty.  Used in the special case where we don't
  // want to persist a modified entry on disk, e.g. SyncBackupManager uses this
  // to preserve sync preferences in the DB on disk.
  void UnmarkDirtyEntry(WriteTransaction* trans, Entry* entry);
 protected:  // for friends, mainly used by Entry constructors
  virtual EntryKernel* GetEntryByHandle(int64 handle);
  virtual EntryKernel* GetEntryByHandle(int64 metahandle,
                                        ScopedKernelLock* lock);
  virtual EntryKernel* GetEntryById(const Id& id);
  EntryKernel* GetEntryByServerTag(const std::string& tag);
  virtual EntryKernel* GetEntryByClientTag(const std::string& tag);
  bool ReindexId(BaseWriteTransaction* trans, EntryKernel* const entry,
                 const Id& new_id);
  bool ReindexParentId(BaseWriteTransaction* trans, EntryKernel* const entry,
                       const Id& new_parent_id);
  // Updates the attachment index for |metahandle|, removing it from the index
  // under the |old_metadata| entries and adding it under the |new_metadata|
  // entries.
  void UpdateAttachmentIndex(const int64 metahandle,
                             const sync_pb::AttachmentMetadata& old_metadata,
                             const sync_pb::AttachmentMetadata& new_metadata);
  void ClearDirtyMetahandles();

  DirOpenResult OpenImpl(
      const std::string& name,
      DirectoryChangeDelegate* delegate,
      const WeakHandle<TransactionObserver>& transaction_observer);
 private:
  struct Kernel {
    // |delegate| must not be NULL.  |transaction_observer| must be
    // initialized.
    Kernel(const std::string& name, const KernelLoadInfo& info,
           DirectoryChangeDelegate* delegate,
           const WeakHandle<TransactionObserver>& transaction_observer);

    ~Kernel();

    // Implements ReadTransaction / WriteTransaction using a simple lock.
    base::Lock transaction_mutex;

    // Protected by transaction_mutex.  Used by WriteTransactions.
    int64 next_write_transaction_id;

    // The name of this directory.
    std::string const name;

    // Protects all members below.
    // The mutex effectively protects all the indices, but not the
    // entries themselves.  So once a pointer to an entry is pulled
    // from the index, the mutex can be unlocked and the entry read or written.
    //
    // Never hold the mutex and do anything with the database or any
    // other buffered IO.  Violating this rule will result in deadlock.
    base::Lock mutex;

    // Entries indexed by metahandle.  This container is considered to be the
    // owner of all EntryKernels, which may be referenced by the other
    // containers.  If you remove an EntryKernel from this map, you probably
    // want to remove it from all other containers and delete it, too.
    MetahandlesMap metahandles_map;

    // Entries indexed by id.
    IdsMap ids_map;

    // Entries indexed by server tag.
    // This map does not include any entries with non-existent server tags.
    TagsMap server_tags_map;

    // Entries indexed by client tag.
    // This map does not include any entries with non-existent client tags.
    // IS_DEL items are included.
    TagsMap client_tags_map;

    // Contains non-deleted items, indexed according to parent and position
    // within parent.  Protected by the ScopedKernelLock.
    ParentChildIndex parent_child_index;

    // This index keeps track of which metahandles refer to a given attachment.
    // Think of it as the inverse of EntryKernel's AttachmentMetadata records.
    //
    // Because entries can be undeleted (e.g. PutIsDel(false)), entries should
    // not be removed from the index until they are actually deleted from
    // memory.
    //
    // All access should go through the IsAttachmentLinked,
    // RemoveFromAttachmentIndex, AddToAttachmentIndex, and
    // UpdateAttachmentIndex methods to avoid iterator invalidation errors.
    IndexByAttachmentId index_by_attachment_id;
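
    // Illustrative sketch (not part of the original header): conceptually this
    // index maps an attachment's unique id to the set of metahandles whose
    // AttachmentMetadata references it.  For example, after two entries are
    // linked to the same (hypothetical) attachment id:
    //
    //   index_by_attachment_id["attachment-guid"] == { 17, 42 }  // metahandles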
    // 3 in-memory indices on bits used extremely frequently by the syncer.
    // |unapplied_update_metahandles| is keyed by the server model type.
    MetahandleSet unapplied_update_metahandles[MODEL_TYPE_COUNT];
    MetahandleSet unsynced_metahandles;
    // Contains metahandles that are most likely dirty (though not
    // necessarily).  Dirtiness is confirmed in TakeSnapshotForSaveChanges().
    MetahandleSet dirty_metahandles;

    // When a purge takes place, we remove items from all our indices and stash
    // them in here so that SaveChanges can persist their permanent deletion.
    MetahandleSet metahandles_to_purge;

    KernelShareInfoStatus info_status;

    // These 3 members are backed in the share_info table, and
    // their state is marked by the flag above.

    // A structure containing the Directory state that is written back into the
    // database on SaveChanges.
    PersistedKernelInfo persisted_info;

    // A unique identifier for this account's cache db, used to generate
    // unique server IDs.  No need to lock, only written at init time.
    const std::string cache_guid;

    // It doesn't make sense for two threads to run SaveChanges at the same
    // time; this mutex protects that activity.
    base::Lock save_changes_mutex;

    // The next metahandle is protected by the kernel mutex.
    int64 next_metahandle;

    // The delegate for directory change events.  Must not be NULL.
    DirectoryChangeDelegate* const delegate;

    // The transaction observer.
    const WeakHandle<TransactionObserver> transaction_observer;
  };
  // These private versions expect the kernel lock to already be held
  // before calling.
  EntryKernel* GetEntryById(const Id& id, ScopedKernelLock* const lock);

  // A helper that implements the logic of checking tree invariants.
  bool CheckTreeInvariants(syncable::BaseTransaction* trans,
                           const MetahandleSet& handles);

  // Helper to prime metahandles_map, ids_map, parent_child_index,
  // unsynced_metahandles, unapplied_update_metahandles, server_tags_map and
  // client_tags_map from metahandles_index.  The input |handles_map| will be
  // cleared during the initialization process.
  void InitializeIndices(MetahandlesMap* handles_map);
  // Constructs a consistent snapshot of the current Directory state and
  // indices (by deep copy) under a ReadTransaction for use in |snapshot|.
  // See SaveChanges() for more information.
  void TakeSnapshotForSaveChanges(SaveChangesSnapshot* snapshot);

  // Purges from memory any unused, safe-to-remove entries that were
  // successfully deleted on disk as a result of the SaveChanges that processed
  // |snapshot|.  See SaveChanges() for more information.
  bool VacuumAfterSaveChanges(const SaveChangesSnapshot& snapshot);

  // Rolls back dirty bits in the event that the SaveChanges that
  // processed |snapshot| failed, for example, due to no disk space.
  void HandleSaveChangesFailure(const SaveChangesSnapshot& snapshot);

  // For new entry creation only.
  bool InsertEntry(BaseWriteTransaction* trans,
                   EntryKernel* entry, ScopedKernelLock* lock);
  bool InsertEntry(BaseWriteTransaction* trans, EntryKernel* entry);

  // Used by CheckTreeInvariants.
  void GetAllMetaHandles(BaseTransaction* trans, MetahandleSet* result);
  bool SafeToPurgeFromMemory(WriteTransaction* trans,
                             const EntryKernel* const entry) const;
  // A helper used by GetTotalNodeCount.
  void GetChildSetForKernel(
      BaseTransaction*,
      EntryKernel* kernel_,
      std::deque<const OrderedChildSet*>* child_sets) const;

  // Append the handles of the children of |parent_id| to |result|.
  void AppendChildHandles(
      const ScopedKernelLock& lock,
      const Id& parent_id, Directory::Metahandles* result);

  // Helper methods used by PurgeDisabledTypes.
  void UnapplyEntry(EntryKernel* entry);
  void DeleteEntry(bool save_to_journal,
                   EntryKernel* entry,
                   EntryKernelSet* entries_to_journal,
                   const ScopedKernelLock& lock);

  // Remove each of |metahandle|'s attachment ids from index_by_attachment_id.
  void RemoveFromAttachmentIndex(
      const int64 metahandle,
      const sync_pb::AttachmentMetadata& attachment_metadata,
      const ScopedKernelLock& lock);
  // Add each of |metahandle|'s attachment ids to index_by_attachment_id.
  void AddToAttachmentIndex(
      const int64 metahandle,
      const sync_pb::AttachmentMetadata& attachment_metadata,
      const ScopedKernelLock& lock);
  Kernel* kernel_;

  scoped_ptr<DirectoryBackingStore> store_;

  UnrecoverableErrorHandler* const unrecoverable_error_handler_;
  const ReportUnrecoverableErrorFunction report_unrecoverable_error_function_;
  bool unrecoverable_error_set_;

  // Not owned.
  NigoriHandler* const nigori_handler_;
  Cryptographer* const cryptographer_;

  InvariantCheckLevel invariant_check_level_;

  // Maintains deleted entries not in |kernel_| until it's verified that they
  // are deleted in native models as well.
  scoped_ptr<DeleteJournal> delete_journal_;

  DISALLOW_COPY_AND_ASSIGN(Directory);
};

}  // namespace syncable
}  // namespace syncer

#endif  // SYNC_SYNCABLE_DIRECTORY_H_