// Copyright 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "sync/syncable/directory_backing_store.h"

#include "build/build_config.h"

#include "base/base64.h"
#include "base/logging.h"
#include "base/rand_util.h"
#include "base/strings/stringprintf.h"
#include "base/thread_task_runner_handle.h"
#include "base/time/time.h"
#include "base/trace_event/trace_event.h"
#include "sql/connection.h"
#include "sql/error_delegate_util.h"
#include "sql/statement.h"
#include "sql/transaction.h"
#include "sync/internal_api/public/base/node_ordinal.h"
#include "sync/protocol/bookmark_specifics.pb.h"
#include "sync/protocol/sync.pb.h"
#include "sync/syncable/syncable-inl.h"
#include "sync/syncable/syncable_columns.h"
#include "sync/syncable/syncable_util.h"
#include "sync/util/time.h"

using std::string;

namespace syncer {
namespace syncable {
// Increment this version whenever updating DB tables.
const int32 kCurrentDBVersion = 89;
// Iterate over the fields of |entry| and bind each to |statement| for
// updating, in the canonical field enumeration order.
void BindFields(const EntryKernel& entry,
                sql::Statement* statement) {
  int index = 0;
  int i = 0;
  for (i = BEGIN_FIELDS; i < INT64_FIELDS_END; ++i) {
    statement->BindInt64(index++, entry.ref(static_cast<Int64Field>(i)));
  }
  for ( ; i < TIME_FIELDS_END; ++i) {
    statement->BindInt64(index++,
                         TimeToProtoTime(
                             entry.ref(static_cast<TimeField>(i))));
  }
  for ( ; i < ID_FIELDS_END; ++i) {
    statement->BindString(index++, entry.ref(static_cast<IdField>(i)).s_);
  }
  for ( ; i < BIT_FIELDS_END; ++i) {
    statement->BindInt(index++, entry.ref(static_cast<BitField>(i)));
  }
  for ( ; i < STRING_FIELDS_END; ++i) {
    statement->BindString(index++, entry.ref(static_cast<StringField>(i)));
  }
  for ( ; i < PROTO_FIELDS_END; ++i) {
    std::string temp;
    entry.ref(static_cast<ProtoField>(i)).SerializeToString(&temp);
    statement->BindBlob(index++, temp.data(), temp.length());
  }
  for ( ; i < UNIQUE_POSITION_FIELDS_END; ++i) {
    std::string temp;
    entry.ref(static_cast<UniquePositionField>(i)).SerializeToString(&temp);
    statement->BindBlob(index++, temp.data(), temp.length());
  }
  for (; i < ATTACHMENT_METADATA_FIELDS_END; ++i) {
    std::string temp;
    entry.ref(static_cast<AttachmentMetadataField>(i)).SerializeToString(
        &temp);
    statement->BindBlob(index++, temp.data(), temp.length());
  }
}
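// Illustrative sketch of the binding contract above: BindFields() relies on
// the "?" placeholders generated by PrepareSaveEntryStatement() appearing in
// the same canonical field order, so binding slot N always corresponds to
// column N of the metas schema. Assuming a hypothetical schema whose first
// two fields are the int64 fields META_HANDLE and BASE_VERSION, a save would
// effectively perform:
//
//   statement->BindInt64(0, entry.ref(META_HANDLE));
//   statement->BindInt64(1, entry.ref(BASE_VERSION));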
// Helper function that loads a number of shareable fields of the
// same type. The sharing criterion is based on comparison of
// the serialized data. Only consecutive DB columns need to be compared
// to cover all possible sharing combinations.
template <typename TValue, typename TField>
void UnpackProtoFields(sql::Statement* statement,
                       EntryKernel* kernel,
                       int* index,
                       int end_index) {
  const void* prev_blob = nullptr;
  int prev_length = -1;
  int prev_index = -1;

  for (; *index < end_index; ++(*index)) {
    int length = statement->ColumnByteLength(*index);
    if (length == 0) {
      // Skip this column and keep the default value in the kernel field.
      continue;
    }

    const void* blob = statement->ColumnBlob(*index);
    // According to sqlite3 documentation, the prev_blob pointer should remain
    // valid until moving to the next row.
    if (length == prev_length && memcmp(blob, prev_blob, length) == 0) {
      // Serialized values are the same - share the value from |prev_index|
      // field with the current field.
      kernel->copy(static_cast<TField>(prev_index),
                   static_cast<TField>(*index));
    } else {
      // Regular case - deserialize and copy the value to the field.
      TValue value;
      value.ParseFromArray(blob, length);
      kernel->put(static_cast<TField>(*index), value);
      prev_blob = blob;
      prev_length = length;
      prev_index = *index;
    }
  }
}
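// Illustrative sketch of the sharing optimization above: for an entry whose
// SPECIFICS and SERVER_SPECIFICS columns hold byte-identical blobs, the
// first column is parsed with ParseFromArray() and the second is satisfied
// by kernel->copy(SPECIFICS, SERVER_SPECIFICS), avoiding a second protobuf
// parse. (SPECIFICS/SERVER_SPECIFICS are used here only as a plausible
// adjacent pair; any two consecutive proto columns with equal bytes behave
// the same way.)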
// The caller owns the returned EntryKernel*. Assumes the statement currently
// points to a valid row in the metas table. Returns NULL to indicate that
// it detected a corruption in the data on unpacking.
scoped_ptr<EntryKernel> UnpackEntry(sql::Statement* statement) {
  scoped_ptr<EntryKernel> kernel(new EntryKernel());
  DCHECK_EQ(statement->ColumnCount(), static_cast<int>(FIELD_COUNT));
  int i = 0;
  for (i = BEGIN_FIELDS; i < INT64_FIELDS_END; ++i) {
    kernel->put(static_cast<Int64Field>(i), statement->ColumnInt64(i));
  }
  for ( ; i < TIME_FIELDS_END; ++i) {
    kernel->put(static_cast<TimeField>(i),
                ProtoTimeToTime(statement->ColumnInt64(i)));
  }
  for ( ; i < ID_FIELDS_END; ++i) {
    kernel->mutable_ref(static_cast<IdField>(i)).s_ =
        statement->ColumnString(i);
  }
  for ( ; i < BIT_FIELDS_END; ++i) {
    kernel->put(static_cast<BitField>(i), (0 != statement->ColumnInt(i)));
  }
  for ( ; i < STRING_FIELDS_END; ++i) {
    kernel->put(static_cast<StringField>(i),
                statement->ColumnString(i));
  }
  UnpackProtoFields<sync_pb::EntitySpecifics, ProtoField>(
      statement, kernel.get(), &i, PROTO_FIELDS_END);
  for ( ; i < UNIQUE_POSITION_FIELDS_END; ++i) {
    std::string temp;
    statement->ColumnBlobAsString(i, &temp);

    sync_pb::UniquePosition proto;
    if (!proto.ParseFromString(temp)) {
      DVLOG(1) << "Unpacked invalid position. Assuming the DB is corrupt";
      return scoped_ptr<EntryKernel>();
    }

    kernel->mutable_ref(static_cast<UniquePositionField>(i)) =
        UniquePosition::FromProto(proto);
  }
  UnpackProtoFields<sync_pb::AttachmentMetadata, AttachmentMetadataField>(
      statement, kernel.get(), &i, ATTACHMENT_METADATA_FIELDS_END);

  // Sanity check on positions. We risk strange and rare crashes if our
  // assumptions about unique position values are broken.
  if (kernel->ShouldMaintainPosition() &&
      !kernel->ref(UNIQUE_POSITION).IsValid()) {
    DVLOG(1) << "Unpacked invalid position on an entity that should have a "
             << "valid position. Assuming the DB is corrupt.";
    return scoped_ptr<EntryKernel>();
  }

  return kernel.Pass();
}
// This just has to be big enough to hold an UPDATE or INSERT statement that
// modifies all the columns in the entry table.
static const string::size_type kUpdateStatementBufferSize = 2048;
void OnSqliteError(const base::Closure& catastrophic_error_handler,
                   int err,
                   sql::Statement* statement) {
  // An error has been detected. Ignore unless it is catastrophic.
  if (sql::IsErrorCatastrophic(err)) {
    // At this point sql::* and DirectoryBackingStore may be on the callstack
    // so don't invoke the error handler directly. Instead, PostTask to this
    // thread to avoid potential reentrancy issues.
    base::MessageLoop::current()->PostTask(FROM_HERE,
                                           catastrophic_error_handler);
  }
}
string ComposeCreateTableColumnSpecs() {
  const ColumnSpec* begin = g_metas_columns;
  const ColumnSpec* end = g_metas_columns + arraysize(g_metas_columns);
  string query;
  query.reserve(kUpdateStatementBufferSize);
  char separator = '(';
  for (const ColumnSpec* column = begin; column != end; ++column) {
    query.push_back(separator);
    separator = ',';
    query.append(column->name);
    query.push_back(' ');
    query.append(column->spec);
  }
  query.push_back(')');
  return query;
}
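// For illustration, the generated spec has the shape
// "(name1 spec1,name2 spec2,...)"; e.g. with hypothetical columns
// {"metahandle", "bigint primary key"} and {"base_version", "bigint"} it
// would yield "(metahandle bigint primary key,base_version bigint)".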
void AppendColumnList(std::string* output) {
  const char* joiner = " ";
  // Be explicit in SELECT order to match up with UnpackEntry.
  for (int i = BEGIN_FIELDS; i < FIELD_COUNT; ++i) {
    output->append(joiner);
    output->append(ColumnName(i));
    joiner = ", ";
  }
}
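// Illustrative sketch: for hypothetical leading columns the appended list
// would read " metahandle, base_version, ...". Because the loop walks the
// same field enumeration as UnpackEntry(), SELECT result column i always
// lines up with field i when a row is unpacked.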
bool SaveEntryToDB(sql::Statement* save_statement, const EntryKernel& entry) {
  save_statement->Reset(true);
  BindFields(entry, save_statement);
  return save_statement->Run();
}
///////////////////////////////////////////////////////////////////////////////
// DirectoryBackingStore implementation.
DirectoryBackingStore::DirectoryBackingStore(const string& dir_name)
    : dir_name_(dir_name),
      database_page_size_(32768),
      needs_column_refresh_(false) {
  DCHECK(base::ThreadTaskRunnerHandle::IsSet());
  ResetAndCreateConnection();
}
DirectoryBackingStore::DirectoryBackingStore(const string& dir_name,
                                             sql::Connection* db)
    : dir_name_(dir_name),
      database_page_size_(32768),
      db_(db),
      needs_column_refresh_(false) {
  DCHECK(base::ThreadTaskRunnerHandle::IsSet());
}

DirectoryBackingStore::~DirectoryBackingStore() {
}
bool DirectoryBackingStore::DeleteEntries(EntryTable from,
                                          const MetahandleSet& handles) {
  if (handles.empty())
    return true;

  sql::Statement statement;
  // Call GetCachedStatement() separately to get different statements for
  // different tables.
  switch (from) {
    case METAS_TABLE:
      statement.Assign(db_->GetCachedStatement(
          SQL_FROM_HERE, "DELETE FROM metas WHERE metahandle = ?"));
      break;
    case DELETE_JOURNAL_TABLE:
      statement.Assign(db_->GetCachedStatement(
          SQL_FROM_HERE, "DELETE FROM deleted_metas WHERE metahandle = ?"));
      break;
  }

  for (MetahandleSet::const_iterator i = handles.begin(); i != handles.end();
       ++i) {
    statement.BindInt64(0, *i);
    if (!statement.Run())
      return false;
    statement.Reset(true);
  }
  return true;
}
bool DirectoryBackingStore::SaveChanges(
    const Directory::SaveChangesSnapshot& snapshot) {
  DCHECK(CalledOnValidThread());
  DCHECK(db_->is_open());

  // Back out early if there is nothing to write.
  bool save_info =
      (Directory::KERNEL_SHARE_INFO_DIRTY == snapshot.kernel_info_status);
  if (!snapshot.HasUnsavedMetahandleChanges() && !save_info) {
    return true;
  }

  sql::Transaction transaction(db_.get());
  if (!transaction.Begin())
    return false;

  PrepareSaveEntryStatement(METAS_TABLE, &save_meta_statement_);
  for (EntryKernelSet::const_iterator i = snapshot.dirty_metas.begin();
       i != snapshot.dirty_metas.end(); ++i) {
    DCHECK((*i)->is_dirty());
    if (!SaveEntryToDB(&save_meta_statement_, **i))
      return false;
  }

  if (!DeleteEntries(METAS_TABLE, snapshot.metahandles_to_purge))
    return false;

  PrepareSaveEntryStatement(DELETE_JOURNAL_TABLE,
                            &save_delete_journal_statement_);
  for (EntryKernelSet::const_iterator i = snapshot.delete_journals.begin();
       i != snapshot.delete_journals.end(); ++i) {
    if (!SaveEntryToDB(&save_delete_journal_statement_, **i))
      return false;
  }

  if (!DeleteEntries(DELETE_JOURNAL_TABLE, snapshot.delete_journals_to_purge))
    return false;

  if (save_info) {
    const Directory::PersistedKernelInfo& info = snapshot.kernel_info;
    sql::Statement s1(db_->GetCachedStatement(
        SQL_FROM_HERE,
        "UPDATE share_info "
        "SET store_birthday = ?, "
        "bag_of_chips = ?"));
    s1.BindString(0, info.store_birthday);
    s1.BindBlob(1, info.bag_of_chips.data(), info.bag_of_chips.size());

    if (!s1.Run())
      return false;
    DCHECK_EQ(db_->GetLastChangeCount(), 1);

    sql::Statement s2(db_->GetCachedStatement(
        SQL_FROM_HERE,
        "INSERT OR REPLACE "
        "INTO models (model_id, "
                     "progress_marker, "
                     "transaction_version, "
                     "context) "
        "VALUES (?, ?, ?, ?)"));

    ModelTypeSet protocol_types = ProtocolTypes();
    for (ModelTypeSet::Iterator iter = protocol_types.First(); iter.Good();
         iter.Inc()) {
      ModelType type = iter.Get();
      // We persist not ModelType but rather a protobuf-derived ID.
      string model_id = ModelTypeEnumToModelId(type);
      string progress_marker;
      info.download_progress[type].SerializeToString(&progress_marker);
      s2.BindBlob(0, model_id.data(), model_id.length());
      s2.BindBlob(1, progress_marker.data(), progress_marker.length());
      s2.BindInt64(2, info.transaction_version[type]);
      string context;
      info.datatype_context[type].SerializeToString(&context);
      s2.BindBlob(3, context.data(), context.length());
      if (!s2.Run())
        return false;
      DCHECK_EQ(db_->GetLastChangeCount(), 1);
      s2.Reset(true);
    }
  }

  return transaction.Commit();
}
sql::Connection* DirectoryBackingStore::db() {
  return db_.get();
}
bool DirectoryBackingStore::IsOpen() const {
  return db_->is_open();
}
bool DirectoryBackingStore::Open(const base::FilePath& path) {
  DCHECK(!db_->is_open());
  return db_->Open(path);
}
bool DirectoryBackingStore::OpenInMemory() {
  DCHECK(!db_->is_open());
  return db_->OpenInMemory();
}
bool DirectoryBackingStore::InitializeTables() {
  int page_size = 0;
  if (GetDatabasePageSize(&page_size) && page_size == 4096) {
    IncreasePageSizeTo32K();
  }
  sql::Transaction transaction(db_.get());
  if (!transaction.Begin())
    return false;

  int version_on_disk = GetVersion();

  // Upgrade from version 67. Version 67 was widely distributed as the
  // original Bookmark Sync release. Version 68 removed unique naming.
  if (version_on_disk == 67) {
    if (MigrateVersion67To68())
      version_on_disk = 68;
  }
  // Version 69 introduced additional datatypes.
  if (version_on_disk == 68) {
    if (MigrateVersion68To69())
      version_on_disk = 69;
  }
  if (version_on_disk == 69) {
    if (MigrateVersion69To70())
      version_on_disk = 70;
  }
  // Version 71 changed the sync progress information to be per-datatype.
  if (version_on_disk == 70) {
    if (MigrateVersion70To71())
      version_on_disk = 71;
  }
  // Version 72 removed extended attributes, a legacy way to do extensible
  // key/value information, stored in their own table.
  if (version_on_disk == 71) {
    if (MigrateVersion71To72())
      version_on_disk = 72;
  }
  // Version 73 added a field for notification state.
  if (version_on_disk == 72) {
    if (MigrateVersion72To73())
      version_on_disk = 73;
  }
  // Version 74 added state for the autofill migration.
  if (version_on_disk == 73) {
    if (MigrateVersion73To74())
      version_on_disk = 74;
  }
  // Version 75 migrated from int64-based timestamps to per-datatype tokens.
  if (version_on_disk == 74) {
    if (MigrateVersion74To75())
      version_on_disk = 75;
  }
  // Version 76 removed all (5) autofill migration related columns.
  if (version_on_disk == 75) {
    if (MigrateVersion75To76())
      version_on_disk = 76;
  }
  // Version 77 standardized all time fields to ms since the Unix
  // epoch.
  if (version_on_disk == 76) {
    if (MigrateVersion76To77())
      version_on_disk = 77;
  }
  // Version 78 added the column base_server_specifics to the metas table.
  if (version_on_disk == 77) {
    if (MigrateVersion77To78())
      version_on_disk = 78;
  }
  // Version 79 migration is a one-time fix for some users in a bad state.
  if (version_on_disk == 78) {
    if (MigrateVersion78To79())
      version_on_disk = 79;
  }
  // Version 80 migration is adding the bag_of_chips column.
  if (version_on_disk == 79) {
    if (MigrateVersion79To80())
      version_on_disk = 80;
  }
  // Version 81 replaces the int64 server_position_in_parent field
  // with a blob server_ordinal_in_parent field.
  if (version_on_disk == 80) {
    if (MigrateVersion80To81())
      version_on_disk = 81;
  }
  // Version 82 migration added transaction_version column per data type.
  if (version_on_disk == 81) {
    if (MigrateVersion81To82())
      version_on_disk = 82;
  }
  // Version 83 migration added transaction_version column per sync entry.
  if (version_on_disk == 82) {
    if (MigrateVersion82To83())
      version_on_disk = 83;
  }
  // Version 84 migration added deleted_metas table.
  if (version_on_disk == 83) {
    if (MigrateVersion83To84())
      version_on_disk = 84;
  }
  // Version 85 migration removes the initial_sync_ended bits.
  if (version_on_disk == 84) {
    if (MigrateVersion84To85())
      version_on_disk = 85;
  }
  // Version 86 migration converts bookmarks to the unique positioning system.
  // It also introduces a new field to store a unique ID for each bookmark.
  if (version_on_disk == 85) {
    if (MigrateVersion85To86())
      version_on_disk = 86;
  }
  // Version 87 migration adds a collection of attachment ids per sync entry.
  if (version_on_disk == 86) {
    if (MigrateVersion86To87())
      version_on_disk = 87;
  }
  // Version 88 migration adds datatype contexts to the models table.
  if (version_on_disk == 87) {
    if (MigrateVersion87To88())
      version_on_disk = 88;
  }
  // Version 89 migration adds server attachment metadata to the metas table.
  if (version_on_disk == 88) {
    if (MigrateVersion88To89())
      version_on_disk = 89;
  }

  // If one of the migrations requested it, drop columns that aren't current.
  // It's only safe to do this after migrating all the way to the current
  // version.
  if (version_on_disk == kCurrentDBVersion && needs_column_refresh_) {
    if (!RefreshColumns())
      version_on_disk = 0;
  }

  // A final, alternative catch-all migration to simply re-sync everything.
  if (version_on_disk != kCurrentDBVersion) {
    if (version_on_disk > kCurrentDBVersion)
      return false;

    // Fallback (re-sync everything) migration path.
    DVLOG(1) << "Old/null sync database, version " << version_on_disk;
    // Delete the existing database (if any), and create a fresh one.
    DropAllTables();
    if (!CreateTables())
      return false;
  }

  sql::Statement s(db_->GetUniqueStatement(
      "SELECT db_create_version, db_create_time FROM share_info"));
  if (!s.Step())
    return false;
  string db_create_version = s.ColumnString(0);
  int db_create_time = s.ColumnInt(1);
  DVLOG(1) << "DB created at " << db_create_time << " by version "
           << db_create_version;

  return transaction.Commit();
}
// This function drops unused columns by creating a new table that contains
// only the currently used columns, then copying all rows from the old table
// into this new one. The tables are then rearranged so the new replaces the
// old.
bool DirectoryBackingStore::RefreshColumns() {
  DCHECK(needs_column_refresh_);

  // Create a new table named temp_metas.
  SafeDropTable("temp_metas");
  if (!CreateMetasTable(true))
    return false;

  // Populate temp_metas from metas.
  //
  // At this point, the metas table may contain columns belonging to obsolete
  // schema versions. This statement explicitly lists only the columns that
  // belong to the current schema version, so the obsolete columns will be
  // effectively dropped once we rename temp_metas over top of metas.
  std::string query = "INSERT INTO temp_metas (";
  AppendColumnList(&query);
  query.append(") SELECT ");
  AppendColumnList(&query);
  query.append(" FROM metas");
  if (!db_->Execute(query.c_str()))
    return false;

  // Drop metas.
  SafeDropTable("metas");

  // Rename temp_metas -> metas.
  if (!db_->Execute("ALTER TABLE temp_metas RENAME TO metas"))
    return false;

  // Repeat the process for share_info.
  SafeDropTable("temp_share_info");
  if (!CreateShareInfoTable(true))
    return false;

  // TODO(rlarocque, 124140): Remove notification_state.
  if (!db_->Execute(
          "INSERT INTO temp_share_info (id, name, store_birthday, "
          "db_create_version, db_create_time, next_id, cache_guid,"
          "notification_state, bag_of_chips) "
          "SELECT id, name, store_birthday, db_create_version, "
          "db_create_time, next_id, cache_guid, notification_state, "
          "bag_of_chips "
          "FROM share_info"))
    return false;

  SafeDropTable("share_info");
  if (!db_->Execute("ALTER TABLE temp_share_info RENAME TO share_info"))
    return false;

  needs_column_refresh_ = false;
  return true;
}
bool DirectoryBackingStore::LoadEntries(Directory::MetahandlesMap* handles_map,
                                        MetahandleSet* metahandles_to_purge) {
  string select;
  select.reserve(kUpdateStatementBufferSize);
  select.append("SELECT ");
  AppendColumnList(&select);
  select.append(" FROM metas");

  sql::Statement s(db_->GetUniqueStatement(select.c_str()));

  while (s.Step()) {
    scoped_ptr<EntryKernel> kernel = UnpackEntry(&s);
    // A null kernel is evidence of external data corruption.
    if (!kernel)
      return false;

    int64 handle = kernel->ref(META_HANDLE);
    if (SafeToPurgeOnLoading(*kernel))
      metahandles_to_purge->insert(handle);
    else
      (*handles_map)[handle] = kernel.release();
  }
  return s.Succeeded();
}
bool DirectoryBackingStore::SafeToPurgeOnLoading(
    const EntryKernel& entry) const {
  if (entry.ref(IS_DEL)) {
    if (!entry.ref(IS_UNSYNCED) && !entry.ref(IS_UNAPPLIED_UPDATE))
      return true;
    else if (!entry.ref(ID).ServerKnows())
      return true;
  }
  return false;
}
bool DirectoryBackingStore::LoadDeleteJournals(
    JournalIndex* delete_journals) {
  string select;
  select.reserve(kUpdateStatementBufferSize);
  select.append("SELECT ");
  AppendColumnList(&select);
  select.append(" FROM deleted_metas");

  sql::Statement s(db_->GetUniqueStatement(select.c_str()));

  while (s.Step()) {
    scoped_ptr<EntryKernel> kernel = UnpackEntry(&s);
    // A null kernel is evidence of external data corruption.
    if (!kernel)
      return false;
    delete_journals->insert(kernel.release());
  }
  return s.Succeeded();
}
bool DirectoryBackingStore::LoadInfo(Directory::KernelLoadInfo* info) {
  {
    sql::Statement s(db_->GetUniqueStatement(
        "SELECT store_birthday, cache_guid, bag_of_chips "
        "FROM share_info"));
    if (!s.Step())
      return false;

    info->kernel_info.store_birthday = s.ColumnString(0);
    info->cache_guid = s.ColumnString(1);
    s.ColumnBlobAsString(2, &(info->kernel_info.bag_of_chips));

    // Verify there was only one row returned.
    DCHECK(!s.Step());
    DCHECK(s.Succeeded());
  }

  {
    sql::Statement s(
        db_->GetUniqueStatement(
            "SELECT model_id, progress_marker, "
            "transaction_version, context FROM models"));
    while (s.Step()) {
      ModelType type = ModelIdToModelTypeEnum(s.ColumnBlob(0),
                                              s.ColumnByteLength(0));
      if (type != UNSPECIFIED && type != TOP_LEVEL_FOLDER) {
        info->kernel_info.download_progress[type].ParseFromArray(
            s.ColumnBlob(1), s.ColumnByteLength(1));
        info->kernel_info.transaction_version[type] = s.ColumnInt64(2);
        info->kernel_info.datatype_context[type].ParseFromArray(
            s.ColumnBlob(3), s.ColumnByteLength(3));
      }
    }
    if (!s.Succeeded())
      return false;
  }

  {
    sql::Statement s(
        db_->GetUniqueStatement(
            "SELECT MAX(metahandle) FROM metas"));
    if (!s.Step())
      return false;

    info->max_metahandle = s.ColumnInt64(0);

    // Verify only one row was returned.
    DCHECK(!s.Step());
    DCHECK(s.Succeeded());
  }
  return true;
}
bool DirectoryBackingStore::SafeDropTable(const char* table_name) {
  string query = "DROP TABLE IF EXISTS ";
  query.append(table_name);
  return db_->Execute(query.c_str());
}
void DirectoryBackingStore::DropAllTables() {
  SafeDropTable("metas");
  SafeDropTable("temp_metas");
  SafeDropTable("share_info");
  SafeDropTable("temp_share_info");
  SafeDropTable("share_version");
  SafeDropTable("extended_attributes");
  SafeDropTable("models");
  SafeDropTable("temp_models");
  needs_column_refresh_ = false;
}
ModelType DirectoryBackingStore::ModelIdToModelTypeEnum(
    const void* data, int size) {
  sync_pb::EntitySpecifics specifics;
  if (!specifics.ParseFromArray(data, size))
    return UNSPECIFIED;
  return GetModelTypeFromSpecifics(specifics);
}
string DirectoryBackingStore::ModelTypeEnumToModelId(ModelType model_type) {
  sync_pb::EntitySpecifics specifics;
  AddDefaultFieldValue(model_type, &specifics);
  return specifics.SerializeAsString();
}
std::string DirectoryBackingStore::GenerateCacheGUID() {
  // Generate a GUID with 128 bits of randomness.
  const int kGuidBytes = 128 / 8;
  std::string guid;
  base::Base64Encode(base::RandBytesAsString(kGuidBytes), &guid);
  return guid;
}
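// Worked example of the sizing above: 128 bits is 16 random bytes, and
// base64 encodes every 3 input bytes as 4 output characters, so the
// resulting GUID string is ceil(16 / 3) * 4 = 24 characters long (the last
// group carries one byte plus two '=' padding characters).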
bool DirectoryBackingStore::MigrateToSpecifics(
    const char* old_columns,
    const char* specifics_column,
    void (*handler_function)(sql::Statement* old_value_query,
                             int old_value_column,
                             sync_pb::EntitySpecifics* mutable_new_value)) {
  std::string query_sql = base::StringPrintf(
      "SELECT metahandle, %s, %s FROM metas", specifics_column, old_columns);
  std::string update_sql = base::StringPrintf(
      "UPDATE metas SET %s = ? WHERE metahandle = ?", specifics_column);

  sql::Statement query(db_->GetUniqueStatement(query_sql.c_str()));
  sql::Statement update(db_->GetUniqueStatement(update_sql.c_str()));

  while (query.Step()) {
    int64 metahandle = query.ColumnInt64(0);
    std::string new_value_bytes;
    query.ColumnBlobAsString(1, &new_value_bytes);
    sync_pb::EntitySpecifics new_value;
    new_value.ParseFromString(new_value_bytes);
    handler_function(&query, 2, &new_value);
    new_value.SerializeToString(&new_value_bytes);

    update.BindBlob(0, new_value_bytes.data(), new_value_bytes.length());
    update.BindInt64(1, metahandle);
    if (!update.Run())
      return false;
    update.Reset(true);
  }

  return query.Succeeded();
}
bool DirectoryBackingStore::SetVersion(int version) {
  sql::Statement s(db_->GetCachedStatement(
      SQL_FROM_HERE, "UPDATE share_version SET data = ?"));
  s.BindInt(0, version);
  return s.Run();
}
int DirectoryBackingStore::GetVersion() {
  if (!db_->DoesTableExist("share_version"))
    return 0;

  sql::Statement statement(db_->GetUniqueStatement(
      "SELECT data FROM share_version"));
  if (statement.Step()) {
    return statement.ColumnInt(0);
  }
  return 0;
}
bool DirectoryBackingStore::MigrateVersion67To68() {
  // This change simply removed three columns:
  //   string NAME
  //   string UNSANITIZED_NAME
  //   string SERVER_NAME
  // No data migration is necessary, but we should do a column refresh.
  SetVersion(68);
  needs_column_refresh_ = true;
  return true;
}
bool DirectoryBackingStore::MigrateVersion69To70() {
  // Added "unique_client_tag", renamed "singleton_tag" to unique_server_tag.
  SetVersion(70);
  if (!db_->Execute(
          "ALTER TABLE metas ADD COLUMN unique_server_tag varchar"))
    return false;
  if (!db_->Execute(
          "ALTER TABLE metas ADD COLUMN unique_client_tag varchar"))
    return false;
  needs_column_refresh_ = true;

  if (!db_->Execute(
          "UPDATE metas SET unique_server_tag = singleton_tag"))
    return false;

  return true;
}
// Callback passed to MigrateToSpecifics for the v68->v69 migration. See
// MigrateVersion68To69().
void EncodeBookmarkURLAndFavicon(sql::Statement* old_value_query,
                                 int old_value_column,
                                 sync_pb::EntitySpecifics* mutable_new_value) {
  // Extract data from the column trio we expect.
  bool old_is_bookmark_object = old_value_query->ColumnBool(old_value_column);
  std::string old_url = old_value_query->ColumnString(old_value_column + 1);
  std::string old_favicon;
  old_value_query->ColumnBlobAsString(old_value_column + 2, &old_favicon);
  bool old_is_dir = old_value_query->ColumnBool(old_value_column + 3);

  if (old_is_bookmark_object) {
    sync_pb::BookmarkSpecifics* bookmark_data =
        mutable_new_value->mutable_bookmark();
    if (!old_is_dir) {
      bookmark_data->set_url(old_url);
      bookmark_data->set_favicon(old_favicon);
    }
  }
}
bool DirectoryBackingStore::MigrateVersion68To69() {
  // In Version 68, there were columns on table 'metas':
  //   string BOOKMARK_URL
  //   string SERVER_BOOKMARK_URL
  //   blob BOOKMARK_FAVICON
  //   blob SERVER_BOOKMARK_FAVICON
  // In version 69, these columns went away in favor of storing
  // a serialized EntrySpecifics protobuf in the columns:
  //   protobuf blob SPECIFICS
  //   protobuf blob SERVER_SPECIFICS
  // For bookmarks, EntrySpecifics is extended as per
  // bookmark_specifics.proto. This migration converts bookmarks from the
  // former scheme to the latter scheme.

  // First, add the two new columns to the schema.
  if (!db_->Execute(
          "ALTER TABLE metas ADD COLUMN specifics blob"))
    return false;
  if (!db_->Execute(
          "ALTER TABLE metas ADD COLUMN server_specifics blob"))
    return false;

  // Next, fold data from the old columns into the new protobuf columns.
  if (!MigrateToSpecifics(("is_bookmark_object, bookmark_url, "
                           "bookmark_favicon, is_dir"),
                          "specifics",
                          &EncodeBookmarkURLAndFavicon)) {
    return false;
  }
  if (!MigrateToSpecifics(("server_is_bookmark_object, "
                           "server_bookmark_url, "
                           "server_bookmark_favicon, "
                           "server_is_dir"),
                          "server_specifics",
                          &EncodeBookmarkURLAndFavicon)) {
    return false;
  }

  // Lastly, fix up the "Google Chrome" folder, which is of the
  // TOP_LEVEL_FOLDER ModelType: it shouldn't have BookmarkSpecifics.
  if (!db_->Execute(
          "UPDATE metas SET specifics = NULL, server_specifics = NULL WHERE "
          "singleton_tag IN ('google_chrome')"))
    return false;

  SetVersion(69);
  needs_column_refresh_ = true;  // Trigger deletion of old columns.
  return true;
}
// In version 71, the columns 'initial_sync_ended' and 'last_sync_timestamp'
// were removed from the share_info table. They were replaced by
// the 'models' table, which has these values on a per-datatype basis.
bool DirectoryBackingStore::MigrateVersion70To71() {
  if (!CreateV71ModelsTable())
    return false;

  // Move data from the old share_info columns to the new models table.
  {
    sql::Statement fetch(db_->GetUniqueStatement(
        "SELECT last_sync_timestamp, initial_sync_ended FROM share_info"));
    if (!fetch.Step())
      return false;

    int64 last_sync_timestamp = fetch.ColumnInt64(0);
    bool initial_sync_ended = fetch.ColumnBool(1);

    // Verify there were no additional rows returned.
    DCHECK(!fetch.Step());
    DCHECK(fetch.Succeeded());

    sql::Statement update(db_->GetUniqueStatement(
        "INSERT INTO models (model_id, "
        "last_download_timestamp, initial_sync_ended) VALUES (?, ?, ?)"));
    string bookmark_model_id = ModelTypeEnumToModelId(BOOKMARKS);
    update.BindBlob(0, bookmark_model_id.data(), bookmark_model_id.size());
    update.BindInt64(1, last_sync_timestamp);
    update.BindBool(2, initial_sync_ended);

    if (!update.Run())
      return false;
  }

  // Drop the columns from the old share_info table via a temp table.
  const bool kCreateAsTempShareInfo = true;

  if (!CreateShareInfoTableVersion71(kCreateAsTempShareInfo))
    return false;
  if (!db_->Execute(
          "INSERT INTO temp_share_info (id, name, store_birthday, "
          "db_create_version, db_create_time, next_id, cache_guid) "
          "SELECT id, name, store_birthday, db_create_version, "
          "db_create_time, next_id, cache_guid FROM share_info"))
    return false;
  SafeDropTable("share_info");
  if (!db_->Execute(
          "ALTER TABLE temp_share_info RENAME TO share_info"))
    return false;

  SetVersion(71);
  return true;
}
bool DirectoryBackingStore::MigrateVersion71To72() {
  // Version 72 removed a table 'extended_attributes', whose
  // contents didn't matter.
  SafeDropTable("extended_attributes");
  SetVersion(72);
  return true;
}
bool DirectoryBackingStore::MigrateVersion72To73() {
  // Version 73 added one column to the table 'share_info':
  // notification_state.
  if (!db_->Execute(
          "ALTER TABLE share_info ADD COLUMN notification_state BLOB"))
    return false;
  SetVersion(73);
  return true;
}
997 // Version 74 added the following columns to the table 'share_info':
998 // autofill_migration_state
999 // bookmarks_added_during_autofill_migration
1000 // autofill_migration_time
1001 // autofill_entries_added_during_migration
1002 // autofill_profiles_added_during_migration
1005 "ALTER TABLE share_info ADD COLUMN "
1006 "autofill_migration_state INT default 0"))
1010 "ALTER TABLE share_info ADD COLUMN "
1011 "bookmarks_added_during_autofill_migration "
1016 "ALTER TABLE share_info ADD COLUMN autofill_migration_time "
1021 "ALTER TABLE share_info ADD COLUMN "
1022 "autofill_entries_added_during_migration "
1027 "ALTER TABLE share_info ADD COLUMN "
1028 "autofill_profiles_added_during_migration "
bool DirectoryBackingStore::MigrateVersion74To75() {
  // In version 74, there was a table 'models':
  //   blob model_id (entity specifics, primary key)
  //   int last_download_timestamp
  //   boolean initial_sync_ended
  // In version 75, we deprecated the integer-valued last_download_timestamp,
  // using instead a protobuf-valued progress_marker field:
  //   blob progress_marker
  // The progress_marker values are initialized from the value of
  // last_download_timestamp, thereby preserving the download state.

  // Move aside the old table and create a new empty one at the current
  // schema.
  if (!db_->Execute("ALTER TABLE models RENAME TO temp_models"))
    return false;
  if (!CreateV75ModelsTable())
    return false;

  sql::Statement query(db_->GetUniqueStatement(
      "SELECT model_id, last_download_timestamp, initial_sync_ended "
      "FROM temp_models"));

  sql::Statement update(db_->GetUniqueStatement(
      "INSERT INTO models (model_id, "
      "progress_marker, initial_sync_ended) VALUES (?, ?, ?)"));

  while (query.Step()) {
    ModelType type = ModelIdToModelTypeEnum(query.ColumnBlob(0),
                                            query.ColumnByteLength(0));
    if (type != UNSPECIFIED) {
      // Set the |timestamp_token_for_migration| on a new
      // DataTypeProgressMarker, using the old value of
      // last_download_timestamp. The server will turn this into a real
      // token on our behalf the next time we check for updates.
      sync_pb::DataTypeProgressMarker progress_marker;
      progress_marker.set_data_type_id(
          GetSpecificsFieldNumberFromModelType(type));
      progress_marker.set_timestamp_token_for_migration(query.ColumnInt64(1));
      std::string progress_blob;
      progress_marker.SerializeToString(&progress_blob);

      update.BindBlob(0, query.ColumnBlob(0), query.ColumnByteLength(0));
      update.BindBlob(1, progress_blob.data(), progress_blob.length());
      update.BindBool(2, query.ColumnBool(2));
      if (!update.Run())
        return false;
      update.Reset(true);
    }
  }
  if (!query.Succeeded())
    return false;

  // Drop the old table.
  SafeDropTable("temp_models");

  SetVersion(75);
  return true;
}
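// Illustrative sketch of the marker constructed above: for BOOKMARKS with an
// old last_download_timestamp of, say, 1234, the serialized
// DataTypeProgressMarker carries
// data_type_id = GetSpecificsFieldNumberFromModelType(BOOKMARKS) and
// timestamp_token_for_migration = 1234; the server replaces this legacy
// timestamp with an opaque progress token the next time updates are fetched.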
bool DirectoryBackingStore::MigrateVersion75To76() {
  // This change removed five columns:
  //   autofill_migration_state
  //   bookmarks_added_during_autofill_migration
  //   autofill_migration_time
  //   autofill_entries_added_during_migration
  //   autofill_profiles_added_during_migration
  // No data migration is necessary, but we should do a column refresh.
  SetVersion(76);
  needs_column_refresh_ = true;
  return true;
}
bool DirectoryBackingStore::MigrateVersion76To77() {
  // This change changes the format of stored timestamps to ms since
  // the Unix epoch.
#if defined(OS_WIN)
// On Windows, we used to store timestamps in FILETIME format (100s of
// ns since Jan 1, 1601). Magic numbers taken from
// http://stackoverflow.com/questions/5398557/
//     java-library-for-dealing-with-win32-filetime
#define TO_UNIX_TIME_MS(x) #x " = " #x " / 10000 - 11644473600000"
#else
// On other platforms, we used to store timestamps in time_t format (s
// since the Unix epoch).
#define TO_UNIX_TIME_MS(x) #x " = " #x " * 1000"
#endif
  sql::Statement update_timestamps(db_->GetUniqueStatement(
      "UPDATE metas SET "
      TO_UNIX_TIME_MS(mtime) ", "
      TO_UNIX_TIME_MS(server_mtime) ", "
      TO_UNIX_TIME_MS(ctime) ", "
      TO_UNIX_TIME_MS(server_ctime)));
#undef TO_UNIX_TIME_MS
  if (!update_timestamps.Run())
    return false;
  SetVersion(77);
  return true;
}
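// Worked example for the Windows branch above: the FILETIME value for the
// Unix epoch (Jan 1, 1970) is 116444736000000000 in 100-ns units; dividing
// by 10000 gives 11644473600000 ms, so subtracting that constant maps the
// epoch itself to 0 and every later FILETIME to its Unix-epoch offset in ms.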
bool DirectoryBackingStore::MigrateVersion77To78() {
  // Version 78 added one column to table 'metas': base_server_specifics.
  if (!db_->Execute(
          "ALTER TABLE metas ADD COLUMN base_server_specifics BLOB")) {
    return false;
  }
  SetVersion(78);
  return true;
}
bool DirectoryBackingStore::MigrateVersion78To79() {
  // Some users are stuck with a DB that causes them to reuse existing IDs.
  // We perform this one-time fixup on all users to help the few that are
  // stuck. See crbug.com/142987 for details.
  if (!db_->Execute(
          "UPDATE share_info SET next_id = next_id - 65536")) {
    return false;
  }
  SetVersion(79);
  return true;
}
bool DirectoryBackingStore::MigrateVersion79To80() {
  if (!db_->Execute(
          "ALTER TABLE share_info ADD COLUMN bag_of_chips BLOB"))
    return false;
  sql::Statement update(db_->GetUniqueStatement(
      "UPDATE share_info SET bag_of_chips = ?"));
  // An empty message is serialized to an empty string.
  update.BindBlob(0, NULL, 0);
  if (!update.Run())
    return false;
  SetVersion(80);
  return true;
}
bool DirectoryBackingStore::MigrateVersion80To81() {
  if (!db_->Execute(
          "ALTER TABLE metas ADD COLUMN server_ordinal_in_parent BLOB"))
    return false;

  sql::Statement get_positions(db_->GetUniqueStatement(
      "SELECT metahandle, server_position_in_parent FROM metas"));

  sql::Statement put_ordinals(db_->GetUniqueStatement(
      "UPDATE metas SET server_ordinal_in_parent = ? "
      "WHERE metahandle = ?"));

  while (get_positions.Step()) {
    int64 metahandle = get_positions.ColumnInt64(0);
    int64 position = get_positions.ColumnInt64(1);

    const std::string& ordinal =
        Int64ToNodeOrdinal(position).ToInternalValue();
    put_ordinals.BindBlob(0, ordinal.data(), ordinal.length());
    put_ordinals.BindInt64(1, metahandle);

    if (!put_ordinals.Run())
      return false;
    put_ordinals.Reset(true);
  }

  SetVersion(81);
  needs_column_refresh_ = true;
  return true;
}
bool DirectoryBackingStore::MigrateVersion81To82() {
  if (!db_->Execute(
          "ALTER TABLE models ADD COLUMN transaction_version BIGINT "
          "default 0"))
    return false;
  sql::Statement update(db_->GetUniqueStatement(
      "UPDATE models SET transaction_version = 0"));
  if (!update.Run())
    return false;
  SetVersion(82);
  return true;
}
bool DirectoryBackingStore::MigrateVersion82To83() {
  // Version 83 added transaction_version on each sync node.
  if (!db_->Execute(
          "ALTER TABLE metas ADD COLUMN transaction_version BIGINT "
          "default 0"))
    return false;
  sql::Statement update(db_->GetUniqueStatement(
      "UPDATE metas SET transaction_version = 0"));
  if (!update.Run())
    return false;
  SetVersion(83);
  return true;
}
bool DirectoryBackingStore::MigrateVersion83To84() {
  // Version 84 added the deleted_metas table to store deleted metas until we
  // know for sure that the deletions are persisted in native models.
  string query = "CREATE TABLE deleted_metas ";
  query.append(ComposeCreateTableColumnSpecs());
  if (!db_->Execute(query.c_str()))
    return false;
  SetVersion(84);
  return true;
}
bool DirectoryBackingStore::MigrateVersion84To85() {
  // Version 85 removes the initial_sync_ended flag.
  if (!db_->Execute("ALTER TABLE models RENAME TO temp_models"))
    return false;
  if (!CreateV81ModelsTable())
    return false;
  if (!db_->Execute("INSERT INTO models SELECT "
                    "model_id, progress_marker, transaction_version "
                    "FROM temp_models")) {
    return false;
  }
  SafeDropTable("temp_models");

  SetVersion(85);
  return true;
}
bool DirectoryBackingStore::MigrateVersion85To86() {
  // Version 86 removes both server ordinals and local NEXT_ID, PREV_ID and
  // SERVER_{POSITION,ORDINAL}_IN_PARENT and replaces them with
  // UNIQUE_POSITION and SERVER_UNIQUE_POSITION.
  if (!db_->Execute("ALTER TABLE metas ADD COLUMN "
                    "server_unique_position BLOB")) {
    return false;
  }
  if (!db_->Execute("ALTER TABLE metas ADD COLUMN "
                    "unique_position BLOB")) {
    return false;
  }
  if (!db_->Execute("ALTER TABLE metas ADD COLUMN "
                    "unique_bookmark_tag VARCHAR")) {
    return false;
  }

  // Fetch the cache_guid from the DB, because we don't otherwise have access
  // to it from here.
  sql::Statement get_cache_guid(db_->GetUniqueStatement(
      "SELECT cache_guid FROM share_info"));
  if (!get_cache_guid.Step()) {
    return false;
  }
  std::string cache_guid = get_cache_guid.ColumnString(0);
  DCHECK(!get_cache_guid.Step());
  DCHECK(get_cache_guid.Succeeded());

  sql::Statement get(db_->GetUniqueStatement(
      "SELECT "
      "  metahandle, "
      "  id, "
      "  specifics, "
      "  is_dir, "
      "  unique_server_tag, "
      "  server_ordinal_in_parent "
      "FROM metas"));

  // Note that we set both the local and server position based on the server
  // position. We will lose any unsynced local position changes.
  // Unfortunately, there's nothing we can do to avoid that. The NEXT_ID /
  // PREV_ID values can't be translated into a UNIQUE_POSITION in a reliable
  // way.
  sql::Statement put(db_->GetCachedStatement(
      SQL_FROM_HERE,
      "UPDATE metas SET"
      "  server_unique_position = ?,"
      "  unique_position = ?,"
      "  unique_bookmark_tag = ? "
      "WHERE metahandle = ?"));

  while (get.Step()) {
    int64 metahandle = get.ColumnInt64(0);

    std::string id_string;
    get.ColumnBlobAsString(1, &id_string);

    sync_pb::EntitySpecifics specifics;
    specifics.ParseFromArray(
        get.ColumnBlob(2), get.ColumnByteLength(2));

    bool is_dir = get.ColumnBool(3);

    std::string server_unique_tag = get.ColumnString(4);

    std::string ordinal_string;
    get.ColumnBlobAsString(5, &ordinal_string);
    NodeOrdinal ordinal(ordinal_string);

    std::string unique_bookmark_tag;

    // We only maintain positions for bookmarks that are not server-defined
    // top-level folders.
    UniquePosition position;
    if (GetModelTypeFromSpecifics(specifics) == BOOKMARKS
        && !(is_dir && !server_unique_tag.empty())) {
      if (id_string.at(0) == 'c') {
        // We found an uncommitted item. This is rare, but fortunate. This
        // means we can set the bookmark tag according to the originator
        // client item ID and originator cache guid, because (unlike the
        // other case) we know that this client is the originator.
        unique_bookmark_tag = syncable::GenerateSyncableBookmarkHash(
            cache_guid,
            id_string.substr(1));
      } else {
        // If we've already committed the item, then we don't know who the
        // originator was. We do not have access to the originator client
        // item ID and originator cache guid at this point.
        //
        // We will base our hash entirely on the server ID instead. This is
        // incorrect, but at least all clients that undergo this migration
        // step will be incorrect in the same way.
        //
        // To get everyone back into a synced state, we will update the
        // bookmark tag according to the originator_cache_guid and
        // originator_item_id when we see updates for this item. That should
        // ensure that commonly modified items will end up with the proper
        // tag values eventually.
        unique_bookmark_tag = syncable::GenerateSyncableBookmarkHash(
            std::string(),  // cache_guid left intentionally blank.
            id_string.substr(1));
      }

      int64 int_position = NodeOrdinalToInt64(ordinal);
      position = UniquePosition::FromInt64(int_position, unique_bookmark_tag);
    } else {
      // Leave bookmark_tag and position at their default (invalid) values.
    }

    std::string position_blob;
    position.SerializeToString(&position_blob);
    put.BindBlob(0, position_blob.data(), position_blob.length());
    put.BindBlob(1, position_blob.data(), position_blob.length());
    put.BindBlob(2, unique_bookmark_tag.data(), unique_bookmark_tag.length());
    put.BindInt64(3, metahandle);

    if (!put.Run())
      return false;
    put.Reset(true);
  }

  SetVersion(86);
  needs_column_refresh_ = true;
  return true;
}
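// Illustrative sketch of the tag choice above: an uncommitted local ID such
// as "c12345" hashes as GenerateSyncableBookmarkHash(cache_guid, "12345"),
// because this client is known to be the originator; any other (committed)
// ID hashes as GenerateSyncableBookmarkHash(std::string(), id.substr(1)),
// deliberately making all migrating clients wrong in the same way.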
bool DirectoryBackingStore::MigrateVersion86To87() {
  // Version 87 adds the AttachmentMetadata proto.
  if (!db_->Execute(
          "ALTER TABLE metas ADD COLUMN "
          "attachment_metadata BLOB")) {
    return false;
  }
  SetVersion(87);
  needs_column_refresh_ = true;
  return true;
}
bool DirectoryBackingStore::MigrateVersion87To88() {
  // Version 88 adds the datatype context to the models table.
  if (!db_->Execute("ALTER TABLE models ADD COLUMN context blob"))
    return false;
  SetVersion(88);
  return true;
}
bool DirectoryBackingStore::MigrateVersion88To89() {
  // Version 89 adds server_attachment_metadata.
  if (!db_->Execute(
          "ALTER TABLE metas ADD COLUMN "
          "server_attachment_metadata BLOB")) {
    return false;
  }
  SetVersion(89);
  needs_column_refresh_ = true;
  return true;
}
bool DirectoryBackingStore::CreateTables() {
  DVLOG(1) << "First run, creating tables";

  // Create two little tables share_version and share_info.
  if (!db_->Execute(
          "CREATE TABLE share_version ("
          "id VARCHAR(128) primary key, data INT)")) {
    return false;
  }

  {
    sql::Statement s(db_->GetUniqueStatement(
        "INSERT INTO share_version VALUES(?, ?)"));
    s.BindString(0, dir_name_);
    s.BindInt(1, kCurrentDBVersion);

    if (!s.Run())
      return false;
  }

  const bool kCreateAsTempShareInfo = false;
  if (!CreateShareInfoTable(kCreateAsTempShareInfo)) {
    return false;
  }

  {
    sql::Statement s(db_->GetUniqueStatement(
        "INSERT INTO share_info VALUES"
        "(?, "   // id
        "?, "    // name
        "?, "    // store_birthday
        "?, "    // db_create_version
        "?, "    // db_create_time
        "-2, "   // next_id
        "?, "    // cache_guid
        // TODO(rlarocque, 124140): Remove notification_state field.
        "?, "    // notification_state
        "?);"));  // bag_of_chips
    s.BindString(0, dir_name_);                 // id
    s.BindString(1, dir_name_);                 // name
    s.BindString(2, std::string());             // store_birthday
    // TODO(akalin): Remove this unused db_create_version field. (Or
    // actually use it for something.) http://crbug.com/118356
    s.BindString(3, "Unknown");                 // db_create_version
    s.BindInt(4, static_cast<int32>(time(0)));  // db_create_time
    s.BindString(5, GenerateCacheGUID());       // cache_guid
    // TODO(rlarocque, 124140): Remove this unused notification-state field.
    s.BindBlob(6, NULL, 0);                     // notification_state
    s.BindBlob(7, NULL, 0);                     // bag_of_chips
    if (!s.Run())
      return false;
  }

  if (!CreateModelsTable())
    return false;

  // Create the big metas table.
  if (!CreateMetasTable(false))
    return false;

  {
    // Insert the entry for the root into the metas table.
    const int64 now = TimeToProtoTime(base::Time::Now());
    sql::Statement s(db_->GetUniqueStatement(
        "INSERT INTO metas "
        "( id, metahandle, is_dir, ctime, mtime ) "
        "VALUES ( \"r\", 1, 1, ?, ? )"));
    s.BindInt64(0, now);
    s.BindInt64(1, now);

    if (!s.Run())
      return false;
  }

  return true;
}
bool DirectoryBackingStore::CreateMetasTable(bool is_temporary) {
  string query = "CREATE TABLE ";
  query.append(is_temporary ? "temp_metas" : "metas");
  query.append(ComposeCreateTableColumnSpecs());
  if (!db_->Execute(query.c_str()))
    return false;

  // Create a deleted_metas table to save copies of deleted metas until the
  // deletions are persisted. For simplicity, don't try to migrate existing
  // data because it's rarely used.
  SafeDropTable("deleted_metas");
  query = "CREATE TABLE deleted_metas ";
  query.append(ComposeCreateTableColumnSpecs());
  return db_->Execute(query.c_str());
}
bool DirectoryBackingStore::CreateV71ModelsTable() {
  // This is an old schema for the Models table, used from versions 71 to 74.
  return db_->Execute(
      "CREATE TABLE models ("
      "model_id BLOB primary key, "
      "last_download_timestamp INT, "
      // Gets set if the syncer ever gets updates from the
      // server and the server returns 0. Lets us detect the
      // end of the initial sync.
      "initial_sync_ended BOOLEAN default 0)");
}
bool DirectoryBackingStore::CreateV75ModelsTable() {
  // This is an old schema for the Models table, used from versions 75 to 80.
  return db_->Execute(
      "CREATE TABLE models ("
      "model_id BLOB primary key, "
      "progress_marker BLOB, "
      // Gets set if the syncer ever gets updates from the
      // server and the server returns 0. Lets us detect the
      // end of the initial sync.
      "initial_sync_ended BOOLEAN default 0)");
}
bool DirectoryBackingStore::CreateV81ModelsTable() {
  // This is an old schema for the Models table, used from versions 81 to 87.
  return db_->Execute(
      "CREATE TABLE models ("
      "model_id BLOB primary key, "
      "progress_marker BLOB, "
      "transaction_version BIGINT default 0)");
}
bool DirectoryBackingStore::CreateModelsTable() {
  // This is the current schema for the Models table, from version 88
  // onward. If you change the schema, you'll probably want to double-check
  // the use of this function in the v84-v85 migration.
  return db_->Execute(
      "CREATE TABLE models ("
      "model_id BLOB primary key, "
      "progress_marker BLOB, "
      "transaction_version BIGINT default 0,"
      "context BLOB)");
}
bool DirectoryBackingStore::CreateShareInfoTable(bool is_temporary) {
  const char* name = is_temporary ? "temp_share_info" : "share_info";
  string query = "CREATE TABLE ";
  query.append(name);
  // This is the current schema for the ShareInfo table, from version 76
  // onward.
  query.append(" ("
               "id TEXT primary key, "
               "name TEXT, "
               "store_birthday TEXT, "
               "db_create_version TEXT, "
               "db_create_time INT, "
               "next_id INT default -2, "
               "cache_guid TEXT, "
               // TODO(rlarocque, 124140): Remove notification_state field.
               "notification_state BLOB, "
               "bag_of_chips BLOB)");
  return db_->Execute(query.c_str());
}
bool DirectoryBackingStore::CreateShareInfoTableVersion71(
    bool is_temporary) {
  const char* name = is_temporary ? "temp_share_info" : "share_info";
  string query = "CREATE TABLE ";
  query.append(name);
  // This is the schema for the ShareInfo table used from versions 71 to 72.
  query.append(" ("
               "id TEXT primary key, "
               "name TEXT, "
               "store_birthday TEXT, "
               "db_create_version TEXT, "
               "db_create_time INT, "
               "next_id INT default -2, "
               "cache_guid TEXT )");
  return db_->Execute(query.c_str());
}
// This function checks to see if the given list of Metahandles has any nodes
// whose PARENT_ID values refer to ID values that do not actually exist.
// Returns true on success.
bool DirectoryBackingStore::VerifyReferenceIntegrity(
    const Directory::MetahandlesMap* handles_map) {
  TRACE_EVENT0("sync", "SyncDatabaseIntegrityCheck");
  using namespace syncable;
  typedef base::hash_set<std::string> IdsSet;

  IdsSet ids_set;
  bool is_ok = true;

  for (Directory::MetahandlesMap::const_iterator it = handles_map->begin();
       it != handles_map->end(); ++it) {
    EntryKernel* entry = it->second;
    bool is_duplicate_id = !(ids_set.insert(entry->ref(ID).value()).second);
    is_ok = is_ok && !is_duplicate_id;
  }

  IdsSet::iterator end = ids_set.end();
  for (Directory::MetahandlesMap::const_iterator it = handles_map->begin();
       it != handles_map->end(); ++it) {
    EntryKernel* entry = it->second;
    if (!entry->ref(PARENT_ID).IsNull()) {
      bool parent_exists =
          (ids_set.find(entry->ref(PARENT_ID).value()) != end);
      if (!parent_exists) {
        return false;
      }
    }
  }
  return is_ok;
}
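// Illustrative failure cases for the check above: two entries sharing the
// same ID make is_ok false (the duplicate insert returns .second == false),
// while an entry whose non-null PARENT_ID names an ID absent from ids_set
// causes an immediate false return as a dangling parent reference.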
void DirectoryBackingStore::PrepareSaveEntryStatement(
    EntryTable table, sql::Statement* save_statement) {
  if (save_statement->is_valid())
    return;

  string query;
  query.reserve(kUpdateStatementBufferSize);
  switch (table) {
    case METAS_TABLE:
      query.append("INSERT OR REPLACE INTO metas ");
      break;
    case DELETE_JOURNAL_TABLE:
      query.append("INSERT OR REPLACE INTO deleted_metas ");
      break;
  }

  string values;
  values.reserve(kUpdateStatementBufferSize);
  values.append(" VALUES ");
  const char* separator = "( ";
  int i = 0;
  for (i = BEGIN_FIELDS; i < FIELD_COUNT; ++i) {
    query.append(separator);
    values.append(separator);
    separator = ", ";
    query.append(ColumnName(i));
    values.append("?");
  }
  query.append(" ) ");
  values.append(" )");
  query.append(values);
  save_statement->Assign(db_->GetUniqueStatement(
      base::StringPrintf(query.c_str(), "metas").c_str()));
}
// Get page size for the database.
bool DirectoryBackingStore::GetDatabasePageSize(int* page_size) {
  sql::Statement s(db_->GetUniqueStatement("PRAGMA page_size"));
  if (!s.Step())
    return false;
  *page_size = s.ColumnInt(0);
  return true;
}
bool DirectoryBackingStore::IncreasePageSizeTo32K() {
  if (!db_->Execute("PRAGMA page_size=32768;") || !Vacuum()) {
    return false;
  }
  return true;
}
bool DirectoryBackingStore::Vacuum() {
  DCHECK_EQ(db_->transaction_nesting(), 0);
  if (!db_->Execute("VACUUM;")) {
    return false;
  }
  return true;
}
bool DirectoryBackingStore::needs_column_refresh() const {
  return needs_column_refresh_;
}
void DirectoryBackingStore::ResetAndCreateConnection() {
  db_.reset(new sql::Connection());
  db_->set_histogram_tag("SyncDirectory");
  db_->set_exclusive_locking();
  db_->set_cache_size(32);
  db_->set_page_size(database_page_size_);
  if (!catastrophic_error_handler_.is_null())
    SetCatastrophicErrorHandler(catastrophic_error_handler_);
}
void DirectoryBackingStore::SetCatastrophicErrorHandler(
    const base::Closure& catastrophic_error_handler) {
  DCHECK(CalledOnValidThread());
  DCHECK(!catastrophic_error_handler.is_null());
  catastrophic_error_handler_ = catastrophic_error_handler;
  sql::Connection::ErrorCallback error_callback =
      base::Bind(&OnSqliteError, catastrophic_error_handler_);
  db_->set_error_callback(error_callback);
}
}  // namespace syncable
}  // namespace syncer