// Copyright 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "sync/syncable/directory_backing_store.h"

#include "build/build_config.h"

#include <string>

#include "base/base64.h"
#include "base/logging.h"
#include "base/metrics/field_trial.h"
#include "base/rand_util.h"
#include "base/strings/stringprintf.h"
#include "base/time/time.h"
#include "base/trace_event/trace_event.h"
#include "sql/connection.h"
#include "sql/statement.h"
#include "sql/transaction.h"
#include "sync/internal_api/public/base/node_ordinal.h"
#include "sync/protocol/bookmark_specifics.pb.h"
#include "sync/protocol/sync.pb.h"
#include "sync/syncable/syncable-inl.h"
#include "sync/syncable/syncable_columns.h"
#include "sync/syncable/syncable_util.h"
#include "sync/util/time.h"

using std::string;

namespace syncer {
namespace syncable {
// This just has to be big enough to hold an UPDATE or INSERT statement that
// modifies all the columns in the entry table.
static const string::size_type kUpdateStatementBufferSize = 2048;

// Increment this version whenever updating DB tables.
const int32 kCurrentDBVersion = 89;
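
// Every bump of kCurrentDBVersion must be matched by a corresponding
// MigrateVersionXToY() step in InitializeTables() below, so that databases
// created at any older version can be upgraded in place.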
// Iterate over the fields of |entry| and bind each to |statement| for
// updating.
void BindFields(const EntryKernel& entry,
                sql::Statement* statement) {
  int index = 0;
  int i = 0;
  for (i = BEGIN_FIELDS; i < INT64_FIELDS_END; ++i) {
    statement->BindInt64(index++, entry.ref(static_cast<Int64Field>(i)));
  }
  for ( ; i < TIME_FIELDS_END; ++i) {
    statement->BindInt64(index++,
                         TimeToProtoTime(
                             entry.ref(static_cast<TimeField>(i))));
  }
  for ( ; i < ID_FIELDS_END; ++i) {
    statement->BindString(index++, entry.ref(static_cast<IdField>(i)).s_);
  }
  for ( ; i < BIT_FIELDS_END; ++i) {
    statement->BindInt(index++, entry.ref(static_cast<BitField>(i)));
  }
  for ( ; i < STRING_FIELDS_END; ++i) {
    statement->BindString(index++, entry.ref(static_cast<StringField>(i)));
  }
  std::string temp;
  for ( ; i < PROTO_FIELDS_END; ++i) {
    entry.ref(static_cast<ProtoField>(i)).SerializeToString(&temp);
    statement->BindBlob(index++, temp.data(), temp.length());
  }
  for ( ; i < UNIQUE_POSITION_FIELDS_END; ++i) {
    entry.ref(static_cast<UniquePositionField>(i)).SerializeToString(&temp);
    statement->BindBlob(index++, temp.data(), temp.length());
  }
  for (; i < ATTACHMENT_METADATA_FIELDS_END; ++i) {
    entry.ref(static_cast<AttachmentMetadataField>(i)).SerializeToString(&temp);
    statement->BindBlob(index++, temp.data(), temp.length());
  }
}
// The caller owns the returned EntryKernel*.  Assumes the statement currently
// points to a valid row in the metas table. Returns NULL to indicate that
// it detected a corruption in the data on unpacking.
scoped_ptr<EntryKernel> UnpackEntry(sql::Statement* statement) {
  scoped_ptr<EntryKernel> kernel(new EntryKernel());
  DCHECK_EQ(statement->ColumnCount(), static_cast<int>(FIELD_COUNT));
  int i = 0;
  for (i = BEGIN_FIELDS; i < INT64_FIELDS_END; ++i) {
    kernel->put(static_cast<Int64Field>(i), statement->ColumnInt64(i));
  }
  for ( ; i < TIME_FIELDS_END; ++i) {
    kernel->put(static_cast<TimeField>(i),
                ProtoTimeToTime(statement->ColumnInt64(i)));
  }
  for ( ; i < ID_FIELDS_END; ++i) {
    kernel->mutable_ref(static_cast<IdField>(i)).s_ =
        statement->ColumnString(i);
  }
  for ( ; i < BIT_FIELDS_END; ++i) {
    kernel->put(static_cast<BitField>(i), (0 != statement->ColumnInt(i)));
  }
  for ( ; i < STRING_FIELDS_END; ++i) {
    kernel->put(static_cast<StringField>(i),
                statement->ColumnString(i));
  }
  for ( ; i < PROTO_FIELDS_END; ++i) {
    kernel->mutable_ref(static_cast<ProtoField>(i)).ParseFromArray(
        statement->ColumnBlob(i), statement->ColumnByteLength(i));
  }
  for ( ; i < UNIQUE_POSITION_FIELDS_END; ++i) {
    std::string temp;
    statement->ColumnBlobAsString(i, &temp);

    sync_pb::UniquePosition proto;
    if (!proto.ParseFromString(temp)) {
      DVLOG(1) << "Unpacked invalid position.  Assuming the DB is corrupt";
      return scoped_ptr<EntryKernel>();
    }

    kernel->mutable_ref(static_cast<UniquePositionField>(i)) =
        UniquePosition::FromProto(proto);
  }
  for (; i < ATTACHMENT_METADATA_FIELDS_END; ++i) {
    kernel->mutable_ref(static_cast<AttachmentMetadataField>(i)).ParseFromArray(
        statement->ColumnBlob(i), statement->ColumnByteLength(i));
  }

  // Sanity check on positions.  We risk strange and rare crashes if our
  // assumptions about unique position values are broken.
  if (kernel->ShouldMaintainPosition() &&
      !kernel->ref(UNIQUE_POSITION).IsValid()) {
    DVLOG(1) << "Unpacked invalid position on an entity that should have a "
             << "valid position.  Assuming the DB is corrupt.";
    return scoped_ptr<EntryKernel>();
  }

  return kernel.Pass();
}
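
// The next helper returns the column-spec fragment appended to CREATE TABLE
// statements for the metas and deleted_metas tables.  It is built from
// g_metas_columns and has the shape "(<name> <spec>,<name> <spec>,...)".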
string ComposeCreateTableColumnSpecs() {
  const ColumnSpec* begin = g_metas_columns;
  const ColumnSpec* end = g_metas_columns + arraysize(g_metas_columns);
  string query;
  query.reserve(kUpdateStatementBufferSize);
  char separator = '(';
  for (const ColumnSpec* column = begin; column != end; ++column) {
    query.push_back(separator);
    separator = ',';
    query.append(column->name);
    query.push_back(' ');
    query.append(column->spec);
  }
  query.push_back(')');
  return query;
}
void AppendColumnList(std::string* output) {
  const char* joiner = " ";
  // Be explicit in SELECT order to match up with UnpackEntry.
  for (int i = BEGIN_FIELDS; i < FIELD_COUNT; ++i) {
    output->append(joiner);
    output->append(ColumnName(i));
    joiner = ", ";
  }
}
///////////////////////////////////////////////////////////////////////////////
// DirectoryBackingStore implementation.

DirectoryBackingStore::DirectoryBackingStore(const string& dir_name)
    : db_(new sql::Connection()),
      dir_name_(dir_name),
      needs_column_refresh_(false) {
  db_->set_histogram_tag("SyncDirectory");
  db_->set_cache_size(32);
  databasePageSize_ = IsSyncBackingDatabase32KEnabled() ? 32768 : 4096;
  db_->set_page_size(databasePageSize_);
}
DirectoryBackingStore::DirectoryBackingStore(const string& dir_name,
                                             sql::Connection* db)
    : db_(db),
      dir_name_(dir_name),
      needs_column_refresh_(false) {
}

DirectoryBackingStore::~DirectoryBackingStore() {
}
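
// Deletes every metahandle in |handles| from the requested table, one row at
// a time through a cached DELETE statement, and bails out as soon as any
// DELETE fails.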
bool DirectoryBackingStore::DeleteEntries(EntryTable from,
                                          const MetahandleSet& handles) {
  sql::Statement statement;
  // Call GetCachedStatement() separately to get different statements for
  // different tables.
  switch (from) {
    case METAS_TABLE:
      statement.Assign(db_->GetCachedStatement(
          SQL_FROM_HERE, "DELETE FROM metas WHERE metahandle = ?"));
      break;
    case DELETE_JOURNAL_TABLE:
      statement.Assign(db_->GetCachedStatement(
          SQL_FROM_HERE, "DELETE FROM deleted_metas WHERE metahandle = ?"));
      break;
  }

  for (MetahandleSet::const_iterator i = handles.begin(); i != handles.end();
       ++i) {
    statement.BindInt64(0, *i);
    if (!statement.Run())
      return false;
    statement.Reset(true);
  }
  return true;
}
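
// SaveChanges() persists an entire snapshot atomically: dirty entries,
// purges, and delete-journal updates are all written inside a single
// sql::Transaction, so nothing reaches disk unless the final Commit()
// succeeds.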
bool DirectoryBackingStore::SaveChanges(
    const Directory::SaveChangesSnapshot& snapshot) {
  DCHECK(CalledOnValidThread());
  DCHECK(db_->is_open());

  // Back out early if there is nothing to write.
  bool save_info =
      (Directory::KERNEL_SHARE_INFO_DIRTY == snapshot.kernel_info_status);
  if (snapshot.dirty_metas.empty() && snapshot.metahandles_to_purge.empty() &&
      snapshot.delete_journals.empty() &&
      snapshot.delete_journals_to_purge.empty() && !save_info) {
    return true;
  }

  sql::Transaction transaction(db_.get());
  if (!transaction.Begin())
    return false;

  PrepareSaveEntryStatement(METAS_TABLE, &save_meta_statment_);
  for (EntryKernelSet::const_iterator i = snapshot.dirty_metas.begin();
       i != snapshot.dirty_metas.end(); ++i) {
    DCHECK((*i)->is_dirty());
    if (!SaveEntryToDB(&save_meta_statment_, **i))
      return false;
  }

  if (!DeleteEntries(METAS_TABLE, snapshot.metahandles_to_purge))
    return false;

  PrepareSaveEntryStatement(DELETE_JOURNAL_TABLE,
                            &save_delete_journal_statment_);
  for (EntryKernelSet::const_iterator i = snapshot.delete_journals.begin();
       i != snapshot.delete_journals.end(); ++i) {
    if (!SaveEntryToDB(&save_delete_journal_statment_, **i))
      return false;
  }

  if (!DeleteEntries(DELETE_JOURNAL_TABLE, snapshot.delete_journals_to_purge))
    return false;

  if (save_info) {
    const Directory::PersistedKernelInfo& info = snapshot.kernel_info;
    sql::Statement s1(db_->GetCachedStatement(
        SQL_FROM_HERE,
        "UPDATE share_info "
        "SET store_birthday = ?, "
        "next_id = ?, "
        "bag_of_chips = ?"));
    s1.BindString(0, info.store_birthday);
    s1.BindInt64(1, info.next_id);
    s1.BindBlob(2, info.bag_of_chips.data(), info.bag_of_chips.size());

    if (!s1.Run())
      return false;
    DCHECK_EQ(db_->GetLastChangeCount(), 1);

    sql::Statement s2(db_->GetCachedStatement(
        SQL_FROM_HERE,
        "INSERT OR REPLACE "
        "INTO models (model_id, "
        "progress_marker, "
        "transaction_version, "
        "context) "
        "VALUES (?, ?, ?, ?)"));

    ModelTypeSet protocol_types = ProtocolTypes();
    for (ModelTypeSet::Iterator iter = protocol_types.First(); iter.Good();
         iter.Inc()) {
      ModelType type = iter.Get();
      // We persist not ModelType but rather a protobuf-derived ID.
      string model_id = ModelTypeEnumToModelId(type);
      string progress_marker;
      info.download_progress[type].SerializeToString(&progress_marker);
      s2.BindBlob(0, model_id.data(), model_id.length());
      s2.BindBlob(1, progress_marker.data(), progress_marker.length());
      s2.BindInt64(2, info.transaction_version[type]);
      std::string context;
      info.datatype_context[type].SerializeToString(&context);
      s2.BindBlob(3, context.data(), context.length());
      if (!s2.Run())
        return false;
      DCHECK_EQ(db_->GetLastChangeCount(), 1);
      s2.Reset(true);
    }
  }

  return transaction.Commit();
}
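
// InitializeTables() upgrades the on-disk schema one version at a time, so a
// database created at any historical version passes through every
// intermediate migration before it reaches kCurrentDBVersion.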
bool DirectoryBackingStore::InitializeTables() {
  int page_size = 0;
  if (IsSyncBackingDatabase32KEnabled() && GetDatabasePageSize(&page_size) &&
      page_size == 4096) {
    IncreasePageSizeTo32K();
  }
  sql::Transaction transaction(db_.get());
  if (!transaction.Begin())
    return false;

  int version_on_disk = GetVersion();

  // Upgrade from version 67. Version 67 was widely distributed as the original
  // Bookmark Sync release. Version 68 removed unique naming.
  if (version_on_disk == 67) {
    if (MigrateVersion67To68())
      version_on_disk = 68;
  }
  // Version 69 introduced additional datatypes.
  if (version_on_disk == 68) {
    if (MigrateVersion68To69())
      version_on_disk = 69;
  }
  if (version_on_disk == 69) {
    if (MigrateVersion69To70())
      version_on_disk = 70;
  }
  // Version 71 changed the sync progress information to be per-datatype.
  if (version_on_disk == 70) {
    if (MigrateVersion70To71())
      version_on_disk = 71;
  }
  // Version 72 removed extended attributes, a legacy way to do extensible
  // key/value information, stored in their own table.
  if (version_on_disk == 71) {
    if (MigrateVersion71To72())
      version_on_disk = 72;
  }
  // Version 73 added a field for notification state.
  if (version_on_disk == 72) {
    if (MigrateVersion72To73())
      version_on_disk = 73;
  }
  // Version 74 added state for the autofill migration.
  if (version_on_disk == 73) {
    if (MigrateVersion73To74())
      version_on_disk = 74;
  }
  // Version 75 migrated from int64-based timestamps to per-datatype tokens.
  if (version_on_disk == 74) {
    if (MigrateVersion74To75())
      version_on_disk = 75;
  }
  // Version 76 removed all (5) autofill migration related columns.
  if (version_on_disk == 75) {
    if (MigrateVersion75To76())
      version_on_disk = 76;
  }
  // Version 77 standardized all time fields to ms since the Unix
  // epoch.
  if (version_on_disk == 76) {
    if (MigrateVersion76To77())
      version_on_disk = 77;
  }
  // Version 78 added the column base_server_specifics to the metas table.
  if (version_on_disk == 77) {
    if (MigrateVersion77To78())
      version_on_disk = 78;
  }
  // Version 79 migration is a one-time fix for some users in a bad state.
  if (version_on_disk == 78) {
    if (MigrateVersion78To79())
      version_on_disk = 79;
  }
  // Version 80 migration is adding the bag_of_chips column.
  if (version_on_disk == 79) {
    if (MigrateVersion79To80())
      version_on_disk = 80;
  }
  // Version 81 replaces the int64 server_position_in_parent field
  // with a blob server_ordinal_in_parent field.
  if (version_on_disk == 80) {
    if (MigrateVersion80To81())
      version_on_disk = 81;
  }
  // Version 82 migration added transaction_version column per data type.
  if (version_on_disk == 81) {
    if (MigrateVersion81To82())
      version_on_disk = 82;
  }
  // Version 83 migration added transaction_version column per sync entry.
  if (version_on_disk == 82) {
    if (MigrateVersion82To83())
      version_on_disk = 83;
  }
  // Version 84 migration added deleted_metas table.
  if (version_on_disk == 83) {
    if (MigrateVersion83To84())
      version_on_disk = 84;
  }
  // Version 85 migration removes the initial_sync_ended bits.
  if (version_on_disk == 84) {
    if (MigrateVersion84To85())
      version_on_disk = 85;
  }
  // Version 86 migration converts bookmarks to the unique positioning system.
  // It also introduces a new field to store a unique ID for each bookmark.
  if (version_on_disk == 85) {
    if (MigrateVersion85To86())
      version_on_disk = 86;
  }
  // Version 87 migration adds a collection of attachment ids per sync entry.
  if (version_on_disk == 86) {
    if (MigrateVersion86To87())
      version_on_disk = 87;
  }
  // Version 88 migration adds datatype contexts to the models table.
  if (version_on_disk == 87) {
    if (MigrateVersion87To88())
      version_on_disk = 88;
  }
  // Version 89 migration adds server attachment metadata to the metas table.
  if (version_on_disk == 88) {
    if (MigrateVersion88To89())
      version_on_disk = 89;
  }

  // If one of the migrations requested it, drop columns that aren't current.
  // It's only safe to do this after migrating all the way to the current
  // version.
  if (version_on_disk == kCurrentDBVersion && needs_column_refresh_) {
    if (!RefreshColumns())
      return false;
  }

  // A final, alternative catch-all migration to simply re-sync everything.
  if (version_on_disk != kCurrentDBVersion) {
    if (version_on_disk > kCurrentDBVersion)
      return false;

    // Fallback (re-sync everything) migration path.
    DVLOG(1) << "Old/null sync database, version " << version_on_disk;
    // Delete the existing database (if any), and create a fresh one.
    DropAllTables();
    if (!CreateTables())
      return false;
  }

  sql::Statement s(db_->GetUniqueStatement(
      "SELECT db_create_version, db_create_time FROM share_info"));
  if (!s.Step())
    return false;
  string db_create_version = s.ColumnString(0);
  int db_create_time = s.ColumnInt(1);
  DVLOG(1) << "DB created at " << db_create_time << " by version " <<
      db_create_version;

  return transaction.Commit();
}
// This function drops unused columns by creating a new table that contains only
// the currently used columns then copying all rows from the old tables into
// this new one.  The tables are then rearranged so the new replaces the old.
bool DirectoryBackingStore::RefreshColumns() {
  DCHECK(needs_column_refresh_);

  // Create a new table named temp_metas.
  SafeDropTable("temp_metas");
  if (!CreateMetasTable(true))
    return false;

  // Populate temp_metas from metas.
  //
  // At this point, the metas table may contain columns belonging to obsolete
  // schema versions.  This statement explicitly lists only the columns that
  // belong to the current schema version, so the obsolete columns will be
  // effectively dropped once we rename temp_metas over top of metas.
  std::string query = "INSERT INTO temp_metas (";
  AppendColumnList(&query);
  query.append(") SELECT ");
  AppendColumnList(&query);
  query.append(" FROM metas");
  if (!db_->Execute(query.c_str()))
    return false;

  SafeDropTable("metas");

  // Rename temp_metas -> metas.
  if (!db_->Execute("ALTER TABLE temp_metas RENAME TO metas"))
    return false;

  // Repeat the process for share_info.
  SafeDropTable("temp_share_info");
  if (!CreateShareInfoTable(true))
    return false;

  // TODO(rlarocque, 124140): Remove notification_state.
  if (!db_->Execute(
          "INSERT INTO temp_share_info (id, name, store_birthday, "
          "db_create_version, db_create_time, next_id, cache_guid,"
          "notification_state, bag_of_chips) "
          "SELECT id, name, store_birthday, db_create_version, "
          "db_create_time, next_id, cache_guid, notification_state, "
          "bag_of_chips FROM share_info"))
    return false;

  SafeDropTable("share_info");
  if (!db_->Execute("ALTER TABLE temp_share_info RENAME TO share_info"))
    return false;

  needs_column_refresh_ = false;
  return true;
}
bool DirectoryBackingStore::LoadEntries(Directory::MetahandlesMap* handles_map,
                                        MetahandleSet* metahandles_to_purge) {
  string select;
  select.reserve(kUpdateStatementBufferSize);
  select.append("SELECT ");
  AppendColumnList(&select);
  select.append(" FROM metas");

  sql::Statement s(db_->GetUniqueStatement(select.c_str()));

  while (s.Step()) {
    scoped_ptr<EntryKernel> kernel = UnpackEntry(&s);
    // A null kernel is evidence of external data corruption.
    if (!kernel)
      return false;

    int64 handle = kernel->ref(META_HANDLE);
    if (SafeToPurgeOnLoading(*kernel))
      metahandles_to_purge->insert(handle);
    else
      (*handles_map)[handle] = kernel.release();
  }
  return s.Succeeded();
}
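
// A deleted entry may be purged while loading when no work remains for it:
// it is neither unsynced nor an unapplied update, or its ID was never known
// to the server.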
bool DirectoryBackingStore::SafeToPurgeOnLoading(
    const EntryKernel& entry) const {
  if (entry.ref(IS_DEL)) {
    if (!entry.ref(IS_UNSYNCED) && !entry.ref(IS_UNAPPLIED_UPDATE))
      return true;
    else if (!entry.ref(ID).ServerKnows())
      return true;
  }
  return false;
}
bool DirectoryBackingStore::LoadDeleteJournals(
    JournalIndex* delete_journals) {
  string select;
  select.reserve(kUpdateStatementBufferSize);
  select.append("SELECT ");
  AppendColumnList(&select);
  select.append(" FROM deleted_metas");

  sql::Statement s(db_->GetUniqueStatement(select.c_str()));

  while (s.Step()) {
    scoped_ptr<EntryKernel> kernel = UnpackEntry(&s);
    // A null kernel is evidence of external data corruption.
    if (!kernel)
      return false;
    delete_journals->insert(kernel.release());
  }
  return s.Succeeded();
}
bool DirectoryBackingStore::LoadInfo(Directory::KernelLoadInfo* info) {
  {
    sql::Statement s(
        db_->GetUniqueStatement(
            "SELECT store_birthday, next_id, cache_guid, bag_of_chips "
            "FROM share_info"));
    if (!s.Step())
      return false;

    info->kernel_info.store_birthday = s.ColumnString(0);
    info->kernel_info.next_id = s.ColumnInt64(1);
    info->cache_guid = s.ColumnString(2);
    s.ColumnBlobAsString(3, &(info->kernel_info.bag_of_chips));

    // Verify there was only one row returned.
    DCHECK(!s.Step());
    DCHECK(s.Succeeded());
  }

  {
    sql::Statement s(
        db_->GetUniqueStatement(
            "SELECT model_id, progress_marker, "
            "transaction_version, context FROM models"));
    while (s.Step()) {
      ModelType type = ModelIdToModelTypeEnum(s.ColumnBlob(0),
                                              s.ColumnByteLength(0));
      if (type != UNSPECIFIED && type != TOP_LEVEL_FOLDER) {
        info->kernel_info.download_progress[type].ParseFromArray(
            s.ColumnBlob(1), s.ColumnByteLength(1));
        info->kernel_info.transaction_version[type] = s.ColumnInt64(2);
        info->kernel_info.datatype_context[type].ParseFromArray(
            s.ColumnBlob(3), s.ColumnByteLength(3));
      }
    }
    if (!s.Succeeded())
      return false;
  }

  {
    sql::Statement s(
        db_->GetUniqueStatement(
            "SELECT MAX(metahandle) FROM metas"));
    if (!s.Step())
      return false;

    info->max_metahandle = s.ColumnInt64(0);

    // Verify only one row was returned.
    DCHECK(!s.Step());
    DCHECK(s.Succeeded());
  }
  return true;
}
bool DirectoryBackingStore::SaveEntryToDB(sql::Statement* save_statement,
                                          const EntryKernel& entry) {
  save_statement->Reset(true);
  BindFields(entry, save_statement);
  return save_statement->Run();
}
bool DirectoryBackingStore::SafeDropTable(const char* table_name) {
  string query = "DROP TABLE IF EXISTS ";
  query.append(table_name);
  return db_->Execute(query.c_str());
}

void DirectoryBackingStore::DropAllTables() {
  SafeDropTable("metas");
  SafeDropTable("temp_metas");
  SafeDropTable("share_info");
  SafeDropTable("temp_share_info");
  SafeDropTable("share_version");
  SafeDropTable("extended_attributes");
  SafeDropTable("models");
  SafeDropTable("temp_models");
  needs_column_refresh_ = false;
}
ModelType DirectoryBackingStore::ModelIdToModelTypeEnum(
    const void* data, int size) {
  sync_pb::EntitySpecifics specifics;
  if (!specifics.ParseFromArray(data, size))
    return UNSPECIFIED;
  return GetModelTypeFromSpecifics(specifics);
}

string DirectoryBackingStore::ModelTypeEnumToModelId(ModelType model_type) {
  sync_pb::EntitySpecifics specifics;
  AddDefaultFieldValue(model_type, &specifics);
  return specifics.SerializeAsString();
}

std::string DirectoryBackingStore::GenerateCacheGUID() {
  // Generate a GUID with 128 bits of randomness.
  const int kGuidBytes = 128 / 8;
  std::string guid;
  base::Base64Encode(base::RandBytesAsString(kGuidBytes), &guid);
  return guid;
}
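
// Note: 16 random bytes base64-encode to a fixed-length 24-character ASCII
// string (including '==' padding), so the generated cache GUID is safe to
// store in the TEXT cache_guid column.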
bool DirectoryBackingStore::MigrateToSpecifics(
    const char* old_columns,
    const char* specifics_column,
    void (*handler_function)(sql::Statement* old_value_query,
                             int old_value_column,
                             sync_pb::EntitySpecifics* mutable_new_value)) {
  std::string query_sql = base::StringPrintf(
      "SELECT metahandle, %s, %s FROM metas", specifics_column, old_columns);
  std::string update_sql = base::StringPrintf(
      "UPDATE metas SET %s = ? WHERE metahandle = ?", specifics_column);

  sql::Statement query(db_->GetUniqueStatement(query_sql.c_str()));
  sql::Statement update(db_->GetUniqueStatement(update_sql.c_str()));

  while (query.Step()) {
    int64 metahandle = query.ColumnInt64(0);
    std::string new_value_bytes;
    query.ColumnBlobAsString(1, &new_value_bytes);
    sync_pb::EntitySpecifics new_value;
    new_value.ParseFromString(new_value_bytes);
    handler_function(&query, 2, &new_value);
    new_value.SerializeToString(&new_value_bytes);

    update.BindBlob(0, new_value_bytes.data(), new_value_bytes.length());
    update.BindInt64(1, metahandle);
    if (!update.Run())
      return false;
    update.Reset(true);
  }
  return query.Succeeded();
}
bool DirectoryBackingStore::SetVersion(int version) {
  sql::Statement s(db_->GetCachedStatement(
      SQL_FROM_HERE, "UPDATE share_version SET data = ?"));
  s.BindInt(0, version);

  return s.Run();
}
int DirectoryBackingStore::GetVersion() {
  if (!db_->DoesTableExist("share_version"))
    return 0;

  sql::Statement statement(db_->GetUniqueStatement(
      "SELECT data FROM share_version"));
  if (statement.Step()) {
    return statement.ColumnInt(0);
  }
  return 0;
}
bool DirectoryBackingStore::MigrateVersion67To68() {
  // This change simply removed three columns:
  //   string NAME
  //   string UNSANITIZED_NAME
  //   string SERVER_NAME
  // No data migration is necessary, but we should do a column refresh.
  SetVersion(68);
  needs_column_refresh_ = true;
  return true;
}
bool DirectoryBackingStore::MigrateVersion69To70() {
  // Added "unique_client_tag", renamed "singleton_tag" to unique_server_tag
  SetVersion(70);
  if (!db_->Execute(
          "ALTER TABLE metas ADD COLUMN unique_server_tag varchar"))
    return false;
  if (!db_->Execute(
          "ALTER TABLE metas ADD COLUMN unique_client_tag varchar"))
    return false;
  needs_column_refresh_ = true;

  if (!db_->Execute(
          "UPDATE metas SET unique_server_tag = singleton_tag"))
    return false;

  return true;
}
// Callback passed to MigrateToSpecifics for the v68->v69 migration.  See
// MigrateVersion68To69().
void EncodeBookmarkURLAndFavicon(sql::Statement* old_value_query,
                                 int old_value_column,
                                 sync_pb::EntitySpecifics* mutable_new_value) {
  // Extract data from the column trio we expect.
  bool old_is_bookmark_object = old_value_query->ColumnBool(old_value_column);
  std::string old_url = old_value_query->ColumnString(old_value_column + 1);
  std::string old_favicon;
  old_value_query->ColumnBlobAsString(old_value_column + 2, &old_favicon);
  bool old_is_dir = old_value_query->ColumnBool(old_value_column + 3);

  if (old_is_bookmark_object) {
    sync_pb::BookmarkSpecifics* bookmark_data =
        mutable_new_value->mutable_bookmark();
    if (!old_is_dir)
      bookmark_data->set_url(old_url);
    bookmark_data->set_favicon(old_favicon);
  }
}
bool DirectoryBackingStore::MigrateVersion68To69() {
  // In Version 68, there were columns on table 'metas':
  //   string BOOKMARK_URL
  //   string SERVER_BOOKMARK_URL
  //   blob BOOKMARK_FAVICON
  //   blob SERVER_BOOKMARK_FAVICON
  // In version 69, these columns went away in favor of storing
  // a serialized EntrySpecifics protobuf in the columns:
  //   protobuf blob SPECIFICS
  //   protobuf blob SERVER_SPECIFICS
  // For bookmarks, EntrySpecifics is extended as per
  // bookmark_specifics.proto. This migration converts bookmarks from the
  // former scheme to the latter scheme.

  // First, add the two new columns to the schema.
  if (!db_->Execute(
          "ALTER TABLE metas ADD COLUMN specifics blob"))
    return false;
  if (!db_->Execute(
          "ALTER TABLE metas ADD COLUMN server_specifics blob"))
    return false;

  // Next, fold data from the old columns into the new protobuf columns.
  if (!MigrateToSpecifics(("is_bookmark_object, bookmark_url, "
                           "bookmark_favicon, is_dir"),
                          "specifics",
                          &EncodeBookmarkURLAndFavicon)) {
    return false;
  }
  if (!MigrateToSpecifics(("server_is_bookmark_object, "
                           "server_bookmark_url, "
                           "server_bookmark_favicon, "
                           "server_is_dir"),
                          "server_specifics",
                          &EncodeBookmarkURLAndFavicon)) {
    return false;
  }

  // Lastly, fix up the "Google Chrome" folder, which is of the TOP_LEVEL_FOLDER
  // ModelType: it shouldn't have BookmarkSpecifics.
  if (!db_->Execute(
          "UPDATE metas SET specifics = NULL, server_specifics = NULL WHERE "
          "singleton_tag IN ('google_chrome')"))
    return false;

  SetVersion(69);
  needs_column_refresh_ = true;  // Trigger deletion of old columns.
  return true;
}
// Version 71, the columns 'initial_sync_ended' and 'last_sync_timestamp'
// were removed from the share_info table.  They were replaced by
// the 'models' table, which has these values on a per-datatype basis.
bool DirectoryBackingStore::MigrateVersion70To71() {
  if (!CreateV71ModelsTable())
    return false;

  // Move data from the old share_info columns to the new models table.
  sql::Statement fetch(db_->GetUniqueStatement(
      "SELECT last_sync_timestamp, initial_sync_ended FROM share_info"));
  if (!fetch.Step())
    return false;

  int64 last_sync_timestamp = fetch.ColumnInt64(0);
  bool initial_sync_ended = fetch.ColumnBool(1);

  // Verify there were no additional rows returned.
  DCHECK(!fetch.Step());
  DCHECK(fetch.Succeeded());

  sql::Statement update(db_->GetUniqueStatement(
      "INSERT INTO models (model_id, "
      "last_download_timestamp, initial_sync_ended) VALUES (?, ?, ?)"));
  string bookmark_model_id = ModelTypeEnumToModelId(BOOKMARKS);
  update.BindBlob(0, bookmark_model_id.data(), bookmark_model_id.size());
  update.BindInt64(1, last_sync_timestamp);
  update.BindBool(2, initial_sync_ended);

  if (!update.Run())
    return false;

  // Drop the columns from the old share_info table via a temp table.
  const bool kCreateAsTempShareInfo = true;

  if (!CreateShareInfoTableVersion71(kCreateAsTempShareInfo))
    return false;
  if (!db_->Execute(
          "INSERT INTO temp_share_info (id, name, store_birthday, "
          "db_create_version, db_create_time, next_id, cache_guid) "
          "SELECT id, name, store_birthday, db_create_version, "
          "db_create_time, next_id, cache_guid FROM share_info"))
    return false;
  SafeDropTable("share_info");
  if (!db_->Execute(
          "ALTER TABLE temp_share_info RENAME TO share_info"))
    return false;

  SetVersion(71);
  return true;
}
bool DirectoryBackingStore::MigrateVersion71To72() {
  // Version 72 removed a table 'extended_attributes', whose
  // contents didn't matter.
  SafeDropTable("extended_attributes");
  SetVersion(72);
  return true;
}

bool DirectoryBackingStore::MigrateVersion72To73() {
  // Version 73 added one column to the table 'share_info': notification_state
  if (!db_->Execute(
          "ALTER TABLE share_info ADD COLUMN notification_state BLOB"))
    return false;
  SetVersion(73);
  return true;
}
bool DirectoryBackingStore::MigrateVersion73To74() {
  // Version 74 added the following columns to the table 'share_info':
  //   autofill_migration_state
  //   bookmarks_added_during_autofill_migration
  //   autofill_migration_time
  //   autofill_entries_added_during_migration
  //   autofill_profiles_added_during_migration

  if (!db_->Execute(
          "ALTER TABLE share_info ADD COLUMN "
          "autofill_migration_state INT default 0"))
    return false;

  if (!db_->Execute(
          "ALTER TABLE share_info ADD COLUMN "
          "bookmarks_added_during_autofill_migration "
          "INT default 0"))
    return false;

  if (!db_->Execute(
          "ALTER TABLE share_info ADD COLUMN autofill_migration_time "
          "INT default 0"))
    return false;

  if (!db_->Execute(
          "ALTER TABLE share_info ADD COLUMN "
          "autofill_entries_added_during_migration "
          "INT default 0"))
    return false;

  if (!db_->Execute(
          "ALTER TABLE share_info ADD COLUMN "
          "autofill_profiles_added_during_migration "
          "INT default 0"))
    return false;

  SetVersion(74);
  return true;
}
bool DirectoryBackingStore::MigrateVersion74To75() {
  // In version 74, there was a table 'models':
  //   blob model_id (entity specifics, primary key)
  //   int last_download_timestamp
  //   boolean initial_sync_ended
  // In version 75, we deprecated the integer-valued last_download_timestamp,
  // using instead a protobuf-valued progress_marker field:
  //   blob progress_marker
  // The progress_marker values are initialized from the value of
  // last_download_timestamp, thereby preserving the download state.

  // Move aside the old table and create a new empty one at the current schema.
  if (!db_->Execute("ALTER TABLE models RENAME TO temp_models"))
    return false;
  if (!CreateV75ModelsTable())
    return false;

  sql::Statement query(db_->GetUniqueStatement(
      "SELECT model_id, last_download_timestamp, initial_sync_ended "
      "FROM temp_models"));

  sql::Statement update(db_->GetUniqueStatement(
      "INSERT INTO models (model_id, "
      "progress_marker, initial_sync_ended) VALUES (?, ?, ?)"));

  while (query.Step()) {
    ModelType type = ModelIdToModelTypeEnum(query.ColumnBlob(0),
                                            query.ColumnByteLength(0));
    if (type != UNSPECIFIED) {
      // Set the |timestamp_token_for_migration| on a new
      // DataTypeProgressMarker, using the old value of last_download_timestamp.
      // The server will turn this into a real token on our behalf the next
      // time we check for updates.
      sync_pb::DataTypeProgressMarker progress_marker;
      progress_marker.set_data_type_id(
          GetSpecificsFieldNumberFromModelType(type));
      progress_marker.set_timestamp_token_for_migration(query.ColumnInt64(1));
      std::string progress_blob;
      progress_marker.SerializeToString(&progress_blob);

      update.BindBlob(0, query.ColumnBlob(0), query.ColumnByteLength(0));
      update.BindBlob(1, progress_blob.data(), progress_blob.length());
      update.BindBool(2, query.ColumnBool(2));
      if (!update.Run())
        return false;
      update.Reset(true);
    }
  }
  if (!query.Succeeded())
    return false;

  // Drop the old table.
  SafeDropTable("temp_models");

  SetVersion(75);
  return true;
}
bool DirectoryBackingStore::MigrateVersion75To76() {
  // This change removed five columns:
  //   autofill_migration_state
  //   bookmarks_added_during_autofill_migration
  //   autofill_migration_time
  //   autofill_entries_added_during_migration
  //   autofill_profiles_added_during_migration
  // No data migration is necessary, but we should do a column refresh.
  SetVersion(76);
  needs_column_refresh_ = true;
  return true;
}
bool DirectoryBackingStore::MigrateVersion76To77() {
  // This change changes the format of stored timestamps to ms since
  // the Unix epoch.
#if defined(OS_WIN)
// On Windows, we used to store timestamps in FILETIME format (100s of
// ns since Jan 1, 1601).  Magic numbers taken from
// http://stackoverflow.com/questions/5398557/
// java-library-for-dealing-with-win32-filetime
#define TO_UNIX_TIME_MS(x) #x " = " #x " / 10000 - 11644473600000"
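// The expression above divides by 10,000 to convert 100-ns FILETIME ticks to
// milliseconds, then subtracts 11644473600000 ms -- the offset between the
// FILETIME epoch (Jan 1, 1601) and the Unix epoch (Jan 1, 1970).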
#else
// On other platforms, we used to store timestamps in time_t format (s
// since the Unix epoch).
#define TO_UNIX_TIME_MS(x) #x " = " #x " * 1000"
#endif

  sql::Statement update_timestamps(db_->GetUniqueStatement(
      "UPDATE metas SET "
      TO_UNIX_TIME_MS(mtime) ", "
      TO_UNIX_TIME_MS(server_mtime) ", "
      TO_UNIX_TIME_MS(ctime) ", "
      TO_UNIX_TIME_MS(server_ctime)));
#undef TO_UNIX_TIME_MS
  if (!update_timestamps.Run())
    return false;
  SetVersion(77);
  return true;
}
bool DirectoryBackingStore::MigrateVersion77To78() {
  // Version 78 added one column to table 'metas': base_server_specifics.
  if (!db_->Execute(
          "ALTER TABLE metas ADD COLUMN base_server_specifics BLOB")) {
    return false;
  }
  SetVersion(78);
  return true;
}
bool DirectoryBackingStore::MigrateVersion78To79() {
  // Some users are stuck with a DB that causes them to reuse existing IDs.  We
  // perform this one-time fixup on all users to help the few that are stuck.
  // See crbug.com/142987 for details.
  if (!db_->Execute(
          "UPDATE share_info SET next_id = next_id - 65536")) {
    return false;
  }
  SetVersion(79);
  return true;
}
bool DirectoryBackingStore::MigrateVersion79To80() {
  if (!db_->Execute(
          "ALTER TABLE share_info ADD COLUMN bag_of_chips BLOB"))
    return false;
  sql::Statement update(db_->GetUniqueStatement(
      "UPDATE share_info SET bag_of_chips = ?"));
  // An empty message is serialized to an empty string.
  update.BindBlob(0, NULL, 0);
  if (!update.Run())
    return false;
  SetVersion(80);
  return true;
}
bool DirectoryBackingStore::MigrateVersion80To81() {
  if (!db_->Execute(
          "ALTER TABLE metas ADD COLUMN server_ordinal_in_parent BLOB"))
    return false;

  sql::Statement get_positions(db_->GetUniqueStatement(
      "SELECT metahandle, server_position_in_parent FROM metas"));

  sql::Statement put_ordinals(db_->GetUniqueStatement(
      "UPDATE metas SET server_ordinal_in_parent = ? "
      "WHERE metahandle = ?"));

  while (get_positions.Step()) {
    int64 metahandle = get_positions.ColumnInt64(0);
    int64 position = get_positions.ColumnInt64(1);

    const std::string& ordinal = Int64ToNodeOrdinal(position).ToInternalValue();
    put_ordinals.BindBlob(0, ordinal.data(), ordinal.length());
    put_ordinals.BindInt64(1, metahandle);

    if (!put_ordinals.Run())
      return false;
    put_ordinals.Reset(true);
  }

  SetVersion(81);
  needs_column_refresh_ = true;
  return true;
}
bool DirectoryBackingStore::MigrateVersion81To82() {
  if (!db_->Execute(
          "ALTER TABLE models ADD COLUMN transaction_version BIGINT default 0"))
    return false;
  sql::Statement update(db_->GetUniqueStatement(
      "UPDATE models SET transaction_version = 0"));
  if (!update.Run())
    return false;
  SetVersion(82);
  return true;
}
bool DirectoryBackingStore::MigrateVersion82To83() {
  // Version 83 added transaction_version on sync node.
  if (!db_->Execute(
          "ALTER TABLE metas ADD COLUMN transaction_version BIGINT default 0"))
    return false;
  sql::Statement update(db_->GetUniqueStatement(
      "UPDATE metas SET transaction_version = 0"));
  if (!update.Run())
    return false;
  SetVersion(83);
  return true;
}
bool DirectoryBackingStore::MigrateVersion83To84() {
  // Version 84 added deleted_metas table to store deleted metas until we know
  // for sure that the deletions are persisted in native models.
  string query = "CREATE TABLE deleted_metas ";
  query.append(ComposeCreateTableColumnSpecs());
  if (!db_->Execute(query.c_str()))
    return false;
  SetVersion(84);
  return true;
}
bool DirectoryBackingStore::MigrateVersion84To85() {
  // Version 85 removes the initial_sync_ended flag.
  if (!db_->Execute("ALTER TABLE models RENAME TO temp_models"))
    return false;
  if (!CreateV81ModelsTable())
    return false;
  if (!db_->Execute("INSERT INTO models SELECT "
                    "model_id, progress_marker, transaction_version "
                    "FROM temp_models")) {
    return false;
  }
  SafeDropTable("temp_models");

  SetVersion(85);
  return true;
}
bool DirectoryBackingStore::MigrateVersion85To86() {
  // Version 86 removes both server ordinals and local NEXT_ID, PREV_ID and
  // SERVER_{POSITION,ORDINAL}_IN_PARENT and replaces them with UNIQUE_POSITION
  // and SERVER_UNIQUE_POSITION.
  if (!db_->Execute("ALTER TABLE metas ADD COLUMN "
                    "server_unique_position BLOB")) {
    return false;
  }
  if (!db_->Execute("ALTER TABLE metas ADD COLUMN "
                    "unique_position BLOB")) {
    return false;
  }
  if (!db_->Execute("ALTER TABLE metas ADD COLUMN "
                    "unique_bookmark_tag VARCHAR")) {
    return false;
  }

  // Fetch the cache_guid from the DB, because we don't otherwise have access to
  // it from here.
  sql::Statement get_cache_guid(db_->GetUniqueStatement(
      "SELECT cache_guid FROM share_info"));
  if (!get_cache_guid.Step()) {
    return false;
  }
  std::string cache_guid = get_cache_guid.ColumnString(0);
  DCHECK(!get_cache_guid.Step());
  DCHECK(get_cache_guid.Succeeded());

  sql::Statement get(db_->GetUniqueStatement(
      "SELECT "
      "  metahandle, "
      "  id, "
      "  specifics, "
      "  is_dir, "
      "  unique_server_tag, "
      "  server_ordinal_in_parent "
      "FROM metas"));

  // Note that we set both the local and server position based on the server
  // position.  We will lose any unsynced local position changes.  Unfortunately,
  // there's nothing we can do to avoid that.  The NEXT_ID / PREV_ID values
  // can't be translated into a UNIQUE_POSITION in a reliable way.
  sql::Statement put(db_->GetCachedStatement(
      SQL_FROM_HERE,
      "UPDATE metas SET"
      "  server_unique_position = ?,"
      "  unique_position = ?,"
      "  unique_bookmark_tag = ?"
      "WHERE metahandle = ?"));

  while (get.Step()) {
    int64 metahandle = get.ColumnInt64(0);

    std::string id_string;
    get.ColumnBlobAsString(1, &id_string);

    sync_pb::EntitySpecifics specifics;
    specifics.ParseFromArray(
        get.ColumnBlob(2), get.ColumnByteLength(2));

    bool is_dir = get.ColumnBool(3);

    std::string server_unique_tag = get.ColumnString(4);

    std::string ordinal_string;
    get.ColumnBlobAsString(5, &ordinal_string);
    NodeOrdinal ordinal(ordinal_string);

    std::string unique_bookmark_tag;

    // We only maintain positions for bookmarks that are not server-defined
    // top-level folders.
    UniquePosition position;
    if (GetModelTypeFromSpecifics(specifics) == BOOKMARKS
        && !(is_dir && !server_unique_tag.empty())) {
      if (id_string.at(0) == 'c') {
        // We found an uncommitted item.  This is rare, but fortunate.  This
        // means we can set the bookmark tag according to the originator client
        // item ID and originator cache guid, because (unlike the other case) we
        // know that this client is the originator.
        unique_bookmark_tag = syncable::GenerateSyncableBookmarkHash(
            cache_guid,
            id_string.substr(1));
      } else {
        // If we've already committed the item, then we don't know who the
        // originator was.  We do not have access to the originator client item
        // ID and originator cache guid at this point.
        //
        // We will base our hash entirely on the server ID instead.  This is
        // incorrect, but at least all clients that undergo this migration step
        // will be incorrect in the same way.
        //
        // To get everyone back into a synced state, we will update the bookmark
        // tag according to the originator_cache_guid and originator_item_id
        // when we see updates for this item.  That should ensure that commonly
        // modified items will end up with the proper tag values eventually.
        unique_bookmark_tag = syncable::GenerateSyncableBookmarkHash(
            std::string(),  // cache_guid left intentionally blank.
            id_string.substr(1));
      }

      int64 int_position = NodeOrdinalToInt64(ordinal);
      position = UniquePosition::FromInt64(int_position, unique_bookmark_tag);
    } else {
      // Leave bookmark_tag and position at their default (invalid) values.
    }

    std::string position_blob;
    position.SerializeToString(&position_blob);
    put.BindBlob(0, position_blob.data(), position_blob.length());
    put.BindBlob(1, position_blob.data(), position_blob.length());
    put.BindBlob(2, unique_bookmark_tag.data(), unique_bookmark_tag.length());
    put.BindInt64(3, metahandle);

    if (!put.Run())
      return false;
    put.Reset(true);
  }

  SetVersion(86);
  needs_column_refresh_ = true;
  return true;
}
bool DirectoryBackingStore::MigrateVersion86To87() {
  // Version 87 adds AttachmentMetadata proto.
  if (!db_->Execute(
          "ALTER TABLE metas ADD COLUMN "
          "attachment_metadata BLOB")) {
    return false;
  }
  SetVersion(87);
  needs_column_refresh_ = true;
  return true;
}
bool DirectoryBackingStore::MigrateVersion87To88() {
  // Version 88 adds the datatype context to the models table.
  if (!db_->Execute("ALTER TABLE models ADD COLUMN context blob"))
    return false;
  SetVersion(88);
  return true;
}
1342 // Version 89 adds server_attachment_metadata.
1344 "ALTER TABLE metas ADD COLUMN "
1345 "server_attachment_metadata BLOB")) {
1349 needs_column_refresh_
= true;
bool DirectoryBackingStore::CreateTables() {
  DVLOG(1) << "First run, creating tables";
  // Create two little tables share_version and share_info
  if (!db_->Execute(
          "CREATE TABLE share_version ("
          "id VARCHAR(128) primary key, data INT)")) {
    return false;
  }

  {
    sql::Statement s(db_->GetUniqueStatement(
        "INSERT INTO share_version VALUES(?, ?)"));
    s.BindString(0, dir_name_);
    s.BindInt(1, kCurrentDBVersion);

    if (!s.Run())
      return false;
  }

  const bool kCreateAsTempShareInfo = false;
  if (!CreateShareInfoTable(kCreateAsTempShareInfo)) {
    return false;
  }

  {
    sql::Statement s(db_->GetUniqueStatement(
        "INSERT INTO share_info VALUES"
        "(?, "  // id
        "?, "   // name
        "?, "   // store_birthday
        "?, "   // db_create_version
        "?, "   // db_create_time
        "-2, "  // next_id
        "?, "   // cache_guid
        // TODO(rlarocque, 124140): Remove notification_state field.
        "?, "   // notification_state
        "?);"));  // bag_of_chips
    s.BindString(0, dir_name_);  // id
    s.BindString(1, dir_name_);  // name
    s.BindString(2, std::string());  // store_birthday
    // TODO(akalin): Remove this unused db_create_version field. (Or
    // actually use it for something.)  http://crbug.com/118356
    s.BindString(3, "Unknown");  // db_create_version
    s.BindInt(4, static_cast<int32>(time(0)));  // db_create_time
    s.BindString(5, GenerateCacheGUID());  // cache_guid
    // TODO(rlarocque, 124140): Remove this unused notification-state field.
    s.BindBlob(6, NULL, 0);  // notification_state
    s.BindBlob(7, NULL, 0);  // bag_of_chips
    if (!s.Run())
      return false;
  }

  if (!CreateModelsTable())
    return false;

  // Create the big metas table.
  if (!CreateMetasTable(false))
    return false;

  {
    // Insert the entry for the root into the metas table.
    const int64 now = TimeToProtoTime(base::Time::Now());
    sql::Statement s(db_->GetUniqueStatement(
        "INSERT INTO metas "
        "( id, metahandle, is_dir, ctime, mtime ) "
        "VALUES ( \"r\", 1, 1, ?, ? )"));
    s.BindInt64(0, now);
    s.BindInt64(1, now);
    if (!s.Run())
      return false;
  }

  return true;
}
bool DirectoryBackingStore::CreateMetasTable(bool is_temporary) {
  string query = "CREATE TABLE ";
  query.append(is_temporary ? "temp_metas" : "metas");
  query.append(ComposeCreateTableColumnSpecs());
  if (!db_->Execute(query.c_str()))
    return false;

  // Create a deleted_metas table to save copies of deleted metas until the
  // deletions are persisted.  For simplicity, don't try to migrate existing
  // data because it's rarely used.
  SafeDropTable("deleted_metas");
  query = "CREATE TABLE deleted_metas ";
  query.append(ComposeCreateTableColumnSpecs());
  return db_->Execute(query.c_str());
}
bool DirectoryBackingStore::CreateV71ModelsTable() {
  // This is an old schema for the Models table, used from versions 71 to 74.
  return db_->Execute(
      "CREATE TABLE models ("
      "model_id BLOB primary key, "
      "last_download_timestamp INT, "
      // Gets set if the syncer ever gets updates from the
      // server and the server returns 0.  Lets us detect the
      // end of the initial sync.
      "initial_sync_ended BOOLEAN default 0)");
}

bool DirectoryBackingStore::CreateV75ModelsTable() {
  // This is an old schema for the Models table, used from versions 75 to 80.
  return db_->Execute(
      "CREATE TABLE models ("
      "model_id BLOB primary key, "
      "progress_marker BLOB, "
      // Gets set if the syncer ever gets updates from the
      // server and the server returns 0.  Lets us detect the
      // end of the initial sync.
      "initial_sync_ended BOOLEAN default 0)");
}

bool DirectoryBackingStore::CreateV81ModelsTable() {
  // This is an old schema for the Models table, used from versions 81 to 87.
  return db_->Execute(
      "CREATE TABLE models ("
      "model_id BLOB primary key, "
      "progress_marker BLOB, "
      // Gets set if the syncer ever gets updates from the
      // server and the server returns 0.  Lets us detect the
      // end of the initial sync.
      "transaction_version BIGINT default 0)");
}

bool DirectoryBackingStore::CreateModelsTable() {
  // This is the current schema for the Models table, from version 88
  // onward.  If you change the schema, you'll probably want to double-check
  // the use of this function in the v84-v85 migration.
  return db_->Execute(
      "CREATE TABLE models ("
      "model_id BLOB primary key, "
      "progress_marker BLOB, "
      // Gets set if the syncer ever gets updates from the
      // server and the server returns 0.  Lets us detect the
      // end of the initial sync.
      "transaction_version BIGINT default 0,"
      "context BLOB)");
}
bool DirectoryBackingStore::CreateShareInfoTable(bool is_temporary) {
  const char* name = is_temporary ? "temp_share_info" : "share_info";
  string query = "CREATE TABLE ";
  query.append(name);
  // This is the current schema for the ShareInfo table, from version 76
  // onward.
  query.append(" ("
      "id TEXT primary key, "
      "name TEXT, "
      "store_birthday TEXT, "
      "db_create_version TEXT, "
      "db_create_time INT, "
      "next_id INT default -2, "
      "cache_guid TEXT, "
      // TODO(rlarocque, 124140): Remove notification_state field.
      "notification_state BLOB, "
      "bag_of_chips BLOB"
      ")");
  return db_->Execute(query.c_str());
}
bool DirectoryBackingStore::CreateShareInfoTableVersion71(
    bool is_temporary) {
  const char* name = is_temporary ? "temp_share_info" : "share_info";
  string query = "CREATE TABLE ";
  query.append(name);
  // This is the schema for the ShareInfo table used from versions 71 to 72.
  query.append(" ("
      "id TEXT primary key, "
      "name TEXT, "
      "store_birthday TEXT, "
      "db_create_version TEXT, "
      "db_create_time INT, "
      "next_id INT default -2, "
      "cache_guid TEXT )");
  return db_->Execute(query.c_str());
}
// This function checks to see if the given list of Metahandles has any nodes
// whose PARENT_ID values refer to ID values that do not actually exist.
// Returns true on success.
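// It makes two passes over |handles_map|: the first collects every ID into a
// set (and flags duplicate IDs), the second checks each non-null PARENT_ID
// against that set.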
bool DirectoryBackingStore::VerifyReferenceIntegrity(
    const Directory::MetahandlesMap* handles_map) {
  TRACE_EVENT0("sync", "SyncDatabaseIntegrityCheck");
  using namespace syncable;
  typedef base::hash_set<std::string> IdsSet;

  IdsSet ids_set;
  bool is_ok = true;

  for (Directory::MetahandlesMap::const_iterator it = handles_map->begin();
       it != handles_map->end(); ++it) {
    EntryKernel* entry = it->second;
    bool is_duplicate_id = !(ids_set.insert(entry->ref(ID).value()).second);
    is_ok = is_ok && !is_duplicate_id;
  }

  IdsSet::iterator end = ids_set.end();
  for (Directory::MetahandlesMap::const_iterator it = handles_map->begin();
       it != handles_map->end(); ++it) {
    EntryKernel* entry = it->second;
    if (!entry->ref(PARENT_ID).IsNull()) {
      bool parent_exists = (ids_set.find(entry->ref(PARENT_ID).value()) != end);
      if (!parent_exists) {
        return false;
      }
    }
  }
  return is_ok;
}
void DirectoryBackingStore::PrepareSaveEntryStatement(
    EntryTable table, sql::Statement* save_statement) {
  if (save_statement->is_valid())
    return;

  string query;
  query.reserve(kUpdateStatementBufferSize);
  switch (table) {
    case METAS_TABLE:
      query.append("INSERT OR REPLACE INTO metas ");
      break;
    case DELETE_JOURNAL_TABLE:
      query.append("INSERT OR REPLACE INTO deleted_metas ");
      break;
  }

  string values;
  values.reserve(kUpdateStatementBufferSize);
  values.append(" VALUES ");
  const char* separator = "( ";
  int i = 0;
  for (i = BEGIN_FIELDS; i < FIELD_COUNT; ++i) {
    query.append(separator);
    values.append(separator);
    separator = ", ";
    query.append(ColumnName(i));
    values.append("?");
  }
  query.append(" ) ");
  values.append(" )");
  query.append(values);
  save_statement->Assign(db_->GetUniqueStatement(
      base::StringPrintf(query.c_str(), "metas").c_str()));
}
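
// The statement assembled by PrepareSaveEntryStatement() has the shape
//   "INSERT OR REPLACE INTO <table> ( <column list> ) VALUES ( ?, ?, ... )"
// with one placeholder per column, in the same order that BindFields() binds
// values.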
// Get page size for the database.
bool DirectoryBackingStore::GetDatabasePageSize(int* page_size) {
  sql::Statement s(db_->GetUniqueStatement("PRAGMA page_size"));
  if (!s.Step())
    return false;
  *page_size = s.ColumnInt(0);
  return true;
}
bool DirectoryBackingStore::IsSyncBackingDatabase32KEnabled() {
  const std::string group_name =
      base::FieldTrialList::FindFullName("SyncBackingDatabase32K");
  return group_name == "Enabled";
}
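
// SQLite only applies a new page_size once the database file is rebuilt,
// which is why IncreasePageSizeTo32K() follows the PRAGMA with a VACUUM.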
bool DirectoryBackingStore::IncreasePageSizeTo32K() {
  if (!db_->Execute("PRAGMA page_size=32768;") || !Vacuum()) {
    return false;
  }
  return true;
}
bool DirectoryBackingStore::Vacuum() {
  DCHECK_EQ(db_->transaction_nesting(), 0);
  if (!db_->Execute("VACUUM;")) {
    return false;
  }
  return true;
}

}  // namespace syncable
}  // namespace syncer