// Copyright 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "sync/syncable/directory_backing_store.h"

#include "build/build_config.h"

#include "base/base64.h"
#include "base/debug/trace_event.h"
#include "base/logging.h"
#include "base/rand_util.h"
#include "base/strings/stringprintf.h"
#include "base/time/time.h"
#include "sql/connection.h"
#include "sql/statement.h"
#include "sql/transaction.h"
#include "sync/internal_api/public/base/node_ordinal.h"
#include "sync/protocol/bookmark_specifics.pb.h"
#include "sync/protocol/sync.pb.h"
#include "sync/syncable/syncable-inl.h"
#include "sync/syncable/syncable_columns.h"
#include "sync/syncable/syncable_util.h"
#include "sync/util/time.h"

using std::string;

namespace syncer {
namespace syncable {

// This just has to be big enough to hold an UPDATE or INSERT statement that
// modifies all the columns in the entry table.
static const string::size_type kUpdateStatementBufferSize = 2048;

// Increment this version whenever updating DB tables.
const int32 kCurrentDBVersion = 89;

// Iterate over the fields of |entry| and bind each to |statement| for
// updating, in the same order the columns appear in ColumnName().
void BindFields(const EntryKernel& entry,
                sql::Statement* statement) {
  int index = 0;
  int i = 0;
  for (i = BEGIN_FIELDS; i < INT64_FIELDS_END; ++i) {
    statement->BindInt64(index++, entry.ref(static_cast<Int64Field>(i)));
  }
  for ( ; i < TIME_FIELDS_END; ++i) {
    statement->BindInt64(index++,
                         TimeToProtoTime(
                             entry.ref(static_cast<TimeField>(i))));
  }
  for ( ; i < ID_FIELDS_END; ++i) {
    statement->BindString(index++, entry.ref(static_cast<IdField>(i)).s_);
  }
  for ( ; i < BIT_FIELDS_END; ++i) {
    statement->BindInt(index++, entry.ref(static_cast<BitField>(i)));
  }
  for ( ; i < STRING_FIELDS_END; ++i) {
    statement->BindString(index++, entry.ref(static_cast<StringField>(i)));
  }
  std::string temp;
  for ( ; i < PROTO_FIELDS_END; ++i) {
    entry.ref(static_cast<ProtoField>(i)).SerializeToString(&temp);
    statement->BindBlob(index++, temp.data(), temp.length());
  }
  for ( ; i < UNIQUE_POSITION_FIELDS_END; ++i) {
    entry.ref(static_cast<UniquePositionField>(i)).SerializeToString(&temp);
    statement->BindBlob(index++, temp.data(), temp.length());
  }
  for (; i < ATTACHMENT_METADATA_FIELDS_END; ++i) {
    entry.ref(static_cast<AttachmentMetadataField>(i)).SerializeToString(&temp);
    statement->BindBlob(index++, temp.data(), temp.length());
  }
}

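// Note on BindFields() above: binding is purely positional. The loop order
// (int64, time, id, bit, string, proto, unique-position, then
// attachment-metadata fields) must stay in lockstep with the column order
// PrepareSaveEntryStatement() builds from ColumnName(); reordering either
// side would silently write values into the wrong columns.
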
// The caller owns the returned EntryKernel*.  Assumes the statement currently
// points to a valid row in the metas table. Returns NULL to indicate that
// it detected a corruption in the data on unpacking.
scoped_ptr<EntryKernel> UnpackEntry(sql::Statement* statement) {
  scoped_ptr<EntryKernel> kernel(new EntryKernel());
  DCHECK_EQ(statement->ColumnCount(), static_cast<int>(FIELD_COUNT));
  int i = 0;
  for (i = BEGIN_FIELDS; i < INT64_FIELDS_END; ++i) {
    kernel->put(static_cast<Int64Field>(i), statement->ColumnInt64(i));
  }
  for ( ; i < TIME_FIELDS_END; ++i) {
    kernel->put(static_cast<TimeField>(i),
                ProtoTimeToTime(statement->ColumnInt64(i)));
  }
  for ( ; i < ID_FIELDS_END; ++i) {
    kernel->mutable_ref(static_cast<IdField>(i)).s_ =
        statement->ColumnString(i);
  }
  for ( ; i < BIT_FIELDS_END; ++i) {
    kernel->put(static_cast<BitField>(i), (0 != statement->ColumnInt(i)));
  }
  for ( ; i < STRING_FIELDS_END; ++i) {
    kernel->put(static_cast<StringField>(i),
                statement->ColumnString(i));
  }
  for ( ; i < PROTO_FIELDS_END; ++i) {
    kernel->mutable_ref(static_cast<ProtoField>(i)).ParseFromArray(
        statement->ColumnBlob(i), statement->ColumnByteLength(i));
  }
  for ( ; i < UNIQUE_POSITION_FIELDS_END; ++i) {
    std::string temp;
    statement->ColumnBlobAsString(i, &temp);

    sync_pb::UniquePosition proto;
    if (!proto.ParseFromString(temp)) {
      DVLOG(1) << "Unpacked invalid position. Assuming the DB is corrupt";
      return scoped_ptr<EntryKernel>();
    }

    kernel->mutable_ref(static_cast<UniquePositionField>(i)) =
        UniquePosition::FromProto(proto);
  }
  for (; i < ATTACHMENT_METADATA_FIELDS_END; ++i) {
    kernel->mutable_ref(static_cast<AttachmentMetadataField>(i)).ParseFromArray(
        statement->ColumnBlob(i), statement->ColumnByteLength(i));
  }

  // Sanity check on positions. We risk strange and rare crashes if our
  // assumptions about unique position values are broken.
  if (kernel->ShouldMaintainPosition() &&
      !kernel->ref(UNIQUE_POSITION).IsValid()) {
    DVLOG(1) << "Unpacked invalid position on an entity that should have a "
             << "valid position. Assuming the DB is corrupt.";
    return scoped_ptr<EntryKernel>();
  }

  return kernel.Pass();
}

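// A minimal sketch of the intended calling pattern, modeled on
// LoadEntries() below; a NULL result must be treated as fatal corruption
// rather than a skippable row:
//
//   while (s.Step()) {
//     scoped_ptr<EntryKernel> kernel = UnpackEntry(&s);
//     if (!kernel)
//       return false;  // Corrupt database; abort the whole load.
//     ...
//   }
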
string ComposeCreateTableColumnSpecs() {
  const ColumnSpec* begin = g_metas_columns;
  const ColumnSpec* end = g_metas_columns + arraysize(g_metas_columns);
  string query;
  query.reserve(kUpdateStatementBufferSize);
  char separator = '(';
  for (const ColumnSpec* column = begin; column != end; ++column) {
    query.push_back(separator);
    separator = ',';
    query.append(column->name);
    query.push_back(' ');
    query.append(column->spec);
  }
  query.push_back(')');
  return query;
}

void AppendColumnList(std::string* output) {
  const char* joiner = " ";
  // Be explicit in SELECT order to match up with UnpackEntry.
  for (int i = BEGIN_FIELDS; i < FIELD_COUNT; ++i) {
    output->append(joiner);
    output->append(ColumnName(i));
    joiner = ", ";
  }
}

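// For illustration, AppendColumnList() produces a leading-space,
// comma-separated column list, so "SELECT" + output + " FROM metas" expands
// to something like (exact names depend on g_metas_columns):
//
//   SELECT metahandle, base_version, server_version, ... FROM metas
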
///////////////////////////////////////////////////////////////////////////////
// DirectoryBackingStore implementation.

DirectoryBackingStore::DirectoryBackingStore(const string& dir_name)
    : db_(new sql::Connection()),
      dir_name_(dir_name),
      needs_column_refresh_(false) {
  db_->set_histogram_tag("SyncDirectory");
  db_->set_page_size(4096);
  db_->set_cache_size(32);
}

DirectoryBackingStore::DirectoryBackingStore(const string& dir_name,
                                             sql::Connection* db)
    : db_(db),
      dir_name_(dir_name),
      needs_column_refresh_(false) {
}

DirectoryBackingStore::~DirectoryBackingStore() {
}

bool DirectoryBackingStore::DeleteEntries(EntryTable from,
                                          const MetahandleSet& handles) {
  if (handles.empty())
    return true;

  sql::Statement statement;
  // Call GetCachedStatement() separately to get different statements for
  // different tables.
  switch (from) {
    case METAS_TABLE:
      statement.Assign(db_->GetCachedStatement(
          SQL_FROM_HERE, "DELETE FROM metas WHERE metahandle = ?"));
      break;
    case DELETE_JOURNAL_TABLE:
      statement.Assign(db_->GetCachedStatement(
          SQL_FROM_HERE, "DELETE FROM deleted_metas WHERE metahandle = ?"));
      break;
  }

  for (MetahandleSet::const_iterator i = handles.begin(); i != handles.end();
       ++i) {
    statement.BindInt64(0, *i);
    if (!statement.Run())
      return false;
    statement.Reset(true);
  }
  return true;
}

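// Note the Reset(true) at the bottom of the loop above: a sql::Statement
// must be reset (and its bindings cleared) before it can be re-run with the
// next metahandle. Reusing one cached statement this way avoids re-parsing
// the DELETE for every handle.
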
bool DirectoryBackingStore::SaveChanges(
    const Directory::SaveChangesSnapshot& snapshot) {
  DCHECK(CalledOnValidThread());
  DCHECK(db_->is_open());

  // Back out early if there is nothing to write.
  bool save_info =
      (Directory::KERNEL_SHARE_INFO_DIRTY == snapshot.kernel_info_status);
  if (snapshot.dirty_metas.empty() && snapshot.metahandles_to_purge.empty() &&
      snapshot.delete_journals.empty() &&
      snapshot.delete_journals_to_purge.empty() && !save_info) {
    return true;
  }

  sql::Transaction transaction(db_.get());
  if (!transaction.Begin())
    return false;

  PrepareSaveEntryStatement(METAS_TABLE, &save_meta_statment_);
  for (EntryKernelSet::const_iterator i = snapshot.dirty_metas.begin();
       i != snapshot.dirty_metas.end(); ++i) {
    DCHECK((*i)->is_dirty());
    if (!SaveEntryToDB(&save_meta_statment_, **i))
      return false;
  }

  if (!DeleteEntries(METAS_TABLE, snapshot.metahandles_to_purge))
    return false;

  PrepareSaveEntryStatement(DELETE_JOURNAL_TABLE,
                            &save_delete_journal_statment_);
  for (EntryKernelSet::const_iterator i = snapshot.delete_journals.begin();
       i != snapshot.delete_journals.end(); ++i) {
    if (!SaveEntryToDB(&save_delete_journal_statment_, **i))
      return false;
  }

  if (!DeleteEntries(DELETE_JOURNAL_TABLE, snapshot.delete_journals_to_purge))
    return false;

  if (save_info) {
    const Directory::PersistedKernelInfo& info = snapshot.kernel_info;
    sql::Statement s1(db_->GetCachedStatement(
            SQL_FROM_HERE,
            "UPDATE share_info "
            "SET store_birthday = ?, "
            "next_id = ?, "
            "bag_of_chips = ?"));
    s1.BindString(0, info.store_birthday);
    s1.BindInt64(1, info.next_id);
    s1.BindBlob(2, info.bag_of_chips.data(), info.bag_of_chips.size());

    if (!s1.Run())
      return false;
    DCHECK_EQ(db_->GetLastChangeCount(), 1);

    sql::Statement s2(db_->GetCachedStatement(
            SQL_FROM_HERE,
            "INSERT OR REPLACE "
            "INTO models (model_id, "
                         "progress_marker, "
                         "transaction_version, "
                         "context) "
            "VALUES (?, ?, ?, ?)"));

    ModelTypeSet protocol_types = ProtocolTypes();
    for (ModelTypeSet::Iterator iter = protocol_types.First(); iter.Good();
         iter.Inc()) {
      ModelType type = iter.Get();
      // We persist not ModelType but rather a protobuf-derived ID.
      string model_id = ModelTypeEnumToModelId(type);
      string progress_marker;
      info.download_progress[type].SerializeToString(&progress_marker);
      s2.BindBlob(0, model_id.data(), model_id.length());
      s2.BindBlob(1, progress_marker.data(), progress_marker.length());
      s2.BindInt64(2, info.transaction_version[type]);
      string context;
      info.datatype_context[type].SerializeToString(&context);
      s2.BindBlob(3, context.data(), context.length());
      if (!s2.Run())
        return false;
      DCHECK_EQ(db_->GetLastChangeCount(), 1);
      s2.Reset(true);
    }
  }

  return transaction.Commit();
}

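// Everything in SaveChanges() runs inside a single sql::Transaction: if any
// entry write, purge, or share_info/models update fails, Commit() is never
// reached and the implicit rollback leaves the on-disk directory at its
// previous consistent snapshot.
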
bool DirectoryBackingStore::InitializeTables() {
  sql::Transaction transaction(db_.get());
  if (!transaction.Begin())
    return false;

  int version_on_disk = GetVersion();

  // Upgrade from version 67. Version 67 was widely distributed as the original
  // Bookmark Sync release. Version 68 removed unique naming.
  if (version_on_disk == 67) {
    if (MigrateVersion67To68())
      version_on_disk = 68;
  }

  // Version 69 introduced additional datatypes.
  if (version_on_disk == 68) {
    if (MigrateVersion68To69())
      version_on_disk = 69;
  }

  if (version_on_disk == 69) {
    if (MigrateVersion69To70())
      version_on_disk = 70;
  }

  // Version 71 changed the sync progress information to be per-datatype.
  if (version_on_disk == 70) {
    if (MigrateVersion70To71())
      version_on_disk = 71;
  }

  // Version 72 removed extended attributes, a legacy way to do extensible
  // key/value information, stored in their own table.
  if (version_on_disk == 71) {
    if (MigrateVersion71To72())
      version_on_disk = 72;
  }

  // Version 73 added a field for notification state.
  if (version_on_disk == 72) {
    if (MigrateVersion72To73())
      version_on_disk = 73;
  }

  // Version 74 added state for the autofill migration.
  if (version_on_disk == 73) {
    if (MigrateVersion73To74())
      version_on_disk = 74;
  }

  // Version 75 migrated from int64-based timestamps to per-datatype tokens.
  if (version_on_disk == 74) {
    if (MigrateVersion74To75())
      version_on_disk = 75;
  }

  // Version 76 removed all (5) autofill migration related columns.
  if (version_on_disk == 75) {
    if (MigrateVersion75To76())
      version_on_disk = 76;
  }

  // Version 77 standardized all time fields to ms since the Unix
  // epoch.
  if (version_on_disk == 76) {
    if (MigrateVersion76To77())
      version_on_disk = 77;
  }

  // Version 78 added the column base_server_specifics to the metas table.
  if (version_on_disk == 77) {
    if (MigrateVersion77To78())
      version_on_disk = 78;
  }

  // Version 79 migration is a one-time fix for some users in a bad state.
  if (version_on_disk == 78) {
    if (MigrateVersion78To79())
      version_on_disk = 79;
  }

  // Version 80 migration is adding the bag_of_chips column.
  if (version_on_disk == 79) {
    if (MigrateVersion79To80())
      version_on_disk = 80;
  }

  // Version 81 replaces the int64 server_position_in_parent_field
  // with a blob server_ordinal_in_parent field.
  if (version_on_disk == 80) {
    if (MigrateVersion80To81())
      version_on_disk = 81;
  }

  // Version 82 migration added transaction_version column per data type.
  if (version_on_disk == 81) {
    if (MigrateVersion81To82())
      version_on_disk = 82;
  }

  // Version 83 migration added transaction_version column per sync entry.
  if (version_on_disk == 82) {
    if (MigrateVersion82To83())
      version_on_disk = 83;
  }

  // Version 84 migration added deleted_metas table.
  if (version_on_disk == 83) {
    if (MigrateVersion83To84())
      version_on_disk = 84;
  }

  // Version 85 migration removes the initial_sync_ended bits.
  if (version_on_disk == 84) {
    if (MigrateVersion84To85())
      version_on_disk = 85;
  }

  // Version 86 migration converts bookmarks to the unique positioning system.
  // It also introduces a new field to store a unique ID for each bookmark.
  if (version_on_disk == 85) {
    if (MigrateVersion85To86())
      version_on_disk = 86;
  }

  // Version 87 migration adds a collection of attachment ids per sync entry.
  if (version_on_disk == 86) {
    if (MigrateVersion86To87())
      version_on_disk = 87;
  }

  // Version 88 migration adds datatype contexts to the models table.
  if (version_on_disk == 87) {
    if (MigrateVersion87To88())
      version_on_disk = 88;
  }

  // Version 89 migration adds server attachment metadata to the metas table.
  if (version_on_disk == 88) {
    if (MigrateVersion88To89())
      version_on_disk = 89;
  }

  // If one of the migrations requested it, drop columns that aren't current.
  // It's only safe to do this after migrating all the way to the current
  // version.
  if (version_on_disk == kCurrentDBVersion && needs_column_refresh_) {
    if (!RefreshColumns())
      version_on_disk = 0;
  }

  // A final, alternative catch-all migration to simply re-sync everything.
  if (version_on_disk != kCurrentDBVersion) {
    if (version_on_disk > kCurrentDBVersion)
      return false;

    // Fallback (re-sync everything) migration path.
    DVLOG(1) << "Old/null sync database, version " << version_on_disk;
    // Delete the existing database (if any), and create a fresh one.
    DropAllTables();
    if (!CreateTables())
      return false;
  }

  sql::Statement s(db_->GetUniqueStatement(
          "SELECT db_create_version, db_create_time FROM share_info"));
  if (!s.Step())
    return false;
  string db_create_version = s.ColumnString(0);
  int db_create_time = s.ColumnInt(1);
  DVLOG(1) << "DB created at " << db_create_time << " by version "
           << db_create_version;

  return transaction.Commit();
}

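// The migration ladder above is strictly sequential: for example, a disk at
// version 85 runs MigrateVersion85To86() through MigrateVersion88To89() in
// order, and only then performs the single RefreshColumns() pass that those
// steps requested via needs_column_refresh_. A database from a newer version
// than kCurrentDBVersion makes InitializeTables() fail outright, while an
// unrecognized old version falls through to the drop-and-recreate path.
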
// This function drops unused columns by creating a new table that contains
// only the currently used columns, then copying all rows from the old table
// into this new one. The tables are then rearranged so the new replaces the
// old.
bool DirectoryBackingStore::RefreshColumns() {
  DCHECK(needs_column_refresh_);

  // Create a new table named temp_metas.
  SafeDropTable("temp_metas");
  if (!CreateMetasTable(true))
    return false;

  // Populate temp_metas from metas.
  //
  // At this point, the metas table may contain columns belonging to obsolete
  // schema versions. This statement explicitly lists only the columns that
  // belong to the current schema version, so the obsolete columns will be
  // effectively dropped once we rename temp_metas over top of metas.
  std::string query = "INSERT INTO temp_metas (";
  AppendColumnList(&query);
  query.append(") SELECT ");
  AppendColumnList(&query);
  query.append(" FROM metas");
  if (!db_->Execute(query.c_str()))
    return false;

  // Drop metas.
  SafeDropTable("metas");

  // Rename temp_metas -> metas.
  if (!db_->Execute("ALTER TABLE temp_metas RENAME TO metas"))
    return false;

  // Repeat the process for share_info.
  SafeDropTable("temp_share_info");
  if (!CreateShareInfoTable(true))
    return false;

  // TODO(rlarocque, 124140): Remove notification_state.
  if (!db_->Execute(
          "INSERT INTO temp_share_info (id, name, store_birthday, "
          "db_create_version, db_create_time, next_id, cache_guid,"
          "notification_state, bag_of_chips) "
          "SELECT id, name, store_birthday, db_create_version, "
          "db_create_time, next_id, cache_guid, notification_state, "
          "bag_of_chips "
          "FROM share_info"))
    return false;

  SafeDropTable("share_info");
  if (!db_->Execute("ALTER TABLE temp_share_info RENAME TO share_info"))
    return false;

  needs_column_refresh_ = false;
  return true;
}

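// The copy-into-temp-then-rename dance above is necessary because the SQLite
// this code was written against has no "ALTER TABLE ... DROP COLUMN";
// rebuilding the table from an explicit column list is the standard way to
// shed obsolete columns.
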
bool DirectoryBackingStore::LoadEntries(
    Directory::MetahandlesMap* handles_map) {
  string select;
  select.reserve(kUpdateStatementBufferSize);
  select.append("SELECT ");
  AppendColumnList(&select);
  select.append(" FROM metas");

  sql::Statement s(db_->GetUniqueStatement(select.c_str()));

  while (s.Step()) {
    scoped_ptr<EntryKernel> kernel = UnpackEntry(&s);
    // A null kernel is evidence of external data corruption.
    if (!kernel)
      return false;

    int64 handle = kernel->ref(META_HANDLE);
    (*handles_map)[handle] = kernel.release();
  }
  return s.Succeeded();
}

bool DirectoryBackingStore::LoadDeleteJournals(
    JournalIndex* delete_journals) {
  string select;
  select.reserve(kUpdateStatementBufferSize);
  select.append("SELECT ");
  AppendColumnList(&select);
  select.append(" FROM deleted_metas");

  sql::Statement s(db_->GetUniqueStatement(select.c_str()));

  while (s.Step()) {
    scoped_ptr<EntryKernel> kernel = UnpackEntry(&s);
    // A null kernel is evidence of external data corruption.
    if (!kernel)
      return false;

    delete_journals->insert(kernel.release());
  }
  return s.Succeeded();
}

bool DirectoryBackingStore::LoadInfo(Directory::KernelLoadInfo* info) {
  {
    sql::Statement s(
        db_->GetUniqueStatement(
            "SELECT store_birthday, next_id, cache_guid, bag_of_chips "
            "FROM share_info"));
    if (!s.Step())
      return false;

    info->kernel_info.store_birthday = s.ColumnString(0);
    info->kernel_info.next_id = s.ColumnInt64(1);
    info->cache_guid = s.ColumnString(2);
    s.ColumnBlobAsString(3, &(info->kernel_info.bag_of_chips));

    // Verify there was only one row returned.
    DCHECK(!s.Step());
    DCHECK(s.Succeeded());
  }

  {
    sql::Statement s(
        db_->GetUniqueStatement(
            "SELECT model_id, progress_marker, "
            "transaction_version, context FROM models"));
    while (s.Step()) {
      ModelType type = ModelIdToModelTypeEnum(s.ColumnBlob(0),
                                              s.ColumnByteLength(0));
      if (type != UNSPECIFIED && type != TOP_LEVEL_FOLDER) {
        info->kernel_info.download_progress[type].ParseFromArray(
            s.ColumnBlob(1), s.ColumnByteLength(1));
        info->kernel_info.transaction_version[type] = s.ColumnInt64(2);
        info->kernel_info.datatype_context[type].ParseFromArray(
            s.ColumnBlob(3), s.ColumnByteLength(3));
      }
    }
    if (!s.Succeeded())
      return false;
  }

  {
    sql::Statement s(
        db_->GetUniqueStatement(
            "SELECT MAX(metahandle) FROM metas"));
    if (!s.Step())
      return false;

    info->max_metahandle = s.ColumnInt64(0);

    // Verify only one row was returned.
    DCHECK(!s.Step());
    DCHECK(s.Succeeded());
  }
  return true;
}

bool DirectoryBackingStore::SaveEntryToDB(sql::Statement* save_statement,
                                          const EntryKernel& entry) {
  save_statement->Reset(true);
  BindFields(entry, save_statement);
  return save_statement->Run();
}

bool DirectoryBackingStore::DropDeletedEntries() {
  if (!db_->Execute("DELETE FROM metas "
                    "WHERE is_del > 0 "
                    "AND is_unsynced < 1 "
                    "AND is_unapplied_update < 1")) {
    return false;
  }
  if (!db_->Execute("DELETE FROM metas "
                    "WHERE is_del > 0 "
                    "AND id LIKE 'c%'")) {
    return false;
  }
  return true;
}

bool DirectoryBackingStore::SafeDropTable(const char* table_name) {
  string query = "DROP TABLE IF EXISTS ";
  query.append(table_name);
  return db_->Execute(query.c_str());
}

void DirectoryBackingStore::DropAllTables() {
  SafeDropTable("metas");
  SafeDropTable("temp_metas");
  SafeDropTable("share_info");
  SafeDropTable("temp_share_info");
  SafeDropTable("share_version");
  SafeDropTable("extended_attributes");
  SafeDropTable("models");
  SafeDropTable("temp_models");
  needs_column_refresh_ = false;
}

ModelType DirectoryBackingStore::ModelIdToModelTypeEnum(
    const void* data, int size) {
  sync_pb::EntitySpecifics specifics;
  if (!specifics.ParseFromArray(data, size))
    return UNSPECIFIED;
  return GetModelTypeFromSpecifics(specifics);
}

string DirectoryBackingStore::ModelTypeEnumToModelId(ModelType model_type) {
  sync_pb::EntitySpecifics specifics;
  AddDefaultFieldValue(model_type, &specifics);
  return specifics.SerializeAsString();
}

std::string DirectoryBackingStore::GenerateCacheGUID() {
  // Generate a GUID with 128 bits of randomness.
  const int kGuidBytes = 128 / 8;
  std::string guid;
  base::Base64Encode(base::RandBytesAsString(kGuidBytes), &guid);
  return guid;
}

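// Worked example: 128 bits is 16 random bytes, and base64 emits 4 characters
// per 3 input bytes, so the resulting GUID is a 24-character string
// (22 significant characters plus "==" padding).
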
bool DirectoryBackingStore::MigrateToSpecifics(
    const char* old_columns,
    const char* specifics_column,
    void (*handler_function)(sql::Statement* old_value_query,
                             int old_value_column,
                             sync_pb::EntitySpecifics* mutable_new_value)) {
  std::string query_sql = base::StringPrintf(
      "SELECT metahandle, %s, %s FROM metas", specifics_column, old_columns);
  std::string update_sql = base::StringPrintf(
      "UPDATE metas SET %s = ? WHERE metahandle = ?", specifics_column);

  sql::Statement query(db_->GetUniqueStatement(query_sql.c_str()));
  sql::Statement update(db_->GetUniqueStatement(update_sql.c_str()));

  while (query.Step()) {
    int64 metahandle = query.ColumnInt64(0);
    std::string new_value_bytes;
    query.ColumnBlobAsString(1, &new_value_bytes);
    sync_pb::EntitySpecifics new_value;
    new_value.ParseFromString(new_value_bytes);
    handler_function(&query, 2, &new_value);
    new_value.SerializeToString(&new_value_bytes);

    update.BindBlob(0, new_value_bytes.data(), new_value_bytes.length());
    update.BindInt64(1, metahandle);
    if (!update.Run())
      return false;
    update.Reset(true);
  }
  return query.Succeeded();
}

bool DirectoryBackingStore::SetVersion(int version) {
  sql::Statement s(db_->GetCachedStatement(
          SQL_FROM_HERE, "UPDATE share_version SET data = ?"));
  s.BindInt(0, version);

  return s.Run();
}

int DirectoryBackingStore::GetVersion() {
  if (!db_->DoesTableExist("share_version"))
    return 0;

  sql::Statement statement(db_->GetUniqueStatement(
      "SELECT data FROM share_version"));
  if (statement.Step()) {
    return statement.ColumnInt(0);
  } else {
    return 0;
  }
}

bool DirectoryBackingStore::MigrateVersion67To68() {
  // This change simply removed three columns:
  //   string NAME
  //   string UNSANITIZED_NAME
  //   string SERVER_NAME
  // No data migration is necessary, but we should do a column refresh.
  SetVersion(68);
  needs_column_refresh_ = true;
  return true;
}

bool DirectoryBackingStore::MigrateVersion69To70() {
  // Added "unique_client_tag", renamed "singleton_tag" to unique_server_tag.
  SetVersion(70);
  if (!db_->Execute(
          "ALTER TABLE metas ADD COLUMN unique_server_tag varchar"))
    return false;
  if (!db_->Execute(
          "ALTER TABLE metas ADD COLUMN unique_client_tag varchar"))
    return false;
  needs_column_refresh_ = true;

  if (!db_->Execute(
          "UPDATE metas SET unique_server_tag = singleton_tag"))
    return false;

  return true;
}

// Callback passed to MigrateToSpecifics for the v68->v69 migration.  See
// MigrateVersion68To69().
void EncodeBookmarkURLAndFavicon(sql::Statement* old_value_query,
                                 int old_value_column,
                                 sync_pb::EntitySpecifics* mutable_new_value) {
  // Extract data from the column trio we expect.
  bool old_is_bookmark_object = old_value_query->ColumnBool(old_value_column);
  std::string old_url = old_value_query->ColumnString(old_value_column + 1);
  std::string old_favicon;
  old_value_query->ColumnBlobAsString(old_value_column + 2, &old_favicon);
  bool old_is_dir = old_value_query->ColumnBool(old_value_column + 3);

  if (old_is_bookmark_object) {
    sync_pb::BookmarkSpecifics* bookmark_data =
        mutable_new_value->mutable_bookmark();
    if (!old_is_dir) {
      bookmark_data->set_url(old_url);
      bookmark_data->set_favicon(old_favicon);
    }
  }
}

bool DirectoryBackingStore::MigrateVersion68To69() {
  // In Version 68, there were columns on table 'metas':
  //   string BOOKMARK_URL
  //   string SERVER_BOOKMARK_URL
  //   blob BOOKMARK_FAVICON
  //   blob SERVER_BOOKMARK_FAVICON
  // In version 69, these columns went away in favor of storing
  // a serialized EntrySpecifics protobuf in the columns:
  //   protobuf blob SPECIFICS
  //   protobuf blob SERVER_SPECIFICS
  // For bookmarks, EntrySpecifics is extended as per
  // bookmark_specifics.proto. This migration converts bookmarks from the
  // former scheme to the latter scheme.

  // First, add the two new columns to the schema.
  if (!db_->Execute(
          "ALTER TABLE metas ADD COLUMN specifics blob"))
    return false;
  if (!db_->Execute(
          "ALTER TABLE metas ADD COLUMN server_specifics blob"))
    return false;

  // Next, fold data from the old columns into the new protobuf columns.
  if (!MigrateToSpecifics(("is_bookmark_object, bookmark_url, "
                           "bookmark_favicon, is_dir"),
                          "specifics",
                          &EncodeBookmarkURLAndFavicon)) {
    return false;
  }
  if (!MigrateToSpecifics(("server_is_bookmark_object, "
                           "server_bookmark_url, "
                           "server_bookmark_favicon, "
                           "server_is_dir"),
                          "server_specifics",
                          &EncodeBookmarkURLAndFavicon)) {
    return false;
  }

  // Lastly, fix up the "Google Chrome" folder, which is of the
  // TOP_LEVEL_FOLDER ModelType: it shouldn't have BookmarkSpecifics.
  if (!db_->Execute(
          "UPDATE metas SET specifics = NULL, server_specifics = NULL WHERE "
          "singleton_tag IN ('google_chrome')"))
    return false;

  SetVersion(69);
  needs_column_refresh_ = true;  // Trigger deletion of old columns.
  return true;
}

// In version 71, the columns 'initial_sync_ended' and 'last_sync_timestamp'
// were removed from the share_info table. They were replaced by
// the 'models' table, which has these values on a per-datatype basis.
bool DirectoryBackingStore::MigrateVersion70To71() {
  if (!CreateV71ModelsTable())
    return false;

  // Move data from the old share_info columns to the new models table.
  {
    sql::Statement fetch(db_->GetUniqueStatement(
        "SELECT last_sync_timestamp, initial_sync_ended FROM share_info"));
    if (!fetch.Step())
      return false;

    int64 last_sync_timestamp = fetch.ColumnInt64(0);
    bool initial_sync_ended = fetch.ColumnBool(1);

    // Verify there were no additional rows returned.
    DCHECK(!fetch.Step());
    DCHECK(fetch.Succeeded());

    sql::Statement update(db_->GetUniqueStatement(
        "INSERT INTO models (model_id, "
        "last_download_timestamp, initial_sync_ended) VALUES (?, ?, ?)"));
    string bookmark_model_id = ModelTypeEnumToModelId(BOOKMARKS);
    update.BindBlob(0, bookmark_model_id.data(), bookmark_model_id.size());
    update.BindInt64(1, last_sync_timestamp);
    update.BindBool(2, initial_sync_ended);

    if (!update.Run())
      return false;
  }

  // Drop the columns from the old share_info table via a temp table.
  const bool kCreateAsTempShareInfo = true;

  if (!CreateShareInfoTableVersion71(kCreateAsTempShareInfo))
    return false;
  if (!db_->Execute(
          "INSERT INTO temp_share_info (id, name, store_birthday, "
          "db_create_version, db_create_time, next_id, cache_guid) "
          "SELECT id, name, store_birthday, db_create_version, "
          "db_create_time, next_id, cache_guid FROM share_info"))
    return false;
  SafeDropTable("share_info");
  if (!db_->Execute(
          "ALTER TABLE temp_share_info RENAME TO share_info"))
    return false;
  SetVersion(71);
  return true;
}

bool DirectoryBackingStore::MigrateVersion71To72() {
  // Version 72 removed a table 'extended_attributes', whose
  // contents didn't matter.
  SafeDropTable("extended_attributes");
  SetVersion(72);
  return true;
}

bool DirectoryBackingStore::MigrateVersion72To73() {
  // Version 73 added one column to the table 'share_info': notification_state.
  if (!db_->Execute(
          "ALTER TABLE share_info ADD COLUMN notification_state BLOB"))
    return false;
  SetVersion(73);
  return true;
}

bool DirectoryBackingStore::MigrateVersion73To74() {
  // Version 74 added the following columns to the table 'share_info':
  //   autofill_migration_state
  //   bookmarks_added_during_autofill_migration
  //   autofill_migration_time
  //   autofill_entries_added_during_migration
  //   autofill_profiles_added_during_migration

  if (!db_->Execute(
          "ALTER TABLE share_info ADD COLUMN "
          "autofill_migration_state INT default 0"))
    return false;

  if (!db_->Execute(
          "ALTER TABLE share_info ADD COLUMN "
          "bookmarks_added_during_autofill_migration "
          "INT default 0"))
    return false;

  if (!db_->Execute(
          "ALTER TABLE share_info ADD COLUMN autofill_migration_time "
          "INT default 0"))
    return false;

  if (!db_->Execute(
          "ALTER TABLE share_info ADD COLUMN "
          "autofill_entries_added_during_migration "
          "INT default 0"))
    return false;

  if (!db_->Execute(
          "ALTER TABLE share_info ADD COLUMN "
          "autofill_profiles_added_during_migration "
          "INT default 0"))
    return false;

  SetVersion(74);
  return true;
}

bool DirectoryBackingStore::MigrateVersion74To75() {
  // In version 74, there was a table 'models':
  //   blob model_id (entity specifics, primary key)
  //   int last_download_timestamp
  //   boolean initial_sync_ended
  // In version 75, we deprecated the integer-valued last_download_timestamp,
  // using instead a protobuf-valued progress_marker field:
  //   blob progress_marker
  // The progress_marker values are initialized from the value of
  // last_download_timestamp, thereby preserving the download state.

  // Move aside the old table and create a new empty one at the current schema.
  if (!db_->Execute("ALTER TABLE models RENAME TO temp_models"))
    return false;
  if (!CreateV75ModelsTable())
    return false;

  sql::Statement query(db_->GetUniqueStatement(
      "SELECT model_id, last_download_timestamp, initial_sync_ended "
      "FROM temp_models"));

  sql::Statement update(db_->GetUniqueStatement(
      "INSERT INTO models (model_id, "
      "progress_marker, initial_sync_ended) VALUES (?, ?, ?)"));

  while (query.Step()) {
    ModelType type = ModelIdToModelTypeEnum(query.ColumnBlob(0),
                                            query.ColumnByteLength(0));
    if (type != UNSPECIFIED) {
      // Set the |timestamp_token_for_migration| on a new
      // DataTypeProgressMarker, using the old value of last_download_timestamp.
      // The server will turn this into a real token on our behalf the next
      // time we check for updates.
      sync_pb::DataTypeProgressMarker progress_marker;
      progress_marker.set_data_type_id(
          GetSpecificsFieldNumberFromModelType(type));
      progress_marker.set_timestamp_token_for_migration(query.ColumnInt64(1));
      std::string progress_blob;
      progress_marker.SerializeToString(&progress_blob);

      update.BindBlob(0, query.ColumnBlob(0), query.ColumnByteLength(0));
      update.BindBlob(1, progress_blob.data(), progress_blob.length());
      update.BindBool(2, query.ColumnBool(2));
      if (!update.Run())
        return false;
      update.Reset(true);
    }
  }
  if (!query.Succeeded())
    return false;

  // Drop the old table.
  SafeDropTable("temp_models");

  SetVersion(75);
  return true;
}

bool DirectoryBackingStore::MigrateVersion75To76() {
  // This change removed five columns:
  //   autofill_migration_state
  //   bookmarks_added_during_autofill_migration
  //   autofill_migration_time
  //   autofill_entries_added_during_migration
  //   autofill_profiles_added_during_migration
  // No data migration is necessary, but we should do a column refresh.
  SetVersion(76);
  needs_column_refresh_ = true;
  return true;
}

bool DirectoryBackingStore::MigrateVersion76To77() {
  // This migration changes the format of stored timestamps to ms since
  // the Unix epoch.
#if defined(OS_WIN)
// On Windows, we used to store timestamps in FILETIME format (100s of
// ns since Jan 1, 1601).  Magic numbers taken from
// http://stackoverflow.com/questions/5398557/
//     java-library-for-dealing-with-win32-filetime
#define TO_UNIX_TIME_MS(x) #x " = " #x " / 10000 - 11644473600000"
#else
// On other platforms, we used to store timestamps in time_t format (s
// since the Unix epoch).
#define TO_UNIX_TIME_MS(x) #x " = " #x " * 1000"
#endif
  sql::Statement update_timestamps(db_->GetUniqueStatement(
          "UPDATE metas SET "
          TO_UNIX_TIME_MS(mtime) ", "
          TO_UNIX_TIME_MS(server_mtime) ", "
          TO_UNIX_TIME_MS(ctime) ", "
          TO_UNIX_TIME_MS(server_ctime)));
#undef TO_UNIX_TIME_MS
  if (!update_timestamps.Run())
    return false;
  SetVersion(77);
  return true;
}

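// Worked example for the Windows branch above: FILETIME counts 100 ns
// intervals since 1601-01-01, so x / 10000 converts to milliseconds, and
// subtracting 11644473600000 (the number of milliseconds between 1601-01-01
// and 1970-01-01) rebases the value onto the Unix epoch. E.g. a FILETIME of
// 116444736010000000 becomes 116444736010000000 / 10000 - 11644473600000
// = 1000 ms, i.e. one second after the epoch.
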
bool DirectoryBackingStore::MigrateVersion77To78() {
  // Version 78 added one column to table 'metas': base_server_specifics.
  if (!db_->Execute(
          "ALTER TABLE metas ADD COLUMN base_server_specifics BLOB")) {
    return false;
  }
  SetVersion(78);
  return true;
}

bool DirectoryBackingStore::MigrateVersion78To79() {
  // Some users are stuck with a DB that causes them to reuse existing IDs. We
  // perform this one-time fixup on all users to help the few that are stuck.
  // See crbug.com/142987 for details.
  if (!db_->Execute(
          "UPDATE share_info SET next_id = next_id - 65536")) {
    return false;
  }
  SetVersion(79);
  return true;
}

bool DirectoryBackingStore::MigrateVersion79To80() {
  if (!db_->Execute(
          "ALTER TABLE share_info ADD COLUMN bag_of_chips BLOB"))
    return false;
  sql::Statement update(db_->GetUniqueStatement(
          "UPDATE share_info SET bag_of_chips = ?"));
  // An empty message is serialized to an empty string.
  update.BindBlob(0, NULL, 0);
  if (!update.Run())
    return false;
  SetVersion(80);
  return true;
}

bool DirectoryBackingStore::MigrateVersion80To81() {
  if (!db_->Execute(
          "ALTER TABLE metas ADD COLUMN server_ordinal_in_parent BLOB"))
    return false;

  sql::Statement get_positions(db_->GetUniqueStatement(
      "SELECT metahandle, server_position_in_parent FROM metas"));

  sql::Statement put_ordinals(db_->GetUniqueStatement(
      "UPDATE metas SET server_ordinal_in_parent = ? "
      "WHERE metahandle = ?"));

  while (get_positions.Step()) {
    int64 metahandle = get_positions.ColumnInt64(0);
    int64 position = get_positions.ColumnInt64(1);

    // Copy the value; taking a reference here would dangle once the
    // temporary NodeOrdinal goes out of scope.
    const std::string ordinal = Int64ToNodeOrdinal(position).ToInternalValue();
    put_ordinals.BindBlob(0, ordinal.data(), ordinal.length());
    put_ordinals.BindInt64(1, metahandle);

    if (!put_ordinals.Run())
      return false;
    put_ordinals.Reset(true);
  }

  SetVersion(81);
  needs_column_refresh_ = true;
  return true;
}

bool DirectoryBackingStore::MigrateVersion81To82() {
  if (!db_->Execute(
          "ALTER TABLE models ADD COLUMN transaction_version BIGINT default 0"))
    return false;
  sql::Statement update(db_->GetUniqueStatement(
          "UPDATE models SET transaction_version = 0"));
  if (!update.Run())
    return false;
  SetVersion(82);
  return true;
}

bool DirectoryBackingStore::MigrateVersion82To83() {
  // Version 83 added transaction_version on sync node.
  if (!db_->Execute(
          "ALTER TABLE metas ADD COLUMN transaction_version BIGINT default 0"))
    return false;
  sql::Statement update(db_->GetUniqueStatement(
          "UPDATE metas SET transaction_version = 0"));
  if (!update.Run())
    return false;
  SetVersion(83);
  return true;
}

bool DirectoryBackingStore::MigrateVersion83To84() {
  // Version 84 added deleted_metas table to store deleted metas until we know
  // for sure that the deletions are persisted in native models.
  string query = "CREATE TABLE deleted_metas ";
  query.append(ComposeCreateTableColumnSpecs());
  if (!db_->Execute(query.c_str()))
    return false;
  SetVersion(84);
  return true;
}

bool DirectoryBackingStore::MigrateVersion84To85() {
  // Version 85 removes the initial_sync_ended flag.
  if (!db_->Execute("ALTER TABLE models RENAME TO temp_models"))
    return false;
  if (!CreateV81ModelsTable())
    return false;
  if (!db_->Execute("INSERT INTO models SELECT "
                    "model_id, progress_marker, transaction_version "
                    "FROM temp_models")) {
    return false;
  }
  SafeDropTable("temp_models");

  SetVersion(85);
  return true;
}

bool DirectoryBackingStore::MigrateVersion85To86() {
  // Version 86 removes both server ordinals and local NEXT_ID, PREV_ID and
  // SERVER_{POSITION,ORDINAL}_IN_PARENT and replaces them with UNIQUE_POSITION
  // and SERVER_UNIQUE_POSITION.
  if (!db_->Execute("ALTER TABLE metas ADD COLUMN "
                    "server_unique_position BLOB")) {
    return false;
  }
  if (!db_->Execute("ALTER TABLE metas ADD COLUMN "
                    "unique_position BLOB")) {
    return false;
  }
  if (!db_->Execute("ALTER TABLE metas ADD COLUMN "
                    "unique_bookmark_tag VARCHAR")) {
    return false;
  }

  // Fetch the cache_guid from the DB, because we don't otherwise have access
  // to it from here.
  sql::Statement get_cache_guid(db_->GetUniqueStatement(
      "SELECT cache_guid FROM share_info"));
  if (!get_cache_guid.Step()) {
    return false;
  }
  std::string cache_guid = get_cache_guid.ColumnString(0);
  DCHECK(!get_cache_guid.Step());
  DCHECK(get_cache_guid.Succeeded());

  sql::Statement get(db_->GetUniqueStatement(
      "SELECT "
      "  metahandle, "
      "  id, "
      "  specifics, "
      "  is_dir, "
      "  unique_server_tag, "
      "  server_ordinal_in_parent "
      "FROM metas"));

  // Note that we set both the local and server position based on the server
  // position. We will lose any unsynced local position changes. Unfortunately,
  // there's nothing we can do to avoid that. The NEXT_ID / PREV_ID values
  // can't be translated into a UNIQUE_POSITION in a reliable way.
  sql::Statement put(db_->GetCachedStatement(
      SQL_FROM_HERE,
      "UPDATE metas SET"
      "  server_unique_position = ?,"
      "  unique_position = ?,"
      "  unique_bookmark_tag = ? "
      "WHERE metahandle = ?"));

  while (get.Step()) {
    int64 metahandle = get.ColumnInt64(0);

    std::string id_string;
    get.ColumnBlobAsString(1, &id_string);

    sync_pb::EntitySpecifics specifics;
    specifics.ParseFromArray(
        get.ColumnBlob(2), get.ColumnByteLength(2));

    bool is_dir = get.ColumnBool(3);

    std::string server_unique_tag = get.ColumnString(4);

    std::string ordinal_string;
    get.ColumnBlobAsString(5, &ordinal_string);
    NodeOrdinal ordinal(ordinal_string);

    std::string unique_bookmark_tag;

    // We only maintain positions for bookmarks that are not server-defined
    // top-level folders.
    UniquePosition position;
    if (GetModelTypeFromSpecifics(specifics) == BOOKMARKS
        && !(is_dir && !server_unique_tag.empty())) {
      if (id_string.at(0) == 'c') {
        // We found an uncommitted item. This is rare, but fortunate. This
        // means we can set the bookmark tag according to the originator client
        // item ID and originator cache guid, because (unlike the other case)
        // we know that this client is the originator.
        unique_bookmark_tag = syncable::GenerateSyncableBookmarkHash(
            cache_guid,
            id_string.substr(1));
      } else {
        // If we've already committed the item, then we don't know who the
        // originator was. We do not have access to the originator client item
        // ID and originator cache guid at this point.
        //
        // We will base our hash entirely on the server ID instead. This is
        // incorrect, but at least all clients that undergo this migration step
        // will be incorrect in the same way.
        //
        // To get everyone back into a synced state, we will update the
        // bookmark tag according to the originator_cache_guid and
        // originator_item_id when we see updates for this item. That should
        // ensure that commonly modified items will end up with the proper tag
        // values eventually.
        unique_bookmark_tag = syncable::GenerateSyncableBookmarkHash(
            std::string(),  // cache_guid left intentionally blank.
            id_string.substr(1));
      }

      int64 int_position = NodeOrdinalToInt64(ordinal);
      position = UniquePosition::FromInt64(int_position, unique_bookmark_tag);
    } else {
      // Leave bookmark_tag and position at their default (invalid) values.
    }

    std::string position_blob;
    position.SerializeToString(&position_blob);
    put.BindBlob(0, position_blob.data(), position_blob.length());
    put.BindBlob(1, position_blob.data(), position_blob.length());
    put.BindBlob(2, unique_bookmark_tag.data(), unique_bookmark_tag.length());
    put.BindInt64(3, metahandle);

    if (!put.Run())
      return false;
    put.Reset(true);
  }

  SetVersion(86);
  needs_column_refresh_ = true;
  return true;
}

bool DirectoryBackingStore::MigrateVersion86To87() {
  // Version 87 adds AttachmentMetadata proto.
  if (!db_->Execute(
          "ALTER TABLE metas ADD COLUMN "
          "attachment_metadata BLOB")) {
    return false;
  }
  SetVersion(87);
  needs_column_refresh_ = true;
  return true;
}

bool DirectoryBackingStore::MigrateVersion87To88() {
  // Version 88 adds the datatype context to the models table.
  if (!db_->Execute("ALTER TABLE models ADD COLUMN context blob"))
    return false;
  SetVersion(88);
  return true;
}

bool DirectoryBackingStore::MigrateVersion88To89() {
  // Version 89 adds server_attachment_metadata.
  if (!db_->Execute(
          "ALTER TABLE metas ADD COLUMN "
          "server_attachment_metadata BLOB")) {
    return false;
  }
  SetVersion(89);
  needs_column_refresh_ = true;
  return true;
}

bool DirectoryBackingStore::CreateTables() {
  DVLOG(1) << "First run, creating tables";
  // Create two little tables share_version and share_info.
  if (!db_->Execute(
          "CREATE TABLE share_version ("
          "id VARCHAR(128) primary key, data INT)")) {
    return false;
  }

  {
    sql::Statement s(db_->GetUniqueStatement(
            "INSERT INTO share_version VALUES(?, ?)"));
    s.BindString(0, dir_name_);
    s.BindInt(1, kCurrentDBVersion);

    if (!s.Run())
      return false;
  }

  const bool kCreateAsTempShareInfo = false;
  if (!CreateShareInfoTable(kCreateAsTempShareInfo)) {
    return false;
  }

  {
    sql::Statement s(db_->GetUniqueStatement(
            "INSERT INTO share_info VALUES"
            "(?, "   // id
            "?, "    // name
            "?, "    // store_birthday
            "?, "    // db_create_version
            "?, "    // db_create_time
            "-2, "   // next_id
            "?, "    // cache_guid
            // TODO(rlarocque, 124140): Remove notification_state field.
            "?, "    // notification_state
            "?);"));  // bag_of_chips
    s.BindString(0, dir_name_);                 // id
    s.BindString(1, dir_name_);                 // name
    s.BindString(2, std::string());             // store_birthday
    // TODO(akalin): Remove this unused db_create_version field. (Or
    // actually use it for something.) http://crbug.com/118356
    s.BindString(3, "Unknown");                 // db_create_version
    s.BindInt(4, static_cast<int32>(time(0)));  // db_create_time
    s.BindString(5, GenerateCacheGUID());       // cache_guid
    // TODO(rlarocque, 124140): Remove this unused notification-state field.
    s.BindBlob(6, NULL, 0);                     // notification_state
    s.BindBlob(7, NULL, 0);                     // bag_of_chips

    if (!s.Run())
      return false;
  }

  if (!CreateModelsTable())
    return false;

  // Create the big metas table.
  if (!CreateMetasTable(false))
    return false;

  {
    // Insert the entry for the root into the metas table.
    const int64 now = TimeToProtoTime(base::Time::Now());
    sql::Statement s(db_->GetUniqueStatement(
            "INSERT INTO metas "
            "( id, metahandle, is_dir, ctime, mtime ) "
            "VALUES ( \"r\", 1, 1, ?, ? )"));
    s.BindInt64(0, now);
    s.BindInt64(1, now);

    if (!s.Run())
      return false;
  }

  return true;
}

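// A freshly created database therefore contains exactly one metas row: the
// root folder, with the reserved id "r" and metahandle 1, under which all
// synced entities are (directly or transitively) parented.
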
bool DirectoryBackingStore::CreateMetasTable(bool is_temporary) {
  string query = "CREATE TABLE ";
  query.append(is_temporary ? "temp_metas" : "metas");
  query.append(ComposeCreateTableColumnSpecs());
  if (!db_->Execute(query.c_str()))
    return false;

  // Create a deleted_metas table to save copies of deleted metas until the
  // deletions are persisted. For simplicity, don't try to migrate existing
  // data because it's rarely used.
  SafeDropTable("deleted_metas");
  query = "CREATE TABLE deleted_metas ";
  query.append(ComposeCreateTableColumnSpecs());
  return db_->Execute(query.c_str());
}

bool DirectoryBackingStore::CreateV71ModelsTable() {
  // This is an old schema for the Models table, used from versions 71 to 74.
  return db_->Execute(
      "CREATE TABLE models ("
      "model_id BLOB primary key, "
      "last_download_timestamp INT, "
      // Gets set if the syncer ever gets updates from the
      // server and the server returns 0.  Lets us detect the
      // end of the initial sync.
      "initial_sync_ended BOOLEAN default 0)");
}

bool DirectoryBackingStore::CreateV75ModelsTable() {
  // This is an old schema for the Models table, used from versions 75 to 80.
  return db_->Execute(
      "CREATE TABLE models ("
      "model_id BLOB primary key, "
      "progress_marker BLOB, "
      // Gets set if the syncer ever gets updates from the
      // server and the server returns 0.  Lets us detect the
      // end of the initial sync.
      "initial_sync_ended BOOLEAN default 0)");
}

bool DirectoryBackingStore::CreateV81ModelsTable() {
  // This is an old schema for the Models table, used from versions 81 to 87.
  return db_->Execute(
      "CREATE TABLE models ("
      "model_id BLOB primary key, "
      "progress_marker BLOB, "
      // Tracks the per-datatype sync transaction version; added to the
      // schema in version 82.
      "transaction_version BIGINT default 0)");
}

bool DirectoryBackingStore::CreateModelsTable() {
  // This is the current schema for the Models table, from version 88
  // onward. If you change the schema, you'll probably want to double-check
  // the use of this function in the v84-v85 migration.
  return db_->Execute(
      "CREATE TABLE models ("
      "model_id BLOB primary key, "
      "progress_marker BLOB, "
      // Tracks the per-datatype sync transaction version.
      "transaction_version BIGINT default 0,"
      // Context information for the datatype, added in version 88.
      "context BLOB)");
}

bool DirectoryBackingStore::CreateShareInfoTable(bool is_temporary) {
  const char* name = is_temporary ? "temp_share_info" : "share_info";
  string query = "CREATE TABLE ";
  query.append(name);
  // This is the current schema for the ShareInfo table, from version 76
  // onward.
  query.append(" ("
      "id TEXT primary key, "
      "name TEXT, "
      "store_birthday TEXT, "
      "db_create_version TEXT, "
      "db_create_time INT, "
      "next_id INT default -2, "
      "cache_guid TEXT, "
      // TODO(rlarocque, 124140): Remove notification_state field.
      "notification_state BLOB, "
      "bag_of_chips BLOB"
      ")");
  return db_->Execute(query.c_str());
}

bool DirectoryBackingStore::CreateShareInfoTableVersion71(
    bool is_temporary) {
  const char* name = is_temporary ? "temp_share_info" : "share_info";
  string query = "CREATE TABLE ";
  query.append(name);
  // This is the schema for the ShareInfo table used from versions 71 to 72.
  query.append(" ("
      "id TEXT primary key, "
      "name TEXT, "
      "store_birthday TEXT, "
      "db_create_version TEXT, "
      "db_create_time INT, "
      "next_id INT default -2, "
      "cache_guid TEXT )");
  return db_->Execute(query.c_str());
}

// This function checks to see if the given list of Metahandles has any nodes
// whose PARENT_ID values refer to ID values that do not actually exist.
// Returns true on success.
bool DirectoryBackingStore::VerifyReferenceIntegrity(
    const Directory::MetahandlesMap* handles_map) {
  TRACE_EVENT0("sync", "SyncDatabaseIntegrityCheck");
  using namespace syncable;
  typedef base::hash_set<std::string> IdsSet;

  IdsSet ids_set;
  bool is_ok = true;

  for (Directory::MetahandlesMap::const_iterator it = handles_map->begin();
       it != handles_map->end(); ++it) {
    EntryKernel* entry = it->second;
    bool is_duplicate_id = !(ids_set.insert(entry->ref(ID).value()).second);
    is_ok = is_ok && !is_duplicate_id;
  }

  IdsSet::iterator end = ids_set.end();
  for (Directory::MetahandlesMap::const_iterator it = handles_map->begin();
       it != handles_map->end(); ++it) {
    EntryKernel* entry = it->second;
    if (!entry->ref(PARENT_ID).IsNull()) {
      bool parent_exists = (ids_set.find(entry->ref(PARENT_ID).value()) != end);
      if (!parent_exists) {
        return false;
      }
    }
  }
  return is_ok;
}

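// Both passes above do one hash-set operation per entry, so the whole
// integrity check is O(n) in the number of loaded handles. It reports
// failure either for duplicate IDs (is_ok goes false in the first pass) or
// for a PARENT_ID that names an ID absent from ids_set.
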
void DirectoryBackingStore::PrepareSaveEntryStatement(
    EntryTable table, sql::Statement* save_statement) {
  if (save_statement->is_valid())
    return;

  string query;
  query.reserve(kUpdateStatementBufferSize);
  switch (table) {
    case METAS_TABLE:
      query.append("INSERT OR REPLACE INTO metas ");
      break;
    case DELETE_JOURNAL_TABLE:
      query.append("INSERT OR REPLACE INTO deleted_metas ");
      break;
  }

  string values;
  values.reserve(kUpdateStatementBufferSize);
  values.append(" VALUES ");
  const char* separator = "( ";
  for (int i = BEGIN_FIELDS; i < FIELD_COUNT; ++i) {
    query.append(separator);
    values.append(separator);
    separator = ", ";
    query.append(ColumnName(i));
    values.append("?");
  }
  query.append(" ) ");
  values.append(" )");
  query.append(values);
  save_statement->Assign(db_->GetUniqueStatement(query.c_str()));
}

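// For illustration, with the standard column set the statement prepared
// above comes out shaped like (abbreviated):
//
//   INSERT OR REPLACE INTO metas ( metahandle, base_version, ... )
//   VALUES ( ?, ?, ... )
//
// which is exactly the positional layout BindFields() assumes when it binds
// one value per '?' in field order.
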
}  // namespace syncable
}  // namespace syncer