From 3723846d9bf63d802a8cae98fbff565a7b7dc526 Mon Sep 17 00:00:00 2001
From: dmurph
Date: Fri, 18 Sep 2015 22:25:26 -0700
Subject: [PATCH] [IndexedDB] Adding traces, perf tests

BUG=525141
CQ_EXTRA_TRYBOTS=tryserver.chromium.perf:linux_perf_bisect;tryserver.chromium.perf:win_perf_bisect;tryserver.chromium.perf:android_nexus5_perf_bisect

Review URL: https://codereview.chromium.org/1238393003

Cr-Commit-Position: refs/heads/master@{#349853}
---
 chrome/test/data/indexeddb/perf_test.js            |   6 +-
 .../browser/indexed_db/indexed_db_backing_store.cc |  22 +-
 content/browser/indexed_db/indexed_db_callbacks.cc |   2 +
 content/browser/indexed_db/indexed_db_database.cc  |  31 +-
 content/browser/indexed_db/indexed_db_tracing.h    |   5 +
 .../browser/indexed_db/indexed_db_transaction.cc   |   8 +-
 .../browser/indexed_db/leveldb/leveldb_database.cc |   3 +
 .../indexed_db/leveldb/leveldb_transaction.cc      |   3 +
 tools/perf/benchmarks/indexeddb_perf.py            |  49 ++-
 tools/perf/page_sets/indexeddb_endure_page.py      |  46 +++
 .../page_sets/indexeddb_perf/endure/app-worker.js  | 224 +++++++++++
 .../perf/page_sets/indexeddb_perf/endure/app.html  |  21 +
 tools/perf/page_sets/indexeddb_perf/endure/app.js  | 274 +++++++++++++
 tools/perf/page_sets/indexeddb_perf/perf_shared.js | 435 +++++++++++++++++++++
 tools/perf/page_sets/indexeddb_perf/perf_test.html |  14 +
 .../perf/page_sets/indexeddb_perf}/perf_test.js    | 183 +++++++--
 .../telemetry/testing/test_page_test_results.py    |   9 +
 tools/telemetry/telemetry/timeline/counter.py      |   4 +
 .../web_perf/metrics/indexeddb_timeline.py         |  80 ++++
 .../web_perf/metrics/trace_event_stats.py          | 128 ++++++
 .../web_perf/metrics/trace_event_stats_unittest.py | 146 +++++++
 .../web_perf/timeline_based_measurement.py         |   4 +-
 22 files changed, 1646 insertions(+), 51 deletions(-)
 create mode 100644 tools/perf/page_sets/indexeddb_endure_page.py
 create mode 100644 tools/perf/page_sets/indexeddb_perf/endure/app-worker.js
 create mode 100644 tools/perf/page_sets/indexeddb_perf/endure/app.html
 create mode 100644 tools/perf/page_sets/indexeddb_perf/endure/app.js
 create mode 100644 tools/perf/page_sets/indexeddb_perf/perf_shared.js
 create mode 100644 tools/perf/page_sets/indexeddb_perf/perf_test.html
 copy {chrome/test/data/indexeddb => tools/perf/page_sets/indexeddb_perf}/perf_test.js (79%)
 create mode 100644 tools/telemetry/telemetry/web_perf/metrics/indexeddb_timeline.py
 create mode 100644 tools/telemetry/telemetry/web_perf/metrics/trace_event_stats.py
 create mode 100644 tools/telemetry/telemetry/web_perf/metrics/trace_event_stats_unittest.py

diff --git a/chrome/test/data/indexeddb/perf_test.js b/chrome/test/data/indexeddb/perf_test.js
index 853fbbfdf9e8..bf0f6e853d45 100644
--- a/chrome/test/data/indexeddb/perf_test.js
+++ b/chrome/test/data/indexeddb/perf_test.js
@@ -93,13 +93,16 @@ var tests = [
 ];
 
 var currentTest = 0;
+var done = false;
 
 function test() {
   runNextTest();
 }
 
+var testFilter;
+
 function runNextTest() {
-  var filter = window.location.hash.slice(1);
+  var filter = testFilter || window.location.hash.slice(1);
   var test, f;
   while (currentTest < tests.length) {
     test = tests[currentTest];
@@ -122,6 +125,7 @@ function onAllTestsComplete() {
   var overallDuration = window.performance.now() - overallTestStartTime;
   automation.addResult("OverallTestDuration", overallDuration);
   automation.setDone();
+  done = true;
 }
 
 // This is the only test that includes database creation and deletion in its
diff --git a/content/browser/indexed_db/indexed_db_backing_store.cc b/content/browser/indexed_db/indexed_db_backing_store.cc
index
eff43e0f0d32..7eb73aaafc5c 100644 --- a/content/browser/indexed_db/indexed_db_backing_store.cc +++ b/content/browser/indexed_db/indexed_db_backing_store.cc @@ -565,6 +565,7 @@ template static leveldb::Status GetBlobJournal(const StringPiece& key, TransactionType* transaction, BlobJournalType* journal) { + IDB_TRACE("IndexedDBBackingStore::GetBlobJournal"); std::string data; bool found = false; leveldb::Status s = transaction->Get(key, &data, &found); @@ -666,6 +667,7 @@ static leveldb::Status MergeDatabaseIntoBlobJournal( LevelDBDirectTransaction* transaction, const std::string& key, int64 database_id) { + IDB_TRACE("IndexedDBBackingStore::MergeDatabaseIntoBlobJournal"); BlobJournalType journal; leveldb::Status s = GetBlobJournal(key, transaction, &journal); if (!s.ok()) @@ -1474,11 +1476,14 @@ leveldb::Status IndexedDBBackingStore::DeleteDatabase( metadata.id, DatabaseMetaDataKey::ORIGIN_NAME); const std::string stop_key = DatabaseMetaDataKey::Encode( metadata.id + 1, DatabaseMetaDataKey::ORIGIN_NAME); - scoped_ptr it = db_->CreateIterator(); - for (s = it->Seek(start_key); - s.ok() && it->IsValid() && CompareKeys(it->Key(), stop_key) < 0; - s = it->Next()) - transaction->Remove(it->Key()); + { + IDB_TRACE("IndexedDBBackingStore::DeleteDatabase.DeleteEntries"); + scoped_ptr it = db_->CreateIterator(); + for (s = it->Seek(start_key); + s.ok() && it->IsValid() && CompareKeys(it->Key(), stop_key) < 0; + s = it->Next()) + transaction->Remove(it->Key()); + } if (!s.ok()) { INTERNAL_WRITE_ERROR_UNTESTED(DELETE_DATABASE); return s; @@ -2692,6 +2697,7 @@ bool IndexedDBBackingStore::RemoveBlobDirectory(int64 database_id) const { leveldb::Status IndexedDBBackingStore::CleanUpBlobJournalEntries( const BlobJournalType& journal) const { + IDB_TRACE("IndexedDBBackingStore::CleanUpBlobJournalEntries"); if (journal.empty()) return leveldb::Status::OK(); for (const auto& entry : journal) { @@ -2712,6 +2718,7 @@ leveldb::Status IndexedDBBackingStore::CleanUpBlobJournalEntries( leveldb::Status IndexedDBBackingStore::CleanUpBlobJournal( const std::string& level_db_key) const { + IDB_TRACE("IndexedDBBackingStore::CleanUpBlobJournal"); DCHECK(!committing_transaction_count_); leveldb::Status s; scoped_refptr journal_transaction = @@ -4223,6 +4230,7 @@ leveldb::Status IndexedDBBackingStore::Transaction::CommitPhaseTwo() { BlobJournalType primary_journal, live_journal, saved_primary_journal, dead_blobs; if (!blob_change_map_.empty()) { + IDB_TRACE("IndexedDBBackingStore::Transaction.BlobJournal"); // Read the persisted states of the primary/live blob journals, // so that they can be updated correctly by the transaction. scoped_refptr journal_transaction = @@ -4317,6 +4325,8 @@ class IndexedDBBackingStore::Transaction::BlobWriteCallbackWrapper scoped_refptr callback) : transaction_(transaction), callback_(callback) {} void Run(bool succeeded) override { + IDB_ASYNC_TRACE_END("IndexedDBBackingStore::Transaction::WriteNewBlobs", + transaction_); callback_->Run(succeeded); if (succeeded) // Else it's already been deleted during rollback. 
transaction_->chained_blob_writer_ = NULL; @@ -4336,6 +4346,8 @@ void IndexedDBBackingStore::Transaction::WriteNewBlobs( BlobEntryKeyValuePairVec* new_blob_entries, WriteDescriptorVec* new_files_to_write, scoped_refptr callback) { + IDB_ASYNC_TRACE_BEGIN("IndexedDBBackingStore::Transaction::WriteNewBlobs", + this); DCHECK_GT(new_files_to_write->size(), 0UL); DCHECK_GT(database_id_, 0); for (auto& blob_entry_iter : *new_blob_entries) { diff --git a/content/browser/indexed_db/indexed_db_callbacks.cc b/content/browser/indexed_db/indexed_db_callbacks.cc index bfd0a8a0d06b..16bb523757bc 100644 --- a/content/browser/indexed_db/indexed_db_callbacks.cc +++ b/content/browser/indexed_db/indexed_db_callbacks.cc @@ -19,6 +19,7 @@ #include "content/browser/indexed_db/indexed_db_database_error.h" #include "content/browser/indexed_db/indexed_db_metadata.h" #include "content/browser/indexed_db/indexed_db_return_value.h" +#include "content/browser/indexed_db/indexed_db_tracing.h" #include "content/browser/indexed_db/indexed_db_value.h" #include "content/common/indexed_db/indexed_db_constants.h" #include "content/common/indexed_db/indexed_db_messages.h" @@ -244,6 +245,7 @@ static bool CreateAllBlobs( const std::vector& blob_info, std::vector* blob_or_file_info, scoped_refptr dispatcher_host) { + IDB_TRACE("IndexedDBCallbacks::CreateAllBlobs"); DCHECK_EQ(blob_info.size(), blob_or_file_info->size()); size_t i; if (!dispatcher_host->blob_storage_context()) diff --git a/content/browser/indexed_db/indexed_db_database.cc b/content/browser/indexed_db/indexed_db_database.cc index 31d0e6922451..5334541c20a7 100644 --- a/content/browser/indexed_db/indexed_db_database.cc +++ b/content/browser/indexed_db/indexed_db_database.cc @@ -1066,19 +1066,22 @@ void IndexedDBDatabase::PutOperation(scoped_ptr params, error); return; } - - for (size_t i = 0; i < index_writers.size(); ++i) { - IndexWriter* index_writer = index_writers[i]; - index_writer->WriteIndexKeys(record_identifier, - backing_store_.get(), - transaction->BackingStoreTransaction(), - id(), - params->object_store_id); + { + IDB_TRACE1("IndexedDBDatabase::PutOperation.UpdateIndexes", "txn.id", + transaction->id()); + for (size_t i = 0; i < index_writers.size(); ++i) { + IndexWriter* index_writer = index_writers[i]; + index_writer->WriteIndexKeys(record_identifier, backing_store_.get(), + transaction->BackingStoreTransaction(), id(), + params->object_store_id); + } } if (object_store.auto_increment && params->put_mode != blink::WebIDBPutModeCursorUpdate && key->type() == WebIDBKeyTypeNumber) { + IDB_TRACE1("IndexedDBDatabase::PutOperation.AutoIncrement", "txn.id", + transaction->id()); leveldb::Status s = UpdateKeyGenerator(backing_store_.get(), transaction, id(), @@ -1095,8 +1098,11 @@ void IndexedDBDatabase::PutOperation(scoped_ptr params, return; } } - - params->callbacks->OnSuccess(*key); + { + IDB_TRACE1("IndexedDBDatabase::PutOperation.Callbacks", "txn.id", + transaction->id()); + params->callbacks->OnSuccess(*key); + } } void IndexedDBDatabase::SetIndexKeys(int64 transaction_id, @@ -1178,7 +1184,6 @@ void IndexedDBDatabase::SetIndexKeys(int64 transaction_id, void IndexedDBDatabase::SetIndexesReady(int64 transaction_id, int64, const std::vector& index_ids) { - IDB_TRACE1("IndexedDBDatabase::SetIndexesReady", "txn.id", transaction_id); IndexedDBTransaction* transaction = GetTransaction(transaction_id); if (!transaction) return; @@ -1194,9 +1199,6 @@ void IndexedDBDatabase::SetIndexesReady(int64 transaction_id, void IndexedDBDatabase::SetIndexesReadyOperation( 
size_t index_count, IndexedDBTransaction* transaction) { - IDB_TRACE1("IndexedDBDatabase::SetIndexesReadyOperation", - "txn.id", - transaction->id()); for (size_t i = 0; i < index_count; ++i) transaction->DidCompletePreemptiveEvent(); } @@ -1555,6 +1557,7 @@ void IndexedDBDatabase::VersionChangeOperation( void IndexedDBDatabase::TransactionFinished(IndexedDBTransaction* transaction, bool committed) { + IDB_TRACE1("IndexedDBTransaction::TransactionFinished", "txn.id", id()); DCHECK(transactions_.find(transaction->id()) != transactions_.end()); DCHECK_EQ(transactions_[transaction->id()], transaction); transactions_.erase(transaction->id()); diff --git a/content/browser/indexed_db/indexed_db_tracing.h b/content/browser/indexed_db/indexed_db_tracing.h index 489f157bf569..843c18fc202b 100644 --- a/content/browser/indexed_db/indexed_db_tracing.h +++ b/content/browser/indexed_db/indexed_db_tracing.h @@ -10,4 +10,9 @@ #define IDB_TRACE1(a, arg1_name, arg1_val) \ TRACE_EVENT1("IndexedDB", (a), (arg1_name), (arg1_val)); +#define IDB_ASYNC_TRACE_BEGIN(a, id) \ + TRACE_EVENT_ASYNC_BEGIN0("IndexedDB", (a), (id)); +#define IDB_ASYNC_TRACE_END(a, id) \ + TRACE_EVENT_ASYNC_END0("IndexedDB", (a), (id)); + #endif // CONTENT_BROWSER_INDEXED_DB_INDEXED_DB_TRACING_H_ diff --git a/content/browser/indexed_db/indexed_db_transaction.cc b/content/browser/indexed_db/indexed_db_transaction.cc index 7e4a9f38c7d3..36faa51da0f1 100644 --- a/content/browser/indexed_db/indexed_db_transaction.cc +++ b/content/browser/indexed_db/indexed_db_transaction.cc @@ -315,7 +315,12 @@ leveldb::Status IndexedDBTransaction::CommitPhaseTwo() { if (committed) { abort_task_stack_.clear(); - callbacks_->OnComplete(id_); + { + IDB_TRACE1( + "IndexedDBTransaction::CommitPhaseTwo.TransactionCompleteCallbacks", + "txn.id", id()); + callbacks_->OnComplete(id_); + } database_->TransactionFinished(this, true); } else { while (!abort_task_stack_.empty()) @@ -409,6 +414,7 @@ void IndexedDBTransaction::Timeout() { } void IndexedDBTransaction::CloseOpenCursors() { + IDB_TRACE1("IndexedDBTransaction::CloseOpenCursors", "txn.id", id()); for (auto* cursor : open_cursors_) cursor->Close(); open_cursors_.clear(); diff --git a/content/browser/indexed_db/leveldb/leveldb_database.cc b/content/browser/indexed_db/leveldb/leveldb_database.cc index 2c18f1f5170f..7efd98625c4a 100644 --- a/content/browser/indexed_db/leveldb/leveldb_database.cc +++ b/content/browser/indexed_db/leveldb/leveldb_database.cc @@ -17,6 +17,7 @@ #include "base/strings/utf_string_conversions.h" #include "base/sys_info.h" #include "content/browser/indexed_db/indexed_db_class_factory.h" +#include "content/browser/indexed_db/indexed_db_tracing.h" #include "content/browser/indexed_db/leveldb/leveldb_comparator.h" #include "content/browser/indexed_db/leveldb/leveldb_env.h" #include "content/browser/indexed_db/leveldb/leveldb_iterator_impl.h" @@ -273,6 +274,7 @@ leveldb::Status LevelDBDatabase::Open(const base::FilePath& file_name, const LevelDBComparator* comparator, scoped_ptr* result, bool* is_disk_full) { + IDB_TRACE("LevelDBDatabase::Open"); base::TimeTicks begin_time = base::TimeTicks::Now(); scoped_ptr comparator_adapter( @@ -423,6 +425,7 @@ const LevelDBComparator* LevelDBDatabase::Comparator() const { void LevelDBDatabase::Compact(const base::StringPiece& start, const base::StringPiece& stop) { + IDB_TRACE("LevelDBDatabase::Compact"); const leveldb::Slice start_slice = MakeSlice(start); const leveldb::Slice stop_slice = MakeSlice(stop); // NULL batch means just wait for earlier writes 
to be done diff --git a/content/browser/indexed_db/leveldb/leveldb_transaction.cc b/content/browser/indexed_db/leveldb/leveldb_transaction.cc index a9e710933228..91a7956d7aab 100644 --- a/content/browser/indexed_db/leveldb/leveldb_transaction.cc +++ b/content/browser/indexed_db/leveldb/leveldb_transaction.cc @@ -7,6 +7,7 @@ #include "base/logging.h" #include "base/metrics/histogram.h" #include "base/time/time.h" +#include "content/browser/indexed_db/indexed_db_tracing.h" #include "content/browser/indexed_db/leveldb/leveldb_database.h" #include "content/browser/indexed_db/leveldb/leveldb_write_batch.h" #include "third_party/leveldatabase/src/include/leveldb/db.h" @@ -88,6 +89,7 @@ leveldb::Status LevelDBTransaction::Get(const StringPiece& key, leveldb::Status LevelDBTransaction::Commit() { DCHECK(!finished_); + IDB_TRACE("LevelDBTransaction::Commit"); if (data_.empty()) { finished_ = true; @@ -482,6 +484,7 @@ void LevelDBDirectTransaction::Remove(const StringPiece& key) { leveldb::Status LevelDBDirectTransaction::Commit() { DCHECK(!finished_); + IDB_TRACE("LevelDBDirectTransaction::Commit"); leveldb::Status s = db_->Write(*write_batch_); if (s.ok()) { diff --git a/tools/perf/benchmarks/indexeddb_perf.py b/tools/perf/benchmarks/indexeddb_perf.py index ae1d435b45e0..6ebdfd38f8d7 100644 --- a/tools/perf/benchmarks/indexeddb_perf.py +++ b/tools/perf/benchmarks/indexeddb_perf.py @@ -24,15 +24,23 @@ import os from core import perf_benchmark -from telemetry.core import util from telemetry import page as page_module -from telemetry.page import page_test from telemetry import story +from telemetry.core import util +from telemetry.page import page_test from telemetry.value import scalar from metrics import memory from metrics import power +import page_sets + +from telemetry.timeline import tracing_category_filter +from telemetry.web_perf import timeline_based_measurement + + +IDB_CATEGORY = 'IndexedDB' +TIMELINE_REQUIRED_CATEGORY = 'blink.console' class _IndexedDbMeasurement(page_test.PageTest): def __init__(self): @@ -54,8 +62,7 @@ class _IndexedDbMeasurement(page_test.PageTest): def ValidateAndMeasurePage(self, page, tab, results): tab.WaitForDocumentReadyStateToBeComplete() - tab.WaitForJavaScriptExpression( - 'window.document.cookie.indexOf("__done=1") >= 0', 600) + tab.WaitForJavaScriptExpression('window.done', 600) self._power_metric.Stop(page, tab) self._memory_metric.Stop(page, tab) @@ -82,7 +89,8 @@ class _IndexedDbMeasurement(page_test.PageTest): memory.MemoryMetric.CustomizeBrowserOptions(options) power.PowerMetric.CustomizeBrowserOptions(options) -class IndexedDb(perf_benchmark.PerfBenchmark): + +class IndexedDbOriginal(perf_benchmark.PerfBenchmark): """Chromium's IndexedDB Performance tests.""" test = _IndexedDbMeasurement @@ -96,3 +104,34 @@ class IndexedDb(perf_benchmark.PerfBenchmark): ps = story.StorySet(base_dir=indexeddb_dir) ps.AddStory(page_module.Page('file://perf_test.html', ps, ps.base_dir)) return ps + + +class IndexedDbOriginalSectioned(perf_benchmark.PerfBenchmark): + """Chromium's IndexedDB Performance tests.""" + test = _IndexedDbMeasurement + page_set = page_sets.IndexedDBEndurePageSet + + @classmethod + def Name(cls): + return 'storage.indexeddb_endure' + + +class IndexedDbTracing(perf_benchmark.PerfBenchmark): + """IndexedDB Performance tests that use tracing.""" + page_set = page_sets.IndexedDBEndurePageSet + + def CreateTimelineBasedMeasurementOptions(self): + cat_filter = tracing_category_filter.CreateMinimalOverheadFilter() + 
cat_filter.AddIncludedCategory(IDB_CATEGORY) + cat_filter.AddIncludedCategory(TIMELINE_REQUIRED_CATEGORY) + + return timeline_based_measurement.Options( + overhead_level=cat_filter) + + @classmethod + def Name(cls): + return 'storage.indexeddb_endure_tracing' + + @classmethod + def ValueCanBeAddedPredicate(cls, value, is_first_result): + return 'idb' in value.name diff --git a/tools/perf/page_sets/indexeddb_endure_page.py b/tools/perf/page_sets/indexeddb_endure_page.py new file mode 100644 index 000000000000..4910782a94d8 --- /dev/null +++ b/tools/perf/page_sets/indexeddb_endure_page.py @@ -0,0 +1,46 @@ +# Copyright 2015 The Chromium Authors. All rights reserved. +# Use of this source code is governed by a BSD-style license that can be +# found in the LICENSE file. + +from telemetry.page import page as page_module +from telemetry import story + +class IndexedDBEndurePage(page_module.Page): + + def __init__(self, subtest, page_set): + super(IndexedDBEndurePage, self).__init__( + url='file://indexeddb_perf/perf_test.html', + page_set=page_set, + name='indexeddb-endure-' + subtest) + self._subtest = subtest + + def RunPageInteractions(self, action_runner): + action_runner.ExecuteJavaScript('window.testFilter = "' + + self._subtest + '";') + with action_runner.CreateInteraction('Action_Test'): + action_runner.ExecuteJavaScript('window.test();') + action_runner.WaitForJavaScriptCondition('window.done', 600) + +class IndexedDBEndurePageSet(story.StorySet): + """The IndexedDB Endurance page set. + + This page set exercises various common operations in IndexedDB. + """ + + def __init__(self): + super(IndexedDBEndurePageSet, self).__init__() + tests = [ + 'testCreateAndDeleteDatabases', + 'testCreateAndDeleteDatabase', + 'testCreateKeysInStores', + 'testRandomReadsAndWritesWithoutIndex', + 'testRandomReadsAndWritesWithIndex', + 'testReadCacheWithoutIndex', + 'testReadCacheWithIndex', + 'testCreateAndDeleteIndex', + 'testWalkingMultipleCursors', + 'testCursorSeeksWithoutIndex', + 'testCursorSeeksWithIndex' + ] + for test in tests: + self.AddStory(IndexedDBEndurePage(test, self)) diff --git a/tools/perf/page_sets/indexeddb_perf/endure/app-worker.js b/tools/perf/page_sets/indexeddb_perf/endure/app-worker.js new file mode 100644 index 000000000000..e96c38b2e0ff --- /dev/null +++ b/tools/perf/page_sets/indexeddb_perf/endure/app-worker.js @@ -0,0 +1,224 @@ +// Copyright (c) 2012 The Chromium Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +// This file simulates a typical background process of an offline-capable +// authoring application. When in an "online" state it receives chunks of +// data updates from a simulated server and stores them in a temporary IDB +// data store. On a different timer, the chunks are drained from the +// temporary store and combined into larger records in a permanent store. +// When in an "offline" state, nothing else happens. 
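In outline, the worker runs a two-stage pipeline: sync() stages fixed-size chunks into the temporary 'sync-chunks' store, and a separately timed combine() drains that store, folding each chunk into its document in the permanent 'docs' store. A minimal Python sketch of that data flow (hypothetical names; timers, workers, and IndexedDB omitted):

from collections import deque

MAX_CHUNK_ID = 10

sync_chunks = deque()   # stands in for the temporary 'sync-chunks' store
docs = {}               # stands in for the permanent 'docs' store

def sync(chunk):
    # sync(): stage one incoming chunk of simulated server data.
    sync_chunks.append(chunk)

def combine():
    # combine(): drain staging, folding each chunk into its document.
    while sync_chunks:
        chunk = sync_chunks.popleft()
        doc = docs.setdefault(chunk['docid'],
                              {'docid': chunk['docid'],
                               'chunks': [None] * MAX_CHUNK_ID})
        doc['chunks'][chunk['chunkid']] = chunk['data']

sync({'docid': 3, 'chunkid': 0, 'data': 'x' * 5120})
combine()
assert docs[3]['chunks'][0] is not None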
+ +function unexpectedErrorCallback(e) { + self.postMessage({type: 'ERROR', error: { + name: e.target.error.name, + message: e.target.error.message + }}); +} + +function unexpectedAbortCallback(e) { + self.postMessage({type: 'ABORT', error: { + name: e.target.error.name, + message: e.target.error.message + }}); +} + +function log(message) { + self.postMessage({type: 'LOG', message: message}); +} + +function error(message) { + self.postMessage({type: 'ERROR', message: message}); +} + +var DBNAME = 'endurance-db'; +var DBVERSION = 1; + +var MAX_DOC_ID = 25; +var MAX_CHUNK_ID = 10; +var MAX_CHUNK_SIZE = 5 * 1024; +var SYNC_TIMEOUT = 100; +var COMBINE_TIMEOUT = 234; // relatively prime with SYNC_TIMEOUT + +function randomString(len) +{ + var s = ''; + while (len--) + s += Math.floor((Math.random() * 36)).toString(36); + return s; +} + +var getNextChunk = ( + function () { + var nextDocID = 0; + var nextChunkID = 0; + + return function () { + var doc_id = nextDocID; + var chunk_id = nextChunkID; + + nextDocID += 1; + if (nextDocID >= MAX_DOC_ID) { + nextDocID = 0; + nextChunkID += 1; + if (nextChunkID >= MAX_CHUNK_ID) + nextChunkID = 0; + } + + return { + docid: doc_id, + chunkid: chunk_id, + timestamp: new Date(), + data: randomString(MAX_CHUNK_SIZE) + }; + }; + }() +); + + +self.onmessage = function (event) { + switch (event.data.type) { + case 'offline': + goOffline(); + break; + case 'online': + goOnline(); + break; + default: + throw new Error("Unexpected message: " + event.data.type); + } +}; + + +var offline = true; +var syncTimeoutId = 0; +var combineTimeoutId = 0; + +function goOffline() { + if (offline) + return; + log('offline'); + offline = true; + clearTimeout(syncTimeoutId); + syncTimeoutId = 0; + clearTimeout(combineTimeoutId); + combineTimeoutId = 0; +} + +function goOnline() { + if (!offline) + return; + offline = false; + log('online'); + syncTimeoutId = setTimeout(sync, SYNC_TIMEOUT); + combineTimeoutId = setTimeout(combine, COMBINE_TIMEOUT); + // NOTE: Not using setInterval as we need to be sure they complete. 
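+  // Because each timer is re-armed from its transaction's oncomplete
+  // handler (see sync() and combine() below), a slow transaction delays
+  // the next run instead of letting a fixed setInterval pile up
+  // overlapping transactions.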
+} + +var sync_count = 0; +function sync() { + if (offline) + return; + + var sync_id = ++sync_count; + log('sync ' + sync_id + ' started'); + + var chunk = getNextChunk(); + log('sync ' + sync_id + + ' adding chunk: ' + chunk.chunkid + + ' to doc: ' + chunk.docid); + + var request = indexedDB.open(DBNAME); + request.onerror = unexpectedErrorCallback; + request.onsuccess = function () { + var db = request.result; + if (db.version !== DBVERSION) { + error('DB version incorrect'); + return; + } + + var transaction = db.transaction('sync-chunks', 'readwrite'); + var store = transaction.objectStore('sync-chunks'); + request = store.put(chunk); + transaction.onabort = unexpectedAbortCallback; + transaction.oncomplete = function () { + log('sync ' + sync_id + ' finished'); + db.close(); + syncTimeoutId = setTimeout(sync, SYNC_TIMEOUT); + }; + }; +} + +var combine_count = 0; +function combine() { + if (offline) + return; + + var combine_id = ++combine_count; + log('combine ' + combine_id + ' started'); + + var combine_chunk_count = 0; + + var request = indexedDB.open(DBNAME); + request.onerror = unexpectedErrorCallback; + request.onsuccess = function () { + var db = request.result; + if (db.version !== DBVERSION) { + error('DB version incorrect'); + return; + } + + var transaction = db.transaction(['sync-chunks', 'docs'], 'readwrite'); + var syncStore = transaction.objectStore('sync-chunks'); + var docStore = transaction.objectStore('docs'); + + var cursorRequest = syncStore.openCursor(); + cursorRequest.onerror = unexpectedErrorCallback; + cursorRequest.onsuccess = function () { + var cursor = cursorRequest.result; + if (cursor) { + combine_chunk_count += 1; + log('combine ' + combine_id + + ' processing chunk # ' + combine_chunk_count); + + var key = cursor.key; + var chunk = cursor.value; + var docRequest = docStore.get(chunk.docid); + docRequest.onerror = unexpectedErrorCallback; + docRequest.onsuccess = function () { + var doc = docRequest.result; + if (!doc) { + doc = { + docid: chunk.docid, + chunks: [] + }; + log('combine # ' + combine_id + + ' created doc: ' + doc.docid); + } + + log('combine # ' + combine_id + + ' updating doc: ' + doc.docid + + ' chunk: ' + chunk.chunkid); + + doc.chunks[chunk.chunkid] = chunk; + doc.timestamp = new Date(); + request = docStore.put(doc); + request.onerror = unexpectedErrorCallback; + cursor.delete(key); + cursor.continue(); + }; + } else { + // let transaction complete + log('combine ' + combine_id + + ' done, processed ' + combine_chunk_count + ' chunks'); + } + }; + transaction.onabort = unexpectedAbortCallback; + transaction.oncomplete = function () { + log('combine ' + combine_id + + ' finished, processed ' + combine_chunk_count + ' chunks'); + db.close(); + combineTimeoutId = setTimeout(combine, COMBINE_TIMEOUT); + }; + }; +} diff --git a/tools/perf/page_sets/indexeddb_perf/endure/app.html b/tools/perf/page_sets/indexeddb_perf/endure/app.html new file mode 100644 index 000000000000..19a5b13d010b --- /dev/null +++ b/tools/perf/page_sets/indexeddb_perf/endure/app.html @@ -0,0 +1,21 @@ + +IndexedDB Offline +

+This test models the typical design of an offline-aware authoring
+application:
+  • When "offline", a series of (fake) user events are logged into a store
+  • When "online", the events are played back to a (fake) server,
+    draining the store
+  • When "online" a Worker synchronizes data from a (fake) remote source
+    into a store in chunks, then consolidates it into another store
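The first two bullets boil down to a record-then-drain queue over the 'user-events' store: offline, recordEvent() appends; online, playbackEvents() sends and deletes entries one at a time until the cursor finds nothing. A rough Python model of that loop (hypothetical names; the real code is asynchronous and cursor-driven):

user_events = []  # stands in for the 'user-events' object store

PLAYBACK_NONE, PLAYBACK_SUCCESS, PLAYBACK_FAILURE = 0, 1, 2

def record_event(event):
    user_events.append(event)      # offline: log the (fake) user action

def playback_events(send):
    # online: drain the queue; stop on the first failed send (gone offline).
    while user_events:
        if not send(user_events[0]):
            return PLAYBACK_FAILURE
        user_events.pop(0)         # delete only after a successful send
    return PLAYBACK_NONE           # queue fully drained

record_event({'docid': 7, 'data': 'abc'})
assert playback_events(lambda e: True) == PLAYBACK_NONE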
diff --git a/tools/perf/page_sets/indexeddb_perf/endure/app.js b/tools/perf/page_sets/indexeddb_perf/endure/app.js
new file mode 100644
index 000000000000..ad611b5da46e
--- /dev/null
+++ b/tools/perf/page_sets/indexeddb_perf/endure/app.js
@@ -0,0 +1,274 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// This file simulates a typical foreground process of an offline-capable
+// authoring application. When in an "offline" state, simulated user actions
+// are recorded for later playback in an IDB data store. When in an "online"
+// state, the recorded actions are drained from the store (as if being sent
+// to the server).
+
+var $ = function(s) {
+  return document.querySelector(s);
+};
+
+function status(message) {
+  var elem = $('#status');
+  while (elem.firstChild)
+    elem.removeChild(elem.firstChild);
+  elem.appendChild(document.createTextNode(message));
+}
+
+function log(message) {
+  status(message);
+}
+
+function error(message) {
+  status(message);
+  console.error(message);
+}
+
+function unexpectedErrorCallback(e) {
+  error("Unexpected error callback: (" + e.target.error.name + ") " +
+        e.target.error.message);
+}
+
+function unexpectedAbortCallback(e) {
+  error("Unexpected abort callback: (" + e.target.error.name + ") " +
+        e.target.error.message);
+}
+
+function unexpectedBlockedCallback(e) {
+  error("Unexpected blocked callback!");
+}
+
+var DBNAME = 'endurance-db';
+var DBVERSION = 1;
+var MAX_DOC_ID = 25;
+
+var db;
+
+function initdb() {
+  var request = indexedDB.deleteDatabase(DBNAME);
+  request.onerror = unexpectedErrorCallback;
+  request.onblocked = unexpectedBlockedCallback;
+  request.onsuccess = function () {
+    request = indexedDB.open(DBNAME, DBVERSION);
+    request.onerror = unexpectedErrorCallback;
+    request.onblocked = unexpectedBlockedCallback;
+    request.onupgradeneeded = function () {
+      db = request.result;
+      request.transaction.onabort = unexpectedAbortCallback;
+
+      var syncStore = db.createObjectStore(
+          'sync-chunks', {keyPath: 'sequence', autoIncrement: true});
+      syncStore.createIndex('doc-index', 'docid');
+
+      var docStore = db.createObjectStore(
+          'docs', {keyPath: 'docid'});
+      docStore.createIndex(
+          'owner-index', 'owner', {multiEntry: true});
+
+      var userEventStore = db.createObjectStore(
+          'user-events', {keyPath: 'sequence', autoIncrement: true});
+      userEventStore.createIndex('doc-index', 'docid');
+    };
+    request.onsuccess = function () {
+      log('initialized');
+      $('#offline').disabled = true;
+      $('#online').disabled = false;
+    };
+  };
+}
+
+var offline = true;
+var worker = new Worker('app-worker.js?cachebust');
+worker.onmessage = function (event) {
+  var data = event.data;
+  switch (data.type) {
+    case 'ABORT':
+      unexpectedAbortCallback({target: {error: data.error}});
+      break;
+    case 'ERROR':
+      // The worker posts 'ERROR' in two shapes: {error: {name, message}}
+      // for IDB error events and {message: ...} for plain failures. Handle
+      // both here; a second 'ERROR' case would be unreachable.
+      if (data.error)
+        unexpectedErrorCallback({target: {error: data.error}});
+      else
+        error('WORKER: ' + data.message);
+      break;
+    case 'BLOCKED':
+      unexpectedBlockedCallback({target: {error: data.error}});
+      break;
+    case 'LOG':
+      log('WORKER: ' + data.message);
+      break;
+  }
+};
+worker.onerror = function (event) {
+  error("Error in: " + event.filename + "(" + event.lineno + "): " +
+        event.message);
+};
+
+$('#offline').addEventListener('click', goOffline);
+$('#online').addEventListener('click', goOnline);
+
+var EVENT_INTERVAL = 100;
+var eventIntervalId = 0;
+
+function goOffline() {
+  if (offline)
+    return;
+  offline = true;
+
$('#offline').disabled = offline; + $('#online').disabled = !offline; + $('#state').innerHTML = 'offline'; + log('offline'); + + worker.postMessage({type: 'offline'}); + + eventIntervalId = setInterval(recordEvent, EVENT_INTERVAL); +} + +function goOnline() { + if (!offline) + return; + offline = false; + $('#offline').disabled = offline; + $('#online').disabled = !offline; + $('#state').innerHTML = 'online'; + log('online'); + + worker.postMessage({type: 'online'}); + + setTimeout(playbackEvents, 100); + clearInterval(eventIntervalId); + eventIntervalId = 0; +}; + +function recordEvent() { + if (!db) { + error("Database not initialized"); + return; + } + + var transaction = db.transaction(['user-events'], 'readwrite'); + var store = transaction.objectStore('user-events'); + var record = { + // 'sequence' key will be generated + docid: Math.floor(Math.random() * MAX_DOC_ID), + timestamp: new Date(), + data: randomString(256) + }; + + log('putting user event'); + var request = store.put(record); + request.onerror = unexpectedErrorCallback; + transaction.onabort = unexpectedAbortCallback; + transaction.oncomplete = function () { + log('put user event'); + }; +} + +function sendEvent(record, callback) { + setTimeout( + function () { + if (offline) + callback(false); + else { + var serialization = JSON.stringify(record); + callback(true); + } + }, + Math.random() * 200); // Simulate network jitter +} + +var PLAYBACK_NONE = 0; +var PLAYBACK_SUCCESS = 1; +var PLAYBACK_FAILURE = 2; + +function playbackEvent(callback) { + log('playbackEvent'); + var result = false; + var transaction = db.transaction(['user-events'], 'readonly'); + transaction.onabort = unexpectedAbortCallback; + var store = transaction.objectStore('user-events'); + var cursorRequest = store.openCursor(); + cursorRequest.onerror = unexpectedErrorCallback; + cursorRequest.onsuccess = function () { + var cursor = cursorRequest.result; + if (cursor) { + var record = cursor.value; + var key = cursor.key; + // NOTE: sendEvent is asynchronous so transaction should finish + sendEvent( + record, + function (success) { + if (success) { + // Use another transaction to delete event + var transaction = db.transaction(['user-events'], 'readwrite'); + transaction.onabort = unexpectedAbortCallback; + var store = transaction.objectStore('user-events'); + var deleteRequest = store.delete(key); + deleteRequest.onerror = unexpectedErrorCallback; + transaction.oncomplete = function () { + // successfully sent and deleted event + callback(PLAYBACK_SUCCESS); + }; + } else { + // No progress made + callback(PLAYBACK_FAILURE); + } + }); + } else { + callback(PLAYBACK_NONE); + } + }; +} + +var playback = false; + +function playbackEvents() { + log('playbackEvents'); + if (!db) { + error("Database not initialized"); + return; + } + + if (playback) + return; + + playback = true; + log("Playing back events"); + + function nextEvent() { + playbackEvent( + function (result) { + switch (result) { + case PLAYBACK_NONE: + playback = false; + log("Done playing back events"); + return; + case PLAYBACK_SUCCESS: + setTimeout(nextEvent, 0); + return; + case PLAYBACK_FAILURE: + playback = false; + log("Failure during playback (dropped offline?)"); + return; + } + }); + } + + nextEvent(); +} + +function randomString(len) { + var s = ''; + while (len--) + s += Math.floor((Math.random() * 36)).toString(36); + return s; +} + +window.onload = function () { + log("initializing..."); + initdb(); +}; diff --git a/tools/perf/page_sets/indexeddb_perf/perf_shared.js 
b/tools/perf/page_sets/indexeddb_perf/perf_shared.js new file mode 100644 index 000000000000..41681e18a834 --- /dev/null +++ b/tools/perf/page_sets/indexeddb_perf/perf_shared.js @@ -0,0 +1,435 @@ +// Copyright (c) 2012 The Chromium Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +var automation = { + results: {} +}; + +automation.setDone = function() { + this.setStatus("Test complete."); + document.cookie = '__done=1; path=/'; +}; + +automation.addResult = function(name, result) { + result = "" + result; + this.results[name] = result; + var elt = document.getElementById('results'); + var div = document.createElement('div'); + div.textContent = name + ": " + result; + elt.appendChild(div); +}; + +automation.getResults = function() { + return this.results; +}; + +automation.setStatus = function(s) { + document.getElementById('status').textContent = s; +}; + +function assert(t) { + if (!t) { + var e = new Error("Assertion failed!"); + console.log(e.stack); + throw e; + } +} + +function onError(e) { + var s = "Caught error."; + if (e.target && e.target.error) + s += "\n" + e.target.error.name + "\n" + e.target.error.message; + console.log(s); + automation.setStatus(s); + e.stopPropagation(); + throw new Error(e); +} + +var baseVersion = 2; // The version with our object stores. +var curVersion; + +// Valid options fields: +// indexName: the name of an index to create on each object store +// indexKeyPath: the key path for that index +// indexIsUnique: the "unique" option for IDBIndexParameters +// indexIsMultiEntry: the "multiEntry" option for IDBIndexParameters +// +function createDatabase( + name, objectStoreNames, handler, errorHandler, optionSets) { + var openRequest = indexedDB.open(name, baseVersion); + openRequest.onblocked = errorHandler; + openRequest.onerror = errorHandler; + function createObjectStores(db) { + for (var store in objectStoreNames) { + var name = objectStoreNames[store]; + assert(!db.objectStoreNames.contains(name)); + var os = db.createObjectStore(name); + if (optionSets) { + for (o in optionSets) { + var options = optionSets[o]; + assert(options.indexName); + assert('indexKeyPath' in options); + os.createIndex(options.indexName, options.indexKeyPath, + { unique: options.indexIsUnique, + multiEntry: options.indexIsMultiEntry }); + } + } + } + } + openRequest.onupgradeneeded = function(ev) { + // This is the spec-compliant path, which doesn't yet run in Chrome, but + // works in Firefox. + assert(openRequest == ev.target); + var db = openRequest.result; + db.onerror = errorHandler; + createObjectStores(db); + // onsuccess will get called after this exits. + }; + openRequest.onsuccess = function(ev) { + assert(openRequest == ev.target); + var db = openRequest.result; + curVersion = db.version; + db.onerror = function(ev) { + console.log("db error", arguments, openRequest.error.message); + errorHandler(ev); + }; + if (curVersion != baseVersion) { + // This is the legacy path, which runs only in Chrome. + var setVersionRequest = db.setVersion(baseVersion); + setVersionRequest.onerror = errorHandler; + setVersionRequest.onsuccess = function(e) { + assert(setVersionRequest == e.target); + createObjectStores(db); + var versionTransaction = setVersionRequest.result; + versionTransaction.oncomplete = function() { handler(db); }; + versionTransaction.onerror = onError; + }; + } else { + handler(db); + } + }; +} + +// You must close all database connections before calling this. 
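alterObjectStores below leans on IndexedDB's versioning contract: opening with a larger version number fires an upgrade callback, the only place schema changes are allowed, and the open is blocked while any other connection is still open. A toy Python model of that contract (hypothetical; no events or concurrency):

class FakeIDB(object):
    def __init__(self):
        self.version = 0
        self.connections = 0

    def open(self, version, on_upgrade):
        if self.connections:
            # Real IDB fires "blocked"; the harness treats that as an error.
            raise RuntimeError('blocked: close other connections first')
        if version > self.version:
            on_upgrade(self)       # schema changes happen only here
            self.version = version
        self.connections += 1
        return self

idb = FakeIDB()
idb.open(2, lambda db: None)
assert idb.version == 2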
+function alterObjectStores( + name, objectStoreNames, func, handler, errorHandler) { + var version = curVersion + 1; + var openRequest = indexedDB.open(name, version); + openRequest.onblocked = errorHandler; + openRequest.onupgradeneeded = function(ev) { + doAlteration(ev.target.transaction); + // onsuccess will get called after this exits. + }; + openRequest.onsuccess = function(ev) { + assert(openRequest == ev.target); + var db = openRequest.result; + db.onerror = function(ev) { + console.log("error altering db", arguments, + openRequest.error.message); + errorHandler(); + }; + if (db.version != version) { + // This is the legacy path, which runs only in Chrome before M23. + var setVersionRequest = db.setVersion(version); + setVersionRequest.onerror = errorHandler; + setVersionRequest.onsuccess = + function(e) { + curVersion = db.version; + assert(setVersionRequest == e.target); + var versionTransaction = setVersionRequest.result; + versionTransaction.oncomplete = function() { handler(db); }; + versionTransaction.onerror = onError; + doAlteration(versionTransaction); + }; + } else { + handler(db); + } + }; + function doAlteration(target) { + for (var store in objectStoreNames) { + func(target.objectStore(objectStoreNames[store])); + } + } +} + +function getTransaction(db, objectStoreNames, mode, opt_handler) { + var transaction = db.transaction(objectStoreNames, mode); + transaction.onerror = onError; + transaction.onabort = onError; + if (opt_handler) { + transaction.oncomplete = opt_handler; + } + return transaction; +} + +function deleteDatabase(name, opt_handler) { + var deleteRequest = indexedDB.deleteDatabase(name); + deleteRequest.onerror = onError; + deleteRequest.onblocked = onError; + if (opt_handler) { + deleteRequest.onsuccess = opt_handler; + } +} + +function getCompletionFunc(db, testName, startTime, onTestComplete) { + function onDeleted() { + automation.setStatus("Deleted database."); + onTestComplete(); + } + return function() { + var duration = window.performance.now() - startTime; + // Ignore the cleanup time for this test. + automation.addResult(testName, duration); + automation.setStatus("Deleting database."); + db.close(); + deleteDatabase(testName, onDeleted); + }; +} + +function getDisplayName(args) { + function functionName(f) { + // Function.prototype.name is nonstandard, and not implemented in IE10- + return f.name || f.toString().match(/^function\s*([^(\s]*)/)[1]; + } + // The last arg is the completion callback the test runner tacks on. + // TODO(ericu): Make test errors delete the database automatically. + return functionName(getDisplayName.caller) + (args.length > 1 ? "_" : "") + + Array.prototype.slice.call(args, 0, args.length - 1).join("_"); +} + +// Pad a string [or object convertible to a string] to a fixed width; use this +// to have numeric strings sort properly. 
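padToWidth exists because IDB string keys compare lexicographically, so unpadded numbers sort badly: 'key 10' lands before 'key 9'. A quick Python check of the property the padding restores:

unpadded = ['key %d' % i for i in (1, 9, 10)]
padded = ['key %010d' % i for i in (1, 9, 10)]

assert sorted(unpadded) == ['key 1', 'key 10', 'key 9']  # numerically wrong
assert sorted(padded) == padded                          # numeric order kept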
+function padToWidth(s, width) { + s = String(s); + assert(s.length <= width); + if (s.length < width) { + s = stringOfLength(width - s.length, '0') + s; + } + return s; +} + +function stringOfLength(n, c) { + if (c == null) + c = 'X'; + assert(n > 0); + assert(n == Math.floor(n)); + return new Array(n + 1).join(c); +} + +function getSimpleKey(i) { + return "key " + padToWidth(i, 10); +} + +function getSimpleValue(i) { + return "value " + padToWidth(i, 10); +} + +function getIndexableValue(i) { + return { id: getSimpleValue(i) }; +} + +function getForwardIndexKey(i) { + return i; +} + +function getBackwardIndexKey(i) { + return -i; +} + +// This is useful for indexing by keypath; the two names should be ordered in +// opposite directions for all i in uint32 range. +function getObjectValue(i) { + return { + firstName: getForwardIndexKey(i), + lastName: getBackwardIndexKey(i) + }; +} + +function getNFieldName(k) { + return "field" + k; +} + +function getNFieldObjectValue(i, n) { + assert(Math.floor(n) == n); + assert(n > 0); + var o = {}; + for (; n > 0; --n) { + // The value varies per field, each object will tend to be unique, + // and thanks to the modulus, indexing on different fields will give you + // different ordering for large-enough data sets. + o[getNFieldName(n - 1)] = Math.pow(i + 0.5, n + 0.5) % 65536; + } + return o; +} + +function putLinearValues( + transaction, objectStoreNames, numKeys, getKey, getValue) { + if (!getKey) + getKey = getSimpleKey; + if (!getValue) + getValue = getSimpleValue; + for (var i in objectStoreNames) { + var os = transaction.objectStore(objectStoreNames[i]); + for (var j = 0; j < numKeys; ++j) { + var request = os.put(getValue(j), getKey(j)); + request.onerror = onError; + } + } +} + +function verifyResultNonNull(result) { + assert(result != null); +} + +function getRandomValues( + transaction, objectStoreNames, numReads, numKeys, indexName, getKey) { + if (!getKey) + getKey = getSimpleKey; + for (var i in objectStoreNames) { + var os = transaction.objectStore(objectStoreNames[i]); + var source = os; + if (indexName) + source = source.index(indexName); + for (var j = 0; j < numReads; ++j) { + var rand = Math.floor(random() * numKeys); + var request = source.get(getKey(rand)); + request.onerror = onError; + request.onsuccess = verifyResultNonNull; + } + } +} + +function putRandomValues( + transaction, objectStoreNames, numPuts, numKeys, getKey, getValue) { + if (!getKey) + getKey = getSimpleKey; + if (!getValue) + getValue = getSimpleValue; + for (var i in objectStoreNames) { + var os = transaction.objectStore(objectStoreNames[i]); + for (var j = 0; j < numPuts; ++j) { + var rand = Math.floor(random() * numKeys); + var request = os.put(getValue(rand), getKey(rand)); + request.onerror = onError; + } + } +} + +function getSpecificValues(transaction, objectStoreNames, indexName, keys) { + for (var i in objectStoreNames) { + var os = transaction.objectStore(objectStoreNames[i]); + var source = os; + if (indexName) + source = source.index(indexName); + for (var j = 0; j < keys.length; ++j) { + var request = source.get(keys[j]); + request.onerror = onError; + request.onsuccess = verifyResultNonNull; + } + } +} + +// getKey should be deterministic, as we assume that a cursor that starts at +// getKey(X) and runs through getKey(X + K) has exactly K values available. +// This is annoying to guarantee generally when using an index, so we avoid both +// ends of the key space just in case and use simple indices. 
+// TODO(ericu): Figure out if this can be simplified and we can remove uses of +// getObjectValue in favor of getNFieldObjectValue. +function getValuesFromCursor( + transaction, inputObjectStoreName, numReads, numKeys, indexName, getKey, + readKeysOnly, outputObjectStoreName) { + assert(2 * numReads < numKeys); + if (!getKey) + getKey = getSimpleKey; + var rand = Math.floor(random() * (numKeys - 2 * numReads)) + numReads; + var values = []; + var queryObject = transaction.objectStore(inputObjectStoreName); + assert(queryObject); + if (indexName) + queryObject = queryObject.index(indexName); + var keyRange = IDBKeyRange.bound( + getKey(rand), getKey(rand + numReads), false, true); + var request; + if (readKeysOnly) { + request = queryObject.openKeyCursor(keyRange); + } else { + request = queryObject.openCursor(keyRange); + } + var oos; + if (outputObjectStoreName) + oos = transaction.objectStore(outputObjectStoreName); + var numReadsLeft = numReads; + request.onsuccess = function(event) { + var cursor = event.target.result; + if (cursor) { + assert(numReadsLeft); + --numReadsLeft; + if (oos) + // Put in random order for maximum difficulty. We add in numKeys just + // in case we're writing back to the same store; this way we won't + // affect the number of keys available to the cursor, since we're always + // outside its range. + oos.put(cursor.value, numKeys + random()); + values.push({key: cursor.key, value: cursor.value}); + cursor.continue(); + } else { + assert(!numReadsLeft); + } + }; + request.onerror = onError; +} + +function runTransactionBatch(db, count, batchFunc, objectStoreNames, mode, + onComplete) { + var numTransactionsRunning = 0; + + runOneBatch(db); + + function runOneBatch(db) { + if (count <= 0) { + return; + } + --count; + ++numTransactionsRunning; + var transaction = getTransaction(db, objectStoreNames, mode, + function() { + assert(!--numTransactionsRunning); + if (count <= 0) { + onComplete(); + } else { + runOneBatch(db); + } + }); + + batchFunc(transaction); + } +} + +// Use random() instead of Math.random() so runs are repeatable. +var random = (function(seed) { + + // Implementation of: http://www.burtleburtle.net/bob/rand/smallprng.html + function uint32(x) { return x >>> 0; } + function rot(x, k) { return (x << k) | (x >> (32 - k)); } + + function SmallPRNG(seed) { + seed = uint32(seed); + this.a = 0xf1ea5eed; + this.b = this.c = this.d = seed; + for (var i = 0; i < 20; ++i) + this.ranval(); + } + + SmallPRNG.prototype.ranval = function() { + var e = uint32(this.a - rot(this.b, 27)); + this.a = this.b ^ rot(this.c, 17); + this.b = uint32(this.c + this.d); + this.c = uint32(this.d + e); + this.d = uint32(e + this.a); + return this.d; + }; + + var prng = new SmallPRNG(seed); + return function() { return prng.ranval() / 0x100000000; }; +}(0)); diff --git a/tools/perf/page_sets/indexeddb_perf/perf_test.html b/tools/perf/page_sets/indexeddb_perf/perf_test.html new file mode 100644 index 000000000000..c368ceda17f1 --- /dev/null +++ b/tools/perf/page_sets/indexeddb_perf/perf_test.html @@ -0,0 +1,14 @@ + + + + IndexedDB perf test first try + + + + + +
Starting...
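Telemetry, not a user, drives this page: IndexedDBEndurePage.RunPageInteractions (indexeddb_endure_page.py above) injects the subtest name, starts the run, and waits on the window.done flag that onAllTestsComplete() sets. From the harness side the exchange looks like this (action_runner is supplied by Telemetry while the story runs):

action_runner.ExecuteJavaScript('window.testFilter = "testCreateKeysInStores";')
with action_runner.CreateInteraction('Action_Test'):
    action_runner.ExecuteJavaScript('window.test();')
    action_runner.WaitForJavaScriptCondition('window.done', 600)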
+ + + diff --git a/chrome/test/data/indexeddb/perf_test.js b/tools/perf/page_sets/indexeddb_perf/perf_test.js similarity index 79% copy from chrome/test/data/indexeddb/perf_test.js copy to tools/perf/page_sets/indexeddb_perf/perf_test.js index 853fbbfdf9e8..3f264505f154 100644 --- a/chrome/test/data/indexeddb/perf_test.js +++ b/tools/perf/page_sets/indexeddb_perf/perf_test.js @@ -16,6 +16,9 @@ var kDontRead = false; var kAlternateWithReads = true; var tests = [ +// Create 30 databases, populate them with 20 object stores with 10 items +// each, and then open them 60 times. Each item is 100 bytes long. + [testCreateAndDeleteDatabases, 30, 60, 10, 20, 100], // Create a single small item in a single object store, then delete everything. [testCreateAndDeleteDatabase, 1, 1, 1], // Create many small items in a single object store, then delete everything. @@ -32,26 +35,31 @@ var tests = [ [testCreateKeysInStores, 1, 1000, 1], // Create many large items in a single object store. [testCreateKeysInStores, 1000, 1, 10000], + // Read one item per transaction. - [testRandomReadsAndWrites, 1000, 1, 0, 1000, kDontUseIndex], + [testRandomReadsAndWritesWithoutIndex, 1000, 1, 0, 1000], // Read a few random items in each of many transactions. - [testRandomReadsAndWrites, 1000, 5, 0, 100, kDontUseIndex], + [testRandomReadsAndWritesWithoutIndex, 1000, 5, 0, 100], // Read many random items in each of a few transactions. - [testRandomReadsAndWrites, 1000, 500, 0, 5, kDontUseIndex], + [testRandomReadsAndWritesWithoutIndex, 1000, 500, 0, 5], // Read many random items in each of a few transactions, in a large store. - [testRandomReadsAndWrites, 10000, 500, 0, 5, kDontUseIndex], + [testRandomReadsAndWritesWithoutIndex, 10000, 500, 0, 5], +// Read and write a few random items in each of many transactions. + [testRandomReadsAndWritesWithoutIndex, 1000, 5, 5, 50], + +// Read one item per transaction. + [testRandomReadsAndWritesWithIndex, 1000, 1, 0, 1000], // Read a few random items from an index, in each of many transactions. - [testRandomReadsAndWrites, 1000, 5, 0, 100, kUseIndex], + [testRandomReadsAndWritesWithIndex, 1000, 5, 0, 100], // Read many random items from an index, in each of a few transactions. - [testRandomReadsAndWrites, 1000, 500, 0, 5, kUseIndex], + [testRandomReadsAndWritesWithIndex, 1000, 500, 0, 5], // Read many random items from an index, in each of a few transactions, in a // large store. - [testRandomReadsAndWrites, 10000, 500, 0, 5, kUseIndex], -// Read and write a few random items in each of many transactions. - [testRandomReadsAndWrites, 1000, 5, 5, 50, kDontUseIndex], + [testRandomReadsAndWritesWithIndex, 10000, 500, 0, 5], // Read and write a few random items, reading from an index, in each of many // transactions. - [testRandomReadsAndWrites, 1000, 5, 5, 50, kUseIndex], + [testRandomReadsAndWritesWithIndex, 1000, 5, 5, 50], + // Read a long, contiguous sequence of an object store via a cursor. [testCursorReadsAndRandomWrites, kReadDataToo, kDontUseIndex, kDontWrite, kPlaceholderArg], @@ -69,14 +77,16 @@ var tests = [ // Read a sequence of an index into an object store via a key cursor. [testCursorReadsAndRandomWrites, kReadKeysOnly, kUseIndex, kDontWrite, kPlaceholderArg], + // Make a small bunch of batches of reads of the same keys from an object store. - [testReadCache, 10, kDontUseIndex], -// Make a bunch of batches of reads of the same keys from an index. 
- [testReadCache, 50, kUseIndex], + [testReadCacheWithoutIndex, 10], +// Make a bunch of batches of reads of the same keys from an object store. + [testReadCacheWithoutIndex, 50], // Make a small bunch of batches of reads of the same keys from an object store. - [testReadCache, 10, kDontUseIndex], + [testReadCacheWithIndex, 10], // Make a bunch of batches of reads of the same keys from an index. - [testReadCache, 50, kUseIndex], + [testReadCacheWithIndex, 50], + // Create and delete an index on a store that already contains data [produces // a timing result for each of creation and deletion]. [testCreateAndDeleteIndex, 5000], @@ -87,31 +97,74 @@ var tests = [ // you've reached the end of each of them. [testWalkingMultipleCursors, 50], // Open an object store cursor, then continue(key) to the last value. - [testCursorSeeks, 2000, 10, 4, kDontUseIndex], + [testCursorSeeksWithoutIndex, 2000, 10, 4], // Open an index key cursor, then continue(key) to the last value. - [testCursorSeeks, 2000, 10, 4, kUseIndex], + [testCursorSeeksWithIndex, 2000, 10, 4], ]; + +function testRandomReadsAndWritesWithIndex( + numKeys, numReadsPerTransaction, numWritesPerTransaction, + numTransactions, onTestComplete) { + testRandomReadsAndWrites(numKeys, numReadsPerTransaction, + numWritesPerTransaction, + numTransactions, true, onTestComplete); +} + +function testRandomReadsAndWritesWithoutIndex( + numKeys, numReadsPerTransaction, numWritesPerTransaction, + numTransactions, onTestComplete) { + testRandomReadsAndWrites(numKeys, numReadsPerTransaction, + numWritesPerTransaction, + numTransactions, false, onTestComplete); +} + + +function testReadCacheWithIndex(numTransactions, onTestComplete) { + testReadCache(numTransactions, true, onTestComplete); +} + +function testReadCacheWithoutIndex(numTransactions, onTestComplete) { + testReadCache(numTransactions, false, onTestComplete) +} + +function testCursorSeeksWithIndex(numKeys, numSeeksPerTransaction, + numTransactions, onTestComplete) { + testCursorSeeks(numKeys, numSeeksPerTransaction, numTransactions, + true, onTestComplete); +} + +function testCursorSeeksWithoutIndex(numKeys, numSeeksPerTransaction, + numTransactions, onTestComplete) { + testCursorSeeks(numKeys, numSeeksPerTransaction, numTransactions, + false, onTestComplete); +} + + + var currentTest = 0; +var testFilter; +var done = false; function test() { + done = false; runNextTest(); } + function runNextTest() { - var filter = window.location.hash.slice(1); - var test, f; + var running_test, f; while (currentTest < tests.length) { - test = tests[currentTest]; - f = test.shift(); - if (!filter || f.name == filter) + running_test = tests[currentTest]; + f = running_test.shift(); + if (!testFilter || f.name == testFilter) break; ++currentTest; } if (currentTest < tests.length) { - test.push(runNextTest); - f.apply(null, test); + running_test.push(runNextTest); + f.apply(null, running_test); ++currentTest; } else { onAllTestsComplete(); @@ -122,6 +175,88 @@ function onAllTestsComplete() { var overallDuration = window.performance.now() - overallTestStartTime; automation.addResult("OverallTestDuration", overallDuration); automation.setDone(); + done = true; +} + +function testCreateAndDeleteDatabases( + numDatabases, numOpens, numKeys, numStores, + payloadLength, onTestComplete) { + var testName = getDisplayName(arguments); + assert(numOpens >= 1); + assert(numKeys >= 0); + assert(numStores >= 1); + var objectStoreNames = []; + for (var i=0; i < numStores; ++i) { + objectStoreNames.push("store " + i); + } + var 
value = stringOfLength(payloadLength); + function getValue() { + return value; + } + + automation.setStatus("Creating databases."); + var startTime = window.performance.now(); + + var numCreated = 0; + for (var i = 0; i < numDatabases; i++) { + createDatabase(testName + i, objectStoreNames, onCreated, onError); + } + + function onCreated(db) { + automation.setStatus("Constructing transactions."); + var transaction = + getTransaction(db, objectStoreNames, "readwrite", + function() { openLoop(db, numOpens); }); + putLinearValues(transaction, objectStoreNames, numKeys, null, getValue); + } + + function openLoop(db, timesLeft) { + db.close(); + if (timesLeft == 0) { + deleteDatabase(db.name, onDeleted); + return; + } + createDatabase(db.name, objectStoreNames, + function(db) { openLoop(db, timesLeft - 1); }, onError) + } + + var numDeleted = 0; + function onDeleted() { + var duration = window.performance.now() - startTime; + automation.addResult(testName, duration); + automation.setStatus("Deleted database."); + if (++numDeleted == numDatabases) { + onTestComplete(); + } + } +} + +function testCreateKeysInStores( + numKeys, numStores, payloadLength, onTestComplete) { + var testName = getDisplayName(arguments); + assert(numKeys >= 0); + assert(numStores >= 1); + var objectStoreNames = []; + for (var i=0; i < numStores; ++i) { + objectStoreNames.push("store " + i); + } + var value = stringOfLength(payloadLength); + function getValue() { + return value; + } + + automation.setStatus("Creating database."); + createDatabase(testName, objectStoreNames, onCreated, onError); + + function onCreated(db) { + automation.setStatus("Constructing transaction."); + var completionFunc = + getCompletionFunc(db, testName, window.performance.now(), + onTestComplete); + var transaction = + getTransaction(db, objectStoreNames, "readwrite", completionFunc); + putLinearValues(transaction, objectStoreNames, numKeys, null, getValue); + } } // This is the only test that includes database creation and deletion in its diff --git a/tools/telemetry/telemetry/testing/test_page_test_results.py b/tools/telemetry/telemetry/testing/test_page_test_results.py index 82e72a71e063..3770c72dd602 100644 --- a/tools/telemetry/telemetry/testing/test_page_test_results.py +++ b/tools/telemetry/telemetry/testing/test_page_test_results.py @@ -4,6 +4,7 @@ from telemetry.internal.results import page_test_results from telemetry.page import page as page_module +from telemetry.value import list_of_scalar_values from telemetry.value import scalar @@ -27,5 +28,13 @@ class TestPageTestResults( self.test.assertTrue(isinstance(value, scalar.ScalarValue)) self.test.assertEquals(expected_value, value.value) + def AssertHasPageSpecificListOfScalarValues(self, name, units, + expected_values): + value = self.GetPageSpecificValueNamed(name) + self.test.assertEquals(units, value.units) + self.test.assertTrue( + isinstance(value, list_of_scalar_values.ListOfScalarValues)) + self.test.assertItemsEqual(expected_values, value.values) + def __str__(self): return '\n'.join([repr(x) for x in self.all_page_specific_values]) diff --git a/tools/telemetry/telemetry/timeline/counter.py b/tools/telemetry/telemetry/timeline/counter.py index d981e93e6df7..72fdd873c3cd 100644 --- a/tools/telemetry/telemetry/timeline/counter.py +++ b/tools/telemetry/telemetry/timeline/counter.py @@ -14,6 +14,10 @@ class CounterSample(object): self._sample_index = sample_index @property + def category(self): + return self._counter.category + + @property def name(self): return 
self._counter.full_name diff --git a/tools/telemetry/telemetry/web_perf/metrics/indexeddb_timeline.py b/tools/telemetry/telemetry/web_perf/metrics/indexeddb_timeline.py new file mode 100644 index 000000000000..1dc8382ab185 --- /dev/null +++ b/tools/telemetry/telemetry/web_perf/metrics/indexeddb_timeline.py @@ -0,0 +1,80 @@ +# Copyright 2015 The Chromium Authors. All rights reserved. +# Use of this source code is governed by a BSD-style license that can be +# found in the LICENSE file. + + +from telemetry.web_perf.metrics import timeline_based_metric +from telemetry.web_perf.metrics.trace_event_stats import TraceEventStats +from telemetry.web_perf.metrics.trace_event_stats import TraceEventStatsInput + + +class IndexedDBTimelineMetric(timeline_based_metric.TimelineBasedMetric): + """Metrics for IndexedDB operations. + """ + + def __init__(self): + super(IndexedDBTimelineMetric, self).__init__() + self._stats = TraceEventStats() + + self._stats.AddInput(TraceEventStatsInput( + event_category='IndexedDB', + event_name='IndexedDBDatabase::GetOperation', + metric_name='idb-gets', + metric_description='The duration of all "get" ops in IndexedDB', + units='ms', + process_name='Browser')) + + self._stats.AddInput(TraceEventStatsInput( + event_category='IndexedDB', + event_name='IndexedDBDatabase::PutOperation', + metric_name='idb-puts', + metric_description='The duration of all "put" ops in IndexedDB', + units='ms', + process_name='Browser')) + + self._stats.AddInput(TraceEventStatsInput( + event_category='IndexedDB', + event_name='IndexedDBFactoryImpl::Open', + metric_name='idb-opens', + metric_description='The duration of all "open" ops in IndexedDB', + units='ms', + process_name='Browser')) + + self._stats.AddInput(TraceEventStatsInput( + event_category='IndexedDB', + event_name='IndexedDBTransaction::Commit', + metric_name='idb-transaction-commits', + metric_description=('The duration of all "commit" ops of ' + + 'transactions in IndexedDB.'), + units='ms', + process_name='Browser')) + + self._stats.AddInput(TraceEventStatsInput( + event_category='IndexedDB', + event_name='IndexedDBFactoryImpl::DeleteDatabase', + metric_name='idb-database-deletes', + metric_description=('The duration of all "delete" ops of ' + + 'IndexedDB databases.'), + units='ms', + process_name='Browser')) + + self._stats.AddInput(TraceEventStatsInput( + event_category='IndexedDB', + event_name='IndexedDBDatabase::OpenCursorOperation', + metric_name='idb-cursor-opens', + metric_description=('The duration of all "open" ops of ' + + 'IndexedDB cursors.'), + units='ms', + process_name='Browser')) + + self._stats.AddInput(TraceEventStatsInput( + event_category='IndexedDB', + event_name='IndexedDBCursor::CursorIterationOperation', + metric_name='idb-cursor-iterations', + metric_description=('The duration of all "iteration" ops of ' + + 'IndexedDB cursors.'), + units='ms', + process_name='Browser')) + + def AddResults(self, model, renderer_process, interactions, results): + self._stats.AddResults(model, renderer_process, interactions, results) diff --git a/tools/telemetry/telemetry/web_perf/metrics/trace_event_stats.py b/tools/telemetry/telemetry/web_perf/metrics/trace_event_stats.py new file mode 100644 index 000000000000..21c9614b7640 --- /dev/null +++ b/tools/telemetry/telemetry/web_perf/metrics/trace_event_stats.py @@ -0,0 +1,128 @@ +# Copyright 2015 The Chromium Authors. All rights reserved. +# Use of this source code is governed by a BSD-style license that can be +# found in the LICENSE file. 
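Stripped to its core, the module below buckets the durations of selected trace events by (category, name), keeping only events that start inside a timed interaction; each bucket is then reported as a list-of-scalars value plus a count. A simplified sketch of that aggregation (not the real API):

import collections

def aggregate(events, interactions):
    # Keep events that begin inside some interaction window, then bucket
    # their durations by (category, name); thread time is preferred.
    times = collections.defaultdict(list)
    for e in events:
        if any(i.start <= e.start <= i.end for i in interactions):
            times[(e.category, e.name)].append(e.thread_duration or e.duration)
    return times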
+
+import collections
+
+from telemetry.value import list_of_scalar_values
+from telemetry.value import scalar
+
+
+class TraceEventStatsInput(object):
+  """Input for TraceEventStats.
+
+  Using this object with TraceEventStats produces two metrics: one listing
+  the duration of each matching event, and one counting the events, named
+  `metric_name + '-count'`.
+
+  Args:
+    event_category: The category of the trace event to track.
+    event_name: The name of the trace event to track.
+    metric_name: The name of the metric, which accumulates all of the
+        durations of the matching events.
+    metric_description: Description of the metric.
+    units: Units for the duration metric.
+    process_name: (optional) The name of the process to inspect for the
+        trace events. Defaults to 'Renderer'.
+  """
+
+  def __init__(self, event_category, event_name, metric_name,
+               metric_description, units, process_name='Renderer'):
+    assert process_name is not None
+    self.event_category = event_category
+    self.event_name = event_name
+    self.metric_name = metric_name
+    self.metric_description = metric_description
+    self.units = units
+    self.process_name = process_name
+    self.event_id = TraceEventStatsInput.GetEventId(event_category, event_name)
+
+  @staticmethod
+  def GetEventId(event_category, event_name):
+    return event_category + '^SERIALIZE-DELIM^' + event_name
+
+
+class TraceEventStats(object):
+  """Reports durations and counts of given trace events."""
+
+  def __init__(self, trace_event_aggregator_inputs=None):
+    self._inputs_by_process_name = collections.defaultdict(list)
+    self._metrics = set()
+    self._IndexNewInputs(trace_event_aggregator_inputs)
+
+  def AddInput(self, trace_event_aggregator_input):
+    self._IndexNewInputs([trace_event_aggregator_input])
+
+  def _IndexNewInputs(self, input_list):
+    if not input_list:
+      return
+    for input_obj in input_list:
+      name = input_obj.metric_name
+      # Make sure we don't register a duplicate metric name.
+      assert name not in self._metrics
+      assert (name + '-count') not in self._metrics
+      self._metrics.add(name)
+      self._metrics.add(name + '-count')
+
+      self._inputs_by_process_name[input_obj.process_name].append(input_obj)
+
+  @staticmethod
+  def ThreadDurationIfPresent(event):
+    # Prefer CPU (thread) time; fall back to wall-clock duration when the
+    # trace did not record thread time.
+    if event.thread_duration:
+      return event.thread_duration
+    else:
+      return event.duration
+
+  def AddResults(self, model, renderer_process, interactions, results):
+    assert interactions
+    for p in model.GetAllProcesses():
+      if p.name not in self._inputs_by_process_name:
+        continue
+
+      inputs = self._inputs_by_process_name[p.name]
+      input_ids = {i.event_id for i in inputs}
+
+      def InputIdPredicate(e, ids):
+        return TraceEventStatsInput.GetEventId(e.category, e.name) in ids
+
+      self._AddResultsInternal(
+          p.IterAllEvents(
+              recursive=True,
+              event_type_predicate=lambda t: True,
+              event_predicate=
+                  lambda e, ids=input_ids: InputIdPredicate(e, ids)),
+          interactions,
+          results,
+          inputs)
+
+  # We assume events have been filtered already. 'events' is an iterator.
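+  # Durations are bucketed by event id; each bucket is emitted as one
+  # ListOfScalarValues of the durations plus a '-count' ScalarValue.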
+  def _AddResultsInternal(self, events, interactions, results, inputs):
+    times_by_event_id = collections.defaultdict(list)
+
+    for event in events:
+      if not any(interaction.start <= event.start <= interaction.end
+                 for interaction in interactions):
+        continue
+      event_id = TraceEventStatsInput.GetEventId(event.category, event.name)
+      times_by_event_id[event_id].append(self.ThreadDurationIfPresent(event))
+
+    if not times_by_event_id:
+      return
+
+    inputs_by_event_id = {input_obj.event_id: input_obj
+                          for input_obj in inputs}
+
+    for (event_id, times) in times_by_event_id.iteritems():
+      input_for_event = inputs_by_event_id[event_id]
+      name = input_for_event.metric_name
+      results.AddValue(scalar.ScalarValue(
+          page=results.current_page,
+          name=name + '-count',
+          units='count',
+          value=len(times),
+          description='The number of times ' + name + ' was recorded.'))
+      results.AddValue(list_of_scalar_values.ListOfScalarValues(
+          page=results.current_page,
+          name=name,
+          units=input_for_event.units,
+          values=times,
+          description=input_for_event.metric_description))
diff --git a/tools/telemetry/telemetry/web_perf/metrics/trace_event_stats_unittest.py b/tools/telemetry/telemetry/web_perf/metrics/trace_event_stats_unittest.py
new file mode 100644
index 000000000000..242ae59d1162
--- /dev/null
+++ b/tools/telemetry/telemetry/web_perf/metrics/trace_event_stats_unittest.py
@@ -0,0 +1,146 @@
+# Copyright 2015 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import unittest
+
+from collections import namedtuple
+from telemetry.testing import test_page_test_results
+from telemetry.timeline import model as model_module
+from telemetry.timeline import slice as slice_module
+from telemetry.web_perf import timeline_interaction_record
+from telemetry.web_perf.metrics.trace_event_stats import TraceEventStats
+from telemetry.web_perf.metrics.trace_event_stats import TraceEventStatsInput
+
+
+FakeEvent = namedtuple('Event', 'name, start, end, thread_duration, args')
+Interaction = timeline_interaction_record.TimelineInteractionRecord
+TEST_INTERACTION_LABEL = 'Action_TestInteraction'
+
+RENDERER_PROCESS = 'Renderer'
+OTHER_PROCESS = 'Other'
+
+EVENT_CATEGORY1 = 'Category1'
+EVENT_CATEGORY2 = 'Category2'
+
+EVENT_NAME1 = 'Name1'
+EVENT_NAME2 = 'Name2'
+
+
+def TestInteraction(start, end):
+  return Interaction(TEST_INTERACTION_LABEL, start, end)
+
+
+class TraceEventStatsUnittest(unittest.TestCase):
+
+  def setUp(self):
+    self.model = model_module.TimelineModel()
+    self.renderer_process = self.model.GetOrCreateProcess(1)
+    self.renderer_process.name = RENDERER_PROCESS
+    self.main_thread = self.renderer_process.GetOrCreateThread(tid=11)
+    self.other_process = self.model.GetOrCreateProcess(2)
+    self.other_process.name = OTHER_PROCESS
+    self.other_thread = self.other_process.GetOrCreateThread(tid=12)
+
+  def GetThreadForProcessName(self, process_name):
+    if process_name == RENDERER_PROCESS:
+      return self.main_thread
+    elif process_name == OTHER_PROCESS:
+      return self.other_thread
+    else:
+      raise ValueError('Unexpected process name: ' + process_name)
+
+  def AddEvent(self, process_name, event_category, event_name,
+               start, duration, thread_start, thread_duration):
+    thread = self.GetThreadForProcessName(process_name)
+    record = slice_module.Slice(thread,
+                                event_category,
+                                event_name,
+                                start, duration, thread_start, thread_duration)
+    thread.PushSlice(record)
+
+  def RunAggregator(self, aggregator, interactions):
+    results = test_page_test_results.TestPageTestResults(self)
+    aggregator.AddResults(self.model, self.renderer_process,
+                          interactions, results)
+    return results
+
+  def testBasicUsage(self):
+    self.AddEvent(RENDERER_PROCESS, EVENT_CATEGORY1, EVENT_NAME1, 10, 8, 10, 5)
+    self.AddEvent(RENDERER_PROCESS, EVENT_CATEGORY1, EVENT_NAME1, 14, 2, 14, 2)
+    interactions = [TestInteraction(9, 14)]
+
+    aggregator = TraceEventStats()
+    aggregator.AddInput(TraceEventStatsInput(
+        EVENT_CATEGORY1,
+        EVENT_NAME1,
+        'metric-name',
+        'metric-description',
+        'units',
+        'Renderer'))
+
+    results = self.RunAggregator(aggregator, interactions)
+    results.AssertHasPageSpecificScalarValue('metric-name-count', 'count', 2)
+    results.AssertHasPageSpecificListOfScalarValues(
+        'metric-name', 'units', [5, 2])
+
+  def testFiltering(self):
+    # These should be recorded.
+    self.AddEvent(RENDERER_PROCESS, EVENT_CATEGORY1, EVENT_NAME1, 10, 8, 10, 5)
+    self.AddEvent(RENDERER_PROCESS, EVENT_CATEGORY1, EVENT_NAME1, 14, 2, 14, 2)
+    self.AddEvent(RENDERER_PROCESS, EVENT_CATEGORY1, EVENT_NAME1, 20, 6, 20, 1)
+
+    # These should be filtered out: outside the interactions, wrong
+    # category, wrong name, or wrong process.
+    self.AddEvent(RENDERER_PROCESS, EVENT_CATEGORY1, EVENT_NAME1, 15, 1, 15, 1)
+    self.AddEvent(RENDERER_PROCESS, EVENT_CATEGORY2, EVENT_NAME1, 11, 4, 11, 4)
+    self.AddEvent(RENDERER_PROCESS, EVENT_CATEGORY1, EVENT_NAME2, 11, 3, 11, 3)
+    self.AddEvent(OTHER_PROCESS, EVENT_CATEGORY1, EVENT_NAME1, 11, 2, 11, 2)
+
+    interactions = [TestInteraction(9, 14), TestInteraction(20, 21)]
+
+    aggregator = TraceEventStats()
+    # Test that the process name defaults to 'Renderer'.
+    aggregator.AddInput(TraceEventStatsInput(
+        EVENT_CATEGORY1,
+        EVENT_NAME1,
+        'metric-name',
+        'metric-description',
+        'units'))
+
+    results = self.RunAggregator(aggregator, interactions)
+    results.AssertHasPageSpecificScalarValue('metric-name-count', 'count', 3)
+    results.AssertHasPageSpecificListOfScalarValues(
+        'metric-name', 'units', [5, 2, 1])
+
+  def testNoInputs(self):
+    # With no inputs registered, none of these events should be recorded.
+    self.AddEvent(RENDERER_PROCESS, EVENT_CATEGORY1, EVENT_NAME1, 10, 8, 10, 5)
+    self.AddEvent(RENDERER_PROCESS, EVENT_CATEGORY1, EVENT_NAME1, 14, 2, 14, 2)
+    self.AddEvent(RENDERER_PROCESS, EVENT_CATEGORY1, EVENT_NAME1, 20, 6, 20, 1)
+
+    # Nor these, which would have been filtered out anyway.
+    self.AddEvent(RENDERER_PROCESS, EVENT_CATEGORY1, EVENT_NAME1, 15, 1, 15, 1)
+    self.AddEvent(RENDERER_PROCESS, EVENT_CATEGORY2, EVENT_NAME1, 11, 4, 11, 4)
+    self.AddEvent(RENDERER_PROCESS, EVENT_CATEGORY1, EVENT_NAME2, 11, 3, 11, 3)
+    self.AddEvent(OTHER_PROCESS, EVENT_CATEGORY1, EVENT_NAME1, 11, 2, 11, 2)
+
+    interactions = [TestInteraction(9, 14), TestInteraction(20, 21)]
+
+    aggregator = TraceEventStats()
+
+    results = self.RunAggregator(aggregator, interactions)
+    self.assertEquals([], results.all_page_specific_values)
+
+  def testNoEvents(self):
+    interactions = [TestInteraction(9, 14)]
+
+    aggregator = TraceEventStats()
+    aggregator.AddInput(TraceEventStatsInput(
+        EVENT_CATEGORY1,
+        EVENT_NAME1,
+        'metric-name',
+        'metric-description',
+        'units'))
+
+    results = self.RunAggregator(aggregator, interactions)
+    self.assertEquals([], results.all_page_specific_values)
diff --git a/tools/telemetry/telemetry/web_perf/timeline_based_measurement.py b/tools/telemetry/telemetry/web_perf/timeline_based_measurement.py
index b2c35e0ef23c..74316fb89dfd 100644
--- a/tools/telemetry/telemetry/web_perf/timeline_based_measurement.py
+++ b/tools/telemetry/telemetry/web_perf/timeline_based_measurement.py
@@ -11,6 +11,7 @@ from telemetry.value import trace
 from telemetry.web_perf.metrics import timeline_based_metric
 from telemetry.web_perf.metrics import blob_timeline
 from telemetry.web_perf.metrics import gpu_timeline
+from telemetry.web_perf.metrics import indexeddb_timeline
 from telemetry.web_perf.metrics import layout
 from telemetry.web_perf.metrics import memory_timeline
 from telemetry.web_perf.metrics import responsiveness_metric
@@ -45,7 +46,8 @@ def _GetAllTimelineBasedMetrics():
       gpu_timeline.GPUTimelineMetric(),
       blob_timeline.BlobTimelineMetric(),
       memory_timeline.MemoryTimelineMetric(),
-      text_selection.TextSelectionMetric())
+      text_selection.TextSelectionMetric(),
+      indexeddb_timeline.IndexedDBTimelineMetric())
 
 class InvalidInteractions(Exception):
-- 
2.11.4.GIT
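
As a usage reference, the sketch below shows how the TraceEventStats API
introduced in this patch could back a timeline metric for another subsystem,
mirroring the shape of indexeddb_timeline.py. The 'ExampleCategory' category,
'ExampleObject::ExampleOperation' event, and 'example-ops' metric name are
hypothetical placeholders, not events Chromium actually emits:

    # Hypothetical metric built on TraceEventStats; the category, event name,
    # and metric names below are illustrative placeholders.
    from telemetry.web_perf.metrics import timeline_based_metric
    from telemetry.web_perf.metrics.trace_event_stats import TraceEventStats
    from telemetry.web_perf.metrics.trace_event_stats import TraceEventStatsInput


    class ExampleTimelineMetric(timeline_based_metric.TimelineBasedMetric):
      def __init__(self):
        super(ExampleTimelineMetric, self).__init__()
        self._stats = TraceEventStats()
        # Per the TraceEventStats contract, this produces an 'example-ops'
        # list of durations (ms) and an 'example-ops-count' scalar.
        self._stats.AddInput(TraceEventStatsInput(
            event_category='ExampleCategory',
            event_name='ExampleObject::ExampleOperation',
            metric_name='example-ops',
            metric_description='Durations of the hypothetical example ops.',
            units='ms',
            process_name='Browser'))

      def AddResults(self, model, renderer_process, interactions, results):
        self._stats.AddResults(model, renderer_process, interactions, results)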