base/prefs/json_pref_store.cc
// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "base/prefs/json_pref_store.h"

#include <algorithm>

#include "base/bind.h"
#include "base/callback.h"
#include "base/files/file_path.h"
#include "base/files/file_util.h"
#include "base/json/json_file_value_serializer.h"
#include "base/json/json_string_value_serializer.h"
#include "base/memory/ref_counted.h"
#include "base/metrics/histogram.h"
#include "base/prefs/pref_filter.h"
#include "base/sequenced_task_runner.h"
#include "base/strings/string_number_conversions.h"
#include "base/strings/string_util.h"
#include "base/task_runner_util.h"
#include "base/threading/sequenced_worker_pool.h"
#include "base/time/default_clock.h"
#include "base/values.h"

// Result returned from internal read tasks.
struct JsonPrefStore::ReadResult {
 public:
  ReadResult();
  ~ReadResult();

  scoped_ptr<base::Value> value;
  PrefReadError error;
  bool no_dir;

 private:
  DISALLOW_COPY_AND_ASSIGN(ReadResult);
};

JsonPrefStore::ReadResult::ReadResult()
    : error(PersistentPrefStore::PREF_READ_ERROR_NONE), no_dir(false) {
}

JsonPrefStore::ReadResult::~ReadResult() {
}

namespace {

// Some extensions we'll tack on to copies of the Preferences files.
const base::FilePath::CharType kBadExtension[] = FILE_PATH_LITERAL("bad");

PersistentPrefStore::PrefReadError HandleReadErrors(
    const base::Value* value,
    const base::FilePath& path,
    int error_code,
    const std::string& error_msg) {
  if (!value) {
    DVLOG(1) << "Error while loading JSON file: " << error_msg
             << ", file: " << path.value();
    switch (error_code) {
      case JSONFileValueDeserializer::JSON_ACCESS_DENIED:
        return PersistentPrefStore::PREF_READ_ERROR_ACCESS_DENIED;
        break;
      case JSONFileValueDeserializer::JSON_CANNOT_READ_FILE:
        return PersistentPrefStore::PREF_READ_ERROR_FILE_OTHER;
        break;
      case JSONFileValueDeserializer::JSON_FILE_LOCKED:
        return PersistentPrefStore::PREF_READ_ERROR_FILE_LOCKED;
        break;
      case JSONFileValueDeserializer::JSON_NO_SUCH_FILE:
        return PersistentPrefStore::PREF_READ_ERROR_NO_FILE;
        break;
      default:
        // JSON errors indicate file corruption of some sort.
        // Since the file is corrupt, move it to the side and continue with
        // empty preferences. This will result in them losing their settings.
        // We keep the old file for possible support and debugging assistance
        // as well as to detect if they're seeing these errors repeatedly.
        // TODO(erikkay) Instead, use the last known good file.
        base::FilePath bad = path.ReplaceExtension(kBadExtension);
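        // For example, a prefs file named "Preferences" would be set aside as
        // "Preferences.bad" ("Preferences" is only an illustrative name; any
        // prefs file gets the same treatment).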

        // If they've ever had a parse error before, put them in another bucket.
        // TODO(erikkay) if we keep this error checking for very long, we may
        // want to differentiate between recent and long ago errors.
        bool bad_existed = base::PathExists(bad);
        base::Move(path, bad);
        return bad_existed ? PersistentPrefStore::PREF_READ_ERROR_JSON_REPEAT
                           : PersistentPrefStore::PREF_READ_ERROR_JSON_PARSE;
    }
  } else if (!value->IsType(base::Value::TYPE_DICTIONARY)) {
    return PersistentPrefStore::PREF_READ_ERROR_JSON_TYPE;
  }
  return PersistentPrefStore::PREF_READ_ERROR_NONE;
}

// Records a sample for |size| in the Settings.JsonDataReadSizeKilobytes
// histogram suffixed with the base name of the JSON file under |path|.
void RecordJsonDataSizeHistogram(const base::FilePath& path, size_t size) {
  std::string spaceless_basename;
  base::ReplaceChars(path.BaseName().MaybeAsASCII(), " ", "_",
                     &spaceless_basename);

  // The histogram below is an expansion of the UMA_HISTOGRAM_CUSTOM_COUNTS
  // macro adapted to allow for a dynamically suffixed histogram name.
  // Note: The factory creates and owns the histogram.
  base::HistogramBase* histogram = base::Histogram::FactoryGet(
      "Settings.JsonDataReadSizeKilobytes." + spaceless_basename, 1, 10000, 50,
      base::HistogramBase::kUmaTargetedHistogramFlag);
  histogram->Add(static_cast<int>(size) / 1024);
}
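
// For a compile-time-constant name, the FactoryGet() call above would be
// roughly equivalent to
//   UMA_HISTOGRAM_CUSTOM_COUNTS(
//       "Settings.JsonDataReadSizeKilobytes.Preferences",
//       size / 1024, 1, 10000, 50);
// (with "Preferences" standing in for the actual base name). The macro cannot
// be used here because it caches the histogram for a single literal name.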

scoped_ptr<JsonPrefStore::ReadResult> ReadPrefsFromDisk(
    const base::FilePath& path,
    const base::FilePath& alternate_path) {
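  // If the primary file is missing but an alternate file exists, promote the
  // alternate to the primary location before reading.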
  if (!base::PathExists(path) && !alternate_path.empty() &&
      base::PathExists(alternate_path)) {
    base::Move(alternate_path, path);
  }

  int error_code;
  std::string error_msg;
  scoped_ptr<JsonPrefStore::ReadResult> read_result(
      new JsonPrefStore::ReadResult);
  JSONFileValueDeserializer deserializer(path);
  read_result->value.reset(deserializer.Deserialize(&error_code, &error_msg));
  read_result->error =
      HandleReadErrors(read_result->value.get(), path, error_code, error_msg);
  read_result->no_dir = !base::PathExists(path.DirName());

  if (read_result->error == PersistentPrefStore::PREF_READ_ERROR_NONE)
    RecordJsonDataSizeHistogram(path, deserializer.get_last_read_size());

  return read_result.Pass();
}

}  // namespace

// static
scoped_refptr<base::SequencedTaskRunner> JsonPrefStore::GetTaskRunnerForFile(
    const base::FilePath& filename,
    base::SequencedWorkerPool* worker_pool) {
  std::string token("json_pref_store-");
  token.append(filename.AsUTF8Unsafe());
  return worker_pool->GetSequencedTaskRunnerWithShutdownBehavior(
      worker_pool->GetNamedSequenceToken(token),
      base::SequencedWorkerPool::BLOCK_SHUTDOWN);
}
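
// Because the sequence token above is derived from the file path, all
// JsonPrefStores backed by the same file end up on the same sequence. As a
// purely illustrative example, a file at "/tmp/profile/Preferences" would map
// to the token "json_pref_store-/tmp/profile/Preferences".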

JsonPrefStore::JsonPrefStore(
    const base::FilePath& filename,
    const scoped_refptr<base::SequencedTaskRunner>& sequenced_task_runner,
    scoped_ptr<PrefFilter> pref_filter)
    : JsonPrefStore(filename,
                    base::FilePath(),
                    sequenced_task_runner,
                    pref_filter.Pass()) {
}

JsonPrefStore::JsonPrefStore(
    const base::FilePath& filename,
    const base::FilePath& alternate_filename,
    const scoped_refptr<base::SequencedTaskRunner>& sequenced_task_runner,
    scoped_ptr<PrefFilter> pref_filter)
    : path_(filename),
      alternate_path_(alternate_filename),
      sequenced_task_runner_(sequenced_task_runner),
      prefs_(new base::DictionaryValue()),
      read_only_(false),
      writer_(filename, sequenced_task_runner),
      pref_filter_(pref_filter.Pass()),
      initialized_(false),
      filtering_in_progress_(false),
      pending_lossy_write_(false),
      read_error_(PREF_READ_ERROR_NONE),
      write_count_histogram_(writer_.commit_interval(), path_) {
  DCHECK(!path_.empty());
}

bool JsonPrefStore::GetValue(const std::string& key,
                             const base::Value** result) const {
  DCHECK(CalledOnValidThread());

  base::Value* tmp = NULL;
  if (!prefs_->Get(key, &tmp))
    return false;

  if (result)
    *result = tmp;
  return true;
}

void JsonPrefStore::AddObserver(PrefStore::Observer* observer) {
  DCHECK(CalledOnValidThread());

  observers_.AddObserver(observer);
}

void JsonPrefStore::RemoveObserver(PrefStore::Observer* observer) {
  DCHECK(CalledOnValidThread());

  observers_.RemoveObserver(observer);
}

bool JsonPrefStore::HasObservers() const {
  DCHECK(CalledOnValidThread());

  return observers_.might_have_observers();
}

bool JsonPrefStore::IsInitializationComplete() const {
  DCHECK(CalledOnValidThread());

  return initialized_;
}

bool JsonPrefStore::GetMutableValue(const std::string& key,
                                    base::Value** result) {
  DCHECK(CalledOnValidThread());

  return prefs_->Get(key, result);
}

void JsonPrefStore::SetValue(const std::string& key,
                             scoped_ptr<base::Value> value,
                             uint32 flags) {
  DCHECK(CalledOnValidThread());

  DCHECK(value);
  base::Value* old_value = NULL;
  prefs_->Get(key, &old_value);
  if (!old_value || !value->Equals(old_value)) {
    prefs_->Set(key, value.Pass());
    ReportValueChanged(key, flags);
  }
}

void JsonPrefStore::SetValueSilently(const std::string& key,
                                     scoped_ptr<base::Value> value,
                                     uint32 flags) {
  DCHECK(CalledOnValidThread());

  DCHECK(value);
  base::Value* old_value = NULL;
  prefs_->Get(key, &old_value);
  if (!old_value || !value->Equals(old_value)) {
    prefs_->Set(key, value.Pass());
    ScheduleWrite(flags);
  }
}

void JsonPrefStore::RemoveValue(const std::string& key, uint32 flags) {
  DCHECK(CalledOnValidThread());

  if (prefs_->RemovePath(key, NULL))
    ReportValueChanged(key, flags);
}

void JsonPrefStore::RemoveValueSilently(const std::string& key, uint32 flags) {
  DCHECK(CalledOnValidThread());

  prefs_->RemovePath(key, NULL);
  ScheduleWrite(flags);
}

bool JsonPrefStore::ReadOnly() const {
  DCHECK(CalledOnValidThread());

  return read_only_;
}

PersistentPrefStore::PrefReadError JsonPrefStore::GetReadError() const {
  DCHECK(CalledOnValidThread());

  return read_error_;
}

PersistentPrefStore::PrefReadError JsonPrefStore::ReadPrefs() {
  DCHECK(CalledOnValidThread());

  OnFileRead(ReadPrefsFromDisk(path_, alternate_path_));
  return filtering_in_progress_ ? PREF_READ_ERROR_ASYNCHRONOUS_TASK_INCOMPLETE
                                : read_error_;
}

void JsonPrefStore::ReadPrefsAsync(ReadErrorDelegate* error_delegate) {
  DCHECK(CalledOnValidThread());

  initialized_ = false;
  error_delegate_.reset(error_delegate);

  // Weakly binds the read task so that it doesn't kick in during shutdown.
  base::PostTaskAndReplyWithResult(
      sequenced_task_runner_.get(),
      FROM_HERE,
      base::Bind(&ReadPrefsFromDisk, path_, alternate_path_),
      base::Bind(&JsonPrefStore::OnFileRead, AsWeakPtr()));
}

void JsonPrefStore::CommitPendingWrite() {
  DCHECK(CalledOnValidThread());

  // Schedule a write for any lossy writes that are outstanding to ensure that
  // they get flushed when this function is called.
  SchedulePendingLossyWrites();

  if (writer_.HasPendingWrite() && !read_only_)
    writer_.DoScheduledWrite();
}

void JsonPrefStore::SchedulePendingLossyWrites() {
  if (pending_lossy_write_)
    writer_.ScheduleWrite(this);
}

void JsonPrefStore::ReportValueChanged(const std::string& key, uint32 flags) {
  DCHECK(CalledOnValidThread());

  if (pref_filter_)
    pref_filter_->FilterUpdate(key);

  FOR_EACH_OBSERVER(PrefStore::Observer, observers_, OnPrefValueChanged(key));

  ScheduleWrite(flags);
}

void JsonPrefStore::RegisterOnNextSuccessfulWriteCallback(
    const base::Closure& on_next_successful_write) {
  DCHECK(CalledOnValidThread());

  writer_.RegisterOnNextSuccessfulWriteCallback(on_next_successful_write);
}

void JsonPrefStore::OnFileRead(scoped_ptr<ReadResult> read_result) {
  DCHECK(CalledOnValidThread());

  DCHECK(read_result);

  scoped_ptr<base::DictionaryValue> unfiltered_prefs(new base::DictionaryValue);

  read_error_ = read_result->error;

  bool initialization_successful = !read_result->no_dir;

  if (initialization_successful) {
    switch (read_error_) {
      case PREF_READ_ERROR_ACCESS_DENIED:
      case PREF_READ_ERROR_FILE_OTHER:
      case PREF_READ_ERROR_FILE_LOCKED:
      case PREF_READ_ERROR_JSON_TYPE:
      case PREF_READ_ERROR_FILE_NOT_SPECIFIED:
        read_only_ = true;
        break;
      case PREF_READ_ERROR_NONE:
        DCHECK(read_result->value.get());
        unfiltered_prefs.reset(
            static_cast<base::DictionaryValue*>(read_result->value.release()));
        break;
      case PREF_READ_ERROR_NO_FILE:
        // If the file just doesn't exist, maybe this is first run. In any
        // case, there's no harm in writing out default prefs.
        break;
      case PREF_READ_ERROR_JSON_PARSE:
      case PREF_READ_ERROR_JSON_REPEAT:
        break;
      case PREF_READ_ERROR_ASYNCHRONOUS_TASK_INCOMPLETE:
        // This is a special error code to be returned by ReadPrefs when it
        // can't complete synchronously; it should never be returned by the
        // read operation itself.
        NOTREACHED();
        break;
      case PREF_READ_ERROR_MAX_ENUM:
        NOTREACHED();
        break;
    }
  }

  if (pref_filter_) {
    filtering_in_progress_ = true;
    const PrefFilter::PostFilterOnLoadCallback post_filter_on_load_callback(
        base::Bind(
            &JsonPrefStore::FinalizeFileRead, AsWeakPtr(),
            initialization_successful));
    pref_filter_->FilterOnLoad(post_filter_on_load_callback,
                               unfiltered_prefs.Pass());
  } else {
    FinalizeFileRead(initialization_successful, unfiltered_prefs.Pass(), false);
  }
}

JsonPrefStore::~JsonPrefStore() {
  CommitPendingWrite();
}

bool JsonPrefStore::SerializeData(std::string* output) {
  DCHECK(CalledOnValidThread());

  pending_lossy_write_ = false;

  write_count_histogram_.RecordWriteOccured();

  if (pref_filter_)
    pref_filter_->FilterSerializeData(prefs_.get());

  JSONStringValueSerializer serializer(output);
  // Not pretty-printing prefs shrinks pref file size by ~30%. To obtain
  // readable prefs for debugging purposes, you can dump your prefs into any
  // command-line or online JSON pretty printing tool.
  serializer.set_pretty_print(false);
  return serializer.Serialize(*prefs_);
}
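
// For a rough sense of the output (hypothetical pref contents), the compact
// form written to disk looks like
//   {"browser":{"show_home_button":true},"homepage":"http://example.com/"}
// rather than the indented, multi-line form a pretty printer would produce.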

void JsonPrefStore::FinalizeFileRead(bool initialization_successful,
                                     scoped_ptr<base::DictionaryValue> prefs,
                                     bool schedule_write) {
  DCHECK(CalledOnValidThread());

  filtering_in_progress_ = false;

  if (!initialization_successful) {
    FOR_EACH_OBSERVER(PrefStore::Observer,
                      observers_,
                      OnInitializationCompleted(false));
    return;
  }

  prefs_ = prefs.Pass();

  initialized_ = true;

  if (schedule_write)
    ScheduleWrite(DEFAULT_PREF_WRITE_FLAGS);

  if (error_delegate_ && read_error_ != PREF_READ_ERROR_NONE)
    error_delegate_->OnError(read_error_);

  FOR_EACH_OBSERVER(PrefStore::Observer,
                    observers_,
                    OnInitializationCompleted(true));

  return;
}

void JsonPrefStore::ScheduleWrite(uint32 flags) {
  if (read_only_)
    return;

  if (flags & LOSSY_PREF_WRITE_FLAG)
    pending_lossy_write_ = true;
  else
    writer_.ScheduleWrite(this);
}
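
// A lossy write only raises pending_lossy_write_; the data reaches disk the
// next time a non-lossy write is scheduled or CommitPendingWrite() runs (via
// SchedulePendingLossyWrites() above), so lossy updates may be lost if the
// process exits before then.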

// NOTE: This value should NOT be changed without renaming the histogram,
// otherwise it will create incompatible buckets.
const int32_t
    JsonPrefStore::WriteCountHistogram::kHistogramWriteReportIntervalMins = 5;

JsonPrefStore::WriteCountHistogram::WriteCountHistogram(
    const base::TimeDelta& commit_interval,
    const base::FilePath& path)
    : WriteCountHistogram(commit_interval,
                          path,
                          scoped_ptr<base::Clock>(new base::DefaultClock)) {
}

JsonPrefStore::WriteCountHistogram::WriteCountHistogram(
    const base::TimeDelta& commit_interval,
    const base::FilePath& path,
    scoped_ptr<base::Clock> clock)
    : commit_interval_(commit_interval),
      path_(path),
      clock_(clock.release()),
      report_interval_(
          base::TimeDelta::FromMinutes(kHistogramWriteReportIntervalMins)),
      last_report_time_(clock_->Now()),
      writes_since_last_report_(0) {
}

JsonPrefStore::WriteCountHistogram::~WriteCountHistogram() {
  ReportOutstandingWrites();
}

void JsonPrefStore::WriteCountHistogram::RecordWriteOccured() {
  ReportOutstandingWrites();

  ++writes_since_last_report_;
}

void JsonPrefStore::WriteCountHistogram::ReportOutstandingWrites() {
  base::Time current_time = clock_->Now();
  base::TimeDelta time_since_last_report = current_time - last_report_time_;

  if (time_since_last_report <= report_interval_)
    return;

  // If the time since the last report exceeds the report interval, report all
  // the writes since the last report. They must have all occurred in the same
  // report interval.
  base::HistogramBase* histogram = GetHistogram();
  histogram->Add(writes_since_last_report_);

  // There may be several report intervals that elapsed that don't have any
  // writes in them. Report these too.
  int64 total_num_intervals_elapsed =
      (time_since_last_report / report_interval_);
  for (int64 i = 0; i < total_num_intervals_elapsed - 1; ++i)
    histogram->Add(0);

  writes_since_last_report_ = 0;
  last_report_time_ += total_num_intervals_elapsed * report_interval_;
}

base::HistogramBase* JsonPrefStore::WriteCountHistogram::GetHistogram() {
  std::string spaceless_basename;
  base::ReplaceChars(path_.BaseName().MaybeAsASCII(), " ", "_",
                     &spaceless_basename);
  std::string histogram_name =
      "Settings.JsonDataWriteCount." + spaceless_basename;

  // The min value for a histogram is 1. The max value is the maximum number of
  // writes that can occur in the window being recorded. The number of buckets
  // used is the max value (plus the underflow/overflow buckets).
  int32_t min_value = 1;
  int32_t max_value = report_interval_ / commit_interval_;
  int32_t num_buckets = max_value + 1;

  // NOTE: These values should NOT be changed without renaming the histogram,
  // otherwise it will create incompatible buckets.
  DCHECK_EQ(30, max_value);
  DCHECK_EQ(31, num_buckets);
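
  // Checking the arithmetic: the report interval is 5 minutes, and with the
  // writer's usual 10-second commit interval (an assumption about the default
  // ImportantFileWriter configuration, not something enforced here),
  // 5 * 60 / 10 = 30 writes fit in one report window, matching the DCHECKs
  // above.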

  // The histogram below is an expansion of the UMA_HISTOGRAM_CUSTOM_COUNTS
  // macro adapted to allow for a dynamically suffixed histogram name.
  // Note: The factory creates and owns the histogram.
  base::HistogramBase* histogram = base::Histogram::FactoryGet(
      histogram_name, min_value, max_value, num_buckets,
      base::HistogramBase::kUmaTargetedHistogramFlag);
  return histogram;
}