Update broken references to image assets
[chromium-blink-merge.git] / base / prefs / json_pref_store.cc
blob87943d168ed141cf29c0894e19edf810485ea7d4
1 // Copyright (c) 2012 The Chromium Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
5 #include "base/prefs/json_pref_store.h"
7 #include <algorithm>
9 #include "base/bind.h"
10 #include "base/callback.h"
11 #include "base/files/file_path.h"
12 #include "base/files/file_util.h"
13 #include "base/json/json_file_value_serializer.h"
14 #include "base/json/json_string_value_serializer.h"
15 #include "base/memory/ref_counted.h"
16 #include "base/metrics/histogram.h"
17 #include "base/prefs/pref_filter.h"
18 #include "base/sequenced_task_runner.h"
19 #include "base/strings/string_number_conversions.h"
20 #include "base/strings/string_util.h"
21 #include "base/task_runner_util.h"
22 #include "base/threading/sequenced_worker_pool.h"
23 #include "base/time/default_clock.h"
24 #include "base/values.h"
// Result returned from internal read tasks.
struct JsonPrefStore::ReadResult {
 public:
  ReadResult();
  ~ReadResult();

  // Parsed preferences; null when the read failed.
  scoped_ptr<base::Value> value;
  // Outcome of the read; PREF_READ_ERROR_NONE on success.
  PrefReadError error;
  // True when the directory containing the prefs file did not exist, in
  // which case initialization is considered unsuccessful.
  bool no_dir;

 private:
  DISALLOW_COPY_AND_ASSIGN(ReadResult);
};
// Default-initialize to "successful read, directory exists"; the read task
// overwrites these fields as appropriate.
JsonPrefStore::ReadResult::ReadResult()
    : error(PersistentPrefStore::PREF_READ_ERROR_NONE), no_dir(false) {
}

JsonPrefStore::ReadResult::~ReadResult() {
}
namespace {

// Some extensions we'll tack on to copies of the Preferences files.
// A corrupt "Preferences" file is renamed to "Preferences.bad" so it is
// preserved for debugging while the store restarts from empty prefs.
const base::FilePath::CharType kBadExtension[] = FILE_PATH_LITERAL("bad");
// Maps the outcome of deserializing |path| to a PrefReadError. |value| is
// the parse result (null on failure) and |error_code|/|error_msg| come from
// JSONFileValueDeserializer. On an unrecognized error code (i.e. JSON
// corruption) this has the side effect of moving the file aside to
// "<path>.bad" before returning.
PersistentPrefStore::PrefReadError HandleReadErrors(
    const base::Value* value,
    const base::FilePath& path,
    int error_code,
    const std::string& error_msg) {
  if (!value) {
    DVLOG(1) << "Error while loading JSON file: " << error_msg
             << ", file: " << path.value();
    switch (error_code) {
      case JSONFileValueDeserializer::JSON_ACCESS_DENIED:
        return PersistentPrefStore::PREF_READ_ERROR_ACCESS_DENIED;
      case JSONFileValueDeserializer::JSON_CANNOT_READ_FILE:
        return PersistentPrefStore::PREF_READ_ERROR_FILE_OTHER;
      case JSONFileValueDeserializer::JSON_FILE_LOCKED:
        return PersistentPrefStore::PREF_READ_ERROR_FILE_LOCKED;
      case JSONFileValueDeserializer::JSON_NO_SUCH_FILE:
        return PersistentPrefStore::PREF_READ_ERROR_NO_FILE;
      default: {
        // JSON errors indicate file corruption of some sort.
        // Since the file is corrupt, move it to the side and continue with
        // empty preferences. This will result in them losing their settings.
        // We keep the old file for possible support and debugging assistance
        // as well as to detect if they're seeing these errors repeatedly.
        // TODO(erikkay) Instead, use the last known good file.
        base::FilePath bad = path.ReplaceExtension(kBadExtension);

        // If they've ever had a parse error before, put them in another bucket.
        // TODO(erikkay) if we keep this error checking for very long, we may
        // want to differentiate between recent and long ago errors.
        bool bad_existed = base::PathExists(bad);
        base::Move(path, bad);
        return bad_existed ? PersistentPrefStore::PREF_READ_ERROR_JSON_REPEAT
                           : PersistentPrefStore::PREF_READ_ERROR_JSON_PARSE;
      }
    }
  }
  // A successful parse must still yield a dictionary at the top level.
  if (!value->IsType(base::Value::TYPE_DICTIONARY))
    return PersistentPrefStore::PREF_READ_ERROR_JSON_TYPE;
  return PersistentPrefStore::PREF_READ_ERROR_NONE;
}
92 // Records a sample for |size| in the Settings.JsonDataReadSizeKilobytes
93 // histogram suffixed with the base name of the JSON file under |path|.
94 void RecordJsonDataSizeHistogram(const base::FilePath& path, size_t size) {
95 std::string spaceless_basename;
96 base::ReplaceChars(path.BaseName().MaybeAsASCII(), " ", "_",
97 &spaceless_basename);
99 // The histogram below is an expansion of the UMA_HISTOGRAM_CUSTOM_COUNTS
100 // macro adapted to allow for a dynamically suffixed histogram name.
101 // Note: The factory creates and owns the histogram.
102 base::HistogramBase* histogram = base::Histogram::FactoryGet(
103 "Settings.JsonDataReadSizeKilobytes." + spaceless_basename, 1, 10000, 50,
104 base::HistogramBase::kUmaTargetedHistogramFlag);
105 histogram->Add(static_cast<int>(size) / 1024);
// Reads and parses the preferences file at |path|, falling back to
// |alternate_path| (by moving it into place) when |path| does not exist.
// Runs on the store's sequenced task runner; returns a ReadResult carrying
// the parsed value, the error status, and whether the parent directory was
// missing.
scoped_ptr<JsonPrefStore::ReadResult> ReadPrefsFromDisk(
    const base::FilePath& path,
    const base::FilePath& alternate_path) {
  // Promote the alternate file to the primary location if only it exists.
  if (!base::PathExists(path) && !alternate_path.empty() &&
      base::PathExists(alternate_path)) {
    base::Move(alternate_path, path);
  }

  int error_code;
  std::string error_msg;
  scoped_ptr<JsonPrefStore::ReadResult> read_result(
      new JsonPrefStore::ReadResult);
  JSONFileValueDeserializer deserializer(path);
  read_result->value.reset(deserializer.Deserialize(&error_code, &error_msg));
  read_result->error =
      HandleReadErrors(read_result->value.get(), path, error_code, error_msg);
  read_result->no_dir = !base::PathExists(path.DirName());

  // Only report the data size for successful reads.
  if (read_result->error == PersistentPrefStore::PREF_READ_ERROR_NONE)
    RecordJsonDataSizeHistogram(path, deserializer.get_last_read_size());

  return read_result.Pass();
}
132 } // namespace
134 // static
135 scoped_refptr<base::SequencedTaskRunner> JsonPrefStore::GetTaskRunnerForFile(
136 const base::FilePath& filename,
137 base::SequencedWorkerPool* worker_pool) {
138 std::string token("json_pref_store-");
139 token.append(filename.AsUTF8Unsafe());
140 return worker_pool->GetSequencedTaskRunnerWithShutdownBehavior(
141 worker_pool->GetNamedSequenceToken(token),
142 base::SequencedWorkerPool::BLOCK_SHUTDOWN);
// Convenience constructor: no alternate file; delegates to the full
// constructor with an empty alternate path.
JsonPrefStore::JsonPrefStore(
    const base::FilePath& pref_filename,
    const scoped_refptr<base::SequencedTaskRunner>& sequenced_task_runner,
    scoped_ptr<PrefFilter> pref_filter)
    : JsonPrefStore(pref_filename,
                    base::FilePath(),
                    sequenced_task_runner,
                    pref_filter.Pass()) {
}
// Full constructor. |pref_alternate_filename| may be empty; when non-empty
// it is used as a fallback source if |pref_filename| is missing at read
// time. |pref_filter| (may be null) is consulted on load, serialize, and
// value updates. The store starts uninitialized with an empty dictionary.
JsonPrefStore::JsonPrefStore(
    const base::FilePath& pref_filename,
    const base::FilePath& pref_alternate_filename,
    const scoped_refptr<base::SequencedTaskRunner>& sequenced_task_runner,
    scoped_ptr<PrefFilter> pref_filter)
    : path_(pref_filename),
      alternate_path_(pref_alternate_filename),
      sequenced_task_runner_(sequenced_task_runner),
      prefs_(new base::DictionaryValue()),
      read_only_(false),
      writer_(pref_filename, sequenced_task_runner),
      pref_filter_(pref_filter.Pass()),
      initialized_(false),
      filtering_in_progress_(false),
      pending_lossy_write_(false),
      read_error_(PREF_READ_ERROR_NONE),
      write_count_histogram_(writer_.commit_interval(), path_) {
  DCHECK(!path_.empty());
}
175 bool JsonPrefStore::GetValue(const std::string& key,
176 const base::Value** result) const {
177 DCHECK(CalledOnValidThread());
179 base::Value* tmp = nullptr;
180 if (!prefs_->Get(key, &tmp))
181 return false;
183 if (result)
184 *result = tmp;
185 return true;
// Registers |observer| for value-change and initialization notifications.
void JsonPrefStore::AddObserver(PrefStore::Observer* observer) {
  DCHECK(CalledOnValidThread());

  observers_.AddObserver(observer);
}
// Unregisters a previously added |observer|.
void JsonPrefStore::RemoveObserver(PrefStore::Observer* observer) {
  DCHECK(CalledOnValidThread());

  observers_.RemoveObserver(observer);
}
// Returns whether any observers may currently be registered.
bool JsonPrefStore::HasObservers() const {
  DCHECK(CalledOnValidThread());

  return observers_.might_have_observers();
}
// True once the initial read (and any filtering) has finished successfully.
bool JsonPrefStore::IsInitializationComplete() const {
  DCHECK(CalledOnValidThread());

  return initialized_;
}
// Like GetValue() but yields a mutable pointer into the dictionary.
// Callers that modify the value must follow up with ReportValueChanged().
bool JsonPrefStore::GetMutableValue(const std::string& key,
                                    base::Value** result) {
  DCHECK(CalledOnValidThread());

  return prefs_->Get(key, result);
}
219 void JsonPrefStore::SetValue(const std::string& key,
220 scoped_ptr<base::Value> value,
221 uint32 flags) {
222 DCHECK(CalledOnValidThread());
224 DCHECK(value);
225 base::Value* old_value = nullptr;
226 prefs_->Get(key, &old_value);
227 if (!old_value || !value->Equals(old_value)) {
228 prefs_->Set(key, value.Pass());
229 ReportValueChanged(key, flags);
233 void JsonPrefStore::SetValueSilently(const std::string& key,
234 scoped_ptr<base::Value> value,
235 uint32 flags) {
236 DCHECK(CalledOnValidThread());
238 DCHECK(value);
239 base::Value* old_value = nullptr;
240 prefs_->Get(key, &old_value);
241 if (!old_value || !value->Equals(old_value)) {
242 prefs_->Set(key, value.Pass());
243 ScheduleWrite(flags);
// Removes |key| (a dotted path); on success, notifies observers and
// schedules a write honoring |flags|.
void JsonPrefStore::RemoveValue(const std::string& key, uint32 flags) {
  DCHECK(CalledOnValidThread());

  if (prefs_->RemovePath(key, nullptr))
    ReportValueChanged(key, flags);
}
// Removes |key| without notifying observers; unconditionally schedules a
// write honoring |flags|.
void JsonPrefStore::RemoveValueSilently(const std::string& key, uint32 flags) {
  DCHECK(CalledOnValidThread());

  prefs_->RemovePath(key, nullptr);
  ScheduleWrite(flags);
}
// True when a prior read error has made the store read-only (writes are
// suppressed in ScheduleWrite/CommitPendingWrite).
bool JsonPrefStore::ReadOnly() const {
  DCHECK(CalledOnValidThread());

  return read_only_;
}
// Returns the error recorded by the most recent read.
PersistentPrefStore::PrefReadError JsonPrefStore::GetReadError() const {
  DCHECK(CalledOnValidThread());

  return read_error_;
}
// Synchronous read: loads prefs from disk on the calling thread. If a
// PrefFilter defers completion, reports
// PREF_READ_ERROR_ASYNCHRONOUS_TASK_INCOMPLETE instead of the final error.
PersistentPrefStore::PrefReadError JsonPrefStore::ReadPrefs() {
  DCHECK(CalledOnValidThread());

  OnFileRead(ReadPrefsFromDisk(path_, alternate_path_));
  return filtering_in_progress_ ? PREF_READ_ERROR_ASYNCHRONOUS_TASK_INCOMPLETE
                                : read_error_;
}
// Asynchronous read: posts the disk read to the store's sequenced task
// runner and replies with OnFileRead on this thread. Takes ownership of
// |error_delegate| (may be null), which is notified of any read error.
void JsonPrefStore::ReadPrefsAsync(ReadErrorDelegate* error_delegate) {
  DCHECK(CalledOnValidThread());

  initialized_ = false;
  error_delegate_.reset(error_delegate);

  // Weakly binds the read task so that it doesn't kick in during shutdown.
  base::PostTaskAndReplyWithResult(
      sequenced_task_runner_.get(),
      FROM_HERE,
      base::Bind(&ReadPrefsFromDisk, path_, alternate_path_),
      base::Bind(&JsonPrefStore::OnFileRead, AsWeakPtr()));
}
// Forces any scheduled write (including outstanding lossy writes) to be
// committed now, unless the store is read-only.
void JsonPrefStore::CommitPendingWrite() {
  DCHECK(CalledOnValidThread());

  // Schedule a write for any lossy writes that are outstanding to ensure that
  // they get flushed when this function is called.
  SchedulePendingLossyWrites();

  if (writer_.HasPendingWrite() && !read_only_)
    writer_.DoScheduledWrite();
}
// Converts an outstanding lossy write (see ScheduleWrite) into a real
// scheduled write on the ImportantFileWriter.
void JsonPrefStore::SchedulePendingLossyWrites() {
  if (pending_lossy_write_)
    writer_.ScheduleWrite(this);
}
// Notifies the filter and all observers that |key| changed, then schedules
// a write honoring |flags|. Called after any externally visible mutation.
void JsonPrefStore::ReportValueChanged(const std::string& key, uint32 flags) {
  DCHECK(CalledOnValidThread());

  if (pref_filter_)
    pref_filter_->FilterUpdate(key);

  FOR_EACH_OBSERVER(PrefStore::Observer, observers_, OnPrefValueChanged(key));

  ScheduleWrite(flags);
}
// Forwards |on_next_successful_write| to the ImportantFileWriter; it fires
// once, after the next write completes successfully.
void JsonPrefStore::RegisterOnNextSuccessfulWriteCallback(
    const base::Closure& on_next_successful_write) {
  DCHECK(CalledOnValidThread());

  writer_.RegisterOnNextSuccessfulWriteCallback(on_next_successful_write);
}
// Completion handler for ReadPrefsFromDisk (both sync and async paths).
// Classifies the read error (possibly switching the store to read-only),
// then hands the loaded dictionary to the PrefFilter if one is installed —
// in which case FinalizeFileRead runs later via the filter's callback —
// or finalizes immediately otherwise.
void JsonPrefStore::OnFileRead(scoped_ptr<ReadResult> read_result) {
  DCHECK(CalledOnValidThread());

  DCHECK(read_result);

  scoped_ptr<base::DictionaryValue> unfiltered_prefs(new base::DictionaryValue);

  read_error_ = read_result->error;

  // A missing parent directory means the profile is gone; don't initialize.
  bool initialization_successful = !read_result->no_dir;

  if (initialization_successful) {
    switch (read_error_) {
      case PREF_READ_ERROR_ACCESS_DENIED:
      case PREF_READ_ERROR_FILE_OTHER:
      case PREF_READ_ERROR_FILE_LOCKED:
      case PREF_READ_ERROR_JSON_TYPE:
      case PREF_READ_ERROR_FILE_NOT_SPECIFIED:
        // Can't trust the on-disk state; don't write over it.
        read_only_ = true;
        break;
      case PREF_READ_ERROR_NONE:
        DCHECK(read_result->value.get());
        // HandleReadErrors guarantees the value is a dictionary here.
        unfiltered_prefs.reset(
            static_cast<base::DictionaryValue*>(read_result->value.release()));
        break;
      case PREF_READ_ERROR_NO_FILE:
        // If the file just doesn't exist, maybe this is first run. In any case
        // there's no harm in writing out default prefs in this case.
        break;
      case PREF_READ_ERROR_JSON_PARSE:
      case PREF_READ_ERROR_JSON_REPEAT:
        // Corrupt file was already moved aside; start from empty prefs.
        break;
      case PREF_READ_ERROR_ASYNCHRONOUS_TASK_INCOMPLETE:
        // This is a special error code to be returned by ReadPrefs when it
        // can't complete synchronously, it should never be returned by the read
        // operation itself.
        NOTREACHED();
        break;
      case PREF_READ_ERROR_MAX_ENUM:
        NOTREACHED();
        break;
    }
  }

  if (pref_filter_) {
    filtering_in_progress_ = true;
    const PrefFilter::PostFilterOnLoadCallback post_filter_on_load_callback(
        base::Bind(
            &JsonPrefStore::FinalizeFileRead, AsWeakPtr(),
            initialization_successful));
    pref_filter_->FilterOnLoad(post_filter_on_load_callback,
                               unfiltered_prefs.Pass());
  } else {
    FinalizeFileRead(initialization_successful, unfiltered_prefs.Pass(), false);
  }
}
// Flush any pending writes before destruction so no changes are lost.
JsonPrefStore::~JsonPrefStore() {
  CommitPendingWrite();
}
// ImportantFileWriter::DataSerializer implementation: serializes the
// current prefs dictionary to JSON in |output|. Clears the pending lossy
// write flag (this write covers it) and records the write in the histogram.
// Returns false if serialization fails.
bool JsonPrefStore::SerializeData(std::string* output) {
  DCHECK(CalledOnValidThread());

  pending_lossy_write_ = false;

  write_count_histogram_.RecordWriteOccured();

  if (pref_filter_)
    pref_filter_->FilterSerializeData(prefs_.get());

  JSONStringValueSerializer serializer(output);
  // Not pretty-printing prefs shrinks pref file size by ~30%. To obtain
  // readable prefs for debugging purposes, you can dump your prefs into any
  // command-line or online JSON pretty printing tool.
  serializer.set_pretty_print(false);
  return serializer.Serialize(*prefs_);
}
408 void JsonPrefStore::FinalizeFileRead(bool initialization_successful,
409 scoped_ptr<base::DictionaryValue> prefs,
410 bool schedule_write) {
411 DCHECK(CalledOnValidThread());
413 filtering_in_progress_ = false;
415 if (!initialization_successful) {
416 FOR_EACH_OBSERVER(PrefStore::Observer,
417 observers_,
418 OnInitializationCompleted(false));
419 return;
422 prefs_ = prefs.Pass();
424 initialized_ = true;
426 if (schedule_write)
427 ScheduleWrite(DEFAULT_PREF_WRITE_FLAGS);
429 if (error_delegate_ && read_error_ != PREF_READ_ERROR_NONE)
430 error_delegate_->OnError(read_error_);
432 FOR_EACH_OBSERVER(PrefStore::Observer,
433 observers_,
434 OnInitializationCompleted(true));
436 return;
// Schedules a write of the prefs to disk, unless read-only. A write tagged
// LOSSY_PREF_WRITE_FLAG is only latched in |pending_lossy_write_| and is
// flushed later by CommitPendingWrite or a subsequent non-lossy write.
void JsonPrefStore::ScheduleWrite(uint32 flags) {
  if (read_only_)
    return;

  if (flags & LOSSY_PREF_WRITE_FLAG)
    pending_lossy_write_ = true;
  else
    writer_.ScheduleWrite(this);
}
// Length of the reporting window for the write-count histogram, in minutes.
// NOTE: This value should NOT be changed without renaming the histogram
// otherwise it will create incompatible buckets.
const int32_t
    JsonPrefStore::WriteCountHistogram::kHistogramWriteReportIntervalMins = 5;
// Production constructor: uses the real wall clock.
JsonPrefStore::WriteCountHistogram::WriteCountHistogram(
    const base::TimeDelta& commit_interval,
    const base::FilePath& path)
    : WriteCountHistogram(commit_interval,
                          path,
                          scoped_ptr<base::Clock>(new base::DefaultClock)) {
}
// Constructor with an injectable |clock| for tests. |commit_interval| is
// the writer's commit interval (bounds the max writes per report window);
// |path| determines the histogram name suffix.
JsonPrefStore::WriteCountHistogram::WriteCountHistogram(
    const base::TimeDelta& commit_interval,
    const base::FilePath& path,
    scoped_ptr<base::Clock> clock)
    : commit_interval_(commit_interval),
      path_(path),
      // NOTE(review): ownership of |clock| is transferred via release();
      // presumably |clock_| is an owning pointer — confirm in the header.
      clock_(clock.release()),
      report_interval_(
          base::TimeDelta::FromMinutes(kHistogramWriteReportIntervalMins)),
      last_report_time_(clock_->Now()),
      writes_since_last_report_(0) {
}
// Flush any not-yet-reported write counts before destruction.
JsonPrefStore::WriteCountHistogram::~WriteCountHistogram() {
  ReportOutstandingWrites();
}
// Counts one write in the current report window, first emitting samples for
// any windows that have already elapsed. (Name keeps the historical
// "Occured" spelling; it is part of the public interface.)
void JsonPrefStore::WriteCountHistogram::RecordWriteOccured() {
  ReportOutstandingWrites();

  ++writes_since_last_report_;
}
// Emits histogram samples for every full report interval that has elapsed
// since the last report: the accumulated write count for the first interval
// and zero for each additional empty interval. No-op while still inside the
// current interval. |last_report_time_| is advanced by whole intervals so
// partial time is carried forward.
void JsonPrefStore::WriteCountHistogram::ReportOutstandingWrites() {
  base::Time current_time = clock_->Now();
  base::TimeDelta time_since_last_report = current_time - last_report_time_;

  if (time_since_last_report <= report_interval_)
    return;

  // If the time since the last report exceeds the report interval, report all
  // the writes since the last report. They must have all occurred in the same
  // report interval.
  base::HistogramBase* histogram = GetHistogram();
  histogram->Add(writes_since_last_report_);

  // There may be several report intervals that elapsed that don't have any
  // writes in them. Report these too.
  int64 total_num_intervals_elapsed =
      (time_since_last_report / report_interval_);
  for (int64 i = 0; i < total_num_intervals_elapsed - 1; ++i)
    histogram->Add(0);

  writes_since_last_report_ = 0;
  last_report_time_ += total_num_intervals_elapsed * report_interval_;
}
// Returns the Settings.JsonDataWriteCount histogram suffixed with the prefs
// file's base name (spaces replaced by underscores). The factory creates
// and owns the histogram; callers must not delete it.
base::HistogramBase* JsonPrefStore::WriteCountHistogram::GetHistogram() {
  std::string spaceless_basename;
  base::ReplaceChars(path_.BaseName().MaybeAsASCII(), " ", "_",
                     &spaceless_basename);
  std::string histogram_name =
      "Settings.JsonDataWriteCount." + spaceless_basename;

  // The min value for a histogram is 1. The max value is the maximum number of
  // writes that can occur in the window being recorded. The number of buckets
  // used is the max value (plus the underflow/overflow buckets).
  int32_t min_value = 1;
  int32_t max_value = report_interval_ / commit_interval_;
  int32_t num_buckets = max_value + 1;

  // NOTE: These values should NOT be changed without renaming the histogram
  // otherwise it will create incompatible buckets.
  DCHECK_EQ(30, max_value);
  DCHECK_EQ(31, num_buckets);

  // The histogram below is an expansion of the UMA_HISTOGRAM_CUSTOM_COUNTS
  // macro adapted to allow for a dynamically suffixed histogram name.
  // Note: The factory creates and owns the histogram.
  base::HistogramBase* histogram = base::Histogram::FactoryGet(
      histogram_name, min_value, max_value, num_buckets,
      base::HistogramBase::kUmaTargetedHistogramFlag);
  return histogram;
}