// base/prefs/json_pref_store.cc
// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "base/prefs/json_pref_store.h"

#include <algorithm>

#include "base/bind.h"
#include "base/callback.h"
#include "base/files/file_path.h"
#include "base/files/file_util.h"
#include "base/json/json_file_value_serializer.h"
#include "base/json/json_string_value_serializer.h"
#include "base/memory/ref_counted.h"
#include "base/metrics/histogram.h"
#include "base/prefs/pref_filter.h"
#include "base/sequenced_task_runner.h"
#include "base/strings/string_number_conversions.h"
#include "base/strings/string_util.h"
#include "base/task_runner_util.h"
#include "base/threading/sequenced_worker_pool.h"
#include "base/time/default_clock.h"
#include "base/values.h"

// Result returned from internal read tasks.
struct JsonPrefStore::ReadResult {
 public:
  ReadResult();
  ~ReadResult();

  scoped_ptr<base::Value> value;
  PrefReadError error;
  bool no_dir;

 private:
  DISALLOW_COPY_AND_ASSIGN(ReadResult);
};

JsonPrefStore::ReadResult::ReadResult()
    : error(PersistentPrefStore::PREF_READ_ERROR_NONE), no_dir(false) {
}

JsonPrefStore::ReadResult::~ReadResult() {
}

namespace {

// Some extensions we'll tack on to copies of the Preferences files.
const base::FilePath::CharType kBadExtension[] = FILE_PATH_LITERAL("bad");

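// Maps the outcome of deserializing the file at |path| (|value|, |error_code|
// and |error_msg|) to a PrefReadError. Corrupt files are renamed with the
// "bad" extension so that the next load starts from empty preferences.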
PersistentPrefStore::PrefReadError HandleReadErrors(
    const base::Value* value,
    const base::FilePath& path,
    int error_code,
    const std::string& error_msg) {
  if (!value) {
    DVLOG(1) << "Error while loading JSON file: " << error_msg
             << ", file: " << path.value();
    switch (error_code) {
      case JSONFileValueDeserializer::JSON_ACCESS_DENIED:
        return PersistentPrefStore::PREF_READ_ERROR_ACCESS_DENIED;
        break;
      case JSONFileValueDeserializer::JSON_CANNOT_READ_FILE:
        return PersistentPrefStore::PREF_READ_ERROR_FILE_OTHER;
        break;
      case JSONFileValueDeserializer::JSON_FILE_LOCKED:
        return PersistentPrefStore::PREF_READ_ERROR_FILE_LOCKED;
        break;
      case JSONFileValueDeserializer::JSON_NO_SUCH_FILE:
        return PersistentPrefStore::PREF_READ_ERROR_NO_FILE;
        break;
      default:
        // JSON errors indicate file corruption of some sort.
        // Since the file is corrupt, move it to the side and continue with
        // empty preferences. This will result in the user losing their
        // settings. We keep the old file for possible support and debugging
        // assistance, as well as to detect if they're seeing these errors
        // repeatedly.
        // TODO(erikkay) Instead, use the last known good file.
        base::FilePath bad = path.ReplaceExtension(kBadExtension);

        // If they've ever had a parse error before, put them in another
        // bucket.
        // TODO(erikkay) if we keep this error checking for very long, we may
        // want to differentiate between recent and long ago errors.
        bool bad_existed = base::PathExists(bad);
        base::Move(path, bad);
        return bad_existed ? PersistentPrefStore::PREF_READ_ERROR_JSON_REPEAT
                           : PersistentPrefStore::PREF_READ_ERROR_JSON_PARSE;
    }
  } else if (!value->IsType(base::Value::TYPE_DICTIONARY)) {
    return PersistentPrefStore::PREF_READ_ERROR_JSON_TYPE;
  }
  return PersistentPrefStore::PREF_READ_ERROR_NONE;
}

// Records a sample for |size| in the Settings.JsonDataReadSizeKilobytes
// histogram suffixed with the base name of the JSON file under |path|.
void RecordJsonDataSizeHistogram(const base::FilePath& path, size_t size) {
  std::string spaceless_basename;
  base::ReplaceChars(path.BaseName().MaybeAsASCII(), " ", "_",
                     &spaceless_basename);

  // The histogram below is an expansion of the UMA_HISTOGRAM_CUSTOM_COUNTS
  // macro adapted to allow for a dynamically suffixed histogram name.
  // Note: The factory creates and owns the histogram.
  base::HistogramBase* histogram = base::Histogram::FactoryGet(
      "Settings.JsonDataReadSizeKilobytes." + spaceless_basename, 1, 10000, 50,
      base::HistogramBase::kUmaTargetedHistogramFlag);
  histogram->Add(static_cast<int>(size) / 1024);
}

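// Reads and deserializes the preferences file at |path|, first moving
// |alternate_path| into place if only the alternate file exists. Also records
// whether the containing directory is missing so that initialization can be
// reported as failed.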
scoped_ptr<JsonPrefStore::ReadResult> ReadPrefsFromDisk(
    const base::FilePath& path,
    const base::FilePath& alternate_path) {
  if (!base::PathExists(path) && !alternate_path.empty() &&
      base::PathExists(alternate_path)) {
    base::Move(alternate_path, path);
  }

  int error_code;
  std::string error_msg;
  scoped_ptr<JsonPrefStore::ReadResult> read_result(
      new JsonPrefStore::ReadResult);
  JSONFileValueDeserializer deserializer(path);
  read_result->value.reset(deserializer.Deserialize(&error_code, &error_msg));
  read_result->error =
      HandleReadErrors(read_result->value.get(), path, error_code, error_msg);
  read_result->no_dir = !base::PathExists(path.DirName());

  if (read_result->error == PersistentPrefStore::PREF_READ_ERROR_NONE)
    RecordJsonDataSizeHistogram(path, deserializer.get_last_read_size());

  return read_result.Pass();
}

}  // namespace

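// Returns a sequenced task runner whose sequence token is derived from
// |filename|, so every JsonPrefStore backed by the same file shares one
// sequence; tasks posted to it block shutdown until they complete.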
// static
scoped_refptr<base::SequencedTaskRunner> JsonPrefStore::GetTaskRunnerForFile(
    const base::FilePath& filename,
    base::SequencedWorkerPool* worker_pool) {
  std::string token("json_pref_store-");
  token.append(filename.AsUTF8Unsafe());
  return worker_pool->GetSequencedTaskRunnerWithShutdownBehavior(
      worker_pool->GetNamedSequenceToken(token),
      base::SequencedWorkerPool::BLOCK_SHUTDOWN);
}

JsonPrefStore::JsonPrefStore(
    const base::FilePath& filename,
    const scoped_refptr<base::SequencedTaskRunner>& sequenced_task_runner,
    scoped_ptr<PrefFilter> pref_filter)
    : path_(filename),
      sequenced_task_runner_(sequenced_task_runner),
      prefs_(new base::DictionaryValue()),
      read_only_(false),
      writer_(filename, sequenced_task_runner),
      pref_filter_(pref_filter.Pass()),
      initialized_(false),
      filtering_in_progress_(false),
      read_error_(PREF_READ_ERROR_NONE),
      write_count_histogram_(writer_.commit_interval(), path_) {
  DCHECK(!path_.empty());
}

JsonPrefStore::JsonPrefStore(
    const base::FilePath& filename,
    const base::FilePath& alternate_filename,
    const scoped_refptr<base::SequencedTaskRunner>& sequenced_task_runner,
    scoped_ptr<PrefFilter> pref_filter)
    : path_(filename),
      alternate_path_(alternate_filename),
      sequenced_task_runner_(sequenced_task_runner),
      prefs_(new base::DictionaryValue()),
      read_only_(false),
      writer_(filename, sequenced_task_runner),
      pref_filter_(pref_filter.Pass()),
      initialized_(false),
      filtering_in_progress_(false),
      read_error_(PREF_READ_ERROR_NONE),
      write_count_histogram_(writer_.commit_interval(), path_) {
  DCHECK(!path_.empty());
}

bool JsonPrefStore::GetValue(const std::string& key,
                             const base::Value** result) const {
  DCHECK(CalledOnValidThread());

  base::Value* tmp = NULL;
  if (!prefs_->Get(key, &tmp))
    return false;

  if (result)
    *result = tmp;
  return true;
}

void JsonPrefStore::AddObserver(PrefStore::Observer* observer) {
  DCHECK(CalledOnValidThread());

  observers_.AddObserver(observer);
}

void JsonPrefStore::RemoveObserver(PrefStore::Observer* observer) {
  DCHECK(CalledOnValidThread());

  observers_.RemoveObserver(observer);
}

bool JsonPrefStore::HasObservers() const {
  DCHECK(CalledOnValidThread());

  return observers_.might_have_observers();
}

bool JsonPrefStore::IsInitializationComplete() const {
  DCHECK(CalledOnValidThread());

  return initialized_;
}

bool JsonPrefStore::GetMutableValue(const std::string& key,
                                    base::Value** result) {
  DCHECK(CalledOnValidThread());

  return prefs_->Get(key, result);
}

void JsonPrefStore::SetValue(const std::string& key, base::Value* value) {
  DCHECK(CalledOnValidThread());

  DCHECK(value);
  scoped_ptr<base::Value> new_value(value);
  base::Value* old_value = NULL;
  prefs_->Get(key, &old_value);
  if (!old_value || !value->Equals(old_value)) {
    prefs_->Set(key, new_value.release());
    ReportValueChanged(key);
  }
}

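// Like SetValue(), but does not notify observers or the pref filter; a write
// is still scheduled if the value actually changed and the store is writable.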
void JsonPrefStore::SetValueSilently(const std::string& key,
                                     base::Value* value) {
  DCHECK(CalledOnValidThread());

  DCHECK(value);
  scoped_ptr<base::Value> new_value(value);
  base::Value* old_value = NULL;
  prefs_->Get(key, &old_value);
  if (!old_value || !value->Equals(old_value)) {
    prefs_->Set(key, new_value.release());
    if (!read_only_)
      writer_.ScheduleWrite(this);
  }
}

void JsonPrefStore::RemoveValue(const std::string& key) {
  DCHECK(CalledOnValidThread());

  if (prefs_->RemovePath(key, NULL))
    ReportValueChanged(key);
}

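// Like RemoveValue(), but does not notify observers or the pref filter; a
// write is still scheduled unless the store is read-only.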
void JsonPrefStore::RemoveValueSilently(const std::string& key) {
  DCHECK(CalledOnValidThread());

  prefs_->RemovePath(key, NULL);
  if (!read_only_)
    writer_.ScheduleWrite(this);
}

bool JsonPrefStore::ReadOnly() const {
  DCHECK(CalledOnValidThread());

  return read_only_;
}

PersistentPrefStore::PrefReadError JsonPrefStore::GetReadError() const {
  DCHECK(CalledOnValidThread());

  return read_error_;
}

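// Loads the preferences synchronously on the calling thread. Returns
// PREF_READ_ERROR_ASYNCHRONOUS_TASK_INCOMPLETE when a PrefFilter defers
// completion, in which case initialization finishes later via
// FinalizeFileRead().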
PersistentPrefStore::PrefReadError JsonPrefStore::ReadPrefs() {
  DCHECK(CalledOnValidThread());

  OnFileRead(ReadPrefsFromDisk(path_, alternate_path_));
  return filtering_in_progress_ ? PREF_READ_ERROR_ASYNCHRONOUS_TASK_INCOMPLETE
                                : read_error_;
}

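// Kicks off an asynchronous load: the read runs on |sequenced_task_runner_|
// and the result is delivered back to OnFileRead() on this thread.
// |error_delegate| may be NULL; if not, it is notified of any read error once
// the load completes.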
void JsonPrefStore::ReadPrefsAsync(ReadErrorDelegate* error_delegate) {
  DCHECK(CalledOnValidThread());

  initialized_ = false;
  error_delegate_.reset(error_delegate);

  // Weakly binds the reply callback so that it doesn't kick in during
  // shutdown.
  base::PostTaskAndReplyWithResult(
      sequenced_task_runner_.get(),
      FROM_HERE,
      base::Bind(&ReadPrefsFromDisk, path_, alternate_path_),
      base::Bind(&JsonPrefStore::OnFileRead, AsWeakPtr()));
}

void JsonPrefStore::CommitPendingWrite() {
  DCHECK(CalledOnValidThread());

  if (writer_.HasPendingWrite() && !read_only_)
    writer_.DoScheduledWrite();
}

void JsonPrefStore::ReportValueChanged(const std::string& key) {
  DCHECK(CalledOnValidThread());

  if (pref_filter_)
    pref_filter_->FilterUpdate(key);

  FOR_EACH_OBSERVER(PrefStore::Observer, observers_, OnPrefValueChanged(key));

  if (!read_only_)
    writer_.ScheduleWrite(this);
}

void JsonPrefStore::RegisterOnNextSuccessfulWriteCallback(
    const base::Closure& on_next_successful_write) {
  DCHECK(CalledOnValidThread());

  writer_.RegisterOnNextSuccessfulWriteCallback(on_next_successful_write);
}

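// Handles the result of a disk read, whether performed synchronously by
// ReadPrefs() or delivered as the reply from ReadPrefsAsync(). Decides if the
// store becomes read-only based on the read error and routes the loaded
// dictionary through the PrefFilter (if any) before FinalizeFileRead().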
void JsonPrefStore::OnFileRead(scoped_ptr<ReadResult> read_result) {
  DCHECK(CalledOnValidThread());

  DCHECK(read_result);

  scoped_ptr<base::DictionaryValue> unfiltered_prefs(new base::DictionaryValue);

  read_error_ = read_result->error;

  bool initialization_successful = !read_result->no_dir;

  if (initialization_successful) {
    switch (read_error_) {
      case PREF_READ_ERROR_ACCESS_DENIED:
      case PREF_READ_ERROR_FILE_OTHER:
      case PREF_READ_ERROR_FILE_LOCKED:
      case PREF_READ_ERROR_JSON_TYPE:
      case PREF_READ_ERROR_FILE_NOT_SPECIFIED:
        read_only_ = true;
        break;
      case PREF_READ_ERROR_NONE:
        DCHECK(read_result->value.get());
        unfiltered_prefs.reset(
            static_cast<base::DictionaryValue*>(read_result->value.release()));
        break;
      case PREF_READ_ERROR_NO_FILE:
        // If the file just doesn't exist, maybe this is first run. In any
        // case there's no harm in writing out default prefs.
        break;
      case PREF_READ_ERROR_JSON_PARSE:
      case PREF_READ_ERROR_JSON_REPEAT:
        break;
      case PREF_READ_ERROR_ASYNCHRONOUS_TASK_INCOMPLETE:
        // This is a special error code to be returned by ReadPrefs when it
        // can't complete synchronously; it should never be returned by the
        // read operation itself.
        NOTREACHED();
        break;
      case PREF_READ_ERROR_LEVELDB_IO:
      case PREF_READ_ERROR_LEVELDB_CORRUPTION_READ_ONLY:
      case PREF_READ_ERROR_LEVELDB_CORRUPTION:
        // These are specific to LevelDBPrefStore.
        NOTREACHED();
        break;
      case PREF_READ_ERROR_MAX_ENUM:
        NOTREACHED();
        break;
    }
  }

  if (pref_filter_) {
    filtering_in_progress_ = true;
    const PrefFilter::PostFilterOnLoadCallback post_filter_on_load_callback(
        base::Bind(
            &JsonPrefStore::FinalizeFileRead, AsWeakPtr(),
            initialization_successful));
    pref_filter_->FilterOnLoad(post_filter_on_load_callback,
                               unfiltered_prefs.Pass());
  } else {
    FinalizeFileRead(initialization_successful, unfiltered_prefs.Pass(), false);
  }
}

JsonPrefStore::~JsonPrefStore() {
  CommitPendingWrite();
}

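// Builds the JSON string that |writer_| persists to disk when a scheduled
// write comes due; also gives the PrefFilter a chance to amend the data and
// records the write in the write-count histogram.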
bool JsonPrefStore::SerializeData(std::string* output) {
  DCHECK(CalledOnValidThread());

  write_count_histogram_.RecordWriteOccured();

  if (pref_filter_)
    pref_filter_->FilterSerializeData(prefs_.get());

  JSONStringValueSerializer serializer(output);
  // Not pretty-printing prefs shrinks pref file size by ~30%. To obtain
  // readable prefs for debugging purposes, you can dump your prefs into any
  // command-line or online JSON pretty printing tool.
  serializer.set_pretty_print(false);
  return serializer.Serialize(*prefs_);
}

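// Final stage of a load: installs |prefs|, marks the store initialized,
// optionally schedules a write back to disk, reports any read error to the
// delegate, and notifies observers that initialization completed.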
void JsonPrefStore::FinalizeFileRead(bool initialization_successful,
                                     scoped_ptr<base::DictionaryValue> prefs,
                                     bool schedule_write) {
  DCHECK(CalledOnValidThread());

  filtering_in_progress_ = false;

  if (!initialization_successful) {
    FOR_EACH_OBSERVER(PrefStore::Observer,
                      observers_,
                      OnInitializationCompleted(false));
    return;
  }

  prefs_ = prefs.Pass();

  initialized_ = true;

  if (schedule_write && !read_only_)
    writer_.ScheduleWrite(this);

  if (error_delegate_ && read_error_ != PREF_READ_ERROR_NONE)
    error_delegate_->OnError(read_error_);

  FOR_EACH_OBSERVER(PrefStore::Observer,
                    observers_,
                    OnInitializationCompleted(true));

  return;
}

// NOTE: This value should NOT be changed without renaming the histogram,
// otherwise it will create incompatible buckets.
const int32_t
    JsonPrefStore::WriteCountHistogram::kHistogramWriteReportIntervalMins = 5;

JsonPrefStore::WriteCountHistogram::WriteCountHistogram(
    const base::TimeDelta& commit_interval,
    const base::FilePath& path)
    : WriteCountHistogram(commit_interval,
                          path,
                          scoped_ptr<base::Clock>(new base::DefaultClock)) {
}

JsonPrefStore::WriteCountHistogram::WriteCountHistogram(
    const base::TimeDelta& commit_interval,
    const base::FilePath& path,
    scoped_ptr<base::Clock> clock)
    : commit_interval_(commit_interval),
      path_(path),
      clock_(clock.release()),
      report_interval_(
          base::TimeDelta::FromMinutes(kHistogramWriteReportIntervalMins)),
      last_report_time_(clock_->Now()),
      writes_since_last_report_(0) {
}

JsonPrefStore::WriteCountHistogram::~WriteCountHistogram() {
  ReportOutstandingWrites();
}

void JsonPrefStore::WriteCountHistogram::RecordWriteOccured() {
  ReportOutstandingWrites();

  ++writes_since_last_report_;
}

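// Flushes accumulated write counts: once at least one full report interval
// has elapsed, records the writes gathered since the last report and a zero
// sample for each additional interval that passed without any writes.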
void JsonPrefStore::WriteCountHistogram::ReportOutstandingWrites() {
  base::Time current_time = clock_->Now();
  base::TimeDelta time_since_last_report = current_time - last_report_time_;

  if (time_since_last_report <= report_interval_)
    return;

  // If the time since the last report exceeds the report interval, report all
  // the writes since the last report. They must have all occurred in the same
  // report interval.
  base::HistogramBase* histogram = GetHistogram();
  histogram->Add(writes_since_last_report_);

  // Several report intervals may have elapsed without any writes in them.
  // Report these too.
  int64 total_num_intervals_elapsed =
      (time_since_last_report / report_interval_);
  for (int64 i = 0; i < total_num_intervals_elapsed - 1; ++i)
    histogram->Add(0);

  writes_since_last_report_ = 0;
  last_report_time_ += total_num_intervals_elapsed * report_interval_;
}

base::HistogramBase* JsonPrefStore::WriteCountHistogram::GetHistogram() {
  std::string spaceless_basename;
  base::ReplaceChars(path_.BaseName().MaybeAsASCII(), " ", "_",
                     &spaceless_basename);
  std::string histogram_name =
      "Settings.JsonDataWriteCount." + spaceless_basename;

  // The min value for a histogram is 1. The max value is the maximum number of
  // writes that can occur in the window being recorded. The number of buckets
  // used is the max value (plus the underflow/overflow buckets).
  int32_t min_value = 1;
  int32_t max_value = report_interval_ / commit_interval_;
  int32_t num_buckets = max_value + 1;

  // NOTE: These values should NOT be changed without renaming the histogram,
  // otherwise it will create incompatible buckets.
  DCHECK_EQ(30, max_value);
  DCHECK_EQ(31, num_buckets);

  // The histogram below is an expansion of the UMA_HISTOGRAM_CUSTOM_COUNTS
  // macro adapted to allow for a dynamically suffixed histogram name.
  // Note: The factory creates and owns the histogram.
  base::HistogramBase* histogram = base::Histogram::FactoryGet(
      histogram_name, min_value, max_value, num_buckets,
      base::HistogramBase::kUmaTargetedHistogramFlag);
  return histogram;
}