1 // Copyright (c) 2012 The Chromium Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
5 #include "base/prefs/json_pref_store.h"
10 #include "base/callback.h"
11 #include "base/files/file_path.h"
12 #include "base/files/file_util.h"
13 #include "base/json/json_file_value_serializer.h"
14 #include "base/json/json_string_value_serializer.h"
15 #include "base/memory/ref_counted.h"
16 #include "base/metrics/histogram.h"
17 #include "base/prefs/pref_filter.h"
18 #include "base/sequenced_task_runner.h"
19 #include "base/strings/string_number_conversions.h"
20 #include "base/strings/string_util.h"
21 #include "base/task_runner_util.h"
22 #include "base/threading/sequenced_worker_pool.h"
23 #include "base/time/default_clock.h"
24 #include "base/values.h"
26 // Result returned from internal read tasks.
27 struct JsonPrefStore::ReadResult
{
32 scoped_ptr
<base::Value
> value
;
37 DISALLOW_COPY_AND_ASSIGN(ReadResult
);
40 JsonPrefStore::ReadResult::ReadResult()
41 : error(PersistentPrefStore::PREF_READ_ERROR_NONE
), no_dir(false) {
44 JsonPrefStore::ReadResult::~ReadResult() {
49 // Some extensions we'll tack on to copies of the Preferences files.
50 const base::FilePath::CharType kBadExtension
[] = FILE_PATH_LITERAL("bad");
52 PersistentPrefStore::PrefReadError
HandleReadErrors(
53 const base::Value
* value
,
54 const base::FilePath
& path
,
56 const std::string
& error_msg
) {
58 DVLOG(1) << "Error while loading JSON file: " << error_msg
59 << ", file: " << path
.value();
61 case JSONFileValueDeserializer::JSON_ACCESS_DENIED
:
62 return PersistentPrefStore::PREF_READ_ERROR_ACCESS_DENIED
;
63 case JSONFileValueDeserializer::JSON_CANNOT_READ_FILE
:
64 return PersistentPrefStore::PREF_READ_ERROR_FILE_OTHER
;
65 case JSONFileValueDeserializer::JSON_FILE_LOCKED
:
66 return PersistentPrefStore::PREF_READ_ERROR_FILE_LOCKED
;
67 case JSONFileValueDeserializer::JSON_NO_SUCH_FILE
:
68 return PersistentPrefStore::PREF_READ_ERROR_NO_FILE
;
70 // JSON errors indicate file corruption of some sort.
71 // Since the file is corrupt, move it to the side and continue with
72 // empty preferences. This will result in them losing their settings.
73 // We keep the old file for possible support and debugging assistance
74 // as well as to detect if they're seeing these errors repeatedly.
75 // TODO(erikkay) Instead, use the last known good file.
76 base::FilePath bad
= path
.ReplaceExtension(kBadExtension
);
78 // If they've ever had a parse error before, put them in another bucket.
79 // TODO(erikkay) if we keep this error checking for very long, we may
80 // want to differentiate between recent and long ago errors.
81 bool bad_existed
= base::PathExists(bad
);
82 base::Move(path
, bad
);
83 return bad_existed
? PersistentPrefStore::PREF_READ_ERROR_JSON_REPEAT
84 : PersistentPrefStore::PREF_READ_ERROR_JSON_PARSE
;
87 if (!value
->IsType(base::Value::TYPE_DICTIONARY
))
88 return PersistentPrefStore::PREF_READ_ERROR_JSON_TYPE
;
89 return PersistentPrefStore::PREF_READ_ERROR_NONE
;
92 // Records a sample for |size| in the Settings.JsonDataReadSizeKilobytes
93 // histogram suffixed with the base name of the JSON file under |path|.
94 void RecordJsonDataSizeHistogram(const base::FilePath
& path
, size_t size
) {
95 std::string spaceless_basename
;
96 base::ReplaceChars(path
.BaseName().MaybeAsASCII(), " ", "_",
99 // The histogram below is an expansion of the UMA_HISTOGRAM_CUSTOM_COUNTS
100 // macro adapted to allow for a dynamically suffixed histogram name.
101 // Note: The factory creates and owns the histogram.
102 base::HistogramBase
* histogram
= base::Histogram::FactoryGet(
103 "Settings.JsonDataReadSizeKilobytes." + spaceless_basename
, 1, 10000, 50,
104 base::HistogramBase::kUmaTargetedHistogramFlag
);
105 histogram
->Add(static_cast<int>(size
) / 1024);
108 scoped_ptr
<JsonPrefStore::ReadResult
> ReadPrefsFromDisk(
109 const base::FilePath
& path
,
110 const base::FilePath
& alternate_path
) {
111 if (!base::PathExists(path
) && !alternate_path
.empty() &&
112 base::PathExists(alternate_path
)) {
113 base::Move(alternate_path
, path
);
117 std::string error_msg
;
118 scoped_ptr
<JsonPrefStore::ReadResult
> read_result(
119 new JsonPrefStore::ReadResult
);
120 JSONFileValueDeserializer
deserializer(path
);
121 read_result
->value
.reset(deserializer
.Deserialize(&error_code
, &error_msg
));
123 HandleReadErrors(read_result
->value
.get(), path
, error_code
, error_msg
);
124 read_result
->no_dir
= !base::PathExists(path
.DirName());
126 if (read_result
->error
== PersistentPrefStore::PREF_READ_ERROR_NONE
)
127 RecordJsonDataSizeHistogram(path
, deserializer
.get_last_read_size());
129 return read_result
.Pass();
135 scoped_refptr
<base::SequencedTaskRunner
> JsonPrefStore::GetTaskRunnerForFile(
136 const base::FilePath
& filename
,
137 base::SequencedWorkerPool
* worker_pool
) {
138 std::string
token("json_pref_store-");
139 token
.append(filename
.AsUTF8Unsafe());
140 return worker_pool
->GetSequencedTaskRunnerWithShutdownBehavior(
141 worker_pool
->GetNamedSequenceToken(token
),
142 base::SequencedWorkerPool::BLOCK_SHUTDOWN
);
145 JsonPrefStore::JsonPrefStore(
146 const base::FilePath
& pref_filename
,
147 const scoped_refptr
<base::SequencedTaskRunner
>& sequenced_task_runner
,
148 scoped_ptr
<PrefFilter
> pref_filter
)
149 : JsonPrefStore(pref_filename
,
151 sequenced_task_runner
,
152 pref_filter
.Pass()) {
155 JsonPrefStore::JsonPrefStore(
156 const base::FilePath
& pref_filename
,
157 const base::FilePath
& pref_alternate_filename
,
158 const scoped_refptr
<base::SequencedTaskRunner
>& sequenced_task_runner
,
159 scoped_ptr
<PrefFilter
> pref_filter
)
160 : path_(pref_filename
),
161 alternate_path_(pref_alternate_filename
),
162 sequenced_task_runner_(sequenced_task_runner
),
163 prefs_(new base::DictionaryValue()),
165 writer_(pref_filename
, sequenced_task_runner
),
166 pref_filter_(pref_filter
.Pass()),
168 filtering_in_progress_(false),
169 pending_lossy_write_(false),
170 read_error_(PREF_READ_ERROR_NONE
),
171 write_count_histogram_(writer_
.commit_interval(), path_
) {
172 DCHECK(!path_
.empty());
175 bool JsonPrefStore::GetValue(const std::string
& key
,
176 const base::Value
** result
) const {
177 DCHECK(CalledOnValidThread());
179 base::Value
* tmp
= nullptr;
180 if (!prefs_
->Get(key
, &tmp
))
188 void JsonPrefStore::AddObserver(PrefStore::Observer
* observer
) {
189 DCHECK(CalledOnValidThread());
191 observers_
.AddObserver(observer
);
194 void JsonPrefStore::RemoveObserver(PrefStore::Observer
* observer
) {
195 DCHECK(CalledOnValidThread());
197 observers_
.RemoveObserver(observer
);
200 bool JsonPrefStore::HasObservers() const {
201 DCHECK(CalledOnValidThread());
203 return observers_
.might_have_observers();
206 bool JsonPrefStore::IsInitializationComplete() const {
207 DCHECK(CalledOnValidThread());
212 bool JsonPrefStore::GetMutableValue(const std::string
& key
,
213 base::Value
** result
) {
214 DCHECK(CalledOnValidThread());
216 return prefs_
->Get(key
, result
);
219 void JsonPrefStore::SetValue(const std::string
& key
,
220 scoped_ptr
<base::Value
> value
,
222 DCHECK(CalledOnValidThread());
225 base::Value
* old_value
= nullptr;
226 prefs_
->Get(key
, &old_value
);
227 if (!old_value
|| !value
->Equals(old_value
)) {
228 prefs_
->Set(key
, value
.Pass());
229 ReportValueChanged(key
, flags
);
233 void JsonPrefStore::SetValueSilently(const std::string
& key
,
234 scoped_ptr
<base::Value
> value
,
236 DCHECK(CalledOnValidThread());
239 base::Value
* old_value
= nullptr;
240 prefs_
->Get(key
, &old_value
);
241 if (!old_value
|| !value
->Equals(old_value
)) {
242 prefs_
->Set(key
, value
.Pass());
243 ScheduleWrite(flags
);
247 void JsonPrefStore::RemoveValue(const std::string
& key
, uint32 flags
) {
248 DCHECK(CalledOnValidThread());
250 if (prefs_
->RemovePath(key
, nullptr))
251 ReportValueChanged(key
, flags
);
254 void JsonPrefStore::RemoveValueSilently(const std::string
& key
, uint32 flags
) {
255 DCHECK(CalledOnValidThread());
257 prefs_
->RemovePath(key
, nullptr);
258 ScheduleWrite(flags
);
261 bool JsonPrefStore::ReadOnly() const {
262 DCHECK(CalledOnValidThread());
267 PersistentPrefStore::PrefReadError
JsonPrefStore::GetReadError() const {
268 DCHECK(CalledOnValidThread());
273 PersistentPrefStore::PrefReadError
JsonPrefStore::ReadPrefs() {
274 DCHECK(CalledOnValidThread());
276 OnFileRead(ReadPrefsFromDisk(path_
, alternate_path_
));
277 return filtering_in_progress_
? PREF_READ_ERROR_ASYNCHRONOUS_TASK_INCOMPLETE
281 void JsonPrefStore::ReadPrefsAsync(ReadErrorDelegate
* error_delegate
) {
282 DCHECK(CalledOnValidThread());
284 initialized_
= false;
285 error_delegate_
.reset(error_delegate
);
287 // Weakly binds the read task so that it doesn't kick in during shutdown.
288 base::PostTaskAndReplyWithResult(
289 sequenced_task_runner_
.get(),
291 base::Bind(&ReadPrefsFromDisk
, path_
, alternate_path_
),
292 base::Bind(&JsonPrefStore::OnFileRead
, AsWeakPtr()));
295 void JsonPrefStore::CommitPendingWrite() {
296 DCHECK(CalledOnValidThread());
298 // Schedule a write for any lossy writes that are outstanding to ensure that
299 // they get flushed when this function is called.
300 SchedulePendingLossyWrites();
302 if (writer_
.HasPendingWrite() && !read_only_
)
303 writer_
.DoScheduledWrite();
306 void JsonPrefStore::SchedulePendingLossyWrites() {
307 if (pending_lossy_write_
)
308 writer_
.ScheduleWrite(this);
311 void JsonPrefStore::ReportValueChanged(const std::string
& key
, uint32 flags
) {
312 DCHECK(CalledOnValidThread());
315 pref_filter_
->FilterUpdate(key
);
317 FOR_EACH_OBSERVER(PrefStore::Observer
, observers_
, OnPrefValueChanged(key
));
319 ScheduleWrite(flags
);
322 void JsonPrefStore::RegisterOnNextSuccessfulWriteCallback(
323 const base::Closure
& on_next_successful_write
) {
324 DCHECK(CalledOnValidThread());
326 writer_
.RegisterOnNextSuccessfulWriteCallback(on_next_successful_write
);
329 void JsonPrefStore::OnFileRead(scoped_ptr
<ReadResult
> read_result
) {
330 DCHECK(CalledOnValidThread());
334 scoped_ptr
<base::DictionaryValue
> unfiltered_prefs(new base::DictionaryValue
);
336 read_error_
= read_result
->error
;
338 bool initialization_successful
= !read_result
->no_dir
;
340 if (initialization_successful
) {
341 switch (read_error_
) {
342 case PREF_READ_ERROR_ACCESS_DENIED
:
343 case PREF_READ_ERROR_FILE_OTHER
:
344 case PREF_READ_ERROR_FILE_LOCKED
:
345 case PREF_READ_ERROR_JSON_TYPE
:
346 case PREF_READ_ERROR_FILE_NOT_SPECIFIED
:
349 case PREF_READ_ERROR_NONE
:
350 DCHECK(read_result
->value
.get());
351 unfiltered_prefs
.reset(
352 static_cast<base::DictionaryValue
*>(read_result
->value
.release()));
354 case PREF_READ_ERROR_NO_FILE
:
355 // If the file just doesn't exist, maybe this is first run. In any case
356 // there's no harm in writing out default prefs in this case.
358 case PREF_READ_ERROR_JSON_PARSE
:
359 case PREF_READ_ERROR_JSON_REPEAT
:
361 case PREF_READ_ERROR_ASYNCHRONOUS_TASK_INCOMPLETE
:
362 // This is a special error code to be returned by ReadPrefs when it
363 // can't complete synchronously, it should never be returned by the read
367 case PREF_READ_ERROR_MAX_ENUM
:
374 filtering_in_progress_
= true;
375 const PrefFilter::PostFilterOnLoadCallback
post_filter_on_load_callback(
377 &JsonPrefStore::FinalizeFileRead
, AsWeakPtr(),
378 initialization_successful
));
379 pref_filter_
->FilterOnLoad(post_filter_on_load_callback
,
380 unfiltered_prefs
.Pass());
382 FinalizeFileRead(initialization_successful
, unfiltered_prefs
.Pass(), false);
386 JsonPrefStore::~JsonPrefStore() {
387 CommitPendingWrite();
390 bool JsonPrefStore::SerializeData(std::string
* output
) {
391 DCHECK(CalledOnValidThread());
393 pending_lossy_write_
= false;
395 write_count_histogram_
.RecordWriteOccured();
398 pref_filter_
->FilterSerializeData(prefs_
.get());
400 JSONStringValueSerializer
serializer(output
);
401 // Not pretty-printing prefs shrinks pref file size by ~30%. To obtain
402 // readable prefs for debugging purposes, you can dump your prefs into any
403 // command-line or online JSON pretty printing tool.
404 serializer
.set_pretty_print(false);
405 return serializer
.Serialize(*prefs_
);
408 void JsonPrefStore::FinalizeFileRead(bool initialization_successful
,
409 scoped_ptr
<base::DictionaryValue
> prefs
,
410 bool schedule_write
) {
411 DCHECK(CalledOnValidThread());
413 filtering_in_progress_
= false;
415 if (!initialization_successful
) {
416 FOR_EACH_OBSERVER(PrefStore::Observer
,
418 OnInitializationCompleted(false));
422 prefs_
= prefs
.Pass();
427 ScheduleWrite(DEFAULT_PREF_WRITE_FLAGS
);
429 if (error_delegate_
&& read_error_
!= PREF_READ_ERROR_NONE
)
430 error_delegate_
->OnError(read_error_
);
432 FOR_EACH_OBSERVER(PrefStore::Observer
,
434 OnInitializationCompleted(true));
439 void JsonPrefStore::ScheduleWrite(uint32 flags
) {
443 if (flags
& LOSSY_PREF_WRITE_FLAG
)
444 pending_lossy_write_
= true;
446 writer_
.ScheduleWrite(this);
449 // NOTE: This value should NOT be changed without renaming the histogram
450 // otherwise it will create incompatible buckets.
452 JsonPrefStore::WriteCountHistogram::kHistogramWriteReportIntervalMins
= 5;
454 JsonPrefStore::WriteCountHistogram::WriteCountHistogram(
455 const base::TimeDelta
& commit_interval
,
456 const base::FilePath
& path
)
457 : WriteCountHistogram(commit_interval
,
459 scoped_ptr
<base::Clock
>(new base::DefaultClock
)) {
462 JsonPrefStore::WriteCountHistogram::WriteCountHistogram(
463 const base::TimeDelta
& commit_interval
,
464 const base::FilePath
& path
,
465 scoped_ptr
<base::Clock
> clock
)
466 : commit_interval_(commit_interval
),
468 clock_(clock
.release()),
470 base::TimeDelta::FromMinutes(kHistogramWriteReportIntervalMins
)),
471 last_report_time_(clock_
->Now()),
472 writes_since_last_report_(0) {
475 JsonPrefStore::WriteCountHistogram::~WriteCountHistogram() {
476 ReportOutstandingWrites();
479 void JsonPrefStore::WriteCountHistogram::RecordWriteOccured() {
480 ReportOutstandingWrites();
482 ++writes_since_last_report_
;
485 void JsonPrefStore::WriteCountHistogram::ReportOutstandingWrites() {
486 base::Time current_time
= clock_
->Now();
487 base::TimeDelta time_since_last_report
= current_time
- last_report_time_
;
489 if (time_since_last_report
<= report_interval_
)
492 // If the time since the last report exceeds the report interval, report all
493 // the writes since the last report. They must have all occurred in the same
495 base::HistogramBase
* histogram
= GetHistogram();
496 histogram
->Add(writes_since_last_report_
);
498 // There may be several report intervals that elapsed that don't have any
499 // writes in them. Report these too.
500 int64 total_num_intervals_elapsed
=
501 (time_since_last_report
/ report_interval_
);
502 for (int64 i
= 0; i
< total_num_intervals_elapsed
- 1; ++i
)
505 writes_since_last_report_
= 0;
506 last_report_time_
+= total_num_intervals_elapsed
* report_interval_
;
// Fetches (creating on first use; the factory owns it) the dynamically named
// "Settings.JsonDataWriteCount.<basename>" histogram for this prefs file.
// NOTE(review): this definition continues past the reviewed extract (the
// return statement is not visible here).
509 base::HistogramBase
* JsonPrefStore::WriteCountHistogram::GetHistogram() {
// Spaces are not valid in histogram names; normalize the file's base name.
510 std::string spaceless_basename
;
511 base::ReplaceChars(path_
.BaseName().MaybeAsASCII(), " ", "_",
512 &spaceless_basename
);
513 std::string histogram_name
=
514 "Settings.JsonDataWriteCount." + spaceless_basename
;
516 // The min value for a histogram is 1. The max value is the maximum number of
517 // writes that can occur in the window being recorded. The number of buckets
518 // used is the max value (plus the underflow/overflow buckets).
519 int32_t min_value
= 1;
520 int32_t max_value
= report_interval_
/ commit_interval_
;
521 int32_t num_buckets
= max_value
+ 1;
523 // NOTE: These values should NOT be changed without renaming the histogram
524 // otherwise it will create incompatible buckets.
525 DCHECK_EQ(30, max_value
);
526 DCHECK_EQ(31, num_buckets
);
528 // The histogram below is an expansion of the UMA_HISTOGRAM_CUSTOM_COUNTS
529 // macro adapted to allow for a dynamically suffixed histogram name.
530 // Note: The factory creates and owns the histogram.
531 base::HistogramBase
* histogram
= base::Histogram::FactoryGet(
532 histogram_name
, min_value
, max_value
, num_buckets
,
533 base::HistogramBase::kUmaTargetedHistogramFlag
);