base/prefs/json_pref_store.cc
// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "base/prefs/json_pref_store.h"

#include <algorithm>

#include "base/bind.h"
#include "base/callback.h"
#include "base/files/file_path.h"
#include "base/files/file_util.h"
#include "base/json/json_file_value_serializer.h"
#include "base/json/json_string_value_serializer.h"
#include "base/memory/ref_counted.h"
#include "base/metrics/histogram.h"
#include "base/prefs/pref_filter.h"
#include "base/sequenced_task_runner.h"
#include "base/strings/string_number_conversions.h"
#include "base/strings/string_util.h"
#include "base/task_runner_util.h"
#include "base/threading/sequenced_worker_pool.h"
#include "base/time/default_clock.h"
#include "base/values.h"

// Result returned from internal read tasks.
struct JsonPrefStore::ReadResult {
 public:
  ReadResult();
  ~ReadResult();

  scoped_ptr<base::Value> value;
  PrefReadError error;
  bool no_dir;

 private:
  DISALLOW_COPY_AND_ASSIGN(ReadResult);
};

JsonPrefStore::ReadResult::ReadResult()
    : error(PersistentPrefStore::PREF_READ_ERROR_NONE), no_dir(false) {
}

JsonPrefStore::ReadResult::~ReadResult() {
}

namespace {

// Some extensions we'll tack on to copies of the Preferences files.
const base::FilePath::CharType kBadExtension[] = FILE_PATH_LITERAL("bad");

PersistentPrefStore::PrefReadError HandleReadErrors(
    const base::Value* value,
    const base::FilePath& path,
    int error_code,
    const std::string& error_msg) {
  if (!value) {
    DVLOG(1) << "Error while loading JSON file: " << error_msg
             << ", file: " << path.value();
    switch (error_code) {
      case JSONFileValueDeserializer::JSON_ACCESS_DENIED:
        return PersistentPrefStore::PREF_READ_ERROR_ACCESS_DENIED;
        break;
      case JSONFileValueDeserializer::JSON_CANNOT_READ_FILE:
        return PersistentPrefStore::PREF_READ_ERROR_FILE_OTHER;
        break;
      case JSONFileValueDeserializer::JSON_FILE_LOCKED:
        return PersistentPrefStore::PREF_READ_ERROR_FILE_LOCKED;
        break;
      case JSONFileValueDeserializer::JSON_NO_SUCH_FILE:
        return PersistentPrefStore::PREF_READ_ERROR_NO_FILE;
        break;
      default:
        // JSON errors indicate file corruption of some sort.
        // Since the file is corrupt, move it to the side and continue with
        // empty preferences. This will result in the user losing their
        // settings. We keep the old file for possible support and debugging
        // assistance as well as to detect if they're seeing these errors
        // repeatedly.
        // TODO(erikkay) Instead, use the last known good file.
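        // As a concrete illustration (the file name is an assumption, not
        // taken from this code): a corrupt "Preferences" file is moved to
        // "Preferences.bad" below; if "Preferences.bad" already existed from
        // an earlier failure, the error is reported as JSON_REPEAT rather
        // than JSON_PARSE.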
        base::FilePath bad = path.ReplaceExtension(kBadExtension);

        // If they've ever had a parse error before, put them in another
        // bucket.
        // TODO(erikkay) if we keep this error checking for very long, we may
        // want to differentiate between recent and long ago errors.
        bool bad_existed = base::PathExists(bad);
        base::Move(path, bad);
        return bad_existed ? PersistentPrefStore::PREF_READ_ERROR_JSON_REPEAT
                           : PersistentPrefStore::PREF_READ_ERROR_JSON_PARSE;
    }
  } else if (!value->IsType(base::Value::TYPE_DICTIONARY)) {
    return PersistentPrefStore::PREF_READ_ERROR_JSON_TYPE;
  }
  return PersistentPrefStore::PREF_READ_ERROR_NONE;
}

// Records a sample for |size| in the Settings.JsonDataReadSizeKilobytes
// histogram suffixed with the base name of the JSON file under |path|.
void RecordJsonDataSizeHistogram(const base::FilePath& path, size_t size) {
  std::string spaceless_basename;
  base::ReplaceChars(path.BaseName().MaybeAsASCII(), " ", "_",
                     &spaceless_basename);
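  // For example (the file name here is hypothetical, not taken from this
  // code): a prefs file named "Local State" yields the suffix "Local_State"
  // and the histogram "Settings.JsonDataReadSizeKilobytes.Local_State".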

  // The histogram below is an expansion of the UMA_HISTOGRAM_CUSTOM_COUNTS
  // macro adapted to allow for a dynamically suffixed histogram name.
  // Note: The factory creates and owns the histogram.
  base::HistogramBase* histogram = base::Histogram::FactoryGet(
      "Settings.JsonDataReadSizeKilobytes." + spaceless_basename, 1, 10000, 50,
      base::HistogramBase::kUmaTargetedHistogramFlag);
  histogram->Add(static_cast<int>(size) / 1024);
}

scoped_ptr<JsonPrefStore::ReadResult> ReadPrefsFromDisk(
    const base::FilePath& path,
    const base::FilePath& alternate_path) {
  if (!base::PathExists(path) && !alternate_path.empty() &&
      base::PathExists(alternate_path)) {
    base::Move(alternate_path, path);
  }

  int error_code;
  std::string error_msg;
  scoped_ptr<JsonPrefStore::ReadResult> read_result(
      new JsonPrefStore::ReadResult);
  JSONFileValueDeserializer deserializer(path);
  read_result->value.reset(deserializer.Deserialize(&error_code, &error_msg));
  read_result->error =
      HandleReadErrors(read_result->value.get(), path, error_code, error_msg);
  read_result->no_dir = !base::PathExists(path.DirName());

  if (read_result->error == PersistentPrefStore::PREF_READ_ERROR_NONE)
    RecordJsonDataSizeHistogram(path, deserializer.get_last_read_size());

  return read_result.Pass();
}

}  // namespace

// static
scoped_refptr<base::SequencedTaskRunner> JsonPrefStore::GetTaskRunnerForFile(
    const base::FilePath& filename,
    base::SequencedWorkerPool* worker_pool) {
  std::string token("json_pref_store-");
  token.append(filename.AsUTF8Unsafe());
  return worker_pool->GetSequencedTaskRunnerWithShutdownBehavior(
      worker_pool->GetNamedSequenceToken(token),
      base::SequencedWorkerPool::BLOCK_SHUTDOWN);
}
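
// A minimal usage sketch (the pool, path and null filter are assumptions, not
// taken from this file): stores backed by the same file should be given the
// task runner returned above so that all I/O for that file is sequenced.
//
//   scoped_refptr<base::SequencedTaskRunner> runner =
//       JsonPrefStore::GetTaskRunnerForFile(pref_path, worker_pool);
//   scoped_refptr<JsonPrefStore> store =
//       new JsonPrefStore(pref_path, runner, scoped_ptr<PrefFilter>());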

JsonPrefStore::JsonPrefStore(
    const base::FilePath& filename,
    const scoped_refptr<base::SequencedTaskRunner>& sequenced_task_runner,
    scoped_ptr<PrefFilter> pref_filter)
    : JsonPrefStore(filename,
                    base::FilePath(),
                    sequenced_task_runner,
                    pref_filter.Pass()) {
}

JsonPrefStore::JsonPrefStore(
    const base::FilePath& filename,
    const base::FilePath& alternate_filename,
    const scoped_refptr<base::SequencedTaskRunner>& sequenced_task_runner,
    scoped_ptr<PrefFilter> pref_filter)
    : path_(filename),
      alternate_path_(alternate_filename),
      sequenced_task_runner_(sequenced_task_runner),
      prefs_(new base::DictionaryValue()),
      read_only_(false),
      writer_(filename, sequenced_task_runner),
      pref_filter_(pref_filter.Pass()),
      initialized_(false),
      filtering_in_progress_(false),
      pending_lossy_write_(false),
      read_error_(PREF_READ_ERROR_NONE),
      write_count_histogram_(writer_.commit_interval(), path_) {
  DCHECK(!path_.empty());
}

bool JsonPrefStore::GetValue(const std::string& key,
                             const base::Value** result) const {
  DCHECK(CalledOnValidThread());

  base::Value* tmp = NULL;
  if (!prefs_->Get(key, &tmp))
    return false;

  if (result)
    *result = tmp;
  return true;
}

void JsonPrefStore::AddObserver(PrefStore::Observer* observer) {
  DCHECK(CalledOnValidThread());

  observers_.AddObserver(observer);
}

void JsonPrefStore::RemoveObserver(PrefStore::Observer* observer) {
  DCHECK(CalledOnValidThread());

  observers_.RemoveObserver(observer);
}

bool JsonPrefStore::HasObservers() const {
  DCHECK(CalledOnValidThread());

  return observers_.might_have_observers();
}

bool JsonPrefStore::IsInitializationComplete() const {
  DCHECK(CalledOnValidThread());

  return initialized_;
}

bool JsonPrefStore::GetMutableValue(const std::string& key,
                                    base::Value** result) {
  DCHECK(CalledOnValidThread());

  return prefs_->Get(key, result);
}

void JsonPrefStore::SetValue(const std::string& key,
                             base::Value* value,
                             uint32 flags) {
  DCHECK(CalledOnValidThread());

  DCHECK(value);
  scoped_ptr<base::Value> new_value(value);
  base::Value* old_value = NULL;
  prefs_->Get(key, &old_value);
  if (!old_value || !value->Equals(old_value)) {
    prefs_->Set(key, new_value.Pass());
    ReportValueChanged(key, flags);
  }
}

void JsonPrefStore::SetValueSilently(const std::string& key,
                                     base::Value* value,
                                     uint32 flags) {
  DCHECK(CalledOnValidThread());

  DCHECK(value);
  scoped_ptr<base::Value> new_value(value);
  base::Value* old_value = NULL;
  prefs_->Get(key, &old_value);
  if (!old_value || !value->Equals(old_value)) {
    prefs_->Set(key, new_value.Pass());
    ScheduleWrite(flags);
  }
}

void JsonPrefStore::RemoveValue(const std::string& key, uint32 flags) {
  DCHECK(CalledOnValidThread());

  if (prefs_->RemovePath(key, NULL))
    ReportValueChanged(key, flags);
}

void JsonPrefStore::RemoveValueSilently(const std::string& key, uint32 flags) {
  DCHECK(CalledOnValidThread());

  prefs_->RemovePath(key, NULL);
  ScheduleWrite(flags);
}

bool JsonPrefStore::ReadOnly() const {
  DCHECK(CalledOnValidThread());

  return read_only_;
}

PersistentPrefStore::PrefReadError JsonPrefStore::GetReadError() const {
  DCHECK(CalledOnValidThread());

  return read_error_;
}

PersistentPrefStore::PrefReadError JsonPrefStore::ReadPrefs() {
  DCHECK(CalledOnValidThread());

  OnFileRead(ReadPrefsFromDisk(path_, alternate_path_));
  return filtering_in_progress_ ? PREF_READ_ERROR_ASYNCHRONOUS_TASK_INCOMPLETE
                                : read_error_;
}

void JsonPrefStore::ReadPrefsAsync(ReadErrorDelegate* error_delegate) {
  DCHECK(CalledOnValidThread());

  initialized_ = false;
  error_delegate_.reset(error_delegate);

  // Weakly binds the read task so that it doesn't kick in during shutdown.
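  // (In other words, if this store is destroyed before the read completes,
  // the weak pointer is invalidated and the OnFileRead reply is dropped.)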
  base::PostTaskAndReplyWithResult(
      sequenced_task_runner_.get(),
      FROM_HERE,
      base::Bind(&ReadPrefsFromDisk, path_, alternate_path_),
      base::Bind(&JsonPrefStore::OnFileRead, AsWeakPtr()));
}

void JsonPrefStore::CommitPendingWrite() {
  DCHECK(CalledOnValidThread());

  // Schedule a write for any lossy writes that are outstanding to ensure that
  // they get flushed when this function is called.
  SchedulePendingLossyWrites();

  if (writer_.HasPendingWrite() && !read_only_)
    writer_.DoScheduledWrite();
}

void JsonPrefStore::SchedulePendingLossyWrites() {
  if (pending_lossy_write_)
    writer_.ScheduleWrite(this);
}

void JsonPrefStore::ReportValueChanged(const std::string& key, uint32 flags) {
  DCHECK(CalledOnValidThread());

  if (pref_filter_)
    pref_filter_->FilterUpdate(key);

  FOR_EACH_OBSERVER(PrefStore::Observer, observers_, OnPrefValueChanged(key));

  ScheduleWrite(flags);
}

void JsonPrefStore::RegisterOnNextSuccessfulWriteCallback(
    const base::Closure& on_next_successful_write) {
  DCHECK(CalledOnValidThread());

  writer_.RegisterOnNextSuccessfulWriteCallback(on_next_successful_write);
}

void JsonPrefStore::OnFileRead(scoped_ptr<ReadResult> read_result) {
  DCHECK(CalledOnValidThread());

  DCHECK(read_result);

  scoped_ptr<base::DictionaryValue> unfiltered_prefs(new base::DictionaryValue);

  read_error_ = read_result->error;

  bool initialization_successful = !read_result->no_dir;

  if (initialization_successful) {
    switch (read_error_) {
      case PREF_READ_ERROR_ACCESS_DENIED:
      case PREF_READ_ERROR_FILE_OTHER:
      case PREF_READ_ERROR_FILE_LOCKED:
      case PREF_READ_ERROR_JSON_TYPE:
      case PREF_READ_ERROR_FILE_NOT_SPECIFIED:
        read_only_ = true;
        break;
      case PREF_READ_ERROR_NONE:
        DCHECK(read_result->value.get());
        unfiltered_prefs.reset(
            static_cast<base::DictionaryValue*>(read_result->value.release()));
        break;
      case PREF_READ_ERROR_NO_FILE:
        // If the file just doesn't exist, maybe this is first run. In any case
        // there's no harm in writing out default prefs in this case.
        break;
      case PREF_READ_ERROR_JSON_PARSE:
      case PREF_READ_ERROR_JSON_REPEAT:
        break;
      case PREF_READ_ERROR_ASYNCHRONOUS_TASK_INCOMPLETE:
        // This is a special error code to be returned by ReadPrefs when it
        // can't complete synchronously; it should never be returned by the
        // read operation itself.
        NOTREACHED();
        break;
      case PREF_READ_ERROR_LEVELDB_IO:
      case PREF_READ_ERROR_LEVELDB_CORRUPTION_READ_ONLY:
      case PREF_READ_ERROR_LEVELDB_CORRUPTION:
        // These are specific to LevelDBPrefStore.
        NOTREACHED();
      case PREF_READ_ERROR_MAX_ENUM:
        NOTREACHED();
        break;
    }
  }

  if (pref_filter_) {
    filtering_in_progress_ = true;
    const PrefFilter::PostFilterOnLoadCallback post_filter_on_load_callback(
        base::Bind(
            &JsonPrefStore::FinalizeFileRead, AsWeakPtr(),
            initialization_successful));
    pref_filter_->FilterOnLoad(post_filter_on_load_callback,
                               unfiltered_prefs.Pass());
  } else {
    FinalizeFileRead(initialization_successful, unfiltered_prefs.Pass(), false);
  }
}

JsonPrefStore::~JsonPrefStore() {
  CommitPendingWrite();
}

bool JsonPrefStore::SerializeData(std::string* output) {
  DCHECK(CalledOnValidThread());

  pending_lossy_write_ = false;

  write_count_histogram_.RecordWriteOccured();

  if (pref_filter_)
    pref_filter_->FilterSerializeData(prefs_.get());

  JSONStringValueSerializer serializer(output);
  // Not pretty-printing prefs shrinks pref file size by ~30%. To obtain
  // readable prefs for debugging purposes, you can dump your prefs into any
  // command-line or online JSON pretty printing tool.
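  // (For instance, running a copy of the file through `python -m json.tool`
  // is one way to do that.)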
  serializer.set_pretty_print(false);
  return serializer.Serialize(*prefs_);
}

void JsonPrefStore::FinalizeFileRead(bool initialization_successful,
                                     scoped_ptr<base::DictionaryValue> prefs,
                                     bool schedule_write) {
  DCHECK(CalledOnValidThread());

  filtering_in_progress_ = false;

  if (!initialization_successful) {
    FOR_EACH_OBSERVER(PrefStore::Observer,
                      observers_,
                      OnInitializationCompleted(false));
    return;
  }

  prefs_ = prefs.Pass();

  initialized_ = true;

  if (schedule_write)
    ScheduleWrite(DEFAULT_PREF_WRITE_FLAGS);

  if (error_delegate_ && read_error_ != PREF_READ_ERROR_NONE)
    error_delegate_->OnError(read_error_);

  FOR_EACH_OBSERVER(PrefStore::Observer,
                    observers_,
                    OnInitializationCompleted(true));

  return;
}

void JsonPrefStore::ScheduleWrite(uint32 flags) {
  if (read_only_)
    return;

  if (flags & LOSSY_PREF_WRITE_FLAG)
    pending_lossy_write_ = true;
  else
    writer_.ScheduleWrite(this);
}
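
// Hedged usage note (the call site below is an assumption, not from this
// file): a call such as SetValue("some.pref", value, LOSSY_PREF_WRITE_FLAG)
// only sets pending_lossy_write_ above; the change reaches disk when a later
// non-lossy write schedules writer_, or when CommitPendingWrite() flushes it
// via SchedulePendingLossyWrites().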

// NOTE: This value should NOT be changed without renaming the histogram,
// otherwise it will create incompatible buckets.
const int32_t
    JsonPrefStore::WriteCountHistogram::kHistogramWriteReportIntervalMins = 5;

JsonPrefStore::WriteCountHistogram::WriteCountHistogram(
    const base::TimeDelta& commit_interval,
    const base::FilePath& path)
    : WriteCountHistogram(commit_interval,
                          path,
                          scoped_ptr<base::Clock>(new base::DefaultClock)) {
}

JsonPrefStore::WriteCountHistogram::WriteCountHistogram(
    const base::TimeDelta& commit_interval,
    const base::FilePath& path,
    scoped_ptr<base::Clock> clock)
    : commit_interval_(commit_interval),
      path_(path),
      clock_(clock.release()),
      report_interval_(
          base::TimeDelta::FromMinutes(kHistogramWriteReportIntervalMins)),
      last_report_time_(clock_->Now()),
      writes_since_last_report_(0) {
}

JsonPrefStore::WriteCountHistogram::~WriteCountHistogram() {
  ReportOutstandingWrites();
}

void JsonPrefStore::WriteCountHistogram::RecordWriteOccured() {
  ReportOutstandingWrites();

  ++writes_since_last_report_;
}

void JsonPrefStore::WriteCountHistogram::ReportOutstandingWrites() {
  base::Time current_time = clock_->Now();
  base::TimeDelta time_since_last_report = current_time - last_report_time_;

  if (time_since_last_report <= report_interval_)
    return;

  // If the time since the last report exceeds the report interval, report all
  // the writes since the last report. They must have all occurred in the same
  // report interval.
  base::HistogramBase* histogram = GetHistogram();
  histogram->Add(writes_since_last_report_);

  // There may be several report intervals that elapsed that don't have any
  // writes in them. Report these too.
  int64 total_num_intervals_elapsed =
      (time_since_last_report / report_interval_);
  for (int64 i = 0; i < total_num_intervals_elapsed - 1; ++i)
    histogram->Add(0);

  writes_since_last_report_ = 0;
  last_report_time_ += total_num_intervals_elapsed * report_interval_;
}
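
// Worked example for ReportOutstandingWrites() (timings are illustrative):
// with the 5 minute report interval, a 12 minute gap between reports gives
// total_num_intervals_elapsed == 2, so one sample records the accumulated
// writes and one extra sample of 0 covers the empty interval;
// last_report_time_ then advances by 10 minutes and the remaining 2 minutes
// roll into the next report.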

base::HistogramBase* JsonPrefStore::WriteCountHistogram::GetHistogram() {
  std::string spaceless_basename;
  base::ReplaceChars(path_.BaseName().MaybeAsASCII(), " ", "_",
                     &spaceless_basename);
  std::string histogram_name =
      "Settings.JsonDataWriteCount." + spaceless_basename;

  // The min value for a histogram is 1. The max value is the maximum number of
  // writes that can occur in the window being recorded. The number of buckets
  // used is the max value (plus the underflow/overflow buckets).
  int32_t min_value = 1;
  int32_t max_value = report_interval_ / commit_interval_;
  int32_t num_buckets = max_value + 1;

  // NOTE: These values should NOT be changed without renaming the histogram,
  // otherwise it will create incompatible buckets.
  DCHECK_EQ(30, max_value);
  DCHECK_EQ(31, num_buckets);
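  // Arithmetic check: given the 5 minute report interval above, these DCHECKs
  // hold exactly when the writer's commit interval is 10 seconds
  // (5 * 60 / 10 == 30).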

  // The histogram below is an expansion of the UMA_HISTOGRAM_CUSTOM_COUNTS
  // macro adapted to allow for a dynamically suffixed histogram name.
  // Note: The factory creates and owns the histogram.
  base::HistogramBase* histogram = base::Histogram::FactoryGet(
      histogram_name, min_value, max_value, num_buckets,
      base::HistogramBase::kUmaTargetedHistogramFlag);
  return histogram;
}