// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "ui/base/resource/data_pack.h"

#include "base/files/file_util.h"
#include "base/files/memory_mapped_file.h"
#include "base/logging.h"
#include "base/memory/ref_counted_memory.h"
#include "base/metrics/histogram.h"
#include "base/strings/string_piece.h"

// For details of the file layout, see
// http://dev.chromium.org/developers/design-documents/linuxresourcesandlocalizedstrings
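//
// In brief (a sketch derived from kHeaderLength, DataPackEntry and
// WritePack() below; the design doc above is authoritative):
//
//   uint32 version             -- must equal kFileFormatVersion
//   uint32 entry_count
//   uint8  text_encoding_type  -- BINARY, UTF8 or UTF16
//   (entry_count + 1) index entries of { uint16 resource_id; uint32 file_offset; }
//   concatenated resource payloads
//
// The extra index entry is a sentinel whose offset marks the end of the data
// section, so the size of resource i is entries[i + 1].offset - entries[i].offset.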
namespace {

static const uint32 kFileFormatVersion = 4;

// Length of file header: version, entry count and text encoding type.
static const size_t kHeaderLength = 2 * sizeof(uint32) + sizeof(uint8);
#pragma pack(push, 2)
struct DataPackEntry {
  uint16 resource_id;
  uint32 file_offset;

  static int CompareById(const void* void_key, const void* void_entry) {
    uint16 key = *reinterpret_cast<const uint16*>(void_key);
    const DataPackEntry* entry =
        reinterpret_cast<const DataPackEntry*>(void_entry);
    if (key < entry->resource_id) {
      return -1;
    } else if (key > entry->resource_id) {
      return 1;
    } else {
      return 0;
    }
  }
};
#pragma pack(pop)

static_assert(sizeof(DataPackEntry) == 6, "size of entry must be six");
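// Notes: the #pragma pack keeps each index entry at exactly six bytes (the
// static_assert above would fire if a compiler padded the struct to eight),
// and CompareById() follows the bsearch() comparator contract: negative,
// positive or zero as the key orders before, after or equal to the entry.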
// We're crashing when trying to load a pak file on Windows. Add some error
// codes for logging.
// http://crbug.com/58056
enum LoadErrors {
  INIT_FAILED = 1,
  BAD_VERSION,
  INDEX_TRUNCATED,
  ENTRY_NOT_FOUND,
  HEADER_TRUNCATED,
  WRONG_ENCODING,
  INIT_FAILED_FROM_FILE,

  LOAD_ERRORS_COUNT,
};

}  // namespace

namespace ui {
DataPack::DataPack(ui::ScaleFactor scale_factor)
    : resource_count_(0),
      text_encoding_type_(BINARY),
      scale_factor_(scale_factor),
      has_only_material_design_assets_(false) {
}

DataPack::~DataPack() {
}
bool DataPack::LoadFromPath(const base::FilePath& path) {
  mmap_.reset(new base::MemoryMappedFile);
  if (!mmap_->Initialize(path)) {
    DLOG(ERROR) << "Failed to mmap datapack";
    UMA_HISTOGRAM_ENUMERATION("DataPack.Load", INIT_FAILED,
                              LOAD_ERRORS_COUNT);
    mmap_.reset();
    return false;
  }

  return LoadImpl();
}
bool DataPack::LoadFromFile(base::File file) {
  return LoadFromFileRegion(file.Pass(),
                            base::MemoryMappedFile::Region::kWholeFile);
}
bool DataPack::LoadFromFileRegion(
    base::File file,
    const base::MemoryMappedFile::Region& region) {
  mmap_.reset(new base::MemoryMappedFile);
  if (!mmap_->Initialize(file.Pass(), region)) {
    DLOG(ERROR) << "Failed to mmap datapack";
    UMA_HISTOGRAM_ENUMERATION("DataPack.Load", INIT_FAILED_FROM_FILE,
                              LOAD_ERRORS_COUNT);
    mmap_.reset();
    return false;
  }

  return LoadImpl();
}
bool DataPack::LoadImpl() {
  // Sanity check the header of the file.
  if (kHeaderLength > mmap_->length()) {
    DLOG(ERROR) << "Data pack file corruption: incomplete file header.";
    UMA_HISTOGRAM_ENUMERATION("DataPack.Load", HEADER_TRUNCATED,
                              LOAD_ERRORS_COUNT);
    mmap_.reset();
    return false;
  }
  // Parse the header of the file.
  // First uint32: version; second: resource count.
  const uint32* ptr = reinterpret_cast<const uint32*>(mmap_->data());
  uint32 version = ptr[0];
  if (version != kFileFormatVersion) {
    LOG(ERROR) << "Bad data pack version: got " << version << ", expected "
               << kFileFormatVersion;
    UMA_HISTOGRAM_ENUMERATION("DataPack.Load", BAD_VERSION,
                              LOAD_ERRORS_COUNT);
    mmap_.reset();
    return false;
  }
  resource_count_ = ptr[1];
  // Third: text encoding.
  const uint8* ptr_encoding = reinterpret_cast<const uint8*>(ptr + 2);
  text_encoding_type_ = static_cast<TextEncodingType>(*ptr_encoding);
  if (text_encoding_type_ != UTF8 && text_encoding_type_ != UTF16 &&
      text_encoding_type_ != BINARY) {
    LOG(ERROR) << "Bad data pack text encoding: got " << text_encoding_type_
               << ", expected between " << BINARY << " and " << UTF16;
    UMA_HISTOGRAM_ENUMERATION("DataPack.Load", WRONG_ENCODING,
                              LOAD_ERRORS_COUNT);
    mmap_.reset();
    return false;
  }
  // Sanity check the file.
  // 1) Check we have enough entries. There's an extra entry after the last
  // item which gives the length of the last item.
  if (kHeaderLength + (resource_count_ + 1) * sizeof(DataPackEntry) >
      mmap_->length()) {
    LOG(ERROR) << "Data pack file corruption: too short for number of "
                  "entries specified.";
    UMA_HISTOGRAM_ENUMERATION("DataPack.Load", INDEX_TRUNCATED,
                              LOAD_ERRORS_COUNT);
    mmap_.reset();
    return false;
  }
  // 2) Verify the entries are within the appropriate bounds. There's an extra
  // entry after the last item which gives us the length of the last item.
  for (size_t i = 0; i < resource_count_ + 1; ++i) {
    const DataPackEntry* entry = reinterpret_cast<const DataPackEntry*>(
        mmap_->data() + kHeaderLength + (i * sizeof(DataPackEntry)));
    if (entry->file_offset > mmap_->length()) {
      LOG(ERROR) << "Entry #" << i << " in data pack points off end of file. "
                 << "Was the file corrupted?";
      UMA_HISTOGRAM_ENUMERATION("DataPack.Load", ENTRY_NOT_FOUND,
                                LOAD_ERRORS_COUNT);
      mmap_.reset();
      return false;
    }
  }

  return true;
}
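// The index is sorted by resource_id: WritePack() below iterates a std::map,
// which yields keys in ascending order, so bsearch() over the mmapped index
// is valid.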
bool DataPack::HasResource(uint16 resource_id) const {
  return !!bsearch(&resource_id, mmap_->data() + kHeaderLength,
                   resource_count_, sizeof(DataPackEntry),
                   DataPackEntry::CompareById);
}
bool DataPack::GetStringPiece(uint16 resource_id,
                              base::StringPiece* data) const {
  // It won't be hard to make this endian-agnostic, but it's not worth
  // bothering to do right now.
#if defined(__BYTE_ORDER)
  // Linux check.
  static_assert(__BYTE_ORDER == __LITTLE_ENDIAN,
                "datapack assumes little endian");
#elif defined(__BIG_ENDIAN__)
  // Mac check.
#error DataPack assumes little endian
#endif

  const DataPackEntry* target = reinterpret_cast<const DataPackEntry*>(
      bsearch(&resource_id, mmap_->data() + kHeaderLength, resource_count_,
              sizeof(DataPackEntry), DataPackEntry::CompareById));
  if (!target)
    return false;
  const DataPackEntry* next_entry = target + 1;
  // If the next entry points beyond the end of the file this data pack's
  // entry table is corrupt. Log an error and return false. See
  // http://crbug.com/371301.
  if (next_entry->file_offset > mmap_->length()) {
    size_t entry_index = target -
        reinterpret_cast<const DataPackEntry*>(mmap_->data() + kHeaderLength);
    LOG(ERROR) << "Entry #" << entry_index << " in data pack points off end "
               << "of file. This should have been caught when loading. Was the "
               << "file modified?";
    return false;
  }

  size_t length = next_entry->file_offset - target->file_offset;
  data->set(reinterpret_cast<const char*>(mmap_->data() + target->file_offset),
            length);
  return true;
}
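// Note that GetStringPiece() is zero-copy: the returned StringPiece aliases
// the mmapped bytes, so it is only valid while this DataPack (and therefore
// mmap_) stays alive.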
base::RefCountedStaticMemory* DataPack::GetStaticMemory(
    uint16 resource_id) const {
  base::StringPiece piece;
  if (!GetStringPiece(resource_id, &piece))
    return NULL;

  return new base::RefCountedStaticMemory(piece.data(), piece.length());
}
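// Like GetStringPiece(), this does not copy: RefCountedStaticMemory wraps the
// pointer and length in place, so the returned object must not outlive this
// pack.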
ResourceHandle::TextEncodingType DataPack::GetTextEncodingType() const {
  return text_encoding_type_;
}
ui::ScaleFactor DataPack::GetScaleFactor() const {
  return scale_factor_;
}
bool DataPack::HasOnlyMaterialDesignAssets() const {
  return has_only_material_design_assets_;
}
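// Debug-only check: a resource id may legitimately appear in two packs that
// differ in scale factor or in the material-design flag; any other duplicate
// would make lookup results depend on pack order, so it is DCHECKed below.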
#if DCHECK_IS_ON()
void DataPack::CheckForDuplicateResources(
    const ScopedVector<ResourceHandle>& packs) {
  for (size_t i = 0; i < resource_count_ + 1; ++i) {
    const DataPackEntry* entry = reinterpret_cast<const DataPackEntry*>(
        mmap_->data() + kHeaderLength + (i * sizeof(DataPackEntry)));
    const uint16 resource_id = entry->resource_id;
    const float resource_scale = GetScaleForScaleFactor(scale_factor_);
    for (const ResourceHandle* handle : packs) {
      if (HasOnlyMaterialDesignAssets() !=
          handle->HasOnlyMaterialDesignAssets()) {
        continue;
      }
      if (GetScaleForScaleFactor(handle->GetScaleFactor()) != resource_scale)
        continue;
      DCHECK(!handle->HasResource(resource_id)) << "Duplicate resource "
                                                << resource_id << " with scale "
                                                << resource_scale;
    }
  }
}
#endif  // DCHECK_IS_ON()
// static
bool DataPack::WritePack(const base::FilePath& path,
                         const std::map<uint16, base::StringPiece>& resources,
                         TextEncodingType textEncodingType) {
  FILE* file = base::OpenFile(path, "wb");
  if (!file)
    return false;

  if (fwrite(&kFileFormatVersion, sizeof(kFileFormatVersion), 1, file) != 1) {
    LOG(ERROR) << "Failed to write file version";
    base::CloseFile(file);
    return false;
  }
  // Note: the python version of this function explicitly sorted keys, but
  // std::map is a sorted associative container, so we shouldn't have to do
  // that.
  uint32 entry_count = resources.size();
  if (fwrite(&entry_count, sizeof(entry_count), 1, file) != 1) {
    LOG(ERROR) << "Failed to write entry count";
    base::CloseFile(file);
    return false;
  }
  if (textEncodingType != UTF8 && textEncodingType != UTF16 &&
      textEncodingType != BINARY) {
    LOG(ERROR) << "Invalid text encoding type, got " << textEncodingType
               << ", expected between " << BINARY << " and " << UTF16;
    base::CloseFile(file);
    return false;
  }
  uint8 write_buffer = static_cast<uint8>(textEncodingType);
  if (fwrite(&write_buffer, sizeof(uint8), 1, file) != 1) {
    LOG(ERROR) << "Failed to write file text resources encoding";
    base::CloseFile(file);
    return false;
  }
  // Each entry is a uint16 + a uint32. We have an extra entry after the last
  // item so we can compute the size of the last item.
  uint32 index_length = (entry_count + 1) * sizeof(DataPackEntry);
  uint32 data_offset = kHeaderLength + index_length;
  for (std::map<uint16, base::StringPiece>::const_iterator it =
           resources.begin();
       it != resources.end(); ++it) {
    uint16 resource_id = it->first;
    if (fwrite(&resource_id, sizeof(resource_id), 1, file) != 1) {
      LOG(ERROR) << "Failed to write id for " << resource_id;
      base::CloseFile(file);
      return false;
    }

    if (fwrite(&data_offset, sizeof(data_offset), 1, file) != 1) {
      LOG(ERROR) << "Failed to write offset for " << resource_id;
      base::CloseFile(file);
      return false;
    }

    data_offset += it->second.length();
  }
  // We place an extra entry after the last item that allows us to read the
  // size of the last item.
  uint16 resource_id = 0;
  if (fwrite(&resource_id, sizeof(resource_id), 1, file) != 1) {
    LOG(ERROR) << "Failed to write extra resource id.";
    base::CloseFile(file);
    return false;
  }

  if (fwrite(&data_offset, sizeof(data_offset), 1, file) != 1) {
    LOG(ERROR) << "Failed to write extra offset.";
    base::CloseFile(file);
    return false;
  }
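  // At this point data_offset is the total size of the finished file, and the
  // sentinel entry just written carries it, which is what lets readers size
  // the last resource.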
  for (std::map<uint16, base::StringPiece>::const_iterator it =
           resources.begin();
       it != resources.end(); ++it) {
    // Skip empty resources: fwrite() returns 0 when the element size is 0,
    // which would otherwise be misreported as a write failure.
    if (it->second.empty())
      continue;
    if (fwrite(it->second.data(), it->second.length(), 1, file) != 1) {
      LOG(ERROR) << "Failed to write data for " << it->first;
      base::CloseFile(file);
      return false;
    }
  }

  base::CloseFile(file);

  return true;
}

}  // namespace ui
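// Usage sketch for WritePack() (hypothetical ids and output path, shown only
// to illustrate the call shape):
//
//   std::map<uint16, base::StringPiece> resources;
//   resources[1] = base::StringPiece("hello");
//   resources[2] = base::StringPiece("world");
//   ui::DataPack::WritePack(base::FilePath(FILE_PATH_LITERAL("out.pak")),
//                           resources, ui::DataPack::UTF8);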