1 // Copyright (c) 2011 The Chromium Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
#include "courgette/encoded_program.h"

#include <algorithm>
#include <cwchar>
#include <string>

#include "base/environment.h"
#include "base/logging.h"
#include "base/memory/scoped_ptr.h"
#include "base/strings/string_util.h"
#include "base/strings/utf_string_conversions.h"
#include "courgette/courgette.h"
#include "courgette/disassembler_elf_32_arm.h"
#include "courgette/streams.h"
#include "courgette/types_elf.h"
// Indexes of the serialized streams within a SinkStreamSet/SourceStreamSet.
// WriteTo and ReadFrom must agree on these.
const int kStreamMisc            = 0;
const int kStreamOps             = 1;
const int kStreamBytes           = 2;
const int kStreamAbs32Indexes    = 3;
const int kStreamRel32Indexes    = 4;
const int kStreamAbs32Addresses  = 5;
const int kStreamRel32Addresses  = 6;
const int kStreamCopyCounts      = 7;
// Origin addresses are interleaved into the 'misc' stream.
const int kStreamOriginAddresses = kStreamMisc;

// One past the largest stream index in use.
const int kStreamLimit = 9;
37 // Constructor is here rather than in the header. Although the constructor
38 // appears to do nothing it is fact quite large because of the implicit calls to
39 // field constructors. Ditto for the destructor.
40 EncodedProgram::EncodedProgram() : image_base_(0) {}
41 EncodedProgram::~EncodedProgram() {}
43 // Serializes a vector of integral values using Varint32 coding.
45 CheckBool
WriteVector(const V
& items
, SinkStream
* buffer
) {
46 size_t count
= items
.size();
47 bool ok
= buffer
->WriteSizeVarint32(count
);
48 for (size_t i
= 0; ok
&& i
< count
; ++i
) {
49 COMPILE_ASSERT(sizeof(items
[0]) <= sizeof(uint32
), // NOLINT
50 T_must_fit_in_uint32
);
51 ok
= buffer
->WriteSizeVarint32(items
[i
]);
57 bool ReadVector(V
* items
, SourceStream
* buffer
) {
59 if (!buffer
->ReadVarint32(&count
))
64 bool ok
= items
->reserve(count
);
65 for (size_t i
= 0; ok
&& i
< count
; ++i
) {
67 ok
= buffer
->ReadVarint32(&item
);
69 ok
= items
->push_back(static_cast<typename
V::value_type
>(item
));
75 // Serializes a vector, using delta coding followed by Varint32 coding.
77 CheckBool
WriteU32Delta(const V
& set
, SinkStream
* buffer
) {
78 size_t count
= set
.size();
79 bool ok
= buffer
->WriteSizeVarint32(count
);
81 for (size_t i
= 0; ok
&& i
< count
; ++i
) {
82 uint32 current
= set
[i
];
83 uint32 delta
= current
- prev
;
84 ok
= buffer
->WriteVarint32(delta
);
91 static CheckBool
ReadU32Delta(V
* set
, SourceStream
* buffer
) {
94 if (!buffer
->ReadVarint32(&count
))
98 bool ok
= set
->reserve(count
);
101 for (size_t i
= 0; ok
&& i
< count
; ++i
) {
103 ok
= buffer
->ReadVarint32(&delta
);
105 uint32 current
= prev
+ delta
;
106 ok
= set
->push_back(current
);
114 // Write a vector as the byte representation of the contents.
116 // (This only really makes sense for a type T that has sizeof(T)==1, otherwise
117 // serialized representation is not endian-agnostic. But it is useful to keep
118 // the possibility of a greater size for experiments comparing Varint32 encoding
119 // of a vector of larger integrals vs a plain form.)
122 CheckBool
WriteVectorU8(const V
& items
, SinkStream
* buffer
) {
123 size_t count
= items
.size();
124 bool ok
= buffer
->WriteSizeVarint32(count
);
125 if (count
!= 0 && ok
) {
126 size_t byte_count
= count
* sizeof(typename
V::value_type
);
127 ok
= buffer
->Write(static_cast<const void*>(&items
[0]), byte_count
);
133 bool ReadVectorU8(V
* items
, SourceStream
* buffer
) {
135 if (!buffer
->ReadVarint32(&count
))
139 bool ok
= items
->resize(count
, 0);
140 if (ok
&& count
!= 0) {
141 size_t byte_count
= count
* sizeof(typename
V::value_type
);
142 return buffer
->Read(static_cast<void*>(&((*items
)[0])), byte_count
);
147 ////////////////////////////////////////////////////////////////////////////////
149 CheckBool
EncodedProgram::DefineRel32Label(int index
, RVA value
) {
150 return DefineLabelCommon(&rel32_rva_
, index
, value
);
153 CheckBool
EncodedProgram::DefineAbs32Label(int index
, RVA value
) {
154 return DefineLabelCommon(&abs32_rva_
, index
, value
);
157 static const RVA kUnassignedRVA
= static_cast<RVA
>(-1);
159 CheckBool
EncodedProgram::DefineLabelCommon(RvaVector
* rvas
,
163 if (static_cast<int>(rvas
->size()) <= index
)
164 ok
= rvas
->resize(index
+ 1, kUnassignedRVA
);
167 DCHECK_EQ((*rvas
)[index
], kUnassignedRVA
)
168 << "DefineLabel double assigned " << index
;
169 (*rvas
)[index
] = rva
;
175 void EncodedProgram::EndLabels() {
176 FinishLabelsCommon(&abs32_rva_
);
177 FinishLabelsCommon(&rel32_rva_
);
180 void EncodedProgram::FinishLabelsCommon(RvaVector
* rvas
) {
181 // Replace all unassigned slots with the value at the previous index so they
182 // delta-encode to zero. (There might be better values than zero. The way to
183 // get that is have the higher level assembly program assign the unassigned
186 size_t size
= rvas
->size();
187 for (size_t i
= 0; i
< size
; ++i
) {
188 if ((*rvas
)[i
] == kUnassignedRVA
)
189 (*rvas
)[i
] = previous
;
191 previous
= (*rvas
)[i
];
195 CheckBool
EncodedProgram::AddOrigin(RVA origin
) {
196 return ops_
.push_back(ORIGIN
) && origins_
.push_back(origin
);
199 CheckBool
EncodedProgram::AddCopy(uint32 count
, const void* bytes
) {
200 const uint8
* source
= static_cast<const uint8
*>(bytes
);
204 // Fold adjacent COPY instructions into one. This nearly halves the size of
205 // an EncodedProgram with only COPY1 instructions since there are approx plain
206 // 16 bytes per reloc. This has a working-set benefit during decompression.
207 // For compression of files with large differences this makes a small (4%)
208 // improvement in size. For files with small differences this degrades the
209 // compressed size by 1.3%
211 if (ops_
.back() == COPY1
) {
213 ok
= copy_counts_
.push_back(1);
215 if (ok
&& ops_
.back() == COPY
) {
216 copy_counts_
.back() += count
;
217 for (uint32 i
= 0; ok
&& i
< count
; ++i
) {
218 ok
= copy_bytes_
.push_back(source
[i
]);
226 ok
= ops_
.push_back(COPY1
) && copy_bytes_
.push_back(source
[0]);
228 ok
= ops_
.push_back(COPY
) && copy_counts_
.push_back(count
);
229 for (uint32 i
= 0; ok
&& i
< count
; ++i
) {
230 ok
= copy_bytes_
.push_back(source
[i
]);
238 CheckBool
EncodedProgram::AddAbs32(int label_index
) {
239 return ops_
.push_back(ABS32
) && abs32_ix_
.push_back(label_index
);
242 CheckBool
EncodedProgram::AddRel32(int label_index
) {
243 return ops_
.push_back(REL32
) && rel32_ix_
.push_back(label_index
);
246 CheckBool
EncodedProgram::AddRel32ARM(uint16 op
, int label_index
) {
247 return ops_
.push_back(static_cast<OP
>(op
)) &&
248 rel32_ix_
.push_back(label_index
);
251 CheckBool
EncodedProgram::AddPeMakeRelocs(ExecutableType kind
) {
252 if (kind
== EXE_WIN_32_X86
)
253 return ops_
.push_back(MAKE_PE_RELOCATION_TABLE
);
254 return ops_
.push_back(MAKE_PE64_RELOCATION_TABLE
);
257 CheckBool
EncodedProgram::AddElfMakeRelocs() {
258 return ops_
.push_back(MAKE_ELF_RELOCATION_TABLE
);
261 CheckBool
EncodedProgram::AddElfARMMakeRelocs() {
262 return ops_
.push_back(MAKE_ELF_ARM_RELOCATION_TABLE
);
265 void EncodedProgram::DebuggingSummary() {
266 VLOG(1) << "EncodedProgram Summary"
267 << "\n image base " << image_base_
268 << "\n abs32 rvas " << abs32_rva_
.size()
269 << "\n rel32 rvas " << rel32_rva_
.size()
270 << "\n ops " << ops_
.size()
271 << "\n origins " << origins_
.size()
272 << "\n copy_counts " << copy_counts_
.size()
273 << "\n copy_bytes " << copy_bytes_
.size()
274 << "\n abs32_ix " << abs32_ix_
.size()
275 << "\n rel32_ix " << rel32_ix_
.size();
////////////////////////////////////////////////////////////////////////////////

// For algorithm refinement purposes it is useful to write subsets of the file
// format.  This gives us the ability to estimate the entropy of the
// differential compression of the individual streams, which can provide
// invaluable insights.  The default, of course, is to include all the streams.
//
enum FieldSelect {
  INCLUDE_ABS32_ADDRESSES = 0x0001,
  INCLUDE_REL32_ADDRESSES = 0x0002,
  INCLUDE_ABS32_INDEXES   = 0x0010,
  INCLUDE_REL32_INDEXES   = 0x0020,
  INCLUDE_OPS             = 0x0100,
  INCLUDE_BYTES           = 0x0200,
  INCLUDE_COPY_COUNTS     = 0x0400,
  INCLUDE_MISC            = 0x1000
};
296 static FieldSelect
GetFieldSelect() {
298 // TODO(sra): Use better configuration.
299 scoped_ptr
<base::Environment
> env(base::Environment::Create());
301 env
->GetVar("A_FIELDS", &s
);
303 return static_cast<FieldSelect
>(
304 wcstoul(base::ASCIIToWide(s
).c_str(), 0, 0));
307 return static_cast<FieldSelect
>(~0);
310 CheckBool
EncodedProgram::WriteTo(SinkStreamSet
* streams
) {
311 FieldSelect select
= GetFieldSelect();
313 // The order of fields must be consistent in WriteTo and ReadFrom, regardless
314 // of the streams used. The code can be configured with all kStreamXXX
315 // constants the same.
317 // If we change the code to pipeline reading with assembly (to avoid temporary
318 // storage vectors by consuming operands directly from the stream) then we
319 // need to read the base address and the random access address tables first,
320 // the rest can be interleaved.
322 if (select
& INCLUDE_MISC
) {
323 // TODO(sra): write 64 bits.
324 if (!streams
->stream(kStreamMisc
)->WriteVarint32(
325 static_cast<uint32
>(image_base_
))) {
332 if (select
& INCLUDE_ABS32_ADDRESSES
) {
333 success
&= WriteU32Delta(abs32_rva_
,
334 streams
->stream(kStreamAbs32Addresses
));
337 if (select
& INCLUDE_REL32_ADDRESSES
) {
338 success
&= WriteU32Delta(rel32_rva_
,
339 streams
->stream(kStreamRel32Addresses
));
342 if (select
& INCLUDE_MISC
)
343 success
&= WriteVector(origins_
, streams
->stream(kStreamOriginAddresses
));
345 if (select
& INCLUDE_OPS
) {
347 success
&= streams
->stream(kStreamOps
)->Reserve(ops_
.size() + 5);
348 success
&= WriteVector(ops_
, streams
->stream(kStreamOps
));
351 if (select
& INCLUDE_COPY_COUNTS
)
352 success
&= WriteVector(copy_counts_
, streams
->stream(kStreamCopyCounts
));
354 if (select
& INCLUDE_BYTES
)
355 success
&= WriteVectorU8(copy_bytes_
, streams
->stream(kStreamBytes
));
357 if (select
& INCLUDE_ABS32_INDEXES
)
358 success
&= WriteVector(abs32_ix_
, streams
->stream(kStreamAbs32Indexes
));
360 if (select
& INCLUDE_REL32_INDEXES
)
361 success
&= WriteVector(rel32_ix_
, streams
->stream(kStreamRel32Indexes
));
366 bool EncodedProgram::ReadFrom(SourceStreamSet
* streams
) {
367 // TODO(sra): read 64 bits.
369 if (!streams
->stream(kStreamMisc
)->ReadVarint32(&temp
))
373 if (!ReadU32Delta(&abs32_rva_
, streams
->stream(kStreamAbs32Addresses
)))
375 if (!ReadU32Delta(&rel32_rva_
, streams
->stream(kStreamRel32Addresses
)))
377 if (!ReadVector(&origins_
, streams
->stream(kStreamOriginAddresses
)))
379 if (!ReadVector(&ops_
, streams
->stream(kStreamOps
)))
381 if (!ReadVector(©_counts_
, streams
->stream(kStreamCopyCounts
)))
383 if (!ReadVectorU8(©_bytes_
, streams
->stream(kStreamBytes
)))
385 if (!ReadVector(&abs32_ix_
, streams
->stream(kStreamAbs32Indexes
)))
387 if (!ReadVector(&rel32_ix_
, streams
->stream(kStreamRel32Indexes
)))
390 // Check that streams have been completely consumed.
391 for (int i
= 0; i
< kStreamLimit
; ++i
) {
392 if (streams
->stream(i
)->Remaining() > 0)
// Safe, non-throwing version of std::vector::at().  Returns 'true' for success,
// 'false' for out-of-bounds index error.  |*output| is untouched on failure.
template<typename V, typename T>
bool VectorAt(const V& v, size_t index, T* output) {
  if (index >= v.size())
    return false;
  *output = v[index];
  return true;
}
409 CheckBool
EncodedProgram::EvaluateRel32ARM(OP op
,
412 SinkStream
* output
) {
413 switch (op
& 0x0000F000) {
416 if (!VectorAt(rel32_ix_
, ix_rel32_ix
, &index
))
420 if (!VectorAt(rel32_rva_
, index
, &rva
))
422 uint32 decompressed_op
;
423 if (!DisassemblerElf32ARM::Decompress(ARM_OFF8
,
424 static_cast<uint16
>(op
),
425 static_cast<uint32
>(rva
-
430 uint16 op16
= decompressed_op
;
431 if (!output
->Write(&op16
, 2))
438 if (!VectorAt(rel32_ix_
, ix_rel32_ix
, &index
))
442 if (!VectorAt(rel32_rva_
, index
, &rva
))
444 uint32 decompressed_op
;
445 if (!DisassemblerElf32ARM::Decompress(ARM_OFF11
, (uint16
) op
,
446 (uint32
) (rva
- current_rva
),
450 uint16 op16
= decompressed_op
;
451 if (!output
->Write(&op16
, 2))
458 if (!VectorAt(rel32_ix_
, ix_rel32_ix
, &index
))
462 if (!VectorAt(rel32_rva_
, index
, &rva
))
464 uint32 decompressed_op
;
465 if (!DisassemblerElf32ARM::Decompress(ARM_OFF24
, (uint16
) op
,
466 (uint32
) (rva
- current_rva
),
470 if (!output
->Write(&decompressed_op
, 4))
477 if (!VectorAt(rel32_ix_
, ix_rel32_ix
, &index
))
481 if (!VectorAt(rel32_rva_
, index
, &rva
))
483 uint32 decompressed_op
;
484 if (!DisassemblerElf32ARM::Decompress(ARM_OFF25
, (uint16
) op
,
485 (uint32
) (rva
- current_rva
),
489 uint32 words
= (decompressed_op
<< 16) | (decompressed_op
>> 16);
490 if (!output
->Write(&words
, 4))
497 if (!VectorAt(rel32_ix_
, ix_rel32_ix
, &index
))
501 if (!VectorAt(rel32_rva_
, index
, &rva
))
503 uint32 decompressed_op
;
504 if (!DisassemblerElf32ARM::Decompress(ARM_OFF21
, (uint16
) op
,
505 (uint32
) (rva
- current_rva
),
509 uint32 words
= (decompressed_op
<< 16) | (decompressed_op
>> 16);
510 if (!output
->Write(&words
, 4))
522 CheckBool
EncodedProgram::AssembleTo(SinkStream
* final_buffer
) {
523 // For the most part, the assembly process walks the various tables.
524 // ix_mumble is the index into the mumble table.
525 size_t ix_origins
= 0;
526 size_t ix_copy_counts
= 0;
527 size_t ix_copy_bytes
= 0;
528 size_t ix_abs32_ix
= 0;
529 size_t ix_rel32_ix
= 0;
533 bool pending_pe_relocation_table
= false;
534 uint8 pending_pe_relocation_table_type
= 0x03; // IMAGE_REL_BASED_HIGHLOW
535 Elf32_Word pending_elf_relocation_table_type
= 0;
536 SinkStream bytes_following_relocation_table
;
538 SinkStream
* output
= final_buffer
;
540 for (size_t ix_ops
= 0; ix_ops
< ops_
.size(); ++ix_ops
) {
541 OP op
= ops_
[ix_ops
];
545 if (!EvaluateRel32ARM(op
, ix_rel32_ix
, current_rva
, output
))
551 if (!VectorAt(origins_
, ix_origins
, §ion_rva
))
554 current_rva
= section_rva
;
560 if (!VectorAt(copy_counts_
, ix_copy_counts
, &count
))
563 for (uint32 i
= 0; i
< count
; ++i
) {
565 if (!VectorAt(copy_bytes_
, ix_copy_bytes
, &b
))
568 if (!output
->Write(&b
, 1))
571 current_rva
+= count
;
577 if (!VectorAt(copy_bytes_
, ix_copy_bytes
, &b
))
580 if (!output
->Write(&b
, 1))
588 if (!VectorAt(rel32_ix_
, ix_rel32_ix
, &index
))
592 if (!VectorAt(rel32_rva_
, index
, &rva
))
594 uint32 offset
= (rva
- (current_rva
+ 4));
595 if (!output
->Write(&offset
, 4))
603 if (!VectorAt(abs32_ix_
, ix_abs32_ix
, &index
))
607 if (!VectorAt(abs32_rva_
, index
, &rva
))
609 uint32 abs32
= static_cast<uint32
>(rva
+ image_base_
);
610 if (!abs32_relocs_
.push_back(current_rva
) || !output
->Write(&abs32
, 4))
616 case MAKE_PE_RELOCATION_TABLE
: {
617 // We can see the base relocation anywhere, but we only have the
618 // information to generate it at the very end. So we divert the bytes
619 // we are generating to a temporary stream.
620 if (pending_pe_relocation_table
)
621 return false; // Can't have two base relocation tables.
623 pending_pe_relocation_table
= true;
624 output
= &bytes_following_relocation_table
;
626 // There is a potential problem *if* the instruction stream contains
627 // some REL32 relocations following the base relocation and in the same
628 // section. We don't know the size of the table, so 'current_rva' will
629 // be wrong, causing REL32 offsets to be miscalculated. This never
630 // happens; the base relocation table is usually in a section of its
631 // own, a data-only section, and following everything else in the
632 // executable except some padding zero bytes. We could fix this by
633 // emitting an ORIGIN after the MAKE_BASE_RELOCATION_TABLE.
636 case MAKE_PE64_RELOCATION_TABLE
: {
637 if (pending_pe_relocation_table
)
638 return false; // Can't have two base relocation tables.
640 pending_pe_relocation_table
= true;
641 pending_pe_relocation_table_type
= 0x0A; // IMAGE_REL_BASED_DIR64
642 output
= &bytes_following_relocation_table
;
646 case MAKE_ELF_ARM_RELOCATION_TABLE
: {
647 // We can see the base relocation anywhere, but we only have the
648 // information to generate it at the very end. So we divert the bytes
649 // we are generating to a temporary stream.
650 if (pending_elf_relocation_table_type
)
651 return false; // Can't have two base relocation tables.
653 pending_elf_relocation_table_type
= R_ARM_RELATIVE
;
654 output
= &bytes_following_relocation_table
;
658 case MAKE_ELF_RELOCATION_TABLE
: {
659 // We can see the base relocation anywhere, but we only have the
660 // information to generate it at the very end. So we divert the bytes
661 // we are generating to a temporary stream.
662 if (pending_elf_relocation_table_type
)
663 return false; // Can't have two base relocation tables.
665 pending_elf_relocation_table_type
= R_386_RELATIVE
;
666 output
= &bytes_following_relocation_table
;
672 if (pending_pe_relocation_table
) {
673 if (!GeneratePeRelocations(final_buffer
,
674 pending_pe_relocation_table_type
) ||
675 !final_buffer
->Append(&bytes_following_relocation_table
))
679 if (pending_elf_relocation_table_type
) {
680 if (!GenerateElfRelocations(pending_elf_relocation_table_type
,
682 !final_buffer
->Append(&bytes_following_relocation_table
))
686 // Final verification check: did we consume all lists?
687 if (ix_copy_counts
!= copy_counts_
.size())
689 if (ix_copy_bytes
!= copy_bytes_
.size())
691 if (ix_abs32_ix
!= abs32_ix_
.size())
693 if (ix_rel32_ix
!= rel32_ix_
.size())
699 // RelocBlock has the layout of a block of relocations in the base relocation
700 // table file format.
702 struct RelocBlockPOD
{
705 uint16 relocs
[4096]; // Allow up to one relocation per byte of a 4k page.
708 COMPILE_ASSERT(offsetof(RelocBlockPOD
, relocs
) == 8, reloc_block_header_size
);
713 pod
.page_rva
= 0xFFFFFFFF;
717 void Add(uint16 item
) {
718 pod
.relocs
[(pod
.block_size
-8)/2] = item
;
722 CheckBool
Flush(SinkStream
* buffer
) WARN_UNUSED_RESULT
{
724 if (pod
.block_size
!= 8) {
725 if (pod
.block_size
% 4 != 0) { // Pad to make size multiple of 4 bytes.
728 ok
= buffer
->Write(&pod
, pod
.block_size
);
736 CheckBool
EncodedProgram::GeneratePeRelocations(SinkStream
* buffer
,
738 std::sort(abs32_relocs_
.begin(), abs32_relocs_
.end());
743 for (size_t i
= 0; ok
&& i
< abs32_relocs_
.size(); ++i
) {
744 uint32 rva
= abs32_relocs_
[i
];
745 uint32 page_rva
= rva
& ~0xFFF;
746 if (page_rva
!= block
.pod
.page_rva
) {
747 ok
&= block
.Flush(buffer
);
748 block
.pod
.page_rva
= page_rva
;
751 block
.Add(((static_cast<uint16
>(type
)) << 12 ) | (rva
& 0xFFF));
753 ok
&= block
.Flush(buffer
);
757 CheckBool
EncodedProgram::GenerateElfRelocations(Elf32_Word r_info
,
758 SinkStream
* buffer
) {
759 std::sort(abs32_relocs_
.begin(), abs32_relocs_
.end());
761 Elf32_Rel relocation_block
;
763 relocation_block
.r_info
= r_info
;
766 for (size_t i
= 0; ok
&& i
< abs32_relocs_
.size(); ++i
) {
767 relocation_block
.r_offset
= abs32_relocs_
[i
];
768 ok
= buffer
->Write(&relocation_block
, sizeof(Elf32_Rel
));
773 ////////////////////////////////////////////////////////////////////////////////
775 Status
WriteEncodedProgram(EncodedProgram
* encoded
, SinkStreamSet
* sink
) {
776 if (!encoded
->WriteTo(sink
))
777 return C_STREAM_ERROR
;
781 Status
ReadEncodedProgram(SourceStreamSet
* streams
, EncodedProgram
** output
) {
782 EncodedProgram
* encoded
= new EncodedProgram();
783 if (encoded
->ReadFrom(streams
)) {
788 return C_DESERIALIZATION_FAILED
;
791 Status
Assemble(EncodedProgram
* encoded
, SinkStream
* buffer
) {
792 bool assembled
= encoded
->AssembleTo(buffer
);
795 return C_ASSEMBLY_FAILED
;
798 void DeleteEncodedProgram(EncodedProgram
* encoded
) {