// Copyright (c) 2011 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "courgette/encoded_program.h"

#include <algorithm>
#include <string>

#include "base/environment.h"
#include "base/logging.h"
#include "base/memory/scoped_ptr.h"
#include "base/strings/string_util.h"
#include "base/strings/utf_string_conversions.h"
#include "courgette/courgette.h"
#include "courgette/disassembler_elf_32_arm.h"
#include "courgette/streams.h"
#include "courgette/types_elf.h"

namespace courgette {
const int kStreamMisc            = 0;
const int kStreamOps             = 1;
const int kStreamBytes           = 2;
const int kStreamAbs32Indexes    = 3;
const int kStreamRel32Indexes    = 4;
const int kStreamAbs32Addresses  = 5;
const int kStreamRel32Addresses  = 6;
const int kStreamCopyCounts      = 7;
const int kStreamOriginAddresses = kStreamMisc;

const int kStreamLimit = 9;
// Constructor is here rather than in the header. Although the constructor
// appears to do nothing, it is in fact quite large because of the implicit
// calls to field constructors. Ditto for the destructor.
EncodedProgram::EncodedProgram() : image_base_(0) {}
EncodedProgram::~EncodedProgram() {}
// Serializes a vector of integral values using Varint32 coding.
template<typename V>
CheckBool WriteVector(const V& items, SinkStream* buffer) {
  size_t count = items.size();
  bool ok = buffer->WriteSizeVarint32(count);
  for (size_t i = 0; ok && i < count; ++i) {
    COMPILE_ASSERT(sizeof(items[0]) <= sizeof(uint32),  // NOLINT
                   T_must_fit_in_uint32);
    ok = buffer->WriteSizeVarint32(items[i]);
  }
  return ok;
}
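// Illustrative encoding (assuming the usual 7-bits-per-byte Varint32 layout
// used by WriteVarint32/ReadVarint32): values below 128 occupy a single byte,
// and a value such as 300 (0x12C) is written as the two bytes 0xAC 0x02.
// Small values therefore stay small on disk, which is why the op and index
// vectors are serialized this way.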
template<typename V>
bool ReadVector(V* items, SourceStream* buffer) {
  uint32 count;
  if (!buffer->ReadVarint32(&count))
    return false;

  items->clear();

  bool ok = items->reserve(count);
  for (size_t i = 0; ok && i < count; ++i) {
    uint32 item;
    ok = buffer->ReadVarint32(&item);
    if (ok)
      ok = items->push_back(static_cast<typename V::value_type>(item));
  }

  return ok;
}
// Serializes a vector, using delta coding followed by Varint32 coding.
template<typename V>
CheckBool WriteU32Delta(const V& set, SinkStream* buffer) {
  size_t count = set.size();
  bool ok = buffer->WriteSizeVarint32(count);
  uint32 prev = 0;
  for (size_t i = 0; ok && i < count; ++i) {
    uint32 current = set[i];
    uint32 delta = current - prev;
    ok = buffer->WriteVarint32(delta);
    prev = current;
  }
  return ok;
}
template<typename V>
static CheckBool ReadU32Delta(V* set, SourceStream* buffer) {
  uint32 count;
  if (!buffer->ReadVarint32(&count))
    return false;

  set->clear();
  bool ok = set->reserve(count);
  uint32 prev = 0;

  for (size_t i = 0; ok && i < count; ++i) {
    uint32 delta;
    ok = buffer->ReadVarint32(&delta);
    if (ok) {
      uint32 current = prev + delta;
      ok = set->push_back(current);
      prev = current;
    }
  }

  return ok;
}
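// Illustrative example of the delta coding above: the RVAs {0x1000, 0x1004,
// 0x1010} are written as the deltas {0x1000, 0x4, 0xC}. Label RVAs are
// typically close together and mostly increasing, so most deltas are small
// and need only one or two Varint32 bytes.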
// Write a vector as the byte representation of the contents.
//
// (This only really makes sense for a type T that has sizeof(T)==1, otherwise
// the serialized representation is not endian-agnostic. But it is useful to
// keep the possibility of a greater size for experiments comparing Varint32
// encoding of a vector of larger integrals vs a plain form.)
//
template<typename V>
CheckBool WriteVectorU8(const V& items, SinkStream* buffer) {
  size_t count = items.size();
  bool ok = buffer->WriteSizeVarint32(count);
  if (count != 0 && ok) {
    size_t byte_count = count * sizeof(typename V::value_type);
    ok = buffer->Write(static_cast<const void*>(&items[0]), byte_count);
  }
  return ok;
}
template<typename V>
bool ReadVectorU8(V* items, SourceStream* buffer) {
  uint32 count;
  if (!buffer->ReadVarint32(&count))
    return false;

  items->clear();
  bool ok = items->resize(count, 0);
  if (ok && count != 0) {
    size_t byte_count = count * sizeof(typename V::value_type);
    return buffer->Read(static_cast<void*>(&((*items)[0])), byte_count);
  }
  return ok;
}
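// Illustrative note: with a hypothetical vector<uint32> holding {1}, the raw
// form above would serialize as 01 00 00 00 on a little-endian machine but
// 00 00 00 01 on a big-endian one. In this file only the uint8 copy_bytes_
// vector is stored this way, so the endian issue does not arise in practice.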
////////////////////////////////////////////////////////////////////////////////
CheckBool EncodedProgram::DefineRel32Label(int index, RVA value) {
  return DefineLabelCommon(&rel32_rva_, index, value);
}

CheckBool EncodedProgram::DefineAbs32Label(int index, RVA value) {
  return DefineLabelCommon(&abs32_rva_, index, value);
}
static const RVA kUnassignedRVA = static_cast<RVA>(-1);

CheckBool EncodedProgram::DefineLabelCommon(RvaVector* rvas,
                                            int index,
                                            RVA rva) {
  bool ok = true;

  if (static_cast<int>(rvas->size()) <= index)
    ok = rvas->resize(index + 1, kUnassignedRVA);

  if (ok) {
    DCHECK_EQ((*rvas)[index], kUnassignedRVA)
        << "DefineLabel double assigned " << index;
    (*rvas)[index] = rva;
  }

  return ok;
}
void EncodedProgram::EndLabels() {
  FinishLabelsCommon(&abs32_rva_);
  FinishLabelsCommon(&rel32_rva_);
}

void EncodedProgram::FinishLabelsCommon(RvaVector* rvas) {
  // Replace all unassigned slots with the value at the previous index so they
  // delta-encode to zero. (There might be better values than zero. The way to
  // get that is to have the higher level assembly program assign the
  // unassigned slots.)
  RVA previous = 0;
  size_t size = rvas->size();
  for (size_t i = 0; i < size; ++i) {
    if ((*rvas)[i] == kUnassignedRVA)
      (*rvas)[i] = previous;
    else
      previous = (*rvas)[i];
  }
}
CheckBool EncodedProgram::AddOrigin(RVA origin) {
  return ops_.push_back(ORIGIN) && origins_.push_back(origin);
}
CheckBool EncodedProgram::AddCopy(uint32 count, const void* bytes) {
  const uint8* source = static_cast<const uint8*>(bytes);

  bool ok = true;

  // Fold adjacent COPY instructions into one. This nearly halves the size of
  // an EncodedProgram with only COPY1 instructions since there are
  // approximately 16 plain bytes per reloc. This has a working-set benefit
  // during decompression. For compression of files with large differences this
  // makes a small (4%) improvement in size. For files with small differences
  // this degrades the compressed size by 1.3%.
  if (!ops_.empty()) {
    if (ops_.back() == COPY1) {
      ops_.back() = COPY;
      ok = copy_counts_.push_back(1);
    }
    if (ok && ops_.back() == COPY) {
      copy_counts_.back() += count;
      for (uint32 i = 0; ok && i < count; ++i) {
        ok = copy_bytes_.push_back(source[i]);
      }
      return ok;
    }
  }

  if (ok) {
    if (count == 1) {
      ok = ops_.push_back(COPY1) && copy_bytes_.push_back(source[0]);
    } else {
      ok = ops_.push_back(COPY) && copy_counts_.push_back(count);
      for (uint32 i = 0; ok && i < count; ++i) {
        ok = copy_bytes_.push_back(source[i]);
      }
    }
  }

  return ok;
}
CheckBool EncodedProgram::AddAbs32(int label_index) {
  return ops_.push_back(ABS32) && abs32_ix_.push_back(label_index);
}

CheckBool EncodedProgram::AddRel32(int label_index) {
  return ops_.push_back(REL32) && rel32_ix_.push_back(label_index);
}
CheckBool EncodedProgram::AddRel32ARM(uint16 op, int label_index) {
  return ops_.push_back(static_cast<OP>(op)) &&
         rel32_ix_.push_back(label_index);
}
CheckBool EncodedProgram::AddPeMakeRelocs(ExecutableType kind) {
  if (kind == EXE_WIN_32_X86)
    return ops_.push_back(MAKE_PE_RELOCATION_TABLE);
  return ops_.push_back(MAKE_PE64_RELOCATION_TABLE);
}

CheckBool EncodedProgram::AddElfMakeRelocs() {
  return ops_.push_back(MAKE_ELF_RELOCATION_TABLE);
}

CheckBool EncodedProgram::AddElfARMMakeRelocs() {
  return ops_.push_back(MAKE_ELF_ARM_RELOCATION_TABLE);
}
void EncodedProgram::DebuggingSummary() {
  VLOG(1) << "EncodedProgram Summary"
          << "\n  image base  " << image_base_
          << "\n  abs32 rvas  " << abs32_rva_.size()
          << "\n  rel32 rvas  " << rel32_rva_.size()
          << "\n  ops         " << ops_.size()
          << "\n  origins     " << origins_.size()
          << "\n  copy_counts " << copy_counts_.size()
          << "\n  copy_bytes  " << copy_bytes_.size()
          << "\n  abs32_ix    " << abs32_ix_.size()
          << "\n  rel32_ix    " << rel32_ix_.size();
}
////////////////////////////////////////////////////////////////////////////////

// For algorithm refinement purposes it is useful to write subsets of the file
// format. This gives us the ability to estimate the entropy of the
// differential compression of the individual streams, which can provide
// invaluable insights. The default, of course, is to include all the streams.
//
enum FieldSelect {
  INCLUDE_ABS32_ADDRESSES = 0x0001,
  INCLUDE_REL32_ADDRESSES = 0x0002,
  INCLUDE_ABS32_INDEXES   = 0x0010,
  INCLUDE_REL32_INDEXES   = 0x0020,
  INCLUDE_OPS             = 0x0100,
  INCLUDE_BYTES           = 0x0200,
  INCLUDE_COPY_COUNTS     = 0x0400,
  INCLUDE_MISC            = 0x1000
};
static FieldSelect GetFieldSelect() {
  // TODO(sra): Use better configuration.
  scoped_ptr<base::Environment> env(base::Environment::Create());
  std::string s;
  env->GetVar("A_FIELDS", &s);
  if (!s.empty())
    return static_cast<FieldSelect>(wcstoul(ASCIIToWide(s).c_str(), 0, 0));
  return static_cast<FieldSelect>(~0);
}
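// Illustrative usage (the value is just an example): running with
// A_FIELDS=0x0103 in the environment selects INCLUDE_ABS32_ADDRESSES |
// INCLUDE_REL32_ADDRESSES | INCLUDE_OPS, which is handy for measuring how
// well each stream compresses on its own. wcstoul() is called with base 0,
// so hex, octal and decimal strings all work.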
CheckBool EncodedProgram::WriteTo(SinkStreamSet* streams) {
  FieldSelect select = GetFieldSelect();

  // The order of fields must be consistent in WriteTo and ReadFrom, regardless
  // of the streams used. The code can be configured with all kStreamXXX
  // constants the same.
  //
  // If we change the code to pipeline reading with assembly (to avoid
  // temporary storage vectors by consuming operands directly from the stream)
  // then we need to read the base address and the random access address tables
  // first; the rest can be interleaved.

  if (select & INCLUDE_MISC) {
    // TODO(sra): write 64 bits.
    if (!streams->stream(kStreamMisc)->WriteVarint32(
            static_cast<uint32>(image_base_))) {
      return false;
    }
  }

  bool success = true;

  if (select & INCLUDE_ABS32_ADDRESSES) {
    success &= WriteU32Delta(abs32_rva_,
                             streams->stream(kStreamAbs32Addresses));
  }

  if (select & INCLUDE_REL32_ADDRESSES) {
    success &= WriteU32Delta(rel32_rva_,
                             streams->stream(kStreamRel32Addresses));
  }

  if (select & INCLUDE_MISC)
    success &= WriteVector(origins_, streams->stream(kStreamOriginAddresses));

  if (select & INCLUDE_OPS) {
    // ops_.size() + 5 leaves room for the Varint32 count prefix, which is at
    // most 5 bytes.
    success &= streams->stream(kStreamOps)->Reserve(ops_.size() + 5);
    success &= WriteVector(ops_, streams->stream(kStreamOps));
  }

  if (select & INCLUDE_COPY_COUNTS)
    success &= WriteVector(copy_counts_, streams->stream(kStreamCopyCounts));

  if (select & INCLUDE_BYTES)
    success &= WriteVectorU8(copy_bytes_, streams->stream(kStreamBytes));

  if (select & INCLUDE_ABS32_INDEXES)
    success &= WriteVector(abs32_ix_, streams->stream(kStreamAbs32Indexes));

  if (select & INCLUDE_REL32_INDEXES)
    success &= WriteVector(rel32_ix_, streams->stream(kStreamRel32Indexes));

  return success;
}
bool EncodedProgram::ReadFrom(SourceStreamSet* streams) {
  // TODO(sra): read 64 bits.
  uint32 temp;
  if (!streams->stream(kStreamMisc)->ReadVarint32(&temp))
    return false;
  image_base_ = temp;

  if (!ReadU32Delta(&abs32_rva_, streams->stream(kStreamAbs32Addresses)))
    return false;
  if (!ReadU32Delta(&rel32_rva_, streams->stream(kStreamRel32Addresses)))
    return false;
  if (!ReadVector(&origins_, streams->stream(kStreamOriginAddresses)))
    return false;
  if (!ReadVector(&ops_, streams->stream(kStreamOps)))
    return false;
  if (!ReadVector(&copy_counts_, streams->stream(kStreamCopyCounts)))
    return false;
  if (!ReadVectorU8(&copy_bytes_, streams->stream(kStreamBytes)))
    return false;
  if (!ReadVector(&abs32_ix_, streams->stream(kStreamAbs32Indexes)))
    return false;
  if (!ReadVector(&rel32_ix_, streams->stream(kStreamRel32Indexes)))
    return false;

  // Check that streams have been completely consumed.
  for (int i = 0; i < kStreamLimit; ++i) {
    if (streams->stream(i)->Remaining() > 0)
      return false;
  }

  return true;
}
// Safe, non-throwing version of std::vector::at(). Returns 'true' for
// success, 'false' for out-of-bounds index error.
template<typename V, typename T>
bool VectorAt(const V& v, size_t index, T* output) {
  if (index >= v.size())
    return false;
  *output = v[index];
  return true;
}
CheckBool EncodedProgram::EvaluateRel32ARM(OP op,
                                           size_t& ix_rel32_ix,
                                           RVA& current_rva,
                                           SinkStream* output) {
  switch (op & 0x0000F000) {
    case REL32ARM8: {
      uint32 index;
      if (!VectorAt(rel32_ix_, ix_rel32_ix, &index))
        return false;
      ++ix_rel32_ix;
      RVA rva;
      if (!VectorAt(rel32_rva_, index, &rva))
        return false;
      uint32 decompressed_op;
      if (!DisassemblerElf32ARM::Decompress(ARM_OFF8,
                                            static_cast<uint16>(op),
                                            static_cast<uint32>(rva -
                                                                current_rva),
                                            &decompressed_op)) {
        return false;
      }
      uint16 op16 = decompressed_op;
      if (!output->Write(&op16, 2))
        return false;
      current_rva += 2;
      break;
    }
    case REL32ARM11: {
      uint32 index;
      if (!VectorAt(rel32_ix_, ix_rel32_ix, &index))
        return false;
      ++ix_rel32_ix;
      RVA rva;
      if (!VectorAt(rel32_rva_, index, &rva))
        return false;
      uint32 decompressed_op;
      if (!DisassemblerElf32ARM::Decompress(ARM_OFF11, (uint16) op,
                                            (uint32) (rva - current_rva),
                                            &decompressed_op)) {
        return false;
      }
      uint16 op16 = decompressed_op;
      if (!output->Write(&op16, 2))
        return false;
      current_rva += 2;
      break;
    }
    case REL32ARM24: {
      uint32 index;
      if (!VectorAt(rel32_ix_, ix_rel32_ix, &index))
        return false;
      ++ix_rel32_ix;
      RVA rva;
      if (!VectorAt(rel32_rva_, index, &rva))
        return false;
      uint32 decompressed_op;
      if (!DisassemblerElf32ARM::Decompress(ARM_OFF24, (uint16) op,
                                            (uint32) (rva - current_rva),
                                            &decompressed_op)) {
        return false;
      }
      if (!output->Write(&decompressed_op, 4))
        return false;
      current_rva += 4;
      break;
    }
    case REL32ARM25: {
      uint32 index;
      if (!VectorAt(rel32_ix_, ix_rel32_ix, &index))
        return false;
      ++ix_rel32_ix;
      RVA rva;
      if (!VectorAt(rel32_rva_, index, &rva))
        return false;
      uint32 decompressed_op;
      if (!DisassemblerElf32ARM::Decompress(ARM_OFF25, (uint16) op,
                                            (uint32) (rva - current_rva),
                                            &decompressed_op)) {
        return false;
      }
      uint32 words = (decompressed_op << 16) | (decompressed_op >> 16);
      if (!output->Write(&words, 4))
        return false;
      current_rva += 4;
      break;
    }
    case REL32ARM21: {
      uint32 index;
      if (!VectorAt(rel32_ix_, ix_rel32_ix, &index))
        return false;
      ++ix_rel32_ix;
      RVA rva;
      if (!VectorAt(rel32_rva_, index, &rva))
        return false;
      uint32 decompressed_op;
      if (!DisassemblerElf32ARM::Decompress(ARM_OFF21, (uint16) op,
                                            (uint32) (rva - current_rva),
                                            &decompressed_op)) {
        return false;
      }
      uint32 words = (decompressed_op << 16) | (decompressed_op >> 16);
      if (!output->Write(&words, 4))
        return false;
      current_rva += 4;
      break;
    }
    default:
      return false;
  }

  return true;
}
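// Note (inferred from the 0x0000F000 mask above): the ARM rel32 ops appear to
// carry the branch kind in bits 12-15, matching the REL32ARM* case values,
// while the low 12 bits hold the compressed instruction bits that
// DisassemblerElf32ARM::Decompress() recombines with the target offset.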
CheckBool EncodedProgram::AssembleTo(SinkStream* final_buffer) {
  // For the most part, the assembly process walks the various tables.
  // ix_mumble is the index into the mumble table.
  size_t ix_origins = 0;
  size_t ix_copy_counts = 0;
  size_t ix_copy_bytes = 0;
  size_t ix_abs32_ix = 0;
  size_t ix_rel32_ix = 0;

  RVA current_rva = 0;

  bool pending_pe_relocation_table = false;
  uint8 pending_pe_relocation_table_type = 0x03;  // IMAGE_REL_BASED_HIGHLOW
  Elf32_Word pending_elf_relocation_table_type = 0;
  SinkStream bytes_following_relocation_table;

  SinkStream* output = final_buffer;

  for (size_t ix_ops = 0; ix_ops < ops_.size(); ++ix_ops) {
    OP op = ops_[ix_ops];

    switch (op) {
      default:
        if (!EvaluateRel32ARM(op, ix_rel32_ix, current_rva, output))
          return false;
        break;
      case ORIGIN: {
        RVA section_rva;
        if (!VectorAt(origins_, ix_origins, &section_rva))
          return false;
        ++ix_origins;
        current_rva = section_rva;
        break;
      }

      case COPY: {
        uint32 count;
        if (!VectorAt(copy_counts_, ix_copy_counts, &count))
          return false;
        ++ix_copy_counts;
        for (uint32 i = 0; i < count; ++i) {
          uint8 b;
          if (!VectorAt(copy_bytes_, ix_copy_bytes, &b))
            return false;
          ++ix_copy_bytes;
          if (!output->Write(&b, 1))
            return false;
        }
        current_rva += count;
        break;
      }

      case COPY1: {
        uint8 b;
        if (!VectorAt(copy_bytes_, ix_copy_bytes, &b))
          return false;
        ++ix_copy_bytes;
        if (!output->Write(&b, 1))
          return false;
        current_rva += 1;
        break;
      }

      case REL32: {
        uint32 index;
        if (!VectorAt(rel32_ix_, ix_rel32_ix, &index))
          return false;
        ++ix_rel32_ix;
        RVA rva;
        if (!VectorAt(rel32_rva_, index, &rva))
          return false;
        uint32 offset = (rva - (current_rva + 4));
        if (!output->Write(&offset, 4))
          return false;
        current_rva += 4;
        break;
      }

      case ABS32: {
        uint32 index;
        if (!VectorAt(abs32_ix_, ix_abs32_ix, &index))
          return false;
        ++ix_abs32_ix;
        RVA rva;
        if (!VectorAt(abs32_rva_, index, &rva))
          return false;
        uint32 abs32 = static_cast<uint32>(rva + image_base_);
        if (!abs32_relocs_.push_back(current_rva) || !output->Write(&abs32, 4))
          return false;
        current_rva += 4;
        break;
      }
      case MAKE_PE_RELOCATION_TABLE: {
        // We can see the base relocation anywhere, but we only have the
        // information to generate it at the very end. So we divert the bytes
        // we are generating to a temporary stream.
        if (pending_pe_relocation_table)
          return false;  // Can't have two base relocation tables.

        pending_pe_relocation_table = true;
        output = &bytes_following_relocation_table;

        // There is a potential problem *if* the instruction stream contains
        // some REL32 relocations following the base relocation and in the same
        // section. We don't know the size of the table, so 'current_rva' will
        // be wrong, causing REL32 offsets to be miscalculated. This never
        // happens; the base relocation table is usually in a section of its
        // own, a data-only section, and following everything else in the
        // executable except some padding zero bytes. We could fix this by
        // emitting an ORIGIN after the MAKE_BASE_RELOCATION_TABLE.
        break;
      }

      case MAKE_PE64_RELOCATION_TABLE: {
        if (pending_pe_relocation_table)
          return false;  // Can't have two base relocation tables.

        pending_pe_relocation_table = true;
        pending_pe_relocation_table_type = 0x0A;  // IMAGE_REL_BASED_DIR64
        output = &bytes_following_relocation_table;
        break;
      }

      case MAKE_ELF_ARM_RELOCATION_TABLE: {
        // We can see the base relocation anywhere, but we only have the
        // information to generate it at the very end. So we divert the bytes
        // we are generating to a temporary stream.
        if (pending_elf_relocation_table_type)
          return false;  // Can't have two base relocation tables.

        pending_elf_relocation_table_type = R_ARM_RELATIVE;
        output = &bytes_following_relocation_table;
        break;
      }

      case MAKE_ELF_RELOCATION_TABLE: {
        // We can see the base relocation anywhere, but we only have the
        // information to generate it at the very end. So we divert the bytes
        // we are generating to a temporary stream.
        if (pending_elf_relocation_table_type)
          return false;  // Can't have two base relocation tables.

        pending_elf_relocation_table_type = R_386_RELATIVE;
        output = &bytes_following_relocation_table;
        break;
      }
    }
  }
  if (pending_pe_relocation_table) {
    if (!GeneratePeRelocations(final_buffer,
                               pending_pe_relocation_table_type) ||
        !final_buffer->Append(&bytes_following_relocation_table))
      return false;
  }

  if (pending_elf_relocation_table_type) {
    if (!GenerateElfRelocations(pending_elf_relocation_table_type,
                                final_buffer) ||
        !final_buffer->Append(&bytes_following_relocation_table))
      return false;
  }

  // Final verification check: did we consume all lists?
  if (ix_copy_counts != copy_counts_.size())
    return false;
  if (ix_copy_bytes != copy_bytes_.size())
    return false;
  if (ix_abs32_ix != abs32_ix_.size())
    return false;
  if (ix_rel32_ix != rel32_ix_.size())
    return false;

  return true;
}
// RelocBlock has the layout of a block of relocations in the base relocation
// table file format.
//
struct RelocBlockPOD {
  uint32 page_rva;
  uint32 block_size;
  uint16 relocs[4096];  // Allow up to one relocation per byte of a 4k page.
};

COMPILE_ASSERT(offsetof(RelocBlockPOD, relocs) == 8, reloc_block_header_size);
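// Illustrative block contents (values chosen for the example): a block
// covering the 4KB page at RVA 0x1000 with two IMAGE_REL_BASED_HIGHLOW fixups
// at page offsets 0x10 and 0x24 would be emitted as
//   page_rva   = 0x1000
//   block_size = 12          (8-byte header plus two 2-byte entries)
//   relocs     = { 0x3010, 0x3024 }
// where the high nibble 0x3 is the relocation type.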
class RelocBlock {
 public:
  RelocBlock() {
    pod.page_rva = ~0;
    pod.block_size = 8;
  }

  void Add(uint16 item) {
    pod.relocs[(pod.block_size - 8) / 2] = item;
    pod.block_size += 2;
  }

  CheckBool Flush(SinkStream* buffer) WARN_UNUSED_RESULT {
    bool ok = true;
    if (pod.block_size != 8) {
      if (pod.block_size % 4 != 0) {  // Pad to make size multiple of 4 bytes.
        Add(0);
      }
      ok = buffer->Write(&pod, pod.block_size);
      pod.block_size = 8;
      pod.page_rva = ~0;
    }
    return ok;
  }

  RelocBlockPOD pod;
};
CheckBool EncodedProgram::GeneratePeRelocations(SinkStream* buffer,
                                                uint8 type) {
  std::sort(abs32_relocs_.begin(), abs32_relocs_.end());

  RelocBlock block;

  bool ok = true;
  for (size_t i = 0; ok && i < abs32_relocs_.size(); ++i) {
    uint32 rva = abs32_relocs_[i];
    uint32 page_rva = rva & ~0xFFF;
    if (page_rva != block.pod.page_rva) {
      ok &= block.Flush(buffer);
      block.pod.page_rva = page_rva;
    }
    if (ok)
      block.Add(((static_cast<uint16>(type)) << 12) | (rva & 0xFFF));
  }
  ok &= block.Flush(buffer);
  return ok;
}
CheckBool EncodedProgram::GenerateElfRelocations(Elf32_Word r_info,
                                                 SinkStream* buffer) {
  std::sort(abs32_relocs_.begin(), abs32_relocs_.end());

  Elf32_Rel relocation_block;

  relocation_block.r_info = r_info;

  bool ok = true;
  for (size_t i = 0; ok && i < abs32_relocs_.size(); ++i) {
    relocation_block.r_offset = abs32_relocs_[i];
    ok = buffer->Write(&relocation_block, sizeof(Elf32_Rel));
  }

  return ok;
}
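// Illustrative output: for abs32_relocs_ = {0x2000, 0x2004} and
// r_info == R_386_RELATIVE, the table is two consecutive 8-byte Elf32_Rel
// records: {r_offset = 0x2000, r_info = R_386_RELATIVE} followed by
// {r_offset = 0x2004, r_info = R_386_RELATIVE}.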
////////////////////////////////////////////////////////////////////////////////

Status WriteEncodedProgram(EncodedProgram* encoded, SinkStreamSet* sink) {
  if (!encoded->WriteTo(sink))
    return C_STREAM_ERROR;
  return C_OK;
}
Status ReadEncodedProgram(SourceStreamSet* streams, EncodedProgram** output) {
  EncodedProgram* encoded = new EncodedProgram();
  if (encoded->ReadFrom(streams)) {
    *output = encoded;
    return C_OK;
  }
  delete encoded;
  return C_DESERIALIZATION_FAILED;
}
Status Assemble(EncodedProgram* encoded, SinkStream* buffer) {
  bool assembled = encoded->AssembleTo(buffer);
  if (assembled)
    return C_OK;
  return C_ASSEMBLY_FAILED;
}
void DeleteEncodedProgram(EncodedProgram* encoded) {
  delete encoded;
}

}  // namespace courgette