1 /* elf.c -- Get debug data from an ELF file for backtraces.
2 Copyright (C) 2012-2024 Free Software Foundation, Inc.
3 Written by Ian Lance Taylor, Google.
5 Redistribution and use in source and binary forms, with or without
6 modification, are permitted provided that the following conditions are
9 (1) Redistributions of source code must retain the above copyright
10 notice, this list of conditions and the following disclaimer.
12 (2) Redistributions in binary form must reproduce the above copyright
13 notice, this list of conditions and the following disclaimer in
14 the documentation and/or other materials provided with the
17 (3) The name of the author may not be used to
18 endorse or promote products derived from this software without
19 specific prior written permission.
21 THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
22 IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
23 WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
24 DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,
25 INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
26 (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
27 SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
28 HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
29 STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
30 IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
31 POSSIBILITY OF SUCH DAMAGE. */
38 #include <sys/types.h>
42 #ifdef HAVE_DL_ITERATE_PHDR
46 #ifdef HAVE_SYS_LINK_H
51 #include "backtrace.h"
56 #define S_IFLNK 0120000
59 #define S_IFMT 0170000
61 #define S_ISLNK(m) (((m) & S_IFMT) == S_IFLNK)
65 #define __builtin_prefetch(p, r, l)
66 #define unlikely(x) (x)
68 #define unlikely(x) __builtin_expect(!!(x), 0)
71 #if !defined(HAVE_DECL_STRNLEN) || !HAVE_DECL_STRNLEN
/* If strnlen is not declared, provide our own version.  Returns the
   length of S, but never scans more than MAXLEN bytes, so S need not
   be NUL-terminated if it is at least MAXLEN bytes long.  */

static size_t
xstrnlen (const char *s, size_t maxlen)
{
  size_t i;

  for (i = 0; i < maxlen; ++i)
    if (s[i] == '\0')
      break;
  return i;
}

#define strnlen xstrnlen
92 /* Dummy version of lstat for systems that don't have it. */
95 xlstat (const char *path ATTRIBUTE_UNUSED
, struct stat
*st ATTRIBUTE_UNUSED
)
104 #ifndef HAVE_READLINK
106 /* Dummy version of readlink for systems that don't have it. */
109 xreadlink (const char *path ATTRIBUTE_UNUSED
, char *buf ATTRIBUTE_UNUSED
,
110 size_t bufsz ATTRIBUTE_UNUSED
)
115 #define readlink xreadlink
119 #ifndef HAVE_DL_ITERATE_PHDR
121 /* Dummy version of dl_iterate_phdr for systems that don't have it. */
123 #define dl_phdr_info x_dl_phdr_info
124 #define dl_iterate_phdr x_dl_iterate_phdr
129 const char *dlpi_name
;
133 dl_iterate_phdr (int (*callback
) (struct dl_phdr_info
*,
134 size_t, void *) ATTRIBUTE_UNUSED
,
135 void *data ATTRIBUTE_UNUSED
)
140 #endif /* ! defined (HAVE_DL_ITERATE_PHDR) */
142 /* The configure script must tell us whether we are 32-bit or 64-bit
143 ELF. We could make this code test and support either possibility,
144 but there is no point. This code only works for the currently
145 running executable, which means that we know the ELF mode at
148 #if BACKTRACE_ELF_SIZE != 32 && BACKTRACE_ELF_SIZE != 64
149 #error "Unknown BACKTRACE_ELF_SIZE"
152 /* <link.h> might #include <elf.h> which might define our constants
153 with slightly different values. Undefine them to be safe. */
182 #undef SHF_COMPRESSED
185 #undef NT_GNU_BUILD_ID
186 #undef ELFCOMPRESS_ZLIB
187 #undef ELFCOMPRESS_ZSTD
191 typedef uint16_t b_elf_half
; /* Elf_Half. */
192 typedef uint32_t b_elf_word
; /* Elf_Word. */
193 typedef int32_t b_elf_sword
; /* Elf_Sword. */
195 #if BACKTRACE_ELF_SIZE == 32
197 typedef uint32_t b_elf_addr
; /* Elf_Addr. */
198 typedef uint32_t b_elf_off
; /* Elf_Off. */
200 typedef uint32_t b_elf_wxword
; /* 32-bit Elf_Word, 64-bit ELF_Xword. */
204 typedef uint64_t b_elf_addr
; /* Elf_Addr. */
205 typedef uint64_t b_elf_off
; /* Elf_Off. */
206 typedef uint64_t b_elf_xword
; /* Elf_Xword. */
207 typedef int64_t b_elf_sxword
; /* Elf_Sxword. */
209 typedef uint64_t b_elf_wxword
; /* 32-bit Elf_Word, 64-bit ELF_Xword. */
213 /* Data structures and associated constants. */
218 unsigned char e_ident
[EI_NIDENT
]; /* ELF "magic number" */
219 b_elf_half e_type
; /* Identifies object file type */
220 b_elf_half e_machine
; /* Specifies required architecture */
221 b_elf_word e_version
; /* Identifies object file version */
222 b_elf_addr e_entry
; /* Entry point virtual address */
223 b_elf_off e_phoff
; /* Program header table file offset */
224 b_elf_off e_shoff
; /* Section header table file offset */
225 b_elf_word e_flags
; /* Processor-specific flags */
226 b_elf_half e_ehsize
; /* ELF header size in bytes */
227 b_elf_half e_phentsize
; /* Program header table entry size */
228 b_elf_half e_phnum
; /* Program header table entry count */
229 b_elf_half e_shentsize
; /* Section header table entry size */
230 b_elf_half e_shnum
; /* Section header table entry count */
231 b_elf_half e_shstrndx
; /* Section header string table index */
232 } b_elf_ehdr
; /* Elf_Ehdr. */
250 #define ELFDATA2LSB 1
251 #define ELFDATA2MSB 2
258 #define EF_PPC64_ABI 3
261 b_elf_word sh_name
; /* Section name, index in string tbl */
262 b_elf_word sh_type
; /* Type of section */
263 b_elf_wxword sh_flags
; /* Miscellaneous section attributes */
264 b_elf_addr sh_addr
; /* Section virtual addr at execution */
265 b_elf_off sh_offset
; /* Section file offset */
266 b_elf_wxword sh_size
; /* Size of section in bytes */
267 b_elf_word sh_link
; /* Index of another section */
268 b_elf_word sh_info
; /* Additional section information */
269 b_elf_wxword sh_addralign
; /* Section alignment */
270 b_elf_wxword sh_entsize
; /* Entry size if section holds table */
271 } b_elf_shdr
; /* Elf_Shdr. */
273 #define SHN_UNDEF 0x0000 /* Undefined section */
274 #define SHN_LORESERVE 0xFF00 /* Begin range of reserved indices */
275 #define SHN_XINDEX 0xFFFF /* Section index is held elsewhere */
277 #define SHT_PROGBITS 1
280 #define SHT_DYNSYM 11
282 #define SHF_COMPRESSED 0x800
284 #if BACKTRACE_ELF_SIZE == 32
288 b_elf_word st_name
; /* Symbol name, index in string tbl */
289 b_elf_addr st_value
; /* Symbol value */
290 b_elf_word st_size
; /* Symbol size */
291 unsigned char st_info
; /* Symbol binding and type */
292 unsigned char st_other
; /* Visibility and other data */
293 b_elf_half st_shndx
; /* Symbol section index */
294 } b_elf_sym
; /* Elf_Sym. */
296 #else /* BACKTRACE_ELF_SIZE != 32 */
300 b_elf_word st_name
; /* Symbol name, index in string tbl */
301 unsigned char st_info
; /* Symbol binding and type */
302 unsigned char st_other
; /* Visibility and other data */
303 b_elf_half st_shndx
; /* Symbol section index */
304 b_elf_addr st_value
; /* Symbol value */
305 b_elf_xword st_size
; /* Symbol size */
306 } b_elf_sym
; /* Elf_Sym. */
308 #endif /* BACKTRACE_ELF_SIZE != 32 */
321 #define NT_GNU_BUILD_ID 3
323 #if BACKTRACE_ELF_SIZE == 32
327 b_elf_word ch_type
; /* Compresstion algorithm */
328 b_elf_word ch_size
; /* Uncompressed size */
329 b_elf_word ch_addralign
; /* Alignment for uncompressed data */
330 } b_elf_chdr
; /* Elf_Chdr */
332 #else /* BACKTRACE_ELF_SIZE != 32 */
336 b_elf_word ch_type
; /* Compression algorithm */
337 b_elf_word ch_reserved
; /* Reserved */
338 b_elf_xword ch_size
; /* Uncompressed size */
339 b_elf_xword ch_addralign
; /* Alignment for uncompressed data */
340 } b_elf_chdr
; /* Elf_Chdr */
342 #endif /* BACKTRACE_ELF_SIZE != 32 */
344 #define ELFCOMPRESS_ZLIB 1
345 #define ELFCOMPRESS_ZSTD 2
347 /* Names of sections, indexed by enum dwarf_section in internal.h. */
349 static const char * const dwarf_section_names
[DEBUG_MAX
] =
357 ".debug_str_offsets",
362 /* Information we gather for the sections we care about. */
364 struct debug_section_info
366 /* Section file offset. */
370 /* Section contents, after read from file. */
371 const unsigned char *data
;
372 /* Whether the SHF_COMPRESSED flag is set for the section. */
376 /* Information we keep for an ELF symbol. */
380 /* The name of the symbol. */
382 /* The address of the symbol. */
384 /* The size of the symbol. */
388 /* Information to pass to elf_syminfo. */
390 struct elf_syminfo_data
392 /* Symbols for the next module. */
393 struct elf_syminfo_data
*next
;
394 /* The ELF symbols, sorted by address. */
395 struct elf_symbol
*symbols
;
396 /* The number of symbols. */
400 /* A view that works for either a file or memory. */
404 struct backtrace_view view
;
405 int release
; /* If non-zero, must call backtrace_release_view. */
408 /* Information about PowerPC64 ELFv1 .opd section. */
410 struct elf_ppc64_opd_data
412 /* Address of the .opd section. */
416 /* Size of the .opd section. */
418 /* Corresponding section view. */
419 struct elf_view view
;
422 /* Create a view of SIZE bytes from DESCRIPTOR/MEMORY at OFFSET. */
425 elf_get_view (struct backtrace_state
*state
, int descriptor
,
426 const unsigned char *memory
, size_t memory_size
, off_t offset
,
427 uint64_t size
, backtrace_error_callback error_callback
,
428 void *data
, struct elf_view
*view
)
433 return backtrace_get_view (state
, descriptor
, offset
, size
,
434 error_callback
, data
, &view
->view
);
438 if ((uint64_t) offset
+ size
> (uint64_t) memory_size
)
440 error_callback (data
, "out of range for in-memory file", 0);
443 view
->view
.data
= (const void *) (memory
+ offset
);
444 view
->view
.base
= NULL
;
445 view
->view
.len
= size
;
451 /* Release a view read by elf_get_view. */
454 elf_release_view (struct backtrace_state
*state
, struct elf_view
*view
,
455 backtrace_error_callback error_callback
, void *data
)
458 backtrace_release_view (state
, &view
->view
, error_callback
, data
);
/* Compute the CRC-32 of BUF/LEN.  This uses the CRC used for
   .gnu_debuglink files, which is the standard CRC-32 (polynomial
   0xedb88320, as in zlib/RFC 1952).  CRC is the running checksum from
   a previous call (pass 0 to start); this lets the file be checksummed
   in pieces.  */

static uint32_t
elf_crc32 (uint32_t crc, const unsigned char *buf, size_t len)
{
  static const uint32_t crc32_table[256] =
    {
      0x00000000, 0x77073096, 0xee0e612c, 0x990951ba, 0x076dc419,
      0x706af48f, 0xe963a535, 0x9e6495a3, 0x0edb8832, 0x79dcb8a4,
      0xe0d5e91e, 0x97d2d988, 0x09b64c2b, 0x7eb17cbd, 0xe7b82d07,
      0x90bf1d91, 0x1db71064, 0x6ab020f2, 0xf3b97148, 0x84be41de,
      0x1adad47d, 0x6ddde4eb, 0xf4d4b551, 0x83d385c7, 0x136c9856,
      0x646ba8c0, 0xfd62f97a, 0x8a65c9ec, 0x14015c4f, 0x63066cd9,
      0xfa0f3d63, 0x8d080df5, 0x3b6e20c8, 0x4c69105e, 0xd56041e4,
      0xa2677172, 0x3c03e4d1, 0x4b04d447, 0xd20d85fd, 0xa50ab56b,
      0x35b5a8fa, 0x42b2986c, 0xdbbbc9d6, 0xacbcf940, 0x32d86ce3,
      0x45df5c75, 0xdcd60dcf, 0xabd13d59, 0x26d930ac, 0x51de003a,
      0xc8d75180, 0xbfd06116, 0x21b4f4b5, 0x56b3c423, 0xcfba9599,
      0xb8bda50f, 0x2802b89e, 0x5f058808, 0xc60cd9b2, 0xb10be924,
      0x2f6f7c87, 0x58684c11, 0xc1611dab, 0xb6662d3d, 0x76dc4190,
      0x01db7106, 0x98d220bc, 0xefd5102a, 0x71b18589, 0x06b6b51f,
      0x9fbfe4a5, 0xe8b8d433, 0x7807c9a2, 0x0f00f934, 0x9609a88e,
      0xe10e9818, 0x7f6a0dbb, 0x086d3d2d, 0x91646c97, 0xe6635c01,
      0x6b6b51f4, 0x1c6c6162, 0x856530d8, 0xf262004e, 0x6c0695ed,
      0x1b01a57b, 0x8208f4c1, 0xf50fc457, 0x65b0d9c6, 0x12b7e950,
      0x8bbeb8ea, 0xfcb9887c, 0x62dd1ddf, 0x15da2d49, 0x8cd37cf3,
      0xfbd44c65, 0x4db26158, 0x3ab551ce, 0xa3bc0074, 0xd4bb30e2,
      0x4adfa541, 0x3dd895d7, 0xa4d1c46d, 0xd3d6f4fb, 0x4369e96a,
      0x346ed9fc, 0xad678846, 0xda60b8d0, 0x44042d73, 0x33031de5,
      0xaa0a4c5f, 0xdd0d7cc9, 0x5005713c, 0x270241aa, 0xbe0b1010,
      0xc90c2086, 0x5768b525, 0x206f85b3, 0xb966d409, 0xce61e49f,
      0x5edef90e, 0x29d9c998, 0xb0d09822, 0xc7d7a8b4, 0x59b33d17,
      0x2eb40d81, 0xb7bd5c3b, 0xc0ba6cad, 0xedb88320, 0x9abfb3b6,
      0x03b6e20c, 0x74b1d29a, 0xead54739, 0x9dd277af, 0x04db2615,
      0x73dc1683, 0xe3630b12, 0x94643b84, 0x0d6d6a3e, 0x7a6a5aa8,
      0xe40ecf0b, 0x9309ff9d, 0x0a00ae27, 0x7d079eb1, 0xf00f9344,
      0x8708a3d2, 0x1e01f268, 0x6906c2fe, 0xf762575d, 0x806567cb,
      0x196c3671, 0x6e6b06e7, 0xfed41b76, 0x89d32be0, 0x10da7a5a,
      0x67dd4acc, 0xf9b9df6f, 0x8ebeeff9, 0x17b7be43, 0x60b08ed5,
      0xd6d6a3e8, 0xa1d1937e, 0x38d8c2c4, 0x4fdff252, 0xd1bb67f1,
      0xa6bc5767, 0x3fb506dd, 0x48b2364b, 0xd80d2bda, 0xaf0a1b4c,
      0x36034af6, 0x41047a60, 0xdf60efc3, 0xa867df55, 0x316e8eef,
      0x4669be79, 0xcb61b38c, 0xbc66831a, 0x256fd2a0, 0x5268e236,
      0xcc0c7795, 0xbb0b4703, 0x220216b9, 0x5505262f, 0xc5ba3bbe,
      0xb2bd0b28, 0x2bb45a92, 0x5cb36a04, 0xc2d7ffa7, 0xb5d0cf31,
      0x2cd99e8b, 0x5bdeae1d, 0x9b64c2b0, 0xec63f226, 0x756aa39c,
      0x026d930a, 0x9c0906a9, 0xeb0e363f, 0x72076785, 0x05005713,
      0x95bf4a82, 0xe2b87a14, 0x7bb12bae, 0x0cb61b38, 0x92d28e9b,
      0xe5d5be0d, 0x7cdcefb7, 0x0bdbdf21, 0x86d3d2d4, 0xf1d4e242,
      0x68ddb3f8, 0x1fda836e, 0x81be16cd, 0xf6b9265b, 0x6fb077e1,
      0x18b74777, 0x88085ae6, 0xff0f6a70, 0x66063bca, 0x11010b5c,
      0x8f659eff, 0xf862ae69, 0x616bffd3, 0x166ccf45, 0xa00ae278,
      0xd70dd2ee, 0x4e048354, 0x3903b3c2, 0xa7672661, 0xd06016f7,
      0x4969474d, 0x3e6e77db, 0xaed16a4a, 0xd9d65adc, 0x40df0b66,
      0x37d83bf0, 0xa9bcae53, 0xdebb9ec5, 0x47b2cf7f, 0x30b5ffe9,
      0xbdbdf21c, 0xcabac28a, 0x53b39330, 0x24b4a3a6, 0xbad03605,
      0xcdd70693, 0x54de5729, 0x23d967bf, 0xb3667a2e, 0xc4614ab8,
      0x5d681b02, 0x2a6f2b94, 0xb40bbe37, 0xc30c8ea1, 0x5a05df1b,
      0x2d02ef8d
    };

  const unsigned char *end;

  /* Standard reflected CRC-32: complement on entry and exit so that
     partial checksums compose across calls.  */
  crc = crc ^ 0xffffffff;
  for (end = buf + len; buf < end; ++ buf)
    crc = crc32_table[(crc ^ *buf) & 0xff] ^ (crc >> 8);
  return crc ^ 0xffffffff;
}
530 /* Return the CRC-32 of the entire file open at DESCRIPTOR. */
533 elf_crc32_file (struct backtrace_state
*state
, int descriptor
,
534 backtrace_error_callback error_callback
, void *data
)
537 struct backtrace_view file_view
;
540 if (fstat (descriptor
, &st
) < 0)
542 error_callback (data
, "fstat", errno
);
546 if (!backtrace_get_view (state
, descriptor
, 0, st
.st_size
, error_callback
,
550 ret
= elf_crc32 (0, (const unsigned char *) file_view
.data
, st
.st_size
);
552 backtrace_release_view (state
, &file_view
, error_callback
, data
);
557 /* A dummy callback function used when we can't find a symbol
561 elf_nosyms (struct backtrace_state
*state ATTRIBUTE_UNUSED
,
562 uintptr_t addr ATTRIBUTE_UNUSED
,
563 backtrace_syminfo_callback callback ATTRIBUTE_UNUSED
,
564 backtrace_error_callback error_callback
, void *data
)
566 error_callback (data
, "no symbol table in ELF executable", -1);
569 /* A callback function used when we can't find any debug info. */
572 elf_nodebug (struct backtrace_state
*state
, uintptr_t pc
,
573 backtrace_full_callback callback
,
574 backtrace_error_callback error_callback
, void *data
)
576 if (state
->syminfo_fn
!= NULL
&& state
->syminfo_fn
!= elf_nosyms
)
578 struct backtrace_call_full bdata
;
580 /* Fetch symbol information so that we can least get the
583 bdata
.full_callback
= callback
;
584 bdata
.full_error_callback
= error_callback
;
585 bdata
.full_data
= data
;
587 state
->syminfo_fn (state
, pc
, backtrace_syminfo_to_full_callback
,
588 backtrace_syminfo_to_full_error_callback
, &bdata
);
592 error_callback (data
, "no debug info in ELF executable (make sure to compile with -g)", -1);
596 /* Compare struct elf_symbol for qsort. */
599 elf_symbol_compare (const void *v1
, const void *v2
)
601 const struct elf_symbol
*e1
= (const struct elf_symbol
*) v1
;
602 const struct elf_symbol
*e2
= (const struct elf_symbol
*) v2
;
604 if (e1
->address
< e2
->address
)
606 else if (e1
->address
> e2
->address
)
612 /* Compare an ADDR against an elf_symbol for bsearch. We allocate one
613 extra entry in the array so that this can look safely at the next
617 elf_symbol_search (const void *vkey
, const void *ventry
)
619 const uintptr_t *key
= (const uintptr_t *) vkey
;
620 const struct elf_symbol
*entry
= (const struct elf_symbol
*) ventry
;
624 if (addr
< entry
->address
)
626 else if (addr
>= entry
->address
+ entry
->size
)
632 /* Initialize the symbol table info for elf_syminfo. */
635 elf_initialize_syminfo (struct backtrace_state
*state
,
636 struct libbacktrace_base_address base_address
,
637 const unsigned char *symtab_data
, size_t symtab_size
,
638 const unsigned char *strtab
, size_t strtab_size
,
639 backtrace_error_callback error_callback
,
640 void *data
, struct elf_syminfo_data
*sdata
,
641 struct elf_ppc64_opd_data
*opd
)
644 const b_elf_sym
*sym
;
645 size_t elf_symbol_count
;
646 size_t elf_symbol_size
;
647 struct elf_symbol
*elf_symbols
;
651 sym_count
= symtab_size
/ sizeof (b_elf_sym
);
653 /* We only care about function symbols. Count them. */
654 sym
= (const b_elf_sym
*) symtab_data
;
655 elf_symbol_count
= 0;
656 for (i
= 0; i
< sym_count
; ++i
, ++sym
)
660 info
= sym
->st_info
& 0xf;
661 if ((info
== STT_FUNC
|| info
== STT_OBJECT
)
662 && sym
->st_shndx
!= SHN_UNDEF
)
666 elf_symbol_size
= elf_symbol_count
* sizeof (struct elf_symbol
);
667 elf_symbols
= ((struct elf_symbol
*)
668 backtrace_alloc (state
, elf_symbol_size
, error_callback
,
670 if (elf_symbols
== NULL
)
673 sym
= (const b_elf_sym
*) symtab_data
;
675 for (i
= 0; i
< sym_count
; ++i
, ++sym
)
679 info
= sym
->st_info
& 0xf;
680 if (info
!= STT_FUNC
&& info
!= STT_OBJECT
)
682 if (sym
->st_shndx
== SHN_UNDEF
)
684 if (sym
->st_name
>= strtab_size
)
686 error_callback (data
, "symbol string index out of range", 0);
687 backtrace_free (state
, elf_symbols
, elf_symbol_size
, error_callback
,
691 elf_symbols
[j
].name
= (const char *) strtab
+ sym
->st_name
;
692 /* Special case PowerPC64 ELFv1 symbols in .opd section, if the symbol
693 is a function descriptor, read the actual code address from the
696 && sym
->st_value
>= opd
->addr
697 && sym
->st_value
< opd
->addr
+ opd
->size
)
698 elf_symbols
[j
].address
699 = *(const b_elf_addr
*) (opd
->data
+ (sym
->st_value
- opd
->addr
));
701 elf_symbols
[j
].address
= sym
->st_value
;
702 elf_symbols
[j
].address
=
703 libbacktrace_add_base (elf_symbols
[j
].address
, base_address
);
704 elf_symbols
[j
].size
= sym
->st_size
;
708 backtrace_qsort (elf_symbols
, elf_symbol_count
, sizeof (struct elf_symbol
),
712 sdata
->symbols
= elf_symbols
;
713 sdata
->count
= elf_symbol_count
;
718 /* Add EDATA to the list in STATE. */
721 elf_add_syminfo_data (struct backtrace_state
*state
,
722 struct elf_syminfo_data
*edata
)
724 if (!state
->threaded
)
726 struct elf_syminfo_data
**pp
;
728 for (pp
= (struct elf_syminfo_data
**) (void *) &state
->syminfo_data
;
738 struct elf_syminfo_data
**pp
;
740 pp
= (struct elf_syminfo_data
**) (void *) &state
->syminfo_data
;
744 struct elf_syminfo_data
*p
;
746 p
= backtrace_atomic_load_pointer (pp
);
754 if (__sync_bool_compare_and_swap (pp
, NULL
, edata
))
760 /* Return the symbol name and value for an ADDR. */
763 elf_syminfo (struct backtrace_state
*state
, uintptr_t addr
,
764 backtrace_syminfo_callback callback
,
765 backtrace_error_callback error_callback ATTRIBUTE_UNUSED
,
768 struct elf_syminfo_data
*edata
;
769 struct elf_symbol
*sym
= NULL
;
771 if (!state
->threaded
)
773 for (edata
= (struct elf_syminfo_data
*) state
->syminfo_data
;
777 sym
= ((struct elf_symbol
*)
778 bsearch (&addr
, edata
->symbols
, edata
->count
,
779 sizeof (struct elf_symbol
), elf_symbol_search
));
786 struct elf_syminfo_data
**pp
;
788 pp
= (struct elf_syminfo_data
**) (void *) &state
->syminfo_data
;
791 edata
= backtrace_atomic_load_pointer (pp
);
795 sym
= ((struct elf_symbol
*)
796 bsearch (&addr
, edata
->symbols
, edata
->count
,
797 sizeof (struct elf_symbol
), elf_symbol_search
));
806 callback (data
, addr
, NULL
, 0, 0);
808 callback (data
, addr
, sym
->name
, sym
->address
, sym
->size
);
/* Return whether FILENAME is a symlink.  Returns 0 if lstat fails,
   treating an inaccessible path as not a symlink.  */

static int
elf_is_symlink (const char *filename)
{
  struct stat st;

  if (lstat (filename, &st) < 0)
    return 0;
  return S_ISLNK (st.st_mode);
}
823 /* Return the results of reading the symlink FILENAME in a buffer
824 allocated by backtrace_alloc. Return the length of the buffer in
828 elf_readlink (struct backtrace_state
*state
, const char *filename
,
829 backtrace_error_callback error_callback
, void *data
,
840 buf
= backtrace_alloc (state
, len
, error_callback
, data
);
843 rl
= readlink (filename
, buf
, len
);
846 backtrace_free (state
, buf
, len
, error_callback
, data
);
849 if ((size_t) rl
< len
- 1)
855 backtrace_free (state
, buf
, len
, error_callback
, data
);
860 #define SYSTEM_BUILD_ID_DIR "/usr/lib/debug/.build-id/"
862 /* Open a separate debug info file, using the build ID to find it.
863 Returns an open file descriptor, or -1.
865 The GDB manual says that the only place gdb looks for a debug file
866 when the build ID is known is in /usr/lib/debug/.build-id. */
869 elf_open_debugfile_by_buildid (struct backtrace_state
*state
,
870 const char *buildid_data
, size_t buildid_size
,
871 backtrace_error_callback error_callback
,
874 const char * const prefix
= SYSTEM_BUILD_ID_DIR
;
875 const size_t prefix_len
= strlen (prefix
);
876 const char * const suffix
= ".debug";
877 const size_t suffix_len
= strlen (suffix
);
885 len
= prefix_len
+ buildid_size
* 2 + suffix_len
+ 2;
886 bd_filename
= backtrace_alloc (state
, len
, error_callback
, data
);
887 if (bd_filename
== NULL
)
891 memcpy (t
, prefix
, prefix_len
);
893 for (i
= 0; i
< buildid_size
; i
++)
898 b
= (unsigned char) buildid_data
[i
];
899 nib
= (b
& 0xf0) >> 4;
900 *t
++ = nib
< 10 ? '0' + nib
: 'a' + nib
- 10;
902 *t
++ = nib
< 10 ? '0' + nib
: 'a' + nib
- 10;
906 memcpy (t
, suffix
, suffix_len
);
907 t
[suffix_len
] = '\0';
909 ret
= backtrace_open (bd_filename
, error_callback
, data
, &does_not_exist
);
911 backtrace_free (state
, bd_filename
, len
, error_callback
, data
);
913 /* gdb checks that the debuginfo file has the same build ID note.
914 That seems kind of pointless to me--why would it have the right
915 name but not the right build ID?--so skipping the check. */
920 /* Try to open a file whose name is PREFIX (length PREFIX_LEN)
921 concatenated with PREFIX2 (length PREFIX2_LEN) concatenated with
922 DEBUGLINK_NAME. Returns an open file descriptor, or -1. */
925 elf_try_debugfile (struct backtrace_state
*state
, const char *prefix
,
926 size_t prefix_len
, const char *prefix2
, size_t prefix2_len
,
927 const char *debuglink_name
,
928 backtrace_error_callback error_callback
, void *data
)
930 size_t debuglink_len
;
936 debuglink_len
= strlen (debuglink_name
);
937 try_len
= prefix_len
+ prefix2_len
+ debuglink_len
+ 1;
938 try = backtrace_alloc (state
, try_len
, error_callback
, data
);
942 memcpy (try, prefix
, prefix_len
);
943 memcpy (try + prefix_len
, prefix2
, prefix2_len
);
944 memcpy (try + prefix_len
+ prefix2_len
, debuglink_name
, debuglink_len
);
945 try[prefix_len
+ prefix2_len
+ debuglink_len
] = '\0';
947 ret
= backtrace_open (try, error_callback
, data
, &does_not_exist
);
949 backtrace_free (state
, try, try_len
, error_callback
, data
);
954 /* Find a separate debug info file, using the debuglink section data
955 to find it. Returns an open file descriptor, or -1. */
958 elf_find_debugfile_by_debuglink (struct backtrace_state
*state
,
959 const char *filename
,
960 const char *debuglink_name
,
961 backtrace_error_callback error_callback
,
972 /* Resolve symlinks in FILENAME. Since FILENAME is fairly likely to
973 be /proc/self/exe, symlinks are common. We don't try to resolve
974 the whole path name, just the base name. */
978 while (elf_is_symlink (filename
))
983 new_buf
= elf_readlink (state
, filename
, error_callback
, data
, &new_len
);
987 if (new_buf
[0] == '/')
991 slash
= strrchr (filename
, '/');
1000 clen
= slash
- filename
+ strlen (new_buf
) + 1;
1001 c
= backtrace_alloc (state
, clen
, error_callback
, data
);
1005 memcpy (c
, filename
, slash
- filename
);
1006 memcpy (c
+ (slash
- filename
), new_buf
, strlen (new_buf
));
1007 c
[slash
- filename
+ strlen (new_buf
)] = '\0';
1008 backtrace_free (state
, new_buf
, new_len
, error_callback
, data
);
1016 backtrace_free (state
, alc
, alc_len
, error_callback
, data
);
1021 /* Look for DEBUGLINK_NAME in the same directory as FILENAME. */
1023 slash
= strrchr (filename
, '/');
1033 prefix_len
= slash
- filename
;
1036 ddescriptor
= elf_try_debugfile (state
, prefix
, prefix_len
, "", 0,
1037 debuglink_name
, error_callback
, data
);
1038 if (ddescriptor
>= 0)
1044 /* Look for DEBUGLINK_NAME in a .debug subdirectory of FILENAME. */
1046 ddescriptor
= elf_try_debugfile (state
, prefix
, prefix_len
, ".debug/",
1047 strlen (".debug/"), debuglink_name
,
1048 error_callback
, data
);
1049 if (ddescriptor
>= 0)
1055 /* Look for DEBUGLINK_NAME in /usr/lib/debug. */
1057 ddescriptor
= elf_try_debugfile (state
, "/usr/lib/debug/",
1058 strlen ("/usr/lib/debug/"), prefix
,
1059 prefix_len
, debuglink_name
,
1060 error_callback
, data
);
1061 if (ddescriptor
>= 0)
1065 if (alc
!= NULL
&& alc_len
> 0)
1066 backtrace_free (state
, alc
, alc_len
, error_callback
, data
);
1070 /* Open a separate debug info file, using the debuglink section data
1071 to find it. Returns an open file descriptor, or -1. */
1074 elf_open_debugfile_by_debuglink (struct backtrace_state
*state
,
1075 const char *filename
,
1076 const char *debuglink_name
,
1077 uint32_t debuglink_crc
,
1078 backtrace_error_callback error_callback
,
1083 ddescriptor
= elf_find_debugfile_by_debuglink (state
, filename
,
1085 error_callback
, data
);
1086 if (ddescriptor
< 0)
1089 if (debuglink_crc
!= 0)
1093 got_crc
= elf_crc32_file (state
, ddescriptor
, error_callback
, data
);
1094 if (got_crc
!= debuglink_crc
)
1096 backtrace_close (ddescriptor
, error_callback
, data
);
/* A function useful for setting a breakpoint for an inflation failure
   when this code is compiled with -g.  Intentionally empty.  */

static void
elf_uncompress_failed(void)
{
}
/* *PVAL is the current value being read from the stream, and *PBITS
   is the number of valid bits.  Ensure that *PVAL holds at least 15
   bits by reading additional bits from *PPIN, up to PINEND, as
   needed.  Updates *PPIN, *PVAL and *PBITS.  Returns 1 on success, 0
   on error.  */

static int
elf_fetch_bits (const unsigned char **ppin, const unsigned char *pinend,
		uint64_t *pval, unsigned int *pbits)
{
  unsigned int bits;
  const unsigned char *pin;
  uint64_t val;
  uint32_t next;

  bits = *pbits;
  if (bits >= 15)
    return 1;
  pin = *ppin;
  val = *pval;

  if (unlikely (pinend - pin < 4))
    {
      elf_uncompress_failed ();
      return 0;
    }

#if defined(__BYTE_ORDER__) && defined(__ORDER_LITTLE_ENDIAN__) \
    && defined(__ORDER_BIG_ENDIAN__) \
    && (__BYTE_ORDER__ == __ORDER_BIG_ENDIAN__ \
	|| __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__)
  /* We've ensured that PIN is aligned.  */
  next = *(const uint32_t *)pin;

#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
  next = __builtin_bswap32 (next);
#endif
#else
  next = pin[0] | (pin[1] << 8) | (pin[2] << 16) | (pin[3] << 24);
#endif

  val |= (uint64_t)next << bits;
  bits += 32;
  pin += 4;

  /* We will need the next four bytes soon.  */
  __builtin_prefetch (pin, 0, 0);

  *ppin = pin;
  *pval = val;
  *pbits = bits;
  return 1;
}
/* This is like elf_fetch_bits, but it fetchs the bits backward, and ensures at
   least 16 bits.  This is for zstd.

   NOTE(review): reconstructed from a fragmentary source; verify the
   end-of-stream handling against upstream libbacktrace.  */

static int
elf_fetch_bits_backward (const unsigned char **ppin,
			 const unsigned char *pinend,
			 uint64_t *pval, unsigned int *pbits)
{
  unsigned int bits;
  const unsigned char *pin;
  uint64_t val;
  uint32_t next;

  bits = *pbits;
  if (bits >= 16)
    return 1;
  pin = *ppin;
  val = *pval;

  /* Reading backward: PINEND is the lowest address we may read.  */
  if (unlikely (pin <= pinend))
    {
      if (unlikely (bits == 0))
	{
	  elf_uncompress_failed ();
	  return 0;
	}
      return 1;
    }

  pin -= 4;

#if defined(__BYTE_ORDER__) && defined(__ORDER_LITTLE_ENDIAN__) \
    && defined(__ORDER_BIG_ENDIAN__) \
    && (__BYTE_ORDER__ == __ORDER_BIG_ENDIAN__ \
	|| __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__)
  /* We've ensured that PIN is aligned.  */
  next = *(const uint32_t *)pin;

#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
  next = __builtin_bswap32 (next);
#endif
#else
  next = pin[0] | (pin[1] << 8) | (pin[2] << 16) | (pin[3] << 24);
#endif

  val <<= 32;
  val |= next;
  bits += 32;

  /* If we stepped past the start of the stream, discard the bytes
     that lie before it.  */
  if (unlikely (pin < pinend))
    {
      val >>= (pinend - pin) * 8;
      bits -= (pinend - pin) * 8;
    }

  *ppin = pin;
  *pval = val;
  *pbits = bits;
  return 1;
}
/* Initialize backward fetching when the bitstream starts with a 1 bit in the
   last byte in memory (which is the first one that we read).  This is used by
   zstd decompression.  Returns 1 on success, 0 on error.

   NOTE(review): reconstructed from a fragmentary source; verify the
   alignment loop and the clz adjustment against upstream libbacktrace.  */

static int
elf_fetch_backward_init (const unsigned char **ppin,
			 const unsigned char *pinend,
			 uint64_t *pval, unsigned int *pbits)
{
  const unsigned char *pin;
  unsigned int stream_start;
  uint64_t val;
  unsigned int bits;

  pin = *ppin;
  stream_start = (unsigned int)*pin;
  if (unlikely (stream_start == 0))
    {
      /* The first byte must contain the initial 1 bit.  */
      elf_uncompress_failed ();
      return 0;
    }
  val = 0;
  bits = 0;

  /* Align to a 32-bit boundary.  */
  while ((((uintptr_t)pin) & 3) != 0)
    {
      val <<= 8;
      val |= (uint64_t)*pin;
      bits += 8;
      --pin;
    }

  val <<= 8;
  val |= (uint64_t)*pin;
  bits += 8;

  *ppin = pin;
  *pval = val;
  *pbits = bits;
  if (!elf_fetch_bits_backward (ppin, pinend, pval, pbits))
    return 0;

  /* Drop the marker bit and any leading zero bits above it.  */
  *pbits -= __builtin_clz (stream_start) - (sizeof (unsigned int) - 1) * 8 + 1;

  if (!elf_fetch_bits_backward (ppin, pinend, pval, pbits))
    return 0;

  return 1;
}
1271 /* Huffman code tables, like the rest of the zlib format, are defined
1272 by RFC 1951. We store a Huffman code table as a series of tables
1273 stored sequentially in memory. Each entry in a table is 16 bits.
1274 The first, main, table has 256 entries. It is followed by a set of
1275 secondary tables of length 2 to 128 entries. The maximum length of
1276 a code sequence in the deflate format is 15 bits, so that is all we
1277 need. Each secondary table has an index, which is the offset of
1278 the table in the overall memory storage.
1280 The deflate format says that all codes of a given bit length are
1281 lexicographically consecutive. Perhaps we could have 130 values
1282 that require a 15-bit code, perhaps requiring three secondary
1283 tables of size 128. I don't know if this is actually possible, but
1284 it suggests that the maximum size required for secondary tables is
1285 3 * 128 + 3 * 64 ... == 768. The zlib enough program reports 660
1286 as the maximum. We permit 768, since in addition to the 256 for
1287 the primary table, with two bytes per entry, and with the two
1288 tables we need, that gives us a page.
1290 A single table entry needs to store a value or (for the main table
1291 only) the index and size of a secondary table. Values range from 0
1292 to 285, inclusive. Secondary table indexes, per above, range from
1293 0 to 510. For a value we need to store the number of bits we need
1294 to determine that value (one value may appear multiple times in the
1295 table), which is 1 to 8. For a secondary table we need to store
1296 the number of bits used to index into the table, which is 1 to 7.
1297 And of course we need 1 bit to decide whether we have a value or a
1298 secondary table index. So each entry needs 9 bits for value/table
1299 index, 3 bits for size, 1 bit what it is. For simplicity we use 16
1302 /* Number of entries we allocate to for one code table. We get a page
1303 for the two code tables we need. */
1305 #define ZLIB_HUFFMAN_TABLE_SIZE (1024)
1307 /* Bit masks and shifts for the values in the table. */
1309 #define ZLIB_HUFFMAN_VALUE_MASK 0x01ff
1310 #define ZLIB_HUFFMAN_BITS_SHIFT 9
1311 #define ZLIB_HUFFMAN_BITS_MASK 0x7
1312 #define ZLIB_HUFFMAN_SECONDARY_SHIFT 12
1314 /* For working memory while inflating we need two code tables, we need
1315 an array of code lengths (max value 15, so we use unsigned char),
1316 and an array of unsigned shorts used while building a table. The
1317 latter two arrays must be large enough to hold the maximum number
1318 of code lengths, which RFC 1951 defines as 286 + 30. */
1320 #define ZLIB_TABLE_SIZE \
1321 (2 * ZLIB_HUFFMAN_TABLE_SIZE * sizeof (uint16_t) \
1322 + (286 + 30) * sizeof (uint16_t) \
1323 + (286 + 30) * sizeof (unsigned char))
1325 #define ZLIB_TABLE_CODELEN_OFFSET \
1326 (2 * ZLIB_HUFFMAN_TABLE_SIZE * sizeof (uint16_t) \
1327 + (286 + 30) * sizeof (uint16_t))
1329 #define ZLIB_TABLE_WORK_OFFSET \
1330 (2 * ZLIB_HUFFMAN_TABLE_SIZE * sizeof (uint16_t))
1332 #ifdef BACKTRACE_GENERATE_FIXED_HUFFMAN_TABLE
1334 /* Used by the main function that generates the fixed table to learn
1336 static size_t final_next_secondary
;
1340 /* Build a Huffman code table from an array of lengths in CODES of
1341 length CODES_LEN. The table is stored into *TABLE. ZDEBUG_TABLE
1342 is the same as for elf_zlib_inflate, used to find some work space.
1343 Returns 1 on success, 0 on error. */
1346 elf_zlib_inflate_table (unsigned char *codes
, size_t codes_len
,
1347 uint16_t *zdebug_table
, uint16_t *table
)
1352 uint16_t firstcode
[7];
1357 size_t next_secondary
;
1359 /* Count the number of code of each length. Set NEXT[val] to be the
1360 next value after VAL with the same bit length. */
1362 next
= (uint16_t *) (((unsigned char *) zdebug_table
)
1363 + ZLIB_TABLE_WORK_OFFSET
);
1365 memset (&count
[0], 0, 16 * sizeof (uint16_t));
1366 for (i
= 0; i
< codes_len
; ++i
)
1368 if (unlikely (codes
[i
] >= 16))
1370 elf_uncompress_failed ();
1374 if (count
[codes
[i
]] == 0)
1376 start
[codes
[i
]] = i
;
1381 next
[prev
[codes
[i
]]] = i
;
1388 /* For each length, fill in the table for the codes of that
1391 memset (table
, 0, ZLIB_HUFFMAN_TABLE_SIZE
* sizeof (uint16_t));
1393 /* Handle the values that do not require a secondary table. */
1396 for (j
= 1; j
<= 8; ++j
)
1405 if (unlikely (jcnt
> (1U << j
)))
1407 elf_uncompress_failed ();
1411 /* There are JCNT values that have this length, the values
1412 starting from START[j] continuing through NEXT[VAL]. Those
1413 values are assigned consecutive values starting at CODE. */
1416 for (i
= 0; i
< jcnt
; ++i
)
1422 /* In the compressed bit stream, the value VAL is encoded as
1423 J bits with the value C. */
1425 if (unlikely ((val
& ~ZLIB_HUFFMAN_VALUE_MASK
) != 0))
1427 elf_uncompress_failed ();
1431 tval
= val
| ((j
- 1) << ZLIB_HUFFMAN_BITS_SHIFT
);
1433 /* The table lookup uses 8 bits. If J is less than 8, we
1434 don't know what the other bits will be. We need to fill
1435 in all possibilities in the table. Since the Huffman
1436 code is unambiguous, those entries can't be used for any
1439 for (ind
= code
; ind
< 0x100; ind
+= 1 << j
)
1441 if (unlikely (table
[ind
] != 0))
1443 elf_uncompress_failed ();
1449 /* Advance to the next value with this length. */
1453 /* The Huffman codes are stored in the bitstream with the
1454 most significant bit first, as is required to make them
1455 unambiguous. The effect is that when we read them from
1456 the bitstream we see the bit sequence in reverse order:
1457 the most significant bit of the Huffman code is the least
1458 significant bit of the value we read from the bitstream.
1459 That means that to make our table lookups work, we need
1460 to reverse the bits of CODE. Since reversing bits is
1461 tedious and in general requires using a table, we instead
1462 increment CODE in reverse order. That is, if the number
1463 of bits we are currently using, here named J, is 3, we
1464 count as 000, 100, 010, 110, 001, 101, 011, 111, which is
1465 to say the numbers from 0 to 7 but with the bits
1466 reversed. Going to more bits, aka incrementing J,
1467 effectively just adds more zero bits as the beginning,
1468 and as such does not change the numeric value of CODE.
1470 To increment CODE of length J in reverse order, find the
1471 most significant zero bit and set it to one while
1472 clearing all higher bits. In other words, add 1 modulo
1473 2^J, only reversed. */
1475 incr
= 1U << (j
- 1);
1476 while ((code
& incr
) != 0)
1488 /* Handle the values that require a secondary table. */
1490 /* Set FIRSTCODE, the number at which the codes start, for each
1493 for (j
= 9; j
< 16; j
++)
1502 /* There are JCNT values that have this length, the values
1503 starting from START[j]. Those values are assigned
1504 consecutive values starting at CODE. */
1506 firstcode
[j
- 9] = code
;
1508 /* Reverse add JCNT to CODE modulo 2^J. */
1509 for (k
= 0; k
< j
; ++k
)
1511 if ((jcnt
& (1U << k
)) != 0)
1516 bit
= 1U << (j
- k
- 1);
1517 for (m
= 0; m
< j
- k
; ++m
, bit
>>= 1)
1519 if ((code
& bit
) == 0)
1529 if (unlikely (jcnt
!= 0))
1531 elf_uncompress_failed ();
1536 /* For J from 9 to 15, inclusive, we store COUNT[J] consecutive
1537 values starting at START[J] with consecutive codes starting at
1538 FIRSTCODE[J - 9]. In the primary table we need to point to the
1539 secondary table, and the secondary table will be indexed by J - 9
1540 bits. We count down from 15 so that we install the larger
1541 secondary tables first, as the smaller ones may be embedded in
1544 next_secondary
= 0; /* Index of next secondary table (after primary). */
1545 for (j
= 15; j
>= 9; j
--)
1549 size_t primary
; /* Current primary index. */
1550 size_t secondary
; /* Offset to current secondary table. */
1551 size_t secondary_bits
; /* Bit size of current secondary table. */
1558 code
= firstcode
[j
- 9];
1562 for (i
= 0; i
< jcnt
; ++i
)
1568 if ((code
& 0xff) != primary
)
1572 /* Fill in a new primary table entry. */
1574 primary
= code
& 0xff;
1576 tprimary
= table
[primary
];
1579 /* Start a new secondary table. */
1581 if (unlikely ((next_secondary
& ZLIB_HUFFMAN_VALUE_MASK
)
1584 elf_uncompress_failed ();
1588 secondary
= next_secondary
;
1589 secondary_bits
= j
- 8;
1590 next_secondary
+= 1 << secondary_bits
;
1591 table
[primary
] = (secondary
1592 + ((j
- 8) << ZLIB_HUFFMAN_BITS_SHIFT
)
1593 + (1U << ZLIB_HUFFMAN_SECONDARY_SHIFT
));
1597 /* There is an existing entry. It had better be a
1598 secondary table with enough bits. */
1599 if (unlikely ((tprimary
1600 & (1U << ZLIB_HUFFMAN_SECONDARY_SHIFT
))
1603 elf_uncompress_failed ();
1606 secondary
= tprimary
& ZLIB_HUFFMAN_VALUE_MASK
;
1607 secondary_bits
= ((tprimary
>> ZLIB_HUFFMAN_BITS_SHIFT
)
1608 & ZLIB_HUFFMAN_BITS_MASK
);
1609 if (unlikely (secondary_bits
< j
- 8))
1611 elf_uncompress_failed ();
1617 /* Fill in secondary table entries. */
1619 tval
= val
| ((j
- 8) << ZLIB_HUFFMAN_BITS_SHIFT
);
1621 for (ind
= code
>> 8;
1622 ind
< (1U << secondary_bits
);
1623 ind
+= 1U << (j
- 8))
1625 if (unlikely (table
[secondary
+ 0x100 + ind
] != 0))
1627 elf_uncompress_failed ();
1630 table
[secondary
+ 0x100 + ind
] = tval
;
1636 incr
= 1U << (j
- 1);
1637 while ((code
& incr
) != 0)
1649 #ifdef BACKTRACE_GENERATE_FIXED_HUFFMAN_TABLE
1650 final_next_secondary
= next_secondary
;
1656 #ifdef BACKTRACE_GENERATE_FIXED_HUFFMAN_TABLE
1658 /* Used to generate the fixed Huffman table for block type 1. */
1662 static uint16_t table
[ZLIB_TABLE_SIZE
];
1663 static unsigned char codes
[288];
1670 for (i
= 0; i
<= 143; ++i
)
1672 for (i
= 144; i
<= 255; ++i
)
1674 for (i
= 256; i
<= 279; ++i
)
1676 for (i
= 280; i
<= 287; ++i
)
1678 if (!elf_zlib_inflate_table (&codes
[0], 288, &table
[0], &table
[0]))
1680 fprintf (stderr
, "elf_zlib_inflate_table failed\n");
1681 exit (EXIT_FAILURE
);
1684 printf ("static const uint16_t elf_zlib_default_table[%#zx] =\n",
1685 final_next_secondary
+ 0x100);
1687 for (i
= 0; i
< final_next_secondary
+ 0x100; i
+= 8)
1692 for (j
= i
; j
< final_next_secondary
+ 0x100 && j
< i
+ 8; ++j
)
1693 printf (" %#x,", table
[j
]);
1699 for (i
= 0; i
< 32; ++i
)
1701 if (!elf_zlib_inflate_table (&codes
[0], 32, &table
[0], &table
[0]))
1703 fprintf (stderr
, "elf_zlib_inflate_table failed\n");
1704 exit (EXIT_FAILURE
);
1707 printf ("static const uint16_t elf_zlib_default_dist_table[%#zx] =\n",
1708 final_next_secondary
+ 0x100);
1710 for (i
= 0; i
< final_next_secondary
+ 0x100; i
+= 8)
1715 for (j
= i
; j
< final_next_secondary
+ 0x100 && j
< i
+ 8; ++j
)
1716 printf (" %#x,", table
[j
]);
1726 /* The fixed tables generated by the #ifdef'ed out main function
1729 static const uint16_t elf_zlib_default_table
[0x170] =
1731 0xd00, 0xe50, 0xe10, 0xf18, 0xd10, 0xe70, 0xe30, 0x1230,
1732 0xd08, 0xe60, 0xe20, 0x1210, 0xe00, 0xe80, 0xe40, 0x1250,
1733 0xd04, 0xe58, 0xe18, 0x1200, 0xd14, 0xe78, 0xe38, 0x1240,
1734 0xd0c, 0xe68, 0xe28, 0x1220, 0xe08, 0xe88, 0xe48, 0x1260,
1735 0xd02, 0xe54, 0xe14, 0xf1c, 0xd12, 0xe74, 0xe34, 0x1238,
1736 0xd0a, 0xe64, 0xe24, 0x1218, 0xe04, 0xe84, 0xe44, 0x1258,
1737 0xd06, 0xe5c, 0xe1c, 0x1208, 0xd16, 0xe7c, 0xe3c, 0x1248,
1738 0xd0e, 0xe6c, 0xe2c, 0x1228, 0xe0c, 0xe8c, 0xe4c, 0x1268,
1739 0xd01, 0xe52, 0xe12, 0xf1a, 0xd11, 0xe72, 0xe32, 0x1234,
1740 0xd09, 0xe62, 0xe22, 0x1214, 0xe02, 0xe82, 0xe42, 0x1254,
1741 0xd05, 0xe5a, 0xe1a, 0x1204, 0xd15, 0xe7a, 0xe3a, 0x1244,
1742 0xd0d, 0xe6a, 0xe2a, 0x1224, 0xe0a, 0xe8a, 0xe4a, 0x1264,
1743 0xd03, 0xe56, 0xe16, 0xf1e, 0xd13, 0xe76, 0xe36, 0x123c,
1744 0xd0b, 0xe66, 0xe26, 0x121c, 0xe06, 0xe86, 0xe46, 0x125c,
1745 0xd07, 0xe5e, 0xe1e, 0x120c, 0xd17, 0xe7e, 0xe3e, 0x124c,
1746 0xd0f, 0xe6e, 0xe2e, 0x122c, 0xe0e, 0xe8e, 0xe4e, 0x126c,
1747 0xd00, 0xe51, 0xe11, 0xf19, 0xd10, 0xe71, 0xe31, 0x1232,
1748 0xd08, 0xe61, 0xe21, 0x1212, 0xe01, 0xe81, 0xe41, 0x1252,
1749 0xd04, 0xe59, 0xe19, 0x1202, 0xd14, 0xe79, 0xe39, 0x1242,
1750 0xd0c, 0xe69, 0xe29, 0x1222, 0xe09, 0xe89, 0xe49, 0x1262,
1751 0xd02, 0xe55, 0xe15, 0xf1d, 0xd12, 0xe75, 0xe35, 0x123a,
1752 0xd0a, 0xe65, 0xe25, 0x121a, 0xe05, 0xe85, 0xe45, 0x125a,
1753 0xd06, 0xe5d, 0xe1d, 0x120a, 0xd16, 0xe7d, 0xe3d, 0x124a,
1754 0xd0e, 0xe6d, 0xe2d, 0x122a, 0xe0d, 0xe8d, 0xe4d, 0x126a,
1755 0xd01, 0xe53, 0xe13, 0xf1b, 0xd11, 0xe73, 0xe33, 0x1236,
1756 0xd09, 0xe63, 0xe23, 0x1216, 0xe03, 0xe83, 0xe43, 0x1256,
1757 0xd05, 0xe5b, 0xe1b, 0x1206, 0xd15, 0xe7b, 0xe3b, 0x1246,
1758 0xd0d, 0xe6b, 0xe2b, 0x1226, 0xe0b, 0xe8b, 0xe4b, 0x1266,
1759 0xd03, 0xe57, 0xe17, 0xf1f, 0xd13, 0xe77, 0xe37, 0x123e,
1760 0xd0b, 0xe67, 0xe27, 0x121e, 0xe07, 0xe87, 0xe47, 0x125e,
1761 0xd07, 0xe5f, 0xe1f, 0x120e, 0xd17, 0xe7f, 0xe3f, 0x124e,
1762 0xd0f, 0xe6f, 0xe2f, 0x122e, 0xe0f, 0xe8f, 0xe4f, 0x126e,
1763 0x290, 0x291, 0x292, 0x293, 0x294, 0x295, 0x296, 0x297,
1764 0x298, 0x299, 0x29a, 0x29b, 0x29c, 0x29d, 0x29e, 0x29f,
1765 0x2a0, 0x2a1, 0x2a2, 0x2a3, 0x2a4, 0x2a5, 0x2a6, 0x2a7,
1766 0x2a8, 0x2a9, 0x2aa, 0x2ab, 0x2ac, 0x2ad, 0x2ae, 0x2af,
1767 0x2b0, 0x2b1, 0x2b2, 0x2b3, 0x2b4, 0x2b5, 0x2b6, 0x2b7,
1768 0x2b8, 0x2b9, 0x2ba, 0x2bb, 0x2bc, 0x2bd, 0x2be, 0x2bf,
1769 0x2c0, 0x2c1, 0x2c2, 0x2c3, 0x2c4, 0x2c5, 0x2c6, 0x2c7,
1770 0x2c8, 0x2c9, 0x2ca, 0x2cb, 0x2cc, 0x2cd, 0x2ce, 0x2cf,
1771 0x2d0, 0x2d1, 0x2d2, 0x2d3, 0x2d4, 0x2d5, 0x2d6, 0x2d7,
1772 0x2d8, 0x2d9, 0x2da, 0x2db, 0x2dc, 0x2dd, 0x2de, 0x2df,
1773 0x2e0, 0x2e1, 0x2e2, 0x2e3, 0x2e4, 0x2e5, 0x2e6, 0x2e7,
1774 0x2e8, 0x2e9, 0x2ea, 0x2eb, 0x2ec, 0x2ed, 0x2ee, 0x2ef,
1775 0x2f0, 0x2f1, 0x2f2, 0x2f3, 0x2f4, 0x2f5, 0x2f6, 0x2f7,
1776 0x2f8, 0x2f9, 0x2fa, 0x2fb, 0x2fc, 0x2fd, 0x2fe, 0x2ff,
1779 static const uint16_t elf_zlib_default_dist_table
[0x100] =
1781 0x800, 0x810, 0x808, 0x818, 0x804, 0x814, 0x80c, 0x81c,
1782 0x802, 0x812, 0x80a, 0x81a, 0x806, 0x816, 0x80e, 0x81e,
1783 0x801, 0x811, 0x809, 0x819, 0x805, 0x815, 0x80d, 0x81d,
1784 0x803, 0x813, 0x80b, 0x81b, 0x807, 0x817, 0x80f, 0x81f,
1785 0x800, 0x810, 0x808, 0x818, 0x804, 0x814, 0x80c, 0x81c,
1786 0x802, 0x812, 0x80a, 0x81a, 0x806, 0x816, 0x80e, 0x81e,
1787 0x801, 0x811, 0x809, 0x819, 0x805, 0x815, 0x80d, 0x81d,
1788 0x803, 0x813, 0x80b, 0x81b, 0x807, 0x817, 0x80f, 0x81f,
1789 0x800, 0x810, 0x808, 0x818, 0x804, 0x814, 0x80c, 0x81c,
1790 0x802, 0x812, 0x80a, 0x81a, 0x806, 0x816, 0x80e, 0x81e,
1791 0x801, 0x811, 0x809, 0x819, 0x805, 0x815, 0x80d, 0x81d,
1792 0x803, 0x813, 0x80b, 0x81b, 0x807, 0x817, 0x80f, 0x81f,
1793 0x800, 0x810, 0x808, 0x818, 0x804, 0x814, 0x80c, 0x81c,
1794 0x802, 0x812, 0x80a, 0x81a, 0x806, 0x816, 0x80e, 0x81e,
1795 0x801, 0x811, 0x809, 0x819, 0x805, 0x815, 0x80d, 0x81d,
1796 0x803, 0x813, 0x80b, 0x81b, 0x807, 0x817, 0x80f, 0x81f,
1797 0x800, 0x810, 0x808, 0x818, 0x804, 0x814, 0x80c, 0x81c,
1798 0x802, 0x812, 0x80a, 0x81a, 0x806, 0x816, 0x80e, 0x81e,
1799 0x801, 0x811, 0x809, 0x819, 0x805, 0x815, 0x80d, 0x81d,
1800 0x803, 0x813, 0x80b, 0x81b, 0x807, 0x817, 0x80f, 0x81f,
1801 0x800, 0x810, 0x808, 0x818, 0x804, 0x814, 0x80c, 0x81c,
1802 0x802, 0x812, 0x80a, 0x81a, 0x806, 0x816, 0x80e, 0x81e,
1803 0x801, 0x811, 0x809, 0x819, 0x805, 0x815, 0x80d, 0x81d,
1804 0x803, 0x813, 0x80b, 0x81b, 0x807, 0x817, 0x80f, 0x81f,
1805 0x800, 0x810, 0x808, 0x818, 0x804, 0x814, 0x80c, 0x81c,
1806 0x802, 0x812, 0x80a, 0x81a, 0x806, 0x816, 0x80e, 0x81e,
1807 0x801, 0x811, 0x809, 0x819, 0x805, 0x815, 0x80d, 0x81d,
1808 0x803, 0x813, 0x80b, 0x81b, 0x807, 0x817, 0x80f, 0x81f,
1809 0x800, 0x810, 0x808, 0x818, 0x804, 0x814, 0x80c, 0x81c,
1810 0x802, 0x812, 0x80a, 0x81a, 0x806, 0x816, 0x80e, 0x81e,
1811 0x801, 0x811, 0x809, 0x819, 0x805, 0x815, 0x80d, 0x81d,
1812 0x803, 0x813, 0x80b, 0x81b, 0x807, 0x817, 0x80f, 0x81f,
1815 /* Inflate a zlib stream from PIN/SIN to POUT/SOUT. Return 1 on
1816 success, 0 on some error parsing the stream. */
1819 elf_zlib_inflate (const unsigned char *pin
, size_t sin
, uint16_t *zdebug_table
,
1820 unsigned char *pout
, size_t sout
)
1822 unsigned char *porigout
;
1823 const unsigned char *pinend
;
1824 unsigned char *poutend
;
1826 /* We can apparently see multiple zlib streams concatenated
1827 together, so keep going as long as there is something to read.
1828 The last 4 bytes are the checksum. */
1831 poutend
= pout
+ sout
;
1832 while ((pinend
- pin
) > 4)
1838 /* Read the two byte zlib header. */
1840 if (unlikely ((pin
[0] & 0xf) != 8)) /* 8 is zlib encoding. */
1842 /* Unknown compression method. */
1843 elf_uncompress_failed ();
1846 if (unlikely ((pin
[0] >> 4) > 7))
1848 /* Window size too large. Other than this check, we don't
1849 care about the window size. */
1850 elf_uncompress_failed ();
1853 if (unlikely ((pin
[1] & 0x20) != 0))
1855 /* Stream expects a predefined dictionary, but we have no
1857 elf_uncompress_failed ();
1860 val
= (pin
[0] << 8) | pin
[1];
1861 if (unlikely (val
% 31 != 0))
1863 /* Header check failure. */
1864 elf_uncompress_failed ();
1869 /* Align PIN to a 32-bit boundary. */
1873 while ((((uintptr_t) pin
) & 3) != 0)
1875 val
|= (uint64_t)*pin
<< bits
;
1880 /* Read blocks until one is marked last. */
1887 const uint16_t *tlit
;
1888 const uint16_t *tdist
;
1890 if (!elf_fetch_bits (&pin
, pinend
, &val
, &bits
))
1894 type
= (val
>> 1) & 3;
1898 if (unlikely (type
== 3))
1900 /* Invalid block type. */
1901 elf_uncompress_failed ();
1910 /* An uncompressed block. */
1912 /* If we've read ahead more than a byte, back up. */
1921 if (unlikely ((pinend
- pin
) < 4))
1923 /* Missing length. */
1924 elf_uncompress_failed ();
1927 len
= pin
[0] | (pin
[1] << 8);
1928 lenc
= pin
[2] | (pin
[3] << 8);
1931 if (unlikely (len
!= lenc
))
1934 elf_uncompress_failed ();
1937 if (unlikely (len
> (unsigned int) (pinend
- pin
)
1938 || len
> (unsigned int) (poutend
- pout
)))
1940 /* Not enough space in buffers. */
1941 elf_uncompress_failed ();
1944 memcpy (pout
, pin
, len
);
1949 while ((((uintptr_t) pin
) & 3) != 0)
1951 val
|= (uint64_t)*pin
<< bits
;
1956 /* Go around to read the next block. */
1962 tlit
= elf_zlib_default_table
;
1963 tdist
= elf_zlib_default_dist_table
;
1970 unsigned char codebits
[19];
1971 unsigned char *plenbase
;
1972 unsigned char *plen
;
1973 unsigned char *plenend
;
1975 /* Read a Huffman encoding table. The various magic
1976 numbers here are from RFC 1951. */
1978 if (!elf_fetch_bits (&pin
, pinend
, &val
, &bits
))
1981 nlit
= (val
& 0x1f) + 257;
1983 ndist
= (val
& 0x1f) + 1;
1985 nclen
= (val
& 0xf) + 4;
1988 if (unlikely (nlit
> 286 || ndist
> 30))
1990 /* Values out of range. */
1991 elf_uncompress_failed ();
1995 /* Read and build the table used to compress the
1996 literal, length, and distance codes. */
1998 memset(&codebits
[0], 0, 19);
2000 /* There are always at least 4 elements in the
2003 if (!elf_fetch_bits (&pin
, pinend
, &val
, &bits
))
2006 codebits
[16] = val
& 7;
2007 codebits
[17] = (val
>> 3) & 7;
2008 codebits
[18] = (val
>> 6) & 7;
2009 codebits
[0] = (val
>> 9) & 7;
2016 codebits
[8] = val
& 7;
2023 if (!elf_fetch_bits (&pin
, pinend
, &val
, &bits
))
2026 codebits
[7] = val
& 7;
2033 codebits
[9] = val
& 7;
2040 codebits
[6] = val
& 7;
2047 codebits
[10] = val
& 7;
2054 codebits
[5] = val
& 7;
2061 if (!elf_fetch_bits (&pin
, pinend
, &val
, &bits
))
2064 codebits
[11] = val
& 7;
2071 codebits
[4] = val
& 7;
2078 codebits
[12] = val
& 7;
2085 codebits
[3] = val
& 7;
2092 codebits
[13] = val
& 7;
2099 if (!elf_fetch_bits (&pin
, pinend
, &val
, &bits
))
2102 codebits
[2] = val
& 7;
2109 codebits
[14] = val
& 7;
2116 codebits
[1] = val
& 7;
2123 codebits
[15] = val
& 7;
2129 if (!elf_zlib_inflate_table (codebits
, 19, zdebug_table
,
2133 /* Read the compressed bit lengths of the literal,
2134 length, and distance codes. We have allocated space
2135 at the end of zdebug_table to hold them. */
2137 plenbase
= (((unsigned char *) zdebug_table
)
2138 + ZLIB_TABLE_CODELEN_OFFSET
);
2140 plenend
= plen
+ nlit
+ ndist
;
2141 while (plen
< plenend
)
2147 if (!elf_fetch_bits (&pin
, pinend
, &val
, &bits
))
2150 t
= zdebug_table
[val
& 0xff];
2152 /* The compression here uses bit lengths up to 7, so
2153 a secondary table is never necessary. */
2154 if (unlikely ((t
& (1U << ZLIB_HUFFMAN_SECONDARY_SHIFT
))
2157 elf_uncompress_failed ();
2161 b
= (t
>> ZLIB_HUFFMAN_BITS_SHIFT
) & ZLIB_HUFFMAN_BITS_MASK
;
2165 v
= t
& ZLIB_HUFFMAN_VALUE_MASK
;
2173 /* Copy previous entry 3 to 6 times. */
2175 if (unlikely (plen
== plenbase
))
2177 elf_uncompress_failed ();
2181 /* We used up to 7 bits since the last
2182 elf_fetch_bits, so we have at least 8 bits
2185 c
= 3 + (val
& 0x3);
2188 if (unlikely ((unsigned int) (plenend
- plen
) < c
))
2190 elf_uncompress_failed ();
2199 ATTRIBUTE_FALLTHROUGH
;
2202 ATTRIBUTE_FALLTHROUGH
;
2214 /* Store zero 3 to 10 times. */
2216 /* We used up to 7 bits since the last
2217 elf_fetch_bits, so we have at least 8 bits
2220 c
= 3 + (val
& 0x7);
2223 if (unlikely ((unsigned int) (plenend
- plen
) < c
))
2225 elf_uncompress_failed ();
2233 ATTRIBUTE_FALLTHROUGH
;
2236 ATTRIBUTE_FALLTHROUGH
;
2239 ATTRIBUTE_FALLTHROUGH
;
2242 ATTRIBUTE_FALLTHROUGH
;
2245 ATTRIBUTE_FALLTHROUGH
;
2248 ATTRIBUTE_FALLTHROUGH
;
2260 /* Store zero 11 to 138 times. */
2262 /* We used up to 7 bits since the last
2263 elf_fetch_bits, so we have at least 8 bits
2266 c
= 11 + (val
& 0x7f);
2269 if (unlikely ((unsigned int) (plenend
- plen
) < c
))
2271 elf_uncompress_failed ();
2275 memset (plen
, 0, c
);
2280 elf_uncompress_failed ();
2285 /* Make sure that the stop code can appear. */
2288 if (unlikely (plen
[256] == 0))
2290 elf_uncompress_failed ();
2294 /* Build the decompression tables. */
2296 if (!elf_zlib_inflate_table (plen
, nlit
, zdebug_table
,
2299 if (!elf_zlib_inflate_table (plen
+ nlit
, ndist
, zdebug_table
,
2301 + ZLIB_HUFFMAN_TABLE_SIZE
)))
2303 tlit
= zdebug_table
;
2304 tdist
= zdebug_table
+ ZLIB_HUFFMAN_TABLE_SIZE
;
2307 /* Inflate values until the end of the block. This is the
2308 main loop of the inflation code. */
2317 if (!elf_fetch_bits (&pin
, pinend
, &val
, &bits
))
2320 t
= tlit
[val
& 0xff];
2321 b
= (t
>> ZLIB_HUFFMAN_BITS_SHIFT
) & ZLIB_HUFFMAN_BITS_MASK
;
2322 v
= t
& ZLIB_HUFFMAN_VALUE_MASK
;
2324 if ((t
& (1U << ZLIB_HUFFMAN_SECONDARY_SHIFT
)) == 0)
2332 t
= tlit
[v
+ 0x100 + ((val
>> 8) & ((1U << b
) - 1))];
2333 b
= (t
>> ZLIB_HUFFMAN_BITS_SHIFT
) & ZLIB_HUFFMAN_BITS_MASK
;
2334 lit
= t
& ZLIB_HUFFMAN_VALUE_MASK
;
2341 if (unlikely (pout
== poutend
))
2343 elf_uncompress_failed ();
2349 /* We will need to write the next byte soon. We ask
2350 for high temporal locality because we will write
2351 to the whole cache line soon. */
2352 __builtin_prefetch (pout
, 1, 3);
2354 else if (lit
== 256)
2356 /* The end of the block. */
2364 /* Convert lit into a length. */
2367 len
= lit
- 257 + 3;
2368 else if (lit
== 285)
2370 else if (unlikely (lit
> 285))
2372 elf_uncompress_failed ();
2379 if (!elf_fetch_bits (&pin
, pinend
, &val
, &bits
))
2382 /* This is an expression for the table of length
2383 codes in RFC 1951 3.2.5. */
2385 extra
= (lit
>> 2) + 1;
2386 len
= (lit
& 3) << extra
;
2388 len
+= ((1U << (extra
- 1)) - 1) << 3;
2389 len
+= val
& ((1U << extra
) - 1);
2394 if (!elf_fetch_bits (&pin
, pinend
, &val
, &bits
))
2397 t
= tdist
[val
& 0xff];
2398 b
= (t
>> ZLIB_HUFFMAN_BITS_SHIFT
) & ZLIB_HUFFMAN_BITS_MASK
;
2399 v
= t
& ZLIB_HUFFMAN_VALUE_MASK
;
2401 if ((t
& (1U << ZLIB_HUFFMAN_SECONDARY_SHIFT
)) == 0)
2409 t
= tdist
[v
+ 0x100 + ((val
>> 8) & ((1U << b
) - 1))];
2410 b
= ((t
>> ZLIB_HUFFMAN_BITS_SHIFT
)
2411 & ZLIB_HUFFMAN_BITS_MASK
);
2412 dist
= t
& ZLIB_HUFFMAN_VALUE_MASK
;
2417 /* Convert dist to a distance. */
2421 /* A distance of 1. A common case, meaning
2422 repeat the last character LEN times. */
2424 if (unlikely (pout
== porigout
))
2426 elf_uncompress_failed ();
2430 if (unlikely ((unsigned int) (poutend
- pout
) < len
))
2432 elf_uncompress_failed ();
2436 memset (pout
, pout
[-1], len
);
2439 else if (unlikely (dist
> 29))
2441 elf_uncompress_failed ();
2452 if (!elf_fetch_bits (&pin
, pinend
, &val
, &bits
))
2455 /* This is an expression for the table of
2456 distance codes in RFC 1951 3.2.5. */
2458 extra
= (dist
>> 1) + 1;
2459 dist
= (dist
& 1) << extra
;
2461 dist
+= ((1U << (extra
- 1)) - 1) << 2;
2462 dist
+= val
& ((1U << extra
) - 1);
2467 /* Go back dist bytes, and copy len bytes from
2470 if (unlikely ((unsigned int) (pout
- porigout
) < dist
))
2472 elf_uncompress_failed ();
2476 if (unlikely ((unsigned int) (poutend
- pout
) < len
))
2478 elf_uncompress_failed ();
2484 memcpy (pout
, pout
- dist
, len
);
2493 copy
= len
< dist
? len
: dist
;
2494 memcpy (pout
, pout
- dist
, copy
);
2505 /* We should have filled the output buffer. */
2506 if (unlikely (pout
!= poutend
))
2508 elf_uncompress_failed ();
2515 /* Verify the zlib checksum. The checksum is in the 4 bytes at
2516 CHECKBYTES, and the uncompressed data is at UNCOMPRESSED /
2517 UNCOMPRESSED_SIZE. Returns 1 on success, 0 on failure. */
2520 elf_zlib_verify_checksum (const unsigned char *checkbytes
,
2521 const unsigned char *uncompressed
,
2522 size_t uncompressed_size
)
2526 const unsigned char *p
;
2532 for (i
= 0; i
< 4; i
++)
2533 cksum
= (cksum
<< 8) | checkbytes
[i
];
2538 /* Minimize modulo operations. */
2541 hsz
= uncompressed_size
;
2544 for (i
= 0; i
< 5552; i
+= 16)
2546 /* Manually unroll loop 16 times. */
2587 /* Manually unroll loop 16 times. */
2624 for (i
= 0; i
< hsz
; ++i
)
2633 if (unlikely ((s2
<< 16) + s1
!= cksum
))
2635 elf_uncompress_failed ();
2642 /* Inflate a zlib stream from PIN/SIN to POUT/SOUT, and verify the
2643 checksum. Return 1 on success, 0 on error. */
2646 elf_zlib_inflate_and_verify (const unsigned char *pin
, size_t sin
,
2647 uint16_t *zdebug_table
, unsigned char *pout
,
2650 if (!elf_zlib_inflate (pin
, sin
, zdebug_table
, pout
, sout
))
2652 if (!elf_zlib_verify_checksum (pin
+ sin
- 4, pout
, sout
))
2657 /* For working memory during zstd compression, we need
2658 - a literal length FSE table: 512 64-bit values == 4096 bytes
2659 - a match length FSE table: 512 64-bit values == 4096 bytes
2660 - a offset FSE table: 256 64-bit values == 2048 bytes
2661 - a Huffman tree: 2048 uint16_t values == 4096 bytes
2662 - scratch space, one of
2663 - to build an FSE table: 512 uint16_t values == 1024 bytes
2664 - to build a Huffman tree: 512 uint16_t + 256 uint32_t == 2048 bytes
2667 #define ZSTD_TABLE_SIZE \
2668 (2 * 512 * sizeof (struct elf_zstd_fse_baseline_entry) \
2669 + 256 * sizeof (struct elf_zstd_fse_baseline_entry) \
2670 + 2048 * sizeof (uint16_t) \
2671 + 512 * sizeof (uint16_t) + 256 * sizeof (uint32_t))
2673 #define ZSTD_TABLE_LITERAL_FSE_OFFSET (0)
2675 #define ZSTD_TABLE_MATCH_FSE_OFFSET \
2676 (512 * sizeof (struct elf_zstd_fse_baseline_entry))
2678 #define ZSTD_TABLE_OFFSET_FSE_OFFSET \
2679 (ZSTD_TABLE_MATCH_FSE_OFFSET \
2680 + 512 * sizeof (struct elf_zstd_fse_baseline_entry))
2682 #define ZSTD_TABLE_HUFFMAN_OFFSET \
2683 (ZSTD_TABLE_OFFSET_FSE_OFFSET \
2684 + 256 * sizeof (struct elf_zstd_fse_baseline_entry))
2686 #define ZSTD_TABLE_WORK_OFFSET \
2687 (ZSTD_TABLE_HUFFMAN_OFFSET + 2048 * sizeof (uint16_t))
2689 /* An entry in a zstd FSE table. */
2691 struct elf_zstd_fse_entry
2693 /* The value that this FSE entry represents. */
2694 unsigned char symbol
;
2695 /* The number of bits to read to determine the next state. */
2697 /* Add the bits to this base to get the next state. */
2702 elf_zstd_build_fse (const int16_t *, int, uint16_t *, int,
2703 struct elf_zstd_fse_entry
*);
2705 /* Read a zstd FSE table and build the decoding table in *TABLE, updating *PPIN
2706 as it reads. ZDEBUG_TABLE is scratch space; it must be enough for 512
2707 uint16_t values (1024 bytes). MAXIDX is the maximum number of symbols
2708 permitted. *TABLE_BITS is the maximum number of bits for symbols in the
2709 table: the size of *TABLE is at least 1 << *TABLE_BITS. This updates
2710 *TABLE_BITS to the actual number of bits. Returns 1 on success, 0 on
2714 elf_zstd_read_fse (const unsigned char **ppin
, const unsigned char *pinend
,
2715 uint16_t *zdebug_table
, int maxidx
,
2716 struct elf_zstd_fse_entry
*table
, int *table_bits
)
2718 const unsigned char *pin
;
2732 norm
= (int16_t *) zdebug_table
;
2733 next
= zdebug_table
+ 256;
2735 if (unlikely (pin
+ 3 >= pinend
))
2737 elf_uncompress_failed ();
2741 /* Align PIN to a 32-bit boundary. */
2745 while ((((uintptr_t) pin
) & 3) != 0)
2747 val
|= (uint64_t)*pin
<< bits
;
2752 if (!elf_fetch_bits (&pin
, pinend
, &val
, &bits
))
2755 accuracy_log
= (val
& 0xf) + 5;
2756 if (accuracy_log
> *table_bits
)
2758 elf_uncompress_failed ();
2761 *table_bits
= accuracy_log
;
2765 /* This code is mostly copied from the reference implementation. */
2767 /* The number of remaining probabilities, plus 1. This sets the number of
2768 bits that need to be read for the next value. */
2769 remaining
= (1 << accuracy_log
) + 1;
2771 /* The current difference between small and large values, which depends on
2772 the number of remaining values. Small values use one less bit. */
2773 threshold
= 1 << accuracy_log
;
2775 /* The number of bits used to compute threshold. */
2776 bits_needed
= accuracy_log
+ 1;
2778 /* The next character value. */
2781 /* Whether the last count was 0. */
2784 while (remaining
> 1 && idx
<= maxidx
)
2789 if (!elf_fetch_bits (&pin
, pinend
, &val
, &bits
))
2796 /* Previous count was 0, so there is a 2-bit repeat flag. If the
2797 2-bit flag is 0b11, it adds 3 and then there is another repeat
2800 while ((val
& 0xfff) == 0xfff)
2805 if (!elf_fetch_bits (&pin
, pinend
, &val
, &bits
))
2808 while ((val
& 3) == 3)
2813 if (!elf_fetch_bits (&pin
, pinend
, &val
, &bits
))
2816 /* We have at least 13 bits here, don't need to fetch. */
2821 if (unlikely (zidx
> maxidx
))
2823 elf_uncompress_failed ();
2827 for (; idx
< zidx
; idx
++)
2834 max
= (2 * threshold
- 1) - remaining
;
2835 if ((val
& (threshold
- 1)) < max
)
2837 /* A small value. */
2838 count
= (int32_t) ((uint32_t) val
& (threshold
- 1));
2839 val
>>= bits_needed
- 1;
2840 bits
-= bits_needed
- 1;
2844 /* A large value. */
2845 count
= (int32_t) ((uint32_t) val
& (2 * threshold
- 1));
2846 if (count
>= (int32_t) threshold
)
2847 count
-= (int32_t) max
;
2848 val
>>= bits_needed
;
2849 bits
-= bits_needed
;
2857 if (unlikely (idx
>= 256))
2859 elf_uncompress_failed ();
2862 norm
[idx
] = (int16_t) count
;
2867 while (remaining
< threshold
)
2874 if (unlikely (remaining
!= 1))
2876 elf_uncompress_failed ();
2880 /* If we've read ahead more than a byte, back up. */
2889 for (; idx
<= maxidx
; idx
++)
2892 return elf_zstd_build_fse (norm
, idx
, next
, *table_bits
, table
);
2895 /* Build the FSE decoding table from a list of probabilities. This reads from
2896 NORM of length IDX, uses NEXT as scratch space, and writes to *TABLE, whose
2897 size is TABLE_BITS. */
2900 elf_zstd_build_fse (const int16_t *norm
, int idx
, uint16_t *next
,
2901 int table_bits
, struct elf_zstd_fse_entry
*table
)
2910 table_size
= 1 << table_bits
;
2911 high_threshold
= table_size
- 1;
2912 for (i
= 0; i
< idx
; i
++)
2918 next
[i
] = (uint16_t) n
;
2921 table
[high_threshold
].symbol
= (unsigned char) i
;
2928 step
= (table_size
>> 1) + (table_size
>> 3) + 3;
2929 mask
= table_size
- 1;
2930 for (i
= 0; i
< idx
; i
++)
2936 for (j
= 0; j
< n
; j
++)
2938 table
[pos
].symbol
= (unsigned char) i
;
2939 pos
= (pos
+ step
) & mask
;
2940 while (unlikely (pos
> high_threshold
))
2941 pos
= (pos
+ step
) & mask
;
2944 if (unlikely (pos
!= 0))
2946 elf_uncompress_failed ();
2950 for (i
= 0; i
< table_size
; i
++)
2953 uint16_t next_state
;
2957 sym
= table
[i
].symbol
;
2958 next_state
= next
[sym
];
2961 if (next_state
== 0)
2963 elf_uncompress_failed ();
2966 high_bit
= 31 - __builtin_clz (next_state
);
2968 bits
= table_bits
- high_bit
;
2969 table
[i
].bits
= (unsigned char) bits
;
2970 table
[i
].base
= (uint16_t) ((next_state
<< bits
) - table_size
);
2976 /* Encode the baseline and bits into a single 32-bit value. */
2978 #define ZSTD_ENCODE_BASELINE_BITS(baseline, basebits) \
2979 ((uint32_t)(baseline) | ((uint32_t)(basebits) << 24))
2981 #define ZSTD_DECODE_BASELINE(baseline_basebits) \
2982 ((uint32_t)(baseline_basebits) & 0xffffff)
2984 #define ZSTD_DECODE_BASEBITS(baseline_basebits) \
2985 ((uint32_t)(baseline_basebits) >> 24)
2987 /* Given a literal length code, we need to read a number of bits and add that
2988 to a baseline. For states 0 to 15 the baseline is the state and the number
2991 #define ZSTD_LITERAL_LENGTH_BASELINE_OFFSET (16)
2993 static const uint32_t elf_zstd_literal_length_base
[] =
2995 ZSTD_ENCODE_BASELINE_BITS(16, 1),
2996 ZSTD_ENCODE_BASELINE_BITS(18, 1),
2997 ZSTD_ENCODE_BASELINE_BITS(20, 1),
2998 ZSTD_ENCODE_BASELINE_BITS(22, 1),
2999 ZSTD_ENCODE_BASELINE_BITS(24, 2),
3000 ZSTD_ENCODE_BASELINE_BITS(28, 2),
3001 ZSTD_ENCODE_BASELINE_BITS(32, 3),
3002 ZSTD_ENCODE_BASELINE_BITS(40, 3),
3003 ZSTD_ENCODE_BASELINE_BITS(48, 4),
3004 ZSTD_ENCODE_BASELINE_BITS(64, 6),
3005 ZSTD_ENCODE_BASELINE_BITS(128, 7),
3006 ZSTD_ENCODE_BASELINE_BITS(256, 8),
3007 ZSTD_ENCODE_BASELINE_BITS(512, 9),
3008 ZSTD_ENCODE_BASELINE_BITS(1024, 10),
3009 ZSTD_ENCODE_BASELINE_BITS(2048, 11),
3010 ZSTD_ENCODE_BASELINE_BITS(4096, 12),
3011 ZSTD_ENCODE_BASELINE_BITS(8192, 13),
3012 ZSTD_ENCODE_BASELINE_BITS(16384, 14),
3013 ZSTD_ENCODE_BASELINE_BITS(32768, 15),
3014 ZSTD_ENCODE_BASELINE_BITS(65536, 16)
3017 /* The same applies to match length codes. For states 0 to 31 the baseline is
3018 the state + 3 and the number of bits is zero. */
3020 #define ZSTD_MATCH_LENGTH_BASELINE_OFFSET (32)
3022 static const uint32_t elf_zstd_match_length_base
[] =
3024 ZSTD_ENCODE_BASELINE_BITS(35, 1),
3025 ZSTD_ENCODE_BASELINE_BITS(37, 1),
3026 ZSTD_ENCODE_BASELINE_BITS(39, 1),
3027 ZSTD_ENCODE_BASELINE_BITS(41, 1),
3028 ZSTD_ENCODE_BASELINE_BITS(43, 2),
3029 ZSTD_ENCODE_BASELINE_BITS(47, 2),
3030 ZSTD_ENCODE_BASELINE_BITS(51, 3),
3031 ZSTD_ENCODE_BASELINE_BITS(59, 3),
3032 ZSTD_ENCODE_BASELINE_BITS(67, 4),
3033 ZSTD_ENCODE_BASELINE_BITS(83, 4),
3034 ZSTD_ENCODE_BASELINE_BITS(99, 5),
3035 ZSTD_ENCODE_BASELINE_BITS(131, 7),
3036 ZSTD_ENCODE_BASELINE_BITS(259, 8),
3037 ZSTD_ENCODE_BASELINE_BITS(515, 9),
3038 ZSTD_ENCODE_BASELINE_BITS(1027, 10),
3039 ZSTD_ENCODE_BASELINE_BITS(2051, 11),
3040 ZSTD_ENCODE_BASELINE_BITS(4099, 12),
3041 ZSTD_ENCODE_BASELINE_BITS(8195, 13),
3042 ZSTD_ENCODE_BASELINE_BITS(16387, 14),
3043 ZSTD_ENCODE_BASELINE_BITS(32771, 15),
3044 ZSTD_ENCODE_BASELINE_BITS(65539, 16)
3047 /* An entry in an FSE table used for literal/match/length values. For these we
3048 have to map the symbol to a baseline value, and we have to read zero or more
3049 bits and add that value to the baseline value. Rather than look the values
3050 up in a separate table, we grow the FSE table so that we get better memory
3053 struct elf_zstd_fse_baseline_entry
3055 /* The baseline for the value that this FSE entry represents.. */
3057 /* The number of bits to read to add to the baseline. */
3058 unsigned char basebits
;
3059 /* The number of bits to read to determine the next state. */
3061 /* Add the bits to this base to get the next state. */
3065 /* Convert the literal length FSE table FSE_TABLE to an FSE baseline table at
3066 BASELINE_TABLE. Note that FSE_TABLE and BASELINE_TABLE will overlap. */
3069 elf_zstd_make_literal_baseline_fse (
3070 const struct elf_zstd_fse_entry
*fse_table
,
3072 struct elf_zstd_fse_baseline_entry
*baseline_table
)
3075 const struct elf_zstd_fse_entry
*pfse
;
3076 struct elf_zstd_fse_baseline_entry
*pbaseline
;
3078 /* Convert backward to avoid overlap. */
3080 count
= 1U << table_bits
;
3081 pfse
= fse_table
+ count
;
3082 pbaseline
= baseline_table
+ count
;
3083 while (pfse
> fse_table
)
3085 unsigned char symbol
;
3091 symbol
= pfse
->symbol
;
3094 if (symbol
< ZSTD_LITERAL_LENGTH_BASELINE_OFFSET
)
3096 pbaseline
->baseline
= (uint32_t)symbol
;
3097 pbaseline
->basebits
= 0;
3104 if (unlikely (symbol
> 35))
3106 elf_uncompress_failed ();
3109 idx
= symbol
- ZSTD_LITERAL_LENGTH_BASELINE_OFFSET
;
3110 basebits
= elf_zstd_literal_length_base
[idx
];
3111 pbaseline
->baseline
= ZSTD_DECODE_BASELINE(basebits
);
3112 pbaseline
->basebits
= ZSTD_DECODE_BASEBITS(basebits
);
3114 pbaseline
->bits
= bits
;
3115 pbaseline
->base
= base
;
3121 /* Convert the offset length FSE table FSE_TABLE to an FSE baseline table at
3122 BASELINE_TABLE. Note that FSE_TABLE and BASELINE_TABLE will overlap. */
3125 elf_zstd_make_offset_baseline_fse (
3126 const struct elf_zstd_fse_entry
*fse_table
,
3128 struct elf_zstd_fse_baseline_entry
*baseline_table
)
3131 const struct elf_zstd_fse_entry
*pfse
;
3132 struct elf_zstd_fse_baseline_entry
*pbaseline
;
3134 /* Convert backward to avoid overlap. */
3136 count
= 1U << table_bits
;
3137 pfse
= fse_table
+ count
;
3138 pbaseline
= baseline_table
+ count
;
3139 while (pfse
> fse_table
)
3141 unsigned char symbol
;
3147 symbol
= pfse
->symbol
;
3150 if (unlikely (symbol
> 31))
3152 elf_uncompress_failed ();
3156 /* The simple way to write this is
3158 pbaseline->baseline = (uint32_t)1 << symbol;
3159 pbaseline->basebits = symbol;
3161 That will give us an offset value that corresponds to the one
3162 described in the RFC. However, for offset values > 3, we have to
3163 subtract 3. And for offset values 1, 2, 3 we use a repeated offset.
3164 The baseline is always a power of 2, and is never 0, so for these low
3165 values we will see one entry that is baseline 1, basebits 0, and one
3166 entry that is baseline 2, basebits 1. All other entries will have
3167 baseline >= 4 and basebits >= 2.
3169 So we can check for RFC offset <= 3 by checking for basebits <= 1.
3170 And that means that we can subtract 3 here and not worry about doing
3171 it in the hot loop. */
3173 pbaseline
->baseline
= (uint32_t)1 << symbol
;
3175 pbaseline
->baseline
-= 3;
3176 pbaseline
->basebits
= symbol
;
3177 pbaseline
->bits
= bits
;
3178 pbaseline
->base
= base
;
3184 /* Convert the match length FSE table FSE_TABLE to an FSE baseline table at
3185 BASELINE_TABLE. Note that FSE_TABLE and BASELINE_TABLE will overlap. */
3188 elf_zstd_make_match_baseline_fse (
3189 const struct elf_zstd_fse_entry
*fse_table
,
3191 struct elf_zstd_fse_baseline_entry
*baseline_table
)
3194 const struct elf_zstd_fse_entry
*pfse
;
3195 struct elf_zstd_fse_baseline_entry
*pbaseline
;
3197 /* Convert backward to avoid overlap. */
3199 count
= 1U << table_bits
;
3200 pfse
= fse_table
+ count
;
3201 pbaseline
= baseline_table
+ count
;
3202 while (pfse
> fse_table
)
3204 unsigned char symbol
;
3210 symbol
= pfse
->symbol
;
3213 if (symbol
< ZSTD_MATCH_LENGTH_BASELINE_OFFSET
)
3215 pbaseline
->baseline
= (uint32_t)symbol
+ 3;
3216 pbaseline
->basebits
= 0;
3223 if (unlikely (symbol
> 52))
3225 elf_uncompress_failed ();
3228 idx
= symbol
- ZSTD_MATCH_LENGTH_BASELINE_OFFSET
;
3229 basebits
= elf_zstd_match_length_base
[idx
];
3230 pbaseline
->baseline
= ZSTD_DECODE_BASELINE(basebits
);
3231 pbaseline
->basebits
= ZSTD_DECODE_BASEBITS(basebits
);
3233 pbaseline
->bits
= bits
;
3234 pbaseline
->base
= base
;
#ifdef BACKTRACE_GENERATE_ZSTD_FSE_TABLES

/* Used to generate the predefined FSE decoding tables for zstd.  */

#include <stdio.h>

/* These values are straight from RFC 8878.  */

static int16_t lit[36] =
{
   4, 3, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 1, 1, 1,
   2, 2, 2, 2, 2, 2, 2, 2, 2, 3, 2, 1, 1, 1, 1, 1,
  -1,-1,-1,-1
};

static int16_t match[53] =
{
   1, 4, 3, 2, 2, 2, 2, 2, 2, 1, 1, 1, 1, 1, 1, 1,
   1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
   1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,-1,-1,
  -1,-1,-1,-1,-1
};

static int16_t offset[29] =
{
  1, 1, 1, 1, 1, 1, 2, 2, 2, 1, 1, 1, 1, 1, 1, 1,
  1, 1, 1, 1, 1, 1, 1, 1,-1,-1,-1,-1,-1
};

static uint16_t next[256];

/* Print out a baseline FSE table as C array initializer rows, three
   entries per line.  */

static void
print_table (const struct elf_zstd_fse_baseline_entry *table, size_t size)
{
  size_t i;

  printf ("{\n");
  for (i = 0; i < size; i += 3)
    {
      int j;

      printf (" ");
      for (j = 0; j < 3 && i + j < size; ++j)
	printf (" { %u, %d, %d, %d },", table[i + j].baseline,
		table[i + j].basebits, table[i + j].bits,
		table[i + j].base);
      printf ("\n");
    }
  printf ("};\n");
}

int
main ()
{
  struct elf_zstd_fse_entry lit_table[64];
  struct elf_zstd_fse_baseline_entry lit_baseline[64];
  struct elf_zstd_fse_entry match_table[64];
  struct elf_zstd_fse_baseline_entry match_baseline[64];
  struct elf_zstd_fse_entry offset_table[32];
  struct elf_zstd_fse_baseline_entry offset_baseline[32];

  /* Build and print the literal length table (6 table bits).  */

  if (!elf_zstd_build_fse (lit, sizeof lit / sizeof lit[0], next,
			   6, lit_table))
    {
      fprintf (stderr, "elf_zstd_build_fse failed\n");
      exit (EXIT_FAILURE);
    }

  if (!elf_zstd_make_literal_baseline_fse (lit_table, 6, lit_baseline))
    {
      fprintf (stderr, "elf_zstd_make_literal_baseline_fse failed\n");
      exit (EXIT_FAILURE);
    }

  printf ("static const struct elf_zstd_fse_baseline_entry "
	  "elf_zstd_lit_table[64] =\n");
  print_table (lit_baseline,
	       sizeof lit_baseline / sizeof lit_baseline[0]);
  printf ("\n");

  /* Build and print the match length table (6 table bits).  */

  if (!elf_zstd_build_fse (match, sizeof match / sizeof match[0], next,
			   6, match_table))
    {
      fprintf (stderr, "elf_zstd_build_fse failed\n");
      exit (EXIT_FAILURE);
    }

  if (!elf_zstd_make_match_baseline_fse (match_table, 6, match_baseline))
    {
      fprintf (stderr, "elf_zstd_make_match_baseline_fse failed\n");
      exit (EXIT_FAILURE);
    }

  printf ("static const struct elf_zstd_fse_baseline_entry "
	  "elf_zstd_match_table[64] =\n");
  print_table (match_baseline,
	       sizeof match_baseline / sizeof match_baseline[0]);
  printf ("\n");

  /* Build and print the offset table (5 table bits).  */

  if (!elf_zstd_build_fse (offset, sizeof offset / sizeof offset[0], next,
			   5, offset_table))
    {
      fprintf (stderr, "elf_zstd_build_fse failed\n");
      exit (EXIT_FAILURE);
    }

  if (!elf_zstd_make_offset_baseline_fse (offset_table, 5, offset_baseline))
    {
      fprintf (stderr, "elf_zstd_make_offset_baseline_fse failed\n");
      exit (EXIT_FAILURE);
    }

  printf ("static const struct elf_zstd_fse_baseline_entry "
	  "elf_zstd_offset_table[32] =\n");
  print_table (offset_baseline,
	       sizeof offset_baseline / sizeof offset_baseline[0]);
  printf ("\n");

  return 0;
}

#endif
3363 /* The fixed tables generated by the #ifdef'ed out main function
3366 static const struct elf_zstd_fse_baseline_entry elf_zstd_lit_table
[64] =
3368 { 0, 0, 4, 0 }, { 0, 0, 4, 16 }, { 1, 0, 5, 32 },
3369 { 3, 0, 5, 0 }, { 4, 0, 5, 0 }, { 6, 0, 5, 0 },
3370 { 7, 0, 5, 0 }, { 9, 0, 5, 0 }, { 10, 0, 5, 0 },
3371 { 12, 0, 5, 0 }, { 14, 0, 6, 0 }, { 16, 1, 5, 0 },
3372 { 20, 1, 5, 0 }, { 22, 1, 5, 0 }, { 28, 2, 5, 0 },
3373 { 32, 3, 5, 0 }, { 48, 4, 5, 0 }, { 64, 6, 5, 32 },
3374 { 128, 7, 5, 0 }, { 256, 8, 6, 0 }, { 1024, 10, 6, 0 },
3375 { 4096, 12, 6, 0 }, { 0, 0, 4, 32 }, { 1, 0, 4, 0 },
3376 { 2, 0, 5, 0 }, { 4, 0, 5, 32 }, { 5, 0, 5, 0 },
3377 { 7, 0, 5, 32 }, { 8, 0, 5, 0 }, { 10, 0, 5, 32 },
3378 { 11, 0, 5, 0 }, { 13, 0, 6, 0 }, { 16, 1, 5, 32 },
3379 { 18, 1, 5, 0 }, { 22, 1, 5, 32 }, { 24, 2, 5, 0 },
3380 { 32, 3, 5, 32 }, { 40, 3, 5, 0 }, { 64, 6, 4, 0 },
3381 { 64, 6, 4, 16 }, { 128, 7, 5, 32 }, { 512, 9, 6, 0 },
3382 { 2048, 11, 6, 0 }, { 0, 0, 4, 48 }, { 1, 0, 4, 16 },
3383 { 2, 0, 5, 32 }, { 3, 0, 5, 32 }, { 5, 0, 5, 32 },
3384 { 6, 0, 5, 32 }, { 8, 0, 5, 32 }, { 9, 0, 5, 32 },
3385 { 11, 0, 5, 32 }, { 12, 0, 5, 32 }, { 15, 0, 6, 0 },
3386 { 18, 1, 5, 32 }, { 20, 1, 5, 32 }, { 24, 2, 5, 32 },
3387 { 28, 2, 5, 32 }, { 40, 3, 5, 32 }, { 48, 4, 5, 32 },
3388 { 65536, 16, 6, 0 }, { 32768, 15, 6, 0 }, { 16384, 14, 6, 0 },
3392 static const struct elf_zstd_fse_baseline_entry elf_zstd_match_table
[64] =
3394 { 3, 0, 6, 0 }, { 4, 0, 4, 0 }, { 5, 0, 5, 32 },
3395 { 6, 0, 5, 0 }, { 8, 0, 5, 0 }, { 9, 0, 5, 0 },
3396 { 11, 0, 5, 0 }, { 13, 0, 6, 0 }, { 16, 0, 6, 0 },
3397 { 19, 0, 6, 0 }, { 22, 0, 6, 0 }, { 25, 0, 6, 0 },
3398 { 28, 0, 6, 0 }, { 31, 0, 6, 0 }, { 34, 0, 6, 0 },
3399 { 37, 1, 6, 0 }, { 41, 1, 6, 0 }, { 47, 2, 6, 0 },
3400 { 59, 3, 6, 0 }, { 83, 4, 6, 0 }, { 131, 7, 6, 0 },
3401 { 515, 9, 6, 0 }, { 4, 0, 4, 16 }, { 5, 0, 4, 0 },
3402 { 6, 0, 5, 32 }, { 7, 0, 5, 0 }, { 9, 0, 5, 32 },
3403 { 10, 0, 5, 0 }, { 12, 0, 6, 0 }, { 15, 0, 6, 0 },
3404 { 18, 0, 6, 0 }, { 21, 0, 6, 0 }, { 24, 0, 6, 0 },
3405 { 27, 0, 6, 0 }, { 30, 0, 6, 0 }, { 33, 0, 6, 0 },
3406 { 35, 1, 6, 0 }, { 39, 1, 6, 0 }, { 43, 2, 6, 0 },
3407 { 51, 3, 6, 0 }, { 67, 4, 6, 0 }, { 99, 5, 6, 0 },
3408 { 259, 8, 6, 0 }, { 4, 0, 4, 32 }, { 4, 0, 4, 48 },
3409 { 5, 0, 4, 16 }, { 7, 0, 5, 32 }, { 8, 0, 5, 32 },
3410 { 10, 0, 5, 32 }, { 11, 0, 5, 32 }, { 14, 0, 6, 0 },
3411 { 17, 0, 6, 0 }, { 20, 0, 6, 0 }, { 23, 0, 6, 0 },
3412 { 26, 0, 6, 0 }, { 29, 0, 6, 0 }, { 32, 0, 6, 0 },
3413 { 65539, 16, 6, 0 }, { 32771, 15, 6, 0 }, { 16387, 14, 6, 0 },
3414 { 8195, 13, 6, 0 }, { 4099, 12, 6, 0 }, { 2051, 11, 6, 0 },
3418 static const struct elf_zstd_fse_baseline_entry elf_zstd_offset_table
[32] =
3420 { 1, 0, 5, 0 }, { 61, 6, 4, 0 }, { 509, 9, 5, 0 },
3421 { 32765, 15, 5, 0 }, { 2097149, 21, 5, 0 }, { 5, 3, 5, 0 },
3422 { 125, 7, 4, 0 }, { 4093, 12, 5, 0 }, { 262141, 18, 5, 0 },
3423 { 8388605, 23, 5, 0 }, { 29, 5, 5, 0 }, { 253, 8, 4, 0 },
3424 { 16381, 14, 5, 0 }, { 1048573, 20, 5, 0 }, { 1, 2, 5, 0 },
3425 { 125, 7, 4, 16 }, { 2045, 11, 5, 0 }, { 131069, 17, 5, 0 },
3426 { 4194301, 22, 5, 0 }, { 13, 4, 5, 0 }, { 253, 8, 4, 16 },
3427 { 8189, 13, 5, 0 }, { 524285, 19, 5, 0 }, { 2, 1, 5, 0 },
3428 { 61, 6, 4, 16 }, { 1021, 10, 5, 0 }, { 65533, 16, 5, 0 },
3429 { 268435453, 28, 5, 0 }, { 134217725, 27, 5, 0 }, { 67108861, 26, 5, 0 },
3430 { 33554429, 25, 5, 0 }, { 16777213, 24, 5, 0 },
3433 /* Read a zstd Huffman table and build the decoding table in *TABLE, reading
3434 and updating *PPIN. This sets *PTABLE_BITS to the number of bits of the
3435 table, such that the table length is 1 << *TABLE_BITS. ZDEBUG_TABLE is
3436 scratch space; it must be enough for 512 uint16_t values + 256 32-bit values
3437 (2048 bytes). Returns 1 on success, 0 on error. */
3440 elf_zstd_read_huff (const unsigned char **ppin
, const unsigned char *pinend
,
3441 uint16_t *zdebug_table
, uint16_t *table
, int *ptable_bits
)
3443 const unsigned char *pin
;
3445 unsigned char *weights
;
3447 uint32_t *weight_mark
;
3449 uint32_t weight_mask
;
3453 if (unlikely (pin
>= pinend
))
3455 elf_uncompress_failed ();
3461 weights
= (unsigned char *) zdebug_table
;
3465 /* Table is compressed using FSE. */
3467 struct elf_zstd_fse_entry
*fse_table
;
3470 const unsigned char *pfse
;
3471 const unsigned char *pback
;
3474 unsigned int state1
, state2
;
3476 /* SCRATCH is used temporarily by elf_zstd_read_fse. It overlaps
3478 scratch
= zdebug_table
;
3479 fse_table
= (struct elf_zstd_fse_entry
*) (scratch
+ 512);
3483 if (!elf_zstd_read_fse (&pfse
, pinend
, scratch
, 255, fse_table
,
3487 if (unlikely (pin
+ hdr
> pinend
))
3489 elf_uncompress_failed ();
3493 /* We no longer need SCRATCH. Start recording weights. We need up to
3494 256 bytes of weights and 64 bytes of rank counts, so it won't overlap
3497 pback
= pin
+ hdr
- 1;
3499 if (!elf_fetch_backward_init (&pback
, pfse
, &val
, &bits
))
3502 bits
-= fse_table_bits
;
3503 state1
= (val
>> bits
) & ((1U << fse_table_bits
) - 1);
3504 bits
-= fse_table_bits
;
3505 state2
= (val
>> bits
) & ((1U << fse_table_bits
) - 1);
3507 /* There are two independent FSE streams, tracked by STATE1 and STATE2.
3508 We decode them alternately. */
3513 struct elf_zstd_fse_entry
*pt
;
3516 pt
= &fse_table
[state1
];
3518 if (unlikely (pin
< pinend
) && bits
< pt
->bits
)
3520 if (unlikely (count
>= 254))
3522 elf_uncompress_failed ();
3525 weights
[count
] = (unsigned char) pt
->symbol
;
3526 weights
[count
+ 1] = (unsigned char) fse_table
[state2
].symbol
;
3531 if (unlikely (pt
->bits
== 0))
3535 if (!elf_fetch_bits_backward (&pback
, pfse
, &val
, &bits
))
3539 v
= (val
>> bits
) & (((uint64_t)1 << pt
->bits
) - 1);
3542 state1
= pt
->base
+ v
;
3544 if (unlikely (count
>= 255))
3546 elf_uncompress_failed ();
3550 weights
[count
] = pt
->symbol
;
3553 pt
= &fse_table
[state2
];
3555 if (unlikely (pin
< pinend
&& bits
< pt
->bits
))
3557 if (unlikely (count
>= 254))
3559 elf_uncompress_failed ();
3562 weights
[count
] = (unsigned char) pt
->symbol
;
3563 weights
[count
+ 1] = (unsigned char) fse_table
[state1
].symbol
;
3568 if (unlikely (pt
->bits
== 0))
3572 if (!elf_fetch_bits_backward (&pback
, pfse
, &val
, &bits
))
3576 v
= (val
>> bits
) & (((uint64_t)1 << pt
->bits
) - 1);
3579 state2
= pt
->base
+ v
;
3581 if (unlikely (count
>= 255))
3583 elf_uncompress_failed ();
3587 weights
[count
] = pt
->symbol
;
3595 /* Table is not compressed. Each weight is 4 bits. */
3598 if (unlikely (pin
+ ((count
+ 1) / 2) >= pinend
))
3600 elf_uncompress_failed ();
3603 for (i
= 0; i
< count
; i
+= 2)
3609 weights
[i
] = b
>> 4;
3610 weights
[i
+ 1] = b
& 0xf;
3614 weight_mark
= (uint32_t *) (weights
+ 256);
3615 memset (weight_mark
, 0, 13 * sizeof (uint32_t));
3617 for (i
= 0; i
< count
; ++i
)
3622 if (unlikely (w
> 12))
3624 elf_uncompress_failed ();
3629 weight_mask
+= 1U << (w
- 1);
3631 if (unlikely (weight_mask
== 0))
3633 elf_uncompress_failed ();
3637 table_bits
= 32 - __builtin_clz (weight_mask
);
3638 if (unlikely (table_bits
> 11))
3640 elf_uncompress_failed ();
3644 /* Work out the last weight value, which is omitted because the weights must
3645 sum to a power of two. */
3650 left
= ((uint32_t)1 << table_bits
) - weight_mask
;
3653 elf_uncompress_failed ();
3656 high_bit
= 31 - __builtin_clz (left
);
3657 if (((uint32_t)1 << high_bit
) != left
)
3659 elf_uncompress_failed ();
3663 if (unlikely (count
>= 256))
3665 elf_uncompress_failed ();
3669 weights
[count
] = high_bit
+ 1;
3671 ++weight_mark
[high_bit
+ 1];
3674 if (weight_mark
[1] < 2 || (weight_mark
[1] & 1) != 0)
3676 elf_uncompress_failed ();
3680 /* Change WEIGHT_MARK from a count of weights to the index of the first
3681 symbol for that weight. We shift the indexes to also store how many we
3682 have seen so far, below. */
3687 for (i
= 0; i
< table_bits
; ++i
)
3692 next
+= weight_mark
[i
+ 1] << i
;
3693 weight_mark
[i
+ 1] = cur
;
3697 for (i
= 0; i
< count
; ++i
)
3699 unsigned char weight
;
3705 weight
= weights
[i
];
3709 length
= 1U << (weight
- 1);
3710 tval
= (i
<< 8) | (table_bits
+ 1 - weight
);
3711 start
= weight_mark
[weight
];
3712 for (j
= 0; j
< length
; ++j
)
3713 table
[start
+ j
] = tval
;
3714 weight_mark
[weight
] += length
;
3718 *ptable_bits
= (int)table_bits
;
/* Read and decompress the literals and store them ending at POUTEND.  This
   works because we are going to use all the literals in the output, so they
   must fit into the output buffer.  HUFFMAN_TABLE, and PHUFFMAN_TABLE_BITS
   store the Huffman table across calls.  SCRATCH is used to read a Huffman
   table.  Store the start of the decompressed literals in *PPLIT.  Update
   *PPIN.  Return 1 on success, 0 on error.  */

static int
elf_zstd_read_literals (const unsigned char **ppin,
			const unsigned char *pinend,
			unsigned char *pout,
			unsigned char *poutend,
			uint16_t *scratch,
			uint16_t *huffman_table,
			int *phuffman_table_bits,
			unsigned char **pplit)
{
  const unsigned char *pin;
  unsigned char *plit;
  unsigned char hdr;
  uint32_t regenerated_size;
  uint32_t compressed_size;
  int streams;
  uint32_t total_streams_size;
  unsigned int huffman_table_bits;
  uint64_t huffman_mask;

  pin = *ppin;
  if (unlikely (pin >= pinend))
    {
      elf_uncompress_failed ();
      return 0;
    }
  hdr = *pin;
  ++pin;

  if ((hdr & 3) == 0 || (hdr & 3) == 1)
    {
      int raw;

      /* Raw_Literals_Block or RLE_Literals_Block */

      raw = (hdr & 3) == 0;

      switch ((hdr >> 2) & 3)
	{
	case 0: case 2:
	  regenerated_size = hdr >> 3;
	  break;
	case 1:
	  if (unlikely (pin >= pinend))
	    {
	      elf_uncompress_failed ();
	      return 0;
	    }
	  regenerated_size = (hdr >> 4) + ((uint32_t)(*pin) << 4);
	  ++pin;
	  break;
	case 3:
	  if (unlikely (pin + 1 >= pinend))
	    {
	      elf_uncompress_failed ();
	      return 0;
	    }
	  regenerated_size = ((hdr >> 4)
			      + ((uint32_t)*pin << 4)
			      + ((uint32_t)pin[1] << 12));
	  pin += 2;
	  break;
	default:
	  elf_uncompress_failed ();
	  return 0;
	}

      if (unlikely ((size_t)(poutend - pout) < regenerated_size))
	{
	  elf_uncompress_failed ();
	  return 0;
	}

      plit = poutend - regenerated_size;

      if (raw)
	{
	  if (unlikely (pin + regenerated_size >= pinend))
	    {
	      elf_uncompress_failed ();
	      return 0;
	    }
	  memcpy (plit, pin, regenerated_size);
	  pin += regenerated_size;
	}
      else
	{
	  if (unlikely (pin >= pinend))
	    {
	      elf_uncompress_failed ();
	      return 0;
	    }
	  memset (plit, *pin, regenerated_size);
	  ++pin;
	}

      *ppin = pin;
      *pplit = plit;

      return 1;
    }

  /* Compressed_Literals_Block or Treeless_Literals_Block */

  switch ((hdr >> 2) & 3)
    {
    case 0: case 1:
      if (unlikely (pin + 1 >= pinend))
	{
	  elf_uncompress_failed ();
	  return 0;
	}
      regenerated_size = (hdr >> 4) | ((uint32_t)(*pin & 0x3f) << 4);
      compressed_size = (uint32_t)*pin >> 6 | ((uint32_t)pin[1] << 2);
      pin += 2;
      streams = ((hdr >> 2) & 3) == 0 ? 1 : 4;
      break;
    case 2:
      if (unlikely (pin + 2 >= pinend))
	{
	  elf_uncompress_failed ();
	  return 0;
	}
      regenerated_size = (((uint32_t)hdr >> 4)
			  | ((uint32_t)*pin << 4)
			  | (((uint32_t)pin[1] & 3) << 12));
      compressed_size = (((uint32_t)pin[1] >> 2)
			 | ((uint32_t)pin[2] << 6));
      pin += 3;
      streams = 4;
      break;
    case 3:
      if (unlikely (pin + 3 >= pinend))
	{
	  elf_uncompress_failed ();
	  return 0;
	}
      regenerated_size = (((uint32_t)hdr >> 4)
			  | ((uint32_t)*pin << 4)
			  | (((uint32_t)pin[1] & 0x3f) << 12));
      compressed_size = (((uint32_t)pin[1] >> 6)
			 | ((uint32_t)pin[2] << 2)
			 | ((uint32_t)pin[3] << 10));
      pin += 4;
      streams = 4;
      break;
    default:
      elf_uncompress_failed ();
      return 0;
    }

  if (unlikely (pin + compressed_size > pinend))
    {
      elf_uncompress_failed ();
      return 0;
    }

  pinend = pin + compressed_size;
  *ppin = pinend;

  if (unlikely ((size_t)(poutend - pout) < regenerated_size))
    {
      elf_uncompress_failed ();
      return 0;
    }

  plit = poutend - regenerated_size;

  *pplit = plit;

  total_streams_size = compressed_size;

  if ((hdr & 3) == 2)
    {
      const unsigned char *ptable;

      /* Compressed_Literals_Block.  Read Huffman tree.  */

      ptable = pin;
      if (!elf_zstd_read_huff (&ptable, pinend, scratch, huffman_table,
			       phuffman_table_bits))
	return 0;

      if (unlikely (total_streams_size < (size_t)(ptable - pin)))
	{
	  elf_uncompress_failed ();
	  return 0;
	}

      total_streams_size -= ptable - pin;
      pin = ptable;
    }
  else
    {
      /* Treeless_Literals_Block.  Reuse previous Huffman tree.  */
      if (unlikely (*phuffman_table_bits == 0))
	{
	  elf_uncompress_failed ();
	  return 0;
	}
    }

  /* Decompress COMPRESSED_SIZE bytes of data at PIN using the huffman table,
     storing REGENERATED_SIZE bytes of decompressed data at PLIT.  */

  huffman_table_bits = (unsigned int)*phuffman_table_bits;
  huffman_mask = ((uint64_t)1 << huffman_table_bits) - 1;

  if (streams == 1)
    {
      const unsigned char *pback;
      const unsigned char *pbackend;
      uint64_t val;
      unsigned int bits;
      uint32_t i;

      pback = pin + total_streams_size - 1;
      pbackend = pin;
      if (!elf_fetch_backward_init (&pback, pbackend, &val, &bits))
	return 0;

      /* This is one of the inner loops of the decompression algorithm, so we
	 put some effort into optimization.  We can't get more than 64 bytes
	 from a single call to elf_fetch_bits_backward, and we can't subtract
	 more than 11 bits at a time.  */

      if (regenerated_size >= 64)
	{
	  unsigned char *plitstart;
	  unsigned char *plitstop;

	  plitstart = plit;
	  plitstop = plit + regenerated_size - 64;
	  while (plit < plitstop)
	    {
	      uint16_t t;

	      if (!elf_fetch_bits_backward (&pback, pbackend, &val, &bits))
		return 0;

	      /* NOTE(review): this unrolled path assumes the refill leaves
		 enough bits for four symbol lookups (at most 11 bits each)
		 before the next refill — confirm against
		 elf_fetch_bits_backward.  */

	      t = huffman_table[(val >> (bits - huffman_table_bits))
				& huffman_mask];
	      *plit = t >> 8;
	      ++plit;
	      bits -= t & 0xff;

	      t = huffman_table[(val >> (bits - huffman_table_bits))
				& huffman_mask];
	      *plit = t >> 8;
	      ++plit;
	      bits -= t & 0xff;

	      t = huffman_table[(val >> (bits - huffman_table_bits))
				& huffman_mask];
	      *plit = t >> 8;
	      ++plit;
	      bits -= t & 0xff;

	      t = huffman_table[(val >> (bits - huffman_table_bits))
				& huffman_mask];
	      *plit = t >> 8;
	      ++plit;
	      bits -= t & 0xff;
	    }

	  regenerated_size -= plit - plitstart;
	}

      /* Careful loop for the final literals, handling the case where
	 fewer than HUFFMAN_TABLE_BITS bits remain.  */

      for (i = 0; i < regenerated_size; ++i)
	{
	  uint16_t t;

	  if (!elf_fetch_bits_backward (&pback, pbackend, &val, &bits))
	    return 0;

	  if (unlikely (bits < huffman_table_bits))
	    {
	      t = huffman_table[(val << (huffman_table_bits - bits))
				& huffman_mask];
	      if (unlikely (bits < (t & 0xff)))
		{
		  elf_uncompress_failed ();
		  return 0;
		}
	    }
	  else
	    t = huffman_table[(val >> (bits - huffman_table_bits))
			      & huffman_mask];

	  *plit = t >> 8;
	  ++plit;
	  bits -= t & 0xff;
	}
    }
  else
    {
      uint32_t stream_size1, stream_size2, stream_size3, stream_size4;
      uint32_t tot;
      const unsigned char *pback1, *pback2, *pback3, *pback4;
      const unsigned char *pbackend1, *pbackend2, *pbackend3, *pbackend4;
      uint64_t val1, val2, val3, val4;
      unsigned int bits1, bits2, bits3, bits4;
      unsigned char *plit1, *plit2, *plit3, *plit4;
      uint32_t regenerated_stream_size;
      uint32_t regenerated_stream_size4;
      uint16_t t1, t2, t3, t4;
      uint32_t i;
      uint32_t limit;

      /* Read jump table.  */
      if (unlikely (pin + 5 >= pinend))
	{
	  elf_uncompress_failed ();
	  return 0;
	}
      stream_size1 = (uint32_t)*pin | ((uint32_t)pin[1] << 8);
      pin += 2;
      stream_size2 = (uint32_t)*pin | ((uint32_t)pin[1] << 8);
      pin += 2;
      stream_size3 = (uint32_t)*pin | ((uint32_t)pin[1] << 8);
      pin += 2;
      tot = stream_size1 + stream_size2 + stream_size3;
      if (unlikely (tot > total_streams_size - 6))
	{
	  elf_uncompress_failed ();
	  return 0;
	}
      stream_size4 = total_streams_size - 6 - tot;

      pback1 = pin + stream_size1 - 1;
      pbackend1 = pin;

      pback2 = pback1 + stream_size2;
      pbackend2 = pback1 + 1;

      pback3 = pback2 + stream_size3;
      pbackend3 = pback2 + 1;

      pback4 = pback3 + stream_size4;
      pbackend4 = pback3 + 1;

      if (!elf_fetch_backward_init (&pback1, pbackend1, &val1, &bits1))
	return 0;
      if (!elf_fetch_backward_init (&pback2, pbackend2, &val2, &bits2))
	return 0;
      if (!elf_fetch_backward_init (&pback3, pbackend3, &val3, &bits3))
	return 0;
      if (!elf_fetch_backward_init (&pback4, pbackend4, &val4, &bits4))
	return 0;

      regenerated_stream_size = (regenerated_size + 3) / 4;

      plit1 = plit;
      plit2 = plit1 + regenerated_stream_size;
      plit3 = plit2 + regenerated_stream_size;
      plit4 = plit3 + regenerated_stream_size;

      regenerated_stream_size4 = regenerated_size - regenerated_stream_size * 3;

      /* We can't get more than 64 literal bytes from a single call to
	 elf_fetch_bits_backward.  The fourth stream can be up to 3 bytes less,
	 so use as the limit.  */

      limit = (regenerated_stream_size4 <= 64
	       ? 0
	       : regenerated_stream_size4 - 64);
      i = 0;
      while (i < limit)
	{
	  if (!elf_fetch_bits_backward (&pback1, pbackend1, &val1, &bits1))
	    return 0;
	  if (!elf_fetch_bits_backward (&pback2, pbackend2, &val2, &bits2))
	    return 0;
	  if (!elf_fetch_bits_backward (&pback3, pbackend3, &val3, &bits3))
	    return 0;
	  if (!elf_fetch_bits_backward (&pback4, pbackend4, &val4, &bits4))
	    return 0;

	  /* We can't subtract more than 11 bits at a time.  */

	  do
	    {
	      t1 = huffman_table[(val1 >> (bits1 - huffman_table_bits))
				 & huffman_mask];
	      t2 = huffman_table[(val2 >> (bits2 - huffman_table_bits))
				 & huffman_mask];
	      t3 = huffman_table[(val3 >> (bits3 - huffman_table_bits))
				 & huffman_mask];
	      t4 = huffman_table[(val4 >> (bits4 - huffman_table_bits))
				 & huffman_mask];

	      *plit1 = t1 >> 8;
	      ++plit1;
	      bits1 -= t1 & 0xff;

	      *plit2 = t2 >> 8;
	      ++plit2;
	      bits2 -= t2 & 0xff;

	      *plit3 = t3 >> 8;
	      ++plit3;
	      bits3 -= t3 & 0xff;

	      *plit4 = t4 >> 8;
	      ++plit4;
	      bits4 -= t4 & 0xff;

	      ++i;
	    }
	  while (bits1 > 11 && bits2 > 11 && bits3 > 11 && bits4 > 11);
	}

      /* Careful loop for the remaining literals of each stream.  The
	 fourth stream may have fewer literals than the first three.  */

      while (i < regenerated_stream_size)
	{
	  int use4;

	  use4 = i < regenerated_stream_size4;

	  if (!elf_fetch_bits_backward (&pback1, pbackend1, &val1, &bits1))
	    return 0;
	  if (!elf_fetch_bits_backward (&pback2, pbackend2, &val2, &bits2))
	    return 0;
	  if (!elf_fetch_bits_backward (&pback3, pbackend3, &val3, &bits3))
	    return 0;
	  if (use4)
	    {
	      if (!elf_fetch_bits_backward (&pback4, pbackend4, &val4, &bits4))
		return 0;
	    }

	  if (unlikely (bits1 < huffman_table_bits))
	    {
	      t1 = huffman_table[(val1 << (huffman_table_bits - bits1))
				 & huffman_mask];
	      if (unlikely (bits1 < (t1 & 0xff)))
		{
		  elf_uncompress_failed ();
		  return 0;
		}
	    }
	  else
	    t1 = huffman_table[(val1 >> (bits1 - huffman_table_bits))
			       & huffman_mask];

	  if (unlikely (bits2 < huffman_table_bits))
	    {
	      t2 = huffman_table[(val2 << (huffman_table_bits - bits2))
				 & huffman_mask];
	      if (unlikely (bits2 < (t2 & 0xff)))
		{
		  elf_uncompress_failed ();
		  return 0;
		}
	    }
	  else
	    t2 = huffman_table[(val2 >> (bits2 - huffman_table_bits))
			       & huffman_mask];

	  if (unlikely (bits3 < huffman_table_bits))
	    {
	      t3 = huffman_table[(val3 << (huffman_table_bits - bits3))
				 & huffman_mask];
	      if (unlikely (bits3 < (t3 & 0xff)))
		{
		  elf_uncompress_failed ();
		  return 0;
		}
	    }
	  else
	    t3 = huffman_table[(val3 >> (bits3 - huffman_table_bits))
			       & huffman_mask];

	  if (use4)
	    {
	      if (unlikely (bits4 < huffman_table_bits))
		{
		  t4 = huffman_table[(val4 << (huffman_table_bits - bits4))
				     & huffman_mask];
		  if (unlikely (bits4 < (t4 & 0xff)))
		    {
		      elf_uncompress_failed ();
		      return 0;
		    }
		}
	      else
		t4 = huffman_table[(val4 >> (bits4 - huffman_table_bits))
				   & huffman_mask];

	      *plit4 = t4 >> 8;
	      ++plit4;
	      bits4 -= t4 & 0xff;
	    }

	  *plit1 = t1 >> 8;
	  ++plit1;
	  bits1 -= t1 & 0xff;

	  *plit2 = t2 >> 8;
	  ++plit2;
	  bits2 -= t2 & 0xff;

	  *plit3 = t3 >> 8;
	  ++plit3;
	  bits3 -= t3 & 0xff;

	  ++i;
	}
    }

  return 1;
}
/* The information used to decompress a sequence code, which can be a literal
   length, an offset, or a match length.  */

struct elf_zstd_seq_decode
{
  /* The FSE baseline table used to decode this code.  */
  const struct elf_zstd_fse_baseline_entry *table;
  /* The number of bits in the table; -1 if the table is not yet valid.  */
  int table_bits;
};
4259 /* Unpack a sequence code compression mode. */
4262 elf_zstd_unpack_seq_decode (int mode
,
4263 const unsigned char **ppin
,
4264 const unsigned char *pinend
,
4265 const struct elf_zstd_fse_baseline_entry
*predef
,
4269 struct elf_zstd_fse_baseline_entry
*table
,
4271 int (*conv
)(const struct elf_zstd_fse_entry
*,
4273 struct elf_zstd_fse_baseline_entry
*),
4274 struct elf_zstd_seq_decode
*decode
)
4279 decode
->table
= predef
;
4280 decode
->table_bits
= predef_bits
;
4285 struct elf_zstd_fse_entry entry
;
4287 if (unlikely (*ppin
>= pinend
))
4289 elf_uncompress_failed ();
4292 entry
.symbol
= **ppin
;
4296 decode
->table_bits
= 0;
4297 if (!conv (&entry
, 0, table
))
4304 struct elf_zstd_fse_entry
*fse_table
;
4306 /* We use the same space for the simple FSE table and the baseline
4308 fse_table
= (struct elf_zstd_fse_entry
*)table
;
4309 decode
->table_bits
= table_bits
;
4310 if (!elf_zstd_read_fse (ppin
, pinend
, scratch
, maxidx
, fse_table
,
4311 &decode
->table_bits
))
4313 if (!conv (fse_table
, decode
->table_bits
, table
))
4315 decode
->table
= table
;
4320 if (unlikely (decode
->table_bits
== -1))
4322 elf_uncompress_failed ();
4328 elf_uncompress_failed ();
4335 /* Decompress a zstd stream from PIN/SIN to POUT/SOUT. Code based on RFC 8878.
4336 Return 1 on success, 0 on error. */
4339 elf_zstd_decompress (const unsigned char *pin
, size_t sin
,
4340 unsigned char *zdebug_table
, unsigned char *pout
,
4343 const unsigned char *pinend
;
4344 unsigned char *poutstart
;
4345 unsigned char *poutend
;
4346 struct elf_zstd_seq_decode literal_decode
;
4347 struct elf_zstd_fse_baseline_entry
*literal_fse_table
;
4348 struct elf_zstd_seq_decode match_decode
;
4349 struct elf_zstd_fse_baseline_entry
*match_fse_table
;
4350 struct elf_zstd_seq_decode offset_decode
;
4351 struct elf_zstd_fse_baseline_entry
*offset_fse_table
;
4352 uint16_t *huffman_table
;
4353 int huffman_table_bits
;
4354 uint32_t repeated_offset1
;
4355 uint32_t repeated_offset2
;
4356 uint32_t repeated_offset3
;
4360 uint64_t content_size
;
4365 poutend
= pout
+ sout
;
4367 literal_decode
.table
= NULL
;
4368 literal_decode
.table_bits
= -1;
4369 literal_fse_table
= ((struct elf_zstd_fse_baseline_entry
*)
4370 (zdebug_table
+ ZSTD_TABLE_LITERAL_FSE_OFFSET
));
4372 match_decode
.table
= NULL
;
4373 match_decode
.table_bits
= -1;
4374 match_fse_table
= ((struct elf_zstd_fse_baseline_entry
*)
4375 (zdebug_table
+ ZSTD_TABLE_MATCH_FSE_OFFSET
));
4377 offset_decode
.table
= NULL
;
4378 offset_decode
.table_bits
= -1;
4379 offset_fse_table
= ((struct elf_zstd_fse_baseline_entry
*)
4380 (zdebug_table
+ ZSTD_TABLE_OFFSET_FSE_OFFSET
));
4381 huffman_table
= ((uint16_t *)
4382 (zdebug_table
+ ZSTD_TABLE_HUFFMAN_OFFSET
));
4383 huffman_table_bits
= 0;
4384 scratch
= ((uint16_t *)
4385 (zdebug_table
+ ZSTD_TABLE_WORK_OFFSET
));
4387 repeated_offset1
= 1;
4388 repeated_offset2
= 4;
4389 repeated_offset3
= 8;
4391 if (unlikely (sin
< 4))
4393 elf_uncompress_failed ();
4397 /* These values are the zstd magic number. */
4398 if (unlikely (pin
[0] != 0x28
4403 elf_uncompress_failed ();
4409 if (unlikely (pin
>= pinend
))
4411 elf_uncompress_failed ();
4417 /* We expect a single frame. */
4418 if (unlikely ((hdr
& (1 << 5)) == 0))
4420 elf_uncompress_failed ();
4423 /* Reserved bit must be zero. */
4424 if (unlikely ((hdr
& (1 << 3)) != 0))
4426 elf_uncompress_failed ();
4429 /* We do not expect a dictionary. */
4430 if (unlikely ((hdr
& 3) != 0))
4432 elf_uncompress_failed ();
4435 has_checksum
= (hdr
& (1 << 2)) != 0;
4439 if (unlikely (pin
>= pinend
))
4441 elf_uncompress_failed ();
4444 content_size
= (uint64_t) *pin
++;
4447 if (unlikely (pin
+ 1 >= pinend
))
4449 elf_uncompress_failed ();
4452 content_size
= (((uint64_t) pin
[0]) | (((uint64_t) pin
[1]) << 8)) + 256;
4456 if (unlikely (pin
+ 3 >= pinend
))
4458 elf_uncompress_failed ();
4461 content_size
= ((uint64_t) pin
[0]
4462 | (((uint64_t) pin
[1]) << 8)
4463 | (((uint64_t) pin
[2]) << 16)
4464 | (((uint64_t) pin
[3]) << 24));
4468 if (unlikely (pin
+ 7 >= pinend
))
4470 elf_uncompress_failed ();
4473 content_size
= ((uint64_t) pin
[0]
4474 | (((uint64_t) pin
[1]) << 8)
4475 | (((uint64_t) pin
[2]) << 16)
4476 | (((uint64_t) pin
[3]) << 24)
4477 | (((uint64_t) pin
[4]) << 32)
4478 | (((uint64_t) pin
[5]) << 40)
4479 | (((uint64_t) pin
[6]) << 48)
4480 | (((uint64_t) pin
[7]) << 56));
4484 elf_uncompress_failed ();
4488 if (unlikely (content_size
!= (size_t) content_size
4489 || (size_t) content_size
!= sout
))
4491 elf_uncompress_failed ();
4500 uint32_t block_size
;
4502 if (unlikely (pin
+ 2 >= pinend
))
4504 elf_uncompress_failed ();
4507 block_hdr
= ((uint32_t) pin
[0]
4508 | (((uint32_t) pin
[1]) << 8)
4509 | (((uint32_t) pin
[2]) << 16));
4512 last_block
= block_hdr
& 1;
4513 block_type
= (block_hdr
>> 1) & 3;
4514 block_size
= block_hdr
>> 3;
4520 if (unlikely ((size_t) block_size
> (size_t) (pinend
- pin
)))
4522 elf_uncompress_failed ();
4525 if (unlikely ((size_t) block_size
> (size_t) (poutend
- pout
)))
4527 elf_uncompress_failed ();
4530 memcpy (pout
, pin
, block_size
);
4537 if (unlikely (pin
>= pinend
))
4539 elf_uncompress_failed ();
4542 if (unlikely ((size_t) block_size
> (size_t) (poutend
- pout
)))
4544 elf_uncompress_failed ();
4547 memset (pout
, *pin
, block_size
);
4554 const unsigned char *pblockend
;
4555 unsigned char *plitstack
;
4556 unsigned char *plit
;
4557 uint32_t literal_count
;
4558 unsigned char seq_hdr
;
4561 const unsigned char *pback
;
4564 unsigned int literal_state
;
4565 unsigned int offset_state
;
4566 unsigned int match_state
;
4568 /* Compressed_Block */
4569 if (unlikely ((size_t) block_size
> (size_t) (pinend
- pin
)))
4571 elf_uncompress_failed ();
4575 pblockend
= pin
+ block_size
;
4577 /* Read the literals into the end of the output space, and leave
4578 PLIT pointing at them. */
4580 if (!elf_zstd_read_literals (&pin
, pblockend
, pout
, poutend
,
4581 scratch
, huffman_table
,
4582 &huffman_table_bits
,
4586 literal_count
= poutend
- plit
;
4591 seq_count
= seq_hdr
;
4592 else if (seq_hdr
< 255)
4594 if (unlikely (pin
>= pinend
))
4596 elf_uncompress_failed ();
4599 seq_count
= ((seq_hdr
- 128) << 8) + *pin
;
4604 if (unlikely (pin
+ 1 >= pinend
))
4606 elf_uncompress_failed ();
4609 seq_count
= *pin
+ (pin
[1] << 8) + 0x7f00;
4615 int (*pfn
)(const struct elf_zstd_fse_entry
*,
4616 int, struct elf_zstd_fse_baseline_entry
*);
4618 if (unlikely (pin
>= pinend
))
4620 elf_uncompress_failed ();
4626 pfn
= elf_zstd_make_literal_baseline_fse
;
4627 if (!elf_zstd_unpack_seq_decode ((seq_hdr
>> 6) & 3,
4629 &elf_zstd_lit_table
[0], 6,
4631 literal_fse_table
, 9, pfn
,
4635 pfn
= elf_zstd_make_offset_baseline_fse
;
4636 if (!elf_zstd_unpack_seq_decode ((seq_hdr
>> 4) & 3,
4638 &elf_zstd_offset_table
[0], 5,
4640 offset_fse_table
, 8, pfn
,
4644 pfn
= elf_zstd_make_match_baseline_fse
;
4645 if (!elf_zstd_unpack_seq_decode ((seq_hdr
>> 2) & 3,
4647 &elf_zstd_match_table
[0], 6,
4649 match_fse_table
, 9, pfn
,
4654 pback
= pblockend
- 1;
4655 if (!elf_fetch_backward_init (&pback
, pin
, &val
, &bits
))
4658 bits
-= literal_decode
.table_bits
;
4659 literal_state
= ((val
>> bits
)
4660 & ((1U << literal_decode
.table_bits
) - 1));
4662 if (!elf_fetch_bits_backward (&pback
, pin
, &val
, &bits
))
4664 bits
-= offset_decode
.table_bits
;
4665 offset_state
= ((val
>> bits
)
4666 & ((1U << offset_decode
.table_bits
) - 1));
4668 if (!elf_fetch_bits_backward (&pback
, pin
, &val
, &bits
))
4670 bits
-= match_decode
.table_bits
;
4671 match_state
= ((val
>> bits
)
4672 & ((1U << match_decode
.table_bits
) - 1));
4677 const struct elf_zstd_fse_baseline_entry
*pt
;
4678 uint32_t offset_basebits
;
4679 uint32_t offset_baseline
;
4680 uint32_t offset_bits
;
4681 uint32_t offset_base
;
4683 uint32_t match_baseline
;
4684 uint32_t match_bits
;
4685 uint32_t match_base
;
4687 uint32_t literal_baseline
;
4688 uint32_t literal_bits
;
4689 uint32_t literal_base
;
4694 pt
= &offset_decode
.table
[offset_state
];
4695 offset_basebits
= pt
->basebits
;
4696 offset_baseline
= pt
->baseline
;
4697 offset_bits
= pt
->bits
;
4698 offset_base
= pt
->base
;
4700 /* This case can be more than 16 bits, which is all that
4701 elf_fetch_bits_backward promises. */
4702 need
= offset_basebits
;
4704 if (unlikely (need
> 16))
4706 if (!elf_fetch_bits_backward (&pback
, pin
, &val
, &bits
))
4709 add
= (val
>> bits
) & ((1U << 16) - 1);
4715 if (!elf_fetch_bits_backward (&pback
, pin
, &val
, &bits
))
4718 add
+= (val
>> bits
) & ((1U << need
) - 1);
4721 offset
= offset_baseline
+ add
;
4723 pt
= &match_decode
.table
[match_state
];
4724 need
= pt
->basebits
;
4725 match_baseline
= pt
->baseline
;
4726 match_bits
= pt
->bits
;
4727 match_base
= pt
->base
;
4732 if (!elf_fetch_bits_backward (&pback
, pin
, &val
, &bits
))
4735 add
= (val
>> bits
) & ((1U << need
) - 1);
4738 match
= match_baseline
+ add
;
4740 pt
= &literal_decode
.table
[literal_state
];
4741 need
= pt
->basebits
;
4742 literal_baseline
= pt
->baseline
;
4743 literal_bits
= pt
->bits
;
4744 literal_base
= pt
->base
;
4749 if (!elf_fetch_bits_backward (&pback
, pin
, &val
, &bits
))
4752 add
= (val
>> bits
) & ((1U << need
) - 1);
4755 literal
= literal_baseline
+ add
;
4757 /* See the comment in elf_zstd_make_offset_baseline_fse. */
4758 if (offset_basebits
> 1)
4760 repeated_offset3
= repeated_offset2
;
4761 repeated_offset2
= repeated_offset1
;
4762 repeated_offset1
= offset
;
4766 if (unlikely (literal
== 0))
4771 offset
= repeated_offset1
;
4774 offset
= repeated_offset2
;
4775 repeated_offset2
= repeated_offset1
;
4776 repeated_offset1
= offset
;
4779 offset
= repeated_offset3
;
4780 repeated_offset3
= repeated_offset2
;
4781 repeated_offset2
= repeated_offset1
;
4782 repeated_offset1
= offset
;
4785 offset
= repeated_offset1
- 1;
4786 repeated_offset3
= repeated_offset2
;
4787 repeated_offset2
= repeated_offset1
;
4788 repeated_offset1
= offset
;
4794 if (seq
< seq_count
)
4798 /* Update the three states. */
4800 if (!elf_fetch_bits_backward (&pback
, pin
, &val
, &bits
))
4803 need
= literal_bits
;
4805 v
= (val
>> bits
) & (((uint32_t)1 << need
) - 1);
4807 literal_state
= literal_base
+ v
;
4809 if (!elf_fetch_bits_backward (&pback
, pin
, &val
, &bits
))
4814 v
= (val
>> bits
) & (((uint32_t)1 << need
) - 1);
4816 match_state
= match_base
+ v
;
4818 if (!elf_fetch_bits_backward (&pback
, pin
, &val
, &bits
))
4823 v
= (val
>> bits
) & (((uint32_t)1 << need
) - 1);
4825 offset_state
= offset_base
+ v
;
4828 /* The next sequence is now in LITERAL, OFFSET, MATCH. */
4830 /* Copy LITERAL bytes from the literals. */
4832 if (unlikely ((size_t)(poutend
- pout
) < literal
))
4834 elf_uncompress_failed ();
4838 if (unlikely (literal_count
< literal
))
4840 elf_uncompress_failed ();
4844 literal_count
-= literal
;
4846 /* Often LITERAL is small, so handle small cases quickly. */
4851 ATTRIBUTE_FALLTHROUGH
;
4854 ATTRIBUTE_FALLTHROUGH
;
4857 ATTRIBUTE_FALLTHROUGH
;
4860 ATTRIBUTE_FALLTHROUGH
;
4863 ATTRIBUTE_FALLTHROUGH
;
4866 ATTRIBUTE_FALLTHROUGH
;
4869 ATTRIBUTE_FALLTHROUGH
;
4878 if (unlikely ((size_t)(plit
- pout
) < literal
))
4883 while (literal
> move
)
4885 memcpy (pout
, plit
, move
);
4892 memcpy (pout
, plit
, literal
);
4899 /* Copy MATCH bytes from the decoded output at OFFSET. */
4901 if (unlikely ((size_t)(poutend
- pout
) < match
))
4903 elf_uncompress_failed ();
4907 if (unlikely ((size_t)(pout
- poutstart
) < offset
))
4909 elf_uncompress_failed ();
4913 if (offset
>= match
)
4915 memcpy (pout
, pout
- offset
, match
);
4924 copy
= match
< offset
? match
: offset
;
4925 memcpy (pout
, pout
- offset
, copy
);
4932 if (unlikely (seq
>= seq_count
))
4934 /* Copy remaining literals. */
4935 if (literal_count
> 0 && plit
!= pout
)
4937 if (unlikely ((size_t)(poutend
- pout
)
4940 elf_uncompress_failed ();
4944 if ((size_t)(plit
- pout
) < literal_count
)
4949 while (literal_count
> move
)
4951 memcpy (pout
, plit
, move
);
4954 literal_count
-= move
;
4958 memcpy (pout
, plit
, literal_count
);
4961 pout
+= literal_count
;
4973 elf_uncompress_failed ();
4980 if (unlikely (pin
+ 4 > pinend
))
4982 elf_uncompress_failed ();
4986 /* We don't currently verify the checksum. Currently running GNU ld with
4987 --compress-debug-sections=zstd does not seem to generate a
4995 elf_uncompress_failed ();
/* The zdebug_table work buffer must be large enough for whichever of
   the zlib or zstd decoders is used, so take the larger of the two
   table sizes.  */
#define ZDEBUG_TABLE_SIZE \
  (ZLIB_TABLE_SIZE > ZSTD_TABLE_SIZE ? ZLIB_TABLE_SIZE : ZSTD_TABLE_SIZE)
5005 /* Uncompress the old compressed debug format, the one emitted by
5006 --compress-debug-sections=zlib-gnu. The compressed data is in
5007 COMPRESSED / COMPRESSED_SIZE, and the function writes to
5008 *UNCOMPRESSED / *UNCOMPRESSED_SIZE. ZDEBUG_TABLE is work space to
5009 hold Huffman tables. Returns 0 on error, 1 on successful
5010 decompression or if something goes wrong. In general we try to
5011 carry on, by returning 1, even if we can't decompress. */
5014 elf_uncompress_zdebug (struct backtrace_state
*state
,
5015 const unsigned char *compressed
, size_t compressed_size
,
5016 uint16_t *zdebug_table
,
5017 backtrace_error_callback error_callback
, void *data
,
5018 unsigned char **uncompressed
, size_t *uncompressed_size
)
5024 *uncompressed
= NULL
;
5025 *uncompressed_size
= 0;
5027 /* The format starts with the four bytes ZLIB, followed by the 8
5028 byte length of the uncompressed data in big-endian order,
5029 followed by a zlib stream. */
5031 if (compressed_size
< 12 || memcmp (compressed
, "ZLIB", 4) != 0)
5035 for (i
= 0; i
< 8; i
++)
5036 sz
= (sz
<< 8) | compressed
[i
+ 4];
5038 if (*uncompressed
!= NULL
&& *uncompressed_size
>= sz
)
5042 po
= (unsigned char *) backtrace_alloc (state
, sz
, error_callback
, data
);
5047 if (!elf_zlib_inflate_and_verify (compressed
+ 12, compressed_size
- 12,
5048 zdebug_table
, po
, sz
))
5052 *uncompressed_size
= sz
;
5057 /* Uncompress the new compressed debug format, the official standard
5058 ELF approach emitted by --compress-debug-sections=zlib-gabi. The
5059 compressed data is in COMPRESSED / COMPRESSED_SIZE, and the
5060 function writes to *UNCOMPRESSED / *UNCOMPRESSED_SIZE.
5061 ZDEBUG_TABLE is work space as for elf_uncompress_zdebug. Returns 0
5062 on error, 1 on successful decompression or if something goes wrong.
5063 In general we try to carry on, by returning 1, even if we can't
5067 elf_uncompress_chdr (struct backtrace_state
*state
,
5068 const unsigned char *compressed
, size_t compressed_size
,
5069 uint16_t *zdebug_table
,
5070 backtrace_error_callback error_callback
, void *data
,
5071 unsigned char **uncompressed
, size_t *uncompressed_size
)
5078 *uncompressed
= NULL
;
5079 *uncompressed_size
= 0;
5081 /* The format starts with an ELF compression header. */
5082 if (compressed_size
< sizeof (b_elf_chdr
))
5085 /* The lld linker can misalign a compressed section, so we can't safely read
5086 the fields directly as we can for other ELF sections. See
5087 https://github.com/ianlancetaylor/libbacktrace/pull/120. */
5088 memcpy (&chdr
, compressed
, sizeof (b_elf_chdr
));
5092 if (*uncompressed
!= NULL
&& *uncompressed_size
>= chdr
.ch_size
)
5096 alc_len
= chdr
.ch_size
;
5097 alc
= backtrace_alloc (state
, alc_len
, error_callback
, data
);
5100 po
= (unsigned char *) alc
;
5103 switch (chdr
.ch_type
)
5105 case ELFCOMPRESS_ZLIB
:
5106 if (!elf_zlib_inflate_and_verify (compressed
+ sizeof (b_elf_chdr
),
5107 compressed_size
- sizeof (b_elf_chdr
),
5108 zdebug_table
, po
, chdr
.ch_size
))
5112 case ELFCOMPRESS_ZSTD
:
5113 if (!elf_zstd_decompress (compressed
+ sizeof (b_elf_chdr
),
5114 compressed_size
- sizeof (b_elf_chdr
),
5115 (unsigned char *)zdebug_table
, po
,
5121 /* Unsupported compression algorithm. */
5126 *uncompressed_size
= chdr
.ch_size
;
5131 if (alc
!= NULL
&& alc_len
> 0)
5132 backtrace_free (state
, alc
, alc_len
, error_callback
, data
);
5136 /* This function is a hook for testing the zlib support. It is only
5140 backtrace_uncompress_zdebug (struct backtrace_state
*state
,
5141 const unsigned char *compressed
,
5142 size_t compressed_size
,
5143 backtrace_error_callback error_callback
,
5144 void *data
, unsigned char **uncompressed
,
5145 size_t *uncompressed_size
)
5147 uint16_t *zdebug_table
;
5150 zdebug_table
= ((uint16_t *) backtrace_alloc (state
, ZDEBUG_TABLE_SIZE
,
5151 error_callback
, data
));
5152 if (zdebug_table
== NULL
)
5154 ret
= elf_uncompress_zdebug (state
, compressed
, compressed_size
,
5155 zdebug_table
, error_callback
, data
,
5156 uncompressed
, uncompressed_size
);
5157 backtrace_free (state
, zdebug_table
, ZDEBUG_TABLE_SIZE
,
5158 error_callback
, data
);
5162 /* This function is a hook for testing the zstd support. It is only used by
5166 backtrace_uncompress_zstd (struct backtrace_state
*state
,
5167 const unsigned char *compressed
,
5168 size_t compressed_size
,
5169 backtrace_error_callback error_callback
,
5170 void *data
, unsigned char *uncompressed
,
5171 size_t uncompressed_size
)
5173 unsigned char *zdebug_table
;
5176 zdebug_table
= ((unsigned char *) backtrace_alloc (state
, ZDEBUG_TABLE_SIZE
,
5177 error_callback
, data
));
5178 if (zdebug_table
== NULL
)
5180 ret
= elf_zstd_decompress (compressed
, compressed_size
,
5181 zdebug_table
, uncompressed
, uncompressed_size
);
5182 backtrace_free (state
, zdebug_table
, ZDEBUG_TABLE_SIZE
,
5183 error_callback
, data
);
/* Number of LZMA states.  */
#define LZMA_STATES (12)

/* Number of LZMA position states.  The pb value of the property byte
   is the number of bits to include in these states, and the maximum
   value of pb is 4.  */
#define LZMA_POS_STATES (16)

/* Number of LZMA distance states.  These are used match distances
   with a short match length: up to 4 bytes.  */
#define LZMA_DIST_STATES (4)

/* Number of LZMA distance slots.  LZMA uses six bits to encode larger
   match lengths, so 1 << 6 possible probabilities.  */
#define LZMA_DIST_SLOTS (64)

/* LZMA distances 0 to 3 are encoded directly, larger values use a
   probability model.  */
#define LZMA_DIST_MODEL_START (4)

/* The LZMA probability model ends at 14.  */
#define LZMA_DIST_MODEL_END (14)

/* LZMA distance slots for distances less than 127.  */
#define LZMA_FULL_DISTANCES (128)

/* LZMA uses four alignment bits.  */
#define LZMA_ALIGN_SIZE (16)

/* LZMA match length is encoded with 4, 5, or 10 bits, some of which
   are already known.  */
#define LZMA_LEN_LOW_SYMBOLS (8)
#define LZMA_LEN_MID_SYMBOLS (8)
#define LZMA_LEN_HIGH_SYMBOLS (256)

/* LZMA literal encoding.  */
#define LZMA_LITERAL_CODERS_MAX (16)
#define LZMA_LITERAL_CODER_SIZE (0x300)

/* LZMA is based on a large set of probabilities, each managed
   independently.  Each probability is an 11 bit number that we store
   in a uint16_t.  We use a single large array of probabilities.  */

/* Lengths of entries in the LZMA probabilities array.  The names used
   here are copied from the Linux kernel implementation.  */

#define LZMA_PROB_IS_MATCH_LEN (LZMA_STATES * LZMA_POS_STATES)
#define LZMA_PROB_IS_REP_LEN LZMA_STATES
#define LZMA_PROB_IS_REP0_LEN LZMA_STATES
#define LZMA_PROB_IS_REP1_LEN LZMA_STATES
#define LZMA_PROB_IS_REP2_LEN LZMA_STATES
#define LZMA_PROB_IS_REP0_LONG_LEN (LZMA_STATES * LZMA_POS_STATES)
#define LZMA_PROB_DIST_SLOT_LEN (LZMA_DIST_STATES * LZMA_DIST_SLOTS)
#define LZMA_PROB_DIST_SPECIAL_LEN (LZMA_FULL_DISTANCES - LZMA_DIST_MODEL_END)
#define LZMA_PROB_DIST_ALIGN_LEN LZMA_ALIGN_SIZE
#define LZMA_PROB_MATCH_LEN_CHOICE_LEN 1
#define LZMA_PROB_MATCH_LEN_CHOICE2_LEN 1
#define LZMA_PROB_MATCH_LEN_LOW_LEN (LZMA_POS_STATES * LZMA_LEN_LOW_SYMBOLS)
#define LZMA_PROB_MATCH_LEN_MID_LEN (LZMA_POS_STATES * LZMA_LEN_MID_SYMBOLS)
#define LZMA_PROB_MATCH_LEN_HIGH_LEN LZMA_LEN_HIGH_SYMBOLS
#define LZMA_PROB_REP_LEN_CHOICE_LEN 1
#define LZMA_PROB_REP_LEN_CHOICE2_LEN 1
#define LZMA_PROB_REP_LEN_LOW_LEN (LZMA_POS_STATES * LZMA_LEN_LOW_SYMBOLS)
#define LZMA_PROB_REP_LEN_MID_LEN (LZMA_POS_STATES * LZMA_LEN_MID_SYMBOLS)
#define LZMA_PROB_REP_LEN_HIGH_LEN LZMA_LEN_HIGH_SYMBOLS
#define LZMA_PROB_LITERAL_LEN \
  (LZMA_LITERAL_CODERS_MAX * LZMA_LITERAL_CODER_SIZE)

/* Offsets into the LZMA probabilities array.  This is mechanically
   generated from the above lengths.  */

#define LZMA_PROB_IS_MATCH_OFFSET 0
#define LZMA_PROB_IS_REP_OFFSET \
  (LZMA_PROB_IS_MATCH_OFFSET + LZMA_PROB_IS_MATCH_LEN)
#define LZMA_PROB_IS_REP0_OFFSET \
  (LZMA_PROB_IS_REP_OFFSET + LZMA_PROB_IS_REP_LEN)
#define LZMA_PROB_IS_REP1_OFFSET \
  (LZMA_PROB_IS_REP0_OFFSET + LZMA_PROB_IS_REP0_LEN)
#define LZMA_PROB_IS_REP2_OFFSET \
  (LZMA_PROB_IS_REP1_OFFSET + LZMA_PROB_IS_REP1_LEN)
#define LZMA_PROB_IS_REP0_LONG_OFFSET \
  (LZMA_PROB_IS_REP2_OFFSET + LZMA_PROB_IS_REP2_LEN)
#define LZMA_PROB_DIST_SLOT_OFFSET \
  (LZMA_PROB_IS_REP0_LONG_OFFSET + LZMA_PROB_IS_REP0_LONG_LEN)
#define LZMA_PROB_DIST_SPECIAL_OFFSET \
  (LZMA_PROB_DIST_SLOT_OFFSET + LZMA_PROB_DIST_SLOT_LEN)
#define LZMA_PROB_DIST_ALIGN_OFFSET \
  (LZMA_PROB_DIST_SPECIAL_OFFSET + LZMA_PROB_DIST_SPECIAL_LEN)
#define LZMA_PROB_MATCH_LEN_CHOICE_OFFSET \
  (LZMA_PROB_DIST_ALIGN_OFFSET + LZMA_PROB_DIST_ALIGN_LEN)
#define LZMA_PROB_MATCH_LEN_CHOICE2_OFFSET \
  (LZMA_PROB_MATCH_LEN_CHOICE_OFFSET + LZMA_PROB_MATCH_LEN_CHOICE_LEN)
#define LZMA_PROB_MATCH_LEN_LOW_OFFSET \
  (LZMA_PROB_MATCH_LEN_CHOICE2_OFFSET + LZMA_PROB_MATCH_LEN_CHOICE2_LEN)
#define LZMA_PROB_MATCH_LEN_MID_OFFSET \
  (LZMA_PROB_MATCH_LEN_LOW_OFFSET + LZMA_PROB_MATCH_LEN_LOW_LEN)
#define LZMA_PROB_MATCH_LEN_HIGH_OFFSET \
  (LZMA_PROB_MATCH_LEN_MID_OFFSET + LZMA_PROB_MATCH_LEN_MID_LEN)
#define LZMA_PROB_REP_LEN_CHOICE_OFFSET \
  (LZMA_PROB_MATCH_LEN_HIGH_OFFSET + LZMA_PROB_MATCH_LEN_HIGH_LEN)
#define LZMA_PROB_REP_LEN_CHOICE2_OFFSET \
  (LZMA_PROB_REP_LEN_CHOICE_OFFSET + LZMA_PROB_REP_LEN_CHOICE_LEN)
#define LZMA_PROB_REP_LEN_LOW_OFFSET \
  (LZMA_PROB_REP_LEN_CHOICE2_OFFSET + LZMA_PROB_REP_LEN_CHOICE2_LEN)
#define LZMA_PROB_REP_LEN_MID_OFFSET \
  (LZMA_PROB_REP_LEN_LOW_OFFSET + LZMA_PROB_REP_LEN_LOW_LEN)
#define LZMA_PROB_REP_LEN_HIGH_OFFSET \
  (LZMA_PROB_REP_LEN_MID_OFFSET + LZMA_PROB_REP_LEN_MID_LEN)
#define LZMA_PROB_LITERAL_OFFSET \
  (LZMA_PROB_REP_LEN_HIGH_OFFSET + LZMA_PROB_REP_LEN_HIGH_LEN)

#define LZMA_PROB_TOTAL_COUNT \
  (LZMA_PROB_LITERAL_OFFSET + LZMA_PROB_LITERAL_LEN)

/* Check that the number of LZMA probabilities is the same as the
   Linux kernel implementation.  */

#if LZMA_PROB_TOTAL_COUNT != 1846 + (1 << 4) * 0x300
#error Wrong number of LZMA probabilities
#endif

/* Expressions for the offset in the LZMA probabilities array of a
   specific probability.  */

#define LZMA_IS_MATCH(state, pos) \
  (LZMA_PROB_IS_MATCH_OFFSET + (state) * LZMA_POS_STATES + (pos))
#define LZMA_IS_REP(state) \
  (LZMA_PROB_IS_REP_OFFSET + (state))
#define LZMA_IS_REP0(state) \
  (LZMA_PROB_IS_REP0_OFFSET + (state))
#define LZMA_IS_REP1(state) \
  (LZMA_PROB_IS_REP1_OFFSET + (state))
#define LZMA_IS_REP2(state) \
  (LZMA_PROB_IS_REP2_OFFSET + (state))
#define LZMA_IS_REP0_LONG(state, pos) \
  (LZMA_PROB_IS_REP0_LONG_OFFSET + (state) * LZMA_POS_STATES + (pos))
#define LZMA_DIST_SLOT(dist, slot) \
  (LZMA_PROB_DIST_SLOT_OFFSET + (dist) * LZMA_DIST_SLOTS + (slot))
#define LZMA_DIST_SPECIAL(dist) \
  (LZMA_PROB_DIST_SPECIAL_OFFSET + (dist))
#define LZMA_DIST_ALIGN(dist) \
  (LZMA_PROB_DIST_ALIGN_OFFSET + (dist))
#define LZMA_MATCH_LEN_CHOICE \
  LZMA_PROB_MATCH_LEN_CHOICE_OFFSET
#define LZMA_MATCH_LEN_CHOICE2 \
  LZMA_PROB_MATCH_LEN_CHOICE2_OFFSET
#define LZMA_MATCH_LEN_LOW(pos, sym) \
  (LZMA_PROB_MATCH_LEN_LOW_OFFSET + (pos) * LZMA_LEN_LOW_SYMBOLS + (sym))
#define LZMA_MATCH_LEN_MID(pos, sym) \
  (LZMA_PROB_MATCH_LEN_MID_OFFSET + (pos) * LZMA_LEN_MID_SYMBOLS + (sym))
#define LZMA_MATCH_LEN_HIGH(sym) \
  (LZMA_PROB_MATCH_LEN_HIGH_OFFSET + (sym))
#define LZMA_REP_LEN_CHOICE \
  LZMA_PROB_REP_LEN_CHOICE_OFFSET
#define LZMA_REP_LEN_CHOICE2 \
  LZMA_PROB_REP_LEN_CHOICE2_OFFSET
#define LZMA_REP_LEN_LOW(pos, sym) \
  (LZMA_PROB_REP_LEN_LOW_OFFSET + (pos) * LZMA_LEN_LOW_SYMBOLS + (sym))
#define LZMA_REP_LEN_MID(pos, sym) \
  (LZMA_PROB_REP_LEN_MID_OFFSET + (pos) * LZMA_LEN_MID_SYMBOLS + (sym))
#define LZMA_REP_LEN_HIGH(sym) \
  (LZMA_PROB_REP_LEN_HIGH_OFFSET + (sym))
#define LZMA_LITERAL(code, size) \
  (LZMA_PROB_LITERAL_OFFSET + (code) * LZMA_LITERAL_CODER_SIZE + (size))
5352 /* Read an LZMA varint from BUF, reading and updating *POFFSET,
5353 setting *VAL. Returns 0 on error, 1 on success. */
5356 elf_lzma_varint (const unsigned char *compressed
, size_t compressed_size
,
5357 size_t *poffset
, uint64_t *val
)
5369 if (unlikely (off
>= compressed_size
))
5371 elf_uncompress_failed ();
5374 b
= compressed
[off
];
5375 v
|= (b
& 0x7f) << (i
* 7);
5377 if ((b
& 0x80) == 0)
5384 if (unlikely (i
>= 9))
5386 elf_uncompress_failed ();
5392 /* Normalize the LZMA range decoder, pulling in an extra input byte if
5396 elf_lzma_range_normalize (const unsigned char *compressed
,
5397 size_t compressed_size
, size_t *poffset
,
5398 uint32_t *prange
, uint32_t *pcode
)
5400 if (*prange
< (1U << 24))
5402 if (unlikely (*poffset
>= compressed_size
))
5404 /* We assume this will be caught elsewhere. */
5405 elf_uncompress_failed ();
5410 *pcode
+= compressed
[*poffset
];
5415 /* Read and return a single bit from the LZMA stream, reading and
5416 updating *PROB. Each bit comes from the range coder. */
5419 elf_lzma_bit (const unsigned char *compressed
, size_t compressed_size
,
5420 uint16_t *prob
, size_t *poffset
, uint32_t *prange
,
5425 elf_lzma_range_normalize (compressed
, compressed_size
, poffset
,
5427 bound
= (*prange
>> 11) * (uint32_t) *prob
;
5431 *prob
+= ((1U << 11) - *prob
) >> 5;
5438 *prob
-= *prob
>> 5;
5443 /* Read an integer of size BITS from the LZMA stream, most significant
5444 bit first. The bits are predicted using PROBS. */
5447 elf_lzma_integer (const unsigned char *compressed
, size_t compressed_size
,
5448 uint16_t *probs
, uint32_t bits
, size_t *poffset
,
5449 uint32_t *prange
, uint32_t *pcode
)
5455 for (i
= 0; i
< bits
; i
++)
5459 bit
= elf_lzma_bit (compressed
, compressed_size
, probs
+ sym
, poffset
,
5464 return sym
- (1 << bits
);
5467 /* Read an integer of size BITS from the LZMA stream, least
5468 significant bit first. The bits are predicted using PROBS. */
5471 elf_lzma_reverse_integer (const unsigned char *compressed
,
5472 size_t compressed_size
, uint16_t *probs
,
5473 uint32_t bits
, size_t *poffset
, uint32_t *prange
,
5482 for (i
= 0; i
< bits
; i
++)
5486 bit
= elf_lzma_bit (compressed
, compressed_size
, probs
+ sym
, poffset
,
5495 /* Read a length from the LZMA stream. IS_REP picks either LZMA_MATCH
5496 or LZMA_REP probabilities. */
5499 elf_lzma_len (const unsigned char *compressed
, size_t compressed_size
,
5500 uint16_t *probs
, int is_rep
, unsigned int pos_state
,
5501 size_t *poffset
, uint32_t *prange
, uint32_t *pcode
)
5503 uint16_t *probs_choice
;
5504 uint16_t *probs_sym
;
5508 probs_choice
= probs
+ (is_rep
5509 ? LZMA_REP_LEN_CHOICE
5510 : LZMA_MATCH_LEN_CHOICE
);
5511 if (elf_lzma_bit (compressed
, compressed_size
, probs_choice
, poffset
,
5514 probs_choice
= probs
+ (is_rep
5515 ? LZMA_REP_LEN_CHOICE2
5516 : LZMA_MATCH_LEN_CHOICE2
);
5517 if (elf_lzma_bit (compressed
, compressed_size
, probs_choice
,
5518 poffset
, prange
, pcode
))
5520 probs_sym
= probs
+ (is_rep
5521 ? LZMA_REP_LEN_HIGH (0)
5522 : LZMA_MATCH_LEN_HIGH (0));
5528 probs_sym
= probs
+ (is_rep
5529 ? LZMA_REP_LEN_MID (pos_state
, 0)
5530 : LZMA_MATCH_LEN_MID (pos_state
, 0));
5537 probs_sym
= probs
+ (is_rep
5538 ? LZMA_REP_LEN_LOW (pos_state
, 0)
5539 : LZMA_MATCH_LEN_LOW (pos_state
, 0));
5544 len
+= elf_lzma_integer (compressed
, compressed_size
, probs_sym
, bits
,
5545 poffset
, prange
, pcode
);
5549 /* Uncompress one LZMA block from a minidebug file. The compressed
5550 data is at COMPRESSED + *POFFSET. Update *POFFSET. Store the data
5551 into the memory at UNCOMPRESSED, size UNCOMPRESSED_SIZE. CHECK is
5552 the stream flag from the xz header. Return 1 on successful
5556 elf_uncompress_lzma_block (const unsigned char *compressed
,
5557 size_t compressed_size
, unsigned char check
,
5558 uint16_t *probs
, unsigned char *uncompressed
,
5559 size_t uncompressed_size
, size_t *poffset
)
5562 size_t block_header_offset
;
5563 size_t block_header_size
;
5564 unsigned char block_flags
;
5565 uint64_t header_compressed_size
;
5566 uint64_t header_uncompressed_size
;
5567 unsigned char lzma2_properties
;
5569 uint32_t computed_crc
;
5570 uint32_t stream_crc
;
5571 size_t uncompressed_offset
;
5572 size_t dict_start_offset
;
5582 block_header_offset
= off
;
5584 /* Block header size is a single byte. */
5585 if (unlikely (off
>= compressed_size
))
5587 elf_uncompress_failed ();
5590 block_header_size
= (compressed
[off
] + 1) * 4;
5591 if (unlikely (off
+ block_header_size
> compressed_size
))
5593 elf_uncompress_failed ();
5598 block_flags
= compressed
[off
+ 1];
5599 if (unlikely ((block_flags
& 0x3c) != 0))
5601 elf_uncompress_failed ();
5607 /* Optional compressed size. */
5608 header_compressed_size
= 0;
5609 if ((block_flags
& 0x40) != 0)
5612 if (!elf_lzma_varint (compressed
, compressed_size
, poffset
,
5613 &header_compressed_size
))
5618 /* Optional uncompressed size. */
5619 header_uncompressed_size
= 0;
5620 if ((block_flags
& 0x80) != 0)
5623 if (!elf_lzma_varint (compressed
, compressed_size
, poffset
,
5624 &header_uncompressed_size
))
5629 /* The recipe for creating a minidebug file is to run the xz program
5630 with no arguments, so we expect exactly one filter: lzma2. */
5632 if (unlikely ((block_flags
& 0x3) != 0))
5634 elf_uncompress_failed ();
5638 if (unlikely (off
+ 2 >= block_header_offset
+ block_header_size
))
5640 elf_uncompress_failed ();
5644 /* The filter ID for LZMA2 is 0x21. */
5645 if (unlikely (compressed
[off
] != 0x21))
5647 elf_uncompress_failed ();
5652 /* The size of the filter properties for LZMA2 is 1. */
5653 if (unlikely (compressed
[off
] != 1))
5655 elf_uncompress_failed ();
5660 lzma2_properties
= compressed
[off
];
5663 if (unlikely (lzma2_properties
> 40))
5665 elf_uncompress_failed ();
5669 /* The properties describe the dictionary size, but we don't care
5672 /* Skip to just before CRC, verifying zero bytes in between. */
5673 crc_offset
= block_header_offset
+ block_header_size
- 4;
5674 if (unlikely (crc_offset
+ 4 > compressed_size
))
5676 elf_uncompress_failed ();
5679 for (; off
< crc_offset
; off
++)
5681 if (compressed
[off
] != 0)
5683 elf_uncompress_failed ();
5688 /* Block header CRC. */
5689 computed_crc
= elf_crc32 (0, compressed
+ block_header_offset
,
5690 block_header_size
- 4);
5691 stream_crc
= ((uint32_t)compressed
[off
]
5692 | ((uint32_t)compressed
[off
+ 1] << 8)
5693 | ((uint32_t)compressed
[off
+ 2] << 16)
5694 | ((uint32_t)compressed
[off
+ 3] << 24));
5695 if (unlikely (computed_crc
!= stream_crc
))
5697 elf_uncompress_failed ();
5702 /* Read a sequence of LZMA2 packets. */
5704 uncompressed_offset
= 0;
5705 dict_start_offset
= 0;
5710 while (off
< compressed_size
)
5712 unsigned char control
;
5717 control
= compressed
[off
];
5719 if (unlikely (control
== 0))
5721 /* End of packets. */
5725 if (control
== 1 || control
>= 0xe0)
5727 /* Reset dictionary to empty. */
5728 dict_start_offset
= uncompressed_offset
;
5735 /* The only valid values here are 1 or 2. A 1 means to
5736 reset the dictionary (done above). Then we see an
5737 uncompressed chunk. */
5739 if (unlikely (control
> 2))
5741 elf_uncompress_failed ();
5745 /* An uncompressed chunk is a two byte size followed by
5748 if (unlikely (off
+ 2 > compressed_size
))
5750 elf_uncompress_failed ();
5754 chunk_size
= compressed
[off
] << 8;
5755 chunk_size
+= compressed
[off
+ 1];
5760 if (unlikely (off
+ chunk_size
> compressed_size
))
5762 elf_uncompress_failed ();
5765 if (unlikely (uncompressed_offset
+ chunk_size
> uncompressed_size
))
5767 elf_uncompress_failed ();
5771 memcpy (uncompressed
+ uncompressed_offset
, compressed
+ off
,
5773 uncompressed_offset
+= chunk_size
;
5778 size_t uncompressed_chunk_start
;
5779 size_t uncompressed_chunk_size
;
5780 size_t compressed_chunk_size
;
5783 /* An LZMA chunk. This starts with an uncompressed size and
5784 a compressed size. */
5786 if (unlikely (off
+ 4 >= compressed_size
))
5788 elf_uncompress_failed ();
5792 uncompressed_chunk_start
= uncompressed_offset
;
5794 uncompressed_chunk_size
= (control
& 0x1f) << 16;
5795 uncompressed_chunk_size
+= compressed
[off
] << 8;
5796 uncompressed_chunk_size
+= compressed
[off
+ 1];
5797 ++uncompressed_chunk_size
;
5799 compressed_chunk_size
= compressed
[off
+ 2] << 8;
5800 compressed_chunk_size
+= compressed
[off
+ 3];
5801 ++compressed_chunk_size
;
5805 /* Bit 7 (0x80) is set.
5806 Bits 6 and 5 (0x40 and 0x20) are as follows:
5807 0: don't reset anything
5809 2: reset state, read properties
5810 3: reset state, read properties, reset dictionary (done above) */
5812 if (control
>= 0xc0)
5814 unsigned char props
;
5816 /* Bit 6 is set, read properties. */
5818 if (unlikely (off
>= compressed_size
))
5820 elf_uncompress_failed ();
5823 props
= compressed
[off
];
5825 if (unlikely (props
> (4 * 5 + 4) * 9 + 8))
5827 elf_uncompress_failed ();
5831 while (props
>= 9 * 5)
5843 if (unlikely (lc
+ lp
> 4))
5845 elf_uncompress_failed ();
5850 if (control
>= 0xa0)
5854 /* Bit 5 or 6 is set, reset LZMA state. */
5857 memset (&dist
, 0, sizeof dist
);
5858 for (i
= 0; i
< LZMA_PROB_TOTAL_COUNT
; i
++)
5864 /* Read the range code. */
5866 if (unlikely (off
+ 5 > compressed_size
))
5868 elf_uncompress_failed ();
5872 /* The byte at compressed[off] is ignored for some
5875 code
= ((compressed
[off
+ 1] << 24)
5876 + (compressed
[off
+ 2] << 16)
5877 + (compressed
[off
+ 3] << 8)
5878 + compressed
[off
+ 4]);
5881 /* This is the main LZMA decode loop. */
5883 limit
= off
+ compressed_chunk_size
;
5885 while (*poffset
< limit
)
5887 unsigned int pos_state
;
5889 if (unlikely (uncompressed_offset
5890 == (uncompressed_chunk_start
5891 + uncompressed_chunk_size
)))
5893 /* We've decompressed all the expected bytes. */
5897 pos_state
= ((uncompressed_offset
- dict_start_offset
)
5900 if (elf_lzma_bit (compressed
, compressed_size
,
5901 probs
+ LZMA_IS_MATCH (lstate
, pos_state
),
5902 poffset
, &range
, &code
))
5906 if (elf_lzma_bit (compressed
, compressed_size
,
5907 probs
+ LZMA_IS_REP (lstate
),
5908 poffset
, &range
, &code
))
5913 /* Repeated match. */
5916 if (elf_lzma_bit (compressed
, compressed_size
,
5917 probs
+ LZMA_IS_REP0 (lstate
),
5918 poffset
, &range
, &code
))
5920 if (elf_lzma_bit (compressed
, compressed_size
,
5921 probs
+ LZMA_IS_REP1 (lstate
),
5922 poffset
, &range
, &code
))
5924 if (elf_lzma_bit (compressed
, compressed_size
,
5925 probs
+ LZMA_IS_REP2 (lstate
),
5926 poffset
, &range
, &code
))
5928 next_dist
= dist
[3];
5933 next_dist
= dist
[2];
5939 next_dist
= dist
[1];
5943 dist
[0] = next_dist
;
5947 if (!elf_lzma_bit (compressed
, compressed_size
,
5949 + LZMA_IS_REP0_LONG (lstate
,
5951 poffset
, &range
, &code
))
5956 lstate
= short_rep
? 9 : 8;
5963 len
= elf_lzma_len (compressed
, compressed_size
,
5964 probs
, 1, pos_state
, poffset
,
5969 uint32_t dist_state
;
5971 uint16_t *probs_dist
;
5982 len
= elf_lzma_len (compressed
, compressed_size
,
5983 probs
, 0, pos_state
, poffset
,
5987 dist_state
= len
- 2;
5990 probs_dist
= probs
+ LZMA_DIST_SLOT (dist_state
, 0);
5991 dist_slot
= elf_lzma_integer (compressed
,
5996 if (dist_slot
< LZMA_DIST_MODEL_START
)
5997 dist
[0] = dist_slot
;
6002 limit
= (dist_slot
>> 1) - 1;
6003 dist
[0] = 2 + (dist_slot
& 1);
6004 if (dist_slot
< LZMA_DIST_MODEL_END
)
6008 + LZMA_DIST_SPECIAL(dist
[0]
6012 elf_lzma_reverse_integer (compressed
,
6024 for (i
= 0; i
< limit
- 4; i
++)
6028 elf_lzma_range_normalize (compressed
,
6034 mask
= -(code
>> 31);
6035 code
+= range
& mask
;
6040 probs_dist
= probs
+ LZMA_DIST_ALIGN (0);
6042 elf_lzma_reverse_integer (compressed
,
6052 if (unlikely (uncompressed_offset
6053 - dict_start_offset
< dist
[0] + 1))
6055 elf_uncompress_failed ();
6058 if (unlikely (uncompressed_offset
+ len
> uncompressed_size
))
6060 elf_uncompress_failed ();
6066 /* A common case, meaning repeat the last
6067 character LEN times. */
6068 memset (uncompressed
+ uncompressed_offset
,
6069 uncompressed
[uncompressed_offset
- 1],
6071 uncompressed_offset
+= len
;
6073 else if (dist
[0] + 1 >= len
)
6075 memcpy (uncompressed
+ uncompressed_offset
,
6076 uncompressed
+ uncompressed_offset
- dist
[0] - 1,
6078 uncompressed_offset
+= len
;
6086 copy
= len
< dist
[0] + 1 ? len
: dist
[0] + 1;
6087 memcpy (uncompressed
+ uncompressed_offset
,
6088 (uncompressed
+ uncompressed_offset
6092 uncompressed_offset
+= copy
;
6101 uint16_t *lit_probs
;
6104 /* Literal value. */
6106 if (uncompressed_offset
> 0)
6107 prev
= uncompressed
[uncompressed_offset
- 1];
6110 low
= prev
>> (8 - lc
);
6111 high
= (((uncompressed_offset
- dict_start_offset
)
6114 lit_probs
= probs
+ LZMA_LITERAL (low
+ high
, 0);
6116 sym
= elf_lzma_integer (compressed
, compressed_size
,
6117 lit_probs
, 8, poffset
, &range
,
6123 unsigned int match_bit
;
6127 if (uncompressed_offset
>= dist
[0] + 1)
6128 match
= uncompressed
[uncompressed_offset
- dist
[0] - 1];
6135 match_bit
= match
& bit
;
6137 idx
= bit
+ match_bit
+ sym
;
6139 if (elf_lzma_bit (compressed
, compressed_size
,
6140 lit_probs
+ idx
, poffset
,
6151 while (sym
< 0x100);
6154 if (unlikely (uncompressed_offset
>= uncompressed_size
))
6156 elf_uncompress_failed ();
6160 uncompressed
[uncompressed_offset
] = (unsigned char) sym
;
6161 ++uncompressed_offset
;
6164 else if (lstate
<= 9)
6171 elf_lzma_range_normalize (compressed
, compressed_size
, poffset
,
6178 /* We have reached the end of the block. Pad to four byte
6180 off
= (off
+ 3) &~ (size_t) 3;
6181 if (unlikely (off
> compressed_size
))
6183 elf_uncompress_failed ();
6195 if (unlikely (off
+ 4 > compressed_size
))
6197 elf_uncompress_failed ();
6200 computed_crc
= elf_crc32 (0, uncompressed
, uncompressed_offset
);
6201 stream_crc
= (compressed
[off
]
6202 | (compressed
[off
+ 1] << 8)
6203 | (compressed
[off
+ 2] << 16)
6204 | (compressed
[off
+ 3] << 24));
6205 if (computed_crc
!= stream_crc
)
6207 elf_uncompress_failed ();
6214 /* CRC64. We don't bother computing a CRC64 checksum. */
6215 if (unlikely (off
+ 8 > compressed_size
))
6217 elf_uncompress_failed ();
6224 /* SHA. We don't bother computing a SHA checksum. */
6225 if (unlikely (off
+ 32 > compressed_size
))
6227 elf_uncompress_failed ();
6234 elf_uncompress_failed ();
6243 /* Uncompress LZMA data found in a minidebug file. The minidebug
6244 format is described at
6245 https://sourceware.org/gdb/current/onlinedocs/gdb/MiniDebugInfo.html.
6246 Returns 0 on error, 1 on successful decompression. For this
6247 function we return 0 on failure to decompress, as the calling code
6248 will carry on in that case. */
6251 elf_uncompress_lzma (struct backtrace_state
*state
,
6252 const unsigned char *compressed
, size_t compressed_size
,
6253 backtrace_error_callback error_callback
, void *data
,
6254 unsigned char **uncompressed
, size_t *uncompressed_size
)
6258 unsigned char check
;
6259 uint32_t computed_crc
;
6260 uint32_t stream_crc
;
6263 size_t footer_offset
;
6264 size_t index_offset
;
6265 uint64_t index_compressed_size
;
6266 uint64_t index_uncompressed_size
;
6269 size_t compressed_block_size
;
6271 /* The format starts with a stream header and ends with a stream
6275 if (unlikely (compressed_size
< header_size
+ footer_size
))
6277 elf_uncompress_failed ();
6281 /* The stream header starts with a magic string. */
6282 if (unlikely (memcmp (compressed
, "\375" "7zXZ\0", 6) != 0))
6284 elf_uncompress_failed ();
6288 /* Next come stream flags. The first byte is zero, the second byte
6290 if (unlikely (compressed
[6] != 0))
6292 elf_uncompress_failed ();
6295 check
= compressed
[7];
6296 if (unlikely ((check
& 0xf8) != 0))
6298 elf_uncompress_failed ();
6302 /* Next comes a CRC of the stream flags. */
6303 computed_crc
= elf_crc32 (0, compressed
+ 6, 2);
6304 stream_crc
= ((uint32_t)compressed
[8]
6305 | ((uint32_t)compressed
[9] << 8)
6306 | ((uint32_t)compressed
[10] << 16)
6307 | ((uint32_t)compressed
[11] << 24));
6308 if (unlikely (computed_crc
!= stream_crc
))
6310 elf_uncompress_failed ();
6314 /* Now that we've parsed the header, parse the footer, so that we
6315 can get the uncompressed size. */
6317 /* The footer ends with two magic bytes. */
6319 offset
= compressed_size
;
6320 if (unlikely (memcmp (compressed
+ offset
- 2, "YZ", 2) != 0))
6322 elf_uncompress_failed ();
6327 /* Before that are the stream flags, which should be the same as the
6328 flags in the header. */
6329 if (unlikely (compressed
[offset
- 2] != 0
6330 || compressed
[offset
- 1] != check
))
6332 elf_uncompress_failed ();
6337 /* Before that is the size of the index field, which precedes the
6339 index_size
= (compressed
[offset
- 4]
6340 | (compressed
[offset
- 3] << 8)
6341 | (compressed
[offset
- 2] << 16)
6342 | (compressed
[offset
- 1] << 24));
6343 index_size
= (index_size
+ 1) * 4;
6346 /* Before that is a footer CRC. */
6347 computed_crc
= elf_crc32 (0, compressed
+ offset
, 6);
6348 stream_crc
= ((uint32_t)compressed
[offset
- 4]
6349 | ((uint32_t)compressed
[offset
- 3] << 8)
6350 | ((uint32_t)compressed
[offset
- 2] << 16)
6351 | ((uint32_t)compressed
[offset
- 1] << 24));
6352 if (unlikely (computed_crc
!= stream_crc
))
6354 elf_uncompress_failed ();
6359 /* The index comes just before the footer. */
6360 if (unlikely (offset
< index_size
+ header_size
))
6362 elf_uncompress_failed ();
6366 footer_offset
= offset
;
6367 offset
-= index_size
;
6368 index_offset
= offset
;
6370 /* The index starts with a zero byte. */
6371 if (unlikely (compressed
[offset
] != 0))
6373 elf_uncompress_failed ();
6378 /* Next is the number of blocks. We expect zero blocks for an empty
6379 stream, and otherwise a single block. */
6380 if (unlikely (compressed
[offset
] == 0))
6382 *uncompressed
= NULL
;
6383 *uncompressed_size
= 0;
6386 if (unlikely (compressed
[offset
] != 1))
6388 elf_uncompress_failed ();
6393 /* Next is the compressed size and the uncompressed size. */
6394 if (!elf_lzma_varint (compressed
, compressed_size
, &offset
,
6395 &index_compressed_size
))
6397 if (!elf_lzma_varint (compressed
, compressed_size
, &offset
,
6398 &index_uncompressed_size
))
6401 /* Pad to a four byte boundary. */
6402 offset
= (offset
+ 3) &~ (size_t) 3;
6404 /* Next is a CRC of the index. */
6405 computed_crc
= elf_crc32 (0, compressed
+ index_offset
,
6406 offset
- index_offset
);
6407 stream_crc
= ((uint32_t)compressed
[offset
]
6408 | ((uint32_t)compressed
[offset
+ 1] << 8)
6409 | ((uint32_t)compressed
[offset
+ 2] << 16)
6410 | ((uint32_t)compressed
[offset
+ 3] << 24));
6411 if (unlikely (computed_crc
!= stream_crc
))
6413 elf_uncompress_failed ();
6418 /* We should now be back at the footer. */
6419 if (unlikely (offset
!= footer_offset
))
6421 elf_uncompress_failed ();
6425 /* Allocate space to hold the uncompressed data. If we succeed in
6426 uncompressing the LZMA data, we never free this memory. */
6427 mem
= (unsigned char *) backtrace_alloc (state
, index_uncompressed_size
,
6428 error_callback
, data
);
6429 if (unlikely (mem
== NULL
))
6431 *uncompressed
= mem
;
6432 *uncompressed_size
= index_uncompressed_size
;
6434 /* Allocate space for probabilities. */
6435 probs
= ((uint16_t *)
6436 backtrace_alloc (state
,
6437 LZMA_PROB_TOTAL_COUNT
* sizeof (uint16_t),
6438 error_callback
, data
));
6439 if (unlikely (probs
== NULL
))
6441 backtrace_free (state
, mem
, index_uncompressed_size
, error_callback
,
6446 /* Uncompress the block, which follows the header. */
6448 if (!elf_uncompress_lzma_block (compressed
, compressed_size
, check
, probs
,
6449 mem
, index_uncompressed_size
, &offset
))
6451 backtrace_free (state
, mem
, index_uncompressed_size
, error_callback
,
6456 compressed_block_size
= offset
- 12;
6457 if (unlikely (compressed_block_size
6458 != ((index_compressed_size
+ 3) &~ (size_t) 3)))
6460 elf_uncompress_failed ();
6461 backtrace_free (state
, mem
, index_uncompressed_size
, error_callback
,
6466 offset
= (offset
+ 3) &~ (size_t) 3;
6467 if (unlikely (offset
!= index_offset
))
6469 elf_uncompress_failed ();
6470 backtrace_free (state
, mem
, index_uncompressed_size
, error_callback
,
6478 /* This function is a hook for testing the LZMA support. It is only
6482 backtrace_uncompress_lzma (struct backtrace_state
*state
,
6483 const unsigned char *compressed
,
6484 size_t compressed_size
,
6485 backtrace_error_callback error_callback
,
6486 void *data
, unsigned char **uncompressed
,
6487 size_t *uncompressed_size
)
6489 return elf_uncompress_lzma (state
, compressed
, compressed_size
,
6490 error_callback
, data
, uncompressed
,
6494 /* Add the backtrace data for one ELF file. Returns 1 on success,
6495 0 on failure (in both cases descriptor is closed) or -1 if exe
6496 is non-zero and the ELF file is ET_DYN, which tells the caller that
6497 elf_add will need to be called on the descriptor again after
6498 base_address is determined. */
6501 elf_add (struct backtrace_state
*state
, const char *filename
, int descriptor
,
6502 const unsigned char *memory
, size_t memory_size
,
6503 struct libbacktrace_base_address base_address
,
6504 struct elf_ppc64_opd_data
*caller_opd
,
6505 backtrace_error_callback error_callback
, void *data
,
6506 fileline
*fileline_fn
, int *found_sym
, int *found_dwarf
,
6507 struct dwarf_data
**fileline_entry
, int exe
, int debuginfo
,
6508 const char *with_buildid_data
, uint32_t with_buildid_size
)
6510 struct elf_view ehdr_view
;
6514 unsigned int shstrndx
;
6515 struct elf_view shdrs_view
;
6516 int shdrs_view_valid
;
6517 const b_elf_shdr
*shdrs
;
6518 const b_elf_shdr
*shstrhdr
;
6521 struct elf_view names_view
;
6522 int names_view_valid
;
6524 unsigned int symtab_shndx
;
6525 unsigned int dynsym_shndx
;
6527 struct debug_section_info sections
[DEBUG_MAX
];
6528 struct debug_section_info zsections
[DEBUG_MAX
];
6529 struct elf_view symtab_view
;
6530 int symtab_view_valid
;
6531 struct elf_view strtab_view
;
6532 int strtab_view_valid
;
6533 struct elf_view buildid_view
;
6534 int buildid_view_valid
;
6535 const char *buildid_data
;
6536 uint32_t buildid_size
;
6537 struct elf_view debuglink_view
;
6538 int debuglink_view_valid
;
6539 const char *debuglink_name
;
6540 uint32_t debuglink_crc
;
6541 struct elf_view debugaltlink_view
;
6542 int debugaltlink_view_valid
;
6543 const char *debugaltlink_name
;
6544 const char *debugaltlink_buildid_data
;
6545 uint32_t debugaltlink_buildid_size
;
6546 struct elf_view gnu_debugdata_view
;
6547 int gnu_debugdata_view_valid
;
6548 size_t gnu_debugdata_size
;
6549 unsigned char *gnu_debugdata_uncompressed
;
6550 size_t gnu_debugdata_uncompressed_size
;
6554 struct elf_view debug_view
;
6555 int debug_view_valid
;
6556 unsigned int using_debug_view
;
6557 uint16_t *zdebug_table
;
6558 struct elf_view split_debug_view
[DEBUG_MAX
];
6559 unsigned char split_debug_view_valid
[DEBUG_MAX
];
6560 struct elf_ppc64_opd_data opd_data
, *opd
;
6562 struct dwarf_sections dwarf_sections
;
6570 shdrs_view_valid
= 0;
6571 names_view_valid
= 0;
6572 symtab_view_valid
= 0;
6573 strtab_view_valid
= 0;
6574 buildid_view_valid
= 0;
6575 buildid_data
= NULL
;
6577 debuglink_view_valid
= 0;
6578 debuglink_name
= NULL
;
6580 debugaltlink_view_valid
= 0;
6581 debugaltlink_name
= NULL
;
6582 debugaltlink_buildid_data
= NULL
;
6583 debugaltlink_buildid_size
= 0;
6584 gnu_debugdata_view_valid
= 0;
6585 gnu_debugdata_size
= 0;
6586 debug_view_valid
= 0;
6587 memset (&split_debug_view_valid
[0], 0, sizeof split_debug_view_valid
);
6591 if (!elf_get_view (state
, descriptor
, memory
, memory_size
, 0, sizeof ehdr
,
6592 error_callback
, data
, &ehdr_view
))
6595 memcpy (&ehdr
, ehdr_view
.view
.data
, sizeof ehdr
);
6597 elf_release_view (state
, &ehdr_view
, error_callback
, data
);
6599 if (ehdr
.e_ident
[EI_MAG0
] != ELFMAG0
6600 || ehdr
.e_ident
[EI_MAG1
] != ELFMAG1
6601 || ehdr
.e_ident
[EI_MAG2
] != ELFMAG2
6602 || ehdr
.e_ident
[EI_MAG3
] != ELFMAG3
)
6604 error_callback (data
, "executable file is not ELF", 0);
6607 if (ehdr
.e_ident
[EI_VERSION
] != EV_CURRENT
)
6609 error_callback (data
, "executable file is unrecognized ELF version", 0);
6613 #if BACKTRACE_ELF_SIZE == 32
6614 #define BACKTRACE_ELFCLASS ELFCLASS32
6616 #define BACKTRACE_ELFCLASS ELFCLASS64
6619 if (ehdr
.e_ident
[EI_CLASS
] != BACKTRACE_ELFCLASS
)
6621 error_callback (data
, "executable file is unexpected ELF class", 0);
6625 if (ehdr
.e_ident
[EI_DATA
] != ELFDATA2LSB
6626 && ehdr
.e_ident
[EI_DATA
] != ELFDATA2MSB
)
6628 error_callback (data
, "executable file has unknown endianness", 0);
6632 /* If the executable is ET_DYN, it is either a PIE, or we are running
6633 directly a shared library with .interp. We need to wait for
6634 dl_iterate_phdr in that case to determine the actual base_address. */
6635 if (exe
&& ehdr
.e_type
== ET_DYN
)
6638 shoff
= ehdr
.e_shoff
;
6639 shnum
= ehdr
.e_shnum
;
6640 shstrndx
= ehdr
.e_shstrndx
;
6642 if ((shnum
== 0 || shstrndx
== SHN_XINDEX
)
6645 struct elf_view shdr_view
;
6646 const b_elf_shdr
*shdr
;
6648 if (!elf_get_view (state
, descriptor
, memory
, memory_size
, shoff
,
6649 sizeof shdr
, error_callback
, data
, &shdr_view
))
6652 shdr
= (const b_elf_shdr
*) shdr_view
.view
.data
;
6655 shnum
= shdr
->sh_size
;
6657 if (shstrndx
== SHN_XINDEX
)
6659 shstrndx
= shdr
->sh_link
;
6661 /* Versions of the GNU binutils between 2.12 and 2.18 did
6662 not handle objects with more than SHN_LORESERVE sections
6663 correctly. All large section indexes were offset by
6664 0x100. There is more information at
6665 http://sourceware.org/bugzilla/show_bug.cgi?id-5900 .
6666 Fortunately these object files are easy to detect, as the
6667 GNU binutils always put the section header string table
6668 near the end of the list of sections. Thus if the
6669 section header string table index is larger than the
6670 number of sections, then we know we have to subtract
6671 0x100 to get the real section index. */
6672 if (shstrndx
>= shnum
&& shstrndx
>= SHN_LORESERVE
+ 0x100)
6676 elf_release_view (state
, &shdr_view
, error_callback
, data
);
6679 if (shnum
== 0 || shstrndx
== 0)
6682 /* To translate PC to file/line when using DWARF, we need to find
6683 the .debug_info and .debug_line sections. */
6685 /* Read the section headers, skipping the first one. */
6687 if (!elf_get_view (state
, descriptor
, memory
, memory_size
,
6688 shoff
+ sizeof (b_elf_shdr
),
6689 (shnum
- 1) * sizeof (b_elf_shdr
),
6690 error_callback
, data
, &shdrs_view
))
6692 shdrs_view_valid
= 1;
6693 shdrs
= (const b_elf_shdr
*) shdrs_view
.view
.data
;
6695 /* Read the section names. */
6697 shstrhdr
= &shdrs
[shstrndx
- 1];
6698 shstr_size
= shstrhdr
->sh_size
;
6699 shstr_off
= shstrhdr
->sh_offset
;
6701 if (!elf_get_view (state
, descriptor
, memory
, memory_size
, shstr_off
,
6702 shstrhdr
->sh_size
, error_callback
, data
, &names_view
))
6704 names_view_valid
= 1;
6705 names
= (const char *) names_view
.view
.data
;
6710 memset (sections
, 0, sizeof sections
);
6711 memset (zsections
, 0, sizeof zsections
);
6713 /* Look for the symbol table. */
6714 for (i
= 1; i
< shnum
; ++i
)
6716 const b_elf_shdr
*shdr
;
6717 unsigned int sh_name
;
6721 shdr
= &shdrs
[i
- 1];
6723 if (shdr
->sh_type
== SHT_SYMTAB
)
6725 else if (shdr
->sh_type
== SHT_DYNSYM
)
6728 sh_name
= shdr
->sh_name
;
6729 if (sh_name
>= shstr_size
)
6731 error_callback (data
, "ELF section name out of range", 0);
6735 name
= names
+ sh_name
;
6737 for (j
= 0; j
< (int) DEBUG_MAX
; ++j
)
6739 if (strcmp (name
, dwarf_section_names
[j
]) == 0)
6741 sections
[j
].offset
= shdr
->sh_offset
;
6742 sections
[j
].size
= shdr
->sh_size
;
6743 sections
[j
].compressed
= (shdr
->sh_flags
& SHF_COMPRESSED
) != 0;
6748 if (name
[0] == '.' && name
[1] == 'z')
6750 for (j
= 0; j
< (int) DEBUG_MAX
; ++j
)
6752 if (strcmp (name
+ 2, dwarf_section_names
[j
] + 1) == 0)
6754 zsections
[j
].offset
= shdr
->sh_offset
;
6755 zsections
[j
].size
= shdr
->sh_size
;
6761 /* Read the build ID if present. This could check for any
6762 SHT_NOTE section with the right note name and type, but gdb
6763 looks for a specific section name. */
6764 if ((!debuginfo
|| with_buildid_data
!= NULL
)
6765 && !buildid_view_valid
6766 && strcmp (name
, ".note.gnu.build-id") == 0)
6768 const b_elf_note
*note
;
6770 if (!elf_get_view (state
, descriptor
, memory
, memory_size
,
6771 shdr
->sh_offset
, shdr
->sh_size
, error_callback
,
6772 data
, &buildid_view
))
6775 buildid_view_valid
= 1;
6776 note
= (const b_elf_note
*) buildid_view
.view
.data
;
6777 if (note
->type
== NT_GNU_BUILD_ID
6778 && note
->namesz
== 4
6779 && strncmp (note
->name
, "GNU", 4) == 0
6780 && shdr
->sh_size
<= 12 + ((note
->namesz
+ 3) & ~ 3) + note
->descsz
)
6782 buildid_data
= ¬e
->name
[0] + ((note
->namesz
+ 3) & ~ 3);
6783 buildid_size
= note
->descsz
;
6786 if (with_buildid_size
!= 0)
6788 if (buildid_size
!= with_buildid_size
)
6791 if (memcmp (buildid_data
, with_buildid_data
, buildid_size
) != 0)
6796 /* Read the debuglink file if present. */
6798 && !debuglink_view_valid
6799 && strcmp (name
, ".gnu_debuglink") == 0)
6801 const char *debuglink_data
;
6804 if (!elf_get_view (state
, descriptor
, memory
, memory_size
,
6805 shdr
->sh_offset
, shdr
->sh_size
, error_callback
,
6806 data
, &debuglink_view
))
6809 debuglink_view_valid
= 1;
6810 debuglink_data
= (const char *) debuglink_view
.view
.data
;
6811 crc_offset
= strnlen (debuglink_data
, shdr
->sh_size
);
6812 crc_offset
= (crc_offset
+ 3) & ~3;
6813 if (crc_offset
+ 4 <= shdr
->sh_size
)
6815 debuglink_name
= debuglink_data
;
6816 debuglink_crc
= *(const uint32_t*)(debuglink_data
+ crc_offset
);
6820 if (!debugaltlink_view_valid
6821 && strcmp (name
, ".gnu_debugaltlink") == 0)
6823 const char *debugaltlink_data
;
6824 size_t debugaltlink_name_len
;
6826 if (!elf_get_view (state
, descriptor
, memory
, memory_size
,
6827 shdr
->sh_offset
, shdr
->sh_size
, error_callback
,
6828 data
, &debugaltlink_view
))
6831 debugaltlink_view_valid
= 1;
6832 debugaltlink_data
= (const char *) debugaltlink_view
.view
.data
;
6833 debugaltlink_name
= debugaltlink_data
;
6834 debugaltlink_name_len
= strnlen (debugaltlink_data
, shdr
->sh_size
);
6835 if (debugaltlink_name_len
< shdr
->sh_size
)
6837 /* Include terminating zero. */
6838 debugaltlink_name_len
+= 1;
6840 debugaltlink_buildid_data
6841 = debugaltlink_data
+ debugaltlink_name_len
;
6842 debugaltlink_buildid_size
= shdr
->sh_size
- debugaltlink_name_len
;
6847 && !gnu_debugdata_view_valid
6848 && strcmp (name
, ".gnu_debugdata") == 0)
6850 if (!elf_get_view (state
, descriptor
, memory
, memory_size
,
6851 shdr
->sh_offset
, shdr
->sh_size
, error_callback
,
6852 data
, &gnu_debugdata_view
))
6855 gnu_debugdata_size
= shdr
->sh_size
;
6856 gnu_debugdata_view_valid
= 1;
6859 /* Read the .opd section on PowerPC64 ELFv1. */
6860 if (ehdr
.e_machine
== EM_PPC64
6861 && (ehdr
.e_flags
& EF_PPC64_ABI
) < 2
6862 && shdr
->sh_type
== SHT_PROGBITS
6863 && strcmp (name
, ".opd") == 0)
6865 if (!elf_get_view (state
, descriptor
, memory
, memory_size
,
6866 shdr
->sh_offset
, shdr
->sh_size
, error_callback
,
6867 data
, &opd_data
.view
))
6871 opd
->addr
= shdr
->sh_addr
;
6872 opd
->data
= (const char *) opd_data
.view
.view
.data
;
6873 opd
->size
= shdr
->sh_size
;
6878 /* A debuginfo file may not have a useful .opd section, but we can use the
6879 one from the original executable. */
6883 if (symtab_shndx
== 0)
6884 symtab_shndx
= dynsym_shndx
;
6885 if (symtab_shndx
!= 0)
6887 const b_elf_shdr
*symtab_shdr
;
6888 unsigned int strtab_shndx
;
6889 const b_elf_shdr
*strtab_shdr
;
6890 struct elf_syminfo_data
*sdata
;
6892 symtab_shdr
= &shdrs
[symtab_shndx
- 1];
6893 strtab_shndx
= symtab_shdr
->sh_link
;
6894 if (strtab_shndx
>= shnum
)
6896 error_callback (data
,
6897 "ELF symbol table strtab link out of range", 0);
6900 strtab_shdr
= &shdrs
[strtab_shndx
- 1];
6902 if (!elf_get_view (state
, descriptor
, memory
, memory_size
,
6903 symtab_shdr
->sh_offset
, symtab_shdr
->sh_size
,
6904 error_callback
, data
, &symtab_view
))
6906 symtab_view_valid
= 1;
6908 if (!elf_get_view (state
, descriptor
, memory
, memory_size
,
6909 strtab_shdr
->sh_offset
, strtab_shdr
->sh_size
,
6910 error_callback
, data
, &strtab_view
))
6912 strtab_view_valid
= 1;
6914 sdata
= ((struct elf_syminfo_data
*)
6915 backtrace_alloc (state
, sizeof *sdata
, error_callback
, data
));
6919 if (!elf_initialize_syminfo (state
, base_address
,
6920 symtab_view
.view
.data
, symtab_shdr
->sh_size
,
6921 strtab_view
.view
.data
, strtab_shdr
->sh_size
,
6922 error_callback
, data
, sdata
, opd
))
6924 backtrace_free (state
, sdata
, sizeof *sdata
, error_callback
, data
);
6928 /* We no longer need the symbol table, but we hold on to the
6929 string table permanently. */
6930 elf_release_view (state
, &symtab_view
, error_callback
, data
);
6931 symtab_view_valid
= 0;
6932 strtab_view_valid
= 0;
6936 elf_add_syminfo_data (state
, sdata
);
6939 elf_release_view (state
, &shdrs_view
, error_callback
, data
);
6940 shdrs_view_valid
= 0;
6941 elf_release_view (state
, &names_view
, error_callback
, data
);
6942 names_view_valid
= 0;
6944 /* If the debug info is in a separate file, read that one instead. */
6946 if (buildid_data
!= NULL
)
6950 d
= elf_open_debugfile_by_buildid (state
, buildid_data
, buildid_size
,
6951 error_callback
, data
);
6956 elf_release_view (state
, &buildid_view
, error_callback
, data
);
6957 if (debuglink_view_valid
)
6958 elf_release_view (state
, &debuglink_view
, error_callback
, data
);
6959 if (debugaltlink_view_valid
)
6960 elf_release_view (state
, &debugaltlink_view
, error_callback
, data
);
6961 ret
= elf_add (state
, "", d
, NULL
, 0, base_address
, opd
,
6962 error_callback
, data
, fileline_fn
, found_sym
,
6963 found_dwarf
, NULL
, 0, 1, NULL
, 0);
6965 backtrace_close (d
, error_callback
, data
);
6966 else if (descriptor
>= 0)
6967 backtrace_close (descriptor
, error_callback
, data
);
6972 if (buildid_view_valid
)
6974 elf_release_view (state
, &buildid_view
, error_callback
, data
);
6975 buildid_view_valid
= 0;
6978 if (debuglink_name
!= NULL
)
6982 d
= elf_open_debugfile_by_debuglink (state
, filename
, debuglink_name
,
6983 debuglink_crc
, error_callback
,
6989 elf_release_view (state
, &debuglink_view
, error_callback
, data
);
6990 if (debugaltlink_view_valid
)
6991 elf_release_view (state
, &debugaltlink_view
, error_callback
, data
);
6992 ret
= elf_add (state
, "", d
, NULL
, 0, base_address
, opd
,
6993 error_callback
, data
, fileline_fn
, found_sym
,
6994 found_dwarf
, NULL
, 0, 1, NULL
, 0);
6996 backtrace_close (d
, error_callback
, data
);
6997 else if (descriptor
>= 0)
6998 backtrace_close(descriptor
, error_callback
, data
);
7003 if (debuglink_view_valid
)
7005 elf_release_view (state
, &debuglink_view
, error_callback
, data
);
7006 debuglink_view_valid
= 0;
7009 struct dwarf_data
*fileline_altlink
= NULL
;
7010 if (debugaltlink_name
!= NULL
)
7014 d
= elf_open_debugfile_by_debuglink (state
, filename
, debugaltlink_name
,
7015 0, error_callback
, data
);
7020 ret
= elf_add (state
, filename
, d
, NULL
, 0, base_address
, opd
,
7021 error_callback
, data
, fileline_fn
, found_sym
,
7022 found_dwarf
, &fileline_altlink
, 0, 1,
7023 debugaltlink_buildid_data
, debugaltlink_buildid_size
);
7024 elf_release_view (state
, &debugaltlink_view
, error_callback
, data
);
7025 debugaltlink_view_valid
= 0;
7028 backtrace_close (d
, error_callback
, data
);
7034 if (debugaltlink_view_valid
)
7036 elf_release_view (state
, &debugaltlink_view
, error_callback
, data
);
7037 debugaltlink_view_valid
= 0;
7040 if (gnu_debugdata_view_valid
)
7044 ret
= elf_uncompress_lzma (state
,
7045 ((const unsigned char *)
7046 gnu_debugdata_view
.view
.data
),
7047 gnu_debugdata_size
, error_callback
, data
,
7048 &gnu_debugdata_uncompressed
,
7049 &gnu_debugdata_uncompressed_size
);
7051 elf_release_view (state
, &gnu_debugdata_view
, error_callback
, data
);
7052 gnu_debugdata_view_valid
= 0;
7056 ret
= elf_add (state
, filename
, -1, gnu_debugdata_uncompressed
,
7057 gnu_debugdata_uncompressed_size
, base_address
, opd
,
7058 error_callback
, data
, fileline_fn
, found_sym
,
7059 found_dwarf
, NULL
, 0, 0, NULL
, 0);
7060 if (ret
>= 0 && descriptor
>= 0)
7061 backtrace_close(descriptor
, error_callback
, data
);
7068 elf_release_view (state
, &opd
->view
, error_callback
, data
);
7073 /* Read all the debug sections in a single view, since they are
7074 probably adjacent in the file. If any of sections are
7075 uncompressed, we never release this view. */
7080 for (i
= 0; i
< (int) DEBUG_MAX
; ++i
)
7084 if (sections
[i
].size
!= 0)
7086 if (min_offset
== 0 || sections
[i
].offset
< min_offset
)
7087 min_offset
= sections
[i
].offset
;
7088 end
= sections
[i
].offset
+ sections
[i
].size
;
7089 if (end
> max_offset
)
7091 debug_size
+= sections
[i
].size
;
7093 if (zsections
[i
].size
!= 0)
7095 if (min_offset
== 0 || zsections
[i
].offset
< min_offset
)
7096 min_offset
= zsections
[i
].offset
;
7097 end
= zsections
[i
].offset
+ zsections
[i
].size
;
7098 if (end
> max_offset
)
7100 debug_size
+= zsections
[i
].size
;
7103 if (min_offset
== 0 || max_offset
== 0)
7105 if (descriptor
>= 0)
7107 if (!backtrace_close (descriptor
, error_callback
, data
))
7113 /* If the total debug section size is large, assume that there are
7114 gaps between the sections, and read them individually. */
7116 if (max_offset
- min_offset
< 0x20000000
7117 || max_offset
- min_offset
< debug_size
+ 0x10000)
7119 if (!elf_get_view (state
, descriptor
, memory
, memory_size
, min_offset
,
7120 max_offset
- min_offset
, error_callback
, data
,
7123 debug_view_valid
= 1;
7127 memset (&split_debug_view
[0], 0, sizeof split_debug_view
);
7128 for (i
= 0; i
< (int) DEBUG_MAX
; ++i
)
7130 struct debug_section_info
*dsec
;
7132 if (sections
[i
].size
!= 0)
7133 dsec
= §ions
[i
];
7134 else if (zsections
[i
].size
!= 0)
7135 dsec
= &zsections
[i
];
7139 if (!elf_get_view (state
, descriptor
, memory
, memory_size
,
7140 dsec
->offset
, dsec
->size
, error_callback
, data
,
7141 &split_debug_view
[i
]))
7143 split_debug_view_valid
[i
] = 1;
7145 if (sections
[i
].size
!= 0)
7146 sections
[i
].data
= ((const unsigned char *)
7147 split_debug_view
[i
].view
.data
);
7149 zsections
[i
].data
= ((const unsigned char *)
7150 split_debug_view
[i
].view
.data
);
7154 /* We've read all we need from the executable. */
7155 if (descriptor
>= 0)
7157 if (!backtrace_close (descriptor
, error_callback
, data
))
7162 using_debug_view
= 0;
7163 if (debug_view_valid
)
7165 for (i
= 0; i
< (int) DEBUG_MAX
; ++i
)
7167 if (sections
[i
].size
== 0)
7168 sections
[i
].data
= NULL
;
7171 sections
[i
].data
= ((const unsigned char *) debug_view
.view
.data
7172 + (sections
[i
].offset
- min_offset
));
7176 if (zsections
[i
].size
== 0)
7177 zsections
[i
].data
= NULL
;
7179 zsections
[i
].data
= ((const unsigned char *) debug_view
.view
.data
7180 + (zsections
[i
].offset
- min_offset
));
7184 /* Uncompress the old format (--compress-debug-sections=zlib-gnu). */
7186 zdebug_table
= NULL
;
7187 for (i
= 0; i
< (int) DEBUG_MAX
; ++i
)
7189 if (sections
[i
].size
== 0 && zsections
[i
].size
> 0)
7191 unsigned char *uncompressed_data
;
7192 size_t uncompressed_size
;
7194 if (zdebug_table
== NULL
)
7196 zdebug_table
= ((uint16_t *)
7197 backtrace_alloc (state
, ZLIB_TABLE_SIZE
,
7198 error_callback
, data
));
7199 if (zdebug_table
== NULL
)
7203 uncompressed_data
= NULL
;
7204 uncompressed_size
= 0;
7205 if (!elf_uncompress_zdebug (state
, zsections
[i
].data
,
7206 zsections
[i
].size
, zdebug_table
,
7207 error_callback
, data
,
7208 &uncompressed_data
, &uncompressed_size
))
7210 sections
[i
].data
= uncompressed_data
;
7211 sections
[i
].size
= uncompressed_size
;
7212 sections
[i
].compressed
= 0;
7214 if (split_debug_view_valid
[i
])
7216 elf_release_view (state
, &split_debug_view
[i
],
7217 error_callback
, data
);
7218 split_debug_view_valid
[i
] = 0;
7223 if (zdebug_table
!= NULL
)
7225 backtrace_free (state
, zdebug_table
, ZLIB_TABLE_SIZE
,
7226 error_callback
, data
);
7227 zdebug_table
= NULL
;
7230 /* Uncompress the official ELF format
7231 (--compress-debug-sections=zlib-gabi, --compress-debug-sections=zstd). */
7232 for (i
= 0; i
< (int) DEBUG_MAX
; ++i
)
7234 unsigned char *uncompressed_data
;
7235 size_t uncompressed_size
;
7237 if (sections
[i
].size
== 0 || !sections
[i
].compressed
)
7240 if (zdebug_table
== NULL
)
7242 zdebug_table
= ((uint16_t *)
7243 backtrace_alloc (state
, ZDEBUG_TABLE_SIZE
,
7244 error_callback
, data
));
7245 if (zdebug_table
== NULL
)
7249 uncompressed_data
= NULL
;
7250 uncompressed_size
= 0;
7251 if (!elf_uncompress_chdr (state
, sections
[i
].data
, sections
[i
].size
,
7252 zdebug_table
, error_callback
, data
,
7253 &uncompressed_data
, &uncompressed_size
))
7255 sections
[i
].data
= uncompressed_data
;
7256 sections
[i
].size
= uncompressed_size
;
7257 sections
[i
].compressed
= 0;
7259 if (debug_view_valid
)
7261 else if (split_debug_view_valid
[i
])
7263 elf_release_view (state
, &split_debug_view
[i
], error_callback
, data
);
7264 split_debug_view_valid
[i
] = 0;
7268 if (zdebug_table
!= NULL
)
7269 backtrace_free (state
, zdebug_table
, ZDEBUG_TABLE_SIZE
,
7270 error_callback
, data
);
7272 if (debug_view_valid
&& using_debug_view
== 0)
7274 elf_release_view (state
, &debug_view
, error_callback
, data
);
7275 debug_view_valid
= 0;
7278 for (i
= 0; i
< (int) DEBUG_MAX
; ++i
)
7280 dwarf_sections
.data
[i
] = sections
[i
].data
;
7281 dwarf_sections
.size
[i
] = sections
[i
].size
;
7284 if (!backtrace_dwarf_add (state
, base_address
, &dwarf_sections
,
7285 ehdr
.e_ident
[EI_DATA
] == ELFDATA2MSB
,
7287 error_callback
, data
, fileline_fn
,
7296 if (shdrs_view_valid
)
7297 elf_release_view (state
, &shdrs_view
, error_callback
, data
);
7298 if (names_view_valid
)
7299 elf_release_view (state
, &names_view
, error_callback
, data
);
7300 if (symtab_view_valid
)
7301 elf_release_view (state
, &symtab_view
, error_callback
, data
);
7302 if (strtab_view_valid
)
7303 elf_release_view (state
, &strtab_view
, error_callback
, data
);
7304 if (debuglink_view_valid
)
7305 elf_release_view (state
, &debuglink_view
, error_callback
, data
);
7306 if (debugaltlink_view_valid
)
7307 elf_release_view (state
, &debugaltlink_view
, error_callback
, data
);
7308 if (gnu_debugdata_view_valid
)
7309 elf_release_view (state
, &gnu_debugdata_view
, error_callback
, data
);
7310 if (buildid_view_valid
)
7311 elf_release_view (state
, &buildid_view
, error_callback
, data
);
7312 if (debug_view_valid
)
7313 elf_release_view (state
, &debug_view
, error_callback
, data
);
7314 for (i
= 0; i
< (int) DEBUG_MAX
; ++i
)
7316 if (split_debug_view_valid
[i
])
7317 elf_release_view (state
, &split_debug_view
[i
], error_callback
, data
);
7320 elf_release_view (state
, &opd
->view
, error_callback
, data
);
7321 if (descriptor
>= 0)
7322 backtrace_close (descriptor
, error_callback
, data
);
7326 /* Data passed to phdr_callback. */
7330 struct backtrace_state
*state
;
7331 backtrace_error_callback error_callback
;
7333 fileline
*fileline_fn
;
7336 const char *exe_filename
;
7340 /* Callback passed to dl_iterate_phdr. Load debug info from shared
7345 __attribute__ ((__force_align_arg_pointer__
))
7347 phdr_callback (struct dl_phdr_info
*info
, size_t size ATTRIBUTE_UNUSED
,
7350 struct phdr_data
*pd
= (struct phdr_data
*) pdata
;
7351 const char *filename
;
7354 struct libbacktrace_base_address base_address
;
7355 fileline elf_fileline_fn
;
7358 /* There is not much we can do if we don't have the module name,
7359 unless executable is ET_DYN, where we expect the very first
7360 phdr_callback to be for the PIE. */
7361 if (info
->dlpi_name
== NULL
|| info
->dlpi_name
[0] == '\0')
7363 if (pd
->exe_descriptor
== -1)
7365 filename
= pd
->exe_filename
;
7366 descriptor
= pd
->exe_descriptor
;
7367 pd
->exe_descriptor
= -1;
7371 if (pd
->exe_descriptor
!= -1)
7373 backtrace_close (pd
->exe_descriptor
, pd
->error_callback
, pd
->data
);
7374 pd
->exe_descriptor
= -1;
7377 filename
= info
->dlpi_name
;
7378 descriptor
= backtrace_open (info
->dlpi_name
, pd
->error_callback
,
7379 pd
->data
, &does_not_exist
);
7384 base_address
.m
= info
->dlpi_addr
;
7385 if (elf_add (pd
->state
, filename
, descriptor
, NULL
, 0, base_address
, NULL
,
7386 pd
->error_callback
, pd
->data
, &elf_fileline_fn
, pd
->found_sym
,
7387 &found_dwarf
, NULL
, 0, 0, NULL
, 0))
7391 *pd
->found_dwarf
= 1;
7392 *pd
->fileline_fn
= elf_fileline_fn
;
7399 /* Initialize the backtrace data we need from an ELF executable. At
7400 the ELF level, all we need to do is find the debug info
7404 backtrace_initialize (struct backtrace_state
*state
, const char *filename
,
7405 int descriptor
, backtrace_error_callback error_callback
,
7406 void *data
, fileline
*fileline_fn
)
7411 fileline elf_fileline_fn
= elf_nodebug
;
7412 struct phdr_data pd
;
7414 /* When using fdpic we must use dl_iterate_phdr for all modules, including
7415 the main executable, so that we can get the right base address
7417 if (!libbacktrace_using_fdpic ())
7419 struct libbacktrace_base_address zero_base_address
;
7421 memset (&zero_base_address
, 0, sizeof zero_base_address
);
7422 ret
= elf_add (state
, filename
, descriptor
, NULL
, 0, zero_base_address
,
7423 NULL
, error_callback
, data
, &elf_fileline_fn
, &found_sym
,
7424 &found_dwarf
, NULL
, 1, 0, NULL
, 0);
7430 pd
.error_callback
= error_callback
;
7432 pd
.fileline_fn
= &elf_fileline_fn
;
7433 pd
.found_sym
= &found_sym
;
7434 pd
.found_dwarf
= &found_dwarf
;
7435 pd
.exe_filename
= filename
;
7436 pd
.exe_descriptor
= ret
< 0 ? descriptor
: -1;
7438 dl_iterate_phdr (phdr_callback
, (void *) &pd
);
7440 if (!state
->threaded
)
7443 state
->syminfo_fn
= elf_syminfo
;
7444 else if (state
->syminfo_fn
== NULL
)
7445 state
->syminfo_fn
= elf_nosyms
;
7450 backtrace_atomic_store_pointer (&state
->syminfo_fn
, elf_syminfo
);
7452 (void) __sync_bool_compare_and_swap (&state
->syminfo_fn
, NULL
,
7456 if (!state
->threaded
)
7457 *fileline_fn
= state
->fileline_fn
;
7459 *fileline_fn
= backtrace_atomic_load_pointer (&state
->fileline_fn
);
7461 if (*fileline_fn
== NULL
|| *fileline_fn
== elf_nodebug
)
7462 *fileline_fn
= elf_fileline_fn
;