]> git.ipfire.org Git - thirdparty/gcc.git/blob - libbacktrace/elf.c
Daily bump.
[thirdparty/gcc.git] / libbacktrace / elf.c
1 /* elf.c -- Get debug data from an ELF file for backtraces.
2 Copyright (C) 2012-2024 Free Software Foundation, Inc.
3 Written by Ian Lance Taylor, Google.
4
5 Redistribution and use in source and binary forms, with or without
6 modification, are permitted provided that the following conditions are
7 met:
8
9 (1) Redistributions of source code must retain the above copyright
10 notice, this list of conditions and the following disclaimer.
11
12 (2) Redistributions in binary form must reproduce the above copyright
13 notice, this list of conditions and the following disclaimer in
14 the documentation and/or other materials provided with the
15 distribution.
16
17 (3) The name of the author may not be used to
18 endorse or promote products derived from this software without
19 specific prior written permission.
20
21 THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
22 IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
23 WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
24 DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,
25 INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
26 (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
27 SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
28 HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
29 STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
30 IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
31 POSSIBILITY OF SUCH DAMAGE. */
32
33 #include "config.h"
34
35 #include <errno.h>
36 #include <stdlib.h>
37 #include <string.h>
38 #include <sys/types.h>
39 #include <sys/stat.h>
40 #include <unistd.h>
41
42 #ifdef HAVE_DL_ITERATE_PHDR
43 #ifdef HAVE_LINK_H
44 #include <link.h>
45 #endif
46 #ifdef HAVE_SYS_LINK_H
47 #include <sys/link.h>
48 #endif
49 #endif
50
51 #include "backtrace.h"
52 #include "internal.h"
53
/* Provide a fallback S_ISLNK for systems whose <sys/stat.h> lacks it.
   The octal values are the traditional POSIX st_mode encodings.  */
#ifndef S_ISLNK
#ifndef S_IFLNK
#define S_IFLNK 0120000
#endif
#ifndef S_IFMT
#define S_IFMT 0170000
#endif
#define S_ISLNK(m) (((m) & S_IFMT) == S_IFLNK)
#endif

#ifndef __GNUC__
/* On non-GCC compilers, make the prefetch a no-op and let unlikely()
   keep its value while dropping the branch-prediction hint.  */
#define __builtin_prefetch(p, r, l)
#define unlikely(x) (x)
#else
/* unlikely(x) marks X as probably false, to help branch layout.  */
#define unlikely(x) __builtin_expect(!!(x), 0)
#endif
70
#if !defined(HAVE_DECL_STRNLEN) || !HAVE_DECL_STRNLEN

/* Fallback implementation of strnlen for systems that do not declare
   it: return the length of S, but never examine more than MAXLEN
   bytes.  */

static size_t
xstrnlen (const char *s, size_t maxlen)
{
  const char *p = s;

  while (maxlen > 0 && *p != '\0')
    {
      ++p;
      --maxlen;
    }
  return (size_t) (p - s);
}

#define strnlen xstrnlen

#endif
89
#ifndef HAVE_LSTAT

/* Dummy version of lstat for systems that don't have it.  It always
   fails, which makes elf_is_symlink treat every file as a
   non-symlink, so symlink resolution is simply skipped.  */

static int
xlstat (const char *path ATTRIBUTE_UNUSED, struct stat *st ATTRIBUTE_UNUSED)
{
  return -1;
}

#define lstat xlstat

#endif
103
#ifndef HAVE_READLINK

/* Dummy version of readlink for systems that don't have it.  Always
   fails, so elf_readlink returns NULL on such systems.  */

static ssize_t
xreadlink (const char *path ATTRIBUTE_UNUSED, char *buf ATTRIBUTE_UNUSED,
	   size_t bufsz ATTRIBUTE_UNUSED)
{
  return -1;
}

#define readlink xreadlink

#endif
118
#ifndef HAVE_DL_ITERATE_PHDR

/* Dummy version of dl_iterate_phdr for systems that don't have it:
   never invokes the callback, so no loaded objects are reported.  */

#define dl_phdr_info x_dl_phdr_info
#define dl_iterate_phdr x_dl_iterate_phdr

/* Minimal substitute for the system dl_phdr_info; only the fields
   this code references are declared.  */

struct dl_phdr_info
{
  uintptr_t dlpi_addr;
  const char *dlpi_name;
};

static int
dl_iterate_phdr (int (*callback) (struct dl_phdr_info *,
				  size_t, void *) ATTRIBUTE_UNUSED,
		 void *data ATTRIBUTE_UNUSED)
{
  return 0;
}

#endif /* ! defined (HAVE_DL_ITERATE_PHDR) */
141
/* The configure script must tell us whether we are 32-bit or 64-bit
   ELF.  We could make this code test and support either possibility,
   but there is no point.  This code only works for the currently
   running executable, which means that we know the ELF mode at
   configure time. */

#if BACKTRACE_ELF_SIZE != 32 && BACKTRACE_ELF_SIZE != 64
#error "Unknown BACKTRACE_ELF_SIZE"
#endif

/* <link.h> might #include <elf.h> which might define our constants
   with slightly different values.  Undefine them to be safe, since
   this file supplies its own definitions below. */

#undef EI_NIDENT
#undef EI_MAG0
#undef EI_MAG1
#undef EI_MAG2
#undef EI_MAG3
#undef EI_CLASS
#undef EI_DATA
#undef EI_VERSION
#undef ELF_MAG0
#undef ELF_MAG1
#undef ELF_MAG2
#undef ELF_MAG3
#undef ELFCLASS32
#undef ELFCLASS64
#undef ELFDATA2LSB
#undef ELFDATA2MSB
#undef EV_CURRENT
#undef ET_DYN
#undef EM_PPC64
#undef EF_PPC64_ABI
#undef SHN_LORESERVE
#undef SHN_XINDEX
#undef SHN_UNDEF
#undef SHT_PROGBITS
#undef SHT_SYMTAB
#undef SHT_STRTAB
#undef SHT_DYNSYM
#undef SHF_COMPRESSED
#undef STT_OBJECT
#undef STT_FUNC
#undef NT_GNU_BUILD_ID
#undef ELFCOMPRESS_ZLIB
#undef ELFCOMPRESS_ZSTD
188
/* Basic types.  Local b_elf_* names are used instead of the <elf.h>
   names so this file never depends on a system ELF header. */

typedef uint16_t b_elf_half;    /* Elf_Half. */
typedef uint32_t b_elf_word;    /* Elf_Word. */
typedef int32_t  b_elf_sword;   /* Elf_Sword. */

#if BACKTRACE_ELF_SIZE == 32

typedef uint32_t b_elf_addr;    /* Elf_Addr. */
typedef uint32_t b_elf_off;     /* Elf_Off. */

typedef uint32_t b_elf_wxword;  /* 32-bit Elf_Word, 64-bit ELF_Xword. */

#else

typedef uint64_t b_elf_addr;    /* Elf_Addr. */
typedef uint64_t b_elf_off;     /* Elf_Off. */
typedef uint64_t b_elf_xword;   /* Elf_Xword. */
typedef int64_t  b_elf_sxword;  /* Elf_Sxword. */

typedef uint64_t b_elf_wxword;  /* 32-bit Elf_Word, 64-bit ELF_Xword. */

#endif

/* Data structures and associated constants. */

#define EI_NIDENT 16

/* The ELF file header, found at the start of every ELF object. */

typedef struct {
  unsigned char	e_ident[EI_NIDENT];	/* ELF "magic number" */
  b_elf_half	e_type;			/* Identifies object file type */
  b_elf_half	e_machine;		/* Specifies required architecture */
  b_elf_word	e_version;		/* Identifies object file version */
  b_elf_addr	e_entry;		/* Entry point virtual address */
  b_elf_off	e_phoff;		/* Program header table file offset */
  b_elf_off	e_shoff;		/* Section header table file offset */
  b_elf_word	e_flags;		/* Processor-specific flags */
  b_elf_half	e_ehsize;		/* ELF header size in bytes */
  b_elf_half	e_phentsize;		/* Program header table entry size */
  b_elf_half	e_phnum;		/* Program header table entry count */
  b_elf_half	e_shentsize;		/* Section header table entry size */
  b_elf_half	e_shnum;		/* Section header table entry count */
  b_elf_half	e_shstrndx;		/* Section header string table index */
} b_elf_ehdr;  /* Elf_Ehdr. */

/* Byte offsets within e_ident. */

#define EI_MAG0 0
#define EI_MAG1 1
#define EI_MAG2 2
#define EI_MAG3 3
#define EI_CLASS 4
#define EI_DATA 5
#define EI_VERSION 6

/* Expected magic bytes at the start of e_ident. */

#define ELFMAG0 0x7f
#define ELFMAG1 'E'
#define ELFMAG2 'L'
#define ELFMAG3 'F'

#define ELFCLASS32 1
#define ELFCLASS64 2

#define ELFDATA2LSB 1
#define ELFDATA2MSB 2

#define EV_CURRENT 1

#define ET_DYN 3

#define EM_PPC64 21
#define EF_PPC64_ABI 3
259
/* The ELF section header. */

typedef struct {
  b_elf_word	sh_name;		/* Section name, index in string tbl */
  b_elf_word	sh_type;		/* Type of section */
  b_elf_wxword	sh_flags;		/* Miscellaneous section attributes */
  b_elf_addr	sh_addr;		/* Section virtual addr at execution */
  b_elf_off	sh_offset;		/* Section file offset */
  b_elf_wxword	sh_size;		/* Size of section in bytes */
  b_elf_word	sh_link;		/* Index of another section */
  b_elf_word	sh_info;		/* Additional section information */
  b_elf_wxword	sh_addralign;		/* Section alignment */
  b_elf_wxword	sh_entsize;		/* Entry size if section holds table */
} b_elf_shdr;  /* Elf_Shdr. */

#define SHN_UNDEF 0x0000 /* Undefined section */
#define SHN_LORESERVE 0xFF00 /* Begin range of reserved indices */
#define SHN_XINDEX 0xFFFF /* Section index is held elsewhere */

#define SHT_PROGBITS 1
#define SHT_SYMTAB 2
#define SHT_STRTAB 3
#define SHT_DYNSYM 11

#define SHF_COMPRESSED 0x800

/* The ELF symbol table entry; the field order differs between the
   32-bit and 64-bit formats. */

#if BACKTRACE_ELF_SIZE == 32

typedef struct
{
  b_elf_word	st_name;		/* Symbol name, index in string tbl */
  b_elf_addr	st_value;		/* Symbol value */
  b_elf_word	st_size;		/* Symbol size */
  unsigned char	st_info;		/* Symbol binding and type */
  unsigned char	st_other;		/* Visibility and other data */
  b_elf_half	st_shndx;		/* Symbol section index */
} b_elf_sym;  /* Elf_Sym. */

#else /* BACKTRACE_ELF_SIZE != 32 */

typedef struct
{
  b_elf_word	st_name;		/* Symbol name, index in string tbl */
  unsigned char	st_info;		/* Symbol binding and type */
  unsigned char	st_other;		/* Visibility and other data */
  b_elf_half	st_shndx;		/* Symbol section index */
  b_elf_addr	st_value;		/* Symbol value */
  b_elf_xword	st_size;		/* Symbol size */
} b_elf_sym;  /* Elf_Sym. */

#endif /* BACKTRACE_ELF_SIZE != 32 */

#define STT_OBJECT 1
#define STT_FUNC 2

/* An ELF note, used for the GNU build ID.  NAME is really NAMESZ
   bytes long (the [1] is a pre-C99 trailing array). */

typedef struct
{
  uint32_t namesz;
  uint32_t descsz;
  uint32_t type;
  char name[1];
} b_elf_note;

#define NT_GNU_BUILD_ID 3

/* The header at the start of a compressed (SHF_COMPRESSED) section. */

#if BACKTRACE_ELF_SIZE == 32

typedef struct
{
  b_elf_word	ch_type;		/* Compression algorithm */
  b_elf_word	ch_size;		/* Uncompressed size */
  b_elf_word	ch_addralign;		/* Alignment for uncompressed data */
} b_elf_chdr;  /* Elf_Chdr */

#else /* BACKTRACE_ELF_SIZE != 32 */

typedef struct
{
  b_elf_word	ch_type;		/* Compression algorithm */
  b_elf_word	ch_reserved;		/* Reserved */
  b_elf_xword	ch_size;		/* Uncompressed size */
  b_elf_xword	ch_addralign;		/* Alignment for uncompressed data */
} b_elf_chdr;  /* Elf_Chdr */

#endif /* BACKTRACE_ELF_SIZE != 32 */

#define ELFCOMPRESS_ZLIB 1
#define ELFCOMPRESS_ZSTD 2
346
/* Names of sections, indexed by enum dwarf_section in internal.h.
   The order here must stay in sync with that enum. */

static const char * const dwarf_section_names[DEBUG_MAX] =
{
  ".debug_info",
  ".debug_line",
  ".debug_abbrev",
  ".debug_ranges",
  ".debug_str",
  ".debug_addr",
  ".debug_str_offsets",
  ".debug_line_str",
  ".debug_rnglists"
};
361
/* Information we gather for the sections we care about. */

struct debug_section_info
{
  /* Section file offset. */
  off_t offset;
  /* Section size. */
  size_t size;
  /* Section contents, after read from file. */
  const unsigned char *data;
  /* Whether the SHF_COMPRESSED flag is set for the section. */
  int compressed;
};

/* Information we keep for an ELF symbol. */

struct elf_symbol
{
  /* The name of the symbol. */
  const char *name;
  /* The address of the symbol. */
  uintptr_t address;
  /* The size of the symbol. */
  size_t size;
};

/* Information to pass to elf_syminfo.  One of these is built per
   module; they are chained through NEXT. */

struct elf_syminfo_data
{
  /* Symbols for the next module. */
  struct elf_syminfo_data *next;
  /* The ELF symbols, sorted by address. */
  struct elf_symbol *symbols;
  /* The number of symbols. */
  size_t count;
};

/* A view that works for either a file or memory. */

struct elf_view
{
  struct backtrace_view view;
  int release; /* If non-zero, must call backtrace_release_view. */
};

/* Information about PowerPC64 ELFv1 .opd section. */

struct elf_ppc64_opd_data
{
  /* Address of the .opd section. */
  b_elf_addr addr;
  /* Section data. */
  const char *data;
  /* Size of the .opd section. */
  size_t size;
  /* Corresponding section view. */
  struct elf_view view;
};
421
422 /* Create a view of SIZE bytes from DESCRIPTOR/MEMORY at OFFSET. */
423
424 static int
425 elf_get_view (struct backtrace_state *state, int descriptor,
426 const unsigned char *memory, size_t memory_size, off_t offset,
427 uint64_t size, backtrace_error_callback error_callback,
428 void *data, struct elf_view *view)
429 {
430 if (memory == NULL)
431 {
432 view->release = 1;
433 return backtrace_get_view (state, descriptor, offset, size,
434 error_callback, data, &view->view);
435 }
436 else
437 {
438 if ((uint64_t) offset + size > (uint64_t) memory_size)
439 {
440 error_callback (data, "out of range for in-memory file", 0);
441 return 0;
442 }
443 view->view.data = (const void *) (memory + offset);
444 view->view.base = NULL;
445 view->view.len = size;
446 view->release = 0;
447 return 1;
448 }
449 }
450
/* Release a view read by elf_get_view.  A no-op for views that point
   into an in-memory file (release == 0). */

static void
elf_release_view (struct backtrace_state *state, struct elf_view *view,
		  backtrace_error_callback error_callback, void *data)
{
  if (view->release)
    backtrace_release_view (state, &view->view, error_callback, data);
}
460
/* Compute the CRC-32 of BUF/LEN.  This uses the CRC used for
   .gnu_debuglink files: standard CRC-32 (reflected, polynomial
   0xedb88320), table-driven one byte at a time.  CRC is the running
   value from a previous call, or 0 to start; calls may be chained
   over consecutive chunks of data.  */

static uint32_t
elf_crc32 (uint32_t crc, const unsigned char *buf, size_t len)
{
  static const uint32_t crc32_table[256] =
  {
    0x00000000, 0x77073096, 0xee0e612c, 0x990951ba, 0x076dc419,
    0x706af48f, 0xe963a535, 0x9e6495a3, 0x0edb8832, 0x79dcb8a4,
    0xe0d5e91e, 0x97d2d988, 0x09b64c2b, 0x7eb17cbd, 0xe7b82d07,
    0x90bf1d91, 0x1db71064, 0x6ab020f2, 0xf3b97148, 0x84be41de,
    0x1adad47d, 0x6ddde4eb, 0xf4d4b551, 0x83d385c7, 0x136c9856,
    0x646ba8c0, 0xfd62f97a, 0x8a65c9ec, 0x14015c4f, 0x63066cd9,
    0xfa0f3d63, 0x8d080df5, 0x3b6e20c8, 0x4c69105e, 0xd56041e4,
    0xa2677172, 0x3c03e4d1, 0x4b04d447, 0xd20d85fd, 0xa50ab56b,
    0x35b5a8fa, 0x42b2986c, 0xdbbbc9d6, 0xacbcf940, 0x32d86ce3,
    0x45df5c75, 0xdcd60dcf, 0xabd13d59, 0x26d930ac, 0x51de003a,
    0xc8d75180, 0xbfd06116, 0x21b4f4b5, 0x56b3c423, 0xcfba9599,
    0xb8bda50f, 0x2802b89e, 0x5f058808, 0xc60cd9b2, 0xb10be924,
    0x2f6f7c87, 0x58684c11, 0xc1611dab, 0xb6662d3d, 0x76dc4190,
    0x01db7106, 0x98d220bc, 0xefd5102a, 0x71b18589, 0x06b6b51f,
    0x9fbfe4a5, 0xe8b8d433, 0x7807c9a2, 0x0f00f934, 0x9609a88e,
    0xe10e9818, 0x7f6a0dbb, 0x086d3d2d, 0x91646c97, 0xe6635c01,
    0x6b6b51f4, 0x1c6c6162, 0x856530d8, 0xf262004e, 0x6c0695ed,
    0x1b01a57b, 0x8208f4c1, 0xf50fc457, 0x65b0d9c6, 0x12b7e950,
    0x8bbeb8ea, 0xfcb9887c, 0x62dd1ddf, 0x15da2d49, 0x8cd37cf3,
    0xfbd44c65, 0x4db26158, 0x3ab551ce, 0xa3bc0074, 0xd4bb30e2,
    0x4adfa541, 0x3dd895d7, 0xa4d1c46d, 0xd3d6f4fb, 0x4369e96a,
    0x346ed9fc, 0xad678846, 0xda60b8d0, 0x44042d73, 0x33031de5,
    0xaa0a4c5f, 0xdd0d7cc9, 0x5005713c, 0x270241aa, 0xbe0b1010,
    0xc90c2086, 0x5768b525, 0x206f85b3, 0xb966d409, 0xce61e49f,
    0x5edef90e, 0x29d9c998, 0xb0d09822, 0xc7d7a8b4, 0x59b33d17,
    0x2eb40d81, 0xb7bd5c3b, 0xc0ba6cad, 0xedb88320, 0x9abfb3b6,
    0x03b6e20c, 0x74b1d29a, 0xead54739, 0x9dd277af, 0x04db2615,
    0x73dc1683, 0xe3630b12, 0x94643b84, 0x0d6d6a3e, 0x7a6a5aa8,
    0xe40ecf0b, 0x9309ff9d, 0x0a00ae27, 0x7d079eb1, 0xf00f9344,
    0x8708a3d2, 0x1e01f268, 0x6906c2fe, 0xf762575d, 0x806567cb,
    0x196c3671, 0x6e6b06e7, 0xfed41b76, 0x89d32be0, 0x10da7a5a,
    0x67dd4acc, 0xf9b9df6f, 0x8ebeeff9, 0x17b7be43, 0x60b08ed5,
    0xd6d6a3e8, 0xa1d1937e, 0x38d8c2c4, 0x4fdff252, 0xd1bb67f1,
    0xa6bc5767, 0x3fb506dd, 0x48b2364b, 0xd80d2bda, 0xaf0a1b4c,
    0x36034af6, 0x41047a60, 0xdf60efc3, 0xa867df55, 0x316e8eef,
    0x4669be79, 0xcb61b38c, 0xbc66831a, 0x256fd2a0, 0x5268e236,
    0xcc0c7795, 0xbb0b4703, 0x220216b9, 0x5505262f, 0xc5ba3bbe,
    0xb2bd0b28, 0x2bb45a92, 0x5cb36a04, 0xc2d7ffa7, 0xb5d0cf31,
    0x2cd99e8b, 0x5bdeae1d, 0x9b64c2b0, 0xec63f226, 0x756aa39c,
    0x026d930a, 0x9c0906a9, 0xeb0e363f, 0x72076785, 0x05005713,
    0x95bf4a82, 0xe2b87a14, 0x7bb12bae, 0x0cb61b38, 0x92d28e9b,
    0xe5d5be0d, 0x7cdcefb7, 0x0bdbdf21, 0x86d3d2d4, 0xf1d4e242,
    0x68ddb3f8, 0x1fda836e, 0x81be16cd, 0xf6b9265b, 0x6fb077e1,
    0x18b74777, 0x88085ae6, 0xff0f6a70, 0x66063bca, 0x11010b5c,
    0x8f659eff, 0xf862ae69, 0x616bffd3, 0x166ccf45, 0xa00ae278,
    0xd70dd2ee, 0x4e048354, 0x3903b3c2, 0xa7672661, 0xd06016f7,
    0x4969474d, 0x3e6e77db, 0xaed16a4a, 0xd9d65adc, 0x40df0b66,
    0x37d83bf0, 0xa9bcae53, 0xdebb9ec5, 0x47b2cf7f, 0x30b5ffe9,
    0xbdbdf21c, 0xcabac28a, 0x53b39330, 0x24b4a3a6, 0xbad03605,
    0xcdd70693, 0x54de5729, 0x23d967bf, 0xb3667a2e, 0xc4614ab8,
    0x5d681b02, 0x2a6f2b94, 0xb40bbe37, 0xc30c8ea1, 0x5a05df1b,
    0x2d02ef8d
  };
  size_t i;

  /* The CRC state is kept inverted between the pre- and
     post-conditioning steps, per the standard algorithm.  */
  crc ^= 0xffffffffU;
  for (i = 0; i < len; ++i)
    crc = crc32_table[(crc ^ buf[i]) & 0xff] ^ (crc >> 8);
  return crc ^ 0xffffffffU;
}
529
/* Return the CRC-32 of the entire file open at DESCRIPTOR.  Returns 0
   on error (note that 0 is also a possible valid CRC, so callers
   treat a mismatch, not a zero, as failure). */

static uint32_t
elf_crc32_file (struct backtrace_state *state, int descriptor,
		backtrace_error_callback error_callback, void *data)
{
  struct stat st;
  struct backtrace_view file_view;
  uint32_t ret;

  /* Learn the file size, then view the whole file at once. */
  if (fstat (descriptor, &st) < 0)
    {
      error_callback (data, "fstat", errno);
      return 0;
    }

  if (!backtrace_get_view (state, descriptor, 0, st.st_size, error_callback,
			   data, &file_view))
    return 0;

  ret = elf_crc32 (0, (const unsigned char *) file_view.data, st.st_size);

  backtrace_release_view (state, &file_view, error_callback, data);

  return ret;
}
556
/* A dummy callback function used when we can't find a symbol
   table.  It just reports the problem through ERROR_CALLBACK, with
   -1 as the error number (i.e. not an errno value). */

static void
elf_nosyms (struct backtrace_state *state ATTRIBUTE_UNUSED,
	    uintptr_t addr ATTRIBUTE_UNUSED,
	    backtrace_syminfo_callback callback ATTRIBUTE_UNUSED,
	    backtrace_error_callback error_callback, void *data)
{
  error_callback (data, "no symbol table in ELF executable", -1);
}
568
/* A callback function used when we can't find any debug info. */

static int
elf_nodebug (struct backtrace_state *state, uintptr_t pc,
	     backtrace_full_callback callback,
	     backtrace_error_callback error_callback, void *data)
{
  if (state->syminfo_fn != NULL && state->syminfo_fn != elf_nosyms)
    {
      struct backtrace_call_full bdata;

      /* Fetch symbol information so that we can at least get the
	 function name, even without file/line debug info. */

      bdata.full_callback = callback;
      bdata.full_error_callback = error_callback;
      bdata.full_data = data;
      bdata.ret = 0;
      state->syminfo_fn (state, pc, backtrace_syminfo_to_full_callback,
			 backtrace_syminfo_to_full_error_callback, &bdata);
      return bdata.ret;
    }

  /* No symbol table either; report the error with -1 (not an errno). */
  error_callback (data, "no debug info in ELF executable (make sure to compile with -g)", -1);
  return 0;
}
595
596 /* Compare struct elf_symbol for qsort. */
597
598 static int
599 elf_symbol_compare (const void *v1, const void *v2)
600 {
601 const struct elf_symbol *e1 = (const struct elf_symbol *) v1;
602 const struct elf_symbol *e2 = (const struct elf_symbol *) v2;
603
604 if (e1->address < e2->address)
605 return -1;
606 else if (e1->address > e2->address)
607 return 1;
608 else
609 return 0;
610 }
611
612 /* Compare an ADDR against an elf_symbol for bsearch. We allocate one
613 extra entry in the array so that this can look safely at the next
614 entry. */
615
616 static int
617 elf_symbol_search (const void *vkey, const void *ventry)
618 {
619 const uintptr_t *key = (const uintptr_t *) vkey;
620 const struct elf_symbol *entry = (const struct elf_symbol *) ventry;
621 uintptr_t addr;
622
623 addr = *key;
624 if (addr < entry->address)
625 return -1;
626 else if (addr >= entry->address + entry->size)
627 return 1;
628 else
629 return 0;
630 }
631
/* Initialize the symbol table info for elf_syminfo.  SYMTAB_DATA/
   SYMTAB_SIZE is a raw SHT_SYMTAB or SHT_DYNSYM section; STRTAB/
   STRTAB_SIZE is its associated string table.  Extracts the defined
   STT_FUNC and STT_OBJECT symbols, adjusts addresses by BASE_ADDRESS,
   sorts them, and stores the result in SDATA.  OPD, if not NULL,
   describes the PowerPC64 ELFv1 .opd section used to translate
   function descriptors.  Returns 1 on success, 0 on failure. */

static int
elf_initialize_syminfo (struct backtrace_state *state,
			struct libbacktrace_base_address base_address,
			const unsigned char *symtab_data, size_t symtab_size,
			const unsigned char *strtab, size_t strtab_size,
			backtrace_error_callback error_callback,
			void *data, struct elf_syminfo_data *sdata,
			struct elf_ppc64_opd_data *opd)
{
  size_t sym_count;
  const b_elf_sym *sym;
  size_t elf_symbol_count;
  size_t elf_symbol_size;
  struct elf_symbol *elf_symbols;
  size_t i;
  unsigned int j;

  sym_count = symtab_size / sizeof (b_elf_sym);

  /* First pass: we only care about function and object symbols that
     are actually defined (not SHN_UNDEF).  Count them so we can
     allocate exactly once. */
  sym = (const b_elf_sym *) symtab_data;
  elf_symbol_count = 0;
  for (i = 0; i < sym_count; ++i, ++sym)
    {
      int info;

      /* The low four bits of st_info hold the symbol type. */
      info = sym->st_info & 0xf;
      if ((info == STT_FUNC || info == STT_OBJECT)
	  && sym->st_shndx != SHN_UNDEF)
	++elf_symbol_count;
    }

  elf_symbol_size = elf_symbol_count * sizeof (struct elf_symbol);
  elf_symbols = ((struct elf_symbol *)
		 backtrace_alloc (state, elf_symbol_size, error_callback,
				  data));
  if (elf_symbols == NULL)
    return 0;

  /* Second pass: fill in the array with the same filter. */
  sym = (const b_elf_sym *) symtab_data;
  j = 0;
  for (i = 0; i < sym_count; ++i, ++sym)
    {
      int info;

      info = sym->st_info & 0xf;
      if (info != STT_FUNC && info != STT_OBJECT)
	continue;
      if (sym->st_shndx == SHN_UNDEF)
	continue;
      if (sym->st_name >= strtab_size)
	{
	  error_callback (data, "symbol string index out of range", 0);
	  backtrace_free (state, elf_symbols, elf_symbol_size, error_callback,
			  data);
	  return 0;
	}
      elf_symbols[j].name = (const char *) strtab + sym->st_name;
      /* Special case PowerPC64 ELFv1 symbols in .opd section, if the symbol
	 is a function descriptor, read the actual code address from the
	 descriptor. */
      if (opd
	  && sym->st_value >= opd->addr
	  && sym->st_value < opd->addr + opd->size)
	elf_symbols[j].address
	  = *(const b_elf_addr *) (opd->data + (sym->st_value - opd->addr));
      else
	elf_symbols[j].address = sym->st_value;
      elf_symbols[j].address =
	libbacktrace_add_base (elf_symbols[j].address, base_address);
      elf_symbols[j].size = sym->st_size;
      ++j;
    }

  /* Sort by address so elf_syminfo can bsearch. */
  backtrace_qsort (elf_symbols, elf_symbol_count, sizeof (struct elf_symbol),
		   elf_symbol_compare);

  sdata->next = NULL;
  sdata->symbols = elf_symbols;
  sdata->count = elf_symbol_count;

  return 1;
}
717
/* Add EDATA to the list in STATE.  In threaded mode the append is
   done lock-free with an atomic walk plus compare-and-swap at the
   tail. */

static void
elf_add_syminfo_data (struct backtrace_state *state,
		      struct elf_syminfo_data *edata)
{
  if (!state->threaded)
    {
      /* Single-threaded: walk to the tail and link directly. */
      struct elf_syminfo_data **pp;

      for (pp = (struct elf_syminfo_data **) (void *) &state->syminfo_data;
	   *pp != NULL;
	   pp = &(*pp)->next)
	;
      *pp = edata;
    }
  else
    {
      while (1)
	{
	  struct elf_syminfo_data **pp;

	  pp = (struct elf_syminfo_data **) (void *) &state->syminfo_data;

	  /* Atomically find the current tail of the list. */
	  while (1)
	    {
	      struct elf_syminfo_data *p;

	      p = backtrace_atomic_load_pointer (pp);

	      if (p == NULL)
		break;

	      pp = &p->next;
	    }

	  /* Try to install EDATA at the tail; if another thread
	     appended first, retry from the head. */
	  if (__sync_bool_compare_and_swap (pp, NULL, edata))
	    break;
	}
    }
}
759
/* Return the symbol name and value for an ADDR.  Searches each
   module's sorted symbol array in turn with bsearch; invokes CALLBACK
   with the match, or with NULL name if ADDR is not covered by any
   symbol. */

static void
elf_syminfo (struct backtrace_state *state, uintptr_t addr,
	     backtrace_syminfo_callback callback,
	     backtrace_error_callback error_callback ATTRIBUTE_UNUSED,
	     void *data)
{
  struct elf_syminfo_data *edata;
  struct elf_symbol *sym = NULL;

  if (!state->threaded)
    {
      for (edata = (struct elf_syminfo_data *) state->syminfo_data;
	   edata != NULL;
	   edata = edata->next)
	{
	  sym = ((struct elf_symbol *)
		 bsearch (&addr, edata->symbols, edata->count,
			  sizeof (struct elf_symbol), elf_symbol_search));
	  if (sym != NULL)
	    break;
	}
    }
  else
    {
      /* Threaded: load each list link atomically, since another
	 thread may be appending concurrently. */
      struct elf_syminfo_data **pp;

      pp = (struct elf_syminfo_data **) (void *) &state->syminfo_data;
      while (1)
	{
	  edata = backtrace_atomic_load_pointer (pp);
	  if (edata == NULL)
	    break;

	  sym = ((struct elf_symbol *)
		 bsearch (&addr, edata->symbols, edata->count,
			  sizeof (struct elf_symbol), elf_symbol_search));
	  if (sym != NULL)
	    break;

	  pp = &edata->next;
	}
    }

  if (sym == NULL)
    callback (data, addr, NULL, 0, 0);
  else
    callback (data, addr, sym->name, sym->address, sym->size);
}
810
/* Return whether FILENAME is a symlink.  A failing lstat (including
   a nonexistent file) counts as "not a symlink".  */

static int
elf_is_symlink (const char *filename)
{
  struct stat st;

  return lstat (filename, &st) == 0 && S_ISLNK (st.st_mode);
}
822
/* Return the results of reading the symlink FILENAME in a buffer
   allocated by backtrace_alloc.  Return the length of the buffer in
   *LEN.  Note that *PLEN is the allocated buffer length (needed later
   for backtrace_free), not the string length.  Returns NULL on
   error. */

static char *
elf_readlink (struct backtrace_state *state, const char *filename,
	      backtrace_error_callback error_callback, void *data,
	      size_t *plen)
{
  size_t len;
  char *buf;

  len = 128;
  while (1)
    {
      ssize_t rl;

      buf = backtrace_alloc (state, len, error_callback, data);
      if (buf == NULL)
	return NULL;
      rl = readlink (filename, buf, len);
      if (rl < 0)
	{
	  backtrace_free (state, buf, len, error_callback, data);
	  return NULL;
	}
      /* readlink does not NUL-terminate; a result strictly shorter
	 than the buffer (leaving room for the terminator) means we
	 read the whole target. */
      if ((size_t) rl < len - 1)
	{
	  buf[rl] = '\0';
	  *plen = len;
	  return buf;
	}
      /* The target may have been truncated; retry with a larger
	 buffer. */
      backtrace_free (state, buf, len, error_callback, data);
      len *= 2;
    }
}
859
#define SYSTEM_BUILD_ID_DIR "/usr/lib/debug/.build-id/"

/* Open a separate debug info file, using the build ID to find it.
   The file name is PREFIX + first-byte-hex + '/' + remaining-hex +
   ".debug".  Returns an open file descriptor, or -1.

   The GDB manual says that the only place gdb looks for a debug file
   when the build ID is known is in /usr/lib/debug/.build-id. */

static int
elf_open_debugfile_by_buildid (struct backtrace_state *state,
			       const char *buildid_data, size_t buildid_size,
			       backtrace_error_callback error_callback,
			       void *data)
{
  const char * const prefix = SYSTEM_BUILD_ID_DIR;
  const size_t prefix_len = strlen (prefix);
  const char * const suffix = ".debug";
  const size_t suffix_len = strlen (suffix);
  size_t len;
  char *bd_filename;
  char *t;
  size_t i;
  int ret;
  int does_not_exist;

  /* Two hex digits per build-id byte, plus 2: one for the '/'
     inserted after the first byte and one for the trailing NUL. */
  len = prefix_len + buildid_size * 2 + suffix_len + 2;
  bd_filename = backtrace_alloc (state, len, error_callback, data);
  if (bd_filename == NULL)
    return -1;

  t = bd_filename;
  memcpy (t, prefix, prefix_len);
  t += prefix_len;
  for (i = 0; i < buildid_size; i++)
    {
      unsigned char b;
      unsigned char nib;

      /* Emit the byte as two lowercase hex digits. */
      b = (unsigned char) buildid_data[i];
      nib = (b & 0xf0) >> 4;
      *t++ = nib < 10 ? '0' + nib : 'a' + nib - 10;
      nib = b & 0x0f;
      *t++ = nib < 10 ? '0' + nib : 'a' + nib - 10;
      /* The first byte is a subdirectory name. */
      if (i == 0)
	*t++ = '/';
    }
  memcpy (t, suffix, suffix_len);
  t[suffix_len] = '\0';

  ret = backtrace_open (bd_filename, error_callback, data, &does_not_exist);

  backtrace_free (state, bd_filename, len, error_callback, data);

  /* gdb checks that the debuginfo file has the same build ID note.
     That seems kind of pointless to me--why would it have the right
     name but not the right build ID?--so skipping the check. */

  return ret;
}
919
920 /* Try to open a file whose name is PREFIX (length PREFIX_LEN)
921 concatenated with PREFIX2 (length PREFIX2_LEN) concatenated with
922 DEBUGLINK_NAME. Returns an open file descriptor, or -1. */
923
924 static int
925 elf_try_debugfile (struct backtrace_state *state, const char *prefix,
926 size_t prefix_len, const char *prefix2, size_t prefix2_len,
927 const char *debuglink_name,
928 backtrace_error_callback error_callback, void *data)
929 {
930 size_t debuglink_len;
931 size_t try_len;
932 char *try;
933 int does_not_exist;
934 int ret;
935
936 debuglink_len = strlen (debuglink_name);
937 try_len = prefix_len + prefix2_len + debuglink_len + 1;
938 try = backtrace_alloc (state, try_len, error_callback, data);
939 if (try == NULL)
940 return -1;
941
942 memcpy (try, prefix, prefix_len);
943 memcpy (try + prefix_len, prefix2, prefix2_len);
944 memcpy (try + prefix_len + prefix2_len, debuglink_name, debuglink_len);
945 try[prefix_len + prefix2_len + debuglink_len] = '\0';
946
947 ret = backtrace_open (try, error_callback, data, &does_not_exist);
948
949 backtrace_free (state, try, try_len, error_callback, data);
950
951 return ret;
952 }
953
/* Find a separate debug info file, using the debuglink section data
   to find it.  Tries, in order: the directory of FILENAME (after
   resolving symlinks), a .debug subdirectory of that directory, and
   /usr/lib/debug prefixed to that directory.  Returns an open file
   descriptor, or -1. */

static int
elf_find_debugfile_by_debuglink (struct backtrace_state *state,
				 const char *filename,
				 const char *debuglink_name,
				 backtrace_error_callback error_callback,
				 void *data)
{
  int ret;
  char *alc;
  size_t alc_len;
  const char *slash;
  int ddescriptor;
  const char *prefix;
  size_t prefix_len;

  /* Resolve symlinks in FILENAME.  Since FILENAME is fairly likely to
     be /proc/self/exe, symlinks are common.  We don't try to resolve
     the whole path name, just the base name.  ALC/ALC_LEN track the
     buffer currently owned by this function, so each iteration can
     free the previous one. */
  ret = -1;
  alc = NULL;
  alc_len = 0;
  while (elf_is_symlink (filename))
    {
      char *new_buf;
      size_t new_len;

      new_buf = elf_readlink (state, filename, error_callback, data, &new_len);
      if (new_buf == NULL)
	break;

      if (new_buf[0] == '/')
	/* Absolute symlink target: use it directly. */
	filename = new_buf;
      else
	{
	  slash = strrchr (filename, '/');
	  if (slash == NULL)
	    filename = new_buf;
	  else
	    {
	      /* Relative target: splice it onto FILENAME's directory. */
	      size_t clen;
	      char *c;

	      slash++;
	      clen = slash - filename + strlen (new_buf) + 1;
	      c = backtrace_alloc (state, clen, error_callback, data);
	      if (c == NULL)
		goto done;

	      memcpy (c, filename, slash - filename);
	      memcpy (c + (slash - filename), new_buf, strlen (new_buf));
	      c[slash - filename + strlen (new_buf)] = '\0';
	      backtrace_free (state, new_buf, new_len, error_callback, data);
	      filename = c;
	      new_buf = c;
	      new_len = clen;
	    }
	}

      /* NEW_BUF replaces ALC as the owned buffer backing FILENAME. */
      if (alc != NULL)
	backtrace_free (state, alc, alc_len, error_callback, data);
      alc = new_buf;
      alc_len = new_len;
    }

  /* Look for DEBUGLINK_NAME in the same directory as FILENAME. */

  slash = strrchr (filename, '/');
  if (slash == NULL)
    {
      prefix = "";
      prefix_len = 0;
    }
  else
    {
      slash++;
      prefix = filename;
      prefix_len = slash - filename;
    }

  ddescriptor = elf_try_debugfile (state, prefix, prefix_len, "", 0,
				   debuglink_name, error_callback, data);
  if (ddescriptor >= 0)
    {
      ret = ddescriptor;
      goto done;
    }

  /* Look for DEBUGLINK_NAME in a .debug subdirectory of FILENAME. */

  ddescriptor = elf_try_debugfile (state, prefix, prefix_len, ".debug/",
				   strlen (".debug/"), debuglink_name,
				   error_callback, data);
  if (ddescriptor >= 0)
    {
      ret = ddescriptor;
      goto done;
    }

  /* Look for DEBUGLINK_NAME in /usr/lib/debug. */

  ddescriptor = elf_try_debugfile (state, "/usr/lib/debug/",
				   strlen ("/usr/lib/debug/"), prefix,
				   prefix_len, debuglink_name,
				   error_callback, data);
  if (ddescriptor >= 0)
    ret = ddescriptor;

 done:
  if (alc != NULL && alc_len > 0)
    backtrace_free (state, alc, alc_len, error_callback, data);
  return ret;
}
1069
1070 /* Open a separate debug info file, using the debuglink section data
1071 to find it. Returns an open file descriptor, or -1. */
1072
1073 static int
1074 elf_open_debugfile_by_debuglink (struct backtrace_state *state,
1075 const char *filename,
1076 const char *debuglink_name,
1077 uint32_t debuglink_crc,
1078 backtrace_error_callback error_callback,
1079 void *data)
1080 {
1081 int ddescriptor;
1082
1083 ddescriptor = elf_find_debugfile_by_debuglink (state, filename,
1084 debuglink_name,
1085 error_callback, data);
1086 if (ddescriptor < 0)
1087 return -1;
1088
1089 if (debuglink_crc != 0)
1090 {
1091 uint32_t got_crc;
1092
1093 got_crc = elf_crc32_file (state, ddescriptor, error_callback, data);
1094 if (got_crc != debuglink_crc)
1095 {
1096 backtrace_close (ddescriptor, error_callback, data);
1097 return -1;
1098 }
1099 }
1100
1101 return ddescriptor;
1102 }
1103
1104 /* A function useful for setting a breakpoint for an inflation failure
1105 when this code is compiled with -g. */
1106
static void
elf_uncompress_failed(void)
{
  /* Deliberately empty: this exists only as a breakpoint target.
     Every decompression failure path calls it before returning 0.  */
}
1111
1112 /* *PVAL is the current value being read from the stream, and *PBITS
1113 is the number of valid bits. Ensure that *PVAL holds at least 15
1114 bits by reading additional bits from *PPIN, up to PINEND, as
1115 needed. Updates *PPIN, *PVAL and *PBITS. Returns 1 on success, 0
1116 on error. */
1117
static int
elf_fetch_bits (const unsigned char **ppin, const unsigned char *pinend,
		uint64_t *pval, unsigned int *pbits)
{
  unsigned int bits;
  const unsigned char *pin;
  uint64_t val;
  uint32_t next;

  /* Fast path: enough bits are already buffered.  */
  bits = *pbits;
  if (bits >= 15)
    return 1;
  pin = *ppin;
  val = *pval;

  /* We always read 32 bits at a time, so fewer than 4 bytes of input
     remaining means the stream is truncated.  */
  if (unlikely (pinend - pin < 4))
    {
      elf_uncompress_failed ();
      return 0;
    }

#if defined(__BYTE_ORDER__) && defined(__ORDER_LITTLE_ENDIAN__) \
    && defined(__ORDER_BIG_ENDIAN__) \
    && (__BYTE_ORDER__ == __ORDER_BIG_ENDIAN__ \
	|| __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__)
  /* We've ensured that PIN is aligned.  */
  next = *(const uint32_t *)pin;

#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
  next = __builtin_bswap32 (next);
#endif
#else
  /* Byte order not known at compile time: assemble the little-endian
     32-bit value by hand.  */
  next = pin[0] | (pin[1] << 8) | (pin[2] << 16) | (pin[3] << 24);
#endif

  /* Splice the newly read 32 bits above the BITS bits still buffered
     in VAL; BITS < 15 here, so the 64-bit buffer cannot overflow.  */
  val |= (uint64_t)next << bits;
  bits += 32;
  pin += 4;

  /* We will need the next four bytes soon.  */
  __builtin_prefetch (pin, 0, 0);

  *ppin = pin;
  *pval = val;
  *pbits = bits;
  return 1;
}
1165
/* This is like elf_fetch_bits, but it fetches the bits backward, and ensures
   at least 16 bits.  This is for zstd.  */
1168
static int
elf_fetch_bits_backward (const unsigned char **ppin,
			 const unsigned char *pinend,
			 uint64_t *pval, unsigned int *pbits)
{
  unsigned int bits;
  const unsigned char *pin;
  uint64_t val;
  uint32_t next;

  /* Fast path: enough bits are already buffered.  */
  bits = *pbits;
  if (bits >= 16)
    return 1;
  pin = *ppin;
  val = *pval;

  /* For backward fetching PINEND is the *start* of the stream in
     memory and PIN walks downward toward it.  NOTE(review): when the
     input is exhausted this returns success without refilling, so the
     caller may proceed with fewer than 16 buffered bits — confirm the
     zstd callers tolerate that.  */
  if (unlikely (pin <= pinend))
    return 1;

  pin -= 4;

#if defined(__BYTE_ORDER__) && defined(__ORDER_LITTLE_ENDIAN__) \
    && defined(__ORDER_BIG_ENDIAN__) \
    && (__BYTE_ORDER__ == __ORDER_BIG_ENDIAN__ \
	|| __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__)
  /* We've ensured that PIN is aligned.  */
  next = *(const uint32_t *)pin;

#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
  next = __builtin_bswap32 (next);
#endif
#else
  /* Byte order not known at compile time: assemble the little-endian
     32-bit value by hand.  */
  next = pin[0] | (pin[1] << 8) | (pin[2] << 16) | (pin[3] << 24);
#endif

  /* Reading backward, the new 32 bits sit *below* the bits already
     buffered, so shift the old bits up first.  */
  val <<= 32;
  val |= next;
  bits += 32;

  /* If PIN dropped below PINEND we read bytes that lie before the
     start of the stream; shift them out and discount their bits.  */
  if (unlikely (pin < pinend))
    {
      val >>= (pinend - pin) * 8;
      bits -= (pinend - pin) * 8;
    }

  *ppin = pin;
  *pval = val;
  *pbits = bits;
  return 1;
}
1219
1220 /* Initialize backward fetching when the bitstream starts with a 1 bit in the
1221 last byte in memory (which is the first one that we read). This is used by
1222 zstd decompression. Returns 1 on success, 0 on error. */
1223
static int
elf_fetch_backward_init (const unsigned char **ppin,
			 const unsigned char *pinend,
			 uint64_t *pval, unsigned int *pbits)
{
  const unsigned char *pin;
  unsigned int stream_start;
  uint64_t val;
  unsigned int bits;

  pin = *ppin;
  /* The last byte in memory (read first) must contain the marker 1
     bit that precedes the stream; an all-zero byte is invalid.  */
  stream_start = (unsigned int)*pin;
  if (unlikely (stream_start == 0))
    {
      elf_uncompress_failed ();
      return 0;
    }
  val = 0;
  bits = 0;

  /* Align to a 32-bit boundary, consuming one byte at a time so that
     elf_fetch_bits_backward can then do aligned 4-byte reads.  */
  while ((((uintptr_t)pin) & 3) != 0)
    {
      val <<= 8;
      val |= (uint64_t)*pin;
      bits += 8;
      --pin;
    }

  /* Consume the byte that landed on the alignment boundary too.  */
  val <<= 8;
  val |= (uint64_t)*pin;
  bits += 8;

  *ppin = pin;
  *pval = val;
  *pbits = bits;
  if (!elf_fetch_bits_backward (ppin, pinend, pval, pbits))
    return 0;

  /* Discard the padding zero bits and the marker 1 bit at the top of
     the first byte: clz of STREAM_START, adjusted down to a byte
     position, plus one for the marker bit itself.  */
  *pbits -= __builtin_clz (stream_start) - (sizeof (unsigned int) - 1) * 8 + 1;

  if (!elf_fetch_bits_backward (ppin, pinend, pval, pbits))
    return 0;

  return 1;
}
1270
1271 /* Huffman code tables, like the rest of the zlib format, are defined
1272 by RFC 1951. We store a Huffman code table as a series of tables
1273 stored sequentially in memory. Each entry in a table is 16 bits.
1274 The first, main, table has 256 entries. It is followed by a set of
1275 secondary tables of length 2 to 128 entries. The maximum length of
1276 a code sequence in the deflate format is 15 bits, so that is all we
1277 need. Each secondary table has an index, which is the offset of
1278 the table in the overall memory storage.
1279
1280 The deflate format says that all codes of a given bit length are
1281 lexicographically consecutive. Perhaps we could have 130 values
1282 that require a 15-bit code, perhaps requiring three secondary
1283 tables of size 128. I don't know if this is actually possible, but
1284 it suggests that the maximum size required for secondary tables is
1285 3 * 128 + 3 * 64 ... == 768. The zlib enough program reports 660
1286 as the maximum. We permit 768, since in addition to the 256 for
1287 the primary table, with two bytes per entry, and with the two
1288 tables we need, that gives us a page.
1289
1290 A single table entry needs to store a value or (for the main table
1291 only) the index and size of a secondary table. Values range from 0
1292 to 285, inclusive. Secondary table indexes, per above, range from
1293 0 to 510. For a value we need to store the number of bits we need
1294 to determine that value (one value may appear multiple times in the
1295 table), which is 1 to 8. For a secondary table we need to store
1296 the number of bits used to index into the table, which is 1 to 7.
1297 And of course we need 1 bit to decide whether we have a value or a
1298 secondary table index. So each entry needs 9 bits for value/table
1299 index, 3 bits for size, 1 bit what it is. For simplicity we use 16
1300 bits per entry. */
1301
/* Number of entries we allocate for one code table.  We get a page
   for the two code tables we need.  */
1304
1305 #define ZLIB_HUFFMAN_TABLE_SIZE (1024)
1306
1307 /* Bit masks and shifts for the values in the table. */
1308
1309 #define ZLIB_HUFFMAN_VALUE_MASK 0x01ff
1310 #define ZLIB_HUFFMAN_BITS_SHIFT 9
1311 #define ZLIB_HUFFMAN_BITS_MASK 0x7
1312 #define ZLIB_HUFFMAN_SECONDARY_SHIFT 12
1313
1314 /* For working memory while inflating we need two code tables, we need
1315 an array of code lengths (max value 15, so we use unsigned char),
1316 and an array of unsigned shorts used while building a table. The
1317 latter two arrays must be large enough to hold the maximum number
1318 of code lengths, which RFC 1951 defines as 286 + 30. */
1319
1320 #define ZLIB_TABLE_SIZE \
1321 (2 * ZLIB_HUFFMAN_TABLE_SIZE * sizeof (uint16_t) \
1322 + (286 + 30) * sizeof (uint16_t) \
1323 + (286 + 30) * sizeof (unsigned char))
1324
1325 #define ZLIB_TABLE_CODELEN_OFFSET \
1326 (2 * ZLIB_HUFFMAN_TABLE_SIZE * sizeof (uint16_t) \
1327 + (286 + 30) * sizeof (uint16_t))
1328
1329 #define ZLIB_TABLE_WORK_OFFSET \
1330 (2 * ZLIB_HUFFMAN_TABLE_SIZE * sizeof (uint16_t))
1331
1332 #ifdef BACKTRACE_GENERATE_FIXED_HUFFMAN_TABLE
1333
1334 /* Used by the main function that generates the fixed table to learn
1335 the table size. */
1336 static size_t final_next_secondary;
1337
1338 #endif
1339
1340 /* Build a Huffman code table from an array of lengths in CODES of
1341 length CODES_LEN. The table is stored into *TABLE. ZDEBUG_TABLE
1342 is the same as for elf_zlib_inflate, used to find some work space.
1343 Returns 1 on success, 0 on error. */
1344
static int
elf_zlib_inflate_table (unsigned char *codes, size_t codes_len,
			uint16_t *zdebug_table, uint16_t *table)
{
  uint16_t count[16];	/* Number of codes of each bit length.  */
  uint16_t start[16];	/* First value with each bit length.  */
  uint16_t prev[16];	/* Most recent value seen with each length.  */
  uint16_t firstcode[7];	/* First code for lengths 9..15.  */
  uint16_t *next;
  size_t i;
  size_t j;
  unsigned int code;
  size_t next_secondary;

  /* Count the number of codes of each length.  Set NEXT[val] to be the
     next value after VAL with the same bit length.  */

  /* NEXT lives in the scratch area at the end of ZDEBUG_TABLE.  */
  next = (uint16_t *) (((unsigned char *) zdebug_table)
		       + ZLIB_TABLE_WORK_OFFSET);

  memset (&count[0], 0, 16 * sizeof (uint16_t));
  for (i = 0; i < codes_len; ++i)
    {
      /* RFC 1951 code lengths are at most 15 bits.  */
      if (unlikely (codes[i] >= 16))
	{
	  elf_uncompress_failed ();
	  return 0;
	}

      if (count[codes[i]] == 0)
	{
	  start[codes[i]] = i;
	  prev[codes[i]] = i;
	}
      else
	{
	  next[prev[codes[i]]] = i;
	  prev[codes[i]] = i;
	}

      ++count[codes[i]];
    }

  /* For each length, fill in the table for the codes of that
     length.  */

  memset (table, 0, ZLIB_HUFFMAN_TABLE_SIZE * sizeof (uint16_t));

  /* Handle the values that do not require a secondary table.  */

  code = 0;
  for (j = 1; j <= 8; ++j)
    {
      unsigned int jcnt;
      unsigned int val;

      jcnt = count[j];
      if (jcnt == 0)
	continue;

      /* A J-bit prefix code cannot have more than 2^J codes.  */
      if (unlikely (jcnt > (1U << j)))
	{
	  elf_uncompress_failed ();
	  return 0;
	}

      /* There are JCNT values that have this length, the values
	 starting from START[j] continuing through NEXT[VAL].  Those
	 values are assigned consecutive values starting at CODE.  */

      val = start[j];
      for (i = 0; i < jcnt; ++i)
	{
	  uint16_t tval;
	  size_t ind;
	  unsigned int incr;

	  /* In the compressed bit stream, the value VAL is encoded as
	     J bits with the value C.  */

	  /* The value must fit in the 9-bit value field of a table
	     entry.  */
	  if (unlikely ((val & ~ZLIB_HUFFMAN_VALUE_MASK) != 0))
	    {
	      elf_uncompress_failed ();
	      return 0;
	    }

	  tval = val | ((j - 1) << ZLIB_HUFFMAN_BITS_SHIFT);

	  /* The table lookup uses 8 bits.  If J is less than 8, we
	     don't know what the other bits will be.  We need to fill
	     in all possibilities in the table.  Since the Huffman
	     code is unambiguous, those entries can't be used for any
	     other code.  */

	  for (ind = code; ind < 0x100; ind += 1 << j)
	    {
	      /* A collision means the code lengths do not describe a
		 valid prefix code.  */
	      if (unlikely (table[ind] != 0))
		{
		  elf_uncompress_failed ();
		  return 0;
		}
	      table[ind] = tval;
	    }

	  /* Advance to the next value with this length.  */
	  if (i + 1 < jcnt)
	    val = next[val];

	  /* The Huffman codes are stored in the bitstream with the
	     most significant bit first, as is required to make them
	     unambiguous.  The effect is that when we read them from
	     the bitstream we see the bit sequence in reverse order:
	     the most significant bit of the Huffman code is the least
	     significant bit of the value we read from the bitstream.
	     That means that to make our table lookups work, we need
	     to reverse the bits of CODE.  Since reversing bits is
	     tedious and in general requires using a table, we instead
	     increment CODE in reverse order.  That is, if the number
	     of bits we are currently using, here named J, is 3, we
	     count as 000, 100, 010, 110, 001, 101, 011, 111, which is
	     to say the numbers from 0 to 7 but with the bits
	     reversed.  Going to more bits, aka incrementing J,
	     effectively just adds more zero bits at the beginning,
	     and as such does not change the numeric value of CODE.

	     To increment CODE of length J in reverse order, find the
	     most significant zero bit and set it to one while
	     clearing all higher bits.  In other words, add 1 modulo
	     2^J, only reversed.  */

	  incr = 1U << (j - 1);
	  while ((code & incr) != 0)
	    incr >>= 1;
	  if (incr == 0)
	    code = 0;
	  else
	    {
	      code &= incr - 1;
	      code += incr;
	    }
	}
    }

  /* Handle the values that require a secondary table.  */

  /* Set FIRSTCODE, the number at which the codes start, for each
     length.  */

  for (j = 9; j < 16; j++)
    {
      unsigned int jcnt;
      unsigned int k;

      jcnt = count[j];
      if (jcnt == 0)
	continue;

      /* There are JCNT values that have this length, the values
	 starting from START[j].  Those values are assigned
	 consecutive values starting at CODE.  */

      firstcode[j - 9] = code;

      /* Reverse add JCNT to CODE modulo 2^J.  */
      for (k = 0; k < j; ++k)
	{
	  if ((jcnt & (1U << k)) != 0)
	    {
	      unsigned int m;
	      unsigned int bit;

	      bit = 1U << (j - k - 1);
	      for (m = 0; m < j - k; ++m, bit >>= 1)
		{
		  if ((code & bit) == 0)
		    {
		      code += bit;
		      break;
		    }
		  code &= ~bit;
		}
	      jcnt &= ~(1U << k);
	    }
	}
      /* If JCNT is not zero here the reversed addition overflowed:
	 too many codes for this length.  */
      if (unlikely (jcnt != 0))
	{
	  elf_uncompress_failed ();
	  return 0;
	}
    }

  /* For J from 9 to 15, inclusive, we store COUNT[J] consecutive
     values starting at START[J] with consecutive codes starting at
     FIRSTCODE[J - 9].  In the primary table we need to point to the
     secondary table, and the secondary table will be indexed by J - 9
     bits.  We count down from 15 so that we install the larger
     secondary tables first, as the smaller ones may be embedded in
     the larger ones.  */

  next_secondary = 0; /* Index of next secondary table (after primary).  */
  for (j = 15; j >= 9; j--)
    {
      unsigned int jcnt;
      unsigned int val;
      size_t primary; /* Current primary index.  */
      size_t secondary; /* Offset to current secondary table.  */
      size_t secondary_bits; /* Bit size of current secondary table.  */

      jcnt = count[j];
      if (jcnt == 0)
	continue;

      val = start[j];
      code = firstcode[j - 9];
      primary = 0x100;
      secondary = 0;
      secondary_bits = 0;
      for (i = 0; i < jcnt; ++i)
	{
	  uint16_t tval;
	  size_t ind;
	  unsigned int incr;

	  if ((code & 0xff) != primary)
	    {
	      uint16_t tprimary;

	      /* Fill in a new primary table entry.  */

	      primary = code & 0xff;

	      tprimary = table[primary];
	      if (tprimary == 0)
		{
		  /* Start a new secondary table.  */

		  /* The secondary table index must fit in the 9-bit
		     value field of the primary entry.  */
		  if (unlikely ((next_secondary & ZLIB_HUFFMAN_VALUE_MASK)
				!= next_secondary))
		    {
		      elf_uncompress_failed ();
		      return 0;
		    }

		  secondary = next_secondary;
		  secondary_bits = j - 8;
		  next_secondary += 1 << secondary_bits;
		  table[primary] = (secondary
				    + ((j - 8) << ZLIB_HUFFMAN_BITS_SHIFT)
				    + (1U << ZLIB_HUFFMAN_SECONDARY_SHIFT));
		}
	      else
		{
		  /* There is an existing entry.  It had better be a
		     secondary table with enough bits.  */
		  if (unlikely ((tprimary
				 & (1U << ZLIB_HUFFMAN_SECONDARY_SHIFT))
				== 0))
		    {
		      elf_uncompress_failed ();
		      return 0;
		    }
		  secondary = tprimary & ZLIB_HUFFMAN_VALUE_MASK;
		  secondary_bits = ((tprimary >> ZLIB_HUFFMAN_BITS_SHIFT)
				    & ZLIB_HUFFMAN_BITS_MASK);
		  if (unlikely (secondary_bits < j - 8))
		    {
		      elf_uncompress_failed ();
		      return 0;
		    }
		}
	    }

	  /* Fill in secondary table entries.  */

	  tval = val | ((j - 8) << ZLIB_HUFFMAN_BITS_SHIFT);

	  /* As with the primary table, replicate the entry for every
	     index whose low J-8 bits match this code's suffix.  */
	  for (ind = code >> 8;
	       ind < (1U << secondary_bits);
	       ind += 1U << (j - 8))
	    {
	      if (unlikely (table[secondary + 0x100 + ind] != 0))
		{
		  elf_uncompress_failed ();
		  return 0;
		}
	      table[secondary + 0x100 + ind] = tval;
	    }

	  if (i + 1 < jcnt)
	    val = next[val];

	  /* Increment CODE in bit-reversed order, as above.  */
	  incr = 1U << (j - 1);
	  while ((code & incr) != 0)
	    incr >>= 1;
	  if (incr == 0)
	    code = 0;
	  else
	    {
	      code &= incr - 1;
	      code += incr;
	    }
	}
    }

#ifdef BACKTRACE_GENERATE_FIXED_HUFFMAN_TABLE
  final_next_secondary = next_secondary;
#endif

  return 1;
}
1655
1656 #ifdef BACKTRACE_GENERATE_FIXED_HUFFMAN_TABLE
1657
1658 /* Used to generate the fixed Huffman table for block type 1. */
1659
1660 #include <stdio.h>
1661
/* Scratch table for the generator.  NOTE(review): ZLIB_TABLE_SIZE is a
   byte count, so declaring uint16_t[ZLIB_TABLE_SIZE] over-allocates by
   2x — harmless for this offline tool, but presumably unintended.  */
static uint16_t table[ZLIB_TABLE_SIZE];
static unsigned char codes[288];

/* Emit the fixed literal/length and distance tables for deflate block
   type 1 as C initializers on stdout.  The code lengths below are the
   fixed values from RFC 1951 section 3.2.6.  */
int
main ()
{
  size_t i;

  /* Fixed literal/length code lengths.  */
  for (i = 0; i <= 143; ++i)
    codes[i] = 8;
  for (i = 144; i <= 255; ++i)
    codes[i] = 9;
  for (i = 256; i <= 279; ++i)
    codes[i] = 7;
  for (i = 280; i <= 287; ++i)
    codes[i] = 8;
  /* TABLE doubles as the scratch area and the output table here.  */
  if (!elf_zlib_inflate_table (&codes[0], 288, &table[0], &table[0]))
    {
      fprintf (stderr, "elf_zlib_inflate_table failed\n");
      exit (EXIT_FAILURE);
    }

  printf ("static const uint16_t elf_zlib_default_table[%#zx] =\n",
	  final_next_secondary + 0x100);
  printf ("{\n");
  for (i = 0; i < final_next_secondary + 0x100; i += 8)
    {
      size_t j;

      printf (" ");
      for (j = i; j < final_next_secondary + 0x100 && j < i + 8; ++j)
	printf (" %#x,", table[j]);
      printf ("\n");
    }
  printf ("};\n");
  printf ("\n");

  /* Fixed distance code lengths: all 32 codes are 5 bits.  */
  for (i = 0; i < 32; ++i)
    codes[i] = 5;
  if (!elf_zlib_inflate_table (&codes[0], 32, &table[0], &table[0]))
    {
      fprintf (stderr, "elf_zlib_inflate_table failed\n");
      exit (EXIT_FAILURE);
    }

  printf ("static const uint16_t elf_zlib_default_dist_table[%#zx] =\n",
	  final_next_secondary + 0x100);
  printf ("{\n");
  for (i = 0; i < final_next_secondary + 0x100; i += 8)
    {
      size_t j;

      printf (" ");
      for (j = i; j < final_next_secondary + 0x100 && j < i + 8; ++j)
	printf (" %#x,", table[j]);
      printf ("\n");
    }
  printf ("};\n");

  return 0;
}
1723
1724 #endif
1725
1726 /* The fixed tables generated by the #ifdef'ed out main function
1727 above. */
1728
/* Fixed literal/length table for deflate block type 1, as produced by
   the generator main above from the RFC 1951 fixed code lengths.
   Entries are encoded per the ZLIB_HUFFMAN_* masks: low 9 bits are
   the value or secondary-table index, next 3 the bit count, bit 12
   the secondary-table flag.  Do not edit by hand; regenerate with
   BACKTRACE_GENERATE_FIXED_HUFFMAN_TABLE.  */
static const uint16_t elf_zlib_default_table[0x170] =
{
  0xd00, 0xe50, 0xe10, 0xf18, 0xd10, 0xe70, 0xe30, 0x1230,
  0xd08, 0xe60, 0xe20, 0x1210, 0xe00, 0xe80, 0xe40, 0x1250,
  0xd04, 0xe58, 0xe18, 0x1200, 0xd14, 0xe78, 0xe38, 0x1240,
  0xd0c, 0xe68, 0xe28, 0x1220, 0xe08, 0xe88, 0xe48, 0x1260,
  0xd02, 0xe54, 0xe14, 0xf1c, 0xd12, 0xe74, 0xe34, 0x1238,
  0xd0a, 0xe64, 0xe24, 0x1218, 0xe04, 0xe84, 0xe44, 0x1258,
  0xd06, 0xe5c, 0xe1c, 0x1208, 0xd16, 0xe7c, 0xe3c, 0x1248,
  0xd0e, 0xe6c, 0xe2c, 0x1228, 0xe0c, 0xe8c, 0xe4c, 0x1268,
  0xd01, 0xe52, 0xe12, 0xf1a, 0xd11, 0xe72, 0xe32, 0x1234,
  0xd09, 0xe62, 0xe22, 0x1214, 0xe02, 0xe82, 0xe42, 0x1254,
  0xd05, 0xe5a, 0xe1a, 0x1204, 0xd15, 0xe7a, 0xe3a, 0x1244,
  0xd0d, 0xe6a, 0xe2a, 0x1224, 0xe0a, 0xe8a, 0xe4a, 0x1264,
  0xd03, 0xe56, 0xe16, 0xf1e, 0xd13, 0xe76, 0xe36, 0x123c,
  0xd0b, 0xe66, 0xe26, 0x121c, 0xe06, 0xe86, 0xe46, 0x125c,
  0xd07, 0xe5e, 0xe1e, 0x120c, 0xd17, 0xe7e, 0xe3e, 0x124c,
  0xd0f, 0xe6e, 0xe2e, 0x122c, 0xe0e, 0xe8e, 0xe4e, 0x126c,
  0xd00, 0xe51, 0xe11, 0xf19, 0xd10, 0xe71, 0xe31, 0x1232,
  0xd08, 0xe61, 0xe21, 0x1212, 0xe01, 0xe81, 0xe41, 0x1252,
  0xd04, 0xe59, 0xe19, 0x1202, 0xd14, 0xe79, 0xe39, 0x1242,
  0xd0c, 0xe69, 0xe29, 0x1222, 0xe09, 0xe89, 0xe49, 0x1262,
  0xd02, 0xe55, 0xe15, 0xf1d, 0xd12, 0xe75, 0xe35, 0x123a,
  0xd0a, 0xe65, 0xe25, 0x121a, 0xe05, 0xe85, 0xe45, 0x125a,
  0xd06, 0xe5d, 0xe1d, 0x120a, 0xd16, 0xe7d, 0xe3d, 0x124a,
  0xd0e, 0xe6d, 0xe2d, 0x122a, 0xe0d, 0xe8d, 0xe4d, 0x126a,
  0xd01, 0xe53, 0xe13, 0xf1b, 0xd11, 0xe73, 0xe33, 0x1236,
  0xd09, 0xe63, 0xe23, 0x1216, 0xe03, 0xe83, 0xe43, 0x1256,
  0xd05, 0xe5b, 0xe1b, 0x1206, 0xd15, 0xe7b, 0xe3b, 0x1246,
  0xd0d, 0xe6b, 0xe2b, 0x1226, 0xe0b, 0xe8b, 0xe4b, 0x1266,
  0xd03, 0xe57, 0xe17, 0xf1f, 0xd13, 0xe77, 0xe37, 0x123e,
  0xd0b, 0xe67, 0xe27, 0x121e, 0xe07, 0xe87, 0xe47, 0x125e,
  0xd07, 0xe5f, 0xe1f, 0x120e, 0xd17, 0xe7f, 0xe3f, 0x124e,
  0xd0f, 0xe6f, 0xe2f, 0x122e, 0xe0f, 0xe8f, 0xe4f, 0x126e,
  0x290, 0x291, 0x292, 0x293, 0x294, 0x295, 0x296, 0x297,
  0x298, 0x299, 0x29a, 0x29b, 0x29c, 0x29d, 0x29e, 0x29f,
  0x2a0, 0x2a1, 0x2a2, 0x2a3, 0x2a4, 0x2a5, 0x2a6, 0x2a7,
  0x2a8, 0x2a9, 0x2aa, 0x2ab, 0x2ac, 0x2ad, 0x2ae, 0x2af,
  0x2b0, 0x2b1, 0x2b2, 0x2b3, 0x2b4, 0x2b5, 0x2b6, 0x2b7,
  0x2b8, 0x2b9, 0x2ba, 0x2bb, 0x2bc, 0x2bd, 0x2be, 0x2bf,
  0x2c0, 0x2c1, 0x2c2, 0x2c3, 0x2c4, 0x2c5, 0x2c6, 0x2c7,
  0x2c8, 0x2c9, 0x2ca, 0x2cb, 0x2cc, 0x2cd, 0x2ce, 0x2cf,
  0x2d0, 0x2d1, 0x2d2, 0x2d3, 0x2d4, 0x2d5, 0x2d6, 0x2d7,
  0x2d8, 0x2d9, 0x2da, 0x2db, 0x2dc, 0x2dd, 0x2de, 0x2df,
  0x2e0, 0x2e1, 0x2e2, 0x2e3, 0x2e4, 0x2e5, 0x2e6, 0x2e7,
  0x2e8, 0x2e9, 0x2ea, 0x2eb, 0x2ec, 0x2ed, 0x2ee, 0x2ef,
  0x2f0, 0x2f1, 0x2f2, 0x2f3, 0x2f4, 0x2f5, 0x2f6, 0x2f7,
  0x2f8, 0x2f9, 0x2fa, 0x2fb, 0x2fc, 0x2fd, 0x2fe, 0x2ff,
};
1778
/* Fixed distance table for deflate block type 1 (all 32 codes are 5
   bits), generated by the main above.  Same entry encoding as
   elf_zlib_default_table; the 32-entry pattern repeats 8 times to
   fill the 8-bit primary index.  Do not edit by hand.  */
static const uint16_t elf_zlib_default_dist_table[0x100] =
{
  0x800, 0x810, 0x808, 0x818, 0x804, 0x814, 0x80c, 0x81c,
  0x802, 0x812, 0x80a, 0x81a, 0x806, 0x816, 0x80e, 0x81e,
  0x801, 0x811, 0x809, 0x819, 0x805, 0x815, 0x80d, 0x81d,
  0x803, 0x813, 0x80b, 0x81b, 0x807, 0x817, 0x80f, 0x81f,
  0x800, 0x810, 0x808, 0x818, 0x804, 0x814, 0x80c, 0x81c,
  0x802, 0x812, 0x80a, 0x81a, 0x806, 0x816, 0x80e, 0x81e,
  0x801, 0x811, 0x809, 0x819, 0x805, 0x815, 0x80d, 0x81d,
  0x803, 0x813, 0x80b, 0x81b, 0x807, 0x817, 0x80f, 0x81f,
  0x800, 0x810, 0x808, 0x818, 0x804, 0x814, 0x80c, 0x81c,
  0x802, 0x812, 0x80a, 0x81a, 0x806, 0x816, 0x80e, 0x81e,
  0x801, 0x811, 0x809, 0x819, 0x805, 0x815, 0x80d, 0x81d,
  0x803, 0x813, 0x80b, 0x81b, 0x807, 0x817, 0x80f, 0x81f,
  0x800, 0x810, 0x808, 0x818, 0x804, 0x814, 0x80c, 0x81c,
  0x802, 0x812, 0x80a, 0x81a, 0x806, 0x816, 0x80e, 0x81e,
  0x801, 0x811, 0x809, 0x819, 0x805, 0x815, 0x80d, 0x81d,
  0x803, 0x813, 0x80b, 0x81b, 0x807, 0x817, 0x80f, 0x81f,
  0x800, 0x810, 0x808, 0x818, 0x804, 0x814, 0x80c, 0x81c,
  0x802, 0x812, 0x80a, 0x81a, 0x806, 0x816, 0x80e, 0x81e,
  0x801, 0x811, 0x809, 0x819, 0x805, 0x815, 0x80d, 0x81d,
  0x803, 0x813, 0x80b, 0x81b, 0x807, 0x817, 0x80f, 0x81f,
  0x800, 0x810, 0x808, 0x818, 0x804, 0x814, 0x80c, 0x81c,
  0x802, 0x812, 0x80a, 0x81a, 0x806, 0x816, 0x80e, 0x81e,
  0x801, 0x811, 0x809, 0x819, 0x805, 0x815, 0x80d, 0x81d,
  0x803, 0x813, 0x80b, 0x81b, 0x807, 0x817, 0x80f, 0x81f,
  0x800, 0x810, 0x808, 0x818, 0x804, 0x814, 0x80c, 0x81c,
  0x802, 0x812, 0x80a, 0x81a, 0x806, 0x816, 0x80e, 0x81e,
  0x801, 0x811, 0x809, 0x819, 0x805, 0x815, 0x80d, 0x81d,
  0x803, 0x813, 0x80b, 0x81b, 0x807, 0x817, 0x80f, 0x81f,
  0x800, 0x810, 0x808, 0x818, 0x804, 0x814, 0x80c, 0x81c,
  0x802, 0x812, 0x80a, 0x81a, 0x806, 0x816, 0x80e, 0x81e,
  0x801, 0x811, 0x809, 0x819, 0x805, 0x815, 0x80d, 0x81d,
  0x803, 0x813, 0x80b, 0x81b, 0x807, 0x817, 0x80f, 0x81f,
};
1814
1815 /* Inflate a zlib stream from PIN/SIN to POUT/SOUT. Return 1 on
1816 success, 0 on some error parsing the stream. */
1817
1818 static int
1819 elf_zlib_inflate (const unsigned char *pin, size_t sin, uint16_t *zdebug_table,
1820 unsigned char *pout, size_t sout)
1821 {
1822 unsigned char *porigout;
1823 const unsigned char *pinend;
1824 unsigned char *poutend;
1825
1826 /* We can apparently see multiple zlib streams concatenated
1827 together, so keep going as long as there is something to read.
1828 The last 4 bytes are the checksum. */
1829 porigout = pout;
1830 pinend = pin + sin;
1831 poutend = pout + sout;
1832 while ((pinend - pin) > 4)
1833 {
1834 uint64_t val;
1835 unsigned int bits;
1836 int last;
1837
1838 /* Read the two byte zlib header. */
1839
1840 if (unlikely ((pin[0] & 0xf) != 8)) /* 8 is zlib encoding. */
1841 {
1842 /* Unknown compression method. */
1843 elf_uncompress_failed ();
1844 return 0;
1845 }
1846 if (unlikely ((pin[0] >> 4) > 7))
1847 {
1848 /* Window size too large. Other than this check, we don't
1849 care about the window size. */
1850 elf_uncompress_failed ();
1851 return 0;
1852 }
1853 if (unlikely ((pin[1] & 0x20) != 0))
1854 {
1855 /* Stream expects a predefined dictionary, but we have no
1856 dictionary. */
1857 elf_uncompress_failed ();
1858 return 0;
1859 }
1860 val = (pin[0] << 8) | pin[1];
1861 if (unlikely (val % 31 != 0))
1862 {
1863 /* Header check failure. */
1864 elf_uncompress_failed ();
1865 return 0;
1866 }
1867 pin += 2;
1868
1869 /* Align PIN to a 32-bit boundary. */
1870
1871 val = 0;
1872 bits = 0;
1873 while ((((uintptr_t) pin) & 3) != 0)
1874 {
1875 val |= (uint64_t)*pin << bits;
1876 bits += 8;
1877 ++pin;
1878 }
1879
1880 /* Read blocks until one is marked last. */
1881
1882 last = 0;
1883
1884 while (!last)
1885 {
1886 unsigned int type;
1887 const uint16_t *tlit;
1888 const uint16_t *tdist;
1889
1890 if (!elf_fetch_bits (&pin, pinend, &val, &bits))
1891 return 0;
1892
1893 last = val & 1;
1894 type = (val >> 1) & 3;
1895 val >>= 3;
1896 bits -= 3;
1897
1898 if (unlikely (type == 3))
1899 {
1900 /* Invalid block type. */
1901 elf_uncompress_failed ();
1902 return 0;
1903 }
1904
1905 if (type == 0)
1906 {
1907 uint16_t len;
1908 uint16_t lenc;
1909
1910 /* An uncompressed block. */
1911
1912 /* If we've read ahead more than a byte, back up. */
1913 while (bits >= 8)
1914 {
1915 --pin;
1916 bits -= 8;
1917 }
1918
1919 val = 0;
1920 bits = 0;
1921 if (unlikely ((pinend - pin) < 4))
1922 {
1923 /* Missing length. */
1924 elf_uncompress_failed ();
1925 return 0;
1926 }
1927 len = pin[0] | (pin[1] << 8);
1928 lenc = pin[2] | (pin[3] << 8);
1929 pin += 4;
1930 lenc = ~lenc;
1931 if (unlikely (len != lenc))
1932 {
1933 /* Corrupt data. */
1934 elf_uncompress_failed ();
1935 return 0;
1936 }
1937 if (unlikely (len > (unsigned int) (pinend - pin)
1938 || len > (unsigned int) (poutend - pout)))
1939 {
1940 /* Not enough space in buffers. */
1941 elf_uncompress_failed ();
1942 return 0;
1943 }
1944 memcpy (pout, pin, len);
1945 pout += len;
1946 pin += len;
1947
1948 /* Align PIN. */
1949 while ((((uintptr_t) pin) & 3) != 0)
1950 {
1951 val |= (uint64_t)*pin << bits;
1952 bits += 8;
1953 ++pin;
1954 }
1955
1956 /* Go around to read the next block. */
1957 continue;
1958 }
1959
1960 if (type == 1)
1961 {
1962 tlit = elf_zlib_default_table;
1963 tdist = elf_zlib_default_dist_table;
1964 }
1965 else
1966 {
1967 unsigned int nlit;
1968 unsigned int ndist;
1969 unsigned int nclen;
1970 unsigned char codebits[19];
1971 unsigned char *plenbase;
1972 unsigned char *plen;
1973 unsigned char *plenend;
1974
1975 /* Read a Huffman encoding table. The various magic
1976 numbers here are from RFC 1951. */
1977
1978 if (!elf_fetch_bits (&pin, pinend, &val, &bits))
1979 return 0;
1980
1981 nlit = (val & 0x1f) + 257;
1982 val >>= 5;
1983 ndist = (val & 0x1f) + 1;
1984 val >>= 5;
1985 nclen = (val & 0xf) + 4;
1986 val >>= 4;
1987 bits -= 14;
1988 if (unlikely (nlit > 286 || ndist > 30))
1989 {
1990 /* Values out of range. */
1991 elf_uncompress_failed ();
1992 return 0;
1993 }
1994
1995 /* Read and build the table used to compress the
1996 literal, length, and distance codes. */
1997
1998 memset(&codebits[0], 0, 19);
1999
2000 /* There are always at least 4 elements in the
2001 table. */
2002
2003 if (!elf_fetch_bits (&pin, pinend, &val, &bits))
2004 return 0;
2005
2006 codebits[16] = val & 7;
2007 codebits[17] = (val >> 3) & 7;
2008 codebits[18] = (val >> 6) & 7;
2009 codebits[0] = (val >> 9) & 7;
2010 val >>= 12;
2011 bits -= 12;
2012
2013 if (nclen == 4)
2014 goto codebitsdone;
2015
2016 codebits[8] = val & 7;
2017 val >>= 3;
2018 bits -= 3;
2019
2020 if (nclen == 5)
2021 goto codebitsdone;
2022
2023 if (!elf_fetch_bits (&pin, pinend, &val, &bits))
2024 return 0;
2025
2026 codebits[7] = val & 7;
2027 val >>= 3;
2028 bits -= 3;
2029
2030 if (nclen == 6)
2031 goto codebitsdone;
2032
2033 codebits[9] = val & 7;
2034 val >>= 3;
2035 bits -= 3;
2036
2037 if (nclen == 7)
2038 goto codebitsdone;
2039
2040 codebits[6] = val & 7;
2041 val >>= 3;
2042 bits -= 3;
2043
2044 if (nclen == 8)
2045 goto codebitsdone;
2046
2047 codebits[10] = val & 7;
2048 val >>= 3;
2049 bits -= 3;
2050
2051 if (nclen == 9)
2052 goto codebitsdone;
2053
2054 codebits[5] = val & 7;
2055 val >>= 3;
2056 bits -= 3;
2057
2058 if (nclen == 10)
2059 goto codebitsdone;
2060
2061 if (!elf_fetch_bits (&pin, pinend, &val, &bits))
2062 return 0;
2063
2064 codebits[11] = val & 7;
2065 val >>= 3;
2066 bits -= 3;
2067
2068 if (nclen == 11)
2069 goto codebitsdone;
2070
2071 codebits[4] = val & 7;
2072 val >>= 3;
2073 bits -= 3;
2074
2075 if (nclen == 12)
2076 goto codebitsdone;
2077
2078 codebits[12] = val & 7;
2079 val >>= 3;
2080 bits -= 3;
2081
2082 if (nclen == 13)
2083 goto codebitsdone;
2084
2085 codebits[3] = val & 7;
2086 val >>= 3;
2087 bits -= 3;
2088
2089 if (nclen == 14)
2090 goto codebitsdone;
2091
2092 codebits[13] = val & 7;
2093 val >>= 3;
2094 bits -= 3;
2095
2096 if (nclen == 15)
2097 goto codebitsdone;
2098
2099 if (!elf_fetch_bits (&pin, pinend, &val, &bits))
2100 return 0;
2101
2102 codebits[2] = val & 7;
2103 val >>= 3;
2104 bits -= 3;
2105
2106 if (nclen == 16)
2107 goto codebitsdone;
2108
2109 codebits[14] = val & 7;
2110 val >>= 3;
2111 bits -= 3;
2112
2113 if (nclen == 17)
2114 goto codebitsdone;
2115
2116 codebits[1] = val & 7;
2117 val >>= 3;
2118 bits -= 3;
2119
2120 if (nclen == 18)
2121 goto codebitsdone;
2122
2123 codebits[15] = val & 7;
2124 val >>= 3;
2125 bits -= 3;
2126
2127 codebitsdone:
2128
2129 if (!elf_zlib_inflate_table (codebits, 19, zdebug_table,
2130 zdebug_table))
2131 return 0;
2132
2133 /* Read the compressed bit lengths of the literal,
2134 length, and distance codes. We have allocated space
2135 at the end of zdebug_table to hold them. */
2136
2137 plenbase = (((unsigned char *) zdebug_table)
2138 + ZLIB_TABLE_CODELEN_OFFSET);
2139 plen = plenbase;
2140 plenend = plen + nlit + ndist;
2141 while (plen < plenend)
2142 {
2143 uint16_t t;
2144 unsigned int b;
2145 uint16_t v;
2146
2147 if (!elf_fetch_bits (&pin, pinend, &val, &bits))
2148 return 0;
2149
2150 t = zdebug_table[val & 0xff];
2151
2152 /* The compression here uses bit lengths up to 7, so
2153 a secondary table is never necessary. */
2154 if (unlikely ((t & (1U << ZLIB_HUFFMAN_SECONDARY_SHIFT))
2155 != 0))
2156 {
2157 elf_uncompress_failed ();
2158 return 0;
2159 }
2160
2161 b = (t >> ZLIB_HUFFMAN_BITS_SHIFT) & ZLIB_HUFFMAN_BITS_MASK;
2162 val >>= b + 1;
2163 bits -= b + 1;
2164
2165 v = t & ZLIB_HUFFMAN_VALUE_MASK;
2166 if (v < 16)
2167 *plen++ = v;
2168 else if (v == 16)
2169 {
2170 unsigned int c;
2171 unsigned int prev;
2172
2173 /* Copy previous entry 3 to 6 times. */
2174
2175 if (unlikely (plen == plenbase))
2176 {
2177 elf_uncompress_failed ();
2178 return 0;
2179 }
2180
2181 /* We used up to 7 bits since the last
2182 elf_fetch_bits, so we have at least 8 bits
2183 available here. */
2184
2185 c = 3 + (val & 0x3);
2186 val >>= 2;
2187 bits -= 2;
2188 if (unlikely ((unsigned int) (plenend - plen) < c))
2189 {
2190 elf_uncompress_failed ();
2191 return 0;
2192 }
2193
2194 prev = plen[-1];
2195 switch (c)
2196 {
2197 case 6:
2198 *plen++ = prev;
2199 ATTRIBUTE_FALLTHROUGH;
2200 case 5:
2201 *plen++ = prev;
2202 ATTRIBUTE_FALLTHROUGH;
2203 case 4:
2204 *plen++ = prev;
2205 }
2206 *plen++ = prev;
2207 *plen++ = prev;
2208 *plen++ = prev;
2209 }
2210 else if (v == 17)
2211 {
2212 unsigned int c;
2213
2214 /* Store zero 3 to 10 times. */
2215
2216 /* We used up to 7 bits since the last
2217 elf_fetch_bits, so we have at least 8 bits
2218 available here. */
2219
2220 c = 3 + (val & 0x7);
2221 val >>= 3;
2222 bits -= 3;
2223 if (unlikely ((unsigned int) (plenend - plen) < c))
2224 {
2225 elf_uncompress_failed ();
2226 return 0;
2227 }
2228
2229 switch (c)
2230 {
2231 case 10:
2232 *plen++ = 0;
2233 ATTRIBUTE_FALLTHROUGH;
2234 case 9:
2235 *plen++ = 0;
2236 ATTRIBUTE_FALLTHROUGH;
2237 case 8:
2238 *plen++ = 0;
2239 ATTRIBUTE_FALLTHROUGH;
2240 case 7:
2241 *plen++ = 0;
2242 ATTRIBUTE_FALLTHROUGH;
2243 case 6:
2244 *plen++ = 0;
2245 ATTRIBUTE_FALLTHROUGH;
2246 case 5:
2247 *plen++ = 0;
2248 ATTRIBUTE_FALLTHROUGH;
2249 case 4:
2250 *plen++ = 0;
2251 }
2252 *plen++ = 0;
2253 *plen++ = 0;
2254 *plen++ = 0;
2255 }
2256 else if (v == 18)
2257 {
2258 unsigned int c;
2259
2260 /* Store zero 11 to 138 times. */
2261
2262 /* We used up to 7 bits since the last
2263 elf_fetch_bits, so we have at least 8 bits
2264 available here. */
2265
2266 c = 11 + (val & 0x7f);
2267 val >>= 7;
2268 bits -= 7;
2269 if (unlikely ((unsigned int) (plenend - plen) < c))
2270 {
2271 elf_uncompress_failed ();
2272 return 0;
2273 }
2274
2275 memset (plen, 0, c);
2276 plen += c;
2277 }
2278 else
2279 {
2280 elf_uncompress_failed ();
2281 return 0;
2282 }
2283 }
2284
2285 /* Make sure that the stop code can appear. */
2286
2287 plen = plenbase;
2288 if (unlikely (plen[256] == 0))
2289 {
2290 elf_uncompress_failed ();
2291 return 0;
2292 }
2293
2294 /* Build the decompression tables. */
2295
2296 if (!elf_zlib_inflate_table (plen, nlit, zdebug_table,
2297 zdebug_table))
2298 return 0;
2299 if (!elf_zlib_inflate_table (plen + nlit, ndist, zdebug_table,
2300 (zdebug_table
2301 + ZLIB_HUFFMAN_TABLE_SIZE)))
2302 return 0;
2303 tlit = zdebug_table;
2304 tdist = zdebug_table + ZLIB_HUFFMAN_TABLE_SIZE;
2305 }
2306
2307 /* Inflate values until the end of the block. This is the
2308 main loop of the inflation code. */
2309
2310 while (1)
2311 {
2312 uint16_t t;
2313 unsigned int b;
2314 uint16_t v;
2315 unsigned int lit;
2316
2317 if (!elf_fetch_bits (&pin, pinend, &val, &bits))
2318 return 0;
2319
2320 t = tlit[val & 0xff];
2321 b = (t >> ZLIB_HUFFMAN_BITS_SHIFT) & ZLIB_HUFFMAN_BITS_MASK;
2322 v = t & ZLIB_HUFFMAN_VALUE_MASK;
2323
2324 if ((t & (1U << ZLIB_HUFFMAN_SECONDARY_SHIFT)) == 0)
2325 {
2326 lit = v;
2327 val >>= b + 1;
2328 bits -= b + 1;
2329 }
2330 else
2331 {
2332 t = tlit[v + 0x100 + ((val >> 8) & ((1U << b) - 1))];
2333 b = (t >> ZLIB_HUFFMAN_BITS_SHIFT) & ZLIB_HUFFMAN_BITS_MASK;
2334 lit = t & ZLIB_HUFFMAN_VALUE_MASK;
2335 val >>= b + 8;
2336 bits -= b + 8;
2337 }
2338
2339 if (lit < 256)
2340 {
2341 if (unlikely (pout == poutend))
2342 {
2343 elf_uncompress_failed ();
2344 return 0;
2345 }
2346
2347 *pout++ = lit;
2348
2349 /* We will need to write the next byte soon. We ask
2350 for high temporal locality because we will write
2351 to the whole cache line soon. */
2352 __builtin_prefetch (pout, 1, 3);
2353 }
2354 else if (lit == 256)
2355 {
2356 /* The end of the block. */
2357 break;
2358 }
2359 else
2360 {
2361 unsigned int dist;
2362 unsigned int len;
2363
2364 /* Convert lit into a length. */
2365
2366 if (lit < 265)
2367 len = lit - 257 + 3;
2368 else if (lit == 285)
2369 len = 258;
2370 else if (unlikely (lit > 285))
2371 {
2372 elf_uncompress_failed ();
2373 return 0;
2374 }
2375 else
2376 {
2377 unsigned int extra;
2378
2379 if (!elf_fetch_bits (&pin, pinend, &val, &bits))
2380 return 0;
2381
2382 /* This is an expression for the table of length
2383 codes in RFC 1951 3.2.5. */
2384 lit -= 265;
2385 extra = (lit >> 2) + 1;
2386 len = (lit & 3) << extra;
2387 len += 11;
2388 len += ((1U << (extra - 1)) - 1) << 3;
2389 len += val & ((1U << extra) - 1);
2390 val >>= extra;
2391 bits -= extra;
2392 }
2393
2394 if (!elf_fetch_bits (&pin, pinend, &val, &bits))
2395 return 0;
2396
2397 t = tdist[val & 0xff];
2398 b = (t >> ZLIB_HUFFMAN_BITS_SHIFT) & ZLIB_HUFFMAN_BITS_MASK;
2399 v = t & ZLIB_HUFFMAN_VALUE_MASK;
2400
2401 if ((t & (1U << ZLIB_HUFFMAN_SECONDARY_SHIFT)) == 0)
2402 {
2403 dist = v;
2404 val >>= b + 1;
2405 bits -= b + 1;
2406 }
2407 else
2408 {
2409 t = tdist[v + 0x100 + ((val >> 8) & ((1U << b) - 1))];
2410 b = ((t >> ZLIB_HUFFMAN_BITS_SHIFT)
2411 & ZLIB_HUFFMAN_BITS_MASK);
2412 dist = t & ZLIB_HUFFMAN_VALUE_MASK;
2413 val >>= b + 8;
2414 bits -= b + 8;
2415 }
2416
2417 /* Convert dist to a distance. */
2418
2419 if (dist == 0)
2420 {
2421 /* A distance of 1. A common case, meaning
2422 repeat the last character LEN times. */
2423
2424 if (unlikely (pout == porigout))
2425 {
2426 elf_uncompress_failed ();
2427 return 0;
2428 }
2429
2430 if (unlikely ((unsigned int) (poutend - pout) < len))
2431 {
2432 elf_uncompress_failed ();
2433 return 0;
2434 }
2435
2436 memset (pout, pout[-1], len);
2437 pout += len;
2438 }
2439 else if (unlikely (dist > 29))
2440 {
2441 elf_uncompress_failed ();
2442 return 0;
2443 }
2444 else
2445 {
2446 if (dist < 4)
2447 dist = dist + 1;
2448 else
2449 {
2450 unsigned int extra;
2451
2452 if (!elf_fetch_bits (&pin, pinend, &val, &bits))
2453 return 0;
2454
2455 /* This is an expression for the table of
2456 distance codes in RFC 1951 3.2.5. */
2457 dist -= 4;
2458 extra = (dist >> 1) + 1;
2459 dist = (dist & 1) << extra;
2460 dist += 5;
2461 dist += ((1U << (extra - 1)) - 1) << 2;
2462 dist += val & ((1U << extra) - 1);
2463 val >>= extra;
2464 bits -= extra;
2465 }
2466
2467 /* Go back dist bytes, and copy len bytes from
2468 there. */
2469
2470 if (unlikely ((unsigned int) (pout - porigout) < dist))
2471 {
2472 elf_uncompress_failed ();
2473 return 0;
2474 }
2475
2476 if (unlikely ((unsigned int) (poutend - pout) < len))
2477 {
2478 elf_uncompress_failed ();
2479 return 0;
2480 }
2481
2482 if (dist >= len)
2483 {
2484 memcpy (pout, pout - dist, len);
2485 pout += len;
2486 }
2487 else
2488 {
2489 while (len > 0)
2490 {
2491 unsigned int copy;
2492
2493 copy = len < dist ? len : dist;
2494 memcpy (pout, pout - dist, copy);
2495 len -= copy;
2496 pout += copy;
2497 }
2498 }
2499 }
2500 }
2501 }
2502 }
2503 }
2504
2505 /* We should have filled the output buffer. */
2506 if (unlikely (pout != poutend))
2507 {
2508 elf_uncompress_failed ();
2509 return 0;
2510 }
2511
2512 return 1;
2513 }
2514
/* Verify the zlib checksum.  The checksum is in the 4 bytes at
   CHECKBYTES, and the uncompressed data is at UNCOMPRESSED /
   UNCOMPRESSED_SIZE.  Returns 1 on success, 0 on failure.  */

static int
elf_zlib_verify_checksum (const unsigned char *checkbytes,
			  const unsigned char *uncompressed,
			  size_t uncompressed_size)
{
  uint32_t expected;
  uint32_t s1;
  uint32_t s2;
  const unsigned char *p;
  size_t remaining;

  /* The stored checksum is big-endian.  */
  expected = (((uint32_t) checkbytes[0] << 24)
	      | ((uint32_t) checkbytes[1] << 16)
	      | ((uint32_t) checkbytes[2] << 8)
	      | (uint32_t) checkbytes[3]);

  /* Compute the Adler-32 checksum of the uncompressed data.  S1 is
     the running byte sum and S2 the running sum of S1 values, both
     modulo 65521.  Doing the modulo reduction only once per chunk of
     at most 5552 bytes minimizes modulo operations: 5552 is the
     largest count for which S2 cannot overflow 32 bits when S1 and S2
     start below 65521 (the same bound zlib itself uses).  */

  s1 = 1;
  s2 = 0;
  p = uncompressed;
  remaining = uncompressed_size;
  while (remaining > 0)
    {
      size_t chunk;
      size_t i;

      chunk = remaining < 5552 ? remaining : 5552;
      for (i = 0; i < chunk; ++i)
	{
	  s1 += *p++;
	  s2 += s1;
	}
      s1 %= 65521;
      s2 %= 65521;
      remaining -= chunk;
    }

  if (unlikely ((s2 << 16) + s1 != expected))
    {
      elf_uncompress_failed ();
      return 0;
    }

  return 1;
}
2641
/* Inflate a zlib stream from PIN/SIN to POUT/SOUT, and verify the
   checksum.  Return 1 on success, 0 on error.  */

static int
elf_zlib_inflate_and_verify (const unsigned char *pin, size_t sin,
			     uint16_t *zdebug_table, unsigned char *pout,
			     size_t sout)
{
  /* The last four input bytes hold the Adler-32 checksum of the
     uncompressed data; only check it if inflation succeeded.  */
  return (elf_zlib_inflate (pin, sin, zdebug_table, pout, sout)
	  && elf_zlib_verify_checksum (pin + sin - 4, pout, sout));
}
2656
/* For working memory during zstd decompression, we need
   - a literal length FSE table: 512 64-bit values == 4096 bytes
   - a match length FSE table: 512 64-bit values == 4096 bytes
   - a offset FSE table: 256 64-bit values == 2048 bytes
   - a Huffman tree: 2048 uint16_t values == 4096 bytes
   - scratch space, one of
     - to build an FSE table: 512 uint16_t values == 1024 bytes
     - to build a Huffman tree: 512 uint16_t + 256 uint32_t == 2048 bytes
*/

/* Total size in bytes of the working memory described above; the
   ZSTD_TABLE_*_OFFSET macros below carve it into regions.  */

#define ZSTD_TABLE_SIZE					\
  (2 * 512 * sizeof (struct elf_zstd_fse_baseline_entry)	\
   + 256 * sizeof (struct elf_zstd_fse_baseline_entry)	\
   + 2048 * sizeof (uint16_t)				\
   + 512 * sizeof (uint16_t) + 256 * sizeof (uint32_t))

/* Byte offset of the literal length FSE table (512 entries).  */

#define ZSTD_TABLE_LITERAL_FSE_OFFSET (0)

/* Byte offset of the match length FSE table (512 entries).  */

#define ZSTD_TABLE_MATCH_FSE_OFFSET \
  (512 * sizeof (struct elf_zstd_fse_baseline_entry))

/* Byte offset of the offset FSE table (256 entries).  */

#define ZSTD_TABLE_OFFSET_FSE_OFFSET \
  (ZSTD_TABLE_MATCH_FSE_OFFSET \
   + 512 * sizeof (struct elf_zstd_fse_baseline_entry))

/* Byte offset of the Huffman tree (2048 uint16_t values).  */

#define ZSTD_TABLE_HUFFMAN_OFFSET \
  (ZSTD_TABLE_OFFSET_FSE_OFFSET \
   + 256 * sizeof (struct elf_zstd_fse_baseline_entry))

/* Byte offset of the scratch space used while building tables.  */

#define ZSTD_TABLE_WORK_OFFSET \
  (ZSTD_TABLE_HUFFMAN_OFFSET + 2048 * sizeof (uint16_t))
2688
/* An entry in a zstd FSE table.  */

struct elf_zstd_fse_entry
{
  /* The value that this FSE entry represents.  */
  unsigned char symbol;
  /* The number of bits to read to determine the next state.  */
  unsigned char bits;
  /* Add the bits to this base to get the next state.  */
  uint16_t base;
};

/* Forward declaration: elf_zstd_read_fse below calls this before its
   definition.  */

static int
elf_zstd_build_fse (const int16_t *, int, uint16_t *, int,
		    struct elf_zstd_fse_entry *);
2704
/* Read a zstd FSE table and build the decoding table in *TABLE, updating *PPIN
   as it reads.  ZDEBUG_TABLE is scratch space; it must be enough for 512
   uint16_t values (1024 bytes).  MAXIDX is the maximum number of symbols
   permitted.  *TABLE_BITS is the maximum number of bits for symbols in the
   table: the size of *TABLE is at least 1 << *TABLE_BITS.  This updates
   *TABLE_BITS to the actual number of bits.  Returns 1 on success, 0 on
   error.  */

static int
elf_zstd_read_fse (const unsigned char **ppin, const unsigned char *pinend,
		   uint16_t *zdebug_table, int maxidx,
		   struct elf_zstd_fse_entry *table, int *table_bits)
{
  const unsigned char *pin;
  int16_t *norm;
  uint16_t *next;
  uint64_t val;
  unsigned int bits;
  int accuracy_log;
  uint32_t remaining;
  uint32_t threshold;
  int bits_needed;
  int idx;
  int prev0;

  pin = *ppin;

  /* The normalized probability array and the scratch array passed on
     to elf_zstd_build_fse share the caller-provided scratch space.  */
  norm = (int16_t *) zdebug_table;
  next = zdebug_table + 256;

  /* Make sure at least four input bytes remain for the alignment loop
     and initial fetch below.  */
  if (unlikely (pin + 3 >= pinend))
    {
      elf_uncompress_failed ();
      return 0;
    }

  /* Align PIN to a 32-bit boundary.  */

  val = 0;
  bits = 0;
  while ((((uintptr_t) pin) & 3) != 0)
    {
      val |= (uint64_t)*pin << bits;
      bits += 8;
      ++pin;
    }

  if (!elf_fetch_bits (&pin, pinend, &val, &bits))
    return 0;

  /* The low four bits hold the accuracy log, stored biased by 5.  */
  accuracy_log = (val & 0xf) + 5;
  if (accuracy_log > *table_bits)
    {
      elf_uncompress_failed ();
      return 0;
    }
  *table_bits = accuracy_log;
  val >>= 4;
  bits -= 4;

  /* This code is mostly copied from the reference implementation.  */

  /* The number of remaining probabilities, plus 1.  This sets the number of
     bits that need to be read for the next value.  */
  remaining = (1 << accuracy_log) + 1;

  /* The current difference between small and large values, which depends on
     the number of remaining values.  Small values use one less bit.  */
  threshold = 1 << accuracy_log;

  /* The number of bits used to compute threshold.  */
  bits_needed = accuracy_log + 1;

  /* The next character value.  */
  idx = 0;

  /* Whether the last count was 0.  */
  prev0 = 0;

  while (remaining > 1 && idx <= maxidx)
    {
      uint32_t max;
      int32_t count;

      if (!elf_fetch_bits (&pin, pinend, &val, &bits))
	return 0;

      if (prev0)
	{
	  int zidx;

	  /* Previous count was 0, so there is a 2-bit repeat flag.  If the
	     2-bit flag is 0b11, it adds 3 and then there is another repeat
	     flag.  */
	  zidx = idx;
	  /* Fast path: six maximal repeat flags (0b11) at once.  */
	  while ((val & 0xfff) == 0xfff)
	    {
	      zidx += 3 * 6;
	      val >>= 12;
	      bits -= 12;
	      if (!elf_fetch_bits (&pin, pinend, &val, &bits))
		return 0;
	    }
	  while ((val & 3) == 3)
	    {
	      zidx += 3;
	      val >>= 2;
	      bits -= 2;
	      if (!elf_fetch_bits (&pin, pinend, &val, &bits))
		return 0;
	    }
	  /* We have at least 13 bits here, don't need to fetch.  */
	  zidx += val & 3;
	  val >>= 2;
	  bits -= 2;

	  if (unlikely (zidx > maxidx))
	    {
	      elf_uncompress_failed ();
	      return 0;
	    }

	  /* All symbols from IDX up to ZIDX have probability zero.  */
	  for (; idx < zidx; idx++)
	    norm[idx] = 0;

	  prev0 = 0;
	  continue;
	}

      max = (2 * threshold - 1) - remaining;
      if ((val & (threshold - 1)) < max)
	{
	  /* A small value.  */
	  count = (int32_t) ((uint32_t) val & (threshold - 1));
	  val >>= bits_needed - 1;
	  bits -= bits_needed - 1;
	}
      else
	{
	  /* A large value.  */
	  count = (int32_t) ((uint32_t) val & (2 * threshold - 1));
	  if (count >= (int32_t) threshold)
	    count -= (int32_t) max;
	  val >>= bits_needed;
	  bits -= bits_needed;
	}

      /* The encoded value is the probability plus one; -1 marks a
	 low-probability symbol that gets a single table slot in
	 elf_zstd_build_fse, and still consumes one from REMAINING.  */
      count--;
      if (count >= 0)
	remaining -= count;
      else
	remaining--;
      /* NORM only has room for 256 entries in the scratch space.  */
      if (unlikely (idx >= 256))
	{
	  elf_uncompress_failed ();
	  return 0;
	}
      norm[idx] = (int16_t) count;
      ++idx;

      prev0 = count == 0;

      /* Fewer remaining values need fewer bits per count.  */
      while (remaining < threshold)
	{
	  bits_needed--;
	  threshold >>= 1;
	}
    }

  /* The probabilities must account for the table size exactly.  */
  if (unlikely (remaining != 1))
    {
      elf_uncompress_failed ();
      return 0;
    }

  /* If we've read ahead more than a byte, back up.  */
  while (bits >= 8)
    {
      --pin;
      bits -= 8;
    }

  *ppin = pin;

  /* Any symbols not mentioned have probability zero.  */
  for (; idx <= maxidx; idx++)
    norm[idx] = 0;

  return elf_zstd_build_fse (norm, idx, next, *table_bits, table);
}
2894
/* Build the FSE decoding table from a list of probabilities.  This reads from
   NORM of length IDX, uses NEXT as scratch space, and writes to *TABLE, whose
   size is TABLE_BITS.  Returns 1 on success, 0 on error.  */

static int
elf_zstd_build_fse (const int16_t *norm, int idx, uint16_t *next,
		    int table_bits, struct elf_zstd_fse_entry *table)
{
  int table_size;
  int high_threshold;
  int i;
  int pos;
  int step;
  int mask;

  table_size = 1 << table_bits;
  high_threshold = table_size - 1;
  /* First pass: a low-probability symbol (negative NORM entry) gets a
     single slot at the top of the table; NEXT records how many slots
     each symbol will occupy.  */
  for (i = 0; i < idx; i++)
    {
      int16_t n;

      n = norm[i];
      if (n >= 0)
	next[i] = (uint16_t) n;
      else
	{
	  table[high_threshold].symbol = (unsigned char) i;
	  high_threshold--;
	  next[i] = 1;
	}
    }

  /* Spread the remaining symbols over the table with the standard FSE
     step, skipping slots above HIGH_THRESHOLD already claimed above.  */
  pos = 0;
  step = (table_size >> 1) + (table_size >> 3) + 3;
  mask = table_size - 1;
  for (i = 0; i < idx; i++)
    {
      int n;
      int j;

      n = (int) norm[i];
      for (j = 0; j < n; j++)
	{
	  table[pos].symbol = (unsigned char) i;
	  pos = (pos + step) & mask;
	  while (unlikely (pos > high_threshold))
	    pos = (pos + step) & mask;
	}
    }
  /* The stepping pattern visits every slot exactly once; not ending
     back at slot 0 means the probabilities were corrupt.  */
  if (unlikely (pos != 0))
    {
      elf_uncompress_failed ();
      return 0;
    }

  /* Second pass: compute the bit count and base of the next-state
     transition for every table entry.  */
  for (i = 0; i < table_size; i++)
    {
      unsigned char sym;
      uint16_t next_state;
      int high_bit;
      int bits;

      sym = table[i].symbol;
      next_state = next[sym];
      ++next[sym];

      if (next_state == 0)
	{
	  elf_uncompress_failed ();
	  return 0;
	}
      high_bit = 31 - __builtin_clz (next_state);

      bits = table_bits - high_bit;
      table[i].bits = (unsigned char) bits;
      table[i].base = (uint16_t) ((next_state << bits) - table_size);
    }

  return 1;
}
2975
/* Encode the baseline and bits into a single 32-bit value: the low 24
   bits hold the baseline, the high 8 bits the number of extra bits.  */

#define ZSTD_ENCODE_BASELINE_BITS(baseline, basebits)	\
  ((uint32_t)(baseline) | ((uint32_t)(basebits) << 24))

#define ZSTD_DECODE_BASELINE(baseline_basebits)	\
  ((uint32_t)(baseline_basebits) & 0xffffff)

#define ZSTD_DECODE_BASEBITS(baseline_basebits)	\
  ((uint32_t)(baseline_basebits) >> 24)

/* Given a literal length code, we need to read a number of bits and add that
   to a baseline.  For states 0 to 15 the baseline is the state and the number
   of bits is zero.  */

#define ZSTD_LITERAL_LENGTH_BASELINE_OFFSET (16)

/* Baseline and extra-bit count for literal length codes 16 through 35,
   indexed by code minus ZSTD_LITERAL_LENGTH_BASELINE_OFFSET.  */

static const uint32_t elf_zstd_literal_length_base[] =
{
  ZSTD_ENCODE_BASELINE_BITS(16, 1),
  ZSTD_ENCODE_BASELINE_BITS(18, 1),
  ZSTD_ENCODE_BASELINE_BITS(20, 1),
  ZSTD_ENCODE_BASELINE_BITS(22, 1),
  ZSTD_ENCODE_BASELINE_BITS(24, 2),
  ZSTD_ENCODE_BASELINE_BITS(28, 2),
  ZSTD_ENCODE_BASELINE_BITS(32, 3),
  ZSTD_ENCODE_BASELINE_BITS(40, 3),
  ZSTD_ENCODE_BASELINE_BITS(48, 4),
  ZSTD_ENCODE_BASELINE_BITS(64, 6),
  ZSTD_ENCODE_BASELINE_BITS(128, 7),
  ZSTD_ENCODE_BASELINE_BITS(256, 8),
  ZSTD_ENCODE_BASELINE_BITS(512, 9),
  ZSTD_ENCODE_BASELINE_BITS(1024, 10),
  ZSTD_ENCODE_BASELINE_BITS(2048, 11),
  ZSTD_ENCODE_BASELINE_BITS(4096, 12),
  ZSTD_ENCODE_BASELINE_BITS(8192, 13),
  ZSTD_ENCODE_BASELINE_BITS(16384, 14),
  ZSTD_ENCODE_BASELINE_BITS(32768, 15),
  ZSTD_ENCODE_BASELINE_BITS(65536, 16)
};
3016
/* The same applies to match length codes.  For states 0 to 31 the baseline is
   the state + 3 and the number of bits is zero.  */

#define ZSTD_MATCH_LENGTH_BASELINE_OFFSET (32)

/* Baseline and extra-bit count for match length codes 32 through 52,
   indexed by code minus ZSTD_MATCH_LENGTH_BASELINE_OFFSET.  */

static const uint32_t elf_zstd_match_length_base[] =
{
  ZSTD_ENCODE_BASELINE_BITS(35, 1),
  ZSTD_ENCODE_BASELINE_BITS(37, 1),
  ZSTD_ENCODE_BASELINE_BITS(39, 1),
  ZSTD_ENCODE_BASELINE_BITS(41, 1),
  ZSTD_ENCODE_BASELINE_BITS(43, 2),
  ZSTD_ENCODE_BASELINE_BITS(47, 2),
  ZSTD_ENCODE_BASELINE_BITS(51, 3),
  ZSTD_ENCODE_BASELINE_BITS(59, 3),
  ZSTD_ENCODE_BASELINE_BITS(67, 4),
  ZSTD_ENCODE_BASELINE_BITS(83, 4),
  ZSTD_ENCODE_BASELINE_BITS(99, 5),
  ZSTD_ENCODE_BASELINE_BITS(131, 7),
  ZSTD_ENCODE_BASELINE_BITS(259, 8),
  ZSTD_ENCODE_BASELINE_BITS(515, 9),
  ZSTD_ENCODE_BASELINE_BITS(1027, 10),
  ZSTD_ENCODE_BASELINE_BITS(2051, 11),
  ZSTD_ENCODE_BASELINE_BITS(4099, 12),
  ZSTD_ENCODE_BASELINE_BITS(8195, 13),
  ZSTD_ENCODE_BASELINE_BITS(16387, 14),
  ZSTD_ENCODE_BASELINE_BITS(32771, 15),
  ZSTD_ENCODE_BASELINE_BITS(65539, 16)
};
3046
/* An entry in an FSE table used for literal/match/length values.  For these we
   have to map the symbol to a baseline value, and we have to read zero or more
   bits and add that value to the baseline value.  Rather than look the values
   up in a separate table, we grow the FSE table so that we get better memory
   caching.  */

struct elf_zstd_fse_baseline_entry
{
  /* The baseline for the value that this FSE entry represents.  */
  uint32_t baseline;
  /* The number of bits to read to add to the baseline.  */
  unsigned char basebits;
  /* The number of bits to read to determine the next state.  */
  unsigned char bits;
  /* Add the bits to this base to get the next state.  */
  uint16_t base;
};
3064
3065 /* Convert the literal length FSE table FSE_TABLE to an FSE baseline table at
3066 BASELINE_TABLE. Note that FSE_TABLE and BASELINE_TABLE will overlap. */
3067
3068 static int
3069 elf_zstd_make_literal_baseline_fse (
3070 const struct elf_zstd_fse_entry *fse_table,
3071 int table_bits,
3072 struct elf_zstd_fse_baseline_entry *baseline_table)
3073 {
3074 size_t count;
3075 const struct elf_zstd_fse_entry *pfse;
3076 struct elf_zstd_fse_baseline_entry *pbaseline;
3077
3078 /* Convert backward to avoid overlap. */
3079
3080 count = 1U << table_bits;
3081 pfse = fse_table + count;
3082 pbaseline = baseline_table + count;
3083 while (pfse > fse_table)
3084 {
3085 unsigned char symbol;
3086 unsigned char bits;
3087 uint16_t base;
3088
3089 --pfse;
3090 --pbaseline;
3091 symbol = pfse->symbol;
3092 bits = pfse->bits;
3093 base = pfse->base;
3094 if (symbol < ZSTD_LITERAL_LENGTH_BASELINE_OFFSET)
3095 {
3096 pbaseline->baseline = (uint32_t)symbol;
3097 pbaseline->basebits = 0;
3098 }
3099 else
3100 {
3101 unsigned int idx;
3102 uint32_t basebits;
3103
3104 if (unlikely (symbol > 35))
3105 {
3106 elf_uncompress_failed ();
3107 return 0;
3108 }
3109 idx = symbol - ZSTD_LITERAL_LENGTH_BASELINE_OFFSET;
3110 basebits = elf_zstd_literal_length_base[idx];
3111 pbaseline->baseline = ZSTD_DECODE_BASELINE(basebits);
3112 pbaseline->basebits = ZSTD_DECODE_BASEBITS(basebits);
3113 }
3114 pbaseline->bits = bits;
3115 pbaseline->base = base;
3116 }
3117
3118 return 1;
3119 }
3120
/* Convert the offset length FSE table FSE_TABLE to an FSE baseline table at
   BASELINE_TABLE.  Note that FSE_TABLE and BASELINE_TABLE will overlap.
   Returns 1 on success, 0 on error.  */

static int
elf_zstd_make_offset_baseline_fse (
    const struct elf_zstd_fse_entry *fse_table,
    int table_bits,
    struct elf_zstd_fse_baseline_entry *baseline_table)
{
  size_t count;
  const struct elf_zstd_fse_entry *pfse;
  struct elf_zstd_fse_baseline_entry *pbaseline;

  /* Convert backward to avoid overlap.  */

  count = 1U << table_bits;
  pfse = fse_table + count;
  pbaseline = baseline_table + count;
  while (pfse > fse_table)
    {
      unsigned char symbol;
      unsigned char bits;
      uint16_t base;

      /* Read every input field before writing any output field, since
	 the baseline entries are wider and the arrays overlap.  */
      --pfse;
      --pbaseline;
      symbol = pfse->symbol;
      bits = pfse->bits;
      base = pfse->base;
      /* SYMBOL is also the number of extra bits; more than 31 would
	 overflow the 32-bit baseline.  */
      if (unlikely (symbol > 31))
	{
	  elf_uncompress_failed ();
	  return 0;
	}

      /* The simple way to write this is

	   pbaseline->baseline = (uint32_t)1 << symbol;
	   pbaseline->basebits = symbol;

	 That will give us an offset value that corresponds to the one
	 described in the RFC.  However, for offset values > 3, we have to
	 subtract 3.  And for offset values 1, 2, 3 we use a repeated offset.
	 The baseline is always a power of 2, and is never 0, so for these low
	 values we will see one entry that is baseline 1, basebits 0, and one
	 entry that is baseline 2, basebits 1.  All other entries will have
	 baseline >= 4 and basebits >= 2.

	 So we can check for RFC offset <= 3 by checking for basebits <= 1.
	 And that means that we can subtract 3 here and not worry about doing
	 it in the hot loop.  */

      pbaseline->baseline = (uint32_t)1 << symbol;
      if (symbol >= 2)
	pbaseline->baseline -= 3;
      pbaseline->basebits = symbol;
      pbaseline->bits = bits;
      pbaseline->base = base;
    }

  return 1;
}
3183
3184 /* Convert the match length FSE table FSE_TABLE to an FSE baseline table at
3185 BASELINE_TABLE. Note that FSE_TABLE and BASELINE_TABLE will overlap. */
3186
3187 static int
3188 elf_zstd_make_match_baseline_fse (
3189 const struct elf_zstd_fse_entry *fse_table,
3190 int table_bits,
3191 struct elf_zstd_fse_baseline_entry *baseline_table)
3192 {
3193 size_t count;
3194 const struct elf_zstd_fse_entry *pfse;
3195 struct elf_zstd_fse_baseline_entry *pbaseline;
3196
3197 /* Convert backward to avoid overlap. */
3198
3199 count = 1U << table_bits;
3200 pfse = fse_table + count;
3201 pbaseline = baseline_table + count;
3202 while (pfse > fse_table)
3203 {
3204 unsigned char symbol;
3205 unsigned char bits;
3206 uint16_t base;
3207
3208 --pfse;
3209 --pbaseline;
3210 symbol = pfse->symbol;
3211 bits = pfse->bits;
3212 base = pfse->base;
3213 if (symbol < ZSTD_MATCH_LENGTH_BASELINE_OFFSET)
3214 {
3215 pbaseline->baseline = (uint32_t)symbol + 3;
3216 pbaseline->basebits = 0;
3217 }
3218 else
3219 {
3220 unsigned int idx;
3221 uint32_t basebits;
3222
3223 if (unlikely (symbol > 52))
3224 {
3225 elf_uncompress_failed ();
3226 return 0;
3227 }
3228 idx = symbol - ZSTD_MATCH_LENGTH_BASELINE_OFFSET;
3229 basebits = elf_zstd_match_length_base[idx];
3230 pbaseline->baseline = ZSTD_DECODE_BASELINE(basebits);
3231 pbaseline->basebits = ZSTD_DECODE_BASEBITS(basebits);
3232 }
3233 pbaseline->bits = bits;
3234 pbaseline->base = base;
3235 }
3236
3237 return 1;
3238 }
3239
#ifdef BACKTRACE_GENERATE_ZSTD_FSE_TABLES

/* Used to generate the predefined FSE decoding tables for zstd.  */

#include <stdio.h>

/* These values are straight from RFC 8878.  */

/* Predefined distribution of literal length codes.  */

static int16_t lit[36] =
{
   4, 3, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 1, 1, 1,
   2, 2, 2, 2, 2, 2, 2, 2, 2, 3, 2, 1, 1, 1, 1, 1,
  -1,-1,-1,-1
};

/* Predefined distribution of match length codes.  */

static int16_t match[53] =
{
   1, 4, 3, 2, 2, 2, 2, 2, 2, 1, 1, 1, 1, 1, 1, 1,
   1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
   1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,-1,-1,
  -1,-1,-1,-1,-1
};

/* Predefined distribution of offset codes.  */

static int16_t offset[29] =
{
   1, 1, 1, 1, 1, 1, 2, 2, 2, 1, 1, 1, 1, 1, 1, 1,
   1, 1, 1, 1, 1, 1, 1, 1,-1,-1,-1,-1,-1
};

/* Scratch space for elf_zstd_build_fse.  */

static uint16_t next[256];
3270
3271 static void
3272 print_table (const struct elf_zstd_fse_baseline_entry *table, size_t size)
3273 {
3274 size_t i;
3275
3276 printf ("{\n");
3277 for (i = 0; i < size; i += 3)
3278 {
3279 int j;
3280
3281 printf (" ");
3282 for (j = 0; j < 3 && i + j < size; ++j)
3283 printf (" { %u, %d, %d, %d },", table[i + j].baseline,
3284 table[i + j].basebits, table[i + j].bits,
3285 table[i + j].base);
3286 printf ("\n");
3287 }
3288 printf ("};\n");
3289 }
3290
/* Generate the predefined FSE baseline decoding tables from the
   RFC 8878 distributions above and print them as C source; the output
   is pasted into this file as elf_zstd_lit_table, elf_zstd_match_table
   and elf_zstd_offset_table.  */

int
main ()
{
  struct elf_zstd_fse_entry lit_table[64];
  struct elf_zstd_fse_baseline_entry lit_baseline[64];
  struct elf_zstd_fse_entry match_table[64];
  struct elf_zstd_fse_baseline_entry match_baseline[64];
  struct elf_zstd_fse_entry offset_table[32];
  struct elf_zstd_fse_baseline_entry offset_baseline[32];

  if (!elf_zstd_build_fse (lit, sizeof lit / sizeof lit[0], next,
			   6, lit_table))
    {
      fprintf (stderr, "elf_zstd_build_fse failed\n");
      exit (EXIT_FAILURE);
    }

  if (!elf_zstd_make_literal_baseline_fse (lit_table, 6, lit_baseline))
    {
      fprintf (stderr, "elf_zstd_make_literal_baseline_fse failed\n");
      exit (EXIT_FAILURE);
    }

  printf ("static const struct elf_zstd_fse_baseline_entry "
	  "elf_zstd_lit_table[64] =\n");
  print_table (lit_baseline,
	       sizeof lit_baseline / sizeof lit_baseline[0]);
  printf ("\n");

  if (!elf_zstd_build_fse (match, sizeof match / sizeof match[0], next,
			   6, match_table))
    {
      fprintf (stderr, "elf_zstd_build_fse failed\n");
      exit (EXIT_FAILURE);
    }

  if (!elf_zstd_make_match_baseline_fse (match_table, 6, match_baseline))
    {
      fprintf (stderr, "elf_zstd_make_match_baseline_fse failed\n");
      exit (EXIT_FAILURE);
    }

  printf ("static const struct elf_zstd_fse_baseline_entry "
	  "elf_zstd_match_table[64] =\n");
  print_table (match_baseline,
	       sizeof match_baseline / sizeof match_baseline[0]);
  printf ("\n");

  if (!elf_zstd_build_fse (offset, sizeof offset / sizeof offset[0], next,
			   5, offset_table))
    {
      fprintf (stderr, "elf_zstd_build_fse failed\n");
      exit (EXIT_FAILURE);
    }

  if (!elf_zstd_make_offset_baseline_fse (offset_table, 5, offset_baseline))
    {
      fprintf (stderr, "elf_zstd_make_offset_baseline_fse failed\n");
      exit (EXIT_FAILURE);
    }

  printf ("static const struct elf_zstd_fse_baseline_entry "
	  "elf_zstd_offset_table[32] =\n");
  print_table (offset_baseline,
	       sizeof offset_baseline / sizeof offset_baseline[0]);
  printf ("\n");

  return 0;
}

#endif
3362
/* The fixed tables generated by the #ifdef'ed out main function
   above.  */

/* Baseline FSE table for literal length codes, accuracy log 6
   (64 entries).  */

static const struct elf_zstd_fse_baseline_entry elf_zstd_lit_table[64] =
{
  { 0, 0, 4, 0 }, { 0, 0, 4, 16 }, { 1, 0, 5, 32 },
  { 3, 0, 5, 0 }, { 4, 0, 5, 0 }, { 6, 0, 5, 0 },
  { 7, 0, 5, 0 }, { 9, 0, 5, 0 }, { 10, 0, 5, 0 },
  { 12, 0, 5, 0 }, { 14, 0, 6, 0 }, { 16, 1, 5, 0 },
  { 20, 1, 5, 0 }, { 22, 1, 5, 0 }, { 28, 2, 5, 0 },
  { 32, 3, 5, 0 }, { 48, 4, 5, 0 }, { 64, 6, 5, 32 },
  { 128, 7, 5, 0 }, { 256, 8, 6, 0 }, { 1024, 10, 6, 0 },
  { 4096, 12, 6, 0 }, { 0, 0, 4, 32 }, { 1, 0, 4, 0 },
  { 2, 0, 5, 0 }, { 4, 0, 5, 32 }, { 5, 0, 5, 0 },
  { 7, 0, 5, 32 }, { 8, 0, 5, 0 }, { 10, 0, 5, 32 },
  { 11, 0, 5, 0 }, { 13, 0, 6, 0 }, { 16, 1, 5, 32 },
  { 18, 1, 5, 0 }, { 22, 1, 5, 32 }, { 24, 2, 5, 0 },
  { 32, 3, 5, 32 }, { 40, 3, 5, 0 }, { 64, 6, 4, 0 },
  { 64, 6, 4, 16 }, { 128, 7, 5, 32 }, { 512, 9, 6, 0 },
  { 2048, 11, 6, 0 }, { 0, 0, 4, 48 }, { 1, 0, 4, 16 },
  { 2, 0, 5, 32 }, { 3, 0, 5, 32 }, { 5, 0, 5, 32 },
  { 6, 0, 5, 32 }, { 8, 0, 5, 32 }, { 9, 0, 5, 32 },
  { 11, 0, 5, 32 }, { 12, 0, 5, 32 }, { 15, 0, 6, 0 },
  { 18, 1, 5, 32 }, { 20, 1, 5, 32 }, { 24, 2, 5, 32 },
  { 28, 2, 5, 32 }, { 40, 3, 5, 32 }, { 48, 4, 5, 32 },
  { 65536, 16, 6, 0 }, { 32768, 15, 6, 0 }, { 16384, 14, 6, 0 },
  { 8192, 13, 6, 0 },
};

/* Baseline FSE table for match length codes, accuracy log 6
   (64 entries).  */

static const struct elf_zstd_fse_baseline_entry elf_zstd_match_table[64] =
{
  { 3, 0, 6, 0 }, { 4, 0, 4, 0 }, { 5, 0, 5, 32 },
  { 6, 0, 5, 0 }, { 8, 0, 5, 0 }, { 9, 0, 5, 0 },
  { 11, 0, 5, 0 }, { 13, 0, 6, 0 }, { 16, 0, 6, 0 },
  { 19, 0, 6, 0 }, { 22, 0, 6, 0 }, { 25, 0, 6, 0 },
  { 28, 0, 6, 0 }, { 31, 0, 6, 0 }, { 34, 0, 6, 0 },
  { 37, 1, 6, 0 }, { 41, 1, 6, 0 }, { 47, 2, 6, 0 },
  { 59, 3, 6, 0 }, { 83, 4, 6, 0 }, { 131, 7, 6, 0 },
  { 515, 9, 6, 0 }, { 4, 0, 4, 16 }, { 5, 0, 4, 0 },
  { 6, 0, 5, 32 }, { 7, 0, 5, 0 }, { 9, 0, 5, 32 },
  { 10, 0, 5, 0 }, { 12, 0, 6, 0 }, { 15, 0, 6, 0 },
  { 18, 0, 6, 0 }, { 21, 0, 6, 0 }, { 24, 0, 6, 0 },
  { 27, 0, 6, 0 }, { 30, 0, 6, 0 }, { 33, 0, 6, 0 },
  { 35, 1, 6, 0 }, { 39, 1, 6, 0 }, { 43, 2, 6, 0 },
  { 51, 3, 6, 0 }, { 67, 4, 6, 0 }, { 99, 5, 6, 0 },
  { 259, 8, 6, 0 }, { 4, 0, 4, 32 }, { 4, 0, 4, 48 },
  { 5, 0, 4, 16 }, { 7, 0, 5, 32 }, { 8, 0, 5, 32 },
  { 10, 0, 5, 32 }, { 11, 0, 5, 32 }, { 14, 0, 6, 0 },
  { 17, 0, 6, 0 }, { 20, 0, 6, 0 }, { 23, 0, 6, 0 },
  { 26, 0, 6, 0 }, { 29, 0, 6, 0 }, { 32, 0, 6, 0 },
  { 65539, 16, 6, 0 }, { 32771, 15, 6, 0 }, { 16387, 14, 6, 0 },
  { 8195, 13, 6, 0 }, { 4099, 12, 6, 0 }, { 2051, 11, 6, 0 },
  { 1027, 10, 6, 0 },
};

/* Baseline FSE table for offset codes, accuracy log 5 (32 entries).  */

static const struct elf_zstd_fse_baseline_entry elf_zstd_offset_table[32] =
{
  { 1, 0, 5, 0 }, { 61, 6, 4, 0 }, { 509, 9, 5, 0 },
  { 32765, 15, 5, 0 }, { 2097149, 21, 5, 0 }, { 5, 3, 5, 0 },
  { 125, 7, 4, 0 }, { 4093, 12, 5, 0 }, { 262141, 18, 5, 0 },
  { 8388605, 23, 5, 0 }, { 29, 5, 5, 0 }, { 253, 8, 4, 0 },
  { 16381, 14, 5, 0 }, { 1048573, 20, 5, 0 }, { 1, 2, 5, 0 },
  { 125, 7, 4, 16 }, { 2045, 11, 5, 0 }, { 131069, 17, 5, 0 },
  { 4194301, 22, 5, 0 }, { 13, 4, 5, 0 }, { 253, 8, 4, 16 },
  { 8189, 13, 5, 0 }, { 524285, 19, 5, 0 }, { 2, 1, 5, 0 },
  { 61, 6, 4, 16 }, { 1021, 10, 5, 0 }, { 65533, 16, 5, 0 },
  { 268435453, 28, 5, 0 }, { 134217725, 27, 5, 0 }, { 67108861, 26, 5, 0 },
  { 33554429, 25, 5, 0 }, { 16777213, 24, 5, 0 },
};
3432
3433 /* Read a zstd Huffman table and build the decoding table in *TABLE, reading
3434 and updating *PPIN. This sets *PTABLE_BITS to the number of bits of the
   table, such that the table length is 1 << *PTABLE_BITS. ZDEBUG_TABLE is
3436 scratch space; it must be enough for 512 uint16_t values + 256 32-bit values
3437 (2048 bytes). Returns 1 on success, 0 on error. */
3438
3439 static int
3440 elf_zstd_read_huff (const unsigned char **ppin, const unsigned char *pinend,
3441 uint16_t *zdebug_table, uint16_t *table, int *ptable_bits)
3442 {
3443 const unsigned char *pin;
3444 unsigned char hdr;
3445 unsigned char *weights;
3446 size_t count;
3447 uint32_t *weight_mark;
3448 size_t i;
3449 uint32_t weight_mask;
3450 size_t table_bits;
3451
3452 pin = *ppin;
3453 if (unlikely (pin >= pinend))
3454 {
3455 elf_uncompress_failed ();
3456 return 0;
3457 }
3458 hdr = *pin;
3459 ++pin;
3460
3461 weights = (unsigned char *) zdebug_table;
3462
3463 if (hdr < 128)
3464 {
3465 /* Table is compressed using FSE. */
3466
3467 struct elf_zstd_fse_entry *fse_table;
3468 int fse_table_bits;
3469 uint16_t *scratch;
3470 const unsigned char *pfse;
3471 const unsigned char *pback;
3472 uint64_t val;
3473 unsigned int bits;
3474 unsigned int state1, state2;
3475
3476 /* SCRATCH is used temporarily by elf_zstd_read_fse. It overlaps
3477 WEIGHTS. */
3478 scratch = zdebug_table;
3479 fse_table = (struct elf_zstd_fse_entry *) (scratch + 512);
3480 fse_table_bits = 6;
3481
3482 pfse = pin;
3483 if (!elf_zstd_read_fse (&pfse, pinend, scratch, 255, fse_table,
3484 &fse_table_bits))
3485 return 0;
3486
3487 if (unlikely (pin + hdr > pinend))
3488 {
3489 elf_uncompress_failed ();
3490 return 0;
3491 }
3492
3493 /* We no longer need SCRATCH. Start recording weights. We need up to
3494 256 bytes of weights and 64 bytes of rank counts, so it won't overlap
3495 FSE_TABLE. */
3496
3497 pback = pin + hdr - 1;
3498
3499 if (!elf_fetch_backward_init (&pback, pfse, &val, &bits))
3500 return 0;
3501
3502 bits -= fse_table_bits;
3503 state1 = (val >> bits) & ((1U << fse_table_bits) - 1);
3504 bits -= fse_table_bits;
3505 state2 = (val >> bits) & ((1U << fse_table_bits) - 1);
3506
3507 /* There are two independent FSE streams, tracked by STATE1 and STATE2.
3508 We decode them alternately. */
3509
3510 count = 0;
3511 while (1)
3512 {
3513 struct elf_zstd_fse_entry *pt;
3514 uint64_t v;
3515
3516 pt = &fse_table[state1];
3517
3518 if (unlikely (pin < pinend) && bits < pt->bits)
3519 {
3520 if (unlikely (count >= 254))
3521 {
3522 elf_uncompress_failed ();
3523 return 0;
3524 }
3525 weights[count] = (unsigned char) pt->symbol;
3526 weights[count + 1] = (unsigned char) fse_table[state2].symbol;
3527 count += 2;
3528 break;
3529 }
3530
3531 if (unlikely (pt->bits == 0))
3532 v = 0;
3533 else
3534 {
3535 if (!elf_fetch_bits_backward (&pback, pfse, &val, &bits))
3536 return 0;
3537
3538 bits -= pt->bits;
3539 v = (val >> bits) & (((uint64_t)1 << pt->bits) - 1);
3540 }
3541
3542 state1 = pt->base + v;
3543
3544 if (unlikely (count >= 255))
3545 {
3546 elf_uncompress_failed ();
3547 return 0;
3548 }
3549
3550 weights[count] = pt->symbol;
3551 ++count;
3552
3553 pt = &fse_table[state2];
3554
3555 if (unlikely (pin < pinend && bits < pt->bits))
3556 {
3557 if (unlikely (count >= 254))
3558 {
3559 elf_uncompress_failed ();
3560 return 0;
3561 }
3562 weights[count] = (unsigned char) pt->symbol;
3563 weights[count + 1] = (unsigned char) fse_table[state1].symbol;
3564 count += 2;
3565 break;
3566 }
3567
3568 if (unlikely (pt->bits == 0))
3569 v = 0;
3570 else
3571 {
3572 if (!elf_fetch_bits_backward (&pback, pfse, &val, &bits))
3573 return 0;
3574
3575 bits -= pt->bits;
3576 v = (val >> bits) & (((uint64_t)1 << pt->bits) - 1);
3577 }
3578
3579 state2 = pt->base + v;
3580
3581 if (unlikely (count >= 255))
3582 {
3583 elf_uncompress_failed ();
3584 return 0;
3585 }
3586
3587 weights[count] = pt->symbol;
3588 ++count;
3589 }
3590
3591 pin += hdr;
3592 }
3593 else
3594 {
3595 /* Table is not compressed. Each weight is 4 bits. */
3596
3597 count = hdr - 127;
3598 if (unlikely (pin + ((count + 1) / 2) >= pinend))
3599 {
3600 elf_uncompress_failed ();
3601 return 0;
3602 }
3603 for (i = 0; i < count; i += 2)
3604 {
3605 unsigned char b;
3606
3607 b = *pin;
3608 ++pin;
3609 weights[i] = b >> 4;
3610 weights[i + 1] = b & 0xf;
3611 }
3612 }
3613
3614 weight_mark = (uint32_t *) (weights + 256);
3615 memset (weight_mark, 0, 13 * sizeof (uint32_t));
3616 weight_mask = 0;
3617 for (i = 0; i < count; ++i)
3618 {
3619 unsigned char w;
3620
3621 w = weights[i];
3622 if (unlikely (w > 12))
3623 {
3624 elf_uncompress_failed ();
3625 return 0;
3626 }
3627 ++weight_mark[w];
3628 if (w > 0)
3629 weight_mask += 1U << (w - 1);
3630 }
3631 if (unlikely (weight_mask == 0))
3632 {
3633 elf_uncompress_failed ();
3634 return 0;
3635 }
3636
3637 table_bits = 32 - __builtin_clz (weight_mask);
3638 if (unlikely (table_bits > 11))
3639 {
3640 elf_uncompress_failed ();
3641 return 0;
3642 }
3643
3644 /* Work out the last weight value, which is omitted because the weights must
3645 sum to a power of two. */
3646 {
3647 uint32_t left;
3648 uint32_t high_bit;
3649
3650 left = ((uint32_t)1 << table_bits) - weight_mask;
3651 if (left == 0)
3652 {
3653 elf_uncompress_failed ();
3654 return 0;
3655 }
3656 high_bit = 31 - __builtin_clz (left);
3657 if (((uint32_t)1 << high_bit) != left)
3658 {
3659 elf_uncompress_failed ();
3660 return 0;
3661 }
3662
3663 if (unlikely (count >= 256))
3664 {
3665 elf_uncompress_failed ();
3666 return 0;
3667 }
3668
3669 weights[count] = high_bit + 1;
3670 ++count;
3671 ++weight_mark[high_bit + 1];
3672 }
3673
3674 if (weight_mark[1] < 2 || (weight_mark[1] & 1) != 0)
3675 {
3676 elf_uncompress_failed ();
3677 return 0;
3678 }
3679
3680 /* Change WEIGHT_MARK from a count of weights to the index of the first
3681 symbol for that weight. We shift the indexes to also store how many we
3682 have seen so far, below. */
3683 {
3684 uint32_t next;
3685
3686 next = 0;
3687 for (i = 0; i < table_bits; ++i)
3688 {
3689 uint32_t cur;
3690
3691 cur = next;
3692 next += weight_mark[i + 1] << i;
3693 weight_mark[i + 1] = cur;
3694 }
3695 }
3696
3697 for (i = 0; i < count; ++i)
3698 {
3699 unsigned char weight;
3700 uint32_t length;
3701 uint16_t tval;
3702 size_t start;
3703 uint32_t j;
3704
3705 weight = weights[i];
3706 if (weight == 0)
3707 continue;
3708
3709 length = 1U << (weight - 1);
3710 tval = (i << 8) | (table_bits + 1 - weight);
3711 start = weight_mark[weight];
3712 for (j = 0; j < length; ++j)
3713 table[start + j] = tval;
3714 weight_mark[weight] += length;
3715 }
3716
3717 *ppin = pin;
3718 *ptable_bits = (int)table_bits;
3719
3720 return 1;
3721 }
3722
3723 /* Read and decompress the literals and store them ending at POUTEND. This
3724 works because we are going to use all the literals in the output, so they
3725 must fit into the output buffer. HUFFMAN_TABLE, and PHUFFMAN_TABLE_BITS
3726 store the Huffman table across calls. SCRATCH is used to read a Huffman
3727 table. Store the start of the decompressed literals in *PPLIT. Update
3728 *PPIN. Return 1 on success, 0 on error. */
3729
static int
elf_zstd_read_literals (const unsigned char **ppin,
                        const unsigned char *pinend,
                        unsigned char *pout,
                        unsigned char *poutend,
                        uint16_t *scratch,
                        uint16_t *huffman_table,
                        int *phuffman_table_bits,
                        unsigned char **pplit)
{
  const unsigned char *pin;
  unsigned char *plit;
  unsigned char hdr;        /* Literals_Section_Header first byte.  */
  uint32_t regenerated_size;  /* Size of the decompressed literals.  */
  uint32_t compressed_size;   /* Size of the compressed literal streams.  */
  int streams;                /* 1 or 4 Huffman-coded streams.  */
  uint32_t total_streams_size;
  unsigned int huffman_table_bits;
  uint64_t huffman_mask;

  pin = *ppin;
  if (unlikely (pin >= pinend))
    {
      elf_uncompress_failed ();
      return 0;
    }
  hdr = *pin;
  ++pin;

  /* Low two bits of HDR are the Literals_Block_Type (RFC 8878 3.1.1.3.1.1).  */
  if ((hdr & 3) == 0 || (hdr & 3) == 1)
    {
      int raw;

      /* Raw_Literals_Block or RLE_Literals_Block */

      raw = (hdr & 3) == 0;

      /* Bits 2-3 select the size format; the regenerated size is 5, 12 or
         20 bits.  */
      switch ((hdr >> 2) & 3)
        {
        case 0: case 2:
          regenerated_size = hdr >> 3;
          break;
        case 1:
          if (unlikely (pin >= pinend))
            {
              elf_uncompress_failed ();
              return 0;
            }
          regenerated_size = (hdr >> 4) + ((uint32_t)(*pin) << 4);
          ++pin;
          break;
        case 3:
          if (unlikely (pin + 1 >= pinend))
            {
              elf_uncompress_failed ();
              return 0;
            }
          regenerated_size = ((hdr >> 4)
                              + ((uint32_t)*pin << 4)
                              + ((uint32_t)pin[1] << 12));
          pin += 2;
          break;
        default:
          elf_uncompress_failed ();
          return 0;
        }

      if (unlikely ((size_t)(poutend - pout) < regenerated_size))
        {
          elf_uncompress_failed ();
          return 0;
        }

      /* The literals are written at the very end of the output buffer; the
         sequence execution later consumes them from there.  */
      plit = poutend - regenerated_size;

      if (raw)
        {
          if (unlikely (pin + regenerated_size >= pinend))
            {
              elf_uncompress_failed ();
              return 0;
            }
          memcpy (plit, pin, regenerated_size);
          pin += regenerated_size;
        }
      else
        {
          /* RLE: a single byte repeated REGENERATED_SIZE times.  */
          if (pin >= pinend)
            {
              elf_uncompress_failed ();
              return 0;
            }
          memset (plit, *pin, regenerated_size);
          ++pin;
        }

      *ppin = pin;
      *pplit = plit;

      return 1;
    }

  /* Compressed_Literals_Block or Treeless_Literals_Block */

  /* Bits 2-3 select the size format; both the regenerated and the
     compressed sizes are 10, 14 or 18 bits.  */
  switch ((hdr >> 2) & 3)
    {
    case 0: case 1:
      if (unlikely (pin + 1 >= pinend))
        {
          elf_uncompress_failed ();
          return 0;
        }
      regenerated_size = (hdr >> 4) | ((uint32_t)(*pin & 0x3f) << 4);
      compressed_size = (uint32_t)*pin >> 6 | ((uint32_t)pin[1] << 2);
      pin += 2;
      /* Size format 0 means a single stream; format 1 means 4 streams.  */
      streams = ((hdr >> 2) & 3) == 0 ? 1 : 4;
      break;
    case 2:
      if (unlikely (pin + 2 >= pinend))
        {
          elf_uncompress_failed ();
          return 0;
        }
      regenerated_size = (((uint32_t)hdr >> 4)
                          | ((uint32_t)*pin << 4)
                          | (((uint32_t)pin[1] & 3) << 12));
      compressed_size = (((uint32_t)pin[1] >> 2)
                         | ((uint32_t)pin[2] << 6));
      pin += 3;
      streams = 4;
      break;
    case 3:
      if (unlikely (pin + 3 >= pinend))
        {
          elf_uncompress_failed ();
          return 0;
        }
      regenerated_size = (((uint32_t)hdr >> 4)
                          | ((uint32_t)*pin << 4)
                          | (((uint32_t)pin[1] & 0x3f) << 12));
      compressed_size = (((uint32_t)pin[1] >> 6)
                         | ((uint32_t)pin[2] << 2)
                         | ((uint32_t)pin[3] << 10));
      pin += 4;
      streams = 4;
      break;
    default:
      elf_uncompress_failed ();
      return 0;
    }

  if (unlikely (pin + compressed_size > pinend))
    {
      elf_uncompress_failed ();
      return 0;
    }

  /* Clamp PINEND to this literals section and advance the caller's input
     pointer past it now; the rest of this function only reads inside it.  */
  pinend = pin + compressed_size;
  *ppin = pinend;

  if (unlikely ((size_t)(poutend - pout) < regenerated_size))
    {
      elf_uncompress_failed ();
      return 0;
    }

  plit = poutend - regenerated_size;

  *pplit = plit;

  total_streams_size = compressed_size;
  if ((hdr & 3) == 2)
    {
      const unsigned char *ptable;

      /* Compressed_Literals_Block.  Read Huffman tree.  */

      ptable = pin;
      if (!elf_zstd_read_huff (&ptable, pinend, scratch, huffman_table,
                               phuffman_table_bits))
        return 0;

      if (unlikely (total_streams_size < (size_t)(ptable - pin)))
        {
          elf_uncompress_failed ();
          return 0;
        }

      /* The streams occupy whatever is left after the table description.  */
      total_streams_size -= ptable - pin;
      pin = ptable;
    }
  else
    {
      /* Treeless_Literals_Block.  Reuse previous Huffman tree.  */
      if (unlikely (*phuffman_table_bits == 0))
        {
          elf_uncompress_failed ();
          return 0;
        }
    }

  /* Decompress COMPRESSED_SIZE bytes of data at PIN using the huffman table,
     storing REGENERATED_SIZE bytes of decompressed data at PLIT.  */

  huffman_table_bits = (unsigned int)*phuffman_table_bits;
  huffman_mask = ((uint64_t)1 << huffman_table_bits) - 1;

  if (streams == 1)
    {
      const unsigned char *pback;
      const unsigned char *pbackend;
      uint64_t val;
      unsigned int bits;
      uint32_t i;

      /* The Huffman bitstream is read backward from its last byte.  */
      pback = pin + total_streams_size - 1;
      pbackend = pin;
      if (!elf_fetch_backward_init (&pback, pbackend, &val, &bits))
        return 0;

      /* This is one of the inner loops of the decompression algorithm, so we
         put some effort into optimization.  We can't get more than 64 bytes
         from a single call to elf_fetch_bits_backward, and we can't subtract
         more than 11 bits at a time.  */

      if (regenerated_size >= 64)
        {
          unsigned char *plitstart;
          unsigned char *plitstop;

          /* Stop the fast loop 64 bytes early so the unchecked decodes
             below can never overrun the output.  */
          plitstart = plit;
          plitstop = plit + regenerated_size - 64;
          while (plit < plitstop)
            {
              uint16_t t;

              if (!elf_fetch_bits_backward (&pback, pbackend, &val, &bits))
                return 0;

              if (bits < 16)
                break;

              /* With >= 33 bits buffered we can decode three symbols (at
                 most 11 bits each) without refetching.  */
              while (bits >= 33)
                {
                  t = huffman_table[(val >> (bits - huffman_table_bits))
                                    & huffman_mask];
                  *plit = t >> 8;
                  ++plit;
                  bits -= t & 0xff;

                  t = huffman_table[(val >> (bits - huffman_table_bits))
                                    & huffman_mask];
                  *plit = t >> 8;
                  ++plit;
                  bits -= t & 0xff;

                  t = huffman_table[(val >> (bits - huffman_table_bits))
                                    & huffman_mask];
                  *plit = t >> 8;
                  ++plit;
                  bits -= t & 0xff;
                }

              while (bits > 11)
                {
                  t = huffman_table[(val >> (bits - huffman_table_bits))
                                    & huffman_mask];
                  *plit = t >> 8;
                  ++plit;
                  bits -= t & 0xff;
                }
            }

          regenerated_size -= plit - plitstart;
        }

      /* Careful tail loop for the remaining (up to 64) bytes.  */
      for (i = 0; i < regenerated_size; ++i)
        {
          uint16_t t;

          if (!elf_fetch_bits_backward (&pback, pbackend, &val, &bits))
            return 0;

          if (unlikely (bits < huffman_table_bits))
            {
              /* Fewer bits left than the table index width: pad with zero
                 bits from below, but the decoded code length must still
                 fit in what remains.  */
              t = huffman_table[(val << (huffman_table_bits - bits))
                                & huffman_mask];
              if (unlikely (bits < (t & 0xff)))
                {
                  elf_uncompress_failed ();
                  return 0;
                }
            }
          else
            t = huffman_table[(val >> (bits - huffman_table_bits))
                              & huffman_mask];

          *plit = t >> 8;
          ++plit;
          bits -= t & 0xff;
        }

      return 1;
    }

  /* Four interleaved Huffman streams.  */
  {
    uint32_t stream_size1, stream_size2, stream_size3, stream_size4;
    uint32_t tot;
    const unsigned char *pback1, *pback2, *pback3, *pback4;
    const unsigned char *pbackend1, *pbackend2, *pbackend3, *pbackend4;
    uint64_t val1, val2, val3, val4;
    unsigned int bits1, bits2, bits3, bits4;
    unsigned char *plit1, *plit2, *plit3, *plit4;
    uint32_t regenerated_stream_size;
    uint32_t regenerated_stream_size4;
    uint16_t t1, t2, t3, t4;
    uint32_t i;
    uint32_t limit;

    /* Read jump table.  */
    if (unlikely (pin + 5 >= pinend))
      {
        elf_uncompress_failed ();
        return 0;
      }
    stream_size1 = (uint32_t)*pin | ((uint32_t)pin[1] << 8);
    pin += 2;
    stream_size2 = (uint32_t)*pin | ((uint32_t)pin[1] << 8);
    pin += 2;
    stream_size3 = (uint32_t)*pin | ((uint32_t)pin[1] << 8);
    pin += 2;
    tot = stream_size1 + stream_size2 + stream_size3;
    /* The 6-byte jump table is part of TOTAL_STREAMS_SIZE; the fourth
       stream size is whatever remains.  */
    if (unlikely (tot > total_streams_size - 6))
      {
        elf_uncompress_failed ();
        return 0;
      }
    stream_size4 = total_streams_size - 6 - tot;

    /* Each stream is read backward from its own last byte.  */
    pback1 = pin + stream_size1 - 1;
    pbackend1 = pin;

    pback2 = pback1 + stream_size2;
    pbackend2 = pback1 + 1;

    pback3 = pback2 + stream_size3;
    pbackend3 = pback2 + 1;

    pback4 = pback3 + stream_size4;
    pbackend4 = pback3 + 1;

    if (!elf_fetch_backward_init (&pback1, pbackend1, &val1, &bits1))
      return 0;
    if (!elf_fetch_backward_init (&pback2, pbackend2, &val2, &bits2))
      return 0;
    if (!elf_fetch_backward_init (&pback3, pbackend3, &val3, &bits3))
      return 0;
    if (!elf_fetch_backward_init (&pback4, pbackend4, &val4, &bits4))
      return 0;

    /* Streams 1-3 each produce ceil(REGENERATED_SIZE / 4) bytes; stream 4
       produces the remainder.  */
    regenerated_stream_size = (regenerated_size + 3) / 4;

    plit1 = plit;
    plit2 = plit1 + regenerated_stream_size;
    plit3 = plit2 + regenerated_stream_size;
    plit4 = plit3 + regenerated_stream_size;

    regenerated_stream_size4 = regenerated_size - regenerated_stream_size * 3;

    /* We can't get more than 64 literal bytes from a single call to
       elf_fetch_bits_backward.  The fourth stream can be up to 3 bytes less,
       so use that as the limit.  */

    limit = regenerated_stream_size4 <= 64 ? 0 : regenerated_stream_size4 - 64;
    i = 0;
    while (i < limit)
      {
        if (!elf_fetch_bits_backward (&pback1, pbackend1, &val1, &bits1))
          return 0;
        if (!elf_fetch_bits_backward (&pback2, pbackend2, &val2, &bits2))
          return 0;
        if (!elf_fetch_bits_backward (&pback3, pbackend3, &val3, &bits3))
          return 0;
        if (!elf_fetch_bits_backward (&pback4, pbackend4, &val4, &bits4))
          return 0;

        /* We can't subtract more than 11 bits at a time.  */

        do
          {
            t1 = huffman_table[(val1 >> (bits1 - huffman_table_bits))
                               & huffman_mask];
            t2 = huffman_table[(val2 >> (bits2 - huffman_table_bits))
                               & huffman_mask];
            t3 = huffman_table[(val3 >> (bits3 - huffman_table_bits))
                               & huffman_mask];
            t4 = huffman_table[(val4 >> (bits4 - huffman_table_bits))
                               & huffman_mask];

            *plit1 = t1 >> 8;
            ++plit1;
            bits1 -= t1 & 0xff;

            *plit2 = t2 >> 8;
            ++plit2;
            bits2 -= t2 & 0xff;

            *plit3 = t3 >> 8;
            ++plit3;
            bits3 -= t3 & 0xff;

            *plit4 = t4 >> 8;
            ++plit4;
            bits4 -= t4 & 0xff;

            ++i;
          }
        while (bits1 > 11 && bits2 > 11 && bits3 > 11 && bits4 > 11);
      }

    /* Careful tail loop; stream 4 may finish before the other three.  */
    while (i < regenerated_stream_size)
      {
        int use4;

        use4 = i < regenerated_stream_size4;

        if (!elf_fetch_bits_backward (&pback1, pbackend1, &val1, &bits1))
          return 0;
        if (!elf_fetch_bits_backward (&pback2, pbackend2, &val2, &bits2))
          return 0;
        if (!elf_fetch_bits_backward (&pback3, pbackend3, &val3, &bits3))
          return 0;
        if (use4)
          {
            if (!elf_fetch_bits_backward (&pback4, pbackend4, &val4, &bits4))
              return 0;
          }

        if (unlikely (bits1 < huffman_table_bits))
          {
            t1 = huffman_table[(val1 << (huffman_table_bits - bits1))
                               & huffman_mask];
            if (unlikely (bits1 < (t1 & 0xff)))
              {
                elf_uncompress_failed ();
                return 0;
              }
          }
        else
          t1 = huffman_table[(val1 >> (bits1 - huffman_table_bits))
                             & huffman_mask];

        if (unlikely (bits2 < huffman_table_bits))
          {
            t2 = huffman_table[(val2 << (huffman_table_bits - bits2))
                               & huffman_mask];
            if (unlikely (bits2 < (t2 & 0xff)))
              {
                elf_uncompress_failed ();
                return 0;
              }
          }
        else
          t2 = huffman_table[(val2 >> (bits2 - huffman_table_bits))
                             & huffman_mask];

        if (unlikely (bits3 < huffman_table_bits))
          {
            t3 = huffman_table[(val3 << (huffman_table_bits - bits3))
                               & huffman_mask];
            if (unlikely (bits3 < (t3 & 0xff)))
              {
                elf_uncompress_failed ();
                return 0;
              }
          }
        else
          t3 = huffman_table[(val3 >> (bits3 - huffman_table_bits))
                             & huffman_mask];

        if (use4)
          {
            if (unlikely (bits4 < huffman_table_bits))
              {
                t4 = huffman_table[(val4 << (huffman_table_bits - bits4))
                                   & huffman_mask];
                if (unlikely (bits4 < (t4 & 0xff)))
                  {
                    elf_uncompress_failed ();
                    return 0;
                  }
              }
            else
              t4 = huffman_table[(val4 >> (bits4 - huffman_table_bits))
                                 & huffman_mask];

            *plit4 = t4 >> 8;
            ++plit4;
            bits4 -= t4 & 0xff;
          }

        *plit1 = t1 >> 8;
        ++plit1;
        bits1 -= t1 & 0xff;

        *plit2 = t2 >> 8;
        ++plit2;
        bits2 -= t2 & 0xff;

        *plit3 = t3 >> 8;
        ++plit3;
        bits3 -= t3 & 0xff;

        ++i;
      }
  }

  return 1;
}
4249
4250 /* The information used to decompress a sequence code, which can be a literal
4251 length, an offset, or a match length. */
4252
struct elf_zstd_seq_decode
{
  /* Current FSE baseline decoding table; points either at one of the
     static predefined tables or at a per-block table built from the
     input.  */
  const struct elf_zstd_fse_baseline_entry *table;
  /* log2 of the table length; -1 until a table has been set up.  */
  int table_bits;
};
4258
4259 /* Unpack a sequence code compression mode. */
4260
4261 static int
4262 elf_zstd_unpack_seq_decode (int mode,
4263 const unsigned char **ppin,
4264 const unsigned char *pinend,
4265 const struct elf_zstd_fse_baseline_entry *predef,
4266 int predef_bits,
4267 uint16_t *scratch,
4268 int maxidx,
4269 struct elf_zstd_fse_baseline_entry *table,
4270 int table_bits,
4271 int (*conv)(const struct elf_zstd_fse_entry *,
4272 int,
4273 struct elf_zstd_fse_baseline_entry *),
4274 struct elf_zstd_seq_decode *decode)
4275 {
4276 switch (mode)
4277 {
4278 case 0:
4279 decode->table = predef;
4280 decode->table_bits = predef_bits;
4281 break;
4282
4283 case 1:
4284 {
4285 struct elf_zstd_fse_entry entry;
4286
4287 if (unlikely (*ppin >= pinend))
4288 {
4289 elf_uncompress_failed ();
4290 return 0;
4291 }
4292 entry.symbol = **ppin;
4293 ++*ppin;
4294 entry.bits = 0;
4295 entry.base = 0;
4296 decode->table_bits = 0;
4297 if (!conv (&entry, 0, table))
4298 return 0;
4299 }
4300 break;
4301
4302 case 2:
4303 {
4304 struct elf_zstd_fse_entry *fse_table;
4305
4306 /* We use the same space for the simple FSE table and the baseline
4307 table. */
4308 fse_table = (struct elf_zstd_fse_entry *)table;
4309 decode->table_bits = table_bits;
4310 if (!elf_zstd_read_fse (ppin, pinend, scratch, maxidx, fse_table,
4311 &decode->table_bits))
4312 return 0;
4313 if (!conv (fse_table, decode->table_bits, table))
4314 return 0;
4315 decode->table = table;
4316 }
4317 break;
4318
4319 case 3:
4320 if (unlikely (decode->table_bits == -1))
4321 {
4322 elf_uncompress_failed ();
4323 return 0;
4324 }
4325 break;
4326
4327 default:
4328 elf_uncompress_failed ();
4329 return 0;
4330 }
4331
4332 return 1;
4333 }
4334
4335 /* Decompress a zstd stream from PIN/SIN to POUT/SOUT. Code based on RFC 8878.
4336 Return 1 on success, 0 on error. */
4337
4338 static int
4339 elf_zstd_decompress (const unsigned char *pin, size_t sin,
4340 unsigned char *zdebug_table, unsigned char *pout,
4341 size_t sout)
4342 {
4343 const unsigned char *pinend;
4344 unsigned char *poutstart;
4345 unsigned char *poutend;
4346 struct elf_zstd_seq_decode literal_decode;
4347 struct elf_zstd_fse_baseline_entry *literal_fse_table;
4348 struct elf_zstd_seq_decode match_decode;
4349 struct elf_zstd_fse_baseline_entry *match_fse_table;
4350 struct elf_zstd_seq_decode offset_decode;
4351 struct elf_zstd_fse_baseline_entry *offset_fse_table;
4352 uint16_t *huffman_table;
4353 int huffman_table_bits;
4354 uint32_t repeated_offset1;
4355 uint32_t repeated_offset2;
4356 uint32_t repeated_offset3;
4357 uint16_t *scratch;
4358 unsigned char hdr;
4359 int has_checksum;
4360 uint64_t content_size;
4361 int last_block;
4362
4363 pinend = pin + sin;
4364 poutstart = pout;
4365 poutend = pout + sout;
4366
4367 literal_decode.table = NULL;
4368 literal_decode.table_bits = -1;
4369 literal_fse_table = ((struct elf_zstd_fse_baseline_entry *)
4370 (zdebug_table + ZSTD_TABLE_LITERAL_FSE_OFFSET));
4371
4372 match_decode.table = NULL;
4373 match_decode.table_bits = -1;
4374 match_fse_table = ((struct elf_zstd_fse_baseline_entry *)
4375 (zdebug_table + ZSTD_TABLE_MATCH_FSE_OFFSET));
4376
4377 offset_decode.table = NULL;
4378 offset_decode.table_bits = -1;
4379 offset_fse_table = ((struct elf_zstd_fse_baseline_entry *)
4380 (zdebug_table + ZSTD_TABLE_OFFSET_FSE_OFFSET));
4381 huffman_table = ((uint16_t *)
4382 (zdebug_table + ZSTD_TABLE_HUFFMAN_OFFSET));
4383 huffman_table_bits = 0;
4384 scratch = ((uint16_t *)
4385 (zdebug_table + ZSTD_TABLE_WORK_OFFSET));
4386
4387 repeated_offset1 = 1;
4388 repeated_offset2 = 4;
4389 repeated_offset3 = 8;
4390
4391 if (unlikely (sin < 4))
4392 {
4393 elf_uncompress_failed ();
4394 return 0;
4395 }
4396
4397 /* These values are the zstd magic number. */
4398 if (unlikely (pin[0] != 0x28
4399 || pin[1] != 0xb5
4400 || pin[2] != 0x2f
4401 || pin[3] != 0xfd))
4402 {
4403 elf_uncompress_failed ();
4404 return 0;
4405 }
4406
4407 pin += 4;
4408
4409 if (unlikely (pin >= pinend))
4410 {
4411 elf_uncompress_failed ();
4412 return 0;
4413 }
4414
4415 hdr = *pin++;
4416
4417 /* We expect a single frame. */
4418 if (unlikely ((hdr & (1 << 5)) == 0))
4419 {
4420 elf_uncompress_failed ();
4421 return 0;
4422 }
4423 /* Reserved bit must be zero. */
4424 if (unlikely ((hdr & (1 << 3)) != 0))
4425 {
4426 elf_uncompress_failed ();
4427 return 0;
4428 }
4429 /* We do not expect a dictionary. */
4430 if (unlikely ((hdr & 3) != 0))
4431 {
4432 elf_uncompress_failed ();
4433 return 0;
4434 }
4435 has_checksum = (hdr & (1 << 2)) != 0;
4436 switch (hdr >> 6)
4437 {
4438 case 0:
4439 if (unlikely (pin >= pinend))
4440 {
4441 elf_uncompress_failed ();
4442 return 0;
4443 }
4444 content_size = (uint64_t) *pin++;
4445 break;
4446 case 1:
4447 if (unlikely (pin + 1 >= pinend))
4448 {
4449 elf_uncompress_failed ();
4450 return 0;
4451 }
4452 content_size = (((uint64_t) pin[0]) | (((uint64_t) pin[1]) << 8)) + 256;
4453 pin += 2;
4454 break;
4455 case 2:
4456 if (unlikely (pin + 3 >= pinend))
4457 {
4458 elf_uncompress_failed ();
4459 return 0;
4460 }
4461 content_size = ((uint64_t) pin[0]
4462 | (((uint64_t) pin[1]) << 8)
4463 | (((uint64_t) pin[2]) << 16)
4464 | (((uint64_t) pin[3]) << 24));
4465 pin += 4;
4466 break;
4467 case 3:
4468 if (unlikely (pin + 7 >= pinend))
4469 {
4470 elf_uncompress_failed ();
4471 return 0;
4472 }
4473 content_size = ((uint64_t) pin[0]
4474 | (((uint64_t) pin[1]) << 8)
4475 | (((uint64_t) pin[2]) << 16)
4476 | (((uint64_t) pin[3]) << 24)
4477 | (((uint64_t) pin[4]) << 32)
4478 | (((uint64_t) pin[5]) << 40)
4479 | (((uint64_t) pin[6]) << 48)
4480 | (((uint64_t) pin[7]) << 56));
4481 pin += 8;
4482 break;
4483 default:
4484 elf_uncompress_failed ();
4485 return 0;
4486 }
4487
4488 if (unlikely (content_size != (size_t) content_size
4489 || (size_t) content_size != sout))
4490 {
4491 elf_uncompress_failed ();
4492 return 0;
4493 }
4494
4495 last_block = 0;
4496 while (!last_block)
4497 {
4498 uint32_t block_hdr;
4499 int block_type;
4500 uint32_t block_size;
4501
4502 if (unlikely (pin + 2 >= pinend))
4503 {
4504 elf_uncompress_failed ();
4505 return 0;
4506 }
4507 block_hdr = ((uint32_t) pin[0]
4508 | (((uint32_t) pin[1]) << 8)
4509 | (((uint32_t) pin[2]) << 16));
4510 pin += 3;
4511
4512 last_block = block_hdr & 1;
4513 block_type = (block_hdr >> 1) & 3;
4514 block_size = block_hdr >> 3;
4515
4516 switch (block_type)
4517 {
4518 case 0:
4519 /* Raw_Block */
4520 if (unlikely ((size_t) block_size > (size_t) (pinend - pin)))
4521 {
4522 elf_uncompress_failed ();
4523 return 0;
4524 }
4525 if (unlikely ((size_t) block_size > (size_t) (poutend - pout)))
4526 {
4527 elf_uncompress_failed ();
4528 return 0;
4529 }
4530 memcpy (pout, pin, block_size);
4531 pout += block_size;
4532 pin += block_size;
4533 break;
4534
4535 case 1:
4536 /* RLE_Block */
4537 if (unlikely (pin >= pinend))
4538 {
4539 elf_uncompress_failed ();
4540 return 0;
4541 }
4542 if (unlikely ((size_t) block_size > (size_t) (poutend - pout)))
4543 {
4544 elf_uncompress_failed ();
4545 return 0;
4546 }
4547 memset (pout, *pin, block_size);
4548 pout += block_size;
4549 pin++;
4550 break;
4551
4552 case 2:
4553 {
4554 const unsigned char *pblockend;
4555 unsigned char *plitstack;
4556 unsigned char *plit;
4557 uint32_t literal_count;
4558 unsigned char seq_hdr;
4559 size_t seq_count;
4560 size_t seq;
4561 const unsigned char *pback;
4562 uint64_t val;
4563 unsigned int bits;
4564 unsigned int literal_state;
4565 unsigned int offset_state;
4566 unsigned int match_state;
4567
4568 /* Compressed_Block */
4569 if (unlikely ((size_t) block_size > (size_t) (pinend - pin)))
4570 {
4571 elf_uncompress_failed ();
4572 return 0;
4573 }
4574
4575 pblockend = pin + block_size;
4576
4577 /* Read the literals into the end of the output space, and leave
4578 PLIT pointing at them. */
4579
4580 if (!elf_zstd_read_literals (&pin, pblockend, pout, poutend,
4581 scratch, huffman_table,
4582 &huffman_table_bits,
4583 &plitstack))
4584 return 0;
4585 plit = plitstack;
4586 literal_count = poutend - plit;
4587
4588 seq_hdr = *pin;
4589 pin++;
4590 if (seq_hdr < 128)
4591 seq_count = seq_hdr;
4592 else if (seq_hdr < 255)
4593 {
4594 if (unlikely (pin >= pinend))
4595 {
4596 elf_uncompress_failed ();
4597 return 0;
4598 }
4599 seq_count = ((seq_hdr - 128) << 8) + *pin;
4600 pin++;
4601 }
4602 else
4603 {
4604 if (unlikely (pin + 1 >= pinend))
4605 {
4606 elf_uncompress_failed ();
4607 return 0;
4608 }
4609 seq_count = *pin + (pin[1] << 8) + 0x7f00;
4610 pin += 2;
4611 }
4612
4613 if (seq_count > 0)
4614 {
4615 int (*pfn)(const struct elf_zstd_fse_entry *,
4616 int, struct elf_zstd_fse_baseline_entry *);
4617
4618 if (unlikely (pin >= pinend))
4619 {
4620 elf_uncompress_failed ();
4621 return 0;
4622 }
4623 seq_hdr = *pin;
4624 ++pin;
4625
4626 pfn = elf_zstd_make_literal_baseline_fse;
4627 if (!elf_zstd_unpack_seq_decode ((seq_hdr >> 6) & 3,
4628 &pin, pinend,
4629 &elf_zstd_lit_table[0], 6,
4630 scratch, 35,
4631 literal_fse_table, 9, pfn,
4632 &literal_decode))
4633 return 0;
4634
4635 pfn = elf_zstd_make_offset_baseline_fse;
4636 if (!elf_zstd_unpack_seq_decode ((seq_hdr >> 4) & 3,
4637 &pin, pinend,
4638 &elf_zstd_offset_table[0], 5,
4639 scratch, 31,
4640 offset_fse_table, 8, pfn,
4641 &offset_decode))
4642 return 0;
4643
4644 pfn = elf_zstd_make_match_baseline_fse;
4645 if (!elf_zstd_unpack_seq_decode ((seq_hdr >> 2) & 3,
4646 &pin, pinend,
4647 &elf_zstd_match_table[0], 6,
4648 scratch, 52,
4649 match_fse_table, 9, pfn,
4650 &match_decode))
4651 return 0;
4652 }
4653
4654 pback = pblockend - 1;
4655 if (!elf_fetch_backward_init (&pback, pin, &val, &bits))
4656 return 0;
4657
4658 bits -= literal_decode.table_bits;
4659 literal_state = ((val >> bits)
4660 & ((1U << literal_decode.table_bits) - 1));
4661
4662 if (!elf_fetch_bits_backward (&pback, pin, &val, &bits))
4663 return 0;
4664 bits -= offset_decode.table_bits;
4665 offset_state = ((val >> bits)
4666 & ((1U << offset_decode.table_bits) - 1));
4667
4668 if (!elf_fetch_bits_backward (&pback, pin, &val, &bits))
4669 return 0;
4670 bits -= match_decode.table_bits;
4671 match_state = ((val >> bits)
4672 & ((1U << match_decode.table_bits) - 1));
4673
4674 seq = 0;
4675 while (1)
4676 {
4677 const struct elf_zstd_fse_baseline_entry *pt;
4678 uint32_t offset_basebits;
4679 uint32_t offset_baseline;
4680 uint32_t offset_bits;
4681 uint32_t offset_base;
4682 uint32_t offset;
4683 uint32_t match_baseline;
4684 uint32_t match_bits;
4685 uint32_t match_base;
4686 uint32_t match;
4687 uint32_t literal_baseline;
4688 uint32_t literal_bits;
4689 uint32_t literal_base;
4690 uint32_t literal;
4691 uint32_t need;
4692 uint32_t add;
4693
4694 pt = &offset_decode.table[offset_state];
4695 offset_basebits = pt->basebits;
4696 offset_baseline = pt->baseline;
4697 offset_bits = pt->bits;
4698 offset_base = pt->base;
4699
4700 /* This case can be more than 16 bits, which is all that
4701 elf_fetch_bits_backward promises. */
4702 need = offset_basebits;
4703 add = 0;
4704 if (unlikely (need > 16))
4705 {
4706 if (!elf_fetch_bits_backward (&pback, pin, &val, &bits))
4707 return 0;
4708 bits -= 16;
4709 add = (val >> bits) & ((1U << 16) - 1);
4710 need -= 16;
4711 add <<= need;
4712 }
4713 if (need > 0)
4714 {
4715 if (!elf_fetch_bits_backward (&pback, pin, &val, &bits))
4716 return 0;
4717 bits -= need;
4718 add += (val >> bits) & ((1U << need) - 1);
4719 }
4720
4721 offset = offset_baseline + add;
4722
4723 pt = &match_decode.table[match_state];
4724 need = pt->basebits;
4725 match_baseline = pt->baseline;
4726 match_bits = pt->bits;
4727 match_base = pt->base;
4728
4729 add = 0;
4730 if (need > 0)
4731 {
4732 if (!elf_fetch_bits_backward (&pback, pin, &val, &bits))
4733 return 0;
4734 bits -= need;
4735 add = (val >> bits) & ((1U << need) - 1);
4736 }
4737
4738 match = match_baseline + add;
4739
4740 pt = &literal_decode.table[literal_state];
4741 need = pt->basebits;
4742 literal_baseline = pt->baseline;
4743 literal_bits = pt->bits;
4744 literal_base = pt->base;
4745
4746 add = 0;
4747 if (need > 0)
4748 {
4749 if (!elf_fetch_bits_backward (&pback, pin, &val, &bits))
4750 return 0;
4751 bits -= need;
4752 add = (val >> bits) & ((1U << need) - 1);
4753 }
4754
4755 literal = literal_baseline + add;
4756
4757 /* See the comment in elf_zstd_make_offset_baseline_fse. */
4758 if (offset_basebits > 1)
4759 {
4760 repeated_offset3 = repeated_offset2;
4761 repeated_offset2 = repeated_offset1;
4762 repeated_offset1 = offset;
4763 }
4764 else
4765 {
4766 if (unlikely (literal == 0))
4767 ++offset;
4768 switch (offset)
4769 {
4770 case 1:
4771 offset = repeated_offset1;
4772 break;
4773 case 2:
4774 offset = repeated_offset2;
4775 repeated_offset2 = repeated_offset1;
4776 repeated_offset1 = offset;
4777 break;
4778 case 3:
4779 offset = repeated_offset3;
4780 repeated_offset3 = repeated_offset2;
4781 repeated_offset2 = repeated_offset1;
4782 repeated_offset1 = offset;
4783 break;
4784 case 4:
4785 offset = repeated_offset1 - 1;
4786 repeated_offset3 = repeated_offset2;
4787 repeated_offset2 = repeated_offset1;
4788 repeated_offset1 = offset;
4789 break;
4790 }
4791 }
4792
4793 ++seq;
4794 if (seq < seq_count)
4795 {
4796 uint32_t v;
4797
4798 /* Update the three states. */
4799
4800 if (!elf_fetch_bits_backward (&pback, pin, &val, &bits))
4801 return 0;
4802
4803 need = literal_bits;
4804 bits -= need;
4805 v = (val >> bits) & (((uint32_t)1 << need) - 1);
4806
4807 literal_state = literal_base + v;
4808
4809 if (!elf_fetch_bits_backward (&pback, pin, &val, &bits))
4810 return 0;
4811
4812 need = match_bits;
4813 bits -= need;
4814 v = (val >> bits) & (((uint32_t)1 << need) - 1);
4815
4816 match_state = match_base + v;
4817
4818 if (!elf_fetch_bits_backward (&pback, pin, &val, &bits))
4819 return 0;
4820
4821 need = offset_bits;
4822 bits -= need;
4823 v = (val >> bits) & (((uint32_t)1 << need) - 1);
4824
4825 offset_state = offset_base + v;
4826 }
4827
4828 /* The next sequence is now in LITERAL, OFFSET, MATCH. */
4829
4830 /* Copy LITERAL bytes from the literals. */
4831
4832 if (unlikely ((size_t)(poutend - pout) < literal))
4833 {
4834 elf_uncompress_failed ();
4835 return 0;
4836 }
4837
4838 if (unlikely (literal_count < literal))
4839 {
4840 elf_uncompress_failed ();
4841 return 0;
4842 }
4843
4844 literal_count -= literal;
4845
4846 /* Often LITERAL is small, so handle small cases quickly. */
4847 switch (literal)
4848 {
4849 case 8:
4850 *pout++ = *plit++;
4851 ATTRIBUTE_FALLTHROUGH;
4852 case 7:
4853 *pout++ = *plit++;
4854 ATTRIBUTE_FALLTHROUGH;
4855 case 6:
4856 *pout++ = *plit++;
4857 ATTRIBUTE_FALLTHROUGH;
4858 case 5:
4859 *pout++ = *plit++;
4860 ATTRIBUTE_FALLTHROUGH;
4861 case 4:
4862 *pout++ = *plit++;
4863 ATTRIBUTE_FALLTHROUGH;
4864 case 3:
4865 *pout++ = *plit++;
4866 ATTRIBUTE_FALLTHROUGH;
4867 case 2:
4868 *pout++ = *plit++;
4869 ATTRIBUTE_FALLTHROUGH;
4870 case 1:
4871 *pout++ = *plit++;
4872 break;
4873
4874 case 0:
4875 break;
4876
4877 default:
4878 if (unlikely ((size_t)(plit - pout) < literal))
4879 {
4880 uint32_t move;
4881
4882 move = plit - pout;
4883 while (literal > move)
4884 {
4885 memcpy (pout, plit, move);
4886 pout += move;
4887 plit += move;
4888 literal -= move;
4889 }
4890 }
4891
4892 memcpy (pout, plit, literal);
4893 pout += literal;
4894 plit += literal;
4895 }
4896
4897 if (match > 0)
4898 {
4899 /* Copy MATCH bytes from the decoded output at OFFSET. */
4900
4901 if (unlikely ((size_t)(poutend - pout) < match))
4902 {
4903 elf_uncompress_failed ();
4904 return 0;
4905 }
4906
4907 if (unlikely ((size_t)(pout - poutstart) < offset))
4908 {
4909 elf_uncompress_failed ();
4910 return 0;
4911 }
4912
4913 if (offset >= match)
4914 {
4915 memcpy (pout, pout - offset, match);
4916 pout += match;
4917 }
4918 else
4919 {
4920 while (match > 0)
4921 {
4922 uint32_t copy;
4923
4924 copy = match < offset ? match : offset;
4925 memcpy (pout, pout - offset, copy);
4926 match -= copy;
4927 pout += copy;
4928 }
4929 }
4930 }
4931
4932 if (unlikely (seq >= seq_count))
4933 {
4934 /* Copy remaining literals. */
4935 if (literal_count > 0 && plit != pout)
4936 {
4937 if (unlikely ((size_t)(poutend - pout)
4938 < literal_count))
4939 {
4940 elf_uncompress_failed ();
4941 return 0;
4942 }
4943
4944 if ((size_t)(plit - pout) < literal_count)
4945 {
4946 uint32_t move;
4947
4948 move = plit - pout;
4949 while (literal_count > move)
4950 {
4951 memcpy (pout, plit, move);
4952 pout += move;
4953 plit += move;
4954 literal_count -= move;
4955 }
4956 }
4957
4958 memcpy (pout, plit, literal_count);
4959 }
4960
4961 pout += literal_count;
4962
4963 break;
4964 }
4965 }
4966
4967 pin = pblockend;
4968 }
4969 break;
4970
4971 case 3:
4972 default:
4973 elf_uncompress_failed ();
4974 return 0;
4975 }
4976 }
4977
4978 if (has_checksum)
4979 {
4980 if (unlikely (pin + 4 > pinend))
4981 {
4982 elf_uncompress_failed ();
4983 return 0;
4984 }
4985
4986 /* We don't currently verify the checksum. Currently running GNU ld with
4987 --compress-debug-sections=zstd does not seem to generate a
4988 checksum. */
4989
4990 pin += 4;
4991 }
4992
4993 if (pin != pinend)
4994 {
4995 elf_uncompress_failed ();
4996 return 0;
4997 }
4998
4999 return 1;
5000 }
5001
/* Size of the scratch buffer shared by the zlib and zstd decoders:
   the larger of the two decoders' table sizes.  */
#define ZDEBUG_TABLE_SIZE \
  (ZLIB_TABLE_SIZE > ZSTD_TABLE_SIZE ? ZLIB_TABLE_SIZE : ZSTD_TABLE_SIZE)
5004
5005 /* Uncompress the old compressed debug format, the one emitted by
5006 --compress-debug-sections=zlib-gnu. The compressed data is in
5007 COMPRESSED / COMPRESSED_SIZE, and the function writes to
5008 *UNCOMPRESSED / *UNCOMPRESSED_SIZE. ZDEBUG_TABLE is work space to
5009 hold Huffman tables. Returns 0 on error, 1 on successful
5010 decompression or if something goes wrong. In general we try to
5011 carry on, by returning 1, even if we can't decompress. */
5012
5013 static int
5014 elf_uncompress_zdebug (struct backtrace_state *state,
5015 const unsigned char *compressed, size_t compressed_size,
5016 uint16_t *zdebug_table,
5017 backtrace_error_callback error_callback, void *data,
5018 unsigned char **uncompressed, size_t *uncompressed_size)
5019 {
5020 size_t sz;
5021 size_t i;
5022 unsigned char *po;
5023
5024 *uncompressed = NULL;
5025 *uncompressed_size = 0;
5026
5027 /* The format starts with the four bytes ZLIB, followed by the 8
5028 byte length of the uncompressed data in big-endian order,
5029 followed by a zlib stream. */
5030
5031 if (compressed_size < 12 || memcmp (compressed, "ZLIB", 4) != 0)
5032 return 1;
5033
5034 sz = 0;
5035 for (i = 0; i < 8; i++)
5036 sz = (sz << 8) | compressed[i + 4];
5037
5038 if (*uncompressed != NULL && *uncompressed_size >= sz)
5039 po = *uncompressed;
5040 else
5041 {
5042 po = (unsigned char *) backtrace_alloc (state, sz, error_callback, data);
5043 if (po == NULL)
5044 return 0;
5045 }
5046
5047 if (!elf_zlib_inflate_and_verify (compressed + 12, compressed_size - 12,
5048 zdebug_table, po, sz))
5049 return 1;
5050
5051 *uncompressed = po;
5052 *uncompressed_size = sz;
5053
5054 return 1;
5055 }
5056
5057 /* Uncompress the new compressed debug format, the official standard
5058 ELF approach emitted by --compress-debug-sections=zlib-gabi. The
5059 compressed data is in COMPRESSED / COMPRESSED_SIZE, and the
5060 function writes to *UNCOMPRESSED / *UNCOMPRESSED_SIZE.
5061 ZDEBUG_TABLE is work space as for elf_uncompress_zdebug. Returns 0
5062 on error, 1 on successful decompression or if something goes wrong.
5063 In general we try to carry on, by returning 1, even if we can't
5064 decompress. */
5065
5066 static int
5067 elf_uncompress_chdr (struct backtrace_state *state,
5068 const unsigned char *compressed, size_t compressed_size,
5069 uint16_t *zdebug_table,
5070 backtrace_error_callback error_callback, void *data,
5071 unsigned char **uncompressed, size_t *uncompressed_size)
5072 {
5073 b_elf_chdr chdr;
5074 char *alc;
5075 size_t alc_len;
5076 unsigned char *po;
5077
5078 *uncompressed = NULL;
5079 *uncompressed_size = 0;
5080
5081 /* The format starts with an ELF compression header. */
5082 if (compressed_size < sizeof (b_elf_chdr))
5083 return 1;
5084
5085 /* The lld linker can misalign a compressed section, so we can't safely read
5086 the fields directly as we can for other ELF sections. See
5087 https://github.com/ianlancetaylor/libbacktrace/pull/120. */
5088 memcpy (&chdr, compressed, sizeof (b_elf_chdr));
5089
5090 alc = NULL;
5091 alc_len = 0;
5092 if (*uncompressed != NULL && *uncompressed_size >= chdr.ch_size)
5093 po = *uncompressed;
5094 else
5095 {
5096 alc_len = chdr.ch_size;
5097 alc = backtrace_alloc (state, alc_len, error_callback, data);
5098 if (alc == NULL)
5099 return 0;
5100 po = (unsigned char *) alc;
5101 }
5102
5103 switch (chdr.ch_type)
5104 {
5105 case ELFCOMPRESS_ZLIB:
5106 if (!elf_zlib_inflate_and_verify (compressed + sizeof (b_elf_chdr),
5107 compressed_size - sizeof (b_elf_chdr),
5108 zdebug_table, po, chdr.ch_size))
5109 goto skip;
5110 break;
5111
5112 case ELFCOMPRESS_ZSTD:
5113 if (!elf_zstd_decompress (compressed + sizeof (b_elf_chdr),
5114 compressed_size - sizeof (b_elf_chdr),
5115 (unsigned char *)zdebug_table, po,
5116 chdr.ch_size))
5117 goto skip;
5118 break;
5119
5120 default:
5121 /* Unsupported compression algorithm. */
5122 goto skip;
5123 }
5124
5125 *uncompressed = po;
5126 *uncompressed_size = chdr.ch_size;
5127
5128 return 1;
5129
5130 skip:
5131 if (alc != NULL && alc_len > 0)
5132 backtrace_free (state, alc, alc_len, error_callback, data);
5133 return 1;
5134 }
5135
5136 /* This function is a hook for testing the zlib support. It is only
5137 used by tests. */
5138
5139 int
5140 backtrace_uncompress_zdebug (struct backtrace_state *state,
5141 const unsigned char *compressed,
5142 size_t compressed_size,
5143 backtrace_error_callback error_callback,
5144 void *data, unsigned char **uncompressed,
5145 size_t *uncompressed_size)
5146 {
5147 uint16_t *zdebug_table;
5148 int ret;
5149
5150 zdebug_table = ((uint16_t *) backtrace_alloc (state, ZDEBUG_TABLE_SIZE,
5151 error_callback, data));
5152 if (zdebug_table == NULL)
5153 return 0;
5154 ret = elf_uncompress_zdebug (state, compressed, compressed_size,
5155 zdebug_table, error_callback, data,
5156 uncompressed, uncompressed_size);
5157 backtrace_free (state, zdebug_table, ZDEBUG_TABLE_SIZE,
5158 error_callback, data);
5159 return ret;
5160 }
5161
5162 /* This function is a hook for testing the zstd support. It is only used by
5163 tests. */
5164
5165 int
5166 backtrace_uncompress_zstd (struct backtrace_state *state,
5167 const unsigned char *compressed,
5168 size_t compressed_size,
5169 backtrace_error_callback error_callback,
5170 void *data, unsigned char *uncompressed,
5171 size_t uncompressed_size)
5172 {
5173 unsigned char *zdebug_table;
5174 int ret;
5175
5176 zdebug_table = ((unsigned char *) backtrace_alloc (state, ZDEBUG_TABLE_SIZE,
5177 error_callback, data));
5178 if (zdebug_table == NULL)
5179 return 0;
5180 ret = elf_zstd_decompress (compressed, compressed_size,
5181 zdebug_table, uncompressed, uncompressed_size);
5182 backtrace_free (state, zdebug_table, ZDEBUG_TABLE_SIZE,
5183 error_callback, data);
5184 return ret;
5185 }
5186
5187 /* Number of LZMA states. */
5188 #define LZMA_STATES (12)
5189
5190 /* Number of LZMA position states. The pb value of the property byte
5191 is the number of bits to include in these states, and the maximum
5192 value of pb is 4. */
5193 #define LZMA_POS_STATES (16)
5194
/* Number of LZMA distance states.  These are used for match distances
   with a short match length: up to 4 bytes. */
5197 #define LZMA_DIST_STATES (4)
5198
5199 /* Number of LZMA distance slots. LZMA uses six bits to encode larger
5200 match lengths, so 1 << 6 possible probabilities. */
5201 #define LZMA_DIST_SLOTS (64)
5202
5203 /* LZMA distances 0 to 3 are encoded directly, larger values use a
5204 probability model. */
5205 #define LZMA_DIST_MODEL_START (4)
5206
5207 /* The LZMA probability model ends at 14. */
5208 #define LZMA_DIST_MODEL_END (14)
5209
5210 /* LZMA distance slots for distances less than 127. */
5211 #define LZMA_FULL_DISTANCES (128)
5212
5213 /* LZMA uses four alignment bits. */
5214 #define LZMA_ALIGN_SIZE (16)
5215
5216 /* LZMA match length is encoded with 4, 5, or 10 bits, some of which
5217 are already known. */
5218 #define LZMA_LEN_LOW_SYMBOLS (8)
5219 #define LZMA_LEN_MID_SYMBOLS (8)
5220 #define LZMA_LEN_HIGH_SYMBOLS (256)
5221
5222 /* LZMA literal encoding. */
5223 #define LZMA_LITERAL_CODERS_MAX (16)
5224 #define LZMA_LITERAL_CODER_SIZE (0x300)
5225
5226 /* LZMA is based on a large set of probabilities, each managed
5227 independently. Each probability is an 11 bit number that we store
5228 in a uint16_t. We use a single large array of probabilities. */
5229
5230 /* Lengths of entries in the LZMA probabilities array. The names used
5231 here are copied from the Linux kernel implementation. */
5232
5233 #define LZMA_PROB_IS_MATCH_LEN (LZMA_STATES * LZMA_POS_STATES)
5234 #define LZMA_PROB_IS_REP_LEN LZMA_STATES
5235 #define LZMA_PROB_IS_REP0_LEN LZMA_STATES
5236 #define LZMA_PROB_IS_REP1_LEN LZMA_STATES
5237 #define LZMA_PROB_IS_REP2_LEN LZMA_STATES
5238 #define LZMA_PROB_IS_REP0_LONG_LEN (LZMA_STATES * LZMA_POS_STATES)
5239 #define LZMA_PROB_DIST_SLOT_LEN (LZMA_DIST_STATES * LZMA_DIST_SLOTS)
5240 #define LZMA_PROB_DIST_SPECIAL_LEN (LZMA_FULL_DISTANCES - LZMA_DIST_MODEL_END)
5241 #define LZMA_PROB_DIST_ALIGN_LEN LZMA_ALIGN_SIZE
5242 #define LZMA_PROB_MATCH_LEN_CHOICE_LEN 1
5243 #define LZMA_PROB_MATCH_LEN_CHOICE2_LEN 1
5244 #define LZMA_PROB_MATCH_LEN_LOW_LEN (LZMA_POS_STATES * LZMA_LEN_LOW_SYMBOLS)
5245 #define LZMA_PROB_MATCH_LEN_MID_LEN (LZMA_POS_STATES * LZMA_LEN_MID_SYMBOLS)
5246 #define LZMA_PROB_MATCH_LEN_HIGH_LEN LZMA_LEN_HIGH_SYMBOLS
5247 #define LZMA_PROB_REP_LEN_CHOICE_LEN 1
5248 #define LZMA_PROB_REP_LEN_CHOICE2_LEN 1
5249 #define LZMA_PROB_REP_LEN_LOW_LEN (LZMA_POS_STATES * LZMA_LEN_LOW_SYMBOLS)
5250 #define LZMA_PROB_REP_LEN_MID_LEN (LZMA_POS_STATES * LZMA_LEN_MID_SYMBOLS)
5251 #define LZMA_PROB_REP_LEN_HIGH_LEN LZMA_LEN_HIGH_SYMBOLS
5252 #define LZMA_PROB_LITERAL_LEN \
5253 (LZMA_LITERAL_CODERS_MAX * LZMA_LITERAL_CODER_SIZE)
5254
5255 /* Offsets into the LZMA probabilities array. This is mechanically
5256 generated from the above lengths. */
5257
5258 #define LZMA_PROB_IS_MATCH_OFFSET 0
5259 #define LZMA_PROB_IS_REP_OFFSET \
5260 (LZMA_PROB_IS_MATCH_OFFSET + LZMA_PROB_IS_MATCH_LEN)
5261 #define LZMA_PROB_IS_REP0_OFFSET \
5262 (LZMA_PROB_IS_REP_OFFSET + LZMA_PROB_IS_REP_LEN)
5263 #define LZMA_PROB_IS_REP1_OFFSET \
5264 (LZMA_PROB_IS_REP0_OFFSET + LZMA_PROB_IS_REP0_LEN)
5265 #define LZMA_PROB_IS_REP2_OFFSET \
5266 (LZMA_PROB_IS_REP1_OFFSET + LZMA_PROB_IS_REP1_LEN)
5267 #define LZMA_PROB_IS_REP0_LONG_OFFSET \
5268 (LZMA_PROB_IS_REP2_OFFSET + LZMA_PROB_IS_REP2_LEN)
5269 #define LZMA_PROB_DIST_SLOT_OFFSET \
5270 (LZMA_PROB_IS_REP0_LONG_OFFSET + LZMA_PROB_IS_REP0_LONG_LEN)
5271 #define LZMA_PROB_DIST_SPECIAL_OFFSET \
5272 (LZMA_PROB_DIST_SLOT_OFFSET + LZMA_PROB_DIST_SLOT_LEN)
5273 #define LZMA_PROB_DIST_ALIGN_OFFSET \
5274 (LZMA_PROB_DIST_SPECIAL_OFFSET + LZMA_PROB_DIST_SPECIAL_LEN)
5275 #define LZMA_PROB_MATCH_LEN_CHOICE_OFFSET \
5276 (LZMA_PROB_DIST_ALIGN_OFFSET + LZMA_PROB_DIST_ALIGN_LEN)
5277 #define LZMA_PROB_MATCH_LEN_CHOICE2_OFFSET \
5278 (LZMA_PROB_MATCH_LEN_CHOICE_OFFSET + LZMA_PROB_MATCH_LEN_CHOICE_LEN)
5279 #define LZMA_PROB_MATCH_LEN_LOW_OFFSET \
5280 (LZMA_PROB_MATCH_LEN_CHOICE2_OFFSET + LZMA_PROB_MATCH_LEN_CHOICE2_LEN)
5281 #define LZMA_PROB_MATCH_LEN_MID_OFFSET \
5282 (LZMA_PROB_MATCH_LEN_LOW_OFFSET + LZMA_PROB_MATCH_LEN_LOW_LEN)
5283 #define LZMA_PROB_MATCH_LEN_HIGH_OFFSET \
5284 (LZMA_PROB_MATCH_LEN_MID_OFFSET + LZMA_PROB_MATCH_LEN_MID_LEN)
5285 #define LZMA_PROB_REP_LEN_CHOICE_OFFSET \
5286 (LZMA_PROB_MATCH_LEN_HIGH_OFFSET + LZMA_PROB_MATCH_LEN_HIGH_LEN)
5287 #define LZMA_PROB_REP_LEN_CHOICE2_OFFSET \
5288 (LZMA_PROB_REP_LEN_CHOICE_OFFSET + LZMA_PROB_REP_LEN_CHOICE_LEN)
5289 #define LZMA_PROB_REP_LEN_LOW_OFFSET \
5290 (LZMA_PROB_REP_LEN_CHOICE2_OFFSET + LZMA_PROB_REP_LEN_CHOICE2_LEN)
5291 #define LZMA_PROB_REP_LEN_MID_OFFSET \
5292 (LZMA_PROB_REP_LEN_LOW_OFFSET + LZMA_PROB_REP_LEN_LOW_LEN)
5293 #define LZMA_PROB_REP_LEN_HIGH_OFFSET \
5294 (LZMA_PROB_REP_LEN_MID_OFFSET + LZMA_PROB_REP_LEN_MID_LEN)
5295 #define LZMA_PROB_LITERAL_OFFSET \
5296 (LZMA_PROB_REP_LEN_HIGH_OFFSET + LZMA_PROB_REP_LEN_HIGH_LEN)
5297
5298 #define LZMA_PROB_TOTAL_COUNT \
5299 (LZMA_PROB_LITERAL_OFFSET + LZMA_PROB_LITERAL_LEN)
5300
5301 /* Check that the number of LZMA probabilities is the same as the
5302 Linux kernel implementation. */
5303
5304 #if LZMA_PROB_TOTAL_COUNT != 1846 + (1 << 4) * 0x300
5305 #error Wrong number of LZMA probabilities
5306 #endif
5307
5308 /* Expressions for the offset in the LZMA probabilities array of a
5309 specific probability. */
5310
5311 #define LZMA_IS_MATCH(state, pos) \
5312 (LZMA_PROB_IS_MATCH_OFFSET + (state) * LZMA_POS_STATES + (pos))
5313 #define LZMA_IS_REP(state) \
5314 (LZMA_PROB_IS_REP_OFFSET + (state))
5315 #define LZMA_IS_REP0(state) \
5316 (LZMA_PROB_IS_REP0_OFFSET + (state))
5317 #define LZMA_IS_REP1(state) \
5318 (LZMA_PROB_IS_REP1_OFFSET + (state))
5319 #define LZMA_IS_REP2(state) \
5320 (LZMA_PROB_IS_REP2_OFFSET + (state))
5321 #define LZMA_IS_REP0_LONG(state, pos) \
5322 (LZMA_PROB_IS_REP0_LONG_OFFSET + (state) * LZMA_POS_STATES + (pos))
5323 #define LZMA_DIST_SLOT(dist, slot) \
5324 (LZMA_PROB_DIST_SLOT_OFFSET + (dist) * LZMA_DIST_SLOTS + (slot))
5325 #define LZMA_DIST_SPECIAL(dist) \
5326 (LZMA_PROB_DIST_SPECIAL_OFFSET + (dist))
5327 #define LZMA_DIST_ALIGN(dist) \
5328 (LZMA_PROB_DIST_ALIGN_OFFSET + (dist))
5329 #define LZMA_MATCH_LEN_CHOICE \
5330 LZMA_PROB_MATCH_LEN_CHOICE_OFFSET
5331 #define LZMA_MATCH_LEN_CHOICE2 \
5332 LZMA_PROB_MATCH_LEN_CHOICE2_OFFSET
5333 #define LZMA_MATCH_LEN_LOW(pos, sym) \
5334 (LZMA_PROB_MATCH_LEN_LOW_OFFSET + (pos) * LZMA_LEN_LOW_SYMBOLS + (sym))
5335 #define LZMA_MATCH_LEN_MID(pos, sym) \
5336 (LZMA_PROB_MATCH_LEN_MID_OFFSET + (pos) * LZMA_LEN_MID_SYMBOLS + (sym))
5337 #define LZMA_MATCH_LEN_HIGH(sym) \
5338 (LZMA_PROB_MATCH_LEN_HIGH_OFFSET + (sym))
5339 #define LZMA_REP_LEN_CHOICE \
5340 LZMA_PROB_REP_LEN_CHOICE_OFFSET
5341 #define LZMA_REP_LEN_CHOICE2 \
5342 LZMA_PROB_REP_LEN_CHOICE2_OFFSET
5343 #define LZMA_REP_LEN_LOW(pos, sym) \
5344 (LZMA_PROB_REP_LEN_LOW_OFFSET + (pos) * LZMA_LEN_LOW_SYMBOLS + (sym))
5345 #define LZMA_REP_LEN_MID(pos, sym) \
5346 (LZMA_PROB_REP_LEN_MID_OFFSET + (pos) * LZMA_LEN_MID_SYMBOLS + (sym))
5347 #define LZMA_REP_LEN_HIGH(sym) \
5348 (LZMA_PROB_REP_LEN_HIGH_OFFSET + (sym))
5349 #define LZMA_LITERAL(code, size) \
5350 (LZMA_PROB_LITERAL_OFFSET + (code) * LZMA_LITERAL_CODER_SIZE + (size))
5351
5352 /* Read an LZMA varint from BUF, reading and updating *POFFSET,
5353 setting *VAL. Returns 0 on error, 1 on success. */
5354
static int
elf_lzma_varint (const unsigned char *compressed, size_t compressed_size,
		 size_t *poffset, uint64_t *val)
{
  size_t off;
  int i;
  uint64_t v;
  unsigned char b;

  off = *poffset;
  i = 0;
  v = 0;
  while (1)
    {
      if (unlikely (off >= compressed_size))
	{
	  elf_uncompress_failed ();
	  return 0;
	}
      b = compressed[off];
      /* Widen to uint64_t before shifting: I can reach 8, so I * 7
	 can exceed the width of int.  Shifting the int-promoted byte
	 would be undefined behavior and would discard the high
	 bits.  */
      v |= (uint64_t) (b & 0x7f) << (i * 7);
      ++off;
      if ((b & 0x80) == 0)
	{
	  /* High bit clear: this is the final byte.  */
	  *poffset = off;
	  *val = v;
	  return 1;
	}
      ++i;
      if (unlikely (i >= 9))
	{
	  /* More than 9 continuation bytes is invalid.  */
	  elf_uncompress_failed ();
	  return 0;
	}
    }
}
5391
5392 /* Normalize the LZMA range decoder, pulling in an extra input byte if
5393 needed. */
5394
static void
elf_lzma_range_normalize (const unsigned char *compressed,
			  size_t compressed_size, size_t *poffset,
			  uint32_t *prange, uint32_t *pcode)
{
  /* Nothing to do while the range still has at least 24 bits of
     precision.  */
  if (*prange >= (1U << 24))
    return;

  if (unlikely (*poffset >= compressed_size))
    {
      /* Ran out of input; we assume this is caught elsewhere.  */
      elf_uncompress_failed ();
      return;
    }

  /* Shift one more input byte into the low bits of the code.  */
  *prange <<= 8;
  *pcode = (*pcode << 8) + compressed[*poffset];
  ++*poffset;
}
5414
5415 /* Read and return a single bit from the LZMA stream, reading and
5416 updating *PROB. Each bit comes from the range coder. */
5417
static int
elf_lzma_bit (const unsigned char *compressed, size_t compressed_size,
	      uint16_t *prob, size_t *poffset, uint32_t *prange,
	      uint32_t *pcode)
{
  uint32_t threshold;

  /* Make sure the range coder has enough precision for this bit.  */
  elf_lzma_range_normalize (compressed, compressed_size, poffset,
			    prange, pcode);

  /* Split the range in proportion to the current probability, an 11
     bit value stored in *PROB.  */
  threshold = (*prange >> 11) * (uint32_t) *prob;

  if (*pcode >= threshold)
    {
      /* Decoded a one bit: keep the upper part of the range and decay
	 the probability toward zero.  */
      *prange -= threshold;
      *pcode -= threshold;
      *prob -= *prob >> 5;
      return 1;
    }

  /* Decoded a zero bit: keep the lower part of the range and move the
     probability toward its maximum.  */
  *prange = threshold;
  *prob += ((1U << 11) - *prob) >> 5;
  return 0;
}
5442
5443 /* Read an integer of size BITS from the LZMA stream, most significant
5444 bit first. The bits are predicted using PROBS. */
5445
static uint32_t
elf_lzma_integer (const unsigned char *compressed, size_t compressed_size,
		  uint16_t *probs, uint32_t bits, size_t *poffset,
		  uint32_t *prange, uint32_t *pcode)
{
  uint32_t tree;
  uint32_t count;

  /* Walk the probability tree: TREE holds a leading one bit followed
     by the bits decoded so far, and indexes the next probability.  */
  tree = 1;
  for (count = 0; count < bits; count++)
    tree = (tree << 1) + elf_lzma_bit (compressed, compressed_size,
				       probs + tree, poffset, prange, pcode);

  /* Strip the leading one bit to recover the value.  */
  return tree - (1 << bits);
}
5466
5467 /* Read an integer of size BITS from the LZMA stream, least
5468 significant bit first. The bits are predicted using PROBS. */
5469
static uint32_t
elf_lzma_reverse_integer (const unsigned char *compressed,
			  size_t compressed_size, uint16_t *probs,
			  uint32_t bits, size_t *poffset, uint32_t *prange,
			  uint32_t *pcode)
{
  uint32_t tree;
  uint32_t result;
  uint32_t i;

  /* TREE indexes the probability tree as in elf_lzma_integer, but the
     decoded bits are accumulated least significant first.  */
  tree = 1;
  result = 0;
  for (i = 0; i < bits; i++)
    {
      int b;

      b = elf_lzma_bit (compressed, compressed_size, probs + tree, poffset,
			prange, pcode);
      tree = (tree << 1) + b;
      result |= (uint32_t) b << i;
    }
  return result;
}
5494
5495 /* Read a length from the LZMA stream. IS_REP picks either LZMA_MATCH
5496 or LZMA_REP probabilities. */
5497
5498 static uint32_t
5499 elf_lzma_len (const unsigned char *compressed, size_t compressed_size,
5500 uint16_t *probs, int is_rep, unsigned int pos_state,
5501 size_t *poffset, uint32_t *prange, uint32_t *pcode)
5502 {
5503 uint16_t *probs_choice;
5504 uint16_t *probs_sym;
5505 uint32_t bits;
5506 uint32_t len;
5507
5508 probs_choice = probs + (is_rep
5509 ? LZMA_REP_LEN_CHOICE
5510 : LZMA_MATCH_LEN_CHOICE);
5511 if (elf_lzma_bit (compressed, compressed_size, probs_choice, poffset,
5512 prange, pcode))
5513 {
5514 probs_choice = probs + (is_rep
5515 ? LZMA_REP_LEN_CHOICE2
5516 : LZMA_MATCH_LEN_CHOICE2);
5517 if (elf_lzma_bit (compressed, compressed_size, probs_choice,
5518 poffset, prange, pcode))
5519 {
5520 probs_sym = probs + (is_rep
5521 ? LZMA_REP_LEN_HIGH (0)
5522 : LZMA_MATCH_LEN_HIGH (0));
5523 bits = 8;
5524 len = 2 + 8 + 8;
5525 }
5526 else
5527 {
5528 probs_sym = probs + (is_rep
5529 ? LZMA_REP_LEN_MID (pos_state, 0)
5530 : LZMA_MATCH_LEN_MID (pos_state, 0));
5531 bits = 3;
5532 len = 2 + 8;
5533 }
5534 }
5535 else
5536 {
5537 probs_sym = probs + (is_rep
5538 ? LZMA_REP_LEN_LOW (pos_state, 0)
5539 : LZMA_MATCH_LEN_LOW (pos_state, 0));
5540 bits = 3;
5541 len = 2;
5542 }
5543
5544 len += elf_lzma_integer (compressed, compressed_size, probs_sym, bits,
5545 poffset, prange, pcode);
5546 return len;
5547 }
5548
5549 /* Uncompress one LZMA block from a minidebug file. The compressed
5550 data is at COMPRESSED + *POFFSET. Update *POFFSET. Store the data
5551 into the memory at UNCOMPRESSED, size UNCOMPRESSED_SIZE. CHECK is
5552 the stream flag from the xz header. Return 1 on successful
5553 decompression. */
5554
5555 static int
5556 elf_uncompress_lzma_block (const unsigned char *compressed,
5557 size_t compressed_size, unsigned char check,
5558 uint16_t *probs, unsigned char *uncompressed,
5559 size_t uncompressed_size, size_t *poffset)
5560 {
5561 size_t off;
5562 size_t block_header_offset;
5563 size_t block_header_size;
5564 unsigned char block_flags;
5565 uint64_t header_compressed_size;
5566 uint64_t header_uncompressed_size;
5567 unsigned char lzma2_properties;
5568 size_t crc_offset;
5569 uint32_t computed_crc;
5570 uint32_t stream_crc;
5571 size_t uncompressed_offset;
5572 size_t dict_start_offset;
5573 unsigned int lc;
5574 unsigned int lp;
5575 unsigned int pb;
5576 uint32_t range;
5577 uint32_t code;
5578 uint32_t lstate;
5579 uint32_t dist[4];
5580
5581 off = *poffset;
5582 block_header_offset = off;
5583
5584 /* Block header size is a single byte. */
5585 if (unlikely (off >= compressed_size))
5586 {
5587 elf_uncompress_failed ();
5588 return 0;
5589 }
5590 block_header_size = (compressed[off] + 1) * 4;
5591 if (unlikely (off + block_header_size > compressed_size))
5592 {
5593 elf_uncompress_failed ();
5594 return 0;
5595 }
5596
5597 /* Block flags. */
5598 block_flags = compressed[off + 1];
5599 if (unlikely ((block_flags & 0x3c) != 0))
5600 {
5601 elf_uncompress_failed ();
5602 return 0;
5603 }
5604
5605 off += 2;
5606
5607 /* Optional compressed size. */
5608 header_compressed_size = 0;
5609 if ((block_flags & 0x40) != 0)
5610 {
5611 *poffset = off;
5612 if (!elf_lzma_varint (compressed, compressed_size, poffset,
5613 &header_compressed_size))
5614 return 0;
5615 off = *poffset;
5616 }
5617
5618 /* Optional uncompressed size. */
5619 header_uncompressed_size = 0;
5620 if ((block_flags & 0x80) != 0)
5621 {
5622 *poffset = off;
5623 if (!elf_lzma_varint (compressed, compressed_size, poffset,
5624 &header_uncompressed_size))
5625 return 0;
5626 off = *poffset;
5627 }
5628
5629 /* The recipe for creating a minidebug file is to run the xz program
5630 with no arguments, so we expect exactly one filter: lzma2. */
5631
5632 if (unlikely ((block_flags & 0x3) != 0))
5633 {
5634 elf_uncompress_failed ();
5635 return 0;
5636 }
5637
5638 if (unlikely (off + 2 >= block_header_offset + block_header_size))
5639 {
5640 elf_uncompress_failed ();
5641 return 0;
5642 }
5643
5644 /* The filter ID for LZMA2 is 0x21. */
5645 if (unlikely (compressed[off] != 0x21))
5646 {
5647 elf_uncompress_failed ();
5648 return 0;
5649 }
5650 ++off;
5651
5652 /* The size of the filter properties for LZMA2 is 1. */
5653 if (unlikely (compressed[off] != 1))
5654 {
5655 elf_uncompress_failed ();
5656 return 0;
5657 }
5658 ++off;
5659
5660 lzma2_properties = compressed[off];
5661 ++off;
5662
5663 if (unlikely (lzma2_properties > 40))
5664 {
5665 elf_uncompress_failed ();
5666 return 0;
5667 }
5668
5669 /* The properties describe the dictionary size, but we don't care
5670 what that is. */
5671
5672 /* Skip to just before CRC, verifying zero bytes in between. */
5673 crc_offset = block_header_offset + block_header_size - 4;
5674 if (unlikely (crc_offset + 4 > compressed_size))
5675 {
5676 elf_uncompress_failed ();
5677 return 0;
5678 }
5679 for (; off < crc_offset; off++)
5680 {
5681 if (compressed[off] != 0)
5682 {
5683 elf_uncompress_failed ();
5684 return 0;
5685 }
5686 }
5687
5688 /* Block header CRC. */
5689 computed_crc = elf_crc32 (0, compressed + block_header_offset,
5690 block_header_size - 4);
5691 stream_crc = ((uint32_t)compressed[off]
5692 | ((uint32_t)compressed[off + 1] << 8)
5693 | ((uint32_t)compressed[off + 2] << 16)
5694 | ((uint32_t)compressed[off + 3] << 24));
5695 if (unlikely (computed_crc != stream_crc))
5696 {
5697 elf_uncompress_failed ();
5698 return 0;
5699 }
5700 off += 4;
5701
5702 /* Read a sequence of LZMA2 packets. */
5703
5704 uncompressed_offset = 0;
5705 dict_start_offset = 0;
5706 lc = 0;
5707 lp = 0;
5708 pb = 0;
5709 lstate = 0;
5710 while (off < compressed_size)
5711 {
5712 unsigned char control;
5713
5714 range = 0xffffffff;
5715 code = 0;
5716
5717 control = compressed[off];
5718 ++off;
5719 if (unlikely (control == 0))
5720 {
5721 /* End of packets. */
5722 break;
5723 }
5724
5725 if (control == 1 || control >= 0xe0)
5726 {
5727 /* Reset dictionary to empty. */
5728 dict_start_offset = uncompressed_offset;
5729 }
5730
5731 if (control < 0x80)
5732 {
5733 size_t chunk_size;
5734
5735 /* The only valid values here are 1 or 2. A 1 means to
5736 reset the dictionary (done above). Then we see an
5737 uncompressed chunk. */
5738
5739 if (unlikely (control > 2))
5740 {
5741 elf_uncompress_failed ();
5742 return 0;
5743 }
5744
5745 /* An uncompressed chunk is a two byte size followed by
5746 data. */
5747
5748 if (unlikely (off + 2 > compressed_size))
5749 {
5750 elf_uncompress_failed ();
5751 return 0;
5752 }
5753
5754 chunk_size = compressed[off] << 8;
5755 chunk_size += compressed[off + 1];
5756 ++chunk_size;
5757
5758 off += 2;
5759
5760 if (unlikely (off + chunk_size > compressed_size))
5761 {
5762 elf_uncompress_failed ();
5763 return 0;
5764 }
5765 if (unlikely (uncompressed_offset + chunk_size > uncompressed_size))
5766 {
5767 elf_uncompress_failed ();
5768 return 0;
5769 }
5770
5771 memcpy (uncompressed + uncompressed_offset, compressed + off,
5772 chunk_size);
5773 uncompressed_offset += chunk_size;
5774 off += chunk_size;
5775 }
5776 else
5777 {
5778 size_t uncompressed_chunk_start;
5779 size_t uncompressed_chunk_size;
5780 size_t compressed_chunk_size;
5781 size_t limit;
5782
5783 /* An LZMA chunk. This starts with an uncompressed size and
5784 a compressed size. */
5785
5786 if (unlikely (off + 4 >= compressed_size))
5787 {
5788 elf_uncompress_failed ();
5789 return 0;
5790 }
5791
5792 uncompressed_chunk_start = uncompressed_offset;
5793
5794 uncompressed_chunk_size = (control & 0x1f) << 16;
5795 uncompressed_chunk_size += compressed[off] << 8;
5796 uncompressed_chunk_size += compressed[off + 1];
5797 ++uncompressed_chunk_size;
5798
5799 compressed_chunk_size = compressed[off + 2] << 8;
5800 compressed_chunk_size += compressed[off + 3];
5801 ++compressed_chunk_size;
5802
5803 off += 4;
5804
5805 /* Bit 7 (0x80) is set.
5806 Bits 6 and 5 (0x40 and 0x20) are as follows:
5807 0: don't reset anything
5808 1: reset state
5809 2: reset state, read properties
5810 3: reset state, read properties, reset dictionary (done above) */
5811
5812 if (control >= 0xc0)
5813 {
5814 unsigned char props;
5815
5816 /* Bit 6 is set, read properties. */
5817
5818 if (unlikely (off >= compressed_size))
5819 {
5820 elf_uncompress_failed ();
5821 return 0;
5822 }
5823 props = compressed[off];
5824 ++off;
5825 if (unlikely (props > (4 * 5 + 4) * 9 + 8))
5826 {
5827 elf_uncompress_failed ();
5828 return 0;
5829 }
5830 pb = 0;
5831 while (props >= 9 * 5)
5832 {
5833 props -= 9 * 5;
5834 ++pb;
5835 }
5836 lp = 0;
5837 while (props > 9)
5838 {
5839 props -= 9;
5840 ++lp;
5841 }
5842 lc = props;
5843 if (unlikely (lc + lp > 4))
5844 {
5845 elf_uncompress_failed ();
5846 return 0;
5847 }
5848 }
5849
5850 if (control >= 0xa0)
5851 {
5852 size_t i;
5853
5854 /* Bit 5 or 6 is set, reset LZMA state. */
5855
5856 lstate = 0;
5857 memset (&dist, 0, sizeof dist);
5858 for (i = 0; i < LZMA_PROB_TOTAL_COUNT; i++)
5859 probs[i] = 1 << 10;
5860 range = 0xffffffff;
5861 code = 0;
5862 }
5863
5864 /* Read the range code. */
5865
5866 if (unlikely (off + 5 > compressed_size))
5867 {
5868 elf_uncompress_failed ();
5869 return 0;
5870 }
5871
5872 /* The byte at compressed[off] is ignored for some
5873 reason. */
5874
5875 code = ((compressed[off + 1] << 24)
5876 + (compressed[off + 2] << 16)
5877 + (compressed[off + 3] << 8)
5878 + compressed[off + 4]);
5879 off += 5;
5880
5881 /* This is the main LZMA decode loop. */
5882
5883 limit = off + compressed_chunk_size;
5884 *poffset = off;
5885 while (*poffset < limit)
5886 {
5887 unsigned int pos_state;
5888
5889 if (unlikely (uncompressed_offset
5890 == (uncompressed_chunk_start
5891 + uncompressed_chunk_size)))
5892 {
5893 /* We've decompressed all the expected bytes. */
5894 break;
5895 }
5896
5897 pos_state = ((uncompressed_offset - dict_start_offset)
5898 & ((1 << pb) - 1));
5899
5900 if (elf_lzma_bit (compressed, compressed_size,
5901 probs + LZMA_IS_MATCH (lstate, pos_state),
5902 poffset, &range, &code))
5903 {
5904 uint32_t len;
5905
5906 if (elf_lzma_bit (compressed, compressed_size,
5907 probs + LZMA_IS_REP (lstate),
5908 poffset, &range, &code))
5909 {
5910 int short_rep;
5911 uint32_t next_dist;
5912
5913 /* Repeated match. */
5914
5915 short_rep = 0;
5916 if (elf_lzma_bit (compressed, compressed_size,
5917 probs + LZMA_IS_REP0 (lstate),
5918 poffset, &range, &code))
5919 {
5920 if (elf_lzma_bit (compressed, compressed_size,
5921 probs + LZMA_IS_REP1 (lstate),
5922 poffset, &range, &code))
5923 {
5924 if (elf_lzma_bit (compressed, compressed_size,
5925 probs + LZMA_IS_REP2 (lstate),
5926 poffset, &range, &code))
5927 {
5928 next_dist = dist[3];
5929 dist[3] = dist[2];
5930 }
5931 else
5932 {
5933 next_dist = dist[2];
5934 }
5935 dist[2] = dist[1];
5936 }
5937 else
5938 {
5939 next_dist = dist[1];
5940 }
5941
5942 dist[1] = dist[0];
5943 dist[0] = next_dist;
5944 }
5945 else
5946 {
5947 if (!elf_lzma_bit (compressed, compressed_size,
5948 (probs
5949 + LZMA_IS_REP0_LONG (lstate,
5950 pos_state)),
5951 poffset, &range, &code))
5952 short_rep = 1;
5953 }
5954
5955 if (lstate < 7)
5956 lstate = short_rep ? 9 : 8;
5957 else
5958 lstate = 11;
5959
5960 if (short_rep)
5961 len = 1;
5962 else
5963 len = elf_lzma_len (compressed, compressed_size,
5964 probs, 1, pos_state, poffset,
5965 &range, &code);
5966 }
5967 else
5968 {
5969 uint32_t dist_state;
5970 uint32_t dist_slot;
5971 uint16_t *probs_dist;
5972
5973 /* Match. */
5974
5975 if (lstate < 7)
5976 lstate = 7;
5977 else
5978 lstate = 10;
5979 dist[3] = dist[2];
5980 dist[2] = dist[1];
5981 dist[1] = dist[0];
5982 len = elf_lzma_len (compressed, compressed_size,
5983 probs, 0, pos_state, poffset,
5984 &range, &code);
5985
5986 if (len < 4 + 2)
5987 dist_state = len - 2;
5988 else
5989 dist_state = 3;
5990 probs_dist = probs + LZMA_DIST_SLOT (dist_state, 0);
5991 dist_slot = elf_lzma_integer (compressed,
5992 compressed_size,
5993 probs_dist, 6,
5994 poffset, &range,
5995 &code);
5996 if (dist_slot < LZMA_DIST_MODEL_START)
5997 dist[0] = dist_slot;
5998 else
5999 {
6000 uint32_t limit;
6001
6002 limit = (dist_slot >> 1) - 1;
6003 dist[0] = 2 + (dist_slot & 1);
6004 if (dist_slot < LZMA_DIST_MODEL_END)
6005 {
6006 dist[0] <<= limit;
6007 probs_dist = (probs
6008 + LZMA_DIST_SPECIAL(dist[0]
6009 - dist_slot
6010 - 1));
6011 dist[0] +=
6012 elf_lzma_reverse_integer (compressed,
6013 compressed_size,
6014 probs_dist,
6015 limit, poffset,
6016 &range, &code);
6017 }
6018 else
6019 {
6020 uint32_t dist0;
6021 uint32_t i;
6022
6023 dist0 = dist[0];
6024 for (i = 0; i < limit - 4; i++)
6025 {
6026 uint32_t mask;
6027
6028 elf_lzma_range_normalize (compressed,
6029 compressed_size,
6030 poffset,
6031 &range, &code);
6032 range >>= 1;
6033 code -= range;
6034 mask = -(code >> 31);
6035 code += range & mask;
6036 dist0 <<= 1;
6037 dist0 += mask + 1;
6038 }
6039 dist0 <<= 4;
6040 probs_dist = probs + LZMA_DIST_ALIGN (0);
6041 dist0 +=
6042 elf_lzma_reverse_integer (compressed,
6043 compressed_size,
6044 probs_dist, 4,
6045 poffset,
6046 &range, &code);
6047 dist[0] = dist0;
6048 }
6049 }
6050 }
6051
6052 if (unlikely (uncompressed_offset
6053 - dict_start_offset < dist[0] + 1))
6054 {
6055 elf_uncompress_failed ();
6056 return 0;
6057 }
6058 if (unlikely (uncompressed_offset + len > uncompressed_size))
6059 {
6060 elf_uncompress_failed ();
6061 return 0;
6062 }
6063
6064 if (dist[0] == 0)
6065 {
6066 /* A common case, meaning repeat the last
6067 character LEN times. */
6068 memset (uncompressed + uncompressed_offset,
6069 uncompressed[uncompressed_offset - 1],
6070 len);
6071 uncompressed_offset += len;
6072 }
6073 else if (dist[0] + 1 >= len)
6074 {
6075 memcpy (uncompressed + uncompressed_offset,
6076 uncompressed + uncompressed_offset - dist[0] - 1,
6077 len);
6078 uncompressed_offset += len;
6079 }
6080 else
6081 {
6082 while (len > 0)
6083 {
6084 uint32_t copy;
6085
6086 copy = len < dist[0] + 1 ? len : dist[0] + 1;
6087 memcpy (uncompressed + uncompressed_offset,
6088 (uncompressed + uncompressed_offset
6089 - dist[0] - 1),
6090 copy);
6091 len -= copy;
6092 uncompressed_offset += copy;
6093 }
6094 }
6095 }
6096 else
6097 {
6098 unsigned char prev;
6099 unsigned char low;
6100 size_t high;
6101 uint16_t *lit_probs;
6102 unsigned int sym;
6103
6104 /* Literal value. */
6105
6106 if (uncompressed_offset > 0)
6107 prev = uncompressed[uncompressed_offset - 1];
6108 else
6109 prev = 0;
6110 low = prev >> (8 - lc);
6111 high = (((uncompressed_offset - dict_start_offset)
6112 & ((1 << lp) - 1))
6113 << lc);
6114 lit_probs = probs + LZMA_LITERAL (low + high, 0);
6115 if (lstate < 7)
6116 sym = elf_lzma_integer (compressed, compressed_size,
6117 lit_probs, 8, poffset, &range,
6118 &code);
6119 else
6120 {
6121 unsigned int match;
6122 unsigned int bit;
6123 unsigned int match_bit;
6124 unsigned int idx;
6125
6126 sym = 1;
6127 if (uncompressed_offset >= dist[0] + 1)
6128 match = uncompressed[uncompressed_offset - dist[0] - 1];
6129 else
6130 match = 0;
6131 match <<= 1;
6132 bit = 0x100;
6133 do
6134 {
6135 match_bit = match & bit;
6136 match <<= 1;
6137 idx = bit + match_bit + sym;
6138 sym <<= 1;
6139 if (elf_lzma_bit (compressed, compressed_size,
6140 lit_probs + idx, poffset,
6141 &range, &code))
6142 {
6143 ++sym;
6144 bit &= match_bit;
6145 }
6146 else
6147 {
6148 bit &= ~ match_bit;
6149 }
6150 }
6151 while (sym < 0x100);
6152 }
6153
6154 if (unlikely (uncompressed_offset >= uncompressed_size))
6155 {
6156 elf_uncompress_failed ();
6157 return 0;
6158 }
6159
6160 uncompressed[uncompressed_offset] = (unsigned char) sym;
6161 ++uncompressed_offset;
6162 if (lstate <= 3)
6163 lstate = 0;
6164 else if (lstate <= 9)
6165 lstate -= 3;
6166 else
6167 lstate -= 6;
6168 }
6169 }
6170
6171 elf_lzma_range_normalize (compressed, compressed_size, poffset,
6172 &range, &code);
6173
6174 off = *poffset;
6175 }
6176 }
6177
6178 /* We have reached the end of the block. Pad to four byte
6179 boundary. */
6180 off = (off + 3) &~ (size_t) 3;
6181 if (unlikely (off > compressed_size))
6182 {
6183 elf_uncompress_failed ();
6184 return 0;
6185 }
6186
6187 switch (check)
6188 {
6189 case 0:
6190 /* No check. */
6191 break;
6192
6193 case 1:
6194 /* CRC32 */
6195 if (unlikely (off + 4 > compressed_size))
6196 {
6197 elf_uncompress_failed ();
6198 return 0;
6199 }
6200 computed_crc = elf_crc32 (0, uncompressed, uncompressed_offset);
6201 stream_crc = (compressed[off]
6202 | (compressed[off + 1] << 8)
6203 | (compressed[off + 2] << 16)
6204 | (compressed[off + 3] << 24));
6205 if (computed_crc != stream_crc)
6206 {
6207 elf_uncompress_failed ();
6208 return 0;
6209 }
6210 off += 4;
6211 break;
6212
6213 case 4:
6214 /* CRC64. We don't bother computing a CRC64 checksum. */
6215 if (unlikely (off + 8 > compressed_size))
6216 {
6217 elf_uncompress_failed ();
6218 return 0;
6219 }
6220 off += 8;
6221 break;
6222
6223 case 10:
6224 /* SHA. We don't bother computing a SHA checksum. */
6225 if (unlikely (off + 32 > compressed_size))
6226 {
6227 elf_uncompress_failed ();
6228 return 0;
6229 }
6230 off += 32;
6231 break;
6232
6233 default:
6234 elf_uncompress_failed ();
6235 return 0;
6236 }
6237
6238 *poffset = off;
6239
6240 return 1;
6241 }
6242
6243 /* Uncompress LZMA data found in a minidebug file. The minidebug
6244 format is described at
6245 https://sourceware.org/gdb/current/onlinedocs/gdb/MiniDebugInfo.html.
6246 Returns 0 on error, 1 on successful decompression. For this
6247 function we return 0 on failure to decompress, as the calling code
6248 will carry on in that case. */
6249
6250 static int
6251 elf_uncompress_lzma (struct backtrace_state *state,
6252 const unsigned char *compressed, size_t compressed_size,
6253 backtrace_error_callback error_callback, void *data,
6254 unsigned char **uncompressed, size_t *uncompressed_size)
6255 {
6256 size_t header_size;
6257 size_t footer_size;
6258 unsigned char check;
6259 uint32_t computed_crc;
6260 uint32_t stream_crc;
6261 size_t offset;
6262 size_t index_size;
6263 size_t footer_offset;
6264 size_t index_offset;
6265 uint64_t index_compressed_size;
6266 uint64_t index_uncompressed_size;
6267 unsigned char *mem;
6268 uint16_t *probs;
6269 size_t compressed_block_size;
6270
6271 /* The format starts with a stream header and ends with a stream
6272 footer. */
6273 header_size = 12;
6274 footer_size = 12;
6275 if (unlikely (compressed_size < header_size + footer_size))
6276 {
6277 elf_uncompress_failed ();
6278 return 0;
6279 }
6280
6281 /* The stream header starts with a magic string. */
6282 if (unlikely (memcmp (compressed, "\375" "7zXZ\0", 6) != 0))
6283 {
6284 elf_uncompress_failed ();
6285 return 0;
6286 }
6287
6288 /* Next come stream flags. The first byte is zero, the second byte
6289 is the check. */
6290 if (unlikely (compressed[6] != 0))
6291 {
6292 elf_uncompress_failed ();
6293 return 0;
6294 }
6295 check = compressed[7];
6296 if (unlikely ((check & 0xf8) != 0))
6297 {
6298 elf_uncompress_failed ();
6299 return 0;
6300 }
6301
6302 /* Next comes a CRC of the stream flags. */
6303 computed_crc = elf_crc32 (0, compressed + 6, 2);
6304 stream_crc = ((uint32_t)compressed[8]
6305 | ((uint32_t)compressed[9] << 8)
6306 | ((uint32_t)compressed[10] << 16)
6307 | ((uint32_t)compressed[11] << 24));
6308 if (unlikely (computed_crc != stream_crc))
6309 {
6310 elf_uncompress_failed ();
6311 return 0;
6312 }
6313
6314 /* Now that we've parsed the header, parse the footer, so that we
6315 can get the uncompressed size. */
6316
6317 /* The footer ends with two magic bytes. */
6318
6319 offset = compressed_size;
6320 if (unlikely (memcmp (compressed + offset - 2, "YZ", 2) != 0))
6321 {
6322 elf_uncompress_failed ();
6323 return 0;
6324 }
6325 offset -= 2;
6326
6327 /* Before that are the stream flags, which should be the same as the
6328 flags in the header. */
6329 if (unlikely (compressed[offset - 2] != 0
6330 || compressed[offset - 1] != check))
6331 {
6332 elf_uncompress_failed ();
6333 return 0;
6334 }
6335 offset -= 2;
6336
6337 /* Before that is the size of the index field, which precedes the
6338 footer. */
6339 index_size = (compressed[offset - 4]
6340 | (compressed[offset - 3] << 8)
6341 | (compressed[offset - 2] << 16)
6342 | (compressed[offset - 1] << 24));
6343 index_size = (index_size + 1) * 4;
6344 offset -= 4;
6345
6346 /* Before that is a footer CRC. */
6347 computed_crc = elf_crc32 (0, compressed + offset, 6);
6348 stream_crc = ((uint32_t)compressed[offset - 4]
6349 | ((uint32_t)compressed[offset - 3] << 8)
6350 | ((uint32_t)compressed[offset - 2] << 16)
6351 | ((uint32_t)compressed[offset - 1] << 24));
6352 if (unlikely (computed_crc != stream_crc))
6353 {
6354 elf_uncompress_failed ();
6355 return 0;
6356 }
6357 offset -= 4;
6358
6359 /* The index comes just before the footer. */
6360 if (unlikely (offset < index_size + header_size))
6361 {
6362 elf_uncompress_failed ();
6363 return 0;
6364 }
6365
6366 footer_offset = offset;
6367 offset -= index_size;
6368 index_offset = offset;
6369
6370 /* The index starts with a zero byte. */
6371 if (unlikely (compressed[offset] != 0))
6372 {
6373 elf_uncompress_failed ();
6374 return 0;
6375 }
6376 ++offset;
6377
6378 /* Next is the number of blocks. We expect zero blocks for an empty
6379 stream, and otherwise a single block. */
6380 if (unlikely (compressed[offset] == 0))
6381 {
6382 *uncompressed = NULL;
6383 *uncompressed_size = 0;
6384 return 1;
6385 }
6386 if (unlikely (compressed[offset] != 1))
6387 {
6388 elf_uncompress_failed ();
6389 return 0;
6390 }
6391 ++offset;
6392
6393 /* Next is the compressed size and the uncompressed size. */
6394 if (!elf_lzma_varint (compressed, compressed_size, &offset,
6395 &index_compressed_size))
6396 return 0;
6397 if (!elf_lzma_varint (compressed, compressed_size, &offset,
6398 &index_uncompressed_size))
6399 return 0;
6400
6401 /* Pad to a four byte boundary. */
6402 offset = (offset + 3) &~ (size_t) 3;
6403
6404 /* Next is a CRC of the index. */
6405 computed_crc = elf_crc32 (0, compressed + index_offset,
6406 offset - index_offset);
6407 stream_crc = ((uint32_t)compressed[offset]
6408 | ((uint32_t)compressed[offset + 1] << 8)
6409 | ((uint32_t)compressed[offset + 2] << 16)
6410 | ((uint32_t)compressed[offset + 3] << 24));
6411 if (unlikely (computed_crc != stream_crc))
6412 {
6413 elf_uncompress_failed ();
6414 return 0;
6415 }
6416 offset += 4;
6417
6418 /* We should now be back at the footer. */
6419 if (unlikely (offset != footer_offset))
6420 {
6421 elf_uncompress_failed ();
6422 return 0;
6423 }
6424
6425 /* Allocate space to hold the uncompressed data. If we succeed in
6426 uncompressing the LZMA data, we never free this memory. */
6427 mem = (unsigned char *) backtrace_alloc (state, index_uncompressed_size,
6428 error_callback, data);
6429 if (unlikely (mem == NULL))
6430 return 0;
6431 *uncompressed = mem;
6432 *uncompressed_size = index_uncompressed_size;
6433
6434 /* Allocate space for probabilities. */
6435 probs = ((uint16_t *)
6436 backtrace_alloc (state,
6437 LZMA_PROB_TOTAL_COUNT * sizeof (uint16_t),
6438 error_callback, data));
6439 if (unlikely (probs == NULL))
6440 {
6441 backtrace_free (state, mem, index_uncompressed_size, error_callback,
6442 data);
6443 return 0;
6444 }
6445
6446 /* Uncompress the block, which follows the header. */
6447 offset = 12;
6448 if (!elf_uncompress_lzma_block (compressed, compressed_size, check, probs,
6449 mem, index_uncompressed_size, &offset))
6450 {
6451 backtrace_free (state, mem, index_uncompressed_size, error_callback,
6452 data);
6453 return 0;
6454 }
6455
6456 compressed_block_size = offset - 12;
6457 if (unlikely (compressed_block_size
6458 != ((index_compressed_size + 3) &~ (size_t) 3)))
6459 {
6460 elf_uncompress_failed ();
6461 backtrace_free (state, mem, index_uncompressed_size, error_callback,
6462 data);
6463 return 0;
6464 }
6465
6466 offset = (offset + 3) &~ (size_t) 3;
6467 if (unlikely (offset != index_offset))
6468 {
6469 elf_uncompress_failed ();
6470 backtrace_free (state, mem, index_uncompressed_size, error_callback,
6471 data);
6472 return 0;
6473 }
6474
6475 return 1;
6476 }
6477
6478 /* This function is a hook for testing the LZMA support. It is only
6479 used by tests. */
6480
6481 int
6482 backtrace_uncompress_lzma (struct backtrace_state *state,
6483 const unsigned char *compressed,
6484 size_t compressed_size,
6485 backtrace_error_callback error_callback,
6486 void *data, unsigned char **uncompressed,
6487 size_t *uncompressed_size)
6488 {
6489 return elf_uncompress_lzma (state, compressed, compressed_size,
6490 error_callback, data, uncompressed,
6491 uncompressed_size);
6492 }
6493
6494 /* Add the backtrace data for one ELF file. Returns 1 on success,
6495 0 on failure (in both cases descriptor is closed) or -1 if exe
6496 is non-zero and the ELF file is ET_DYN, which tells the caller that
6497 elf_add will need to be called on the descriptor again after
6498 base_address is determined. */
6499
6500 static int
6501 elf_add (struct backtrace_state *state, const char *filename, int descriptor,
6502 const unsigned char *memory, size_t memory_size,
6503 struct libbacktrace_base_address base_address,
6504 struct elf_ppc64_opd_data *caller_opd,
6505 backtrace_error_callback error_callback, void *data,
6506 fileline *fileline_fn, int *found_sym, int *found_dwarf,
6507 struct dwarf_data **fileline_entry, int exe, int debuginfo,
6508 const char *with_buildid_data, uint32_t with_buildid_size)
6509 {
6510 struct elf_view ehdr_view;
6511 b_elf_ehdr ehdr;
6512 off_t shoff;
6513 unsigned int shnum;
6514 unsigned int shstrndx;
6515 struct elf_view shdrs_view;
6516 int shdrs_view_valid;
6517 const b_elf_shdr *shdrs;
6518 const b_elf_shdr *shstrhdr;
6519 size_t shstr_size;
6520 off_t shstr_off;
6521 struct elf_view names_view;
6522 int names_view_valid;
6523 const char *names;
6524 unsigned int symtab_shndx;
6525 unsigned int dynsym_shndx;
6526 unsigned int i;
6527 struct debug_section_info sections[DEBUG_MAX];
6528 struct debug_section_info zsections[DEBUG_MAX];
6529 struct elf_view symtab_view;
6530 int symtab_view_valid;
6531 struct elf_view strtab_view;
6532 int strtab_view_valid;
6533 struct elf_view buildid_view;
6534 int buildid_view_valid;
6535 const char *buildid_data;
6536 uint32_t buildid_size;
6537 struct elf_view debuglink_view;
6538 int debuglink_view_valid;
6539 const char *debuglink_name;
6540 uint32_t debuglink_crc;
6541 struct elf_view debugaltlink_view;
6542 int debugaltlink_view_valid;
6543 const char *debugaltlink_name;
6544 const char *debugaltlink_buildid_data;
6545 uint32_t debugaltlink_buildid_size;
6546 struct elf_view gnu_debugdata_view;
6547 int gnu_debugdata_view_valid;
6548 size_t gnu_debugdata_size;
6549 unsigned char *gnu_debugdata_uncompressed;
6550 size_t gnu_debugdata_uncompressed_size;
6551 off_t min_offset;
6552 off_t max_offset;
6553 off_t debug_size;
6554 struct elf_view debug_view;
6555 int debug_view_valid;
6556 unsigned int using_debug_view;
6557 uint16_t *zdebug_table;
6558 struct elf_view split_debug_view[DEBUG_MAX];
6559 unsigned char split_debug_view_valid[DEBUG_MAX];
6560 struct elf_ppc64_opd_data opd_data, *opd;
6561 int opd_view_valid;
6562 struct dwarf_sections dwarf_sections;
6563
6564 if (!debuginfo)
6565 {
6566 *found_sym = 0;
6567 *found_dwarf = 0;
6568 }
6569
6570 shdrs_view_valid = 0;
6571 names_view_valid = 0;
6572 symtab_view_valid = 0;
6573 strtab_view_valid = 0;
6574 buildid_view_valid = 0;
6575 buildid_data = NULL;
6576 buildid_size = 0;
6577 debuglink_view_valid = 0;
6578 debuglink_name = NULL;
6579 debuglink_crc = 0;
6580 debugaltlink_view_valid = 0;
6581 debugaltlink_name = NULL;
6582 debugaltlink_buildid_data = NULL;
6583 debugaltlink_buildid_size = 0;
6584 gnu_debugdata_view_valid = 0;
6585 gnu_debugdata_size = 0;
6586 debug_view_valid = 0;
6587 memset (&split_debug_view_valid[0], 0, sizeof split_debug_view_valid);
6588 opd = NULL;
6589 opd_view_valid = 0;
6590
6591 if (!elf_get_view (state, descriptor, memory, memory_size, 0, sizeof ehdr,
6592 error_callback, data, &ehdr_view))
6593 goto fail;
6594
6595 memcpy (&ehdr, ehdr_view.view.data, sizeof ehdr);
6596
6597 elf_release_view (state, &ehdr_view, error_callback, data);
6598
6599 if (ehdr.e_ident[EI_MAG0] != ELFMAG0
6600 || ehdr.e_ident[EI_MAG1] != ELFMAG1
6601 || ehdr.e_ident[EI_MAG2] != ELFMAG2
6602 || ehdr.e_ident[EI_MAG3] != ELFMAG3)
6603 {
6604 error_callback (data, "executable file is not ELF", 0);
6605 goto fail;
6606 }
6607 if (ehdr.e_ident[EI_VERSION] != EV_CURRENT)
6608 {
6609 error_callback (data, "executable file is unrecognized ELF version", 0);
6610 goto fail;
6611 }
6612
6613 #if BACKTRACE_ELF_SIZE == 32
6614 #define BACKTRACE_ELFCLASS ELFCLASS32
6615 #else
6616 #define BACKTRACE_ELFCLASS ELFCLASS64
6617 #endif
6618
6619 if (ehdr.e_ident[EI_CLASS] != BACKTRACE_ELFCLASS)
6620 {
6621 error_callback (data, "executable file is unexpected ELF class", 0);
6622 goto fail;
6623 }
6624
6625 if (ehdr.e_ident[EI_DATA] != ELFDATA2LSB
6626 && ehdr.e_ident[EI_DATA] != ELFDATA2MSB)
6627 {
6628 error_callback (data, "executable file has unknown endianness", 0);
6629 goto fail;
6630 }
6631
6632 /* If the executable is ET_DYN, it is either a PIE, or we are running
6633 directly a shared library with .interp. We need to wait for
6634 dl_iterate_phdr in that case to determine the actual base_address. */
6635 if (exe && ehdr.e_type == ET_DYN)
6636 return -1;
6637
6638 shoff = ehdr.e_shoff;
6639 shnum = ehdr.e_shnum;
6640 shstrndx = ehdr.e_shstrndx;
6641
6642 if ((shnum == 0 || shstrndx == SHN_XINDEX)
6643 && shoff != 0)
6644 {
6645 struct elf_view shdr_view;
6646 const b_elf_shdr *shdr;
6647
6648 if (!elf_get_view (state, descriptor, memory, memory_size, shoff,
6649 sizeof shdr, error_callback, data, &shdr_view))
6650 goto fail;
6651
6652 shdr = (const b_elf_shdr *) shdr_view.view.data;
6653
6654 if (shnum == 0)
6655 shnum = shdr->sh_size;
6656
6657 if (shstrndx == SHN_XINDEX)
6658 {
6659 shstrndx = shdr->sh_link;
6660
6661 /* Versions of the GNU binutils between 2.12 and 2.18 did
6662 not handle objects with more than SHN_LORESERVE sections
6663 correctly. All large section indexes were offset by
6664 0x100. There is more information at
6665 http://sourceware.org/bugzilla/show_bug.cgi?id-5900 .
6666 Fortunately these object files are easy to detect, as the
6667 GNU binutils always put the section header string table
6668 near the end of the list of sections. Thus if the
6669 section header string table index is larger than the
6670 number of sections, then we know we have to subtract
6671 0x100 to get the real section index. */
6672 if (shstrndx >= shnum && shstrndx >= SHN_LORESERVE + 0x100)
6673 shstrndx -= 0x100;
6674 }
6675
6676 elf_release_view (state, &shdr_view, error_callback, data);
6677 }
6678
6679 if (shnum == 0 || shstrndx == 0)
6680 goto fail;
6681
6682 /* To translate PC to file/line when using DWARF, we need to find
6683 the .debug_info and .debug_line sections. */
6684
6685 /* Read the section headers, skipping the first one. */
6686
6687 if (!elf_get_view (state, descriptor, memory, memory_size,
6688 shoff + sizeof (b_elf_shdr),
6689 (shnum - 1) * sizeof (b_elf_shdr),
6690 error_callback, data, &shdrs_view))
6691 goto fail;
6692 shdrs_view_valid = 1;
6693 shdrs = (const b_elf_shdr *) shdrs_view.view.data;
6694
6695 /* Read the section names. */
6696
6697 shstrhdr = &shdrs[shstrndx - 1];
6698 shstr_size = shstrhdr->sh_size;
6699 shstr_off = shstrhdr->sh_offset;
6700
6701 if (!elf_get_view (state, descriptor, memory, memory_size, shstr_off,
6702 shstrhdr->sh_size, error_callback, data, &names_view))
6703 goto fail;
6704 names_view_valid = 1;
6705 names = (const char *) names_view.view.data;
6706
6707 symtab_shndx = 0;
6708 dynsym_shndx = 0;
6709
6710 memset (sections, 0, sizeof sections);
6711 memset (zsections, 0, sizeof zsections);
6712
6713 /* Look for the symbol table. */
6714 for (i = 1; i < shnum; ++i)
6715 {
6716 const b_elf_shdr *shdr;
6717 unsigned int sh_name;
6718 const char *name;
6719 int j;
6720
6721 shdr = &shdrs[i - 1];
6722
6723 if (shdr->sh_type == SHT_SYMTAB)
6724 symtab_shndx = i;
6725 else if (shdr->sh_type == SHT_DYNSYM)
6726 dynsym_shndx = i;
6727
6728 sh_name = shdr->sh_name;
6729 if (sh_name >= shstr_size)
6730 {
6731 error_callback (data, "ELF section name out of range", 0);
6732 goto fail;
6733 }
6734
6735 name = names + sh_name;
6736
6737 for (j = 0; j < (int) DEBUG_MAX; ++j)
6738 {
6739 if (strcmp (name, dwarf_section_names[j]) == 0)
6740 {
6741 sections[j].offset = shdr->sh_offset;
6742 sections[j].size = shdr->sh_size;
6743 sections[j].compressed = (shdr->sh_flags & SHF_COMPRESSED) != 0;
6744 break;
6745 }
6746 }
6747
6748 if (name[0] == '.' && name[1] == 'z')
6749 {
6750 for (j = 0; j < (int) DEBUG_MAX; ++j)
6751 {
6752 if (strcmp (name + 2, dwarf_section_names[j] + 1) == 0)
6753 {
6754 zsections[j].offset = shdr->sh_offset;
6755 zsections[j].size = shdr->sh_size;
6756 break;
6757 }
6758 }
6759 }
6760
6761 /* Read the build ID if present. This could check for any
6762 SHT_NOTE section with the right note name and type, but gdb
6763 looks for a specific section name. */
6764 if ((!debuginfo || with_buildid_data != NULL)
6765 && !buildid_view_valid
6766 && strcmp (name, ".note.gnu.build-id") == 0)
6767 {
6768 const b_elf_note *note;
6769
6770 if (!elf_get_view (state, descriptor, memory, memory_size,
6771 shdr->sh_offset, shdr->sh_size, error_callback,
6772 data, &buildid_view))
6773 goto fail;
6774
6775 buildid_view_valid = 1;
6776 note = (const b_elf_note *) buildid_view.view.data;
6777 if (note->type == NT_GNU_BUILD_ID
6778 && note->namesz == 4
6779 && strncmp (note->name, "GNU", 4) == 0
6780 && shdr->sh_size <= 12 + ((note->namesz + 3) & ~ 3) + note->descsz)
6781 {
6782 buildid_data = &note->name[0] + ((note->namesz + 3) & ~ 3);
6783 buildid_size = note->descsz;
6784 }
6785
6786 if (with_buildid_size != 0)
6787 {
6788 if (buildid_size != with_buildid_size)
6789 goto fail;
6790
6791 if (memcmp (buildid_data, with_buildid_data, buildid_size) != 0)
6792 goto fail;
6793 }
6794 }
6795
6796 /* Read the debuglink file if present. */
6797 if (!debuginfo
6798 && !debuglink_view_valid
6799 && strcmp (name, ".gnu_debuglink") == 0)
6800 {
6801 const char *debuglink_data;
6802 size_t crc_offset;
6803
6804 if (!elf_get_view (state, descriptor, memory, memory_size,
6805 shdr->sh_offset, shdr->sh_size, error_callback,
6806 data, &debuglink_view))
6807 goto fail;
6808
6809 debuglink_view_valid = 1;
6810 debuglink_data = (const char *) debuglink_view.view.data;
6811 crc_offset = strnlen (debuglink_data, shdr->sh_size);
6812 crc_offset = (crc_offset + 3) & ~3;
6813 if (crc_offset + 4 <= shdr->sh_size)
6814 {
6815 debuglink_name = debuglink_data;
6816 debuglink_crc = *(const uint32_t*)(debuglink_data + crc_offset);
6817 }
6818 }
6819
6820 if (!debugaltlink_view_valid
6821 && strcmp (name, ".gnu_debugaltlink") == 0)
6822 {
6823 const char *debugaltlink_data;
6824 size_t debugaltlink_name_len;
6825
6826 if (!elf_get_view (state, descriptor, memory, memory_size,
6827 shdr->sh_offset, shdr->sh_size, error_callback,
6828 data, &debugaltlink_view))
6829 goto fail;
6830
6831 debugaltlink_view_valid = 1;
6832 debugaltlink_data = (const char *) debugaltlink_view.view.data;
6833 debugaltlink_name = debugaltlink_data;
6834 debugaltlink_name_len = strnlen (debugaltlink_data, shdr->sh_size);
6835 if (debugaltlink_name_len < shdr->sh_size)
6836 {
6837 /* Include terminating zero. */
6838 debugaltlink_name_len += 1;
6839
6840 debugaltlink_buildid_data
6841 = debugaltlink_data + debugaltlink_name_len;
6842 debugaltlink_buildid_size = shdr->sh_size - debugaltlink_name_len;
6843 }
6844 }
6845
6846 if (!debuginfo
6847 && !gnu_debugdata_view_valid
6848 && strcmp (name, ".gnu_debugdata") == 0)
6849 {
6850 if (!elf_get_view (state, descriptor, memory, memory_size,
6851 shdr->sh_offset, shdr->sh_size, error_callback,
6852 data, &gnu_debugdata_view))
6853 goto fail;
6854
6855 gnu_debugdata_size = shdr->sh_size;
6856 gnu_debugdata_view_valid = 1;
6857 }
6858
6859 /* Read the .opd section on PowerPC64 ELFv1. */
6860 if (ehdr.e_machine == EM_PPC64
6861 && (ehdr.e_flags & EF_PPC64_ABI) < 2
6862 && shdr->sh_type == SHT_PROGBITS
6863 && strcmp (name, ".opd") == 0)
6864 {
6865 if (!elf_get_view (state, descriptor, memory, memory_size,
6866 shdr->sh_offset, shdr->sh_size, error_callback,
6867 data, &opd_data.view))
6868 goto fail;
6869
6870 opd = &opd_data;
6871 opd->addr = shdr->sh_addr;
6872 opd->data = (const char *) opd_data.view.view.data;
6873 opd->size = shdr->sh_size;
6874 opd_view_valid = 1;
6875 }
6876 }
6877
6878 /* A debuginfo file may not have a useful .opd section, but we can use the
6879 one from the original executable. */
6880 if (opd == NULL)
6881 opd = caller_opd;
6882
6883 if (symtab_shndx == 0)
6884 symtab_shndx = dynsym_shndx;
6885 if (symtab_shndx != 0)
6886 {
6887 const b_elf_shdr *symtab_shdr;
6888 unsigned int strtab_shndx;
6889 const b_elf_shdr *strtab_shdr;
6890 struct elf_syminfo_data *sdata;
6891
6892 symtab_shdr = &shdrs[symtab_shndx - 1];
6893 strtab_shndx = symtab_shdr->sh_link;
6894 if (strtab_shndx >= shnum)
6895 {
6896 error_callback (data,
6897 "ELF symbol table strtab link out of range", 0);
6898 goto fail;
6899 }
6900 strtab_shdr = &shdrs[strtab_shndx - 1];
6901
6902 if (!elf_get_view (state, descriptor, memory, memory_size,
6903 symtab_shdr->sh_offset, symtab_shdr->sh_size,
6904 error_callback, data, &symtab_view))
6905 goto fail;
6906 symtab_view_valid = 1;
6907
6908 if (!elf_get_view (state, descriptor, memory, memory_size,
6909 strtab_shdr->sh_offset, strtab_shdr->sh_size,
6910 error_callback, data, &strtab_view))
6911 goto fail;
6912 strtab_view_valid = 1;
6913
6914 sdata = ((struct elf_syminfo_data *)
6915 backtrace_alloc (state, sizeof *sdata, error_callback, data));
6916 if (sdata == NULL)
6917 goto fail;
6918
6919 if (!elf_initialize_syminfo (state, base_address,
6920 symtab_view.view.data, symtab_shdr->sh_size,
6921 strtab_view.view.data, strtab_shdr->sh_size,
6922 error_callback, data, sdata, opd))
6923 {
6924 backtrace_free (state, sdata, sizeof *sdata, error_callback, data);
6925 goto fail;
6926 }
6927
6928 /* We no longer need the symbol table, but we hold on to the
6929 string table permanently. */
6930 elf_release_view (state, &symtab_view, error_callback, data);
6931 symtab_view_valid = 0;
6932 strtab_view_valid = 0;
6933
6934 *found_sym = 1;
6935
6936 elf_add_syminfo_data (state, sdata);
6937 }
6938
6939 elf_release_view (state, &shdrs_view, error_callback, data);
6940 shdrs_view_valid = 0;
6941 elf_release_view (state, &names_view, error_callback, data);
6942 names_view_valid = 0;
6943
6944 /* If the debug info is in a separate file, read that one instead. */
6945
6946 if (buildid_data != NULL)
6947 {
6948 int d;
6949
6950 d = elf_open_debugfile_by_buildid (state, buildid_data, buildid_size,
6951 error_callback, data);
6952 if (d >= 0)
6953 {
6954 int ret;
6955
6956 elf_release_view (state, &buildid_view, error_callback, data);
6957 if (debuglink_view_valid)
6958 elf_release_view (state, &debuglink_view, error_callback, data);
6959 if (debugaltlink_view_valid)
6960 elf_release_view (state, &debugaltlink_view, error_callback, data);
6961 ret = elf_add (state, "", d, NULL, 0, base_address, opd,
6962 error_callback, data, fileline_fn, found_sym,
6963 found_dwarf, NULL, 0, 1, NULL, 0);
6964 if (ret < 0)
6965 backtrace_close (d, error_callback, data);
6966 else if (descriptor >= 0)
6967 backtrace_close (descriptor, error_callback, data);
6968 return ret;
6969 }
6970 }
6971
6972 if (buildid_view_valid)
6973 {
6974 elf_release_view (state, &buildid_view, error_callback, data);
6975 buildid_view_valid = 0;
6976 }
6977
6978 if (debuglink_name != NULL)
6979 {
6980 int d;
6981
6982 d = elf_open_debugfile_by_debuglink (state, filename, debuglink_name,
6983 debuglink_crc, error_callback,
6984 data);
6985 if (d >= 0)
6986 {
6987 int ret;
6988
6989 elf_release_view (state, &debuglink_view, error_callback, data);
6990 if (debugaltlink_view_valid)
6991 elf_release_view (state, &debugaltlink_view, error_callback, data);
6992 ret = elf_add (state, "", d, NULL, 0, base_address, opd,
6993 error_callback, data, fileline_fn, found_sym,
6994 found_dwarf, NULL, 0, 1, NULL, 0);
6995 if (ret < 0)
6996 backtrace_close (d, error_callback, data);
6997 else if (descriptor >= 0)
6998 backtrace_close(descriptor, error_callback, data);
6999 return ret;
7000 }
7001 }
7002
7003 if (debuglink_view_valid)
7004 {
7005 elf_release_view (state, &debuglink_view, error_callback, data);
7006 debuglink_view_valid = 0;
7007 }
7008
7009 struct dwarf_data *fileline_altlink = NULL;
7010 if (debugaltlink_name != NULL)
7011 {
7012 int d;
7013
7014 d = elf_open_debugfile_by_debuglink (state, filename, debugaltlink_name,
7015 0, error_callback, data);
7016 if (d >= 0)
7017 {
7018 int ret;
7019
7020 ret = elf_add (state, filename, d, NULL, 0, base_address, opd,
7021 error_callback, data, fileline_fn, found_sym,
7022 found_dwarf, &fileline_altlink, 0, 1,
7023 debugaltlink_buildid_data, debugaltlink_buildid_size);
7024 elf_release_view (state, &debugaltlink_view, error_callback, data);
7025 debugaltlink_view_valid = 0;
7026 if (ret < 0)
7027 {
7028 backtrace_close (d, error_callback, data);
7029 return ret;
7030 }
7031 }
7032 }
7033
7034 if (debugaltlink_view_valid)
7035 {
7036 elf_release_view (state, &debugaltlink_view, error_callback, data);
7037 debugaltlink_view_valid = 0;
7038 }
7039
7040 if (gnu_debugdata_view_valid)
7041 {
7042 int ret;
7043
7044 ret = elf_uncompress_lzma (state,
7045 ((const unsigned char *)
7046 gnu_debugdata_view.view.data),
7047 gnu_debugdata_size, error_callback, data,
7048 &gnu_debugdata_uncompressed,
7049 &gnu_debugdata_uncompressed_size);
7050
7051 elf_release_view (state, &gnu_debugdata_view, error_callback, data);
7052 gnu_debugdata_view_valid = 0;
7053
7054 if (ret)
7055 {
7056 ret = elf_add (state, filename, -1, gnu_debugdata_uncompressed,
7057 gnu_debugdata_uncompressed_size, base_address, opd,
7058 error_callback, data, fileline_fn, found_sym,
7059 found_dwarf, NULL, 0, 0, NULL, 0);
7060 if (ret >= 0 && descriptor >= 0)
7061 backtrace_close(descriptor, error_callback, data);
7062 return ret;
7063 }
7064 }
7065
7066 if (opd_view_valid)
7067 {
7068 elf_release_view (state, &opd->view, error_callback, data);
7069 opd_view_valid = 0;
7070 opd = NULL;
7071 }
7072
7073 /* Read all the debug sections in a single view, since they are
7074 probably adjacent in the file. If any of sections are
7075 uncompressed, we never release this view. */
7076
7077 min_offset = 0;
7078 max_offset = 0;
7079 debug_size = 0;
7080 for (i = 0; i < (int) DEBUG_MAX; ++i)
7081 {
7082 off_t end;
7083
7084 if (sections[i].size != 0)
7085 {
7086 if (min_offset == 0 || sections[i].offset < min_offset)
7087 min_offset = sections[i].offset;
7088 end = sections[i].offset + sections[i].size;
7089 if (end > max_offset)
7090 max_offset = end;
7091 debug_size += sections[i].size;
7092 }
7093 if (zsections[i].size != 0)
7094 {
7095 if (min_offset == 0 || zsections[i].offset < min_offset)
7096 min_offset = zsections[i].offset;
7097 end = zsections[i].offset + zsections[i].size;
7098 if (end > max_offset)
7099 max_offset = end;
7100 debug_size += zsections[i].size;
7101 }
7102 }
7103 if (min_offset == 0 || max_offset == 0)
7104 {
7105 if (descriptor >= 0)
7106 {
7107 if (!backtrace_close (descriptor, error_callback, data))
7108 goto fail;
7109 }
7110 return 1;
7111 }
7112
7113 /* If the total debug section size is large, assume that there are
7114 gaps between the sections, and read them individually. */
7115
7116 if (max_offset - min_offset < 0x20000000
7117 || max_offset - min_offset < debug_size + 0x10000)
7118 {
7119 if (!elf_get_view (state, descriptor, memory, memory_size, min_offset,
7120 max_offset - min_offset, error_callback, data,
7121 &debug_view))
7122 goto fail;
7123 debug_view_valid = 1;
7124 }
7125 else
7126 {
7127 memset (&split_debug_view[0], 0, sizeof split_debug_view);
7128 for (i = 0; i < (int) DEBUG_MAX; ++i)
7129 {
7130 struct debug_section_info *dsec;
7131
7132 if (sections[i].size != 0)
7133 dsec = &sections[i];
7134 else if (zsections[i].size != 0)
7135 dsec = &zsections[i];
7136 else
7137 continue;
7138
7139 if (!elf_get_view (state, descriptor, memory, memory_size,
7140 dsec->offset, dsec->size, error_callback, data,
7141 &split_debug_view[i]))
7142 goto fail;
7143 split_debug_view_valid[i] = 1;
7144
7145 if (sections[i].size != 0)
7146 sections[i].data = ((const unsigned char *)
7147 split_debug_view[i].view.data);
7148 else
7149 zsections[i].data = ((const unsigned char *)
7150 split_debug_view[i].view.data);
7151 }
7152 }
7153
7154 /* We've read all we need from the executable. */
7155 if (descriptor >= 0)
7156 {
7157 if (!backtrace_close (descriptor, error_callback, data))
7158 goto fail;
7159 descriptor = -1;
7160 }
7161
7162 using_debug_view = 0;
7163 if (debug_view_valid)
7164 {
7165 for (i = 0; i < (int) DEBUG_MAX; ++i)
7166 {
7167 if (sections[i].size == 0)
7168 sections[i].data = NULL;
7169 else
7170 {
7171 sections[i].data = ((const unsigned char *) debug_view.view.data
7172 + (sections[i].offset - min_offset));
7173 ++using_debug_view;
7174 }
7175
7176 if (zsections[i].size == 0)
7177 zsections[i].data = NULL;
7178 else
7179 zsections[i].data = ((const unsigned char *) debug_view.view.data
7180 + (zsections[i].offset - min_offset));
7181 }
7182 }
7183
7184 /* Uncompress the old format (--compress-debug-sections=zlib-gnu). */
7185
7186 zdebug_table = NULL;
7187 for (i = 0; i < (int) DEBUG_MAX; ++i)
7188 {
7189 if (sections[i].size == 0 && zsections[i].size > 0)
7190 {
7191 unsigned char *uncompressed_data;
7192 size_t uncompressed_size;
7193
7194 if (zdebug_table == NULL)
7195 {
7196 zdebug_table = ((uint16_t *)
7197 backtrace_alloc (state, ZLIB_TABLE_SIZE,
7198 error_callback, data));
7199 if (zdebug_table == NULL)
7200 goto fail;
7201 }
7202
7203 uncompressed_data = NULL;
7204 uncompressed_size = 0;
7205 if (!elf_uncompress_zdebug (state, zsections[i].data,
7206 zsections[i].size, zdebug_table,
7207 error_callback, data,
7208 &uncompressed_data, &uncompressed_size))
7209 goto fail;
7210 sections[i].data = uncompressed_data;
7211 sections[i].size = uncompressed_size;
7212 sections[i].compressed = 0;
7213
7214 if (split_debug_view_valid[i])
7215 {
7216 elf_release_view (state, &split_debug_view[i],
7217 error_callback, data);
7218 split_debug_view_valid[i] = 0;
7219 }
7220 }
7221 }
7222
7223 if (zdebug_table != NULL)
7224 {
7225 backtrace_free (state, zdebug_table, ZLIB_TABLE_SIZE,
7226 error_callback, data);
7227 zdebug_table = NULL;
7228 }
7229
7230 /* Uncompress the official ELF format
7231 (--compress-debug-sections=zlib-gabi, --compress-debug-sections=zstd). */
7232 for (i = 0; i < (int) DEBUG_MAX; ++i)
7233 {
7234 unsigned char *uncompressed_data;
7235 size_t uncompressed_size;
7236
7237 if (sections[i].size == 0 || !sections[i].compressed)
7238 continue;
7239
7240 if (zdebug_table == NULL)
7241 {
7242 zdebug_table = ((uint16_t *)
7243 backtrace_alloc (state, ZDEBUG_TABLE_SIZE,
7244 error_callback, data));
7245 if (zdebug_table == NULL)
7246 goto fail;
7247 }
7248
7249 uncompressed_data = NULL;
7250 uncompressed_size = 0;
7251 if (!elf_uncompress_chdr (state, sections[i].data, sections[i].size,
7252 zdebug_table, error_callback, data,
7253 &uncompressed_data, &uncompressed_size))
7254 goto fail;
7255 sections[i].data = uncompressed_data;
7256 sections[i].size = uncompressed_size;
7257 sections[i].compressed = 0;
7258
7259 if (debug_view_valid)
7260 --using_debug_view;
7261 else if (split_debug_view_valid[i])
7262 {
7263 elf_release_view (state, &split_debug_view[i], error_callback, data);
7264 split_debug_view_valid[i] = 0;
7265 }
7266 }
7267
7268 if (zdebug_table != NULL)
7269 backtrace_free (state, zdebug_table, ZDEBUG_TABLE_SIZE,
7270 error_callback, data);
7271
7272 if (debug_view_valid && using_debug_view == 0)
7273 {
7274 elf_release_view (state, &debug_view, error_callback, data);
7275 debug_view_valid = 0;
7276 }
7277
7278 for (i = 0; i < (int) DEBUG_MAX; ++i)
7279 {
7280 dwarf_sections.data[i] = sections[i].data;
7281 dwarf_sections.size[i] = sections[i].size;
7282 }
7283
7284 if (!backtrace_dwarf_add (state, base_address, &dwarf_sections,
7285 ehdr.e_ident[EI_DATA] == ELFDATA2MSB,
7286 fileline_altlink,
7287 error_callback, data, fileline_fn,
7288 fileline_entry))
7289 goto fail;
7290
7291 *found_dwarf = 1;
7292
7293 return 1;
7294
7295 fail:
7296 if (shdrs_view_valid)
7297 elf_release_view (state, &shdrs_view, error_callback, data);
7298 if (names_view_valid)
7299 elf_release_view (state, &names_view, error_callback, data);
7300 if (symtab_view_valid)
7301 elf_release_view (state, &symtab_view, error_callback, data);
7302 if (strtab_view_valid)
7303 elf_release_view (state, &strtab_view, error_callback, data);
7304 if (debuglink_view_valid)
7305 elf_release_view (state, &debuglink_view, error_callback, data);
7306 if (debugaltlink_view_valid)
7307 elf_release_view (state, &debugaltlink_view, error_callback, data);
7308 if (gnu_debugdata_view_valid)
7309 elf_release_view (state, &gnu_debugdata_view, error_callback, data);
7310 if (buildid_view_valid)
7311 elf_release_view (state, &buildid_view, error_callback, data);
7312 if (debug_view_valid)
7313 elf_release_view (state, &debug_view, error_callback, data);
7314 for (i = 0; i < (int) DEBUG_MAX; ++i)
7315 {
7316 if (split_debug_view_valid[i])
7317 elf_release_view (state, &split_debug_view[i], error_callback, data);
7318 }
7319 if (opd_view_valid)
7320 elf_release_view (state, &opd->view, error_callback, data);
7321 if (descriptor >= 0)
7322 backtrace_close (descriptor, error_callback, data);
7323 return 0;
7324 }
7325
/* Data passed to phdr_callback.  */

struct phdr_data
{
  /* The library state, shared by every callback invocation.  */
  struct backtrace_state *state;
  /* Error callback to report problems to the user.  */
  backtrace_error_callback error_callback;
  /* Opaque argument passed through to ERROR_CALLBACK.  */
  void *data;
  /* Out: where to store the fileline function when DWARF info is
     found for a module.  */
  fileline *fileline_fn;
  /* Out: flag set by elf_add when a symbol table is found.  */
  int *found_sym;
  /* Out: flag set when DWARF debug info is found.  */
  int *found_dwarf;
  /* File name of the main executable, used when the iteration hands
     us the unnamed main module.  */
  const char *exe_filename;
  /* Open descriptor for the main executable not yet consumed by
     elf_add, or -1.  */
  int exe_descriptor;
};
7339
7340 /* Callback passed to dl_iterate_phdr. Load debug info from shared
7341 libraries. */
7342
7343 static int
7344 #ifdef __i386__
7345 __attribute__ ((__force_align_arg_pointer__))
7346 #endif
7347 phdr_callback (struct dl_phdr_info *info, size_t size ATTRIBUTE_UNUSED,
7348 void *pdata)
7349 {
7350 struct phdr_data *pd = (struct phdr_data *) pdata;
7351 const char *filename;
7352 int descriptor;
7353 int does_not_exist;
7354 struct libbacktrace_base_address base_address;
7355 fileline elf_fileline_fn;
7356 int found_dwarf;
7357
7358 /* There is not much we can do if we don't have the module name,
7359 unless executable is ET_DYN, where we expect the very first
7360 phdr_callback to be for the PIE. */
7361 if (info->dlpi_name == NULL || info->dlpi_name[0] == '\0')
7362 {
7363 if (pd->exe_descriptor == -1)
7364 return 0;
7365 filename = pd->exe_filename;
7366 descriptor = pd->exe_descriptor;
7367 pd->exe_descriptor = -1;
7368 }
7369 else
7370 {
7371 if (pd->exe_descriptor != -1)
7372 {
7373 backtrace_close (pd->exe_descriptor, pd->error_callback, pd->data);
7374 pd->exe_descriptor = -1;
7375 }
7376
7377 filename = info->dlpi_name;
7378 descriptor = backtrace_open (info->dlpi_name, pd->error_callback,
7379 pd->data, &does_not_exist);
7380 if (descriptor < 0)
7381 return 0;
7382 }
7383
7384 base_address.m = info->dlpi_addr;
7385 if (elf_add (pd->state, filename, descriptor, NULL, 0, base_address, NULL,
7386 pd->error_callback, pd->data, &elf_fileline_fn, pd->found_sym,
7387 &found_dwarf, NULL, 0, 0, NULL, 0))
7388 {
7389 if (found_dwarf)
7390 {
7391 *pd->found_dwarf = 1;
7392 *pd->fileline_fn = elf_fileline_fn;
7393 }
7394 }
7395
7396 return 0;
7397 }
7398
7399 /* Initialize the backtrace data we need from an ELF executable. At
7400 the ELF level, all we need to do is find the debug info
7401 sections. */
7402
7403 int
7404 backtrace_initialize (struct backtrace_state *state, const char *filename,
7405 int descriptor, backtrace_error_callback error_callback,
7406 void *data, fileline *fileline_fn)
7407 {
7408 int ret;
7409 int found_sym;
7410 int found_dwarf;
7411 fileline elf_fileline_fn = elf_nodebug;
7412 struct phdr_data pd;
7413
7414 /* When using fdpic we must use dl_iterate_phdr for all modules, including
7415 the main executable, so that we can get the right base address
7416 mapping. */
7417 if (!libbacktrace_using_fdpic ())
7418 {
7419 struct libbacktrace_base_address zero_base_address;
7420
7421 memset (&zero_base_address, 0, sizeof zero_base_address);
7422 ret = elf_add (state, filename, descriptor, NULL, 0, zero_base_address,
7423 NULL, error_callback, data, &elf_fileline_fn, &found_sym,
7424 &found_dwarf, NULL, 1, 0, NULL, 0);
7425 if (!ret)
7426 return 0;
7427 }
7428
7429 pd.state = state;
7430 pd.error_callback = error_callback;
7431 pd.data = data;
7432 pd.fileline_fn = &elf_fileline_fn;
7433 pd.found_sym = &found_sym;
7434 pd.found_dwarf = &found_dwarf;
7435 pd.exe_filename = filename;
7436 pd.exe_descriptor = ret < 0 ? descriptor : -1;
7437
7438 dl_iterate_phdr (phdr_callback, (void *) &pd);
7439
7440 if (!state->threaded)
7441 {
7442 if (found_sym)
7443 state->syminfo_fn = elf_syminfo;
7444 else if (state->syminfo_fn == NULL)
7445 state->syminfo_fn = elf_nosyms;
7446 }
7447 else
7448 {
7449 if (found_sym)
7450 backtrace_atomic_store_pointer (&state->syminfo_fn, elf_syminfo);
7451 else
7452 (void) __sync_bool_compare_and_swap (&state->syminfo_fn, NULL,
7453 elf_nosyms);
7454 }
7455
7456 if (!state->threaded)
7457 *fileline_fn = state->fileline_fn;
7458 else
7459 *fileline_fn = backtrace_atomic_load_pointer (&state->fileline_fn);
7460
7461 if (*fileline_fn == NULL || *fileline_fn == elf_nodebug)
7462 *fileline_fn = elf_fileline_fn;
7463
7464 return 1;
7465 }