static CIE the_CIEs[N_CIEs];
+/* Read, summarise and store CFA unwind info from .eh_frame and
+ .debug_frame sections. is_ehframe tells us which kind we are
+ dealing with -- they are slightly different. */
void ML_(read_callframe_info_dwarf3)
- ( /*OUT*/struct _DebugInfo* di, UChar* frame_image, SizeT frame_size,
- Bool for_eh )
+ ( /*OUT*/struct _DebugInfo* di,
+ UChar* frame_image, SizeT frame_size, Addr frame_avma,
+ Bool is_ehframe )
{
Int nbytes;
HChar* how = NULL;
Int n_CIEs = 0;
UChar* data = frame_image;
- UWord ehframe_cfsis = 0;
- Addr frame_avma = for_eh ? di->ehframe_avma : 0;
+ UWord cfsi_used_orig;
+
+   /* For a .debug_frame, the caller must pass frame_avma == 0. */
+ if (!is_ehframe)
+ vg_assert(frame_avma == 0);
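Why the new frame_avma parameter exists: address fields in .eh_frame FDEs are usually pc-relative encoded (DW_EH_PE_pcrel), so an absolute code address can only be recovered once the section's actual load address (avma) is known, whereas .debug_frame stores absolute addresses, hence the zero-avma requirement above. A minimal sketch of such a decode, using a hypothetical helper that is not part of this change:

   /* Sketch only: recover an absolute address from a 32-bit
      DW_EH_PE_pcrel field.  Hypothetical helper, not in the patch;
      Addr/UChar/Int/Word are the usual Valgrind basic types. */
   static Addr sketch_decode_pcrel32 ( UChar* field_img,
                                       UChar* frame_image,
                                       Addr   frame_avma )
   {
      /* avma of the field = section avma + offset of the field
         within the section image */
      Addr field_avma = frame_avma + (Addr)(field_img - frame_image);
      /* the stored value is relative to the field's own address */
      Int rel = *(Int*)field_img;
      return field_avma + (Addr)(Word)rel;
   }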
# if defined(VGP_ppc32_linux) || defined(VGP_ppc64_linux)
/* These targets don't use CFI-based stack unwinding. */
return;
# endif
- /* If we are reading .debug_frame after .eh_frame has been read, only
- add FDEs which weren't covered in .eh_frame. To be able to quickly
- search the FDEs, the records must be sorted. */
- if ( ! for_eh && di->ehframe_size && di->cfsi_used ) {
+ /* If we read more than one .debug_frame or .eh_frame for this
+ DebugInfo*, the second and subsequent reads should only add FDEs
+ for address ranges not already covered by the FDEs already
+ present. To be able to quickly check which address ranges are
+ already present, any existing records (DiCFSIs) must be sorted,
+ so we can binary-search them in the code below. We also record
+ di->cfsi_used so that we know where the boundary is between
+ existing and new records. */
+ if (di->cfsi_used > 0) {
ML_(canonicaliseCFI) ( di );
- ehframe_cfsis = di->cfsi_used;
}
+ cfsi_used_orig = di->cfsi_used;
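The property ML_(canonicaliseCFI) is relied on to establish here is that the existing DiCFSI records come out sorted by base address and mutually non-overlapping; that is what makes the binary search further down valid. A hedged sketch of that invariant, assuming the cfsi[].base/len fields used elsewhere in m_debuginfo:

   /* Sketch only: the post-canonicalisation invariant assumed below. */
   static void sketch_check_cfsi_sorted ( struct _DebugInfo* di )
   {
      Word i;
      for (i = 1; i < (Word)di->cfsi_used; i++) {
         /* ranges are in ascending order and do not overlap */
         vg_assert( di->cfsi[i-1].base + di->cfsi[i-1].len
                    <= di->cfsi[i].base );
      }
   }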
if (di->trace_cfi) {
VG_(printf)("\n-----------------------------------------------\n");
/* If cie_pointer is zero for .eh_frame or all ones for .debug_frame,
we've got a CIE; else it's an FDE. */
- if (cie_pointer == (for_eh ? 0ULL
+ if (cie_pointer == (is_ehframe ? 0ULL
: dw64 ? 0xFFFFFFFFFFFFFFFFULL : 0xFFFFFFFFULL)) {
Int this_CIE;
cie_pointer bytes back from here. */
/* re sizeof(UInt) / sizeof(ULong), matches XXX above. */
- if (for_eh)
+ if (is_ehframe)
look_for = (data - (dw64 ? sizeof(ULong) : sizeof(UInt))
- frame_image)
- cie_pointer;
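For orientation: the two flavours locate their CIE differently. In .eh_frame, cie_pointer counts bytes back from the cie_pointer field itself; in .debug_frame it is an offset from the start of the section. The hunk above shows only the .eh_frame branch; a sketch of both cases follows (the else branch is implied by the DWARF format, not quoted from the patch):

   if (is_ehframe)
      /* self-relative: the CIE starts cie_pointer bytes before
         the cie_pointer field */
      look_for = (data - (dw64 ? sizeof(ULong) : sizeof(UInt))
                       - frame_image)
                 - cie_pointer;
   else
      /* .debug_frame: cie_pointer is an offset from the start
         of the section */
      look_for = cie_pointer;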
data += fde_ilen;
- if (ehframe_cfsis) {
+ /* If this object's DebugInfo* had some DiCFSIs from a
+ previous .eh_frame or .debug_frame read, we must check
+ that we're not adding a duplicate. */
+ if (cfsi_used_orig > 0) {
Addr a_mid_lo, a_mid_hi;
Word mid, size,
lo = 0,
- hi = ehframe_cfsis-1;
+ hi = cfsi_used_orig-1;
while (True) {
/* current unsearched space is from lo to hi, inclusive. */
if (lo > hi) break; /* not found */
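The hunk is cut off inside the search loop; for context, here is a hedged sketch of the complete overlap check against the pre-existing, now-sorted records. The names fde_initloc and fde_arange (the new FDE's start address and extent) and the cfsi[].base/len fields are assumptions about the surrounding code, not quotations from it:

   Bool already_covered = False;
   lo = 0;
   hi = cfsi_used_orig - 1;
   while (True) {
      /* current unsearched space is from lo to hi, inclusive. */
      if (lo > hi) break; /* not found */
      mid      = (lo + hi) / 2;
      a_mid_lo = di->cfsi[mid].base;
      size     = di->cfsi[mid].len;
      a_mid_hi = a_mid_lo + size - 1;
      if (fde_initloc + fde_arange - 1 < a_mid_lo) {
         hi = mid - 1;            /* new range lies wholly below mid */
      } else
      if (fde_initloc > a_mid_hi) {
         lo = mid + 1;            /* wholly above mid */
      } else {
         already_covered = True;  /* overlaps an existing record */
         break;
      }
   }
   /* if already_covered, skip this FDE rather than adding a duplicate */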
TRACE_SYMTAB("rw: contains svmas %#lx .. %#lx with bias %#lx\n",
rw_svma_base, rw_svma_limit - 1, rw_bias );
+ /* Iterate over section headers */
for (i = 0; i < shdr_nent; i++) {
ElfXX_Shdr* shdr = INDEX_BIS( shdr_img, i, shdr_ent_szB );
UChar* name = shdr_strtab_img + shdr->sh_name;
/* Accept .eh_frame where mapped as rx (code). This seems to be
the common case. However, if that doesn't pan out, try for
- rw (data) instead. */
+      rw (data) instead.  We can handle up to N_EHFRAME_SECTS
+      .eh_frame sections per ELF object. */
if (0 == VG_(strcmp)(name, ".eh_frame")) {
- if (inrx && size > 0 && !di->ehframe_present) {
- di->ehframe_present = True;
- di->ehframe_avma = svma + rx_bias;
- di->ehframe_size = size;
- TRACE_SYMTAB("acquiring .eh_frame avma = %#lx\n", di->ehframe_avma);
+ if (inrx && size > 0 && di->n_ehframe < N_EHFRAME_SECTS) {
+ di->ehframe_avma[di->n_ehframe] = svma + rx_bias;
+ di->ehframe_size[di->n_ehframe] = size;
+ TRACE_SYMTAB("acquiring .eh_frame avma = %#lx\n",
+ di->ehframe_avma[di->n_ehframe]);
+ di->n_ehframe++;
} else
- if (inrw && size > 0 && !di->ehframe_present) {
- di->ehframe_present = True;
- di->ehframe_avma = svma + rw_bias;
- di->ehframe_size = size;
- TRACE_SYMTAB("acquiring .eh_frame avma = %#lx\n", di->ehframe_avma);
+ if (inrw && size > 0 && di->n_ehframe < N_EHFRAME_SECTS) {
+ di->ehframe_avma[di->n_ehframe] = svma + rw_bias;
+ di->ehframe_size[di->n_ehframe] = size;
+ TRACE_SYMTAB("acquiring .eh_frame avma = %#lx\n",
+ di->ehframe_avma[di->n_ehframe]);
+ di->n_ehframe++;
} else {
BAD(".eh_frame");
}
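The array indexing above implies that the per-object .eh_frame bookkeeping in struct _DebugInfo changed from single scalars to small fixed-size arrays, roughly as sketched below. This is inferred from the usage; the exact declarations, and the real value of N_EHFRAME_SECTS, live in the private headers and may differ:

   /* Sketch of the assumed DebugInfo fields (not quoted from the patch) */
   #define N_EHFRAME_SECTS 2   /* assumed value; really defined elsewhere */

   struct _DebugInfo {
      /* ... */
      UInt  n_ehframe;                     /* how many .eh_frame's we saw */
      Addr  ehframe_avma[N_EHFRAME_SECTS]; /* load address of each one    */
      SizeT ehframe_size[N_EHFRAME_SECTS]; /* size in bytes of each one   */
      /* ... */
   };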
UChar* debug_frame_img = NULL; /* .debug_frame (dwarf2) */
UChar* dwarf1d_img = NULL; /* .debug (dwarf1) */
UChar* dwarf1l_img = NULL; /* .line (dwarf1) */
- UChar* ehframe_img = NULL; /* .eh_frame (dwarf2) */
UChar* opd_img = NULL; /* .opd (dwarf2,
ppc64-linux) */
+ UChar* ehframe_img[N_EHFRAME_SECTS]; /* .eh_frame (dwarf2) */
+
/* Section sizes, in bytes */
SizeT strtab_sz = 0;
SizeT symtab_sz = 0;
SizeT debug_frame_sz = 0;
SizeT dwarf1d_sz = 0;
SizeT dwarf1l_sz = 0;
- SizeT ehframe_sz = 0;
SizeT opd_sz_unused = 0;
+ SizeT ehframe_sz[N_EHFRAME_SECTS];
+
+ for (i = 0; i < N_EHFRAME_SECTS; i++) {
+ ehframe_img[i] = NULL;
+ ehframe_sz[i] = 0;
+ }
/* Find all interesting sections */
- /* What FIND does: it finds the section called SEC_NAME. The
- size of it is assigned to SEC_SIZE. The address of the
+ UInt ehframe_ix = 0;
+
+   /* What FINDX does: it finds the section called _SEC_NAME.  The
+      size of it is assigned to _SEC_SIZE.  The address of the
section in the transiently loaded oimage is assigned to
- SEC_FILEA. Even for sections which are marked loadable, the
- client's ld.so may not have loaded them yet, so there is no
- guarantee that we can safely prod around in any such area).
- Because the entire object file is transiently mapped aboard
- for inspection, it's always safe to inspect that area. */
+      _SEC_IMG.  If the section is found, _POST_FX is executed
+      after _SEC_IMG and _SEC_SIZE have been assigned to.
+
+ Even for sections which are marked loadable, the client's
+ ld.so may not have loaded them yet, so there is no guarantee
+      that we can safely prod around in any such area.  Because
+ the entire object file is transiently mapped aboard for
+ inspection, it's always safe to inspect that area. */
+ /* Iterate over section headers (again) */
for (i = 0; i < ehdr_img->e_shnum; i++) {
-# define FIND(sec_name, sec_size, sec_img) \
+# define FINDX(_sec_name, _sec_size, _sec_img, _post_fx) \
do { ElfXX_Shdr* shdr \
= INDEX_BIS( shdr_img, i, shdr_ent_szB ); \
- if (0 == VG_(strcmp)(sec_name, shdr_strtab_img \
- + shdr->sh_name)) { \
+ if (0 == VG_(strcmp)(_sec_name, shdr_strtab_img \
+ + shdr->sh_name)) { \
Bool nobits; \
- sec_img = (void*)(oimage + shdr->sh_offset); \
- sec_size = shdr->sh_size; \
- nobits = shdr->sh_type == SHT_NOBITS; \
+ _sec_img = (void*)(oimage + shdr->sh_offset); \
+ _sec_size = shdr->sh_size; \
+ nobits = shdr->sh_type == SHT_NOBITS; \
TRACE_SYMTAB( "%18s: img %p .. %p\n", \
- sec_name, (UChar*)sec_img, \
- ((UChar*)sec_img) + sec_size - 1); \
+ _sec_name, (UChar*)_sec_img, \
+ ((UChar*)_sec_img) + _sec_size - 1); \
/* SHT_NOBITS sections have zero size in the file. */ \
if ( shdr->sh_offset \
- + (nobits ? 0 : sec_size) > n_oimage ) { \
+ + (nobits ? 0 : _sec_size) > n_oimage ) { \
ML_(symerr)(di, True, \
" section beyond image end?!"); \
goto out; \
} \
+ _post_fx; \
} \
} while (0);
+ /* Version with no post-effects */
+# define FIND(_sec_name, _sec_size, _sec_img) \
+ FINDX(_sec_name, _sec_size, _sec_img, /**/)
+
/* NAME SIZE IMAGE addr */
FIND(".dynsym", dynsym_sz, dynsym_img)
FIND(".dynstr", dynstr_sz, dynstr_img)
FIND(".debug", dwarf1d_sz, dwarf1d_img)
FIND(".line", dwarf1l_sz, dwarf1l_img)
- FIND(".eh_frame", ehframe_sz, ehframe_img)
FIND(".opd", opd_sz_unused, opd_img)
+ FINDX(".eh_frame", ehframe_sz[ehframe_ix],
+ ehframe_img[ehframe_ix],
+ do { ehframe_ix++; vg_assert(ehframe_ix <= N_EHFRAME_SECTS); }
+ while (0)
+ )
+ /* Comment_on_EH_FRAME_MULTIPLE_INSTANCES: w.r.t. .eh_frame
+ multi-instance kludgery, how are we assured that the order
+ in which we fill in ehframe_sz[] and ehframe_img[] is
+ consistent with the order in which we previously filled in
+ di->ehframe_avma[] and di->ehframe_size[] ? By the fact
+ that in both cases, these arrays were filled in by
+ iterating over the section headers top-to-bottom. So both
+ loops (this one and the previous one) encounter the
+ .eh_frame entries in the same order and so fill in these
+ arrays in a consistent order.
+ */
+
+# undef FINDX
# undef FIND
}
False, opd_img);
}
- /* Read .eh_frame and .debug_frame (call-frame-info) if any */
- if (ehframe_img) {
- vg_assert(ehframe_sz == di->ehframe_size);
- ML_(read_callframe_info_dwarf3)( di, ehframe_img, ehframe_sz, True );
+ /* Read .eh_frame and .debug_frame (call-frame-info) if any. Do
+ the .eh_frame section(s) first. */
+ vg_assert(di->n_ehframe >= 0 && di->n_ehframe <= N_EHFRAME_SECTS);
+ for (i = 0; i < di->n_ehframe; i++) {
+ /* see Comment_on_EH_FRAME_MULTIPLE_INSTANCES above for why
+ this next assertion should hold. */
+ vg_assert(ehframe_sz[i] == di->ehframe_size[i]);
+ ML_(read_callframe_info_dwarf3)( di,
+ ehframe_img[i],
+ ehframe_sz[i],
+ di->ehframe_avma[i],
+ True/*is_ehframe*/ );
}
if (debug_frame_sz) {
- ML_(read_callframe_info_dwarf3)( di, debug_frame_img,
- debug_frame_sz, False );
+ ML_(read_callframe_info_dwarf3)( di,
+ debug_frame_img, debug_frame_sz,
+ 0/*assume zero avma*/,
+ False/*!is_ehframe*/ );
}
/* Read the stabs and/or dwarf2 debug information, if any. It