symbol_conf.bt_stop_list, fp);
}
- if (PRINT_FIELD(IREGS))
- perf_sample__fprintf_iregs(sample, attr, thread__e_machine(thread, machine), fp);
+ if (PRINT_FIELD(IREGS)) {
+ perf_sample__fprintf_iregs(sample, attr,
+ thread__e_machine(thread, machine, /*e_flags=*/NULL),
+ fp);
+ }
- if (PRINT_FIELD(UREGS))
- perf_sample__fprintf_uregs(sample, attr, thread__e_machine(thread, machine), fp);
+ if (PRINT_FIELD(UREGS)) {
+ perf_sample__fprintf_uregs(sample, attr,
+ thread__e_machine(thread, machine, /*e_flags=*/NULL),
+ fp);
+ }
if (PRINT_FIELD(BRSTACK))
perf_sample__fprintf_brstack(sample, thread, evsel, fp);
struct thread_trace *ttrace;
thread = machine__findnew_thread(trace->host, sample->pid, sample->tid);
- e_machine = thread__e_machine(thread, trace->host);
+ e_machine = thread__e_machine(thread, trace->host, /*e_flags=*/NULL);
sc = trace__syscall_info(trace, evsel, e_machine, id);
if (sc == NULL)
goto out_put;
thread = machine__findnew_thread(trace->host, sample->pid, sample->tid);
- e_machine = thread__e_machine(thread, trace->host);
+ e_machine = thread__e_machine(thread, trace->host, /*e_flags=*/NULL);
sc = trace__syscall_info(trace, evsel, e_machine, id);
if (sc == NULL)
goto out_put;
struct thread_trace *ttrace;
thread = machine__findnew_thread(trace->host, sample->pid, sample->tid);
- e_machine = thread__e_machine(thread, trace->host);
+ e_machine = thread__e_machine(thread, trace->host, /*e_flags=*/NULL);
sc = trace__syscall_info(trace, evsel, e_machine, id);
if (sc == NULL)
goto out_put;
if (evsel == trace->syscalls.events.bpf_output) {
int id = perf_evsel__sc_tp_uint(evsel, id, sample);
- int e_machine = thread ? thread__e_machine(thread, trace->host) : EM_HOST;
+ int e_machine = thread
+ ? thread__e_machine(thread, trace->host, /*e_flags=*/NULL)
+ : EM_HOST;
struct syscall *sc = trace__syscall_info(trace, evsel, e_machine, id);
if (sc) {
{
size_t printed = 0;
struct thread_trace *ttrace = thread__priv(thread);
- int e_machine = thread__e_machine(thread, trace->host);
+ int e_machine = thread__e_machine(thread, trace->host, /*e_flags=*/NULL);
double ratio;
if (ttrace == NULL)
{
const struct arch *arch;
struct machine *machine;
+ uint32_t e_flags;
uint16_t e_machine;
if (!thread) {
}
machine = maps__machine(thread__maps(thread));
- e_machine = thread__e_machine(thread, machine);
- arch = arch__find(e_machine, machine->env ? machine->env->cpuid : NULL);
+ e_machine = thread__e_machine(thread, machine, &e_flags);
+ arch = arch__find(e_machine, e_flags, machine->env ? machine->env->cpuid : NULL);
if (arch == NULL) {
pr_err("%s: unsupported arch %d\n", __func__, e_machine);
return errno;
return e_machine_and_eflags__cmp(&(*aa)->id, &(*ab)->id);
}
-const struct arch *arch__find(uint16_t e_machine, const char *cpuid)
+const struct arch *arch__find(uint16_t e_machine, uint32_t e_flags, const char *cpuid)
{
static const struct arch *(*const arch_new_fn[])(const struct e_machine_and_e_flags *id,
const char *cpuid) = {
static size_t num_archs;
struct e_machine_and_e_flags key = {
.e_machine = e_machine,
- // TODO: e_flags should really come from the same source as e_machine.
- .e_flags = EF_HOST,
+ .e_flags = e_flags,
};
const struct arch *result = NULL, **tmp;
char *fileloc;
};
-const struct arch *arch__find(uint16_t e_machine, const char *cpuid);
+const struct arch *arch__find(uint16_t e_machine, uint32_t e_flags, const char *cpuid);
bool arch__is_x86(const struct arch *arch);
bool arch__is_powerpc(const struct arch *arch);
}
-/* Reads e_machine from fd, optionally caching data in dso. */
-uint16_t dso__read_e_machine(struct dso *optional_dso, int fd)
+/*
+ * Reads e_machine from fd, optionally caching data in dso.
+ *
+ * If e_flags is non-NULL, the ELF header's e_flags field is read into it as
+ * well; *e_flags is zeroed up front so it is well-defined on every failure
+ * path. Returns EM_NONE on any read/validation failure.
+ */
+uint16_t dso__read_e_machine(struct dso *optional_dso, int fd, uint32_t *e_flags)
{
uint16_t e_machine = EM_NONE;
unsigned char e_ident[EI_NIDENT];
enum dso_swap_type swap_type;
+ bool need_e_flags;
- _Static_assert(offsetof(Elf32_Ehdr, e_ident) == 0, "Unexpected offset");
- _Static_assert(offsetof(Elf64_Ehdr, e_ident) == 0, "Unexpected offset");
+ if (e_flags)
+ *e_flags = 0;
+
+ {
+ _Static_assert(offsetof(Elf32_Ehdr, e_ident) == 0, "Unexpected offset");
+ _Static_assert(offsetof(Elf64_Ehdr, e_ident) == 0, "Unexpected offset");
+ }
if (pread(fd, &e_ident, sizeof(e_ident), 0) != sizeof(e_ident))
return EM_NONE; // Read failed.
{
_Static_assert(offsetof(Elf32_Ehdr, e_machine) == 18, "Unexpected offset");
_Static_assert(offsetof(Elf64_Ehdr, e_machine) == 18, "Unexpected offset");
}
+ if (pread(fd, &e_machine, sizeof(e_machine), 18) != sizeof(e_machine))
+ return EM_NONE; // e_machine read failed.
e_machine = DSO_SWAP_TYPE__SWAP(swap_type, uint16_t, e_machine);
if (e_machine >= EM_NUM)
return EM_NONE; // Bad ELF machine number.
+#ifdef NDEBUG
+ /* In production code the e_flags are only needed on CSKY. */
+ need_e_flags = e_flags && e_machine == EM_CSKY;
+#else
+ /* Debug code will always read the e_flags. */
+ need_e_flags = e_flags != NULL;
+#endif
+ if (need_e_flags) {
+ off_t offset = e_ident[EI_CLASS] == ELFCLASS32
+ ? offsetof(Elf32_Ehdr, e_flags)
+ : offsetof(Elf64_Ehdr, e_flags);
+
+ if (pread(fd, e_flags, sizeof(*e_flags), offset) != sizeof(*e_flags)) {
+ *e_flags = 0;
+ return EM_NONE; // e_flags read failed.
+ }
+ /*
+ * e_flags is stored in the file's byte order; swap it the same
+ * way e_machine is swapped above, or a cross-endian ELF would
+ * yield garbage flags.
+ */
+ *e_flags = DSO_SWAP_TYPE__SWAP(swap_type, uint32_t, *e_flags);
+ }
return e_machine;
}
-uint16_t dso__e_machine(struct dso *dso, struct machine *machine)
+uint16_t dso__e_machine(struct dso *dso, struct machine *machine, uint32_t *e_flags)
{
uint16_t e_machine = EM_NONE;
int fd;
case DSO_BINARY_TYPE__BPF_IMAGE:
case DSO_BINARY_TYPE__OOL:
case DSO_BINARY_TYPE__JAVA_JIT:
+ if (e_flags)
+ *e_flags = EF_HOST;
return EM_HOST;
case DSO_BINARY_TYPE__DEBUGLINK:
case DSO_BINARY_TYPE__BUILD_ID_CACHE:
break;
case DSO_BINARY_TYPE__NOT_FOUND:
default:
+ if (e_flags)
+ *e_flags = 0;
return EM_NONE;
}
try_to_open_dso(dso, machine);
fd = dso__data(dso)->fd;
if (fd >= 0)
- e_machine = dso__read_e_machine(dso, fd);
+ e_machine = dso__read_e_machine(dso, fd, e_flags);
+ else if (e_flags)
+ *e_flags = 0;
mutex_unlock(dso__data_open_lock());
return e_machine;
off_t dso__data_size(struct dso *dso, struct machine *machine);
ssize_t dso__data_read_offset(struct dso *dso, struct machine *machine,
u64 offset, u8 *data, ssize_t size);
-uint16_t dso__read_e_machine(struct dso *optional_dso, int fd);
-uint16_t dso__e_machine(struct dso *dso, struct machine *machine);
+uint16_t dso__read_e_machine(struct dso *optional_dso, int fd, uint32_t *e_flags);
+uint16_t dso__e_machine(struct dso *dso, struct machine *machine, uint32_t *e_flags);
ssize_t dso__data_read_addr(struct dso *dso, struct map *map,
struct machine *machine, u64 addr,
u8 *data, ssize_t size);
if (al->thread) {
machine = maps__machine(thread__maps(al->thread));
- e_machine = thread__e_machine(al->thread, machine);
+ e_machine = thread__e_machine(al->thread, machine, /*e_flags=*/NULL);
}
if (set_regs_in_dict(dict, sample, evsel, e_machine))
Py_FatalError("Failed to setting regs in dict");
if (sample_type & (PERF_SAMPLE_REGS_USER | PERF_SAMPLE_REGS_INTR)) {
struct thread *thread = machine__find_thread(machine, sample->pid, sample->pid);
- e_machine = thread__e_machine(thread, machine);
+ e_machine = thread__e_machine(thread, machine, /*e_flags=*/NULL);
}
printf("(IP, 0x%x): %d/%d: %#" PRIx64 " period: %" PRIu64 " addr: %#" PRIx64 "\n",
uint16_t *result = arg;
struct machine *machine = maps__machine(thread__maps(thread));
- *result = thread__e_machine(thread, machine);
+ *result = thread__e_machine(thread, machine, /*e_flags=*/NULL);
return *result != EM_NONE ? 1 : 0;
}
}
}
-static uint16_t read_proc_e_machine_for_pid(pid_t pid)
+static uint16_t read_proc_e_machine_for_pid(pid_t pid, uint32_t *e_flags)
{
char path[6 /* "/proc/" */ + 11 /* max length of pid */ + 5 /* "/exe\0" */];
int fd;
snprintf(path, sizeof(path), "/proc/%d/exe", pid);
fd = open(path, O_RDONLY);
if (fd >= 0) {
- e_machine = dso__read_e_machine(/*optional_dso=*/NULL, fd);
+ e_machine = dso__read_e_machine(/*optional_dso=*/NULL, fd, e_flags);
close(fd);
}
return e_machine;
}
-static int thread__e_machine_callback(struct map *map, void *machine)
+/*
+ * In/out state for the maps__for_each_map() walk: machine is the lookup
+ * context; e_machine/e_flags receive the values of the first dso that
+ * reports a known machine.
+ */
+struct thread__e_machine_callback_args {
+ struct machine *machine;
+ uint32_t e_flags;
+ uint16_t e_machine;
+};
+
+/* Per-map callback: stops the walk once a dso yields a known e_machine. */
+static int thread__e_machine_callback(struct map *map, void *_args)
{
+ struct thread__e_machine_callback_args *args = _args;
struct dso *dso = map__dso(map);
- _Static_assert(0 == EM_NONE, "Unexpected EM_NONE");
if (!dso)
- return EM_NONE;
+ return 0; // No dso, continue search.
- return dso__e_machine(dso, machine);
+ args->e_machine = dso__e_machine(dso, args->machine, &args->e_flags);
+ return args->e_machine != EM_NONE ? 1 /* stop search */ : 0 /* continue search */;
}
-uint16_t thread__e_machine(struct thread *thread, struct machine *machine)
+uint16_t thread__e_machine(struct thread *thread, struct machine *machine, uint32_t *e_flags)
{
pid_t tid, pid;
uint16_t e_machine = RC_CHK_ACCESS(thread)->e_machine;
+ uint32_t local_e_flags = 0;
+ struct thread__e_machine_callback_args args = {
+ .machine = machine,
+ .e_flags = 0,
+ .e_machine = EM_NONE,
+ };
- if (e_machine != EM_NONE)
+ if (e_machine != EM_NONE) {
+ if (e_flags)
+ *e_flags = thread__e_flags(thread);
return e_machine;
+ }
tid = thread__tid(thread);
pid = thread__pid(thread);
struct thread *parent = machine__findnew_thread(machine, pid, pid);
if (parent) {
- e_machine = thread__e_machine(parent, machine);
+ e_machine = thread__e_machine(parent, machine, &local_e_flags);
thread__put(parent);
- thread__set_e_machine(thread, e_machine);
- return e_machine;
+ goto out;
}
/* Something went wrong, fallback. */
}
/* Reading on the PID thread. First try to find from the maps. */
- e_machine = maps__for_each_map(thread__maps(thread),
- thread__e_machine_callback,
- machine);
- if (e_machine == EM_NONE) {
+ maps__for_each_map(thread__maps(thread), thread__e_machine_callback, &args);
+
+ if (args.e_machine != EM_NONE) {
+ e_machine = args.e_machine;
+ local_e_flags = args.e_flags;
+ } else {
/* Maps failed, perhaps we're live with map events disabled. */
bool is_live = machine->machines == NULL;
}
/* Read from /proc/pid/exe if live. */
if (is_live)
- e_machine = read_proc_e_machine_for_pid(pid);
+ e_machine = read_proc_e_machine_for_pid(pid, &local_e_flags);
}
- if (e_machine != EM_NONE)
+out:
+ if (e_machine != EM_NONE) {
thread__set_e_machine(thread, e_machine);
- else
+ thread__set_e_flags(thread, local_e_flags);
+ } else {
e_machine = EM_HOST;
+ local_e_flags = EF_HOST;
+ }
+ if (e_flags)
+ *e_flags = local_e_flags;
return e_machine;
}
struct srccode_state srccode_state;
bool filter;
int filter_entry_depth;
+ /**
+ * @e_flags: The ELF EF_* associated with the thread. ELF e_flags is a
+ * 32-bit field, and e.g. the CSKY ABI flags live in the high bits
+ * (EF_CSKY_ABIV1/V2 = 0x10000000/0x20000000), so a narrower type would
+ * silently truncate them. Valid if e_machine != EM_NONE.
+ */
+ uint32_t e_flags;
/**
* @e_machine: The ELF EM_* associated with the thread. EM_NONE if not
* computed.
RC_CHK_ACCESS(thread)->filter_entry_depth = depth;
}
-uint16_t thread__e_machine(struct thread *thread, struct machine *machine);
+uint16_t thread__e_machine(struct thread *thread, struct machine *machine, uint32_t *e_flags);
static inline void thread__set_e_machine(struct thread *thread, uint16_t e_machine)
{
RC_CHK_ACCESS(thread)->e_machine = e_machine;
}
+/*
+ * Returns the cached ELF e_flags; only meaningful when the thread's
+ * e_machine is not EM_NONE.
+ */
+static inline uint32_t thread__e_flags(const struct thread *thread)
+{
+ return RC_CHK_ACCESS(thread)->e_flags;
+}
+
+/* Caches the ELF e_flags for the thread; set together with e_machine. */
+static inline void thread__set_e_flags(struct thread *thread, uint32_t e_flags)
+{
+ RC_CHK_ACCESS(thread)->e_flags = e_flags;
+}
+
static inline bool thread__lbr_stitch_enable(const struct thread *thread)
{
{
struct dwfl_ui_thread_info *dwfl_ui_ti = arg;
struct unwind_info *ui = dwfl_ui_ti->ui;
- uint16_t e_machine = thread__e_machine(ui->thread, ui->machine);
+ uint16_t e_machine = thread__e_machine(ui->thread, ui->machine, /*e_flags=*/NULL);
struct stack_dump *stack = &ui->sample->user_stack;
u64 start, end;
int offset;
{
struct maps *maps = thread__maps(thread);
struct machine *machine = maps__machine(maps);
- uint16_t e_machine = thread__e_machine(thread, machine);
+ uint16_t e_machine = thread__e_machine(thread, machine, /*e_flags=*/NULL);
struct dwfl_ui_thread_info *dwfl_ui_ti;
static struct unwind_info *ui;
Dwfl *dwfl;