~PerfReader();
- void process_some(); // run briefly, relay decoded perf_events to consumer
+ void process_some(); // run briefly, relay decoded perf_events to consumer
+ int n_sample_regs() { return this->sample_regs_count; }
};
junk.pid = getpid();
usc->process(& junk);
}
+ void process(const perf_event_header* sample); // handle process lifecycle events; relay unwound call stack events to a consumer
};
}
+////////////////////////////////////////////////////////////////////////
+// perf_events data format
+
+class PerfReader;
+
+/* TODO: Keep the perf_event_attr sample_type configured at
+   perf_event_open time in sync with PERF_SAMPLE_TYPE, and keep the
+   PerfSample layout below matching these bits in ascending order. */
+#define PERF_SAMPLE_TYPE (PERF_SAMPLE_IP | PERF_SAMPLE_TID | PERF_SAMPLE_TIME \
+ | PERF_SAMPLE_REGS_USER | PERF_SAMPLE_STACK_USER)
+/* Decoded layout of one PERF_RECORD_SAMPLE record as produced with
+   PERF_SAMPLE_TYPE above.  Field order must follow the ascending
+   PERF_SAMPLE_* bit order: IP, TID, TIME, REGS_USER, STACK_USER.
+   The trailing size/data pair of PERF_SAMPLE_STACK_USER sits after the
+   variable-length regs[] array, so it cannot be declared here; use
+   perf_sample_get_size()/perf_sample_get_data() to reach it.  */
+typedef struct
+{
+  struct perf_event_header header;
+  uint64_t ip;
+  uint32_t pid, tid;
+  uint64_t time;
+  uint64_t abi;
+  uint64_t regs[]; /* variable size: one slot per bit in sample_regs_user */
+  /* uint64_t size; */
+  /* char data[]; -- variable size */
+} PerfSample;
+
+/* Return the byte count of the user-stack dump carried in SAMPLE.
+   The uint64_t size field sits immediately after the variable-length
+   regs[] array, so its offset depends on how many registers READER
+   was configured to capture.  */
+uint64_t
+perf_sample_get_size (PerfReader *reader, PerfSample *sample)
+{
+  uint64_t *size_field = sample->regs + reader->n_sample_regs();
+  return *size_field;
+}
+
+/* Return a pointer to the raw user-stack dump bytes in SAMPLE.  They
+   start one uint64_t past the regs[] array: that slot holds the dump's
+   size (see perf_sample_get_size).  */
+char *
+perf_sample_get_data (PerfReader *reader, PerfSample *sample)
+{
+  uint64_t *size_field = sample->regs + reader->n_sample_regs();
+  return (char *)(size_field + 1);
+}
+
////////////////////////////////////////////////////////////////////////
// perf reader
this->mmap_size = this->page_size * (this->page_count + 1); // total mmap size, incl header page
this->event_wraparound_temp.resize(this->mmap_size); // NB: never resize this object again!
- Ebl *default_ebl = ebl_openbackend_machine(EM_X86_64); // XXX
+ Ebl *default_ebl = ebl_openbackend_machine(EM_X86_64); /* TODO: Generalize to architectures beyond x86. */
this->sample_regs_user = ebl_perf_frame_regs_mask (default_ebl);
this->sample_regs_count = bitset<64>(this->sample_regs_user).count();
}
+
+// Load the kernel-written producer head of the perf mmap ring buffer.
+// This must be an acquire load so that our subsequent reads of the
+// ring-buffer data cannot be reordered before the head load (the kernel
+// pairs this with a release-style publish of data_head).  A plain load
+// plus compiler barrier only restrains the compiler and is safe solely
+// on strongly-ordered CPUs such as x86; __atomic_load_n also covers
+// weakly-ordered architectures, matching the kernel's own
+// ring_buffer_read_head() helper.
+static inline uint64_t
+ring_buffer_read_head(volatile struct perf_event_mmap_page *base)
+{
+  return __atomic_load_n(&base->data_head, __ATOMIC_ACQUIRE);
+}
+
+// Publish the consumer position back to the kernel.  The release store
+// guarantees all our reads of the just-consumed records complete before
+// the kernel can observe the new tail and reuse that space.  This
+// replaces a plain store preceded by a compiler-only barrier, which
+// provided no ordering on weakly-ordered hardware; it matches the
+// kernel's own ring_buffer_write_tail() helper.
+static inline void
+ring_buffer_write_tail(volatile struct perf_event_mmap_page *base,
+                       uint64_t tail)
+{
+  __atomic_store_n(&base->data_tail, tail, __ATOMIC_RELEASE);
+}
+
void PerfReader::process_some()
{
if (! this->enabled)
if (this->pollfds[i].revents & POLLIN) // found an fd with fresh yummy events
{
perf_event_mmap_page *header = perf_headers[i];
- uint64_t data_head = header->data_head;
- asm volatile("" ::: "memory"); // memory fence
+ uint64_t data_head = ring_buffer_read_head(header);
uint64_t data_tail = header->data_tail;
uint8_t *base = ((uint8_t *) header) + this->page_size;
struct perf_event_header *ehdr;
if (verbose > 3)
clog << "perf head=" << (void*) data_head
<< " tail=" << (void*) data_tail
- << " ehdr=" << (void*) ehdr << " size=" << ehdr_size << endl;
+ << " ehdr=" << (void*) ehdr
+ << " size=" << setbase(10) << ehdr_size << setbase(16) << endl;
if (((uint8_t *)ehdr) + ehdr_size > base + ring_buffer_size) // mmap region wraparound?
{
data_tail += ehdr_size;
}
- asm volatile("" ::: "memory"); // memory fence
- header->data_tail = data_tail;
+ ring_buffer_write_tail(header, data_tail);
}
}
}
this->event_type_counts[ehdr->type] ++;
}
-
+// Consume one perf event record.  Per the declaration's comment this is
+// meant to handle process lifecycle events and relay unwound call-stack
+// events to a consumer, but the body is currently an unimplemented
+// no-op: every record passed in is silently dropped.
+// TODO(review): implement, or document why discarding here is intended.
+void PerfConsumerUnwinder::process(const perf_event_header* ehdr)
+{
+}
////////////////////////////////////////////////////////////////////////
-// unwind consumers // gprof
+// UNWIND consumers // gprof
UnwindStatsConsumer::~UnwindStatsConsumer()