struct perf_sample sample_sw;
struct perf_inject *inject = container_of(tool, struct perf_inject, tool);
u32 pid = evsel__intval(evsel, sample, "pid");
+ int ret;
list_for_each_entry(ent, &inject->samples, node) {
if (pid == ent->tid)
perf_event__synthesize_sample(event_sw, evsel->core.attr.sample_type,
evsel->core.attr.read_format, &sample_sw);
build_id__mark_dso_hit(tool, event_sw, &sample_sw, evsel, machine);
- return perf_event__repipe(tool, event_sw, &sample_sw, machine);
+ ret = perf_event__repipe(tool, event_sw, &sample_sw, machine);
+ perf_sample__exit(&sample_sw);
+ return ret;
}
#endif
size_t hdr_sz = sizeof(*hdr);
ssize_t ret;
+ perf_sample__init(&gs->ev.sample, /*all=*/false);
buf = gs->ev.event_buf;
if (!buf) {
buf = malloc(PERF_SAMPLE_MAX_SIZE);
if (!gs->fetched) {
ret = guest_session__fetch(gs);
if (ret)
- return ret;
+ break;
gs->fetched = true;
}
ev = gs->ev.event;
sample = &gs->ev.sample;
- if (!ev->header.size)
- return 0; /* EOF */
-
- if (sample->time > timestamp)
- return 0;
+ if (!ev->header.size) {
+ /* EOF */
+ perf_sample__exit(&gs->ev.sample);
+ gs->fetched = false;
+ ret = 0;
+ break;
+ }
+ if (sample->time > timestamp) {
+ ret = 0;
+ break;
+ }
/* Change cpumode to guest */
cpumode = ev->header.misc & PERF_RECORD_MISC_CPUMODE_MASK;
if (id_hdr_size & 7) {
pr_err("Bad id_hdr_size %u\n", id_hdr_size);
- return -EINVAL;
+ ret = -EINVAL;
+ break;
}
if (ev->header.size & 7) {
pr_err("Bad event size %u\n", ev->header.size);
- return -EINVAL;
+ ret = -EINVAL;
+ break;
}
/* Remove guest id sample */
if (ev->header.size & 7) {
pr_err("Bad raw event size %u\n", ev->header.size);
- return -EINVAL;
+ ret = -EINVAL;
+ break;
}
guest_id = guest_session__lookup_id(gs, id);
if (!guest_id) {
pr_err("Guest event with unknown id %llu\n",
(unsigned long long)id);
- return -EINVAL;
+ ret = -EINVAL;
+ break;
}
/* Change to host ID to avoid conflicting ID values */
/* New id sample with new ID and CPU */
ret = evlist__append_id_sample(inject->session->evlist, ev, sample);
if (ret)
- return ret;
+ break;
if (ev->header.size & 7) {
pr_err("Bad new event size %u\n", ev->header.size);
- return -EINVAL;
+ ret = -EINVAL;
+ break;
}
- gs->fetched = false;
-
ret = output_bytes(inject, ev, ev->header.size);
if (ret)
- return ret;
+ break;
+
+ /* Reset for next guest session event fetch. */
+ perf_sample__exit(sample);
+ gs->fetched = false;
}
+ if (ret && gs->fetched) {
+ /* Clear saved sample state on error. */
+ perf_sample__exit(&gs->ev.sample);
+ gs->fetched = false;
+ }
+ return ret;
}
static int guest_session__flush_events(struct guest_session *gs)
}
perf_mmap__consume(&md->core);
+ perf_sample__exit(&sample);
}
perf_mmap__read_done(&md->core);
}
if (!sample.time) {
pr_debug("event with no time\n");
+ perf_sample__exit(&sample);
return -1;
}
node->event_time = sample.time;
+ perf_sample__exit(&sample);
return 0;
}
u64 nr_deferred = sample_callchain->callchain->nr;
struct ip_callchain *callchain;
+ if (sample_orig->merged_callchain) {
+ /* Already merged. */
+ return -EINVAL;
+ }
+
if (sample_orig->callchain->nr < 2) {
sample_orig->deferred_callchain = false;
return -EINVAL;
}
callchain = calloc(1 + nr_orig + nr_deferred, sizeof(u64));
- if (callchain == NULL) {
- sample_orig->deferred_callchain = false;
+ if (callchain == NULL)
return -ENOMEM;
- }
callchain->nr = nr_orig + nr_deferred;
/* copy original including PERF_CONTEXT_USER_DEFERRED (but not the cookie) */
memcpy(&callchain->ips[nr_orig], sample_callchain->callchain->ips,
nr_deferred * sizeof(u64));
+ sample_orig->merged_callchain = true;
sample_orig->callchain = callchain;
return 0;
}
struct evsel *evsel = evlist__event2evsel(evlist, event);
int ret;
- if (!evsel)
+ if (!evsel) {
+ /* Ensure the sample is okay for perf_sample__exit. */
+ perf_sample__init(sample, /*all=*/false);
return -EFAULT;
+ }
ret = evsel__parse_sample(evsel, event, sample);
if (ret)
return ret;
#define OVERFLOW_CHECK(offset, size, max_size) \
do { \
if (overflow(endp, (max_size), (offset), (size))) \
- return -EFAULT; \
+ goto out_efault; \
} while (0)
#define OVERFLOW_CHECK_u64(offset) \
data->cgroup = *array;
return 0;
+out_efault:
+ return -EFAULT;
}
int evsel__parse_sample(struct evsel *evsel, union perf_event *event,
*/
union u64_swap u;
- memset(data, 0, sizeof(*data));
+ perf_sample__init(data, /*all=*/true);
data->cpu = data->pid = data->tid = -1;
data->stream_id = data->id = data->time = -1ULL;
data->period = evsel->core.attr.sample_period;
data->callchain = (struct ip_callchain *)&event->callchain_deferred.nr;
if (data->callchain->nr > max_callchain_nr)
- return -EFAULT;
+ goto out_efault;
data->deferred_cookie = event->callchain_deferred.cookie;
if (evsel->core.attr.sample_id_all)
perf_evsel__parse_id_sample(evsel, event, data);
+
return 0;
}
if (event->header.type != PERF_RECORD_SAMPLE) {
- if (!evsel->core.attr.sample_id_all)
- return 0;
- return perf_evsel__parse_id_sample(evsel, event, data);
+ if (evsel->core.attr.sample_id_all)
+ perf_evsel__parse_id_sample(evsel, event, data);
+ return 0;
}
array = event->sample.array;
if (perf_event__check_size(event, evsel->sample_size))
- return -EFAULT;
+ goto out_efault;
if (type & PERF_SAMPLE_IDENTIFIER) {
data->id = *array;
sizeof(struct sample_read_value);
if (data->read.group.nr > max_group_nr)
- return -EFAULT;
+ goto out_efault;
sz = data->read.group.nr * sample_read_value_size(read_format);
OVERFLOW_CHECK(array, sz, max_size);
data->callchain = (struct ip_callchain *)array++;
callchain_nr = data->callchain->nr;
if (callchain_nr > max_callchain_nr)
- return -EFAULT;
+ goto out_efault;
sz = callchain_nr * sizeof(u64);
/*
* Save the cookie for the deferred user callchain. The last 2
data->branch_stack = (struct branch_stack *)array++;
if (data->branch_stack->nr > max_branch_nr)
- return -EFAULT;
+ goto out_efault;
sz = data->branch_stack->nr * sizeof(struct branch_entry);
if (evsel__has_branch_hw_idx(evsel)) {
data->user_stack.size = *array++;
if (WARN_ONCE(data->user_stack.size > sz,
"user stack dump failure\n"))
- return -EFAULT;
+ goto out_efault;
}
}
array = (void *)array + sz;
}
- if (evsel__is_offcpu_event(evsel))
- return __set_offcpu_sample(data);
+ if (evsel__is_offcpu_event(evsel)) {
+ if (__set_offcpu_sample(data))
+ goto out_efault;
+ }
return 0;
+out_efault:
+ perf_sample__exit(data);
+ return -EFAULT;
}
int evsel__parse_sample_timestamp(struct evsel *evsel, union perf_event *event,
{
struct perf_sample sample;
union perf_event event;
+ int ret;
+ perf_sample__init(&sample, /*all=*/true);
sample.ip = be64_to_cpu(record->srr0);
sample.period = 1;
sample.cpu = cpu;
event.sample.header.misc = sample.cpumode;
event.sample.header.size = sizeof(struct perf_event_header);
- if (perf_session__deliver_synth_event(vpa->session, &event, &sample)) {
+ ret = perf_session__deliver_synth_event(vpa->session, &event, &sample);
+ if (ret)
pr_debug("Failed to create sample for dtl entry\n");
- return -1;
- }
- return 0;
+ perf_sample__exit(&sample);
+ return ret;
}
static int powerpc_vpadtl_get_buffer(struct powerpc_vpadtl_queue *vpaq)
} else {
sample->user_regs = NULL;
sample->intr_regs = NULL;
+ sample->merged_callchain = false;
+ sample->callchain = NULL;
}
}
void perf_sample__exit(struct perf_sample *sample)
{
- free(sample->user_regs);
- free(sample->intr_regs);
+ zfree(&sample->user_regs);
+ zfree(&sample->intr_regs);
+ if (sample->merged_callchain) {
+ zfree(&sample->callchain);
+ sample->merged_callchain = false;
+ }
}
struct regs_dump *perf_sample__user_regs(struct perf_sample *sample)
* intel-pt. The instruction itself is held in insn.
*/
u16 insn_len;
- /**
- * @cpumode: The cpumode from struct perf_event_header misc variable
- * masked with CPUMODE_MASK. Gives user, kernel and hypervisor
- * information.
- */
- u8 cpumode;
/** @misc: The entire struct perf_event_header misc variable. */
u16 misc;
/**
* powerpc holds p_stage_cyc.
*/
u16 weight3;
+ /**
+ * @cpumode: The cpumode from struct perf_event_header misc variable
+ * masked with CPUMODE_MASK. Gives user, kernel and hypervisor
+ * information.
+ */
+ u8 cpumode;
/**
* @no_hw_idx: For PERF_SAMPLE_BRANCH_STACK, true when
* PERF_SAMPLE_BRANCH_HW_INDEX isn't set.
* user callchain marker was encountered.
*/
bool deferred_callchain;
+ /**
+ * @merged_callchain: A synthesized merged callchain that is allocated
+ * and needs freeing.
+ */
+ bool merged_callchain;
/**
* @deferred_cookie: Identifier of the deferred callchain in the later
* PERF_RECORD_CALLCHAIN_DEFERRED event.
list_for_each_entry_safe(de, tmp, &evlist->deferred_samples, list) {
struct perf_sample orig_sample;
+ perf_sample__init(&orig_sample, /*all=*/false);
ret = evlist__parse_sample(evlist, de->event, &orig_sample);
if (ret < 0) {
pr_err("failed to parse original sample\n");
+ perf_sample__exit(&orig_sample);
break;
}
- if (sample->tid != orig_sample.tid)
+ if (sample->tid != orig_sample.tid) {
+ perf_sample__exit(&orig_sample);
continue;
+ }
if (event->callchain_deferred.cookie == orig_sample.deferred_cookie)
sample__merge_deferred_callchain(&orig_sample, sample);
ret = evlist__deliver_sample(evlist, tool, de->event,
&orig_sample, evsel, machine);
- if (orig_sample.deferred_callchain)
- free(orig_sample.callchain);
-
+ perf_sample__exit(&orig_sample);
list_del(&de->list);
free(de->event);
free(de);
list_for_each_entry_safe(de, tmp, &evlist->deferred_samples, list) {
struct perf_sample sample;
+ perf_sample__init(&sample, /*all=*/false);
ret = evlist__parse_sample(evlist, de->event, &sample);
if (ret < 0) {
pr_err("failed to parse original sample\n");
+ perf_sample__exit(&sample);
break;
}
ret = evlist__deliver_sample(evlist, tool, de->event,
&sample, evsel, machine);
+ perf_sample__exit(&sample);
list_del(&de->list);
free(de->event);
free(de);