case HEADER_HYBRID_TOPOLOGY:
case HEADER_PMU_CAPS:
case HEADER_CPU_DOMAIN_INFO:
+ case HEADER_CLN_SIZE:
return true;
/* Information that can be updated */
case HEADER_BUILD_ID:
struct cpu_cache_level *caches;
struct cpu_domain_map **cpu_domain;
int caches_cnt;
+ unsigned int cln_size;
u32 comp_ratio;
u32 comp_ver;
u32 comp_type;
#include "bpf-event.h"
#include "bpf-utils.h"
#include "clockid.h"
+#include "cacheline.h"
#include <linux/ctype.h>
#include <internal/lib.h>
return ret;
}
+/*
+ * Write the system cacheline size (in bytes) as a header feature.
+ * Falls back to DEFAULT_CACHELINE_SIZE when cacheline_size() reports 0,
+ * and mirrors the value into the in-memory env so later printing and
+ * processing see the same value that went on disk.
+ */
+static int write_cln_size(struct feat_fd *ff,
+			  struct evlist *evlist __maybe_unused)
+{
+	/*
+	 * Use u32 to match do_read_u32() in process_cln_size() and keep
+	 * the on-disk format independent of the host's sizeof(int).
+	 */
+	u32 cln_size = cacheline_size();
+
+	if (!cln_size)
+		cln_size = DEFAULT_CACHELINE_SIZE;
+
+	ff->ph->env.cln_size = cln_size;
+
+	return do_write(ff, &cln_size, sizeof(cln_size));
+}
+
static int write_stat(struct feat_fd *ff __maybe_unused,
struct evlist *evlist __maybe_unused)
{
}
}
+/* Dump the recorded cacheline size when printing the perf.data header. */
+static void print_cln_size(struct feat_fd *ff, FILE *fp)
+{
+	struct perf_env *env = &ff->ph->env;
+
+	fprintf(fp, "# cacheline size: %u\n", env->cln_size);
+}
+
static void print_compressed(struct feat_fd *ff, FILE *fp)
{
fprintf(fp, "# compressed : %s, level = %d, ratio = %d\n",
return -1;
}
+/*
+ * Read the CLN_SIZE feature back from the perf.data header into the
+ * session environment.  Returns 0 on success, -1 on read error.
+ */
+static int process_cln_size(struct feat_fd *ff, void *data __maybe_unused)
+{
+	return do_read_u32(ff, &ff->ph->env.cln_size) ? -1 : 0;
+}
+
static int process_sample_time(struct feat_fd *ff, void *data __maybe_unused)
{
struct perf_session *session;
FEAT_OPR(PMU_CAPS, pmu_caps, false),
FEAT_OPR(CPU_DOMAIN_INFO, cpu_domain_info, true),
FEAT_OPR(E_MACHINE, e_machine, false),
+ FEAT_OPR(CLN_SIZE, cln_size, false),
};
struct header_print_data {
HEADER_PMU_CAPS,
HEADER_CPU_DOMAIN_INFO,
HEADER_E_MACHINE,
+ HEADER_CLN_SIZE,
HEADER_LAST_FEATURE,
HEADER_FEAT_BITS = 256,
};
int build_caches_for_cpu(u32 cpu, struct cpu_cache_level caches[], u32 *cntp);
+#define DEFAULT_CACHELINE_SIZE 64
+
/*
* arch specific callback
*/
#include "time-utils.h"
#include "cgroup.h"
#include "machine.h"
+#include "session.h"
#include "trace-event.h"
#include <linux/kernel.h>
#include <linux/string.h>
/* --sort typecln */
-#define DEFAULT_CACHELINE_SIZE 64
+/*
+ * Cacheline size to use for this hist entry, taken from the perf.data
+ * header of the session the entry's evsel belongs to.  Falls back to
+ * DEFAULT_CACHELINE_SIZE when the session is unavailable or recorded a
+ * value < 1, so callers can safely divide by the result.
+ */
+static int
+hist_entry__cln_size(struct hist_entry *he)
+{
+	int ret = 0;
+
+	if (he && he->hists) {
+		struct evsel *evsel = hists_to_evsel(he->hists);
+
+		if (evsel) {
+			struct perf_session *session = evsel__session(evsel);
+
+			/* NOTE(review): evsel__session() can presumably
+			 * return NULL (no evlist) — guard before deref.
+			 */
+			if (session)
+				ret = session->header.env.cln_size;
+		}
+	}
+
+	/* Guarantee a positive divisor to avoid division by zero. */
+	if (ret < 1)
+		ret = DEFAULT_CACHELINE_SIZE;
+
+	return ret;
+}
static int64_t
sort__typecln_sort(struct hist_entry *left, struct hist_entry *right)
struct annotated_data_type *left_type = left->mem_type;
struct annotated_data_type *right_type = right->mem_type;
int64_t left_cln, right_cln;
+ int64_t cln_size_left = hist_entry__cln_size(left);
+ int64_t cln_size_right = hist_entry__cln_size(right);
int64_t ret;
- int cln_size = cacheline_size();
-
- if (cln_size == 0)
- cln_size = DEFAULT_CACHELINE_SIZE;
if (!left_type) {
sort__type_init(left);
if (ret)
return ret;
- left_cln = left->mem_type_off / cln_size;
- right_cln = right->mem_type_off / cln_size;
+ left_cln = left->mem_type_off / cln_size_left;
+ right_cln = right->mem_type_off / cln_size_right;
return left_cln - right_cln;
}
size_t size, unsigned int width __maybe_unused)
{
struct annotated_data_type *he_type = he->mem_type;
- int cln_size = cacheline_size();
-
- if (cln_size == 0)
- cln_size = DEFAULT_CACHELINE_SIZE;
+ int cln_size = hist_entry__cln_size(he);
return repsep_snprintf(bf, size, "%s: cache-line %d", he_type->self.type_name,
he->mem_type_off / cln_size);