The resctrl file system layer passes the domain, RMID, and event id to the
architecture to fetch an event counter.
Fetching a telemetry event counter requires additional information that is
private to the architecture, for example, the offset into MMIO space from
where the counter should be read.
Add mon_evt::arch_priv that the architecture can use for any private data
related to the event. The resctrl filesystem initializes mon_evt::arch_priv
when the architecture enables the event and passes it back to the
architecture when fetching an event counter.
Suggested-by: Reinette Chatre <reinette.chatre@intel.com>
Signed-off-by: Tony Luck <tony.luck@intel.com>
Signed-off-by: Borislav Petkov (AMD) <bp@alien8.de>
Reviewed-by: Reinette Chatre <reinette.chatre@intel.com>
Link: https://lore.kernel.org/20251217172121.12030-1-tony.luck@intel.com
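Not part of the patch itself, but as a rough sketch of the intended use of
arch_priv (the structure, offset, any_cpu and binary_bits values below are
purely illustrative, not taken from any real driver):

/* Purely illustrative sketch of how an architecture might use arch_priv. */
struct example_evt_priv {
	unsigned int mmio_offset;	/* made-up: where this counter lives in MMIO space */
};

static struct example_evt_priv example_uops_priv = {
	.mmio_offset = 0x120,		/* made-up offset */
};

static void example_enable_telemetry_event(void)
{
	/*
	 * The filesystem stores the pointer in mon_evt::arch_priv;
	 * the any_cpu and binary_bits arguments here are illustrative only.
	 */
	resctrl_enable_mon_event(PMT_EVENT_UOPS_RETIRED, true, 18,
				 &example_uops_priv);
}

When a counter is later read, the filesystem passes
mon_event_all[eventid].arch_priv back as the new arch_priv argument of
resctrl_arch_rmid_read(), so the architecture can cast it back to its
private type and locate the counter.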
bool ret = false;
if (rdt_cpu_has(X86_FEATURE_CQM_OCCUP_LLC)) {
- resctrl_enable_mon_event(QOS_L3_OCCUP_EVENT_ID, false, 0);
+ resctrl_enable_mon_event(QOS_L3_OCCUP_EVENT_ID, false, 0, NULL);
ret = true;
}
if (rdt_cpu_has(X86_FEATURE_CQM_MBM_TOTAL)) {
- resctrl_enable_mon_event(QOS_L3_MBM_TOTAL_EVENT_ID, false, 0);
+ resctrl_enable_mon_event(QOS_L3_MBM_TOTAL_EVENT_ID, false, 0, NULL);
ret = true;
}
if (rdt_cpu_has(X86_FEATURE_CQM_MBM_LOCAL)) {
- resctrl_enable_mon_event(QOS_L3_MBM_LOCAL_EVENT_ID, false, 0);
+ resctrl_enable_mon_event(QOS_L3_MBM_LOCAL_EVENT_ID, false, 0, NULL);
ret = true;
}
if (rdt_cpu_has(X86_FEATURE_ABMC))
int resctrl_arch_rmid_read(struct rdt_resource *r, struct rdt_domain_hdr *hdr,
u32 unused, u32 rmid, enum resctrl_event_id eventid,
- u64 *val, void *ignored)
+ void *arch_priv, u64 *val, void *ignored)
{
struct rdt_hw_l3_mon_domain *hw_dom;
struct rdt_l3_mon_domain *d;
* @binary_bits: number of fixed-point binary bits from architecture,
* only valid if @is_floating_point is true
* @enabled: true if the event is enabled
+ * @arch_priv: Architecture private data for this event,
+ * provided by the architecture via
+ * resctrl_enable_mon_event().
*/
struct mon_evt {
enum resctrl_event_id evtid;
bool is_floating_point;
unsigned int binary_bits;
bool enabled;
+ void *arch_priv;
};
extern struct mon_evt mon_event_all[QOS_NUM_EVENTS];
struct rmid_entry *entry;
u32 idx, cur_idx = 1;
void *arch_mon_ctx;
+ void *arch_priv;
bool rmid_dirty;
u64 val = 0;
+ arch_priv = mon_event_all[QOS_L3_OCCUP_EVENT_ID].arch_priv;
arch_mon_ctx = resctrl_arch_mon_ctx_alloc(r, QOS_L3_OCCUP_EVENT_ID);
if (IS_ERR(arch_mon_ctx)) {
pr_warn_ratelimited("Failed to allocate monitor context: %ld",
entry = __rmid_entry(idx);
if (resctrl_arch_rmid_read(r, &d->hdr, entry->closid, entry->rmid,
- QOS_L3_OCCUP_EVENT_ID, &val,
+ QOS_L3_OCCUP_EVENT_ID, arch_priv, &val,
arch_mon_ctx)) {
rmid_dirty = true;
} else {
rr->evt->evtid, &tval);
else
rr->err = resctrl_arch_rmid_read(rr->r, rr->hdr, closid, rmid,
- rr->evt->evtid, &tval, rr->arch_mon_ctx);
+ rr->evt->evtid, rr->evt->arch_priv,
+ &tval, rr->arch_mon_ctx);
if (rr->err)
return rr->err;
if (d->ci_id != rr->ci->id)
continue;
err = resctrl_arch_rmid_read(rr->r, &d->hdr, closid, rmid,
- rr->evt->evtid, &tval, rr->arch_mon_ctx);
+ rr->evt->evtid, rr->evt->arch_priv,
+ &tval, rr->arch_mon_ctx);
if (!err) {
rr->val += tval;
ret = 0;
MON_EVENT(PMT_EVENT_UOPS_RETIRED, "uops_retired", RDT_RESOURCE_PERF_PKG, false),
};
-void resctrl_enable_mon_event(enum resctrl_event_id eventid, bool any_cpu, unsigned int binary_bits)
+void resctrl_enable_mon_event(enum resctrl_event_id eventid, bool any_cpu,
+ unsigned int binary_bits, void *arch_priv)
{
if (WARN_ON_ONCE(eventid < QOS_FIRST_EVENT || eventid >= QOS_NUM_EVENTS ||
binary_bits > MAX_BINARY_BITS))
mon_event_all[eventid].any_cpu = any_cpu;
mon_event_all[eventid].binary_bits = binary_bits;
+ mon_event_all[eventid].arch_priv = arch_priv;
mon_event_all[eventid].enabled = true;
}
int resctrl_arch_update_domains(struct rdt_resource *r, u32 closid);
void resctrl_enable_mon_event(enum resctrl_event_id eventid, bool any_cpu,
- unsigned int binary_bits);
+ unsigned int binary_bits, void *arch_priv);
bool resctrl_is_mon_event_enabled(enum resctrl_event_id eventid);
* only.
* @rmid: rmid of the counter to read.
* @eventid: eventid to read, e.g. L3 occupancy.
+ * @arch_priv: Architecture private data for this event,
+ * provided by the architecture via
+ * resctrl_enable_mon_event().
* @val: result of the counter read in bytes.
* @arch_mon_ctx: An architecture specific value from
* resctrl_arch_mon_ctx_alloc(), for MPAM this identifies
*/
int resctrl_arch_rmid_read(struct rdt_resource *r, struct rdt_domain_hdr *hdr,
u32 closid, u32 rmid, enum resctrl_event_id eventid,
- u64 *val, void *arch_mon_ctx);
+ void *arch_priv, u64 *val, void *arch_mon_ctx);
/**
* resctrl_arch_rmid_read_context_check() - warn about invalid contexts