void mon_event_read(struct rmid_read *rr, struct rdt_resource *r,
struct rdt_domain_hdr *hdr, struct rdtgroup *rdtgrp,
- cpumask_t *cpumask, int evtid, int first)
+ cpumask_t *cpumask, struct mon_evt *evt, int first)
{
int cpu;
* Set up the parameters to pass to mon_event_count() to read the data.
*/
rr->rgrp = rdtgrp;
- rr->evtid = evtid;
+ rr->evt = evt;
rr->r = r;
rr->hdr = hdr;
rr->first = first;
if (resctrl_arch_mbm_cntr_assign_enabled(r) &&
- resctrl_is_mbm_event(evtid)) {
+ resctrl_is_mbm_event(evt->evtid)) {
rr->is_mbm_cntr = true;
} else {
- rr->arch_mon_ctx = resctrl_arch_mon_ctx_alloc(r, evtid);
+ rr->arch_mon_ctx = resctrl_arch_mon_ctx_alloc(r, evt->evtid);
if (IS_ERR(rr->arch_mon_ctx)) {
rr->err = -EINVAL;
return;
smp_call_on_cpu(cpu, smp_mon_event_count, rr, false);
if (rr->arch_mon_ctx)
- resctrl_arch_mon_ctx_free(r, evtid, rr->arch_mon_ctx);
+ resctrl_arch_mon_ctx_free(r, evt->evtid, rr->arch_mon_ctx);
}
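For context, a minimal sketch of struct mon_evt, the descriptor now passed by pointer instead of a raw event id. Only the evtid member is dereferenced by this patch; the other members are assumptions for illustration, not part of this change:

/*
 * Sketch only; the real definition lives in the resctrl internals.
 */
struct mon_evt {
	enum resctrl_event_id	evtid;		/* event id, read as evt->evtid */
	char			*name;		/* assumed: name of the event file */
	bool			configurable;	/* assumed: event is configurable */
};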
int rdtgroup_mondata_show(struct seq_file *m, void *arg)
{
struct kernfs_open_file *of = m->private;
enum resctrl_res_level resid;
- enum resctrl_event_id evtid;
struct rdt_l3_mon_domain *d;
struct rdt_domain_hdr *hdr;
struct rmid_read rr = {0};
int domid, cpu, ret = 0;
struct rdt_resource *r;
struct cacheinfo *ci;
+ struct mon_evt *evt;
struct mon_data *md;
rdtgrp = rdtgroup_kn_lock_live(of->kn);
resid = md->rid;
domid = md->domid;
- evtid = md->evtid;
+ evt = md->evt;
r = resctrl_arch_get_resource(resid);
if (md->sum) {
continue;
rr.ci = ci;
mon_event_read(&rr, r, NULL, rdtgrp,
- &ci->shared_cpu_map, evtid, false);
+ &ci->shared_cpu_map, evt, false);
goto checkresult;
}
}
ret = -ENOENT;
goto out;
}
- mon_event_read(&rr, r, hdr, rdtgrp, &hdr->cpu_mask, evtid, false);
+ mon_event_read(&rr, r, hdr, rdtgrp, &hdr->cpu_mask, evt, false);
}
checkresult:
* struct mon_data - Monitoring details for each event file.
* @list: Member of the global @mon_data_kn_priv_list list.
* @rid: Resource id associated with the event file.
- * @evtid: Event id associated with the event file.
+ * @evt: Event structure associated with the event file.
* @sum: Set when event must be summed across multiple
* domains.
* @domid: When @sum is zero this is the domain to which
struct mon_data {
struct list_head list;
enum resctrl_res_level rid;
- enum resctrl_event_id evtid;
+ struct mon_evt *evt;
int domid;
bool sum;
};
* @r: Resource describing the properties of the event being read.
* @hdr: Header of domain that the counter should be read from. If NULL then
sum all domains in @r sharing L3 @ci.id.
- * @evtid: Which monitor event to read.
+ * @evt: Which monitor event to read.
* @first: Initialize MBM counter when true.
* @ci: Cacheinfo for L3. Only set when @hdr is NULL. Used when summing
* domains.
struct rdtgroup *rgrp;
struct rdt_resource *r;
struct rdt_domain_hdr *hdr;
- enum resctrl_event_id evtid;
+ struct mon_evt *evt;
bool first;
struct cacheinfo *ci;
bool is_mbm_cntr;
void mon_event_read(struct rmid_read *rr, struct rdt_resource *r,
struct rdt_domain_hdr *hdr, struct rdtgroup *rdtgrp,
- cpumask_t *cpumask, int evtid, int first);
+ cpumask_t *cpumask, struct mon_evt *evt, int first);
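A hypothetical call-site translation under the new prototype, using the &mon_event_all[] indexing that the mbm_update hunk below relies on (QOS_L3_MBM_TOTAL_EVENT_ID is an illustrative choice of event, not a caller touched by this patch):

/* Before: the event is identified by its raw id. */
mon_event_read(&rr, r, hdr, rdtgrp, &hdr->cpu_mask,
	       QOS_L3_MBM_TOTAL_EVENT_ID, false);

/* After: pass the descriptor; callers reach the id via evt->evtid. */
mon_event_read(&rr, r, hdr, rdtgrp, &hdr->cpu_mask,
	       &mon_event_all[QOS_L3_MBM_TOTAL_EVENT_ID], false);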
void mbm_setup_overflow_handler(struct rdt_l3_mon_domain *dom,
unsigned long delay_ms,
d = container_of(rr->hdr, struct rdt_l3_mon_domain, hdr);
if (rr->is_mbm_cntr) {
- cntr_id = mbm_cntr_get(rr->r, d, rdtgrp, rr->evtid);
+ cntr_id = mbm_cntr_get(rr->r, d, rdtgrp, rr->evt->evtid);
if (cntr_id < 0) {
rr->err = -ENOENT;
return -EINVAL;
if (rr->first) {
if (rr->is_mbm_cntr)
- resctrl_arch_reset_cntr(rr->r, d, closid, rmid, cntr_id, rr->evtid);
+ resctrl_arch_reset_cntr(rr->r, d, closid, rmid, cntr_id, rr->evt->evtid);
else
- resctrl_arch_reset_rmid(rr->r, d, closid, rmid, rr->evtid);
- m = get_mbm_state(d, closid, rmid, rr->evtid);
+ resctrl_arch_reset_rmid(rr->r, d, closid, rmid, rr->evt->evtid);
+ m = get_mbm_state(d, closid, rmid, rr->evt->evtid);
if (m)
memset(m, 0, sizeof(struct mbm_state));
return 0;
return -EINVAL;
if (rr->is_mbm_cntr)
rr->err = resctrl_arch_cntr_read(rr->r, d, closid, rmid, cntr_id,
- rr->evtid, &tval);
+ rr->evt->evtid, &tval);
else
rr->err = resctrl_arch_rmid_read(rr->r, rr->hdr, closid, rmid,
- rr->evtid, &tval, rr->arch_mon_ctx);
+ rr->evt->evtid, &tval, rr->arch_mon_ctx);
if (rr->err)
return rr->err;
if (d->ci_id != rr->ci->id)
continue;
err = resctrl_arch_rmid_read(rr->r, &d->hdr, closid, rmid,
- rr->evtid, &tval, rr->arch_mon_ctx);
+ rr->evt->evtid, &tval, rr->arch_mon_ctx);
if (!err) {
rr->val += tval;
ret = 0;
if (!domain_header_is_valid(rr->hdr, RESCTRL_MON_DOMAIN, RDT_RESOURCE_L3))
return;
d = container_of(rr->hdr, struct rdt_l3_mon_domain, hdr);
- m = get_mbm_state(d, closid, rmid, rr->evtid);
+ m = get_mbm_state(d, closid, rmid, rr->evt->evtid);
if (WARN_ON_ONCE(!m))
return;
rr.r = r;
rr.hdr = &d->hdr;
- rr.evtid = evtid;
+ rr.evt = &mon_event_all[evtid];
if (resctrl_arch_mbm_cntr_assign_enabled(r)) {
rr.is_mbm_cntr = true;
} else {
- rr.arch_mon_ctx = resctrl_arch_mon_ctx_alloc(rr.r, rr.evtid);
+ rr.arch_mon_ctx = resctrl_arch_mon_ctx_alloc(rr.r, evtid);
if (IS_ERR(rr.arch_mon_ctx)) {
pr_warn_ratelimited("Failed to allocate monitor context: %ld",
PTR_ERR(rr.arch_mon_ctx));
mbm_bw_count(rdtgrp, &rr);
if (rr.arch_mon_ctx)
- resctrl_arch_mon_ctx_free(rr.r, rr.evtid, rr.arch_mon_ctx);
+ resctrl_arch_mon_ctx_free(rr.r, evtid, rr.arch_mon_ctx);
}
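Note: in this hunk the context alloc/free calls keep using the local evtid rather than rr.evt->evtid; the two are interchangeable here because rr.evt was just set to &mon_event_all[evtid].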
static void mbm_update(struct rdt_resource *r, struct rdt_l3_mon_domain *d,
list_for_each_entry(priv, &mon_data_kn_priv_list, list) {
if (priv->rid == rid && priv->domid == domid &&
- priv->sum == do_sum && priv->evtid == mevt->evtid)
+ priv->sum == do_sum && priv->evt == mevt)
return priv;
}
priv->rid = rid;
priv->domid = domid;
priv->sum = do_sum;
- priv->evtid = mevt->evtid;
+ priv->evt = mevt;
list_add_tail(&priv->list, &mon_data_kn_priv_list);
return priv;
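The pointer comparison above is equivalent to the old priv->evtid == mevt->evtid check only because each event id has a single statically allocated descriptor (the &mon_event_all[evtid] entries used in the overflow path). An illustrative invariant assuming that indexing scheme, not code added by the patch:

/* One descriptor per event id: pointer equality implies id equality. */
WARN_ON_ONCE(priv->evt != &mon_event_all[priv->evt->evtid]);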
return ret;
if (!do_sum && resctrl_is_mbm_event(mevt->evtid))
- mon_event_read(&rr, r, hdr, prgrp, &hdr->cpu_mask, mevt->evtid, true);
+ mon_event_read(&rr, r, hdr, prgrp, &hdr->cpu_mask, mevt, true);
}
return 0;
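For reference, resctrl_is_mbm_event(), used in mon_event_read() and in the mkdir path above, is assumed to be the usual range check over the MBM event ids:

static inline bool resctrl_is_mbm_event(int e)
{
	return (e >= QOS_L3_MBM_TOTAL_EVENT_ID &&
		e <= QOS_L3_MBM_LOCAL_EVENT_ID);
}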