The L3 resource has several requirements for domains. There are per-domain
structures that hold the 64-bit values of counters, and elements to keep
track of the overflow and limbo threads.
None of these are needed for the PERF_PKG resource. The hardware counters
are wide enough that they do not wrap around for decades.
Define a new rdt_perf_pkg_mon_domain structure which just consists of the
standard rdt_domain_hdr to keep track of domain id and CPU mask.
Update resctrl_online_mon_domain() for RDT_RESOURCE_PERF_PKG. The only action
needed for this resource is to create and populate domain directories if a
domain is added while resctrl is mounted.
Similarly resctrl_offline_mon_domain() only needs to remove domain directories.
Signed-off-by: Tony Luck <tony.luck@intel.com>
Signed-off-by: Borislav Petkov (AMD) <bp@alien8.de>
Reviewed-by: Reinette Chatre <reinette.chatre@intel.com>
Link: https://lore.kernel.org/20251217172121.12030-1-tony.luck@intel.com
if (!hdr)
l3_mon_domain_setup(cpu, id, r, add_pos);
break;
+ case RDT_RESOURCE_PERF_PKG:
+ if (!hdr)
+ intel_aet_mon_domain_setup(cpu, id, r, add_pos);
+ break;
default:
pr_warn_once("Unknown resource rid=%d\n", r->rid);
break;
l3_mon_domain_free(hw_dom);
break;
}
+ case RDT_RESOURCE_PERF_PKG: {
+ struct rdt_perf_pkg_mon_domain *pkgd;
+
+ if (!domain_header_is_valid(hdr, RESCTRL_MON_DOMAIN, RDT_RESOURCE_PERF_PKG))
+ return;
+
+ pkgd = container_of(hdr, struct rdt_perf_pkg_mon_domain, hdr);
+ resctrl_offline_mon_domain(r, hdr);
+ list_del_rcu(&hdr->list);
+ synchronize_rcu();
+ kfree(pkgd);
+ break;
+ }
default:
pr_warn_once("Unknown resource rid=%d\n", r->rid);
break;
#include <linux/bits.h>
#include <linux/compiler_types.h>
#include <linux/container_of.h>
+#include <linux/cpumask.h>
#include <linux/err.h>
#include <linux/errno.h>
+#include <linux/gfp_types.h>
#include <linux/init.h>
#include <linux/intel_pmt_features.h>
#include <linux/intel_vsec.h>
#include <linux/io.h>
#include <linux/printk.h>
+#include <linux/rculist.h>
+#include <linux/rcupdate.h>
#include <linux/resctrl.h>
#include <linux/resctrl_types.h>
+#include <linux/slab.h>
#include <linux/stddef.h>
#include <linux/topology.h>
#include <linux/types.h>
return valid ? 0 : -EINVAL;
}
+
+/**
+ * intel_aet_mon_domain_setup() - Allocate and register a PERF_PKG monitor domain
+ * @cpu:	CPU whose arrival triggered creation (used for node-local allocation)
+ * @id:		domain id recorded in the header
+ * @r:		the RDT_RESOURCE_PERF_PKG resource
+ * @add_pos:	insertion point in @r's RCU-protected domain list
+ *
+ * Only the common rdt_domain_hdr is needed for this resource: no per-domain
+ * counter state or overflow/limbo tracking (the hardware counters are wide
+ * enough not to wrap). An allocation failure is silent; the domain is simply
+ * not created.
+ */
+void intel_aet_mon_domain_setup(int cpu, int id, struct rdt_resource *r,
+ struct list_head *add_pos)
+{
+ struct rdt_perf_pkg_mon_domain *d;
+ int err;
+
+ d = kzalloc_node(sizeof(*d), GFP_KERNEL, cpu_to_node(cpu));
+ if (!d)
+ return;
+
+ d->hdr.id = id;
+ d->hdr.type = RESCTRL_MON_DOMAIN;
+ d->hdr.rid = RDT_RESOURCE_PERF_PKG;
+ cpumask_set_cpu(cpu, &d->hdr.cpu_mask);
+ /* Publish before resctrl_online_mon_domain() so readers can see it. */
+ list_add_tail_rcu(&d->hdr.list, add_pos);
+
+ err = resctrl_online_mon_domain(r, &d->hdr);
+ if (err) {
+ /* Roll back: unpublish, wait out RCU readers, then free. */
+ list_del_rcu(&d->hdr.list);
+ synchronize_rcu();
+ kfree(d);
+ }
+}
return container_of(r, struct rdt_hw_l3_mon_domain, d_resctrl);
}
+/**
+ * struct rdt_perf_pkg_mon_domain - CPUs sharing a package-scoped resctrl monitor resource
+ * @hdr: common header for different domain types
+ *
+ * No counter, overflow, or limbo state is carried here: the PERF_PKG
+ * hardware counters are wide enough that they do not wrap for decades.
+ */
+struct rdt_perf_pkg_mon_domain {
+ struct rdt_domain_hdr hdr;
+};
+
/**
* struct msr_param - set a range of MSRs from a domain
* @res: The resource to use
bool intel_aet_get_events(void);
void __exit intel_aet_exit(void);
int intel_aet_read_event(int domid, u32 rmid, void *arch_priv, u64 *val);
+void intel_aet_mon_domain_setup(int cpu, int id, struct rdt_resource *r,
+ struct list_head *add_pos);
#else
static inline bool intel_aet_get_events(void) { return false; }
static inline void __exit intel_aet_exit(void) { }
{
return -EINVAL;
}
+
+static inline void intel_aet_mon_domain_setup(int cpu, int id, struct rdt_resource *r,
+ struct list_head *add_pos) { }
#endif
#endif /* _ASM_X86_RESCTRL_INTERNAL_H */
mutex_lock(&rdtgroup_mutex);
- if (!domain_header_is_valid(hdr, RESCTRL_MON_DOMAIN, RDT_RESOURCE_L3))
- goto out_unlock;
-
- d = container_of(hdr, struct rdt_l3_mon_domain, hdr);
-
/*
* If resctrl is mounted, remove all the
* per domain monitor data directories.
if (resctrl_mounted && resctrl_arch_mon_capable())
rmdir_mondata_subdir_allrdtgrp(r, hdr);
+ if (r->rid != RDT_RESOURCE_L3)
+ goto out_unlock;
+
+ if (!domain_header_is_valid(hdr, RESCTRL_MON_DOMAIN, RDT_RESOURCE_L3))
+ goto out_unlock;
+
+ d = container_of(hdr, struct rdt_l3_mon_domain, hdr);
if (resctrl_is_mbm_enabled())
cancel_delayed_work(&d->mbm_over);
if (resctrl_is_mon_event_enabled(QOS_L3_OCCUP_EVENT_ID) && has_busy_rmid(d)) {
mutex_lock(&rdtgroup_mutex);
+ if (r->rid != RDT_RESOURCE_L3)
+ goto mkdir;
+
if (!domain_header_is_valid(hdr, RESCTRL_MON_DOMAIN, RDT_RESOURCE_L3))
goto out_unlock;
if (resctrl_is_mon_event_enabled(QOS_L3_OCCUP_EVENT_ID))
INIT_DELAYED_WORK(&d->cqm_limbo, cqm_handle_limbo);
+mkdir:
+ err = 0;
/*
* If the filesystem is not mounted then only the default resource group
* exists. Creation of its directories is deferred until mount time