{
struct i915_pmu *pmu = &gt->i915->pmu;
- if (!pmu->base.event_init)
+ if (pmu->closed)
return;
spin_lock_irq(&pmu->lock);

{
struct i915_pmu *pmu = &gt->i915->pmu;
- if (!pmu->base.event_init)
+ if (pmu->closed)
return;
spin_lock_irq(&pmu->lock);
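
These first two hunks (the GT park/unpark notifications) switch from probing pmu->base.event_init to the dedicated pmu->closed flag. For context, a minimal sketch of the matching guard on the perf-facing side, assuming the driver's usual container_of() plumbing (simplified, not the verbatim patch):

	/* Sketch: a closed PMU refuses new events, so a perf_event_open()
	 * racing with device unbind fails cleanly instead of touching a
	 * half-torn-down driver. */
	static int i915_pmu_event_init(struct perf_event *event)
	{
		struct drm_i915_private *i915 =
			container_of(event->pmu, typeof(*i915), pmu.base);
		struct i915_pmu *pmu = &i915->pmu;

		if (pmu->closed)
			return -ENODEV;

		/* ... the existing event validation continues here ... */
		return 0;
	}
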
{
struct i915_pmu *pmu = hlist_entry_safe(node, typeof(*pmu), cpuhp.node);
- GEM_BUG_ON(!pmu->base.event_init);
-
/* Select the first online CPU as a designated reader. */
if (cpumask_empty(&i915_pmu_cpumask))
cpumask_set_cpu(cpu, &i915_pmu_cpumask);

struct i915_pmu *pmu = hlist_entry_safe(node, typeof(*pmu), cpuhp.node);
unsigned int target = i915_pmu_target_cpu;
- GEM_BUG_ON(!pmu->base.event_init);
-
/*
* Unregistering an instance generates a CPU offline event which we must
* ignore to avoid incorrectly modifying the shared i915_pmu_cpumask.
*/
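
With the asserts gone, the hotplug callbacks rely purely on list membership: an instance is only visible to them between the cpuhp add and remove calls, and per the comment above a closed instance simply ignores the offline event. A rough sketch of that hookup, assuming cpuhp_slot is the dynamic state claimed once via cpuhp_setup_state_multi() at module load:

	/* Sketch: the pmu instance is only reachable by the online/offline
	 * callbacks between these two calls, which is what makes the
	 * GEM_BUG_ON(!pmu->base.event_init) asserts above redundant. */
	static int i915_pmu_register_cpuhp_state(struct i915_pmu *pmu)
	{
		if (cpuhp_slot == CPUHP_INVALID)
			return -EINVAL;

		return cpuhp_state_add_instance(cpuhp_slot, &pmu->cpuhp.node);
	}

	static void i915_pmu_unregister_cpuhp_state(struct i915_pmu *pmu)
	{
		cpuhp_state_remove_instance(cpuhp_slot, &pmu->cpuhp.node);
	}

The next hunks are from the registration path.
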
&i915_pmu_cpumask_attr_group,
NULL
};
-
int ret = -ENOMEM;
+ pmu->closed = true;
+
spin_lock_init(&pmu->lock);
hrtimer_init(&pmu->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
pmu->timer.function = i915_sample;

if (ret)
goto err_unreg;
+ pmu->closed = false;
+
return;
err_unreg:

err_groups:
kfree(pmu->base.attr_groups);
err_attr:
- pmu->base.event_init = NULL;
free_event_attributes(pmu);
err_name:
if (IS_DGFX(i915))
kfree(pmu->name);
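
The ordering in the registration hunks is the heart of the change: the PMU is born closed and only flips open once every registration step has succeeded, so none of the error labels ever has to unpublish a PMU that userspace could have observed. Schematically (setup_pmu() is a hypothetical stand-in collapsing the attribute, perf and cpuhp registration steps shown above):

	/* Sketch of the resulting flow, not the real function body. */
	void i915_pmu_register(struct drm_i915_private *i915)
	{
		struct i915_pmu *pmu = &i915->pmu;

		pmu->closed = true;	/* reject events while half-constructed */

		if (setup_pmu(pmu))	/* hypothetical stand-in */
			return;		/* stays closed forever on failure */

		pmu->closed = false;	/* publish: events may now succeed */
	}

The final hunks are from the unregister (unbind) path.
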
{
struct i915_pmu *pmu = &i915->pmu;
- if (!pmu->base.event_init)
- return;
-
/*
* "Disconnect" the PMU callbacks - since all are atomic synchronize_rcu
* ensures all currently executing ones will have exited before we
* proceed.
*/

i915_pmu_unregister_cpuhp_state(pmu);
perf_pmu_unregister(&pmu->base);
- pmu->base.event_init = NULL;
kfree(pmu->base.attr_groups);
if (IS_DGFX(i915))
kfree(pmu->name);
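
The comment in the unregister path spells out the teardown contract: all PMU callbacks are atomic, so setting pmu->closed and then issuing synchronize_rcu() guarantees no callback is still executing against the dying device before teardown proceeds. A sketch of how a reader-side callback cooperates, modelled on the event-read path (details assumed, not verbatim):

	/* Sketch: callbacks that can race with unbind test pmu->closed first.
	 * Reads that began before the flag was set are waited out by the
	 * synchronize_rcu() in the unregister path; later ones see the flag
	 * and park the event instead of touching vanished hardware. */
	static void i915_pmu_event_read(struct perf_event *event)
	{
		struct drm_i915_private *i915 =
			container_of(event->pmu, typeof(*i915), pmu.base);
		struct i915_pmu *pmu = &i915->pmu;

		if (pmu->closed) {
			event->hw.state = PERF_HES_STOPPED;
			return;
		}

		/* ... the normal counter read continues here ... */
	}
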