mutex_unlock(&msc->part_sel_lock);
}
-static void mpam_reset_ris(struct mpam_msc_ris *ris)
+/*
+ * Called via smp_call_on_cpu() to prevent migration, while still being
+ * pre-emptible.
+ */
+static int mpam_reset_ris(void *arg)
{
u16 partid, partid_max;
+ struct mpam_msc_ris *ris = arg;
WARN_ON_ONCE(!srcu_read_lock_held(&mpam_srcu));
if (ris->in_reset_state)
- return;
+ return 0;
spin_lock(&partid_max_lock);
partid_max = mpam_partid_max;
spin_unlock(&partid_max_lock);
for (partid = 0; partid <= partid_max; partid++)
mpam_reset_ris_partid(ris, partid);
+
+ return 0;
+}
+
+/*
+ * Get the preferred CPU for this MSC. If the MSC is accessible from this
+ * CPU, this CPU is preferred. This check may race with preemption or
+ * migration; a stale answer is harmless and at worst causes extra work.
+ */
+static int mpam_get_msc_preferred_cpu(struct mpam_msc *msc)
+{
+ int cpu = raw_smp_processor_id();
+
+ if (cpumask_test_cpu(cpu, &msc->accessibility))
+ return cpu;
+
+ return cpumask_first_and(&msc->accessibility, cpu_online_mask);
+}
+
+static int mpam_touch_msc(struct mpam_msc *msc, int (*fn)(void *a), void *arg)
+{
+ lockdep_assert_irqs_enabled();
+ lockdep_assert_cpus_held();
+ WARN_ON_ONCE(!srcu_read_lock_held(&mpam_srcu));
+
+ return smp_call_on_cpu(mpam_get_msc_preferred_cpu(msc), fn, arg, true);
}
@@ ... @@ static void mpam_reset_msc(struct mpam_msc *msc, bool online)
struct mpam_msc_ris *ris;
list_for_each_entry_srcu(ris, &msc->ris, msc_list, srcu_read_lock_held(&mpam_srcu)) {
- mpam_reset_ris(ris);
+ mpam_touch_msc(msc, &mpam_reset_ris, ris);
/*
* Set in_reset_state when coming online. The reset state