#include <linux/atomic.h>
#include <linux/arm_mpam.h>
#include <linux/bitfield.h>
+#include <linux/bitmap.h>
#include <linux/cacheinfo.h>
#include <linux/cpu.h>
#include <linux/cpumask.h>
return 0;
}
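+/*
+ * Write all-ones to a bitmap control that is wd bits wide, 32 bits
+ * at a time, starting at the register offset reg.
+ */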
+static void mpam_reset_msc_bitmap(struct mpam_msc *msc, u16 reg, u16 wd)
+{
+ u32 num_words, msb;
+ u32 bm = ~0;
+ int i;
+
+ lockdep_assert_held(&msc->part_sel_lock);
+
+ if (wd == 0)
+ return;
+
+ /*
+ * Write ~0 to all but the last 32-bit word, which may have
+ * fewer bits...
+ */
+ num_words = DIV_ROUND_UP(wd, 32);
+ for (i = 0; i < num_words - 1; i++, reg += sizeof(bm))
+ __mpam_write_reg(msc, reg, bm);
+
+ /*
+ * ...and then the last, possibly partial, 32-bit word. When wd is a
+ * multiple of 32, msb is 31 so a full 32-bit word is written.
+ */
+ msb = (wd - 1) % 32;
+ bm = GENMASK(msb, 0);
+ __mpam_write_reg(msc, reg, bm);
+}
+
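+/*
+ * Program one partid's controls on this RIS back to their reset
+ * values: bitmap controls become all-ones, MBW_MIN zero and
+ * MBW_MAX its maximum value.
+ */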
+static void mpam_reset_ris_partid(struct mpam_msc_ris *ris, u16 partid)
+{
+ struct mpam_msc *msc = ris->vmsc->msc;
+ struct mpam_props *rprops = &ris->props;
+
+ WARN_ON_ONCE(!srcu_read_lock_held(&mpam_srcu));
+
+ mutex_lock(&msc->part_sel_lock);
+ __mpam_part_sel(ris->ris_idx, partid, msc);
+
+ if (mpam_has_feature(mpam_feat_cpor_part, rprops))
+ mpam_reset_msc_bitmap(msc, MPAMCFG_CPBM, rprops->cpbm_wd);
+
+ if (mpam_has_feature(mpam_feat_mbw_part, rprops))
+ mpam_reset_msc_bitmap(msc, MPAMCFG_MBW_PBM, rprops->mbw_pbm_bits);
+
+ if (mpam_has_feature(mpam_feat_mbw_min, rprops))
+ mpam_write_partsel_reg(msc, MBW_MIN, 0);
+
+ if (mpam_has_feature(mpam_feat_mbw_max, rprops))
+ mpam_write_partsel_reg(msc, MBW_MAX, MPAMCFG_MBW_MAX_MAX);
+
+ mutex_unlock(&msc->part_sel_lock);
+}
+
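+/*
+ * Reset every partid up to mpam_partid_max, unless this RIS is
+ * already known to be in its reset state.
+ */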
+static void mpam_reset_ris(struct mpam_msc_ris *ris)
+{
+ u32 partid, partid_max;
+
+ WARN_ON_ONCE(!srcu_read_lock_held(&mpam_srcu));
+
+ if (ris->in_reset_state)
+ return;
+
+ spin_lock(&partid_max_lock);
+ partid_max = mpam_partid_max;
+ spin_unlock(&partid_max_lock);
+ for (partid = 0; partid <= partid_max; partid++)
+ mpam_reset_ris_partid(ris, partid);
+}
+
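+/*
+ * Reset all the RIS in this MSC. Callers hold the mpam_srcu read
+ * lock so the RIS list can be walked safely.
+ */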
+static void mpam_reset_msc(struct mpam_msc *msc, bool online)
+{
+ struct mpam_msc_ris *ris;
+
+ list_for_each_entry_srcu(ris, &msc->ris, msc_list,
+ srcu_read_lock_held(&mpam_srcu)) {
+ mpam_reset_ris(ris);
+
+ /*
+ * Set in_reset_state when coming online. The reset state
+ * for non-zero partids may be lost while the CPUs are offline.
+ */
+ ris->in_reset_state = online;
+ }
+}
+
static int mpam_cpu_online(unsigned int cpu)
{
+ struct mpam_msc *msc;
+
+ guard(srcu)(&mpam_srcu);
+ list_for_each_entry_srcu(msc, &mpam_all_msc, all_msc_list,
+ srcu_read_lock_held(&mpam_srcu)) {
+ if (!cpumask_test_cpu(cpu, &msc->accessibility))
+ continue;
+
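+ /* The first accessible CPU to come online resets the MSC. */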
+ if (atomic_fetch_inc(&msc->online_refs) == 0)
+ mpam_reset_msc(msc, true);
+ }
+
return 0;
}
static int mpam_cpu_offline(unsigned int cpu)
{
+ struct mpam_msc *msc;
+
+ guard(srcu)(&mpam_srcu);
+ list_for_each_entry_srcu(msc, &mpam_all_msc, all_msc_list,
+ srcu_read_lock_held(&mpam_srcu)) {
+ if (!cpumask_test_cpu(cpu, &msc->accessibility))
+ continue;
+
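+ /* Reset the MSC when its last accessible CPU goes offline. */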
+ if (atomic_dec_and_test(&msc->online_refs))
+ mpam_reset_msc(msc, false);
+ }
+
return 0;
}