arm_mpam: Track bandwidth counter state for power management
author    James Morse <james.morse@arm.com>
          Wed, 19 Nov 2025 12:22:57 +0000 (12:22 +0000)
committer Catalin Marinas <catalin.marinas@arm.com>
          Wed, 19 Nov 2025 18:34:23 +0000 (18:34 +0000)
Bandwidth counters need to run continuously to correctly reflect the
bandwidth.

Save the counter state when the hardware is reset due to CPU hotplug.
Add struct msmon_mbwu_state to track the bandwidth counter state.
Support for tracking overflow with the same structure will be added in
a subsequent commit.
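
A minimal sketch of the correction scheme, using hypothetical names
(counter_state, counter_save_and_reset) and plain user-space C rather
than the driver's mpam_save_mbwu_state()/__ris_msmon_read() helpers: on
reset the current hardware value is banked into a software correction,
and every later read adds that correction back, so the reported total
keeps growing across CPU hotplug.

  /* Standalone illustration only; all names here are hypothetical. */
  #include <stdint.h>
  #include <stdio.h>

  struct counter_state {
          uint64_t hw;          /* stand-in for the MSMON_MBWU register */
          uint64_t correction;  /* bandwidth counted before the last reset */
  };

  /* A read folds in everything counted before the last hardware reset. */
  static uint64_t counter_read(const struct counter_state *c)
  {
          return c->hw + c->correction;
  }

  /* CPUs go offline: the hardware will lose state, so bank the value first. */
  static void counter_save_and_reset(struct counter_state *c)
  {
          c->correction += c->hw;
          c->hw = 0;
  }

  int main(void)
  {
          struct counter_state c = { 0, 0 };

          c.hw += 100;                 /* traffic counted before hotplug */
          counter_save_and_reset(&c);  /* hardware reset while CPUs are offline */
          c.hw += 40;                  /* traffic counted after they return */

          /* Prints "total = 140": the pre-reset bandwidth is not lost. */
          printf("total = %llu\n", (unsigned long long)counter_read(&c));
          return 0;
  }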

Cc: Zeng Heng <zengheng4@huawei.com>
Reviewed-by: Gavin Shan <gshan@redhat.com>
Reviewed-by: Zeng Heng <zengheng4@huawei.com>
Reviewed-by: Jonathan Cameron <jonathan.cameron@huawei.com>
Reviewed-by: Shaopeng Tan <tan.shaopeng@jp.fujitsu.com>
Reviewed-by: Fenghua Yu <fenghuay@nvidia.com>
Tested-by: Carl Worth <carl@os.amperecomputing.com>
Tested-by: Gavin Shan <gshan@redhat.com>
Tested-by: Zeng Heng <zengheng4@huawei.com>
Tested-by: Shaopeng Tan <tan.shaopeng@jp.fujitsu.com>
Tested-by: Hanjun Guo <guohanjun@huawei.com>
Signed-off-by: James Morse <james.morse@arm.com>
Signed-off-by: Ben Horgan <ben.horgan@arm.com>
Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
drivers/resctrl/mpam_devices.c
drivers/resctrl/mpam_internal.h

diff --git a/drivers/resctrl/mpam_devices.c b/drivers/resctrl/mpam_devices.c
index 4859c8b096c3eb0481f447c14631f76a7807e40a..c8ea37558f6905d8eb5dfeb57aee74275c42a70c 100644
--- a/drivers/resctrl/mpam_devices.c
+++ b/drivers/resctrl/mpam_devices.c
@@ -993,6 +993,7 @@ static void __ris_msmon_read(void *arg)
        struct mon_read *m = arg;
        struct mon_cfg *ctx = m->ctx;
        struct mpam_msc_ris *ris = m->ris;
+       struct msmon_mbwu_state *mbwu_state;
        struct mpam_props *rprops = &ris->props;
        struct mpam_msc *msc = m->ris->vmsc->msc;
        u32 mon_sel, ctl_val, flt_val, cur_ctl, cur_flt;
@@ -1023,11 +1024,21 @@ static void __ris_msmon_read(void *arg)
                now = mpam_read_monsel_reg(msc, CSU);
                if (mpam_has_feature(mpam_feat_msmon_csu_hw_nrdy, rprops))
                        nrdy = now & MSMON___NRDY;
+               now = FIELD_GET(MSMON___VALUE, now);
                break;
        case mpam_feat_msmon_mbwu:
                now = mpam_read_monsel_reg(msc, MBWU);
                if (mpam_has_feature(mpam_feat_msmon_mbwu_hw_nrdy, rprops))
                        nrdy = now & MSMON___NRDY;
+               now = FIELD_GET(MSMON___VALUE, now);
+
+               if (nrdy)
+                       break;
+
+               mbwu_state = &ris->mbwu_state[ctx->mon];
+
+               /* Include bandwidth consumed before the last hardware reset */
+               now += mbwu_state->correction;
                break;
        default:
                m->err = -EINVAL;
@@ -1039,7 +1050,6 @@ static void __ris_msmon_read(void *arg)
                return;
        }
 
-       now = FIELD_GET(MSMON___VALUE, now);
        *m->val += now;
 }
 
@@ -1235,6 +1245,67 @@ static void mpam_reprogram_ris_partid(struct mpam_msc_ris *ris, u16 partid,
        mutex_unlock(&msc->part_sel_lock);
 }
 
+/* Call with msc cfg_lock held */
+static int mpam_restore_mbwu_state(void *_ris)
+{
+       int i;
+       struct mon_read mwbu_arg;
+       struct mpam_msc_ris *ris = _ris;
+
+       for (i = 0; i < ris->props.num_mbwu_mon; i++) {
+               if (ris->mbwu_state[i].enabled) {
+                       mwbu_arg.ris = ris;
+                       mwbu_arg.ctx = &ris->mbwu_state[i].cfg;
+                       mwbu_arg.type = mpam_feat_msmon_mbwu;
+
+                       __ris_msmon_read(&mwbu_arg);
+               }
+       }
+
+       return 0;
+}
+
+/* Call with MSC cfg_lock held */
+static int mpam_save_mbwu_state(void *arg)
+{
+       int i;
+       u64 val;
+       struct mon_cfg *cfg;
+       u32 cur_flt, cur_ctl, mon_sel;
+       struct mpam_msc_ris *ris = arg;
+       struct msmon_mbwu_state *mbwu_state;
+       struct mpam_msc *msc = ris->vmsc->msc;
+
+       for (i = 0; i < ris->props.num_mbwu_mon; i++) {
+               mbwu_state = &ris->mbwu_state[i];
+               cfg = &mbwu_state->cfg;
+
+               if (WARN_ON_ONCE(!mpam_mon_sel_lock(msc)))
+                       return -EIO;
+
+               mon_sel = FIELD_PREP(MSMON_CFG_MON_SEL_MON_SEL, i) |
+                         FIELD_PREP(MSMON_CFG_MON_SEL_RIS, ris->ris_idx);
+               mpam_write_monsel_reg(msc, CFG_MON_SEL, mon_sel);
+
+               cur_flt = mpam_read_monsel_reg(msc, CFG_MBWU_FLT);
+               cur_ctl = mpam_read_monsel_reg(msc, CFG_MBWU_CTL);
+               mpam_write_monsel_reg(msc, CFG_MBWU_CTL, 0);
+
+               val = mpam_read_monsel_reg(msc, MBWU);
+               mpam_write_monsel_reg(msc, MBWU, 0);
+
+               cfg->mon = i;
+               cfg->pmg = FIELD_GET(MSMON_CFG_x_FLT_PMG, cur_flt);
+               cfg->match_pmg = FIELD_GET(MSMON_CFG_x_CTL_MATCH_PMG, cur_ctl);
+               cfg->partid = FIELD_GET(MSMON_CFG_x_FLT_PARTID, cur_flt);
+               mbwu_state->correction += val;
+               mbwu_state->enabled = FIELD_GET(MSMON_CFG_x_CTL_EN, cur_ctl);
+               mpam_mon_sel_unlock(msc);
+       }
+
+       return 0;
+}
+
 static void mpam_init_reset_cfg(struct mpam_config *reset_cfg)
 {
        *reset_cfg = (struct mpam_config) {
@@ -1343,6 +1414,9 @@ static void mpam_reprogram_msc(struct mpam_msc *msc)
                        mpam_touch_msc(msc, __write_config, &arg);
                }
                ris->in_reset_state = reset;
+
+               if (mpam_has_feature(mpam_feat_msmon_mbwu, &ris->props))
+                       mpam_touch_msc(msc, &mpam_restore_mbwu_state, ris);
        }
        mutex_unlock(&msc->cfg_lock);
 }
@@ -1436,6 +1510,9 @@ static int mpam_cpu_offline(unsigned int cpu)
                                 * lost while the CPUs are offline.
                                 */
                                ris->in_reset_state = false;
+
+                               if (mpam_is_enabled())
+                                       mpam_touch_msc(msc, &mpam_save_mbwu_state, ris);
                        }
                        mutex_unlock(&msc->cfg_lock);
                }
@@ -2109,7 +2186,22 @@ static void mpam_unregister_irqs(void)
 
 static void __destroy_component_cfg(struct mpam_component *comp)
 {
+       struct mpam_msc *msc;
+       struct mpam_vmsc *vmsc;
+       struct mpam_msc_ris *ris;
+
+       lockdep_assert_held(&mpam_list_lock);
+
        add_to_garbage(comp->cfg);
+       list_for_each_entry(vmsc, &comp->vmsc, comp_list) {
+               msc = vmsc->msc;
+
+               if (mpam_mon_sel_lock(msc)) {
+                       list_for_each_entry(ris, &vmsc->ris, vmsc_list)
+                               add_to_garbage(ris->mbwu_state);
+                       mpam_mon_sel_unlock(msc);
+               }
+       }
 }
 
 static void mpam_reset_component_cfg(struct mpam_component *comp)
@@ -2135,6 +2227,8 @@ static void mpam_reset_component_cfg(struct mpam_component *comp)
 
 static int __allocate_component_cfg(struct mpam_component *comp)
 {
+       struct mpam_vmsc *vmsc;
+
        mpam_assert_partid_sizes_fixed();
 
        if (comp->cfg)
@@ -2152,6 +2246,36 @@ static int __allocate_component_cfg(struct mpam_component *comp)
 
        mpam_reset_component_cfg(comp);
 
+       list_for_each_entry(vmsc, &comp->vmsc, comp_list) {
+               struct mpam_msc *msc;
+               struct mpam_msc_ris *ris;
+               struct msmon_mbwu_state *mbwu_state;
+
+               if (!vmsc->props.num_mbwu_mon)
+                       continue;
+
+               msc = vmsc->msc;
+               list_for_each_entry(ris, &vmsc->ris, vmsc_list) {
+                       if (!ris->props.num_mbwu_mon)
+                               continue;
+
+                       mbwu_state = kcalloc(ris->props.num_mbwu_mon,
+                                            sizeof(*ris->mbwu_state),
+                                            GFP_KERNEL);
+                       if (!mbwu_state) {
+                               __destroy_component_cfg(comp);
+                               return -ENOMEM;
+                       }
+
+                       init_garbage(&mbwu_state[0].garbage);
+
+                       if (mpam_mon_sel_lock(msc)) {
+                               ris->mbwu_state = mbwu_state;
+                               mpam_mon_sel_unlock(msc);
+                       }
+               }
+       }
+
        return 0;
 }
 
diff --git a/drivers/resctrl/mpam_internal.h b/drivers/resctrl/mpam_internal.h
index 12f0a5b7f39ebead8a14bd7dbd26bc707ebe382a..12ce80bc7ff7f5c500790f697d0c71d8198f4c15 100644
--- a/drivers/resctrl/mpam_internal.h
+++ b/drivers/resctrl/mpam_internal.h
@@ -91,7 +91,10 @@ struct mpam_msc {
         */
        struct mutex            part_sel_lock;
 
-       /* cfg_lock protects the msc configuration. */
+       /*
+        * cfg_lock protects the msc configuration and guards against mbwu_state
+        * save and restore racing.
+        */
        struct mutex            cfg_lock;
 
        /*
@@ -202,6 +205,19 @@ struct mon_cfg {
        enum mon_filter_options opts;
 };
 
+/* Changes to msmon_mbwu_state are protected by the msc's mon_sel_lock. */
+struct msmon_mbwu_state {
+       bool            enabled;
+       struct mon_cfg  cfg;
+
+       /*
+        * The value to add to the new reading to account for power management.
+        */
+       u64             correction;
+
+       struct mpam_garbage     garbage;
+};
+
 struct mpam_class {
        /* mpam_components in this class */
        struct list_head        components;
@@ -295,6 +311,9 @@ struct mpam_msc_ris {
        /* parent: */
        struct mpam_vmsc        *vmsc;
 
+       /* msmon mbwu configuration is preserved over reset */
+       struct msmon_mbwu_state *mbwu_state;
+
        struct mpam_garbage     garbage;
 };