// SPDX-License-Identifier: GPL-2.0-only
/*
 * Resource Director Technology(RDT)
 * - Monitoring code
 *
 * Copyright (C) 2017 Intel Corporation
 *
 * Author:
 *    Vikas Shivappa <vikas.shivappa@intel.com>
 *
 * This replaces the cqm.c based on perf but we reuse a lot of
 * code and data structures originally from Peter Zijlstra and Matt Fleming.
 *
 * More information about RDT can be found in the Intel (R) x86 Architecture
 * Software Developer Manual June 2016, volume 3, section 17.17.
 */

#include <linux/module.h>
#include <linux/slab.h>
#include <asm/cpu_device_id.h>
#include "internal.h"

struct rmid_entry {
        u32                     rmid;
        int                     busy;
        struct list_head        list;
};

/**
 * @rmid_free_lru    A least recently used list of free RMIDs
 *     These RMIDs are guaranteed to have an occupancy less than the
 *     threshold occupancy
 */
static LIST_HEAD(rmid_free_lru);

/**
 * @rmid_limbo_count     count of currently unused but (potentially)
 *     dirty RMIDs.
 *     This counts RMIDs that no one is currently using but that
 *     may have an occupancy value > resctrl_cqm_threshold. User can change
 *     the threshold occupancy value.
 */
static unsigned int rmid_limbo_count;

/**
 * @rmid_entry - The entry in the limbo and free lists.
 */
static struct rmid_entry        *rmid_ptrs;

/*
 * Global boolean for rdt_monitor which is true if any
 * resource monitoring is enabled.
 */
bool rdt_mon_capable;

/*
 * Global to indicate which monitoring events are enabled.
 */
unsigned int rdt_mon_features;

/*
 * This is the threshold cache occupancy at which we will consider an
 * RMID available for re-allocation.
 */
unsigned int resctrl_cqm_threshold;

static inline struct rmid_entry *__rmid_entry(u32 rmid)
{
        struct rmid_entry *entry;

        entry = &rmid_ptrs[rmid];
        WARN_ON(entry->rmid != rmid);

        return entry;
}

static u64 __rmid_read(u32 rmid, u32 eventid)
{
        u64 val;

        /*
         * As per the SDM, when IA32_QM_EVTSEL.EvtID (bits 7:0) is configured
         * with a valid event code for supported resource type and the bits
         * IA32_QM_EVTSEL.RMID (bits 41:32) are configured with valid RMID,
         * IA32_QM_CTR.data (bits 61:0) reports the monitored data.
         * IA32_QM_CTR.Error (bit 63) and IA32_QM_CTR.Unavailable (bit 62)
         * are error bits.
         */
        wrmsr(MSR_IA32_QM_EVTSEL, eventid, rmid);
        rdmsrl(MSR_IA32_QM_CTR, val);

        return val;
}

static bool rmid_dirty(struct rmid_entry *entry)
{
        u64 val = __rmid_read(entry->rmid, QOS_L3_OCCUP_EVENT_ID);

        return val >= resctrl_cqm_threshold;
}

/*
 * Check the RMIDs that are marked as busy for this domain. If the
 * reported LLC occupancy is below the threshold clear the busy bit and
 * decrement the count. If the busy count gets to zero on an RMID, we
 * free the RMID.
 */
void __check_limbo(struct rdt_domain *d, bool force_free)
{
        struct rmid_entry *entry;
        struct rdt_resource *r;
        u32 crmid = 1, nrmid;

        r = &rdt_resources_all[RDT_RESOURCE_L3];

        /*
         * Skip RMID 0 and start from RMID 1 and check all the RMIDs that
         * are marked as busy for occupancy < threshold. If the occupancy
         * is less than the threshold decrement the busy counter of the
         * RMID and move it to the free list when the counter reaches 0.
         */
        for (;;) {
                nrmid = find_next_bit(d->rmid_busy_llc, r->num_rmid, crmid);
                if (nrmid >= r->num_rmid)
                        break;

                entry = __rmid_entry(nrmid);
                if (force_free || !rmid_dirty(entry)) {
                        clear_bit(entry->rmid, d->rmid_busy_llc);
                        if (!--entry->busy) {
                                rmid_limbo_count--;
                                list_add_tail(&entry->list, &rmid_free_lru);
                        }
                }
                crmid = nrmid + 1;
        }
}
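
/*
 * Note: entry->busy tracks how many domains still report this RMID's
 * occupancy above resctrl_cqm_threshold. Each domain's limbo worker
 * clears its own bit and decrements the count; the RMID only returns
 * to rmid_free_lru once every domain has dropped below the threshold
 * (or force_free is set).
 */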

bool has_busy_rmid(struct rdt_resource *r, struct rdt_domain *d)
{
        return find_first_bit(d->rmid_busy_llc, r->num_rmid) != r->num_rmid;
}

/*
 * As of now the RMIDs allocation is global.
 * However we keep track of which packages the RMIDs
 * are used to optimize the limbo list management.
 */
int alloc_rmid(void)
{
        struct rmid_entry *entry;

        lockdep_assert_held(&rdtgroup_mutex);

        if (list_empty(&rmid_free_lru))
                return rmid_limbo_count ? -EBUSY : -ENOSPC;

        entry = list_first_entry(&rmid_free_lru,
                                 struct rmid_entry, list);
        list_del(&entry->list);

        return entry->rmid;
}
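
/*
 * RMID lifecycle: a monitoring group takes an RMID with alloc_rmid()
 * (under rdtgroup_mutex) and returns it with free_rmid() when the group
 * is removed. If LLC occupancy monitoring is enabled, a freed RMID is
 * parked on the limbo lists below until its cached data has drained,
 * rather than going straight back to rmid_free_lru.
 */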

static void add_rmid_to_limbo(struct rmid_entry *entry)
{
        struct rdt_resource *r;
        struct rdt_domain *d;
        int cpu;
        u64 val;

        r = &rdt_resources_all[RDT_RESOURCE_L3];

        entry->busy = 0;
        cpu = get_cpu();
        list_for_each_entry(d, &r->domains, list) {
                if (cpumask_test_cpu(cpu, &d->cpu_mask)) {
                        val = __rmid_read(entry->rmid, QOS_L3_OCCUP_EVENT_ID);
                        if (val <= resctrl_cqm_threshold)
                                continue;
                }

                /*
                 * For the first limbo RMID in the domain,
                 * set up the limbo worker.
                 */
                if (!has_busy_rmid(r, d))
                        cqm_setup_limbo_handler(d, CQM_LIMBOCHECK_INTERVAL);
                set_bit(entry->rmid, d->rmid_busy_llc);
                entry->busy++;
        }
        put_cpu();

        if (entry->busy)
                rmid_limbo_count++;
        else
                list_add_tail(&entry->list, &rmid_free_lru);
}

void free_rmid(u32 rmid)
{
        struct rmid_entry *entry;

        if (!rmid)
                return;

        lockdep_assert_held(&rdtgroup_mutex);

        entry = __rmid_entry(rmid);

        if (is_llc_occupancy_enabled())
                add_rmid_to_limbo(entry);
        else
                list_add_tail(&entry->list, &rmid_free_lru);
}

static u64 mbm_overflow_count(u64 prev_msr, u64 cur_msr)
{
        u64 shift = 64 - MBM_CNTR_WIDTH, chunks;

        chunks = (cur_msr << shift) - (prev_msr << shift);
        return chunks >> shift;
}
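
/*
 * Example of the wraparound handling above, assuming (purely for
 * illustration) MBM_CNTR_WIDTH == 24, i.e. shift == 40:
 *
 *   prev_msr = 0xfffff0, cur_msr = 0x000010 (the counter wrapped)
 *   (cur_msr << 40) - (prev_msr << 40) == 0x20 << 40
 *   result >> 40 == 0x20 chunks
 *
 * Shifting both values so the counter's MSB becomes bit 63 lets the
 * unsigned subtraction discard the wrapped-away high bits.
 */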

static int __mon_event_count(u32 rmid, struct rmid_read *rr)
{
        struct mbm_state *m;
        u64 chunks, tval;

        tval = __rmid_read(rmid, rr->evtid);
        if (tval & (RMID_VAL_ERROR | RMID_VAL_UNAVAIL)) {
                rr->val = tval;
                return -EINVAL;
        }
        switch (rr->evtid) {
        case QOS_L3_OCCUP_EVENT_ID:
                rr->val += tval;
                return 0;
        case QOS_L3_MBM_TOTAL_EVENT_ID:
                m = &rr->d->mbm_total[rmid];
                break;
        case QOS_L3_MBM_LOCAL_EVENT_ID:
                m = &rr->d->mbm_local[rmid];
                break;
        default:
                /*
                 * Code would never reach here because
                 * an invalid event id would fail the __rmid_read.
                 */
                return -EINVAL;
        }

        if (rr->first) {
                memset(m, 0, sizeof(struct mbm_state));
                m->prev_bw_msr = m->prev_msr = tval;
                return 0;
        }

        chunks = mbm_overflow_count(m->prev_msr, tval);
        m->chunks += chunks;
        m->prev_msr = tval;

        rr->val += m->chunks;
        return 0;
}
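
/*
 * rr->val accumulated here is still in hardware "chunks"; the resctrl
 * filesystem read path multiplies it by r->mon_scale
 * (boot_cpu_data.x86_cache_occ_scale) to report bytes to user space.
 */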

/*
 * Supporting function to calculate the memory bandwidth
 * and delta bandwidth in MBps.
 */
static void mbm_bw_count(u32 rmid, struct rmid_read *rr)
{
        struct rdt_resource *r = &rdt_resources_all[RDT_RESOURCE_L3];
        struct mbm_state *m = &rr->d->mbm_local[rmid];
        u64 tval, cur_bw, chunks;

        tval = __rmid_read(rmid, rr->evtid);
        if (tval & (RMID_VAL_ERROR | RMID_VAL_UNAVAIL))
                return;

        chunks = mbm_overflow_count(m->prev_bw_msr, tval);
        m->chunks_bw += chunks;
        m->chunks = m->chunks_bw;
        cur_bw = (chunks * r->mon_scale) >> 20;

        if (m->delta_comp)
                m->delta_bw = abs(cur_bw - m->prev_bw);
        m->delta_comp = false;
        m->prev_bw = cur_bw;
        m->prev_bw_msr = tval;
}
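
/*
 * Bandwidth math above: chunks * mon_scale is the number of bytes moved
 * since the last poll, and ">> 20" converts bytes to MB. Because the
 * overflow handler samples once per second (MBM_OVERFLOW_INTERVAL),
 * that MB figure can be read directly as MBps.
 */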

/*
 * This is called via IPI to read the CQM/MBM counters
 * on a domain.
 */
void mon_event_count(void *info)
{
        struct rdtgroup *rdtgrp, *entry;
        struct rmid_read *rr = info;
        struct list_head *head;

        rdtgrp = rr->rgrp;

        if (__mon_event_count(rdtgrp->mon.rmid, rr))
                return;

        /*
         * For Ctrl groups read data from child monitor groups.
         */
        head = &rdtgrp->mon.crdtgrp_list;

        if (rdtgrp->type == RDTCTRL_GROUP) {
                list_for_each_entry(entry, head, mon.crdtgrp_list) {
                        if (__mon_event_count(entry->mon.rmid, rr))
                                return;
                }
        }
}

/*
 * Feedback loop for MBA software controller (mba_sc)
 *
 * mba_sc is a feedback loop where we periodically read MBM counters and
 * adjust the bandwidth percentage values via the IA32_MBA_THRTL_MSRs so
 * that:
 *
 *   current bandwidth(cur_bw) < user specified bandwidth(user_bw)
 *
 * This uses the MBM counters to measure the bandwidth and MBA throttle
 * MSRs to control the bandwidth for a particular rdtgrp. It builds on the
 * fact that resctrl rdtgroups have both monitoring and control.
 *
 * The frequency of the checks is 1s and we just tag along the MBM overflow
 * timer. Having a 1s interval makes the calculation of bandwidth simpler.
 *
 * Although MBA's goal is to restrict the bandwidth to a maximum, there may
 * be a need to increase the bandwidth to avoid unnecessarily restricting
 * the L2 <-> L3 traffic.
 *
 * Since MBA controls the L2 external bandwidth whereas MBM measures the
 * L3 external bandwidth, the following sequence could lead to such a
 * situation.
 *
 * Consider an rdtgroup which had high L3 <-> memory traffic in initial
 * phases -> mba_sc kicks in and reduces bandwidth percentage values -> but
 * after some time the rdtgroup has mostly L2 <-> L3 traffic.
 *
 * In this case we may restrict the rdtgroup's L2 <-> L3 traffic as its
 * throttle MSRs already have low percentage values. To avoid
 * unnecessarily restricting such rdtgroups, we also increase the bandwidth.
 */
static void update_mba_bw(struct rdtgroup *rgrp, struct rdt_domain *dom_mbm)
{
        u32 closid, rmid, cur_msr, cur_msr_val, new_msr_val;
        struct mbm_state *pmbm_data, *cmbm_data;
        u32 cur_bw, delta_bw, user_bw;
        struct rdt_resource *r_mba;
        struct rdt_domain *dom_mba;
        struct list_head *head;
        struct rdtgroup *entry;

        if (!is_mbm_local_enabled())
                return;

        r_mba = &rdt_resources_all[RDT_RESOURCE_MBA];
        closid = rgrp->closid;
        rmid = rgrp->mon.rmid;
        pmbm_data = &dom_mbm->mbm_local[rmid];

        dom_mba = get_domain_from_cpu(smp_processor_id(), r_mba);
        if (!dom_mba) {
                pr_warn_once("Failure to get domain for MBA update\n");
                return;
        }

        cur_bw = pmbm_data->prev_bw;
        user_bw = dom_mba->mbps_val[closid];
        delta_bw = pmbm_data->delta_bw;
        cur_msr_val = dom_mba->ctrl_val[closid];

        /*
         * For Ctrl groups read data from child monitor groups.
         */
        head = &rgrp->mon.crdtgrp_list;
        list_for_each_entry(entry, head, mon.crdtgrp_list) {
                cmbm_data = &dom_mbm->mbm_local[entry->mon.rmid];
                cur_bw += cmbm_data->prev_bw;
                delta_bw += cmbm_data->delta_bw;
        }

        /*
         * Scale up/down the bandwidth linearly for the ctrl group. The
         * bandwidth step is the bandwidth granularity specified by the
         * hardware.
         *
         * The delta_bw is used when increasing the bandwidth so that we
         * don't alternately increase and decrease the control values
         * continuously.
         *
         * For ex: consider cur_bw = 90MBps, user_bw = 100MBps and if the
         * bandwidth step is 20MBps(> user_bw - cur_bw), we would keep
         * switching between 90 and 110 continuously if we only check
         * cur_bw < user_bw.
         */
        if (cur_msr_val > r_mba->membw.min_bw && user_bw < cur_bw) {
                new_msr_val = cur_msr_val - r_mba->membw.bw_gran;
        } else if (cur_msr_val < MAX_MBA_BW &&
                   (user_bw > (cur_bw + delta_bw))) {
                new_msr_val = cur_msr_val + r_mba->membw.bw_gran;
        } else {
                return;
        }

        cur_msr = r_mba->msr_base + closid;
        wrmsrl(cur_msr, delay_bw_map(new_msr_val, r_mba));
        dom_mba->ctrl_val[closid] = new_msr_val;

        /*
         * Delta values are updated dynamically package-wise for each
         * rdtgrp every time the throttle MSR changes value.
         *
         * This is because (1) the increase in bandwidth is not perfectly
         * linear and only "approximately" linear even when the hardware
         * says it is linear. (2) Also since MBA is a core specific
         * mechanism, the delta values vary based on the number of cores
         * used by the rdtgrp.
         */
        pmbm_data->delta_comp = true;
        list_for_each_entry(entry, head, mon.crdtgrp_list) {
                cmbm_data = &dom_mbm->mbm_local[entry->mon.rmid];
                cmbm_data->delta_comp = true;
        }
}
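
/*
 * Illustrative values only (not taken from real hardware): with
 * bw_gran = 10, min_bw = 10, cur_msr_val = 50 and user_bw = 1000 MBps,
 * a measured cur_bw of 1200 MBps steps the throttle value down to 40;
 * if cur_bw later drops to 700 MBps and delta_bw is 100 MBps, then
 * user_bw > cur_bw + delta_bw holds and the value steps back up to 50.
 * The delta_bw check is what keeps the controller from oscillating
 * around user_bw by one step each second.
 */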

static void mbm_update(struct rdt_domain *d, int rmid)
{
        struct rmid_read rr;

        rr.first = false;
        rr.d = d;

        /*
         * This is protected from concurrent reads from user
         * as both the user and we hold the global mutex.
         */
        if (is_mbm_total_enabled()) {
                rr.evtid = QOS_L3_MBM_TOTAL_EVENT_ID;
                __mon_event_count(rmid, &rr);
        }
        if (is_mbm_local_enabled()) {
                rr.evtid = QOS_L3_MBM_LOCAL_EVENT_ID;

                /*
                 * Call the MBA software controller only for the
                 * control groups and when the user has enabled
                 * the software controller explicitly.
                 */
                if (!is_mba_sc(NULL))
                        __mon_event_count(rmid, &rr);
                else
                        mbm_bw_count(rmid, &rr);
        }
}
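
/*
 * mbm_update() is driven once per second by mbm_handle_overflow() below.
 * The regular polling matters because mbm_overflow_count() can only
 * account for at most one wrap of the MBM_CNTR_WIDTH-bit hardware
 * counter between two reads.
 */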

/*
 * Handler to scan the limbo list and move RMIDs whose occupancy has
 * dropped below threshold_occupancy back to the free list.
 */
void cqm_handle_limbo(struct work_struct *work)
{
        unsigned long delay = msecs_to_jiffies(CQM_LIMBOCHECK_INTERVAL);
        int cpu = smp_processor_id();
        struct rdt_resource *r;
        struct rdt_domain *d;

        mutex_lock(&rdtgroup_mutex);

        r = &rdt_resources_all[RDT_RESOURCE_L3];
        d = get_domain_from_cpu(cpu, r);

        if (!d) {
                pr_warn_once("Failure to get domain for limbo worker\n");
                goto out_unlock;
        }

        __check_limbo(d, false);

        if (has_busy_rmid(r, d))
                schedule_delayed_work_on(cpu, &d->cqm_limbo, delay);

out_unlock:
        mutex_unlock(&rdtgroup_mutex);
}

void cqm_setup_limbo_handler(struct rdt_domain *dom, unsigned long delay_ms)
{
        unsigned long delay = msecs_to_jiffies(delay_ms);
        int cpu;

        cpu = cpumask_any(&dom->cpu_mask);
        dom->cqm_work_cpu = cpu;

        schedule_delayed_work_on(cpu, &dom->cqm_limbo, delay);
}

void mbm_handle_overflow(struct work_struct *work)
{
        unsigned long delay = msecs_to_jiffies(MBM_OVERFLOW_INTERVAL);
        struct rdtgroup *prgrp, *crgrp;
        int cpu = smp_processor_id();
        struct list_head *head;
        struct rdt_domain *d;

        mutex_lock(&rdtgroup_mutex);

        if (!static_branch_likely(&rdt_enable_key))
                goto out_unlock;

        d = get_domain_from_cpu(cpu, &rdt_resources_all[RDT_RESOURCE_L3]);
        if (!d)
                goto out_unlock;

        list_for_each_entry(prgrp, &rdt_all_groups, rdtgroup_list) {
                mbm_update(d, prgrp->mon.rmid);

                head = &prgrp->mon.crdtgrp_list;
                list_for_each_entry(crgrp, head, mon.crdtgrp_list)
                        mbm_update(d, crgrp->mon.rmid);

                if (is_mba_sc(NULL))
                        update_mba_bw(prgrp, d);
        }

        schedule_delayed_work_on(cpu, &d->mbm_over, delay);

out_unlock:
        mutex_unlock(&rdtgroup_mutex);
}

void mbm_setup_overflow_handler(struct rdt_domain *dom, unsigned long delay_ms)
{
        unsigned long delay = msecs_to_jiffies(delay_ms);
        int cpu;

        if (!static_branch_likely(&rdt_enable_key))
                return;
        cpu = cpumask_any(&dom->cpu_mask);
        dom->mbm_work_cpu = cpu;
        schedule_delayed_work_on(cpu, &dom->mbm_over, delay);
}

static int dom_data_init(struct rdt_resource *r)
{
        struct rmid_entry *entry = NULL;
        int i, nr_rmids;

        nr_rmids = r->num_rmid;
        rmid_ptrs = kcalloc(nr_rmids, sizeof(struct rmid_entry), GFP_KERNEL);
        if (!rmid_ptrs)
                return -ENOMEM;

        for (i = 0; i < nr_rmids; i++) {
                entry = &rmid_ptrs[i];
                INIT_LIST_HEAD(&entry->list);

                entry->rmid = i;
                list_add_tail(&entry->list, &rmid_free_lru);
        }

        /*
         * RMID 0 is special and is always allocated. It's used for all
         * tasks that are not monitored.
         */
        entry = __rmid_entry(0);
        list_del(&entry->list);

        return 0;
}

static struct mon_evt llc_occupancy_event = {
        .name           = "llc_occupancy",
        .evtid          = QOS_L3_OCCUP_EVENT_ID,
};

static struct mon_evt mbm_total_event = {
        .name           = "mbm_total_bytes",
        .evtid          = QOS_L3_MBM_TOTAL_EVENT_ID,
};

static struct mon_evt mbm_local_event = {
        .name           = "mbm_local_bytes",
        .evtid          = QOS_L3_MBM_LOCAL_EVENT_ID,
};

/*
 * Initialize the event list for the resource.
 *
 * Note that MBM events are also part of RDT_RESOURCE_L3 resource
 * because as per the SDM the total and local memory bandwidth
 * are enumerated as part of L3 monitoring.
 */
static void l3_mon_evt_init(struct rdt_resource *r)
{
        INIT_LIST_HEAD(&r->evt_list);

        if (is_llc_occupancy_enabled())
                list_add_tail(&llc_occupancy_event.list, &r->evt_list);
        if (is_mbm_total_enabled())
                list_add_tail(&mbm_total_event.list, &r->evt_list);
        if (is_mbm_local_enabled())
                list_add_tail(&mbm_local_event.list, &r->evt_list);
}

int rdt_get_mon_l3_config(struct rdt_resource *r)
{
        unsigned int cl_size = boot_cpu_data.x86_cache_size;
        int ret;

        r->mon_scale = boot_cpu_data.x86_cache_occ_scale;
        r->num_rmid = boot_cpu_data.x86_cache_max_rmid + 1;

        /*
         * A reasonable upper limit on the max threshold is the number
         * of lines tagged per RMID if all RMIDs have the same number of
         * lines tagged in the LLC.
         *
         * For a 35MB LLC and 56 RMIDs, this is ~1.8% of the LLC.
         */
        resctrl_cqm_threshold = cl_size * 1024 / r->num_rmid;

        /* h/w works in units of "boot_cpu_data.x86_cache_occ_scale" */
        resctrl_cqm_threshold /= r->mon_scale;

        ret = dom_data_init(r);
        if (ret)
                return ret;

        l3_mon_evt_init(r);

        r->mon_capable = true;
        r->mon_enabled = true;

        return 0;
}