// SPDX-License-Identifier: GPL-2.0-only
/*
 * Resource Director Technology(RDT)
 * - Monitoring code
 *
 * Copyright (C) 2017 Intel Corporation
 *
 * Author:
 *    Vikas Shivappa <vikas.shivappa@intel.com>
 *
 * This replaces the cqm.c based on perf but we reuse a lot of
 * code and datastructures originally from Peter Zijlstra and Matt Fleming.
 *
 * More information about RDT can be found in the Intel (R) x86 Architecture
 * Software Developer Manual June 2016, volume 3, section 17.17.
 */

#include <linux/module.h>
#include <linux/sizes.h>
#include <linux/slab.h>

#include <asm/cpu_device_id.h>
#include <asm/resctrl.h>

#include "internal.h"
struct rmid_entry {
	u32			rmid;
	int			busy;
	struct list_head	list;
};
/*
 * @rmid_free_lru - A least recently used list of free RMIDs
 *     These RMIDs are guaranteed to have an occupancy less than the
 *     threshold occupancy
 */
static LIST_HEAD(rmid_free_lru);
/*
 * @rmid_limbo_count - count of currently unused but (potentially)
 *     dirty RMIDs.
 *     This counts RMIDs that no one is currently using but that
 *     may have an occupancy value > resctrl_rmid_realloc_threshold. User can
 *     change the threshold occupancy value.
 */
static unsigned int rmid_limbo_count;
/*
 * @rmid_entry - The entry in the limbo and free lists.
 */
static struct rmid_entry	*rmid_ptrs;
/*
 * Global boolean for rdt_monitor which is true if any
 * resource monitoring is enabled.
 */
bool rdt_mon_capable;

/*
 * Global to indicate which monitoring events are enabled.
 */
unsigned int rdt_mon_features;
/*
 * This is the threshold cache occupancy in bytes at which we will consider an
 * RMID available for re-allocation.
 */
unsigned int resctrl_rmid_realloc_threshold;
/*
 * This is the maximum value for the reallocation threshold, in bytes.
 */
unsigned int resctrl_rmid_realloc_limit;
#define CF(cf)	((unsigned long)(1048576 * (cf) + 0.5))
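/*
 * CF() stores a correction factor as a 2^20-scaled fixed-point value, e.g.
 * CF(1.25) == (unsigned long)(1048576 * 1.25 + 0.5) == 1310720, so
 * (val * CF(1.25)) >> 20 scales val by 1.25 using integer arithmetic only.
 */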
/*
 * The correction factor table is documented in Documentation/arch/x86/resctrl.rst.
 * If rmid > rmid threshold, MBM total and local values should be multiplied
 * by the correction factor.
 *
 * The original table is modified for better code:
 *
 * 1. The threshold 0 is changed to rmid count - 1 so don't do correction
 *    for the case.
 * 2. MBM total and local correction table indexed by core counter which is
 *    equal to (x86_cache_max_rmid + 1) / 8 - 1 and is from 0 up to 27.
 * 3. The correction factor is normalized to 2^20 (1048576) so it's faster
 *    to calculate corrected value by shifting:
 *    corrected_value = (original_value * correction_factor) >> 20
 */
static const struct mbm_correction_factor_table {
	u32 rmidthreshold;
	u64 cf;
} mbm_cf_table[] __initconst = {
	/* {rmidthreshold, CF(factor)} entries, see Documentation/arch/x86/resctrl.rst */
};
static u32 mbm_cf_rmidthreshold __read_mostly = UINT_MAX;
static u64 mbm_cf __read_mostly;
static inline u64 get_corrected_mbm_count(u32 rmid, unsigned long val)
{
	/* Correct MBM value. */
	if (rmid > mbm_cf_rmidthreshold)
		val = (val * mbm_cf) >> 20;

	return val;
}
static inline struct rmid_entry *__rmid_entry(u32 rmid)
{
	struct rmid_entry *entry;

	entry = &rmid_ptrs[rmid];
	WARN_ON(entry->rmid != rmid);

	return entry;
}
static int __rmid_read(u32 rmid, enum resctrl_event_id eventid, u64 *val)
{
	u64 msr_val;

	/*
	 * As per the SDM, when IA32_QM_EVTSEL.EvtID (bits 7:0) is configured
	 * with a valid event code for supported resource type and the bits
	 * IA32_QM_EVTSEL.RMID (bits 41:32) are configured with valid RMID,
	 * IA32_QM_CTR.data (bits 61:0) reports the monitored data.
	 * IA32_QM_CTR.Error (bit 63) and IA32_QM_CTR.Unavailable (bit 62)
	 * are error bits.
	 */
	wrmsr(MSR_IA32_QM_EVTSEL, eventid, rmid);
	rdmsrl(MSR_IA32_QM_CTR, msr_val);

	if (msr_val & RMID_VAL_ERROR)
		return -EIO;
	if (msr_val & RMID_VAL_UNAVAIL)
		return -EINVAL;

	*val = msr_val;
	return 0;
}
static struct arch_mbm_state *get_arch_mbm_state(struct rdt_hw_domain *hw_dom,
						 u32 rmid,
						 enum resctrl_event_id eventid)
{
	switch (eventid) {
	case QOS_L3_OCCUP_EVENT_ID:
		return NULL;
	case QOS_L3_MBM_TOTAL_EVENT_ID:
		return &hw_dom->arch_mbm_total[rmid];
	case QOS_L3_MBM_LOCAL_EVENT_ID:
		return &hw_dom->arch_mbm_local[rmid];
	}

	/* Never expect to get here */
	WARN_ON_ONCE(1);

	return NULL;
}
void resctrl_arch_reset_rmid(struct rdt_resource *r, struct rdt_domain *d,
			     u32 rmid, enum resctrl_event_id eventid)
{
	struct rdt_hw_domain *hw_dom = resctrl_to_arch_dom(d);
	struct arch_mbm_state *am;

	am = get_arch_mbm_state(hw_dom, rmid, eventid);
	if (am) {
		memset(am, 0, sizeof(*am));

		/* Record any initial, non-zero count value. */
		__rmid_read(rmid, eventid, &am->prev_msr);
	}
}
/*
 * Assumes that hardware counters are also reset and thus that there is
 * no need to record initial non-zero counts.
 */
void resctrl_arch_reset_rmid_all(struct rdt_resource *r, struct rdt_domain *d)
{
	struct rdt_hw_domain *hw_dom = resctrl_to_arch_dom(d);

	if (is_mbm_total_enabled())
		memset(hw_dom->arch_mbm_total, 0,
		       sizeof(*hw_dom->arch_mbm_total) * r->num_rmid);

	if (is_mbm_local_enabled())
		memset(hw_dom->arch_mbm_local, 0,
		       sizeof(*hw_dom->arch_mbm_local) * r->num_rmid);
}
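/*
 * mbm_overflow_count() below computes how many chunks the free-running,
 * 'width'-bit wide MBM counter advanced between two reads. Shifting both
 * samples into the top of a u64 makes the subtraction wrap at the counter
 * width, so a single hardware wrap between reads is still counted
 * correctly. For example, with width == 24, prev_msr == 0xfffffe and
 * cur_msr == 0x000001, the result is 3 chunks.
 */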
static u64 mbm_overflow_count(u64 prev_msr, u64 cur_msr, unsigned int width)
{
	u64 shift = 64 - width, chunks;

	chunks = (cur_msr << shift) - (prev_msr << shift);
	return chunks >> shift;
}
int resctrl_arch_rmid_read(struct rdt_resource *r, struct rdt_domain *d,
			   u32 rmid, enum resctrl_event_id eventid, u64 *val)
{
	struct rdt_hw_resource *hw_res = resctrl_to_arch_res(r);
	struct rdt_hw_domain *hw_dom = resctrl_to_arch_dom(d);
	struct arch_mbm_state *am;
	u64 msr_val, chunks;
	int ret;

	if (!cpumask_test_cpu(smp_processor_id(), &d->cpu_mask))
		return -EINVAL;

	ret = __rmid_read(rmid, eventid, &msr_val);
	if (ret)
		return ret;

	am = get_arch_mbm_state(hw_dom, rmid, eventid);
	if (am) {
		am->chunks += mbm_overflow_count(am->prev_msr, msr_val,
						 hw_res->mbm_width);
		chunks = get_corrected_mbm_count(rmid, am->chunks);
		am->prev_msr = msr_val;
	} else {
		chunks = msr_val;
	}

	*val = chunks * hw_res->mon_scale;

	return 0;
}
/*
 * Check the RMIDs that are marked as busy for this domain. If the
 * reported LLC occupancy is below the threshold clear the busy bit and
 * decrement the count. If the busy count gets to zero on an RMID, we
 * free the RMID.
 */
void __check_limbo(struct rdt_domain *d, bool force_free)
{
	struct rdt_resource *r = &rdt_resources_all[RDT_RESOURCE_L3].r_resctrl;
	struct rmid_entry *entry;
	u32 crmid = 1, nrmid;
	bool rmid_dirty;
	u64 val = 0;

	/*
	 * Skip RMID 0 and start from RMID 1 and check all the RMIDs that
	 * are marked as busy for occupancy < threshold. If the occupancy
	 * is less than the threshold decrement the busy counter of the
	 * RMID and move it to the free list when the counter reaches 0.
	 */
	for (;;) {
		nrmid = find_next_bit(d->rmid_busy_llc, r->num_rmid, crmid);
		if (nrmid >= r->num_rmid)
			break;

		entry = __rmid_entry(nrmid);

		if (resctrl_arch_rmid_read(r, d, entry->rmid,
					   QOS_L3_OCCUP_EVENT_ID, &val)) {
			rmid_dirty = true;
		} else {
			rmid_dirty = (val >= resctrl_rmid_realloc_threshold);
		}

		if (force_free || !rmid_dirty) {
			clear_bit(entry->rmid, d->rmid_busy_llc);
			if (!--entry->busy) {
				rmid_limbo_count--;
				list_add_tail(&entry->list, &rmid_free_lru);
			}
		}
		crmid = nrmid + 1;
	}
}
bool has_busy_rmid(struct rdt_resource *r, struct rdt_domain *d)
{
	return find_first_bit(d->rmid_busy_llc, r->num_rmid) != r->num_rmid;
}
/*
 * As of now the RMIDs allocation is global.
 * However we keep track of which packages the RMIDs
 * are used on to optimize the limbo list management.
 */
int alloc_rmid(void)
{
	struct rmid_entry *entry;

	lockdep_assert_held(&rdtgroup_mutex);

	if (list_empty(&rmid_free_lru))
		return rmid_limbo_count ? -EBUSY : -ENOSPC;

	entry = list_first_entry(&rmid_free_lru,
				 struct rmid_entry, list);
	list_del(&entry->list);

	return entry->rmid;
}
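/*
 * add_rmid_to_limbo() is called when an RMID is freed while LLC occupancy
 * monitoring is enabled. The RMID is kept off rmid_free_lru and marked busy
 * on every domain where it may still have cached data above
 * resctrl_rmid_realloc_threshold; the limbo worker later clears the busy
 * bits as the occupancy drains and finally returns the RMID to the free list.
 */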
static void add_rmid_to_limbo(struct rmid_entry *entry)
{
	struct rdt_resource *r = &rdt_resources_all[RDT_RESOURCE_L3].r_resctrl;
	struct rdt_domain *d;
	int cpu, err;
	u64 val = 0;

	entry->busy = 0;
	cpu = get_cpu();
	list_for_each_entry(d, &r->domains, list) {
		if (cpumask_test_cpu(cpu, &d->cpu_mask)) {
			err = resctrl_arch_rmid_read(r, d, entry->rmid,
						     QOS_L3_OCCUP_EVENT_ID,
						     &val);
			if (err || val <= resctrl_rmid_realloc_threshold)
				continue;
		}

		/*
		 * For the first limbo RMID in the domain,
		 * set up the limbo worker.
		 */
		if (!has_busy_rmid(r, d))
			cqm_setup_limbo_handler(d, CQM_LIMBOCHECK_INTERVAL);
		set_bit(entry->rmid, d->rmid_busy_llc);
		entry->busy++;
	}
	put_cpu();

	if (entry->busy)
		rmid_limbo_count++;
	else
		list_add_tail(&entry->list, &rmid_free_lru);
}
void free_rmid(u32 rmid)
{
	struct rmid_entry *entry;

	if (!rmid)
		return;

	lockdep_assert_held(&rdtgroup_mutex);

	entry = __rmid_entry(rmid);

	if (is_llc_occupancy_enabled())
		add_rmid_to_limbo(entry);
	else
		list_add_tail(&entry->list, &rmid_free_lru);
}
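/*
 * get_mbm_state() below maps an MBM event id to the mbm_state cached for
 * @rmid in domain @d; events other than the two MBM events have no such
 * state and yield NULL.
 */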
static struct mbm_state *get_mbm_state(struct rdt_domain *d, u32 rmid,
				       enum resctrl_event_id evtid)
{
	switch (evtid) {
	case QOS_L3_MBM_TOTAL_EVENT_ID:
		return &d->mbm_total[rmid];
	case QOS_L3_MBM_LOCAL_EVENT_ID:
		return &d->mbm_local[rmid];
	default:
		return NULL;
	}
}
static int __mon_event_count(u32 rmid, struct rmid_read *rr)
{
	struct mbm_state *m;
	u64 tval = 0;

	if (rr->first) {
		resctrl_arch_reset_rmid(rr->r, rr->d, rmid, rr->evtid);
		m = get_mbm_state(rr->d, rmid, rr->evtid);
		if (m)
			memset(m, 0, sizeof(struct mbm_state));
		return 0;
	}

	rr->err = resctrl_arch_rmid_read(rr->r, rr->d, rmid, rr->evtid, &tval);
	if (rr->err)
		return rr->err;

	rr->val += tval;

	return 0;
}
/*
 * mbm_bw_count() - Update bw count from values previously read by
 *		    __mon_event_count().
 * @rmid:	The rmid used to identify the cached mbm_state.
 * @rr:		The struct rmid_read populated by __mon_event_count().
 *
 * Supporting function to calculate the memory bandwidth
 * and delta bandwidth in MBps. The chunks value previously read by
 * __mon_event_count() is compared with the chunks value from the previous
 * invocation. This must be called once per second to maintain values in MBps.
 */
static void mbm_bw_count(u32 rmid, struct rmid_read *rr)
{
	struct mbm_state *m = &rr->d->mbm_local[rmid];
	u64 cur_bw, bytes, cur_bytes;

	cur_bytes = rr->val;
	bytes = cur_bytes - m->prev_bw_bytes;
	m->prev_bw_bytes = cur_bytes;

	cur_bw = bytes / SZ_1M;

	if (m->delta_comp)
		m->delta_bw = abs(cur_bw - m->prev_bw);
	m->delta_comp = false;
	m->prev_bw = cur_bw;
}
/*
 * This is called via IPI to read the CQM/MBM counters
 * on a domain.
 */
void mon_event_count(void *info)
{
	struct rdtgroup *rdtgrp, *entry;
	struct rmid_read *rr = info;
	struct list_head *head;
	int ret;

	rdtgrp = rr->rgrp;

	ret = __mon_event_count(rdtgrp->mon.rmid, rr);

	/*
	 * For Ctrl groups read data from child monitor groups and
	 * add them together. Count events which are read successfully.
	 * Discard the rmid_read's reporting errors.
	 */
	head = &rdtgrp->mon.crdtgrp_list;

	if (rdtgrp->type == RDTCTRL_GROUP) {
		list_for_each_entry(entry, head, mon.crdtgrp_list) {
			if (__mon_event_count(entry->mon.rmid, rr) == 0)
				ret = 0;
		}
	}

	/*
	 * __mon_event_count() calls for newly created monitor groups may
	 * report -EINVAL/Unavailable if the monitor hasn't seen any traffic.
	 * Discard error if any of the monitor event reads succeeded.
	 */
	if (ret == 0)
		rr->err = 0;
}
/*
 * Feedback loop for MBA software controller (mba_sc)
 *
 * mba_sc is a feedback loop where we periodically read MBM counters and
 * adjust the bandwidth percentage values via the IA32_MBA_THRTL_MSRs so
 * that:
 *
 *   current bandwidth(cur_bw) < user specified bandwidth(user_bw)
 *
 * This uses the MBM counters to measure the bandwidth and MBA throttle
 * MSRs to control the bandwidth for a particular rdtgrp. It builds on the
 * fact that resctrl rdtgroups have both monitoring and control.
 *
 * The frequency of the checks is 1s and we just tag along the MBM overflow
 * timer. Having 1s interval makes the calculation of bandwidth simpler.
 *
 * Although MBA's goal is to restrict the bandwidth to a maximum, there may
 * be a need to increase the bandwidth to avoid unnecessarily restricting
 * the L2 <-> L3 traffic.
 *
 * Since MBA controls the L2 external bandwidth whereas MBM measures the
 * L3 external bandwidth the following sequence could lead to such a
 * situation.
 *
 * Consider an rdtgroup which had high L3 <-> memory traffic in initial
 * phases -> mba_sc kicks in and reduced bandwidth percentage values -> but
 * after some time rdtgroup has mostly L2 <-> L3 traffic.
 *
 * In this case we may restrict the rdtgroup's L2 <-> L3 traffic as its
 * throttle MSRs already have low percentage values. To avoid
 * unnecessarily restricting such rdtgroups, we also increase the bandwidth.
 */
static void update_mba_bw(struct rdtgroup *rgrp, struct rdt_domain *dom_mbm)
{
	u32 closid, rmid, cur_msr_val, new_msr_val;
	struct mbm_state *pmbm_data, *cmbm_data;
	u32 cur_bw, delta_bw, user_bw;
	struct rdt_resource *r_mba;
	struct rdt_domain *dom_mba;
	struct list_head *head;
	struct rdtgroup *entry;

	if (!is_mbm_local_enabled())
		return;

	r_mba = &rdt_resources_all[RDT_RESOURCE_MBA].r_resctrl;

	closid = rgrp->closid;
	rmid = rgrp->mon.rmid;
	pmbm_data = &dom_mbm->mbm_local[rmid];

	dom_mba = get_domain_from_cpu(smp_processor_id(), r_mba);
	if (!dom_mba) {
		pr_warn_once("Failure to get domain for MBA update\n");
		return;
	}

	cur_bw = pmbm_data->prev_bw;
	user_bw = dom_mba->mbps_val[closid];
	delta_bw = pmbm_data->delta_bw;

	/* MBA resource doesn't support CDP */
	cur_msr_val = resctrl_arch_get_config(r_mba, dom_mba, closid, CDP_NONE);

	/*
	 * For Ctrl groups read data from child monitor groups.
	 */
	head = &rgrp->mon.crdtgrp_list;
	list_for_each_entry(entry, head, mon.crdtgrp_list) {
		cmbm_data = &dom_mbm->mbm_local[entry->mon.rmid];
		cur_bw += cmbm_data->prev_bw;
		delta_bw += cmbm_data->delta_bw;
	}

	/*
	 * Scale up/down the bandwidth linearly for the ctrl group. The
	 * bandwidth step is the bandwidth granularity specified by the
	 * hardware.
	 *
	 * The delta_bw is used when increasing the bandwidth so that we
	 * don't alternately increase and decrease the control values
	 * continuously.
	 *
	 * For ex: consider cur_bw = 90MBps, user_bw = 100MBps and if
	 * bandwidth step is 20MBps(> user_bw - cur_bw), we would keep
	 * switching between 90 and 110 continuously if we only check
	 * cur_bw < user_bw.
	 */
	if (cur_msr_val > r_mba->membw.min_bw && user_bw < cur_bw) {
		new_msr_val = cur_msr_val - r_mba->membw.bw_gran;
	} else if (cur_msr_val < MAX_MBA_BW &&
		   (user_bw > (cur_bw + delta_bw))) {
		new_msr_val = cur_msr_val + r_mba->membw.bw_gran;
	} else {
		return;
	}

	resctrl_arch_update_one(r_mba, dom_mba, closid, CDP_NONE, new_msr_val);

	/*
	 * Delta values are updated dynamically package wise for each
	 * rdtgrp every time the throttle MSR changes value.
	 *
	 * This is because (1) the increase in bandwidth is not perfectly
	 * linear and only "approximately" linear even when the hardware
	 * says it is linear. (2) Also since MBA is a core specific
	 * mechanism, the delta values vary based on number of cores used
	 * by the rdtgrp.
	 */
	pmbm_data->delta_comp = true;
	list_for_each_entry(entry, head, mon.crdtgrp_list) {
		cmbm_data = &dom_mbm->mbm_local[entry->mon.rmid];
		cmbm_data->delta_comp = true;
	}
}
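/*
 * mbm_update() below reads the enabled MBM events for one RMID on one
 * domain on behalf of the overflow worker, and feeds the local bandwidth
 * sample to the MBA software controller when mba_sc is in use.
 */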
static void mbm_update(struct rdt_resource *r, struct rdt_domain *d, int rmid)
{
	struct rmid_read rr;

	rr.first = false;
	rr.r = r;
	rr.d = d;

	/*
	 * This is protected from concurrent reads from user
	 * as both the user and we hold the global mutex.
	 */
	if (is_mbm_total_enabled()) {
		rr.evtid = QOS_L3_MBM_TOTAL_EVENT_ID;
		rr.val = 0;
		__mon_event_count(rmid, &rr);
	}
	if (is_mbm_local_enabled()) {
		rr.evtid = QOS_L3_MBM_LOCAL_EVENT_ID;
		rr.val = 0;
		__mon_event_count(rmid, &rr);

		/*
		 * Call the MBA software controller only for the
		 * control groups and when user has enabled
		 * the software controller explicitly.
		 */
		if (is_mba_sc(NULL))
			mbm_bw_count(rmid, &rr);
	}
}
/*
 * Handler to scan the limbo list and move the RMIDs
 * whose occupancy < threshold_occupancy to the free list.
 */
void cqm_handle_limbo(struct work_struct *work)
{
	unsigned long delay = msecs_to_jiffies(CQM_LIMBOCHECK_INTERVAL);
	int cpu = smp_processor_id();
	struct rdt_resource *r;
	struct rdt_domain *d;

	mutex_lock(&rdtgroup_mutex);

	r = &rdt_resources_all[RDT_RESOURCE_L3].r_resctrl;
	d = container_of(work, struct rdt_domain, cqm_limbo.work);

	__check_limbo(d, false);

	if (has_busy_rmid(r, d))
		schedule_delayed_work_on(cpu, &d->cqm_limbo, delay);

	mutex_unlock(&rdtgroup_mutex);
}
void cqm_setup_limbo_handler(struct rdt_domain *dom, unsigned long delay_ms)
{
	unsigned long delay = msecs_to_jiffies(delay_ms);
	int cpu;

	cpu = cpumask_any(&dom->cpu_mask);
	dom->cqm_work_cpu = cpu;

	schedule_delayed_work_on(cpu, &dom->cqm_limbo, delay);
}
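/*
 * mbm_handle_overflow() below is the periodic (MBM_OVERFLOW_INTERVAL, i.e.
 * once per second) worker that refreshes the MBM counts for every control
 * and monitor group on this domain, so the free-running hardware counters
 * never advance by more than one wrap between reads.
 */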
void mbm_handle_overflow(struct work_struct *work)
{
	unsigned long delay = msecs_to_jiffies(MBM_OVERFLOW_INTERVAL);
	struct rdtgroup *prgrp, *crgrp;
	int cpu = smp_processor_id();
	struct list_head *head;
	struct rdt_resource *r;
	struct rdt_domain *d;

	mutex_lock(&rdtgroup_mutex);

	if (!static_branch_likely(&rdt_mon_enable_key))
		goto out_unlock;

	r = &rdt_resources_all[RDT_RESOURCE_L3].r_resctrl;
	d = container_of(work, struct rdt_domain, mbm_over.work);

	list_for_each_entry(prgrp, &rdt_all_groups, rdtgroup_list) {
		mbm_update(r, d, prgrp->mon.rmid);

		head = &prgrp->mon.crdtgrp_list;
		list_for_each_entry(crgrp, head, mon.crdtgrp_list)
			mbm_update(r, d, crgrp->mon.rmid);

		if (is_mba_sc(NULL))
			update_mba_bw(prgrp, d);
	}

	schedule_delayed_work_on(cpu, &d->mbm_over, delay);

out_unlock:
	mutex_unlock(&rdtgroup_mutex);
}
void mbm_setup_overflow_handler(struct rdt_domain *dom, unsigned long delay_ms)
{
	unsigned long delay = msecs_to_jiffies(delay_ms);
	int cpu;

	if (!static_branch_likely(&rdt_mon_enable_key))
		return;

	cpu = cpumask_any(&dom->cpu_mask);
	dom->mbm_work_cpu = cpu;
	schedule_delayed_work_on(cpu, &dom->mbm_over, delay);
}
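/*
 * dom_data_init() below allocates the rmid_ptrs[] array for the resource
 * and seeds rmid_free_lru with every RMID except RMID 0, which stays
 * permanently allocated for unmonitored tasks.
 */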
static int dom_data_init(struct rdt_resource *r)
{
	struct rmid_entry *entry = NULL;
	int i, nr_rmids;

	nr_rmids = r->num_rmid;
	rmid_ptrs = kcalloc(nr_rmids, sizeof(struct rmid_entry), GFP_KERNEL);
	if (!rmid_ptrs)
		return -ENOMEM;

	for (i = 0; i < nr_rmids; i++) {
		entry = &rmid_ptrs[i];
		INIT_LIST_HEAD(&entry->list);

		entry->rmid = i;
		list_add_tail(&entry->list, &rmid_free_lru);
	}

	/*
	 * RMID 0 is special and is always allocated. It's used for all
	 * tasks that are not monitored.
	 */
	entry = __rmid_entry(0);
	list_del(&entry->list);

	return 0;
}
static struct mon_evt llc_occupancy_event = {
	.name		= "llc_occupancy",
	.evtid		= QOS_L3_OCCUP_EVENT_ID,
};

static struct mon_evt mbm_total_event = {
	.name		= "mbm_total_bytes",
	.evtid		= QOS_L3_MBM_TOTAL_EVENT_ID,
};

static struct mon_evt mbm_local_event = {
	.name		= "mbm_local_bytes",
	.evtid		= QOS_L3_MBM_LOCAL_EVENT_ID,
};
/*
 * Initialize the event list for the resource.
 *
 * Note that MBM events are also part of RDT_RESOURCE_L3 resource
 * because as per the SDM the total and local memory bandwidth
 * are enumerated as part of L3 monitoring.
 */
static void l3_mon_evt_init(struct rdt_resource *r)
{
	INIT_LIST_HEAD(&r->evt_list);

	if (is_llc_occupancy_enabled())
		list_add_tail(&llc_occupancy_event.list, &r->evt_list);
	if (is_mbm_total_enabled())
		list_add_tail(&mbm_total_event.list, &r->evt_list);
	if (is_mbm_local_enabled())
		list_add_tail(&mbm_local_event.list, &r->evt_list);
}
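/*
 * rdt_get_mon_l3_config() below sizes the RMID space from CPUID data,
 * derives the MBM counter width and the limbo reallocation threshold,
 * and registers the monitoring events for the L3 resource.
 */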
int __init rdt_get_mon_l3_config(struct rdt_resource *r)
{
	unsigned int mbm_offset = boot_cpu_data.x86_cache_mbm_width_offset;
	struct rdt_hw_resource *hw_res = resctrl_to_arch_res(r);
	unsigned int threshold;
	int ret;

	resctrl_rmid_realloc_limit = boot_cpu_data.x86_cache_size * 1024;
	hw_res->mon_scale = boot_cpu_data.x86_cache_occ_scale;
	r->num_rmid = boot_cpu_data.x86_cache_max_rmid + 1;
	hw_res->mbm_width = MBM_CNTR_WIDTH_BASE;

	if (mbm_offset > 0 && mbm_offset <= MBM_CNTR_WIDTH_OFFSET_MAX)
		hw_res->mbm_width += mbm_offset;
	else if (mbm_offset > MBM_CNTR_WIDTH_OFFSET_MAX)
		pr_warn("Ignoring impossible MBM counter offset\n");

	/*
	 * A reasonable upper limit on the max threshold is the number
	 * of lines tagged per RMID if all RMIDs have the same number of
	 * lines tagged in the LLC.
	 *
	 * For a 35MB LLC and 56 RMIDs, this is ~1.8% of the LLC.
	 */
	threshold = resctrl_rmid_realloc_limit / r->num_rmid;

	/*
	 * Because num_rmid may not be a power of two, round the value
	 * to the nearest multiple of hw_res->mon_scale so it matches a
	 * value the hardware will measure. mon_scale may not be a power of 2.
	 */
	resctrl_rmid_realloc_threshold = resctrl_arch_round_mon_val(threshold);

	ret = dom_data_init(r);
	if (ret)
		return ret;

	if (rdt_cpu_has(X86_FEATURE_BMEC)) {
		if (rdt_cpu_has(X86_FEATURE_CQM_MBM_TOTAL)) {
			mbm_total_event.configurable = true;
			mbm_config_rftype_init("mbm_total_bytes_config");
		}
		if (rdt_cpu_has(X86_FEATURE_CQM_MBM_LOCAL)) {
			mbm_local_event.configurable = true;
			mbm_config_rftype_init("mbm_local_bytes_config");
		}
	}

	l3_mon_evt_init(r);

	r->mon_capable = true;

	return 0;
}
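/*
 * intel_rdt_mbm_apply_quirk() below selects the MBM correction factor for
 * this part: cf_index is the "core count" index described above the
 * mbm_cf_table definition, i.e. (x86_cache_max_rmid + 1) / 8 - 1.
 */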
void __init intel_rdt_mbm_apply_quirk(void)
{
	int cf_index;

	cf_index = (boot_cpu_data.x86_cache_max_rmid + 1) / 8 - 1;
	if (cf_index >= ARRAY_SIZE(mbm_cf_table)) {
		pr_info("No MBM correction factor available\n");
		return;
	}

	mbm_cf_rmidthreshold = mbm_cf_table[cf_index].rmidthreshold;
	mbm_cf = mbm_cf_table[cf_index].cf;
}