// SPDX-License-Identifier: GPL-2.0
/*
 * Resource Director Technology (RDT)
 *
 * Pseudo-locking support built on top of Cache Allocation Technology (CAT)
 *
 * Copyright (C) 2018 Intel Corporation
 *
 * Author: Reinette Chatre <reinette.chatre@intel.com>
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/cacheinfo.h>
#include <linux/cpu.h>
#include <linux/cpumask.h>
#include <linux/debugfs.h>
#include <linux/kthread.h>
#include <linux/mman.h>
#include <linux/perf_event.h>
#include <linux/pm_qos.h>
#include <linux/slab.h>
#include <linux/uaccess.h>

#include <asm/cacheflush.h>
#include <asm/intel-family.h>
#include <asm/resctrl_sched.h>
#include <asm/perf_event.h>

#include "../../events/perf_event.h" /* For X86_CONFIG() */
#include "internal.h"

#define CREATE_TRACE_POINTS
#include "pseudo_lock_event.h"

/*
 * The bits needed to disable hardware prefetching vary based on the
 * platform. During initialization we will discover which bits to use.
 */
static u64 prefetch_disable_bits;

/*
 * Major number assigned to and shared by all devices exposing
 * pseudo-locked regions.
 */
static unsigned int pseudo_lock_major;
static unsigned long pseudo_lock_minor_avail = GENMASK(MINORBITS - 1, 0);
static struct class *pseudo_lock_class;

/**
 * get_prefetch_disable_bits - prefetch disable bits of supported platforms
 *
 * Capture the list of platforms that have been validated to support
 * pseudo-locking. This includes testing to ensure pseudo-locked regions
 * with low cache miss rates can be created under a variety of load conditions
 * as well as that these pseudo-locked regions can maintain their low cache
 * miss rates under a variety of load conditions for significant lengths of
 * time.
 *
 * After a platform has been validated to support pseudo-locking its
 * hardware prefetch disable bits are included here as they are documented
 * in the SDM.
 *
 * When adding a platform here also add support for its cache events to
 * measure_cycles_perf_fn()
 *
 * Return:
 * If platform is supported, the bits to disable hardware prefetchers, 0
 * if platform is not supported.
 */
static u64 get_prefetch_disable_bits(void)
{
        if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL ||
            boot_cpu_data.x86 != 6)
                return 0;

        switch (boot_cpu_data.x86_model) {
        case INTEL_FAM6_BROADWELL_X:
                /*
                 * SDM defines bits of MSR_MISC_FEATURE_CONTROL register
                 * as:
                 * 0    L2 Hardware Prefetcher Disable (R/W)
                 * 1    L2 Adjacent Cache Line Prefetcher Disable (R/W)
                 * 2    DCU Hardware Prefetcher Disable (R/W)
                 * 3    DCU IP Prefetcher Disable (R/W)
                 * 63:4 Reserved
                 */
                return 0xF;
        case INTEL_FAM6_ATOM_GOLDMONT:
        case INTEL_FAM6_ATOM_GOLDMONT_PLUS:
                /*
                 * SDM defines bits of MSR_MISC_FEATURE_CONTROL register
                 * as:
                 * 0     L2 Hardware Prefetcher Disable (R/W)
                 * 1     Reserved
                 * 2     DCU Hardware Prefetcher Disable (R/W)
                 * 63:3  Reserved
                 */
                return 0x5;
        }

        return 0;
}
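
/*
 * Illustrative use of the value returned above (a sketch mirroring the
 * locking and measurement threads later in this file, not additional
 * functionality):
 *
 *      wrmsr(MSR_MISC_FEATURE_CONTROL, prefetch_disable_bits, 0x0);
 *      ... access the memory to be pseudo-locked ...
 *      wrmsr(MSR_MISC_FEATURE_CONTROL, 0x0, 0x0);
 */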

/**
 * pseudo_lock_minor_get - Obtain available minor number
 * @minor: Pointer to where new minor number will be stored
 *
 * A bitmask is used to track available minor numbers. Here the next free
 * minor number is marked as unavailable and returned.
 *
 * Return: 0 on success, <0 on failure.
 */
static int pseudo_lock_minor_get(unsigned int *minor)
{
        unsigned long first_bit;

        first_bit = find_first_bit(&pseudo_lock_minor_avail, MINORBITS);

        if (first_bit == MINORBITS)
                return -ENOSPC;

        __clear_bit(first_bit, &pseudo_lock_minor_avail);
        *minor = first_bit;

        return 0;
}

/**
 * pseudo_lock_minor_release - Return minor number to available
 * @minor: The minor number made available
 */
static void pseudo_lock_minor_release(unsigned int minor)
{
        __set_bit(minor, &pseudo_lock_minor_avail);
}

/**
 * region_find_by_minor - Locate a pseudo-lock region by inode minor number
 * @minor: The minor number of the device representing pseudo-locked region
 *
 * When the character device is accessed we need to determine which
 * pseudo-locked region it belongs to. This is done by matching the device's
 * minor number to that of the pseudo-locked region.
 *
 * Minor numbers are assigned at the time a pseudo-locked region is associated
 * with a cache instance.
 *
 * Return: On success return pointer to resource group owning the pseudo-locked
 * region, NULL on failure.
 */
static struct rdtgroup *region_find_by_minor(unsigned int minor)
{
        struct rdtgroup *rdtgrp, *rdtgrp_match = NULL;

        list_for_each_entry(rdtgrp, &rdt_all_groups, rdtgroup_list) {
                if (rdtgrp->plr && rdtgrp->plr->minor == minor) {
                        rdtgrp_match = rdtgrp;
                        break;
                }
        }
        return rdtgrp_match;
}

/**
 * pseudo_lock_pm_req - A power management QoS request list entry
 * @list: Entry within the @pm_reqs list for a pseudo-locked region
 * @req:  PM QoS request
 */
struct pseudo_lock_pm_req {
        struct list_head list;
        struct dev_pm_qos_request req;
};

static void pseudo_lock_cstates_relax(struct pseudo_lock_region *plr)
{
        struct pseudo_lock_pm_req *pm_req, *next;

        list_for_each_entry_safe(pm_req, next, &plr->pm_reqs, list) {
                dev_pm_qos_remove_request(&pm_req->req);
                list_del(&pm_req->list);
                kfree(pm_req);
        }
}

/**
 * pseudo_lock_cstates_constrain - Restrict cores from entering C6
 *
 * To prevent the cache from being affected by power management, entering
 * C6 has to be avoided. This is accomplished by requesting a latency
 * requirement lower than the lowest C6 exit latency of all supported
 * platforms as found in the cpuidle state tables in the intel_idle driver.
 * At this time it is possible to do so with a single latency requirement
 * for all supported platforms.
 *
 * Since Goldmont is supported, which is affected by X86_BUG_MONITOR,
 * the ACPI latencies need to be considered while keeping in mind that C2
 * may be set to map to deeper sleep states. In this case the latency
 * requirement needs to prevent entering C2 also.
 */
static int pseudo_lock_cstates_constrain(struct pseudo_lock_region *plr)
{
        struct pseudo_lock_pm_req *pm_req;
        int cpu;
        int ret;

        for_each_cpu(cpu, &plr->d->cpu_mask) {
                pm_req = kzalloc(sizeof(*pm_req), GFP_KERNEL);
                if (!pm_req) {
                        rdt_last_cmd_puts("Failure to allocate memory for PM QoS\n");
                        ret = -ENOMEM;
                        goto out_err;
                }
                ret = dev_pm_qos_add_request(get_cpu_device(cpu),
                                             &pm_req->req,
                                             DEV_PM_QOS_RESUME_LATENCY,
                                             30);
                if (ret < 0) {
                        rdt_last_cmd_printf("Failed to add latency req CPU%d\n",
                                            cpu);
                        kfree(pm_req);
                        ret = -1;
                        goto out_err;
                }
                list_add(&pm_req->list, &plr->pm_reqs);
        }

        return 0;

out_err:
        pseudo_lock_cstates_relax(plr);
        return ret;
}
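
/*
 * Observability note (an assumption based on the generic dev_pm_qos sysfs
 * interface, not something this file guarantees): while a pseudo-locked
 * region exists the per-CPU constraint added above is expected to be
 * visible in /sys/devices/system/cpu/cpu<N>/power/pm_qos_resume_latency_us.
 */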

/**
 * pseudo_lock_region_clear - Reset pseudo-lock region data
 * @plr: pseudo-lock region
 *
 * All content of the pseudo-locked region is reset - any memory allocated
 * is freed.
 *
 * Return: void
 */
static void pseudo_lock_region_clear(struct pseudo_lock_region *plr)
{
        plr->size = 0;
        plr->line_size = 0;
        kfree(plr->kmem);
        plr->kmem = NULL;
        plr->r = NULL;
        if (plr->d)
                plr->d->plr = NULL;
        plr->d = NULL;
        plr->cbm = 0;
        plr->debugfs_dir = NULL;
}

/**
 * pseudo_lock_region_init - Initialize pseudo-lock region information
 * @plr: pseudo-lock region
 *
 * Called after user provided a schemata to be pseudo-locked. From the
 * schemata the &struct pseudo_lock_region is on entry already initialized
 * with the resource, domain, and capacity bitmask. Here the information
 * required for pseudo-locking is deduced from this data and &struct
 * pseudo_lock_region initialized further. This information includes:
 * - size in bytes of the region to be pseudo-locked
 * - cache line size to know the stride with which data needs to be accessed
 *   to be pseudo-locked
 * - a cpu associated with the cache instance on which the pseudo-locking
 *   flow can be executed
 *
 * Return: 0 on success, <0 on failure. Descriptive error will be written
 * to last_cmd_status buffer.
 */
static int pseudo_lock_region_init(struct pseudo_lock_region *plr)
{
        struct cpu_cacheinfo *ci;
        int ret;
        int i;

        /* Pick the first cpu we find that is associated with the cache. */
        plr->cpu = cpumask_first(&plr->d->cpu_mask);

        if (!cpu_online(plr->cpu)) {
                rdt_last_cmd_printf("CPU %u associated with cache not online\n",
                                    plr->cpu);
                ret = -ENODEV;
                goto out_region;
        }

        ci = get_cpu_cacheinfo(plr->cpu);

        plr->size = rdtgroup_cbm_to_size(plr->r, plr->d, plr->cbm);

        for (i = 0; i < ci->num_leaves; i++) {
                if (ci->info_list[i].level == plr->r->cache_level) {
                        plr->line_size = ci->info_list[i].coherency_line_size;
                        return 0;
                }
        }

        ret = -1;
        rdt_last_cmd_puts("Unable to determine cache line size\n");
out_region:
        pseudo_lock_region_clear(plr);
        return ret;
}

/**
 * pseudo_lock_init - Initialize a pseudo-lock region
 * @rdtgrp: resource group to which new pseudo-locked region will belong
 *
 * A pseudo-locked region is associated with a resource group. When this
 * association is created the pseudo-locked region is initialized. The
 * details of the pseudo-locked region are not known at this time so only
 * allocation is done and association established.
 *
 * Return: 0 on success, <0 on failure
 */
static int pseudo_lock_init(struct rdtgroup *rdtgrp)
{
        struct pseudo_lock_region *plr;

        plr = kzalloc(sizeof(*plr), GFP_KERNEL);
        if (!plr)
                return -ENOMEM;

        init_waitqueue_head(&plr->lock_thread_wq);
        INIT_LIST_HEAD(&plr->pm_reqs);
        rdtgrp->plr = plr;
        return 0;
}

/**
 * pseudo_lock_region_alloc - Allocate kernel memory that will be pseudo-locked
 * @plr: pseudo-lock region
 *
 * Initialize the details required to set up the pseudo-locked region and
 * allocate the contiguous memory that will be pseudo-locked to the cache.
 *
 * Return: 0 on success, <0 on failure. Descriptive error will be written
 * to last_cmd_status buffer.
 */
static int pseudo_lock_region_alloc(struct pseudo_lock_region *plr)
{
        int ret;

        ret = pseudo_lock_region_init(plr);
        if (ret < 0)
                return ret;

        /*
         * We do not yet support contiguous regions larger than
         * KMALLOC_MAX_SIZE.
         */
        if (plr->size > KMALLOC_MAX_SIZE) {
                rdt_last_cmd_puts("Requested region exceeds maximum size\n");
                ret = -E2BIG;
                goto out_region;
        }

        plr->kmem = kzalloc(plr->size, GFP_KERNEL);
        if (!plr->kmem) {
                rdt_last_cmd_puts("Unable to allocate memory\n");
                ret = -ENOMEM;
                goto out_region;
        }

        ret = 0;
        goto out;
out_region:
        pseudo_lock_region_clear(plr);
out:
        return ret;
}

/**
 * pseudo_lock_free - Free a pseudo-locked region
 * @rdtgrp: resource group to which pseudo-locked region belonged
 *
 * The pseudo-locked region's resources have already been released, or not
 * yet created, at this point. Now it can be freed and disassociated from the
 * resource group.
 *
 * Return: void
 */
static void pseudo_lock_free(struct rdtgroup *rdtgrp)
{
        pseudo_lock_region_clear(rdtgrp->plr);
        kfree(rdtgrp->plr);
        rdtgrp->plr = NULL;
}

/**
 * pseudo_lock_fn - Load kernel memory into cache
 * @_rdtgrp: resource group to which pseudo-lock region belongs
 *
 * This is the core pseudo-locking flow.
 *
 * First we ensure that the kernel memory cannot be found in the cache.
 * Then, while taking care that there will be as little interference as
 * possible, the memory to be loaded is accessed while the core is running
 * with class of service set to the bitmask of the pseudo-locked region.
 * After this is complete no future CAT allocations will be allowed to
 * overlap with this bitmask.
 *
 * Local register variables are utilized to ensure that the memory region
 * to be locked is the only memory access made during the critical locking
 * loop.
 *
 * Return: 0. Waiter on waitqueue will be woken on completion.
 */
static int pseudo_lock_fn(void *_rdtgrp)
{
        struct rdtgroup *rdtgrp = _rdtgrp;
        struct pseudo_lock_region *plr = rdtgrp->plr;
        u32 rmid_p, closid_p;
        unsigned long i;
#ifdef CONFIG_KASAN
        /*
         * The registers used for local register variables are also used
         * when KASAN is active. When KASAN is active we use a regular
         * variable to ensure we always use a valid pointer, but the cost
         * is that this variable will enter the cache through evicting the
         * memory we are trying to lock into the cache. Thus expect lower
         * pseudo-locking success rate when KASAN is active.
         */
        unsigned int line_size;
        unsigned int size;
        void *mem_r;
#else
        register unsigned int line_size asm("esi");
        register unsigned int size asm("edi");
        register void *mem_r asm(_ASM_BX);
#endif /* CONFIG_KASAN */

        /*
         * Make sure none of the allocated memory is cached. If it is we
         * will get a cache hit in below loop from outside of pseudo-locked
         * region.
         * wbinvd (as opposed to clflush/clflushopt) is required to
         * increase likelihood that allocated cache portion will be filled
         * with associated memory.
         */
        native_wbinvd();

        /*
         * Always called with interrupts enabled. By disabling interrupts
         * ensure that we will not be preempted during this critical section.
         */
        local_irq_disable();

        /*
         * Call wrmsr and rdmsr as directly as possible to avoid tracing
         * clobbering local register variables or affecting cache accesses.
         *
         * Disable the hardware prefetcher so that when the end of the memory
         * being pseudo-locked is reached the hardware will not read beyond
         * the buffer and evict pseudo-locked memory read earlier from the
         * cache.
         */
        __wrmsr(MSR_MISC_FEATURE_CONTROL, prefetch_disable_bits, 0x0);
        closid_p = this_cpu_read(pqr_state.cur_closid);
        rmid_p = this_cpu_read(pqr_state.cur_rmid);
        mem_r = plr->kmem;
        size = plr->size;
        line_size = plr->line_size;
        /*
         * Critical section begin: start by writing the closid associated
         * with the capacity bitmask of the cache region being
         * pseudo-locked followed by reading of kernel memory to load it
         * into the cache.
         */
        __wrmsr(IA32_PQR_ASSOC, rmid_p, rdtgrp->closid);
        /*
         * Cache was flushed earlier. Now access kernel memory to read it
         * into cache region associated with just activated plr->closid.
         * Loop over data twice:
         * - In first loop the cache region is shared with the page walker
         *   as it populates the paging structure caches (including TLB).
         * - In the second loop the paging structure caches are used and
         *   cache region is populated with the memory being referenced.
         */
        for (i = 0; i < size; i += PAGE_SIZE) {
                /*
                 * Add a barrier to prevent speculative execution of this
                 * loop reading beyond the end of the buffer.
                 */
                rmb();
                asm volatile("mov (%0,%1,1), %%eax\n\t"
                             :
                             : "r" (mem_r), "r" (i)
                             : "%eax", "memory");
        }
        for (i = 0; i < size; i += line_size) {
                /*
                 * Add a barrier to prevent speculative execution of this
                 * loop reading beyond the end of the buffer.
                 */
                rmb();
                asm volatile("mov (%0,%1,1), %%eax\n\t"
                             :
                             : "r" (mem_r), "r" (i)
                             : "%eax", "memory");
        }
        /*
         * Critical section end: restore closid with capacity bitmask that
         * does not overlap with pseudo-locked region.
         */
        __wrmsr(IA32_PQR_ASSOC, rmid_p, closid_p);

        /* Re-enable the hardware prefetcher(s) */
        wrmsr(MSR_MISC_FEATURE_CONTROL, 0x0, 0x0);
        local_irq_enable();

        plr->thread_done = 1;
        wake_up_interruptible(&plr->lock_thread_wq);
        return 0;
}

/**
 * rdtgroup_monitor_in_progress - Test if monitoring in progress
 * @rdtgrp: resource group being queried
 *
 * Return: 1 if monitor groups have been created for this resource
 * group, 0 otherwise.
 */
static int rdtgroup_monitor_in_progress(struct rdtgroup *rdtgrp)
{
        return !list_empty(&rdtgrp->mon.crdtgrp_list);
}

/**
 * rdtgroup_locksetup_user_restrict - Restrict user access to group
 * @rdtgrp: resource group needing access restricted
 *
 * A resource group used for cache pseudo-locking cannot have cpus or tasks
 * assigned to it. This is communicated to the user by restricting access
 * to all the files that can be used to make such changes.
 *
 * Permissions restored with rdtgroup_locksetup_user_restore()
 *
 * Return: 0 on success, <0 on failure. If a failure occurs during the
 * restriction of access an attempt will be made to restore permissions but
 * the state of the mode of these files will be uncertain when a failure
 * occurs.
 */
static int rdtgroup_locksetup_user_restrict(struct rdtgroup *rdtgrp)
{
        int ret;

        ret = rdtgroup_kn_mode_restrict(rdtgrp, "tasks");
        if (ret)
                return ret;

        ret = rdtgroup_kn_mode_restrict(rdtgrp, "cpus");
        if (ret)
                goto err_tasks;

        ret = rdtgroup_kn_mode_restrict(rdtgrp, "cpus_list");
        if (ret)
                goto err_cpus;

        if (rdt_mon_capable) {
                ret = rdtgroup_kn_mode_restrict(rdtgrp, "mon_groups");
                if (ret)
                        goto err_cpus_list;
        }

        ret = 0;
        goto out;

err_cpus_list:
        rdtgroup_kn_mode_restore(rdtgrp, "cpus_list", 0777);
err_cpus:
        rdtgroup_kn_mode_restore(rdtgrp, "cpus", 0777);
err_tasks:
        rdtgroup_kn_mode_restore(rdtgrp, "tasks", 0777);
out:
        return ret;
}

/**
 * rdtgroup_locksetup_user_restore - Restore user access to group
 * @rdtgrp: resource group needing access restored
 *
 * Restore all file access previously removed using
 * rdtgroup_locksetup_user_restrict()
 *
 * Return: 0 on success, <0 on failure. If a failure occurs during the
 * restoration of access an attempt will be made to restrict permissions
 * again but the state of the mode of these files will be uncertain when
 * a failure occurs.
 */
static int rdtgroup_locksetup_user_restore(struct rdtgroup *rdtgrp)
{
        int ret;

        ret = rdtgroup_kn_mode_restore(rdtgrp, "tasks", 0777);
        if (ret)
                return ret;

        ret = rdtgroup_kn_mode_restore(rdtgrp, "cpus", 0777);
        if (ret)
                goto err_tasks;

        ret = rdtgroup_kn_mode_restore(rdtgrp, "cpus_list", 0777);
        if (ret)
                goto err_cpus;

        if (rdt_mon_capable) {
                ret = rdtgroup_kn_mode_restore(rdtgrp, "mon_groups", 0777);
                if (ret)
                        goto err_cpus_list;
        }

        ret = 0;
        goto out;

err_cpus_list:
        rdtgroup_kn_mode_restrict(rdtgrp, "cpus_list");
err_cpus:
        rdtgroup_kn_mode_restrict(rdtgrp, "cpus");
err_tasks:
        rdtgroup_kn_mode_restrict(rdtgrp, "tasks");
out:
        return ret;
}

/**
 * rdtgroup_locksetup_enter - Resource group enters locksetup mode
 * @rdtgrp: resource group requested to enter locksetup mode
 *
 * A resource group enters locksetup mode to reflect that it would be used
 * to represent a pseudo-locked region and is in the process of being set
 * up to do so. A resource group used for a pseudo-locked region would
 * lose the closid associated with it so we cannot allow it to have any
 * tasks or cpus assigned nor permit tasks or cpus to be assigned in the
 * future. Monitoring of a pseudo-locked region is not allowed either.
 *
 * The above and more restrictions on a pseudo-locked region are checked
 * for and enforced before the resource group enters the locksetup mode.
 *
 * Returns: 0 if the resource group successfully entered locksetup mode, <0
 * on failure. On failure the last_cmd_status buffer is updated with text to
 * communicate details of failure to the user.
 */
int rdtgroup_locksetup_enter(struct rdtgroup *rdtgrp)
{
        int ret;

        /*
         * The default resource group can neither be removed nor lose the
         * default closid associated with it.
         */
        if (rdtgrp == &rdtgroup_default) {
                rdt_last_cmd_puts("Cannot pseudo-lock default group\n");
                return -EINVAL;
        }

        /*
         * Cache Pseudo-locking not supported when CDP is enabled.
         *
         * Some things to consider if you would like to enable this
         * support (using L3 CDP as example):
         * - When CDP is enabled two separate resources are exposed,
         *   L3DATA and L3CODE, but they are actually on the same cache.
         *   The implication for pseudo-locking is that if a
         *   pseudo-locked region is created on a domain of one
         *   resource (eg. L3CODE), then a pseudo-locked region cannot
         *   be created on that same domain of the other resource
         *   (eg. L3DATA). This is because the creation of a
         *   pseudo-locked region involves a call to wbinvd that will
         *   affect all cache allocations on a particular domain.
         * - Considering the previous, it may be possible to only
         *   expose one of the CDP resources to pseudo-locking and
         *   hide the other. For example, we could consider to only
         *   expose L3DATA and since the L3 cache is unified it is
         *   still possible to place instructions there and execute them.
         * - If only one region is exposed to pseudo-locking we should
         *   still keep in mind that availability of a portion of cache
         *   for pseudo-locking should take into account both resources.
         *   Similarly, if a pseudo-locked region is created in one
         *   resource, the portion of cache used by it should be made
         *   unavailable to all future allocations from both resources.
         */
        if (rdt_resources_all[RDT_RESOURCE_L3DATA].alloc_enabled ||
            rdt_resources_all[RDT_RESOURCE_L2DATA].alloc_enabled) {
                rdt_last_cmd_puts("CDP enabled\n");
                return -EINVAL;
        }

        /*
         * Not knowing the bits to disable prefetching implies that this
         * platform does not support Cache Pseudo-Locking.
         */
        prefetch_disable_bits = get_prefetch_disable_bits();
        if (prefetch_disable_bits == 0) {
                rdt_last_cmd_puts("Pseudo-locking not supported\n");
                return -EINVAL;
        }

        if (rdtgroup_monitor_in_progress(rdtgrp)) {
                rdt_last_cmd_puts("Monitoring in progress\n");
                return -EINVAL;
        }

        if (rdtgroup_tasks_assigned(rdtgrp)) {
                rdt_last_cmd_puts("Tasks assigned to resource group\n");
                return -EINVAL;
        }

        if (!cpumask_empty(&rdtgrp->cpu_mask)) {
                rdt_last_cmd_puts("CPUs assigned to resource group\n");
                return -EINVAL;
        }

        if (rdtgroup_locksetup_user_restrict(rdtgrp)) {
                rdt_last_cmd_puts("Unable to modify resctrl permissions\n");
                return -EIO;
        }

        ret = pseudo_lock_init(rdtgrp);
        if (ret) {
                rdt_last_cmd_puts("Unable to init pseudo-lock region\n");
                goto out_release;
        }

        /*
         * If this system is capable of monitoring a rmid would have been
         * allocated when the control group was created. This is not needed
         * anymore when this group would be used for pseudo-locking. This
         * is safe to call on platforms not capable of monitoring.
         */
        free_rmid(rdtgrp->mon.rmid);

        ret = 0;
        goto out;

out_release:
        rdtgroup_locksetup_user_restore(rdtgrp);
out:
        return ret;
}

/**
 * rdtgroup_locksetup_exit - Resource group exits locksetup mode
 * @rdtgrp: resource group
 *
 * When a resource group exits locksetup mode the earlier restrictions are
 * lifted.
 *
 * Return: 0 on success, <0 on failure
 */
int rdtgroup_locksetup_exit(struct rdtgroup *rdtgrp)
{
        int ret;

        if (rdt_mon_capable) {
                ret = alloc_rmid();
                if (ret < 0) {
                        rdt_last_cmd_puts("Out of RMIDs\n");
                        return ret;
                }
                rdtgrp->mon.rmid = ret;
        }

        ret = rdtgroup_locksetup_user_restore(rdtgrp);
        if (ret) {
                free_rmid(rdtgrp->mon.rmid);
                return ret;
        }

        pseudo_lock_free(rdtgrp);
        return 0;
}

/**
 * rdtgroup_cbm_overlaps_pseudo_locked - Test if CBM or portion is pseudo-locked
 * @d: RDT domain
 * @cbm: CBM to test
 *
 * @d represents a cache instance and @cbm a capacity bitmask that is
 * considered for it. Determine if @cbm overlaps with any existing
 * pseudo-locked region on @d.
 *
 * @cbm is unsigned long, even if only 32 bits are used, to make the
 * bitmap functions work correctly.
 *
 * Return: true if @cbm overlaps with pseudo-locked region on @d, false
 * otherwise.
 */
bool rdtgroup_cbm_overlaps_pseudo_locked(struct rdt_domain *d, unsigned long cbm)
{
        unsigned int cbm_len;
        unsigned long cbm_b;

        if (d->plr) {
                cbm_len = d->plr->r->cache.cbm_len;
                cbm_b = d->plr->cbm;
                if (bitmap_intersects(&cbm, &cbm_b, cbm_len))
                        return true;
        }
        return false;
}

/**
 * rdtgroup_pseudo_locked_in_hierarchy - Pseudo-locked region in cache hierarchy
 * @d: RDT domain under test
 *
 * The setup of a pseudo-locked region affects all cache instances within
 * the hierarchy of the region. It is thus essential to know if any
 * pseudo-locked regions exist within a cache hierarchy to prevent any
 * attempts to create new pseudo-locked regions in the same hierarchy.
 *
 * Return: true if a pseudo-locked region exists in the hierarchy of @d or
 *         if it is not possible to test due to memory allocation issue,
 *         false otherwise.
 */
bool rdtgroup_pseudo_locked_in_hierarchy(struct rdt_domain *d)
{
        cpumask_var_t cpu_with_psl;
        struct rdt_resource *r;
        struct rdt_domain *d_i;
        bool ret = false;

        if (!zalloc_cpumask_var(&cpu_with_psl, GFP_KERNEL))
                return true;

        /*
         * First determine which cpus have pseudo-locked regions
         * associated with them.
         */
        for_each_alloc_enabled_rdt_resource(r) {
                list_for_each_entry(d_i, &r->domains, list) {
                        if (d_i->plr)
                                cpumask_or(cpu_with_psl, cpu_with_psl,
                                           &d_i->cpu_mask);
                }
        }

        /*
         * Next test if new pseudo-locked region would intersect with
         * existing region.
         */
        if (cpumask_intersects(&d->cpu_mask, cpu_with_psl))
                ret = true;

        free_cpumask_var(cpu_with_psl);
        return ret;
}

/**
 * measure_cycles_lat_fn - Measure cycle latency to read pseudo-locked memory
 * @_plr: pseudo-lock region to measure
 *
 * There is no deterministic way to test if a memory region is cached. One
 * way is to measure how long it takes to read the memory; the speed of
 * access is a good indication of how close to the cpu the data was. Even
 * more, if the prefetcher is disabled and the memory is read at a stride
 * of half the cache line, then a cache miss will be easy to spot since the
 * read of the first half would be significantly slower than the read of
 * the second half.
 *
 * Return: 0. Waiter on waitqueue will be woken on completion.
 */
static int measure_cycles_lat_fn(void *_plr)
{
        struct pseudo_lock_region *plr = _plr;
        unsigned long i;
        u64 start, end;
        void *mem_r;

        local_irq_disable();
        /*
         * Disable hardware prefetchers.
         */
        wrmsr(MSR_MISC_FEATURE_CONTROL, prefetch_disable_bits, 0x0);
        mem_r = READ_ONCE(plr->kmem);
        /*
         * Dummy execute of the time measurement to load the needed
         * instructions into the L1 instruction cache.
         */
        start = rdtsc_ordered();
        for (i = 0; i < plr->size; i += 32) {
                start = rdtsc_ordered();
                asm volatile("mov (%0,%1,1), %%eax\n\t"
                             :
                             : "r" (mem_r), "r" (i)
                             : "%eax", "memory");
                end = rdtsc_ordered();
                trace_pseudo_lock_mem_latency((u32)(end - start));
        }
        wrmsr(MSR_MISC_FEATURE_CONTROL, 0x0, 0x0);
        local_irq_enable();
        plr->thread_done = 1;
        wake_up_interruptible(&plr->lock_thread_wq);
        return 0;
}
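
/*
 * Usage sketch (assuming the default tracefs mount point; paths may differ):
 * the latency samples emitted above via the pseudo_lock_mem_latency
 * tracepoint can be collected with:
 *
 *   # echo 1 > /sys/kernel/debug/tracing/events/resctrl/pseudo_lock_mem_latency/enable
 *   # echo 1 > /sys/kernel/debug/resctrl/<group>/pseudo_lock_measure
 *   # cat /sys/kernel/debug/tracing/trace
 */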

/*
 * Create a perf_event_attr for the hit and miss perf events that will
 * be used during the performance measurement. A perf_event maintains
 * a pointer to its perf_event_attr so a unique attribute structure is
 * created for each perf_event.
 *
 * The actual configuration of the event is set right before use in order
 * to use the X86_CONFIG macro.
 */
static struct perf_event_attr perf_miss_attr = {
        .type           = PERF_TYPE_RAW,
        .size           = sizeof(struct perf_event_attr),
        .pinned         = 1,
        .disabled       = 0,
        .exclude_user   = 1,
};

static struct perf_event_attr perf_hit_attr = {
        .type           = PERF_TYPE_RAW,
        .size           = sizeof(struct perf_event_attr),
        .pinned         = 1,
        .disabled       = 0,
        .exclude_user   = 1,
};

struct residency_counts {
        u64 miss_before, hits_before;
        u64 miss_after, hits_after;
};
static int measure_residency_fn(struct perf_event_attr *miss_attr,
                                struct perf_event_attr *hit_attr,
                                struct pseudo_lock_region *plr,
                                struct residency_counts *counts)
{
        u64 hits_before = 0, hits_after = 0, miss_before = 0, miss_after = 0;
        struct perf_event *miss_event, *hit_event;
        int hit_pmcnum, miss_pmcnum;
        unsigned int line_size;
        unsigned int size;
        unsigned long i;
        void *mem_r;
        u64 tmp;

        miss_event = perf_event_create_kernel_counter(miss_attr, plr->cpu,
                                                      NULL, NULL, NULL);
        if (IS_ERR(miss_event))
                goto out;

        hit_event = perf_event_create_kernel_counter(hit_attr, plr->cpu,
                                                     NULL, NULL, NULL);
        if (IS_ERR(hit_event))
                goto out_miss;

        local_irq_disable();
        /*
         * Check any possible error state of events used by performing
         * one local read.
         */
        if (perf_event_read_local(miss_event, &tmp, NULL, NULL)) {
                local_irq_enable();
                goto out_hit;
        }
        if (perf_event_read_local(hit_event, &tmp, NULL, NULL)) {
                local_irq_enable();
                goto out_hit;
        }

        /*
         * Disable hardware prefetchers.
         */
        wrmsr(MSR_MISC_FEATURE_CONTROL, prefetch_disable_bits, 0x0);

        /* Initialize rest of local variables */
        /*
         * Performance event has been validated right before this with
         * interrupts disabled - it is thus safe to read the counter index.
         */
        miss_pmcnum = x86_perf_rdpmc_index(miss_event);
        hit_pmcnum = x86_perf_rdpmc_index(hit_event);
        line_size = READ_ONCE(plr->line_size);
        mem_r = READ_ONCE(plr->kmem);
        size = READ_ONCE(plr->size);

        /*
         * Read counter variables twice - first to load the instructions
         * used in L1 cache, second to capture accurate value that does not
         * include cache misses incurred because of instruction loads.
         */
        rdpmcl(hit_pmcnum, hits_before);
        rdpmcl(miss_pmcnum, miss_before);
        /*
         * From SDM: Performing back-to-back fast reads are not guaranteed
         * to be monotonic.
         * Use LFENCE to ensure all previous instructions are retired
         * before proceeding.
         */
        rmb();
        rdpmcl(hit_pmcnum, hits_before);
        rdpmcl(miss_pmcnum, miss_before);
        /*
         * Use LFENCE to ensure all previous instructions are retired
         * before proceeding.
         */
        rmb();
        for (i = 0; i < size; i += line_size) {
                /*
                 * Add a barrier to prevent speculative execution of this
                 * loop reading beyond the end of the buffer.
                 */
                rmb();
                asm volatile("mov (%0,%1,1), %%eax\n\t"
                             :
                             : "r" (mem_r), "r" (i)
                             : "%eax", "memory");
        }
        /*
         * Use LFENCE to ensure all previous instructions are retired
         * before proceeding.
         */
        rmb();
        rdpmcl(hit_pmcnum, hits_after);
        rdpmcl(miss_pmcnum, miss_after);
        /*
         * Use LFENCE to ensure all previous instructions are retired
         * before proceeding.
         */
        rmb();
        /* Re-enable hardware prefetchers */
        wrmsr(MSR_MISC_FEATURE_CONTROL, 0x0, 0x0);
        local_irq_enable();
out_hit:
        perf_event_release_kernel(hit_event);
out_miss:
        perf_event_release_kernel(miss_event);
out:
        /*
         * All counts will be zero on failure.
         */
        counts->miss_before = miss_before;
        counts->hits_before = hits_before;
        counts->miss_after = miss_after;
        counts->hits_after = hits_after;
        return 0;
}

static int measure_l2_residency(void *_plr)
{
        struct pseudo_lock_region *plr = _plr;
        struct residency_counts counts = {0};

        /*
         * Non-architectural event for the Goldmont Microarchitecture
         * from Intel x86 Architecture Software Developer Manual (SDM):
         * MEM_LOAD_UOPS_RETIRED D1H (event number)
         * Umask values:
         *     L2_HIT   02H
         *     L2_MISS  10H
         */
        switch (boot_cpu_data.x86_model) {
        case INTEL_FAM6_ATOM_GOLDMONT:
        case INTEL_FAM6_ATOM_GOLDMONT_PLUS:
                perf_miss_attr.config = X86_CONFIG(.event = 0xd1,
                                                   .umask = 0x10);
                perf_hit_attr.config = X86_CONFIG(.event = 0xd1,
                                                  .umask = 0x2);
                break;
        default:
                goto out;
        }

        measure_residency_fn(&perf_miss_attr, &perf_hit_attr, plr, &counts);
        /*
         * If a failure prevented the measurements from succeeding
         * tracepoints will still be written and all counts will be zero.
         */
        trace_pseudo_lock_l2(counts.hits_after - counts.hits_before,
                             counts.miss_after - counts.miss_before);
out:
        plr->thread_done = 1;
        wake_up_interruptible(&plr->lock_thread_wq);
        return 0;
}

static int measure_l3_residency(void *_plr)
{
        struct pseudo_lock_region *plr = _plr;
        struct residency_counts counts = {0};

        /*
         * On Broadwell Microarchitecture the MEM_LOAD_UOPS_RETIRED event
         * has two "no fix" errata associated with it: BDM35 and BDM100. On
         * this platform the following events are used instead:
         * LONGEST_LAT_CACHE 2EH (Documented in SDM)
         *       REFERENCE 4FH
         *       MISS      41H
         */
        switch (boot_cpu_data.x86_model) {
        case INTEL_FAM6_BROADWELL_X:
                /* On BDW the hit event counts references, not hits */
                perf_hit_attr.config = X86_CONFIG(.event = 0x2e,
                                                  .umask = 0x4f);
                perf_miss_attr.config = X86_CONFIG(.event = 0x2e,
                                                   .umask = 0x41);
                break;
        default:
                goto out;
        }

        measure_residency_fn(&perf_miss_attr, &perf_hit_attr, plr, &counts);
        /*
         * If a failure prevented the measurements from succeeding
         * tracepoints will still be written and all counts will be zero.
         */

        counts.miss_after -= counts.miss_before;
        if (boot_cpu_data.x86_model == INTEL_FAM6_BROADWELL_X) {
                /*
                 * On BDW references and misses are counted, need to adjust.
                 * Sometimes the "hits" counter is a bit more than the
                 * references, for example, x references but x + 1 hits.
                 * To not report invalid hit values in this case we treat
                 * that as misses equal to references.
                 */
                /* First compute the number of cache references measured */
                counts.hits_after -= counts.hits_before;
                /* Next convert references to cache hits */
                counts.hits_after -= min(counts.miss_after, counts.hits_after);
        } else {
                counts.hits_after -= counts.hits_before;
        }

        trace_pseudo_lock_l3(counts.hits_after, counts.miss_after);
out:
        plr->thread_done = 1;
        wake_up_interruptible(&plr->lock_thread_wq);
        return 0;
}

/**
 * pseudo_lock_measure_cycles - Trigger latency measure to pseudo-locked region
 * @rdtgrp: resource group to which the pseudo-locked region belongs
 * @sel: selector of the measurement to perform (1: latency, 2: L2, 3: L3)
 *
 * The measurement of latency to access a pseudo-locked region should be
 * done from a cpu that is associated with that pseudo-locked region.
 * Determine which cpu is associated with this region and start a thread on
 * that cpu to perform the measurement, wait for that thread to complete.
 *
 * Return: 0 on success, <0 on failure
 */
static int pseudo_lock_measure_cycles(struct rdtgroup *rdtgrp, int sel)
{
        struct pseudo_lock_region *plr = rdtgrp->plr;
        struct task_struct *thread;
        unsigned int cpu;
        int ret = -1;

        cpus_read_lock();
        mutex_lock(&rdtgroup_mutex);

        if (rdtgrp->flags & RDT_DELETED) {
                ret = -ENODEV;
                goto out;
        }

        plr->thread_done = 0;
        cpu = cpumask_first(&plr->d->cpu_mask);
        if (!cpu_online(cpu)) {
                ret = -ENODEV;
                goto out;
        }

        plr->cpu = cpu;

        if (sel == 1)
                thread = kthread_create_on_node(measure_cycles_lat_fn, plr,
                                                cpu_to_node(cpu),
                                                "pseudo_lock_measure/%u",
                                                cpu);
        else if (sel == 2)
                thread = kthread_create_on_node(measure_l2_residency, plr,
                                                cpu_to_node(cpu),
                                                "pseudo_lock_measure/%u",
                                                cpu);
        else if (sel == 3)
                thread = kthread_create_on_node(measure_l3_residency, plr,
                                                cpu_to_node(cpu),
                                                "pseudo_lock_measure/%u",
                                                cpu);
        else
                goto out;

        if (IS_ERR(thread)) {
                ret = PTR_ERR(thread);
                goto out;
        }
        kthread_bind(thread, cpu);
        wake_up_process(thread);

        ret = wait_event_interruptible(plr->lock_thread_wq,
                                       plr->thread_done == 1);
        if (ret < 0)
                goto out;

        ret = 0;

out:
        mutex_unlock(&rdtgroup_mutex);
        cpus_read_unlock();
        return ret;
}

static ssize_t pseudo_lock_measure_trigger(struct file *file,
                                           const char __user *user_buf,
                                           size_t count, loff_t *ppos)
{
        struct rdtgroup *rdtgrp = file->private_data;
        size_t buf_size;
        char buf[32];
        int ret;
        int sel;

        buf_size = min(count, (sizeof(buf) - 1));
        if (copy_from_user(buf, user_buf, buf_size))
                return -EFAULT;

        buf[buf_size] = '\0';
        ret = kstrtoint(buf, 10, &sel);
        if (ret == 0) {
                if (sel != 1 && sel != 2 && sel != 3)
                        return -EINVAL;
                ret = debugfs_file_get(file->f_path.dentry);
                if (ret)
                        return ret;
                ret = pseudo_lock_measure_cycles(rdtgrp, sel);
                if (ret == 0)
                        ret = count;
                debugfs_file_put(file->f_path.dentry);
        }

        return ret;
}

static const struct file_operations pseudo_measure_fops = {
        .write = pseudo_lock_measure_trigger,
        .open = simple_open,
        .llseek = default_llseek,
};
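
/*
 * Writing "1", "2", or "3" to the pseudo_lock_measure debugfs file created
 * in rdtgroup_pseudo_lock_create() selects the latency, L2 residency, or
 * L3 residency measurement respectively, matching the selector handling in
 * pseudo_lock_measure_cycles() above.
 */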

/**
 * rdtgroup_pseudo_lock_create - Create a pseudo-locked region
 * @rdtgrp: resource group to which pseudo-lock region belongs
 *
 * Called when a resource group in the pseudo-locksetup mode receives a
 * valid schemata that should be pseudo-locked. Since the resource group is
 * in pseudo-locksetup mode the &struct pseudo_lock_region has already been
 * allocated and initialized with the essential information. If a failure
 * occurs the resource group remains in the pseudo-locksetup mode with the
 * &struct pseudo_lock_region associated with it, but cleared from all
 * information and ready for the user to re-attempt pseudo-locking by
 * writing the schemata again.
 *
 * Return: 0 if the pseudo-locked region was successfully pseudo-locked, <0
 * on failure. Descriptive error will be written to last_cmd_status buffer.
 */
int rdtgroup_pseudo_lock_create(struct rdtgroup *rdtgrp)
{
        struct pseudo_lock_region *plr = rdtgrp->plr;
        struct task_struct *thread;
        unsigned int new_minor;
        struct device *dev;
        int ret;

        ret = pseudo_lock_region_alloc(plr);
        if (ret < 0)
                return ret;

        ret = pseudo_lock_cstates_constrain(plr);
        if (ret < 0) {
                ret = -EINVAL;
                goto out_region;
        }

        plr->thread_done = 0;

        thread = kthread_create_on_node(pseudo_lock_fn, rdtgrp,
                                        cpu_to_node(plr->cpu),
                                        "pseudo_lock/%u", plr->cpu);
        if (IS_ERR(thread)) {
                ret = PTR_ERR(thread);
                rdt_last_cmd_printf("Locking thread returned error %d\n", ret);
                goto out_cstates;
        }

        kthread_bind(thread, plr->cpu);
        wake_up_process(thread);

        ret = wait_event_interruptible(plr->lock_thread_wq,
                                       plr->thread_done == 1);
        if (ret < 0) {
                /*
                 * If the thread does not get on the CPU for whatever
                 * reason and the process which sets up the region is
                 * interrupted then this will leave the thread in runnable
                 * state and once it gets on the CPU it will dereference
                 * the cleared, but not freed, plr struct resulting in an
                 * empty pseudo-locking loop.
                 */
                rdt_last_cmd_puts("Locking thread interrupted\n");
                goto out_cstates;
        }

        ret = pseudo_lock_minor_get(&new_minor);
        if (ret < 0) {
                rdt_last_cmd_puts("Unable to obtain a new minor number\n");
                goto out_cstates;
        }

        /*
         * Unlock access but do not release the reference. The
         * pseudo-locked region will still be here on return.
         *
         * The mutex has to be released temporarily to avoid a potential
         * deadlock with the mm->mmap_sem semaphore which is obtained in
         * the device_create() and debugfs_create_dir() callpath below
         * as well as before the mmap() callback is called.
         */
        mutex_unlock(&rdtgroup_mutex);

        if (!IS_ERR_OR_NULL(debugfs_resctrl)) {
                plr->debugfs_dir = debugfs_create_dir(rdtgrp->kn->name,
                                                      debugfs_resctrl);
                if (!IS_ERR_OR_NULL(plr->debugfs_dir))
                        debugfs_create_file("pseudo_lock_measure", 0200,
                                            plr->debugfs_dir, rdtgrp,
                                            &pseudo_measure_fops);
        }

        dev = device_create(pseudo_lock_class, NULL,
                            MKDEV(pseudo_lock_major, new_minor),
                            rdtgrp, "%s", rdtgrp->kn->name);

        mutex_lock(&rdtgroup_mutex);

        if (IS_ERR(dev)) {
                ret = PTR_ERR(dev);
                rdt_last_cmd_printf("Failed to create character device: %d\n",
                                    ret);
                goto out_debugfs;
        }

        /* We released the mutex - check if group was removed while we did so */
        if (rdtgrp->flags & RDT_DELETED) {
                ret = -ENODEV;
                goto out_device;
        }

        plr->minor = new_minor;

        rdtgrp->mode = RDT_MODE_PSEUDO_LOCKED;
        closid_free(rdtgrp->closid);
        rdtgroup_kn_mode_restore(rdtgrp, "cpus", 0444);
        rdtgroup_kn_mode_restore(rdtgrp, "cpus_list", 0444);

        ret = 0;
        goto out;

out_device:
        device_destroy(pseudo_lock_class, MKDEV(pseudo_lock_major, new_minor));
out_debugfs:
        debugfs_remove_recursive(plr->debugfs_dir);
        pseudo_lock_minor_release(new_minor);
out_cstates:
        pseudo_lock_cstates_relax(plr);
out_region:
        pseudo_lock_region_clear(plr);
out:
        return ret;
}

/**
 * rdtgroup_pseudo_lock_remove - Remove a pseudo-locked region
 * @rdtgrp: resource group to which the pseudo-locked region belongs
 *
 * The removal of a pseudo-locked region can be initiated when the resource
 * group is removed from user space via "rmdir" or when the resctrl
 * filesystem is unmounted. On removal the resource group does not go back
 * to pseudo-locksetup mode before it is removed, instead it is removed
 * directly. There is thus asymmetry with the creation where the
 * &struct pseudo_lock_region is removed here while it was not created in
 * rdtgroup_pseudo_lock_create().
 *
 * Return: void
 */
void rdtgroup_pseudo_lock_remove(struct rdtgroup *rdtgrp)
{
        struct pseudo_lock_region *plr = rdtgrp->plr;

        if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP) {
                /*
                 * Default group cannot be a pseudo-locked region so we can
                 * free closid here.
                 */
                closid_free(rdtgrp->closid);
                goto free;
        }

        pseudo_lock_cstates_relax(plr);
        debugfs_remove_recursive(rdtgrp->plr->debugfs_dir);
        device_destroy(pseudo_lock_class, MKDEV(pseudo_lock_major, plr->minor));
        pseudo_lock_minor_release(plr->minor);

free:
        pseudo_lock_free(rdtgrp);
}

static int pseudo_lock_dev_open(struct inode *inode, struct file *filp)
{
        struct rdtgroup *rdtgrp;

        mutex_lock(&rdtgroup_mutex);

        rdtgrp = region_find_by_minor(iminor(inode));
        if (!rdtgrp) {
                mutex_unlock(&rdtgroup_mutex);
                return -ENODEV;
        }

        filp->private_data = rdtgrp;
        atomic_inc(&rdtgrp->waitcount);
        /* Perform a non-seekable open - llseek is not supported */
        filp->f_mode &= ~(FMODE_LSEEK | FMODE_PREAD | FMODE_PWRITE);

        mutex_unlock(&rdtgroup_mutex);

        return 0;
}

static int pseudo_lock_dev_release(struct inode *inode, struct file *filp)
{
        struct rdtgroup *rdtgrp;

        mutex_lock(&rdtgroup_mutex);
        rdtgrp = filp->private_data;
        WARN_ON(!rdtgrp);
        if (!rdtgrp) {
                mutex_unlock(&rdtgroup_mutex);
                return -ENODEV;
        }
        filp->private_data = NULL;
        atomic_dec(&rdtgrp->waitcount);
        mutex_unlock(&rdtgroup_mutex);
        return 0;
}

static int pseudo_lock_dev_mremap(struct vm_area_struct *area)
{
        /* Not supported */
        return -EINVAL;
}

static const struct vm_operations_struct pseudo_mmap_ops = {
        .mremap = pseudo_lock_dev_mremap,
};

static int pseudo_lock_dev_mmap(struct file *filp, struct vm_area_struct *vma)
{
        unsigned long vsize = vma->vm_end - vma->vm_start;
        unsigned long off = vma->vm_pgoff << PAGE_SHIFT;
        struct pseudo_lock_region *plr;
        struct rdtgroup *rdtgrp;
        unsigned long physical;
        unsigned long psize;

        mutex_lock(&rdtgroup_mutex);

        rdtgrp = filp->private_data;
        WARN_ON(!rdtgrp);
        if (!rdtgrp) {
                mutex_unlock(&rdtgroup_mutex);
                return -ENODEV;
        }

        plr = rdtgrp->plr;

        if (!plr->d) {
                mutex_unlock(&rdtgroup_mutex);
                return -ENODEV;
        }

        /*
         * Task is required to run with affinity to the cpus associated
         * with the pseudo-locked region. If this is not the case the task
         * may be scheduled elsewhere and invalidate entries in the
         * pseudo-locked region.
         */
        if (!cpumask_subset(current->cpus_ptr, &plr->d->cpu_mask)) {
                mutex_unlock(&rdtgroup_mutex);
                return -EINVAL;
        }

        physical = __pa(plr->kmem) >> PAGE_SHIFT;
        psize = plr->size - off;

        if (off > plr->size) {
                mutex_unlock(&rdtgroup_mutex);
                return -ENOSPC;
        }

        /*
         * Ensure changes are carried directly to the memory being mapped,
         * do not allow copy-on-write mapping.
         */
        if (!(vma->vm_flags & VM_SHARED)) {
                mutex_unlock(&rdtgroup_mutex);
                return -EINVAL;
        }

        if (vsize > psize) {
                mutex_unlock(&rdtgroup_mutex);
                return -ENOSPC;
        }

        memset(plr->kmem + off, 0, vsize);

        if (remap_pfn_range(vma, vma->vm_start, physical + vma->vm_pgoff,
                            vsize, vma->vm_page_prot)) {
                mutex_unlock(&rdtgroup_mutex);
                return -EAGAIN;
        }
        vma->vm_ops = &pseudo_mmap_ops;
        mutex_unlock(&rdtgroup_mutex);
        return 0;
}

static const struct file_operations pseudo_lock_dev_fops = {
        .owner =        THIS_MODULE,
        .llseek =       no_llseek,
        .read =         NULL,
        .write =        NULL,
        .open =         pseudo_lock_dev_open,
        .release =      pseudo_lock_dev_release,
        .mmap =         pseudo_lock_dev_mmap,
};

static char *pseudo_lock_devnode(struct device *dev, umode_t *mode)
{
        struct rdtgroup *rdtgrp;

        rdtgrp = dev_get_drvdata(dev);
        if (mode)
                *mode = 0600;
        return kasprintf(GFP_KERNEL, "pseudo_lock/%s", rdtgrp->kn->name);
}

int rdt_pseudo_lock_init(void)
{
        int ret;

        ret = register_chrdev(0, "pseudo_lock", &pseudo_lock_dev_fops);
        if (ret < 0)
                return ret;

        pseudo_lock_major = ret;

        pseudo_lock_class = class_create(THIS_MODULE, "pseudo_lock");
        if (IS_ERR(pseudo_lock_class)) {
                ret = PTR_ERR(pseudo_lock_class);
                unregister_chrdev(pseudo_lock_major, "pseudo_lock");
                return ret;
        }

        pseudo_lock_class->devnode = pseudo_lock_devnode;
        return 0;
}

void rdt_pseudo_lock_release(void)
{
        class_destroy(pseudo_lock_class);
        pseudo_lock_class = NULL;
        unregister_chrdev(pseudo_lock_major, "pseudo_lock");
        pseudo_lock_major = 0;
}