/*
 * intel_pstate.c: Native P state management for Intel processors
 *
 * (C) Copyright 2012 Intel Corporation
 * Author: Dirk Brandewie <dirk.j.brandewie@intel.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; version 2
 * of the License.
 */

#define pr_fmt(fmt)	KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/kernel_stat.h>
#include <linux/module.h>
#include <linux/ktime.h>
#include <linux/hrtimer.h>
#include <linux/tick.h>
#include <linux/slab.h>
#include <linux/sched/cpufreq.h>
#include <linux/list.h>
#include <linux/cpu.h>
#include <linux/cpufreq.h>
#include <linux/sysfs.h>
#include <linux/types.h>
#include <linux/acpi.h>
#include <linux/vmalloc.h>
#include <trace/events/power.h>

#include <asm/div64.h>
#include <asm/msr.h>
#include <asm/cpu_device_id.h>
#include <asm/cpufeature.h>
#include <asm/intel-family.h>

#define INTEL_PSTATE_SAMPLING_INTERVAL	(10 * NSEC_PER_MSEC)

#define INTEL_CPUFREQ_TRANSITION_LATENCY	20000
#define INTEL_CPUFREQ_TRANSITION_DELAY		500

#ifdef CONFIG_ACPI
#include <acpi/processor.h>
#include <acpi/cppc_acpi.h>
#endif

#define FRAC_BITS 8
#define int_tofp(X) ((int64_t)(X) << FRAC_BITS)
#define fp_toint(X) ((X) >> FRAC_BITS)

#define ONE_EIGHTH_FP ((int64_t)1 << (FRAC_BITS - 3))

#define EXT_BITS 6
#define EXT_FRAC_BITS (EXT_BITS + FRAC_BITS)
#define fp_ext_toint(X) ((X) >> EXT_FRAC_BITS)
#define int_ext_tofp(X) ((int64_t)(X) << EXT_FRAC_BITS)

static inline int32_t mul_fp(int32_t x, int32_t y)
{
	return ((int64_t)x * (int64_t)y) >> FRAC_BITS;
}

static inline int32_t div_fp(s64 x, s64 y)
{
	return div64_s64((int64_t)x << FRAC_BITS, y);
}

static inline int ceiling_fp(int32_t x)
{
	int mask, ret;

	ret = fp_toint(x);
	mask = (1 << FRAC_BITS) - 1;
	if (x & mask)
		ret += 1;
	return ret;
}

static inline int32_t percent_fp(int percent)
{
	return div_fp(percent, 100);
}

static inline u64 mul_ext_fp(u64 x, u64 y)
{
	return (x * y) >> EXT_FRAC_BITS;
}

static inline u64 div_ext_fp(u64 x, u64 y)
{
	return div64_u64(x << EXT_FRAC_BITS, y);
}

static inline int32_t percent_ext_fp(int percent)
{
	return div_ext_fp(percent, 100);
}

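/*
 * Worked example (illustrative only, not used by the driver): with
 * FRAC_BITS == 8, int_tofp(3) == 0x300 and
 * mul_fp(int_tofp(3), int_tofp(2)) == 0x600 == int_tofp(6).
 * div_fp(int_tofp(1), int_tofp(4)) == 0x40, i.e. 0.25; fp_toint()
 * truncates that to 0 while ceiling_fp() rounds it up to 1.
 */
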
/**
 * struct sample -	Store performance sample
 * @core_avg_perf:	Ratio of APERF/MPERF which is the actual average
 *			performance during last sample period
 * @busy_scaled:	Scaled busy value which is used to calculate next
 *			P state. This can be different than core_avg_perf
 *			to account for cpu idle period
 * @aperf:		Difference of actual performance frequency clock count
 *			read from APERF MSR between last and current sample
 * @mperf:		Difference of maximum performance frequency clock count
 *			read from MPERF MSR between last and current sample
 * @tsc:		Difference of time stamp counter between last and
 *			current sample
 * @time:		Current time from scheduler
 *
 * This structure is used in the cpudata structure to store performance sample
 * data for choosing next P State.
 */
struct sample {
	int32_t core_avg_perf;
	int32_t busy_scaled;
	u64 aperf;
	u64 mperf;
	u64 tsc;
	u64 time;
};

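/*
 * Illustrative only: if APERF advanced by 800000 and MPERF by 1000000
 * over a sample period, core_avg_perf = div_ext_fp(800000, 1000000),
 * i.e. 0.8 in EXT_FRAC_BITS fixed point (about 13107), meaning the CPU
 * ran at roughly 80% of its guaranteed frequency while not halted.
 */
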
/**
 * struct pstate_data - Store P state data
 * @current_pstate:	Current requested P state
 * @min_pstate:		Min P state possible for this platform
 * @max_pstate:		Max P state possible for this platform
 * @max_pstate_physical:This is physical Max P state for a processor
 *			This can be higher than the max_pstate which can
 *			be limited by platform thermal design power limits
 * @scaling:		Scaling factor to convert frequency to cpufreq
 *			frequency units
 * @turbo_pstate:	Max Turbo P state possible for this platform
 * @max_freq:		@max_pstate frequency in cpufreq units
 * @turbo_freq:		@turbo_pstate frequency in cpufreq units
 *
 * Stores the per cpu model P state limits and current P state.
 */
struct pstate_data {
	int	current_pstate;
	int	min_pstate;
	int	max_pstate;
	int	max_pstate_physical;
	int	scaling;
	int	turbo_pstate;
	unsigned int max_freq;
	unsigned int turbo_freq;
};

/**
 * struct vid_data -	Stores voltage information data
 * @min:		VID data for this platform corresponding to
 *			the lowest P state
 * @max:		VID data corresponding to the highest P State.
 * @turbo:		VID data for turbo P state
 * @ratio:		Ratio of (vid max - vid min) /
 *			(max P state - Min P State)
 *
 * Stores the voltage data for DVFS (Dynamic Voltage and Frequency Scaling).
 * This data is used in Atom platforms, where in addition to target P state,
 * the voltage data needs to be specified to select next P State.
 */
struct vid_data {
	int min;
	int max;
	int turbo;
	int32_t ratio;
};

/**
 * struct global_params - Global parameters, mostly tunable via sysfs.
 * @no_turbo:		Whether or not to use turbo P-states.
 * @turbo_disabled:	Whether or not turbo P-states are available at all,
 *			based on the MSR_IA32_MISC_ENABLE value and whether or
 *			not the maximum reported turbo P-state is different from
 *			the maximum reported non-turbo one.
 * @min_perf_pct:	Minimum capacity limit in percent of the maximum turbo
 *			P-state capacity.
 * @max_perf_pct:	Maximum capacity limit in percent of the maximum turbo
 *			P-state capacity.
 */
struct global_params {
	bool no_turbo;
	bool turbo_disabled;
	int max_perf_pct;
	int min_perf_pct;
};

/**
 * struct cpudata -	Per CPU instance data storage
 * @cpu:		CPU number for this instance data
 * @policy:		CPUFreq policy value
 * @update_util:	CPUFreq utility callback information
 * @update_util_set:	CPUFreq utility callback is set
 * @iowait_boost:	iowait-related boost fraction
 * @last_update:	Time of the last update.
 * @pstate:		Stores P state limits for this CPU
 * @vid:		Stores VID limits for this CPU
 * @last_sample_time:	Last Sample time
 * @aperf_mperf_shift:	Number of clock cycles after aperf, mperf is incremented
 *			This shift is a multiplier to mperf delta to
 *			calculate CPU busy.
 * @prev_aperf:		Last APERF value read from APERF MSR
 * @prev_mperf:		Last MPERF value read from MPERF MSR
 * @prev_tsc:		Last timestamp counter (TSC) value
 * @prev_cummulative_iowait: IO Wait time difference from last and
 *			current sample
 * @sample:		Storage for storing last Sample data
 * @min_perf_ratio:	Minimum capacity in terms of PERF or HWP ratios
 * @max_perf_ratio:	Maximum capacity in terms of PERF or HWP ratios
 * @acpi_perf_data:	Stores ACPI perf information read from _PSS
 * @valid_pss_table:	Set to true for valid ACPI _PSS entries found
 * @epp_powersave:	Last saved HWP energy performance preference
 *			(EPP) or energy performance bias (EPB),
 *			when policy switched to performance
 * @epp_policy:		Last saved policy used to set EPP/EPB
 * @epp_default:	Power on default HWP energy performance
 *			preference/bias
 * @epp_saved:		Saved EPP/EPB during system suspend or CPU offline
 *			operation
 * @hwp_req_cached:	Cached value of the last HWP Request MSR
 * @hwp_cap_cached:	Cached value of the last HWP Capabilities MSR
 * @last_io_update:	Last time when IO wake flag was set
 * @sched_flags:	Store scheduler flags for possible cross CPU update
 * @hwp_boost_min:	Last HWP boosted min performance
 *
 * This structure stores per CPU instance data for all CPUs.
 */
struct cpudata {
	int cpu;

	unsigned int policy;
	struct update_util_data update_util;
	bool   update_util_set;

	struct pstate_data pstate;
	struct vid_data vid;

	u64	last_update;
	u64	last_sample_time;
	u64	aperf_mperf_shift;
	u64	prev_aperf;
	u64	prev_mperf;
	u64	prev_tsc;
	u64	prev_cummulative_iowait;
	struct sample sample;
	int32_t	min_perf_ratio;
	int32_t	max_perf_ratio;
#ifdef CONFIG_ACPI
	struct acpi_processor_performance acpi_perf_data;
	bool valid_pss_table;
#endif
	unsigned int iowait_boost;
	s16 epp_powersave;
	s16 epp_policy;
	s16 epp_default;
	s16 epp_saved;
	u64 hwp_req_cached;
	u64 hwp_cap_cached;
	u64 last_io_update;
	unsigned int sched_flags;
	u32 hwp_boost_min;
};

static struct cpudata **all_cpu_data;

/**
 * struct pstate_funcs - Per CPU model specific callbacks
 * @get_max:		Callback to get maximum non turbo effective P state
 * @get_max_physical:	Callback to get maximum non turbo physical P state
 * @get_min:		Callback to get minimum P state
 * @get_turbo:		Callback to get turbo P state
 * @get_scaling:	Callback to get frequency scaling factor
 * @get_val:		Callback to convert P state to actual MSR write value
 * @get_vid:		Callback to get VID data for Atom platforms
 *
 * Core and Atom CPU models have different ways to get P State limits. This
 * structure is used to store those callbacks.
 */
struct pstate_funcs {
	int (*get_max)(void);
	int (*get_max_physical)(void);
	int (*get_min)(void);
	int (*get_turbo)(void);
	int (*get_scaling)(void);
	int (*get_aperf_mperf_shift)(void);
	u64 (*get_val)(struct cpudata *, int pstate);
	void (*get_vid)(struct cpudata *);
};

static struct pstate_funcs pstate_funcs __read_mostly;

static int hwp_active __read_mostly;
static int hwp_mode_bdw __read_mostly;
static bool per_cpu_limits __read_mostly;
static bool hwp_boost __read_mostly;

static struct cpufreq_driver *intel_pstate_driver __read_mostly;

#ifdef CONFIG_ACPI
static bool acpi_ppc;
#endif

static struct global_params global;

static DEFINE_MUTEX(intel_pstate_driver_lock);
static DEFINE_MUTEX(intel_pstate_limits_lock);

#ifdef CONFIG_ACPI

static bool intel_pstate_acpi_pm_profile_server(void)
{
	if (acpi_gbl_FADT.preferred_profile == PM_ENTERPRISE_SERVER ||
	    acpi_gbl_FADT.preferred_profile == PM_PERFORMANCE_SERVER)
		return true;

	return false;
}

static bool intel_pstate_get_ppc_enable_status(void)
{
	if (intel_pstate_acpi_pm_profile_server())
		return true;

	return acpi_ppc;
}

#ifdef CONFIG_ACPI_CPPC_LIB

/* The work item is needed to avoid CPU hotplug locking issues */
static void intel_pstste_sched_itmt_work_fn(struct work_struct *work)
{
	sched_set_itmt_support();
}

static DECLARE_WORK(sched_itmt_work, intel_pstste_sched_itmt_work_fn);

static void intel_pstate_set_itmt_prio(int cpu)
{
	struct cppc_perf_caps cppc_perf;
	static u32 max_highest_perf = 0, min_highest_perf = U32_MAX;
	int ret;

	ret = cppc_get_perf_caps(cpu, &cppc_perf);
	if (ret)
		return;

	/*
	 * The priorities can be set regardless of whether or not
	 * sched_set_itmt_support(true) has been called and it is valid to
	 * update them at any time after it has been called.
	 */
	sched_set_itmt_core_prio(cppc_perf.highest_perf, cpu);

	if (max_highest_perf <= min_highest_perf) {
		if (cppc_perf.highest_perf > max_highest_perf)
			max_highest_perf = cppc_perf.highest_perf;

		if (cppc_perf.highest_perf < min_highest_perf)
			min_highest_perf = cppc_perf.highest_perf;

		if (max_highest_perf > min_highest_perf) {
			/*
			 * This code can be run during CPU online under the
			 * CPU hotplug locks, so sched_set_itmt_support()
			 * cannot be called from here.  Queue up a work item
			 * to invoke it.
			 */
			schedule_work(&sched_itmt_work);
		}
	}
}

static int intel_pstate_get_cppc_guranteed(int cpu)
{
	struct cppc_perf_caps cppc_perf;
	int ret;

	ret = cppc_get_perf_caps(cpu, &cppc_perf);
	if (ret)
		return ret;

	return cppc_perf.guaranteed_perf;
}

#else /* CONFIG_ACPI_CPPC_LIB */
static void intel_pstate_set_itmt_prio(int cpu)
{
}
#endif /* CONFIG_ACPI_CPPC_LIB */

static void intel_pstate_init_acpi_perf_limits(struct cpufreq_policy *policy)
{
	struct cpudata *cpu;
	int ret;
	int i;

	if (hwp_active) {
		intel_pstate_set_itmt_prio(policy->cpu);
		return;
	}

	if (!intel_pstate_get_ppc_enable_status())
		return;

	cpu = all_cpu_data[policy->cpu];

	ret = acpi_processor_register_performance(&cpu->acpi_perf_data,
						  policy->cpu);
	if (ret)
		return;

	/*
	 * Check if the control value in _PSS is for PERF_CTL MSR, which should
	 * guarantee that the states returned by it map to the states in our
	 * list directly.
	 */
	if (cpu->acpi_perf_data.control_register.space_id !=
						ACPI_ADR_SPACE_FIXED_HARDWARE)
		goto err;

	/*
	 * If there is only one entry _PSS, simply ignore _PSS and continue as
	 * usual without taking _PSS into account
	 */
	if (cpu->acpi_perf_data.state_count < 2)
		goto err;

	pr_debug("CPU%u - ACPI _PSS perf data\n", policy->cpu);
	for (i = 0; i < cpu->acpi_perf_data.state_count; i++) {
		pr_debug("     %cP%d: %u MHz, %u mW, 0x%x\n",
			 (i == cpu->acpi_perf_data.state ? '*' : ' '), i,
			 (u32) cpu->acpi_perf_data.states[i].core_frequency,
			 (u32) cpu->acpi_perf_data.states[i].power,
			 (u32) cpu->acpi_perf_data.states[i].control);
	}

	/*
	 * The _PSS table doesn't contain the whole turbo frequency range.
	 * It just contains +1 MHz above the max non turbo frequency,
	 * with a control value corresponding to the max turbo ratio. But
	 * when cpufreq set_policy is called, it will be called with this
	 * max frequency, which will cause reduced performance because
	 * this driver uses the real max turbo frequency as the max
	 * frequency. So correct this frequency in the _PSS table to the
	 * max turbo frequency based on the turbo state.
	 * Also need to convert to MHz as _PSS freq is in MHz.
	 */
	if (!global.turbo_disabled)
		cpu->acpi_perf_data.states[0].core_frequency =
					policy->cpuinfo.max_freq / 1000;
	cpu->valid_pss_table = true;
	pr_debug("_PPC limits will be enforced\n");

	return;

 err:
	cpu->valid_pss_table = false;
	acpi_processor_unregister_performance(policy->cpu);
}

static void intel_pstate_exit_perf_limits(struct cpufreq_policy *policy)
{
	struct cpudata *cpu;

	cpu = all_cpu_data[policy->cpu];
	if (!cpu->valid_pss_table)
		return;

	acpi_processor_unregister_performance(policy->cpu);
}

#else /* CONFIG_ACPI */
static inline void intel_pstate_init_acpi_perf_limits(struct cpufreq_policy *policy)
{
}

static inline void intel_pstate_exit_perf_limits(struct cpufreq_policy *policy)
{
}

static inline bool intel_pstate_acpi_pm_profile_server(void)
{
	return false;
}
#endif /* CONFIG_ACPI */

#ifndef CONFIG_ACPI_CPPC_LIB
static int intel_pstate_get_cppc_guranteed(int cpu)
{
	return -ENOTSUPP;
}
#endif /* CONFIG_ACPI_CPPC_LIB */

static inline void update_turbo_state(void)
{
	u64 misc_en;
	struct cpudata *cpu;

	cpu = all_cpu_data[0];
	rdmsrl(MSR_IA32_MISC_ENABLE, misc_en);
	global.turbo_disabled =
		(misc_en & MSR_IA32_MISC_ENABLE_TURBO_DISABLE ||
			cpu->pstate.max_pstate == cpu->pstate.turbo_pstate);
}

static int min_perf_pct_min(void)
{
	struct cpudata *cpu = all_cpu_data[0];
	int turbo_pstate = cpu->pstate.turbo_pstate;

	return turbo_pstate ?
		(cpu->pstate.min_pstate * 100 / turbo_pstate) : 0;
}

static s16 intel_pstate_get_epb(struct cpudata *cpu_data)
{
	u64 epb;
	int ret;

	if (!static_cpu_has(X86_FEATURE_EPB))
		return -ENXIO;

	ret = rdmsrl_on_cpu(cpu_data->cpu, MSR_IA32_ENERGY_PERF_BIAS, &epb);
	if (ret)
		return (s16)ret;

	return (s16)(epb & 0x0f);
}

static s16 intel_pstate_get_epp(struct cpudata *cpu_data, u64 hwp_req_data)
{
	s16 epp;

	if (static_cpu_has(X86_FEATURE_HWP_EPP)) {
		/*
		 * When hwp_req_data is 0, the caller didn't read
		 * MSR_HWP_REQUEST, so read it here to get EPP.
		 */
		if (!hwp_req_data) {
			epp = rdmsrl_on_cpu(cpu_data->cpu, MSR_HWP_REQUEST,
					    &hwp_req_data);
			if (epp)
				return epp;
		}
		epp = (hwp_req_data >> 24) & 0xff;
	} else {
		/* When there is no EPP present, HWP uses EPB settings */
		epp = intel_pstate_get_epb(cpu_data);
	}

	return epp;
}

static int intel_pstate_set_epb(int cpu, s16 pref)
{
	u64 epb;
	int ret;

	if (!static_cpu_has(X86_FEATURE_EPB))
		return -ENXIO;

	ret = rdmsrl_on_cpu(cpu, MSR_IA32_ENERGY_PERF_BIAS, &epb);
	if (ret)
		return ret;

	epb = (epb & ~0x0f) | pref;
	wrmsrl_on_cpu(cpu, MSR_IA32_ENERGY_PERF_BIAS, epb);

	return 0;
}

/*
 * EPP/EPB display strings corresponding to EPP index in the
 * energy_perf_strings[]
 *	index		String
 *-------------------------------------
 *	0		default
 *	1		performance
 *	2		balance_performance
 *	3		balance_power
 *	4		power
 */
static const char * const energy_perf_strings[] = {
	"default",
	"performance",
	"balance_performance",
	"balance_power",
	"power",
	NULL
};
static const unsigned int epp_values[] = {
	HWP_EPP_PERFORMANCE,
	HWP_EPP_BALANCE_PERFORMANCE,
	HWP_EPP_BALANCE_POWERSAVE,
	HWP_EPP_POWERSAVE
};

static int intel_pstate_get_energy_pref_index(struct cpudata *cpu_data)
{
	s16 epp;
	int index = -EINVAL;

	epp = intel_pstate_get_epp(cpu_data, 0);
	if (epp < 0)
		return epp;

	if (static_cpu_has(X86_FEATURE_HWP_EPP)) {
		if (epp == HWP_EPP_PERFORMANCE)
			return 1;
		if (epp <= HWP_EPP_BALANCE_PERFORMANCE)
			return 2;
		if (epp <= HWP_EPP_BALANCE_POWERSAVE)
			return 3;
		else
			return 4;
	} else if (static_cpu_has(X86_FEATURE_EPB)) {
		/*
		 * Range:
		 *	0x00-0x03	:	Performance
		 *	0x04-0x07	:	Balance performance
		 *	0x08-0x0B	:	Balance power
		 *	0x0C-0x0F	:	Power
		 * The EPB is a 4 bit value, but our ranges restrict the
		 * value which can be set. Here only the top two bits are
		 * used effectively.
		 */
		index = (epp >> 2) + 1;
	}

	return index;
}

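/*
 * Illustrative only: with EPB, a value of 0x06 falls into the "balance
 * performance" range above, and (0x06 >> 2) + 1 == 2, which indexes
 * "balance_performance" in energy_perf_strings[].
 */
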
static int intel_pstate_set_energy_pref_index(struct cpudata *cpu_data,
					      int pref_index)
{
	int epp = -EINVAL;
	int ret;

	if (!pref_index)
		epp = cpu_data->epp_default;

	mutex_lock(&intel_pstate_limits_lock);

	if (static_cpu_has(X86_FEATURE_HWP_EPP)) {
		u64 value;

		ret = rdmsrl_on_cpu(cpu_data->cpu, MSR_HWP_REQUEST, &value);
		if (ret)
			goto return_pref;

		value &= ~GENMASK_ULL(31, 24);

		if (epp == -EINVAL)
			epp = epp_values[pref_index - 1];

		value |= (u64)epp << 24;
		ret = wrmsrl_on_cpu(cpu_data->cpu, MSR_HWP_REQUEST, value);
	} else {
		if (epp == -EINVAL)
			epp = (pref_index - 1) << 2;
		ret = intel_pstate_set_epb(cpu_data->cpu, epp);
	}
return_pref:
	mutex_unlock(&intel_pstate_limits_lock);

	return ret;
}

static ssize_t show_energy_performance_available_preferences(
				struct cpufreq_policy *policy, char *buf)
{
	int i = 0;
	int ret = 0;

	while (energy_perf_strings[i] != NULL)
		ret += sprintf(&buf[ret], "%s ", energy_perf_strings[i++]);

	ret += sprintf(&buf[ret], "\n");

	return ret;
}

cpufreq_freq_attr_ro(energy_performance_available_preferences);

static ssize_t store_energy_performance_preference(
		struct cpufreq_policy *policy, const char *buf, size_t count)
{
	struct cpudata *cpu_data = all_cpu_data[policy->cpu];
	char str_preference[21];
	int ret;

	ret = sscanf(buf, "%20s", str_preference);
	if (ret != 1)
		return -EINVAL;

	ret = match_string(energy_perf_strings, -1, str_preference);
	if (ret < 0)
		return ret;

	intel_pstate_set_energy_pref_index(cpu_data, ret);
	return count;
}

static ssize_t show_energy_performance_preference(
				struct cpufreq_policy *policy, char *buf)
{
	struct cpudata *cpu_data = all_cpu_data[policy->cpu];
	int preference;

	preference = intel_pstate_get_energy_pref_index(cpu_data);
	if (preference < 0)
		return preference;

	return sprintf(buf, "%s\n", energy_perf_strings[preference]);
}

cpufreq_freq_attr_rw(energy_performance_preference);

static ssize_t show_base_frequency(struct cpufreq_policy *policy, char *buf)
{
	struct cpudata *cpu;
	u64 cap;
	int ratio;

	ratio = intel_pstate_get_cppc_guranteed(policy->cpu);
	if (ratio <= 0) {
		rdmsrl_on_cpu(policy->cpu, MSR_HWP_CAPABILITIES, &cap);
		ratio = HWP_GUARANTEED_PERF(cap);
	}

	cpu = all_cpu_data[policy->cpu];

	return sprintf(buf, "%d\n", ratio * cpu->pstate.scaling);
}

cpufreq_freq_attr_ro(base_frequency);

static struct freq_attr *hwp_cpufreq_attrs[] = {
	&energy_performance_preference,
	&energy_performance_available_preferences,
	&base_frequency,
	NULL,
};

static void intel_pstate_get_hwp_max(unsigned int cpu, int *phy_max,
				     int *current_max)
{
	u64 cap;

	rdmsrl_on_cpu(cpu, MSR_HWP_CAPABILITIES, &cap);
	WRITE_ONCE(all_cpu_data[cpu]->hwp_cap_cached, cap);
	if (global.no_turbo)
		*current_max = HWP_GUARANTEED_PERF(cap);
	else
		*current_max = HWP_HIGHEST_PERF(cap);

	*phy_max = HWP_HIGHEST_PERF(cap);
}

static void intel_pstate_hwp_set(unsigned int cpu)
{
	struct cpudata *cpu_data = all_cpu_data[cpu];
	int max, min;
	u64 value;
	s16 epp;

	max = cpu_data->max_perf_ratio;
	min = cpu_data->min_perf_ratio;

	if (cpu_data->policy == CPUFREQ_POLICY_PERFORMANCE)
		min = max;

	rdmsrl_on_cpu(cpu, MSR_HWP_REQUEST, &value);

	value &= ~HWP_MIN_PERF(~0L);
	value |= HWP_MIN_PERF(min);

	value &= ~HWP_MAX_PERF(~0L);
	value |= HWP_MAX_PERF(max);

	if (cpu_data->epp_policy == cpu_data->policy)
		goto skip_epp;

	cpu_data->epp_policy = cpu_data->policy;

	if (cpu_data->epp_saved >= 0) {
		epp = cpu_data->epp_saved;
		cpu_data->epp_saved = -EINVAL;
		goto update_epp;
	}

	if (cpu_data->policy == CPUFREQ_POLICY_PERFORMANCE) {
		epp = intel_pstate_get_epp(cpu_data, value);
		cpu_data->epp_powersave = epp;
		/* If the EPP read failed, don't try to write it */
		if (epp < 0)
			goto skip_epp;

		epp = 0;
	} else {
		/* Skip setting EPP when the saved value is invalid */
		if (cpu_data->epp_powersave < 0)
			goto skip_epp;

		/*
		 * No need to restore EPP when it is not zero. This
		 * means:
		 *  - Policy is not changed
		 *  - the user has changed it manually
		 *  - Error reading EPB
		 */
		epp = intel_pstate_get_epp(cpu_data, value);
		if (epp)
			goto skip_epp;

		epp = cpu_data->epp_powersave;
	}
update_epp:
	if (static_cpu_has(X86_FEATURE_HWP_EPP)) {
		value &= ~GENMASK_ULL(31, 24);
		value |= (u64)epp << 24;
	} else {
		intel_pstate_set_epb(cpu, epp);
	}
skip_epp:
	WRITE_ONCE(cpu_data->hwp_req_cached, value);
	wrmsrl_on_cpu(cpu, MSR_HWP_REQUEST, value);
}

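/*
 * Illustrative HWP_REQUEST layout as manipulated above (not a new
 * definition): bits 7:0 hold the minimum performance ratio, bits 15:8
 * the maximum, and bits 31:24 the EPP.  For example min = 8, max = 32
 * and epp = 0x80 encode as 0x80002008.
 */
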
static void intel_pstate_hwp_force_min_perf(int cpu)
{
	u64 value;
	int min_perf;

	value = all_cpu_data[cpu]->hwp_req_cached;
	value &= ~GENMASK_ULL(31, 0);
	min_perf = HWP_LOWEST_PERF(all_cpu_data[cpu]->hwp_cap_cached);

	/* Set hwp_max = hwp_min */
	value |= HWP_MAX_PERF(min_perf);
	value |= HWP_MIN_PERF(min_perf);

	/* Set EPP/EPB to min */
	if (static_cpu_has(X86_FEATURE_HWP_EPP))
		value |= HWP_ENERGY_PERF_PREFERENCE(HWP_EPP_POWERSAVE);
	else
		intel_pstate_set_epb(cpu, HWP_EPP_BALANCE_POWERSAVE);

	wrmsrl_on_cpu(cpu, MSR_HWP_REQUEST, value);
}

static int intel_pstate_hwp_save_state(struct cpufreq_policy *policy)
{
	struct cpudata *cpu_data = all_cpu_data[policy->cpu];

	if (!hwp_active)
		return 0;

	cpu_data->epp_saved = intel_pstate_get_epp(cpu_data, 0);

	return 0;
}

static void intel_pstate_hwp_enable(struct cpudata *cpudata);

static int intel_pstate_resume(struct cpufreq_policy *policy)
{
	if (!hwp_active)
		return 0;

	mutex_lock(&intel_pstate_limits_lock);

	if (policy->cpu == 0)
		intel_pstate_hwp_enable(all_cpu_data[policy->cpu]);

	all_cpu_data[policy->cpu]->epp_policy = 0;
	intel_pstate_hwp_set(policy->cpu);

	mutex_unlock(&intel_pstate_limits_lock);

	return 0;
}

static void intel_pstate_update_policies(void)
{
	int cpu;

	for_each_possible_cpu(cpu)
		cpufreq_update_policy(cpu);
}

/************************** sysfs begin ************************/
#define show_one(file_name, object)					\
	static ssize_t show_##file_name					\
	(struct kobject *kobj, struct kobj_attribute *attr, char *buf)	\
	{								\
		return sprintf(buf, "%u\n", global.object);		\
	}

static ssize_t intel_pstate_show_status(char *buf);
static int intel_pstate_update_status(const char *buf, size_t size);

static ssize_t show_status(struct kobject *kobj,
			   struct kobj_attribute *attr, char *buf)
{
	ssize_t ret;

	mutex_lock(&intel_pstate_driver_lock);
	ret = intel_pstate_show_status(buf);
	mutex_unlock(&intel_pstate_driver_lock);

	return ret;
}

static ssize_t store_status(struct kobject *a, struct kobj_attribute *b,
			    const char *buf, size_t count)
{
	char *p = memchr(buf, '\n', count);
	int ret;

	mutex_lock(&intel_pstate_driver_lock);
	ret = intel_pstate_update_status(buf, p ? p - buf : count);
	mutex_unlock(&intel_pstate_driver_lock);

	return ret < 0 ? ret : count;
}

static ssize_t show_turbo_pct(struct kobject *kobj,
			      struct kobj_attribute *attr, char *buf)
{
	struct cpudata *cpu;
	int total, no_turbo, turbo_pct;
	uint32_t turbo_fp;

	mutex_lock(&intel_pstate_driver_lock);

	if (!intel_pstate_driver) {
		mutex_unlock(&intel_pstate_driver_lock);
		return -EAGAIN;
	}

	cpu = all_cpu_data[0];

	total = cpu->pstate.turbo_pstate - cpu->pstate.min_pstate + 1;
	no_turbo = cpu->pstate.max_pstate - cpu->pstate.min_pstate + 1;
	turbo_fp = div_fp(no_turbo, total);
	turbo_pct = 100 - fp_toint(mul_fp(turbo_fp, int_tofp(100)));

	mutex_unlock(&intel_pstate_driver_lock);

	return sprintf(buf, "%u\n", turbo_pct);
}

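/*
 * Illustrative only: with min_pstate = 8, max_pstate = 28 and
 * turbo_pstate = 36, total = 29 and no_turbo = 21, so turbo_pct is
 * 100 - (21 * 100) / 29, i.e. about 28% of the P-state range is turbo.
 */
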
static ssize_t show_num_pstates(struct kobject *kobj,
				struct kobj_attribute *attr, char *buf)
{
	struct cpudata *cpu;
	int total;

	mutex_lock(&intel_pstate_driver_lock);

	if (!intel_pstate_driver) {
		mutex_unlock(&intel_pstate_driver_lock);
		return -EAGAIN;
	}

	cpu = all_cpu_data[0];
	total = cpu->pstate.turbo_pstate - cpu->pstate.min_pstate + 1;

	mutex_unlock(&intel_pstate_driver_lock);

	return sprintf(buf, "%u\n", total);
}

static ssize_t show_no_turbo(struct kobject *kobj,
			     struct kobj_attribute *attr, char *buf)
{
	ssize_t ret;

	mutex_lock(&intel_pstate_driver_lock);

	if (!intel_pstate_driver) {
		mutex_unlock(&intel_pstate_driver_lock);
		return -EAGAIN;
	}

	update_turbo_state();
	if (global.turbo_disabled)
		ret = sprintf(buf, "%u\n", global.turbo_disabled);
	else
		ret = sprintf(buf, "%u\n", global.no_turbo);

	mutex_unlock(&intel_pstate_driver_lock);

	return ret;
}

static ssize_t store_no_turbo(struct kobject *a, struct kobj_attribute *b,
			      const char *buf, size_t count)
{
	unsigned int input;
	int ret;

	ret = sscanf(buf, "%u", &input);
	if (ret != 1)
		return -EINVAL;

	mutex_lock(&intel_pstate_driver_lock);

	if (!intel_pstate_driver) {
		mutex_unlock(&intel_pstate_driver_lock);
		return -EAGAIN;
	}

	mutex_lock(&intel_pstate_limits_lock);

	update_turbo_state();
	if (global.turbo_disabled) {
		pr_warn("Turbo disabled by BIOS or unavailable on processor\n");
		mutex_unlock(&intel_pstate_limits_lock);
		mutex_unlock(&intel_pstate_driver_lock);
		return -EPERM;
	}

	global.no_turbo = clamp_t(int, input, 0, 1);

	if (global.no_turbo) {
		struct cpudata *cpu = all_cpu_data[0];
		int pct = cpu->pstate.max_pstate * 100 / cpu->pstate.turbo_pstate;

		/* Squash the global minimum into the permitted range. */
		if (global.min_perf_pct > pct)
			global.min_perf_pct = pct;
	}

	mutex_unlock(&intel_pstate_limits_lock);

	intel_pstate_update_policies();

	mutex_unlock(&intel_pstate_driver_lock);

	return count;
}

static ssize_t store_max_perf_pct(struct kobject *a, struct kobj_attribute *b,
				  const char *buf, size_t count)
{
	unsigned int input;
	int ret;

	ret = sscanf(buf, "%u", &input);
	if (ret != 1)
		return -EINVAL;

	mutex_lock(&intel_pstate_driver_lock);

	if (!intel_pstate_driver) {
		mutex_unlock(&intel_pstate_driver_lock);
		return -EAGAIN;
	}

	mutex_lock(&intel_pstate_limits_lock);

	global.max_perf_pct = clamp_t(int, input, global.min_perf_pct, 100);

	mutex_unlock(&intel_pstate_limits_lock);

	intel_pstate_update_policies();

	mutex_unlock(&intel_pstate_driver_lock);

	return count;
}

static ssize_t store_min_perf_pct(struct kobject *a, struct kobj_attribute *b,
				  const char *buf, size_t count)
{
	unsigned int input;
	int ret;

	ret = sscanf(buf, "%u", &input);
	if (ret != 1)
		return -EINVAL;

	mutex_lock(&intel_pstate_driver_lock);

	if (!intel_pstate_driver) {
		mutex_unlock(&intel_pstate_driver_lock);
		return -EAGAIN;
	}

	mutex_lock(&intel_pstate_limits_lock);

	global.min_perf_pct = clamp_t(int, input,
				      min_perf_pct_min(), global.max_perf_pct);

	mutex_unlock(&intel_pstate_limits_lock);

	intel_pstate_update_policies();

	mutex_unlock(&intel_pstate_driver_lock);

	return count;
}

static ssize_t show_hwp_dynamic_boost(struct kobject *kobj,
				struct kobj_attribute *attr, char *buf)
{
	return sprintf(buf, "%u\n", hwp_boost);
}

static ssize_t store_hwp_dynamic_boost(struct kobject *a,
				       struct kobj_attribute *b,
				       const char *buf, size_t count)
{
	unsigned int input;
	int ret;

	ret = kstrtouint(buf, 10, &input);
	if (ret)
		return ret;

	mutex_lock(&intel_pstate_driver_lock);
	hwp_boost = !!input;
	intel_pstate_update_policies();
	mutex_unlock(&intel_pstate_driver_lock);

	return count;
}

show_one(max_perf_pct, max_perf_pct);
show_one(min_perf_pct, min_perf_pct);

define_one_global_rw(status);
define_one_global_rw(no_turbo);
define_one_global_rw(max_perf_pct);
define_one_global_rw(min_perf_pct);
define_one_global_ro(turbo_pct);
define_one_global_ro(num_pstates);
define_one_global_rw(hwp_dynamic_boost);

static struct attribute *intel_pstate_attributes[] = {
	&status.attr,
	&no_turbo.attr,
	&turbo_pct.attr,
	&num_pstates.attr,
	NULL
};

static const struct attribute_group intel_pstate_attr_group = {
	.attrs = intel_pstate_attributes,
};

static void __init intel_pstate_sysfs_expose_params(void)
{
	struct kobject *intel_pstate_kobject;
	int rc;

	intel_pstate_kobject = kobject_create_and_add("intel_pstate",
						&cpu_subsys.dev_root->kobj);
	if (WARN_ON(!intel_pstate_kobject))
		return;

	rc = sysfs_create_group(intel_pstate_kobject, &intel_pstate_attr_group);
	if (WARN_ON(rc))
		return;

	/*
	 * If per cpu limits are enforced there are no global limits, so
	 * return without creating max/min_perf_pct attributes
	 */
	if (per_cpu_limits)
		return;

	rc = sysfs_create_file(intel_pstate_kobject, &max_perf_pct.attr);
	WARN_ON(rc);

	rc = sysfs_create_file(intel_pstate_kobject, &min_perf_pct.attr);
	WARN_ON(rc);

	if (hwp_active) {
		rc = sysfs_create_file(intel_pstate_kobject,
				       &hwp_dynamic_boost.attr);
		WARN_ON(rc);
	}
}
/************************** sysfs end ************************/

static void intel_pstate_hwp_enable(struct cpudata *cpudata)
{
	/* First disable HWP notification interrupt as we don't process them */
	if (static_cpu_has(X86_FEATURE_HWP_NOTIFY))
		wrmsrl_on_cpu(cpudata->cpu, MSR_HWP_INTERRUPT, 0x00);

	wrmsrl_on_cpu(cpudata->cpu, MSR_PM_ENABLE, 0x1);
	cpudata->epp_policy = 0;
	if (cpudata->epp_default == -EINVAL)
		cpudata->epp_default = intel_pstate_get_epp(cpudata, 0);
}

#define MSR_IA32_POWER_CTL_BIT_EE	19

/* Disable energy efficiency optimization */
static void intel_pstate_disable_ee(int cpu)
{
	u64 power_ctl;
	int ret;

	ret = rdmsrl_on_cpu(cpu, MSR_IA32_POWER_CTL, &power_ctl);
	if (ret)
		return;

	if (!(power_ctl & BIT(MSR_IA32_POWER_CTL_BIT_EE))) {
		pr_info("Disabling energy efficiency optimization\n");
		power_ctl |= BIT(MSR_IA32_POWER_CTL_BIT_EE);
		wrmsrl_on_cpu(cpu, MSR_IA32_POWER_CTL, power_ctl);
	}
}

static int atom_get_min_pstate(void)
{
	u64 value;

	rdmsrl(MSR_ATOM_CORE_RATIOS, value);
	return (value >> 8) & 0x7F;
}

static int atom_get_max_pstate(void)
{
	u64 value;

	rdmsrl(MSR_ATOM_CORE_RATIOS, value);
	return (value >> 16) & 0x7F;
}

static int atom_get_turbo_pstate(void)
{
	u64 value;

	rdmsrl(MSR_ATOM_CORE_TURBO_RATIOS, value);
	return value & 0x7F;
}

static u64 atom_get_val(struct cpudata *cpudata, int pstate)
{
	u64 val;
	int32_t vid_fp;
	u32 vid;

	val = (u64)pstate << 8;
	if (global.no_turbo && !global.turbo_disabled)
		val |= (u64)1 << 32;

	vid_fp = cpudata->vid.min + mul_fp(
		int_tofp(pstate - cpudata->pstate.min_pstate),
		cpudata->vid.ratio);

	vid_fp = clamp_t(int32_t, vid_fp, cpudata->vid.min, cpudata->vid.max);
	vid = ceiling_fp(vid_fp);

	if (pstate > cpudata->pstate.max_pstate)
		vid = cpudata->vid.turbo;

	return val | vid;
}

static int silvermont_get_scaling(void)
{
	u64 value;
	int i;
	/* Defined in Table 35-6 from SDM (Sept 2015) */
	static int silvermont_freq_table[] = {
		83300, 100000, 133300, 116700, 80000};

	rdmsrl(MSR_FSB_FREQ, value);
	i = value & 0x7;
	WARN_ON(i > 4);

	return silvermont_freq_table[i];
}

static int airmont_get_scaling(void)
{
	u64 value;
	int i;
	/* Defined in Table 35-10 from SDM (Sept 2015) */
	static int airmont_freq_table[] = {
		83300, 100000, 133300, 116700, 80000,
		93300, 90000, 88900, 87500};

	rdmsrl(MSR_FSB_FREQ, value);
	i = value & 0xF;
	WARN_ON(i > 8);

	return airmont_freq_table[i];
}

static void atom_get_vid(struct cpudata *cpudata)
{
	u64 value;

	rdmsrl(MSR_ATOM_CORE_VIDS, value);
	cpudata->vid.min = int_tofp((value >> 8) & 0x7f);
	cpudata->vid.max = int_tofp((value >> 16) & 0x7f);
	cpudata->vid.ratio = div_fp(
		cpudata->vid.max - cpudata->vid.min,
		int_tofp(cpudata->pstate.max_pstate -
			cpudata->pstate.min_pstate));

	rdmsrl(MSR_ATOM_CORE_TURBO_VIDS, value);
	cpudata->vid.turbo = value & 0x7f;
}

static int core_get_min_pstate(void)
{
	u64 value;

	rdmsrl(MSR_PLATFORM_INFO, value);
	return (value >> 40) & 0xFF;
}

static int core_get_max_pstate_physical(void)
{
	u64 value;

	rdmsrl(MSR_PLATFORM_INFO, value);
	return (value >> 8) & 0xFF;
}

static int core_get_tdp_ratio(u64 plat_info)
{
	/* Check how many TDP levels are present */
	if (plat_info & 0x600000000) {
		u64 tdp_ctrl;
		u64 tdp_ratio;
		int tdp_msr;
		int err;

		/* Get the TDP level (0, 1, 2) to get ratios */
		err = rdmsrl_safe(MSR_CONFIG_TDP_CONTROL, &tdp_ctrl);
		if (err)
			return err;

		/* TDP MSR are continuous starting at 0x648 */
		tdp_msr = MSR_CONFIG_TDP_NOMINAL + (tdp_ctrl & 0x03);
		err = rdmsrl_safe(tdp_msr, &tdp_ratio);
		if (err)
			return err;

		/* For level 1 and 2, bits[23:16] contain the ratio */
		if (tdp_ctrl & 0x03)
			tdp_ratio >>= 16;

		tdp_ratio &= 0xff; /* ratios are only 8 bits long */
		pr_debug("tdp_ratio %x\n", (int)tdp_ratio);

		return (int)tdp_ratio;
	}

	return -ENXIO;
}

static int core_get_max_pstate(void)
{
	u64 tar;
	u64 plat_info;
	int max_pstate;
	int tdp_ratio;
	int err;

	rdmsrl(MSR_PLATFORM_INFO, plat_info);
	max_pstate = (plat_info >> 8) & 0xFF;

	tdp_ratio = core_get_tdp_ratio(plat_info);
	if (tdp_ratio <= 0)
		return max_pstate;

	if (hwp_active) {
		/* Turbo activation ratio is not used on HWP platforms */
		return tdp_ratio;
	}

	err = rdmsrl_safe(MSR_TURBO_ACTIVATION_RATIO, &tar);
	if (!err) {
		int tar_levels;

		/* Do some sanity checking for safety */
		tar_levels = tar & 0xff;
		if (tdp_ratio - 1 == tar_levels) {
			max_pstate = tar_levels;
			pr_debug("max_pstate=TAC %x\n", max_pstate);
		}
	}

	return max_pstate;
}

static int core_get_turbo_pstate(void)
{
	u64 value;
	int nont, ret;

	rdmsrl(MSR_TURBO_RATIO_LIMIT, value);
	nont = core_get_max_pstate();
	ret = (value) & 255;
	if (ret <= nont)
		ret = nont;
	return ret;
}

static inline int core_get_scaling(void)
{
	return 100000;
}

static u64 core_get_val(struct cpudata *cpudata, int pstate)
{
	u64 val;

	val = (u64)pstate << 8;
	if (global.no_turbo && !global.turbo_disabled)
		val |= (u64)1 << 32;

	return val;
}

static int knl_get_aperf_mperf_shift(void)
{
	return 10;
}

static int knl_get_turbo_pstate(void)
{
	u64 value;
	int nont, ret;

	rdmsrl(MSR_TURBO_RATIO_LIMIT, value);
	nont = core_get_max_pstate();
	ret = (((value) >> 8) & 0xFF);
	if (ret <= nont)
		ret = nont;
	return ret;
}

static void intel_pstate_set_pstate(struct cpudata *cpu, int pstate)
{
	trace_cpu_frequency(pstate * cpu->pstate.scaling, cpu->cpu);
	cpu->pstate.current_pstate = pstate;
	/*
	 * Generally, there is no guarantee that this code will always run on
	 * the CPU being updated, so force the register update to run on the
	 * right CPU.
	 */
	wrmsrl_on_cpu(cpu->cpu, MSR_IA32_PERF_CTL,
		      pstate_funcs.get_val(cpu, pstate));
}

static void intel_pstate_set_min_pstate(struct cpudata *cpu)
{
	intel_pstate_set_pstate(cpu, cpu->pstate.min_pstate);
}

static void intel_pstate_max_within_limits(struct cpudata *cpu)
{
	int pstate = max(cpu->pstate.min_pstate, cpu->max_perf_ratio);

	update_turbo_state();
	intel_pstate_set_pstate(cpu, pstate);
}

static void intel_pstate_get_cpu_pstates(struct cpudata *cpu)
{
	cpu->pstate.min_pstate = pstate_funcs.get_min();
	cpu->pstate.max_pstate = pstate_funcs.get_max();
	cpu->pstate.max_pstate_physical = pstate_funcs.get_max_physical();
	cpu->pstate.turbo_pstate = pstate_funcs.get_turbo();
	cpu->pstate.scaling = pstate_funcs.get_scaling();
	cpu->pstate.max_freq = cpu->pstate.max_pstate * cpu->pstate.scaling;

	if (hwp_active && !hwp_mode_bdw) {
		unsigned int phy_max, current_max;

		intel_pstate_get_hwp_max(cpu->cpu, &phy_max, &current_max);
		cpu->pstate.turbo_freq = phy_max * cpu->pstate.scaling;
	} else {
		cpu->pstate.turbo_freq = cpu->pstate.turbo_pstate * cpu->pstate.scaling;
	}

	if (pstate_funcs.get_aperf_mperf_shift)
		cpu->aperf_mperf_shift = pstate_funcs.get_aperf_mperf_shift();

	if (pstate_funcs.get_vid)
		pstate_funcs.get_vid(cpu);

	intel_pstate_set_min_pstate(cpu);
}

/*
 * A long hold time keeps the high perf limits in place for a long time,
 * which negatively impacts perf/watt for some workloads, like specpower.
 * 3ms is based on experiments on some performance oriented workloads.
 */
static int hwp_boost_hold_time_ns = 3 * NSEC_PER_MSEC;

static inline void intel_pstate_hwp_boost_up(struct cpudata *cpu)
{
	u64 hwp_req = READ_ONCE(cpu->hwp_req_cached);
	u32 max_limit = (hwp_req & 0xff00) >> 8;
	u32 min_limit = (hwp_req & 0xff);
	u32 boost_level1;

	/*
	 * Cases to consider (User changes via sysfs or boot time):
	 * If, P0 (Turbo max) = P1 (Guaranteed max) = min:
	 *	No boost, normal operation.
	 * If, P0 (Turbo max) > P1 (Guaranteed max) = min:
	 *     Should result in one level boost only for P0.
	 * If, P0 (Turbo max) = P1 (Guaranteed max) > min:
	 *     Should result in two level boost:
	 *         (min + p1)/2 and P1.
	 * If, P0 (Turbo max) > P1 (Guaranteed max) > min:
	 *     Should result in three level boost:
	 *         (min + p1)/2, P1 and P0.
	 */

	/* If max and min are equal or already at max, nothing to boost */
	if (max_limit == min_limit || cpu->hwp_boost_min >= max_limit)
		return;

	if (!cpu->hwp_boost_min)
		cpu->hwp_boost_min = min_limit;

	/* level at the halfway mark between min and guaranteed */
	boost_level1 = (HWP_GUARANTEED_PERF(cpu->hwp_cap_cached) + min_limit) >> 1;

	if (cpu->hwp_boost_min < boost_level1)
		cpu->hwp_boost_min = boost_level1;
	else if (cpu->hwp_boost_min < HWP_GUARANTEED_PERF(cpu->hwp_cap_cached))
		cpu->hwp_boost_min = HWP_GUARANTEED_PERF(cpu->hwp_cap_cached);
	else if (cpu->hwp_boost_min == HWP_GUARANTEED_PERF(cpu->hwp_cap_cached) &&
		 max_limit != HWP_GUARANTEED_PERF(cpu->hwp_cap_cached))
		cpu->hwp_boost_min = max_limit;
	else
		return;

	hwp_req = (hwp_req & ~GENMASK_ULL(7, 0)) | cpu->hwp_boost_min;
	wrmsrl(MSR_HWP_REQUEST, hwp_req);
	cpu->last_update = cpu->sample.time;
}

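/*
 * Illustrative walk-through of the boost ladder above (hypothetical
 * values): with min = 8, guaranteed (P1) = 24 and max (P0) = 32,
 * successive calls raise hwp_boost_min to (8 + 24) / 2 = 16, then to
 * 24, then to 32, matching the three-level boost described in the
 * comment.
 */
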
static inline void intel_pstate_hwp_boost_down(struct cpudata *cpu)
{
	if (cpu->hwp_boost_min) {
		bool expired;

		/* Check if we are idle for hold time to boost down */
		expired = time_after64(cpu->sample.time, cpu->last_update +
				       hwp_boost_hold_time_ns);
		if (expired) {
			wrmsrl(MSR_HWP_REQUEST, cpu->hwp_req_cached);
			cpu->hwp_boost_min = 0;
		}
	}
	cpu->last_update = cpu->sample.time;
}

static inline void intel_pstate_update_util_hwp_local(struct cpudata *cpu,
						      u64 time)
{
	cpu->sample.time = time;

	if (cpu->sched_flags & SCHED_CPUFREQ_IOWAIT) {
		bool do_io = false;

		cpu->sched_flags = 0;
		/*
		 * Set the iowait_boost flag and update the time. Since the
		 * IO WAIT flag is set all the time, we can't just conclude
		 * that some IO bound activity is scheduled on this CPU from
		 * a single occurrence. If we receive at least two in two
		 * consecutive ticks, then we treat it as a boost candidate.
		 */
		if (time_before64(time, cpu->last_io_update + 2 * TICK_NSEC))
			do_io = true;

		cpu->last_io_update = time;

		if (do_io)
			intel_pstate_hwp_boost_up(cpu);

	} else {
		intel_pstate_hwp_boost_down(cpu);
	}
}

static inline void intel_pstate_update_util_hwp(struct update_util_data *data,
						u64 time, unsigned int flags)
{
	struct cpudata *cpu = container_of(data, struct cpudata, update_util);

	cpu->sched_flags |= flags;

	if (smp_processor_id() == cpu->cpu)
		intel_pstate_update_util_hwp_local(cpu, time);
}

static inline void intel_pstate_calc_avg_perf(struct cpudata *cpu)
{
	struct sample *sample = &cpu->sample;

	sample->core_avg_perf = div_ext_fp(sample->aperf, sample->mperf);
}

static inline bool intel_pstate_sample(struct cpudata *cpu, u64 time)
{
	u64 aperf, mperf;
	unsigned long flags;
	u64 tsc;

	local_irq_save(flags);
	rdmsrl(MSR_IA32_APERF, aperf);
	rdmsrl(MSR_IA32_MPERF, mperf);
	tsc = rdtsc();
	if (cpu->prev_mperf == mperf || cpu->prev_tsc == tsc) {
		local_irq_restore(flags);
		return false;
	}
	local_irq_restore(flags);

	cpu->last_sample_time = cpu->sample.time;
	cpu->sample.time = time;
	cpu->sample.aperf = aperf;
	cpu->sample.mperf = mperf;
	cpu->sample.tsc = tsc;
	cpu->sample.aperf -= cpu->prev_aperf;
	cpu->sample.mperf -= cpu->prev_mperf;
	cpu->sample.tsc -= cpu->prev_tsc;

	cpu->prev_aperf = aperf;
	cpu->prev_mperf = mperf;
	cpu->prev_tsc = tsc;
	/*
	 * First time this function is invoked in a given cycle, all of the
	 * previous sample data fields are equal to zero or stale and they must
	 * be populated with meaningful numbers for things to work, so assume
	 * that sample.time will always be reset before setting the utilization
	 * update hook and make the caller skip the sample then.
	 */
	if (cpu->last_sample_time) {
		intel_pstate_calc_avg_perf(cpu);
		return true;
	}
	return false;
}

static inline int32_t get_avg_frequency(struct cpudata *cpu)
{
	return mul_ext_fp(cpu->sample.core_avg_perf, cpu_khz);
}

static inline int32_t get_avg_pstate(struct cpudata *cpu)
{
	return mul_ext_fp(cpu->pstate.max_pstate_physical,
			  cpu->sample.core_avg_perf);
}

static inline int32_t get_target_pstate(struct cpudata *cpu)
{
	struct sample *sample = &cpu->sample;
	int32_t busy_frac;
	int target, avg_pstate;

	busy_frac = div_fp(sample->mperf << cpu->aperf_mperf_shift,
			   sample->tsc);

	if (busy_frac < cpu->iowait_boost)
		busy_frac = cpu->iowait_boost;

	sample->busy_scaled = busy_frac * 100;

	target = global.no_turbo || global.turbo_disabled ?
			cpu->pstate.max_pstate : cpu->pstate.turbo_pstate;
	target += target >> 2;
	target = mul_fp(target, busy_frac);
	if (target < cpu->pstate.min_pstate)
		target = cpu->pstate.min_pstate;

	/*
	 * If the average P-state during the previous cycle was higher than the
	 * current target, add 50% of the difference to the target to reduce
	 * possible performance oscillations and offset possible performance
	 * loss related to moving the workload from one CPU to another within
	 * a package/module.
	 */
	avg_pstate = get_avg_pstate(cpu);
	if (avg_pstate > target)
		target += (avg_pstate - target) >> 1;

	return target;
}

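/*
 * Illustrative only: with turbo_pstate = 32 and turbo enabled, the
 * headroom term is 32 + (32 >> 2) = 40; if busy_frac is 0.5 (128 in
 * FRAC_BITS fixed point), mul_fp(40, 128) = 20, so a 50% busy CPU is
 * asked for P-state 20 before the 50% averaging correction at the end
 * of get_target_pstate().
 */
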
static int intel_pstate_prepare_request(struct cpudata *cpu, int pstate)
{
	int min_pstate = max(cpu->pstate.min_pstate, cpu->min_perf_ratio);
	int max_pstate = max(min_pstate, cpu->max_perf_ratio);

	return clamp_t(int, pstate, min_pstate, max_pstate);
}

static void intel_pstate_update_pstate(struct cpudata *cpu, int pstate)
{
	if (pstate == cpu->pstate.current_pstate)
		return;

	cpu->pstate.current_pstate = pstate;
	wrmsrl(MSR_IA32_PERF_CTL, pstate_funcs.get_val(cpu, pstate));
}

static void intel_pstate_adjust_pstate(struct cpudata *cpu)
{
	int from = cpu->pstate.current_pstate;
	struct sample *sample;
	int target_pstate;

	update_turbo_state();

	target_pstate = get_target_pstate(cpu);
	target_pstate = intel_pstate_prepare_request(cpu, target_pstate);
	trace_cpu_frequency(target_pstate * cpu->pstate.scaling, cpu->cpu);
	intel_pstate_update_pstate(cpu, target_pstate);

	sample = &cpu->sample;
	trace_pstate_sample(mul_ext_fp(100, sample->core_avg_perf),
		fp_toint(sample->busy_scaled),
		from,
		cpu->pstate.current_pstate,
		sample->mperf,
		sample->aperf,
		sample->tsc,
		get_avg_frequency(cpu),
		fp_toint(cpu->iowait_boost * 100));
}

static void intel_pstate_update_util(struct update_util_data *data, u64 time,
				     unsigned int flags)
{
	struct cpudata *cpu = container_of(data, struct cpudata, update_util);
	u64 delta_ns;

	/* Don't allow remote callbacks */
	if (smp_processor_id() != cpu->cpu)
		return;

	delta_ns = time - cpu->last_update;
	if (flags & SCHED_CPUFREQ_IOWAIT) {
		/* Start over if the CPU may have been idle. */
		if (delta_ns > TICK_NSEC) {
			cpu->iowait_boost = ONE_EIGHTH_FP;
		} else if (cpu->iowait_boost) {
			cpu->iowait_boost <<= 1;
			if (cpu->iowait_boost > int_tofp(1))
				cpu->iowait_boost = int_tofp(1);
		} else {
			cpu->iowait_boost = ONE_EIGHTH_FP;
		}
	} else if (cpu->iowait_boost) {
		/* Clear iowait_boost if the CPU may have been idle. */
		if (delta_ns > TICK_NSEC)
			cpu->iowait_boost = 0;
		else
			cpu->iowait_boost >>= 1;
	}
	cpu->last_update = time;
	delta_ns = time - cpu->sample.time;
	if ((s64)delta_ns < INTEL_PSTATE_SAMPLING_INTERVAL)
		return;

	if (intel_pstate_sample(cpu, time))
		intel_pstate_adjust_pstate(cpu);
}

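/*
 * Illustrative only: ONE_EIGHTH_FP is 1/8 in FRAC_BITS fixed point
 * (32).  Back-to-back iowait wakeups double the boost each tick,
 * 1/8 -> 1/4 -> 1/2 -> 1 (capped at int_tofp(1) == 256), and the boost
 * halves again on ticks without an iowait wakeup.
 */
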
static struct pstate_funcs core_funcs = {
	.get_max = core_get_max_pstate,
	.get_max_physical = core_get_max_pstate_physical,
	.get_min = core_get_min_pstate,
	.get_turbo = core_get_turbo_pstate,
	.get_scaling = core_get_scaling,
	.get_val = core_get_val,
};

static const struct pstate_funcs silvermont_funcs = {
	.get_max = atom_get_max_pstate,
	.get_max_physical = atom_get_max_pstate,
	.get_min = atom_get_min_pstate,
	.get_turbo = atom_get_turbo_pstate,
	.get_val = atom_get_val,
	.get_scaling = silvermont_get_scaling,
	.get_vid = atom_get_vid,
};

static const struct pstate_funcs airmont_funcs = {
	.get_max = atom_get_max_pstate,
	.get_max_physical = atom_get_max_pstate,
	.get_min = atom_get_min_pstate,
	.get_turbo = atom_get_turbo_pstate,
	.get_val = atom_get_val,
	.get_scaling = airmont_get_scaling,
	.get_vid = atom_get_vid,
};

static const struct pstate_funcs knl_funcs = {
	.get_max = core_get_max_pstate,
	.get_max_physical = core_get_max_pstate_physical,
	.get_min = core_get_min_pstate,
	.get_turbo = knl_get_turbo_pstate,
	.get_aperf_mperf_shift = knl_get_aperf_mperf_shift,
	.get_scaling = core_get_scaling,
	.get_val = core_get_val,
};

#define ICPU(model, policy) \
	{ X86_VENDOR_INTEL, 6, model, X86_FEATURE_APERFMPERF,\
			(unsigned long)&policy }

static const struct x86_cpu_id intel_pstate_cpu_ids[] = {
	ICPU(INTEL_FAM6_SANDYBRIDGE,		core_funcs),
	ICPU(INTEL_FAM6_SANDYBRIDGE_X,		core_funcs),
	ICPU(INTEL_FAM6_ATOM_SILVERMONT,	silvermont_funcs),
	ICPU(INTEL_FAM6_IVYBRIDGE,		core_funcs),
	ICPU(INTEL_FAM6_HASWELL_CORE,		core_funcs),
	ICPU(INTEL_FAM6_BROADWELL_CORE,		core_funcs),
	ICPU(INTEL_FAM6_IVYBRIDGE_X,		core_funcs),
	ICPU(INTEL_FAM6_HASWELL_X,		core_funcs),
	ICPU(INTEL_FAM6_HASWELL_ULT,		core_funcs),
	ICPU(INTEL_FAM6_HASWELL_GT3E,		core_funcs),
	ICPU(INTEL_FAM6_BROADWELL_GT3E,		core_funcs),
	ICPU(INTEL_FAM6_ATOM_AIRMONT,		airmont_funcs),
	ICPU(INTEL_FAM6_SKYLAKE_MOBILE,		core_funcs),
	ICPU(INTEL_FAM6_BROADWELL_X,		core_funcs),
	ICPU(INTEL_FAM6_SKYLAKE_DESKTOP,	core_funcs),
	ICPU(INTEL_FAM6_BROADWELL_XEON_D,	core_funcs),
	ICPU(INTEL_FAM6_XEON_PHI_KNL,		knl_funcs),
	ICPU(INTEL_FAM6_XEON_PHI_KNM,		knl_funcs),
	ICPU(INTEL_FAM6_ATOM_GOLDMONT,		core_funcs),
	ICPU(INTEL_FAM6_ATOM_GOLDMONT_PLUS,	core_funcs),
	ICPU(INTEL_FAM6_SKYLAKE_X,		core_funcs),
	{}
};
MODULE_DEVICE_TABLE(x86cpu, intel_pstate_cpu_ids);

static const struct x86_cpu_id intel_pstate_cpu_oob_ids[] __initconst = {
	ICPU(INTEL_FAM6_BROADWELL_XEON_D, core_funcs),
	ICPU(INTEL_FAM6_BROADWELL_X, core_funcs),
	ICPU(INTEL_FAM6_SKYLAKE_X, core_funcs),
	{}
};

static const struct x86_cpu_id intel_pstate_cpu_ee_disable_ids[] = {
	ICPU(INTEL_FAM6_KABYLAKE_DESKTOP, core_funcs),
	{}
};

static const struct x86_cpu_id intel_pstate_hwp_boost_ids[] = {
	ICPU(INTEL_FAM6_SKYLAKE_X, core_funcs),
	ICPU(INTEL_FAM6_SKYLAKE_DESKTOP, core_funcs),
	{}
};

static int intel_pstate_init_cpu(unsigned int cpunum)
{
	struct cpudata *cpu;

	cpu = all_cpu_data[cpunum];

	if (!cpu) {
		cpu = kzalloc(sizeof(*cpu), GFP_KERNEL);
		if (!cpu)
			return -ENOMEM;

		all_cpu_data[cpunum] = cpu;

		cpu->epp_default = -EINVAL;
		cpu->epp_powersave = -EINVAL;
		cpu->epp_saved = -EINVAL;
	}

	cpu = all_cpu_data[cpunum];

	cpu->cpu = cpunum;

	if (hwp_active) {
		const struct x86_cpu_id *id;

		id = x86_match_cpu(intel_pstate_cpu_ee_disable_ids);
		if (id)
			intel_pstate_disable_ee(cpunum);

		intel_pstate_hwp_enable(cpu);

		id = x86_match_cpu(intel_pstate_hwp_boost_ids);
		if (id && intel_pstate_acpi_pm_profile_server())
			hwp_boost = true;
	}

	intel_pstate_get_cpu_pstates(cpu);

	pr_debug("controlling: cpu %d\n", cpunum);

	return 0;
}

static void intel_pstate_set_update_util_hook(unsigned int cpu_num)
{
	struct cpudata *cpu = all_cpu_data[cpu_num];

	if (hwp_active && !hwp_boost)
		return;

	if (cpu->update_util_set)
		return;

	/* Prevent intel_pstate_update_util() from using stale data. */
	cpu->sample.time = 0;
	cpufreq_add_update_util_hook(cpu_num, &cpu->update_util,
				     (hwp_active ?
				      intel_pstate_update_util_hwp :
				      intel_pstate_update_util));
	cpu->update_util_set = true;
}

static void intel_pstate_clear_update_util_hook(unsigned int cpu)
{
	struct cpudata *cpu_data = all_cpu_data[cpu];

	if (!cpu_data->update_util_set)
		return;

	cpufreq_remove_update_util_hook(cpu);
	cpu_data->update_util_set = false;
}

static int intel_pstate_get_max_freq(struct cpudata *cpu)
{
	return global.turbo_disabled || global.no_turbo ?
			cpu->pstate.max_freq : cpu->pstate.turbo_freq;
}

static void intel_pstate_update_perf_limits(struct cpufreq_policy *policy,
					    struct cpudata *cpu)
{
	int max_freq = intel_pstate_get_max_freq(cpu);
	int32_t max_policy_perf, min_policy_perf;
	int max_state, turbo_max;

	/*
	 * HWP needs some special consideration, because on BDX the
	 * HWP_REQUEST uses abstract values to represent performance
	 * rather than pure ratios.
	 */
	if (hwp_active) {
		intel_pstate_get_hwp_max(cpu->cpu, &turbo_max, &max_state);
	} else {
		max_state = global.no_turbo || global.turbo_disabled ?
			cpu->pstate.max_pstate : cpu->pstate.turbo_pstate;
		turbo_max = cpu->pstate.turbo_pstate;
	}

	max_policy_perf = max_state * policy->max / max_freq;
	if (policy->max == policy->min) {
		min_policy_perf = max_policy_perf;
	} else {
		min_policy_perf = max_state * policy->min / max_freq;
		min_policy_perf = clamp_t(int32_t, min_policy_perf,
					  0, max_policy_perf);
	}

	pr_debug("cpu:%d max_state %d min_policy_perf:%d max_policy_perf:%d\n",
		 policy->cpu, max_state,
		 min_policy_perf, max_policy_perf);

	/* Normalize user input to [min_perf, max_perf] */
	if (per_cpu_limits) {
		cpu->min_perf_ratio = min_policy_perf;
		cpu->max_perf_ratio = max_policy_perf;
	} else {
		int32_t global_min, global_max;

		/* Global limits are in percent of the maximum turbo P-state. */
		global_max = DIV_ROUND_UP(turbo_max * global.max_perf_pct, 100);
		global_min = DIV_ROUND_UP(turbo_max * global.min_perf_pct, 100);
		global_min = clamp_t(int32_t, global_min, 0, global_max);

		pr_debug("cpu:%d global_min:%d global_max:%d\n", policy->cpu,
			 global_min, global_max);

		cpu->min_perf_ratio = max(min_policy_perf, global_min);
		cpu->min_perf_ratio = min(cpu->min_perf_ratio, max_policy_perf);
		cpu->max_perf_ratio = min(max_policy_perf, global_max);
		cpu->max_perf_ratio = max(min_policy_perf, cpu->max_perf_ratio);

		/* Make sure min_perf <= max_perf */
		cpu->min_perf_ratio = min(cpu->min_perf_ratio,
					  cpu->max_perf_ratio);
	}

	pr_debug("cpu:%d max_perf_ratio:%d min_perf_ratio:%d\n", policy->cpu,
		 cpu->max_perf_ratio,
		 cpu->min_perf_ratio);
}

static int intel_pstate_set_policy(struct cpufreq_policy *policy)
{
	struct cpudata *cpu;

	if (!policy->cpuinfo.max_freq)
		return -ENODEV;

	pr_debug("set_policy cpuinfo.max %u policy->max %u\n",
		 policy->cpuinfo.max_freq, policy->max);

	cpu = all_cpu_data[policy->cpu];
	cpu->policy = policy->policy;

	mutex_lock(&intel_pstate_limits_lock);

	intel_pstate_update_perf_limits(policy, cpu);

	if (cpu->policy == CPUFREQ_POLICY_PERFORMANCE) {
		/*
		 * NOHZ_FULL CPUs need this as the governor callback may not
		 * be invoked on them.
		 */
		intel_pstate_clear_update_util_hook(policy->cpu);
		intel_pstate_max_within_limits(cpu);
	} else {
		intel_pstate_set_update_util_hook(policy->cpu);
	}

	if (hwp_active) {
		/*
		 * When hwp_boost was active before and has since been turned
		 * off dynamically, the update util hook needs to be cleared.
		 */
		if (!hwp_boost)
			intel_pstate_clear_update_util_hook(policy->cpu);
		intel_pstate_hwp_set(policy->cpu);
	}

	mutex_unlock(&intel_pstate_limits_lock);

	return 0;
}

static void intel_pstate_adjust_policy_max(struct cpufreq_policy *policy,
					   struct cpudata *cpu)
{
	if (!hwp_active &&
	    cpu->pstate.max_pstate_physical > cpu->pstate.max_pstate &&
	    policy->max < policy->cpuinfo.max_freq &&
	    policy->max > cpu->pstate.max_freq) {
		pr_debug("policy->max > max non turbo frequency\n");
		policy->max = policy->cpuinfo.max_freq;
	}
}

static int intel_pstate_verify_policy(struct cpufreq_policy *policy)
{
	struct cpudata *cpu = all_cpu_data[policy->cpu];

	update_turbo_state();
	cpufreq_verify_within_limits(policy, policy->cpuinfo.min_freq,
				     intel_pstate_get_max_freq(cpu));

	if (policy->policy != CPUFREQ_POLICY_POWERSAVE &&
	    policy->policy != CPUFREQ_POLICY_PERFORMANCE)
		return -EINVAL;

	intel_pstate_adjust_policy_max(policy, cpu);

	return 0;
}

static void intel_cpufreq_stop_cpu(struct cpufreq_policy *policy)
{
	intel_pstate_set_min_pstate(all_cpu_data[policy->cpu]);
}

static void intel_pstate_stop_cpu(struct cpufreq_policy *policy)
{
	pr_debug("CPU %d exiting\n", policy->cpu);

	intel_pstate_clear_update_util_hook(policy->cpu);
	if (hwp_active) {
		intel_pstate_hwp_save_state(policy);
		intel_pstate_hwp_force_min_perf(policy->cpu);
	} else {
		intel_cpufreq_stop_cpu(policy);
	}
}

static int intel_pstate_cpu_exit(struct cpufreq_policy *policy)
{
	intel_pstate_exit_perf_limits(policy);

	policy->fast_switch_possible = false;

	return 0;
}

static int __intel_pstate_cpu_init(struct cpufreq_policy *policy)
{
	struct cpudata *cpu;
	int rc;

	rc = intel_pstate_init_cpu(policy->cpu);
	if (rc)
		return rc;

	cpu = all_cpu_data[policy->cpu];

	cpu->max_perf_ratio = 0xFF;
	cpu->min_perf_ratio = 0;

	policy->min = cpu->pstate.min_pstate * cpu->pstate.scaling;
	policy->max = cpu->pstate.turbo_pstate * cpu->pstate.scaling;

	/* cpuinfo and default policy values */
	policy->cpuinfo.min_freq = cpu->pstate.min_pstate * cpu->pstate.scaling;
	update_turbo_state();
	policy->cpuinfo.max_freq = global.turbo_disabled ?
			cpu->pstate.max_pstate : cpu->pstate.turbo_pstate;
	policy->cpuinfo.max_freq *= cpu->pstate.scaling;

	if (hwp_active) {
		unsigned int max_freq;

		max_freq = global.turbo_disabled ?
			cpu->pstate.max_freq : cpu->pstate.turbo_freq;
		if (max_freq < policy->cpuinfo.max_freq)
			policy->cpuinfo.max_freq = max_freq;
	}

	intel_pstate_init_acpi_perf_limits(policy);

	policy->fast_switch_possible = true;

	return 0;
}

static int intel_pstate_cpu_init(struct cpufreq_policy *policy)
{
	int ret = __intel_pstate_cpu_init(policy);

	if (ret)
		return ret;

	if (IS_ENABLED(CONFIG_CPU_FREQ_DEFAULT_GOV_PERFORMANCE))
		policy->policy = CPUFREQ_POLICY_PERFORMANCE;
	else
		policy->policy = CPUFREQ_POLICY_POWERSAVE;

	return 0;
}

static struct cpufreq_driver intel_pstate = {
	.flags		= CPUFREQ_CONST_LOOPS,
	.verify		= intel_pstate_verify_policy,
	.setpolicy	= intel_pstate_set_policy,
	.suspend	= intel_pstate_hwp_save_state,
	.resume		= intel_pstate_resume,
	.init		= intel_pstate_cpu_init,
	.exit		= intel_pstate_cpu_exit,
	.stop_cpu	= intel_pstate_stop_cpu,
	.name		= "intel_pstate",
};

static int intel_cpufreq_verify_policy(struct cpufreq_policy *policy)
{
	struct cpudata *cpu = all_cpu_data[policy->cpu];

	update_turbo_state();
	cpufreq_verify_within_limits(policy, policy->cpuinfo.min_freq,
				     intel_pstate_get_max_freq(cpu));

	intel_pstate_adjust_policy_max(policy, cpu);

	intel_pstate_update_perf_limits(policy, cpu);

	return 0;
}

/* Use of trace in passive mode:
 *
 * In passive mode the trace core_busy field (also known as the
 * performance field, and labelled as such on the graphs; also known as
 * core_avg_perf) is not needed and so is re-assigned to indicate if the
 * driver call was via the normal or fast switch path. Various graphs
 * output from the intel_pstate_tracer.py utility that include core_busy
 * (or performance or core_avg_perf) have a fixed y-axis from 0 to 100%,
 * so we use 10 to indicate the normal path through the driver, and
 * 90 to indicate the fast switch path through the driver.
 * The scaled_busy field is not used, and is set to 0.
 */

#define	INTEL_PSTATE_TRACE_TARGET 10
#define	INTEL_PSTATE_TRACE_FAST_SWITCH 90

static void intel_cpufreq_trace(struct cpudata *cpu, unsigned int trace_type, int old_pstate)
{
	struct sample *sample;

	if (!trace_pstate_sample_enabled())
		return;

	if (!intel_pstate_sample(cpu, ktime_get()))
		return;

	sample = &cpu->sample;
	trace_pstate_sample(trace_type,
		0,
		old_pstate,
		cpu->pstate.current_pstate,
		sample->mperf,
		sample->aperf,
		sample->tsc,
		get_avg_frequency(cpu),
		fp_toint(cpu->iowait_boost * 100));
}

static int intel_cpufreq_target(struct cpufreq_policy *policy,
				unsigned int target_freq,
				unsigned int relation)
{
	struct cpudata *cpu = all_cpu_data[policy->cpu];
	struct cpufreq_freqs freqs;
	int target_pstate, old_pstate;

	update_turbo_state();

	freqs.old = policy->cur;
	freqs.new = target_freq;

	cpufreq_freq_transition_begin(policy, &freqs);
	switch (relation) {
	case CPUFREQ_RELATION_L:
		target_pstate = DIV_ROUND_UP(freqs.new, cpu->pstate.scaling);
		break;
	case CPUFREQ_RELATION_H:
		target_pstate = freqs.new / cpu->pstate.scaling;
		break;
	default:
		target_pstate = DIV_ROUND_CLOSEST(freqs.new, cpu->pstate.scaling);
		break;
	}
	target_pstate = intel_pstate_prepare_request(cpu, target_pstate);
	old_pstate = cpu->pstate.current_pstate;
	if (target_pstate != cpu->pstate.current_pstate) {
		cpu->pstate.current_pstate = target_pstate;
		wrmsrl_on_cpu(policy->cpu, MSR_IA32_PERF_CTL,
			      pstate_funcs.get_val(cpu, target_pstate));
	}
	freqs.new = target_pstate * cpu->pstate.scaling;
	intel_cpufreq_trace(cpu, INTEL_PSTATE_TRACE_TARGET, old_pstate);
	cpufreq_freq_transition_end(policy, &freqs, false);

	return 0;
}

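/*
 * Illustrative only: with a scaling factor of 100000 (100 MHz per
 * P-state step) and target_freq = 2650000 kHz, CPUFREQ_RELATION_L
 * rounds up to P-state 27 while CPUFREQ_RELATION_H rounds down to 26.
 */
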
static unsigned int intel_cpufreq_fast_switch(struct cpufreq_policy *policy,
					      unsigned int target_freq)
{
	struct cpudata *cpu = all_cpu_data[policy->cpu];
	int target_pstate, old_pstate;

	update_turbo_state();

	target_pstate = DIV_ROUND_UP(target_freq, cpu->pstate.scaling);
	target_pstate = intel_pstate_prepare_request(cpu, target_pstate);
	old_pstate = cpu->pstate.current_pstate;
	intel_pstate_update_pstate(cpu, target_pstate);
	intel_cpufreq_trace(cpu, INTEL_PSTATE_TRACE_FAST_SWITCH, old_pstate);
	return target_pstate * cpu->pstate.scaling;
}

static int intel_cpufreq_cpu_init(struct cpufreq_policy *policy)
{
	int ret = __intel_pstate_cpu_init(policy);

	if (ret)
		return ret;

	policy->cpuinfo.transition_latency = INTEL_CPUFREQ_TRANSITION_LATENCY;
	policy->transition_delay_us = INTEL_CPUFREQ_TRANSITION_DELAY;
	/* This reflects the intel_pstate_get_cpu_pstates() setting. */
	policy->cur = policy->cpuinfo.min_freq;

	return 0;
}

static struct cpufreq_driver intel_cpufreq = {
        .flags          = CPUFREQ_CONST_LOOPS,
        .verify         = intel_cpufreq_verify_policy,
        .target         = intel_cpufreq_target,
        .fast_switch    = intel_cpufreq_fast_switch,
        .init           = intel_cpufreq_cpu_init,
        .exit           = intel_pstate_cpu_exit,
        .stop_cpu       = intel_cpufreq_stop_cpu,
        .name           = "intel_cpufreq",
};
static struct cpufreq_driver *default_driver = &intel_pstate;
static void intel_pstate_driver_cleanup(void)
{
        unsigned int cpu;

        get_online_cpus();
        for_each_online_cpu(cpu) {
                if (all_cpu_data[cpu]) {
                        if (intel_pstate_driver == &intel_pstate)
                                intel_pstate_clear_update_util_hook(cpu);

                        kfree(all_cpu_data[cpu]);
                        all_cpu_data[cpu] = NULL;
                }
        }
        put_online_cpus();

        intel_pstate_driver = NULL;
}
static int intel_pstate_register_driver(struct cpufreq_driver *driver)
{
        int ret;

        memset(&global, 0, sizeof(global));
        global.max_perf_pct = 100;

        intel_pstate_driver = driver;
        ret = cpufreq_register_driver(intel_pstate_driver);
        if (ret) {
                intel_pstate_driver_cleanup();
                return ret;
        }

        global.min_perf_pct = min_perf_pct_min();

        return 0;
}

static int intel_pstate_unregister_driver(void)
{
        if (hwp_active)
                return -EBUSY;

        cpufreq_unregister_driver(intel_pstate_driver);
        intel_pstate_driver_cleanup();

        return 0;
}
static ssize_t intel_pstate_show_status(char *buf)
{
        if (!intel_pstate_driver)
                return sprintf(buf, "off\n");

        return sprintf(buf, "%s\n", intel_pstate_driver == &intel_pstate ?
                                        "active" : "passive");
}
static int intel_pstate_update_status(const char *buf, size_t size)
{
        int ret;

        if (size == 3 && !strncmp(buf, "off", size))
                return intel_pstate_driver ?
                        intel_pstate_unregister_driver() : -EINVAL;

        if (size == 6 && !strncmp(buf, "active", size)) {
                if (intel_pstate_driver) {
                        if (intel_pstate_driver == &intel_pstate)
                                return 0;

                        ret = intel_pstate_unregister_driver();
                        if (ret)
                                return ret;
                }

                return intel_pstate_register_driver(&intel_pstate);
        }

        if (size == 7 && !strncmp(buf, "passive", size)) {
                if (intel_pstate_driver) {
                        if (intel_pstate_driver == &intel_cpufreq)
                                return 0;

                        ret = intel_pstate_unregister_driver();
                        if (ret)
                                return ret;
                }

                return intel_pstate_register_driver(&intel_cpufreq);
        }

        return -EINVAL;
}
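
/*
 * Illustrative usage note (not part of the driver): the status string
 * handled above backs the sysfs attribute
 * /sys/devices/system/cpu/intel_pstate/status, so the operating mode can
 * be switched at run time, e.g.:
 *
 *   # echo passive > /sys/devices/system/cpu/intel_pstate/status
 *   # cat /sys/devices/system/cpu/intel_pstate/status
 *   passive
 */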
static int no_load __initdata;
static int no_hwp __initdata;
static int hwp_only __initdata;
static unsigned int force_load __initdata;
static int __init intel_pstate_msrs_not_valid(void)
{
        if (!pstate_funcs.get_max() ||
            !pstate_funcs.get_min() ||
            !pstate_funcs.get_turbo())
                return -ENODEV;

        return 0;
}
static void __init copy_cpu_funcs(struct pstate_funcs *funcs)
{
        pstate_funcs.get_max   = funcs->get_max;
        pstate_funcs.get_max_physical = funcs->get_max_physical;
        pstate_funcs.get_min   = funcs->get_min;
        pstate_funcs.get_turbo = funcs->get_turbo;
        pstate_funcs.get_scaling = funcs->get_scaling;
        pstate_funcs.get_val   = funcs->get_val;
        pstate_funcs.get_vid   = funcs->get_vid;
        pstate_funcs.get_aperf_mperf_shift = funcs->get_aperf_mperf_shift;
}
#ifdef CONFIG_ACPI

static bool __init intel_pstate_no_acpi_pss(void)
{
        int i;

        for_each_possible_cpu(i) {
                acpi_status status;
                union acpi_object *pss;
                struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
                struct acpi_processor *pr = per_cpu(processors, i);

                if (!pr)
                        continue;

                status = acpi_evaluate_object(pr->handle, "_PSS", NULL, &buffer);
                if (ACPI_FAILURE(status))
                        continue;

                pss = buffer.pointer;
                if (pss && pss->type == ACPI_TYPE_PACKAGE) {
                        kfree(pss);
                        return false;
                }

                kfree(pss);
        }

        pr_debug("ACPI _PSS not found\n");
        return true;
}
static bool __init intel_pstate_no_acpi_pcch(void)
{
        acpi_status status;
        acpi_handle handle;

        status = acpi_get_handle(NULL, "\\_SB", &handle);
        if (ACPI_FAILURE(status))
                goto not_found;

        if (acpi_has_method(handle, "PCCH"))
                return false;

not_found:
        pr_debug("ACPI PCCH not found\n");
        return true;
}
static bool __init intel_pstate_has_acpi_ppc(void)
{
        int i;

        for_each_possible_cpu(i) {
                struct acpi_processor *pr = per_cpu(processors, i);

                if (!pr)
                        continue;
                if (acpi_has_method(pr->handle, "_PPC"))
                        return true;
        }
        pr_debug("ACPI _PPC not found\n");
        return false;
}
enum {
        PSS,
        PPC,
};

/* Hardware vendor-specific info that has its own power management modes */
static struct acpi_platform_list plat_info[] __initdata = {
        {"HP    ", "ProLiant", 0, ACPI_SIG_FADT, all_versions, 0, PSS},
        {"ORACLE", "X4-2    ", 0, ACPI_SIG_FADT, all_versions, 0, PPC},
        {"ORACLE", "X4-2L   ", 0, ACPI_SIG_FADT, all_versions, 0, PPC},
        {"ORACLE", "X4-2B   ", 0, ACPI_SIG_FADT, all_versions, 0, PPC},
        {"ORACLE", "X3-2    ", 0, ACPI_SIG_FADT, all_versions, 0, PPC},
        {"ORACLE", "X3-2L   ", 0, ACPI_SIG_FADT, all_versions, 0, PPC},
        {"ORACLE", "X3-2B   ", 0, ACPI_SIG_FADT, all_versions, 0, PPC},
        {"ORACLE", "X4470M2 ", 0, ACPI_SIG_FADT, all_versions, 0, PPC},
        {"ORACLE", "X4270M3 ", 0, ACPI_SIG_FADT, all_versions, 0, PPC},
        {"ORACLE", "X4270M2 ", 0, ACPI_SIG_FADT, all_versions, 0, PPC},
        {"ORACLE", "X4170M2 ", 0, ACPI_SIG_FADT, all_versions, 0, PPC},
        {"ORACLE", "X4170 M3", 0, ACPI_SIG_FADT, all_versions, 0, PPC},
        {"ORACLE", "X4275 M3", 0, ACPI_SIG_FADT, all_versions, 0, PPC},
        {"ORACLE", "X6-2    ", 0, ACPI_SIG_FADT, all_versions, 0, PPC},
        {"ORACLE", "Sudbury ", 0, ACPI_SIG_FADT, all_versions, 0, PPC},
        { } /* End */
};
static bool __init intel_pstate_platform_pwr_mgmt_exists(void)
{
        const struct x86_cpu_id *id;
        u64 misc_pwr;
        int idx;

        id = x86_match_cpu(intel_pstate_cpu_oob_ids);
        if (id) {
                rdmsrl(MSR_MISC_PWR_MGMT, misc_pwr);
                if (misc_pwr & (1 << 8)) {
                        pr_debug("Bit 8 in the MISC_PWR_MGMT MSR set\n");
                        return true;
                }
        }

        idx = acpi_match_platform_list(plat_info);
        if (idx < 0)
                return false;

        switch (plat_info[idx].data) {
        case PSS:
                if (!intel_pstate_no_acpi_pss())
                        return false;

                return intel_pstate_no_acpi_pcch();
        case PPC:
                return intel_pstate_has_acpi_ppc() && !force_load;
        }

        return false;
}
static void intel_pstate_request_control_from_smm(void)
{
        /*
         * It may be unsafe to request P-states control from SMM if _PPC support
         * has not been enabled.
         */
        if (acpi_ppc)
                acpi_processor_pstate_control();
}
#else /* CONFIG_ACPI not enabled */
static inline bool intel_pstate_platform_pwr_mgmt_exists(void) { return false; }
static inline bool intel_pstate_has_acpi_ppc(void) { return false; }
static inline void intel_pstate_request_control_from_smm(void) {}
#endif /* CONFIG_ACPI */
#define INTEL_PSTATE_HWP_BROADWELL	0x01

#define ICPU_HWP(model, hwp_mode) \
	{ X86_VENDOR_INTEL, 6, model, X86_FEATURE_HWP, hwp_mode }
static const struct x86_cpu_id hwp_support_ids[] __initconst = {
        ICPU_HWP(INTEL_FAM6_BROADWELL_X, INTEL_PSTATE_HWP_BROADWELL),
        ICPU_HWP(INTEL_FAM6_BROADWELL_XEON_D, INTEL_PSTATE_HWP_BROADWELL),
        ICPU_HWP(X86_MODEL_ANY, 0),
        {}
};
static int __init intel_pstate_init(void)
{
        const struct x86_cpu_id *id;
        int rc;

        if (no_load)
                return -ENODEV;

        id = x86_match_cpu(hwp_support_ids);
        if (id) {
                copy_cpu_funcs(&core_funcs);
                if (!no_hwp) {
                        hwp_active++;
                        hwp_mode_bdw = id->driver_data;
                        intel_pstate.attr = hwp_cpufreq_attrs;
                        goto hwp_cpu_matched;
                }
        } else {
                id = x86_match_cpu(intel_pstate_cpu_ids);
                if (!id) {
                        pr_info("CPU ID not supported\n");
                        return -ENODEV;
                }

                copy_cpu_funcs((struct pstate_funcs *)id->driver_data);
        }

        if (intel_pstate_msrs_not_valid()) {
                pr_info("Invalid MSRs\n");
                return -ENODEV;
        }

hwp_cpu_matched:
        /*
         * The Intel pstate driver will be ignored if the platform
         * firmware has its own power management modes.
         */
        if (intel_pstate_platform_pwr_mgmt_exists()) {
                pr_info("P-states controlled by the platform\n");
                return -ENODEV;
        }

        if (!hwp_active && hwp_only)
                return -ENOTSUPP;

        pr_info("Intel P-state driver initializing\n");

        all_cpu_data = vzalloc(array_size(sizeof(void *), num_possible_cpus()));
        if (!all_cpu_data)
                return -ENOMEM;

        intel_pstate_request_control_from_smm();

        intel_pstate_sysfs_expose_params();

        mutex_lock(&intel_pstate_driver_lock);
        rc = intel_pstate_register_driver(default_driver);
        mutex_unlock(&intel_pstate_driver_lock);
        if (rc)
                return rc;

        if (hwp_active)
                pr_info("HWP enabled\n");

        return 0;
}
device_initcall(intel_pstate_init);
static int __init intel_pstate_setup(char *str)
{
        if (!str)
                return -EINVAL;

        if (!strcmp(str, "disable")) {
                no_load = 1;
        } else if (!strcmp(str, "passive")) {
                pr_info("Passive mode enabled\n");
                default_driver = &intel_cpufreq;
                no_hwp = 1;
        }
        if (!strcmp(str, "no_hwp")) {
                pr_info("HWP disabled\n");
                no_hwp = 1;
        }
        if (!strcmp(str, "force"))
                force_load = 1;
        if (!strcmp(str, "hwp_only"))
                hwp_only = 1;
        if (!strcmp(str, "per_cpu_perf_limits"))
                per_cpu_limits = true;

#ifdef CONFIG_ACPI
        if (!strcmp(str, "support_acpi_ppc"))
                acpi_ppc = true;
#endif

        return 0;
}
early_param("intel_pstate", intel_pstate_setup);
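
/*
 * Example boot command line usage of the options handled above (this
 * comment is illustrative; the option names come from intel_pstate_setup()):
 *
 *   intel_pstate=disable              do not load the driver at all
 *   intel_pstate=passive              register the intel_cpufreq driver
 *   intel_pstate=no_hwp               do not enable hardware P-states (HWP)
 *   intel_pstate=force                load even if a conflicting _PPC is found
 *   intel_pstate=hwp_only             only load on CPUs with HWP support
 *   intel_pstate=per_cpu_perf_limits  allow per-CPU performance limits
 *   intel_pstate=support_acpi_ppc     honor ACPI _PPC limits (CONFIG_ACPI only)
 */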
MODULE_AUTHOR("Dirk Brandewie <dirk.j.brandewie@intel.com>");
MODULE_DESCRIPTION("'intel_pstate' - P state driver Intel Core processors");
MODULE_LICENSE("GPL");