git.ipfire.org Git - thirdparty/linux.git/commitdiff
x86/tsc: Provide ART base clock information for TSC
author: Lakshmi Sowjanya D <lakshmi.sowjanya.d@intel.com>
Mon, 13 May 2024 10:38:03 +0000 (16:08 +0530)
committer: Thomas Gleixner <tglx@linutronix.de>
Mon, 3 Jun 2024 09:18:50 +0000 (11:18 +0200)
The core code provides a new mechanism to allow conversion between ART and
TSC. This allows to replace the x86 specific ART/TSC conversion functions.

Prepare for removal by filling in the base clock conversion information for
ART and associating the base clock to the TSC clocksource.

The existing conversion functions will be removed once the usage sites are
converted over to the new model.

[ tglx: Massaged change log ]

Co-developed-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Co-developed-by: Christopher S. Hall <christopher.s.hall@intel.com>
Signed-off-by: Christopher S. Hall <christopher.s.hall@intel.com>
Signed-off-by: Lakshmi Sowjanya D <lakshmi.sowjanya.d@intel.com>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Link: https://lore.kernel.org/r/20240513103813.5666-3-lakshmi.sowjanya.d@intel.com
arch/x86/kernel/tsc.c
include/linux/clocksource_ids.h

index 06b170759e5bf581d78a0420ae887cf5d139d6d8..d1888db5db91f826fe1139caf9a977dd72377ef1 100644 (file)
@@ -50,9 +50,9 @@ int tsc_clocksource_reliable;
 
 static int __read_mostly tsc_force_recalibrate;
 
-static u32 art_to_tsc_numerator;
-static u32 art_to_tsc_denominator;
-static u64 art_to_tsc_offset;
+static struct clocksource_base art_base_clk = {
+       .id    = CSID_X86_ART,
+};
 static bool have_art;
 
 struct cyc2ns {
@@ -1074,7 +1074,7 @@ core_initcall(cpufreq_register_tsc_scaling);
  */
 static void __init detect_art(void)
 {
-       unsigned int unused[2];
+       unsigned int unused;
 
        if (boot_cpu_data.cpuid_level < ART_CPUID_LEAF)
                return;
@@ -1089,13 +1089,14 @@ static void __init detect_art(void)
            tsc_async_resets)
                return;
 
-       cpuid(ART_CPUID_LEAF, &art_to_tsc_denominator,
-             &art_to_tsc_numerator, unused, unused+1);
+       cpuid(ART_CPUID_LEAF, &art_base_clk.denominator,
+             &art_base_clk.numerator, &art_base_clk.freq_khz, &unused);
 
-       if (art_to_tsc_denominator < ART_MIN_DENOMINATOR)
+       art_base_clk.freq_khz /= KHZ;
+       if (art_base_clk.denominator < ART_MIN_DENOMINATOR)
                return;
 
-       rdmsrl(MSR_IA32_TSC_ADJUST, art_to_tsc_offset);
+       rdmsrl(MSR_IA32_TSC_ADJUST, art_base_clk.offset);
 
        /* Make this sticky over multiple CPU init calls */
        setup_force_cpu_cap(X86_FEATURE_ART);
@@ -1303,13 +1304,13 @@ struct system_counterval_t convert_art_to_tsc(u64 art)
 {
        u64 tmp, res, rem;
 
-       rem = do_div(art, art_to_tsc_denominator);
+       rem = do_div(art, art_base_clk.denominator);
 
-       res = art * art_to_tsc_numerator;
-       tmp = rem * art_to_tsc_numerator;
+       res = art * art_base_clk.numerator;
+       tmp = rem * art_base_clk.numerator;
 
-       do_div(tmp, art_to_tsc_denominator);
-       res += tmp + art_to_tsc_offset;
+       do_div(tmp, art_base_clk.denominator);
+       res += tmp + art_base_clk.offset;
 
        return (struct system_counterval_t) {
                .cs_id  = have_art ? CSID_X86_TSC : CSID_GENERIC,
@@ -1356,7 +1357,6 @@ struct system_counterval_t convert_art_ns_to_tsc(u64 art_ns)
 }
 EXPORT_SYMBOL(convert_art_ns_to_tsc);
 
-
 static void tsc_refine_calibration_work(struct work_struct *work);
 static DECLARE_DELAYED_WORK(tsc_irqwork, tsc_refine_calibration_work);
 /**
@@ -1458,8 +1458,10 @@ out:
        if (tsc_unstable)
                goto unreg;
 
-       if (boot_cpu_has(X86_FEATURE_ART))
+       if (boot_cpu_has(X86_FEATURE_ART)) {
                have_art = true;
+               clocksource_tsc.base = &art_base_clk;
+       }
        clocksource_register_khz(&clocksource_tsc, tsc_khz);
 unreg:
        clocksource_unregister(&clocksource_tsc_early);
@@ -1484,8 +1486,10 @@ static int __init init_tsc_clocksource(void)
         * the refined calibration and directly register it as a clocksource.
         */
        if (boot_cpu_has(X86_FEATURE_TSC_KNOWN_FREQ)) {
-               if (boot_cpu_has(X86_FEATURE_ART))
+               if (boot_cpu_has(X86_FEATURE_ART)) {
                        have_art = true;
+                       clocksource_tsc.base = &art_base_clk;
+               }
                clocksource_register_khz(&clocksource_tsc, tsc_khz);
                clocksource_unregister(&clocksource_tsc_early);
 
@@ -1509,10 +1513,12 @@ static bool __init determine_cpu_tsc_frequencies(bool early)
 
        if (early) {
                cpu_khz = x86_platform.calibrate_cpu();
-               if (tsc_early_khz)
+               if (tsc_early_khz) {
                        tsc_khz = tsc_early_khz;
-               else
+               } else {
                        tsc_khz = x86_platform.calibrate_tsc();
+                       clocksource_tsc.freq_khz = tsc_khz;
+               }
        } else {
                /* We should not be here with non-native cpu calibration */
                WARN_ON(x86_platform.calibrate_cpu != native_calibrate_cpu);
index a4fa3436940c8fc9f5ab58884b890c6fa4a5a721..2bb4d8c2f1b0c82b04449394ca430b03f4cf6534 100644 (file)
@@ -9,6 +9,7 @@ enum clocksource_ids {
        CSID_X86_TSC_EARLY,
        CSID_X86_TSC,
        CSID_X86_KVM_CLK,
+       CSID_X86_ART,
        CSID_MAX,
 };