From: Thomas Gleixner Date: Tue, 24 Feb 2026 16:36:49 +0000 (+0100) Subject: x86/apic: Enable TSC coupled programming mode X-Git-Url: http://git.ipfire.org/gitweb.cgi?a=commitdiff_plain;h=f246ec3478cfdab830ee0815209f48923e7ee5e2;p=thirdparty%2Fkernel%2Flinux.git x86/apic: Enable TSC coupled programming mode The TSC deadline timer is directly coupled to the TSC and setting the next deadline is tedious as the clockevents core code converts the CLOCK_MONOTONIC based absolute expiry time to a relative expiry by reading the current time from the TSC. It converts that delta to cycles and hands the result to lapic_next_deadline(), which then has to read the TSC and add the delta to program the timer. The core code now supports coupled clock event devices and can provide the expiry time in TSC cycles directly without reading the TSC at all. This obviously works only when the TSC is the current clocksource, but that's the default for all modern CPUs which implement the TSC deadline timer. If the TSC is not the current clocksource (e.g. early boot) then the core code falls back to the relative set_next_event() callback as before. 
Signed-off-by: Thomas Gleixner Signed-off-by: Peter Zijlstra (Intel) Link: https://patch.msgid.link/20260224163430.076565985@kernel.org --- diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig index d337d8dced86a..560d2ce8cedda 100644 --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig @@ -164,6 +164,7 @@ config X86 select EDAC_SUPPORT select GENERIC_CLOCKEVENTS_BROADCAST if X86_64 || (X86_32 && X86_LOCAL_APIC) select GENERIC_CLOCKEVENTS_BROADCAST_IDLE if GENERIC_CLOCKEVENTS_BROADCAST + select GENERIC_CLOCKEVENTS_COUPLED_INLINE if X86_64 select GENERIC_CLOCKEVENTS_MIN_ADJUST select GENERIC_CMOS_UPDATE select GENERIC_CPU_AUTOPROBE diff --git a/arch/x86/include/asm/clock_inlined.h b/arch/x86/include/asm/clock_inlined.h index 29902c5bcc5c8..b2dee8db2fb9c 100644 --- a/arch/x86/include/asm/clock_inlined.h +++ b/arch/x86/include/asm/clock_inlined.h @@ -11,4 +11,12 @@ static __always_inline u64 arch_inlined_clocksource_read(struct clocksource *cs) return (u64)rdtsc_ordered(); } +struct clock_event_device; + +static __always_inline void +arch_inlined_clockevent_set_next_coupled(u64 cycles, struct clock_event_device *evt) +{ + native_wrmsrq(MSR_IA32_TSC_DEADLINE, cycles); +} + #endif diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c index 5bb5b39376ca5..60cab20b79016 100644 --- a/arch/x86/kernel/apic/apic.c +++ b/arch/x86/kernel/apic/apic.c @@ -591,14 +591,14 @@ static void setup_APIC_timer(void) if (this_cpu_has(X86_FEATURE_TSC_DEADLINE_TIMER)) { levt->name = "lapic-deadline"; - levt->features &= ~(CLOCK_EVT_FEAT_PERIODIC | - CLOCK_EVT_FEAT_DUMMY); + levt->features &= ~(CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_DUMMY); + levt->features |= CLOCK_EVT_FEAT_CLOCKSOURCE_COUPLED; + levt->cs_id = CSID_X86_TSC; levt->set_next_event = lapic_next_deadline; - clockevents_config_and_register(levt, - tsc_khz * (1000 / TSC_DIVISOR), - 0xF, ~0UL); - } else + clockevents_config_and_register(levt, tsc_khz * (1000 / TSC_DIVISOR), 0xF, ~0UL); + } else { 
clockevents_register_device(levt); + } apic_update_vector(smp_processor_id(), LOCAL_TIMER_VECTOR, true); } diff --git a/arch/x86/kernel/tsc.c b/arch/x86/kernel/tsc.c index 74a26fb4417cc..f31046f98a923 100644 --- a/arch/x86/kernel/tsc.c +++ b/arch/x86/kernel/tsc.c @@ -1203,7 +1203,8 @@ static struct clocksource clocksource_tsc = { CLOCK_SOURCE_VALID_FOR_HRES | CLOCK_SOURCE_CAN_INLINE_READ | CLOCK_SOURCE_MUST_VERIFY | - CLOCK_SOURCE_VERIFY_PERCPU, + CLOCK_SOURCE_VERIFY_PERCPU | + CLOCK_SOURCE_HAS_COUPLED_CLOCK_EVENT, .id = CSID_X86_TSC, .vdso_clock_mode = VDSO_CLOCKMODE_TSC, .enable = tsc_cs_enable,