Need to do a quick release.
-revert-x86-hyperv-fix-hv-tsc-page-based-sched_clock-for-hibernation.patch
-revert-x86-crash-wrap-crash-dumping-code-into-crash-related-ifdefs.patch
-x86-hyperv-fix-hv-tsc-page-based-sched_clock-for-hibernation.patch
-memblock-make-memblock_set_node-also-warn-about-use-.patch
-memblock-use-numa_valid_node-helper-to-check-for-invalid-node-id.patch
-jbd2-increase-io-priority-for-writing-revoke-records.patch
-jbd2-flush-filesystem-device-before-updating-tail-se.patch
-dm-array-fix-releasing-a-faulty-array-block-twice-in.patch
-dm-array-fix-unreleased-btree-blocks-on-closing-a-fa.patch
-dm-array-fix-cursor-index-when-skipping-across-block.patch
-exfat-fix-the-infinite-loop-in-exfat_readdir.patch
-exfat-fix-the-infinite-loop-in-__exfat_free_cluster.patch
-ovl-do-not-encode-lower-fh-with-upper-sb_writers-hel.patch
-ovl-pass-realinode-to-ovl_encode_real_fh-instead-of-.patch
-ovl-support-encoding-fid-from-inode-with-no-alias.patch
-erofs-handle-overlapped-pclusters-out-of-crafted-ima.patch
-erofs-fix-psi-memstall-accounting.patch
-asoc-rt722-add-delay-time-to-wait-for-the-calibratio.patch
-asoc-mediatek-disable-buffer-pre-allocation.patch
-selftests-alsa-fix-circular-dependency-involving-glo.patch
--- /dev/null
+From 129885ec461fd82348e557524b087496947532fe Mon Sep 17 00:00:00 2001
+From: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Date: Fri, 10 Jan 2025 13:09:11 +0100
+Subject: Revert "x86, crash: wrap crash dumping code into crash related ifdefs"
+
+From: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+This reverts commit e5b1574a8ca28c40cf53eda43f6c3b016ed41e27 which is
+commit a4eeb2176d89fdf2785851521577b94b31690a60 upstream.
+
+When this change is backported to the 6.6.y tree, it can cause build
+errors on some configurations when KEXEC is not enabled, so revert it
+for now.
+
+Reported-by: Ignat Korchagin <ignat@cloudflare.com>
+Link: https://lore.kernel.org/r/3DB3A6D3-0D3A-4682-B4FA-407B2D3263B2@cloudflare.com
+Reported-by: Lars Wendler <wendler.lars@web.de>
+Link: https://lore.kernel.org/r/20250110103328.0e3906a8@chagall.paradoxon.rec
+Reported-by: Chris Clayton <chris2553@googlemail.com>
+Link: https://lore.kernel.org/r/10c7be00-b1f8-4389-801b-fb2d0b22468d@googlemail.com
+Cc: Al Viro <viro@zeniv.linux.org.uk>
+Cc: Andrew Morton <akpm@linux-foundation.org>
+Cc: Dexuan Cui <decui@microsoft.com>
+Cc: Eric W. Biederman <ebiederm@xmission.com>
+Cc: Hari Bathini <hbathini@linux.ibm.com>
+Cc: Klara Modin <klarasmodin@gmail.com>
+Cc: Michael Kelley <mhklinux@outlook.com>
+Cc: Naman Jain <namjain@linux.microsoft.com>
+Cc: Nathan Chancellor <nathan@kernel.org>
+Cc: Pingfan Liu <piliu@redhat.com>
+Cc: Sasha Levin <sashal@kernel.org>
+Cc: Stephen Rothwell <sfr@canb.auug.org.au>
+Cc: Wei Liu <wei.liu@kernel.org>
+Cc: Yang Li <yang.lee@linux.alibaba.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/kernel/Makefile | 4 ++--
+ arch/x86/kernel/cpu/mshyperv.c | 10 ++--------
+ arch/x86/kernel/kexec-bzimage64.c | 4 ----
+ arch/x86/kernel/kvm.c | 4 ++--
+ arch/x86/kernel/machine_kexec_64.c | 3 ---
+ arch/x86/kernel/reboot.c | 4 ++--
+ arch/x86/kernel/setup.c | 2 +-
+ arch/x86/kernel/smp.c | 2 +-
+ arch/x86/xen/enlighten_hvm.c | 4 ----
+ arch/x86/xen/mmu_pv.c | 2 +-
+ 10 files changed, 11 insertions(+), 28 deletions(-)
+
+--- a/arch/x86/kernel/Makefile
++++ b/arch/x86/kernel/Makefile
+@@ -99,9 +99,9 @@ obj-$(CONFIG_TRACING) += trace.o
+ obj-$(CONFIG_RETHOOK) += rethook.o
+ obj-$(CONFIG_CRASH_CORE) += crash_core_$(BITS).o
+ obj-$(CONFIG_KEXEC_CORE) += machine_kexec_$(BITS).o
+-obj-$(CONFIG_KEXEC_CORE) += relocate_kernel_$(BITS).o
++obj-$(CONFIG_KEXEC_CORE) += relocate_kernel_$(BITS).o crash.o
+ obj-$(CONFIG_KEXEC_FILE) += kexec-bzimage64.o
+-obj-$(CONFIG_CRASH_DUMP) += crash_dump_$(BITS).o crash.o
++obj-$(CONFIG_CRASH_DUMP) += crash_dump_$(BITS).o
+ obj-y += kprobes/
+ obj-$(CONFIG_MODULES) += module.o
+ obj-$(CONFIG_X86_32) += doublefault_32.o
+--- a/arch/x86/kernel/cpu/mshyperv.c
++++ b/arch/x86/kernel/cpu/mshyperv.c
+@@ -209,9 +209,7 @@ static void hv_machine_shutdown(void)
+ if (kexec_in_progress)
+ hyperv_cleanup();
+ }
+-#endif /* CONFIG_KEXEC_CORE */
+
+-#ifdef CONFIG_CRASH_DUMP
+ static void hv_machine_crash_shutdown(struct pt_regs *regs)
+ {
+ if (hv_crash_handler)
+@@ -223,7 +221,7 @@ static void hv_machine_crash_shutdown(st
+ /* Disable the hypercall page when there is only 1 active CPU. */
+ hyperv_cleanup();
+ }
+-#endif /* CONFIG_CRASH_DUMP */
++#endif /* CONFIG_KEXEC_CORE */
+ #endif /* CONFIG_HYPERV */
+
+ static uint32_t __init ms_hyperv_platform(void)
+@@ -495,14 +493,10 @@ static void __init ms_hyperv_init_platfo
+ no_timer_check = 1;
+ #endif
+
+-#if IS_ENABLED(CONFIG_HYPERV)
+-#if defined(CONFIG_KEXEC_CORE)
++#if IS_ENABLED(CONFIG_HYPERV) && defined(CONFIG_KEXEC_CORE)
+ machine_ops.shutdown = hv_machine_shutdown;
+-#endif
+-#if defined(CONFIG_CRASH_DUMP)
+ machine_ops.crash_shutdown = hv_machine_crash_shutdown;
+ #endif
+-#endif
+ if (ms_hyperv.features & HV_ACCESS_TSC_INVARIANT) {
+ /*
+ * Writing to synthetic MSR 0x40000118 updates/changes the
+--- a/arch/x86/kernel/kexec-bzimage64.c
++++ b/arch/x86/kernel/kexec-bzimage64.c
+@@ -263,13 +263,11 @@ setup_boot_parameters(struct kimage *ima
+ memset(&params->hd0_info, 0, sizeof(params->hd0_info));
+ memset(&params->hd1_info, 0, sizeof(params->hd1_info));
+
+-#ifdef CONFIG_CRASH_DUMP
+ if (image->type == KEXEC_TYPE_CRASH) {
+ ret = crash_setup_memmap_entries(image, params);
+ if (ret)
+ return ret;
+ } else
+-#endif
+ setup_e820_entries(params);
+
+ nr_e820_entries = params->e820_entries;
+@@ -430,14 +428,12 @@ static void *bzImage64_load(struct kimag
+ return ERR_PTR(-EINVAL);
+ }
+
+-#ifdef CONFIG_CRASH_DUMP
+ /* Allocate and load backup region */
+ if (image->type == KEXEC_TYPE_CRASH) {
+ ret = crash_load_segments(image);
+ if (ret)
+ return ERR_PTR(ret);
+ }
+-#endif
+
+ /*
+ * Load purgatory. For 64bit entry point, purgatory code can be
+--- a/arch/x86/kernel/kvm.c
++++ b/arch/x86/kernel/kvm.c
+@@ -769,7 +769,7 @@ static struct notifier_block kvm_pv_rebo
+ * won't be valid. In cases like kexec, in which you install a new kernel, this
+ * means a random memory location will be kept being written.
+ */
+-#ifdef CONFIG_CRASH_DUMP
++#ifdef CONFIG_KEXEC_CORE
+ static void kvm_crash_shutdown(struct pt_regs *regs)
+ {
+ kvm_guest_cpu_offline(true);
+@@ -852,7 +852,7 @@ static void __init kvm_guest_init(void)
+ kvm_guest_cpu_init();
+ #endif
+
+-#ifdef CONFIG_CRASH_DUMP
++#ifdef CONFIG_KEXEC_CORE
+ machine_ops.crash_shutdown = kvm_crash_shutdown;
+ #endif
+
+--- a/arch/x86/kernel/machine_kexec_64.c
++++ b/arch/x86/kernel/machine_kexec_64.c
+@@ -545,8 +545,6 @@ int arch_kimage_file_post_load_cleanup(s
+ }
+ #endif /* CONFIG_KEXEC_FILE */
+
+-#ifdef CONFIG_CRASH_DUMP
+-
+ static int
+ kexec_mark_range(unsigned long start, unsigned long end, bool protect)
+ {
+@@ -591,7 +589,6 @@ void arch_kexec_unprotect_crashkres(void
+ {
+ kexec_mark_crashkres(false);
+ }
+-#endif
+
+ /*
+ * During a traditional boot under SME, SME will encrypt the kernel,
+--- a/arch/x86/kernel/reboot.c
++++ b/arch/x86/kernel/reboot.c
+@@ -796,7 +796,7 @@ struct machine_ops machine_ops __ro_afte
+ .emergency_restart = native_machine_emergency_restart,
+ .restart = native_machine_restart,
+ .halt = native_machine_halt,
+-#ifdef CONFIG_CRASH_DUMP
++#ifdef CONFIG_KEXEC_CORE
+ .crash_shutdown = native_machine_crash_shutdown,
+ #endif
+ };
+@@ -826,7 +826,7 @@ void machine_halt(void)
+ machine_ops.halt();
+ }
+
+-#ifdef CONFIG_CRASH_DUMP
++#ifdef CONFIG_KEXEC_CORE
+ void machine_crash_shutdown(struct pt_regs *regs)
+ {
+ machine_ops.crash_shutdown(regs);
+--- a/arch/x86/kernel/setup.c
++++ b/arch/x86/kernel/setup.c
+@@ -547,7 +547,7 @@ static void __init reserve_crashkernel(v
+ bool high = false;
+ int ret;
+
+- if (!IS_ENABLED(CONFIG_CRASH_RESERVE))
++ if (!IS_ENABLED(CONFIG_KEXEC_CORE))
+ return;
+
+ total_mem = memblock_phys_mem_size();
+--- a/arch/x86/kernel/smp.c
++++ b/arch/x86/kernel/smp.c
+@@ -282,7 +282,7 @@ struct smp_ops smp_ops = {
+ .smp_cpus_done = native_smp_cpus_done,
+
+ .stop_other_cpus = native_stop_other_cpus,
+-#if defined(CONFIG_CRASH_DUMP)
++#if defined(CONFIG_KEXEC_CORE)
+ .crash_stop_other_cpus = kdump_nmi_shootdown_cpus,
+ #endif
+ .smp_send_reschedule = native_smp_send_reschedule,
+--- a/arch/x86/xen/enlighten_hvm.c
++++ b/arch/x86/xen/enlighten_hvm.c
+@@ -141,9 +141,7 @@ static void xen_hvm_shutdown(void)
+ if (kexec_in_progress)
+ xen_reboot(SHUTDOWN_soft_reset);
+ }
+-#endif
+
+-#ifdef CONFIG_CRASH_DUMP
+ static void xen_hvm_crash_shutdown(struct pt_regs *regs)
+ {
+ native_machine_crash_shutdown(regs);
+@@ -231,8 +229,6 @@ static void __init xen_hvm_guest_init(vo
+
+ #ifdef CONFIG_KEXEC_CORE
+ machine_ops.shutdown = xen_hvm_shutdown;
+-#endif
+-#ifdef CONFIG_CRASH_DUMP
+ machine_ops.crash_shutdown = xen_hvm_crash_shutdown;
+ #endif
+ }
+--- a/arch/x86/xen/mmu_pv.c
++++ b/arch/x86/xen/mmu_pv.c
+@@ -2517,7 +2517,7 @@ out:
+ }
+ EXPORT_SYMBOL_GPL(xen_remap_pfn);
+
+-#ifdef CONFIG_VMCORE_INFO
++#ifdef CONFIG_KEXEC_CORE
+ phys_addr_t paddr_vmcoreinfo_note(void)
+ {
+ if (xen_pv_domain())
--- /dev/null
+From be4f07d3d03f35e86493195f4a7b34187e995b70 Mon Sep 17 00:00:00 2001
+From: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Date: Fri, 10 Jan 2025 13:06:34 +0100
+Subject: Revert "x86/hyperv: Fix hv tsc page based sched_clock for hibernation"
+
+From: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+This reverts commit 6681113633dc738ec95fe33104843a1e25acef3b which is
+commit bcc80dec91ee745b3d66f3e48f0ec2efdea97149 upstream.
+
+The dependent patch before this one caused build errors in the 6.6.y
+tree, so revert this for now so that we can fix them up properly.
+
+Reported-by: Ignat Korchagin <ignat@cloudflare.com>
+Link: https://lore.kernel.org/r/3DB3A6D3-0D3A-4682-B4FA-407B2D3263B2@cloudflare.com
+Reported-by: Lars Wendler <wendler.lars@web.de>
+Link: https://lore.kernel.org/r/20250110103328.0e3906a8@chagall.paradoxon.rec
+Reported-by: Chris Clayton <chris2553@googlemail.com>
+Link: https://lore.kernel.org/r/10c7be00-b1f8-4389-801b-fb2d0b22468d@googlemail.com
+Cc: Dexuan Cui <decui@microsoft.com>
+Cc: Naman Jain <namjain@linux.microsoft.com>
+Cc: Michael Kelley <mhklinux@outlook.com>
+Cc: Wei Liu <wei.liu@kernel.org>
+Cc: Sasha Levin <sashal@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/kernel/cpu/mshyperv.c | 58 -------------------------------------
+ drivers/clocksource/hyperv_timer.c | 14 --------
+ include/clocksource/hyperv_timer.h | 2 -
+ 3 files changed, 1 insertion(+), 73 deletions(-)
+
+--- a/arch/x86/kernel/cpu/mshyperv.c
++++ b/arch/x86/kernel/cpu/mshyperv.c
+@@ -224,63 +224,6 @@ static void hv_machine_crash_shutdown(st
+ hyperv_cleanup();
+ }
+ #endif /* CONFIG_CRASH_DUMP */
+-
+-static u64 hv_ref_counter_at_suspend;
+-static void (*old_save_sched_clock_state)(void);
+-static void (*old_restore_sched_clock_state)(void);
+-
+-/*
+- * Hyper-V clock counter resets during hibernation. Save and restore clock
+- * offset during suspend/resume, while also considering the time passed
+- * before suspend. This is to make sure that sched_clock using hv tsc page
+- * based clocksource, proceeds from where it left off during suspend and
+- * it shows correct time for the timestamps of kernel messages after resume.
+- */
+-static void save_hv_clock_tsc_state(void)
+-{
+- hv_ref_counter_at_suspend = hv_read_reference_counter();
+-}
+-
+-static void restore_hv_clock_tsc_state(void)
+-{
+- /*
+- * Adjust the offsets used by hv tsc clocksource to
+- * account for the time spent before hibernation.
+- * adjusted value = reference counter (time) at suspend
+- * - reference counter (time) now.
+- */
+- hv_adj_sched_clock_offset(hv_ref_counter_at_suspend - hv_read_reference_counter());
+-}
+-
+-/*
+- * Functions to override save_sched_clock_state and restore_sched_clock_state
+- * functions of x86_platform. The Hyper-V clock counter is reset during
+- * suspend-resume and the offset used to measure time needs to be
+- * corrected, post resume.
+- */
+-static void hv_save_sched_clock_state(void)
+-{
+- old_save_sched_clock_state();
+- save_hv_clock_tsc_state();
+-}
+-
+-static void hv_restore_sched_clock_state(void)
+-{
+- restore_hv_clock_tsc_state();
+- old_restore_sched_clock_state();
+-}
+-
+-static void __init x86_setup_ops_for_tsc_pg_clock(void)
+-{
+- if (!(ms_hyperv.features & HV_MSR_REFERENCE_TSC_AVAILABLE))
+- return;
+-
+- old_save_sched_clock_state = x86_platform.save_sched_clock_state;
+- x86_platform.save_sched_clock_state = hv_save_sched_clock_state;
+-
+- old_restore_sched_clock_state = x86_platform.restore_sched_clock_state;
+- x86_platform.restore_sched_clock_state = hv_restore_sched_clock_state;
+-}
+ #endif /* CONFIG_HYPERV */
+
+ static uint32_t __init ms_hyperv_platform(void)
+@@ -635,7 +578,6 @@ static void __init ms_hyperv_init_platfo
+
+ /* Register Hyper-V specific clocksource */
+ hv_init_clocksource();
+- x86_setup_ops_for_tsc_pg_clock();
+ hv_vtl_init_platform();
+ #endif
+ /*
+--- a/drivers/clocksource/hyperv_timer.c
++++ b/drivers/clocksource/hyperv_timer.c
+@@ -27,8 +27,7 @@
+ #include <asm/mshyperv.h>
+
+ static struct clock_event_device __percpu *hv_clock_event;
+-/* Note: offset can hold negative values after hibernation. */
+-static u64 hv_sched_clock_offset __read_mostly;
++static u64 hv_sched_clock_offset __ro_after_init;
+
+ /*
+ * If false, we're using the old mechanism for stimer0 interrupts
+@@ -457,17 +456,6 @@ static void resume_hv_clock_tsc(struct c
+ hv_set_register(HV_REGISTER_REFERENCE_TSC, tsc_msr.as_uint64);
+ }
+
+-/*
+- * Called during resume from hibernation, from overridden
+- * x86_platform.restore_sched_clock_state routine. This is to adjust offsets
+- * used to calculate time for hv tsc page based sched_clock, to account for
+- * time spent before hibernation.
+- */
+-void hv_adj_sched_clock_offset(u64 offset)
+-{
+- hv_sched_clock_offset -= offset;
+-}
+-
+ #ifdef HAVE_VDSO_CLOCKMODE_HVCLOCK
+ static int hv_cs_enable(struct clocksource *cs)
+ {
+--- a/include/clocksource/hyperv_timer.h
++++ b/include/clocksource/hyperv_timer.h
+@@ -38,8 +38,6 @@ extern void hv_remap_tsc_clocksource(voi
+ extern unsigned long hv_get_tsc_pfn(void);
+ extern struct ms_hyperv_tsc_page *hv_get_tsc_page(void);
+
+-extern void hv_adj_sched_clock_offset(u64 offset);
+-
+ static __always_inline bool
+ hv_read_tsc_page_tsc(const struct ms_hyperv_tsc_page *tsc_pg,
+ u64 *cur_tsc, u64 *time)
--- /dev/null
+revert-x86-hyperv-fix-hv-tsc-page-based-sched_clock-for-hibernation.patch
+revert-x86-crash-wrap-crash-dumping-code-into-crash-related-ifdefs.patch
+x86-hyperv-fix-hv-tsc-page-based-sched_clock-for-hibernation.patch
+memblock-make-memblock_set_node-also-warn-about-use-.patch
+memblock-use-numa_valid_node-helper-to-check-for-invalid-node-id.patch
+jbd2-increase-io-priority-for-writing-revoke-records.patch
+jbd2-flush-filesystem-device-before-updating-tail-se.patch
+dm-array-fix-releasing-a-faulty-array-block-twice-in.patch
+dm-array-fix-unreleased-btree-blocks-on-closing-a-fa.patch
+dm-array-fix-cursor-index-when-skipping-across-block.patch
+exfat-fix-the-infinite-loop-in-exfat_readdir.patch
+exfat-fix-the-infinite-loop-in-__exfat_free_cluster.patch
+ovl-do-not-encode-lower-fh-with-upper-sb_writers-hel.patch
+ovl-pass-realinode-to-ovl_encode_real_fh-instead-of-.patch
+ovl-support-encoding-fid-from-inode-with-no-alias.patch
+erofs-handle-overlapped-pclusters-out-of-crafted-ima.patch
+erofs-fix-psi-memstall-accounting.patch
+asoc-rt722-add-delay-time-to-wait-for-the-calibratio.patch
+asoc-mediatek-disable-buffer-pre-allocation.patch
+selftests-alsa-fix-circular-dependency-involving-glo.patch
--- /dev/null
+From bcc80dec91ee745b3d66f3e48f0ec2efdea97149 Mon Sep 17 00:00:00 2001
+From: Naman Jain <namjain@linux.microsoft.com>
+Date: Tue, 17 Sep 2024 11:09:17 +0530
+Subject: x86/hyperv: Fix hv tsc page based sched_clock for hibernation
+
+From: Naman Jain <namjain@linux.microsoft.com>
+
+commit bcc80dec91ee745b3d66f3e48f0ec2efdea97149 upstream.
+
+read_hv_sched_clock_tsc() assumes that the Hyper-V clock counter is
+bigger than the variable hv_sched_clock_offset, which is cached during
+early boot, but depending on the timing this assumption may be false
+when a hibernated VM starts again (the clock counter starts from 0
+again) and is resuming back (Note: hv_init_tsc_clocksource() is not
+called during hibernation/resume); consequently,
+read_hv_sched_clock_tsc() may return a negative integer (which is
+interpreted as a huge positive integer since the return type is u64)
+and new kernel messages are prefixed with huge timestamps before
+read_hv_sched_clock_tsc() grows big enough (which typically takes
+several seconds).
+
+Fix the issue by saving the Hyper-V clock counter just before the
+suspend, and using it to correct the hv_sched_clock_offset in
+resume. This makes hv tsc page based sched_clock continuous and ensures
+that post resume, it starts from where it left off during suspend.
+Override x86_platform.save_sched_clock_state and
+x86_platform.restore_sched_clock_state routines to correct this as soon
+as possible.
+
+Note: if Invariant TSC is available, the issue doesn't happen because
+1) we don't register read_hv_sched_clock_tsc() for sched clock:
+See commit e5313f1c5404 ("clocksource/drivers/hyper-v: Rework
+clocksource and sched clock setup");
+2) the common x86 code adjusts TSC similarly: see
+__restore_processor_state() -> tsc_verify_tsc_adjust(true) and
+x86_platform.restore_sched_clock_state().
+
+Cc: stable@vger.kernel.org
+Fixes: 1349401ff1aa ("clocksource/drivers/hyper-v: Suspend/resume Hyper-V clocksource for hibernation")
+Co-developed-by: Dexuan Cui <decui@microsoft.com>
+Signed-off-by: Dexuan Cui <decui@microsoft.com>
+Signed-off-by: Naman Jain <namjain@linux.microsoft.com>
+Reviewed-by: Michael Kelley <mhklinux@outlook.com>
+Link: https://lore.kernel.org/r/20240917053917.76787-1-namjain@linux.microsoft.com
+Signed-off-by: Wei Liu <wei.liu@kernel.org>
+Message-ID: <20240917053917.76787-1-namjain@linux.microsoft.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/kernel/cpu/mshyperv.c | 58 +++++++++++++++++++++++++++++++++++++
+ drivers/clocksource/hyperv_timer.c | 14 ++++++++
+ include/clocksource/hyperv_timer.h | 2 +
+ 3 files changed, 73 insertions(+), 1 deletion(-)
+
+--- a/arch/x86/kernel/cpu/mshyperv.c
++++ b/arch/x86/kernel/cpu/mshyperv.c
+@@ -222,6 +222,63 @@ static void hv_machine_crash_shutdown(st
+ hyperv_cleanup();
+ }
+ #endif /* CONFIG_KEXEC_CORE */
++
++static u64 hv_ref_counter_at_suspend;
++static void (*old_save_sched_clock_state)(void);
++static void (*old_restore_sched_clock_state)(void);
++
++/*
++ * Hyper-V clock counter resets during hibernation. Save and restore clock
++ * offset during suspend/resume, while also considering the time passed
++ * before suspend. This is to make sure that sched_clock using hv tsc page
++ * based clocksource, proceeds from where it left off during suspend and
++ * it shows correct time for the timestamps of kernel messages after resume.
++ */
++static void save_hv_clock_tsc_state(void)
++{
++ hv_ref_counter_at_suspend = hv_read_reference_counter();
++}
++
++static void restore_hv_clock_tsc_state(void)
++{
++ /*
++ * Adjust the offsets used by hv tsc clocksource to
++ * account for the time spent before hibernation.
++ * adjusted value = reference counter (time) at suspend
++ * - reference counter (time) now.
++ */
++ hv_adj_sched_clock_offset(hv_ref_counter_at_suspend - hv_read_reference_counter());
++}
++
++/*
++ * Functions to override save_sched_clock_state and restore_sched_clock_state
++ * functions of x86_platform. The Hyper-V clock counter is reset during
++ * suspend-resume and the offset used to measure time needs to be
++ * corrected, post resume.
++ */
++static void hv_save_sched_clock_state(void)
++{
++ old_save_sched_clock_state();
++ save_hv_clock_tsc_state();
++}
++
++static void hv_restore_sched_clock_state(void)
++{
++ restore_hv_clock_tsc_state();
++ old_restore_sched_clock_state();
++}
++
++static void __init x86_setup_ops_for_tsc_pg_clock(void)
++{
++ if (!(ms_hyperv.features & HV_MSR_REFERENCE_TSC_AVAILABLE))
++ return;
++
++ old_save_sched_clock_state = x86_platform.save_sched_clock_state;
++ x86_platform.save_sched_clock_state = hv_save_sched_clock_state;
++
++ old_restore_sched_clock_state = x86_platform.restore_sched_clock_state;
++ x86_platform.restore_sched_clock_state = hv_restore_sched_clock_state;
++}
+ #endif /* CONFIG_HYPERV */
+
+ static uint32_t __init ms_hyperv_platform(void)
+@@ -572,6 +629,7 @@ static void __init ms_hyperv_init_platfo
+
+ /* Register Hyper-V specific clocksource */
+ hv_init_clocksource();
++ x86_setup_ops_for_tsc_pg_clock();
+ hv_vtl_init_platform();
+ #endif
+ /*
+--- a/drivers/clocksource/hyperv_timer.c
++++ b/drivers/clocksource/hyperv_timer.c
+@@ -27,7 +27,8 @@
+ #include <asm/mshyperv.h>
+
+ static struct clock_event_device __percpu *hv_clock_event;
+-static u64 hv_sched_clock_offset __ro_after_init;
++/* Note: offset can hold negative values after hibernation. */
++static u64 hv_sched_clock_offset __read_mostly;
+
+ /*
+ * If false, we're using the old mechanism for stimer0 interrupts
+@@ -456,6 +457,17 @@ static void resume_hv_clock_tsc(struct c
+ hv_set_register(HV_REGISTER_REFERENCE_TSC, tsc_msr.as_uint64);
+ }
+
++/*
++ * Called during resume from hibernation, from overridden
++ * x86_platform.restore_sched_clock_state routine. This is to adjust offsets
++ * used to calculate time for hv tsc page based sched_clock, to account for
++ * time spent before hibernation.
++ */
++void hv_adj_sched_clock_offset(u64 offset)
++{
++ hv_sched_clock_offset -= offset;
++}
++
+ #ifdef HAVE_VDSO_CLOCKMODE_HVCLOCK
+ static int hv_cs_enable(struct clocksource *cs)
+ {
+--- a/include/clocksource/hyperv_timer.h
++++ b/include/clocksource/hyperv_timer.h
+@@ -38,6 +38,8 @@ extern void hv_remap_tsc_clocksource(voi
+ extern unsigned long hv_get_tsc_pfn(void);
+ extern struct ms_hyperv_tsc_page *hv_get_tsc_page(void);
+
++extern void hv_adj_sched_clock_offset(u64 offset);
++
+ static __always_inline bool
+ hv_read_tsc_page_tsc(const struct ms_hyperv_tsc_page *tsc_pg,
+ u64 *cur_tsc, u64 *time)
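
A minimal standalone sketch (not part of the patches above) of the unsigned
wraparound arithmetic the re-applied fix relies on. sched_clock here is
computed as "reference counter - hv_sched_clock_offset", both u64, matching
the patch; the counter values and the program itself are invented for
illustration and do not come from the kernel tree.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t offset = 1000;           /* cached at boot: first counter read */
	uint64_t ref_at_suspend = 51000;  /* counter just before hibernation */
	uint64_t ref_at_resume = 200;     /* counter restarts near zero on resume */

	/* Without the fix: 200 - 1000 wraps to a huge positive u64, which is
	 * why post-resume kernel messages carried enormous timestamps. */
	printf("broken sched_clock: %llu\n",
	       (unsigned long long)(ref_at_resume - offset));

	/* The fix: hv_adj_sched_clock_offset(ref_at_suspend - ref_at_resume)
	 * subtracts the delta from the offset. The offset itself may wrap
	 * ("can hold negative values after hibernation"), which is harmless
	 * because the final subtraction wraps back. */
	offset -= ref_at_suspend - ref_at_resume;

	/* sched_clock resumes where it left off: 51000 - 1000 = 50000. */
	printf("fixed sched_clock:  %llu\n",
	       (unsigned long long)(ref_at_resume - offset));
	return 0;
}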