bool (*rdtscp_supported)(void);
        void (*adjust_tsc_offset)(struct kvm_vcpu *vcpu, s64 adjustment);
 
+       void (*set_tdp_cr3)(struct kvm_vcpu *vcpu, unsigned long cr3);
+
        void (*set_supported_cpuid)(u32 func, struct kvm_cpuid_entry2 *entry);
 
        bool (*has_wbinvd_exit)(void);
 
        context->shadow_root_level = kvm_x86_ops->get_tdp_level();
        context->root_hpa = INVALID_PAGE;
        context->direct_map = true;
-       context->set_cr3 = kvm_x86_ops->set_cr3;
+       context->set_cr3 = kvm_x86_ops->set_tdp_cr3;
 
        if (!is_paging(vcpu)) {
                context->gva_to_gpa = nonpaging_gva_to_gpa;
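
For context, a minimal sketch (not part of this patch) of how the MMU consumes the
callback installed above; the call site is assumed to be the root-loading path in
kvm_mmu_load(), whose exact shape may differ:

/* Sketch only: with TDP, the context's set_cr3 hook now points at
 * set_tdp_cr3() on SVM or vmx_set_cr3() on VMX, so loading a new MMU
 * root programs the nested/EPT root rather than the guest's CR3. */
vcpu->arch.mmu.set_cr3(vcpu, vcpu->arch.mmu.root_hpa);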
 
        savesegment(gs, gs_selector);
        ldt_selector = kvm_read_ldt();
        svm->vmcb->save.cr2 = vcpu->arch.cr2;
-       /* required for live migration with NPT */
-       if (npt_enabled)
-               svm->vmcb->save.cr3 = vcpu->arch.cr3;
 
        clgi();
 
 {
        struct vcpu_svm *svm = to_svm(vcpu);
 
-       if (npt_enabled) {
-               svm->vmcb->control.nested_cr3 = root;
-               force_new_asid(vcpu);
-               return;
-       }
-
        svm->vmcb->save.cr3 = root;
        force_new_asid(vcpu);
 }
 
+static void set_tdp_cr3(struct kvm_vcpu *vcpu, unsigned long root)
+{
+       struct vcpu_svm *svm = to_svm(vcpu);
+
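+       /* nested_cr3 holds the NPT root the CPU walks for GPA -> host-physical translation */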
+       svm->vmcb->control.nested_cr3 = root;
+
+       /* Also sync guest cr3 here in case we live migrate */
+       svm->vmcb->save.cr3 = vcpu->arch.cr3;
+
+       force_new_asid(vcpu);
+}
+
 static int is_disabled(void)
 {
        u64 vm_cr;
 
        .write_tsc_offset = svm_write_tsc_offset,
        .adjust_tsc_offset = svm_adjust_tsc_offset,
+
+       .set_tdp_cr3 = set_tdp_cr3,
 };
 
 static int __init svm_init(void)
 
 
        .write_tsc_offset = vmx_write_tsc_offset,
        .adjust_tsc_offset = vmx_adjust_tsc_offset,
+
+       .set_tdp_cr3 = vmx_set_cr3,
 };
 
 static int __init vmx_init(void)
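
On the VMX side no new function is needed: vmx_set_cr3() already separates the EPT
pointer from the guest's CR3 when EPT is enabled, which is the same split set_tdp_cr3()
introduces for SVM. A condensed sketch of that shape (simplified, not the literal
vmx.c body; TLB flushing and PDPTR handling omitted):

static void vmx_set_cr3_sketch(struct kvm_vcpu *vcpu, unsigned long cr3)
{
	unsigned long guest_cr3 = cr3;

	if (enable_ept) {
		/* The root handed in by the MMU becomes the EPT pointer... */
		vmcs_write64(EPT_POINTER, construct_eptp(cr3));
		/* ...while GUEST_CR3 keeps tracking the guest's own CR3. */
		guest_cr3 = is_paging(vcpu) ? vcpu->arch.cr3 :
				vcpu->kvm->arch.ept_identity_map_addr;
	}

	vmcs_writel(GUEST_CR3, guest_cr3);
}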