// SPDX-License-Identifier: GPL-2.0

#ifndef __KVM_X86_MMU_TDP_MMU_H
#define __KVM_X86_MMU_TDP_MMU_H

#include <linux/kvm_host.h>

#include "spte.h"
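
/*
 * Interface for KVM's x86 TDP MMU, which builds and maintains the CPU's
 * two-dimensional paging structures (EPT on Intel, NPT on AMD) directly,
 * mapping guest physical addresses to host physical addresses.
 */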
void kvm_mmu_init_tdp_mmu(struct kvm *kvm);
void kvm_mmu_uninit_tdp_mmu(struct kvm *kvm);
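
/*
 * Returns the physical address of the vCPU's TDP MMU root for use by
 * hardware (EPTP on VMX, nCR3 on SVM), allocating the root if necessary.
 */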
hpa_t kvm_tdp_mmu_get_vcpu_root_hpa(struct kvm_vcpu *vcpu);
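
/*
 * TDP MMU roots are reference counted.  kvm_tdp_mmu_get_root() takes a
 * reference opportunistically and fails if the count has already hit zero
 * (the root is being freed), hence the __must_check.
 */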
__must_check static inline bool kvm_tdp_mmu_get_root(struct kvm_mmu_page *root)
{
	return refcount_inc_not_zero(&root->tdp_mmu_root_count);
}

void kvm_tdp_mmu_put_root(struct kvm *kvm, struct kvm_mmu_page *root,
			  bool shared);
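
/*
 * Zapping: zap_leafs() removes only leaf SPTEs in [start, end), zap_sp()
 * zaps a single shadow page, zap_all() tears down all roots, and the
 * invalidate/zap pair marks all valid roots invalid and then reaps them.
 */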
bool kvm_tdp_mmu_zap_leafs(struct kvm *kvm, gfn_t start, gfn_t end, bool flush);
bool kvm_tdp_mmu_zap_sp(struct kvm *kvm, struct kvm_mmu_page *sp);
void kvm_tdp_mmu_zap_all(struct kvm *kvm);
void kvm_tdp_mmu_invalidate_all_roots(struct kvm *kvm);
void kvm_tdp_mmu_zap_invalidated_roots(struct kvm *kvm);
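
/* Fault handler: installs the mappings needed to resolve a TDP page fault. */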
int kvm_tdp_mmu_map(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault);
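
/*
 * Hooks for the mmu_notifier side: unmap, age (clear accessed bits), test
 * for recent access, or update SPTEs in response to changes in the host's
 * primary MMU.
 */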
bool kvm_tdp_mmu_unmap_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range,
				 bool flush);
bool kvm_tdp_mmu_age_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range);
bool kvm_tdp_mmu_test_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range);
bool kvm_tdp_mmu_set_spte_gfn(struct kvm *kvm, struct kvm_gfn_range *range);
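
/*
 * Dirty logging support: write-protect or clear dirty bits on a memslot's
 * mappings, clear dirty state for the gfns selected by @mask, and zap
 * SPTEs that could be collapsed back into huge pages once dirty logging
 * is disabled.
 */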
bool kvm_tdp_mmu_wrprot_slot(struct kvm *kvm,
			     const struct kvm_memory_slot *slot, int min_level);
bool kvm_tdp_mmu_clear_dirty_slot(struct kvm *kvm,
				  const struct kvm_memory_slot *slot);
void kvm_tdp_mmu_clear_dirty_pt_masked(struct kvm *kvm,
				       struct kvm_memory_slot *slot,
				       gfn_t gfn, unsigned long mask,
				       bool wrprot);
void kvm_tdp_mmu_zap_collapsible_sptes(struct kvm *kvm,
				       const struct kvm_memory_slot *slot);

bool kvm_tdp_mmu_write_protect_gfn(struct kvm *kvm,
				   struct kvm_memory_slot *slot, gfn_t gfn,
				   int min_level);
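
/*
 * Eagerly attempts to split huge pages in [start, end) into target_level
 * mappings, e.g. ahead of dirty logging; @shared indicates whether
 * mmu_lock is held for read or write.
 */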
void kvm_tdp_mmu_try_split_huge_pages(struct kvm *kvm,
				      const struct kvm_memory_slot *slot,
				      gfn_t start, gfn_t end,
				      int target_level, bool shared);
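
/*
 * Lockless walks traverse the TDP page tables under RCU instead of
 * mmu_lock; the begin/end helpers bracket the walk in an RCU read-side
 * critical section.  Callers must not sleep between begin and end.
 */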
static inline void kvm_tdp_mmu_walk_lockless_begin(void)
{
	rcu_read_lock();
}

static inline void kvm_tdp_mmu_walk_lockless_end(void)
{
	rcu_read_unlock();
}

int kvm_tdp_mmu_get_walk(struct kvm_vcpu *vcpu, u64 addr, u64 *sptes,
			 int *root_level);
u64 *kvm_tdp_mmu_fast_pf_get_last_sptep(struct kvm_vcpu *vcpu, u64 addr,
					u64 *spte);
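
/*
 * Rough usage sketch (illustrative, mirroring the lockless SPTE lookup in
 * mmu.c; not an API defined by this header):
 *
 *	u64 sptes[PT64_ROOT_MAX_LEVEL + 1];
 *	int root_level, leaf;
 *
 *	kvm_tdp_mmu_walk_lockless_begin();
 *	leaf = kvm_tdp_mmu_get_walk(vcpu, addr, sptes, &root_level);
 *	kvm_tdp_mmu_walk_lockless_end();
 *
 * kvm_tdp_mmu_get_walk() fills sptes[], indexed by level, from root_level
 * down to the returned leaf level (-1 if no SPTEs were found).
 */

/* The TDP MMU is only built for 64-bit kernels. */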
#ifdef CONFIG_X86_64
static inline bool is_tdp_mmu_page(struct kvm_mmu_page *sp) { return sp->tdp_mmu_page; }
#else
static inline bool is_tdp_mmu_page(struct kvm_mmu_page *sp) { return false; }
#endif

#endif /* __KVM_X86_MMU_TDP_MMU_H */