/* SPDX-License-Identifier: GPL-2.0 */

#ifndef __KVM_X86_MMU_TDP_MMU_H
#define __KVM_X86_MMU_TDP_MMU_H

#include <linux/kvm_host.h>

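/*
 * Return the HPA of the vCPU's TDP MMU root page table, allocating a new
 * root for the current MMU mode if one does not already exist.
 */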
hpa_t kvm_tdp_mmu_get_vcpu_root_hpa(struct kvm_vcpu *vcpu);

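/*
 * Grab a reference to the given TDP MMU root.  Returns false if the root's
 * refcount has already hit zero, i.e. the root is being freed and must not
 * be used.
 */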
__must_check static inline bool kvm_tdp_mmu_get_root(struct kvm_mmu_page *root)
{
	return refcount_inc_not_zero(&root->tdp_mmu_root_count);
}

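/*
 * Drop a reference to @root, freeing the root if this was the last
 * reference.  @shared indicates whether mmu_lock is held for read (shared)
 * rather than write.
 */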
void kvm_tdp_mmu_put_root(struct kvm *kvm, struct kvm_mmu_page *root,
			  bool shared);

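/*
 * Zapping helpers.  kvm_tdp_mmu_zap_leafs() zaps leaf SPTEs in the GFN range
 * [start, end) for address space @as_id; @can_yield allows dropping mmu_lock
 * to reschedule, and the return value, combined with the incoming @flush,
 * tells the caller whether a TLB flush is required.
 */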
bool kvm_tdp_mmu_zap_leafs(struct kvm *kvm, int as_id, gfn_t start,
			   gfn_t end, bool can_yield, bool flush);
bool kvm_tdp_mmu_zap_sp(struct kvm *kvm, struct kvm_mmu_page *sp);
void kvm_tdp_mmu_zap_all(struct kvm *kvm);
void kvm_tdp_mmu_invalidate_all_roots(struct kvm *kvm);
void kvm_tdp_mmu_zap_invalidated_roots(struct kvm *kvm);

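/*
 * Handle a TDP page fault by installing the missing SPTEs for @fault.
 * Returns a RET_PF_* value.
 */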
int kvm_tdp_mmu_map(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault);

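/*
 * mmu_notifier hooks, invoked in response to changes in the host's primary
 * MMU.  The unmap and set_spte hooks return whether a TLB flush is needed;
 * the aging hooks return whether any GFN in the range was young.
 */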
bool kvm_tdp_mmu_unmap_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range,
				 bool flush);
bool kvm_tdp_mmu_age_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range);
bool kvm_tdp_mmu_test_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range);
bool kvm_tdp_mmu_set_spte_gfn(struct kvm *kvm, struct kvm_gfn_range *range);

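/*
 * Dirty logging support.  The slot-wide wrprot/clear_dirty helpers return
 * whether a TLB flush is needed.  kvm_tdp_mmu_clear_dirty_pt_masked() acts
 * only on the GFNs set in @mask starting at @gfn, write-protecting rather
 * than clearing dirty bits when @wrprot is true.
 * kvm_tdp_mmu_zap_collapsible_sptes() zaps SPTEs that could be replaced by
 * huge mappings, e.g. after dirty logging is disabled.
 */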
bool kvm_tdp_mmu_wrprot_slot(struct kvm *kvm,
			     const struct kvm_memory_slot *slot, int min_level);
bool kvm_tdp_mmu_clear_dirty_slot(struct kvm *kvm,
				  const struct kvm_memory_slot *slot);
void kvm_tdp_mmu_clear_dirty_pt_masked(struct kvm *kvm,
				       struct kvm_memory_slot *slot,
				       gfn_t gfn, unsigned long mask,
				       bool wrprot);
void kvm_tdp_mmu_zap_collapsible_sptes(struct kvm *kvm,
				       const struct kvm_memory_slot *slot);

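/*
 * Remove write access from the SPTE mapping @gfn at @min_level and above,
 * e.g. when the GFN is being shadowed.  Returns whether an SPTE was changed
 * and a TLB flush is needed.
 */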
bool kvm_tdp_mmu_write_protect_gfn(struct kvm *kvm,
				   struct kvm_memory_slot *slot, gfn_t gfn,
				   int min_level);

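/*
 * Try to split huge pages mapping the GFN range [start, end) down to
 * @target_level.  @shared indicates whether mmu_lock is held for read.
 * Splitting is best-effort and may fail, e.g. on allocation failure.
 */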
void kvm_tdp_mmu_try_split_huge_pages(struct kvm *kvm,
				      const struct kvm_memory_slot *slot,
				      gfn_t start, gfn_t end,
				      int target_level, bool shared);

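/*
 * TDP MMU page tables are freed via RCU, so lockless walks must be done
 * inside an RCU read-side critical section.
 */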
static inline void kvm_tdp_mmu_walk_lockless_begin(void)
{
	rcu_read_lock();
}

static inline void kvm_tdp_mmu_walk_lockless_end(void)
{
	rcu_read_unlock();
}

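/*
 * Lockless walkers: kvm_tdp_mmu_get_walk() records the SPTEs traversed for
 * @addr into @sptes and returns the level of the lowest SPTE reached; the
 * fast page fault path uses kvm_tdp_mmu_fast_pf_get_last_sptep() to get a
 * pointer to the last SPTE in the walk (with its value stored in @spte).
 * Callers must be in an RCU read-side critical section, see above.
 */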
int kvm_tdp_mmu_get_walk(struct kvm_vcpu *vcpu, u64 addr, u64 *sptes,
			 int *root_level);
u64 *kvm_tdp_mmu_fast_pf_get_last_sptep(struct kvm_vcpu *vcpu, u64 addr,
					u64 *spte);

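/* The TDP MMU is only supported on 64-bit hosts; stub it out otherwise. */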
#ifdef CONFIG_X86_64
int kvm_mmu_init_tdp_mmu(struct kvm *kvm);
void kvm_mmu_uninit_tdp_mmu(struct kvm *kvm);
static inline bool is_tdp_mmu_page(struct kvm_mmu_page *sp) { return sp->tdp_mmu_page; }

static inline bool is_tdp_mmu(struct kvm_mmu *mmu)
{
	struct kvm_mmu_page *sp;
	hpa_t hpa = mmu->root.hpa;

	if (WARN_ON(!VALID_PAGE(hpa)))
		return false;

	/*
	 * A NULL shadow page is legal when shadowing a non-paging guest with
	 * PAE paging, as the MMU will be direct with root_hpa pointed at the
	 * pae_root page, not a shadow page.
	 */
	sp = to_shadow_page(hpa);
	return sp && is_tdp_mmu_page(sp) && sp->root_count;
}
#else
static inline int kvm_mmu_init_tdp_mmu(struct kvm *kvm) { return 0; }
static inline void kvm_mmu_uninit_tdp_mmu(struct kvm *kvm) {}
static inline bool is_tdp_mmu_page(struct kvm_mmu_page *sp) { return false; }
static inline bool is_tdp_mmu(struct kvm_mmu *mmu) { return false; }
#endif

#endif /* __KVM_X86_MMU_TDP_MMU_H */