Lines matching refs:kvm in the KVM x86 TDP MMU header (arch/x86/kvm/mmu/tdp_mmu.h). Only the line containing each match is shown, so multi-line prototypes appear truncated.
__must_check static inline bool kvm_tdp_mmu_get_root(struct kvm *kvm,
void kvm_tdp_mmu_put_root(struct kvm *kvm, struct kvm_mmu_page *root,
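The __must_check bool return of kvm_tdp_mmu_get_root() paired with kvm_tdp_mmu_put_root() suggests a try-get/put reference scheme: taking a reference can fail once the root is on its way out, and the final put releases it. The standalone C sketch below models only that pattern; the names (struct root_page, root_try_get, root_put) are illustrative stand-ins, not the kernel's implementation:

/*
 * Userspace model of a try-get/put refcount: root_try_get() succeeds only
 * while the count is non-zero, root_put() frees the object on the final
 * drop.  All names here are illustrative.
 */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdlib.h>

struct root_page {
	atomic_int refcount;		/* 0 means the root is already dead */
};

static bool root_try_get(struct root_page *root)
{
	int old = atomic_load(&root->refcount);

	while (old != 0) {
		/* Bump the count only if it is still non-zero. */
		if (atomic_compare_exchange_weak(&root->refcount, &old, old + 1))
			return true;
	}
	return false;
}

static void root_put(struct root_page *root)
{
	/* atomic_fetch_sub() returns the previous value. */
	if (atomic_fetch_sub(&root->refcount, 1) == 1)
		free(root);
}

int main(void)
{
	struct root_page *root = malloc(sizeof(*root));

	if (!root)
		return 1;
	atomic_init(&root->refcount, 1);	/* creation reference */

	if (root_try_get(root))			/* e.g. a page-table walker */
		root_put(root);

	root_put(root);				/* final put frees the root */
	return 0;
}

The kernel itself would use its refcount_t helpers (e.g. refcount_inc_not_zero()) rather than raw C11 atomics; the sketch only shows the control flow a caller of a __must_check "get" has to follow.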
bool __kvm_tdp_mmu_zap_gfn_range(struct kvm *kvm, int as_id, gfn_t start,
static inline bool kvm_tdp_mmu_zap_gfn_range(struct kvm *kvm, int as_id,
	return __kvm_tdp_mmu_zap_gfn_range(kvm, as_id, start, end, true, flush);
static inline bool kvm_tdp_mmu_zap_sp(struct kvm *kvm, struct kvm_mmu_page *sp)
	lockdep_assert_held_write(&kvm->mmu_lock);
	return __kvm_tdp_mmu_zap_gfn_range(kvm, kvm_mmu_page_as_id(sp),
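Both inline helpers shown above funnel into __kvm_tdp_mmu_zap_gfn_range(): kvm_tdp_mmu_zap_gfn_range() passes true for the yield-permission argument, while kvm_tdp_mmu_zap_sp() asserts that mmu_lock is held for write before zapping a single shadow page's range. The standalone sketch below models only that "one worker, two wrappers" shape; the names, and the assumption that the single-page helper never yields, are mine, not the kernel code:

/*
 * Model of the wrapper pattern: one internal worker that takes a can_yield
 * flag, a general-purpose wrapper that always permits yielding, and a
 * single-page wrapper that does not because its caller holds a lock.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

typedef uint64_t gfn_t;

/* Returns true if the caller now needs to flush TLBs. */
static bool __zap_gfn_range(gfn_t start, gfn_t end, bool can_yield, bool flush)
{
	for (gfn_t gfn = start; gfn < end; gfn++) {
		/* ...tear down the mapping for this gfn... */
		flush = true;
		if (can_yield) {
			/* a real implementation could drop locks / reschedule here */
		}
	}
	return flush;
}

static bool zap_gfn_range(gfn_t start, gfn_t end, bool flush)
{
	return __zap_gfn_range(start, end, true, flush);
}

static bool zap_one_page(gfn_t gfn)
{
	/* Caller holds the lock, so never yield and never pre-set flush. */
	return __zap_gfn_range(gfn, gfn + 1, false, false);
}

int main(void)
{
	printf("flush after range zap: %d\n", zap_gfn_range(0, 16, false));
	printf("flush after page zap:  %d\n", zap_one_page(42));
	return 0;
}

Threading the flush flag in and back out lets a caller batch several zap calls behind a single TLB flush instead of flushing after each one.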
void kvm_tdp_mmu_zap_all(struct kvm *kvm);
void kvm_tdp_mmu_invalidate_all_roots(struct kvm *kvm);
void kvm_tdp_mmu_zap_invalidated_roots(struct kvm *kvm);
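The kvm_tdp_mmu_invalidate_all_roots() / kvm_tdp_mmu_zap_invalidated_roots() pair reads like a two-phase teardown: first mark every root invalid (cheap, so new users stop taking references), then do the expensive zapping of whatever was marked. That split is an inference from the names; the sketch below is a minimal standalone model of such a mark-then-reap scheme, not the kernel's code:

/*
 * Two-phase teardown model: phase one flips a flag on every live object,
 * phase two walks the list again and frees whatever was marked.
 */
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct root {
	struct root *next;
	bool valid;
};

static void invalidate_all(struct root *head)
{
	for (struct root *r = head; r; r = r->next)
		r->valid = false;	/* fast: nothing torn down yet */
}

static struct root *reap_invalidated(struct root *head)
{
	struct root **pp = &head;

	while (*pp) {
		struct root *r = *pp;

		if (!r->valid) {
			*pp = r->next;	/* unlink... */
			free(r);	/* ...then do the expensive teardown */
		} else {
			pp = &r->next;
		}
	}
	return head;
}

int main(void)
{
	struct root *head = NULL;

	for (int i = 0; i < 3; i++) {
		struct root *r = malloc(sizeof(*r));

		if (!r)
			return 1;
		r->valid = true;
		r->next = head;
		head = r;
	}

	invalidate_all(head);		/* phase one: mark */
	head = reap_invalidated(head);	/* phase two: reap the marked roots */
	printf("remaining roots: %s\n", head ? "some" : "none");
	return 0;
}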
bool kvm_tdp_mmu_unmap_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range,
bool kvm_tdp_mmu_age_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range);
bool kvm_tdp_mmu_test_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range);
bool kvm_tdp_mmu_set_spte_gfn(struct kvm *kvm, struct kvm_gfn_range *range);
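kvm_tdp_mmu_age_gfn_range() and kvm_tdp_mmu_test_age_gfn() mirror the usual clear-young/test-young split: "age" clears accessed state over a range and reports whether anything had been accessed, while "test" only reports. The sketch below models just that split over a flat array of accessed flags; struct gfn_range and the helper names are made up for the example and are not kvm_gfn_range:

/*
 * Model of the age vs. test_age split: "age" clears accessed bits in a
 * range and says whether any were set, "test_age" only checks.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define NPAGES 16

struct gfn_range {
	uint64_t start;
	uint64_t end;	/* exclusive */
};

static bool accessed[NPAGES];

static bool age_gfn_range(const struct gfn_range *range)
{
	bool young = false;

	for (uint64_t gfn = range->start; gfn < range->end; gfn++) {
		young |= accessed[gfn];
		accessed[gfn] = false;	/* clear so future use is detectable */
	}
	return young;
}

static bool test_age_gfn_range(const struct gfn_range *range)
{
	for (uint64_t gfn = range->start; gfn < range->end; gfn++) {
		if (accessed[gfn])
			return true;	/* read-only check, nothing cleared */
	}
	return false;
}

int main(void)
{
	struct gfn_range range = { .start = 0, .end = NPAGES };

	accessed[3] = true;					/* simulate a guest access */
	printf("test_age: %d\n", test_age_gfn_range(&range));	/* 1 */
	printf("age:      %d\n", age_gfn_range(&range));	/* 1, and clears */
	printf("test_age: %d\n", test_age_gfn_range(&range));	/* 0 */
	return 0;
}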
bool kvm_tdp_mmu_wrprot_slot(struct kvm *kvm,
bool kvm_tdp_mmu_clear_dirty_slot(struct kvm *kvm,
void kvm_tdp_mmu_clear_dirty_pt_masked(struct kvm *kvm,
void kvm_tdp_mmu_zap_collapsible_sptes(struct kvm *kvm,
bool kvm_tdp_mmu_write_protect_gfn(struct kvm *kvm,
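kvm_tdp_mmu_clear_dirty_pt_masked() takes a base gfn plus an unsigned long mask, which KVM's dirty-logging path uses as a per-page selector relative to that base (bit i set means act on gfn + i), with a flag choosing between write-protecting and clearing dirty bits. The sketch below only demonstrates the gfn/mask addressing; the function name, parameters, and printed action are illustrative, not the kernel's:

/*
 * Demo of base-gfn + bitmask addressing: bit i set in @mask selects the
 * page at @gfn_base + i.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static void clear_dirty_masked(uint64_t gfn_base, unsigned long mask, bool wrprot)
{
	while (mask) {
		unsigned int bit = __builtin_ctzl(mask);	/* lowest set bit */

		printf("%s gfn 0x%llx\n",
		       wrprot ? "write-protect" : "clear dirty bit of",
		       (unsigned long long)(gfn_base + bit));
		mask &= mask - 1;				/* clear that bit */
	}
}

int main(void)
{
	/* Pages 0, 1 and 5 past the base were reported dirty. */
	clear_dirty_masked(0x1000, 0x23UL, false);
	return 0;
}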
bool kvm_mmu_init_tdp_mmu(struct kvm *kvm);
void kvm_mmu_uninit_tdp_mmu(struct kvm *kvm);

static inline bool kvm_mmu_init_tdp_mmu(struct kvm *kvm) { return false; }
static inline void kvm_mmu_uninit_tdp_mmu(struct kvm *kvm) {}
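The last two lines are the stub flavours of kvm_mmu_init_tdp_mmu()/kvm_mmu_uninit_tdp_mmu(): when the TDP MMU is not built in (the real header guards the first pair with a config option, likely CONFIG_X86_64), init reports false and uninit is a no-op, so callers never need #ifdefs of their own. A self-contained sketch of that pattern, with FEATURE_TDP standing in for the real config symbol:

/*
 * Compile-out pattern: with the feature disabled, the init helper becomes a
 * stub returning false and the uninit helper a no-op, so caller code stays
 * unconditional.
 */
#include <stdbool.h>
#include <stdio.h>

struct vm {
	bool tdp_enabled;
};

#ifdef FEATURE_TDP
static bool tdp_init(struct vm *vm)
{
	vm->tdp_enabled = true;
	return true;
}
static void tdp_uninit(struct vm *vm)
{
	vm->tdp_enabled = false;
}
#else
/* Stubs: same signatures, no behaviour. */
static inline bool tdp_init(struct vm *vm) { (void)vm; return false; }
static inline void tdp_uninit(struct vm *vm) { (void)vm; }
#endif

int main(void)
{
	struct vm vm = { .tdp_enabled = false };

	/* Identical caller code whether or not the feature is compiled in. */
	if (tdp_init(&vm))
		printf("TDP MMU in use\n");
	else
		printf("falling back to the other MMU path\n");

	tdp_uninit(&vm);
	return 0;
}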