Lines matching refs: kvm

54 	debug_sprintf_event(d_vcpu->kvm->arch.dbf, d_loglevel, \
82 return test_bit(vcpu->vcpu_idx, vcpu->kvm->arch.idle_mask); in is_vcpu_idle()
85 static inline int kvm_is_ucontrol(struct kvm *kvm) in kvm_is_ucontrol() argument
88 if (kvm->arch.gmap) in kvm_is_ucontrol()
182 static inline int test_kvm_facility(struct kvm *kvm, unsigned long nr) in test_kvm_facility() argument
184 return __test_facility(nr, kvm->arch.model.fac_mask) && in test_kvm_facility()
185 __test_facility(nr, kvm->arch.model.fac_list); in test_kvm_facility()
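
test_kvm_facility() only reports a facility as usable when its bit is set in both kvm->arch.model.fac_mask (roughly, what KVM and the host support) and kvm->arch.model.fac_list (what the guest's CPU model enables). The stand-alone sketch below models that double check together with the s390 convention that facility bits are numbered from the most significant bit onward; the names fac_test and guest_has_facility and the 32-byte size are illustrative, not taken from the kernel.

#include <stdio.h>
#include <string.h>

/* Illustrative size only; the kernel's facility lists are larger. */
#define FAC_BYTES 32

/*
 * Model of the MSB-first bit numbering used for s390 facility lists:
 * facility 0 is the most significant bit of the first byte.
 */
static int fac_test(unsigned long nr, const unsigned char *facilities)
{
	if (nr >= FAC_BYTES * 8)
		return 0;
	return (facilities[nr >> 3] & (0x80 >> (nr & 7))) != 0;
}

/* A facility is usable by the guest only if mask AND list both have it. */
static int guest_has_facility(unsigned long nr,
			      const unsigned char *fac_mask,
			      const unsigned char *fac_list)
{
	return fac_test(nr, fac_mask) && fac_test(nr, fac_list);
}

int main(void)
{
	unsigned char mask[FAC_BYTES], list[FAC_BYTES];

	memset(mask, 0, sizeof(mask));
	memset(list, 0, sizeof(list));

	mask[0] = 0x80;		/* facility 0 supported by the host */
	list[0] = 0x80;		/* ... and enabled for the guest */
	list[1] = 0x40;		/* facility 9 enabled but not supported */

	printf("facility 0: %d\n", guest_has_facility(0, mask, list)); /* 1 */
	printf("facility 9: %d\n", guest_has_facility(9, mask, list)); /* 0 */
	return 0;
}

Built with a plain C compiler this prints 1 for facility 0 and 0 for facility 9, since facility 9 appears only in the list, not in the mask.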
199 static inline int test_kvm_cpu_feat(struct kvm *kvm, unsigned long nr) in test_kvm_cpu_feat() argument
202 return test_bit_inv(nr, kvm->arch.cpu_feat); in test_kvm_cpu_feat()
206 static inline int kvm_s390_user_cpu_state_ctrl(struct kvm *kvm) in kvm_s390_user_cpu_state_ctrl() argument
208 return kvm->arch.user_cpu_state_ctrl != 0; in kvm_s390_user_cpu_state_ctrl()
211 static inline void kvm_s390_set_user_cpu_state_ctrl(struct kvm *kvm) in kvm_s390_set_user_cpu_state_ctrl() argument
213 if (kvm->arch.user_cpu_state_ctrl) in kvm_s390_set_user_cpu_state_ctrl()
216 VM_EVENT(kvm, 3, "%s", "ENABLE: Userspace CPU state control"); in kvm_s390_set_user_cpu_state_ctrl()
217 kvm->arch.user_cpu_state_ctrl = 1; in kvm_s390_set_user_cpu_state_ctrl()
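
The pair above makes userspace CPU state control a one-way latch: kvm_s390_set_user_cpu_state_ctrl() returns early if the flag is already set, so the "ENABLE" trace event fires at most once and the flag is never cleared for the lifetime of the VM. A minimal stand-alone model of that latch, with invented names (vm_state, enable_user_cpu_state_ctrl) in place of the kernel structures:

#include <stdbool.h>
#include <stdio.h>

struct vm_state {
	int user_cpu_state_ctrl;	/* 0 = kernel controls CPU state */
};

/* One-way latch: log only on the first transition, never clear again. */
static void enable_user_cpu_state_ctrl(struct vm_state *vm)
{
	if (vm->user_cpu_state_ctrl)
		return;
	printf("ENABLE: Userspace CPU state control\n");
	vm->user_cpu_state_ctrl = 1;
}

static bool user_cpu_state_ctrl(const struct vm_state *vm)
{
	return vm->user_cpu_state_ctrl != 0;
}

int main(void)
{
	struct vm_state vm = { 0 };

	enable_user_cpu_state_ctrl(&vm);	/* logs once */
	enable_user_cpu_state_ctrl(&vm);	/* no second log */
	printf("enabled: %d\n", user_cpu_state_ctrl(&vm));
	return 0;
}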
223 int kvm_s390_pv_deinit_vm(struct kvm *kvm, u16 *rc, u16 *rrc);
224 int kvm_s390_pv_init_vm(struct kvm *kvm, u16 *rc, u16 *rrc);
225 int kvm_s390_pv_set_sec_parms(struct kvm *kvm, void *hdr, u64 length, u16 *rc,
227 int kvm_s390_pv_unpack(struct kvm *kvm, unsigned long addr, unsigned long size,
231 static inline u64 kvm_s390_pv_get_handle(struct kvm *kvm) in kvm_s390_pv_get_handle() argument
233 return kvm->arch.pv.handle; in kvm_s390_pv_get_handle()
241 static inline bool kvm_s390_pv_is_protected(struct kvm *kvm) in kvm_s390_pv_is_protected() argument
243 lockdep_assert_held(&kvm->lock); in kvm_s390_pv_is_protected()
244 return !!kvm_s390_pv_get_handle(kvm); in kvm_s390_pv_is_protected()
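
As the helpers above show, a VM counts as protected exactly when it holds a non-zero Ultravisor handle in kvm->arch.pv.handle, and kvm_s390_pv_is_protected() additionally asserts that kvm->lock is held so the answer cannot change underneath the caller. A stand-alone sketch of that pattern, assuming made-up names (pv_vm, pv_is_protected) and a simple flag in place of real lockdep tracking:

#include <assert.h>
#include <pthread.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Illustrative stand-in for struct kvm with its pv handle and lock. */
struct pv_vm {
	pthread_mutex_t lock;
	bool lock_held;		/* poor man's lockdep_assert_held() */
	uint64_t pv_handle;	/* 0 means "not protected" */
};

static uint64_t pv_get_handle(struct pv_vm *vm)
{
	return vm->pv_handle;
}

/* Only meaningful while the VM lock is held, mirroring lockdep_assert_held(). */
static bool pv_is_protected(struct pv_vm *vm)
{
	assert(vm->lock_held);
	return pv_get_handle(vm) != 0;
}

int main(void)
{
	struct pv_vm vm = { .lock = PTHREAD_MUTEX_INITIALIZER };

	pthread_mutex_lock(&vm.lock);
	vm.lock_held = true;

	printf("protected: %d\n", pv_is_protected(&vm));	/* 0 */
	vm.pv_handle = 0x1234;					/* as if created */
	printf("protected: %d\n", pv_is_protected(&vm));	/* 1 */

	vm.lock_held = false;
	pthread_mutex_unlock(&vm.lock);
	return 0;
}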
259 void kvm_s390_clear_float_irqs(struct kvm *kvm);
260 int __must_check kvm_s390_inject_vm(struct kvm *kvm,
283 struct kvm_s390_interrupt_info *kvm_s390_get_io_int(struct kvm *kvm,
285 int kvm_s390_reinject_io_int(struct kvm *kvm,
287 int kvm_s390_mask_adapter(struct kvm *kvm, unsigned int id, bool masked);
330 void kvm_s390_vsie_init(struct kvm *kvm);
331 void kvm_s390_vsie_destroy(struct kvm *kvm);
338 void kvm_s390_set_tod_clock(struct kvm *kvm,
358 static inline void kvm_s390_vcpu_block_all(struct kvm *kvm) in kvm_s390_vcpu_block_all() argument
363 WARN_ON(!mutex_is_locked(&kvm->lock)); in kvm_s390_vcpu_block_all()
364 kvm_for_each_vcpu(i, vcpu, kvm) in kvm_s390_vcpu_block_all()
368 static inline void kvm_s390_vcpu_unblock_all(struct kvm *kvm) in kvm_s390_vcpu_unblock_all() argument
373 kvm_for_each_vcpu(i, vcpu, kvm) in kvm_s390_vcpu_unblock_all()
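
kvm_s390_vcpu_block_all() and kvm_s390_vcpu_unblock_all() iterate over every VCPU while kvm->lock is held (the WARN_ON above checks exactly that) and block or unblock each one, which is roughly how VM-wide updates quiesce all CPUs before touching shared state. A stand-alone sketch of that iterate-under-lock pattern, with invented types (toy_kvm, toy_vcpu) instead of the kvm_for_each_vcpu() machinery:

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

#define TOY_NR_VCPUS 4

/* Invented stand-ins for struct kvm / struct kvm_vcpu. */
struct toy_vcpu {
	bool blocked;		/* models the per-VCPU "do not run" flag */
};

struct toy_kvm {
	pthread_mutex_t lock;
	struct toy_vcpu vcpu[TOY_NR_VCPUS];
};

/* Caller must hold kvm->lock, mirroring the WARN_ON() in the header. */
static void block_all(struct toy_kvm *kvm)
{
	for (int i = 0; i < TOY_NR_VCPUS; i++)
		kvm->vcpu[i].blocked = true;
}

static void unblock_all(struct toy_kvm *kvm)
{
	for (int i = 0; i < TOY_NR_VCPUS; i++)
		kvm->vcpu[i].blocked = false;
}

int main(void)
{
	struct toy_kvm kvm = { .lock = PTHREAD_MUTEX_INITIALIZER };

	pthread_mutex_lock(&kvm.lock);
	block_all(&kvm);		/* quiesce every VCPU ... */
	/* ... update VM-wide state here ... */
	unblock_all(&kvm);		/* ... then let them run again */
	pthread_mutex_unlock(&kvm.lock);

	printf("vcpu0 blocked: %d\n", kvm.vcpu[0].blocked);	/* 0 */
	return 0;
}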
377 static inline u64 kvm_s390_get_tod_clock_fast(struct kvm *kvm) in kvm_s390_get_tod_clock_fast() argument
382 rc = get_tod_clock_fast() + kvm->arch.epoch; in kvm_s390_get_tod_clock_fast()
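
kvm_s390_get_tod_clock_fast() derives the guest's time-of-day value by adding the per-VM epoch offset in kvm->arch.epoch to the host TOD clock. A stand-alone model of that arithmetic, using CLOCK_MONOTONIC as a stand-in for get_tod_clock_fast(); the 5-second epoch and function names are illustrative only:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>
#include <time.h>

/* Stand-in for get_tod_clock_fast(): any 64-bit monotonic counter will do. */
static uint64_t host_clock(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return (uint64_t)ts.tv_sec * 1000000000ull + ts.tv_nsec;
}

/* Guest time = host time + per-VM epoch offset (may wrap around 2^64). */
static uint64_t guest_tod(uint64_t epoch)
{
	return host_clock() + epoch;
}

int main(void)
{
	uint64_t epoch = 5000000000ull;	/* guest runs 5s "ahead" of the host */

	printf("host : %" PRIu64 "\n", host_clock());
	printf("guest: %" PRIu64 "\n", guest_tod(epoch));
	return 0;
}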
426 void kvm_s390_destroy_adapters(struct kvm *kvm);
435 void kvm_s390_gisa_init(struct kvm *kvm);
436 void kvm_s390_gisa_clear(struct kvm *kvm);
437 void kvm_s390_gisa_destroy(struct kvm *kvm);
453 static inline union ipte_control *kvm_s390_get_ipte_control(struct kvm *kvm) in kvm_s390_get_ipte_control() argument
455 struct bsca_block *sca = kvm->arch.sca; /* SCA version doesn't matter */ in kvm_s390_get_ipte_control()
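
The comment "SCA version doesn't matter" relies on the IPTE control word sitting at the very start of both the basic and the extended system control area, so casting kvm->arch.sca to the basic layout works either way. A stand-alone sketch of that layout trick with deliberately simplified structures (toy_bsca and toy_esca are not the architected formats):

#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Simplified stand-ins: only the shared leading member matters here. */
union toy_ipte_control {
	uint64_t val;
};

struct toy_bsca {			/* "basic" SCA layout */
	union toy_ipte_control ipte_control;
	uint64_t other_bsca_fields[7];
};

struct toy_esca {			/* "extended" SCA layout */
	union toy_ipte_control ipte_control;
	uint64_t other_esca_fields[31];
};

/* Because the member leads both layouts, the cast is valid for either SCA. */
static union toy_ipte_control *get_ipte_control(void *sca)
{
	struct toy_bsca *bsca = sca;	/* SCA version doesn't matter */

	return &bsca->ipte_control;
}

int main(void)
{
	static_assert(offsetof(struct toy_bsca, ipte_control) ==
		      offsetof(struct toy_esca, ipte_control),
		      "ipte_control must live at the same offset");

	struct toy_esca esca = { .ipte_control.val = 42 };

	printf("ipte: %llu\n",
	       (unsigned long long)get_ipte_control(&esca)->val);
	return 0;
}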
482 void kvm_s390_vcpu_crypto_reset_all(struct kvm *kvm);