Lines matching refs: vcpu (KVM x86 helper header)

void kvm_service_local_tlb_flush_requests(struct kvm_vcpu *vcpu);
int kvm_check_nested_events(struct kvm_vcpu *vcpu);

static inline void kvm_clear_exception_queue(struct kvm_vcpu *vcpu)
{
	vcpu->arch.exception.pending = false;
	vcpu->arch.exception.injected = false;
}

static inline void kvm_queue_interrupt(struct kvm_vcpu *vcpu, u8 vector,
				       bool soft)
{
	vcpu->arch.interrupt.injected = true;
	vcpu->arch.interrupt.soft = soft;
	vcpu->arch.interrupt.nr = vector;
}

static inline void kvm_clear_interrupt_queue(struct kvm_vcpu *vcpu)
{
	vcpu->arch.interrupt.injected = false;
}

static inline bool kvm_event_needs_reinjection(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.exception.injected || vcpu->arch.interrupt.injected ||
	       vcpu->arch.nmi_injected;
}

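/*
 * Illustrative sketch, not part of the header above: a caller that wants to
 * queue a new external interrupt must first let a previously delivered event
 * be re-injected. The function name below is hypothetical.
 */
static inline void example_try_queue_irq(struct kvm_vcpu *vcpu, u8 vector)
{
	if (kvm_event_needs_reinjection(vcpu))
		return;	/* re-inject the earlier event first */

	kvm_queue_interrupt(vcpu, vector, false);	/* hardware IRQ, not soft */
}
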
static inline bool is_protmode(struct kvm_vcpu *vcpu)
{
	return kvm_read_cr0_bits(vcpu, X86_CR0_PE);
}

static inline int is_long_mode(struct kvm_vcpu *vcpu)
{
#ifdef CONFIG_X86_64
	return vcpu->arch.efer & EFER_LMA;
#else
	return 0;
#endif
}

static inline bool is_64_bit_mode(struct kvm_vcpu *vcpu)
{
	int cs_db, cs_l;

	WARN_ON_ONCE(vcpu->arch.guest_state_protected);

	if (!is_long_mode(vcpu))
		return false;
	static_call(kvm_x86_get_cs_db_l_bits)(vcpu, &cs_db, &cs_l);
	return cs_l;
}

static inline bool is_64_bit_hypercall(struct kvm_vcpu *vcpu)
{
	/*
	 * With protected guest state the segment registers cannot be read, so
	 * a protected guest is assumed to be making a 64-bit hypercall.
	 */
	return vcpu->arch.guest_state_protected || is_64_bit_mode(vcpu);
}

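/*
 * Illustrative sketch, not part of the header above: hypercall emulation can
 * use is_64_bit_hypercall() to decide whether guest register values must be
 * truncated to 32 bits. The function name is hypothetical.
 */
static inline unsigned long example_hypercall_arg(struct kvm_vcpu *vcpu,
						  unsigned long raw)
{
	return is_64_bit_hypercall(vcpu) ? raw : (u32)raw;
}
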
static inline bool mmu_is_nested(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.walk_mmu == &vcpu->arch.nested_mmu;
}

static inline int is_pae(struct kvm_vcpu *vcpu)
{
	return kvm_read_cr4_bits(vcpu, X86_CR4_PAE);
}

static inline int is_pse(struct kvm_vcpu *vcpu)
{
	return kvm_read_cr4_bits(vcpu, X86_CR4_PSE);
}

static inline int is_paging(struct kvm_vcpu *vcpu)
{
	return likely(kvm_read_cr0_bits(vcpu, X86_CR0_PG));
}

static inline bool is_pae_paging(struct kvm_vcpu *vcpu)
{
	return !is_long_mode(vcpu) && is_pae(vcpu) && is_paging(vcpu);
}

static inline u8 vcpu_virt_addr_bits(struct kvm_vcpu *vcpu)
{
	return kvm_read_cr4_bits(vcpu, X86_CR4_LA57) ? 57 : 48;
}

static inline bool is_noncanonical_address(u64 la, struct kvm_vcpu *vcpu)
{
	return get_canonical(la, vcpu_virt_addr_bits(vcpu)) != la;
}

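/*
 * Illustrative sketch, not part of the header above: emulation of an MSR or
 * descriptor base that must hold a canonical linear address can reject
 * non-canonical values (the caller would then inject #GP). The function name
 * is hypothetical.
 */
static inline int example_check_canonical_base(struct kvm_vcpu *vcpu, u64 base)
{
	return is_noncanonical_address(base, vcpu) ? 1 : 0;
}
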
static inline void vcpu_cache_mmio_info(struct kvm_vcpu *vcpu,
					gva_t gva, gfn_t gfn, unsigned access)
{
	u64 gen = kvm_memslots(vcpu->kvm)->generation;

	if (unlikely(gen & KVM_MEMSLOT_GEN_UPDATE_IN_PROGRESS))
		return;

	/* For a nested walk the "GVA" is really a nested GPA; don't cache it. */
	vcpu->arch.mmio_gva = mmu_is_nested(vcpu) ? 0 : gva & PAGE_MASK;
	vcpu->arch.mmio_access = access;
	vcpu->arch.mmio_gfn = gfn;
	vcpu->arch.mmio_gen = gen;
}

static inline bool vcpu_match_mmio_gen(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.mmio_gen == kvm_memslots(vcpu->kvm)->generation;
}

static inline void vcpu_clear_mmio_info(struct kvm_vcpu *vcpu, gva_t gva)
{
	if (gva != MMIO_GVA_ANY && vcpu->arch.mmio_gva != (gva & PAGE_MASK))
		return;

	vcpu->arch.mmio_gva = 0;
}

static inline bool vcpu_match_mmio_gva(struct kvm_vcpu *vcpu, unsigned long gva)
{
	if (vcpu_match_mmio_gen(vcpu) && vcpu->arch.mmio_gva &&
	    vcpu->arch.mmio_gva == (gva & PAGE_MASK))
		return true;

	return false;
}

static inline bool vcpu_match_mmio_gpa(struct kvm_vcpu *vcpu, gpa_t gpa)
{
	if (vcpu_match_mmio_gen(vcpu) && vcpu->arch.mmio_gfn &&
	    vcpu->arch.mmio_gfn == gpa >> PAGE_SHIFT)
		return true;

	return false;
}

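/*
 * Illustrative sketch, not part of the header above: an MMIO fast path can
 * consult the cached translation before doing a full page-table walk; a stale
 * generation or a mismatched address simply misses the cache. The function
 * name is hypothetical.
 */
static inline bool example_hits_mmio_cache(struct kvm_vcpu *vcpu, gva_t gva,
					   gpa_t gpa)
{
	return vcpu_match_mmio_gva(vcpu, gva) || vcpu_match_mmio_gpa(vcpu, gpa);
}
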
static inline unsigned long kvm_register_read(struct kvm_vcpu *vcpu, int reg)
{
	unsigned long val = kvm_register_read_raw(vcpu, reg);

	return is_64_bit_mode(vcpu) ? val : (u32)val;
}

static inline void kvm_register_write(struct kvm_vcpu *vcpu,
				      int reg, unsigned long val)
{
	if (!is_64_bit_mode(vcpu))
		val = (u32)val;
	return kvm_register_write_raw(vcpu, reg, val);
}

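/*
 * Illustrative sketch, not part of the header above: GPR accesses through
 * these wrappers are transparently truncated to 32 bits outside 64-bit mode.
 * The function name is hypothetical; VCPU_REGS_RAX is KVM's real register
 * index.
 */
static inline void example_bump_rax(struct kvm_vcpu *vcpu)
{
	unsigned long rax = kvm_register_read(vcpu, VCPU_REGS_RAX);

	kvm_register_write(vcpu, VCPU_REGS_RAX, rax + 1);
}
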
static inline bool kvm_vcpu_latch_init(struct kvm_vcpu *vcpu)
{
	return is_smm(vcpu) || static_call(kvm_x86_apic_init_signal_blocked)(vcpu);
}

void kvm_inject_realmode_interrupt(struct kvm_vcpu *vcpu, int irq, int inc_eip);

int kvm_read_guest_virt(struct kvm_vcpu *vcpu,
	gva_t addr, void *val, unsigned int bytes,
	struct x86_exception *exception);

int kvm_write_guest_virt_system(struct kvm_vcpu *vcpu,
	gva_t addr, void *val, unsigned int bytes,
	struct x86_exception *exception);

int handle_ud(struct kvm_vcpu *vcpu);

void kvm_deliver_exception_payload(struct kvm_vcpu *vcpu);

void kvm_vcpu_mtrr_init(struct kvm_vcpu *vcpu);
u8 kvm_mtrr_get_guest_memory_type(struct kvm_vcpu *vcpu, gfn_t gfn);
bool kvm_mtrr_valid(struct kvm_vcpu *vcpu, u32 msr, u64 data);
int kvm_mtrr_set_msr(struct kvm_vcpu *vcpu, u32 msr, u64 data);
int kvm_mtrr_get_msr(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata);
bool kvm_mtrr_check_gfn_range_consistency(struct kvm_vcpu *vcpu, gfn_t gfn,
					  int page_num);

void kvm_fixup_and_inject_pf_error(struct kvm_vcpu *vcpu, gva_t gva, u16 error_code);
int x86_decode_emulated_instruction(struct kvm_vcpu *vcpu, int emulation_type,
				    void *insn, int insn_len);
int x86_emulate_instruction(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa,
			    int emulation_type, void *insn, int insn_len);
fastpath_t handle_fastpath_set_msr_irqoff(struct kvm_vcpu *vcpu);

static inline u64 nsec_to_cycles(struct kvm_vcpu *vcpu, u64 nsec)
{
	return pvclock_scale_delta(nsec, vcpu->arch.virtual_tsc_mult,
				   vcpu->arch.virtual_tsc_shift);
}

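/*
 * Illustrative sketch, not part of the header above: converting a host-side
 * delay in nanoseconds into guest TSC cycles, e.g. when advancing a guest
 * timer deadline. The function name and delay_ns parameter are hypothetical.
 */
static inline u64 example_delay_in_guest_cycles(struct kvm_vcpu *vcpu,
						u64 delay_ns)
{
	/* Scales by the vCPU's virtual TSC mult/shift pair. */
	return nsec_to_cycles(vcpu, delay_ns);
}
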
static inline void kvm_before_interrupt(struct kvm_vcpu *vcpu)
{
	__this_cpu_write(current_vcpu, vcpu);
}

static inline void kvm_after_interrupt(struct kvm_vcpu *vcpu)
{
	__this_cpu_write(current_vcpu, NULL);
}

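/*
 * Illustrative sketch, not part of the header above: vendor exit handlers
 * bracket host interrupt handling with these two helpers so that NMI/PMI code
 * can attribute the interrupt to the running vCPU via current_vcpu. The
 * wrapper below is hypothetical.
 */
static inline void example_handle_exit_irq(struct kvm_vcpu *vcpu,
					   void (*handler)(void))
{
	kvm_before_interrupt(vcpu);	/* publish vcpu on this CPU */
	handler();
	kvm_after_interrupt(vcpu);	/* clear it again */
}
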
void kvm_load_guest_xsave_state(struct kvm_vcpu *vcpu);
void kvm_load_host_xsave_state(struct kvm_vcpu *vcpu);

bool kvm_is_valid_cr4(struct kvm_vcpu *vcpu, unsigned long cr4);
int kvm_handle_memory_failure(struct kvm_vcpu *vcpu, int r,
			      struct x86_exception *e);
int kvm_handle_invpcid(struct kvm_vcpu *vcpu, unsigned long type, gva_t gva);
bool kvm_msr_allowed(struct kvm_vcpu *vcpu, u32 index, u32 type);

int kvm_sev_es_mmio_write(struct kvm_vcpu *vcpu, gpa_t src, unsigned int bytes,
			  void *data);
int kvm_sev_es_mmio_read(struct kvm_vcpu *vcpu, gpa_t src, unsigned int bytes,
			 void *data);
int kvm_sev_es_string_io(struct kvm_vcpu *vcpu, unsigned int size,
			 unsigned int port, void *data, unsigned int count,
			 int in);