Lines matching refs: vcpu
54 bool kvm_arch_dy_runnable(struct kvm_vcpu *vcpu) in kvm_arch_dy_runnable() argument
56 return kvm_arch_vcpu_runnable(vcpu); in kvm_arch_dy_runnable()
59 bool kvm_arch_vcpu_in_kernel(struct kvm_vcpu *vcpu) in kvm_arch_vcpu_in_kernel() argument
64 int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu) in kvm_arch_vcpu_should_kick() argument
78 int kvmppc_prepare_to_enter(struct kvm_vcpu *vcpu) in kvmppc_prepare_to_enter() argument
94 kvmppc_account_exit(vcpu, SIGNAL_EXITS); in kvmppc_prepare_to_enter()
95 vcpu->run->exit_reason = KVM_EXIT_INTR; in kvmppc_prepare_to_enter()
100 vcpu->mode = IN_GUEST_MODE; in kvmppc_prepare_to_enter()
113 if (kvm_request_pending(vcpu)) { in kvmppc_prepare_to_enter()
116 trace_kvm_check_requests(vcpu); in kvmppc_prepare_to_enter()
117 r = kvmppc_core_check_requests(vcpu); in kvmppc_prepare_to_enter()
124 if (kvmppc_core_prepare_to_enter(vcpu)) { in kvmppc_prepare_to_enter()
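The kvmppc_prepare_to_enter() references above show the usual guest-entry gate: bail out with KVM_EXIT_INTR when a signal is pending, mark the vcpu IN_GUEST_MODE, then re-check pending requests (and core-specific work) before actually entering. The stand-alone C sketch below only models that retry loop; the structure, flags, and helpers are invented for illustration and are not the kernel's definitions.

#include <stdbool.h>
#include <stdio.h>

enum { OUTSIDE_GUEST_MODE, IN_GUEST_MODE };

struct demo_vcpu {
	int mode;
	bool signal_pending;   /* stands in for signal_pending(current) */
	int pending_requests;  /* stands in for kvm_request_pending(vcpu) */
};

/* Model of kvmppc_prepare_to_enter(): returns 1 to enter the guest, 0 to exit. */
static int demo_prepare_to_enter(struct demo_vcpu *vcpu)
{
	while (true) {
		if (vcpu->signal_pending)
			return 0;                /* KVM_EXIT_INTR analogue */

		vcpu->mode = IN_GUEST_MODE;

		if (vcpu->pending_requests) {
			/* Handle requests outside guest mode, then re-check. */
			vcpu->mode = OUTSIDE_GUEST_MODE;
			vcpu->pending_requests = 0;
			continue;
		}
		return 1;                        /* safe to enter the guest */
	}
}

int main(void)
{
	struct demo_vcpu vcpu = { .pending_requests = 1 };

	printf("enter=%d mode=%d\n", demo_prepare_to_enter(&vcpu), vcpu.mode);
	return 0;
}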
141 static void kvmppc_swab_shared(struct kvm_vcpu *vcpu) in kvmppc_swab_shared() argument
143 struct kvm_vcpu_arch_shared *shared = vcpu->arch.shared; in kvmppc_swab_shared()
161 int kvmppc_kvm_pv(struct kvm_vcpu *vcpu) in kvmppc_kvm_pv() argument
163 int nr = kvmppc_get_gpr(vcpu, 11); in kvmppc_kvm_pv()
165 unsigned long __maybe_unused param1 = kvmppc_get_gpr(vcpu, 3); in kvmppc_kvm_pv()
166 unsigned long __maybe_unused param2 = kvmppc_get_gpr(vcpu, 4); in kvmppc_kvm_pv()
167 unsigned long __maybe_unused param3 = kvmppc_get_gpr(vcpu, 5); in kvmppc_kvm_pv()
168 unsigned long __maybe_unused param4 = kvmppc_get_gpr(vcpu, 6); in kvmppc_kvm_pv()
171 if (!(kvmppc_get_msr(vcpu) & MSR_SF)) { in kvmppc_kvm_pv()
185 if (vcpu->arch.intr_msr & MSR_LE) in kvmppc_kvm_pv()
187 if (shared_big_endian != vcpu->arch.shared_big_endian) in kvmppc_kvm_pv()
188 kvmppc_swab_shared(vcpu); in kvmppc_kvm_pv()
189 vcpu->arch.shared_big_endian = shared_big_endian; in kvmppc_kvm_pv()
198 vcpu->arch.disable_kernel_nx = true; in kvmppc_kvm_pv()
199 kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu); in kvmppc_kvm_pv()
202 vcpu->arch.magic_page_pa = param1 & ~0xfffULL; in kvmppc_kvm_pv()
203 vcpu->arch.magic_page_ea = param2 & ~0xfffULL; in kvmppc_kvm_pv()
210 if ((vcpu->arch.magic_page_pa & 0xf000) != in kvmppc_kvm_pv()
211 ((ulong)vcpu->arch.shared & 0xf000)) { in kvmppc_kvm_pv()
212 void *old_shared = vcpu->arch.shared; in kvmppc_kvm_pv()
213 ulong shared = (ulong)vcpu->arch.shared; in kvmppc_kvm_pv()
217 shared |= vcpu->arch.magic_page_pa & 0xf000; in kvmppc_kvm_pv()
220 vcpu->arch.shared = new_shared; in kvmppc_kvm_pv()
239 kvm_vcpu_block(vcpu); in kvmppc_kvm_pv()
240 kvm_clear_request(KVM_REQ_UNHALT, vcpu); in kvmppc_kvm_pv()
247 kvmppc_set_gpr(vcpu, 4, r2); in kvmppc_kvm_pv()
253 int kvmppc_sanity_check(struct kvm_vcpu *vcpu) in kvmppc_sanity_check() argument
258 if (!vcpu->arch.pvr) in kvmppc_sanity_check()
262 if ((vcpu->arch.cpu_type != KVM_CPU_3S_64) && vcpu->arch.papr_enabled) in kvmppc_sanity_check()
266 if (!vcpu->arch.papr_enabled && is_kvmppc_hv_enabled(vcpu->kvm)) in kvmppc_sanity_check()
277 vcpu->arch.sane = r; in kvmppc_sanity_check()
282 int kvmppc_emulate_mmio(struct kvm_vcpu *vcpu) in kvmppc_emulate_mmio() argument
287 er = kvmppc_emulate_loadstore(vcpu); in kvmppc_emulate_mmio()
298 vcpu->run->exit_reason = KVM_EXIT_MMIO; in kvmppc_emulate_mmio()
309 kvmppc_get_last_inst(vcpu, INST_GENERIC, &last_inst); in kvmppc_emulate_mmio()
324 int kvmppc_st(struct kvm_vcpu *vcpu, ulong *eaddr, int size, void *ptr, in kvmppc_st() argument
327 ulong mp_pa = vcpu->arch.magic_page_pa & KVM_PAM & PAGE_MASK; in kvmppc_st()
331 vcpu->stat.st++; in kvmppc_st()
333 if (vcpu->kvm->arch.kvm_ops && vcpu->kvm->arch.kvm_ops->store_to_eaddr) in kvmppc_st()
334 r = vcpu->kvm->arch.kvm_ops->store_to_eaddr(vcpu, eaddr, ptr, in kvmppc_st()
340 r = kvmppc_xlate(vcpu, *eaddr, data ? XLATE_DATA : XLATE_INST, in kvmppc_st()
351 if (kvmppc_supports_magic_page(vcpu) && mp_pa && in kvmppc_st()
353 !(kvmppc_get_msr(vcpu) & MSR_PR)) { in kvmppc_st()
354 void *magic = vcpu->arch.shared; in kvmppc_st()
360 if (kvm_write_guest(vcpu->kvm, pte.raddr, ptr, size)) in kvmppc_st()
367 int kvmppc_ld(struct kvm_vcpu *vcpu, ulong *eaddr, int size, void *ptr, in kvmppc_ld() argument
370 ulong mp_pa = vcpu->arch.magic_page_pa & KVM_PAM & PAGE_MASK; in kvmppc_ld()
374 vcpu->stat.ld++; in kvmppc_ld()
376 if (vcpu->kvm->arch.kvm_ops && vcpu->kvm->arch.kvm_ops->load_from_eaddr) in kvmppc_ld()
377 rc = vcpu->kvm->arch.kvm_ops->load_from_eaddr(vcpu, eaddr, ptr, in kvmppc_ld()
383 rc = kvmppc_xlate(vcpu, *eaddr, data ? XLATE_DATA : XLATE_INST, in kvmppc_ld()
397 if (kvmppc_supports_magic_page(vcpu) && mp_pa && in kvmppc_ld()
399 !(kvmppc_get_msr(vcpu) & MSR_PR)) { in kvmppc_ld()
400 void *magic = vcpu->arch.shared; in kvmppc_ld()
406 vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu); in kvmppc_ld()
407 rc = kvm_read_guest(vcpu->kvm, pte.raddr, ptr, size); in kvmppc_ld()
408 srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx); in kvmppc_ld()
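The kvmppc_st()/kvmppc_ld() references share one shape: try an optional per-VM kvm_ops fast path, translate the effective address with kvmppc_xlate(), special-case the shared magic page, and finally fall back to kvm_write_guest()/kvm_read_guest() (the read side under kvm->srcu). The stand-alone sketch below models only the translate-check-copy fallback; guest_xlate(), the pte fields, and the flat memory array are invented for illustration.

#include <stdio.h>
#include <string.h>

/* Simplified stand-ins for the kernel structures used in kvmppc_ld(). */
struct pte { unsigned long raddr; int may_read; };
struct guest { unsigned char mem[4096]; };

/* Hypothetical translation helper: effective address -> "real" address. */
static int guest_xlate(struct guest *g, unsigned long eaddr, struct pte *p)
{
	(void)g;
	p->raddr = eaddr & 0xfffUL;   /* identity-map one page for the demo */
	p->may_read = 1;
	return 0;
}

/* Model of the kvmppc_ld() fallback path: translate, check perms, copy. */
static int demo_ld(struct guest *g, unsigned long eaddr, int size, void *ptr)
{
	struct pte pte;

	if (guest_xlate(g, eaddr, &pte))
		return -1;            /* translation failed */
	if (!pte.may_read)
		return -1;            /* permission check, as in the real code */

	memcpy(ptr, g->mem + pte.raddr, size);  /* kvm_read_guest() analogue */
	return 0;
}

int main(void)
{
	struct guest g = { .mem = "hello" };
	char buf[6] = { 0 };

	if (demo_ld(&g, 0x0, 5, buf) == 0)
		printf("read: %s\n", buf);
	return 0;
}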
467 struct kvm_vcpu *vcpu; in kvm_arch_destroy_vm() local
479 kvm_for_each_vcpu(i, vcpu, kvm) in kvm_arch_destroy_vm()
480 kvm_vcpu_destroy(vcpu); in kvm_arch_destroy_vm()
738 struct kvm_vcpu *vcpu; in kvmppc_decrementer_wakeup() local
740 vcpu = container_of(timer, struct kvm_vcpu, arch.dec_timer); in kvmppc_decrementer_wakeup()
741 kvmppc_decrementer_func(vcpu); in kvmppc_decrementer_wakeup()
746 int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu) in kvm_arch_vcpu_create() argument
750 hrtimer_init(&vcpu->arch.dec_timer, CLOCK_REALTIME, HRTIMER_MODE_ABS); in kvm_arch_vcpu_create()
751 vcpu->arch.dec_timer.function = kvmppc_decrementer_wakeup; in kvm_arch_vcpu_create()
752 vcpu->arch.dec_expires = get_tb(); in kvm_arch_vcpu_create()
755 mutex_init(&vcpu->arch.exit_timing_lock); in kvm_arch_vcpu_create()
757 err = kvmppc_subarch_vcpu_init(vcpu); in kvm_arch_vcpu_create()
761 err = kvmppc_core_vcpu_create(vcpu); in kvm_arch_vcpu_create()
765 vcpu->arch.waitp = &vcpu->wait; in kvm_arch_vcpu_create()
766 kvmppc_create_vcpu_debugfs(vcpu, vcpu->vcpu_id); in kvm_arch_vcpu_create()
770 kvmppc_subarch_vcpu_uninit(vcpu); in kvm_arch_vcpu_create()
774 void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu) in kvm_arch_vcpu_postcreate() argument
778 void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu) in kvm_arch_vcpu_destroy() argument
781 hrtimer_cancel(&vcpu->arch.dec_timer); in kvm_arch_vcpu_destroy()
783 kvmppc_remove_vcpu_debugfs(vcpu); in kvm_arch_vcpu_destroy()
785 switch (vcpu->arch.irq_type) { in kvm_arch_vcpu_destroy()
787 kvmppc_mpic_disconnect_vcpu(vcpu->arch.mpic, vcpu); in kvm_arch_vcpu_destroy()
791 kvmppc_xive_cleanup_vcpu(vcpu); in kvm_arch_vcpu_destroy()
793 kvmppc_xics_free_icp(vcpu); in kvm_arch_vcpu_destroy()
796 kvmppc_xive_native_cleanup_vcpu(vcpu); in kvm_arch_vcpu_destroy()
800 kvmppc_core_vcpu_free(vcpu); in kvm_arch_vcpu_destroy()
802 kvmppc_subarch_vcpu_uninit(vcpu); in kvm_arch_vcpu_destroy()
805 int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu) in kvm_cpu_has_pending_timer() argument
807 return kvmppc_core_pending_dec(vcpu); in kvm_cpu_has_pending_timer()
810 void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu) in kvm_arch_vcpu_load() argument
820 mtspr(SPRN_VRSAVE, vcpu->arch.vrsave); in kvm_arch_vcpu_load()
822 kvmppc_core_vcpu_load(vcpu, cpu); in kvm_arch_vcpu_load()
825 void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu) in kvm_arch_vcpu_put() argument
827 kvmppc_core_vcpu_put(vcpu); in kvm_arch_vcpu_put()
829 vcpu->arch.vrsave = mfspr(SPRN_VRSAVE); in kvm_arch_vcpu_put()
901 static inline void kvmppc_set_vsr_dword(struct kvm_vcpu *vcpu, in kvmppc_set_vsr_dword() argument
905 int offset = kvmppc_get_vsr_dword_offset(vcpu->arch.mmio_vsx_offset); in kvmppc_set_vsr_dword()
906 int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK; in kvmppc_set_vsr_dword()
912 val.vval = VCPU_VSX_VR(vcpu, index - 32); in kvmppc_set_vsr_dword()
914 VCPU_VSX_VR(vcpu, index - 32) = val.vval; in kvmppc_set_vsr_dword()
916 VCPU_VSX_FPR(vcpu, index, offset) = gpr; in kvmppc_set_vsr_dword()
920 static inline void kvmppc_set_vsr_dword_dump(struct kvm_vcpu *vcpu, in kvmppc_set_vsr_dword_dump() argument
924 int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK; in kvmppc_set_vsr_dword_dump()
927 val.vval = VCPU_VSX_VR(vcpu, index - 32); in kvmppc_set_vsr_dword_dump()
930 VCPU_VSX_VR(vcpu, index - 32) = val.vval; in kvmppc_set_vsr_dword_dump()
932 VCPU_VSX_FPR(vcpu, index, 0) = gpr; in kvmppc_set_vsr_dword_dump()
933 VCPU_VSX_FPR(vcpu, index, 1) = gpr; in kvmppc_set_vsr_dword_dump()
937 static inline void kvmppc_set_vsr_word_dump(struct kvm_vcpu *vcpu, in kvmppc_set_vsr_word_dump() argument
941 int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK; in kvmppc_set_vsr_word_dump()
948 VCPU_VSX_VR(vcpu, index - 32) = val.vval; in kvmppc_set_vsr_word_dump()
952 VCPU_VSX_FPR(vcpu, index, 0) = val.vsxval[0]; in kvmppc_set_vsr_word_dump()
953 VCPU_VSX_FPR(vcpu, index, 1) = val.vsxval[0]; in kvmppc_set_vsr_word_dump()
957 static inline void kvmppc_set_vsr_word(struct kvm_vcpu *vcpu, in kvmppc_set_vsr_word() argument
961 int offset = kvmppc_get_vsr_word_offset(vcpu->arch.mmio_vsx_offset); in kvmppc_set_vsr_word()
962 int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK; in kvmppc_set_vsr_word()
969 val.vval = VCPU_VSX_VR(vcpu, index - 32); in kvmppc_set_vsr_word()
971 VCPU_VSX_VR(vcpu, index - 32) = val.vval; in kvmppc_set_vsr_word()
975 val.vsxval[0] = VCPU_VSX_FPR(vcpu, index, dword_offset); in kvmppc_set_vsr_word()
977 VCPU_VSX_FPR(vcpu, index, dword_offset) = val.vsxval[0]; in kvmppc_set_vsr_word()
983 static inline int kvmppc_get_vmx_offset_generic(struct kvm_vcpu *vcpu, in kvmppc_get_vmx_offset_generic() argument
992 if (kvmppc_need_byteswap(vcpu)) in kvmppc_get_vmx_offset_generic()
1000 static inline int kvmppc_get_vmx_dword_offset(struct kvm_vcpu *vcpu, in kvmppc_get_vmx_dword_offset() argument
1003 return kvmppc_get_vmx_offset_generic(vcpu, index, 8); in kvmppc_get_vmx_dword_offset()
1006 static inline int kvmppc_get_vmx_word_offset(struct kvm_vcpu *vcpu, in kvmppc_get_vmx_word_offset() argument
1009 return kvmppc_get_vmx_offset_generic(vcpu, index, 4); in kvmppc_get_vmx_word_offset()
1012 static inline int kvmppc_get_vmx_hword_offset(struct kvm_vcpu *vcpu, in kvmppc_get_vmx_hword_offset() argument
1015 return kvmppc_get_vmx_offset_generic(vcpu, index, 2); in kvmppc_get_vmx_hword_offset()
1018 static inline int kvmppc_get_vmx_byte_offset(struct kvm_vcpu *vcpu, in kvmppc_get_vmx_byte_offset() argument
1021 return kvmppc_get_vmx_offset_generic(vcpu, index, 1); in kvmppc_get_vmx_byte_offset()
1025 static inline void kvmppc_set_vmx_dword(struct kvm_vcpu *vcpu, in kvmppc_set_vmx_dword() argument
1029 int offset = kvmppc_get_vmx_dword_offset(vcpu, in kvmppc_set_vmx_dword()
1030 vcpu->arch.mmio_vmx_offset); in kvmppc_set_vmx_dword()
1031 int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK; in kvmppc_set_vmx_dword()
1036 val.vval = VCPU_VSX_VR(vcpu, index); in kvmppc_set_vmx_dword()
1038 VCPU_VSX_VR(vcpu, index) = val.vval; in kvmppc_set_vmx_dword()
1041 static inline void kvmppc_set_vmx_word(struct kvm_vcpu *vcpu, in kvmppc_set_vmx_word() argument
1045 int offset = kvmppc_get_vmx_word_offset(vcpu, in kvmppc_set_vmx_word()
1046 vcpu->arch.mmio_vmx_offset); in kvmppc_set_vmx_word()
1047 int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK; in kvmppc_set_vmx_word()
1052 val.vval = VCPU_VSX_VR(vcpu, index); in kvmppc_set_vmx_word()
1054 VCPU_VSX_VR(vcpu, index) = val.vval; in kvmppc_set_vmx_word()
1057 static inline void kvmppc_set_vmx_hword(struct kvm_vcpu *vcpu, in kvmppc_set_vmx_hword() argument
1061 int offset = kvmppc_get_vmx_hword_offset(vcpu, in kvmppc_set_vmx_hword()
1062 vcpu->arch.mmio_vmx_offset); in kvmppc_set_vmx_hword()
1063 int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK; in kvmppc_set_vmx_hword()
1068 val.vval = VCPU_VSX_VR(vcpu, index); in kvmppc_set_vmx_hword()
1070 VCPU_VSX_VR(vcpu, index) = val.vval; in kvmppc_set_vmx_hword()
1073 static inline void kvmppc_set_vmx_byte(struct kvm_vcpu *vcpu, in kvmppc_set_vmx_byte() argument
1077 int offset = kvmppc_get_vmx_byte_offset(vcpu, in kvmppc_set_vmx_byte()
1078 vcpu->arch.mmio_vmx_offset); in kvmppc_set_vmx_byte()
1079 int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK; in kvmppc_set_vmx_byte()
1084 val.vval = VCPU_VSX_VR(vcpu, index); in kvmppc_set_vmx_byte()
1086 VCPU_VSX_VR(vcpu, index) = val.vval; in kvmppc_set_vmx_byte()
1120 static void kvmppc_complete_mmio_load(struct kvm_vcpu *vcpu) in kvmppc_complete_mmio_load() argument
1122 struct kvm_run *run = vcpu->run; in kvmppc_complete_mmio_load()
1130 if (!vcpu->arch.mmio_host_swabbed) { in kvmppc_complete_mmio_load()
1147 if ((vcpu->arch.mmio_sp64_extend) && (run->mmio.len == 4)) in kvmppc_complete_mmio_load()
1150 if (vcpu->arch.mmio_sign_extend) { in kvmppc_complete_mmio_load()
1166 switch (vcpu->arch.io_gpr & KVM_MMIO_REG_EXT_MASK) { in kvmppc_complete_mmio_load()
1168 kvmppc_set_gpr(vcpu, vcpu->arch.io_gpr, gpr); in kvmppc_complete_mmio_load()
1171 if (vcpu->kvm->arch.kvm_ops->giveup_ext) in kvmppc_complete_mmio_load()
1172 vcpu->kvm->arch.kvm_ops->giveup_ext(vcpu, MSR_FP); in kvmppc_complete_mmio_load()
1174 VCPU_FPR(vcpu, vcpu->arch.io_gpr & KVM_MMIO_REG_MASK) = gpr; in kvmppc_complete_mmio_load()
1178 vcpu->arch.qpr[vcpu->arch.io_gpr & KVM_MMIO_REG_MASK] = gpr; in kvmppc_complete_mmio_load()
1181 VCPU_FPR(vcpu, vcpu->arch.io_gpr & KVM_MMIO_REG_MASK) = gpr; in kvmppc_complete_mmio_load()
1182 vcpu->arch.qpr[vcpu->arch.io_gpr & KVM_MMIO_REG_MASK] = gpr; in kvmppc_complete_mmio_load()
1187 if (vcpu->kvm->arch.kvm_ops->giveup_ext) in kvmppc_complete_mmio_load()
1188 vcpu->kvm->arch.kvm_ops->giveup_ext(vcpu, MSR_VSX); in kvmppc_complete_mmio_load()
1190 if (vcpu->arch.mmio_copy_type == KVMPPC_VSX_COPY_DWORD) in kvmppc_complete_mmio_load()
1191 kvmppc_set_vsr_dword(vcpu, gpr); in kvmppc_complete_mmio_load()
1192 else if (vcpu->arch.mmio_copy_type == KVMPPC_VSX_COPY_WORD) in kvmppc_complete_mmio_load()
1193 kvmppc_set_vsr_word(vcpu, gpr); in kvmppc_complete_mmio_load()
1194 else if (vcpu->arch.mmio_copy_type == in kvmppc_complete_mmio_load()
1196 kvmppc_set_vsr_dword_dump(vcpu, gpr); in kvmppc_complete_mmio_load()
1197 else if (vcpu->arch.mmio_copy_type == in kvmppc_complete_mmio_load()
1199 kvmppc_set_vsr_word_dump(vcpu, gpr); in kvmppc_complete_mmio_load()
1204 if (vcpu->kvm->arch.kvm_ops->giveup_ext) in kvmppc_complete_mmio_load()
1205 vcpu->kvm->arch.kvm_ops->giveup_ext(vcpu, MSR_VEC); in kvmppc_complete_mmio_load()
1207 if (vcpu->arch.mmio_copy_type == KVMPPC_VMX_COPY_DWORD) in kvmppc_complete_mmio_load()
1208 kvmppc_set_vmx_dword(vcpu, gpr); in kvmppc_complete_mmio_load()
1209 else if (vcpu->arch.mmio_copy_type == KVMPPC_VMX_COPY_WORD) in kvmppc_complete_mmio_load()
1210 kvmppc_set_vmx_word(vcpu, gpr); in kvmppc_complete_mmio_load()
1211 else if (vcpu->arch.mmio_copy_type == in kvmppc_complete_mmio_load()
1213 kvmppc_set_vmx_hword(vcpu, gpr); in kvmppc_complete_mmio_load()
1214 else if (vcpu->arch.mmio_copy_type == in kvmppc_complete_mmio_load()
1216 kvmppc_set_vmx_byte(vcpu, gpr); in kvmppc_complete_mmio_load()
1221 if (kvmppc_need_byteswap(vcpu)) in kvmppc_complete_mmio_load()
1223 kvm_vcpu_write_guest(vcpu, vcpu->arch.nested_io_gpr, &gpr, in kvmppc_complete_mmio_load()
1232 static int __kvmppc_handle_load(struct kvm_vcpu *vcpu, in __kvmppc_handle_load() argument
1236 struct kvm_run *run = vcpu->run; in __kvmppc_handle_load()
1241 if (kvmppc_need_byteswap(vcpu)) { in __kvmppc_handle_load()
1252 run->mmio.phys_addr = vcpu->arch.paddr_accessed; in __kvmppc_handle_load()
1256 vcpu->arch.io_gpr = rt; in __kvmppc_handle_load()
1257 vcpu->arch.mmio_host_swabbed = host_swabbed; in __kvmppc_handle_load()
1258 vcpu->mmio_needed = 1; in __kvmppc_handle_load()
1259 vcpu->mmio_is_write = 0; in __kvmppc_handle_load()
1260 vcpu->arch.mmio_sign_extend = sign_extend; in __kvmppc_handle_load()
1262 idx = srcu_read_lock(&vcpu->kvm->srcu); in __kvmppc_handle_load()
1264 ret = kvm_io_bus_read(vcpu, KVM_MMIO_BUS, run->mmio.phys_addr, in __kvmppc_handle_load()
1267 srcu_read_unlock(&vcpu->kvm->srcu, idx); in __kvmppc_handle_load()
1270 kvmppc_complete_mmio_load(vcpu); in __kvmppc_handle_load()
1271 vcpu->mmio_needed = 0; in __kvmppc_handle_load()
1278 int kvmppc_handle_load(struct kvm_vcpu *vcpu, in kvmppc_handle_load() argument
1282 return __kvmppc_handle_load(vcpu, rt, bytes, is_default_endian, 0); in kvmppc_handle_load()
1287 int kvmppc_handle_loads(struct kvm_vcpu *vcpu, in kvmppc_handle_loads() argument
1291 return __kvmppc_handle_load(vcpu, rt, bytes, is_default_endian, 1); in kvmppc_handle_loads()
1295 int kvmppc_handle_vsx_load(struct kvm_vcpu *vcpu, in kvmppc_handle_vsx_load() argument
1302 if (vcpu->arch.mmio_vsx_copy_nums > 4) in kvmppc_handle_vsx_load()
1305 while (vcpu->arch.mmio_vsx_copy_nums) { in kvmppc_handle_vsx_load()
1306 emulated = __kvmppc_handle_load(vcpu, rt, bytes, in kvmppc_handle_vsx_load()
1312 vcpu->arch.paddr_accessed += vcpu->run->mmio.len; in kvmppc_handle_vsx_load()
1314 vcpu->arch.mmio_vsx_copy_nums--; in kvmppc_handle_vsx_load()
1315 vcpu->arch.mmio_vsx_offset++; in kvmppc_handle_vsx_load()
1321 int kvmppc_handle_store(struct kvm_vcpu *vcpu, in kvmppc_handle_store() argument
1324 struct kvm_run *run = vcpu->run; in kvmppc_handle_store()
1330 if (kvmppc_need_byteswap(vcpu)) { in kvmppc_handle_store()
1341 run->mmio.phys_addr = vcpu->arch.paddr_accessed; in kvmppc_handle_store()
1344 vcpu->mmio_needed = 1; in kvmppc_handle_store()
1345 vcpu->mmio_is_write = 1; in kvmppc_handle_store()
1347 if ((vcpu->arch.mmio_sp64_extend) && (bytes == 4)) in kvmppc_handle_store()
1367 idx = srcu_read_lock(&vcpu->kvm->srcu); in kvmppc_handle_store()
1369 ret = kvm_io_bus_write(vcpu, KVM_MMIO_BUS, run->mmio.phys_addr, in kvmppc_handle_store()
1372 srcu_read_unlock(&vcpu->kvm->srcu, idx); in kvmppc_handle_store()
1375 vcpu->mmio_needed = 0; in kvmppc_handle_store()
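The __kvmppc_handle_load()/kvmppc_handle_store() references show the MMIO emulation protocol: fill in run->mmio, record what to do on completion (io_gpr, sign/byteswap flags), try the in-kernel KVM_MMIO_BUS first, and only if no in-kernel device claims the access is the vcpu left with mmio_needed set so the run loop exits to userspace with KVM_EXIT_MMIO. Below is a minimal user-space sketch of that decision; the structures and the one-register device standing in for kvm_io_bus_read() are invented for illustration.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Invented stand-ins for struct kvm_run's mmio block and the vcpu state. */
struct mmio_req { uint64_t phys_addr; uint8_t data[8]; int len; int is_write; };
struct demo_vcpu { struct mmio_req mmio; int mmio_needed; uint64_t gpr[32]; int io_gpr; };

/* Hypothetical in-kernel device: one 8-byte register at 0x1000. */
static int iobus_read(uint64_t addr, void *buf, int len)
{
	static const uint64_t reg = 0xdeadbeef;

	if (addr != 0x1000 || len > 8)
		return -1;            /* no in-kernel device claims this access */
	memcpy(buf, &reg, len);
	return 0;
}

/* Model of __kvmppc_handle_load(): record completion state, try the bus. */
static int demo_handle_load(struct demo_vcpu *vcpu, int rt, uint64_t paddr, int len)
{
	vcpu->mmio.phys_addr = paddr;
	vcpu->mmio.len = len;
	vcpu->mmio.is_write = 0;
	vcpu->io_gpr = rt;
	vcpu->mmio_needed = 1;

	if (iobus_read(paddr, vcpu->mmio.data, len) == 0) {
		/* In-kernel device handled it: complete the load immediately. */
		memcpy(&vcpu->gpr[rt], vcpu->mmio.data, len);
		vcpu->mmio_needed = 0;
		return 0;             /* EMULATE_DONE analogue */
	}
	return 1;                     /* EMULATE_DO_MMIO analogue: exit to userspace */
}

int main(void)
{
	struct demo_vcpu vcpu;

	memset(&vcpu, 0, sizeof(vcpu));
	printf("bus hit: %d, gpr3=%#lx\n",
	       demo_handle_load(&vcpu, 3, 0x1000, 4), (unsigned long)vcpu.gpr[3]);
	printf("bus miss: %d, mmio_needed=%d\n",
	       demo_handle_load(&vcpu, 4, 0x2000, 4), vcpu.mmio_needed);
	return 0;
}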
1384 static inline int kvmppc_get_vsr_data(struct kvm_vcpu *vcpu, int rs, u64 *val) in kvmppc_get_vsr_data() argument
1389 int copy_type = vcpu->arch.mmio_copy_type; in kvmppc_get_vsr_data()
1395 kvmppc_get_vsr_dword_offset(vcpu->arch.mmio_vsx_offset); in kvmppc_get_vsr_data()
1403 *val = VCPU_VSX_FPR(vcpu, rs, vsx_offset); in kvmppc_get_vsr_data()
1405 reg.vval = VCPU_VSX_VR(vcpu, rs - 32); in kvmppc_get_vsr_data()
1412 kvmppc_get_vsr_word_offset(vcpu->arch.mmio_vsx_offset); in kvmppc_get_vsr_data()
1422 reg.vsxval[0] = VCPU_VSX_FPR(vcpu, rs, dword_offset); in kvmppc_get_vsr_data()
1425 reg.vval = VCPU_VSX_VR(vcpu, rs - 32); in kvmppc_get_vsr_data()
1438 int kvmppc_handle_vsx_store(struct kvm_vcpu *vcpu, in kvmppc_handle_vsx_store() argument
1444 vcpu->arch.io_gpr = rs; in kvmppc_handle_vsx_store()
1447 if (vcpu->arch.mmio_vsx_copy_nums > 4) in kvmppc_handle_vsx_store()
1450 while (vcpu->arch.mmio_vsx_copy_nums) { in kvmppc_handle_vsx_store()
1451 if (kvmppc_get_vsr_data(vcpu, rs, &val) == -1) in kvmppc_handle_vsx_store()
1454 emulated = kvmppc_handle_store(vcpu, in kvmppc_handle_vsx_store()
1460 vcpu->arch.paddr_accessed += vcpu->run->mmio.len; in kvmppc_handle_vsx_store()
1462 vcpu->arch.mmio_vsx_copy_nums--; in kvmppc_handle_vsx_store()
1463 vcpu->arch.mmio_vsx_offset++; in kvmppc_handle_vsx_store()
1469 static int kvmppc_emulate_mmio_vsx_loadstore(struct kvm_vcpu *vcpu) in kvmppc_emulate_mmio_vsx_loadstore() argument
1471 struct kvm_run *run = vcpu->run; in kvmppc_emulate_mmio_vsx_loadstore()
1475 vcpu->arch.paddr_accessed += run->mmio.len; in kvmppc_emulate_mmio_vsx_loadstore()
1477 if (!vcpu->mmio_is_write) { in kvmppc_emulate_mmio_vsx_loadstore()
1478 emulated = kvmppc_handle_vsx_load(vcpu, vcpu->arch.io_gpr, in kvmppc_emulate_mmio_vsx_loadstore()
1479 run->mmio.len, 1, vcpu->arch.mmio_sign_extend); in kvmppc_emulate_mmio_vsx_loadstore()
1481 emulated = kvmppc_handle_vsx_store(vcpu, in kvmppc_emulate_mmio_vsx_loadstore()
1482 vcpu->arch.io_gpr, run->mmio.len, 1); in kvmppc_emulate_mmio_vsx_loadstore()
1505 int kvmppc_handle_vmx_load(struct kvm_vcpu *vcpu, in kvmppc_handle_vmx_load() argument
1510 if (vcpu->arch.mmio_vsx_copy_nums > 2) in kvmppc_handle_vmx_load()
1513 while (vcpu->arch.mmio_vmx_copy_nums) { in kvmppc_handle_vmx_load()
1514 emulated = __kvmppc_handle_load(vcpu, rt, bytes, in kvmppc_handle_vmx_load()
1520 vcpu->arch.paddr_accessed += vcpu->run->mmio.len; in kvmppc_handle_vmx_load()
1521 vcpu->arch.mmio_vmx_copy_nums--; in kvmppc_handle_vmx_load()
1522 vcpu->arch.mmio_vmx_offset++; in kvmppc_handle_vmx_load()
1528 static int kvmppc_get_vmx_dword(struct kvm_vcpu *vcpu, int index, u64 *val) in kvmppc_get_vmx_dword() argument
1535 kvmppc_get_vmx_dword_offset(vcpu, vcpu->arch.mmio_vmx_offset); in kvmppc_get_vmx_dword()
1540 reg.vval = VCPU_VSX_VR(vcpu, index); in kvmppc_get_vmx_dword()
1546 static int kvmppc_get_vmx_word(struct kvm_vcpu *vcpu, int index, u64 *val) in kvmppc_get_vmx_word() argument
1553 kvmppc_get_vmx_word_offset(vcpu, vcpu->arch.mmio_vmx_offset); in kvmppc_get_vmx_word()
1558 reg.vval = VCPU_VSX_VR(vcpu, index); in kvmppc_get_vmx_word()
1564 static int kvmppc_get_vmx_hword(struct kvm_vcpu *vcpu, int index, u64 *val) in kvmppc_get_vmx_hword() argument
1571 kvmppc_get_vmx_hword_offset(vcpu, vcpu->arch.mmio_vmx_offset); in kvmppc_get_vmx_hword()
1576 reg.vval = VCPU_VSX_VR(vcpu, index); in kvmppc_get_vmx_hword()
1582 static int kvmppc_get_vmx_byte(struct kvm_vcpu *vcpu, int index, u64 *val) in kvmppc_get_vmx_byte() argument
1589 kvmppc_get_vmx_byte_offset(vcpu, vcpu->arch.mmio_vmx_offset); in kvmppc_get_vmx_byte()
1594 reg.vval = VCPU_VSX_VR(vcpu, index); in kvmppc_get_vmx_byte()
1600 int kvmppc_handle_vmx_store(struct kvm_vcpu *vcpu, in kvmppc_handle_vmx_store() argument
1607 if (vcpu->arch.mmio_vsx_copy_nums > 2) in kvmppc_handle_vmx_store()
1610 vcpu->arch.io_gpr = rs; in kvmppc_handle_vmx_store()
1612 while (vcpu->arch.mmio_vmx_copy_nums) { in kvmppc_handle_vmx_store()
1613 switch (vcpu->arch.mmio_copy_type) { in kvmppc_handle_vmx_store()
1615 if (kvmppc_get_vmx_dword(vcpu, index, &val) == -1) in kvmppc_handle_vmx_store()
1620 if (kvmppc_get_vmx_word(vcpu, index, &val) == -1) in kvmppc_handle_vmx_store()
1624 if (kvmppc_get_vmx_hword(vcpu, index, &val) == -1) in kvmppc_handle_vmx_store()
1628 if (kvmppc_get_vmx_byte(vcpu, index, &val) == -1) in kvmppc_handle_vmx_store()
1635 emulated = kvmppc_handle_store(vcpu, val, bytes, in kvmppc_handle_vmx_store()
1640 vcpu->arch.paddr_accessed += vcpu->run->mmio.len; in kvmppc_handle_vmx_store()
1641 vcpu->arch.mmio_vmx_copy_nums--; in kvmppc_handle_vmx_store()
1642 vcpu->arch.mmio_vmx_offset++; in kvmppc_handle_vmx_store()
1648 static int kvmppc_emulate_mmio_vmx_loadstore(struct kvm_vcpu *vcpu) in kvmppc_emulate_mmio_vmx_loadstore() argument
1650 struct kvm_run *run = vcpu->run; in kvmppc_emulate_mmio_vmx_loadstore()
1654 vcpu->arch.paddr_accessed += run->mmio.len; in kvmppc_emulate_mmio_vmx_loadstore()
1656 if (!vcpu->mmio_is_write) { in kvmppc_emulate_mmio_vmx_loadstore()
1657 emulated = kvmppc_handle_vmx_load(vcpu, in kvmppc_emulate_mmio_vmx_loadstore()
1658 vcpu->arch.io_gpr, run->mmio.len, 1); in kvmppc_emulate_mmio_vmx_loadstore()
1660 emulated = kvmppc_handle_vmx_store(vcpu, in kvmppc_emulate_mmio_vmx_loadstore()
1661 vcpu->arch.io_gpr, run->mmio.len, 1); in kvmppc_emulate_mmio_vmx_loadstore()
1683 int kvm_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg) in kvm_vcpu_ioctl_get_one_reg() argument
1693 r = kvmppc_get_one_reg(vcpu, reg->id, &val); in kvm_vcpu_ioctl_get_one_reg()
1703 val.vval = vcpu->arch.vr.vr[reg->id - KVM_REG_PPC_VR0]; in kvm_vcpu_ioctl_get_one_reg()
1710 val = get_reg_val(reg->id, vcpu->arch.vr.vscr.u[3]); in kvm_vcpu_ioctl_get_one_reg()
1713 val = get_reg_val(reg->id, vcpu->arch.vrsave); in kvm_vcpu_ioctl_get_one_reg()
1731 int kvm_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg) in kvm_vcpu_ioctl_set_one_reg() argument
1744 r = kvmppc_set_one_reg(vcpu, reg->id, &val); in kvm_vcpu_ioctl_set_one_reg()
1754 vcpu->arch.vr.vr[reg->id - KVM_REG_PPC_VR0] = val.vval; in kvm_vcpu_ioctl_set_one_reg()
1761 vcpu->arch.vr.vscr.u[3] = set_reg_val(reg->id, val); in kvm_vcpu_ioctl_set_one_reg()
1768 vcpu->arch.vrsave = set_reg_val(reg->id, val); in kvm_vcpu_ioctl_set_one_reg()
1780 int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu) in kvm_arch_vcpu_ioctl_run() argument
1782 struct kvm_run *run = vcpu->run; in kvm_arch_vcpu_ioctl_run()
1785 vcpu_load(vcpu); in kvm_arch_vcpu_ioctl_run()
1787 if (vcpu->mmio_needed) { in kvm_arch_vcpu_ioctl_run()
1788 vcpu->mmio_needed = 0; in kvm_arch_vcpu_ioctl_run()
1789 if (!vcpu->mmio_is_write) in kvm_arch_vcpu_ioctl_run()
1790 kvmppc_complete_mmio_load(vcpu); in kvm_arch_vcpu_ioctl_run()
1792 if (vcpu->arch.mmio_vsx_copy_nums > 0) { in kvm_arch_vcpu_ioctl_run()
1793 vcpu->arch.mmio_vsx_copy_nums--; in kvm_arch_vcpu_ioctl_run()
1794 vcpu->arch.mmio_vsx_offset++; in kvm_arch_vcpu_ioctl_run()
1797 if (vcpu->arch.mmio_vsx_copy_nums > 0) { in kvm_arch_vcpu_ioctl_run()
1798 r = kvmppc_emulate_mmio_vsx_loadstore(vcpu); in kvm_arch_vcpu_ioctl_run()
1800 vcpu->mmio_needed = 1; in kvm_arch_vcpu_ioctl_run()
1806 if (vcpu->arch.mmio_vmx_copy_nums > 0) { in kvm_arch_vcpu_ioctl_run()
1807 vcpu->arch.mmio_vmx_copy_nums--; in kvm_arch_vcpu_ioctl_run()
1808 vcpu->arch.mmio_vmx_offset++; in kvm_arch_vcpu_ioctl_run()
1811 if (vcpu->arch.mmio_vmx_copy_nums > 0) { in kvm_arch_vcpu_ioctl_run()
1812 r = kvmppc_emulate_mmio_vmx_loadstore(vcpu); in kvm_arch_vcpu_ioctl_run()
1814 vcpu->mmio_needed = 1; in kvm_arch_vcpu_ioctl_run()
1819 } else if (vcpu->arch.osi_needed) { in kvm_arch_vcpu_ioctl_run()
1824 kvmppc_set_gpr(vcpu, i, gprs[i]); in kvm_arch_vcpu_ioctl_run()
1825 vcpu->arch.osi_needed = 0; in kvm_arch_vcpu_ioctl_run()
1826 } else if (vcpu->arch.hcall_needed) { in kvm_arch_vcpu_ioctl_run()
1829 kvmppc_set_gpr(vcpu, 3, run->papr_hcall.ret); in kvm_arch_vcpu_ioctl_run()
1831 kvmppc_set_gpr(vcpu, 4 + i, run->papr_hcall.args[i]); in kvm_arch_vcpu_ioctl_run()
1832 vcpu->arch.hcall_needed = 0; in kvm_arch_vcpu_ioctl_run()
1834 } else if (vcpu->arch.epr_needed) { in kvm_arch_vcpu_ioctl_run()
1835 kvmppc_set_epr(vcpu, run->epr.epr); in kvm_arch_vcpu_ioctl_run()
1836 vcpu->arch.epr_needed = 0; in kvm_arch_vcpu_ioctl_run()
1840 kvm_sigset_activate(vcpu); in kvm_arch_vcpu_ioctl_run()
1845 r = kvmppc_vcpu_run(vcpu); in kvm_arch_vcpu_ioctl_run()
1847 kvm_sigset_deactivate(vcpu); in kvm_arch_vcpu_ioctl_run()
1852 vcpu_put(vcpu); in kvm_arch_vcpu_ioctl_run()
1856 int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu, struct kvm_interrupt *irq) in kvm_vcpu_ioctl_interrupt() argument
1859 kvmppc_core_dequeue_external(vcpu); in kvm_vcpu_ioctl_interrupt()
1863 kvmppc_core_queue_external(vcpu, irq); in kvm_vcpu_ioctl_interrupt()
1865 kvm_vcpu_kick(vcpu); in kvm_vcpu_ioctl_interrupt()
1870 static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu, in kvm_vcpu_ioctl_enable_cap() argument
1881 vcpu->arch.osi_enabled = true; in kvm_vcpu_ioctl_enable_cap()
1885 vcpu->arch.papr_enabled = true; in kvm_vcpu_ioctl_enable_cap()
1890 vcpu->arch.epr_flags |= KVMPPC_EPR_USER; in kvm_vcpu_ioctl_enable_cap()
1892 vcpu->arch.epr_flags &= ~KVMPPC_EPR_USER; in kvm_vcpu_ioctl_enable_cap()
1897 vcpu->arch.watchdog_enabled = true; in kvm_vcpu_ioctl_enable_cap()
1909 r = kvm_vcpu_ioctl_config_tlb(vcpu, &cfg); in kvm_vcpu_ioctl_enable_cap()
1926 r = kvmppc_mpic_connect_vcpu(dev, vcpu, cap->args[1]); in kvm_vcpu_ioctl_enable_cap()
1946 r = kvmppc_xive_connect_vcpu(dev, vcpu, cap->args[1]); in kvm_vcpu_ioctl_enable_cap()
1948 r = kvmppc_xics_connect_vcpu(dev, vcpu, cap->args[1]); in kvm_vcpu_ioctl_enable_cap()
1972 r = kvmppc_xive_native_connect_vcpu(dev, vcpu, in kvm_vcpu_ioctl_enable_cap()
1982 if (!is_kvmppc_hv_enabled(vcpu->kvm)) in kvm_vcpu_ioctl_enable_cap()
1985 vcpu->kvm->arch.fwnmi_enabled = true; in kvm_vcpu_ioctl_enable_cap()
1994 r = kvmppc_sanity_check(vcpu); in kvm_vcpu_ioctl_enable_cap()
2012 int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu, in kvm_arch_vcpu_ioctl_get_mpstate() argument
2018 int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu, in kvm_arch_vcpu_ioctl_set_mpstate() argument
2027 struct kvm_vcpu *vcpu = filp->private_data; in kvm_arch_vcpu_async_ioctl() local
2034 return kvm_vcpu_ioctl_interrupt(vcpu, &irq); in kvm_arch_vcpu_async_ioctl()
2042 struct kvm_vcpu *vcpu = filp->private_data; in kvm_arch_vcpu_ioctl() local
2053 vcpu_load(vcpu); in kvm_arch_vcpu_ioctl()
2054 r = kvm_vcpu_ioctl_enable_cap(vcpu, &cap); in kvm_arch_vcpu_ioctl()
2055 vcpu_put(vcpu); in kvm_arch_vcpu_ioctl()
2067 r = kvm_vcpu_ioctl_set_one_reg(vcpu, &reg); in kvm_arch_vcpu_ioctl()
2069 r = kvm_vcpu_ioctl_get_one_reg(vcpu, &reg); in kvm_arch_vcpu_ioctl()
2079 vcpu_load(vcpu); in kvm_arch_vcpu_ioctl()
2080 r = kvm_vcpu_ioctl_dirty_tlb(vcpu, &dirty); in kvm_arch_vcpu_ioctl()
2081 vcpu_put(vcpu); in kvm_arch_vcpu_ioctl()
2093 vm_fault_t kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf) in kvm_arch_vcpu_fault() argument