/linux/arch/arm64/include/asm/
  kvm_emulate.h
    53  vcpu->arch.hcr_el2 |= HCR_E2H;  in vcpu_reset_hcr()
    56  vcpu->arch.hcr_el2 |= HCR_TEA;  in vcpu_reset_hcr()
    81  if (!vcpu_el1_is_32bit(vcpu))  in vcpu_reset_hcr()
    85  vcpu_el1_is_32bit(vcpu))  in vcpu_reset_hcr()
    88  if (kvm_has_mte(vcpu->kvm))  in vcpu_reset_hcr()
    125  return vcpu->arch.vsesr_el2;  in vcpu_get_vsesr()
    130  vcpu->arch.vsesr_el2 = vsesr;  in vcpu_set_vsesr()
    324  return kvm_vcpu_trap_is_iabt(vcpu) && !kvm_vcpu_abt_iss1tw(vcpu);  in kvm_vcpu_trap_is_exec_fault()
    399  if (vcpu_mode_priv(vcpu))  in kvm_vcpu_is_be()
    409  if (kvm_vcpu_is_be(vcpu)) {  in vcpu_data_guest_to_host()
    [all …]
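The vsesr hits above (lines 125 and 130) show the static-inline accessor pattern this header is built from: one trivial getter/setter per emulated field, so callers never poke vcpu->arch directly. A minimal standalone sketch of that shape; every toy_* name is invented for illustration and the struct is not the real kvm_vcpu layout.

#include <stdint.h>
#include <stdio.h>

/* Illustrative stand-in for the fields touched above; not the real
 * arm64 kvm_vcpu_arch layout. */
struct toy_vcpu_arch {
	uint64_t hcr_el2;
	uint64_t vsesr_el2;
};

struct toy_vcpu {
	struct toy_vcpu_arch arch;
};

#define TOY_HCR_E2H (1ULL << 34)	/* E2H is bit 34 of HCR_EL2 */

/* Getter/setter pair shaped like vcpu_get_vsesr()/vcpu_set_vsesr(). */
static inline uint64_t toy_get_vsesr(const struct toy_vcpu *vcpu)
{
	return vcpu->arch.vsesr_el2;
}

static inline void toy_set_vsesr(struct toy_vcpu *vcpu, uint64_t vsesr)
{
	vcpu->arch.vsesr_el2 = vsesr;
}

int main(void)
{
	struct toy_vcpu vcpu = { .arch = { 0, 0 } };

	vcpu.arch.hcr_el2 |= TOY_HCR_E2H;	/* the reset-time OR-in pattern */
	toy_set_vsesr(&vcpu, 0x25);
	printf("hcr=%#llx vsesr=%#llx\n",
	       (unsigned long long)vcpu.arch.hcr_el2,
	       (unsigned long long)toy_get_vsesr(&vcpu));
	return 0;
}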
/linux/arch/s390/kvm/
  priv.c
    36  vcpu->stat.instruction_ri++;  in handle_ri()
    50  return handle_ri(vcpu);  in kvm_s390_handle_aa()
    80  return handle_gs(vcpu);  in kvm_s390_handle_e3()
    433  wait_event(vcpu->kvm->arch.ipte_wq, !ipte_lock_held(vcpu));  in handle_ipte_interlock()
    453  return kvm_s390_inject_prog_irq(vcpu, &vcpu->arch.pgm);  in handle_test_block()
    484  inti = kvm_s390_get_io_int(vcpu->kvm, vcpu->arch.sie_block->gcr[6], 0);  in handle_tpi()
    563  vcpu->run->s390_tsch.ipb = vcpu->arch.sie_block->ipb;  in handle_tsch()
    997  vcpu->run->s.regs.gprs[reg1] |= vcpu->arch.sie_block->gpsw.mask >> 32;  in handle_epsw()
    1055  return kvm_s390_inject_prog_irq(vcpu, &vcpu->arch.pgm);  in handle_pfmf()
    1465  ipte_lock(vcpu);  in handle_tprot()
    [all …]
  intercept.c
    87  kvm_s390_vcpu_stop(vcpu);  in handle_stop()
    216  if (!IS_TE_ENABLED(vcpu) || !IS_ITDB_VALID(vcpu))  in handle_itdb()
    229  #define per_event(vcpu) (vcpu->arch.sie_block->iprcc & PGM_PER)  argument
    245  if (guestdbg_enabled(vcpu) && per_event(vcpu)) {  in handle_prog()
    254  trace_kvm_s390_intercept_prog(vcpu, vcpu->arch.sie_block->iprcc);  in handle_prog()
    263  rc = handle_itdb(vcpu);  in handle_prog()
    335  rc = guest_translate_address(vcpu, vcpu->run->s.regs.gprs[reg2],  in handle_mvpg_pei()
    344  rc = guest_translate_address(vcpu, vcpu->run->s.regs.gprs[reg1],  in handle_mvpg_pei()
    359  vcpu->stat.exit_pei++;  in handle_partial_execution()
    435  trace_kvm_s390_handle_operexc(vcpu, vcpu->arch.sie_block->ipa,  in handle_operexc()
    [all …]
  diag.c
    25  start = vcpu->run->s.regs.gprs[(vcpu->arch.sie_block->ipa & 0xf0) >> 4];  in diag_release_pages()
    26  end = vcpu->run->s.regs.gprs[vcpu->arch.sie_block->ipa & 0xf] + PAGE_SIZE;  in diag_release_pages()
    76  vcpu->run->s.regs.gprs[rx]);  in __diag_page_ref_service()
    80  rc = read_guest(vcpu, vcpu->run->s.regs.gprs[rx], rx, &parm, sizeof(parm));  in __diag_page_ref_service()
    149  kvm_vcpu_on_spin(vcpu, true);  in __diag_time_slice_end()
    171  tid = vcpu->run->s.regs.gprs[(vcpu->arch.sie_block->ipa & 0xf0) >> 4];  in __diag_time_slice_end_directed()
    175  if (tid == vcpu->vcpu_id)  in __diag_time_slice_end_directed()
    192  VCPU_EVENT(vcpu, 5,  in __diag_time_slice_end_directed()
    195  vcpu->stat.diag_9c_forward++;  in __diag_time_slice_end_directed()
    206  vcpu->stat.diag_9c_ignored++;  in __diag_time_slice_end_directed()
    [all …]
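Several hits above (lines 25, 26 and 171) decode a guest register number out of a nibble of sie_block->ipa, the instruction-parsing area, and use it to index run->s.regs.gprs[]. A standalone sketch of just that decode, using a toy struct rather than the real kvm_vcpu:

#include <stdint.h>
#include <stdio.h>

/* Toy layout; the real fields live in vcpu->arch.sie_block->ipa and
 * vcpu->run->s.regs.gprs[]. */
struct toy_vcpu {
	uint16_t ipa;		/* instruction-parsing area (opcode word) */
	uint64_t gprs[16];	/* guest general-purpose registers */
};

/* First operand register: high nibble of the low byte, as on line 25. */
static unsigned int toy_reg1(const struct toy_vcpu *vcpu)
{
	return (vcpu->ipa & 0xf0) >> 4;
}

/* Second operand register: low nibble, as on line 26. */
static unsigned int toy_reg2(const struct toy_vcpu *vcpu)
{
	return vcpu->ipa & 0x0f;
}

int main(void)
{
	struct toy_vcpu vcpu = { .ipa = 0x8310 };	/* reg1 = 1, reg2 = 0 */

	vcpu.gprs[1] = 0x2000;
	vcpu.gprs[0] = 0x5000;
	printf("start=%#llx end=%#llx\n",
	       (unsigned long long)vcpu.gprs[toy_reg1(&vcpu)],
	       (unsigned long long)vcpu.gprs[toy_reg2(&vcpu)]);
	return 0;
}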
  kvm-s390.h
    24  #define IS_TE_ENABLED(vcpu) ((vcpu->arch.sie_block->ecb & ECB_TE))  argument
    26  #define IS_ITDB_VALID(vcpu) ((*(char *)vcpu->arch.sie_block->itdba == TDB_FORMAT1))  argument
    82  return test_bit(vcpu->vcpu_idx, vcpu->kvm->arch.idle_mask);  in is_vcpu_idle()
    104  VCPU_EVENT(vcpu, 3, "set prefix of cpu %03u to 0x%x", vcpu->vcpu_id,  in kvm_s390_set_prefix()
    238  return vcpu->arch.pv.handle;  in kvm_s390_pv_cpu_get_handle()
    306  kvm_s390_rewind_psw(vcpu, kvm_s390_get_ilen(vcpu));  in kvm_s390_retry_instr()
    361  struct kvm_vcpu *vcpu;  in kvm_s390_vcpu_block_all()  local
    365  kvm_s390_vcpu_block(vcpu);  in kvm_s390_vcpu_block_all()
    371  struct kvm_vcpu *vcpu;  in kvm_s390_vcpu_unblock_all()  local
    374  kvm_s390_vcpu_unblock(vcpu);  in kvm_s390_vcpu_unblock_all()
    [all …]
  guestdbg.c
    132  vcpu->arch.guestdbg.cr0 = vcpu->arch.sie_block->gcr[0];  in kvm_s390_backup_guest_per_regs()
    133  vcpu->arch.guestdbg.cr9 = vcpu->arch.sie_block->gcr[9];  in kvm_s390_backup_guest_per_regs()
    134  vcpu->arch.guestdbg.cr10 = vcpu->arch.sie_block->gcr[10];  in kvm_s390_backup_guest_per_regs()
    135  vcpu->arch.guestdbg.cr11 = vcpu->arch.sie_block->gcr[11];  in kvm_s390_backup_guest_per_regs()
    140  vcpu->arch.sie_block->gcr[0] = vcpu->arch.guestdbg.cr0;  in kvm_s390_restore_guest_per_regs()
    141  vcpu->arch.sie_block->gcr[9] = vcpu->arch.guestdbg.cr9;  in kvm_s390_restore_guest_per_regs()
    142  vcpu->arch.sie_block->gcr[10] = vcpu->arch.guestdbg.cr10;  in kvm_s390_restore_guest_per_regs()
    143  vcpu->arch.sie_block->gcr[11] = vcpu->arch.guestdbg.cr11;  in kvm_s390_restore_guest_per_regs()
    590  if (debug_exit_required(vcpu, vcpu->arch.sie_block->perc,  in kvm_s390_handle_per_event()
    614  (pssec(vcpu) || hssec(vcpu)))  in kvm_s390_handle_per_event()
    [all …]
/linux/arch/powerpc/kvm/
  booke.c
    510  set_guest_srr(vcpu, vcpu->arch.regs.nip,  in kvmppc_booke_irqprio_deliver()
    514  set_guest_csrr(vcpu, vcpu->arch.regs.nip,  in kvmppc_booke_irqprio_deliver()
    518  set_guest_dsrr(vcpu, vcpu->arch.regs.nip,  in kvmppc_booke_irqprio_deliver()
    522  set_guest_mcsrr(vcpu, vcpu->arch.regs.nip,  in kvmppc_booke_irqprio_deliver()
    527  vcpu->arch.regs.nip = vcpu->arch.ivpr |  in kvmppc_booke_irqprio_deliver()
    803  vcpu->arch.pgdir = vcpu->kvm->mm->pgd;  in kvmppc_vcpu_run()
    1887  vcpu->arch.dec = vcpu->arch.decar;  in kvmppc_decrementer_func()
    2132  vcpu->arch.shared->pir = vcpu->vcpu_id;  in kvmppc_core_vcpu_create()
    2152  vcpu->kvm->arch.kvm_ops->vcpu_free(vcpu);  in kvmppc_core_vcpu_create()
    2159  vcpu->kvm->arch.kvm_ops->vcpu_free(vcpu);  in kvmppc_core_vcpu_free()
    [all …]
  book3s_emulate.c
    92  vcpu->arch.ppr_tm = vcpu->arch.ppr;  in kvmppc_copyto_vcpu_tm()
    94  vcpu->arch.amr_tm = vcpu->arch.amr;  in kvmppc_copyto_vcpu_tm()
    96  vcpu->arch.tar_tm = vcpu->arch.tar;  in kvmppc_copyto_vcpu_tm()
    111  vcpu->arch.ppr = vcpu->arch.ppr_tm;  in kvmppc_copyfrom_vcpu_tm()
    113  vcpu->arch.amr = vcpu->arch.amr_tm;  in kvmppc_copyfrom_vcpu_tm()
    115  vcpu->arch.tar = vcpu->arch.tar_tm;  in kvmppc_copyfrom_vcpu_tm()
    337  vcpu->arch.mmu.mtsrin(vcpu,  in kvmppc_core_emulate_op_pr()
    342  vcpu->arch.mmu.mtsrin(vcpu,  in kvmppc_core_emulate_op_pr()
    388  vcpu->arch.mmu.slbmte(vcpu,  in kvmppc_core_emulate_op_pr()
    396  vcpu->arch.mmu.slbie(vcpu,  in kvmppc_core_emulate_op_pr()
    [all …]
  booke_emulate.c
    26  vcpu->arch.regs.nip = vcpu->arch.shared->srr0;  in kvmppc_emul_rfi()
    27  kvmppc_set_msr(vcpu, vcpu->arch.shared->srr1);  in kvmppc_emul_rfi()
    32  vcpu->arch.regs.nip = vcpu->arch.dsrr0;  in kvmppc_emul_rfdi()
    33  kvmppc_set_msr(vcpu, vcpu->arch.dsrr1);  in kvmppc_emul_rfdi()
    38  vcpu->arch.regs.nip = vcpu->arch.csrr0;  in kvmppc_emul_rfci()
    39  kvmppc_set_msr(vcpu, vcpu->arch.csrr1);  in kvmppc_emul_rfci()
    53  kvmppc_emul_rfi(vcpu);  in kvmppc_booke_emulate_op()
    80  kvmppc_set_gpr(vcpu, rt, vcpu->arch.shared->msr);  in kvmppc_booke_emulate_op()
    86  kvmppc_set_msr(vcpu, kvmppc_get_gpr(vcpu, rs));  in kvmppc_booke_emulate_op()
    90  vcpu->arch.shared->msr = (vcpu->arch.shared->msr & ~MSR_EE)  in kvmppc_booke_emulate_op()
    [all …]
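Lines 26-39 show the three return-from-interrupt flavours on BookE: each restores the guest PC (nip) and MSR from its own save/restore register pair (srr0/srr1, csrr0/csrr1, dsrr0/dsrr1). A compilable sketch of the pattern with toy types only; the real kvmppc_set_msr() has architecture side effects beyond the plain assignment.

#include <stdint.h>
#include <stdio.h>

/* Toy model; not the real kvm_vcpu layout. */
struct toy_vcpu {
	uint64_t nip, msr;
	uint64_t srr0, srr1;	/* normal interrupts */
	uint64_t csrr0, csrr1;	/* critical interrupts */
	uint64_t dsrr0, dsrr1;	/* debug interrupts */
};

static void toy_set_msr(struct toy_vcpu *vcpu, uint64_t msr)
{
	/* stand-in for kvmppc_set_msr(), minus its side effects */
	vcpu->msr = msr;
}

static void toy_emul_rfi(struct toy_vcpu *vcpu)
{
	vcpu->nip = vcpu->srr0;
	toy_set_msr(vcpu, vcpu->srr1);
}

static void toy_emul_rfci(struct toy_vcpu *vcpu)
{
	vcpu->nip = vcpu->csrr0;
	toy_set_msr(vcpu, vcpu->csrr1);
}

static void toy_emul_rfdi(struct toy_vcpu *vcpu)
{
	vcpu->nip = vcpu->dsrr0;
	toy_set_msr(vcpu, vcpu->dsrr1);
}

int main(void)
{
	struct toy_vcpu vcpu = { .srr0 = 0x700, .srr1 = 0x8000 };

	toy_emul_rfi(&vcpu);
	toy_emul_rfci(&vcpu);	/* same shape, different register pair */
	toy_emul_rfdi(&vcpu);
	printf("nip=%#llx msr=%#llx\n",
	       (unsigned long long)vcpu.nip, (unsigned long long)vcpu.msr);
	return 0;
}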
  book3s_pr.c
    301  vcpu->arch.purr += get_tb() - vcpu->arch.entry_tb;  in kvmppc_copy_from_svcpu()
    302  vcpu->arch.spurr += get_tb() - vcpu->arch.entry_tb;  in kvmppc_copy_from_svcpu()
    405  _kvmppc_restore_tm_pr(vcpu, kvmppc_get_msr(vcpu));  in kvmppc_restore_tm_pr()
    737  if (vcpu->arch.mmu.is_dcbz32(vcpu) &&  in kvmppc_handle_pagefault()
    782  else if (vcpu->arch.mmu.is_dcbz32(vcpu) &&  in kvmppc_handle_pagefault()
    1182  } else if (vcpu->arch.mmu.is_dcbz32(vcpu) &&  in kvmppc_handle_exit_pr()
    1328  kvmppc_set_gpr(vcpu, 3, kvmppc_kvm_pv(vcpu));  in kvmppc_handle_exit_pr()
    1492  vcpu->arch.mmu.slbmte(vcpu, 0, 0);  in kvm_arch_vcpu_ioctl_set_sregs_pr()
    1493  vcpu->arch.mmu.slbia(vcpu);  in kvm_arch_vcpu_ioctl_set_sregs_pr()
    1500  vcpu->arch.mmu.slbmte(vcpu, rs, rb);  in kvm_arch_vcpu_ioctl_set_sregs_pr()
    [all …]
  emulate_loadstore.c
    87  vcpu->arch.mmio_vsx_offset = 0;  in kvmppc_emulate_loadstore()
    92  vcpu->arch.mmio_vmx_offset = 0;  in kvmppc_emulate_loadstore()
    96  vcpu->arch.regs.msr = vcpu->arch.shared->msr;  in kvmppc_emulate_loadstore()
    147  vcpu->arch.mmio_copy_type =  in kvmppc_emulate_loadstore()
    150  vcpu->arch.mmio_copy_type =  in kvmppc_emulate_loadstore()
    153  vcpu->arch.mmio_copy_type =  in kvmppc_emulate_loadstore()
    156  vcpu->arch.mmio_copy_type =  in kvmppc_emulate_loadstore()
    245  vcpu->kvm->arch.kvm_ops->giveup_ext(vcpu,  in kvmppc_emulate_loadstore()
    269  vcpu->kvm->arch.kvm_ops->giveup_ext(vcpu,  in kvmppc_emulate_loadstore()
    314  vcpu->kvm->arch.kvm_ops->giveup_ext(vcpu,  in kvmppc_emulate_loadstore()
    [all …]
  book3s.c
    516  ret = vcpu->kvm->arch.kvm_ops->get_sregs(vcpu, sregs);  in kvm_arch_vcpu_ioctl_get_sregs()
    528  ret = vcpu->kvm->arch.kvm_ops->set_sregs(vcpu, sregs);  in kvm_arch_vcpu_ioctl_set_sregs()
    638  if (!vcpu->arch.icp && !vcpu->arch.xive_vcpu) {  in kvmppc_get_one_reg()
    723  if (!vcpu->arch.icp && !vcpu->arch.xive_vcpu) {  in kvmppc_set_one_reg()
    774  vcpu->kvm->arch.kvm_ops->vcpu_load(vcpu, cpu);  in kvmppc_core_vcpu_load()
    779  vcpu->kvm->arch.kvm_ops->vcpu_put(vcpu);  in kvmppc_core_vcpu_put()
    784  vcpu->kvm->arch.kvm_ops->set_msr(vcpu, msr);  in kvmppc_set_msr()
    790  return vcpu->kvm->arch.kvm_ops->vcpu_run(vcpu);  in kvmppc_vcpu_run()
    816  return vcpu->kvm->arch.kvm_ops->vcpu_create(vcpu);  in kvmppc_core_vcpu_create()
    821  vcpu->kvm->arch.kvm_ops->vcpu_free(vcpu);  in kvmppc_core_vcpu_free()
    [all …]
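Almost every hit in this file goes through vcpu->kvm->arch.kvm_ops: book3s common code dispatches to whichever backend the VM uses (HV or PR) through a per-VM table of function pointers. A toy version of that indirection, with invented names and only three ops shown:

#include <stdio.h>

struct toy_vcpu;

/* One table per backend, in the spirit of the real kvm_ops tables. */
struct toy_kvm_ops {
	void (*vcpu_load)(struct toy_vcpu *vcpu, int cpu);
	void (*vcpu_put)(struct toy_vcpu *vcpu);
	int  (*vcpu_run)(struct toy_vcpu *vcpu);
};

struct toy_vcpu {
	const struct toy_kvm_ops *ops;	/* stands in for vcpu->kvm->arch.kvm_ops */
};

static void pr_load(struct toy_vcpu *vcpu, int cpu)
{
	(void)vcpu;
	printf("pr backend: load on cpu %d\n", cpu);
}

static void pr_put(struct toy_vcpu *vcpu)
{
	(void)vcpu;
	printf("pr backend: put\n");
}

static int pr_run(struct toy_vcpu *vcpu)
{
	(void)vcpu;
	printf("pr backend: run\n");
	return 0;
}

static const struct toy_kvm_ops pr_ops = {
	.vcpu_load = pr_load,
	.vcpu_put  = pr_put,
	.vcpu_run  = pr_run,
};

/* Common code never names the backend, as in book3s.c above. */
static int toy_vcpu_run(struct toy_vcpu *vcpu)
{
	int ret;

	vcpu->ops->vcpu_load(vcpu, 0);
	ret = vcpu->ops->vcpu_run(vcpu);
	vcpu->ops->vcpu_put(vcpu);
	return ret;
}

int main(void)
{
	struct toy_vcpu vcpu = { .ops = &pr_ops };

	return toy_vcpu_run(&vcpu);
}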
  book3s_paired_singles.c
    152  kvm_cvt_df(&VCPU_FPR(vcpu, rt), &vcpu->arch.qpr[rt]);  in kvmppc_sync_qpr()
    757  VCPU_FPR(vcpu, ax_rd) = VCPU_FPR(vcpu, ax_rb);  in kvmppc_emulate_paired_single()
    759  vcpu->arch.qpr[ax_rd] = vcpu->arch.qpr[ax_rb];  in kvmppc_emulate_paired_single()
    768  VCPU_FPR(vcpu, ax_rd) = VCPU_FPR(vcpu, ax_rb);  in kvmppc_emulate_paired_single()
    769  vcpu->arch.qpr[ax_rd] = vcpu->arch.qpr[ax_rb];  in kvmppc_emulate_paired_single()
    777  VCPU_FPR(vcpu, ax_rd) = VCPU_FPR(vcpu, ax_rb);  in kvmppc_emulate_paired_single()
    779  vcpu->arch.qpr[ax_rd] = vcpu->arch.qpr[ax_rb];  in kvmppc_emulate_paired_single()
    784  VCPU_FPR(vcpu, ax_rd) = VCPU_FPR(vcpu, ax_rb);  in kvmppc_emulate_paired_single()
    786  vcpu->arch.qpr[ax_rd] = vcpu->arch.qpr[ax_rb];  in kvmppc_emulate_paired_single()
    791  VCPU_FPR(vcpu, ax_rd) = VCPU_FPR(vcpu, ax_ra);  in kvmppc_emulate_paired_single()
    [all …]
  book3s_hv_tm.c
    29  vcpu->arch.tfiar = tfiar;  in emulate_tx_failure()
    31  vcpu->arch.texasr = (vcpu->arch.texasr & 0x3ffffff) | texasr;  in emulate_tx_failure()
    56  vcpu->arch.regs.nip -= 4;  in kvmhv_p9_tm_emulation()
    79  vcpu->arch.cfar = vcpu->arch.regs.nip;  in kvmhv_p9_tm_emulation()
    80  vcpu->arch.regs.nip = vcpu->arch.shregs.srr0;  in kvmhv_p9_tm_emulation()
    103  bescr = vcpu->arch.bescr;  in kvmhv_p9_tm_emulation()
    113  vcpu->arch.cfar = vcpu->arch.regs.nip;  in kvmhv_p9_tm_emulation()
    114  vcpu->arch.regs.nip = vcpu->arch.ebbrr;  in kvmhv_p9_tm_emulation()
    156  vcpu->arch.regs.ccr = (vcpu->arch.regs.ccr & 0x0fffffff) |  in kvmhv_p9_tm_emulation()
    203  vcpu->arch.regs.ccr = (vcpu->arch.regs.ccr & 0x0fffffff) |  in kvmhv_p9_tm_emulation()
    [all …]
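Lines 156 and 203 update condition-register field 0 by masking ccr with 0x0fffffff and OR-ing a new value into the top nibble. The splice in isolation, as a tiny standalone helper:

#include <stdint.h>
#include <stdio.h>

/* Clear the top nibble of CR (field CR0) and splice in a 4-bit value,
 * matching the (ccr & 0x0fffffff) | ... pattern shown above. */
static uint32_t splice_cr0(uint32_t ccr, uint32_t cr0_bits)
{
	return (ccr & 0x0fffffff) | ((cr0_bits & 0xf) << 28);
}

int main(void)
{
	printf("%#010x\n", splice_cr0(0xdeadbeef, 0x8));	/* -> 0x8eadbeef */
	return 0;
}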
  powerpc.c
    406  vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);  in kvmppc_ld()
    408  srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);  in kvmppc_ld()
    765  vcpu->arch.waitp = &vcpu->wait;  in kvm_arch_vcpu_create()
    766  kvmppc_create_vcpu_debugfs(vcpu, vcpu->vcpu_id);  in kvm_arch_vcpu_create()
    787  kvmppc_mpic_disconnect_vcpu(vcpu->arch.mpic, vcpu);  in kvm_arch_vcpu_destroy()
    1168  kvmppc_set_gpr(vcpu, vcpu->arch.io_gpr, gpr);  in kvmppc_complete_mmio_load()
    1172  vcpu->kvm->arch.kvm_ops->giveup_ext(vcpu, MSR_FP);  in kvmppc_complete_mmio_load()
    1312  vcpu->arch.paddr_accessed += vcpu->run->mmio.len;  in kvmppc_handle_vsx_load()
    1460  vcpu->arch.paddr_accessed += vcpu->run->mmio.len;  in kvmppc_handle_vsx_store()
    1520  vcpu->arch.paddr_accessed += vcpu->run->mmio.len;  in kvmppc_handle_vmx_load()
    [all …]
/linux/arch/mips/kvm/
  emulate.c
    248  err = kvm_compute_return_epc(vcpu, vcpu->arch.pc,  in update_pc()
    1005  vcpu->arch.pc, vcpu->arch.host_cp0_badvaddr,  in kvm_mips_emulate_store()
    1015  vcpu->arch.pc, vcpu->arch.host_cp0_badvaddr,  in kvm_mips_emulate_store()
    1024  vcpu->arch.pc, vcpu->arch.host_cp0_badvaddr,  in kvm_mips_emulate_store()
    1033  vcpu->arch.pc, vcpu->arch.host_cp0_badvaddr,  in kvm_mips_emulate_store()
    1063  vcpu->arch.pc, vcpu->arch.host_cp0_badvaddr,  in kvm_mips_emulate_store()
    1093  vcpu->arch.pc, vcpu->arch.host_cp0_badvaddr,  in kvm_mips_emulate_store()
    1141  vcpu->arch.pc, vcpu->arch.host_cp0_badvaddr,  in kvm_mips_emulate_store()
    1188  vcpu->arch.pc, vcpu->arch.host_cp0_badvaddr,  in kvm_mips_emulate_store()
    1292  vcpu->arch.io_pc = vcpu->arch.pc;  in kvm_mips_emulate_load()
    [all …]
  mips.c
    321  vcpu->kvm, vcpu->vcpu_id, vcpu);  in kvm_arch_vcpu_create()
    445  vcpu_load(vcpu);  in kvm_arch_vcpu_ioctl_run()
    481  vcpu_put(vcpu);  in kvm_arch_vcpu_ioctl_run()
    499  dvcpu = vcpu;  in kvm_vcpu_ioctl_interrupt()
    931  vcpu_load(vcpu);  in kvm_arch_vcpu_ioctl()
    978  vcpu_put(vcpu);  in kvm_arch_vcpu_ioctl()
    1115  if (!vcpu)  in kvm_arch_vcpu_dump_regs()
    1126  vcpu->arch.gprs[i + 2], vcpu->arch.gprs[i + 3]);  in kvm_arch_vcpu_dump_regs()
    1145  vcpu_load(vcpu);  in kvm_arch_vcpu_ioctl_set_regs()
    1154  vcpu_put(vcpu);  in kvm_arch_vcpu_ioctl_set_regs()
    [all …]
/linux/arch/x86/kvm/
  x86.c
    890  !load_pdptrs(vcpu, vcpu->arch.walk_mmu, kvm_read_cr3(vcpu)))  in kvm_set_cr0()
    929  vcpu->arch.pkru != vcpu->arch.host_pkru)  in kvm_load_guest_xsave_state()
    1003  __kvm_set_xcr(vcpu, kvm_rcx_read(vcpu), kvm_read_edx_eax(vcpu))) {  in kvm_emulate_xsetbv()
    1157  if (is_pae_paging(vcpu) && !load_pdptrs(vcpu, vcpu->arch.walk_mmu, cr3))  in kvm_set_cr3()
    1208  vcpu->arch.eff_db[i] = vcpu->arch.db[i];  in kvm_update_dr0123()
    2462  vcpu, vcpu->arch.tsc_scaling_ratio);  in kvm_vcpu_write_tsc_multiplier()
    7617  ctxt->vcpu = vcpu;  in alloc_emulate_ctxt()
    8306  emulator_pio_in(vcpu, vcpu->arch.pio.size, vcpu->arch.pio.port, &val, 1);  in complete_fast_pio_in()
    10576  load_pdptrs(vcpu, vcpu->arch.walk_mmu, kvm_read_cr3(vcpu));  in __set_sregs_common()
    10832  vcpu, &vcpu->run->s.regs.events);  in store_regs()
    [all …]
  kvm_cache_regs.h
    85  return vcpu->arch.regs[reg];  in kvm_register_read_raw()
    94  vcpu->arch.regs[reg] = val;  in kvm_register_write_raw()
    95  kvm_register_mark_dirty(vcpu, reg);  in kvm_register_write_raw()
    139  return vcpu->arch.cr0 & mask;  in kvm_read_cr0_bits()
    153  return vcpu->arch.cr4 & mask;  in kvm_read_cr4_bits()
    160  return vcpu->arch.cr3;  in kvm_read_cr3()
    170  return (kvm_rax_read(vcpu) & -1u)  in kvm_read_edx_eax()
    176  vcpu->arch.hflags |= HF_GUEST_MASK;  in enter_guest_mode()
    177  vcpu->stat.guest_mode = 1;  in enter_guest_mode()
    182  vcpu->arch.hflags &= ~HF_GUEST_MASK;  in leave_guest_mode()
    [all …]
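Lines 85-95 are the register-cache idiom: reads come straight from the cached arch.regs[] array, and writes store the value and then mark the register dirty so it gets written back to hardware state before the next guest entry. A toy model with invented names; the real cache also tracks an "available" bit for registers that must first be fetched from the VMCS or equivalent.

#include <stdint.h>
#include <stdio.h>

enum toy_reg { TOY_RAX, TOY_RCX, TOY_RDX, TOY_NR_REGS };

/* Toy layout, not the real vcpu->arch. */
struct toy_vcpu {
	uint64_t regs[TOY_NR_REGS];
	uint32_t regs_dirty;		/* one bit per register */
};

static uint64_t toy_register_read(struct toy_vcpu *vcpu, enum toy_reg reg)
{
	return vcpu->regs[reg];
}

static void toy_register_mark_dirty(struct toy_vcpu *vcpu, enum toy_reg reg)
{
	vcpu->regs_dirty |= 1u << reg;
}

static void toy_register_write(struct toy_vcpu *vcpu, enum toy_reg reg,
			       uint64_t val)
{
	vcpu->regs[reg] = val;
	toy_register_mark_dirty(vcpu, reg);	/* write, then mark dirty */
}

int main(void)
{
	struct toy_vcpu vcpu = { { 0 }, 0 };

	toy_register_write(&vcpu, TOY_RCX, 0x1234);
	printf("rcx=%#llx dirty=%#x\n",
	       (unsigned long long)toy_register_read(&vcpu, TOY_RCX),
	       vcpu.regs_dirty);
	return 0;
}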
  x86.h
    130  return vcpu->arch.exception.injected || vcpu->arch.interrupt.injected ||  in kvm_event_needs_reinjection()
    131  vcpu->arch.nmi_injected;  in kvm_event_needs_reinjection()
    159  if (!is_long_mode(vcpu))  in is_64_bit_mode()
    172  return vcpu->arch.guest_state_protected || is_64_bit_mode(vcpu);  in is_64_bit_hypercall()
    186  return vcpu->arch.walk_mmu == &vcpu->arch.nested_mmu;  in mmu_is_nested()
    206  return !is_long_mode(vcpu) && is_pae(vcpu) && is_paging(vcpu);  in is_pae_paging()
    236  vcpu->arch.mmio_gva = mmu_is_nested(vcpu) ? 0 : gva & PAGE_MASK;  in vcpu_cache_mmio_info()
    244  return vcpu->arch.mmio_gen == kvm_memslots(vcpu->kvm)->generation;  in vcpu_match_mmio_gen()
    263  if (vcpu_match_mmio_gen(vcpu) && vcpu->arch.mmio_gva &&  in vcpu_match_mmio_gva()
    272  if (vcpu_match_mmio_gen(vcpu) && vcpu->arch.mmio_gfn &&  in vcpu_match_mmio_gpa()
    [all …]
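Lines 236-272 implement a small MMIO fast-path cache keyed by memslot generation: the cached gva/gfn is trusted only while vcpu->arch.mmio_gen still equals the current memslots generation, so any memslot change invalidates the cache for free, with no explicit flush. A standalone model of the idea; fields and names are illustrative, not the real layout.

#include <stdint.h>
#include <stdio.h>

struct toy_slots {
	uint64_t generation;	/* bumped on every memslot change */
};

struct toy_vcpu {
	struct toy_slots *slots;
	uint64_t mmio_gva;	/* cached guest virtual address (page) */
	uint64_t mmio_gen;	/* generation the cache was filled under */
};

#define TOY_PAGE_MASK (~0xfffULL)

static void toy_cache_mmio_info(struct toy_vcpu *vcpu, uint64_t gva)
{
	vcpu->mmio_gva = gva & TOY_PAGE_MASK;
	vcpu->mmio_gen = vcpu->slots->generation;
}

static int toy_match_mmio_gva(struct toy_vcpu *vcpu, uint64_t gva)
{
	/* stale generation means the cached entry is silently ignored */
	return vcpu->mmio_gen == vcpu->slots->generation &&
	       vcpu->mmio_gva == (gva & TOY_PAGE_MASK);
}

int main(void)
{
	struct toy_slots slots = { .generation = 7 };
	struct toy_vcpu vcpu = { .slots = &slots };

	toy_cache_mmio_info(&vcpu, 0xffff800012345678ULL);
	printf("hit: %d\n", toy_match_mmio_gva(&vcpu, 0xffff800012345000ULL));
	slots.generation++;	/* memslots changed: cache is now invalid */
	printf("hit: %d\n", toy_match_mmio_gva(&vcpu, 0xffff800012345000ULL));
	return 0;
}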
/linux/arch/riscv/kvm/
  vcpu.c
    130  !vcpu->arch.power_off && !vcpu->arch.pause);  in kvm_arch_vcpu_runnable()
    648  (!vcpu->arch.power_off) && (!vcpu->arch.pause),  in kvm_riscv_check_vcpu_requests()
    651  if (vcpu->arch.power_off || vcpu->arch.pause) {  in kvm_riscv_check_vcpu_requests()
    687  vcpu->arch.srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);  in kvm_arch_vcpu_ioctl_run()
    691  ret = kvm_riscv_vcpu_mmio_return(vcpu, vcpu->run);  in kvm_arch_vcpu_ioctl_run()
    700  ret = kvm_riscv_vcpu_sbi_return(vcpu, vcpu->run);  in kvm_arch_vcpu_ioctl_run()
    708  srcu_read_unlock(&vcpu->kvm->srcu, vcpu->arch.srcu_idx);  in kvm_arch_vcpu_ioctl_run()
    747  srcu_read_unlock(&vcpu->kvm->srcu, vcpu->arch.srcu_idx);  in kvm_arch_vcpu_ioctl_run()
    765  vcpu->arch.srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);  in kvm_arch_vcpu_ioctl_run()
    813  vcpu->arch.srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);  in kvm_arch_vcpu_ioctl_run()
    [all …]
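The run-loop hits above bracket guest entry with srcu_read_lock()/srcu_read_unlock() on vcpu->kvm->srcu: the lock is held while memslots may be touched, dropped across anything that can sleep or exit to userspace, and re-taken afterwards. A structural sketch only; toy_lock()/toy_unlock() are prints standing in for the SRCU calls.

#include <stdio.h>

static int toy_lock(void)
{
	puts("srcu-style read lock");
	return 42;			/* lock cookie, like srcu_read_lock() */
}

static void toy_unlock(int idx)
{
	printf("srcu-style read unlock (%d)\n", idx);
}

static int toy_vcpu_run(int need_userspace_exit)
{
	int srcu_idx = toy_lock();	/* held while memslots may be used */

	/* ... enter guest, handle in-kernel exits ... */

	if (need_userspace_exit) {
		/* never sleep or return to userspace with the lock held */
		toy_unlock(srcu_idx);
		return 1;
	}

	toy_unlock(srcu_idx);
	return 0;
}

int main(void)
{
	return toy_vcpu_run(1);
}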
/linux/arch/arm64/kvm/
  debug.c
    99  if (vcpu->guest_debug)  in kvm_arm_setup_mdcr_el2()
    126  kvm_arm_setup_mdcr_el2(vcpu);  in kvm_arm_vcpu_init_debug()
    136  vcpu->arch.debug_ptr = &vcpu->arch.vcpu_debug_state;  in kvm_arm_reset_debug_ptr()
    159  trace_kvm_arm_setup_debug(vcpu, vcpu->guest_debug);  in kvm_arm_setup_debug()
    161  kvm_arm_setup_mdcr_el2(vcpu);  in kvm_arm_setup_debug()
    164  if (vcpu->guest_debug) {  in kvm_arm_setup_debug()
    166  save_guest_debug_regs(vcpu);  in kvm_arm_setup_debug()
    216  vcpu->arch.debug_ptr = &vcpu->arch.external_debug_state;  in kvm_arm_setup_debug()
    229  BUG_ON(!vcpu->guest_debug &&  in kvm_arm_setup_debug()
    230  vcpu->arch.debug_ptr != &vcpu->arch.vcpu_debug_state);  in kvm_arm_setup_debug()
    [all …]
  pmu-emul.c
    118  eventsel = __vcpu_sys_reg(vcpu, reg) & kvm_pmu_event_mask(vcpu->kvm);  in kvm_pmu_idx_has_chain_evtype()
    192  __vcpu_sys_reg(vcpu, reg) += (s64)val - kvm_pmu_get_counter_value(vcpu, select_idx);  in kvm_pmu_set_counter_value()
    396  int ret = kvm_vgic_inject_irq(vcpu->kvm, vcpu->vcpu_id,  in kvm_pmu_update_state()
    436  kvm_pmu_update_state(vcpu);  in kvm_pmu_flush_hwstate()
    448  kvm_pmu_update_state(vcpu);  in kvm_pmu_sync_hwstate()
    458  struct kvm_vcpu *vcpu;  in kvm_pmu_perf_overflow_notify_vcpu()  local
    464  kvm_vcpu_kick(vcpu);  in kvm_pmu_perf_overflow_notify_vcpu()
    501  kvm_vcpu_kick(vcpu);  in kvm_pmu_perf_overflow()
    883  ret = kvm_vgic_set_owner(vcpu, vcpu->arch.pmu.irq_num,  in kvm_arm_pmu_v3_init()
    884  &vcpu->arch.pmu);  in kvm_arm_pmu_v3_init()
    [all …]
/linux/arch/x86/kvm/vmx/
  sgx.c
    31  if (!is_long_mode(vcpu)) {  in sgx_get_encls_gva()
    49  kvm_inject_gp(vcpu, 0);  in sgx_get_encls_gva()
    134  kvm_inject_gp(vcpu, 0);  in sgx_inject_fault()
    168  kvm_inject_gp(vcpu, 0);  in __handle_encls_ecreate()
    178  kvm_inject_gp(vcpu, 0);  in __handle_encls_ecreate()
    186  kvm_inject_gp(vcpu, 0);  in __handle_encls_ecreate()
    217  if (sgx_get_encls_gva(vcpu, kvm_rbx_read(vcpu), 32, 32, &pageinfo_gva) ||  in handle_encls_ecreate()
    218  sgx_get_encls_gva(vcpu, kvm_rcx_read(vcpu), 4096, 4096, &secs_gva))  in handle_encls_ecreate()
    294  if (sgx_get_encls_gva(vcpu, kvm_rbx_read(vcpu), 1808, 4096, &sig_gva) ||  in handle_encls_einit()
    295  sgx_get_encls_gva(vcpu, kvm_rcx_read(vcpu), 4096, 4096, &secs_gva) ||  in handle_encls_einit()
    [all …]
/linux/arch/powerpc/include/asm/
  kvm_ppc.h
    333  ret = kvmppc_load_last_inst(vcpu, type, &vcpu->arch.last_inst);  in kvmppc_get_last_inst()
    338  swab32(vcpu->arch.last_inst) :  in kvmppc_get_last_inst()
    339  vcpu->arch.last_inst;  in kvmppc_get_last_inst()
    548  vcpu->kvm->arch.kvm_ops->fast_vcpu_kick(vcpu);  in kvmppc_fast_vcpu_kick()
    583  kvm_vcpu_kick(vcpu);  in kvmppc_fast_vcpu_kick()
    821  return vcpu->arch.epr;  in kvmppc_get_epr()
    832  vcpu->arch.epr = epr;  in kvmppc_set_epr()
    850  struct kvm_vcpu *vcpu, u32 cpu)  in kvmppc_mpic_connect_vcpu()  argument
    856  struct kvm_vcpu *vcpu)  in kvmppc_mpic_disconnect_vcpu()  argument
    1024  ea = kvmppc_get_gpr(vcpu, rb);  in kvmppc_get_ea_indexed()
    [all …]
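Lines 333-339 show kvmppc_get_last_inst() byte-swapping the cached instruction word when the guest runs in the opposite endianness from the host. A standalone sketch; swab32() here is a local re-implementation of the kernel helper, and the guest_byteswapped flag stands in for the real cross-endian mode test.

#include <stdint.h>
#include <stdio.h>

/* Local re-implementation of the kernel's swab32() byte reversal. */
static uint32_t swab32(uint32_t x)
{
	return (x >> 24) | ((x >> 8) & 0x0000ff00) |
	       ((x << 8) & 0x00ff0000) | (x << 24);
}

/* 'guest_byteswapped' stands in for the real cross-endian mode test. */
static uint32_t toy_fixup_last_inst(uint32_t last_inst, int guest_byteswapped)
{
	return guest_byteswapped ? swab32(last_inst) : last_inst;
}

int main(void)
{
	uint32_t inst = 0x7c0002a6u;	/* an example instruction word */

	printf("as-is: %#010x  swapped: %#010x\n",
	       toy_fixup_last_inst(inst, 0), toy_fixup_last_inst(inst, 1));
	return 0;
}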