/linux/arch/arm64/kvm/hyp/nvhe/
hyp-main.c
     22  void __kvm_hyp_host_forward_smc(struct kvm_cpu_context *host_ctxt);
     24  static void handle___kvm_vcpu_run(struct kvm_cpu_context *host_ctxt)  in handle___kvm_vcpu_run()
     31  static void handle___kvm_adjust_pc(struct kvm_cpu_context *host_ctxt)  in handle___kvm_adjust_pc()
     71  static void handle___kvm_enable_ssbs(struct kvm_cpu_context *host_ctxt)  in handle___kvm_enable_ssbs()
     95  static void handle___vgic_v3_init_lrs(struct kvm_cpu_context *host_ctxt)  in handle___vgic_v3_init_lrs()
    100  static void handle___kvm_get_mdcr_el2(struct kvm_cpu_context *host_ctxt)  in handle___kvm_get_mdcr_el2()
    119  static void handle___pkvm_init(struct kvm_cpu_context *host_ctxt)  in handle___pkvm_init()
    171  typedef void (*hcall_t)(struct kvm_cpu_context *);
    201  static void handle_host_hcall(struct kvm_cpu_context *host_ctxt)  in handle_host_hcall()
    241  static void handle_host_smc(struct kvm_cpu_context *host_ctxt)  in handle_host_smc()
    [all …]
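Every handler above shares one calling convention: on hypercall entry the host's general-purpose registers have been saved into a struct kvm_cpu_context, so arguments are read out of the saved x registers and the result is written back into them. A minimal sketch of the pattern, modelled on the DECLARE_REG/cpu_reg macros this file uses upstream (reproduced from memory, so treat the details as approximate):

    /* Pull hypercall argument 'reg' out of the saved host registers. */
    #define DECLARE_REG(type, name, ctxt, reg) \
            type name = (type)cpu_reg(ctxt, (reg))

    static void handle___kvm_vcpu_run(struct kvm_cpu_context *host_ctxt)
    {
            /* x0 carried the hypercall id; x1 carries the first argument. */
            DECLARE_REG(struct kvm_vcpu *, vcpu, host_ctxt, 1);

            /* The return value travels back to the host in x1. */
            cpu_reg(host_ctxt, 1) = __kvm_vcpu_run(kern_hyp_va(vcpu));
    }

handle_host_hcall() (line 201) dispatches by indexing an hcall_t table with the id taken from x0; handle_host_smc() (line 241) first offers the call to kvm_host_psci_handler() and forwards anything unhandled to EL3 via __kvm_hyp_host_forward_smc() (line 22).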
psci-relay.c
     20  void __noreturn __host_enter(struct kvm_cpu_context *host_ctxt);
     72  static unsigned long psci_forward(struct kvm_cpu_context *host_ctxt)  in psci_forward()
    107  static int psci_cpu_on(u64 func_id, struct kvm_cpu_context *host_ctxt)  in psci_cpu_on()
    151  static int psci_cpu_suspend(u64 func_id, struct kvm_cpu_context *host_ctxt)  in psci_cpu_suspend()
    179  static int psci_system_suspend(u64 func_id, struct kvm_cpu_context *host_ctxt)  in psci_system_suspend()
    206  struct kvm_cpu_context *host_ctxt;  in kvm_host_psci_cpu_entry()
    224  static unsigned long psci_0_1_handler(u64 func_id, struct kvm_cpu_context *host_ctxt)  in psci_0_1_handler()
    236  static unsigned long psci_0_2_handler(u64 func_id, struct kvm_cpu_context *host_ctxt)  in psci_0_2_handler()
    262  static unsigned long psci_1_0_handler(u64 func_id, struct kvm_cpu_context *host_ctxt)  in psci_1_0_handler()
    276  bool kvm_host_psci_handler(struct kvm_cpu_context *host_ctxt)  in kvm_host_psci_handler()
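Under protected KVM the host's PSCI SMCs trap to EL2, so psci-relay.c either intercepts a call (CPU_ON and the suspend family need the hypervisor to regain control when the CPU comes back) or relays it to EL3 untouched. The relay path is a register-for-register pass-through; a sketch, assuming psci_call() wraps the actual SMC:

    static unsigned long psci_forward(struct kvm_cpu_context *host_ctxt)
    {
            /* Hand the host's x0-x3 straight through to EL3 firmware. */
            return psci_call(cpu_reg(host_ctxt, 0), cpu_reg(host_ctxt, 1),
                             cpu_reg(host_ctxt, 2), cpu_reg(host_ctxt, 3));
    }

The per-version handlers at lines 224, 236 and 262 differ only in which function IDs they recognise before deciding between interception and forwarding.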
switch.c
     35  DEFINE_PER_CPU(struct kvm_cpu_context, kvm_hyp_ctxt);
     56  struct kvm_cpu_context *ctxt = &vcpu->arch.ctxt;  in __activate_traps()
    127  static bool __pmu_switch_to_guest(struct kvm_cpu_context *host_ctxt)  in __pmu_switch_to_guest()
    147  static void __pmu_switch_to_host(struct kvm_cpu_context *host_ctxt)  in __pmu_switch_to_host()
    257  struct kvm_cpu_context *host_ctxt;  in __kvm_vcpu_run()
    258  struct kvm_cpu_context *guest_ctxt;  in __kvm_vcpu_run()
    356  struct kvm_cpu_context *host_ctxt;  in hyp_panic()
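__kvm_vcpu_run() is where the two contexts declared at lines 257-258 meet: the per-CPU host context and the vcpu's own guest context. An abridged sketch of its shape (trap configuration, FP/SVE, GIC and timer handling all omitted):

    int __kvm_vcpu_run(struct kvm_vcpu *vcpu)
    {
            struct kvm_cpu_context *host_ctxt;
            struct kvm_cpu_context *guest_ctxt;
            u64 exit_code;

            host_ctxt = &this_cpu_ptr(&kvm_host_data)->host_ctxt;
            host_ctxt->__hyp_running_vcpu = vcpu;
            guest_ctxt = &vcpu->arch.ctxt;

            __sysreg_save_state_nvhe(host_ctxt);      /* host EL1 state out */
            __sysreg_restore_state_nvhe(guest_ctxt);  /* guest EL1 state in */

            do {
                    /* Jump into the guest until something traps to EL2. */
                    exit_code = __guest_enter(vcpu);
            } while (fixup_guest_exit(vcpu, &exit_code));

            __sysreg_save_state_nvhe(guest_ctxt);
            __sysreg_restore_state_nvhe(host_ctxt);

            return exit_code;
    }

__hyp_running_vcpu is also what hyp_panic() (line 356) inspects to tell whether the crash happened with a guest loaded.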
sysreg-sr.c
     21  void __sysreg_save_state_nvhe(struct kvm_cpu_context *ctxt)  in __sysreg_save_state_nvhe()
     29  void __sysreg_restore_state_nvhe(struct kvm_cpu_context *ctxt)  in __sysreg_restore_state_nvhe()
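Both entry points are thin wrappers around the shared helpers in hyp/include/hyp/sysreg-sr.h (listed near the end of this page). Their likely composition, mirroring the helper split:

    void __sysreg_save_state_nvhe(struct kvm_cpu_context *ctxt)
    {
            __sysreg_save_el1_state(ctxt);
            __sysreg_save_common_state(ctxt);
            __sysreg_save_user_state(ctxt);
            __sysreg_save_el2_return_state(ctxt);
    }

__sysreg_restore_state_nvhe() is the same sequence with the restore helpers. nVHE saves and restores everything on every vcpu run; contrast this with the VHE variants further down.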
setup.c
    227  struct kvm_cpu_context *host_ctxt = &host_data->host_ctxt;  in __pkvm_init_finalise()
mem_protect.c
    441  void handle_host_mem_abort(struct kvm_cpu_context *host_ctxt)  in handle_host_mem_abort()
/linux/arch/riscv/include/asm/
kvm_vcpu_fp.h
     15  struct kvm_cpu_context;
     18  void __kvm_riscv_fp_f_save(struct kvm_cpu_context *context);
     19  void __kvm_riscv_fp_f_restore(struct kvm_cpu_context *context);
     20  void __kvm_riscv_fp_d_save(struct kvm_cpu_context *context);
     21  void __kvm_riscv_fp_d_restore(struct kvm_cpu_context *context);
     24  void kvm_riscv_vcpu_guest_fp_save(struct kvm_cpu_context *cntx,
     26  void kvm_riscv_vcpu_guest_fp_restore(struct kvm_cpu_context *cntx,
     28  void kvm_riscv_vcpu_host_fp_save(struct kvm_cpu_context *cntx);
     29  void kvm_riscv_vcpu_host_fp_restore(struct kvm_cpu_context *cntx);
     39  struct kvm_cpu_context *cntx,  in kvm_riscv_vcpu_guest_fp_restore()
    [all …]
kvm_host.h
     95  struct kvm_cpu_context {
    160  struct kvm_cpu_context host_context;
    163  struct kvm_cpu_context guest_context;
    169  struct kvm_cpu_context guest_reset_context;
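Line 95 is the RISC-V definition of the type, and lines 160-169 show it instantiated three times per vcpu: the host's state, the live guest state, and a pristine copy used for vcpu reset. An abridged sketch of the layout (field list reconstructed from memory; verify against the tree):

    struct kvm_cpu_context {
            unsigned long zero;                 /* x0, hardwired zero */
            unsigned long ra, sp, gp, tp;       /* link, stack, global, thread */
            unsigned long t0, t1, t2;           /* temporaries */
            unsigned long s0, s1;               /* callee-saved */
            unsigned long a0, a1, a2, a3, a4, a5, a6, a7;  /* args/returns */
            unsigned long s2, s3, s4, s5, s6, s7, s8, s9, s10, s11;
            unsigned long t3, t4, t5, t6;
            unsigned long sepc;                 /* guest program counter */
            unsigned long sstatus;              /* supervisor status */
            unsigned long hstatus;              /* hypervisor status */
            union __riscv_fp_state fp;          /* F/D register file */
    };

guest_reset_context exists so kvm_riscv_reset_vcpu() can bring a vcpu back to its boot state with a plain structure copy.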
/linux/arch/riscv/kernel/
asm-offsets.c
    199  OFFSET(KVM_ARCH_FP_F_F0, kvm_cpu_context, fp.f.f[0]);  in asm_offsets()
    200  OFFSET(KVM_ARCH_FP_F_F1, kvm_cpu_context, fp.f.f[1]);  in asm_offsets()
    201  OFFSET(KVM_ARCH_FP_F_F2, kvm_cpu_context, fp.f.f[2]);  in asm_offsets()
    202  OFFSET(KVM_ARCH_FP_F_F3, kvm_cpu_context, fp.f.f[3]);  in asm_offsets()
    203  OFFSET(KVM_ARCH_FP_F_F4, kvm_cpu_context, fp.f.f[4]);  in asm_offsets()
    204  OFFSET(KVM_ARCH_FP_F_F5, kvm_cpu_context, fp.f.f[5]);  in asm_offsets()
    205  OFFSET(KVM_ARCH_FP_F_F6, kvm_cpu_context, fp.f.f[6]);  in asm_offsets()
    206  OFFSET(KVM_ARCH_FP_F_F7, kvm_cpu_context, fp.f.f[7]);  in asm_offsets()
    207  OFFSET(KVM_ARCH_FP_F_F8, kvm_cpu_context, fp.f.f[8]);  in asm_offsets()
    208  OFFSET(KVM_ARCH_FP_F_F9, kvm_cpu_context, fp.f.f[9]);  in asm_offsets()
    [all …]
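asm-offsets.c is never linked into the kernel; it is compiled only so that each OFFSET() invocation leaves a marker the build system scrapes into include/generated/asm-offsets.h. That is what lets the FP save/restore assembly address each f-register inside kvm_cpu_context by name. The machinery, from include/linux/kbuild.h:

    /* Emit "->SYM value" into the generated assembly; a build rule   */
    /* turns each marker into a #define in asm-offsets.h.             */
    #define DEFINE(sym, val) \
            asm volatile("\n.ascii \"->" #sym " %0 " #val "\"" : : "i" (val))

    #define OFFSET(sym, str, mem) \
            DEFINE(sym, offsetof(struct str, mem))

The truncated tail of this list presumably continues through the f10-f31 and fcsr offsets, plus the D-extension equivalents.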
/linux/arch/arm64/kvm/hyp/vhe/
sysreg-sr.c
     27  void sysreg_save_host_state_vhe(struct kvm_cpu_context *ctxt)  in sysreg_save_host_state_vhe()
     33  void sysreg_save_guest_state_vhe(struct kvm_cpu_context *ctxt)  in sysreg_save_guest_state_vhe()
     40  void sysreg_restore_host_state_vhe(struct kvm_cpu_context *ctxt)  in sysreg_restore_host_state_vhe()
     46  void sysreg_restore_guest_state_vhe(struct kvm_cpu_context *ctxt)  in sysreg_restore_guest_state_vhe()
     66  struct kvm_cpu_context *guest_ctxt = &vcpu->arch.ctxt;  in kvm_vcpu_load_sysregs_vhe()
     67  struct kvm_cpu_context *host_ctxt;  in kvm_vcpu_load_sysregs_vhe()
    100  struct kvm_cpu_context *guest_ctxt = &vcpu->arch.ctxt;  in kvm_vcpu_put_sysregs_vhe()
    101  struct kvm_cpu_context *host_ctxt;  in kvm_vcpu_put_sysregs_vhe()
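The VHE code splits save/restore into host and guest variants because, with VHE, the host's EL1 state does not need to be switched on every guest entry: kvm_vcpu_load_sysregs_vhe() moves the guest's EL1 state into the CPU once at vcpu_load() time and kvm_vcpu_put_sysregs_vhe() takes it back out at vcpu_put(). A compressed sketch of the load side (trap and AArch32 handling omitted):

    void kvm_vcpu_load_sysregs_vhe(struct kvm_vcpu *vcpu)
    {
            struct kvm_cpu_context *guest_ctxt = &vcpu->arch.ctxt;
            struct kvm_cpu_context *host_ctxt;

            host_ctxt = &this_cpu_ptr(&kvm_host_data)->host_ctxt;
            __sysreg_save_user_state(host_ctxt);

            /* Guest EL1 state lives in the hardware from now until put. */
            __sysreg_restore_user_state(guest_ctxt);
            __sysreg_restore_el1_state(guest_ctxt);

            vcpu->arch.sysregs_loaded_on_cpu = true;
    }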
switch.c
     31  DEFINE_PER_CPU(struct kvm_cpu_context, kvm_hyp_ctxt);
    122  struct kvm_cpu_context *host_ctxt;  in __kvm_vcpu_run_vhe()
    123  struct kvm_cpu_context *guest_ctxt;  in __kvm_vcpu_run_vhe()
    210  struct kvm_cpu_context *host_ctxt;  in __hyp_call_panic()
/linux/arch/arm64/include/asm/
kvm_hyp.h
     15  DECLARE_PER_CPU(struct kvm_cpu_context, kvm_hyp_ctxt);
     74  void __sysreg_save_state_nvhe(struct kvm_cpu_context *ctxt);
     75  void __sysreg_restore_state_nvhe(struct kvm_cpu_context *ctxt);
     77  void sysreg_save_host_state_vhe(struct kvm_cpu_context *ctxt);
     78  void sysreg_restore_host_state_vhe(struct kvm_cpu_context *ctxt);
     79  void sysreg_save_guest_state_vhe(struct kvm_cpu_context *ctxt);
     80  void sysreg_restore_guest_state_vhe(struct kvm_cpu_context *ctxt);
    103  bool kvm_host_psci_handler(struct kvm_cpu_context *host_ctxt);
    106  void __noreturn __hyp_do_panic(struct kvm_cpu_context *host_ctxt, u64 spsr,
    115  void __noreturn __host_enter(struct kvm_cpu_context *host_ctxt);
kvm_host.h
    228  struct kvm_cpu_context {
    249  struct kvm_cpu_context host_ctxt;
    283  struct kvm_cpu_context ctxt;
    709  static inline void kvm_init_host_cpu_context(struct kvm_cpu_context *cpu_ctxt)  in kvm_init_host_cpu_context()
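Line 228 is the arm64 definition that everything in this listing passes around; line 249 embeds one per physical CPU inside struct kvm_host_data, and line 283 is the guest's copy inside struct kvm_vcpu_arch. An abridged sketch of the layout for kernels of this vintage:

    struct kvm_cpu_context {
            struct user_pt_regs regs;       /* x0-x30, sp, pc, pstate */

            u64     spsr_abt;               /* banked AArch32 SPSRs */
            u64     spsr_und;
            u64     spsr_irq;
            u64     spsr_fiq;

            struct user_fpsimd_state fp_regs;

            u64 sys_regs[NR_SYS_REGS];      /* indexed by the sysreg enum */

            struct kvm_vcpu *__hyp_running_vcpu;
    };

The asm-offsets.c entries listed below are offsets into exactly this structure, which is how the hyp entry/exit assembly and the C world agree on its layout.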
/linux/arch/riscv/kvm/
vcpu_fp.c
     19  struct kvm_cpu_context *cntx = &vcpu->arch.guest_context;  in kvm_riscv_vcpu_fp_reset()
     29  void kvm_riscv_vcpu_fp_clean(struct kvm_cpu_context *cntx)  in kvm_riscv_vcpu_fp_clean()
     35  void kvm_riscv_vcpu_guest_fp_save(struct kvm_cpu_context *cntx,  in kvm_riscv_vcpu_guest_fp_save()
     47  void kvm_riscv_vcpu_guest_fp_restore(struct kvm_cpu_context *cntx,  in kvm_riscv_vcpu_guest_fp_restore()
     59  void kvm_riscv_vcpu_host_fp_save(struct kvm_cpu_context *cntx)  in kvm_riscv_vcpu_host_fp_save()
     68  void kvm_riscv_vcpu_host_fp_restore(struct kvm_cpu_context *cntx)  in kvm_riscv_vcpu_host_fp_restore()
     81  struct kvm_cpu_context *cntx = &vcpu->arch.guest_context;  in kvm_riscv_vcpu_get_reg_fp()
    127  struct kvm_cpu_context *cntx = &vcpu->arch.guest_context;  in kvm_riscv_vcpu_set_reg_fp()
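The guest FP helpers pick between the F and D primitives from kvm_vcpu_fp.h based on the guest's ISA, and skip the work entirely unless sstatus marks the FP unit dirty. A sketch of the save path, reconstructed from the signatures above and the usual sstatus.FS handling (guards approximate):

    void kvm_riscv_vcpu_guest_fp_save(struct kvm_cpu_context *cntx,
                                      unsigned long isa)
    {
            /* Nothing to save unless the guest actually touched the FPU. */
            if ((cntx->sstatus & SR_FS) == SR_FS_DIRTY) {
                    if (riscv_isa_extension_available(&isa, d))
                            __kvm_riscv_fp_d_save(cntx);
                    else if (riscv_isa_extension_available(&isa, f))
                            __kvm_riscv_fp_f_save(cntx);
                    kvm_riscv_vcpu_fp_clean(cntx);
            }
    }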
vcpu_sbi.c
     22  struct kvm_cpu_context *cp = &vcpu->arch.guest_context;  in kvm_riscv_vcpu_sbi_forward()
     41  struct kvm_cpu_context *cp = &vcpu->arch.guest_context;  in kvm_riscv_vcpu_sbi_return()
     85  struct kvm_cpu_context *cp = &vcpu->arch.guest_context;  in kvm_riscv_vcpu_sbi_ecall()
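Since the guest's GPRs live in guest_context, implementing the SBI binary interface is plain field access: a7 selects the extension, a6 the function, a0-a5 carry arguments, and a0/a1 return the error/value pair. A sketch of the register convention only; the real kvm_riscv_vcpu_sbi_ecall() dispatch and kvm_run plumbing are elided, and the locals are illustrative:

    int kvm_riscv_vcpu_sbi_ecall(struct kvm_vcpu *vcpu, struct kvm_run *run)
    {
            struct kvm_cpu_context *cp = &vcpu->arch.guest_context;
            unsigned long ext_id  = cp->a7;   /* SBI extension ID */
            unsigned long func_id = cp->a6;   /* function within the extension */
            long error = 0, value = 0;        /* illustrative result pair */

            /* ... dispatch on ext_id/func_id, consuming cp->a0..a5 ... */

            cp->sepc += 4;      /* step the guest past its ecall */
            cp->a0 = error;     /* SBI error code back to the guest */
            cp->a1 = value;     /* SBI return value */
            return 1;           /* non-zero: resume the guest */
    }

kvm_riscv_vcpu_sbi_forward() (line 22) goes the other way: it copies these same fields into the kvm_run structure so userspace can service a call the kernel does not handle.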
vcpu.c
     54  struct kvm_cpu_context *cntx = &vcpu->arch.guest_context;  in kvm_riscv_reset_vcpu()
     55  struct kvm_cpu_context *reset_cntx = &vcpu->arch.guest_reset_context;  in kvm_riscv_reset_vcpu()
     76  struct kvm_cpu_context *cntx;  in kvm_arch_vcpu_create()
    212  struct kvm_cpu_context *cntx = &vcpu->arch.guest_context;  in kvm_riscv_vcpu_get_reg_core()
    245  struct kvm_cpu_context *cntx = &vcpu->arch.guest_context;  in kvm_riscv_vcpu_set_reg_core()
vcpu_exit.c
    165  struct kvm_cpu_context *ct;  in virtual_inst_fault()
    198  struct kvm_cpu_context *ct = &vcpu->arch.guest_context;  in emulate_load()
    311  struct kvm_cpu_context *ct = &vcpu->arch.guest_context;  in emulate_store()
/linux/arch/arm64/kvm/hyp/include/hyp/
sysreg-sr.h
     19  static inline void __sysreg_save_common_state(struct kvm_cpu_context *ctxt)  in __sysreg_save_common_state()
     24  static inline void __sysreg_save_user_state(struct kvm_cpu_context *ctxt)  in __sysreg_save_user_state()
     30  static inline bool ctxt_has_mte(struct kvm_cpu_context *ctxt)  in ctxt_has_mte()
     40  static inline void __sysreg_save_el1_state(struct kvm_cpu_context *ctxt)  in __sysreg_save_el1_state()
     70  static inline void __sysreg_save_el2_return_state(struct kvm_cpu_context *ctxt)  in __sysreg_save_el2_return_state()
     84  static inline void __sysreg_restore_common_state(struct kvm_cpu_context *ctxt)  in __sysreg_restore_common_state()
     89  static inline void __sysreg_restore_user_state(struct kvm_cpu_context *ctxt)  in __sysreg_restore_user_state()
     95  static inline void __sysreg_restore_el1_state(struct kvm_cpu_context *ctxt)  in __sysreg_restore_el1_state()
    159  static inline void __sysreg_restore_el2_return_state(struct kvm_cpu_context *ctxt)  in __sysreg_restore_el2_return_state()
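These inline helpers are where the sys_regs array inside kvm_cpu_context actually gets filled and drained: each is a straight-line run of accessor assignments keyed by the sysreg enum. Representative lines from the EL1 save path (abridged; the real helper covers a couple dozen registers):

    static inline void __sysreg_save_el1_state(struct kvm_cpu_context *ctxt)
    {
            ctxt_sys_reg(ctxt, SCTLR_EL1) = read_sysreg_el1(SYS_SCTLR);
            ctxt_sys_reg(ctxt, TCR_EL1)   = read_sysreg_el1(SYS_TCR);
            ctxt_sys_reg(ctxt, TTBR0_EL1) = read_sysreg_el1(SYS_TTBR0);
            ctxt_sys_reg(ctxt, TTBR1_EL1) = read_sysreg_el1(SYS_TTBR1);
            /* ...and so on for the rest of the EL1 register file */
    }

The read_sysreg_el1() indirection is what lets the same helper serve both worlds: under nVHE it reads the real EL1 registers, under VHE the redirected EL12 aliases.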
debug-sr.h
     92  struct kvm_cpu_context *ctxt)  in __debug_save_state()
    110  struct kvm_cpu_context *ctxt)  in __debug_restore_state()
    130  struct kvm_cpu_context *host_ctxt;  in __debug_switch_to_guest_common()
    131  struct kvm_cpu_context *guest_ctxt;  in __debug_switch_to_guest_common()
    149  struct kvm_cpu_context *host_ctxt;  in __debug_switch_to_host_common()
    150  struct kvm_cpu_context *guest_ctxt;  in __debug_switch_to_host_common()
switch.h
    315  DECLARE_PER_CPU(struct kvm_cpu_context, kvm_hyp_ctxt);
    319  struct kvm_cpu_context *ctxt;  in kvm_hyp_handle_ptrauth()
/linux/arch/arm64/kernel/
asm-offsets.c
    116  DEFINE(CPU_USER_PT_REGS, offsetof(struct kvm_cpu_context, regs));  in main()
    117  DEFINE(CPU_RGSR_EL1, offsetof(struct kvm_cpu_context, sys_regs[RGSR_EL1]));  in main()
    118  DEFINE(CPU_GCR_EL1, offsetof(struct kvm_cpu_context, sys_regs[GCR_EL1]));  in main()
    119  DEFINE(CPU_APIAKEYLO_EL1, offsetof(struct kvm_cpu_context, sys_regs[APIAKEYLO_EL1]));  in main()
    120  DEFINE(CPU_APIBKEYLO_EL1, offsetof(struct kvm_cpu_context, sys_regs[APIBKEYLO_EL1]));  in main()
    121  DEFINE(CPU_APDAKEYLO_EL1, offsetof(struct kvm_cpu_context, sys_regs[APDAKEYLO_EL1]));  in main()
    122  DEFINE(CPU_APDBKEYLO_EL1, offsetof(struct kvm_cpu_context, sys_regs[APDBKEYLO_EL1]));  in main()
    123  DEFINE(CPU_APGAKEYLO_EL1, offsetof(struct kvm_cpu_context, sys_regs[APGAKEYLO_EL1]));  in main()
    124  DEFINE(HOST_CONTEXT_VCPU, offsetof(struct kvm_cpu_context, __hyp_running_vcpu));  in main()
/linux/arch/arm64/kvm/hyp/include/nvhe/
mem_protect.h
     58  void handle_host_mem_abort(struct kvm_cpu_context *host_ctxt);