/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2012,2013 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 *
 * Derived from arch/arm/include/asm/kvm_host.h:
 * Copyright (C) 2012 - Virtual Open Systems and Columbia University
 * Author: Christoffer Dall <c.dall@virtualopensystems.com>
 */

#ifndef __ARM64_KVM_HOST_H__
#define __ARM64_KVM_HOST_H__

#include <linux/arm-smccc.h>
#include <linux/bitmap.h>
#include <linux/types.h>
#include <linux/jump_label.h>
#include <linux/kvm_types.h>
#include <linux/percpu.h>
#include <linux/psci.h>
#include <asm/arch_gicv3.h>
#include <asm/barrier.h>
#include <asm/cpufeature.h>
#include <asm/cputype.h>
#include <asm/daifflags.h>
#include <asm/fpsimd.h>
#include <asm/kvm.h>
#include <asm/kvm_asm.h>
#include <asm/thread_info.h>

#define __KVM_HAVE_ARCH_INTC_INITIALIZED

#define KVM_HALT_POLL_NS_DEFAULT 500000

#include <kvm/arm_vgic.h>
#include <kvm/arm_arch_timer.h>
#include <kvm/arm_pmu.h>

#define KVM_MAX_VCPUS VGIC_V3_MAX_CPUS

#define KVM_VCPU_MAX_FEATURES 7

#define KVM_REQ_SLEEP \
	KVM_ARCH_REQ_FLAGS(0, KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
#define KVM_REQ_IRQ_PENDING	KVM_ARCH_REQ(1)
#define KVM_REQ_VCPU_RESET	KVM_ARCH_REQ(2)
#define KVM_REQ_RECORD_STEAL	KVM_ARCH_REQ(3)
#define KVM_REQ_RELOAD_GICv4	KVM_ARCH_REQ(4)
#define KVM_REQ_RELOAD_PMU	KVM_ARCH_REQ(5)

#define KVM_DIRTY_LOG_MANUAL_CAPS   (KVM_DIRTY_LOG_MANUAL_PROTECT_ENABLE | \
				     KVM_DIRTY_LOG_INITIALLY_SET)

/*
 * Mode of operation configurable with kvm-arm.mode early param.
 * See Documentation/admin-guide/kernel-parameters.txt for more information.
 */
enum kvm_mode {
	KVM_MODE_DEFAULT,
	KVM_MODE_PROTECTED,
	KVM_MODE_NONE,
};
enum kvm_mode kvm_get_mode(void);
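
/*
 * Illustrative example (not part of this header): the mode is selected
 * on the kernel command line, e.g.
 *
 *	kvm-arm.mode=protected
 *
 * after which kvm_get_mode() reports KVM_MODE_PROTECTED to its callers.
 */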

DECLARE_STATIC_KEY_FALSE(userspace_irqchip_in_use);

extern unsigned int kvm_sve_max_vl;
int kvm_arm_init_sve(void);

u32 __attribute_const__ kvm_target_cpu(void);
int kvm_reset_vcpu(struct kvm_vcpu *vcpu);
void kvm_arm_vcpu_destroy(struct kvm_vcpu *vcpu);

struct kvm_vmid {
	/* The VMID generation used for the virt. memory system */
	u64    vmid_gen;
	u32    vmid;
};

struct kvm_s2_mmu {
	struct kvm_vmid vmid;

	/*
	 * stage2 entry level table
	 *
	 * Two kvm_s2_mmu structures in the same VM can point to the same
	 * pgd here.  This happens when running a guest using a
	 * translation regime that isn't affected by its own stage-2
	 * translation, such as a non-VHE hypervisor running at vEL2, or
	 * for vEL1/EL0 with vHCR_EL2.VM == 0.  In that case, we use the
	 * canonical stage-2 page tables.
	 */
	phys_addr_t	pgd_phys;
	struct kvm_pgtable *pgt;

	/* The last vcpu id that ran on each physical CPU */
	int __percpu *last_vcpu_ran;

	struct kvm_arch *arch;
};

struct kvm_arch_memory_slot {
};

struct kvm_arch {
	struct kvm_s2_mmu mmu;

	/* VTCR_EL2 value for this VM */
	u64    vtcr;

	/* The maximum number of vCPUs depends on the used GIC model */
	int max_vcpus;

	/* Interrupt controller */
	struct vgic_dist	vgic;

	/* Mandated version of PSCI */
	u32 psci_version;

	/*
	 * If we encounter a data abort without valid instruction syndrome
	 * information, report this to user space.  User space can (and
	 * should) opt in to this feature if KVM_CAP_ARM_NISV_TO_USER is
	 * supported.
	 */
	bool return_nisv_io_abort_to_user;

	/*
	 * VM-wide PMU filter, implemented as a bitmap and big enough for
	 * up to 2^10 events (ARMv8.0) or 2^16 events (ARMv8.1+).
	 */
	unsigned long *pmu_filter;
	unsigned int pmuver;

	u8 pfr0_csv2;
	u8 pfr0_csv3;

	/* Memory Tagging Extension enabled for the guest */
	bool mte_enabled;
};
struct kvm_vcpu_fault_info {
	u32 esr_el2;		/* Hyp Syndrome Register */
	u64 far_el2;		/* Hyp Fault Address Register */
	u64 hpfar_el2;		/* Hyp IPA Fault Address Register */
	u64 disr_el1;		/* Deferred [SError] Status Register */
};

enum vcpu_sysreg {
	__INVALID_SYSREG__,   /* 0 is reserved as an invalid value */
	MPIDR_EL1,	/* MultiProcessor Affinity Register */
	CSSELR_EL1,	/* Cache Size Selection Register */
	SCTLR_EL1,	/* System Control Register */
	ACTLR_EL1,	/* Auxiliary Control Register */
	CPACR_EL1,	/* Coprocessor Access Control */
	ZCR_EL1,	/* SVE Control */
	TTBR0_EL1,	/* Translation Table Base Register 0 */
	TTBR1_EL1,	/* Translation Table Base Register 1 */
	TCR_EL1,	/* Translation Control Register */
	ESR_EL1,	/* Exception Syndrome Register */
	AFSR0_EL1,	/* Auxiliary Fault Status Register 0 */
	AFSR1_EL1,	/* Auxiliary Fault Status Register 1 */
	FAR_EL1,	/* Fault Address Register */
	MAIR_EL1,	/* Memory Attribute Indirection Register */
	VBAR_EL1,	/* Vector Base Address Register */
	CONTEXTIDR_EL1,	/* Context ID Register */
	TPIDR_EL0,	/* Thread ID, User R/W */
	TPIDRRO_EL0,	/* Thread ID, User R/O */
	TPIDR_EL1,	/* Thread ID, Privileged */
	AMAIR_EL1,	/* Aux Memory Attribute Indirection Register */
	CNTKCTL_EL1,	/* Timer Control Register (EL1) */
	PAR_EL1,	/* Physical Address Register */
	MDSCR_EL1,	/* Monitor Debug System Control Register */
	MDCCINT_EL1,	/* Monitor Debug Comms Channel Interrupt Enable Reg */
	DISR_EL1,	/* Deferred Interrupt Status Register */

	/* Performance Monitors Registers */
	PMCR_EL0,	/* Control Register */
	PMSELR_EL0,	/* Event Counter Selection Register */
	PMEVCNTR0_EL0,	/* Event Counter Register (0-30) */
	PMEVCNTR30_EL0 = PMEVCNTR0_EL0 + 30,
	PMCCNTR_EL0,	/* Cycle Counter Register */
	PMEVTYPER0_EL0,	/* Event Type Register (0-30) */
	PMEVTYPER30_EL0 = PMEVTYPER0_EL0 + 30,
	PMCCFILTR_EL0,	/* Cycle Count Filter Register */
	PMCNTENSET_EL0,	/* Count Enable Set Register */
	PMINTENSET_EL1,	/* Interrupt Enable Set Register */
	PMOVSSET_EL0,	/* Overflow Flag Status Set Register */
	PMUSERENR_EL0,	/* User Enable Register */

	/* Pointer Authentication Registers in a strict increasing order. */
	APIAKEYLO_EL1,
	APIAKEYHI_EL1,
	APIBKEYLO_EL1,
	APIBKEYHI_EL1,
	APDAKEYLO_EL1,
	APDAKEYHI_EL1,
	APDBKEYLO_EL1,
	APDBKEYHI_EL1,
	APGAKEYLO_EL1,
	APGAKEYHI_EL1,

	ELR_EL1,
	SP_EL1,
	SPSR_EL1,

	CNTVOFF_EL2,
	CNTV_CVAL_EL0,
	CNTV_CTL_EL0,
	CNTP_CVAL_EL0,
	CNTP_CTL_EL0,

	/* Memory Tagging Extension registers */
	RGSR_EL1,	/* Random Allocation Tag Seed Register */
	GCR_EL1,	/* Tag Control Register */
	TFSR_EL1,	/* Tag Fault Status Register (EL1) */
	TFSRE0_EL1,	/* Tag Fault Status Register (EL0) */

	/* 32bit specific registers. Keep them at the end of the range */
	DACR32_EL2,	/* Domain Access Control Register */
	IFSR32_EL2,	/* Instruction Fault Status Register */
	FPEXC32_EL2,	/* Floating-Point Exception Control Register */
	DBGVCR32_EL2,	/* Debug Vector Catch Register */

	NR_SYS_REGS	/* Nothing after this line! */
};

struct kvm_cpu_context {
	struct user_pt_regs regs;	/* sp = sp_el0 */

	u64	spsr_abt;
	u64	spsr_und;
	u64	spsr_irq;
	u64	spsr_fiq;

	struct user_fpsimd_state fp_regs;

	u64 sys_regs[NR_SYS_REGS];

	struct kvm_vcpu *__hyp_running_vcpu;
};

struct kvm_pmu_events {
	u32 events_host;
	u32 events_guest;
};

struct kvm_host_data {
	struct kvm_cpu_context host_ctxt;
	struct kvm_pmu_events pmu_events;
};

struct kvm_host_psci_config {
	/* PSCI version used by host. */
	u32 version;

	/* Function IDs used by host if version is v0.1. */
	struct psci_0_1_function_ids function_ids_0_1;

	bool psci_0_1_cpu_suspend_implemented;
	bool psci_0_1_cpu_on_implemented;
	bool psci_0_1_cpu_off_implemented;
	bool psci_0_1_migrate_implemented;
};

extern struct kvm_host_psci_config kvm_nvhe_sym(kvm_host_psci_config);
#define kvm_host_psci_config CHOOSE_NVHE_SYM(kvm_host_psci_config)

extern s64 kvm_nvhe_sym(hyp_physvirt_offset);
#define hyp_physvirt_offset CHOOSE_NVHE_SYM(hyp_physvirt_offset)

extern u64 kvm_nvhe_sym(hyp_cpu_logical_map)[NR_CPUS];
#define hyp_cpu_logical_map CHOOSE_NVHE_SYM(hyp_cpu_logical_map)

struct vcpu_reset_state {
	unsigned long	pc;
	unsigned long	r0;
	bool		be;
	bool		reset;
};

struct kvm_vcpu_arch {
	struct kvm_cpu_context ctxt;
	void *sve_state;
	unsigned int sve_max_vl;

	/* Stage 2 paging state used by the hardware on next switch */
	struct kvm_s2_mmu *hw_mmu;

	/* Values of trap registers for the guest. */
	u64 hcr_el2;
	u64 mdcr_el2;
	u64 cptr_el2;

	/* Values of trap registers for the host before guest entry. */
	u64 mdcr_el2_host;

	/* Exception Information */
	struct kvm_vcpu_fault_info fault;

	/* State of various workarounds, see kvm_asm.h for bit assignment */
	u64 workaround_flags;

	/* Miscellaneous vcpu state flags */
	u64 flags;

	/*
	 * We maintain more than a single set of debug registers to support
	 * debugging the guest from the host and to maintain separate host and
	 * guest state during world switches. vcpu_debug_state are the debug
	 * registers of the vcpu as the guest sees them.  host_debug_state are
	 * the host registers which are saved and restored during
	 * world switches. external_debug_state contains the debug register
	 * values userspace wants installed while debugging the guest; it is
	 * set via the KVM_SET_GUEST_DEBUG ioctl (see the sketch after this
	 * struct).
	 *
	 * debug_ptr points to the set of debug registers that should be loaded
	 * onto the hardware when running the guest.
	 */
	struct kvm_guest_debug_arch *debug_ptr;
	struct kvm_guest_debug_arch vcpu_debug_state;
	struct kvm_guest_debug_arch external_debug_state;

	struct thread_info *host_thread_info;	/* hyp VA */
	struct user_fpsimd_state *host_fpsimd_state;	/* hyp VA */

	struct {
		/* {Break,watch}point registers */
		struct kvm_guest_debug_arch regs;
		/* Statistical profiling extension */
		u64 pmscr_el1;
		/* Self-hosted trace */
		u64 trfcr_el1;
	} host_debug_state;

	/* VGIC state */
	struct vgic_cpu vgic_cpu;
	struct arch_timer_cpu timer_cpu;
	struct kvm_pmu pmu;

	/*
	 * Anything that is not used directly from assembly code goes
	 * here.
	 */

	/*
	 * Guest registers we preserve during guest debugging.
	 *
	 * These shadow registers are updated by the kvm_handle_sys_reg
	 * trap handler if the guest accesses or updates them while we
	 * are using guest debug.
	 */
	struct {
		u32	mdscr_el1;
	} guest_debug_preserved;

	/* vcpu power-off state */
	bool power_off;

	/* Don't run the guest (internal implementation need) */
	bool pause;

	/* Cache some mmu pages needed inside spinlock regions */
	struct kvm_mmu_memory_cache mmu_page_cache;

	/* Target CPU and feature flags */
	int target;
	DECLARE_BITMAP(features, KVM_VCPU_MAX_FEATURES);

	/* Detect first run of a vcpu */
	bool has_run_once;

	/* Virtual SError ESR to restore when HCR_EL2.VSE is set */
	u64 vsesr_el2;

	/* Additional reset state */
	struct vcpu_reset_state	reset_state;

	/*
	 * True when deferrable sysregs are loaded on the physical CPU,
	 * see kvm_vcpu_load_sysregs_vhe and kvm_vcpu_put_sysregs_vhe.
	 */
	bool sysregs_loaded_on_cpu;

	/* Guest PV state */
	struct {
		u64 last_steal;
		gpa_t base;
	} steal;
};
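
/*
 * Illustrative sketch (mirroring kvm_arm_setup_debug(), not a
 * definitive quote): debug_ptr above is normally left pointing at
 * vcpu_debug_state, and is repointed at external_debug_state when
 * userspace installs hardware breakpoints via KVM_SET_GUEST_DEBUG:
 *
 *	if (vcpu->guest_debug & KVM_GUESTDBG_USE_HW)
 *		vcpu->arch.debug_ptr = &vcpu->arch.external_debug_state;
 */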

/* Pointer to the vcpu's SVE FFR for sve_{save,load}_state() */
#define vcpu_sve_pffr(vcpu) (kern_hyp_va((vcpu)->arch.sve_state) +	\
			     sve_ffr_offset((vcpu)->arch.sve_max_vl))

#define vcpu_sve_max_vq(vcpu)	sve_vq_from_vl((vcpu)->arch.sve_max_vl)

#define vcpu_sve_state_size(vcpu) ({					\
	size_t __size_ret;						\
	unsigned int __vcpu_vq;						\
									\
	if (WARN_ON(!sve_vl_valid((vcpu)->arch.sve_max_vl))) {		\
		__size_ret = 0;						\
	} else {							\
		__vcpu_vq = vcpu_sve_max_vq(vcpu);			\
		__size_ret = SVE_SIG_REGS_SIZE(__vcpu_vq);		\
	}								\
									\
	__size_ret;							\
})
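
/*
 * Illustrative use (a sketch of the SVE finalization path): the size
 * computed by vcpu_sve_state_size() backs the sve_state allocation,
 * roughly:
 *
 *	buf = kzalloc(vcpu_sve_state_size(vcpu), GFP_KERNEL_ACCOUNT);
 *	if (!buf)
 *		return -ENOMEM;
 *	vcpu->arch.sve_state = buf;
 *
 * A size of 0 denotes an invalid vector length and must be treated as
 * an error.
 */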

/* vcpu_arch flags field values: */
#define KVM_ARM64_DEBUG_DIRTY		(1 << 0)
#define KVM_ARM64_FP_ENABLED		(1 << 1) /* guest FP regs loaded */
#define KVM_ARM64_FP_HOST		(1 << 2) /* host FP regs loaded */
#define KVM_ARM64_HOST_SVE_IN_USE	(1 << 3) /* backup for host TIF_SVE */
#define KVM_ARM64_HOST_SVE_ENABLED	(1 << 4) /* SVE enabled for EL0 */
#define KVM_ARM64_GUEST_HAS_SVE		(1 << 5) /* SVE exposed to guest */
#define KVM_ARM64_VCPU_SVE_FINALIZED	(1 << 6) /* SVE config completed */
#define KVM_ARM64_GUEST_HAS_PTRAUTH	(1 << 7) /* PTRAUTH exposed to guest */
#define KVM_ARM64_PENDING_EXCEPTION	(1 << 8) /* Exception pending */
#define KVM_ARM64_EXCEPT_MASK		(7 << 9) /* Target EL/MODE */
#define KVM_ARM64_DEBUG_STATE_SAVE_SPE	(1 << 12) /* Save SPE context if active */
#define KVM_ARM64_DEBUG_STATE_SAVE_TRBE	(1 << 13) /* Save TRBE context if active */

#define KVM_GUESTDBG_VALID_MASK (KVM_GUESTDBG_ENABLE | \
				 KVM_GUESTDBG_USE_SW_BP | \
				 KVM_GUESTDBG_USE_HW | \
				 KVM_GUESTDBG_SINGLESTEP)
/*
 * When KVM_ARM64_PENDING_EXCEPTION is set, KVM_ARM64_EXCEPT_MASK can
 * take the following values:
 *
 * For AArch32 EL1:
 */
#define KVM_ARM64_EXCEPT_AA32_UND	(0 << 9)
#define KVM_ARM64_EXCEPT_AA32_IABT	(1 << 9)
#define KVM_ARM64_EXCEPT_AA32_DABT	(2 << 9)
/* For AArch64: */
#define KVM_ARM64_EXCEPT_AA64_ELx_SYNC	(0 << 9)
#define KVM_ARM64_EXCEPT_AA64_ELx_IRQ	(1 << 9)
#define KVM_ARM64_EXCEPT_AA64_ELx_FIQ	(2 << 9)
#define KVM_ARM64_EXCEPT_AA64_ELx_SERR	(3 << 9)
#define KVM_ARM64_EXCEPT_AA64_EL1	(0 << 11)
#define KVM_ARM64_EXCEPT_AA64_EL2	(1 << 11)

/*
 * Overlaps with KVM_ARM64_EXCEPT_MASK on purpose so that it can't be
 * set together with an exception...
 */
#define KVM_ARM64_INCREMENT_PC		(1 << 9) /* Increment PC */
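
/*
 * Illustrative combination (a sketch, mirroring the exception
 * injection helpers): pending a synchronous exception for a 64bit EL1
 * guest encodes the target EL and exception type together with the
 * pending bit:
 *
 *	vcpu->arch.flags |= (KVM_ARM64_EXCEPT_AA64_EL1		|
 *			     KVM_ARM64_EXCEPT_AA64_ELx_SYNC	|
 *			     KVM_ARM64_PENDING_EXCEPTION);
 */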

#define vcpu_has_sve(vcpu) (system_supports_sve() &&			\
			    ((vcpu)->arch.flags & KVM_ARM64_GUEST_HAS_SVE))

#ifdef CONFIG_ARM64_PTR_AUTH
#define vcpu_has_ptrauth(vcpu)						\
	((cpus_have_final_cap(ARM64_HAS_ADDRESS_AUTH) ||		\
	  cpus_have_final_cap(ARM64_HAS_GENERIC_AUTH)) &&		\
	 (vcpu)->arch.flags & KVM_ARM64_GUEST_HAS_PTRAUTH)
#else
#define vcpu_has_ptrauth(vcpu)		false
#endif

#define vcpu_gp_regs(v)		(&(v)->arch.ctxt.regs)

/*
 * Only use __vcpu_sys_reg/ctxt_sys_reg if you know you want the
 * memory backed version of a register, and not the one most recently
 * accessed by a running VCPU.  For example, for userspace access or
 * for system registers that are never context switched, but only
 * emulated.
 */
#define __ctxt_sys_reg(c,r)	(&(c)->sys_regs[(r)])

#define ctxt_sys_reg(c,r)	(*__ctxt_sys_reg(c,r))

#define __vcpu_sys_reg(v,r)	(ctxt_sys_reg(&(v)->arch.ctxt, (r)))
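
/*
 * Illustrative example: userspace-facing accessors and reset code
 * operate on the memory-backed copy directly, e.g.
 *
 *	__vcpu_sys_reg(vcpu, MDSCR_EL1) = val;
 */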

u64 vcpu_read_sys_reg(const struct kvm_vcpu *vcpu, int reg);
void vcpu_write_sys_reg(struct kvm_vcpu *vcpu, u64 val, int reg);

static inline bool __vcpu_read_sys_reg_from_cpu(int reg, u64 *val)
{
	/*
	 * *** VHE ONLY ***
	 *
	 * System registers listed in the switch are not saved on every
	 * exit from the guest but are only saved on vcpu_put.
	 *
	 * Note that MPIDR_EL1 for the guest is set by KVM via VMPIDR_EL2 but
	 * should never be listed below, because the guest cannot modify its
	 * own MPIDR_EL1 and MPIDR_EL1 is accessed for VCPU A from VCPU B's
	 * thread when emulating cross-VCPU communication.
	 */
	if (!has_vhe())
		return false;

	switch (reg) {
	case CSSELR_EL1:	*val = read_sysreg_s(SYS_CSSELR_EL1);	break;
	case SCTLR_EL1:		*val = read_sysreg_s(SYS_SCTLR_EL12);	break;
	case CPACR_EL1:		*val = read_sysreg_s(SYS_CPACR_EL12);	break;
	case TTBR0_EL1:		*val = read_sysreg_s(SYS_TTBR0_EL12);	break;
	case TTBR1_EL1:		*val = read_sysreg_s(SYS_TTBR1_EL12);	break;
	case TCR_EL1:		*val = read_sysreg_s(SYS_TCR_EL12);	break;
	case ESR_EL1:		*val = read_sysreg_s(SYS_ESR_EL12);	break;
	case AFSR0_EL1:		*val = read_sysreg_s(SYS_AFSR0_EL12);	break;
	case AFSR1_EL1:		*val = read_sysreg_s(SYS_AFSR1_EL12);	break;
	case FAR_EL1:		*val = read_sysreg_s(SYS_FAR_EL12);	break;
	case MAIR_EL1:		*val = read_sysreg_s(SYS_MAIR_EL12);	break;
	case VBAR_EL1:		*val = read_sysreg_s(SYS_VBAR_EL12);	break;
	case CONTEXTIDR_EL1:	*val = read_sysreg_s(SYS_CONTEXTIDR_EL12);break;
	case TPIDR_EL0:		*val = read_sysreg_s(SYS_TPIDR_EL0);	break;
	case TPIDRRO_EL0:	*val = read_sysreg_s(SYS_TPIDRRO_EL0);	break;
	case TPIDR_EL1:		*val = read_sysreg_s(SYS_TPIDR_EL1);	break;
	case AMAIR_EL1:		*val = read_sysreg_s(SYS_AMAIR_EL12);	break;
	case CNTKCTL_EL1:	*val = read_sysreg_s(SYS_CNTKCTL_EL12);	break;
	case ELR_EL1:		*val = read_sysreg_s(SYS_ELR_EL12);	break;
	case PAR_EL1:		*val = read_sysreg_par();		break;
	case DACR32_EL2:	*val = read_sysreg_s(SYS_DACR32_EL2);	break;
	case IFSR32_EL2:	*val = read_sysreg_s(SYS_IFSR32_EL2);	break;
	case DBGVCR32_EL2:	*val = read_sysreg_s(SYS_DBGVCR32_EL2);	break;
	default:		return false;
	}

	return true;
}

static inline bool __vcpu_write_sys_reg_to_cpu(u64 val, int reg)
{
	/*
	 * *** VHE ONLY ***
	 *
	 * System registers listed in the switch are not restored on every
	 * entry to the guest but are only restored on vcpu_load.
	 *
	 * Note that MPIDR_EL1 for the guest is set by KVM via VMPIDR_EL2 but
	 * should never be listed below, because the MPIDR should only be set
	 * once, before running the VCPU, and never changed later.
	 */
	if (!has_vhe())
		return false;

	switch (reg) {
	case CSSELR_EL1:	write_sysreg_s(val, SYS_CSSELR_EL1);	break;
	case SCTLR_EL1:		write_sysreg_s(val, SYS_SCTLR_EL12);	break;
	case CPACR_EL1:		write_sysreg_s(val, SYS_CPACR_EL12);	break;
	case TTBR0_EL1:		write_sysreg_s(val, SYS_TTBR0_EL12);	break;
	case TTBR1_EL1:		write_sysreg_s(val, SYS_TTBR1_EL12);	break;
	case TCR_EL1:		write_sysreg_s(val, SYS_TCR_EL12);	break;
	case ESR_EL1:		write_sysreg_s(val, SYS_ESR_EL12);	break;
	case AFSR0_EL1:		write_sysreg_s(val, SYS_AFSR0_EL12);	break;
	case AFSR1_EL1:		write_sysreg_s(val, SYS_AFSR1_EL12);	break;
	case FAR_EL1:		write_sysreg_s(val, SYS_FAR_EL12);	break;
	case MAIR_EL1:		write_sysreg_s(val, SYS_MAIR_EL12);	break;
	case VBAR_EL1:		write_sysreg_s(val, SYS_VBAR_EL12);	break;
	case CONTEXTIDR_EL1:	write_sysreg_s(val, SYS_CONTEXTIDR_EL12);break;
	case TPIDR_EL0:		write_sysreg_s(val, SYS_TPIDR_EL0);	break;
	case TPIDRRO_EL0:	write_sysreg_s(val, SYS_TPIDRRO_EL0);	break;
	case TPIDR_EL1:		write_sysreg_s(val, SYS_TPIDR_EL1);	break;
	case AMAIR_EL1:		write_sysreg_s(val, SYS_AMAIR_EL12);	break;
	case CNTKCTL_EL1:	write_sysreg_s(val, SYS_CNTKCTL_EL12);	break;
	case ELR_EL1:		write_sysreg_s(val, SYS_ELR_EL12);	break;
	case PAR_EL1:		write_sysreg_s(val, SYS_PAR_EL1);	break;
	case DACR32_EL2:	write_sysreg_s(val, SYS_DACR32_EL2);	break;
	case IFSR32_EL2:	write_sysreg_s(val, SYS_IFSR32_EL2);	break;
	case DBGVCR32_EL2:	write_sysreg_s(val, SYS_DBGVCR32_EL2);	break;
	default:		return false;
	}

	return true;
}
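
/*
 * Illustrative sketch of how the two helpers above combine with the
 * memory-backed view (mirroring vcpu_read_sys_reg(), not a definitive
 * quote):
 *
 *	if (vcpu->arch.sysregs_loaded_on_cpu &&
 *	    __vcpu_read_sys_reg_from_cpu(reg, &val))
 *		return val;
 *
 *	return __vcpu_sys_reg(vcpu, reg);
 *
 * i.e. the CPU-resident copy is used only while the deferrable sysregs
 * are loaded (VHE); otherwise the in-memory copy is authoritative.
 */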

struct kvm_vm_stat {
	struct kvm_vm_stat_generic generic;
};

struct kvm_vcpu_stat {
	struct kvm_vcpu_stat_generic generic;
	u64 hvc_exit_stat;
	u64 wfe_exit_stat;
	u64 wfi_exit_stat;
	u64 mmio_exit_user;
	u64 mmio_exit_kernel;
	u64 signal_exits;
	u64 exits;
};

void kvm_vcpu_preferred_target(struct kvm_vcpu_init *init);
unsigned long kvm_arm_num_regs(struct kvm_vcpu *vcpu);
int kvm_arm_copy_reg_indices(struct kvm_vcpu *vcpu, u64 __user *indices);
int kvm_arm_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg);
int kvm_arm_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg);

unsigned long kvm_arm_num_sys_reg_descs(struct kvm_vcpu *vcpu);
int kvm_arm_copy_sys_reg_indices(struct kvm_vcpu *vcpu, u64 __user *uindices);
int kvm_arm_sys_reg_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *);
int kvm_arm_sys_reg_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *);

int __kvm_arm_vcpu_get_events(struct kvm_vcpu *vcpu,
			      struct kvm_vcpu_events *events);

int __kvm_arm_vcpu_set_events(struct kvm_vcpu *vcpu,
			      struct kvm_vcpu_events *events);

#define KVM_ARCH_WANT_MMU_NOTIFIER

void kvm_arm_halt_guest(struct kvm *kvm);
void kvm_arm_resume_guest(struct kvm *kvm);

#ifndef __KVM_NVHE_HYPERVISOR__
#define kvm_call_hyp_nvhe(f, ...)					\
	({								\
		struct arm_smccc_res res;				\
									\
		arm_smccc_1_1_hvc(KVM_HOST_SMCCC_FUNC(f),		\
				  ##__VA_ARGS__, &res);			\
		WARN_ON(res.a0 != SMCCC_RET_SUCCESS);			\
									\
		res.a1;							\
	})

/*
 * The couple of isb() below are there to guarantee the same behaviour
 * on VHE as on !VHE, where the eret to EL1 acts as a context
 * synchronization event.
 */
#define kvm_call_hyp(f, ...)						\
	do {								\
		if (has_vhe()) {					\
			f(__VA_ARGS__);					\
			isb();						\
		} else {						\
			kvm_call_hyp_nvhe(f, ##__VA_ARGS__);		\
		}							\
	} while(0)

#define kvm_call_hyp_ret(f, ...)					\
	({								\
		typeof(f(__VA_ARGS__)) ret;				\
									\
		if (has_vhe()) {					\
			ret = f(__VA_ARGS__);				\
			isb();						\
		} else {						\
			ret = kvm_call_hyp_nvhe(f, ##__VA_ARGS__);	\
		}							\
									\
		ret;							\
	})
#else /* __KVM_NVHE_HYPERVISOR__ */
#define kvm_call_hyp(f, ...) f(__VA_ARGS__)
#define kvm_call_hyp_ret(f, ...) f(__VA_ARGS__)
#define kvm_call_hyp_nvhe(f, ...) f(__VA_ARGS__)
#endif /* __KVM_NVHE_HYPERVISOR__ */
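
/*
 * Illustrative calls (sketches of how the macros above are used
 * elsewhere in KVM/arm64):
 *
 *	kvm_call_hyp(__kvm_tlb_flush_vmid, mmu);
 *	mdcr = kvm_call_hyp_ret(__kvm_get_mdcr_el2);
 *
 * On VHE these are direct function calls followed by an isb(); on nVHE
 * they are routed through an HVC by kvm_call_hyp_nvhe().
 */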

void force_vm_exit(const cpumask_t *mask);

int handle_exit(struct kvm_vcpu *vcpu, int exception_index);
void handle_exit_early(struct kvm_vcpu *vcpu, int exception_index);

int kvm_handle_cp14_load_store(struct kvm_vcpu *vcpu);
int kvm_handle_cp14_32(struct kvm_vcpu *vcpu);
int kvm_handle_cp14_64(struct kvm_vcpu *vcpu);
int kvm_handle_cp15_32(struct kvm_vcpu *vcpu);
int kvm_handle_cp15_64(struct kvm_vcpu *vcpu);
int kvm_handle_sys_reg(struct kvm_vcpu *vcpu);

void kvm_reset_sys_regs(struct kvm_vcpu *vcpu);

void kvm_sys_reg_table_init(void);

/* MMIO helpers */
void kvm_mmio_write_buf(void *buf, unsigned int len, unsigned long data);
unsigned long kvm_mmio_read_buf(const void *buf, unsigned int len);

int kvm_handle_mmio_return(struct kvm_vcpu *vcpu);
int io_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa);

int kvm_perf_init(void);
int kvm_perf_teardown(void);

long kvm_hypercall_pv_features(struct kvm_vcpu *vcpu);
gpa_t kvm_init_stolen_time(struct kvm_vcpu *vcpu);
void kvm_update_stolen_time(struct kvm_vcpu *vcpu);

bool kvm_arm_pvtime_supported(void);
int kvm_arm_pvtime_set_attr(struct kvm_vcpu *vcpu,
			    struct kvm_device_attr *attr);
int kvm_arm_pvtime_get_attr(struct kvm_vcpu *vcpu,
			    struct kvm_device_attr *attr);
int kvm_arm_pvtime_has_attr(struct kvm_vcpu *vcpu,
			    struct kvm_device_attr *attr);

static inline void kvm_arm_pvtime_vcpu_init(struct kvm_vcpu_arch *vcpu_arch)
{
	vcpu_arch->steal.base = GPA_INVALID;
}

static inline bool kvm_arm_is_pvtime_enabled(struct kvm_vcpu_arch *vcpu_arch)
{
	return (vcpu_arch->steal.base != GPA_INVALID);
}

void kvm_set_sei_esr(struct kvm_vcpu *vcpu, u64 syndrome);

struct kvm_vcpu *kvm_mpidr_to_vcpu(struct kvm *kvm, unsigned long mpidr);

DECLARE_KVM_HYP_PER_CPU(struct kvm_host_data, kvm_host_data);

static inline void kvm_init_host_cpu_context(struct kvm_cpu_context *cpu_ctxt)
{
	/* The host's MPIDR is immutable, so let's set it up at boot time */
	ctxt_sys_reg(cpu_ctxt, MPIDR_EL1) = read_cpuid_mpidr();
}

void kvm_arm_vcpu_ptrauth_trap(struct kvm_vcpu *vcpu);

static inline void kvm_arch_hardware_unsetup(void) {}
static inline void kvm_arch_sync_events(struct kvm *kvm) {}
static inline void kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu) {}
static inline void kvm_arch_vcpu_block_finish(struct kvm_vcpu *vcpu) {}

void kvm_arm_init_debug(void);
void kvm_arm_vcpu_init_debug(struct kvm_vcpu *vcpu);
void kvm_arm_setup_debug(struct kvm_vcpu *vcpu);
void kvm_arm_clear_debug(struct kvm_vcpu *vcpu);
void kvm_arm_reset_debug_ptr(struct kvm_vcpu *vcpu);
int kvm_arm_vcpu_arch_set_attr(struct kvm_vcpu *vcpu,
			       struct kvm_device_attr *attr);
int kvm_arm_vcpu_arch_get_attr(struct kvm_vcpu *vcpu,
			       struct kvm_device_attr *attr);
int kvm_arm_vcpu_arch_has_attr(struct kvm_vcpu *vcpu,
			       struct kvm_device_attr *attr);

long kvm_vm_ioctl_mte_copy_tags(struct kvm *kvm,
				struct kvm_arm_copy_mte_tags *copy_tags);

/* Guest/host FPSIMD coordination helpers */
int kvm_arch_vcpu_run_map_fp(struct kvm_vcpu *vcpu);
void kvm_arch_vcpu_load_fp(struct kvm_vcpu *vcpu);
void kvm_arch_vcpu_ctxsync_fp(struct kvm_vcpu *vcpu);
void kvm_arch_vcpu_put_fp(struct kvm_vcpu *vcpu);

static inline bool kvm_pmu_counter_deferred(struct perf_event_attr *attr)
{
	return (!has_vhe() && attr->exclude_host);
}

/* Flags for host debug state */
void kvm_arch_vcpu_load_debug_state_flags(struct kvm_vcpu *vcpu);
void kvm_arch_vcpu_put_debug_state_flags(struct kvm_vcpu *vcpu);

#ifdef CONFIG_KVM /* Avoid conflicts with core headers if CONFIG_KVM=n */
static inline int kvm_arch_vcpu_run_pid_change(struct kvm_vcpu *vcpu)
{
	return kvm_arch_vcpu_run_map_fp(vcpu);
}

void kvm_set_pmu_events(u32 set, struct perf_event_attr *attr);
void kvm_clr_pmu_events(u32 clr);

void kvm_vcpu_pmu_restore_guest(struct kvm_vcpu *vcpu);
void kvm_vcpu_pmu_restore_host(struct kvm_vcpu *vcpu);
#else
static inline void kvm_set_pmu_events(u32 set, struct perf_event_attr *attr) {}
static inline void kvm_clr_pmu_events(u32 clr) {}
#endif

void kvm_vcpu_load_sysregs_vhe(struct kvm_vcpu *vcpu);
void kvm_vcpu_put_sysregs_vhe(struct kvm_vcpu *vcpu);

int kvm_set_ipa_limit(void);

#define __KVM_HAVE_ARCH_VM_ALLOC
struct kvm *kvm_arch_alloc_vm(void);

int kvm_arm_setup_stage2(struct kvm *kvm, unsigned long type);

static inline bool kvm_vm_is_protected(struct kvm *kvm)
{
	return false;
}

void kvm_init_protected_traps(struct kvm_vcpu *vcpu);

int kvm_arm_vcpu_finalize(struct kvm_vcpu *vcpu, int feature);
bool kvm_arm_vcpu_is_finalized(struct kvm_vcpu *vcpu);

#define kvm_arm_vcpu_sve_finalized(vcpu) \
	((vcpu)->arch.flags & KVM_ARM64_VCPU_SVE_FINALIZED)

#define kvm_has_mte(kvm) (system_supports_mte() && (kvm)->arch.mte_enabled)
#define kvm_vcpu_has_pmu(vcpu)					\
	(test_bit(KVM_ARM_VCPU_PMU_V3, (vcpu)->arch.features))

int kvm_trng_call(struct kvm_vcpu *vcpu);
#ifdef CONFIG_KVM
extern phys_addr_t hyp_mem_base;
extern phys_addr_t hyp_mem_size;
void __init kvm_hyp_reserve(void);
#else
static inline void kvm_hyp_reserve(void) { }
#endif

#endif /* __ARM64_KVM_HOST_H__ */