/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2020 - Google Inc
 * Author: Andrew Scull <ascull@google.com>
 */

#include <linux/linkage.h>

#include <asm/assembler.h>
#include <asm/kvm_arm.h>
#include <asm/kvm_asm.h>
#include <asm/kvm_mmu.h>

	.text

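/*
 * Save the host context and handle the trap at EL2. Entered with the host's
 * x0 and x1 already pushed onto the hyp stack by the host vectors.
 */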
SYM_FUNC_START(__host_exit)
	get_host_ctxt	x0, x1

	/* Store the host regs x2 and x3 */
	stp	x2, x3,   [x0, #CPU_XREG_OFFSET(2)]

	/* Retrieve the host regs x0-x1 from the stack */
	ldp	x2, x3, [sp], #16	// x0, x1

	/* Store the host regs x0-x1 and x4-x17 */
	stp	x2, x3,   [x0, #CPU_XREG_OFFSET(0)]
	stp	x4, x5,   [x0, #CPU_XREG_OFFSET(4)]
	stp	x6, x7,   [x0, #CPU_XREG_OFFSET(6)]
	stp	x8, x9,   [x0, #CPU_XREG_OFFSET(8)]
	stp	x10, x11, [x0, #CPU_XREG_OFFSET(10)]
	stp	x12, x13, [x0, #CPU_XREG_OFFSET(12)]
	stp	x14, x15, [x0, #CPU_XREG_OFFSET(14)]
	stp	x16, x17, [x0, #CPU_XREG_OFFSET(16)]

	/* Store the host regs x18-x29, lr */
	save_callee_saved_regs x0

	/* Save the host context pointer in x29 across the function call */
	mov	x29, x0
	bl	handle_trap

	/* Restore host regs x0-x17 */
__host_enter_restore_full:
	ldp	x0, x1,   [x29, #CPU_XREG_OFFSET(0)]
	ldp	x2, x3,   [x29, #CPU_XREG_OFFSET(2)]
	ldp	x4, x5,   [x29, #CPU_XREG_OFFSET(4)]
	ldp	x6, x7,   [x29, #CPU_XREG_OFFSET(6)]

	/* x0-x7 are used for panic arguments */
__host_enter_for_panic:
	ldp	x8, x9,   [x29, #CPU_XREG_OFFSET(8)]
	ldp	x10, x11, [x29, #CPU_XREG_OFFSET(10)]
	ldp	x12, x13, [x29, #CPU_XREG_OFFSET(12)]
	ldp	x14, x15, [x29, #CPU_XREG_OFFSET(14)]
	ldp	x16, x17, [x29, #CPU_XREG_OFFSET(16)]

	/* Restore host regs x18-x29, lr */
	restore_callee_saved_regs x29

	/* Do not touch any register after this! */
__host_enter_without_restoring:
	eret
	sb
SYM_FUNC_END(__host_exit)

/*
 * void __noreturn __host_enter(struct kvm_cpu_context *host_ctxt);
 */
SYM_FUNC_START(__host_enter)
	mov	x29, x0
	b	__host_enter_restore_full
SYM_FUNC_END(__host_enter)

/*
 * void __noreturn __hyp_do_panic(struct kvm_cpu_context *host_ctxt, u64 spsr,
 * 				  u64 elr, u64 par);
 */
SYM_FUNC_START(__hyp_do_panic)
	/* Prepare and exit to the host's panic function. */
	mov	lr, #(PSR_F_BIT | PSR_I_BIT | PSR_A_BIT | PSR_D_BIT |\
		      PSR_MODE_EL1h)
	msr	spsr_el2, lr
	ldr	lr, =nvhe_hyp_panic_handler
	hyp_kimg_va lr, x6
	msr	elr_el2, lr

	mov	x29, x0
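	/* x29 holds host_ctxt; NULL means there is no host context to restore. */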

#ifdef CONFIG_NVHE_EL2_DEBUG
	/* Ensure host stage-2 is disabled */
	mrs	x0, hcr_el2
	bic	x0, x0, #HCR_VM
	msr	hcr_el2, x0
	isb
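	/* Invalidate all stage-1 and stage-2 TLB entries for the current VMID. */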
	tlbi	vmalls12e1
	dsb	nsh
#endif

	/* Load the panic arguments into x0-x7 */
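	/*
	 * x0 = ESR_EL2, x1 = SPSR, x2 = ELR, x3 = hyp PA of ELR,
	 * x4 = PAR_EL1, x5 = loaded vCPU pointer, x6 = FAR_EL2, x7 = HPFAR_EL2
	 */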
	mrs	x0, esr_el2
	mov	x4, x3
	mov	x3, x2
	hyp_pa	x3, x6
	get_vcpu_ptr x5, x6
	mrs	x6, far_el2
	mrs	x7, hpfar_el2

	/* Enter the host, conditionally restoring the host context. */
	cbz	x29, __host_enter_without_restoring
	b	__host_enter_for_panic
SYM_FUNC_END(__hyp_do_panic)

SYM_FUNC_START(__host_hvc)
	ldp	x0, x1, [sp]		// Don't fixup the stack yet

	/* No stub for you, sonny Jim */
alternative_if ARM64_KVM_PROTECTED_MODE
	b	__host_exit
alternative_else_nop_endif

	/* Check for a stub HVC call */
	cmp	x0, #HVC_STUB_HCALL_NR
	b.hs	__host_exit

	add	sp, sp, #16
	/*
	 * Compute the idmap address of __kvm_handle_stub_hvc and
	 * jump there. Since we use kimage_voffset, do not use the
	 * HYP VA for __kvm_handle_stub_hvc, but the kernel VA instead
	 * (by loading it from the constant pool).
	 *
	 * Preserve x0-x4, which may contain stub parameters.
	 */
	ldr	x5, =__kvm_handle_stub_hvc
	hyp_pa	x5, x6
	br	x5
SYM_FUNC_END(__host_hvc)

.macro host_el1_sync_vect
	.align 7
.L__vect_start\@:
	stp	x0, x1, [sp, #-16]!
	mrs	x0, esr_el2
	ubfx	x0, x0, #ESR_ELx_EC_SHIFT, #ESR_ELx_EC_WIDTH
	cmp	x0, #ESR_ELx_EC_HVC64
	b.eq	__host_hvc
	b	__host_exit
.L__vect_end\@:
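	/* A vector entry is 0x80 bytes; make sure the handler fits. */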
.if ((.L__vect_end\@ - .L__vect_start\@) > 0x80)
	.error "host_el1_sync_vect larger than vector entry"
.endif
.endm

.macro invalid_host_el2_vect
	.align 7
	/* If a guest is loaded, panic out of it. */
	stp	x0, x1, [sp, #-16]!
	get_loaded_vcpu x0, x1
	cbnz	x0, __guest_exit_panic
	add	sp, sp, #16

	/*
	 * The panic may not be clean if the exception is taken before the host
	 * context has been saved by __host_exit or after the hyp context has
	 * been partially clobbered by __host_enter.
	 */
	b	hyp_panic
.endm

.macro invalid_host_el1_vect
	.align 7
	mov	x0, xzr		/* restore_host = false */
	mrs	x1, spsr_el2
	mrs	x2, elr_el2
	mrs	x3, par_el1
	b	__hyp_do_panic
.endm

/*
 * The host vector does not use an ESB instruction in order to avoid consuming
 * SErrors that should only be consumed by the host. Guest entry is deferred by
 * __guest_enter if there are any pending asynchronous exceptions so hyp will
 * always return to the host without having consumed host SErrors.
 *
 * CONFIG_KVM_INDIRECT_VECTORS is not applied to the host vectors because the
 * host knows about the EL2 vectors already, and there is no point in hiding
 * them.
 */
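/* The vector base must be 2kB aligned, as required by VBAR_EL2. */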
	.align 11
SYM_CODE_START(__kvm_hyp_host_vector)
	invalid_host_el2_vect			// Synchronous EL2t
	invalid_host_el2_vect			// IRQ EL2t
	invalid_host_el2_vect			// FIQ EL2t
	invalid_host_el2_vect			// Error EL2t

	invalid_host_el2_vect			// Synchronous EL2h
	invalid_host_el2_vect			// IRQ EL2h
	invalid_host_el2_vect			// FIQ EL2h
	invalid_host_el2_vect			// Error EL2h

	host_el1_sync_vect			// Synchronous 64-bit EL1
	invalid_host_el1_vect			// IRQ 64-bit EL1
	invalid_host_el1_vect			// FIQ 64-bit EL1
	invalid_host_el1_vect			// Error 64-bit EL1

	invalid_host_el1_vect			// Synchronous 32-bit EL1
	invalid_host_el1_vect			// IRQ 32-bit EL1
	invalid_host_el1_vect			// FIQ 32-bit EL1
	invalid_host_el1_vect			// Error 32-bit EL1
SYM_CODE_END(__kvm_hyp_host_vector)

/*
 * Forward SMC with arguments in struct kvm_cpu_context, and
 * store the result into the same struct. Assumes SMCCC 1.2 or older.
 *
 * x0: struct kvm_cpu_context*
 */
SYM_CODE_START(__kvm_hyp_host_forward_smc)
	/*
	 * Use x18 to keep the pointer to the host context because
	 * x18 is callee-saved in SMCCC but not in AAPCS64.
	 */
	mov	x18, x0

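	/* Load the SMCCC arguments from the host context. */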
	ldp	x0, x1,   [x18, #CPU_XREG_OFFSET(0)]
	ldp	x2, x3,   [x18, #CPU_XREG_OFFSET(2)]
	ldp	x4, x5,   [x18, #CPU_XREG_OFFSET(4)]
	ldp	x6, x7,   [x18, #CPU_XREG_OFFSET(6)]
	ldp	x8, x9,   [x18, #CPU_XREG_OFFSET(8)]
	ldp	x10, x11, [x18, #CPU_XREG_OFFSET(10)]
	ldp	x12, x13, [x18, #CPU_XREG_OFFSET(12)]
	ldp	x14, x15, [x18, #CPU_XREG_OFFSET(14)]
	ldp	x16, x17, [x18, #CPU_XREG_OFFSET(16)]

	smc	#0

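	/* SMCCC v1.2 allows results in x0-x17, so store them all back. */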
	stp	x0, x1,   [x18, #CPU_XREG_OFFSET(0)]
	stp	x2, x3,   [x18, #CPU_XREG_OFFSET(2)]
	stp	x4, x5,   [x18, #CPU_XREG_OFFSET(4)]
	stp	x6, x7,   [x18, #CPU_XREG_OFFSET(6)]
	stp	x8, x9,   [x18, #CPU_XREG_OFFSET(8)]
	stp	x10, x11, [x18, #CPU_XREG_OFFSET(10)]
	stp	x12, x13, [x18, #CPU_XREG_OFFSET(12)]
	stp	x14, x15, [x18, #CPU_XREG_OFFSET(14)]
	stp	x16, x17, [x18, #CPU_XREG_OFFSET(16)]

	ret
SYM_CODE_END(__kvm_hyp_host_forward_smc)
