/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2019 Western Digital Corporation or its affiliates.
 *
 * Authors:
 *	Anup Patel <anup.patel@wdc.com>
 */

#ifndef __RISCV_KVM_HOST_H__
#define __RISCV_KVM_HOST_H__

#include <linux/types.h>
#include <linux/kvm.h>
#include <linux/kvm_types.h>
#include <asm/csr.h>
#include <asm/kvm_vcpu_fp.h>
#include <asm/kvm_vcpu_timer.h>

#define KVM_MAX_VCPUS			\
	((HGATP_VMID_MASK >> HGATP_VMID_SHIFT) + 1)
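
/*
 * Worked example (illustrative; assumes the privileged-spec hgatp layout
 * with 7 VMID bits on RV32 and 14 on RV64): the expression above
 * evaluates to 2^7 = 128 on RV32 and 2^14 = 16384 on RV64.
 */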

#define KVM_HALT_POLL_NS_DEFAULT	500000

#define KVM_VCPU_MAX_FEATURES		0

#define KVM_REQ_SLEEP \
	KVM_ARCH_REQ_FLAGS(0, KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
#define KVM_REQ_VCPU_RESET		KVM_ARCH_REQ(1)
#define KVM_REQ_UPDATE_HGATP		KVM_ARCH_REQ(2)
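
/*
 * Illustrative sketch, not a quote of the in-kernel callers: these
 * requests are posted with the generic KVM request helpers, e.g.
 *
 *	kvm_make_request(KVM_REQ_UPDATE_HGATP, vcpu);
 *	kvm_vcpu_kick(vcpu);
 *
 * and consumed from the VCPU run loop, e.g.
 *
 *	if (kvm_check_request(KVM_REQ_UPDATE_HGATP, vcpu))
 *		kvm_riscv_stage2_update_hgatp(vcpu);
 */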

struct kvm_vm_stat {
	struct kvm_vm_stat_generic generic;
};

struct kvm_vcpu_stat {
	struct kvm_vcpu_stat_generic generic;
	u64 ecall_exit_stat;
	u64 wfi_exit_stat;
	u64 mmio_exit_user;
	u64 mmio_exit_kernel;
	u64 exits;
};

struct kvm_arch_memory_slot {
};

struct kvm_vmid {
	/*
	 * Writes to vmid_version and vmid happen with vmid_lock held
	 * whereas reads happen without any lock held.
	 */
	unsigned long vmid_version;
	unsigned long vmid;
};
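
/*
 * Reader-side sketch (illustrative): because reads are lockless, a VCPU
 * is expected to re-check its snapshot before entering the guest and
 * refresh a stale VMID, roughly:
 *
 *	if (kvm_riscv_stage2_vmid_ver_changed(&kvm->arch.vmid))
 *		kvm_riscv_stage2_vmid_update(vcpu);
 *
 * (Both helpers are declared later in this header.)
 */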

struct kvm_arch {
	/* stage2 vmid */
	struct kvm_vmid vmid;

	/* stage2 page table */
	pgd_t *pgd;
	phys_addr_t pgd_phys;

	/* Guest Timer */
	struct kvm_guest_timer timer;
};

struct kvm_mmio_decode {
	unsigned long insn;
	int insn_len;
	int len;
	int shift;
	int return_handled;
};
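
/*
 * Illustrative use of the decode fields (a sketch, not the exact
 * in-kernel code): on MMIO-load completion, "len" bytes from the run
 * structure are written to the rd register decoded from "insn", and
 * "shift" sign-extends sub-word loads, roughly:
 *
 *	val = (long)(data << shift) >> shift;
 */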

struct kvm_sbi_context {
	int return_handled;
};

#define KVM_MMU_PAGE_CACHE_NR_OBJS	32

struct kvm_mmu_page_cache {
	int nobjs;
	void *objects[KVM_MMU_PAGE_CACHE_NR_OBJS];
};
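
/*
 * Intended pattern (sketch with hypothetical helper names): the cache
 * is topped up in sleepable context so that page-table pages can be
 * taken later while a spinlock is held, roughly:
 *
 *	stage2_cache_topup(&vcpu->arch.mmu_page_cache, min, max); // may sleep
 *	spin_lock(&kvm->mmu_lock);
 *	new_pte = stage2_cache_alloc(&vcpu->arch.mmu_page_cache); // never sleeps
 *	spin_unlock(&kvm->mmu_lock);
 */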

struct kvm_cpu_trap {
	unsigned long sepc;
	unsigned long scause;
	unsigned long stval;
	unsigned long htval;
	unsigned long htinst;
};

struct kvm_cpu_context {
	unsigned long zero;
	unsigned long ra;
	unsigned long sp;
	unsigned long gp;
	unsigned long tp;
	unsigned long t0;
	unsigned long t1;
	unsigned long t2;
	unsigned long s0;
	unsigned long s1;
	unsigned long a0;
	unsigned long a1;
	unsigned long a2;
	unsigned long a3;
	unsigned long a4;
	unsigned long a5;
	unsigned long a6;
	unsigned long a7;
	unsigned long s2;
	unsigned long s3;
	unsigned long s4;
	unsigned long s5;
	unsigned long s6;
	unsigned long s7;
	unsigned long s8;
	unsigned long s9;
	unsigned long s10;
	unsigned long s11;
	unsigned long t3;
	unsigned long t4;
	unsigned long t5;
	unsigned long t6;
	unsigned long sepc;
	unsigned long sstatus;
	unsigned long hstatus;
	union __riscv_fp_state fp;
};

struct kvm_vcpu_csr {
	unsigned long vsstatus;
	unsigned long vsie;
	unsigned long vstvec;
	unsigned long vsscratch;
	unsigned long vsepc;
	unsigned long vscause;
	unsigned long vstval;
	unsigned long hvip;
	unsigned long vsatp;
	unsigned long scounteren;
};

struct kvm_vcpu_arch {
	/* VCPU ran at least once */
	bool ran_atleast_once;

	/* ISA feature bits (similar to MISA) */
	unsigned long isa;

	/* SSCRATCH, STVEC, and SCOUNTEREN of Host */
	unsigned long host_sscratch;
	unsigned long host_stvec;
	unsigned long host_scounteren;

	/* CPU context of Host */
	struct kvm_cpu_context host_context;

	/* CPU context of Guest VCPU */
	struct kvm_cpu_context guest_context;

	/* CPU CSR context of Guest VCPU */
	struct kvm_vcpu_csr guest_csr;

	/* CPU context upon Guest VCPU reset */
	struct kvm_cpu_context guest_reset_context;

	/* CPU CSR context upon Guest VCPU reset */
	struct kvm_vcpu_csr guest_reset_csr;

	/*
	 * VCPU interrupts
	 *
	 * We have a lockless approach for tracking pending VCPU interrupts
	 * implemented using atomic bitops. The irqs_pending bitmap represents
	 * pending interrupts, whereas irqs_pending_mask represents the bits
	 * that changed in irqs_pending. The approach is modeled on the
	 * multiple-producer, single-consumer problem, where the consumer is
	 * the VCPU itself.
	 */
	unsigned long irqs_pending;
	unsigned long irqs_pending_mask;
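
	/*
	 * Producer-side sketch (illustrative; close to what the interrupt
	 * injection helpers declared below are expected to do): set the
	 * pending bit, publish the change, then wake the consumer, e.g.
	 *
	 *	set_bit(irq, &vcpu->arch.irqs_pending);
	 *	smp_mb__before_atomic();
	 *	set_bit(irq, &vcpu->arch.irqs_pending_mask);
	 *	kvm_vcpu_kick(vcpu);
	 */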

	/* VCPU Timer */
	struct kvm_vcpu_timer timer;

	/* MMIO instruction details */
	struct kvm_mmio_decode mmio_decode;

	/* SBI context */
	struct kvm_sbi_context sbi_context;

	/* Cache pages needed to program page tables with spinlock held */
	struct kvm_mmu_page_cache mmu_page_cache;

	/* VCPU power-off state */
	bool power_off;

	/* Don't run the VCPU (blocked) */
	bool pause;

	/* SRCU lock index for in-kernel run loop */
	int srcu_idx;
};

static inline void kvm_arch_hardware_unsetup(void) {}
static inline void kvm_arch_sync_events(struct kvm *kvm) {}
static inline void kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu) {}
static inline void kvm_arch_vcpu_block_finish(struct kvm_vcpu *vcpu) {}

#define KVM_ARCH_WANT_MMU_NOTIFIER

void __kvm_riscv_hfence_gvma_vmid_gpa(unsigned long gpa_divby_4,
				      unsigned long vmid);
void __kvm_riscv_hfence_gvma_vmid(unsigned long vmid);
void __kvm_riscv_hfence_gvma_gpa(unsigned long gpa_divby_4);
void __kvm_riscv_hfence_gvma_all(void);
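
/*
 * Note on the gpa_divby_4 parameters (based on the privileged spec's
 * HFENCE.GVMA definition, where rs1 carries the guest physical address
 * shifted right by 2): callers are expected to pre-shift, e.g.
 *
 *	__kvm_riscv_hfence_gvma_vmid_gpa(gpa >> 2, vmid);
 */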

int kvm_riscv_stage2_map(struct kvm_vcpu *vcpu,
			 struct kvm_memory_slot *memslot,
			 gpa_t gpa, unsigned long hva, bool is_write);
void kvm_riscv_stage2_flush_cache(struct kvm_vcpu *vcpu);
int kvm_riscv_stage2_alloc_pgd(struct kvm *kvm);
void kvm_riscv_stage2_free_pgd(struct kvm *kvm);
void kvm_riscv_stage2_update_hgatp(struct kvm_vcpu *vcpu);
void kvm_riscv_stage2_mode_detect(void);
unsigned long kvm_riscv_stage2_mode(void);

void kvm_riscv_stage2_vmid_detect(void);
unsigned long kvm_riscv_stage2_vmid_bits(void);
int kvm_riscv_stage2_vmid_init(struct kvm *kvm);
bool kvm_riscv_stage2_vmid_ver_changed(struct kvm_vmid *vmid);
void kvm_riscv_stage2_vmid_update(struct kvm_vcpu *vcpu);

void __kvm_riscv_unpriv_trap(void);

unsigned long kvm_riscv_vcpu_unpriv_read(struct kvm_vcpu *vcpu,
					 bool read_insn,
					 unsigned long guest_addr,
					 struct kvm_cpu_trap *trap);
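
/*
 * Usage sketch (illustrative): the exit path can use this to fetch a
 * trapped guest instruction when the hardware did not report it,
 * forwarding any fault recorded in "trap" back to the guest, e.g.
 *
 *	insn = kvm_riscv_vcpu_unpriv_read(vcpu, true, ct->sepc, &utrap);
 *	if (utrap.scause)
 *		kvm_riscv_vcpu_trap_redirect(vcpu, &utrap);
 */
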
void kvm_riscv_vcpu_trap_redirect(struct kvm_vcpu *vcpu,
				  struct kvm_cpu_trap *trap);
int kvm_riscv_vcpu_mmio_return(struct kvm_vcpu *vcpu, struct kvm_run *run);
int kvm_riscv_vcpu_exit(struct kvm_vcpu *vcpu, struct kvm_run *run,
			struct kvm_cpu_trap *trap);

void __kvm_riscv_switch_to(struct kvm_vcpu_arch *vcpu_arch);

int kvm_riscv_vcpu_set_interrupt(struct kvm_vcpu *vcpu, unsigned int irq);
int kvm_riscv_vcpu_unset_interrupt(struct kvm_vcpu *vcpu, unsigned int irq);
void kvm_riscv_vcpu_flush_interrupts(struct kvm_vcpu *vcpu);
void kvm_riscv_vcpu_sync_interrupts(struct kvm_vcpu *vcpu);
bool kvm_riscv_vcpu_has_interrupts(struct kvm_vcpu *vcpu, unsigned long mask);
void kvm_riscv_vcpu_power_off(struct kvm_vcpu *vcpu);
void kvm_riscv_vcpu_power_on(struct kvm_vcpu *vcpu);

int kvm_riscv_vcpu_sbi_return(struct kvm_vcpu *vcpu, struct kvm_run *run);
int kvm_riscv_vcpu_sbi_ecall(struct kvm_vcpu *vcpu, struct kvm_run *run);

#endif /* __RISCV_KVM_HOST_H__ */