// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2019 Western Digital Corporation or its affiliates.
 *
 * Authors:
 *	Anup Patel <anup.patel@wdc.com>
 */

#include <linux/bitops.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/kvm_host.h>
#include <asm/csr.h>

#define INSN_OPCODE_MASK	0x007c
#define INSN_OPCODE_SHIFT	2
#define INSN_OPCODE_SYSTEM	28

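/*
 * INSN_MASK_WFI covers all 32 instruction bits, so only the canonical
 * WFI encoding (funct12=0x105, rs1=x0, rd=x0) matches; any other
 * SYSTEM-opcode instruction falls through to truly_illegal_insn().
 */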
#define INSN_MASK_WFI		0xffffffff
#define INSN_MATCH_WFI		0x10500073

#define INSN_MATCH_LB		0x3
#define INSN_MASK_LB		0x707f
#define INSN_MATCH_LH		0x1003
#define INSN_MASK_LH		0x707f
#define INSN_MATCH_LW		0x2003
#define INSN_MASK_LW		0x707f
#define INSN_MATCH_LD		0x3003
#define INSN_MASK_LD		0x707f
#define INSN_MATCH_LBU		0x4003
#define INSN_MASK_LBU		0x707f
#define INSN_MATCH_LHU		0x5003
#define INSN_MASK_LHU		0x707f
#define INSN_MATCH_LWU		0x6003
#define INSN_MASK_LWU		0x707f
#define INSN_MATCH_SB		0x23
#define INSN_MASK_SB		0x707f
#define INSN_MATCH_SH		0x1023
#define INSN_MASK_SH		0x707f
#define INSN_MATCH_SW		0x2023
#define INSN_MASK_SW		0x707f
#define INSN_MATCH_SD		0x3023
#define INSN_MASK_SD		0x707f

#define INSN_MATCH_C_LD		0x6000
#define INSN_MASK_C_LD		0xe003
#define INSN_MATCH_C_SD		0xe000
#define INSN_MASK_C_SD		0xe003
#define INSN_MATCH_C_LW		0x4000
#define INSN_MASK_C_LW		0xe003
#define INSN_MATCH_C_SW		0xc000
#define INSN_MASK_C_SW		0xe003
#define INSN_MATCH_C_LDSP	0x6002
#define INSN_MASK_C_LDSP	0xe003
#define INSN_MATCH_C_SDSP	0xe002
#define INSN_MASK_C_SDSP	0xe003
#define INSN_MATCH_C_LWSP	0x4002
#define INSN_MASK_C_LWSP	0xe003
#define INSN_MATCH_C_SWSP	0xc002
#define INSN_MASK_C_SWSP	0xe003

#define INSN_16BIT_MASK		0x3

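/*
 * A 32-bit instruction always has 11 in its two low opcode bits;
 * compressed (RVC) instructions use 00, 01 or 10 there.  For example,
 * WFI (0x10500073) gives (insn & 0x3) == 0x3, so INSN_LEN() is 4,
 * while a compressed load such as "c.lw a3,4(a4)" (0x4354) has 00 in
 * the low bits and length 2.
 */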
#define INSN_IS_16BIT(insn)	(((insn) & INSN_16BIT_MASK) != INSN_16BIT_MASK)

#define INSN_LEN(insn)		(INSN_IS_16BIT(insn) ? 2 : 4)

#ifdef CONFIG_64BIT
#define LOG_REGBYTES		3
#else
#define LOG_REGBYTES		2
#endif
#define REGBYTES		(1 << LOG_REGBYTES)

#define SH_RD			7
#define SH_RS1			15
#define SH_RS2			20
#define SH_RS2C			2

#define RV_X(x, s, n)		(((x) >> (s)) & ((1 << (n)) - 1))
#define RVC_LW_IMM(x)		((RV_X(x, 6, 1) << 2) | \
				 (RV_X(x, 10, 3) << 3) | \
				 (RV_X(x, 5, 1) << 6))
#define RVC_LD_IMM(x)		((RV_X(x, 10, 3) << 3) | \
				 (RV_X(x, 5, 2) << 6))
#define RVC_LWSP_IMM(x)		((RV_X(x, 4, 3) << 2) | \
				 (RV_X(x, 12, 1) << 5) | \
				 (RV_X(x, 2, 2) << 6))
#define RVC_LDSP_IMM(x)		((RV_X(x, 5, 2) << 3) | \
				 (RV_X(x, 12, 1) << 5) | \
				 (RV_X(x, 2, 3) << 6))
#define RVC_SWSP_IMM(x)		((RV_X(x, 9, 4) << 2) | \
				 (RV_X(x, 7, 2) << 6))
#define RVC_SDSP_IMM(x)		((RV_X(x, 10, 3) << 3) | \
				 (RV_X(x, 7, 3) << 6))
#define RVC_RS1S(insn)		(8 + RV_X(insn, SH_RD, 3))
#define RVC_RS2S(insn)		(8 + RV_X(insn, SH_RS2C, 3))
#define RVC_RS2(insn)		RV_X(insn, SH_RS2C, 5)
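/*
 * Worked example: "c.lw a3,4(a4)" encodes as 0x4354.  RVC_LW_IMM()
 * reassembles imm[2] from bit 6, imm[5:3] from bits 12:10 and imm[6]
 * from bit 5, giving offset 4; RVC_RS1S() yields 8 + 6 = 14 (a4) and
 * RVC_RS2S() yields 8 + 5 = 13 (a3), since the 3-bit RVC register
 * fields can only name x8-x15.
 */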

#define SHIFT_RIGHT(x, y)		\
	((y) < 0 ? ((x) << -(y)) : ((x) >> (y)))

#define REG_MASK			\
	((1 << (5 + LOG_REGBYTES)) - (1 << LOG_REGBYTES))

#define REG_OFFSET(insn, pos)		\
	(SHIFT_RIGHT((insn), (pos) - LOG_REGBYTES) & REG_MASK)

#define REG_PTR(insn, pos, regs)	\
	((ulong *)((ulong)(regs) + REG_OFFSET(insn, pos)))

#define GET_RM(insn)		(((insn) >> 12) & 7)

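/*
 * The accessors below index struct kvm_cpu_context directly: the
 * register field of the instruction is shifted so that it becomes a
 * byte offset, i.e. regnum * REGBYTES.  For rs1 = 14 (a4) on 64-bit
 * this is (insn >> 12) & 0xf8 == 112.  This relies on the GPRs being
 * laid out at the start of the context in x0..x31 order.
 */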
#define GET_RS1(insn, regs)	(*REG_PTR(insn, SH_RS1, regs))
#define GET_RS2(insn, regs)	(*REG_PTR(insn, SH_RS2, regs))
#define GET_RS1S(insn, regs)	(*REG_PTR(RVC_RS1S(insn), 0, regs))
#define GET_RS2S(insn, regs)	(*REG_PTR(RVC_RS2S(insn), 0, regs))
#define GET_RS2C(insn, regs)	(*REG_PTR(insn, SH_RS2C, regs))
#define GET_SP(regs)		(*REG_PTR(2, 0, regs))
#define SET_RD(insn, regs, val)	(*REG_PTR(insn, SH_RD, regs) = (val))
#define IMM_I(insn)		((s32)(insn) >> 20)
#define IMM_S(insn)		(((s32)(insn) >> 25 << 5) | \
				 (s32)(((insn) >> 7) & 0x1f))
#define MASK_FUNCT3		0x7000

static int truly_illegal_insn(struct kvm_vcpu *vcpu,
			      struct kvm_run *run,
			      ulong insn)
{
	struct kvm_cpu_trap utrap = { 0 };

	/* Redirect trap to Guest VCPU */
	utrap.sepc = vcpu->arch.guest_context.sepc;
	utrap.scause = EXC_INST_ILLEGAL;
	utrap.stval = insn;
	kvm_riscv_vcpu_trap_redirect(vcpu, &utrap);

	return 1;
}

static int system_opcode_insn(struct kvm_vcpu *vcpu,
			      struct kvm_run *run,
			      ulong insn)
{
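	/*
	 * WFI is the only SYSTEM-opcode instruction emulated here: block
	 * the VCPU until it is runnable again, then step sepc past the
	 * instruction.  Everything else is forwarded to the guest as an
	 * illegal instruction trap.
	 */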
	if ((insn & INSN_MASK_WFI) == INSN_MATCH_WFI) {
		vcpu->stat.wfi_exit_stat++;
		if (!kvm_arch_vcpu_runnable(vcpu)) {
			srcu_read_unlock(&vcpu->kvm->srcu, vcpu->arch.srcu_idx);
			kvm_vcpu_block(vcpu);
			vcpu->arch.srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
			kvm_clear_request(KVM_REQ_UNHALT, vcpu);
		}
		vcpu->arch.guest_context.sepc += INSN_LEN(insn);
		return 1;
	}

	return truly_illegal_insn(vcpu, run, insn);
}

static int virtual_inst_fault(struct kvm_vcpu *vcpu, struct kvm_run *run,
			      struct kvm_cpu_trap *trap)
{
	unsigned long insn = trap->stval;
	struct kvm_cpu_trap utrap = { 0 };
	struct kvm_cpu_context *ct;

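	/*
	 * stval normally carries the trapping instruction.  If it is
	 * zero, the hardware did not supply it, so fetch the instruction
	 * from guest memory at sepc.  None of the instructions that can
	 * raise a virtual instruction trap have compressed forms, so a
	 * value that is still 16-bit wide is forwarded as illegal.
	 */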
	if (unlikely(INSN_IS_16BIT(insn))) {
		if (insn == 0) {
			ct = &vcpu->arch.guest_context;
			insn = kvm_riscv_vcpu_unpriv_read(vcpu, true,
							  ct->sepc,
							  &utrap);
			if (utrap.scause) {
				utrap.sepc = ct->sepc;
				kvm_riscv_vcpu_trap_redirect(vcpu, &utrap);
				return 1;
			}
		}
		if (INSN_IS_16BIT(insn))
			return truly_illegal_insn(vcpu, run, insn);
	}

	switch ((insn & INSN_OPCODE_MASK) >> INSN_OPCODE_SHIFT) {
	case INSN_OPCODE_SYSTEM:
		return system_opcode_insn(vcpu, run, insn);
	default:
		return truly_illegal_insn(vcpu, run, insn);
	}
}

static int emulate_load(struct kvm_vcpu *vcpu, struct kvm_run *run,
			unsigned long fault_addr, unsigned long htinst)
{
	u8 data_buf[8];
	unsigned long insn;
	int shift = 0, len = 0, insn_len = 0;
	struct kvm_cpu_trap utrap = { 0 };
	struct kvm_cpu_context *ct = &vcpu->arch.guest_context;

	/* Determine trapped instruction */
	if (htinst & 0x1) {
		/*
		 * Bit[0] == 1 implies trapped instruction value is
		 * transformed instruction or custom instruction.
		 */
		insn = htinst | INSN_16BIT_MASK;
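		/*
		 * Setting the two low bits makes the transformed value
		 * decode like a regular 32-bit instruction.  Per the
		 * hypervisor extension, bit[1] of htinst says whether the
		 * original instruction was 32-bit (1) or 16-bit (0).
		 */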
		insn_len = (htinst & BIT(1)) ? INSN_LEN(insn) : 2;
	} else {
		/*
		 * Bit[0] == 0 implies trapped instruction value is
		 * zero or special value.
		 */
		insn = kvm_riscv_vcpu_unpriv_read(vcpu, true, ct->sepc,
						  &utrap);
		if (utrap.scause) {
			/* Redirect trap if we failed to read instruction */
			utrap.sepc = ct->sepc;
			kvm_riscv_vcpu_trap_redirect(vcpu, &utrap);
			return 1;
		}
		insn_len = INSN_LEN(insn);
	}

	/* Decode length of MMIO and shift */
	if ((insn & INSN_MASK_LW) == INSN_MATCH_LW) {
		len = 4;
		shift = 8 * (sizeof(ulong) - len);
	} else if ((insn & INSN_MASK_LB) == INSN_MATCH_LB) {
		len = 1;
		shift = 8 * (sizeof(ulong) - len);
	} else if ((insn & INSN_MASK_LBU) == INSN_MATCH_LBU) {
		len = 1;
		shift = 8 * (sizeof(ulong) - len);
#ifdef CONFIG_64BIT
	} else if ((insn & INSN_MASK_LD) == INSN_MATCH_LD) {
		len = 8;
		shift = 8 * (sizeof(ulong) - len);
	} else if ((insn & INSN_MASK_LWU) == INSN_MATCH_LWU) {
		len = 4;
#endif
	} else if ((insn & INSN_MASK_LH) == INSN_MATCH_LH) {
		len = 2;
		shift = 8 * (sizeof(ulong) - len);
	} else if ((insn & INSN_MASK_LHU) == INSN_MATCH_LHU) {
		len = 2;
#ifdef CONFIG_64BIT
	} else if ((insn & INSN_MASK_C_LD) == INSN_MATCH_C_LD) {
		len = 8;
		shift = 8 * (sizeof(ulong) - len);
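		/*
		 * Rewrite insn so that the decoded rd' register number
		 * lands in the regular RD field (bits 11:7);
		 * kvm_riscv_vcpu_mmio_return() can then apply SET_RD()
		 * without caring whether the original instruction was
		 * compressed.  C.LW below uses the same trick.
		 */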
		insn = RVC_RS2S(insn) << SH_RD;
	} else if ((insn & INSN_MASK_C_LDSP) == INSN_MATCH_C_LDSP &&
		   ((insn >> SH_RD) & 0x1f)) {
		len = 8;
		shift = 8 * (sizeof(ulong) - len);
#endif
	} else if ((insn & INSN_MASK_C_LW) == INSN_MATCH_C_LW) {
		len = 4;
		shift = 8 * (sizeof(ulong) - len);
		insn = RVC_RS2S(insn) << SH_RD;
	} else if ((insn & INSN_MASK_C_LWSP) == INSN_MATCH_C_LWSP &&
		   ((insn >> SH_RD) & 0x1f)) {
		len = 4;
		shift = 8 * (sizeof(ulong) - len);
	} else {
		return -EOPNOTSUPP;
	}

	/* Fault address should be aligned to length of MMIO */
	if (fault_addr & (len - 1))
		return -EIO;

	/* Save instruction decode info */
	vcpu->arch.mmio_decode.insn = insn;
	vcpu->arch.mmio_decode.insn_len = insn_len;
	vcpu->arch.mmio_decode.shift = shift;
	vcpu->arch.mmio_decode.len = len;
	vcpu->arch.mmio_decode.return_handled = 0;

	/* Update MMIO details in kvm_run struct */
	run->mmio.is_write = false;
	run->mmio.phys_addr = fault_addr;
	run->mmio.len = len;

	/* Try to handle MMIO access in the kernel */
	if (!kvm_io_bus_read(vcpu, KVM_MMIO_BUS, fault_addr, len, data_buf)) {
		/* Successfully handled MMIO access in the kernel so resume */
		memcpy(run->mmio.data, data_buf, len);
		vcpu->stat.mmio_exit_kernel++;
		kvm_riscv_vcpu_mmio_return(vcpu, run);
		return 1;
	}

	/* Exit to userspace for MMIO emulation */
	vcpu->stat.mmio_exit_user++;
	run->exit_reason = KVM_EXIT_MMIO;

	return 0;
}

static int emulate_store(struct kvm_vcpu *vcpu, struct kvm_run *run,
			 unsigned long fault_addr, unsigned long htinst)
{
	u8 data8;
	u16 data16;
	u32 data32;
	u64 data64;
	ulong data;
	unsigned long insn;
	int len = 0, insn_len = 0;
	struct kvm_cpu_trap utrap = { 0 };
	struct kvm_cpu_context *ct = &vcpu->arch.guest_context;

	/* Determine trapped instruction */
	if (htinst & 0x1) {
		/*
		 * Bit[0] == 1 implies trapped instruction value is
		 * transformed instruction or custom instruction.
		 */
		insn = htinst | INSN_16BIT_MASK;
		insn_len = (htinst & BIT(1)) ? INSN_LEN(insn) : 2;
	} else {
		/*
		 * Bit[0] == 0 implies trapped instruction value is
		 * zero or special value.
		 */
		insn = kvm_riscv_vcpu_unpriv_read(vcpu, true, ct->sepc,
						  &utrap);
		if (utrap.scause) {
			/* Redirect trap if we failed to read instruction */
			utrap.sepc = ct->sepc;
			kvm_riscv_vcpu_trap_redirect(vcpu, &utrap);
			return 1;
		}
		insn_len = INSN_LEN(insn);
	}

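	/*
	 * Fetch the store value up front assuming the regular RS2 field;
	 * the compressed cases below override it from the rs2' field
	 * (GET_RS2S) or the c.rs2 field of the SP-relative forms
	 * (GET_RS2C).  data8/16/32/64 are truncated copies, one per
	 * access width.
	 */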
	data = GET_RS2(insn, &vcpu->arch.guest_context);
	data8 = data16 = data32 = data64 = data;

	if ((insn & INSN_MASK_SW) == INSN_MATCH_SW) {
		len = 4;
	} else if ((insn & INSN_MASK_SB) == INSN_MATCH_SB) {
		len = 1;
#ifdef CONFIG_64BIT
	} else if ((insn & INSN_MASK_SD) == INSN_MATCH_SD) {
		len = 8;
#endif
	} else if ((insn & INSN_MASK_SH) == INSN_MATCH_SH) {
		len = 2;
#ifdef CONFIG_64BIT
	} else if ((insn & INSN_MASK_C_SD) == INSN_MATCH_C_SD) {
		len = 8;
		data64 = GET_RS2S(insn, &vcpu->arch.guest_context);
	} else if ((insn & INSN_MASK_C_SDSP) == INSN_MATCH_C_SDSP &&
		   ((insn >> SH_RD) & 0x1f)) {
		len = 8;
		data64 = GET_RS2C(insn, &vcpu->arch.guest_context);
#endif
	} else if ((insn & INSN_MASK_C_SW) == INSN_MATCH_C_SW) {
		len = 4;
		data32 = GET_RS2S(insn, &vcpu->arch.guest_context);
	} else if ((insn & INSN_MASK_C_SWSP) == INSN_MATCH_C_SWSP &&
		   ((insn >> SH_RD) & 0x1f)) {
		len = 4;
		data32 = GET_RS2C(insn, &vcpu->arch.guest_context);
	} else {
		return -EOPNOTSUPP;
	}

	/* Fault address should be aligned to length of MMIO */
	if (fault_addr & (len - 1))
		return -EIO;

	/* Save instruction decode info */
	vcpu->arch.mmio_decode.insn = insn;
	vcpu->arch.mmio_decode.insn_len = insn_len;
	vcpu->arch.mmio_decode.shift = 0;
	vcpu->arch.mmio_decode.len = len;
	vcpu->arch.mmio_decode.return_handled = 0;

	/* Copy data to kvm_run instance */
	switch (len) {
	case 1:
		*((u8 *)run->mmio.data) = data8;
		break;
	case 2:
		*((u16 *)run->mmio.data) = data16;
		break;
	case 4:
		*((u32 *)run->mmio.data) = data32;
		break;
	case 8:
		*((u64 *)run->mmio.data) = data64;
		break;
	default:
		return -EOPNOTSUPP;
	}

	/* Update MMIO details in kvm_run struct */
	run->mmio.is_write = true;
	run->mmio.phys_addr = fault_addr;
	run->mmio.len = len;

	/* Try to handle MMIO access in the kernel */
	if (!kvm_io_bus_write(vcpu, KVM_MMIO_BUS,
			      fault_addr, len, run->mmio.data)) {
		/* Successfully handled MMIO access in the kernel so resume */
		vcpu->stat.mmio_exit_kernel++;
		kvm_riscv_vcpu_mmio_return(vcpu, run);
		return 1;
	}

	/* Exit to userspace for MMIO emulation */
	vcpu->stat.mmio_exit_user++;
	run->exit_reason = KVM_EXIT_MMIO;

	return 0;
}

static int stage2_page_fault(struct kvm_vcpu *vcpu, struct kvm_run *run,
			     struct kvm_cpu_trap *trap)
{
	struct kvm_memory_slot *memslot;
	unsigned long hva, fault_addr;
	bool writeable;
	gfn_t gfn;
	int ret;

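	/*
	 * htval holds the faulting guest physical address shifted right
	 * by two; the low two bits are recovered from stval.
	 */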
	fault_addr = (trap->htval << 2) | (trap->stval & 0x3);
	gfn = fault_addr >> PAGE_SHIFT;
	memslot = gfn_to_memslot(vcpu->kvm, gfn);
	hva = gfn_to_hva_memslot_prot(memslot, gfn, &writeable);

	if (kvm_is_error_hva(hva) ||
	    (trap->scause == EXC_STORE_GUEST_PAGE_FAULT && !writeable)) {
		switch (trap->scause) {
		case EXC_LOAD_GUEST_PAGE_FAULT:
			return emulate_load(vcpu, run, fault_addr,
					    trap->htinst);
		case EXC_STORE_GUEST_PAGE_FAULT:
			return emulate_store(vcpu, run, fault_addr,
					     trap->htinst);
		default:
			return -EOPNOTSUPP;
		}
	}

	ret = kvm_riscv_stage2_map(vcpu, memslot, fault_addr, hva,
				   trap->scause == EXC_STORE_GUEST_PAGE_FAULT);
	if (ret < 0)
		return ret;

	return 1;
}

/**
 * kvm_riscv_vcpu_unpriv_read -- Read machine word from Guest memory
 *
 * @vcpu: The VCPU pointer
 * @read_insn: Flag representing whether we are reading instruction
 * @guest_addr: Guest address to read
 * @trap: Output pointer to trap details
 */
unsigned long kvm_riscv_vcpu_unpriv_read(struct kvm_vcpu *vcpu,
					 bool read_insn,
					 unsigned long guest_addr,
					 struct kvm_cpu_trap *trap)
{
	register unsigned long taddr asm("a0") = (unsigned long)trap;
	register unsigned long ttmp asm("a1");
	register unsigned long val asm("t0");
	register unsigned long tmp asm("t1");
	register unsigned long addr asm("t2") = guest_addr;
	unsigned long flags;
	unsigned long old_stvec, old_hstatus;

	local_irq_save(flags);

	old_hstatus = csr_swap(CSR_HSTATUS, vcpu->arch.guest_context.hstatus);
	old_stvec = csr_swap(CSR_STVEC, (ulong)&__kvm_riscv_unpriv_trap);

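	/*
	 * With the guest's hstatus and a private stvec in place, the
	 * HLV/HLVX hypervisor loads below are translated and
	 * permission-checked like guest accesses, and any fault vectors
	 * to __kvm_riscv_unpriv_trap, which fills *trap (passed in a0).
	 * Instructions are fetched as one or two halfword HLVX.HU loads,
	 * since a 32-bit instruction may cross a page boundary and HLVX
	 * checks execute rather than read permission.
	 */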
	if (read_insn) {
		/*
		 * HLVX.HU instruction
		 * 0110010 00011 rs1 100 rd 1110011
		 */
		asm volatile ("\n"
			".option push\n"
			".option norvc\n"
			"add %[ttmp], %[taddr], 0\n"
			/*
			 * HLVX.HU %[val], (%[addr])
			 * HLVX.HU t0, (t2)
			 * 0110010 00011 00111 100 00101 1110011
			 */
			".word 0x6433c2f3\n"
			"andi %[tmp], %[val], 3\n"
			"addi %[tmp], %[tmp], -3\n"
			"bne %[tmp], zero, 2f\n"
			"addi %[addr], %[addr], 2\n"
			/*
			 * HLVX.HU %[tmp], (%[addr])
			 * HLVX.HU t1, (t2)
			 * 0110010 00011 00111 100 00110 1110011
			 */
			".word 0x6433c373\n"
			"sll %[tmp], %[tmp], 16\n"
			"add %[val], %[val], %[tmp]\n"
			"2:\n"
			".option pop"
		: [val] "=&r" (val), [tmp] "=&r" (tmp),
		  [taddr] "+&r" (taddr), [ttmp] "+&r" (ttmp),
		  [addr] "+&r" (addr) : : "memory");

		if (trap->scause == EXC_LOAD_PAGE_FAULT)
			trap->scause = EXC_INST_PAGE_FAULT;
	} else {
		/*
		 * HLV.D instruction
		 * 0110110 00000 rs1 100 rd 1110011
		 *
		 * HLV.W instruction
		 * 0110100 00000 rs1 100 rd 1110011
		 */
		asm volatile ("\n"
			".option push\n"
			".option norvc\n"
			"add %[ttmp], %[taddr], 0\n"
#ifdef CONFIG_64BIT
			/*
			 * HLV.D %[val], (%[addr])
			 * HLV.D t0, (t2)
			 * 0110110 00000 00111 100 00101 1110011
			 */
			".word 0x6c03c2f3\n"
#else
			/*
			 * HLV.W %[val], (%[addr])
			 * HLV.W t0, (t2)
			 * 0110100 00000 00111 100 00101 1110011
			 */
			".word 0x6803c2f3\n"
#endif
			".option pop"
		: [val] "=&r" (val),
		  [taddr] "+&r" (taddr), [ttmp] "+&r" (ttmp)
		: [addr] "r" (addr) : "memory");
	}

	csr_write(CSR_STVEC, old_stvec);
	csr_write(CSR_HSTATUS, old_hstatus);

	local_irq_restore(flags);

	return val;
}

/**
 * kvm_riscv_vcpu_trap_redirect -- Redirect trap to Guest
 *
 * @vcpu: The VCPU pointer
 * @trap: Trap details
 *
 * Mimics hardware behaviour for a trap taken into VS-mode: updates the
 * SPP/SPIE/SIE bits of the guest's sstatus, sets vscause/vstval/vsepc
 * from @trap, and points the guest PC at its exception vector (vstvec).
 */
void kvm_riscv_vcpu_trap_redirect(struct kvm_vcpu *vcpu,
				  struct kvm_cpu_trap *trap)
{
	unsigned long vsstatus = csr_read(CSR_VSSTATUS);

	/* Change Guest SSTATUS.SPP bit */
	vsstatus &= ~SR_SPP;
	if (vcpu->arch.guest_context.sstatus & SR_SPP)
		vsstatus |= SR_SPP;

	/* Change Guest SSTATUS.SPIE bit */
	vsstatus &= ~SR_SPIE;
	if (vsstatus & SR_SIE)
		vsstatus |= SR_SPIE;

	/* Clear Guest SSTATUS.SIE bit */
	vsstatus &= ~SR_SIE;

	/* Update Guest SSTATUS */
	csr_write(CSR_VSSTATUS, vsstatus);

	/* Update Guest SCAUSE, STVAL, and SEPC */
	csr_write(CSR_VSCAUSE, trap->scause);
	csr_write(CSR_VSTVAL, trap->stval);
	csr_write(CSR_VSEPC, trap->sepc);

	/* Set Guest PC to Guest exception vector */
	vcpu->arch.guest_context.sepc = csr_read(CSR_VSTVEC);
}

/**
 * kvm_riscv_vcpu_mmio_return -- Handle MMIO loads after user space emulation
 *			      or in-kernel IO emulation
 *
 * @vcpu: The VCPU pointer
 * @run:  The VCPU run struct containing the mmio data
 */
int kvm_riscv_vcpu_mmio_return(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
	u8 data8;
	u16 data16;
	u32 data32;
	u64 data64;
	ulong insn;
	int len, shift;

	if (vcpu->arch.mmio_decode.return_handled)
		return 0;

	vcpu->arch.mmio_decode.return_handled = 1;
	insn = vcpu->arch.mmio_decode.insn;

	if (run->mmio.is_write)
		goto done;

	len = vcpu->arch.mmio_decode.len;
	shift = vcpu->arch.mmio_decode.shift;

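	/*
	 * Write the loaded data back to rd.  emulate_load() chose shift
	 * so that "value << shift >> shift" confines the result to the
	 * access width; both shifts are performed on an unsigned ulong,
	 * so the right shift is logical (zero-filling).
	 */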
	switch (len) {
	case 1:
		data8 = *((u8 *)run->mmio.data);
		SET_RD(insn, &vcpu->arch.guest_context,
		       (ulong)data8 << shift >> shift);
		break;
	case 2:
		data16 = *((u16 *)run->mmio.data);
		SET_RD(insn, &vcpu->arch.guest_context,
		       (ulong)data16 << shift >> shift);
		break;
	case 4:
		data32 = *((u32 *)run->mmio.data);
		SET_RD(insn, &vcpu->arch.guest_context,
		       (ulong)data32 << shift >> shift);
		break;
	case 8:
		data64 = *((u64 *)run->mmio.data);
		SET_RD(insn, &vcpu->arch.guest_context,
		       (ulong)data64 << shift >> shift);
		break;
	default:
		return -EOPNOTSUPP;
	}

done:
	/* Move to next instruction */
	vcpu->arch.guest_context.sepc += vcpu->arch.mmio_decode.insn_len;

	return 0;
}

/*
 * Return > 0 to return to guest, < 0 on error, 0 (and set exit_reason) on
 * proper exit to userspace.
 */
int kvm_riscv_vcpu_exit(struct kvm_vcpu *vcpu, struct kvm_run *run,
			struct kvm_cpu_trap *trap)
{
	int ret;

	/* If we got host interrupt then do nothing */
	if (trap->scause & CAUSE_IRQ_FLAG)
		return 1;

	/* Handle guest traps */
	ret = -EFAULT;
	run->exit_reason = KVM_EXIT_UNKNOWN;
	switch (trap->scause) {
	case EXC_VIRTUAL_INST_FAULT:
		if (vcpu->arch.guest_context.hstatus & HSTATUS_SPV)
			ret = virtual_inst_fault(vcpu, run, trap);
		break;
	case EXC_INST_GUEST_PAGE_FAULT:
	case EXC_LOAD_GUEST_PAGE_FAULT:
	case EXC_STORE_GUEST_PAGE_FAULT:
		if (vcpu->arch.guest_context.hstatus & HSTATUS_SPV)
			ret = stage2_page_fault(vcpu, run, trap);
		break;
	case EXC_SUPERVISOR_SYSCALL:
		if (vcpu->arch.guest_context.hstatus & HSTATUS_SPV)
			ret = kvm_riscv_vcpu_sbi_ecall(vcpu, run);
		break;
	default:
		break;
	}

	/* Print details in case of error */
	if (ret < 0) {
		kvm_err("VCPU exit error %d\n", ret);
		kvm_err("SEPC=0x%lx SSTATUS=0x%lx HSTATUS=0x%lx\n",
			vcpu->arch.guest_context.sepc,
			vcpu->arch.guest_context.sstatus,
			vcpu->arch.guest_context.hstatus);
		kvm_err("SCAUSE=0x%lx STVAL=0x%lx HTVAL=0x%lx HTINST=0x%lx\n",
			trap->scause, trap->stval, trap->htval, trap->htinst);
	}

	return ret;
}