1 /******************************************************************************
2 * x86_emulate.h
3 *
4 * Generic x86 (32-bit and 64-bit) instruction decoder and emulator.
5 *
6 * Copyright (c) 2005-2007 Keir Fraser
7 * Copyright (c) 2005-2007 XenSource Inc.
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License as published by
11 * the Free Software Foundation; either version 2 of the License, or
12 * (at your option) any later version.
13 *
14 * This program is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 * GNU General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; If not, see <http://www.gnu.org/licenses/>.
21 */
22
23 #ifndef __X86_EMULATE_H__
24 #define __X86_EMULATE_H__
25
26 #include <xen/lib/x86/cpuid.h>
27
28 #define MAX_INST_LEN 15
29
30 #if defined(__i386__)
31 # define X86_NR_GPRS 8
32 #elif defined(__x86_64__)
33 # define X86_NR_GPRS 16
34 #else
35 # error Unknown compilation width
36 #endif
37
38 struct x86_emulate_ctxt;
39
40 /*
41 * Comprehensive enumeration of x86 segment registers. Various bits of code
42 * rely on this order (general purpose before system, tr at the beginning of
43 * system).
44 */
enum x86_segment {
    /* General purpose. Matches the SReg3 encoding in opcode/ModRM bytes. */
    x86_seg_es,
    x86_seg_cs,
    x86_seg_ss,
    x86_seg_ds,
    x86_seg_fs,
    x86_seg_gs,
    /* System: Valid to use for implicit table references. */
    x86_seg_tr,
    x86_seg_ldtr,
    x86_seg_gdtr,
    x86_seg_idtr,
    /*
     * No Segment: For accesses which are already linear.  Also acts as the
     * upper sentinel for is_x86_system_segment()'s range check.
     */
    x86_seg_none
};
61
is_x86_user_segment(enum x86_segment seg)62 static inline bool is_x86_user_segment(enum x86_segment seg)
63 {
64 unsigned int idx = seg;
65
66 return idx <= x86_seg_gs;
67 }
is_x86_system_segment(enum x86_segment seg)68 static inline bool is_x86_system_segment(enum x86_segment seg)
69 {
70 return seg >= x86_seg_tr && seg < x86_seg_none;
71 }
72
73 /*
74 * x86 event types. This enumeration is valid for:
75 * Intel VMX: {VM_ENTRY,VM_EXIT,IDT_VECTORING}_INTR_INFO[10:8]
76 * AMD SVM: eventinj[10:8] and exitintinfo[10:8] (types 0-4 only)
77 */
enum x86_event_type {
    X86_EVENTTYPE_EXT_INTR,         /* External interrupt */
    X86_EVENTTYPE_NMI = 2,          /* NMI (type value 1 deliberately unused) */
    X86_EVENTTYPE_HW_EXCEPTION,     /* Hardware exception */
    X86_EVENTTYPE_SW_INTERRUPT,     /* Software interrupt (CD nn) */
    X86_EVENTTYPE_PRI_SW_EXCEPTION, /* ICEBP (F1) */
    X86_EVENTTYPE_SW_EXCEPTION,     /* INT3 (CC), INTO (CE) */
};
86 #define X86_EVENT_NO_EC (-1) /* No error code. */
87
/* An event (exception/interrupt) to be injected, as latched by the emulator. */
struct x86_event {
    int16_t       vector;     /* Exception/interrupt vector number. */
    uint8_t       type;       /* X86_EVENTTYPE_* */
    uint8_t       insn_len;   /* Instruction length */
    int32_t       error_code; /* X86_EVENT_NO_EC if n/a */
    unsigned long cr2;        /* Only for TRAP_page_fault h/w exception */
};
95
96 /*
97 * Full state of a segment register (visible and hidden portions).
98 * Chosen to match the format of an AMD SVM VMCB.
99 */
struct segment_register {
    uint16_t sel;             /* Visible selector value. */
    union {
        uint16_t attr;        /* All attribute bits as a single word. */
        struct {
            uint16_t type:4;  /* Segment type. */
            uint16_t s:   1;  /* 0 = system, 1 = code/data. */
            uint16_t dpl: 2;  /* Descriptor privilege level. */
            uint16_t p:   1;  /* Present. */
            uint16_t avl: 1;  /* Available for software use. */
            uint16_t l:   1;  /* Long mode: 64-bit code segment. */
            uint16_t db:  1;  /* Default operand size / big. */
            uint16_t g:   1;  /* Granularity. */
            uint16_t pad: 4;
        };
    };
    uint32_t limit;           /* Segment limit. */
    uint64_t base;            /* Segment base address. */
};
119
/* FIP/FDP/FOP values packaged up for loading into the FPU (see put_fpu()). */
struct x86_emul_fpu_aux {
    unsigned long ip, dp;  /* FIP / FDP: instruction and data pointers. */
    uint16_t cs, ds;       /* Selectors accompanying ip / dp. */
    unsigned int op:11;    /* FOP: 11-bit opcode of the x87 instruction. */
    unsigned int dval:1;   /* dp/ds pair valid? — TODO confirm against users. */
};
126
127 /*
128 * Return codes from state-accessor functions and from x86_emulate().
129 */
130 /* Completed successfully. State modified appropriately. */
131 #define X86EMUL_OKAY 0
132 /* Unhandleable access or emulation. No state modified. */
133 #define X86EMUL_UNHANDLEABLE 1
134 /* Exception raised and requires delivery. */
135 #define X86EMUL_EXCEPTION 2
136 /* Retry the emulation for some reason. No state modified. */
137 #define X86EMUL_RETRY 3
138 /*
139 * Operation fully done by one of the hooks:
140 * - validate(): operation completed (except common insn retire logic)
141 * - read_segment(x86_seg_tr, ...): bypass I/O bitmap access
142 * - read_io() / write_io(): bypass GPR update (non-string insns only)
143 * Undefined behavior when used anywhere else.
144 */
145 #define X86EMUL_DONE 4
146 /*
147 * Current instruction is not implemented by the emulator.
148 * This value should only be returned by the core emulator when a valid
149 * opcode is found but the execution logic for that instruction is missing.
150 * It should NOT be returned by any of the x86_emulate_ops callbacks.
151 */
152 #define X86EMUL_UNIMPLEMENTED 5
153 /*
154 * The current instruction's opcode is not valid.
155 * If this error code is returned by a function, an #UD trap should be
156 * raised by the final consumer of it.
157 *
158 * TODO: For the moment X86EMUL_UNRECOGNIZED and X86EMUL_UNIMPLEMENTED
159 * can be used interchangeably therefore raising an #UD trap is not
160 * strictly expected for now.
161 */
162 #define X86EMUL_UNRECOGNIZED X86EMUL_UNIMPLEMENTED
163 /* (cmpxchg accessor): CMPXCHG failed. */
164 #define X86EMUL_CMPXCHG_FAILED 7
165
166 /* FPU sub-types which may be requested via ->get_fpu(). */
enum x86_emulate_fpu_type {
    X86EMUL_FPU_fpu,    /* Standard FPU coprocessor instruction set */
    X86EMUL_FPU_wait,   /* WAIT/FWAIT instruction */
    X86EMUL_FPU_mmx,    /* MMX instruction set (%mm0-%mm7) */
    X86EMUL_FPU_xmm,    /* SSE instruction set (%xmm0-%xmm7/15) */
    X86EMUL_FPU_ymm,    /* AVX/XOP instruction set (%ymm0-%ymm7/15) */
    X86EMUL_FPU_opmask, /* AVX512 opmask instruction set (%k0-%k7) */
    X86EMUL_FPU_zmm,    /* AVX512 instruction set (%zmm0-%zmm7/31) */
    /* This sentinel will never be passed to ->get_fpu(). */
    X86EMUL_FPU_none
};
178
/* Cache maintenance operations, as passed to ->cache_op(). */
enum x86emul_cache_op {
    x86emul_clflush,    /* CLFLUSH */
    x86emul_clflushopt, /* CLFLUSHOPT */
    x86emul_clwb,       /* CLWB */
    x86emul_invd,       /* INVD */
    x86emul_wbinvd,     /* WBINVD */
    x86emul_wbnoinvd,   /* WBNOINVD */
};
187
/* TLB maintenance operations, as passed to ->tlb_op(). */
enum x86emul_tlb_op {
    x86emul_invlpg,     /* INVLPG */
    x86emul_invlpga,    /* INVLPGA */
    x86emul_invpcid,    /* INVPCID */
};
193
/*
 * Pack an INVPCID descriptor's PCID and type into the single auxiliary
 * value handed to ->tlb_op().  Layout: bits 0-11 hold the PCID, bits 12
 * and up hold the type.
 */
static inline unsigned int x86emul_invpcid_aux(unsigned int pcid,
                                               unsigned int type)
{
    ASSERT(!(pcid & ~0xfff));
    return pcid | (type << 12);
}
200
/* Extract the PCID (bits 0-11) from an x86emul_invpcid_aux() value. */
static inline unsigned int x86emul_invpcid_pcid(unsigned int aux)
{
    return aux % 0x1000;
}
205
/* Extract the type (bits 12 and up) from an x86emul_invpcid_aux() value. */
static inline unsigned int x86emul_invpcid_type(unsigned int aux)
{
    return aux / 0x1000;
}
210
211 struct x86_emulate_state;
212
213 /*
214 * These operations represent the instruction emulator's interface to memory,
215 * I/O ports, privileged state... pretty much everything other than GPRs.
216 *
217 * NOTES:
218 * 1. If the access fails (cannot emulate, or a standard access faults) then
219 * it is up to the memop to propagate the fault to the guest VM via
220 * some out-of-band mechanism, unknown to the emulator. The memop signals
221 * failure by returning X86EMUL_EXCEPTION to the emulator, which will
222 * then immediately bail.
223 * 2. The emulator cannot handle 64-bit mode emulation on an x86/32 system.
224 */
struct x86_emulate_ops
{
    /*
     * All functions:
     *  @ctxt:  [IN ] Emulation context info as passed to the emulator.
     * All memory-access functions:
     *  @seg:   [IN ] Segment being dereferenced (specified as x86_seg_??).
     *  @offset:[IN ] Offset within segment.
     *  @p_data:[IN ] Pointer to i/o data buffer (length is @bytes)
     * Read functions:
     *  @val:   [OUT] Value read, zero-extended to 'ulong'.
     * Write functions:
     *  @val:   [IN ] Value to write (low-order bytes used as req'd).
     * Variable-length access functions:
     *  @bytes: [IN ] Number of bytes to read or write. Valid access sizes are
     *                1, 2, 4 and 8 (x86/64 only) bytes, unless otherwise
     *                stated.
     */

    /*
     * read: Emulate a memory read.
     * @bytes: Access length (0 < @bytes < 4096).
     */
    int (*read)(
        enum x86_segment seg,
        unsigned long offset,
        void *p_data,
        unsigned int bytes,
        struct x86_emulate_ctxt *ctxt);

    /*
     * insn_fetch: Emulate fetch from instruction byte stream.
     *  Except for @bytes, all parameters are the same as for 'read'.
     *  @bytes: Access length (0 <= @bytes < 16, with zero meaning
     *  "validate address only").
     *  @seg is always x86_seg_cs.
     */
    int (*insn_fetch)(
        enum x86_segment seg,
        unsigned long offset,
        void *p_data,
        unsigned int bytes,
        struct x86_emulate_ctxt *ctxt);

    /*
     * write: Emulate a memory write.
     * @bytes: Access length (0 < @bytes < 4096).
     */
    int (*write)(
        enum x86_segment seg,
        unsigned long offset,
        void *p_data,
        unsigned int bytes,
        struct x86_emulate_ctxt *ctxt);

    /*
     * rmw: Emulate a memory read-modify-write.
     * @eflags: [IN/OUT] Pointer to EFLAGS to be updated according to
     *                   instruction effects.
     * @state:  [IN/OUT] Pointer to (opaque) emulator state.
     */
    int (*rmw)(
        enum x86_segment seg,
        unsigned long offset,
        unsigned int bytes,
        uint32_t *eflags,
        struct x86_emulate_state *state,
        struct x86_emulate_ctxt *ctxt);

    /*
     * cmpxchg: Emulate a CMPXCHG operation.
     *  @p_old: [IN ] Pointer to value expected to be current at @addr.
     *          [OUT] Pointer to value found at @addr (may always be
     *                updated, meaningful for X86EMUL_CMPXCHG_FAILED only).
     *  @p_new: [IN ] Pointer to value to write to @addr.
     *  @bytes: [IN ] Operation size (up to 8 (x86/32) or 16 (x86/64) bytes).
     *  @lock:  [IN ] atomic (LOCKed) operation
     */
    int (*cmpxchg)(
        enum x86_segment seg,
        unsigned long offset,
        void *p_old,
        void *p_new,
        unsigned int bytes,
        bool lock,
        struct x86_emulate_ctxt *ctxt);

    /*
     * blk: Emulate a large (block) memory access.
     * @p_data: [IN/OUT] (optional) Pointer to source/destination buffer.
     * @eflags: [IN/OUT] Pointer to EFLAGS to be updated according to
     *                   instruction effects.
     * @state:  [IN/OUT] Pointer to (opaque) emulator state.
     */
    int (*blk)(
        enum x86_segment seg,
        unsigned long offset,
        void *p_data,
        unsigned int bytes,
        uint32_t *eflags,
        struct x86_emulate_state *state,
        struct x86_emulate_ctxt *ctxt);

    /*
     * validate: Post-decode, pre-emulate hook to allow caller controlled
     * filtering.
     */
    int (*validate)(
        const struct x86_emulate_state *state,
        struct x86_emulate_ctxt *ctxt);

    /*
     * rep_ins: Emulate INS: <src_port> -> <dst_seg:dst_offset>.
     *  @bytes_per_rep: [IN ] Bytes transferred per repetition.
     *  @reps:  [IN ] Maximum repetitions to be emulated.
     *          [OUT] Number of repetitions actually emulated.
     */
    int (*rep_ins)(
        uint16_t src_port,
        enum x86_segment dst_seg,
        unsigned long dst_offset,
        unsigned int bytes_per_rep,
        unsigned long *reps,
        struct x86_emulate_ctxt *ctxt);

    /*
     * rep_outs: Emulate OUTS: <src_seg:src_offset> -> <dst_port>.
     *  @bytes_per_rep: [IN ] Bytes transferred per repetition.
     *  @reps:  [IN ] Maximum repetitions to be emulated.
     *          [OUT] Number of repetitions actually emulated.
     */
    int (*rep_outs)(
        enum x86_segment src_seg,
        unsigned long src_offset,
        uint16_t dst_port,
        unsigned int bytes_per_rep,
        unsigned long *reps,
        struct x86_emulate_ctxt *ctxt);

    /*
     * rep_movs: Emulate MOVS: <src_seg:src_offset> -> <dst_seg:dst_offset>.
     *  @bytes_per_rep: [IN ] Bytes transferred per repetition.
     *  @reps:  [IN ] Maximum repetitions to be emulated.
     *          [OUT] Number of repetitions actually emulated.
     */
    int (*rep_movs)(
        enum x86_segment src_seg,
        unsigned long src_offset,
        enum x86_segment dst_seg,
        unsigned long dst_offset,
        unsigned int bytes_per_rep,
        unsigned long *reps,
        struct x86_emulate_ctxt *ctxt);

    /*
     * rep_stos: Emulate STOS: <*p_data> -> <seg:offset>.
     *  @bytes_per_rep: [IN ] Bytes transferred per repetition.
     *  @reps:  [IN ] Maximum repetitions to be emulated.
     *          [OUT] Number of repetitions actually emulated.
     */
    int (*rep_stos)(
        void *p_data,
        enum x86_segment seg,
        unsigned long offset,
        unsigned int bytes_per_rep,
        unsigned long *reps,
        struct x86_emulate_ctxt *ctxt);

    /*
     * read_segment: Emulate a read of full context of a segment register.
     *  @reg:   [OUT] Contents of segment register (visible and hidden state).
     */
    int (*read_segment)(
        enum x86_segment seg,
        struct segment_register *reg,
        struct x86_emulate_ctxt *ctxt);

    /*
     * write_segment: Emulate a write of full context of a segment register.
     *  @reg:   [IN ] Contents of segment register (visible and hidden state).
     */
    int (*write_segment)(
        enum x86_segment seg,
        const struct segment_register *reg,
        struct x86_emulate_ctxt *ctxt);

    /*
     * read_io: Read from I/O port(s).
     *  @port:  [IN ] Base port for access.
     */
    int (*read_io)(
        unsigned int port,
        unsigned int bytes,
        unsigned long *val,
        struct x86_emulate_ctxt *ctxt);

    /*
     * write_io: Write to I/O port(s).
     *  @port:  [IN ] Base port for access.
     */
    int (*write_io)(
        unsigned int port,
        unsigned int bytes,
        unsigned long val,
        struct x86_emulate_ctxt *ctxt);

    /*
     * read_cr: Read from control register.
     *  @reg:   [IN ] Register to read (0-15).
     */
    int (*read_cr)(
        unsigned int reg,
        unsigned long *val,
        struct x86_emulate_ctxt *ctxt);

    /*
     * write_cr: Write to control register.
     *  @reg:   [IN ] Register to write (0-15).
     */
    int (*write_cr)(
        unsigned int reg,
        unsigned long val,
        struct x86_emulate_ctxt *ctxt);

    /*
     * read_dr: Read from debug register.
     *  @reg:   [IN ] Register to read (0-15).
     */
    int (*read_dr)(
        unsigned int reg,
        unsigned long *val,
        struct x86_emulate_ctxt *ctxt);

    /*
     * write_dr: Write to debug register.
     *  @reg:   [IN ] Register to write (0-15).
     */
    int (*write_dr)(
        unsigned int reg,
        unsigned long val,
        struct x86_emulate_ctxt *ctxt);

    /*
     * read_xcr: Read from extended control register.
     *  @reg:   [IN ] Register to read.
     */
    int (*read_xcr)(
        unsigned int reg,
        uint64_t *val,
        struct x86_emulate_ctxt *ctxt);

    /*
     * write_xcr: Write to extended control register.
     *  @reg:   [IN ] Register to write.
     */
    int (*write_xcr)(
        unsigned int reg,
        uint64_t val,
        struct x86_emulate_ctxt *ctxt);

    /*
     * read_msr: Read from model-specific register.
     *  @reg:   [IN ] Register to read.
     */
    int (*read_msr)(
        unsigned int reg,
        uint64_t *val,
        struct x86_emulate_ctxt *ctxt);

    /*
     * write_msr: Write to model-specific register.
     *  @reg:   [IN ] Register to write.
     */
    int (*write_msr)(
        unsigned int reg,
        uint64_t val,
        struct x86_emulate_ctxt *ctxt);

    /*
     * cache_op: Write-back and/or invalidate cache contents.
     *
     * @seg:@offset applicable only to some of enum x86emul_cache_op.
     */
    int (*cache_op)(
        enum x86emul_cache_op op,
        enum x86_segment seg,
        unsigned long offset,
        struct x86_emulate_ctxt *ctxt);

    /*
     * tlb_op: Invalidate paging structures which map addressed byte.
     *
     * @addr and @aux have @op-specific meaning:
     * - INVLPG:  @aux:@addr represent seg:offset
     * - INVLPGA: @addr is the linear address, @aux the ASID
     * - INVPCID: @addr is the linear address, @aux the combination of
     *            PCID and type (see x86emul_invpcid_*()).
     */
    int (*tlb_op)(
        enum x86emul_tlb_op op,
        unsigned long addr,
        unsigned long aux,
        struct x86_emulate_ctxt *ctxt);

    /* cpuid: Emulate CPUID via given set of EAX-EDX inputs/outputs. */
    int (*cpuid)(
        uint32_t leaf,
        uint32_t subleaf,
        struct cpuid_leaf *res,
        struct x86_emulate_ctxt *ctxt);

    /*
     * get_fpu: Load emulated environment's FPU state onto processor.
     */
    int (*get_fpu)(
        enum x86_emulate_fpu_type type,
        struct x86_emulate_ctxt *ctxt);

    /*
     * put_fpu: Relinquish the FPU. Unhook from FPU/SIMD exception handlers.
     *  The handler, if installed, must be prepared to get called without
     *  the get_fpu one having got called before!
     * @backout: Undo updates to the specified register file (can, besides
     *           X86EMUL_FPU_none, only be X86EMUL_FPU_fpu at present);
     * @aux: Packaged up FIP/FDP/FOP values to load into FPU.
     */
    void (*put_fpu)(
        struct x86_emulate_ctxt *ctxt,
        enum x86_emulate_fpu_type backout,
        const struct x86_emul_fpu_aux *aux);

    /* vmfunc: Emulate VMFUNC via given set of EAX ECX inputs */
    int (*vmfunc)(
        struct x86_emulate_ctxt *ctxt);
};
560
561 struct cpu_user_regs;
562
struct x86_emulate_ctxt
{
    /*
     * Input-only state:
     */

    /* CPUID Policy for the domain. */
    const struct cpuid_policy *cpuid;

    /* Set this if writes may have side effects. */
    bool force_writeback;

    /* Caller data that can be used by x86_emulate_ops' routines. */
    void *data;

    /*
     * Input/output state:
     */

    /* Register state before/after emulation. */
    struct cpu_user_regs *regs;

    /* Default address size in current execution mode (16, 32, or 64). */
    unsigned int addr_size;

    /* Stack pointer width in bits (16, 32 or 64). */
    unsigned int sp_size;

    /* Long mode active? */
    bool lma;

    /*
     * Output-only state:
     */

    /* Canonical opcode (see below) (valid only on X86EMUL_OKAY). */
    unsigned int opcode;

    /* Retirement state, set by the emulator (valid only on X86EMUL_OKAY). */
    union {
        uint8_t raw;
        struct {
            bool hlt:1;          /* Instruction HLTed. */
            bool mov_ss:1;       /* Instruction sets MOV-SS irq shadow. */
            bool sti:1;          /* Instruction sets STI irq shadow. */
            bool unblock_nmi:1;  /* Instruction clears NMI blocking. */
            bool singlestep:1;   /* Singlestepping was active. */
        };
    } retire;

    /* Latched event for the caller to deliver; valid iff event_pending. */
    bool event_pending;
    struct x86_event event;
};
616
617 /*
618 * Encode opcode extensions in the following way:
619 * 0x0xxxx for one byte opcodes
620 * 0x0fxxxx for 0f-prefixed opcodes (or their VEX/EVEX equivalents)
621 * 0x0f38xxxx for 0f38-prefixed opcodes (or their VEX/EVEX equivalents)
622 * 0x0f3axxxx for 0f3a-prefixed opcodes (or their VEX/EVEX equivalents)
623 * 0x8f08xxxx for 8f/8-prefixed XOP opcodes
624 * 0x8f09xxxx for 8f/9-prefixed XOP opcodes
625 * 0x8f0axxxx for 8f/a-prefixed XOP opcodes
 * The low byte represents the base opcode within the respective space,
627 * and some of bits 8..15 are used for encoding further information (see
628 * below).
629 * Hence no separate #define-s get added.
630 */
631 #define X86EMUL_OPC_EXT_MASK 0xffff0000
632 #define X86EMUL_OPC(ext, byte) ((uint8_t)(byte) | \
633 MASK_INSR((ext), X86EMUL_OPC_EXT_MASK))
634 /*
635 * This includes the 66, F3, and F2 prefixes (see also below)
636 * as well as VEX/EVEX:
637 */
638 #define X86EMUL_OPC_MASK (0x000000ff | X86EMUL_OPC_PFX_MASK | \
639 X86EMUL_OPC_ENCODING_MASK)
640
641 /*
642 * Note that prefixes 66, F2, and F3 get encoded only when semantically
643 * meaningful, to reduce the complexity of interpreting this representation.
644 */
645 #define X86EMUL_OPC_PFX_MASK 0x00000300
646 # define X86EMUL_OPC_66(ext, byte) (X86EMUL_OPC(ext, byte) | 0x00000100)
647 # define X86EMUL_OPC_F3(ext, byte) (X86EMUL_OPC(ext, byte) | 0x00000200)
648 # define X86EMUL_OPC_F2(ext, byte) (X86EMUL_OPC(ext, byte) | 0x00000300)
649
650 #define X86EMUL_OPC_ENCODING_MASK 0x00003000
651 #define X86EMUL_OPC_LEGACY_ 0x00000000
652 #define X86EMUL_OPC_VEX_ 0x00001000
653 # define X86EMUL_OPC_VEX(ext, byte) \
654 (X86EMUL_OPC(ext, byte) | X86EMUL_OPC_VEX_)
655 # define X86EMUL_OPC_VEX_66(ext, byte) \
656 (X86EMUL_OPC_66(ext, byte) | X86EMUL_OPC_VEX_)
657 # define X86EMUL_OPC_VEX_F3(ext, byte) \
658 (X86EMUL_OPC_F3(ext, byte) | X86EMUL_OPC_VEX_)
659 # define X86EMUL_OPC_VEX_F2(ext, byte) \
660 (X86EMUL_OPC_F2(ext, byte) | X86EMUL_OPC_VEX_)
661 #define X86EMUL_OPC_EVEX_ 0x00002000
662 # define X86EMUL_OPC_EVEX(ext, byte) \
663 (X86EMUL_OPC(ext, byte) | X86EMUL_OPC_EVEX_)
664 # define X86EMUL_OPC_EVEX_66(ext, byte) \
665 (X86EMUL_OPC_66(ext, byte) | X86EMUL_OPC_EVEX_)
666 # define X86EMUL_OPC_EVEX_F3(ext, byte) \
667 (X86EMUL_OPC_F3(ext, byte) | X86EMUL_OPC_EVEX_)
668 # define X86EMUL_OPC_EVEX_F2(ext, byte) \
669 (X86EMUL_OPC_F2(ext, byte) | X86EMUL_OPC_EVEX_)
670
671 #define X86EMUL_OPC_XOP(ext, byte) X86EMUL_OPC(0x8f##ext, byte)
672 #define X86EMUL_OPC_XOP_66(ext, byte) X86EMUL_OPC_66(0x8f##ext, byte)
673 #define X86EMUL_OPC_XOP_F3(ext, byte) X86EMUL_OPC_F3(0x8f##ext, byte)
674 #define X86EMUL_OPC_XOP_F2(ext, byte) X86EMUL_OPC_F2(0x8f##ext, byte)
675
/*
 * An emulation stub: a small piece of executable code reachable through
 * func/addr.  NOTE(review): outside of Xen builds the code bytes live
 * inline in buf[]; confirm the Xen-build @ptr usage against the .c file.
 */
struct x86_emulate_stub {
    union {
        void (*func)(void);  /* Callable view of the stub. */
        uintptr_t addr;      /* Same location, as an integer. */
    };
#ifdef __XEN__
    void *ptr;               /* presumably the mapping the stub is written through — verify */
#else
    /* Room for one insn and a (single byte) RET. */
    uint8_t buf[MAX_INST_LEN + 1];
#endif
};
688
689 /*
690 * x86_emulate: Emulate an instruction.
691 * Returns X86EMUL_* constants.
692 */
693 int
694 x86_emulate(
695 struct x86_emulate_ctxt *ctxt,
696 const struct x86_emulate_ops *ops);
697
698 #ifndef NDEBUG
699 /*
700 * In debug builds, wrap x86_emulate() with some assertions about its expected
701 * behaviour.
702 */
703 int x86_emulate_wrapper(
704 struct x86_emulate_ctxt *ctxt,
705 const struct x86_emulate_ops *ops);
706 #define x86_emulate x86_emulate_wrapper
707 #endif
708
709 #ifdef __XEN__
710 # include <xen/nospec.h>
711 #else
712 # define array_access_nospec(arr, idx) arr[idx]
713 #endif
714
715 /* Map GPRs by ModRM encoding to their offset within struct cpu_user_regs. */
716 extern const uint8_t cpu_user_regs_gpr_offsets[X86_NR_GPRS];
717
718 /*
719 * Given the 'reg' portion of a ModRM byte, and a register block, return a
720 * pointer into the block that addresses the relevant register.
721 */
decode_gpr(struct cpu_user_regs * regs,unsigned int modrm)722 static inline unsigned long *decode_gpr(struct cpu_user_regs *regs,
723 unsigned int modrm)
724 {
725 /* Check that the array is a power of two. */
726 BUILD_BUG_ON(ARRAY_SIZE(cpu_user_regs_gpr_offsets) &
727 (ARRAY_SIZE(cpu_user_regs_gpr_offsets) - 1));
728
729 /*
730 * Note that this also acts as array_access_nospec() stand-in. Higher
731 * bits may legitimately come in set here, from EVEX decoding, and
732 * hence truncation is what we want (bits not ignored will get checked
733 * elsewhere).
734 */
735 modrm &= ARRAY_SIZE(cpu_user_regs_gpr_offsets) - 1;
736
737 return (void *)regs + cpu_user_regs_gpr_offsets[modrm];
738 }
739
740 /* Unhandleable read, write or instruction fetch */
741 int
742 x86emul_unhandleable_rw(
743 enum x86_segment seg,
744 unsigned long offset,
745 void *p_data,
746 unsigned int bytes,
747 struct x86_emulate_ctxt *ctxt);
748
749 struct x86_emulate_state *
750 x86_decode_insn(
751 struct x86_emulate_ctxt *ctxt,
752 int (*insn_fetch)(
753 enum x86_segment seg, unsigned long offset,
754 void *p_data, unsigned int bytes,
755 struct x86_emulate_ctxt *ctxt));
756
757 unsigned int
758 x86_insn_opsize(const struct x86_emulate_state *state);
759 int
760 x86_insn_modrm(const struct x86_emulate_state *state,
761 unsigned int *rm, unsigned int *reg);
762 unsigned long
763 x86_insn_operand_ea(const struct x86_emulate_state *state,
764 enum x86_segment *seg);
765 unsigned long
766 x86_insn_immediate(const struct x86_emulate_state *state,
767 unsigned int nr);
768 unsigned int
769 x86_insn_length(const struct x86_emulate_state *state,
770 const struct x86_emulate_ctxt *ctxt);
771 bool
772 x86_insn_is_mem_access(const struct x86_emulate_state *state,
773 const struct x86_emulate_ctxt *ctxt);
774 bool
775 x86_insn_is_mem_write(const struct x86_emulate_state *state,
776 const struct x86_emulate_ctxt *ctxt);
777 bool
778 x86_insn_is_portio(const struct x86_emulate_state *state,
779 const struct x86_emulate_ctxt *ctxt);
780 bool
781 x86_insn_is_cr_access(const struct x86_emulate_state *state,
782 const struct x86_emulate_ctxt *ctxt);
783
#if !defined(__XEN__) || defined(NDEBUG)
/*
 * No-op outside of debug Xen builds; the debug build supplies a real
 * implementation below.  NOTE(review): presumably state tracking that
 * needs explicit freeing only exists in debug builds — confirm in the .c
 * file.
 */
static inline void x86_emulate_free_state(struct x86_emulate_state *state) {}
#else
void x86_emulate_free_state(struct x86_emulate_state *state);
#endif
789
790 #ifdef __XEN__
791
792 int x86emul_read_xcr(unsigned int reg, uint64_t *val,
793 struct x86_emulate_ctxt *ctxt);
794 int x86emul_write_xcr(unsigned int reg, uint64_t val,
795 struct x86_emulate_ctxt *ctxt);
796
797 int x86emul_read_dr(unsigned int reg, unsigned long *val,
798 struct x86_emulate_ctxt *ctxt);
799 int x86emul_write_dr(unsigned int reg, unsigned long val,
800 struct x86_emulate_ctxt *ctxt);
801 int x86emul_cpuid(uint32_t leaf, uint32_t subleaf,
802 struct cpuid_leaf *res, struct x86_emulate_ctxt *ctxt);
803
804 #endif
805
806 int
807 x86_emul_rmw(
808 void *ptr,
809 unsigned int bytes,
810 uint32_t *eflags,
811 struct x86_emulate_state *state,
812 struct x86_emulate_ctxt *ctxt);
813 int
814 x86_emul_blk(
815 void *ptr,
816 void *data,
817 unsigned int bytes,
818 uint32_t *eflags,
819 struct x86_emulate_state *state,
820 struct x86_emulate_ctxt *ctxt);
821
/*
 * Latch a hardware exception (@vector, @error_code) in @ctxt for the
 * caller to deliver.  Pass X86_EVENT_NO_EC when the vector carries no
 * error code.  Only one event may be pending at a time (asserted);
 * event.cr2 and event.insn_len are not written here.
 */
static inline void x86_emul_hw_exception(
    unsigned int vector, int error_code, struct x86_emulate_ctxt *ctxt)
{
    ASSERT(!ctxt->event_pending);

    ctxt->event.vector = vector;
    ctxt->event.type = X86_EVENTTYPE_HW_EXCEPTION;
    ctxt->event.error_code = error_code;

    ctxt->event_pending = true;
}
833
/*
 * Latch a page fault (#PF, vector 14) with @error_code and faulting
 * linear address @cr2 in @ctxt for the caller to deliver.  Only one
 * event may be pending at a time (asserted).
 */
static inline void x86_emul_pagefault(
    int error_code, unsigned long cr2, struct x86_emulate_ctxt *ctxt)
{
    ASSERT(!ctxt->event_pending);

    ctxt->event.vector = 14; /* TRAP_page_fault */
    ctxt->event.type = X86_EVENTTYPE_HW_EXCEPTION;
    ctxt->event.error_code = error_code;
    ctxt->event.cr2 = cr2;

    ctxt->event_pending = true;
}
846
/* Discard any pending event and wipe its payload. */
static inline void x86_emul_reset_event(struct x86_emulate_ctxt *ctxt)
{
    ctxt->event_pending = false;
    ctxt->event = (struct x86_event){};
}
852
853 #endif /* __X86_EMULATE_H__ */
854