1 /******************************************************************************
2  * hvm/emulate.h
3  *
4  * HVM instruction emulation. Used for MMIO and VMX real mode.
5  *
6  * Copyright (c) 2008 Citrix Systems, Inc.
7  *
8  * Authors:
9  *    Keir Fraser <keir@xen.org>
10  */
11 
12 #ifndef __ASM_X86_HVM_EMULATE_H__
13 #define __ASM_X86_HVM_EMULATE_H__
14 
15 #include <xen/err.h>
16 #include <xen/mm.h>
17 #include <xen/sched.h>
18 #include <asm/hvm/hvm.h>
19 #include <asm/x86_emulate.h>
20 
/*
 * Post-decode, pre-emulate hook type.  Presumably returns true to allow
 * emulation of the decoded instruction to proceed — confirm against the
 * callers in hvm/emulate.c.
 */
typedef bool hvm_emulate_validate_t(const struct x86_emulate_state *state,
                                    const struct x86_emulate_ctxt *ctxt);
23 
/*
 * HVM-specific emulation context, wrapping the generic x86 emulator
 * context with per-instruction state used by the MMIO / real-mode paths.
 */
struct hvm_emulate_ctxt {
    /* Generic emulator state; must remain the first member so the two
     * context types can be converted with container_of()-style casts —
     * NOTE(review): ordering assumption, confirm against users. */
    struct x86_emulate_ctxt ctxt;

    /*
     * validate: Post-decode, pre-emulate hook to allow caller controlled
     * filtering.
     */
    hvm_emulate_validate_t *validate;

    /* Cache of 16 bytes of instruction. */
    uint8_t insn_buf[16];
    unsigned long insn_buf_eip;   /* Guest EIP the cached bytes belong to. */
    unsigned int insn_buf_bytes;  /* Valid byte count in insn_buf (0 = empty). */

    /* Shadow copies of guest segment registers used during emulation. */
    struct segment_register seg_reg[10];
    /* Presumably bitmaps of which seg_reg[] slots have been read/modified
     * (cf. hvm_emulate_writeback()) — TODO confirm. */
    unsigned long seg_reg_accessed;
    unsigned long seg_reg_dirty;

    /*
     * MFNs behind temporary mappings in the write callback.  The length is
     * arbitrary, and can be increased if writes longer than PAGE_SIZE+1 are
     * needed.
     */
    mfn_t mfn[2];

    /* Interrupt shadow state captured for this emulation — confirm source. */
    uint32_t intr_shadow;

    /* Set when the emulated instruction performs a memory access. */
    bool is_mem_access;

    /* Use externally supplied context data (cf. EMUL_KIND_SET_CONTEXT_*).
     * NOTE(review): legacy bool_t while is_mem_access above uses bool —
     * candidate for cleanup. */
    bool_t set_context;
};
55 
/* Emulation flavour requested via hvm_emulate_one_vm_event(). */
enum emul_kind {
    EMUL_KIND_NORMAL,           /* Plain emulation of the instruction. */
    EMUL_KIND_NOWRITE,          /* Presumably suppresses memory writes — confirm. */
    EMUL_KIND_SET_CONTEXT_DATA, /* Use externally set data (cf. set_context). */
    EMUL_KIND_SET_CONTEXT_INSN  /* Use an externally supplied insn buffer. */
};
62 
/*
 * Decode and emulate a single instruction at the current guest RIP.
 * Both @validate and @descr must be non-NULL (__nonnull); @descr is a
 * human-readable tag (e.g. "MMIO") — presumably used in diagnostics.
 */
bool __nonnull(1, 2) hvm_emulate_one_insn(
    hvm_emulate_validate_t *validate,
    const char *descr);
/* Emulate one instruction using @hvmemul_ctxt; @completion selects how
 * outstanding I/O is completed — TODO confirm return convention. */
int hvm_emulate_one(
    struct hvm_emulate_ctxt *hvmemul_ctxt,
    enum hvm_io_completion completion);
/* vm_event-driven emulation; @trapnr/@errcode presumably describe the
 * event being responded to — confirm against the implementation. */
void hvm_emulate_one_vm_event(enum emul_kind kind,
    unsigned int trapnr,
    unsigned int errcode);
/* Must be called once to set up hvmemul state. */
void hvm_emulate_init_once(
    struct hvm_emulate_ctxt *hvmemul_ctxt,
    hvm_emulate_validate_t *validate,
    struct cpu_user_regs *regs);
/* Must be called once before each instruction emulated. */
void hvm_emulate_init_per_insn(
    struct hvm_emulate_ctxt *hvmemul_ctxt,
    const unsigned char *insn_buf,
    unsigned int insn_bytes);
/* Write back any state cached in the context (cf. seg_reg_dirty). */
void hvm_emulate_writeback(
    struct hvm_emulate_ctxt *hvmemul_ctxt);
/* Cancel any in-flight emulation state for @v. */
void hvmemul_cancel(struct vcpu *v);
/* Fetch the shadow copy of segment @seg from the emulation context. */
struct segment_register *hvmemul_get_seg_reg(
    enum x86_segment seg,
    struct hvm_emulate_ctxt *hvmemul_ctxt);
/* Emulate one instruction for an MMIO access to @mfn at linear addr @gla. */
int hvm_emulate_one_mmio(unsigned long mfn, unsigned long gla);
89 
handle_mmio(void)90 static inline bool handle_mmio(void)
91 {
92     return hvm_emulate_one_insn(x86_insn_is_mem_access, "MMIO");
93 }
94 
/* x86_emulate insn-fetch callback: copy @bytes instruction bytes at
 * @seg:@offset into @p_data (cf. the insn_buf cache in the context). */
int hvmemul_insn_fetch(enum x86_segment seg,
                       unsigned long offset,
                       void *p_data,
                       unsigned int bytes,
                       struct x86_emulate_ctxt *ctxt);
/* Perform buffered port I/O of @size bytes on @port in direction @dir —
 * TODO confirm @dir encoding (IOREQ_READ/IOREQ_WRITE?). */
int hvmemul_do_pio_buffer(uint16_t port,
                          unsigned int size,
                          uint8_t dir,
                          void *buffer);
104 
105 #ifdef CONFIG_HVM
106 /*
107  * The cache controlled by the functions below is not like an ordinary CPU
108  * cache, i.e. aiming to help performance, but a "secret store" which is
 * needed for correctness.  The issue it helps address is the need for
110  * re-execution of an insn (after data was provided by a device model) to
111  * observe the exact same memory state, i.e. to specifically not observe any
112  * updates which may have occurred in the meantime by other agents.
113  * Therefore this cache gets
114  * - enabled when emulation of an insn starts,
115  * - disabled across processing secondary things like a hypercall resulting
116  *   from insn emulation,
117  * - disabled again when an emulated insn is known to not require any
118  *   further re-execution.
119  */
/* Allocate @v's emulation cache; result must be checked by the caller. */
int __must_check hvmemul_cache_init(struct vcpu *v);
/* Free @v's emulation cache (XFREE() presumably also clears the pointer,
 * making this safe to call repeatedly — confirm macro definition). */
static inline void hvmemul_cache_destroy(struct vcpu *v)
{
    XFREE(v->arch.hvm.hvm_io.cache);
}
/* Look up @gpa in @v's cache; presumably copies @size bytes into @buffer
 * and returns true on a hit (the !CONFIG_HVM stub below returns false). */
bool hvmemul_read_cache(const struct vcpu *, paddr_t gpa,
                        void *buffer, unsigned int size);
/* Record @size bytes from @buffer for @gpa in @v's cache. */
void hvmemul_write_cache(const struct vcpu *, paddr_t gpa,
                         const void *buffer, unsigned int size);
/* Disable the cache; returns an opaque token for hvmemul_cache_restore(). */
unsigned int hvmemul_cache_disable(struct vcpu *);
/* Re-enable the cache using a @token from hvmemul_cache_disable(). */
void hvmemul_cache_restore(struct vcpu *, unsigned int token);
/* For use in ASSERT()s only: */
static inline bool hvmemul_cache_disabled(struct vcpu *v)
{
    /*
     * NOTE(review): two back-to-back disables yield equal tokens only when
     * the cache was already disabled (disabling is then a no-op) — this
     * relies on hvmemul_cache_disable()'s implementation; confirm in
     * hvm/emulate.c.  Side effect: the cache is left disabled.
     */
    return hvmemul_cache_disable(v) == hvmemul_cache_disable(v);
}
136 #else
/* !CONFIG_HVM stub: no cache exists, so every lookup is a miss. */
static inline bool hvmemul_read_cache(const struct vcpu *v, paddr_t gpa,
                                      void *buf, unsigned int size)
{
    return false;
}
/* !CONFIG_HVM stub: nothing to record, silently discard the write. */
static inline void hvmemul_write_cache(const struct vcpu *v, paddr_t gpa,
                                       const void *buf, unsigned int size)
{
}
142 #endif
143 
/* Log the emulation context state at @loglvl, tagged with @prefix and the
 * emulator return code @rc — for diagnostics on emulation failure. */
void hvm_dump_emulation_state(const char *loglvl, const char *prefix,
                              struct hvm_emulate_ctxt *hvmemul_ctxt, int rc);
146 
147 #endif /* __ASM_X86_HVM_EMULATE_H__ */
148 
149 /*
150  * Local variables:
151  * mode: C
152  * c-file-style: "BSD"
153  * c-basic-offset: 4
154  * tab-width: 4
155  * indent-tabs-mode: nil
156  * End:
157  */
158