1 /*
2  * vcpu.h: HVM per vcpu definitions
3  *
4  * Copyright (c) 2005, International Business Machines Corporation.
5  *
6  * This program is free software; you can redistribute it and/or modify it
7  * under the terms and conditions of the GNU General Public License,
8  * version 2, as published by the Free Software Foundation.
9  *
10  * This program is distributed in the hope it will be useful, but WITHOUT
11  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
13  * more details.
14  *
15  * You should have received a copy of the GNU General Public License along with
16  * this program; If not, see <http://www.gnu.org/licenses/>.
17  */
18 
19 #ifndef __ASM_X86_HVM_VCPU_H__
20 #define __ASM_X86_HVM_VCPU_H__
21 
22 #include <xen/tasklet.h>
23 #include <asm/hvm/vlapic.h>
24 #include <asm/hvm/vmx/vmcs.h>
25 #include <asm/hvm/vmx/vvmx.h>
26 #include <asm/hvm/svm/vmcb.h>
27 #include <asm/hvm/svm/nestedsvm.h>
28 #include <asm/mtrr.h>
29 #include <public/hvm/ioreq.h>
30 
/*
 * Kind of completion work needed once an in-flight I/O request returns
 * from the device model (see struct hvm_vcpu_io.io_completion).
 */
enum hvm_io_completion {
    HVMIO_no_completion,       /* Nothing further to do. */
    HVMIO_mmio_completion,     /* Finish an emulated MMIO access. */
    HVMIO_pio_completion,      /* Finish an emulated port I/O access. */
    HVMIO_realmode_completion  /* Finish real-mode emulation (VMX path). */
};
37 
/* Per-vCPU ASID state used for tagged TLB support. */
struct hvm_vcpu_asid {
    /*
     * Generation the ASID was allocated in; presumably compared against
     * the allocator's current generation to detect a stale ASID — confirm
     * against the ASID management code.
     */
    uint64_t generation;
    uint32_t asid;       /* Hardware ASID value. */
};
42 
/*
 * We may read or write up to m512 (a 512-bit/64-byte operand) as a
 * number of device-model transactions.
 */
struct hvm_mmio_cache {
    unsigned long gla;   /* Guest linear address of the cached access. */
    unsigned int size;   /* Number of bytes buffered so far. */
    uint8_t dir;         /* Access direction (presumably IOREQ_READ/IOREQ_WRITE — confirm). */
    uint8_t buffer[64] __aligned(sizeof(long)); /* Data; sized for one 512-bit operand. */
};
53 
/* Per-vCPU state for emulated I/O handling. */
struct hvm_vcpu_io {
    /* I/O request in flight to device model. */
    enum hvm_io_completion io_completion; /* Work to do when the request returns. */
    ioreq_t                io_req;        /* The request itself. */

    /*
     * HVM emulation:
     *  Linear address @mmio_gla maps to MMIO physical frame @mmio_gpfn.
     *  The latter is known to be an MMIO frame (not RAM).
     *  This translation is only valid for accesses as per @mmio_access.
     */
    struct npfec        mmio_access;
    unsigned long       mmio_gla;
    unsigned long       mmio_gpfn;

    /*
     * We may need to handle up to 3 distinct memory accesses per
     * instruction.
     */
    struct hvm_mmio_cache mmio_cache[3];
    unsigned int mmio_cache_count;  /* Number of entries in use in mmio_cache[]. */

    /* For retries we shouldn't re-fetch the instruction. */
    unsigned int mmio_insn_bytes;   /* Valid length of mmio_insn[]. */
    unsigned char mmio_insn[16];    /* Cached instruction bytes for retry. */
    struct hvmemul_cache *cache;    /* Emulation cache (declared elsewhere). */

    /*
     * For string instruction emulation we need to be able to signal a
     * necessary retry through other than function return codes.
     */
    bool_t mmio_retry;

    /* MSI-X emulation bookkeeping (semantics defined by the MSI-X code). */
    unsigned long msix_unmask_address;
    unsigned long msix_snoop_address;
    unsigned long msix_snoop_gpa;

    /* Guest-to-machine I/O port mapping currently in use, if any. */
    const struct g2m_ioport *g2m_ioport;
};
93 
hvm_ioreq_needs_completion(const ioreq_t * ioreq)94 static inline bool hvm_ioreq_needs_completion(const ioreq_t *ioreq)
95 {
96     return ioreq->state == STATE_IOREQ_READY &&
97            !ioreq->data_is_ptr &&
98            (ioreq->type != IOREQ_TYPE_PIO || ioreq->dir != IOREQ_WRITE);
99 }
100 
/* Per-vCPU state for nested virtualization (running an L2 guest under L1). */
struct nestedvcpu {
    bool_t nv_guestmode; /* vcpu in guestmode? */
    void *nv_vvmcx; /* l1 guest virtual VMCB/VMCS */
    void *nv_n1vmcx; /* VMCB/VMCS used to run l1 guest */
    void *nv_n2vmcx; /* shadow VMCB/VMCS used to run l2 guest */

    uint64_t nv_vvmcxaddr; /* l1 guest physical address of nv_vvmcx */
    paddr_t nv_n1vmcx_pa; /* host physical address of nv_n1vmcx */
    paddr_t nv_n2vmcx_pa; /* host physical address of nv_n2vmcx */

    /* SVM/VMX arch specific */
    union {
        struct nestedsvm nsvm;
        struct nestedvmx nvmx;
    } u;

    bool_t nv_flushp2m; /* True, when p2m table must be flushed */
    struct p2m_domain *nv_p2m; /* used p2m table for this vcpu */
    bool stale_np2m; /* True when p2m_base in VMCx02 is no longer valid */
    uint64_t np2m_generation; /* Generation of nv_p2m when it was acquired. */

    struct hvm_vcpu_asid nv_n2asid; /* ASID used while running the l2 guest. */

    bool_t nv_vmentry_pending;
    bool_t nv_vmexit_pending;
    bool_t nv_vmswitch_in_progress; /* true during vmentry/vmexit emulation */

    /* Does l1 guest intercept io ports 0x80 and/or 0xED ?
     * Useful to optimize io permission handling.
     */
    bool_t nv_ioport80;
    bool_t nv_ioportED;

    /* L2's control-registers, just as the L2 sees them. */
    unsigned long       guest_cr[5];
};

/* Accessor for a vcpu's nested-HVM state. */
#define vcpu_nestedhvm(v) ((v)->arch.hvm.nvcpu)
139 
/* Per-vCPU state for alternate p2m (altp2m) views. */
struct altp2mvcpu {
    /*
     * #VE information page.  This pointer being non-NULL indicates that a
     * VMCS's VIRT_EXCEPTION_INFO field is pointing to the page, and an extra
     * page reference is held.
     */
    struct page_info *veinfo_pg;
    uint16_t    p2midx;         /* alternate p2m index */
};

/* Accessor for a vcpu's altp2m state. */
#define vcpu_altp2m(v) ((v)->arch.hvm.avcpu)
151 
/* HVM per-vCPU state. */
struct hvm_vcpu {
    /* Guest control-register and EFER values, just as the guest sees them. */
    unsigned long       guest_cr[5];
    unsigned long       guest_efer;

    /*
     * Processor-visible control-register values, while guest executes.
     *  CR0, CR4: Used as a cache of VMCS contents by VMX only.
     *  CR1, CR2: Never used (guest_cr[2] is always processor-visible CR2).
     *  CR3:      Always used and kept up to date by paging subsystem.
     */
    unsigned long       hw_cr[5];

    struct vlapic       vlapic;           /* Virtual local APIC. */
    s64                 cache_tsc_offset; /* Cached guest/host TSC offset. */
    u64                 guest_time;

    /* Lock and list for virtual platform timers. */
    spinlock_t          tm_lock;
    struct list_head    tm_list;

    bool                flag_dr_dirty;     /* Guest debug registers need sync? */
    bool                debug_state_latch;
    bool                single_step;
    /* Single-stepping constrained to a specific altp2m view. */
    struct {
        bool     enabled;
        uint16_t p2midx;
    } fast_single_step;

    /* (MFN) hypervisor page table */
    pagetable_t         monitor_table;

    struct hvm_vcpu_asid n1asid; /* ASID while running the (non-nested) guest. */

    u64                 msr_tsc_adjust; /* Shadow of MSR_IA32_TSC_ADJUST. */

    /* Vendor-specific state; exactly one member is in use per platform. */
    union {
        struct vmx_vcpu vmx;
        struct svm_vcpu svm;
    };

    struct tasklet      assert_evtchn_irq_tasklet;

    struct nestedvcpu   nvcpu;  /* Nested virtualization state. */

    struct altp2mvcpu   avcpu;  /* Alternate p2m state. */

    struct mtrr_state   mtrr;   /* Virtual MTRR state. */
    u64                 pat_cr; /* Virtual MSR_IA32_CR_PAT value. */

    /* In mode delay_for_missed_ticks, VCPUs have differing guest times. */
    int64_t             stime_offset;

    u8                  evtchn_upcall_vector; /* Vector for event-channel upcalls. */

    /* Which cache mode is this VCPU in (CR0:CD/NW)? */
    u8                  cache_mode;

    struct hvm_vcpu_io  hvm_io; /* Emulated I/O state. */

    /* Pending hw/sw interrupt (.vector = -1 means nothing pending). */
    struct x86_event     inject_event;

    struct viridian_vcpu *viridian; /* Viridian (Hyper-V) enlightenment state. */
};
217 
218 #endif /* __ASM_X86_HVM_VCPU_H__ */
219 
220 /*
221  * Local variables:
222  * mode: C
223  * c-file-style: "BSD"
224  * c-basic-offset: 4
225  * tab-width: 4
226  * indent-tabs-mode: nil
227  * End:
228  */
229