1 
2 /*
3  * vvmx.h: Support virtual VMX for nested virtualization.
4  *
5  * Copyright (c) 2010, Intel Corporation.
6  * Author: Qing He <qing.he@intel.com>
7  *         Eddie Dong <eddie.dong@intel.com>
8  *
9  * This program is free software; you can redistribute it and/or modify it
10  * under the terms and conditions of the GNU General Public License,
11  * version 2, as published by the Free Software Foundation.
12  *
13  * This program is distributed in the hope it will be useful, but WITHOUT
14  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
15  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
16  * more details.
17  *
18  * You should have received a copy of the GNU General Public License along with
19  * this program; If not, see <http://www.gnu.org/licenses/>.
20  *
21  */
22 #ifndef __ASM_X86_HVM_VVMX_H__
23 #define __ASM_X86_HVM_VVMX_H__
24 
/* Node tracking one virtual VMCS on the per-vcpu launched list. */
struct vvmcs_list {
    unsigned long vvmcs_mfn;    /* MFN of the virtual VMCS page */
    struct list_head node;      /* linkage on nestedvmx.launched_list */
};
29 
/* Per-vcpu nested VMX state: L1's VMX context while it runs L2 guests. */
struct nestedvmx {
    /*
     * vmxon_region_pa is also used to indicate whether a vcpu is in
     * the VMX operation. When a vcpu is out of the VMX operation, its
     * vmxon_region_pa is set to an invalid address INVALID_PADDR. We
     * cannot use 0 for this purpose, because it's a valid VMXON region
     * address.
     */
    paddr_t    vmxon_region_pa;
    void       *iobitmap[2];		/* map (va) of L1 guest I/O bitmap */
    struct vmx_msr_bitmap *msrbitmap;	/* map (va) of L1 guest MSR bitmap */
    struct vmx_msr_bitmap *msr_merged;	/* merged L1 and L2 MSR bitmap */
    /* deferred nested interrupt */
    struct {
        unsigned long intr_info;  /* VM-entry interruption-information */
        u32           error_code; /* associated error code, if delivered */
        u8            source;     /* origin of the deferred event */
    } intr;
    /* pending nested-EPT-induced vmexit state */
    struct {
        bool_t   enabled;         /* an EPT exit is pending injection to L1 */
        uint32_t exit_reason;     /* reason to report to L1 */
        uint32_t exit_qual;       /* exit qualification to report to L1 */
    } ept;
    uint32_t guest_vpid;          /* VPID in use for the nested guest */
    struct list_head launched_list;	/* list of vvmcs_list nodes */
};
56 
/* Shorthand accessor for a vcpu's nested-VMX state. */
#define vcpu_2_nvmx(v)	(vcpu_nestedhvm(v).u.nvmx)

/*
 * "Default1" classes of the VMX execution controls: bits reported as
 * must-be-1 to the L1 guest in the corresponding VMX capability MSRs.
 */
/* bit 1, 2, 4 must be 1 */
#define VMX_PINBASED_CTLS_DEFAULT1	0x16
/* bit 1, 4-6,8,13-16,26 must be 1 */
#define VMX_PROCBASED_CTLS_DEFAULT1	0x401e172
/* bit 0-8, 10,11,13,14,16,17 must be 1 */
#define VMX_EXIT_CTLS_DEFAULT1		0x36dff
/* bit 0-8, and 12 must be 1 */
#define VMX_ENTRY_CTLS_DEFAULT1		0x11ff
67 
68 
/*
 * Decoded view of the VM-exit instruction-information field for VMX
 * instructions (operand encoding: scaling, registers, segment, etc.).
 */
union vmx_inst_info {
    struct {
        unsigned int scaling           :2; /* bit 0-1 */
        unsigned int __rsvd0           :1; /* bit 2 */
        unsigned int reg1              :4; /* bit 3-6 */
        unsigned int addr_size         :3; /* bit 7-9 */
        unsigned int memreg            :1; /* bit 10 */
        unsigned int __rsvd1           :4; /* bit 11-14 */
        unsigned int segment           :3; /* bit 15-17 */
        unsigned int index_reg         :4; /* bit 18-21 */
        unsigned int index_reg_invalid :1; /* bit 22 */
        unsigned int base_reg          :4; /* bit 23-26 */
        unsigned int base_reg_invalid  :1; /* bit 27 */
        unsigned int reg2              :4; /* bit 28-31 */
    } fields;
    u32 word;                              /* raw 32-bit value */
};
86 
/* Allocate/initialise nested-VMX state for a vcpu; 0 on success. */
int nvmx_vcpu_initialise(struct vcpu *v);
/* Tear down a vcpu's nested-VMX state. */
void nvmx_vcpu_destroy(struct vcpu *v);
/* Reset nested-VMX state on vcpu reset; 0 on success. */
int nvmx_vcpu_reset(struct vcpu *v);
/* Fetch the EPTP base configured by the L1 guest for this vcpu. */
uint64_t nvmx_vcpu_eptp_base(struct vcpu *v);
/* Report whether/why interrupt delivery to the nested guest is blocked. */
enum hvm_intblk nvmx_intr_blocked(struct vcpu *v);
/* True if L1 has asked to intercept the given exception vector. */
bool_t nvmx_intercepts_exception(
    struct vcpu *v, unsigned int vector, int error_code);
/* Release domain-wide nested-VMX resources at domain teardown. */
void nvmx_domain_relinquish_resources(struct domain *d);

/* True if nested EPT is enabled for this vcpu. */
bool_t nvmx_ept_enabled(struct vcpu *v);
97 
/* Result codes for nested EPT translation. */
#define EPT_TRANSLATE_SUCCEED       0
#define EPT_TRANSLATE_VIOLATION     1  /* report EPT violation to L1 */
#define EPT_TRANSLATE_MISCONFIG     2  /* report EPT misconfig to L1 */
#define EPT_TRANSLATE_RETRY         3  /* transient failure; retry the walk */

/*
 * Walk L1's p2m to translate an L2 guest-physical address to an L1
 * guest-physical address, returning the mapping order and access
 * permissions; the access_* flags describe the faulting access.
 */
int
nvmx_hap_walk_L1_p2m(struct vcpu *v, paddr_t L2_gpa, paddr_t *L1_gpa,
                     unsigned int *page_order, uint8_t *p2m_acc,
                     bool_t access_r, bool_t access_w, bool_t access_x);
/*
 * Virtual VMCS layout
 *
 * Since the physical VMCS layout is unknown, a custom layout is used
 * for the virtual VMCS seen by the guest. It occupies a 4k page, and
 * each field is located at a 9-bit offset into u64[]. The offset is
 * laid out as follows, which means every <width, type> pair has a
 * maximum of 32 fields available.
 *
 *             9       7      5               0
 *             --------------------------------
 *     offset: | width | type |     index     |
 *             --------------------------------
 *
 * Also, since the lower range <width=0, type={0,1}> has only one
 * field (VPID), it is moved to a higher offset (63), leaving the
 * lower range to non-indexed fields like the VMCS revision.
 *
 */
126 
/* First two words of the virtual VMCS page, mirroring the VMCS region
 * header: revision identifier and VMX-abort indicator. */
struct vvmcs_header {
    u32 revision;  /* VMCS revision identifier */
    u32 abort;     /* VMX-abort indicator */
};
131 
/* Decomposition of an architectural 32-bit VMCS field encoding. */
union vmcs_encoding {
    struct {
        u32 access_type : 1;  /* 0 = full, 1 = high half of 64-bit field */
        u32 index : 9;        /* field index within <width, type> group */
        u32 type : 2;         /* see enum vvmcs_encoding_type */
        u32 rsv1 : 1;
        u32 width : 2;        /* see enum vvmcs_encoding_width */
        u32 rsv2 : 17;
    };
    u32 word;                 /* raw encoding value */
};
143 
/*
 * Field-width values of a VMCS encoding. Note the deliberate ordering:
 * 64-bit is 1 and 32-bit is 2, matching the architectural encoding.
 */
enum vvmcs_encoding_width {
    VVMCS_WIDTH_16 = 0,
    VVMCS_WIDTH_64,
    VVMCS_WIDTH_32,
    VVMCS_WIDTH_NATURAL,
};
150 
/* Field-type values of a VMCS encoding. */
enum vvmcs_encoding_type {
    VVMCS_TYPE_CONTROL = 0,  /* control fields */
    VVMCS_TYPE_RO,           /* read-only data fields */
    VVMCS_TYPE_GSTATE,       /* guest-state fields */
    VVMCS_TYPE_HSTATE,       /* host-state fields */
};
157 
/*
 * Virtual-VMCS field accessors. The "virtual" variants operate on the
 * software vvmcs page mapped at nv_vvmcx; the "real" variants use the
 * hardware shadow VMCS. The _safe forms report a VMX instruction error
 * instead of assuming the encoding is valid.
 */
u64 get_vvmcs_virtual(void *vvmcs, u32 encoding);
u64 get_vvmcs_real(const struct vcpu *, u32 encoding);
void set_vvmcs_virtual(void *vvmcs, u32 encoding, u64 val);
void set_vvmcs_real(const struct vcpu *, u32 encoding, u64 val);
enum vmx_insn_errno get_vvmcs_virtual_safe(void *vvmcs, u32 encoding, u64 *val);
enum vmx_insn_errno get_vvmcs_real_safe(const struct vcpu *, u32 encoding,
                                        u64 *val);
enum vmx_insn_errno set_vvmcs_virtual_safe(void *vvmcs, u32 encoding, u64 val);
enum vmx_insn_errno set_vvmcs_real_safe(const struct vcpu *, u32 encoding,
                                        u64 val);

/*
 * Dispatch to the real (shadow VMCS) or virtual accessor depending on
 * hardware VMCS-shadowing support. Note: arguments may be evaluated
 * more than once; pass side-effect-free expressions.
 */
#define get_vvmcs(vcpu, encoding) \
  (cpu_has_vmx_vmcs_shadowing ? \
   get_vvmcs_real(vcpu, encoding) : \
   get_vvmcs_virtual(vcpu_nestedhvm(vcpu).nv_vvmcx, encoding))

#define set_vvmcs(vcpu, encoding, val) \
  (cpu_has_vmx_vmcs_shadowing ? \
   set_vvmcs_real(vcpu, encoding, val) : \
   set_vvmcs_virtual(vcpu_nestedhvm(vcpu).nv_vvmcx, encoding, val))

#define get_vvmcs_safe(vcpu, encoding, val) \
  (cpu_has_vmx_vmcs_shadowing ? \
   get_vvmcs_real_safe(vcpu, encoding, val) : \
   get_vvmcs_virtual_safe(vcpu_nestedhvm(vcpu).nv_vvmcx, encoding, val))

#define set_vvmcs_safe(vcpu, encoding, val) \
  (cpu_has_vmx_vmcs_shadowing ? \
   set_vvmcs_real_safe(vcpu, encoding, val) : \
   set_vvmcs_virtual_safe(vcpu_nestedhvm(vcpu).nv_vvmcx, encoding, val))
188 
/* Free the vcpu's virtual VMCS resources. */
void nvmx_destroy_vmcs(struct vcpu *v);
/* Emulate a VMX instruction executed by the L1 guest. */
int nvmx_handle_vmx_insn(struct cpu_user_regs *regs, unsigned int exit_reason);
/* Intercept L1 reads of VMX capability MSRs; 0 if not handled. */
int nvmx_msr_read_intercept(unsigned int msr,
                                u64 *msr_content);

/* Merge L1's requested execution controls into the active VMCS. */
void nvmx_update_exec_control(struct vcpu *v, u32 value);
void nvmx_update_secondary_exec_control(struct vcpu *v,
                                        unsigned long value);
void nvmx_update_exception_bitmap(struct vcpu *v, unsigned long value);
/* Perform a pending L1<->L2 context switch for the current vcpu. */
void nvmx_switch_guest(void);
/* Handle IDT-vectoring information on nested vmexit. */
void nvmx_idtv_handling(void);
/* Combined TSC offset to apply while the nested guest runs. */
u64 nvmx_get_tsc_offset(struct vcpu *v);
/* Decide whether an L2 vmexit is handled by L0 or reflected to L1. */
int nvmx_n2_vmexit_handler(struct cpu_user_regs *regs,
                          unsigned int exit_reason);
/* Refresh the CR read shadow seen by the nested guest. */
void nvmx_set_cr_read_shadow(struct vcpu *v, unsigned int cr);

/* EPT/VPID capabilities to expose to the L1 guest. */
uint64_t nept_get_ept_vpid_cap(void);

/*
 * Translate an L2 guest address through L1's EPT; returns one of the
 * EPT_TRANSLATE_* codes, filling exit details for reflection to L1.
 */
int nept_translate_l2ga(struct vcpu *v, paddr_t l2ga,
                        unsigned int *page_order, uint32_t rwx_acc,
                        unsigned long *l1gfn, uint8_t *p2m_acc,
                        uint64_t *exit_qual, uint32_t *exit_reason);
/* Per-CPU setup/teardown hooks for nested VMX. */
int nvmx_cpu_up_prepare(unsigned int cpu);
void nvmx_cpu_dead(unsigned int cpu);
213 #endif /* __ASM_X86_HVM_VVMX_H__ */
214 
215