/*
 * Nested HVM
 * Copyright (c) 2011, Advanced Micro Devices, Inc.
 * Author: Christoph Egger <Christoph.Egger@amd.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; If not, see <http://www.gnu.org/licenses/>.
 */

#include <asm/msr.h>
#include <asm/hvm/support.h>
#include <asm/hvm/hvm.h>
#include <asm/p2m.h>    /* for struct p2m_domain */
#include <asm/hvm/nestedhvm.h>
#include <asm/event.h>  /* for local_event_delivery_(en|dis)able */
#include <asm/paging.h> /* for paging_mode_hap() */

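/*
 * Shadow I/O permission bitmaps shared by all nested guests.  They are
 * allocated once at boot in nestedhvm_setup(); see the pattern table
 * further down for which ports each variant leaves unintercepted.
 */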
static unsigned long *shadow_io_bitmap[3];

/* Nested VCPU */
bool_t
nestedhvm_vcpu_in_guestmode(struct vcpu *v)
{
    return vcpu_nestedhvm(v).nv_guestmode;
}

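/*
 * Bring all nested virtualization state of this vcpu back to its initial
 * values and leave the vcpu in host mode.
 */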
void
nestedhvm_vcpu_reset(struct vcpu *v)
{
    struct nestedvcpu *nv = &vcpu_nestedhvm(v);

    nv->nv_vmentry_pending = 0;
    nv->nv_vmexit_pending = 0;
    nv->nv_vmswitch_in_progress = 0;
    nv->nv_ioport80 = 0;
    nv->nv_ioportED = 0;

    hvm_unmap_guest_frame(nv->nv_vvmcx, 1);
    nv->nv_vvmcx = NULL;
    nv->nv_vvmcxaddr = INVALID_PADDR;
    nv->nv_flushp2m = 0;
    nv->nv_p2m = NULL;
    nv->stale_np2m = false;
    nv->np2m_generation = 0;

    hvm_asid_flush_vcpu_asid(&nv->nv_n2asid);

    if ( hvm_funcs.nhvm_vcpu_reset )
        hvm_funcs.nhvm_vcpu_reset(v);

    /* vcpu is in host mode */
    nestedhvm_vcpu_exit_guestmode(v);
}

int
nestedhvm_vcpu_initialise(struct vcpu *v)
{
    int rc = -EOPNOTSUPP;

    if ( !shadow_io_bitmap[0] )
        return -ENOMEM;

    if ( !hvm_funcs.nhvm_vcpu_initialise ||
         ((rc = hvm_funcs.nhvm_vcpu_initialise(v)) != 0) )
        return rc;

    nestedhvm_vcpu_reset(v);
    return 0;
}

void
nestedhvm_vcpu_destroy(struct vcpu *v)
{
    if ( hvm_funcs.nhvm_vcpu_destroy )
        hvm_funcs.nhvm_vcpu_destroy(v);
}

static void
nestedhvm_flushtlb_ipi(void *info)
{
    struct vcpu *v = current;
    struct domain *d = info;

    ASSERT(d != NULL);
    if ( v->domain != d )
    {
        /* This cpu doesn't belong to the domain. */
        return;
    }

    /*
     * Just flush the ASID (or request a new one).
     * This is cheaper than flush_tlb_local() and has
     * the same desired effect.
     */
    hvm_asid_flush_core();
    vcpu_nestedhvm(v).nv_p2m = NULL;
    vcpu_nestedhvm(v).stale_np2m = true;
}

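/*
 * Flush (via IPI) the nested p2m state of any vcpu of this p2m's domain
 * currently running on one of the p2m's dirty cpus, then clear the dirty
 * cpu mask.
 */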
void
nestedhvm_vmcx_flushtlb(struct p2m_domain *p2m)
{
    on_selected_cpus(p2m->dirty_cpumask, nestedhvm_flushtlb_ipi,
                     p2m->domain, 1);
    cpumask_clear(p2m->dirty_cpumask);
}

/* Common shadow IO Permission bitmap */

/*
 * There are four global patterns of I/O bitmap each guest can choose
 * from, depending on whether I/O port 0x80 and/or port 0xED are
 * intercepted (shown in the table below).
 * The users of the bitmap patterns are in SVM/VMX specific code.
 *
 * bitmap           port 0x80    port 0xED
 * hvm_io_bitmap    cleared      cleared
 * iomap[0]         cleared      set
 * iomap[1]         set          cleared
 * iomap[2]         set          set
 */

static int __init
nestedhvm_setup(void)
{
    /* Same format and size as hvm_io_bitmap (Intel needs only 2 pages). */
    unsigned int nr = cpu_has_vmx ? 2 : 3;
    unsigned int i, order = get_order_from_pages(nr);

    if ( !hvm_funcs.name )
        return 0;

    /*
     * The shadow_io_bitmaps can't be declared statically because they
     * must fulfil hardware requirements (a page-aligned section) and
     * doing so triggers the ASSERT(va >= XEN_VIRT_START) in
     * __virt_to_maddr().
     *
     * So, as a compromise, pre-allocate them when Xen boots.
     * This function must be called from within start_xen(), when it is
     * valid to use _xmalloc().
     */

    for ( i = 0; i < ARRAY_SIZE(shadow_io_bitmap); i++ )
    {
        shadow_io_bitmap[i] = alloc_xenheap_pages(order, 0);
        if ( !shadow_io_bitmap[i] )
        {
            while ( i-- )
            {
                free_xenheap_pages(shadow_io_bitmap[i], order);
                shadow_io_bitmap[i] = NULL;
            }
            return -ENOMEM;
        }
        memset(shadow_io_bitmap[i], ~0U, nr << PAGE_SHIFT);
    }

    /* Allow direct access to port 0x80 in iomap[0] and port 0xED in iomap[1]. */
    __clear_bit(0x80, shadow_io_bitmap[0]);
    __clear_bit(0xed, shadow_io_bitmap[1]);

    return 0;
}
__initcall(nestedhvm_setup);

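/*
 * Return the I/O permission bitmap matching the requested interception of
 * ports 0x80 and 0xED.  Illustrative use only (the real callers live in
 * the SVM/VMX specific nested code): a caller that wants port 0x80
 * intercepted but port 0xED passed through would do
 *
 *     unsigned long *iomap = nestedhvm_vcpu_iomap_get(1, 0);
 *
 * and get back shadow_io_bitmap[1], i.e. iomap[1] in the table above.
 */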
unsigned long *
nestedhvm_vcpu_iomap_get(bool_t port_80, bool_t port_ed)
{
    int i;

    if ( !hvm_port80_allowed )
        port_80 = 1;

    if ( port_80 == 0 )
    {
        if ( port_ed == 0 )
            return hvm_io_bitmap;
        i = 0;
    }
    else
    {
        if ( port_ed == 0 )
            i = 1;
        else
            i = 2;
    }

    return shadow_io_bitmap[i];
}