1 /*
2 * This library is free software; you can redistribute it and/or
3 * modify it under the terms of the GNU Lesser General Public
4 * License as published by the Free Software Foundation;
5 * version 2.1 of the License.
6 *
7 * This library is distributed in the hope that it will be useful,
8 * but WITHOUT ANY WARRANTY; without even the implied warranty of
9 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
10 * Lesser General Public License for more details.
11 *
12 * You should have received a copy of the GNU Lesser General Public
13 * License along with this library; If not, see <http://www.gnu.org/licenses/>.
14 */
15
16 #include "xc_private.h"
17 #include "xg_private.h"
18 #include "xg_save_restore.h"
19
20 #if defined(__i386__) || defined(__x86_64__)
21
22 #include <xen/foreign/x86_32.h>
23 #include <xen/foreign/x86_64.h>
24 #include <xen/hvm/params.h>
25
/*
 * Arrange for vcpu0's SCHEDOP_shutdown(SHUTDOWN_suspend) hypercall to
 * return 1 ("suspend cancelled") when the domain is resumed in place.
 *
 * Returns 0 on success (including the no-op case of an HVM guest with
 * no PV drivers), -1 or a libxc error code on failure.
 */
static int modify_returncode(xc_interface *xch, uint32_t domid)
{
    vcpu_guest_context_any_t ctxt;
    xc_dominfo_t info;
    xen_capabilities_info_t caps;
    struct domain_info_context _dinfo = {};
    struct domain_info_context *dinfo = &_dinfo;
    int ret;

    if ( xc_domain_getinfo(xch, domid, 1, &info) != 1 ||
         info.domid != domid )
    {
        PERROR("Could not get domain info");
        return -1;
    }

    /* Only a domain that suspended itself has a return code to patch. */
    if ( !info.shutdown || (info.shutdown_reason != SHUTDOWN_suspend) )
    {
        ERROR("Dom %d not suspended: (shutdown %d, reason %d)", domid,
              info.shutdown, info.shutdown_reason);
        errno = EINVAL;
        return -1;
    }

    if ( !info.hvm )
    {
        /* PV guest: probe its address width directly. */
        if ( xc_domain_get_guest_width(xch, domid, &dinfo->guest_width) )
            return -1;
    }
    else
    {
        uint64_t irq = 0;

        /*
         * HVM guests without PV drivers have no return code to modify:
         * a zero callback IRQ (or a failed param read, best-effort)
         * means there is nothing to do.
         */
        xc_hvm_param_get(xch, domid, HVM_PARAM_CALLBACK_IRQ, &irq);
        if ( !irq )
            return 0;

        /* HVM guests run with the host's address width. */
        if ( xc_version(xch, XENVER_capabilities, &caps) != 0 )
        {
            PERROR("Could not get Xen capabilities");
            return -1;
        }
        dinfo->guest_width = strstr(caps, "x86_64") ? 8 : 4;
    }

    if ( (ret = xc_vcpu_getcontext(xch, domid, 0, &ctxt)) != 0 )
        return ret;

    /* eax/rax carries the hypercall return value on resume. */
    SET_FIELD(&ctxt, user_regs.eax, 1, dinfo->guest_width);

    return xc_vcpu_setcontext(xch, domid, 0, &ctxt);
}
83
84 #else
85
/*
 * Non-x86 builds have no suspend return code to rewrite; report
 * success so cooperative resume proceeds unchanged.
 */
static int modify_returncode(xc_interface *xch, uint32_t domid)
{
    return 0;
}
91
92 #endif
93
/*
 * Fast-path resume: leave the domain context intact and make the
 * guest's suspend hypercall report "cancelled" (return 1).
 */
static int xc_domain_resume_cooperative(xc_interface *xch, uint32_t domid)
{
    DECLARE_DOMCTL;
    int ret;

    /*
     * Patch the hypercall return code first so the guest knows its
     * suspend was cancelled rather than completed in a new context.
     */
    ret = modify_returncode(xch, domid);
    if ( ret != 0 )
        return ret;

    domctl.cmd = XEN_DOMCTL_resumedomain;
    domctl.domain = domid;
    return do_domctl(xch, &domctl);
}
110
111 #if defined(__i386__) || defined(__x86_64__)
/*
 * Resume an HVM domain by simply unpausing it.
 */
static int xc_domain_resume_hvm(xc_interface *xch, uint32_t domid)
{
    DECLARE_DOMCTL;

    /*
     * XEN_DOMCTL_resumedomain unpauses every vcpu; the guest runs as
     * soon as the domctl completes.
     *
     * A PVHVM guest suspended itself via
     * SCHEDOP_shutdown:SHUTDOWN_suspend.  We leave its return code
     * untouched, so its PV drivers will disconnect and reconnect.
     *
     * A plain HVM guest just continues running.
     */
    domctl.cmd = XEN_DOMCTL_resumedomain;
    domctl.domain = domid;
    return do_domctl(xch, &domctl);
}
131 #endif
132
/*
 * Slow-path resume: rebuild enough guest state that the domain believes
 * it has been restored into a fresh context (its suspend hypercall will
 * appear to return 0).  On x86 PV this means rewriting store/console
 * PFNs in start_info back to MFNs and resetting secondary vcpus.
 */
static int xc_domain_resume_any(xc_interface *xch, uint32_t domid)
{
    DECLARE_DOMCTL;
    xc_dominfo_t info;
    int i, rc = -1;
#if defined(__i386__) || defined(__x86_64__)
    struct domain_info_context _dinfo = { .guest_width = 0,
                                          .p2m_size = 0 };
    struct domain_info_context *dinfo = &_dinfo;
    unsigned long mfn;
    vcpu_guest_context_any_t ctxt;
    start_info_t *start_info;
    shared_info_t *shinfo = NULL;
    xen_pfn_t *p2m_frame_list_list = NULL;
    xen_pfn_t *p2m_frame_list = NULL;
    xen_pfn_t *p2m = NULL;
#endif

    if ( xc_domain_getinfo(xch, domid, 1, &info) != 1 )
    {
        PERROR("Could not get domain info");
        return rc;
    }

    /*
     * (x86 only) Rewrite store_mfn and console_mfn back to MFN (from PFN),
     * which requires walking the guest's p2m table via the shared info
     * frame.
     */
#if defined(__i386__) || defined(__x86_64__)
    if ( info.hvm )
        return xc_domain_resume_hvm(xch, domid);

    if ( xc_domain_get_guest_width(xch, domid, &dinfo->guest_width) != 0 )
    {
        PERROR("Could not get domain width");
        return rc;
    }
    /* The p2m walk below assumes guest and toolstack widths match. */
    if ( dinfo->guest_width != sizeof(long) )
    {
        ERROR("Cannot resume uncooperative cross-address-size guests");
        return rc;
    }

    /* Map the shared info frame */
    shinfo = xc_map_foreign_range(xch, domid, PAGE_SIZE,
                                  PROT_READ, info.shared_info_frame);
    if ( shinfo == NULL )
    {
        ERROR("Couldn't map shared info");
        goto out;
    }

    dinfo->p2m_size = shinfo->arch.max_pfn;

    /* Top level of the 3-level p2m frame structure. */
    p2m_frame_list_list =
        xc_map_foreign_range(xch, domid, PAGE_SIZE, PROT_READ,
                             shinfo->arch.pfn_to_mfn_frame_list_list);
    if ( p2m_frame_list_list == NULL )
    {
        ERROR("Couldn't map p2m_frame_list_list");
        goto out;
    }

    p2m_frame_list = xc_map_foreign_pages(xch, domid, PROT_READ,
                                          p2m_frame_list_list,
                                          P2M_FLL_ENTRIES);
    if ( p2m_frame_list == NULL )
    {
        ERROR("Couldn't map p2m_frame_list");
        goto out;
    }

    /* Map all the frames of the pfn->mfn table. For migrate to succeed,
       the guest must not change which frames are used for this purpose.
       (It's not clear why it would want to change them, and we'll be OK
       from a safety POV anyhow.) */
    p2m = xc_map_foreign_pages(xch, domid, PROT_READ,
                               p2m_frame_list,
                               P2M_FL_ENTRIES);
    if ( p2m == NULL )
    {
        ERROR("Couldn't map p2m table");
        goto out;
    }

    if ( xc_vcpu_getcontext(xch, domid, 0, &ctxt) )
    {
        ERROR("Could not get vcpu context");
        goto out;
    }

    /*
     * On suspend, vcpu0's edx/rdx holds the start_info MFN
     * (NOTE(review): per the PV suspend ABI — confirm against the
     * guest-side suspend path).
     */
    mfn = GET_FIELD(&ctxt, user_regs.edx, dinfo->guest_width);

    start_info = xc_map_foreign_range(xch, domid, PAGE_SIZE,
                                      PROT_READ | PROT_WRITE, mfn);
    if ( start_info == NULL )
    {
        ERROR("Couldn't map start_info");
        goto out;
    }

    /* The suspend path converted these to PFNs; translate back to MFNs. */
    start_info->store_mfn        = p2m[start_info->store_mfn];
    start_info->console.domU.mfn = p2m[start_info->console.domU.mfn];

    munmap(start_info, PAGE_SIZE);
#endif /* defined(__i386__) || defined(__x86_64__) */

    /* Reset all secondary CPU states (NULL context = reset to initial). */
    for ( i = 1; i <= info.max_vcpu_id; i++ )
        if ( xc_vcpu_setcontext(xch, domid, i, NULL) != 0 )
        {
            ERROR("Couldn't reset vcpu state");
            goto out;
        }

    /* Ready to resume domain execution now. */
    domctl.cmd = XEN_DOMCTL_resumedomain;
    domctl.domain = domid;
    rc = do_domctl(xch, &domctl);

 out:
#if defined(__i386__) || defined(__x86_64__)
    if (p2m)
        munmap(p2m, P2M_FL_ENTRIES*PAGE_SIZE);
    if (p2m_frame_list)
        munmap(p2m_frame_list, P2M_FLL_ENTRIES*PAGE_SIZE);
    if (p2m_frame_list_list)
        munmap(p2m_frame_list_list, PAGE_SIZE);
    if (shinfo)
        munmap(shinfo, PAGE_SIZE);
#endif

    return rc;
}
266
/*
 * Resume execution of a domain after suspend shutdown.
 * This can happen in one of two ways:
 * 1. (fast=1) Resume the guest without resetting the domain environment.
 *    The guest's call to SCHEDOP_shutdown(SHUTDOWN_suspend) will return 1.
 *
 * 2. (fast=0) Reset guest environment so it believes it is resumed in a new
 *    domain context. The guest's call to SCHEDOP_shutdown(SHUTDOWN_suspend)
 *    will return 0.
 *
 * (1) should only be used for guests which can handle the special return
 * code. Also note that the insertion of the return code is quite interesting
 * and that the guest MUST be paused - otherwise we would be corrupting
 * the guest vCPU state.
 *
 * (2) should be used only for guests which cannot handle the special
 * new return code - and it is always safe (but slower).
 */
int xc_domain_resume(xc_interface *xch, uint32_t domid, int fast)
{
    /* Dispatch to the cooperative fast path or the full reset path. */
    if ( fast )
        return xc_domain_resume_cooperative(xch, domid);

    return xc_domain_resume_any(xch, domid);
}
291