// SPDX-License-Identifier: GPL-2.0-only
#include <linux/kvm.h>
#include <linux/psp-sev.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <stdlib.h>
#include <errno.h>
#include <pthread.h>

#include "test_util.h"
#include "kvm_util.h"
#include "processor.h"
#include "svm_util.h"
#include "kselftest.h"
#include "../lib/kvm_util_internal.h"

#define SEV_POLICY_ES 0b100

#define NR_MIGRATE_TEST_VCPUS 4
#define NR_MIGRATE_TEST_VMS 3
#define NR_LOCK_TESTING_THREADS 3
#define NR_LOCK_TESTING_ITERATIONS 10000

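/*
 * Wrapper for KVM_MEMORY_ENCRYPT_OP: issue the given SEV command against the
 * VM fd and assert that the ioctl succeeds or the firmware reports success.
 */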
static void sev_ioctl(int vm_fd, int cmd_id, void *data)
{
	struct kvm_sev_cmd cmd = {
		.id = cmd_id,
		.data = (uint64_t)data,
		.sev_fd = open_sev_dev_path_or_exit(),
	};
	int ret;

	ret = ioctl(vm_fd, KVM_MEMORY_ENCRYPT_OP, &cmd);
	TEST_ASSERT((ret == 0 || cmd.error == SEV_RET_SUCCESS),
		    "%d failed: return code: %d, errno: %d, fw error: %d",
		    cmd_id, ret, errno, cmd.error);
}

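/*
 * Create a VM, enable SEV (or SEV-ES), add NR_MIGRATE_TEST_VCPUS vCPUs and
 * run the launch flow, including LAUNCH_UPDATE_VMSA for SEV-ES guests.
 */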
static struct kvm_vm *sev_vm_create(bool es)
{
	struct kvm_vm *vm;
	struct kvm_sev_launch_start start = { 0 };
	int i;

	vm = vm_create(VM_MODE_DEFAULT, 0, O_RDWR);
	sev_ioctl(vm->fd, es ? KVM_SEV_ES_INIT : KVM_SEV_INIT, NULL);
	for (i = 0; i < NR_MIGRATE_TEST_VCPUS; ++i)
		vm_vcpu_add(vm, i);
	if (es)
		start.policy |= SEV_POLICY_ES;
	sev_ioctl(vm->fd, KVM_SEV_LAUNCH_START, &start);
	if (es)
		sev_ioctl(vm->fd, KVM_SEV_LAUNCH_UPDATE_VMSA, NULL);
	return vm;
}

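/* Create a plain, non-SEV VM to act as a migration or mirror destination. */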
static struct kvm_vm *aux_vm_create(bool with_vcpus)
{
	struct kvm_vm *vm;
	int i;

	vm = vm_create(VM_MODE_DEFAULT, 0, O_RDWR);
	if (!with_vcpus)
		return vm;

	for (i = 0; i < NR_MIGRATE_TEST_VCPUS; ++i)
		vm_vcpu_add(vm, i);

	return vm;
}

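/*
 * Move the SEV encryption context from src_fd to dst_fd via
 * KVM_CAP_VM_MOVE_ENC_CONTEXT_FROM.  The __ variant returns the raw ioctl
 * result; the plain variant asserts success.
 */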
static int __sev_migrate_from(int dst_fd, int src_fd)
{
	struct kvm_enable_cap cap = {
		.cap = KVM_CAP_VM_MOVE_ENC_CONTEXT_FROM,
		.args = { src_fd }
	};

	return ioctl(dst_fd, KVM_ENABLE_CAP, &cap);
}

static void sev_migrate_from(int dst_fd, int src_fd)
{
	int ret;

	ret = __sev_migrate_from(dst_fd, src_fd);
	TEST_ASSERT(!ret, "Migration failed, ret: %d, errno: %d\n", ret, errno);
}

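/*
 * Migrate an SEV(-ES) context from the source VM through a chain of
 * destination VMs, then verify the drained source can no longer be used
 * as a migration source.
 */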
static void test_sev_migrate_from(bool es)
{
	struct kvm_vm *src_vm;
	struct kvm_vm *dst_vms[NR_MIGRATE_TEST_VMS];
	int i, ret;

	src_vm = sev_vm_create(es);
	for (i = 0; i < NR_MIGRATE_TEST_VMS; ++i)
		dst_vms[i] = aux_vm_create(true);

	/* Initial migration from the src to the first dst. */
	sev_migrate_from(dst_vms[0]->fd, src_vm->fd);

	for (i = 1; i < NR_MIGRATE_TEST_VMS; i++)
		sev_migrate_from(dst_vms[i]->fd, dst_vms[i - 1]->fd);

	/* Migrating back to the original VM should fail: it is dead. */
	ret = __sev_migrate_from(src_vm->fd, dst_vms[NR_MIGRATE_TEST_VMS - 1]->fd);
	TEST_ASSERT(ret == -1 && errno == EIO,
		    "VM that was migrated from should be dead. ret %d, errno: %d\n", ret,
		    errno);

	kvm_vm_free(src_vm);
	for (i = 0; i < NR_MIGRATE_TEST_VMS; ++i)
		kvm_vm_free(dst_vms[i]);
}

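/*
 * Per-thread input for the locking stress test: the thread's own VM plus
 * the fds of all VMs in the set.  Each thread repeatedly migrates into its
 * VM from every source fd, racing against the other threads.
 */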
struct locking_thread_input {
	struct kvm_vm *vm;
	int source_fds[NR_LOCK_TESTING_THREADS];
};

static void *locking_test_thread(void *arg)
{
	int i, j;
	struct locking_thread_input *input = (struct locking_thread_input *)arg;

	for (i = 0; i < NR_LOCK_TESTING_ITERATIONS; ++i) {
		j = i % NR_LOCK_TESTING_THREADS;
		__sev_migrate_from(input->vm->fd, input->source_fds[j]);
	}

	return NULL;
}

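/*
 * Stress KVM's migration locking: several threads concurrently migrate
 * between the same set of SEV VMs.  Individual migrations may fail; the
 * test only cares that nothing deadlocks or crashes.
 */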
static void test_sev_migrate_locking(void)
{
	struct locking_thread_input input[NR_LOCK_TESTING_THREADS];
	pthread_t pt[NR_LOCK_TESTING_THREADS];
	int i;

	for (i = 0; i < NR_LOCK_TESTING_THREADS; ++i) {
		input[i].vm = sev_vm_create(/* es= */ false);
		input[0].source_fds[i] = input[i].vm->fd;
	}
	for (i = 1; i < NR_LOCK_TESTING_THREADS; ++i)
		memcpy(input[i].source_fds, input[0].source_fds,
		       sizeof(input[i].source_fds));

	for (i = 0; i < NR_LOCK_TESTING_THREADS; ++i)
		pthread_create(&pt[i], NULL, locking_test_thread, &input[i]);

	for (i = 0; i < NR_LOCK_TESTING_THREADS; ++i)
		pthread_join(pt[i], NULL);
	for (i = 0; i < NR_LOCK_TESTING_THREADS; ++i)
		kvm_vm_free(input[i].vm);
}

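/* Verify that malformed migration requests are rejected with EINVAL. */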
static void test_sev_migrate_parameters(void)
{
	struct kvm_vm *sev_vm, *sev_es_vm, *vm_no_vcpu, *vm_no_sev,
		*sev_es_vm_no_vmsa;
	int ret;

	sev_vm = sev_vm_create(/* es= */ false);
	sev_es_vm = sev_vm_create(/* es= */ true);
	vm_no_vcpu = vm_create(VM_MODE_DEFAULT, 0, O_RDWR);
	vm_no_sev = aux_vm_create(true);
	sev_es_vm_no_vmsa = vm_create(VM_MODE_DEFAULT, 0, O_RDWR);
	sev_ioctl(sev_es_vm_no_vmsa->fd, KVM_SEV_ES_INIT, NULL);
	vm_vcpu_add(sev_es_vm_no_vmsa, 1);

	ret = __sev_migrate_from(sev_vm->fd, sev_es_vm->fd);
	TEST_ASSERT(
		ret == -1 && errno == EINVAL,
		"Should not be able to migrate to SEV enabled VM. ret: %d, errno: %d\n",
		ret, errno);

	ret = __sev_migrate_from(sev_es_vm->fd, sev_vm->fd);
	TEST_ASSERT(
		ret == -1 && errno == EINVAL,
		"Should not be able to migrate to SEV-ES enabled VM. ret: %d, errno: %d\n",
		ret, errno);

	ret = __sev_migrate_from(vm_no_vcpu->fd, sev_es_vm->fd);
	TEST_ASSERT(
		ret == -1 && errno == EINVAL,
		"SEV-ES migrations require same number of vCPUS. ret: %d, errno: %d\n",
		ret, errno);

	ret = __sev_migrate_from(vm_no_vcpu->fd, sev_es_vm_no_vmsa->fd);
	TEST_ASSERT(
		ret == -1 && errno == EINVAL,
		"SEV-ES migrations require UPDATE_VMSA. ret %d, errno: %d\n",
		ret, errno);

	ret = __sev_migrate_from(vm_no_vcpu->fd, vm_no_sev->fd);
	TEST_ASSERT(ret == -1 && errno == EINVAL,
		    "Migrations require SEV enabled. ret %d, errno: %d\n", ret,
		    errno);

	kvm_vm_free(sev_vm);
	kvm_vm_free(sev_es_vm);
	kvm_vm_free(sev_es_vm_no_vmsa);
	kvm_vm_free(vm_no_vcpu);
	kvm_vm_free(vm_no_sev);
}

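/*
 * Copy (mirror) the SEV encryption context from src_fd to dst_fd via
 * KVM_CAP_VM_COPY_ENC_CONTEXT_FROM.  The __ variant returns the raw ioctl
 * result; the plain variant asserts success.
 */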
static int __sev_mirror_create(int dst_fd, int src_fd)
{
	struct kvm_enable_cap cap = {
		.cap = KVM_CAP_VM_COPY_ENC_CONTEXT_FROM,
		.args = { src_fd }
	};

	return ioctl(dst_fd, KVM_ENABLE_CAP, &cap);
}

static void sev_mirror_create(int dst_fd, int src_fd)
{
	int ret;

	ret = __sev_mirror_create(dst_fd, src_fd);
	TEST_ASSERT(!ret, "Copying context failed, ret: %d, errno: %d\n", ret, errno);
}

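/*
 * Mirror an SEV(-ES) context into a fresh VM and check that the mirror can
 * add vCPUs and complete the launch flow.
 */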
static void test_sev_mirror(bool es)
{
	struct kvm_vm *src_vm, *dst_vm;
	struct kvm_sev_launch_start start = {
		.policy = es ? SEV_POLICY_ES : 0
	};
	int i;

	src_vm = sev_vm_create(es);
	dst_vm = aux_vm_create(false);

	sev_mirror_create(dst_vm->fd, src_vm->fd);

	/* Check that we can complete creation of the mirror VM. */
	for (i = 0; i < NR_MIGRATE_TEST_VCPUS; ++i)
		vm_vcpu_add(dst_vm, i);
	sev_ioctl(dst_vm->fd, KVM_SEV_LAUNCH_START, &start);
	if (es)
		sev_ioctl(dst_vm->fd, KVM_SEV_LAUNCH_UPDATE_VMSA, NULL);

	kvm_vm_free(src_vm);
	kvm_vm_free(dst_vm);
}

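/* Verify that malformed mirror requests are rejected with EINVAL. */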
static void test_sev_mirror_parameters(void)
{
	struct kvm_vm *sev_vm, *sev_es_vm, *vm_no_vcpu, *vm_with_vcpu;
	int ret;

	sev_vm = sev_vm_create(/* es= */ false);
	sev_es_vm = sev_vm_create(/* es= */ true);
	vm_with_vcpu = aux_vm_create(true);
	vm_no_vcpu = aux_vm_create(false);

	ret = __sev_mirror_create(sev_vm->fd, sev_vm->fd);
	TEST_ASSERT(
		ret == -1 && errno == EINVAL,
		"Should not be able to copy context to self. ret: %d, errno: %d\n",
		ret, errno);

	ret = __sev_mirror_create(sev_vm->fd, sev_es_vm->fd);
	TEST_ASSERT(
		ret == -1 && errno == EINVAL,
		"Should not be able to copy context to SEV enabled VM. ret: %d, errno: %d\n",
		ret, errno);

	ret = __sev_mirror_create(sev_es_vm->fd, sev_vm->fd);
	TEST_ASSERT(
		ret == -1 && errno == EINVAL,
		"Should not be able to copy context to SEV-ES enabled VM. ret: %d, errno: %d\n",
		ret, errno);

	ret = __sev_mirror_create(vm_no_vcpu->fd, vm_with_vcpu->fd);
	TEST_ASSERT(ret == -1 && errno == EINVAL,
		    "Copy context requires SEV enabled. ret %d, errno: %d\n", ret,
		    errno);

	ret = __sev_mirror_create(vm_with_vcpu->fd, sev_vm->fd);
	TEST_ASSERT(
		ret == -1 && errno == EINVAL,
		"SEV copy context requires no vCPUS on the destination. ret: %d, errno: %d\n",
		ret, errno);

	kvm_vm_free(sev_vm);
	kvm_vm_free(sev_es_vm);
	kvm_vm_free(vm_with_vcpu);
	kvm_vm_free(vm_no_vcpu);
}

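/*
 * Check the interaction between migration and mirroring: a VM with a live
 * mirror cannot be migrated until its last mirror is gone.
 */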
static void test_sev_move_copy(void)
{
	struct kvm_vm *dst_vm, *sev_vm, *mirror_vm, *dst_mirror_vm;
	int ret;

	sev_vm = sev_vm_create(/* es= */ false);
	dst_vm = aux_vm_create(true);
	mirror_vm = aux_vm_create(false);
	dst_mirror_vm = aux_vm_create(false);

	sev_mirror_create(mirror_vm->fd, sev_vm->fd);
	ret = __sev_migrate_from(dst_vm->fd, sev_vm->fd);
	TEST_ASSERT(ret == -1 && errno == EBUSY,
		    "Cannot migrate VM that has mirrors. ret %d, errno: %d\n", ret,
		    errno);

	/* The mirror itself can be migrated. */
	sev_migrate_from(dst_mirror_vm->fd, mirror_vm->fd);
	ret = __sev_migrate_from(dst_vm->fd, sev_vm->fd);
	TEST_ASSERT(ret == -1 && errno == EBUSY,
		    "Cannot migrate VM that has mirrors. ret %d, errno: %d\n", ret,
		    errno);

	/*
	 * mirror_vm is not a mirror anymore, dst_mirror_vm is.  Thus, the
	 * owner can be migrated again as soon as dst_mirror_vm is gone.
	 */
	kvm_vm_free(dst_mirror_vm);
	sev_migrate_from(dst_vm->fd, sev_vm->fd);

	kvm_vm_free(mirror_vm);
	kvm_vm_free(dst_vm);
	kvm_vm_free(sev_vm);
}

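/* Each group of tests runs only if KVM reports the corresponding capability. */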
int main(int argc, char *argv[])
{
	if (kvm_check_cap(KVM_CAP_VM_MOVE_ENC_CONTEXT_FROM)) {
		test_sev_migrate_from(/* es= */ false);
		test_sev_migrate_from(/* es= */ true);
		test_sev_migrate_locking();
		test_sev_migrate_parameters();
		if (kvm_check_cap(KVM_CAP_VM_COPY_ENC_CONTEXT_FROM))
			test_sev_move_copy();
	}
	if (kvm_check_cap(KVM_CAP_VM_COPY_ENC_CONTEXT_FROM)) {
		test_sev_mirror(/* es= */ false);
		test_sev_mirror(/* es= */ true);
		test_sev_mirror_parameters();
	}
	return 0;
}