#include <console.h>
#include <unistd.h>
#include <errno.h>
#include <string.h>
#include <inttypes.h>
#include <stdlib.h>
#include <stdbool.h>
#include <mini-os/byteorder.h>

#include "vtpm_manager.h"
#include "log.h"
#include "uuid.h"

#include "vtpmmgr.h"
#include "vtpm_disk.h"
#include "disk_tpm.h"
#include "disk_io.h"
#include "disk_crypto.h"
#include "disk_format.h"

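/* Read one disk sector, verify its CMAC under the manager's key, and decrypt
 * the payload in AES-CTR mode into the caller's buffer.
 */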
static int disk_read_crypt_sector(void *data, size_t size, sector_t block, const struct mem_tpm_mgr *mgr)
{
	struct disk_crypt_sector_plain *sector = disk_read_sector(block);
	if (!sector)
		return 2;

	if (aes_cmac_verify(&sector->mac, sector->data, sizeof(sector->data), &mgr->tm_key_e))
		return 2;

	aes_decrypt_ctr(data, size, sector->iv_data, sizeof(sector->iv_data), &mgr->tm_key_e);
	return 0;
}

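/* Free a group and every vTPM entry it owns; safe on NULL or a
 * partially-populated group.
 */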
static void group_free(struct mem_group *group)
{
	int i, j;
	if (!group)
		return;
	if (group->data) {
		for (i = 0; i < group->nr_pages; i++) {
			for (j = 0; j < group->data[i].size; j++) {
				free(group->data[i].vtpms[j]);
			}
		}
		free(group->data);
	}
	free(group->seals);
	free(group);
}

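/* Free a manager, including all of its groups. */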
static void mgr_free(struct mem_tpm_mgr *mgr)
{
	int i;
	if (!mgr)
		return;
	if (mgr->groups) {
		for (i = 0; i < mgr->nr_groups; i++)
			group_free(mgr->groups[i].v);
		free(mgr->groups);
	}
	free(mgr);
}

/* Open the group keys from one of the sealed structures. Each boot
 * configuration in the group's seal list is tried against the current PCR
 * values; the first entry that unseals and matches this manager yields the
 * group's keys.
 */
static int find_group_key(struct mem_group *dst,
		const struct disk_group_sector *group,
		const struct mem_tpm_mgr *parent)
{
	int i, rc, rv = 1;
	unsigned int olen;
	struct hash160 buf;
	struct disk_group_sealed_data sealed;

	dst->nr_seals = be32_native(group->v.boot_configs.nr_cfgs);
	if (dst->nr_seals > NR_SEALS_PER_GROUP)
		return 3; // TODO support spill to extra pages

	dst->seals = calloc(dst->nr_seals, sizeof(dst->seals[0]));
	if (!dst->seals) {
		vtpmlogerror(VTPM_LOG_VTPM, "find_group_key alloc %x\n", dst->nr_seals);
		return 2;
	}

	for (i = 0; i < dst->nr_seals; i++) {
		const struct disk_seal_entry *cfg = &group->v.boot_configs.entry[i];
		dst->seals[i].pcr_selection = cfg->pcr_selection;
		memcpy(&dst->seals[i].digest_release, &cfg->digest_release, 20);

		TPM_pcr_digest(&buf, cfg->pcr_selection);
		if (memcmp(&buf, &cfg->digest_release, 20))
			continue;

		/* TPM 2.0 unbind | TPM 1.x unseal */
		if (hw_is_tpm2())
			rc = TPM2_disk_unbind(&sealed, &olen, cfg);
		else
			rc = TPM_disk_unseal(&sealed, sizeof(sealed), cfg);

		if (rc)
			continue;
		if (memcmp(&sealed.magic, DISK_GROUP_BOUND_MAGIC, 4))
			continue;
		if (memcmp(sealed.tpm_manager_uuid, parent->uuid, 16))
			continue;

		memcpy(&dst->rollback_mac_key, &sealed.rollback_mac_key, 16);
		memcpy(&dst->group_key, &sealed.group_key, 16);
		memcpy(&dst->aik_authdata, &sealed.aik_authdata, 20);
		rv = 0;
	}

	// cache the list to allow writes without touching the TPM
	memcpy(&dst->seal_bits, &group->v.boot_configs, sizeof(dst->seal_bits));
	dst->flags |= MEM_GROUP_FLAG_SEAL_VALID;

	return rv;
}

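/* Unseal (TPM 1.x) or unbind (TPM 2.0) a root seal entry and recover the
 * manager's secrets: master key, UUID, NVRAM slot/auth, and counter
 * index/auth. The NV value read here is meant to mask the master key, but
 * that masking is still disabled (see the #if 0 below).
 */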
static int parse_root_key(struct mem_tpm_mgr *dst, struct disk_seal_entry *src)
{
	int rc;
	unsigned int olen;
	struct disk_root_sealed_data sealed;

	/* TPM 2.0 unbind | TPM 1.x unseal */
	if (hw_is_tpm2())
		rc = TPM2_disk_unbind(&sealed, &olen, src);
	else
		rc = TPM_disk_unseal(&sealed, sizeof(sealed), src);

	if (rc)
		return rc;

	if (memcmp(&sealed.magic, DISK_ROOT_BOUND_MAGIC, 4))
		return 1;

	rc = TPM_disk_nvread(&dst->nv_key, 16, sealed.nvram_slot, sealed.nvram_auth);
	if (rc)
		return rc;

	// TODO when an NV slot in the physical TPM is used to populate nv_key,
	// that value should be used to mask the master key so that the value
	// can be changed to revoke old disk state
#if 0
	aes_decrypt_one(&dst->tm_key, &sealed.tm_key, &dst->nv_key);
#else
	memcpy(&dst->tm_key, &sealed.tm_key, 16);
#endif

	memcpy(dst->uuid, sealed.tpm_manager_uuid, 16);
	dst->nvram_slot = sealed.nvram_slot;
	memcpy(&dst->nvram_auth, &sealed.nvram_auth, sizeof(struct tpm_authdata));
	dst->counter_index = sealed.counter_index;
	memcpy(&dst->counter_auth, &sealed.counter_auth, sizeof(struct tpm_authdata));

	return 0;
}

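/* Walk the chain of seal-list sectors for one root slot and return a manager
 * whose root key unseals under the current PCR values, or NULL if none does.
 */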
static struct mem_tpm_mgr *find_root_key(int active_root)
{
	sector_t seal_list = native_be32(active_root);
	struct disk_seal_list *seal = disk_read_sector(seal_list);
	struct hash160 buf;
	int i, rc, nr;
	struct mem_tpm_mgr *dst;

	if (memcmp(seal->hdr.magic, TPM_MGR_MAGIC, 12))
		return NULL;

	if (be32_native(seal->hdr.version) != TPM_MGR_VERSION)
		return NULL;

	dst = calloc(1, sizeof(*dst));
	if (!dst)
		return NULL;
	dst->active_root = active_root;

	// walk at most 100 sectors of the seal list so a corrupt chain
	// cannot loop forever
	for (nr = 0; nr < 100; nr++) {
		disk_set_used(seal_list, dst);
		uint32_t nr_seals = be32_native(seal->length);
		if (nr_seals > SEALS_PER_ROOT_SEAL_LIST)
			break;
		for (i = 0; i < nr_seals; i++) {
			struct disk_seal_entry *src = &seal->entry[i];

			TPM_pcr_digest(&buf, src->pcr_selection);
			if (memcmp(&buf, &src->digest_release, 20))
				continue;

			rc = parse_root_key(dst, src);
			if (rc)
				continue;
			return dst;
		}
		seal_list = seal->next;
		if (seal_list.value == 0)
			break;
		seal = disk_read_sector(seal_list);
	}
	mgr_free(dst);
	return NULL;
}

/* Load and verify one sector's worth of vTPMs. This loads all the vTPM entries
 * and decrypts their state data into memory.
 */
static int load_verify_vtpm_page(struct mem_vtpm_page *dst, int base,
		const struct mem_tpm_mgr *mgr, const aes_context *group_key)
{
	struct disk_vtpm_sector pt;
	int i, rc;

	disk_set_used(dst->disk_loc, mgr);

	rc = disk_read_crypt_sector(&pt, sizeof(pt), dst->disk_loc, mgr);
	if (rc) {
		printk("Malformed sector %d\n", be32_native(dst->disk_loc));
		return rc;
	}

	rc = sha256_verify(&dst->disk_hash, &pt, sizeof(pt));
	if (rc) {
		printk("Hash mismatch in sector %d\n", be32_native(dst->disk_loc));
		return rc;
	}

	if (!group_key)
		return 0;

	aes_decrypt_ctr(pt.data, sizeof(pt.data), &pt.iv, sizeof(pt.data) + 16, group_key);

	for (i = 0; i < dst->size; i++) {
		struct mem_vtpm *vtpm = calloc(1, sizeof(*vtpm));
		if (!vtpm)
			return 2;
		dst->vtpms[i] = vtpm;
		memcpy(vtpm->uuid, pt.header[i].uuid, 16);
		memcpy(vtpm->data, pt.data[i].data, 64);
		vtpm->flags = be32_native(pt.header[i].flags);
		vtpm->index_in_parent = i + base;
	}
	return 0;
}

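/* Load a run of consecutive vTPM pages, sizing each page from the group's
 * total vTPM count.
 */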
static int load_verify_vtpm_pages(struct mem_group *group, int base, int size,
		const struct hash256 *hash, const sector_t *loc,
		const struct mem_tpm_mgr *mgr, const aes_context *group_key)
{
	int i, rc;
	struct mem_vtpm_page *page = group->data + base;

	/* base was in terms of sectors; convert to vtpms */
	base *= VTPMS_PER_SECTOR;

	for (i = 0; i < size; i++) {
		page->disk_hash = hash[i];
		page->disk_loc = loc[i];
		if (group->nr_vtpms - base > VTPMS_PER_SECTOR)
			page->size = VTPMS_PER_SECTOR;
		else
			page->size = group->nr_vtpms - base;
		rc = load_verify_vtpm_page(page, base, mgr, group_key);
		if (rc)
			return rc;
		base += VTPMS_PER_SECTOR;
		// advance to the next in-memory page; this was missing, so
		// every iteration overwrote the first page
		page++;
	}

	return 0;
}

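/* A group's vTPM pages are indexed by a tree of hash/location sectors (the
 * itree). Each index sector is verified against the hash held by its parent
 * before descending, down to the leaf vTPM pages.
 */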
static int load_verify_vtpm_itree(struct mem_group_hdr *hdr, int base, int nr_entries,
		const struct hash256 *hash, const sector_t *loc, int hsize,
		const struct mem_tpm_mgr *mgr, const aes_context *group_key);

static int load_verify_vtpm_itree(struct mem_group_hdr *hdr, int base, int nr_entries,
		const struct hash256 *hash, const sector_t *loc, int hsize,
		const struct mem_tpm_mgr *mgr, const aes_context *group_key)
{
	int i, rc, incr = 1, inuse_base = hdr->disk_nr_inuse, lsize;

	// increase tree depth until all entries fit
	while (nr_entries > incr * hsize)
		incr *= NR_ENTRIES_PER_ITREE;

	// save the list of used sectors (itree and vtpm) in the header
	lsize = 1 + (nr_entries - 1) / incr;
	hdr->disk_nr_inuse += lsize;
	hdr->disk_inuse = realloc(hdr->disk_inuse, hdr->disk_nr_inuse * sizeof(sector_t));
	memcpy(&hdr->disk_inuse[inuse_base], loc, lsize * sizeof(sector_t));

	// if the entries already fit, process vtpm pages
	if (nr_entries <= hsize)
		return load_verify_vtpm_pages(hdr->v, base, nr_entries, hash, loc, mgr, group_key);

	for (i = 0; i * incr < nr_entries; i++) {
		struct disk_itree_sector pt;
		int child_entries = incr;

		// the last sector is not completely full
		if (nr_entries - i * incr < incr)
			child_entries = nr_entries - i * incr;

		disk_set_used(loc[i], mgr);
		hdr->disk_inuse[inuse_base++] = loc[i];

		rc = disk_read_crypt_sector(&pt, sizeof(pt), loc[i], mgr);
		if (rc) {
			printk("Malformed sector %d\n", be32_native(loc[i]));
			return rc;
		}

		rc = sha256_verify(&hash[i], pt.hash, sizeof(pt.hash));
		if (rc) {
			printk("Hash mismatch in sector %d\n", be32_native(loc[i]));
			return rc;
		}

		rc = load_verify_vtpm_itree(hdr, base, child_entries, pt.hash, pt.location,
				NR_ENTRIES_PER_ITREE, mgr, group_key);
		if (rc)
			return rc;

		base += incr;
	}

	return 0;
}

/* Load and verify one group's data structure, including its vTPMs. */
static int load_verify_group(struct mem_group_hdr *dst, const struct mem_tpm_mgr *mgr)
{
	struct mem_group *group;
	struct disk_group_sector disk;
	int rc;
	aes_context key_e;
	aes_context *opened_key = NULL;

	disk_set_used(dst->disk_loc, mgr);

	rc = disk_read_crypt_sector(&disk, sizeof(disk), dst->disk_loc, mgr);
	if (rc) {
		printk("Malformed sector %d\n", be32_native(dst->disk_loc));
		return rc;
	}

	rc = sha256_verify(&dst->disk_hash, &disk.v, sizeof(disk.v) + sizeof(disk.group_mac));
	if (rc) {
		printk("Hash mismatch in sector %d\n", be32_native(dst->disk_loc));
		return rc;
	}

	dst->v = group = calloc(1, sizeof(*group));
	if (!group)
		return 2;

	rc = find_group_key(group, &disk, mgr);
	if (rc == 0) {
		opened_key = &key_e;
		/* Verify the group with the group's own key */
		aes_setup(opened_key, &group->group_key);
		if (aes_cmac_verify(&disk.group_mac, &disk.v, sizeof(disk.v), opened_key)) {
			printk("Group CMAC failed\n");
			return 2;
		}

		memcpy(&group->id_data, &disk.v.id_data, sizeof(group->id_data));
		memcpy(&group->details, &disk.v.details, sizeof(group->details));
	} else if (rc == 1) {
		// no boot config matched; still need to walk the vtpm list
		rc = 0;
	} else {
		printk("Group key unsealing failed\n");
		return rc;
	}

	group->nr_vtpms = be32_native(disk.v.nr_vtpms);
	group->nr_pages = (group->nr_vtpms + VTPMS_PER_SECTOR - 1) / VTPMS_PER_SECTOR;

	group->data = calloc(group->nr_pages, sizeof(group->data[0]));
	if (!group->data)
		return 2;

	rc = load_verify_vtpm_itree(dst, 0, group->nr_pages, disk.v.vtpm_hash,
			disk.vtpm_location, NR_ENTRIES_PER_GROUP_BASE, mgr, opened_key);

	if (!opened_key) {
		/* group key was never opened: drop the placeholder struct */
		free(group->data);
		free(group->seals);
		free(group);
		dst->v = NULL;
	}

	return rc;
}

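/* First stage of root loading: set up the manager's AES key schedule and
 * read the encrypted root sector for the active slot.
 */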
static int load_root_pre(struct disk_root_sector *root, struct mem_tpm_mgr *dst)
{
	int rc;

	aes_setup(&dst->tm_key_e, &dst->tm_key);

	rc = disk_read_crypt_sector(root, sizeof(*root), root_loc(dst), dst);

	if (rc) {
		vtpmloginfo(VTPM_LOG_VTPM, "root cmac verify failed in slot %d\n", dst->active_root);
		return 2;
	}

	dst->root_seals_valid = 1 + dst->active_root;
	dst->sequence = be64_native(root->v.sequence);

	return 0;
}

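/* Groups hang off the root sector through the same itree scheme used for
 * vTPM pages: verify each index sector's hash, then recurse to the leaf
 * group sectors.
 */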
static int load_verify_group_itree(struct mem_tpm_mgr *dst, int base, int nr_entries,
		const struct hash256 *hash, const sector_t *loc, int hsize);

static int load_verify_group_itree(struct mem_tpm_mgr *dst, int base, int nr_entries,
		const struct hash256 *hash, const sector_t *loc, int hsize)
{
	int i, rc, incr = 1;

	if (nr_entries <= hsize) {
		for (i = 0; i < nr_entries; i++) {
			struct mem_group_hdr *group = dst->groups + base + i;
			group->disk_loc = loc[i];
			memcpy(&group->disk_hash, &hash[i], sizeof(group->disk_hash));
			rc = load_verify_group(group, dst);
			if (rc) {
				printk("Error loading group %d\n", base + i);
				return rc;
			}
		}
		return 0;
	}

	// increase tree depth until all entries fit
	while (nr_entries > incr * hsize)
		incr *= NR_ENTRIES_PER_ITREE;

	for (i = 0; i * incr < nr_entries; i++) {
		struct disk_itree_sector pt;
		int child_entries = incr;

		// the last sector is not completely full
		if (nr_entries - i * incr < incr)
			child_entries = nr_entries - i * incr;

		disk_set_used(loc[i], dst);

		rc = disk_read_crypt_sector(&pt, sizeof(pt), loc[i], dst);
		if (rc) {
			printk("Malformed sector %d\n", be32_native(loc[i]));
			return rc;
		}

		rc = sha256_verify(&hash[i], pt.hash, sizeof(pt.hash));
		if (rc) {
			printk("Hash mismatch in sector %d\n", be32_native(loc[i]));
			return rc;
		}

		rc = load_verify_group_itree(dst, base, child_entries, pt.hash, pt.location, NR_ENTRIES_PER_ITREE);
		if (rc)
			return rc;

		base += incr;
	}

	return 0;
}

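/* Second stage of root loading: check the monotonic counter, load every
 * group, and verify each open group's rollback MAC over the root sector.
 */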
static int load_root_post(struct mem_tpm_mgr *dst, const struct disk_root_sector *root)
{
	int rc, i, j;
	uint32_t nr_disk_rbs = be32_native(root->nr_rb_macs);

	rc = TPM_disk_check_counter(dst->counter_index, dst->counter_auth,
			root->v.tpm_counter_value);
	if (rc)
		return 2;
	dst->counter_value = root->v.tpm_counter_value;

	dst->nr_groups = be32_native(root->v.nr_groups);
	dst->groups = calloc(dst->nr_groups, sizeof(dst->groups[0]));

	if (!dst->groups) {
		vtpmlogerror(VTPM_LOG_VTPM, "load_root_post alloc %x\n", dst->nr_groups);
		return 2;
	}

	rc = load_verify_group_itree(dst, 0, dst->nr_groups,
			root->v.group_hash, root->group_loc, NR_ENTRIES_PER_ROOT);
	if (rc)
		return rc;

	/* Sanity check: group0 must be open */
	if (!dst->groups[0].v) {
		printk("Error opening group 0\n");
		return 2;
	}

	/* TODO support for spilling rollback list */
	if (nr_disk_rbs > NR_RB_MACS_PER_ROOT)
		return 3;

	i = 0;
	j = 0;
	while (i < dst->nr_groups) {
		aes_context key_e;
		struct mem_group_hdr *group = &dst->groups[i];
		struct mem_group *groupv = group->v;
		const struct disk_rb_mac_entry *ent = &root->rb_macs[j];

		if (!groupv) {
			// this group is not open - no need to verify now
			i++;
			continue;
		}

		// the bounds check keeps ent from being read past the last
		// valid entry
		if (j < nr_disk_rbs && be32_native(ent->id) < i) {
			// this entry is for a group that is not open
			j++;
			continue;
		}

		if (j >= nr_disk_rbs || be32_native(ent->id) != i) {
			// no MAC entry for this open group: flag a rollback
			// TODO allow delegation
			if (!(groupv->details.flags.value & FLAG_ROLLBACK_DETECTED)) {
				groupv->details.flags.value |= FLAG_ROLLBACK_DETECTED;
				group->disk_loc.value = 0;
			}
			i++;
			continue;
		}

		aes_setup(&key_e, &groupv->rollback_mac_key);
		if (aes_cmac_verify(&ent->mac, &root->v, sizeof(root->v), &key_e)) {
			// MAC mismatch: flag a rollback on this group
			if (!(groupv->details.flags.value & FLAG_ROLLBACK_DETECTED)) {
				groupv->details.flags.value |= FLAG_ROLLBACK_DETECTED;
				group->disk_loc.value = 0;
			}
		}
		i++;
		j++;
	}

	return 0;
}

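/* Entry point for disk state recovery: read the PCRs, try both root slots,
 * and install the newest root that fully verifies as the global manager.
 */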
int vtpm_load_disk(void)
{
	struct disk_root_sector root1, root2;
	int rc = 0;
	TPM_read_pcrs();

	printk("TPM Manager - disk format %d\n", TPM_MGR_VERSION);
	printk(" root seal: %zu; sector of %d: %zu\n",
		sizeof(struct disk_root_sealed_data), SEALS_PER_ROOT_SEAL_LIST, sizeof(struct disk_seal_list));
	printk(" root: %zu v=%zu\n", sizeof(root1), sizeof(root1.v));
	printk(" itree: %u; sector of %d: %zu\n",
		4 + 32, NR_ENTRIES_PER_ITREE, sizeof(struct disk_itree_sector));
	printk(" group: %zu v=%zu id=%zu md=%zu\n",
		sizeof(struct disk_group_sector), sizeof(struct disk_group_sector_mac3_area),
		sizeof(struct group_id_data), sizeof(struct group_details));
	printk(" group seal: %zu; %d in parent: %zu; sector of %d: %zu\n",
		sizeof(struct disk_group_sealed_data), NR_SEALS_PER_GROUP, sizeof(struct disk_group_boot_config_list),
		SEALS_PER_GROUP_SEAL_LIST, sizeof(struct disk_group_seal_list));
	printk(" vtpm: %zu+%zu; sector of %d: %zu\n",
		sizeof(struct disk_vtpm_plain), sizeof(struct disk_vtpm_secret),
		VTPMS_PER_SECTOR, sizeof(struct disk_vtpm_sector));

	struct mem_tpm_mgr *mgr1 = find_root_key(0);
	struct mem_tpm_mgr *mgr2 = find_root_key(1);

	rc = mgr1 ? load_root_pre(&root1, mgr1) : 0;
	if (rc) {
		mgr_free(mgr1);
		mgr1 = NULL;
	}

	rc = mgr2 ? load_root_pre(&root2, mgr2) : 0;
	if (rc) {
		mgr_free(mgr2);
		mgr2 = NULL;
	}

	printk("load_root_pre: %c/%c\n", mgr1 ? 'y' : 'n', mgr2 ? 'y' : 'n');

	if (!mgr1 && !mgr2)
		return 2;

	// prefer the slot with the higher sequence number; fall back to the
	// other slot if its root fails to verify
	if (mgr1 && mgr2 && mgr2->sequence > mgr1->sequence) {
		rc = load_root_post(mgr2, &root2);
		if (rc) {
			mgr_free(mgr2);
			mgr2 = NULL;
		} else {
			mgr_free(mgr1);
			g_mgr = mgr2;
			return 0;
		}
	}
	if (mgr1) {
		rc = load_root_post(mgr1, &root1);
		if (rc) {
			mgr_free(mgr1);
		} else {
			mgr_free(mgr2);
			g_mgr = mgr1;
			return 0;
		}
	}
	if (mgr2) {
		rc = load_root_post(mgr2, &root2);
		if (rc) {
			mgr_free(mgr2);
		} else {
			g_mgr = mgr2;
			return 0;
		}
	}
	printk("Could not read vTPM disk\n");

	return 2;
}
