// SPDX-License-Identifier: BSD-2-Clause
/*
 * Copyright (c) 2019-2021, Linaro Limited
 */

#include <config.h>
#include <crypto/crypto.h>
#include <crypto/internal_aes-gcm.h>
#include <initcall.h>
#include <kernel/boot.h>
#include <kernel/panic.h>
#include <mm/core_memprot.h>
#include <mm/core_mmu.h>
#include <mm/fobj.h>
#include <mm/tee_mm.h>
#include <stdlib.h>
#include <string.h>
#include <tee_api_types.h>
#include <types_ext.h>
#include <util.h>

#ifdef CFG_WITH_PAGER

#define RWP_AE_KEY_BITS 256

struct rwp_aes_gcm_iv {
        uint32_t iv[3];
};

#define RWP_AES_GCM_TAG_LEN 16

struct rwp_state {
        uint64_t iv;
        uint8_t tag[RWP_AES_GCM_TAG_LEN];
};

/*
 * Note that this struct is padded to a size which is a power of 2, which
 * guarantees that the state will not span two pages. This avoids a corner
 * case in the pager when making the state available.
 */
struct rwp_state_padded {
        struct rwp_state state;
        uint64_t pad;
};
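
/*
 * With the fields above this is 24 bytes of state (an 8-byte IV and a
 * 16-byte tag) padded to 32 bytes, so 128 entries fit exactly in a 4 KiB
 * page and no entry can straddle a page boundary. The COMPILE_TIME_ASSERT()
 * in rwp_paged_iv_alloc() below checks the power-of-2 property.
 */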

struct fobj_rwp_unpaged_iv {
        uint8_t *store;
        struct rwp_state *state;
        struct fobj fobj;
};

struct fobj_rwp_paged_iv {
        size_t idx;
        struct fobj fobj;
};

const struct fobj_ops ops_rwp_paged_iv;
const struct fobj_ops ops_rwp_unpaged_iv;

static struct internal_aes_gcm_key rwp_ae_key;

static struct rwp_state_padded *rwp_state_base;
static uint8_t *rwp_store_base;

static void fobj_init(struct fobj *fobj, const struct fobj_ops *ops,
                      unsigned int num_pages)
{
        fobj->ops = ops;
        fobj->num_pages = num_pages;
        refcount_set(&fobj->refc, 1);
        TAILQ_INIT(&fobj->regions);
}

static void fobj_uninit(struct fobj *fobj)
{
        assert(!refcount_val(&fobj->refc));
        assert(TAILQ_EMPTY(&fobj->regions));
        tee_pager_invalidate_fobj(fobj);
}

static TEE_Result rwp_load_page(void *va, struct rwp_state *state,
                                const uint8_t *src)
{
        struct rwp_aes_gcm_iv iv = {
                .iv = { (vaddr_t)state, state->iv >> 32, state->iv }
        };

        if (!state->iv) {
                /*
                 * The IV is still zero, which means that this is a
                 * previously unused page.
                 */
                memset(va, 0, SMALL_PAGE_SIZE);
                return TEE_SUCCESS;
        }

        return internal_aes_gcm_dec(&rwp_ae_key, &iv, sizeof(iv),
                                    NULL, 0, src, SMALL_PAGE_SIZE, va,
                                    state->tag, sizeof(state->tag));
}

static TEE_Result rwp_save_page(const void *va, struct rwp_state *state,
                                uint8_t *dst)
{
        size_t tag_len = sizeof(state->tag);
        struct rwp_aes_gcm_iv iv = { };

        assert(state->iv + 1 > state->iv);

        state->iv++;

        /*
         * IV is constructed as recommended in section "8.2.1 Deterministic
         * Construction" of "Recommendation for Block Cipher Modes of
         * Operation: Galois/Counter Mode (GCM) and GMAC",
         * http://csrc.nist.gov/publications/nistpubs/800-38D/SP-800-38D.pdf
         */
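        /*
         * In the terms of that document the address of the state acts as
         * the "fixed field" (it differs between pages) and the 64-bit
         * counter incremented above as the "invocation field" (it differs
         * between successive saves of the same page).
         */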
        iv.iv[0] = (vaddr_t)state;
        iv.iv[1] = state->iv >> 32;
        iv.iv[2] = state->iv;

        return internal_aes_gcm_enc(&rwp_ae_key, &iv, sizeof(iv),
                                    NULL, 0, va, SMALL_PAGE_SIZE, dst,
                                    state->tag, &tag_len);
}

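/*
 * With CFG_CORE_PAGE_TAG_AND_IV there is one global array of states and
 * one global ciphertext store covering the whole tee_mm_sec_ddr pool:
 * pool page N is stored encrypted at rwp_store_base + N * SMALL_PAGE_SIZE
 * and described by rwp_state_base[N]. Both base pointers are set up in
 * rwp_init() below; the helpers here just do the index arithmetic.
 */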
static struct rwp_state_padded *idx_to_state_padded(size_t idx)
{
        assert(rwp_state_base);
        return rwp_state_base + idx;
}

static uint8_t *idx_to_store(size_t idx)
{
        assert(rwp_store_base);
        return rwp_store_base + idx * SMALL_PAGE_SIZE;
}

static struct fobj *rwp_paged_iv_alloc(unsigned int num_pages)
{
        struct fobj_rwp_paged_iv *rwp = NULL;
        tee_mm_entry_t *mm = NULL;
        size_t size = 0;

        COMPILE_TIME_ASSERT(IS_POWER_OF_TWO(sizeof(struct rwp_state_padded)));

        rwp = calloc(1, sizeof(*rwp));
        if (!rwp)
                return NULL;

        if (MUL_OVERFLOW(num_pages, SMALL_PAGE_SIZE, &size))
                goto err;
        mm = tee_mm_alloc(&tee_mm_sec_ddr, size);
        if (!mm)
                goto err;
        rwp->idx = (tee_mm_get_smem(mm) - tee_mm_sec_ddr.lo) / SMALL_PAGE_SIZE;

        memset(idx_to_state_padded(rwp->idx), 0,
               num_pages * sizeof(struct rwp_state_padded));

        fobj_init(&rwp->fobj, &ops_rwp_paged_iv, num_pages);

        return &rwp->fobj;
err:
        tee_mm_free(mm);
        free(rwp);

        return NULL;
}

static struct fobj_rwp_paged_iv *to_rwp_paged_iv(struct fobj *fobj)
{
        assert(fobj->ops == &ops_rwp_paged_iv);

        return container_of(fobj, struct fobj_rwp_paged_iv, fobj);
}

static TEE_Result rwp_paged_iv_load_page(struct fobj *fobj,
                                         unsigned int page_idx, void *va)
{
        struct fobj_rwp_paged_iv *rwp = to_rwp_paged_iv(fobj);
        uint8_t *src = idx_to_store(rwp->idx) + page_idx * SMALL_PAGE_SIZE;
        struct rwp_state_padded *st = idx_to_state_padded(rwp->idx + page_idx);

        assert(refcount_val(&fobj->refc));
        assert(page_idx < fobj->num_pages);

        return rwp_load_page(va, &st->state, src);
}
DECLARE_KEEP_PAGER(rwp_paged_iv_load_page);

static TEE_Result rwp_paged_iv_save_page(struct fobj *fobj,
                                         unsigned int page_idx, const void *va)
{
        struct fobj_rwp_paged_iv *rwp = to_rwp_paged_iv(fobj);
        uint8_t *dst = idx_to_store(rwp->idx) + page_idx * SMALL_PAGE_SIZE;
        struct rwp_state_padded *st = idx_to_state_padded(rwp->idx + page_idx);

        assert(page_idx < fobj->num_pages);

        if (!refcount_val(&fobj->refc)) {
                /*
                 * This fobj is being torn down, it just hasn't had the
                 * time to call tee_pager_invalidate_fobj() yet.
                 */
                assert(TAILQ_EMPTY(&fobj->regions));
                return TEE_SUCCESS;
        }

        return rwp_save_page(va, &st->state, dst);
}
DECLARE_KEEP_PAGER(rwp_paged_iv_save_page);

static void rwp_paged_iv_free(struct fobj *fobj)
{
        struct fobj_rwp_paged_iv *rwp = to_rwp_paged_iv(fobj);
        paddr_t pa = rwp->idx * SMALL_PAGE_SIZE + tee_mm_sec_ddr.lo;
        tee_mm_entry_t *mm = tee_mm_find(&tee_mm_sec_ddr, pa);

        assert(mm);

        fobj_uninit(fobj);
        tee_mm_free(mm);
        free(rwp);
}

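/*
 * Returns the page-aligned address of the page holding this page's IV/tag
 * state. The pager uses this to make sure that state is available (paged
 * in, if needed) before the page itself is saved or restored, cf. the
 * padding note above struct rwp_state_padded.
 */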
static vaddr_t rwp_paged_iv_get_iv_vaddr(struct fobj *fobj,
                                         unsigned int page_idx)
{
        struct fobj_rwp_paged_iv *rwp = to_rwp_paged_iv(fobj);
        struct rwp_state_padded *st = idx_to_state_padded(rwp->idx + page_idx);

        assert(page_idx < fobj->num_pages);
        return (vaddr_t)&st->state & ~SMALL_PAGE_MASK;
}
DECLARE_KEEP_PAGER(rwp_paged_iv_get_iv_vaddr);

/*
 * Note: this variable is weak just to ease breaking its dependency chain
 * when added to the unpaged area.
 */
const struct fobj_ops ops_rwp_paged_iv
        __weak __rodata_unpaged("ops_rwp_paged_iv") = {
        .free = rwp_paged_iv_free,
        .load_page = rwp_paged_iv_load_page,
        .save_page = rwp_paged_iv_save_page,
        .get_iv_vaddr = rwp_paged_iv_get_iv_vaddr,
};

static struct fobj *rwp_unpaged_iv_alloc(unsigned int num_pages)
{
        struct fobj_rwp_unpaged_iv *rwp = NULL;
        tee_mm_entry_t *mm = NULL;
        size_t size = 0;

        rwp = calloc(1, sizeof(*rwp));
        if (!rwp)
                return NULL;

        rwp->state = calloc(num_pages, sizeof(*rwp->state));
        if (!rwp->state)
                goto err_free_rwp;

        if (MUL_OVERFLOW(num_pages, SMALL_PAGE_SIZE, &size))
                goto err_free_state;
        mm = tee_mm_alloc(&tee_mm_sec_ddr, size);
        if (!mm)
                goto err_free_state;
        rwp->store = phys_to_virt(tee_mm_get_smem(mm), MEM_AREA_TA_RAM, size);
        assert(rwp->store);

        fobj_init(&rwp->fobj, &ops_rwp_unpaged_iv, num_pages);

        return &rwp->fobj;

err_free_state:
        free(rwp->state);
err_free_rwp:
        free(rwp);
        return NULL;
}

static struct fobj_rwp_unpaged_iv *to_rwp_unpaged_iv(struct fobj *fobj)
{
        assert(fobj->ops == &ops_rwp_unpaged_iv);

        return container_of(fobj, struct fobj_rwp_unpaged_iv, fobj);
}

static TEE_Result rwp_unpaged_iv_load_page(struct fobj *fobj,
                                           unsigned int page_idx, void *va)
{
        struct fobj_rwp_unpaged_iv *rwp = to_rwp_unpaged_iv(fobj);
        uint8_t *src = rwp->store + page_idx * SMALL_PAGE_SIZE;

        assert(refcount_val(&fobj->refc));
        assert(page_idx < fobj->num_pages);

        return rwp_load_page(va, rwp->state + page_idx, src);
}
DECLARE_KEEP_PAGER(rwp_unpaged_iv_load_page);

static TEE_Result rwp_unpaged_iv_save_page(struct fobj *fobj,
                                           unsigned int page_idx,
                                           const void *va)
{
        struct fobj_rwp_unpaged_iv *rwp = to_rwp_unpaged_iv(fobj);
        uint8_t *dst = rwp->store + page_idx * SMALL_PAGE_SIZE;

        assert(page_idx < fobj->num_pages);

        if (!refcount_val(&fobj->refc)) {
                /*
                 * This fobj is being torn down, it just hasn't had the
                 * time to call tee_pager_invalidate_fobj() yet.
                 */
                assert(TAILQ_EMPTY(&fobj->regions));
                return TEE_SUCCESS;
        }

        return rwp_save_page(va, rwp->state + page_idx, dst);
}
DECLARE_KEEP_PAGER(rwp_unpaged_iv_save_page);

static void rwp_unpaged_iv_free(struct fobj *fobj)
{
        struct fobj_rwp_unpaged_iv *rwp = NULL;
        tee_mm_entry_t *mm = NULL;

        if (IS_ENABLED(CFG_CORE_PAGE_TAG_AND_IV))
                panic();

        rwp = to_rwp_unpaged_iv(fobj);
        mm = tee_mm_find(&tee_mm_sec_ddr, virt_to_phys(rwp->store));

        assert(mm);

        fobj_uninit(fobj);
        tee_mm_free(mm);
        free(rwp->state);
        free(rwp);
}

/*
 * Note: this variable is weak just to ease breaking its dependency chain
 * when added to the unpaged area.
 */
const struct fobj_ops ops_rwp_unpaged_iv
        __weak __rodata_unpaged("ops_rwp_unpaged_iv") = {
        .free = rwp_unpaged_iv_free,
        .load_page = rwp_unpaged_iv_load_page,
        .save_page = rwp_unpaged_iv_save_page,
};

static TEE_Result rwp_init(void)
{
        uint8_t key[RWP_AE_KEY_BITS / 8] = { 0 };
        struct fobj *fobj = NULL;
        size_t num_pool_pages = 0;
        size_t num_fobj_pages = 0;

        if (crypto_rng_read(key, sizeof(key)) != TEE_SUCCESS)
                panic("failed to generate random");
        if (crypto_aes_expand_enc_key(key, sizeof(key), rwp_ae_key.data,
                                      sizeof(rwp_ae_key.data),
                                      &rwp_ae_key.rounds))
                panic("failed to expand key");

        if (!IS_ENABLED(CFG_CORE_PAGE_TAG_AND_IV))
                return TEE_SUCCESS;

        assert(tee_mm_sec_ddr.size && !(tee_mm_sec_ddr.size & SMALL_PAGE_MASK));

        num_pool_pages = tee_mm_sec_ddr.size / SMALL_PAGE_SIZE;
        num_fobj_pages = ROUNDUP(num_pool_pages * sizeof(*rwp_state_base),
                                 SMALL_PAGE_SIZE) / SMALL_PAGE_SIZE;

        /*
         * Each page in the pool needs a struct rwp_state.
         *
         * This isn't entirely true, the pages not used by
         * fobj_rw_paged_alloc() don't need any. A future optimization
         * may try to avoid allocating for such pages.
         */
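        /*
         * As a rough example, a 4 MiB pool (1024 pages) with 32-byte
         * padded states needs 1024 * 32 bytes = 32 KiB of state, that is,
         * 8 extra pages backing this fobj.
         */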
        fobj = rwp_unpaged_iv_alloc(num_fobj_pages);
        if (!fobj)
                panic();

        rwp_state_base = (void *)tee_pager_init_iv_region(fobj);
        assert(rwp_state_base);

        rwp_store_base = phys_to_virt(tee_mm_sec_ddr.lo, MEM_AREA_TA_RAM,
                                      tee_mm_sec_ddr.size);
        assert(rwp_store_base);

        return TEE_SUCCESS;
}
driver_init_late(rwp_init);

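/*
 * With CFG_CORE_PAGE_TAG_AND_IV the per-page tags and IVs live in the
 * paged-IV store set up in rwp_init() above and can themselves be paged
 * out; without it they are kept in unpaged heap memory next to each fobj.
 * Either way the caller just gets back a struct fobj.
 */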
struct fobj *fobj_rw_paged_alloc(unsigned int num_pages)
{
        assert(num_pages);

        if (IS_ENABLED(CFG_CORE_PAGE_TAG_AND_IV))
                return rwp_paged_iv_alloc(num_pages);
        else
                return rwp_unpaged_iv_alloc(num_pages);
}

struct fobj_rop {
        uint8_t *hashes;
        uint8_t *store;
        struct fobj fobj;
};

const struct fobj_ops ops_ro_paged;

static void rop_init(struct fobj_rop *rop, const struct fobj_ops *ops,
                     unsigned int num_pages, void *hashes, void *store)
{
        rop->hashes = hashes;
        rop->store = store;
        fobj_init(&rop->fobj, ops, num_pages);
}

struct fobj *fobj_ro_paged_alloc(unsigned int num_pages, void *hashes,
                                 void *store)
{
        struct fobj_rop *rop = NULL;

        assert(num_pages && hashes && store);

        rop = calloc(1, sizeof(*rop));
        if (!rop)
                return NULL;

        rop_init(rop, &ops_ro_paged, num_pages, hashes, store);

        return &rop->fobj;
}

static struct fobj_rop *to_rop(struct fobj *fobj)
{
        assert(fobj->ops == &ops_ro_paged);

        return container_of(fobj, struct fobj_rop, fobj);
}

static void rop_uninit(struct fobj_rop *rop)
{
        fobj_uninit(&rop->fobj);
        tee_mm_free(tee_mm_find(&tee_mm_sec_ddr, virt_to_phys(rop->store)));
        free(rop->hashes);
}

static void rop_free(struct fobj *fobj)
{
        struct fobj_rop *rop = to_rop(fobj);

        rop_uninit(rop);
        free(rop);
}

static TEE_Result rop_load_page_helper(struct fobj_rop *rop,
                                       unsigned int page_idx, void *va)
{
        const uint8_t *hash = rop->hashes + page_idx * TEE_SHA256_HASH_SIZE;
        const uint8_t *src = rop->store + page_idx * SMALL_PAGE_SIZE;

        assert(refcount_val(&rop->fobj.refc));
        assert(page_idx < rop->fobj.num_pages);
        memcpy(va, src, SMALL_PAGE_SIZE);

        return hash_sha256_check(hash, va, SMALL_PAGE_SIZE);
}

static TEE_Result rop_load_page(struct fobj *fobj, unsigned int page_idx,
                                void *va)
{
        return rop_load_page_helper(to_rop(fobj), page_idx, va);
}
DECLARE_KEEP_PAGER(rop_load_page);

static TEE_Result rop_save_page(struct fobj *fobj __unused,
                                unsigned int page_idx __unused,
                                const void *va __unused)
{
        return TEE_ERROR_GENERIC;
}
DECLARE_KEEP_PAGER(rop_save_page);

/*
 * Note: this variable is weak just to ease breaking its dependency chain
 * when added to the unpaged area.
 */
const struct fobj_ops ops_ro_paged __weak __rodata_unpaged("ops_ro_paged") = {
        .free = rop_free,
        .load_page = rop_load_page,
        .save_page = rop_save_page,
};

#ifdef CFG_CORE_ASLR
/*
 * When using relocated pages the relocation information must be applied
 * before the pages can be used. With read-only paging the content is only
 * integrity protected so relocation cannot be applied on pages in the less
 * secure "store" or the load_address selected by ASLR could be given away.
 * This means that each time a page has been loaded and verified it has to
 * have its relocation information applied before it can be used.
 *
 * Only relative relocations are supported, which allows a rather compact
 * representation of the needed relocation information in this struct.
 * r_offset is replaced with the offset into the page that needs to be
 * updated, this number can never be larger than SMALL_PAGE_SIZE so a
 * uint16_t can be used to represent it.
 *
 * All relocations are converted and stored in @relocs. @page_reloc_idx is
 * an array of length @rop.fobj.num_pages with an entry for each page. If
 * @page_reloc_idx[page_idx] isn't UINT16_MAX it's an index into @relocs.
 */
struct fobj_ro_reloc_paged {
        uint16_t *page_reloc_idx;
        uint16_t *relocs;
        unsigned int num_relocs;
        struct fobj_rop rop;
};
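
/*
 * As an illustration (values are made up), a three page fobj where page 0
 * has two relocations and page 2 has one ends up with:
 *   page_reloc_idx[] = { 0, UINT16_MAX, 2 }
 *   relocs[]         = { 0x10, 0x7f8, 0x20 }
 *   num_relocs       = 3
 * rrp_load_page() then applies relocs[0..1] to page 0, relocs[2] to page 2
 * and leaves page 1 untouched.
 */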

const struct fobj_ops ops_ro_reloc_paged;

static unsigned int get_num_rels(unsigned int num_pages,
                                 unsigned int reloc_offs,
                                 const uint32_t *reloc, unsigned int num_relocs)
{
        const unsigned int align_mask __maybe_unused = sizeof(long) - 1;
        unsigned int nrels = 0;
        unsigned int n = 0;
        vaddr_t offs = 0;

        /*
         * Count the number of relocations which are needed for these
         * pages. Also check that the data is well formed: only expected
         * relocations, sorted in order of the address they apply to.
         */
        for (; n < num_relocs; n++) {
                assert(IS_ALIGNED_WITH_TYPE(reloc[n], unsigned long));
                assert(offs < reloc[n]); /* check that it's sorted */
                offs = reloc[n];
                if (offs >= reloc_offs &&
                    offs <= reloc_offs + num_pages * SMALL_PAGE_SIZE)
                        nrels++;
        }

        return nrels;
}

static void init_rels(struct fobj_ro_reloc_paged *rrp, unsigned int reloc_offs,
                      const uint32_t *reloc, unsigned int num_relocs)
{
        unsigned int npg = rrp->rop.fobj.num_pages;
        unsigned int pg_idx = 0;
        unsigned int reln = 0;
        unsigned int n = 0;
        uint32_t r = 0;

        for (n = 0; n < npg; n++)
                rrp->page_reloc_idx[n] = UINT16_MAX;

        for (n = 0; n < num_relocs; n++) {
                if (reloc[n] < reloc_offs)
                        continue;

                /* r is the offset from beginning of this fobj */
                r = reloc[n] - reloc_offs;

                pg_idx = r / SMALL_PAGE_SIZE;
                if (pg_idx >= npg)
                        break;

                if (rrp->page_reloc_idx[pg_idx] == UINT16_MAX)
                        rrp->page_reloc_idx[pg_idx] = reln;
                rrp->relocs[reln] = r - pg_idx * SMALL_PAGE_SIZE;
                reln++;
        }

        assert(reln == rrp->num_relocs);
}

struct fobj *fobj_ro_reloc_paged_alloc(unsigned int num_pages, void *hashes,
                                       unsigned int reloc_offs,
                                       const void *reloc,
                                       unsigned int reloc_len, void *store)
{
        struct fobj_ro_reloc_paged *rrp = NULL;
        const unsigned int num_relocs = reloc_len / sizeof(uint32_t);
        unsigned int nrels = 0;

        assert(IS_ALIGNED_WITH_TYPE(reloc, uint32_t));
        assert(IS_ALIGNED_WITH_TYPE(reloc_len, uint32_t));
        assert(num_pages && hashes && store);
        if (!reloc_len) {
                assert(!reloc);
                return fobj_ro_paged_alloc(num_pages, hashes, store);
        }
        assert(reloc);

        nrels = get_num_rels(num_pages, reloc_offs, reloc, num_relocs);
        if (!nrels)
                return fobj_ro_paged_alloc(num_pages, hashes, store);

        rrp = calloc(1, sizeof(*rrp) + num_pages * sizeof(uint16_t) +
                        nrels * sizeof(uint16_t));
        if (!rrp)
                return NULL;
        rop_init(&rrp->rop, &ops_ro_reloc_paged, num_pages, hashes, store);
        rrp->page_reloc_idx = (uint16_t *)(rrp + 1);
        rrp->relocs = rrp->page_reloc_idx + num_pages;
        rrp->num_relocs = nrels;
        init_rels(rrp, reloc_offs, reloc, num_relocs);

        return &rrp->rop.fobj;
}

static struct fobj_ro_reloc_paged *to_rrp(struct fobj *fobj)
{
        assert(fobj->ops == &ops_ro_reloc_paged);

        return container_of(fobj, struct fobj_ro_reloc_paged, rop.fobj);
}

static void rrp_free(struct fobj *fobj)
{
        struct fobj_ro_reloc_paged *rrp = to_rrp(fobj);

        rop_uninit(&rrp->rop);
        free(rrp);
}

static TEE_Result rrp_load_page(struct fobj *fobj, unsigned int page_idx,
                                void *va)
{
        struct fobj_ro_reloc_paged *rrp = to_rrp(fobj);
        unsigned int end_rel = rrp->num_relocs;
        TEE_Result res = TEE_SUCCESS;
        unsigned long *where = NULL;
        unsigned int n = 0;

        res = rop_load_page_helper(&rrp->rop, page_idx, va);
        if (res)
                return res;

        /* Find the reloc index of the next page to tell when we're done */
        for (n = page_idx + 1; n < fobj->num_pages; n++) {
                if (rrp->page_reloc_idx[n] != UINT16_MAX) {
                        end_rel = rrp->page_reloc_idx[n];
                        break;
                }
        }

        for (n = rrp->page_reloc_idx[page_idx]; n < end_rel; n++) {
                where = (void *)((vaddr_t)va + rrp->relocs[n]);
                *where += boot_mmu_config.load_offset;
        }

        return TEE_SUCCESS;
}
DECLARE_KEEP_PAGER(rrp_load_page);

/*
 * Note: this variable is weak just to ease breaking its dependency chain
 * when added to the unpaged area.
 */
const struct fobj_ops ops_ro_reloc_paged
        __weak __rodata_unpaged("ops_ro_reloc_paged") = {
        .free = rrp_free,
        .load_page = rrp_load_page,
        .save_page = rop_save_page, /* Direct reuse */
};
#endif /*CFG_CORE_ASLR*/

const struct fobj_ops ops_locked_paged;

struct fobj *fobj_locked_paged_alloc(unsigned int num_pages)
{
        struct fobj *f = NULL;

        assert(num_pages);

        f = calloc(1, sizeof(*f));
        if (!f)
                return NULL;

        fobj_init(f, &ops_locked_paged, num_pages);

        return f;
}

static void lop_free(struct fobj *fobj)
{
        assert(fobj->ops == &ops_locked_paged);
        fobj_uninit(fobj);
        free(fobj);
}

static TEE_Result lop_load_page(struct fobj *fobj __maybe_unused,
                                unsigned int page_idx __maybe_unused,
                                void *va)
{
        assert(fobj->ops == &ops_locked_paged);
        assert(refcount_val(&fobj->refc));
        assert(page_idx < fobj->num_pages);

        memset(va, 0, SMALL_PAGE_SIZE);

        return TEE_SUCCESS;
}
DECLARE_KEEP_PAGER(lop_load_page);

static TEE_Result lop_save_page(struct fobj *fobj __unused,
                                unsigned int page_idx __unused,
                                const void *va __unused)
{
        return TEE_ERROR_GENERIC;
}
DECLARE_KEEP_PAGER(lop_save_page);

/*
 * Note: this variable is weak just to ease breaking its dependency chain
 * when added to the unpaged area.
 */
const struct fobj_ops ops_locked_paged
        __weak __rodata_unpaged("ops_locked_paged") = {
        .free = lop_free,
        .load_page = lop_load_page,
        .save_page = lop_save_page,
};
#endif /*CFG_WITH_PAGER*/

#ifndef CFG_PAGED_USER_TA

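/*
 * When user TAs aren't paged (CFG_PAGED_USER_TA disabled), this fobj type
 * is simply a block of physically contiguous secure memory allocated from
 * tee_mm_sec_ddr, so only free() and get_pa() are needed; there are no
 * pages to load or save.
 */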
struct fobj_sec_mem {
        tee_mm_entry_t *mm;
        struct fobj fobj;
};

const struct fobj_ops ops_sec_mem;

struct fobj *fobj_sec_mem_alloc(unsigned int num_pages)
{
        struct fobj_sec_mem *f = calloc(1, sizeof(*f));
        size_t size = 0;
        void *va = NULL;

        if (!f)
                return NULL;

        if (MUL_OVERFLOW(num_pages, SMALL_PAGE_SIZE, &size))
                goto err;

        f->mm = tee_mm_alloc(&tee_mm_sec_ddr, size);
        if (!f->mm)
                goto err;

        va = phys_to_virt(tee_mm_get_smem(f->mm), MEM_AREA_TA_RAM, size);
        if (!va)
                goto err;

        memset(va, 0, size);
        f->fobj.ops = &ops_sec_mem;
        f->fobj.num_pages = num_pages;
        refcount_set(&f->fobj.refc, 1);

        return &f->fobj;
err:
        tee_mm_free(f->mm);
        free(f);

        return NULL;
}

static struct fobj_sec_mem *to_sec_mem(struct fobj *fobj)
{
        assert(fobj->ops == &ops_sec_mem);

        return container_of(fobj, struct fobj_sec_mem, fobj);
}

static void sec_mem_free(struct fobj *fobj)
{
        struct fobj_sec_mem *f = to_sec_mem(fobj);

        assert(!refcount_val(&fobj->refc));
        tee_mm_free(f->mm);
        free(f);
}

static paddr_t sec_mem_get_pa(struct fobj *fobj, unsigned int page_idx)
{
        struct fobj_sec_mem *f = to_sec_mem(fobj);

        assert(refcount_val(&fobj->refc));
        assert(page_idx < fobj->num_pages);

        return tee_mm_get_smem(f->mm) + page_idx * SMALL_PAGE_SIZE;
}

/*
 * Note: this variable is weak just to ease breaking its dependency chain
 * when added to the unpaged area.
 */
const struct fobj_ops ops_sec_mem __weak __rodata_unpaged("ops_sec_mem") = {
        .free = sec_mem_free,
        .get_pa = sec_mem_get_pa,
};

#endif /*!CFG_PAGED_USER_TA*/