// SPDX-License-Identifier: BSD-2-Clause
/*
 * Copyright (c) 2016, Linaro Limited
 */

#include <assert.h>
#include <kernel/mutex.h>
#include <kernel/tee_misc.h>
#include <mm/core_mmu.h>
#include <mm/pgt_cache.h>
#include <mm/tee_pager.h>
#include <stdlib.h>
#include <string.h>
#include <trace.h>
#include <util.h>

/*
 * With pager enabled we allocate page tables from the pager.
 *
 * For LPAE each page table is a complete page which is allocated and freed
 * using the interface provided by the pager.
 *
 * For compat v7 page tables there's room for four page tables in one page
 * so we need to keep track of how much of an allocated page is used. When
 * a page is completely unused it's returned to the pager.
 *
 * With pager disabled we have a static allocation of page tables instead.
 *
 * In all cases we limit the number of active page tables to
 * PGT_CACHE_SIZE. This pool of page tables is shared between all
 * threads. If a thread can't allocate the needed number of page
 * tables it releases all its current tables and waits for some more to
 * be freed. A thread's allocated tables are freed each time a TA is
 * unmapped, so each thread should eventually be able to allocate the
 * tables it needs.
 */

#if defined(CFG_WITH_PAGER) && !defined(CFG_WITH_LPAE)
struct pgt_parent {
	size_t num_used;
	struct pgt_cache pgt_cache;
};

static struct pgt_parent pgt_parents[PGT_CACHE_SIZE / PGT_NUM_PGT_PER_PAGE];
#else

static struct pgt_cache pgt_free_list = SLIST_HEAD_INITIALIZER(pgt_free_list);
#endif

#ifdef CFG_PAGED_USER_TA
/*
 * When a user TA context is temporarily unmapped, the used struct pgt's of
 * the context (page tables holding valid physical pages) are saved in this
 * cache in the hope that some of those pages are still present when the
 * context is mapped again.
 */
static struct pgt_cache pgt_cache_list = SLIST_HEAD_INITIALIZER(pgt_cache_list);
#endif

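/*
 * One bookkeeping entry per page table in the pool. The lists above link
 * these entries together; pgt_mu protects every list and pgt_cv is
 * signaled when tables are returned so that threads blocked in
 * pgt_alloc() can retry.
 */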
static struct pgt pgt_entries[PGT_CACHE_SIZE];

static struct mutex pgt_mu = MUTEX_INITIALIZER;
static struct condvar pgt_cv = CONDVAR_INITIALIZER;

#if defined(CFG_WITH_PAGER) && defined(CFG_WITH_LPAE)
void pgt_init(void)
{
	size_t n;

	for (n = 0; n < PGT_CACHE_SIZE; n++) {
		struct pgt *p = pgt_entries + n;

		p->tbl = tee_pager_alloc(PGT_SIZE);
		SLIST_INSERT_HEAD(&pgt_free_list, p, link);
	}
}
#elif defined(CFG_WITH_PAGER) && !defined(CFG_WITH_LPAE)
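/*
 * With compat v7 tables, PGT_NUM_PGT_PER_PAGE translation tables fit in
 * one small page, so each pager-allocated page is carved into tables
 * that all point back to a shared struct pgt_parent for usage tracking.
 */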
void pgt_init(void)
{
	size_t n;
	size_t m;

	COMPILE_TIME_ASSERT(PGT_CACHE_SIZE % PGT_NUM_PGT_PER_PAGE == 0);
	COMPILE_TIME_ASSERT(PGT_SIZE * PGT_NUM_PGT_PER_PAGE == SMALL_PAGE_SIZE);

	for (n = 0; n < ARRAY_SIZE(pgt_parents); n++) {
		uint8_t *tbl = tee_pager_alloc(SMALL_PAGE_SIZE);

		SLIST_INIT(&pgt_parents[n].pgt_cache);
		for (m = 0; m < PGT_NUM_PGT_PER_PAGE; m++) {
			struct pgt *p = pgt_entries +
					n * PGT_NUM_PGT_PER_PAGE + m;

			p->tbl = tbl + m * PGT_SIZE;
			p->parent = &pgt_parents[n];
			SLIST_INSERT_HEAD(&pgt_parents[n].pgt_cache, p, link);
		}
	}
}
#else
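/* Without the pager the page table memory is a static array instead. */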
void pgt_init(void)
{
	/*
	 * We're putting this in .nozi.* instead of .bss because .nozi.*
	 * already has a large alignment, while .bss has a small alignment.
	 * The current link script is optimized for small alignment in .bss.
	 */
	static uint8_t pgt_tables[PGT_CACHE_SIZE][PGT_SIZE]
			__aligned(PGT_SIZE) __section(".nozi.pgt_cache");
	size_t n;

	for (n = 0; n < ARRAY_SIZE(pgt_tables); n++) {
		struct pgt *p = pgt_entries + n;

		p->tbl = pgt_tables[n];
		SLIST_INSERT_HEAD(&pgt_free_list, p, link);
	}
}
#endif

#if defined(CFG_WITH_LPAE) || !defined(CFG_WITH_PAGER)
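/*
 * Simple free list: each struct pgt owns a whole table allocation, so
 * popping just clears the table and pushing hands the backing physical
 * page back to the pager (when the pager is enabled).
 */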
static struct pgt *pop_from_free_list(void)
{
	struct pgt *p = SLIST_FIRST(&pgt_free_list);

	if (p) {
		SLIST_REMOVE_HEAD(&pgt_free_list, link);
		memset(p->tbl, 0, PGT_SIZE);
	}
	return p;
}

static void push_to_free_list(struct pgt *p)
{
	SLIST_INSERT_HEAD(&pgt_free_list, p, link);
#if defined(CFG_WITH_PAGER)
	tee_pager_release_phys(p->tbl, PGT_SIZE);
#endif
}
#else
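/*
 * Four compat v7 tables share one small page. The parent keeps a
 * per-page used count; the backing physical page is only released to
 * the pager once every table in it has been pushed back.
 */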
static struct pgt *pop_from_free_list(void)
{
	size_t n;

	for (n = 0; n < ARRAY_SIZE(pgt_parents); n++) {
		struct pgt *p = SLIST_FIRST(&pgt_parents[n].pgt_cache);

		if (p) {
			SLIST_REMOVE_HEAD(&pgt_parents[n].pgt_cache, link);
			pgt_parents[n].num_used++;
			memset(p->tbl, 0, PGT_SIZE);
			return p;
		}
	}
	return NULL;
}

static void push_to_free_list(struct pgt *p)
{
	SLIST_INSERT_HEAD(&p->parent->pgt_cache, p, link);
	assert(p->parent->num_used > 0);
	p->parent->num_used--;
	if (!p->parent->num_used) {
		vaddr_t va = (vaddr_t)p->tbl & ~SMALL_PAGE_MASK;

		tee_pager_release_phys((void *)va, SMALL_PAGE_SIZE);
	}
}
#endif

#ifdef CFG_PAGED_USER_TA
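/*
 * The cache list holds tables that still map pages of a currently
 * unmapped TA context. A table is looked up by (ctx, vabase); a miss
 * falls back to the free list or, as a last resort, to evicting the
 * least used cached table.
 */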
static void push_to_cache_list(struct pgt *pgt)
{
	SLIST_INSERT_HEAD(&pgt_cache_list, pgt, link);
}

static bool match_pgt(struct pgt *pgt, vaddr_t vabase, void *ctx)
{
	return pgt->ctx == ctx && pgt->vabase == vabase;
}

static struct pgt *pop_from_cache_list(vaddr_t vabase, void *ctx)
{
	struct pgt *pgt;
	struct pgt *p;

	pgt = SLIST_FIRST(&pgt_cache_list);
	if (!pgt)
		return NULL;
	if (match_pgt(pgt, vabase, ctx)) {
		SLIST_REMOVE_HEAD(&pgt_cache_list, link);
		return pgt;
	}

	while (true) {
		p = SLIST_NEXT(pgt, link);
		if (!p)
			break;
		if (match_pgt(p, vabase, ctx)) {
			SLIST_REMOVE_AFTER(pgt, link);
			break;
		}
		pgt = p;
	}
	return p;
}

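/*
 * Pick the cached table with the fewest used entries, i.e. the one
 * whose eviction discards the fewest still-valid mappings.
 */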
static struct pgt *pop_least_used_from_cache_list(void)
{
	struct pgt *pgt;
	struct pgt *p_prev = NULL;
	size_t least_used;

	pgt = SLIST_FIRST(&pgt_cache_list);
	if (!pgt)
		return NULL;
	if (!pgt->num_used_entries)
		goto out;
	least_used = pgt->num_used_entries;

	while (true) {
		if (!SLIST_NEXT(pgt, link))
			break;
		if (SLIST_NEXT(pgt, link)->num_used_entries <= least_used) {
			p_prev = pgt;
			least_used = SLIST_NEXT(pgt, link)->num_used_entries;
		}
		pgt = SLIST_NEXT(pgt, link);
	}

out:
	if (p_prev) {
		pgt = SLIST_NEXT(p_prev, link);
		SLIST_REMOVE_AFTER(p_prev, link);
	} else {
		pgt = SLIST_FIRST(&pgt_cache_list);
		SLIST_REMOVE_HEAD(&pgt_cache_list, link);
	}
	return pgt;
}

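/*
 * Return all tables of a cache to the pool. Tables that still map pages
 * are parked on the cache list when save_ctx is set; all others have
 * their entries saved and released and go back on the free list.
 */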
static void pgt_free_unlocked(struct pgt_cache *pgt_cache, bool save_ctx)
{
	while (!SLIST_EMPTY(pgt_cache)) {
		struct pgt *p = SLIST_FIRST(pgt_cache);

		SLIST_REMOVE_HEAD(pgt_cache, link);
		if (save_ctx && p->num_used_entries) {
			push_to_cache_list(p);
		} else {
			tee_pager_pgt_save_and_release_entries(p);
			assert(!p->num_used_entries);
			p->ctx = NULL;
			p->vabase = 0;

			push_to_free_list(p);
		}
	}
}

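/*
 * Allocation order: first try to reclaim this exact (vabase, ctx) table
 * from the cache list, then take a clean table from the free list, and
 * only then evict the least used cached table.
 */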
static struct pgt *pop_from_some_list(vaddr_t vabase, void *ctx)
{
	struct pgt *p = pop_from_cache_list(vabase, ctx);

	if (p)
		return p;
	p = pop_from_free_list();
	if (!p) {
		p = pop_least_used_from_cache_list();
		if (!p)
			return NULL;
		tee_pager_pgt_save_and_release_entries(p);
		memset(p->tbl, 0, PGT_SIZE);
	}
	assert(!p->num_used_entries);
	p->ctx = ctx;
	p->vabase = vabase;
	return p;
}

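/*
 * Drop every table belonging to ctx from the cache list and return it
 * to the free list. The list head needs SLIST_REMOVE_HEAD() while inner
 * elements are unlinked with SLIST_REMOVE_AFTER().
 */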
void pgt_flush_ctx(struct ts_ctx *ctx)
{
	struct pgt *p;
	struct pgt *pp = NULL;

	mutex_lock(&pgt_mu);

	while (true) {
		p = SLIST_FIRST(&pgt_cache_list);
		if (!p)
			goto out;
		if (p->ctx != ctx)
			break;
		SLIST_REMOVE_HEAD(&pgt_cache_list, link);
		tee_pager_pgt_save_and_release_entries(p);
		assert(!p->num_used_entries);
		p->ctx = NULL;
		p->vabase = 0;
		push_to_free_list(p);
	}

	pp = p;
	while (true) {
		p = SLIST_NEXT(pp, link);
		if (!p)
			break;
		if (p->ctx == ctx) {
			SLIST_REMOVE_AFTER(pp, link);
			tee_pager_pgt_save_and_release_entries(p);
			assert(!p->num_used_entries);
			p->ctx = NULL;
			p->vabase = 0;
			push_to_free_list(p);
		} else {
			pp = p;
		}
	}

out:
	mutex_unlock(&pgt_mu);
}

static void flush_pgt_entry(struct pgt *p)
{
	tee_pager_pgt_save_and_release_entries(p);
	assert(!p->num_used_entries);
	p->ctx = NULL;
	p->vabase = 0;
}

static bool pgt_entry_matches(struct pgt *p, void *ctx, vaddr_t begin,
			      vaddr_t last)
{
	if (!p)
		return false;
	if (p->ctx != ctx)
		return false;
	if (last <= begin)
		return false;
	if (!core_is_buffer_inside(p->vabase, SMALL_PAGE_SIZE, begin,
				   last - begin))
		return false;

	return true;
}

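/*
 * Remove all tables in the list that fall inside [begin, last) for ctx.
 * Removing the list head is handled separately since singly-linked
 * SLIST removal always needs the preceding element otherwise.
 */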
static void flush_ctx_range_from_list(struct pgt_cache *pgt_cache, void *ctx,
				      vaddr_t begin, vaddr_t last)
{
	struct pgt *p;
	struct pgt *next_p;

	/*
	 * Do the special case where the first element in the list is
	 * removed first.
	 */
	p = SLIST_FIRST(pgt_cache);
	while (pgt_entry_matches(p, ctx, begin, last)) {
		flush_pgt_entry(p);
		SLIST_REMOVE_HEAD(pgt_cache, link);
		push_to_free_list(p);
		p = SLIST_FIRST(pgt_cache);
	}

	/*
	 * p either points to the first element in the list or is NULL;
	 * if NULL the list is empty and we're done.
	 */
	if (!p)
		return;

	/*
	 * Do the common case where the next element in the list is
	 * removed.
	 */
	while (true) {
		next_p = SLIST_NEXT(p, link);
		if (!next_p)
			break;
		if (pgt_entry_matches(next_p, ctx, begin, last)) {
			flush_pgt_entry(next_p);
			SLIST_REMOVE_AFTER(p, link);
			push_to_free_list(next_p);
			continue;
		}

		p = SLIST_NEXT(p, link);
	}
}

void pgt_flush_ctx_range(struct pgt_cache *pgt_cache, struct ts_ctx *ctx,
			 vaddr_t begin, vaddr_t last)
{
	mutex_lock(&pgt_mu);

	if (pgt_cache)
		flush_ctx_range_from_list(pgt_cache, ctx, begin, last);
	flush_ctx_range_from_list(&pgt_cache_list, ctx, begin, last);

	condvar_broadcast(&pgt_cv);
	mutex_unlock(&pgt_mu);
}

#else /*!CFG_PAGED_USER_TA*/

static void pgt_free_unlocked(struct pgt_cache *pgt_cache,
			      bool save_ctx __unused)
{
	while (!SLIST_EMPTY(pgt_cache)) {
		struct pgt *p = SLIST_FIRST(pgt_cache);

		SLIST_REMOVE_HEAD(pgt_cache, link);
		push_to_free_list(p);
	}
}

static struct pgt *pop_from_some_list(vaddr_t vabase,
				      struct ts_ctx *ctx __unused)
{
	struct pgt *p = pop_from_free_list();

	if (p)
		p->vabase = vabase;

	return p;
}
#endif /*!CFG_PAGED_USER_TA*/

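/*
 * Zero the translation table entries covering [begin, end) in each table
 * in the list. Entry size depends on the table format: 64-bit
 * descriptors with LPAE, 32-bit with the short-descriptor format.
 */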
static void clear_ctx_range_from_list(struct pgt_cache *pgt_cache,
				      void *ctx __maybe_unused,
				      vaddr_t begin, vaddr_t end)
{
	struct pgt *p = NULL;
#ifdef CFG_WITH_LPAE
	uint64_t *tbl = NULL;
#else
	uint32_t *tbl = NULL;
#endif
	unsigned int idx = 0;
	unsigned int n = 0;

	SLIST_FOREACH(p, pgt_cache, link) {
		vaddr_t b = MAX(p->vabase, begin);
		vaddr_t e = MIN(p->vabase + CORE_MMU_PGDIR_SIZE, end);

#ifdef CFG_PAGED_USER_TA
		if (p->ctx != ctx)
			continue;
#endif
		if (b >= e)
			continue;

		tbl = p->tbl;
		idx = (b - p->vabase) / SMALL_PAGE_SIZE;
		n = (e - b) / SMALL_PAGE_SIZE;
		memset(tbl + idx, 0, n * sizeof(*tbl));
	}
}

void pgt_clear_ctx_range(struct pgt_cache *pgt_cache, struct ts_ctx *ctx,
			 vaddr_t begin, vaddr_t end)
{
	mutex_lock(&pgt_mu);

	if (pgt_cache)
		clear_ctx_range_from_list(pgt_cache, ctx, begin, end);
#ifdef CFG_PAGED_USER_TA
	clear_ctx_range_from_list(&pgt_cache_list, ctx, begin, end);
#endif

	mutex_unlock(&pgt_mu);
}

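/*
 * Populate pgt_cache with one table per CORE_MMU_PGDIR_SIZE chunk of
 * [begin, last]. On failure everything acquired so far is released so
 * the caller can block without holding tables other threads may need.
 * Note that ctx, a pointer, is passed as the bool save_ctx argument of
 * pgt_free_unlocked(); a non-NULL context thus means "save the entries".
 */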
static bool pgt_alloc_unlocked(struct pgt_cache *pgt_cache, struct ts_ctx *ctx,
			       vaddr_t begin, vaddr_t last)
{
	const vaddr_t base = ROUNDDOWN(begin, CORE_MMU_PGDIR_SIZE);
	const size_t num_tbls = ((last - base) >> CORE_MMU_PGDIR_SHIFT) + 1;
	size_t n = 0;
	struct pgt *p;
	struct pgt *pp = NULL;

	while (n < num_tbls) {
		p = pop_from_some_list(base + n * CORE_MMU_PGDIR_SIZE, ctx);
		if (!p) {
			pgt_free_unlocked(pgt_cache, ctx);
			return false;
		}

		if (pp)
			SLIST_INSERT_AFTER(pp, p, link);
		else
			SLIST_INSERT_HEAD(pgt_cache, p, link);
		pp = p;
		n++;
	}

	return true;
}

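/*
 * Blocking allocation: any tables already in pgt_cache are released
 * first, then if the pool is exhausted the thread wakes other waiters
 * and sleeps on pgt_cv until tables are freed before retrying.
 */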
void pgt_alloc(struct pgt_cache *pgt_cache, struct ts_ctx *ctx,
	       vaddr_t begin, vaddr_t last)
{
	if (last <= begin)
		return;

	mutex_lock(&pgt_mu);

	pgt_free_unlocked(pgt_cache, ctx);
	while (!pgt_alloc_unlocked(pgt_cache, ctx, begin, last)) {
		DMSG("Waiting for page tables");
		condvar_broadcast(&pgt_cv);
		condvar_wait(&pgt_cv, &pgt_mu);
	}

	mutex_unlock(&pgt_mu);
}

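/*
 * Release the tables held by pgt_cache and wake threads waiting in
 * pgt_alloc(). With CFG_PAGED_USER_TA, tables that still map pages are
 * kept on the cache list when save_ctx is set, for reuse when the
 * context is mapped again.
 */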
void pgt_free(struct pgt_cache *pgt_cache, bool save_ctx)
{
	if (SLIST_EMPTY(pgt_cache))
		return;

	mutex_lock(&pgt_mu);

	pgt_free_unlocked(pgt_cache, save_ctx);

	condvar_broadcast(&pgt_cv);
	mutex_unlock(&pgt_mu);
}