// SPDX-License-Identifier: BSD-2-Clause
/*
 * Copyright (c) 2016-2017, Linaro Limited
 */

#include <assert.h>
#include <initcall.h>
#include <keep.h>
#include <kernel/linker.h>
#include <kernel/mutex.h>
#include <kernel/panic.h>
#include <kernel/refcount.h>
#include <kernel/spinlock.h>
#include <kernel/tee_misc.h>
#include <mm/core_mmu.h>
#include <mm/mobj.h>
#include <mm/tee_pager.h>
#include <mm/vm.h>
#include <optee_msg.h>
#include <sm/optee_smc.h>
#include <stdlib.h>
#include <string.h>
#include <tee_api_types.h>
#include <types_ext.h>
#include <util.h>

static struct mutex shm_mu = MUTEX_INITIALIZER;
static struct condvar shm_cv = CONDVAR_INITIALIZER;
static size_t shm_release_waiters;

/*
 * mobj_reg_shm implementation. Describes shared memory provided by the
 * normal world.
 */

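/*
 * struct mobj_reg_shm - registered shared memory object
 * @mobj:		base mobj
 * @next:		link in reg_shm_list
 * @cookie:		identifier supplied by the normal world
 * @mm:			VA range in tee_mm_shm while the memory is mapped
 * @page_offset:	offset of the buffer into the first page
 * @mapcount:		reference count for the core mapping of the pages
 * @guarded:		true if the mobj can't be released by cookie
 * @releasing:		true while mobj_reg_shm_release_by_cookie() runs
 * @release_frees:	true once the final mobj_put() has been done and
 *			mobj_reg_shm_release_by_cookie() should free it
 * @pages:		physical addresses of the registered pages
 */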
struct mobj_reg_shm {
	struct mobj mobj;
	SLIST_ENTRY(mobj_reg_shm) next;
	uint64_t cookie;
	tee_mm_entry_t *mm;
	paddr_t page_offset;
	struct refcount mapcount;
	bool guarded;
	bool releasing;
	bool release_frees;
	paddr_t pages[];
};

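/*
 * Size of a struct mobj_reg_shm carrying nr_pages page addresses in its
 * flexible array member, or 0 on arithmetic overflow. For example, with an
 * 8-byte paddr_t, nr_pages == 4 yields sizeof(struct mobj_reg_shm) + 32.
 */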
static size_t mobj_reg_shm_size(size_t nr_pages)
{
	size_t s = 0;

	if (MUL_OVERFLOW(sizeof(paddr_t), nr_pages, &s))
		return 0;
	if (ADD_OVERFLOW(sizeof(struct mobj_reg_shm), s, &s))
		return 0;
	return s;
}

static SLIST_HEAD(reg_shm_head, mobj_reg_shm) reg_shm_list =
	SLIST_HEAD_INITIALIZER(reg_shm_head);

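/*
 * As used below, reg_shm_slist_lock protects reg_shm_list and the
 * guarded/releasing/release_frees flags, while reg_shm_map_lock protects
 * the mm entry and the mapcount of each mobj_reg_shm.
 */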
static unsigned int reg_shm_slist_lock = SPINLOCK_UNLOCK;
static unsigned int reg_shm_map_lock = SPINLOCK_UNLOCK;

static struct mobj_reg_shm *to_mobj_reg_shm(struct mobj *mobj);

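/*
 * Returns in *pa the physical address at @offst into the buffer: the exact
 * address when @granule is 0, or the containing page address when @granule
 * is SMALL_PAGE_SIZE. As an illustrative example, with page_offset 0x80 and
 * offst 0x1000, granule 0 yields pages[1] + 0x80 while granule
 * SMALL_PAGE_SIZE yields pages[1].
 */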
static TEE_Result mobj_reg_shm_get_pa(struct mobj *mobj, size_t offst,
				      size_t granule, paddr_t *pa)
{
	struct mobj_reg_shm *mobj_reg_shm = to_mobj_reg_shm(mobj);
	size_t full_offset = 0;
	paddr_t p = 0;

	if (!pa)
		return TEE_ERROR_GENERIC;

	if (offst >= mobj->size)
		return TEE_ERROR_GENERIC;

	full_offset = offst + mobj_reg_shm->page_offset;
	switch (granule) {
	case 0:
		p = mobj_reg_shm->pages[full_offset / SMALL_PAGE_SIZE] +
			(full_offset & SMALL_PAGE_MASK);
		break;
	case SMALL_PAGE_SIZE:
		p = mobj_reg_shm->pages[full_offset / SMALL_PAGE_SIZE];
		break;
	default:
		return TEE_ERROR_GENERIC;
	}
	*pa = p;

	return TEE_SUCCESS;
}
DECLARE_KEEP_PAGER(mobj_reg_shm_get_pa);

static size_t mobj_reg_shm_get_phys_offs(struct mobj *mobj,
					 size_t granule __maybe_unused)
{
	assert(granule >= mobj->phys_granule);
	return to_mobj_reg_shm(mobj)->page_offset;
}

static void *mobj_reg_shm_get_va(struct mobj *mobj, size_t offst, size_t len)
{
	struct mobj_reg_shm *mrs = to_mobj_reg_shm(mobj);

	if (!mrs->mm || !mobj_check_offset_and_len(mobj, offst, len))
		return NULL;

	return (void *)(vaddr_t)(tee_mm_get_smem(mrs->mm) + offst +
				 mrs->page_offset);
}

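/* Both callers below hold reg_shm_map_lock across this call. */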
static void reg_shm_unmap_helper(struct mobj_reg_shm *r)
{
	assert(r->mm);
	assert(r->mm->pool->shift == SMALL_PAGE_SHIFT);
	core_mmu_unmap_pages(tee_mm_get_smem(r->mm), r->mm->size);
	tee_mm_free(r->mm);
	r->mm = NULL;
}

static void reg_shm_free_helper(struct mobj_reg_shm *mobj_reg_shm)
{
	uint32_t exceptions = cpu_spin_lock_xsave(&reg_shm_map_lock);

	if (mobj_reg_shm->mm)
		reg_shm_unmap_helper(mobj_reg_shm);

	cpu_spin_unlock_xrestore(&reg_shm_map_lock, exceptions);

	SLIST_REMOVE(&reg_shm_list, mobj_reg_shm, mobj_reg_shm, next);
	free(mobj_reg_shm);
}

static void mobj_reg_shm_free(struct mobj *mobj)
{
	struct mobj_reg_shm *r = to_mobj_reg_shm(mobj);
	uint32_t exceptions = 0;

	if (r->guarded && !r->releasing) {
		/*
		 * Guarded registered shared memory can't be released
		 * by cookie, only by mobj_put(). However, unguarded
		 * registered shared memory can also be freed by mobj_put()
		 * unless mobj_reg_shm_release_by_cookie() is waiting for
		 * the mobj to be released.
		 */
		exceptions = cpu_spin_lock_xsave(&reg_shm_slist_lock);
		reg_shm_free_helper(r);
		cpu_spin_unlock_xrestore(&reg_shm_slist_lock, exceptions);
	} else {
		/*
		 * We've reached the point where an unguarded reg shm can
		 * be released by cookie. Notify any waiters.
		 */
		exceptions = cpu_spin_lock_xsave(&reg_shm_slist_lock);
		r->release_frees = true;
		cpu_spin_unlock_xrestore(&reg_shm_slist_lock, exceptions);

		mutex_lock(&shm_mu);
		if (shm_release_waiters)
			condvar_broadcast(&shm_cv);
		mutex_unlock(&shm_mu);
	}
}

static TEE_Result mobj_reg_shm_get_cattr(struct mobj *mobj __unused,
					 uint32_t *cattr)
{
	if (!cattr)
		return TEE_ERROR_GENERIC;

	*cattr = TEE_MATTR_CACHE_CACHED;

	return TEE_SUCCESS;
}

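/*
 * Lazily maps the registered pages into the tee_mm_shm virtual address
 * pool the first time the mobj is mapped and keeps a refcount so that
 * concurrent users share one mapping. A minimal usage sketch, assuming the
 * usual mobj_inc_map()/mobj_get_va()/mobj_dec_map() wrappers from
 * <mm/mobj.h>:
 *
 *	if (!mobj_inc_map(mobj)) {
 *		void *va = mobj_get_va(mobj, 0, mobj->size);
 *
 *		... use va while the mapping is held ...
 *		mobj_dec_map(mobj);
 *	}
 */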
static TEE_Result mobj_reg_shm_inc_map(struct mobj *mobj)
{
	TEE_Result res = TEE_SUCCESS;
	struct mobj_reg_shm *r = to_mobj_reg_shm(mobj);
	uint32_t exceptions = 0;
	size_t sz = 0;

	while (true) {
		if (refcount_inc(&r->mapcount))
			return TEE_SUCCESS;

		exceptions = cpu_spin_lock_xsave(&reg_shm_map_lock);

		if (!refcount_val(&r->mapcount))
			break; /* continue to reinitialize */
		/*
		 * If another thread beat us to initialize mapcount,
		 * restart to make sure we still increase it.
		 */
		cpu_spin_unlock_xrestore(&reg_shm_map_lock, exceptions);
	}

	/*
	 * If we have beaten another thread calling mobj_reg_shm_dec_map()
	 * to get the lock, the pages are still mapped and we only need to
	 * reinitialize mapcount to 1.
	 */
	if (!r->mm) {
		sz = ROUNDUP(mobj->size + r->page_offset, SMALL_PAGE_SIZE);
		r->mm = tee_mm_alloc(&tee_mm_shm, sz);
		if (!r->mm) {
			res = TEE_ERROR_OUT_OF_MEMORY;
			goto out;
		}

		res = core_mmu_map_pages(tee_mm_get_smem(r->mm), r->pages,
					 sz / SMALL_PAGE_SIZE,
					 MEM_AREA_NSEC_SHM);
		if (res) {
			tee_mm_free(r->mm);
			r->mm = NULL;
			goto out;
		}
	}

	refcount_set(&r->mapcount, 1);
out:
	cpu_spin_unlock_xrestore(&reg_shm_map_lock, exceptions);

	return res;
}

static TEE_Result mobj_reg_shm_dec_map(struct mobj *mobj)
{
	struct mobj_reg_shm *r = to_mobj_reg_shm(mobj);
	uint32_t exceptions = 0;

	if (!refcount_dec(&r->mapcount))
		return TEE_SUCCESS;

	exceptions = cpu_spin_lock_xsave(&reg_shm_map_lock);

	if (!refcount_val(&r->mapcount))
		reg_shm_unmap_helper(r);

	cpu_spin_unlock_xrestore(&reg_shm_map_lock, exceptions);

	return TEE_SUCCESS;
}

static bool mobj_reg_shm_matches(struct mobj *mobj, enum buf_is_attr attr);

static uint64_t mobj_reg_shm_get_cookie(struct mobj *mobj)
{
	return to_mobj_reg_shm(mobj)->cookie;
}

/*
 * When CFG_PREALLOC_RPC_CACHE is disabled, this variable is weak just
 * to ease breaking its dependency chain when added to the unpaged area.
 * When CFG_PREALLOC_RPC_CACHE is enabled, releasing RPC preallocated
 * shm mandates these resources to be unpaged.
 */
const struct mobj_ops mobj_reg_shm_ops
__weak __rodata_unpaged("mobj_reg_shm_ops") = {
	.get_pa = mobj_reg_shm_get_pa,
	.get_phys_offs = mobj_reg_shm_get_phys_offs,
	.get_va = mobj_reg_shm_get_va,
	.get_cattr = mobj_reg_shm_get_cattr,
	.matches = mobj_reg_shm_matches,
	.free = mobj_reg_shm_free,
	.get_cookie = mobj_reg_shm_get_cookie,
	.inc_map = mobj_reg_shm_inc_map,
	.dec_map = mobj_reg_shm_dec_map,
};

#ifdef CFG_PREALLOC_RPC_CACHE
/* Releasing RPC preallocated shm mandates a few resources to be unpaged */
DECLARE_KEEP_PAGER(mobj_reg_shm_get_cookie);
DECLARE_KEEP_PAGER(mobj_reg_shm_matches);
DECLARE_KEEP_PAGER(mobj_reg_shm_free);
#endif

static bool mobj_reg_shm_matches(struct mobj *mobj __maybe_unused,
				 enum buf_is_attr attr)
{
	assert(mobj->ops == &mobj_reg_shm_ops);

	return attr == CORE_MEM_NON_SEC || attr == CORE_MEM_REG_SHM;
}

static struct mobj_reg_shm *to_mobj_reg_shm(struct mobj *mobj)
{
	assert(mobj->ops == &mobj_reg_shm_ops);
	return container_of(mobj, struct mobj_reg_shm, mobj);
}

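/*
 * Creates a guarded mobj covering @num_pages normal-world pages whose
 * physical addresses are listed in @pages, with the buffer starting
 * @page_offset bytes into the first page. Returns NULL if the size
 * calculation overflows, if a page address isn't page aligned or if any
 * page lies outside non-secure memory.
 */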
struct mobj *mobj_reg_shm_alloc(paddr_t *pages, size_t num_pages,
				paddr_t page_offset, uint64_t cookie)
{
	struct mobj_reg_shm *mobj_reg_shm = NULL;
	size_t i = 0;
	uint32_t exceptions = 0;
	size_t s = 0;

	if (!num_pages || page_offset >= SMALL_PAGE_SIZE)
		return NULL;

	s = mobj_reg_shm_size(num_pages);
	if (!s)
		return NULL;
	mobj_reg_shm = calloc(1, s);
	if (!mobj_reg_shm)
		return NULL;

	mobj_reg_shm->mobj.ops = &mobj_reg_shm_ops;
	mobj_reg_shm->mobj.size = num_pages * SMALL_PAGE_SIZE - page_offset;
	mobj_reg_shm->mobj.phys_granule = SMALL_PAGE_SIZE;
	refcount_set(&mobj_reg_shm->mobj.refc, 1);
	mobj_reg_shm->cookie = cookie;
	mobj_reg_shm->guarded = true;
	mobj_reg_shm->page_offset = page_offset;
	memcpy(mobj_reg_shm->pages, pages, sizeof(*pages) * num_pages);

	/* Ensure loaded references match format and security constraints */
	for (i = 0; i < num_pages; i++) {
		if (mobj_reg_shm->pages[i] & SMALL_PAGE_MASK)
			goto err;

		/* Only non-secure memory can be mapped here */
		if (!core_pbuf_is(CORE_MEM_NON_SEC, mobj_reg_shm->pages[i],
				  SMALL_PAGE_SIZE))
			goto err;
	}

	exceptions = cpu_spin_lock_xsave(&reg_shm_slist_lock);
	SLIST_INSERT_HEAD(&reg_shm_list, mobj_reg_shm, next);
	cpu_spin_unlock_xrestore(&reg_shm_slist_lock, exceptions);

	return &mobj_reg_shm->mobj;
err:
	free(mobj_reg_shm);
	return NULL;
}

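/*
 * Clears the guarded flag so that the registration can later be released
 * by cookie via mobj_reg_shm_release_by_cookie().
 */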
void mobj_reg_shm_unguard(struct mobj *mobj)
{
	uint32_t exceptions = cpu_spin_lock_xsave(&reg_shm_slist_lock);

	to_mobj_reg_shm(mobj)->guarded = false;
	cpu_spin_unlock_xrestore(&reg_shm_slist_lock, exceptions);
}

static struct mobj_reg_shm *reg_shm_find_unlocked(uint64_t cookie)
{
	struct mobj_reg_shm *mobj_reg_shm = NULL;

	SLIST_FOREACH(mobj_reg_shm, &reg_shm_list, next)
		if (mobj_reg_shm->cookie == cookie)
			return mobj_reg_shm;

	return NULL;
}

struct mobj *mobj_reg_shm_get_by_cookie(uint64_t cookie)
{
	uint32_t exceptions = cpu_spin_lock_xsave(&reg_shm_slist_lock);
	struct mobj_reg_shm *r = reg_shm_find_unlocked(cookie);

	cpu_spin_unlock_xrestore(&reg_shm_slist_lock, exceptions);
	if (!r)
		return NULL;

	return mobj_get(&r->mobj);
}

TEE_Result mobj_reg_shm_release_by_cookie(uint64_t cookie)
{
	uint32_t exceptions = 0;
	struct mobj_reg_shm *r = NULL;

	/*
	 * Try to find r and check whether it can be released by this
	 * function. If so, call mobj_put(). Otherwise this function was
	 * called either with a wrong cookie or a second time for the
	 * same cookie; in both cases return TEE_ERROR_BAD_PARAMETERS.
	 */
	exceptions = cpu_spin_lock_xsave(&reg_shm_slist_lock);
	r = reg_shm_find_unlocked(cookie);
	if (!r || r->guarded || r->releasing)
		r = NULL;
	else
		r->releasing = true;

	cpu_spin_unlock_xrestore(&reg_shm_slist_lock, exceptions);

	if (!r)
		return TEE_ERROR_BAD_PARAMETERS;

	mobj_put(&r->mobj);

	/*
	 * We've established that this function can release the cookie.
	 * Now we wait until mobj_reg_shm_free() is called by the last
	 * mobj_put() needed to free this mobj. Note that the call to
	 * mobj_put() above could very well be that call.
	 *
	 * Once mobj_reg_shm_free() is called it will set r->release_frees
	 * to true and we can free the mobj here.
	 */
	mutex_lock(&shm_mu);
	shm_release_waiters++;
	assert(shm_release_waiters);

	while (true) {
		exceptions = cpu_spin_lock_xsave(&reg_shm_slist_lock);
		if (r->release_frees) {
			reg_shm_free_helper(r);
			r = NULL;
		}
		cpu_spin_unlock_xrestore(&reg_shm_slist_lock, exceptions);

		if (!r)
			break;
		condvar_wait(&shm_cv, &shm_mu);
	}

	assert(shm_release_waiters);
	shm_release_waiters--;
	mutex_unlock(&shm_mu);

	return TEE_SUCCESS;
}

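/*
 * Same as mobj_reg_shm_alloc() except that the memory is also mapped into
 * the core; on mapping failure the mobj is put again and NULL is returned.
 */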
struct mobj *mobj_mapped_shm_alloc(paddr_t *pages, size_t num_pages,
				   paddr_t page_offset, uint64_t cookie)
{
	struct mobj *mobj = mobj_reg_shm_alloc(pages, num_pages,
					       page_offset, cookie);

	if (!mobj)
		return NULL;

	if (mobj_inc_map(mobj)) {
		mobj_put(mobj);
		return NULL;
	}

	return mobj;
}

static TEE_Result mobj_mapped_shm_init(void)
{
	vaddr_t pool_start = 0;
	vaddr_t pool_end = 0;

	core_mmu_get_mem_by_type(MEM_AREA_SHM_VASPACE, &pool_start, &pool_end);
	if (!pool_start || !pool_end)
		panic("Can't find region for shmem pool");

	if (!tee_mm_init(&tee_mm_shm, pool_start, pool_end - pool_start,
			 SMALL_PAGE_SHIFT, TEE_MM_POOL_NO_FLAGS))
		panic("Could not create shmem pool");

	DMSG("Shared memory address range: %" PRIxVA ", %" PRIxVA,
	     pool_start, pool_end);
	return TEE_SUCCESS;
}

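/*
 * Registered as a preinit initcall so the shmem VA pool exists before the
 * first normal-world shared memory registration is handled.
 */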
preinit(mobj_mapped_shm_init);