// SPDX-License-Identifier: BSD-2-Clause
/*
 * Copyright (c) 2014, STMicroelectronics International N.V.
 */

#include <assert.h>
#include <kernel/panic.h>
#include <kernel/spinlock.h>
#include <kernel/tee_common.h>
#include <mm/tee_mm.h>
#include <mm/tee_pager.h>
#include <stdlib.h>
#include <string.h>
#include <trace.h>
#include <util.h>

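/*
 * Meta data (tee_mm_entry_t) is heap allocated, not carved out of the
 * managed range itself. Pools flagged TEE_MM_POOL_NEX_MALLOC take their
 * meta data from the nexus heap, all other pools from the regular heap.
 */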
static void *pmalloc(tee_mm_pool_t *pool, size_t size)
{
	if (pool->flags & TEE_MM_POOL_NEX_MALLOC)
		return nex_malloc(size);
	else
		return malloc(size);
}

static void *pcalloc(tee_mm_pool_t *pool, size_t num_el, size_t size)
{
	if (pool->flags & TEE_MM_POOL_NEX_MALLOC)
		return nex_calloc(num_el, size);
	else
		return calloc(num_el, size);
}

static void pfree(tee_mm_pool_t *pool, void *ptr)
{
	if (pool->flags & TEE_MM_POOL_NEX_MALLOC)
		nex_free(ptr);
	else
		free(ptr);
}

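/*
 * Initializes a pool covering the range starting at lo. The base is
 * rounded up and the size rounded down to a multiple of (1 << shift), so
 * the pool never extends beyond the supplied range; all entries are
 * accounted in (1 << shift)-byte granules. The first list element is a
 * zero-sized sentinel anchoring the sorted entry list; for
 * TEE_MM_POOL_HI_ALLOC pools its offset marks the top of the range.
 * Returns false on invalid arguments or if meta data allocation fails.
 */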
bool tee_mm_init(tee_mm_pool_t *pool, paddr_t lo, paddr_size_t size,
		 uint8_t shift, uint32_t flags)
{
	paddr_size_t rounded = 0;
	paddr_t initial_lo = lo;

	if (pool == NULL)
		return false;

	lo = ROUNDUP(lo, 1 << shift);
	rounded = lo - initial_lo;
	size = ROUNDDOWN(size - rounded, 1 << shift);

	assert(((uint64_t)size >> shift) < (uint64_t)UINT32_MAX);

	pool->lo = lo;
	pool->size = size;
	pool->shift = shift;
	pool->flags = flags;
	pool->entry = pcalloc(pool, 1, sizeof(tee_mm_entry_t));

	if (pool->entry == NULL)
		return false;

	if (pool->flags & TEE_MM_POOL_HI_ALLOC)
		pool->entry->offset = ((size - 1) >> shift) + 1;

	pool->entry->pool = pool;
	pool->lock = SPINLOCK_UNLOCK;

	return true;
}

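/*
 * Frees all entries including the sentinel and marks the pool
 * uninitialized (pool->entry == NULL).
 */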
void tee_mm_final(tee_mm_pool_t *pool)
{
	if (pool == NULL || pool->entry == NULL)
		return;

	while (pool->entry->next != NULL)
		tee_mm_free(pool->entry->next);
	pfree(pool, pool->entry);
	pool->entry = NULL;
}

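/* Insert nn into the list right after p. Caller must hold the pool lock. */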
static void tee_mm_add(tee_mm_entry_t *p, tee_mm_entry_t *nn)
{
	/* add to list */
	nn->next = p->next;
	p->next = nn;
}

#ifdef CFG_WITH_STATS
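/*
 * Sum of all allocated granules converted to bytes. Caller must hold
 * the pool lock.
 */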
static size_t tee_mm_stats_allocated(tee_mm_pool_t *pool)
{
	tee_mm_entry_t *entry;
	uint32_t sz = 0;

	if (!pool)
		return 0;

	entry = pool->entry;
	while (entry) {
		sz += entry->size;
		entry = entry->next;
	}

	return sz << pool->shift;
}

void tee_mm_get_pool_stats(tee_mm_pool_t *pool, struct malloc_stats *stats,
			   bool reset)
{
	uint32_t exceptions;

	if (!pool)
		return;

	memset(stats, 0, sizeof(*stats));

	exceptions = cpu_spin_lock_xsave(&pool->lock);

	stats->size = pool->size;
	stats->max_allocated = pool->max_allocated;
	stats->allocated = tee_mm_stats_allocated(pool);

	if (reset)
		pool->max_allocated = 0;
	cpu_spin_unlock_xrestore(&pool->lock, exceptions);
}

static void update_max_allocated(tee_mm_pool_t *pool)
{
	size_t sz = tee_mm_stats_allocated(pool);

	if (sz > pool->max_allocated)
		pool->max_allocated = sz;
}
#else /* CFG_WITH_STATS */
static inline void update_max_allocated(tee_mm_pool_t *pool __unused)
{
}
#endif /* CFG_WITH_STATS */

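/*
 * Allocates size bytes, rounded up to whole granules, using a first-fit
 * scan over the sorted entry list: each gap between consecutive entries
 * is tried, then the tail of the pool. TEE_MM_POOL_HI_ALLOC pools are
 * scanned (and grow) downwards from the top of the range instead.
 * Returns NULL if the pool is uninitialized, meta data allocation fails
 * or no hole is large enough.
 */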
tee_mm_entry_t *tee_mm_alloc(tee_mm_pool_t *pool, size_t size)
{
	size_t psize;
	tee_mm_entry_t *entry;
	tee_mm_entry_t *nn;
	size_t remaining;
	uint32_t exceptions;

	/* Check that pool is initialized */
	if (!pool || !pool->entry)
		return NULL;

	nn = pmalloc(pool, sizeof(tee_mm_entry_t));
	if (!nn)
		return NULL;

	exceptions = cpu_spin_lock_xsave(&pool->lock);

	entry = pool->entry;
	if (!size)
		psize = 0;
	else
		psize = ((size - 1) >> pool->shift) + 1;

	/* find free slot */
	if (pool->flags & TEE_MM_POOL_HI_ALLOC) {
		while (entry->next != NULL && psize >
		       (entry->offset - entry->next->offset -
			entry->next->size))
			entry = entry->next;
	} else {
		while (entry->next != NULL && psize >
		       (entry->next->offset - entry->size - entry->offset))
			entry = entry->next;
	}

	/* check if we have enough memory */
	if (entry->next == NULL) {
		if (pool->flags & TEE_MM_POOL_HI_ALLOC) {
			/*
			 * entry->offset is a "block count" offset from
			 * pool->lo. The byte offset is
			 * (entry->offset << pool->shift).
			 * In the HI_ALLOC allocation scheme the memory is
			 * allocated from the end of the segment, thus to
			 * validate there is sufficient memory validate that
			 * (entry->offset << pool->shift) >= size.
			 */
			if ((entry->offset << pool->shift) < size) {
				/* out of memory */
				goto err;
			}
		} else {
			if (!pool->size)
				panic("invalid pool");

			remaining = pool->size;
			remaining -= ((entry->offset + entry->size) <<
				      pool->shift);

			if (remaining < size) {
				/* out of memory */
				goto err;
			}
		}
	}

	tee_mm_add(entry, nn);

	if (pool->flags & TEE_MM_POOL_HI_ALLOC)
		nn->offset = entry->offset - psize;
	else
		nn->offset = entry->offset + entry->size;
	nn->size = psize;
	nn->pool = pool;

	update_max_allocated(pool);

	cpu_spin_unlock_xrestore(&pool->lock, exceptions);
	return nn;
err:
	cpu_spin_unlock_xrestore(&pool->lock, exceptions);
	pfree(pool, nn);
	return NULL;
}

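/*
 * Returns true if the granule range [offslo, offshi) fits between e and
 * e->next without leaving the pool. Caller must hold the pool lock.
 */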
static inline bool fit_in_gap(tee_mm_pool_t *pool, tee_mm_entry_t *e,
			      paddr_t offslo, paddr_t offshi)
{
	if (pool->flags & TEE_MM_POOL_HI_ALLOC) {
		if (offshi > e->offset ||
		    (e->next != NULL &&
		     (offslo < e->next->offset + e->next->size)) ||
		    (offshi << pool->shift) - 1 > pool->size)
			/* memory not available */
			return false;
	} else {
		if (offslo < (e->offset + e->size) ||
		    (e->next != NULL && (offshi > e->next->offset)) ||
		    (offshi << pool->shift) > pool->size)
			/* memory not available */
			return false;
	}

	return true;
}

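/*
 * Allocates the exact range [base, base + size - 1], i.e. an allocation
 * at a caller-chosen address instead of the first free hole. Returns
 * NULL if the range wraps, lies outside the pool or overlaps an existing
 * allocation.
 */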
tee_mm_entry_t *tee_mm_alloc2(tee_mm_pool_t *pool, paddr_t base, size_t size)
{
	tee_mm_entry_t *entry;
	paddr_t offslo;
	paddr_t offshi;
	tee_mm_entry_t *mm;
	uint32_t exceptions;

	/* Check that pool is initialized */
	if (!pool || !pool->entry)
		return NULL;

	/* Wrapping and sanity check */
	if ((base + size) < base || base < pool->lo)
		return NULL;

	mm = pmalloc(pool, sizeof(tee_mm_entry_t));
	if (!mm)
		return NULL;

	exceptions = cpu_spin_lock_xsave(&pool->lock);

	entry = pool->entry;
	offslo = (base - pool->lo) >> pool->shift;
	offshi = ((base - pool->lo + size - 1) >> pool->shift) + 1;

	/* find slot */
	if (pool->flags & TEE_MM_POOL_HI_ALLOC) {
		while (entry->next != NULL &&
		       offshi < entry->next->offset + entry->next->size)
			entry = entry->next;
	} else {
		while (entry->next != NULL && offslo > entry->next->offset)
			entry = entry->next;
	}

	/* Check that memory is available */
	if (!fit_in_gap(pool, entry, offslo, offshi))
		goto err;

	tee_mm_add(entry, mm);

	mm->offset = offslo;
	mm->size = offshi - offslo;
	mm->pool = pool;

	update_max_allocated(pool);
	cpu_spin_unlock_xrestore(&pool->lock, exceptions);
	return mm;
err:
	cpu_spin_unlock_xrestore(&pool->lock, exceptions);
	pfree(pool, mm);
	return NULL;
}

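/*
 * Unlinks p from its pool and frees the meta data. Passing NULL is a
 * no-op; panics if p is not in its pool's entry list.
 */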
void tee_mm_free(tee_mm_entry_t *p)
{
	tee_mm_entry_t *entry;
	uint32_t exceptions;

	if (!p || !p->pool)
		return;

	exceptions = cpu_spin_lock_xsave(&p->pool->lock);
	entry = p->pool->entry;

	/* remove entry from list */
	while (entry->next != NULL && entry->next != p)
		entry = entry->next;

	if (!entry->next)
		panic("invalid mm_entry");

	entry->next = entry->next->next;
	cpu_spin_unlock_xrestore(&p->pool->lock, exceptions);

	pfree(p->pool, p);
}

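/* Size in bytes of the allocation mm, or 0 if mm is invalid. */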
size_t tee_mm_get_bytes(const tee_mm_entry_t *mm)
{
	if (!mm || !mm->pool)
		return 0;
	else
		return mm->size << mm->pool->shift;
}

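/* True if pool is non-NULL and addr falls within the pool's range. */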
bool tee_mm_addr_is_within_range(const tee_mm_pool_t *pool, paddr_t addr)
{
	return pool && addr >= pool->lo &&
		addr <= (pool->lo + (pool->size - 1));
}

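/*
 * True if the pool holds no allocations (only the sentinel entry) or is
 * not initialized.
 */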
bool tee_mm_is_empty(tee_mm_pool_t *pool)
{
	bool ret;
	uint32_t exceptions;

	if (pool == NULL || pool->entry == NULL)
		return true;

	exceptions = cpu_spin_lock_xsave(&pool->lock);
	ret = pool->entry == NULL || pool->entry->next == NULL;
	cpu_spin_unlock_xrestore(&pool->lock, exceptions);

	return ret;
}

/* Physical Secure DDR pool */
tee_mm_pool_t tee_mm_sec_ddr;

/* Virtual eSRAM pool */
tee_mm_pool_t tee_mm_vcore;

/* Shared memory pool */
tee_mm_pool_t tee_mm_shm;

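/*
 * Returns the entry covering addr, or NULL if addr lies outside the pool
 * or in an unallocated hole.
 */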
tee_mm_entry_t *tee_mm_find(const tee_mm_pool_t *pool, paddr_t addr)
{
	tee_mm_entry_t *entry = NULL;
	uint32_t offset = 0;
	uint32_t exceptions = 0;

	if (!tee_mm_addr_is_within_range(pool, addr))
		return NULL;

	/*
	 * Only dereference pool after the range check above has rejected
	 * NULL pools. The offset must be as wide as entry->offset
	 * (uint32_t); uint16_t would truncate for large pools.
	 */
	entry = pool->entry;
	offset = (addr - pool->lo) >> pool->shift;

	exceptions = cpu_spin_lock_xsave(&((tee_mm_pool_t *)pool)->lock);

	while (entry->next != NULL) {
		entry = entry->next;

		if ((offset >= entry->offset) &&
		    (offset < (entry->offset + entry->size))) {
			cpu_spin_unlock_xrestore(&((tee_mm_pool_t *)pool)->lock,
						 exceptions);
			return entry;
		}
	}

	cpu_spin_unlock_xrestore(&((tee_mm_pool_t *)pool)->lock, exceptions);
	return NULL;
}

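/* Base address (within the pool's range) of the allocation mm. */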
uintptr_t tee_mm_get_smem(const tee_mm_entry_t *mm)
{
	return (mm->offset << mm->pool->shift) + mm->pool->lo;
}
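
/*
 * Typical call sequence, as a minimal sketch: initialize a pool over a
 * range, carve allocations out of it, and release them again.
 * example_pool, start_pa, pool_size and use_range() are hypothetical
 * names for illustration only.
 *
 *	static tee_mm_pool_t example_pool;
 *	tee_mm_entry_t *mm = NULL;
 *
 *	if (!tee_mm_init(&example_pool, start_pa, pool_size,
 *			 SMALL_PAGE_SHIFT, TEE_MM_POOL_NO_FLAGS))
 *		panic();
 *
 *	mm = tee_mm_alloc(&example_pool, 4 * SMALL_PAGE_SIZE);
 *	if (mm) {
 *		use_range(tee_mm_get_smem(mm), tee_mm_get_bytes(mm));
 *		tee_mm_free(mm);
 *	}
 */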