// SPDX-License-Identifier: BSD-2-Clause
/*
 * Copyright (c) 2021, Arm Limited. All rights reserved.
 */
#include <assert.h>
#include <bitstring.h>
#include <ffa.h>
#include <kernel/spinlock.h>
#include <mm/mobj.h>
#include <mm/sp_mem.h>

#define NUM_SHARES 64

static bitstr_t bit_decl(share_bits, NUM_SHARES);
static unsigned int sp_mem_lock = SPINLOCK_UNLOCK;

/* mem_shares stores all active FF-A shares. */
SLIST_HEAD(sp_mem_head, sp_mem);
static struct sp_mem_head mem_shares = SLIST_HEAD_INITIALIZER(sp_mem_head);
/* The weak instance of mobj_sp_ops below mandates that it is not static. */
const struct mobj_ops mobj_sp_ops;

struct mobj_sp {
	struct mobj mobj;
	paddr_t pages[];
};

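/* Convert a generic mobj pointer to its enclosing struct mobj_sp. */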
static struct mobj_sp *to_mobj_sp(struct mobj *mobj)
{
	assert(mobj->ops == &mobj_sp_ops);
	return container_of(mobj, struct mobj_sp, mobj);
}

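/*
 * Return the allocation size needed to track @num_pages physical pages,
 * or 0 on arithmetic overflow.
 */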
static size_t mobj_sp_size(size_t num_pages)
{
	size_t s = 0;

	if (MUL_OVERFLOW(sizeof(paddr_t), num_pages, &s))
		return 0;
	if (ADD_OVERFLOW(sizeof(struct mobj_sp), s, &s))
		return 0;
	return s;
}

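/*
 * Allocate a mobj covering @pages small pages. The page array starts out
 * empty; the caller populates it with sp_mem_add_pages().
 *
 * Illustrative use (hypothetical caller, not taken from this file):
 *
 *	unsigned int idx = 0;
 *	struct mobj *m = sp_mem_new_mobj(2);
 *
 *	if (m && sp_mem_add_pages(m, &idx, pa, 2))
 *		mobj_put(m);
 */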
struct mobj *sp_mem_new_mobj(uint64_t pages)
{
	struct mobj_sp *m = NULL;
	size_t s = 0;

	s = mobj_sp_size(pages);
	if (!s)
		return NULL;

	m = calloc(1, s);
	if (!m)
		return NULL;

	m->mobj.ops = &mobj_sp_ops;
	if (MUL_OVERFLOW(pages, SMALL_PAGE_SIZE, &m->mobj.size)) {
		free(m);
		return NULL;
	}
	m->mobj.phys_granule = SMALL_PAGE_SIZE;

	refcount_set(&m->mobj.refc, 1);
	return &m->mobj;
}

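/* Number of small pages covered by the mobj. */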
static size_t get_page_count(struct mobj_sp *ms)
{
	return ROUNDUP(ms->mobj.size, SMALL_PAGE_SIZE) / SMALL_PAGE_SIZE;
}

/*
 * Add physical pages to the mobj starting at index *idx; *idx is advanced
 * past the pages that were added.
 */
int sp_mem_add_pages(struct mobj *mobj, unsigned int *idx,
		     paddr_t pa, unsigned int num_pages)
{
	struct mobj_sp *ms = to_mobj_sp(mobj);
	unsigned int n = 0;
	size_t tot_page_count = get_page_count(ms);

	if (ADD_OVERFLOW(*idx, num_pages, &n) || n > tot_page_count)
		return TEE_ERROR_BAD_PARAMETERS;

	/* Only non-secure memory may be added to an SP share. */
	if (!core_pbuf_is(CORE_MEM_NON_SEC, pa, num_pages * SMALL_PAGE_SIZE))
		return TEE_ERROR_BAD_PARAMETERS;

	for (n = 0; n < num_pages; n++)
		ms->pages[n + *idx] = pa + n * SMALL_PAGE_SIZE;

	*idx += n;
	return TEE_SUCCESS;
}

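/* SP shares are always mapped with cached attributes. */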
static TEE_Result sp_mem_get_cattr(struct mobj *mobj __unused, uint32_t *cattr)
{
	*cattr = TEE_MATTR_CACHE_CACHED;

	return TEE_SUCCESS;
}

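/* An SP mobj matches both non-secure and registered shared memory. */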
static bool mobj_sp_matches(struct mobj *mobj __maybe_unused,
			    enum buf_is_attr attr)
{
	assert(mobj->ops == &mobj_sp_ops);

	return attr == CORE_MEM_NON_SEC || attr == CORE_MEM_REG_SHM;
}

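/*
 * Look up the physical address at @offset into the mobj. A @granule of 0
 * yields the exact address, SMALL_PAGE_SIZE the containing page; any
 * other granule is rejected.
 */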
static TEE_Result get_pa(struct mobj *mobj, size_t offset,
			 size_t granule, paddr_t *pa)
{
	struct mobj_sp *ms = to_mobj_sp(mobj);
	paddr_t p = 0;

	if (!pa)
		return TEE_ERROR_GENERIC;

	if (offset >= mobj->size)
		return TEE_ERROR_GENERIC;

	switch (granule) {
	case 0:
		p = ms->pages[offset / SMALL_PAGE_SIZE] +
		    (offset & SMALL_PAGE_MASK);
		break;
	case SMALL_PAGE_SIZE:
		p = ms->pages[offset / SMALL_PAGE_SIZE];
		break;
	default:
		return TEE_ERROR_GENERIC;
	}
	*pa = p;

	return TEE_SUCCESS;
}
DECLARE_KEEP_PAGER(get_pa);

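/* The backing pages are page-aligned, so the physical offset is 0. */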
static size_t get_phys_offs(struct mobj *mobj __maybe_unused,
			    size_t granule __maybe_unused)
{
	return 0;
}

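/* The mobj .free op: frees the mobj once its refcount has dropped to 0. */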
static void inactivate(struct mobj *mobj)
{
	struct mobj_sp *ms = to_mobj_sp(mobj);
	uint32_t exceptions = 0;

	exceptions = cpu_spin_lock_xsave(&sp_mem_lock);
	/*
	 * If the refcount isn't 0, some other thread has found this mobj
	 * via mem_shares after the mobj_put() that brought us here and
	 * before we took the lock.
	 */
	if (!refcount_val(&mobj->refc))
		free(ms);

	cpu_spin_unlock_xrestore(&sp_mem_lock, exceptions);
}

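/* Exported (and weak) operation table for SP shared-memory mobjs. */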
const struct mobj_ops mobj_sp_ops __weak __rodata_unpaged("mobj_sp_ops") = {
	.get_pa = get_pa,
	.get_phys_offs = get_phys_offs,
	.get_cattr = sp_mem_get_cattr,
	.matches = mobj_sp_matches,
	.free = inactivate,
};

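/*
 * Return the receiver entry of @smem matching endpoint @s_id, or NULL if
 * the endpoint is not a receiver of the share.
 */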
struct sp_mem_receiver *sp_mem_get_receiver(uint32_t s_id, struct sp_mem *smem)
{
	struct sp_mem_receiver *r = NULL;

	SLIST_FOREACH(r, &smem->receivers, link) {
		if (r->perm.endpoint_id == s_id)
			return r;
	}
	return NULL;
}

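/* Look up an active share by its FF-A global handle, NULL if not found. */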
struct sp_mem *sp_mem_get(uint64_t handle)
{
	struct sp_mem *smem = NULL;
	uint32_t exceptions = cpu_spin_lock_xsave(&sp_mem_lock);

	SLIST_FOREACH(smem, &mem_shares, link) {
		if (smem->global_handle == handle)
			break;
	}

	cpu_spin_unlock_xrestore(&sp_mem_lock, exceptions);
	return smem;
}

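/*
 * Return the VA at which (@mobj, @offset) is mapped in @uctx, or NULL if
 * it is not mapped in that context.
 */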
void *sp_mem_get_va(const struct user_mode_ctx *uctx, size_t offset,
		    struct mobj *mobj)
{
	struct vm_region *region = NULL;

	TAILQ_FOREACH(region, &uctx->vm_info.regions, link) {
		if (region->mobj == mobj && region->offset == offset)
			return (void *)region->va;
	}
	return NULL;
}

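/*
 * Allocate a new share and assign it a free global handle. Returns NULL
 * when allocation fails or when all NUM_SHARES handles are in use.
 */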
struct sp_mem *sp_mem_new(void)
{
	struct sp_mem *smem = NULL;
	uint32_t exceptions = 0;
	int i = 0;

	smem = calloc(1, sizeof(*smem));
	if (!smem)
		return NULL;

	exceptions = cpu_spin_lock_xsave(&sp_mem_lock);

	bit_ffc(share_bits, NUM_SHARES, &i);
	if (i == -1) {
		cpu_spin_unlock_xrestore(&sp_mem_lock, exceptions);
		free(smem);
		return NULL;
	}

	bit_set(share_bits, i);
	/*
	 * OP-TEE SHAREs use bit 44; SP shares use bit 45 instead so the
	 * two handle ranges never collide.
	 */
	smem->global_handle = i | FFA_MEMORY_HANDLE_SECURE_BIT;
	SLIST_INIT(&smem->regions);
	SLIST_INIT(&smem->receivers);

	cpu_spin_unlock_xrestore(&sp_mem_lock, exceptions);

	return smem;
}

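/* Publish the share on the global list of active shares. */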
void sp_mem_add(struct sp_mem *smem)
{
	uint32_t exceptions = cpu_spin_lock_xsave(&sp_mem_lock);

	SLIST_INSERT_HEAD(&mem_shares, smem, link);

	cpu_spin_unlock_xrestore(&sp_mem_lock, exceptions);
}

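/*
 * Return true when @new_reg overlaps a region of an active share backed
 * by the same mobj, i.e. when the memory is already (partly) shared.
 */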
bool sp_mem_is_shared(struct sp_mem_map_region *new_reg)
{
	struct sp_mem *smem = NULL;
	uint32_t exceptions = cpu_spin_lock_xsave(&sp_mem_lock);
	uint64_t new_reg_end = new_reg->page_offset +
			       (new_reg->page_count * SMALL_PAGE_SIZE);

	SLIST_FOREACH(smem, &mem_shares, link) {
		struct sp_mem_map_region *reg = NULL;

		SLIST_FOREACH(reg, &smem->regions, link) {
			if (new_reg->mobj == reg->mobj) {
				uint64_t reg_end = 0;

				reg_end = reg->page_offset +
					  (reg->page_count * SMALL_PAGE_SIZE);

				if (new_reg->page_offset < reg_end &&
				    new_reg_end > reg->page_offset) {
					cpu_spin_unlock_xrestore(&sp_mem_lock,
								 exceptions);
					return true;
				}
			}
		}
	}

	cpu_spin_unlock_xrestore(&sp_mem_lock, exceptions);
	return false;
}

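/*
 * Tear down a share: free all receivers and regions (dropping their mobj
 * references), release the handle bit, unlink the share and free it.
 */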
void sp_mem_remove(struct sp_mem *smem)
{
	uint32_t exceptions = 0;
	int i = 0;
	struct sp_mem *tsmem = NULL;

	if (!smem)
		return;

	/* Remove all receivers */
	while (!SLIST_EMPTY(&smem->receivers)) {
		struct sp_mem_receiver *receiver = NULL;

		receiver = SLIST_FIRST(&smem->receivers);
		SLIST_REMOVE_HEAD(&smem->receivers, link);
		free(receiver);
	}
	/* Remove all regions */
	while (!SLIST_EMPTY(&smem->regions)) {
		struct sp_mem_map_region *region = SLIST_FIRST(&smem->regions);

		mobj_put(region->mobj);

		SLIST_REMOVE_HEAD(&smem->regions, link);
		free(region);
	}

	exceptions = cpu_spin_lock_xsave(&sp_mem_lock);

	i = smem->global_handle & ~FFA_MEMORY_HANDLE_SECURE_BIT;
	assert(i < NUM_SHARES);

	bit_clear(share_bits, i);

	SLIST_FOREACH(tsmem, &mem_shares, link) {
		if (tsmem == smem) {
			SLIST_REMOVE(&mem_shares, smem, sp_mem, link);
			break;
		}
	}

	cpu_spin_unlock_xrestore(&sp_mem_lock, exceptions);

	free(smem);
}