1 /* SPDX-License-Identifier: BSD-2-Clause */
2 /*
3  * Copyright (c) 2016-2017, Linaro Limited
4  */
5 
6 #ifndef __MM_MOBJ_H
7 #define __MM_MOBJ_H
8 
9 #include <compiler.h>
10 #include <mm/core_memprot.h>
11 #include <mm/file.h>
12 #include <mm/fobj.h>
13 #include <string_ext.h>
14 #include <sys/queue.h>
15 #include <tee_api_types.h>
16 #include <types_ext.h>
17 
18 #include <optee_msg.h>
19 
/*
 * struct mobj - memory object
 * @ops:		operation callbacks, may be NULL for hardcoded objects
 * @size:		size of the object in bytes
 * @phys_granule:	physical granularity; 0 means @size is used instead
 *			(see mobj_get_phys_granule())
 * @refc:		reference counter; the object is freed via
 *			ops->free() when it reaches 0 (see mobj_put())
 */
struct mobj {
	const struct mobj_ops *ops;
	size_t size;
	size_t phys_granule;
	struct refcount refc;
};
26 
/*
 * struct mobj_ops - memory object operation callbacks
 *
 * Any callback may be NULL; the corresponding mobj_*() wrapper below
 * checks for this and falls back to a default return value.
 */
struct mobj_ops {
	/* Return VA of @offs into the object, @len bytes must be valid */
	void *(*get_va)(struct mobj *mobj, size_t offs, size_t len);
	/* Return PA of @offs at @granule granularity via @pa */
	TEE_Result (*get_pa)(struct mobj *mobj, size_t offs, size_t granule,
			     paddr_t *pa);
	/* Offset into the first physical granule — presumably; see impls */
	size_t (*get_phys_offs)(struct mobj *mobj, size_t granule);
	/* Retrieve cache attributes via @cattr */
	TEE_Result (*get_cattr)(struct mobj *mobj, uint32_t *cattr);
	/* Test the object against a buf_is_attr attribute */
	bool (*matches)(struct mobj *mobj, enum buf_is_attr attr);
	/* Free the object, called when the reference counter reaches 0 */
	void (*free)(struct mobj *mobj);
	/* Return the cookie identifying the object to normal world */
	uint64_t (*get_cookie)(struct mobj *mobj);
	/* Return the backing fobj, if any */
	struct fobj *(*get_fobj)(struct mobj *mobj);
	/* Map the object / increase map count, see mobj_inc_map() */
	TEE_Result (*inc_map)(struct mobj *mobj);
	/* Decrease map count / unmap, see mobj_dec_map() */
	TEE_Result (*dec_map)(struct mobj *mobj);
};
40 
41 extern struct mobj mobj_virt;
42 extern struct mobj *mobj_sec_ddr;
43 extern struct mobj *mobj_tee_ram_rx;
44 extern struct mobj *mobj_tee_ram_rw;
45 
46 /*
47  * mobj_get_va() - get virtual address of a mapped mobj
48  * @mobj:	memory object
49  * @offset:	find the va of this offset into @mobj
50  * @len:	how many bytes after @offset that must be valid, can be 1 if
51  *		the caller knows by other means that the expected buffer is
52  *		available.
53  *
54  * return a virtual address on success or NULL on error
55  */
mobj_get_va(struct mobj * mobj,size_t offset,size_t len)56 static inline void *mobj_get_va(struct mobj *mobj, size_t offset, size_t len)
57 {
58 	if (mobj && mobj->ops && mobj->ops->get_va)
59 		return mobj->ops->get_va(mobj, offset, len);
60 	return NULL;
61 }
62 
mobj_get_pa(struct mobj * mobj,size_t offs,size_t granule,paddr_t * pa)63 static inline TEE_Result mobj_get_pa(struct mobj *mobj, size_t offs,
64 				     size_t granule, paddr_t *pa)
65 {
66 	if (mobj && mobj->ops && mobj->ops->get_pa)
67 		return mobj->ops->get_pa(mobj, offs, granule, pa);
68 	return TEE_ERROR_GENERIC;
69 }
70 
mobj_get_phys_offs(struct mobj * mobj,size_t granule)71 static inline size_t mobj_get_phys_offs(struct mobj *mobj, size_t granule)
72 {
73 	if (mobj && mobj->ops && mobj->ops->get_phys_offs)
74 		return mobj->ops->get_phys_offs(mobj, granule);
75 	return 0;
76 }
77 
mobj_get_cattr(struct mobj * mobj,uint32_t * cattr)78 static inline TEE_Result mobj_get_cattr(struct mobj *mobj, uint32_t *cattr)
79 {
80 	if (mobj && mobj->ops && mobj->ops->get_cattr)
81 		return mobj->ops->get_cattr(mobj, cattr);
82 	return TEE_ERROR_GENERIC;
83 }
84 
mobj_matches(struct mobj * mobj,enum buf_is_attr attr)85 static inline bool mobj_matches(struct mobj *mobj, enum buf_is_attr attr)
86 {
87 	if (mobj && mobj->ops && mobj->ops->matches)
88 		return mobj->ops->matches(mobj, attr);
89 	return false;
90 }
91 
92 /**
93  * mobj_inc_map() - increase map count
94  * @mobj:	pointer to a MOBJ
95  *
96  * Maps the MOBJ if it isn't mapped already and increases the map count
97  * Each call to mobj_inc_map() is supposed to be matches by a call to
98  * mobj_dec_map().
99  *
100  * Returns TEE_SUCCESS on success or an error code on failure
101  */
mobj_inc_map(struct mobj * mobj)102 static inline TEE_Result mobj_inc_map(struct mobj *mobj)
103 {
104 	if (mobj && mobj->ops) {
105 		if (mobj->ops->inc_map)
106 			return mobj->ops->inc_map(mobj);
107 		return TEE_SUCCESS;
108 	}
109 	return TEE_ERROR_GENERIC;
110 }
111 
112 /**
113  * mobj_dec_map() - decrease map count
114  * @mobj:	pointer to a MOBJ
115  *
116  * Decreases the map count and also unmaps the MOBJ if the map count
117  * reaches 0.  Each call to mobj_inc_map() is supposed to be matched by a
118  * call to mobj_dec_map().
119  *
120  * Returns TEE_SUCCESS on success or an error code on failure
121  */
mobj_dec_map(struct mobj * mobj)122 static inline TEE_Result mobj_dec_map(struct mobj *mobj)
123 {
124 	if (mobj && mobj->ops) {
125 		if (mobj->ops->dec_map)
126 			return mobj->ops->dec_map(mobj);
127 		return TEE_SUCCESS;
128 	}
129 	return TEE_ERROR_GENERIC;
130 }
131 
132 /**
133  * mobj_get() - get a MOBJ
134  * @mobj:	Pointer to a MOBJ or NULL
135  *
136  * Increases reference counter of the @mobj
137  *
138  * Returns @mobj with reference counter increased or NULL if @mobj was NULL
139  */
mobj_get(struct mobj * mobj)140 static inline struct mobj *mobj_get(struct mobj *mobj)
141 {
142 	if (mobj && !refcount_inc(&mobj->refc))
143 		panic();
144 
145 	return mobj;
146 }
147 
148 /**
149  * mobj_put() - put a MOBJ
150  * @mobj:	Pointer to a MOBJ or NULL
151  *
152  * Decreases reference counter of the @mobj and frees it if the counter
153  * reaches 0.
154  */
mobj_put(struct mobj * mobj)155 static inline void mobj_put(struct mobj *mobj)
156 {
157 	if (mobj && refcount_dec(&mobj->refc))
158 		mobj->ops->free(mobj);
159 }
160 
161 /**
162  * mobj_put_wipe() - wipe and put a MOBJ
163  * @mobj:	Pointer to a MOBJ or NULL
164  *
165  * Clears the memory represented by the mobj and then puts it.
166  */
mobj_put_wipe(struct mobj * mobj)167 static inline void mobj_put_wipe(struct mobj *mobj)
168 {
169 	if (mobj) {
170 		void *buf = mobj_get_va(mobj, 0, mobj->size);
171 
172 		if (buf)
173 			memzero_explicit(buf, mobj->size);
174 		mobj_put(mobj);
175 	}
176 }
177 
mobj_get_cookie(struct mobj * mobj)178 static inline uint64_t mobj_get_cookie(struct mobj *mobj)
179 {
180 	if (mobj && mobj->ops && mobj->ops->get_cookie)
181 		return mobj->ops->get_cookie(mobj);
182 
183 #if defined(CFG_CORE_SEL1_SPMC) || defined(CFG_CORE_SEL2_SPMC)
184 	return OPTEE_MSG_FMEM_INVALID_GLOBAL_ID;
185 #else
186 	return 0;
187 #endif
188 }
189 
mobj_get_fobj(struct mobj * mobj)190 static inline struct fobj *mobj_get_fobj(struct mobj *mobj)
191 {
192 	if (mobj && mobj->ops && mobj->ops->get_fobj)
193 		return mobj->ops->get_fobj(mobj);
194 
195 	return NULL;
196 }
197 
/* Returns true if @mobj matches CORE_MEM_NON_SEC, false otherwise */
static inline bool mobj_is_nonsec(struct mobj *mobj)
{
	return mobj_matches(mobj, CORE_MEM_NON_SEC);
}
202 
/* Returns true if @mobj matches CORE_MEM_SEC, false otherwise */
static inline bool mobj_is_secure(struct mobj *mobj)
{
	return mobj_matches(mobj, CORE_MEM_SEC);
}
207 
/* Returns true if @mobj matches CORE_MEM_SDP_MEM, false otherwise */
static inline bool mobj_is_sdp_mem(struct mobj *mobj)
{
	return mobj_matches(mobj, CORE_MEM_SDP_MEM);
}
212 
mobj_get_phys_granule(struct mobj * mobj)213 static inline size_t mobj_get_phys_granule(struct mobj *mobj)
214 {
215 	if (mobj->phys_granule)
216 		return mobj->phys_granule;
217 	return mobj->size;
218 }
219 
mobj_check_offset_and_len(struct mobj * mobj,size_t offset,size_t len)220 static inline bool mobj_check_offset_and_len(struct mobj *mobj, size_t offset,
221 					     size_t len)
222 {
223 	size_t end_offs = 0;
224 
225 	return len && !ADD_OVERFLOW(offset, len - 1, &end_offs) &&
226 	       end_offs < mobj->size;
227 }
228 
229 struct mobj *mobj_mm_alloc(struct mobj *mobj_parent, size_t size,
230 			   tee_mm_pool_t *pool);
231 
232 struct mobj *mobj_phys_alloc(paddr_t pa, size_t size, uint32_t cattr,
233 			     enum buf_is_attr battr);
234 
235 #if defined(CFG_CORE_FFA)
236 struct mobj *mobj_ffa_get_by_cookie(uint64_t cookie,
237 				    unsigned int internal_offs);
238 
239 TEE_Result mobj_ffa_unregister_by_cookie(uint64_t cookie);
240 
241 /* Functions for SPMC */
242 #ifdef CFG_CORE_SEL1_SPMC
243 struct mobj_ffa *mobj_ffa_sel1_spmc_new(unsigned int num_pages);
244 void mobj_ffa_sel1_spmc_delete(struct mobj_ffa *mobj);
245 TEE_Result mobj_ffa_sel1_spmc_reclaim(uint64_t cookie);
246 #endif
247 #ifdef CFG_CORE_SEL2_SPMC
248 struct mobj_ffa *mobj_ffa_sel2_spmc_new(uint64_t cookie,
249 					unsigned int num_pages);
250 void mobj_ffa_sel2_spmc_delete(struct mobj_ffa *mobj);
251 #endif
252 
253 uint64_t mobj_ffa_get_cookie(struct mobj_ffa *mobj);
254 TEE_Result mobj_ffa_add_pages_at(struct mobj_ffa *mobj, unsigned int *idx,
255 				 paddr_t pa, unsigned int num_pages);
256 uint64_t mobj_ffa_push_to_inactive(struct mobj_ffa *mobj);
257 
258 #elif defined(CFG_CORE_DYN_SHM)
259 /* reg_shm represents TEE shared memory */
260 struct mobj *mobj_reg_shm_alloc(paddr_t *pages, size_t num_pages,
261 				paddr_t page_offset, uint64_t cookie);
262 
263 /**
264  * mobj_reg_shm_get_by_cookie() - get a MOBJ based on cookie
 * @cookie:	Cookie used by normal world when supplying the shared memory
266  *
267  * Searches for a registered shared memory MOBJ and if one with a matching
268  * @cookie is found its reference counter is increased before returning
269  * the MOBJ.
270  *
271  * Returns a valid pointer on success or NULL on failure.
272  */
273 struct mobj *mobj_reg_shm_get_by_cookie(uint64_t cookie);
274 
275 TEE_Result mobj_reg_shm_release_by_cookie(uint64_t cookie);
276 
277 /**
278  * mobj_reg_shm_unguard() - unguards a reg_shm
279  * @mobj:	pointer to a registered shared memory mobj
280  *
281  * A registered shared memory mobj is normally guarded against being
282  * released with mobj_reg_shm_try_release_by_cookie(). After this function
283  * has returned the mobj can be released by a call to
284  * mobj_reg_shm_try_release_by_cookie() if the reference counter allows it.
285  */
286 void mobj_reg_shm_unguard(struct mobj *mobj);
287 
288 /*
289  * mapped_shm represents registered shared buffer
290  * which is mapped into OPTEE va space
291  */
292 struct mobj *mobj_mapped_shm_alloc(paddr_t *pages, size_t num_pages,
293 				   paddr_t page_offset, uint64_t cookie);
294 #endif /*CFG_CORE_DYN_SHM*/
295 
296 #if !defined(CFG_CORE_DYN_SHM)
/* Stub when dynamic shared memory is disabled: allocation always fails */
static inline struct mobj *mobj_mapped_shm_alloc(paddr_t *pages __unused,
						 size_t num_pages __unused,
						 paddr_t page_offset __unused,
						 uint64_t cookie __unused)
{
	return NULL;
}
304 #endif
305 
306 struct mobj *mobj_shm_alloc(paddr_t pa, size_t size, uint64_t cookie);
307 
308 #ifdef CFG_PAGED_USER_TA
309 bool mobj_is_paged(struct mobj *mobj);
310 #else
/* Stub when CFG_PAGED_USER_TA is disabled: no mobj is ever paged */
static inline bool mobj_is_paged(struct mobj *mobj __unused)
{
	return false;
}
315 #endif
316 
317 struct mobj *mobj_seccpy_shm_alloc(size_t size);
318 
319 struct mobj *mobj_with_fobj_alloc(struct fobj *fobj, struct file *file);
320 
321 #endif /*__MM_MOBJ_H*/
322