/* SPDX-License-Identifier: BSD-2-Clause */
/*
 * Copyright (c) 2016-2021, Linaro Limited
 * Copyright (c) 2014, STMicroelectronics International N.V.
 */

#ifndef MM_TEE_PAGER_H
#define MM_TEE_PAGER_H

#include <kernel/abort.h>
#include <kernel/panic.h>
#include <kernel/user_ta.h>
#include <mm/core_mmu.h>
#include <mm/tee_mm.h>
#include <string.h>
#include <trace.h>

/*
 * tee_pager_early_init() - Perform early initialization of pager
 *
 * Panics if some error occurs
 */
void tee_pager_early_init(void);

/*
 * tee_pager_get_table_info() - Fills in table info for an address mapped
 * in a translation table managed by the pager
 * @va:		address to look up
 * @ti:		filled in table info
 *
 * Returns true if the address is covered by the pager translation tables,
 * otherwise false
 */
bool tee_pager_get_table_info(vaddr_t va, struct core_mmu_table_info *ti);
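
/*
 * Example (illustrative sketch): check whether @va is handled by the
 * pager at all before inspecting its translation table entry:
 *
 *	struct core_mmu_table_info ti = { };
 *
 *	if (!tee_pager_get_table_info(va, &ti))
 *		return false;	// not a pager-managed address
 */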

/*
 * tee_pager_phys_to_virt() - Translate physical address to virtual address
 * looking in the pager page tables
 * @pa:		address to translate
 * @len:	length that must be mapped linearly within a
 *		CORE_MMU_PGDIR_SIZE range
 *
 * Returns the found virtual address or NULL on error
 */
void *tee_pager_phys_to_virt(paddr_t pa, size_t len);
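
/*
 * Example (illustrative sketch): find the virtual alias of a physical
 * page, falling back when the pager doesn't map it:
 *
 *	void *va = tee_pager_phys_to_virt(pa, SMALL_PAGE_SIZE);
 *
 *	if (!va)
 *		... @pa is not mapped by the pager ...
 */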

/*
 * tee_pager_set_alias_area() - Initialize pager alias area
 * @mm_alias:	The alias area where all physical pages managed by the
 *		pager are aliased
 *
 * Panics if called twice or some other error occurs.
 */
void tee_pager_set_alias_area(tee_mm_entry_t *mm_alias);

/*
 * tee_pager_init_iv_region() - Initializes pager region for tag IVs used
 *				by RW paged fobjs
 * @fobj:	fobj backing the region
 *
 * Panics if called twice or some other error occurs.
 *
 * Returns the virtual address of the start of the IV region.
 */
vaddr_t tee_pager_init_iv_region(struct fobj *fobj);

/*
 * tee_pager_generate_authenc_key() - Generates authenc key for r/w paging
 *
 * Needs to draw random from the RNG, panics if some error occurs.
 */
#ifdef CFG_WITH_PAGER
void tee_pager_generate_authenc_key(void);
#else
static inline void tee_pager_generate_authenc_key(void)
{
}
#endif

/*
 * tee_pager_add_core_region() - Adds a pageable core region
 * @base:	base of covered memory region
 * @type:	type of memory region
 * @fobj:	fobj backing the region
 *
 * Non-page aligned base or size will cause a panic.
 */
void tee_pager_add_core_region(vaddr_t base, enum vm_paged_region_type type,
			       struct fobj *fobj);
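
/*
 * Example (illustrative sketch, assuming a fobj_locked_paged_alloc()
 * allocator and a PAGED_REGION_TYPE_LOCK region type): back a locked
 * pageable region at @va with a freshly allocated fobj:
 *
 *	struct fobj *f = fobj_locked_paged_alloc(num_pages);
 *
 *	if (!f)
 *		panic();
 *	tee_pager_add_core_region(va, PAGED_REGION_TYPE_LOCK, f);
 */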

/*
 * tee_pager_add_um_region() - Adds a pageable user TA region
 * @uctx:	user mode context of the region
 * @base:	base of covered memory region
 * @fobj:	fobj of the store backing the memory region
 * @prot:	TEE_MATTR_U* flags describing initial permissions of the region
 *
 * The mapping is created suitable to initialize the memory content while
 * loading the TA. Once the TA is properly loaded the regions should be
 * finalized with tee_pager_set_um_region_attr() to get more strict
 * settings, see the sketch below.
 *
 * Return TEE_SUCCESS on success, anything else if the region can't be added
 */
#ifdef CFG_PAGED_USER_TA
TEE_Result tee_pager_add_um_region(struct user_mode_ctx *uctx, vaddr_t base,
				   struct fobj *fobj, uint32_t prot);
#else
static inline TEE_Result
tee_pager_add_um_region(struct user_mode_ctx *uctx __unused,
			vaddr_t base __unused, struct fobj *fobj __unused,
			uint32_t prot __unused)
{
	return TEE_ERROR_NOT_SUPPORTED;
}
#endif
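
/*
 * Example (illustrative sketch of the load-then-finalize flow described
 * above): map the region writable while the TA image is loaded, then drop
 * to the final, stricter permissions:
 *
 *	res = tee_pager_add_um_region(uctx, va, f, TEE_MATTR_URW);
 *	if (res)
 *		return res;
 *	... copy and relocate the TA image into the region ...
 *	if (!tee_pager_set_um_region_attr(uctx, va, sz, TEE_MATTR_URX))
 *		return TEE_ERROR_GENERIC;
 */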

/*
 * tee_pager_set_um_region_attr() - Set attributes of an initialized memory
 *				    region
 * @uctx:	user mode context of the region
 * @base:	base of covered memory region
 * @size:	size of covered memory region
 * @flags:	TEE_MATTR_U* flags describing permissions of the region
 *
 * Return true on success or false if the region can't be updated
 */
#ifdef CFG_PAGED_USER_TA
bool tee_pager_set_um_region_attr(struct user_mode_ctx *uctx, vaddr_t base,
				  size_t size, uint32_t flags);
#else
static inline bool
tee_pager_set_um_region_attr(struct user_mode_ctx *uctx __unused,
			     vaddr_t base __unused, size_t size __unused,
			     uint32_t flags __unused)
{
	return false;
}
#endif
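
/*
 * tee_pager_rem_um_region() - Remove a pageable user TA region
 * @uctx:	user mode context of the region
 * @base:	base of covered memory region
 * @size:	size of covered memory region
 */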
#ifdef CFG_PAGED_USER_TA
void tee_pager_rem_um_region(struct user_mode_ctx *uctx, vaddr_t base,
			     size_t size);
#else
static inline void tee_pager_rem_um_region(struct user_mode_ctx *uctx __unused,
					   vaddr_t base __unused,
					   size_t size __unused)
{
}
#endif
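
/*
 * tee_pager_split_um_region() - Split a pageable user TA region at an address
 * @uctx:	user mode context of the region
 * @va:		virtual address to split the region at
 *
 * Return TEE_SUCCESS on success, anything else if the region can't be split
 */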
#ifdef CFG_PAGED_USER_TA
TEE_Result tee_pager_split_um_region(struct user_mode_ctx *uctx, vaddr_t va);
#else
static inline TEE_Result
tee_pager_split_um_region(struct user_mode_ctx *uctx __unused,
			  vaddr_t va __unused)
{
	return TEE_ERROR_NOT_SUPPORTED;
}
#endif
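
/*
 * tee_pager_merge_um_region() - Merge pageable user TA regions covering a
 *				 range, if possible
 * @uctx:	user mode context of the regions
 * @va:		start of the covered address range
 * @len:	length of the covered address range
 */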
#ifdef CFG_PAGED_USER_TA
void tee_pager_merge_um_region(struct user_mode_ctx *uctx, vaddr_t va,
			       size_t len);
#else
static inline void
tee_pager_merge_um_region(struct user_mode_ctx *uctx __unused,
			  vaddr_t va __unused, size_t len __unused)
{
}
#endif

/*
 * tee_pager_rem_um_regions() - Remove all user TA regions
 * @uctx:	user mode context
 *
 * This function is called when a user mode context is torn down.
 */
#ifdef CFG_PAGED_USER_TA
void tee_pager_rem_um_regions(struct user_mode_ctx *uctx);
#else
static inline void tee_pager_rem_um_regions(struct user_mode_ctx *uctx __unused)
{
}
#endif

/*
 * tee_pager_assign_um_tables() - Assigns translation tables to a user TA
 * @uctx:	user mode context
 *
 * This function is called to assign translation tables for the pageable
 * regions of a user TA.
 */
#ifdef CFG_PAGED_USER_TA
void tee_pager_assign_um_tables(struct user_mode_ctx *uctx);
#else
static inline void
tee_pager_assign_um_tables(struct user_mode_ctx *uctx __unused)
{
}
#endif

/*
 * tee_pager_add_pages() - Adds physical pages for the pager to use
 * @vaddr:	first virtual address
 * @npages:	number of pages to add
 * @unmap:	true if the pages should also be unmapped from their current
 *		mapping when handed over to the pager
 *
 * The supplied virtual address range is searched for mapped physical
 * pages; unmapped pages are ignored.
 */
void tee_pager_add_pages(vaddr_t vaddr, size_t npages, bool unmap);
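
/*
 * Example (illustrative sketch): hand over @num_pages already mapped
 * pages, starting at @va, and unmap them from their current location:
 *
 *	tee_pager_add_pages(va, num_pages, true);
 */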

/*
 * tee_pager_alloc() - Allocate read-write virtual memory from pager
 * @size:	size of memory in bytes
 *
 * @return NULL on failure or a pointer to the virtual memory on success.
 */
void *tee_pager_alloc(size_t size);
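
/*
 * Example (illustrative sketch): allocate a page of pager-backed memory;
 * see tee_pager_release_phys() below for returning the physical pages:
 *
 *	uint8_t *buf = tee_pager_alloc(SMALL_PAGE_SIZE);
 *
 *	if (!buf)
 *		panic();
 */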

#ifdef CFG_PAGED_USER_TA
/*
 * tee_pager_pgt_save_and_release_entries() - Save dirty pages to backing store
 * and remove physical page from translation table
 * @pgt:	page table descriptor
 *
 * This function is called when a translation table needs to be recycled
 */
void tee_pager_pgt_save_and_release_entries(struct pgt *pgt);
#endif

/*
 * tee_pager_release_phys() - Release physical pages used for mapping
 * @addr:	virtual address of first page to release
 * @size:	number of bytes to release
 *
 * Only pages completely covered by the supplied range are affected. This
 * function only supplies a hint to the pager that the physical page can be
 * reused. The caller can't expect a released memory range to hold a
 * specific bit pattern when used next time.
 *
 * Note that the virtual memory allocation is still valid after this
 * function has returned, it's just the content that may or may not have
 * changed.
 */
#ifdef CFG_WITH_PAGER
void tee_pager_release_phys(void *addr, size_t size);
#else
static inline void tee_pager_release_phys(void *addr __unused,
					  size_t size __unused)
{
}
#endif
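
/*
 * Example (illustrative, continuing the tee_pager_alloc() sketch above):
 * the virtual range stays valid, but its content is undefined the next
 * time it's used:
 *
 *	tee_pager_release_phys(buf, SMALL_PAGE_SIZE);
 */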

/*
 * Statistics on the pager
 */
struct tee_pager_stats {
	size_t hidden_hits;
	size_t ro_hits;
	size_t rw_hits;
	size_t zi_released;
	size_t npages;		/* number of load pages */
	size_t npages_all;	/* number of pages */
};

#ifdef CFG_WITH_PAGER
void tee_pager_get_stats(struct tee_pager_stats *stats);
bool tee_pager_handle_fault(struct abort_info *ai);
#else /*CFG_WITH_PAGER*/
static inline bool tee_pager_handle_fault(struct abort_info *ai __unused)
{
	return false;
}

static inline void tee_pager_get_stats(struct tee_pager_stats *stats)
{
	memset(stats, 0, sizeof(struct tee_pager_stats));
}
#endif /*CFG_WITH_PAGER*/
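
/*
 * Example (illustrative sketch): an abort handler would typically offer a
 * fault to the pager first and only treat it as a real abort if the pager
 * doesn't handle it:
 *
 *	if (tee_pager_handle_fault(ai))
 *		return;		// paged in, resume execution
 *	... handle the abort ...
 */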
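
/*
 * tee_pager_invalidate_fobj() - Invalidate physical pages backing a fobj
 * @fobj:	fobj going away
 */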
void tee_pager_invalidate_fobj(struct fobj *fobj);

#endif /*MM_TEE_PAGER_H*/