/* SPDX-License-Identifier: BSD-2-Clause */
/*
 * Copyright (c) 2016, Linaro Limited
 */
#ifndef MM_PGT_CACHE_H
#define MM_PGT_CACHE_H

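/*
 * Size of one translation table and how many of them fit in a 4 KiB page:
 * with LPAE each table is a full 4 KiB page, while the short-descriptor
 * format uses 1 KiB level-2 tables so four of them share one page.
 */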
#ifdef CFG_WITH_LPAE
#define PGT_SIZE		(4 * 1024)
#define PGT_NUM_PGT_PER_PAGE	1
#else
#define PGT_SIZE		(1 * 1024)
#define PGT_NUM_PGT_PER_PAGE	4
#endif

#include <assert.h>
#include <kernel/tee_ta_manager.h>
#include <sys/queue.h>
#include <types_ext.h>
#include <util.h>

struct ts_ctx;

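/*
 * One cached translation table: tbl points at the table memory and vabase
 * is the base of the virtual address range it maps. The conditional members
 * carry extra bookkeeping for the paged user TA and pager configurations.
 */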
struct pgt {
	void *tbl;
	vaddr_t vabase;
#if defined(CFG_PAGED_USER_TA)
	struct ts_ctx *ctx;
	size_t num_used_entries;
#endif
#if defined(CFG_WITH_PAGER)
#if !defined(CFG_WITH_LPAE)
	struct pgt_parent *parent;
#endif
#endif
	SLIST_ENTRY(pgt) link;
};

/*
 * A proper value for PGT_CACHE_SIZE depends on many factors: CFG_WITH_LPAE,
 * CFG_TA_ASLR, size of TA, size of memrefs passed to TA, CFG_ULIBS_SHARED and
 * possibly others. The value is based on the number of threads as an indicator
 * of how large the system might be.
 */
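/*
 * For example, with CFG_NUM_THREADS = 3 and the short-descriptor format
 * (PGT_NUM_PGT_PER_PAGE = 4), the fallback branch below evaluates to
 * ROUNDUP(3 * 2, 4) = 8 cached tables, i.e. two pages' worth of 1 KiB tables.
 */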
#if CFG_NUM_THREADS < 2
#define PGT_CACHE_SIZE	4
#elif (CFG_NUM_THREADS == 2 && !defined(CFG_WITH_LPAE))
#define PGT_CACHE_SIZE	8
#else
#define PGT_CACHE_SIZE	ROUNDUP(CFG_NUM_THREADS * 2, PGT_NUM_PGT_PER_PAGE)
#endif

SLIST_HEAD(pgt_cache, pgt);

static inline bool pgt_check_avail(size_t num_tbls)
{
	return num_tbls <= PGT_CACHE_SIZE;
}

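/*
 * Illustrative use only, surrounding names are made up: a caller typically
 * allocates tables covering a virtual address range for a context and later
 * returns them to the cache, e.g.
 *
 *	struct pgt_cache cache = SLIST_HEAD_INITIALIZER(cache);
 *
 *	pgt_alloc(&cache, ctx, va_begin, va_last);
 *	...map and use the range...
 *	pgt_free(&cache, true);
 */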
void pgt_alloc(struct pgt_cache *pgt_cache, struct ts_ctx *owning_ctx,
	       vaddr_t begin, vaddr_t last);
void pgt_free(struct pgt_cache *pgt_cache, bool save_ctx);

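/*
 * As the names suggest, these operate on the cached tables covering a
 * virtual address range for a given context: pgt_clear_ctx_range() clears
 * the affected entries, while pgt_flush_ctx_range() is only meaningful when
 * user TA paging is enabled (it is a no-op otherwise, see below).
 */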
void pgt_clear_ctx_range(struct pgt_cache *pgt_cache, struct ts_ctx *ctx,
			 vaddr_t begin, vaddr_t end);
#ifdef CFG_PAGED_USER_TA
void pgt_flush_ctx_range(struct pgt_cache *pgt_cache, struct ts_ctx *ctx,
			 vaddr_t begin, vaddr_t last);
#else
static inline void pgt_flush_ctx_range(struct pgt_cache *pgt_cache __unused,
				       struct ts_ctx *ctx __unused,
				       vaddr_t begin __unused,
				       vaddr_t last __unused)
{
}
#endif

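/* Sets up the backing table pool; expected to be called once during init. */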
void pgt_init(void);

#if defined(CFG_PAGED_USER_TA)
void pgt_flush_ctx(struct ts_ctx *ctx);

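/*
 * With CFG_PAGED_USER_TA each struct pgt tracks how many of its entries are
 * in use so that tables with no used entries can be released. The helpers
 * below maintain that counter and compile to no-ops in the non-paged
 * configuration further down.
 */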
static inline void pgt_inc_used_entries(struct pgt *pgt)
{
	pgt->num_used_entries++;
	assert(pgt->num_used_entries);
}

static inline void pgt_dec_used_entries(struct pgt *pgt)
{
	assert(pgt->num_used_entries);
	pgt->num_used_entries--;
}

static inline void pgt_set_used_entries(struct pgt *pgt, size_t val)
{
	pgt->num_used_entries = val;
}

#else
static inline void pgt_flush_ctx(struct ts_ctx *ctx __unused)
{
}

static inline void pgt_inc_used_entries(struct pgt *pgt __unused)
{
}

static inline void pgt_dec_used_entries(struct pgt *pgt __unused)
{
}

static inline void pgt_set_used_entries(struct pgt *pgt __unused,
					size_t val __unused)
{
}

#endif

#endif /*MM_PGT_CACHE_H*/