#ifndef __ARM_LPAE_H__
#define __ARM_LPAE_H__

#ifndef __ASSEMBLY__

#include <xen/page-defs.h>

/*
 * WARNING!  Unlike the x86 pagetable code, where l1 is the lowest level and
 * l4 is the root of the trie, the ARM pagetables follow ARM's documentation:
 * the levels are called first, second &c in the order that the MMU walks them
 * (i.e. "first" is the root of the trie).
 */

/******************************************************************************
 * ARMv7-A LPAE pagetables: 3-level trie, mapping 40-bit input to
 * 40-bit output addresses.  Tables at all levels have 512 64-bit entries
 * (i.e. are 4KB long).
 *
 * The bit-shuffling that has the permission bits in branch nodes in a
 * different place from those in leaf nodes seems to be to allow linear
 * pagetable tricks.  If we're not doing that then the set of permission
 * bits that's not in use in a given node type can be used as
 * extra software-defined bits.
 */

typedef struct __packed {
    /* These are used in all kinds of entry. */
    unsigned long valid:1;      /* Valid mapping */
    unsigned long table:1;      /* == 1 in 4k map entries too */

    /*
     * These ten bits are only used in Block entries and are ignored
     * in Table entries.
     */
    unsigned long ai:3;         /* Attribute Index */
    unsigned long ns:1;         /* Not-Secure */
    unsigned long up:1;         /* Unprivileged access */
    unsigned long ro:1;         /* Read-Only */
    unsigned long sh:2;         /* Shareability */
    unsigned long af:1;         /* Access Flag */
    unsigned long ng:1;         /* Not-Global */

    /* The base address must be appropriately aligned for Block entries */
    unsigned long long base:36; /* Base address of block or next table */
    unsigned long sbz:4;        /* Must be zero */

    /*
     * These seven bits are only used in Block entries and are ignored
     * in Table entries.
     */
    unsigned long contig:1;     /* In a block of 16 contiguous entries */
    unsigned long pxn:1;        /* Privileged-XN */
    unsigned long xn:1;         /* eXecute-Never */
    unsigned long avail:4;      /* Ignored by hardware */

    /*
     * These 5 bits are only used in Table entries and are ignored in
     * Block entries.
     */
    unsigned long pxnt:1;       /* Privileged-XN */
    unsigned long xnt:1;        /* eXecute-Never */
    unsigned long apt:2;        /* Access Permissions */
    unsigned long nst:1;        /* Not-Secure */
} lpae_pt_t;

/*
 * The p2m tables have almost the same layout, but some of the permission
 * and cache-control bits are laid out differently (or missing).
 */
typedef struct __packed {
    /* These are used in all kinds of entry. */
    unsigned long valid:1;      /* Valid mapping */
    unsigned long table:1;      /* == 1 in 4k map entries too */

    /*
     * These ten bits are only used in Block entries and are ignored
     * in Table entries.
     */
    unsigned long mattr:4;      /* Memory Attributes */
    unsigned long read:1;       /* Read access */
    unsigned long write:1;      /* Write access */
    unsigned long sh:2;         /* Shareability */
    unsigned long af:1;         /* Access Flag */
    unsigned long sbz4:1;

    /* The base address must be appropriately aligned for Block entries */
    unsigned long long base:36; /* Base address of block or next table */
    unsigned long sbz3:4;

    /*
     * These seven bits are only used in Block entries and are ignored
     * in Table entries.
     */
    unsigned long contig:1;     /* In a block of 16 contiguous entries */
    unsigned long sbz2:1;
    unsigned long xn:1;         /* eXecute-Never */
    unsigned long type:4;       /* Ignored by hardware. Used to store p2m types */

    unsigned long sbz1:5;
} lpae_p2m_t;

/* Permission mask: xn, write, read */
#define P2M_PERM_MASK (0x00400000000000C0ULL)
#define P2M_CLEAR_PERM(pte) ((pte).bits & ~P2M_PERM_MASK)
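
/*
 * Sketch of intended use (illustrative only; the surrounding code and the
 * read-only policy shown are assumptions, not part of this header):
 *
 *     lpae_t pte = ...;                   // an existing p2m entry
 *
 *     pte.bits = P2M_CLEAR_PERM(pte);     // clears xn (bit 54), write (bit 7)
 *                                         // and read (bit 6)
 *     pte.p2m.read = 1;                   // then grant read-only access
 */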

/*
 * Walk is the common bits of p2m and pt entries which are needed to
 * simply walk the table (e.g. for debug).
 */
typedef struct __packed {
    /* These are used in all kinds of entry. */
    unsigned long valid:1;      /* Valid mapping */
    unsigned long table:1;      /* == 1 in 4k map entries too */

    unsigned long pad2:10;

    /* The base address must be appropriately aligned for Block entries */
    unsigned long long base:36; /* Base address of block or next table */

    unsigned long pad1:16;
} lpae_walk_t;

typedef union {
    uint64_t bits;
    lpae_pt_t pt;
    lpae_p2m_t p2m;
    lpae_walk_t walk;
} lpae_t;
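
/*
 * Illustrative sketch (not part of the interface): the same 64-bit
 * descriptor can be built or inspected through any view of the union.
 * For example, a minimal valid table entry could be set up as:
 *
 *     lpae_t e = { .bits = 0 };
 *
 *     e.pt.valid = 1;                 // bit 0: valid
 *     e.pt.table = 1;                 // bit 1: points to a next-level table
 *     e.pt.base  = next_table_mfn;    // descriptor bits [47:12];
 *                                     // next_table_mfn is hypothetical
 *
 *     // The generic walk view sees exactly the same bits:
 *     //   e.walk.valid == 1, e.walk.table == 1, e.walk.base == next_table_mfn
 */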

static inline bool lpae_is_valid(lpae_t pte)
{
    return pte.walk.valid;
}

/*
 * The lpae_is_* helpers below don't check the valid bit. This gives the
 * callers an opportunity to operate on an entry even when it is not
 * valid, for instance to store information in advance.
 */
static inline bool lpae_is_table(lpae_t pte, unsigned int level)
{
    return (level < 3) && pte.walk.table;
}

static inline bool lpae_is_mapping(lpae_t pte, unsigned int level)
{
    if ( level == 3 )
        return pte.walk.table;
    else
        return !pte.walk.table;
}

static inline bool lpae_is_superpage(lpae_t pte, unsigned int level)
{
    return (level < 3) && lpae_is_mapping(pte, level);
}
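
/*
 * Worked example (descriptive only, assuming the 4KB granule used by the
 * definitions at the end of this header): a valid level-1 or level-2 entry
 * with the table bit clear is a block mapping of 1GB or 2MB respectively,
 * so lpae_is_superpage() returns true for it.  At level 3 the table bit is
 * set for a page mapping, so lpae_is_mapping(pte, 3) is true while
 * lpae_is_table(pte, 3) is always false.
 */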

#define lpae_get_mfn(pte)    (_mfn((pte).walk.base))
#define lpae_set_mfn(pte, mfn)  ((pte).walk.base = mfn_x(mfn))

/*
 * AArch64 supports pages with different sizes (4K, 16K, and 64K). To
 * support page table walks for the various configurations, the following
 * helpers allow the translation tables to be walked with different granule
 * sizes.
 */

#define LPAE_SHIFT_4K           (9)
#define LPAE_SHIFT_16K          (11)
#define LPAE_SHIFT_64K          (13)

#define lpae_entries(gran)      (_AC(1,U) << LPAE_SHIFT_##gran)
#define lpae_entry_mask(gran)   (lpae_entries(gran) - 1)

#define third_shift(gran)       (PAGE_SHIFT_##gran)
#define third_size(gran)        ((paddr_t)1 << third_shift(gran))

#define second_shift(gran)      (third_shift(gran) + LPAE_SHIFT_##gran)
#define second_size(gran)       ((paddr_t)1 << second_shift(gran))

#define first_shift(gran)       (second_shift(gran) + LPAE_SHIFT_##gran)
#define first_size(gran)        ((paddr_t)1 << first_shift(gran))

/* Note that there is no zeroeth lookup level with a 64K granule size. */
#define zeroeth_shift(gran)     (first_shift(gran) + LPAE_SHIFT_##gran)
#define zeroeth_size(gran)      ((paddr_t)1 << zeroeth_shift(gran))
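
/*
 * Worked example (follows directly from the definitions above): the mapping
 * size covered by one entry at each level, for each granule.
 *
 *   Granule | third_size | second_size | first_size | zeroeth_size
 *   --------+------------+-------------+------------+--------------------
 *     4K    |    4KB     |     2MB     |    1GB     |       512GB
 *     16K   |    16KB    |     32MB    |    64GB    |       128TB
 *     64K   |    64KB    |     512MB   |    4TB     | (no zeroeth level)
 */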

#define TABLE_OFFSET(offs, gran)      (offs & lpae_entry_mask(gran))
#define TABLE_OFFSET_HELPERS(gran)                                          \
static inline paddr_t third_table_offset_##gran##K(paddr_t va)              \
{                                                                           \
    return TABLE_OFFSET((va >> third_shift(gran##K)), gran##K);             \
}                                                                           \
                                                                            \
static inline paddr_t second_table_offset_##gran##K(paddr_t va)             \
{                                                                           \
    return TABLE_OFFSET((va >> second_shift(gran##K)), gran##K);            \
}                                                                           \
                                                                            \
static inline paddr_t first_table_offset_##gran##K(paddr_t va)              \
{                                                                           \
    return TABLE_OFFSET((va >> first_shift(gran##K)), gran##K);             \
}                                                                           \
                                                                            \
static inline paddr_t zeroeth_table_offset_##gran##K(paddr_t va)            \
{                                                                           \
    /* Note that there is no zeroeth lookup level with 64K granule sizes. */\
    if ( gran == 64 )                                                       \
        return 0;                                                           \
    else                                                                    \
        return TABLE_OFFSET((va >> zeroeth_shift(gran##K)), gran##K);       \
}                                                                           \

TABLE_OFFSET_HELPERS(4);
TABLE_OFFSET_HELPERS(16);
TABLE_OFFSET_HELPERS(64);

#undef TABLE_OFFSET
#undef TABLE_OFFSET_HELPERS

/* Generate an array @var containing the offset for each level from @addr */
#define DECLARE_OFFSETS(var, addr)          \
    const unsigned int var[4] = {           \
        zeroeth_table_offset(addr),         \
        first_table_offset(addr),           \
        second_table_offset(addr),          \
        third_table_offset(addr)            \
    }
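
/*
 * Sketch of intended use (illustrative only; "table", "root_level" and
 * map_table() are hypothetical names, not defined by this header):
 *
 *     DECLARE_OFFSETS(offsets, addr);
 *
 *     for ( level = root_level; level <= 3; level++ )
 *     {
 *         lpae_t pte = table[offsets[level]];
 *
 *         if ( !lpae_is_valid(pte) || !lpae_is_table(pte, level) )
 *             break;                       // reached a block/page or a hole
 *         table = map_table(lpae_get_mfn(pte));
 *     }
 */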

#endif /* __ASSEMBLY__ */

/*
 * These numbers add up to a 48-bit input address space.
 *
 * On 32-bit, the zeroeth level does not exist, so the total is 39 bits.
 * The ARMv7-A architecture actually specifies a 40-bit input address
 * space for the p2m, with an 8K (1024-entry) top-level table.  However,
 * Xen only supports 16GB of RAM on 32-bit ARM systems, so 39 bits are
 * sufficient.
 */

#define LPAE_SHIFT      9
#define LPAE_ENTRIES    (_AC(1,U) << LPAE_SHIFT)
#define LPAE_ENTRY_MASK (LPAE_ENTRIES - 1)

#define THIRD_SHIFT    (PAGE_SHIFT)
#define THIRD_ORDER    (THIRD_SHIFT - PAGE_SHIFT)
#define THIRD_SIZE     (_AT(paddr_t, 1) << THIRD_SHIFT)
#define THIRD_MASK     (~(THIRD_SIZE - 1))
#define SECOND_SHIFT   (THIRD_SHIFT + LPAE_SHIFT)
#define SECOND_ORDER   (SECOND_SHIFT - PAGE_SHIFT)
#define SECOND_SIZE    (_AT(paddr_t, 1) << SECOND_SHIFT)
#define SECOND_MASK    (~(SECOND_SIZE - 1))
#define FIRST_SHIFT    (SECOND_SHIFT + LPAE_SHIFT)
#define FIRST_ORDER    (FIRST_SHIFT - PAGE_SHIFT)
#define FIRST_SIZE     (_AT(paddr_t, 1) << FIRST_SHIFT)
#define FIRST_MASK     (~(FIRST_SIZE - 1))
#define ZEROETH_SHIFT  (FIRST_SHIFT + LPAE_SHIFT)
#define ZEROETH_ORDER  (ZEROETH_SHIFT - PAGE_SHIFT)
#define ZEROETH_SIZE   (_AT(paddr_t, 1) << ZEROETH_SHIFT)
#define ZEROETH_MASK   (~(ZEROETH_SIZE - 1))
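
/*
 * Worked example (assuming the usual PAGE_SHIFT of 12, i.e. 4KB pages):
 * each level resolves LPAE_SHIFT == 9 bits of input address, so
 *
 *   THIRD_SIZE   = 1 << 12 =   4KB  (page)
 *   SECOND_SIZE  = 1 << 21 =   2MB  (level-2 block)
 *   FIRST_SIZE   = 1 << 30 =   1GB  (level-1 block)
 *   ZEROETH_SIZE = 1 << 39 = 512GB
 *
 * giving 12 + 4 * 9 = 48 bits of input address in total, or 12 + 3 * 9 = 39
 * bits on 32-bit where the zeroeth level is absent.
 */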

/* Calculate the offsets into the pagetables for a given VA */
#define zeroeth_linear_offset(va) ((va) >> ZEROETH_SHIFT)
#define first_linear_offset(va) ((va) >> FIRST_SHIFT)
#define second_linear_offset(va) ((va) >> SECOND_SHIFT)
#define third_linear_offset(va) ((va) >> THIRD_SHIFT)

#define TABLE_OFFSET(offs) (_AT(unsigned int, offs) & LPAE_ENTRY_MASK)
#define first_table_offset(va)  TABLE_OFFSET(first_linear_offset(va))
#define second_table_offset(va) TABLE_OFFSET(second_linear_offset(va))
#define third_table_offset(va)  TABLE_OFFSET(third_linear_offset(va))
#define zeroeth_table_offset(va)  TABLE_OFFSET(zeroeth_linear_offset(va))

#endif /* __ARM_LPAE_H__ */

/*
 * Local variables:
 * mode: C
 * c-file-style: "BSD"
 * c-basic-offset: 4
 * tab-width: 4
 * indent-tabs-mode: nil
 * End:
 */