#ifndef __ARM_PAGE_H__
#define __ARM_PAGE_H__

#include <public/xen.h>
#include <asm/processor.h>
#include <asm/lpae.h>
#include <asm/sysregs.h>

#ifdef CONFIG_ARM_64
#define PADDR_BITS              48
#else
#define PADDR_BITS              40
#endif
#define PADDR_MASK              ((1ULL << PADDR_BITS)-1)
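
/*
 * For example, with PADDR_BITS == 48 (ARM_64) PADDR_MASK evaluates to
 * 0x0000ffffffffffff, while with PADDR_BITS == 40 (ARM_32 with LPAE) it
 * evaluates to 0x000000ffffffffff.
 */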
#define PAGE_OFFSET(ptr)        ((vaddr_t)(ptr) & ~PAGE_MASK)

#define VADDR_BITS              32
#define VADDR_MASK              (~0UL)

/* Shareability values for the LPAE entries */
#define LPAE_SH_NON_SHAREABLE 0x0
#define LPAE_SH_UNPREDICTABLE 0x1
#define LPAE_SH_OUTER         0x2
#define LPAE_SH_INNER         0x3

/*
 * Attribute Indexes.
 *
 * These are valid in the AttrIndx[2:0] field of an LPAE stage 1 page
 * table entry. They are indexes into the bytes of the MAIR*
 * registers, as defined below.
 *
 */
#define MT_DEVICE_nGnRnE 0x0
#define MT_NORMAL_NC     0x1
#define MT_NORMAL_WT     0x2
#define MT_NORMAL_WB     0x3
#define MT_DEVICE_nGnRE  0x4
#define MT_NORMAL        0x7

/*
 * LPAE Memory region attributes. Indexed by the AttrIndex bits of a
 * LPAE entry; the 8-bit fields are packed little-endian into MAIR0 and MAIR1.
 *
 * See section "Device memory" B2.7.2 in ARM DDI 0487B.a for more
 * details about the meaning of *G*R*E.
 *
 *                    ai    encoding
 *   MT_DEVICE_nGnRnE 000   0000 0000  -- Strongly Ordered/Device nGnRnE
 *   MT_NORMAL_NC     001   0100 0100  -- Non-Cacheable
 *   MT_NORMAL_WT     010   1010 1010  -- Write-through
 *   MT_NORMAL_WB     011   1110 1110  -- Write-back
 *   MT_DEVICE_nGnRE  100   0000 0100  -- Device nGnRE
 *   ??               101
 *   reserved         110
 *   MT_NORMAL        111   1111 1111  -- Write-back write-allocate
 *
 * /!\ It is not possible to define MAIRVAL as a single combined value and
 * then split it into MAIR0VAL/MAIR1VAL, because that would produce a 64-bit
 * value which some assemblers cannot handle.
 */
#define _MAIR0(attr, mt) (_AC(attr, ULL) << ((mt) * 8))
#define _MAIR1(attr, mt) (_AC(attr, ULL) << (((mt) * 8) - 32))

#define MAIR0VAL (_MAIR0(0x00, MT_DEVICE_nGnRnE)| \
                  _MAIR0(0x44, MT_NORMAL_NC)    | \
                  _MAIR0(0xaa, MT_NORMAL_WT)    | \
                  _MAIR0(0xee, MT_NORMAL_WB))

#define MAIR1VAL (_MAIR1(0x04, MT_DEVICE_nGnRE) | \
                  _MAIR1(0xff, MT_NORMAL))

#define MAIRVAL (MAIR1VAL << 32 | MAIR0VAL)
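
/*
 * A quick sanity check of the packing above: each attribute occupies byte
 * AttrIndx of the combined 64-bit MAIR value, so the macros expand to
 *
 *   MAIR0VAL == 0xeeaa4400          (attribute indexes 0-3)
 *   MAIR1VAL == 0xff000004          (attribute indexes 4-7, shifted down 32)
 *   MAIRVAL  == 0xff000004eeaa4400
 */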

/*
 * Layout of the flags used for updating the hypervisor page tables
 *
 * [0:2] Memory Attribute Index
 * [3:4] Permission flags
 * [5]   Page present
 * [6]   Only populate page tables
 */
#define PAGE_AI_MASK(x) ((x) & 0x7U)

#define _PAGE_XN_BIT    3
#define _PAGE_RO_BIT    4
#define _PAGE_XN    (1U << _PAGE_XN_BIT)
#define _PAGE_RO    (1U << _PAGE_RO_BIT)
#define PAGE_XN_MASK(x) (((x) >> _PAGE_XN_BIT) & 0x1U)
#define PAGE_RO_MASK(x) (((x) >> _PAGE_RO_BIT) & 0x1U)

#define _PAGE_PRESENT    (1U << 5)
#define _PAGE_POPULATE   (1U << 6)

/*
 * _PAGE_DEVICE and _PAGE_NORMAL are convenience defines. They are not
 * meant to be used outside of this header.
 */
#define _PAGE_DEVICE    (_PAGE_XN|_PAGE_PRESENT)
#define _PAGE_NORMAL    (MT_NORMAL|_PAGE_PRESENT)

#define PAGE_HYPERVISOR_RO      (_PAGE_NORMAL|_PAGE_RO|_PAGE_XN)
#define PAGE_HYPERVISOR_RX      (_PAGE_NORMAL|_PAGE_RO)
#define PAGE_HYPERVISOR_RW      (_PAGE_NORMAL|_PAGE_XN)

#define PAGE_HYPERVISOR         PAGE_HYPERVISOR_RW
#define PAGE_HYPERVISOR_NOCACHE (_PAGE_DEVICE|MT_DEVICE_nGnRE)
#define PAGE_HYPERVISOR_WC      (_PAGE_DEVICE|MT_NORMAL_NC)
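
/*
 * For example, PAGE_HYPERVISOR_RO expands to
 *   MT_NORMAL (AttrIndx 0x7) | _PAGE_XN | _PAGE_RO | _PAGE_PRESENT == 0x3f,
 * i.e. a present, read-only, non-executable mapping of Normal memory,
 * following the flag layout documented above.
 */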

/*
 * Stage 2 Memory Type.
 *
 * These are valid in the MemAttr[3:0] field of an LPAE stage 2 page
 * table entry.
 *
 */
#define MATTR_DEV     0x1
#define MATTR_MEM_NC  0x5
#define MATTR_MEM     0xf

/* Flags for get_page_from_gva, gvirt_to_maddr etc */
#define GV2M_READ  (0u<<0)
#define GV2M_WRITE (1u<<0)
#define GV2M_EXEC  (1u<<1)

#ifndef __ASSEMBLY__

#include <xen/errno.h>
#include <xen/types.h>
#include <xen/lib.h>
#include <asm/system.h>

#if defined(CONFIG_ARM_32)
# include <asm/arm32/page.h>
#elif defined(CONFIG_ARM_64)
# include <asm/arm64/page.h>
#else
# error "unknown ARM variant"
#endif

/* Architectural minimum cacheline size is 4 32-bit words. */
#define MIN_CACHELINE_BYTES 16
/* Min dcache line size on the boot CPU. */
extern size_t dcache_line_bytes;

#define copy_page(dp, sp) memcpy(dp, sp, PAGE_SIZE)

static inline size_t read_dcache_line_bytes(void)
{
    uint32_t ctr;

    /* Read CTR */
    ctr = READ_SYSREG32(CTR_EL0);

    /* Bits 16-19 are the log2 number of words in the cacheline. */
    return (size_t) (4 << ((ctr >> 16) & 0xf));
}
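
/*
 * For example, a CTR_EL0.DminLine (bits [19:16]) value of 4 gives
 * 4 << 4 == 64, i.e. a 64-byte minimum data cache line.
 */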

/* Functions for flushing medium-sized areas.
 * If 'range' is large enough we might want to use model-specific
 * full-cache flushes. */

static inline int invalidate_dcache_va_range(const void *p, unsigned long size)
{
    const void *end = p + size;
    size_t cacheline_mask = dcache_line_bytes - 1;

    dsb(sy);           /* So the CPU issues all writes to the range */

    if ( (uintptr_t)p & cacheline_mask )
    {
        p = (void *)((uintptr_t)p & ~cacheline_mask);
        asm volatile (__clean_and_invalidate_dcache_one(0) : : "r" (p));
        p += dcache_line_bytes;
    }
    if ( (uintptr_t)end & cacheline_mask )
    {
        end = (void *)((uintptr_t)end & ~cacheline_mask);
        asm volatile (__clean_and_invalidate_dcache_one(0) : : "r" (end));
    }

    for ( ; p < end; p += dcache_line_bytes )
        asm volatile (__invalidate_dcache_one(0) : : "r" (p));

    dsb(sy);           /* So we know the flushes happen before continuing */

    return 0;
}

static inline int clean_dcache_va_range(const void *p, unsigned long size)
{
    const void *end = p + size;
    dsb(sy);           /* So the CPU issues all writes to the range */
    p = (void *)((uintptr_t)p & ~(dcache_line_bytes - 1));
    for ( ; p < end; p += dcache_line_bytes )
        asm volatile (__clean_dcache_one(0) : : "r" (p));
    dsb(sy);           /* So we know the flushes happen before continuing */
    /* ARM callers assume that dcache_* functions cannot fail. */
    return 0;
}

static inline int clean_and_invalidate_dcache_va_range
    (const void *p, unsigned long size)
{
    const void *end = p + size;
    dsb(sy);         /* So the CPU issues all writes to the range */
    p = (void *)((uintptr_t)p & ~(dcache_line_bytes - 1));
    for ( ; p < end; p += dcache_line_bytes )
        asm volatile (__clean_and_invalidate_dcache_one(0) : : "r" (p));
    dsb(sy);         /* So we know the flushes happen before continuing */
    /* ARM callers assume that dcache_* functions cannot fail. */
    return 0;
}
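
/*
 * Illustrative use of the range helpers (hypothetical 'buf'/'len'), e.g. for
 * a buffer shared with a non-coherent DMA device:
 *
 *     clean_dcache_va_range(buf, len);        // push CPU writes out to RAM
 *     ... let the device read/write 'buf' ...
 *     invalidate_dcache_va_range(buf, len);   // drop stale lines before reading
 *
 * As noted above, the return values can be ignored: the dcache_* helpers
 * cannot fail on ARM.
 */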

/* Macros for flushing a single small item.  The predicate is always
 * compile-time constant so this will compile down to 3 instructions in
 * the common case. */
#define clean_dcache(x) do {                                            \
    typeof(x) *_p = &(x);                                               \
    if ( sizeof(x) > MIN_CACHELINE_BYTES || sizeof(x) > alignof(x) )    \
        clean_dcache_va_range(_p, sizeof(x));                           \
    else                                                                \
        asm volatile (                                                  \
            "dsb sy;"   /* Finish all earlier writes */                 \
            __clean_dcache_one(0)                                       \
            "dsb sy;"   /* Finish flush before continuing */            \
            : : "r" (_p), "m" (*_p));                                   \
} while (0)

#define clean_and_invalidate_dcache(x) do {                             \
    typeof(x) *_p = &(x);                                               \
    if ( sizeof(x) > MIN_CACHELINE_BYTES || sizeof(x) > alignof(x) )    \
        clean_and_invalidate_dcache_va_range(_p, sizeof(x));            \
    else                                                                \
        asm volatile (                                                  \
            "dsb sy;"   /* Finish all earlier writes */                 \
            __clean_and_invalidate_dcache_one(0)                        \
            "dsb sy;"   /* Finish flush before continuing */            \
            : : "r" (_p), "m" (*_p));                                   \
} while (0)
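
/*
 * A minimal usage sketch (hypothetical 'val'): for a small, naturally
 * aligned object the compile-time predicate in the macros above is false,
 * so the macro emits the short inline dsb/clean/dsb sequence instead of
 * calling the range helper:
 *
 *     uint64_t val;
 *
 *     val = ...;              // hypothetical update of a small object
 *     clean_dcache(val);      // cleans just the cache line holding 'val'
 */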

/* Flush the dcache for an entire page. */
void flush_page_to_ram(unsigned long mfn, bool sync_icache);

/*
 * Print a walk of a page table or p2m
 *
 * ttbr is the base address register (TTBR0_EL2 or VTTBR_EL2)
 * addr is the PA or IPA to translate
 * root_level is the starting level of the page table
 *   (e.g. TCR_EL2.SL0 or VTCR_EL2.SL0)
 * nr_root_tables is the number of concatenated tables at the root.
 *   this can only be != 1 for P2M walks starting at the first or
 *   subsequent level.
 */
void dump_pt_walk(paddr_t ttbr, paddr_t addr,
                  unsigned int root_level,
                  unsigned int nr_root_tables);
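
/*
 * A sketch of a walk of the hypervisor's own tables (which have a single
 * root table), assuming 'addr' and 'root_level' are supplied by the caller:
 *
 *     dump_pt_walk(READ_SYSREG64(TTBR0_EL2), addr, root_level, 1);
 *
 * dump_hyp_walk() and dump_p2m_lookup() below are the usual wrappers around
 * this function.
 */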

/* Print a walk of the hypervisor's page tables for a virtual addr. */
extern void dump_hyp_walk(vaddr_t addr);
/* Print a walk of the p2m for a domain for a physical address. */
extern void dump_p2m_lookup(struct domain *d, paddr_t addr);

static inline uint64_t va_to_par(vaddr_t va)
{
    uint64_t par = __va_to_par(va);
    /* It is not OK to call this with an invalid VA */
    if ( par & PAR_F )
    {
        dump_hyp_walk(va);
        panic_PAR(par);
    }
    return par;
}

static inline int gva_to_ipa(vaddr_t va, paddr_t *paddr, unsigned int flags)
{
    uint64_t par = gva_to_ipa_par(va, flags);
    if ( par & PAR_F )
        return -EFAULT;
    *paddr = (par & PADDR_MASK & PAGE_MASK) | ((unsigned long) va & ~PAGE_MASK);
    return 0;
}
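
/*
 * A minimal sketch of translating a guest virtual address for a read
 * (hypothetical 'gva'):
 *
 *     paddr_t ipa;
 *
 *     if ( gva_to_ipa(gva, &ipa, GV2M_READ) )
 *         return -EFAULT;     // the stage 1 walk faulted
 *     // 'ipa' now holds the translated IPA, including the page offset of 'gva'
 */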

/* Bits in the PAR returned by va_to_par */
#define PAR_FAULT 0x1

#endif /* __ASSEMBLY__ */

#define PAGE_ALIGN(x) (((x) + PAGE_SIZE - 1) & PAGE_MASK)
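
/*
 * PAGE_ALIGN() rounds up to the next page boundary: with 4K pages,
 * PAGE_ALIGN(0x1001) == 0x2000 and PAGE_ALIGN(0x2000) == 0x2000.
 */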

#endif /* __ARM_PAGE_H__ */

/*
 * Local variables:
 * mode: C
 * c-file-style: "BSD"
 * c-basic-offset: 4
 * tab-width: 4
 * indent-tabs-mode: nil
 * End:
 */