#ifndef __ARCH_ARM_MM__
#define __ARCH_ARM_MM__

#include <xen/kernel.h>
#include <asm/page.h>
#include <public/xen.h>
#include <xen/pdx.h>

#if defined(CONFIG_ARM_32)
# include <asm/arm32/mm.h>
#elif defined(CONFIG_ARM_64)
# include <asm/arm64/mm.h>
#else
# error "unknown ARM variant"
#endif

/* Align Xen to a 2 MiB boundary. */
#define XEN_PADDR_ALIGN (1 << 21)

/*
 * Per-page-frame information.
 *
 * Every architecture must ensure the following:
 *  1. 'struct page_info' contains a 'struct page_list_entry list'.
 *  2. Provide a PFN_ORDER() macro for accessing the order of a free page.
 */
#define PFN_ORDER(_pfn) ((_pfn)->v.free.order)

struct page_info
{
    /* Each frame can be threaded onto a doubly-linked list. */
    struct page_list_entry list;

    /* Reference count and various PGC_xxx flags and fields. */
    unsigned long count_info;

    /* Context-dependent fields follow... */
    union {
        /* Page is in use: ((count_info & PGC_count_mask) != 0). */
        struct {
            /* Type reference count and various PGT_xxx flags and fields. */
            unsigned long type_info;
        } inuse;
        /* Page is on a free list: ((count_info & PGC_count_mask) == 0). */
        union {
            struct {
                /*
                 * Index of the first *possibly* unscrubbed page in the buddy.
                 * One more bit than maximum possible order to accommodate
                 * INVALID_DIRTY_IDX.
                 */
#define INVALID_DIRTY_IDX ((1UL << (MAX_ORDER + 1)) - 1)
                unsigned long first_dirty:MAX_ORDER + 1;

                /* Do TLBs need flushing for safety before next page use? */
                bool need_tlbflush:1;

#define BUDDY_NOT_SCRUBBING    0
#define BUDDY_SCRUBBING        1
#define BUDDY_SCRUB_ABORT      2
                unsigned long scrub_state:2;
            };

            unsigned long val;
        } free;

    } u;

    union {
        /* Page is in use, but not as a shadow. */
        struct {
            /* Owner of this page (zero if page is anonymous). */
            struct domain *domain;
        } inuse;

        /* Page is on a free list. */
        struct {
            /* Order-size of the free chunk this page is the head of. */
            unsigned int order;
        } free;

    } v;

    union {
        /*
         * Timestamp from 'TLB clock', used to avoid extra safety flushes.
         * Only valid for: a) free pages, and b) pages with zero type count
         */
        u32 tlbflush_timestamp;
    };
    u64 pad;
};

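/*
 * The PGT_/PGC_ flag fields below are allocated from the most significant
 * bit of the word downwards: PG_shift(idx) is the bit position idx bits
 * below the top of the word, and PG_mask(x, idx) places the value x in a
 * field whose least significant bit sits there.  For example, on a 64-bit
 * build PG_mask(1, 1) is 1UL << 63 and PG_mask(3, 9) covers bits 55-56.
 */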
#define PG_shift(idx)   (BITS_PER_LONG - (idx))
#define PG_mask(x, idx) (x ## UL << PG_shift(idx))

#define PGT_none          PG_mask(0, 1)  /* no special uses of this page   */
#define PGT_writable_page PG_mask(1, 1)  /* has writable mappings?         */
#define PGT_type_mask     PG_mask(1, 1)  /* Bits 31 or 63.                 */

 /* Count of uses of this frame as its current type. */
#define PGT_count_width   PG_shift(2)
#define PGT_count_mask    ((1UL<<PGT_count_width)-1)

 /* Cleared when the owning guest 'frees' this page. */
#define _PGC_allocated    PG_shift(1)
#define PGC_allocated     PG_mask(1, 1)
 /* Page is Xen heap? */
#define _PGC_xen_heap     PG_shift(2)
#define PGC_xen_heap      PG_mask(1, 2)
/* ... */
/* Page is broken? */
#define _PGC_broken       PG_shift(7)
#define PGC_broken        PG_mask(1, 7)
 /* Mutually-exclusive page states: { inuse, offlining, offlined, free }. */
#define PGC_state         PG_mask(3, 9)
#define PGC_state_inuse   PG_mask(0, 9)
#define PGC_state_offlining PG_mask(1, 9)
#define PGC_state_offlined PG_mask(2, 9)
#define PGC_state_free    PG_mask(3, 9)
#define page_state_is(pg, st) (((pg)->count_info&PGC_state) == PGC_state_##st)
/* Page is not reference counted */
#define _PGC_extra        PG_shift(10)
#define PGC_extra         PG_mask(1, 10)

/* Count of references to this frame. */
#define PGC_count_width   PG_shift(10)
#define PGC_count_mask    ((1UL<<PGC_count_width)-1)

/*
 * Page needs to be scrubbed. Since this bit can only be set on a page that is
 * free (i.e. in PGC_state_free) we can reuse PGC_allocated bit.
 */
#define _PGC_need_scrub   _PGC_allocated
#define PGC_need_scrub    PGC_allocated
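
/*
 * Illustrative sketch only: the _PGC_* values above are bit numbers suitable
 * for the bitop helpers, while the PGC_* values are the corresponding masks.
 * A free page still awaiting scrubbing could, for instance, be detected with:
 *
 *     if ( page_state_is(pg, free) &&
 *          test_bit(_PGC_need_scrub, &pg->count_info) )
 *         ... scrub the page before handing it out ...
 */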

extern mfn_t xenheap_mfn_start, xenheap_mfn_end;
extern vaddr_t xenheap_virt_end;
#ifdef CONFIG_ARM_64
extern vaddr_t xenheap_virt_start;
extern unsigned long xenheap_base_pdx;
#endif

#ifdef CONFIG_ARM_32
#define is_xen_heap_page(page) is_xen_heap_mfn(page_to_mfn(page))
#define is_xen_heap_mfn(mfn) ({                                 \
    unsigned long mfn_ = mfn_x(mfn);                            \
    (mfn_ >= mfn_x(xenheap_mfn_start) &&                        \
     mfn_ < mfn_x(xenheap_mfn_end));                            \
})
#else
#define is_xen_heap_page(page) ((page)->count_info & PGC_xen_heap)
#define is_xen_heap_mfn(mfn) \
    (mfn_valid(mfn) && is_xen_heap_page(mfn_to_page(mfn)))
#endif

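/*
 * An MFN is "fixed" if it backs the Xen image itself, i.e. it lies within
 * [_start, _end) of the hypervisor binary.
 */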
#define is_xen_fixed_mfn(mfn)                                   \
    ((mfn_to_maddr(mfn) >= virt_to_maddr(&_start)) &&           \
     (mfn_to_maddr(mfn) <= virt_to_maddr((vaddr_t)_end - 1)))

#define page_get_owner(_p)    (_p)->v.inuse.domain
#define page_set_owner(_p,_d) ((_p)->v.inuse.domain = (_d))

#define maddr_get_owner(ma)   (page_get_owner(maddr_to_page((ma))))

#define frame_table ((struct page_info *)FRAMETABLE_VIRT_START)
/* PDX of the first page in the frame table. */
extern unsigned long frametable_base_pdx;

extern unsigned long max_page;
extern unsigned long total_pages;

#define PDX_GROUP_SHIFT SECOND_SHIFT

/* Boot-time pagetable setup */
extern void setup_pagetables(unsigned long boot_phys_offset);
/* Map FDT in boot pagetable */
extern void *early_fdt_map(paddr_t fdt_paddr);
/* Remove early mappings */
extern void remove_early_mappings(void);
/*
 * Allocate and initialise pagetables for a secondary CPU. Sets init_ttbr to
 * the new page table.
 */
extern int init_secondary_pagetables(int cpu);
/* Switch a secondary CPU to its own pagetables and finalise MMU setup */
extern void mmu_init_secondary_cpu(void);
/*
 * Set up the xenheap: up to 1GB of contiguous, always-mapped memory.
 * Base must be 32MB aligned and size a multiple of 32MB.
 */
extern void setup_xenheap_mappings(unsigned long base_mfn, unsigned long nr_mfns);
/* Map a frame table to cover physical addresses ps through pe */
extern void setup_frametable_mappings(paddr_t ps, paddr_t pe);
/* Map a 4k page in a fixmap entry */
extern void set_fixmap(unsigned map, mfn_t mfn, unsigned attributes);
/* Remove a mapping from a fixmap entry */
extern void clear_fixmap(unsigned map);
/* Map a physical range in virtual memory */
void __iomem *ioremap_attr(paddr_t start, size_t len, unsigned attributes);

static inline void __iomem *ioremap_nocache(paddr_t start, size_t len)
{
    return ioremap_attr(start, len, PAGE_HYPERVISOR_NOCACHE);
}

static inline void __iomem *ioremap_cache(paddr_t start, size_t len)
{
    return ioremap_attr(start, len, PAGE_HYPERVISOR);
}

static inline void __iomem *ioremap_wc(paddr_t start, size_t len)
{
    return ioremap_attr(start, len, PAGE_HYPERVISOR_WC);
}
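
/*
 * Illustrative use of the helpers above (the device address is made up and
 * writel()/iounmap() are assumed from the usual Xen I/O and vmap headers):
 *
 *     void __iomem *regs = ioremap_nocache(0xfff51000, PAGE_SIZE);
 *
 *     if ( !regs )
 *         return -ENOMEM;
 *     writel(0x1, regs + 0x30);
 *     iounmap(regs);
 */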
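/*
 * An MFN is considered valid if its PDX is not below the first PDX covered
 * by the frame table (frametable_base_pdx) and it also passes the generic
 * __mfn_valid() check.
 */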
/* XXX -- account for base */
#define mfn_valid(mfn)        ({                                              \
    unsigned long __m_f_n = mfn_x(mfn);                                       \
    likely(pfn_to_pdx(__m_f_n) >= frametable_base_pdx && __mfn_valid(__m_f_n)); \
})

/* Convert between machine frame numbers and page-info structures. */
#define mfn_to_page(mfn)                                            \
    (frame_table + (mfn_to_pdx(mfn) - frametable_base_pdx))
#define page_to_mfn(pg)                                             \
    pdx_to_mfn((unsigned long)((pg) - frame_table) + frametable_base_pdx)

/* Convert between machine addresses and page-info structures. */
#define maddr_to_page(ma) mfn_to_page(maddr_to_mfn(ma))
#define page_to_maddr(pg) (mfn_to_maddr(page_to_mfn(pg)))

/* Convert between frame number and address formats.  */
#define pfn_to_paddr(pfn) ((paddr_t)(pfn) << PAGE_SHIFT)
#define paddr_to_pfn(pa)  ((unsigned long)((pa) >> PAGE_SHIFT))
#define paddr_to_pdx(pa)    mfn_to_pdx(maddr_to_mfn(pa))
#define gfn_to_gaddr(gfn)   pfn_to_paddr(gfn_x(gfn))
#define gaddr_to_gfn(ga)    _gfn(paddr_to_pfn(ga))
#define mfn_to_maddr(mfn)   pfn_to_paddr(mfn_x(mfn))
#define maddr_to_mfn(ma)    _mfn(paddr_to_pfn(ma))
#define vmap_to_mfn(va)     maddr_to_mfn(virt_to_maddr((vaddr_t)va))
#define vmap_to_page(va)    mfn_to_page(vmap_to_mfn(va))
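
/*
 * Illustrative round trip through the conversions above (the address is
 * made up and assumed to be RAM covered by the frame table):
 *
 *     mfn_t mfn = maddr_to_mfn(0x40001000);      frame containing the address
 *     struct page_info *pg = mfn_to_page(mfn);   its frame-table entry
 *     paddr_t ma = page_to_maddr(pg);            back to 0x40001000
 */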

/* Page-align address and convert to frame number format */
#define paddr_to_pfn_aligned(paddr)    paddr_to_pfn(PAGE_ALIGN(paddr))

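/*
 * virt_to_maddr() relies on the hardware address translation: va_to_par()
 * returns the Physical Address Register for the given VA, whose frame
 * address is then combined with the low bits of the VA.
 */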
static inline paddr_t __virt_to_maddr(vaddr_t va)
{
    uint64_t par = va_to_par(va);
    return (par & PADDR_MASK & PAGE_MASK) | (va & ~PAGE_MASK);
}
#define virt_to_maddr(va)   __virt_to_maddr((vaddr_t)(va))

#ifdef CONFIG_ARM_32
static inline void *maddr_to_virt(paddr_t ma)
{
    ASSERT(is_xen_heap_mfn(maddr_to_mfn(ma)));
    ma -= mfn_to_maddr(xenheap_mfn_start);
    return (void *)(unsigned long) ma + XENHEAP_VIRT_START;
}
#else
static inline void *maddr_to_virt(paddr_t ma)
{
    ASSERT((mfn_to_pdx(maddr_to_mfn(ma)) - xenheap_base_pdx) <
           (DIRECTMAP_SIZE >> PAGE_SHIFT));
    return (void *)(XENHEAP_VIRT_START -
                    (xenheap_base_pdx << PAGE_SHIFT) +
                    ((ma & ma_va_bottom_mask) |
                     ((ma & ma_top_mask) >> pfn_pdx_hole_shift)));
}
#endif

/*
 * Translate a guest virtual address to a machine address.
 * Return the fault information if the translation failed, else 0.
 */
static inline uint64_t gvirt_to_maddr(vaddr_t va, paddr_t *pa,
                                      unsigned int flags)
{
    uint64_t par = gva_to_ma_par(va, flags);
    if ( par & PAR_F )
        return par;
    *pa = (par & PADDR_MASK & PAGE_MASK) | ((unsigned long) va & ~PAGE_MASK);
    return 0;
}
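
/*
 * Illustrative use only (GV2M_READ is assumed to be the usual read-access
 * flag; error handling is merely sketched):
 *
 *     paddr_t maddr;
 *
 *     if ( gvirt_to_maddr(gva, &maddr, GV2M_READ) )
 *         return -EFAULT;
 */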

/* Convert between Xen-heap virtual addresses and machine addresses. */
#define __pa(x)             (virt_to_maddr(x))
#define __va(x)             (maddr_to_virt(x))

/* Convert between Xen-heap virtual addresses and machine frame numbers. */
#define __virt_to_mfn(va) (virt_to_maddr(va) >> PAGE_SHIFT)
#define __mfn_to_virt(mfn) (maddr_to_virt((paddr_t)(mfn) << PAGE_SHIFT))

/*
 * We define non-underscored wrappers for the above conversion functions.
 * These are overridden in various source files while the underscored
 * versions remain intact.
 */
#define virt_to_mfn(va)     __virt_to_mfn(va)
#define mfn_to_virt(mfn)    __mfn_to_virt(mfn)

/* Convert between Xen-heap virtual addresses and page-info structures. */
static inline struct page_info *virt_to_page(const void *v)
{
    unsigned long va = (unsigned long)v;
    unsigned long pdx;

    ASSERT(va >= XENHEAP_VIRT_START);
    ASSERT(va < xenheap_virt_end);

    pdx = (va - XENHEAP_VIRT_START) >> PAGE_SHIFT;
    pdx += mfn_to_pdx(xenheap_mfn_start);
    return frame_table + pdx - frametable_base_pdx;
}

static inline void *page_to_virt(const struct page_info *pg)
{
    return mfn_to_virt(mfn_x(page_to_mfn(pg)));
}

struct page_info *get_page_from_gva(struct vcpu *v, vaddr_t va,
                                    unsigned long flags);

/*
 * Arm does not have an M2P, but common code expects a handful of
 * M2P-related defines and functions. Provide dummy versions of these.
 */
#define INVALID_M2P_ENTRY        (~0UL)
#define SHARED_M2P_ENTRY         (~0UL - 1UL)
#define SHARED_M2P(_e)           ((_e) == SHARED_M2P_ENTRY)

/* Xen always owns P2M on ARM */
#define set_gpfn_from_mfn(mfn, pfn) do { (void) (mfn), (void)(pfn); } while (0)
#define mfn_to_gmfn(_d, mfn)  (mfn)


/* Arch-specific portion of memory_op hypercall. */
long arch_memory_op(int op, XEN_GUEST_HANDLE_PARAM(void) arg);

#define domain_set_alloc_bitsize(d) ((void)0)
#define domain_clamp_alloc_bitsize(d, b) (b)

unsigned long domain_get_maximum_gpfn(struct domain *d);

#define memguard_guard_stack(_p)       ((void)0)
#define memguard_guard_range(_p,_l)    ((void)0)
#define memguard_unguard_range(_p,_l)  ((void)0)

/* Release all __init and __initdata ranges to be reused */
void free_init_memory(void);

int guest_physmap_mark_populate_on_demand(struct domain *d, unsigned long gfn,
                                          unsigned int order);

extern void put_page_type(struct page_info *page);
static inline void put_page_and_type(struct page_info *page)
{
    put_page_type(page);
    put_page(page);
}

void clear_and_clean_page(struct page_info *page);

static inline
int arch_acquire_resource(struct domain *d, unsigned int type, unsigned int id,
                          unsigned long frame, unsigned int nr_frames,
                          xen_pfn_t mfn_list[])
{
    return -EOPNOTSUPP;
}

unsigned int arch_get_dma_bitsize(void);

#endif /*  __ARCH_ARM_MM__ */
/*
 * Local variables:
 * mode: C
 * c-file-style: "BSD"
 * c-basic-offset: 4
 * indent-tabs-mode: nil
 * End:
 */