#ifndef _XEN_P2M_H
#define _XEN_P2M_H

#include <xen/mm.h>
#include <xen/radix-tree.h>
#include <xen/rwlock.h>
#include <xen/mem_access.h>

#include <asm/current.h>

#define paddr_bits PADDR_BITS

/* Holds the bit size of IPAs in p2m tables.  */
extern unsigned int p2m_ipa_bits;

#ifdef CONFIG_ARM_64
extern unsigned int p2m_root_order;
extern unsigned int p2m_root_level;
#define P2M_ROOT_ORDER    p2m_root_order
#define P2M_ROOT_LEVEL p2m_root_level
#else
/* First level P2M is always 2 consecutive pages */
#define P2M_ROOT_ORDER    1
#define P2M_ROOT_LEVEL 1
#endif

struct domain;

extern void memory_type_changed(struct domain *);

/* Per-p2m-table state */
struct p2m_domain {
    /*
     * Lock that protects updates to the p2m.
     */
    rwlock_t lock;

    /* Pages used to construct the p2m */
    struct page_list_head pages;

    /* The root of the p2m tree. May be concatenated */
    struct page_info *root;

    /* Current VMID in use */
    uint16_t vmid;

    /* Current Translation Table Base Register for the p2m */
    uint64_t vttbr;

    /* Highest guest frame that's ever been mapped in the p2m */
    gfn_t max_mapped_gfn;

    /*
     * Lowest mapped gfn in the p2m. When releasing mapped gfns in a
     * preemptible manner this is updated to track where to resume the
     * search. Apart from during teardown this can only decrease.
     */
    gfn_t lowest_mapped_gfn;

    /* Indicate if it is required to clean the cache when writing an entry */
    bool clean_pte;

    /*
     * P2M updates may require TLBs to be flushed (invalidated).
     *
     * Flushes may be deferred by setting 'need_flush' and then flushing
     * when the p2m write lock is released.
     *
     * If an immediate flush is required (e.g. if a super page is
     * shattered), call p2m_tlb_flush_sync().
     */
    bool need_flush;

    /* Gather some statistics for information purposes only */
    struct {
        /* Number of mappings at each p2m tree level */
        unsigned long mappings[4];
        /* Number of times we have shattered a mapping
         * at each p2m tree level. */
        unsigned long shattered[4];
    } stats;

    /*
     * If true, and an access fault comes in and there is no vm_event listener,
     * pause domain. Otherwise, remove access restrictions.
     */
    bool access_required;

    /* Defines if mem_access is in use for the domain. */
    bool mem_access_enabled;

    /*
     * Default P2M access type for each page in the domain: new pages,
     * swapped in pages, cleared pages, and pages that are ambiguously
     * retyped get this access type. See definition of p2m_access_t.
     */
    p2m_access_t default_access;

    /*
     * Radix tree to store the p2m_access_t settings as the PTEs don't have
     * enough available bits to store this information.
     */
    struct radix_tree_root mem_access_settings;

    /* back pointer to domain */
    struct domain *domain;

    /* Keep track, for each pCPU, of the last vCPU that used this p2m */
    uint8_t last_vcpu_ran[NR_CPUS];
};
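
/*
 * Illustrative sketch (not verbatim P2M code) of the deferred TLB flush
 * contract described above: a writer takes the p2m write lock, updates
 * entries (which may set 'need_flush'), and either relies on
 * p2m_write_unlock() to perform the deferred flush or calls
 * p2m_tlb_flush_sync() when stale TLB entries must not survive until the
 * unlock (e.g. after shattering a superpage):
 *
 *     p2m_write_lock(p2m);
 *     ... update entries, possibly setting p2m->need_flush ...
 *     p2m_tlb_flush_sync(p2m);    (only if an immediate flush is required)
 *     p2m_write_unlock(p2m);      (flushes if need_flush is still set)
 */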

/*
 * List of possible types for each page in the p2m entry.
 * Only 4 bits are available per page in the pte for this purpose, so at
 * most 16 types can be stored there. If we run out of values in the
 * future, higher values can be used for pseudo-types that are not stored
 * in the p2m entry.
 */
typedef enum {
    p2m_invalid = 0,    /* Nothing mapped here */
    p2m_ram_rw,         /* Normal read/write guest RAM */
    p2m_ram_ro,         /* Read-only; writes are silently dropped */
    p2m_mmio_direct_dev,/* Read/write mapping of genuine Device MMIO area */
    p2m_mmio_direct_nc, /* Read/write mapping of genuine MMIO area non-cacheable */
    p2m_mmio_direct_c,  /* Read/write mapping of genuine MMIO area cacheable */
    p2m_map_foreign_rw, /* Read/write RAM pages from foreign domain */
    p2m_map_foreign_ro, /* Read-only RAM pages from foreign domain */
    p2m_grant_map_rw,   /* Read/write grant mapping */
    p2m_grant_map_ro,   /* Read-only grant mapping */
    /* The types below are only used to decide the page attribute in the P2M */
    p2m_iommu_map_rw,   /* Read/write iommu mapping */
    p2m_iommu_map_ro,   /* Read-only iommu mapping */
    p2m_max_real_type,  /* Types after this won't be stored in the p2m */
} p2m_type_t;

/* We use bitmaps and masks to handle groups of types */
#define p2m_to_mask(_t) (1UL << (_t))

/* RAM types, which map to real machine frames */
#define P2M_RAM_TYPES (p2m_to_mask(p2m_ram_rw) |        \
                       p2m_to_mask(p2m_ram_ro))

/* Grant mapping types, which map to a real frame in another VM */
#define P2M_GRANT_TYPES (p2m_to_mask(p2m_grant_map_rw) |  \
                         p2m_to_mask(p2m_grant_map_ro))

/* Foreign mapping types */
#define P2M_FOREIGN_TYPES (p2m_to_mask(p2m_map_foreign_rw) | \
                           p2m_to_mask(p2m_map_foreign_ro))

/* Useful predicates */
#define p2m_is_ram(_t) (p2m_to_mask(_t) & P2M_RAM_TYPES)
#define p2m_is_foreign(_t) (p2m_to_mask(_t) & P2M_FOREIGN_TYPES)
#define p2m_is_any_ram(_t) (p2m_to_mask(_t) &                   \
                            (P2M_RAM_TYPES | P2M_GRANT_TYPES |  \
                             P2M_FOREIGN_TYPES))
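
/*
 * Example (illustrative sketch only): a caller that looked up the type of
 * a GFN can use these masks to test a whole group of types in one go,
 * e.g. to accept any page backed by normal, grant-mapped or foreign RAM
 * (assuming, as on Arm, that p2m_lookup() returns INVALID_MFN when
 * nothing is mapped):
 *
 *     p2m_type_t t;
 *     mfn_t mfn = p2m_lookup(d, gfn, &t);
 *
 *     if ( !mfn_eq(mfn, INVALID_MFN) && p2m_is_any_ram(t) )
 *         ... treat the page as RAM ...
 */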

/* All common type definitions should live ahead of this inclusion. */
#ifdef _XEN_P2M_COMMON_H
# error "xen/p2m-common.h should not be included directly"
#endif
#include <xen/p2m-common.h>

static inline
void p2m_altp2m_check(struct vcpu *v, uint16_t idx)
{
    /* Not supported on ARM. */
}

/*
 * Helper to restrict "p2m_ipa_bits" according to the requirements of an
 * external entity (e.g. the IOMMU).
 *
 * Each corresponding driver should report the maximum IPA bits
 * (Stage-2 input size) it can support.
 */
void p2m_restrict_ipa_bits(unsigned int ipa_bits);
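
/*
 * For example (illustrative only), an IOMMU driver whose hardware supports
 * at most a 40-bit Stage-2 input size would call:
 *
 *     p2m_restrict_ipa_bits(40);
 */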

/* Second stage paging setup, to be called on all CPUs */
void setup_virt_paging(void);

/* Init the datastructures for later use by the p2m code */
int p2m_init(struct domain *d);

/* Return all the p2m resources to Xen. */
void p2m_teardown(struct domain *d);

/*
 * Remove mapping refcount on each mapping page in the p2m
 *
 * TODO: For the moment only foreign mappings are handled
 */
int relinquish_p2m_mapping(struct domain *d);

/* Context switch */
void p2m_save_state(struct vcpu *p);
void p2m_restore_state(struct vcpu *n);

/* Print debugging/statistical info about a domain's p2m */
void p2m_dump_info(struct domain *d);

static inline void p2m_write_lock(struct p2m_domain *p2m)
{
    write_lock(&p2m->lock);
}

void p2m_write_unlock(struct p2m_domain *p2m);

static inline void p2m_read_lock(struct p2m_domain *p2m)
{
    read_lock(&p2m->lock);
}

static inline void p2m_read_unlock(struct p2m_domain *p2m)
{
    read_unlock(&p2m->lock);
}

static inline int p2m_is_locked(struct p2m_domain *p2m)
{
    return rw_is_locked(&p2m->lock);
}

static inline int p2m_is_write_locked(struct p2m_domain *p2m)
{
    return rw_is_write_locked(&p2m->lock);
}

void p2m_tlb_flush_sync(struct p2m_domain *p2m);

/* Look up the MFN corresponding to a domain's GFN. */
mfn_t p2m_lookup(struct domain *d, gfn_t gfn, p2m_type_t *t);

/*
 * Get details of a given gfn.
 * The P2M lock should be taken by the caller.
 */
mfn_t p2m_get_entry(struct p2m_domain *p2m, gfn_t gfn,
                    p2m_type_t *t, p2m_access_t *a,
                    unsigned int *page_order,
                    bool *valid);
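
/*
 * Illustrative sketch of a read-side lookup (the caller and variable
 * names are hypothetical):
 *
 *     struct p2m_domain *p2m = p2m_get_hostp2m(d);
 *     p2m_type_t t;
 *     p2m_access_t a;
 *     unsigned int order;
 *     bool valid;
 *     mfn_t mfn;
 *
 *     p2m_read_lock(p2m);
 *     mfn = p2m_get_entry(p2m, gfn, &t, &a, &order, &valid);
 *     p2m_read_unlock(p2m);
 */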

/*
 * Directly set a p2m entry: only for use by the P2M code.
 * The P2M write lock should be taken.
 */
int p2m_set_entry(struct p2m_domain *p2m,
                  gfn_t sgfn,
                  unsigned long nr,
                  mfn_t smfn,
                  p2m_type_t t,
                  p2m_access_t a);
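
/*
 * Illustrative sketch (values are examples only): callers inside the P2M
 * code update entries with the write lock held, relying on
 * p2m_write_unlock() to perform any TLB flush deferred via 'need_flush':
 *
 *     p2m_write_lock(p2m);
 *     rc = p2m_set_entry(p2m, sgfn, 1UL << order, smfn,
 *                        p2m_ram_rw, p2m->default_access);
 *     p2m_write_unlock(p2m);
 */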

bool p2m_resolve_translation_fault(struct domain *d, gfn_t gfn);

void p2m_invalidate_root(struct p2m_domain *p2m);

/*
 * Clean & invalidate caches corresponding to a region [start,end) of guest
 * address space.
 *
 * start will get updated if the function is preempted.
 */
int p2m_cache_flush_range(struct domain *d, gfn_t *pstart, gfn_t end);

void p2m_set_way_flush(struct vcpu *v);

void p2m_toggle_cache(struct vcpu *v, bool was_enabled);

void p2m_flush_vm(struct vcpu *v);

/*
 * Map a region in the guest p2m with a specific p2m type.
 * The memory attributes will be derived from the p2m type.
 */
int map_regions_p2mt(struct domain *d,
                     gfn_t gfn,
                     unsigned long nr,
                     mfn_t mfn,
                     p2m_type_t p2mt);
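
/*
 * For example (illustrative only), to give a guest a Device-memory,
 * read/write view of a single page of MMIO at guest frame 'gfn' backed by
 * machine frame 'mfn':
 *
 *     rc = map_regions_p2mt(d, gfn, 1, mfn, p2m_mmio_direct_dev);
 */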

int unmap_regions_p2mt(struct domain *d,
                       gfn_t gfn,
                       unsigned long nr,
                       mfn_t mfn);

int map_dev_mmio_region(struct domain *d,
                        gfn_t gfn,
                        unsigned long nr,
                        mfn_t mfn);

int guest_physmap_add_entry(struct domain *d,
                            gfn_t gfn,
                            mfn_t mfn,
                            unsigned long page_order,
                            p2m_type_t t);

/* Untyped version for RAM only, for compatibility */
static inline int guest_physmap_add_page(struct domain *d,
                                         gfn_t gfn,
                                         mfn_t mfn,
                                         unsigned int page_order)
{
    return guest_physmap_add_entry(d, gfn, mfn, page_order, p2m_ram_rw);
}
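
/*
 * e.g. (illustrative only) adding a single 4K RAM page at guest frame
 * 'gfn', backed by machine frame 'mfn':
 *
 *     rc = guest_physmap_add_page(d, gfn, mfn, 0);
 */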

mfn_t gfn_to_mfn(struct domain *d, gfn_t gfn);

/* Look up a GFN and take a reference count on the backing page. */
typedef unsigned int p2m_query_t;
#define P2M_ALLOC    (1u<<0)   /* Populate PoD and paged-out entries */
#define P2M_UNSHARE  (1u<<1)   /* Break CoW sharing */

struct page_info *p2m_get_page_from_gfn(struct domain *d, gfn_t gfn,
                                        p2m_type_t *t);

static inline struct page_info *get_page_from_gfn(
    struct domain *d, unsigned long gfn, p2m_type_t *t, p2m_query_t q)
{
    mfn_t mfn;
    p2m_type_t _t;
    struct page_info *page;

    /*
     * Special case for DOMID_XEN as it is the only domain so far that is
     * not auto-translated.
     */
    if ( likely(d != dom_xen) )
        return p2m_get_page_from_gfn(d, _gfn(gfn), t);

    if ( !t )
        t = &_t;

    *t = p2m_invalid;

    /*
     * DOMID_XEN sees 1-1 RAM. The p2m_type is based on the type of the
     * page.
     */
    mfn = _mfn(gfn);
    page = mfn_to_page(mfn);

    if ( !mfn_valid(mfn) || !get_page(page, d) )
        return NULL;

    if ( page->u.inuse.type_info & PGT_writable_page )
        *t = p2m_ram_rw;
    else
        *t = p2m_ram_ro;

    return page;
}
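
/*
 * Illustrative pattern (hypothetical caller): the page returned by
 * get_page_from_gfn() carries a reference which must be dropped with
 * put_page() once the caller is done with it:
 *
 *     page = get_page_from_gfn(d, gfn, &t, P2M_ALLOC);
 *     if ( page )
 *     {
 *         ... access the page ...
 *         put_page(page);
 *     }
 */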

int get_page_type(struct page_info *page, unsigned long type);
bool is_iomem_page(mfn_t mfn);
static inline int get_page_and_type(struct page_info *page,
                                    struct domain *domain,
                                    unsigned long type)
{
    int rc = get_page(page, domain);

    if ( likely(rc) && unlikely(!get_page_type(page, type)) )
    {
        put_page(page);
        rc = 0;
    }

    return rc;
}

/* get host p2m table */
#define p2m_get_hostp2m(d) (&(d)->arch.p2m)

static inline bool p2m_vm_event_sanity_check(struct domain *d)
{
    return true;
}

/*
 * Return the start of the next mapping based on the order of the
 * current one.
 */
static inline gfn_t gfn_next_boundary(gfn_t gfn, unsigned int order)
{
    /*
     * The order corresponds to the order of the mapping (or invalid
     * range) in the page table. So we need to align the GFN before
     * incrementing.
     */
    gfn = _gfn(gfn_x(gfn) & ~((1UL << order) - 1));

    return gfn_add(gfn, 1UL << order);
}
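
/*
 * Example: with 4K pages, a 2MB block mapping has order 9. For a GFN of
 * 0x80123, gfn_next_boundary() first aligns it down to 0x80000 and then
 * returns 0x80200, i.e. the first GFN of the next 2MB-aligned region.
 */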

static inline int set_foreign_p2m_entry(struct domain *d, unsigned long gfn,
                                        mfn_t mfn)
{
    /*
     * NOTE: If this is implemented then proper reference counting of
     *       foreign entries will need to be implemented.
     */
    return -EOPNOTSUPP;
}

/*
 * A vCPU has cache enabled only when the MMU is enabled and data cache
 * is enabled.
 */
static inline bool vcpu_has_cache_enabled(struct vcpu *v)
{
    const register_t mask = SCTLR_Axx_ELx_C | SCTLR_Axx_ELx_M;

    /* Only works with the current vCPU */
    ASSERT(current == v);

    return (READ_SYSREG(SCTLR_EL1) & mask) == mask;
}

#endif /* _XEN_P2M_H */

/*
 * Local variables:
 * mode: C
 * c-file-style: "BSD"
 * c-basic-offset: 4
 * indent-tabs-mode: nil
 * End:
 */