/******************************************************************************
 * include/xen/mm.h
 *
 * Definitions for memory pages, frame numbers, addresses, allocations, etc.
 *
 * Copyright (c) 2002-2006, K A Fraser <keir@xensource.com>
 *
 *                         +---------------------+
 *                          Xen Memory Management
 *                         +---------------------+
 *
 * Xen has to handle many different address spaces.  It is important not to
 * get these spaces mixed up.  The following is a consistent terminology which
 * should be adhered to.
 *
 * mfn: Machine Frame Number
 *   The values Xen puts into its own pagetables.  This is the host physical
 *   memory address space with RAM, MMIO etc.
 *
 * gfn: Guest Frame Number
 *   The values a guest puts in its own pagetables.  For an auto-translated
 *   guest (hardware assisted with 2nd stage translation, or shadowed), gfn !=
 *   mfn.  For a non-translated guest which is aware of Xen, gfn == mfn.
 *
 * pfn: Pseudophysical Frame Number
 *   A linear idea of a guest physical address space. For an auto-translated
 *   guest, pfn == gfn while for a non-translated guest, pfn != gfn.
 *
 * dfn: Device DMA Frame Number (definitions in include/xen/iommu.h)
 *   The linear frame numbers of device DMA address space. All initiators for
 *   (i.e. all devices assigned to) a guest share a single DMA address space
 *   and, by default, Xen will ensure dfn == pfn.
 *
 * WARNING: Some of these terms have changed over time while others have been
 * used inconsistently, meaning that a lot of existing code does not match the
 * definitions above.  New code should use these terms as described here, and
 * over time older code should be corrected to be consistent.
 *
 * An incomplete list of larger work areas:
 * - Phase out the use of 'pfn' from the x86 pagetable code.  Callers should
 *   know explicitly whether they are talking about mfns or gfns.
 * - Phase out the use of 'pfn' from the ARM mm code.  A cursory glance
 *   suggests that 'mfn' and 'pfn' are currently used interchangeably, where
 *   'mfn' is the appropriate term to use.
 * - Phase out the use of gpfn/gmfn where pfn/mfn are meant.  This excludes
 *   the x86 shadow code, which uses gmfn/smfn pairs with different,
 *   documented, meanings.
 */

#ifndef __XEN_MM_H__
#define __XEN_MM_H__

#include <xen/compiler.h>
#include <xen/types.h>
#include <xen/list.h>
#include <xen/spinlock.h>
#include <xen/typesafe.h>
#include <xen/kernel.h>
#include <xen/perfc.h>
#include <public/memory.h>

TYPE_SAFE(unsigned long, mfn);
#define PRI_mfn          "05lx"
#define INVALID_MFN      _mfn(~0UL)
/*
 * To be used for global variable initialization. This works around a bug
 * in GCC < 5.0.
 */
#define INVALID_MFN_INITIALIZER { ~0UL }

#ifndef mfn_t
#define mfn_t /* Grep fodder: mfn_t, _mfn() and mfn_x() are defined above */
#define _mfn
#define mfn_x
#undef mfn_t
#undef _mfn
#undef mfn_x
#endif

static inline mfn_t mfn_add(mfn_t mfn, unsigned long i)
{
    return _mfn(mfn_x(mfn) + i);
}

static inline mfn_t mfn_max(mfn_t x, mfn_t y)
{
    return _mfn(max(mfn_x(x), mfn_x(y)));
}

static inline mfn_t mfn_min(mfn_t x, mfn_t y)
{
    return _mfn(min(mfn_x(x), mfn_x(y)));
}

static inline bool_t mfn_eq(mfn_t x, mfn_t y)
{
    return mfn_x(x) == mfn_x(y);
}
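
/*
 * Usage sketch (illustrative only, not part of this header): raw frame
 * numbers must be boxed with _mfn() and unboxed with mfn_x(), so mixing up
 * the address spaces described at the top of this file fails to compile
 * whenever TYPE_SAFE wraps the value in a struct.
 *
 *     mfn_t mfn = _mfn(0x1000);         box a raw machine frame number
 *     unsigned long raw = mfn_x(mfn);   unbox it again
 *     mfn = mfn_add(mfn, 1);            arithmetic stays within mfn_t
 *
 * The same pattern applies to the gfn_t and pfn_t wrappers below.
 */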

TYPE_SAFE(unsigned long, gfn);
#define PRI_gfn          "05lx"
#define INVALID_GFN      _gfn(~0UL)
/*
 * To be used for global variable initialization. This works around a bug
 * in GCC < 5.0: https://gcc.gnu.org/bugzilla/show_bug.cgi?id=64856
 */
#define INVALID_GFN_INITIALIZER { ~0UL }

#ifndef gfn_t
#define gfn_t /* Grep fodder: gfn_t, _gfn() and gfn_x() are defined above */
#define _gfn
#define gfn_x
#undef gfn_t
#undef _gfn
#undef gfn_x
#endif

static inline gfn_t gfn_add(gfn_t gfn, unsigned long i)
{
    return _gfn(gfn_x(gfn) + i);
}

static inline gfn_t gfn_max(gfn_t x, gfn_t y)
{
    return _gfn(max(gfn_x(x), gfn_x(y)));
}

static inline gfn_t gfn_min(gfn_t x, gfn_t y)
{
    return _gfn(min(gfn_x(x), gfn_x(y)));
}

static inline bool_t gfn_eq(gfn_t x, gfn_t y)
{
    return gfn_x(x) == gfn_x(y);
}

TYPE_SAFE(unsigned long, pfn);
#define PRI_pfn          "05lx"
#define INVALID_PFN      (~0UL)

#ifndef pfn_t
#define pfn_t /* Grep fodder: pfn_t, _pfn() and pfn_x() are defined above */
#define _pfn
#define pfn_x
#undef pfn_t
#undef _pfn
#undef pfn_x
#endif

struct page_info;

void put_page(struct page_info *);
int get_page(struct page_info *, struct domain *);
struct domain *__must_check page_get_owner_and_reference(struct page_info *);

/* Boot-time allocator. Turns into generic allocator after bootstrap. */
void init_boot_pages(paddr_t ps, paddr_t pe);
mfn_t alloc_boot_pages(unsigned long nr_pfns, unsigned long pfn_align);
void end_boot_allocator(void);

/* Xen suballocator. These functions are interrupt-safe. */
void init_xenheap_pages(paddr_t ps, paddr_t pe);
void xenheap_max_mfn(unsigned long mfn);
void *alloc_xenheap_pages(unsigned int order, unsigned int memflags);
void free_xenheap_pages(void *v, unsigned int order);
bool scrub_free_pages(void);
#define alloc_xenheap_page() (alloc_xenheap_pages(0,0))
#define free_xenheap_page(v) (free_xenheap_pages(v,0))

/* Free an allocation, and zero the pointer to it. */
#define FREE_XENHEAP_PAGES(p, o) do { \
    free_xenheap_pages(p, o);         \
    (p) = NULL;                       \
} while ( false )
#define FREE_XENHEAP_PAGE(p) FREE_XENHEAP_PAGES(p, 0)
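
/*
 * Usage sketch (illustrative only): FREE_XENHEAP_PAGE() is preferred over a
 * bare free_xenheap_page() where the pointer outlives the free, since a
 * stale pointer can then neither be double-freed nor dereferenced:
 *
 *     void *v = alloc_xenheap_page();
 *     ...
 *     FREE_XENHEAP_PAGE(v);             v is NULL from here on
 */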

/* Map machine page range in Xen virtual address space. */
int map_pages_to_xen(
    unsigned long virt,
    mfn_t mfn,
    unsigned long nr_mfns,
    unsigned int flags);
/* Alter the permissions of a range of Xen virtual address space. */
int modify_xen_mappings(unsigned long s, unsigned long e, unsigned int flags);
int destroy_xen_mappings(unsigned long v, unsigned long e);
/*
 * Create only non-leaf page table entries for the
 * page range in Xen virtual address space.
 */
int populate_pt_range(unsigned long virt, unsigned long nr_mfns);
/* Claim handling */
unsigned long __must_check domain_adjust_tot_pages(struct domain *d,
    long pages);
int domain_set_outstanding_pages(struct domain *d, unsigned long pages);
void get_outstanding_claims(uint64_t *free_pages, uint64_t *outstanding_pages);

/* Domain suballocator. These functions are *not* interrupt-safe. */
void init_domheap_pages(paddr_t ps, paddr_t pe);
struct page_info *alloc_domheap_pages(
    struct domain *d, unsigned int order, unsigned int memflags);
void free_domheap_pages(struct page_info *pg, unsigned int order);
unsigned long avail_domheap_pages_region(
    unsigned int node, unsigned int min_width, unsigned int max_width);
unsigned long avail_domheap_pages(void);
unsigned long avail_node_heap_pages(unsigned int);
#define alloc_domheap_page(d,f) (alloc_domheap_pages(d,0,f))
#define free_domheap_page(p)  (free_domheap_pages(p,0))
unsigned int online_page(mfn_t mfn, uint32_t *status);
int offline_page(mfn_t mfn, int broken, uint32_t *status);
int query_page_offline(mfn_t mfn, uint32_t *status);

void heap_init_late(void);

int assign_pages(
    struct domain *d,
    struct page_info *pg,
    unsigned int order,
    unsigned int memflags);

/* Dump info to serial console */
void arch_dump_shared_mem_info(void);

/*
 * Extra fault info types which are used to further describe
 * the source of an access violation.
 */
typedef enum {
    npfec_kind_unknown, /* must be first */
    npfec_kind_in_gpt,  /* violation in guest page table */
    npfec_kind_with_gla /* violation with guest linear address */
} npfec_kind_t;

/*
 * Nested page fault exception codes.
 */
struct npfec {
    unsigned int read_access:1;
    unsigned int write_access:1;
    unsigned int insn_fetch:1;
    unsigned int present:1;
    unsigned int gla_valid:1;
    unsigned int kind:2;  /* npfec_kind_t */
};

/* memflags: */
#define _MEMF_no_refcount 0
#define  MEMF_no_refcount (1U<<_MEMF_no_refcount)
#define _MEMF_populate_on_demand 1
#define  MEMF_populate_on_demand (1U<<_MEMF_populate_on_demand)
#define _MEMF_no_dma      3
#define  MEMF_no_dma      (1U<<_MEMF_no_dma)
#define _MEMF_exact_node  4
#define  MEMF_exact_node  (1U<<_MEMF_exact_node)
#define _MEMF_no_owner    5
#define  MEMF_no_owner    (1U<<_MEMF_no_owner)
#define _MEMF_no_tlbflush 6
#define  MEMF_no_tlbflush (1U<<_MEMF_no_tlbflush)
#define _MEMF_no_icache_flush 7
#define  MEMF_no_icache_flush (1U<<_MEMF_no_icache_flush)
#define _MEMF_no_scrub    8
#define  MEMF_no_scrub    (1U<<_MEMF_no_scrub)
#define _MEMF_node        16
#define  MEMF_node_mask   ((1U << (8 * sizeof(nodeid_t))) - 1)
#define  MEMF_node(n)     ((((n) + 1) & MEMF_node_mask) << _MEMF_node)
#define  MEMF_get_node(f) ((((f) >> _MEMF_node) - 1) & MEMF_node_mask)
#define _MEMF_bits        24
#define  MEMF_bits(n)     ((n)<<_MEMF_bits)
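
/*
 * Worked example (illustrative): MEMF_node() stores the node biased by 1 so
 * that all-zero memflags decode to "no node encoded".  With an 8-bit
 * nodeid_t, MEMF_node_mask is 0xff and:
 *
 *     MEMF_node(3)           == ((3 + 1) & 0xff) << 16 == 0x40000
 *     MEMF_get_node(0x40000) == ((0x4 - 1) & 0xff)     == 3
 *     MEMF_get_node(0)       == ((0 - 1) & 0xff)       == 0xff
 *
 * A typical caller combines flags, e.g.
 *     alloc_domheap_pages(d, 0, MEMF_node(node) | MEMF_bits(32));
 * to request an order-0 page below 4GiB, preferably on the given node.
 */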

#ifdef CONFIG_PAGEALLOC_MAX_ORDER
#define MAX_ORDER CONFIG_PAGEALLOC_MAX_ORDER
#else
#define MAX_ORDER 20 /* 2^20 contiguous pages */
#endif

/* Private domain structs for DOMID_XEN, DOMID_IO, etc. */
extern struct domain *dom_xen, *dom_io;
#ifdef CONFIG_MEM_SHARING
extern struct domain *dom_cow;
#else
# define dom_cow NULL
#endif

#define page_list_entry list_head

#include <asm/mm.h>

static inline bool is_special_page(const struct page_info *page)
{
    return is_xen_heap_page(page) || (page->count_info & PGC_extra);
}

#ifndef page_list_entry
struct page_list_head
{
    struct page_info *next, *tail;
};
/* These must only have instances in struct page_info. */
# define page_list_entry

# define PAGE_LIST_NULL ((typeof(((struct page_info){}).list.next))~0)

# if !defined(pdx_to_page) && !defined(page_to_pdx)
#   define page_to_pdx page_to_mfn
#   define pdx_to_page mfn_to_page
# endif

# define PAGE_LIST_HEAD_INIT(name) { NULL, NULL }
# define PAGE_LIST_HEAD(name) \
    struct page_list_head name = PAGE_LIST_HEAD_INIT(name)
# define INIT_PAGE_LIST_HEAD(head) ((head)->tail = (head)->next = NULL)
# define INIT_PAGE_LIST_ENTRY(ent) ((ent)->prev = (ent)->next = PAGE_LIST_NULL)

static inline bool_t
page_list_empty(const struct page_list_head *head)
{
    return !head->next;
}
static inline struct page_info *
page_list_first(const struct page_list_head *head)
{
    return head->next;
}
static inline struct page_info *
page_list_last(const struct page_list_head *head)
{
    return head->tail;
}
static inline struct page_info *
page_list_next(const struct page_info *page,
               const struct page_list_head *head)
{
    return page != head->tail ? pdx_to_page(page->list.next) : NULL;
}
static inline struct page_info *
page_list_prev(const struct page_info *page,
               const struct page_list_head *head)
{
    return page != head->next ? pdx_to_page(page->list.prev) : NULL;
}
static inline void
page_list_add(struct page_info *page, struct page_list_head *head)
{
    if ( head->next )
    {
        page->list.next = page_to_pdx(head->next);
        head->next->list.prev = page_to_pdx(page);
    }
    else
    {
        head->tail = page;
        page->list.next = PAGE_LIST_NULL;
    }
    page->list.prev = PAGE_LIST_NULL;
    head->next = page;
}
static inline void
page_list_add_tail(struct page_info *page, struct page_list_head *head)
{
    page->list.next = PAGE_LIST_NULL;
    if ( head->next )
    {
        page->list.prev = page_to_pdx(head->tail);
        head->tail->list.next = page_to_pdx(page);
    }
    else
    {
        page->list.prev = PAGE_LIST_NULL;
        head->next = page;
    }
    head->tail = page;
}
static inline bool_t
__page_list_del_head(struct page_info *page, struct page_list_head *head,
                     struct page_info *next, struct page_info *prev)
{
    if ( head->next == page )
    {
        if ( head->tail != page )
        {
            next->list.prev = PAGE_LIST_NULL;
            head->next = next;
        }
        else
            head->tail = head->next = NULL;
        return 1;
    }

    if ( head->tail == page )
    {
        prev->list.next = PAGE_LIST_NULL;
        head->tail = prev;
        return 1;
    }

    return 0;
}
static inline void
page_list_del(struct page_info *page, struct page_list_head *head)
{
    struct page_info *next = pdx_to_page(page->list.next);
    struct page_info *prev = pdx_to_page(page->list.prev);

    if ( !__page_list_del_head(page, head, next, prev) )
    {
        next->list.prev = page->list.prev;
        prev->list.next = page->list.next;
    }
}
static inline void
page_list_del2(struct page_info *page, struct page_list_head *head1,
               struct page_list_head *head2)
{
    struct page_info *next = pdx_to_page(page->list.next);
    struct page_info *prev = pdx_to_page(page->list.prev);

    if ( !__page_list_del_head(page, head1, next, prev) &&
         !__page_list_del_head(page, head2, next, prev) )
    {
        next->list.prev = page->list.prev;
        prev->list.next = page->list.next;
    }
}
static inline struct page_info *
page_list_remove_head(struct page_list_head *head)
{
    struct page_info *page = head->next;

    if ( page )
        page_list_del(page, head);

    return page;
}
static inline void
page_list_move(struct page_list_head *dst, struct page_list_head *src)
{
    if ( !page_list_empty(src) )
    {
        *dst = *src;
        INIT_PAGE_LIST_HEAD(src);
    }
}
static inline void
page_list_splice(struct page_list_head *list, struct page_list_head *head)
{
    struct page_info *first, *last, *at;

    if ( page_list_empty(list) )
        return;

    if ( page_list_empty(head) )
    {
        head->next = list->next;
        head->tail = list->tail;
        return;
    }

    first = list->next;
    last = list->tail;
    at = head->next;

    ASSERT(first->list.prev == PAGE_LIST_NULL);
    ASSERT(first->list.prev == at->list.prev);
    head->next = first;

    last->list.next = page_to_pdx(at);
    at->list.prev = page_to_pdx(last);
}

#define page_list_for_each(pos, head) \
    for ( pos = (head)->next; pos; pos = page_list_next(pos, head) )
#define page_list_for_each_safe(pos, tmp, head) \
    for ( pos = (head)->next; \
          pos ? (tmp = page_list_next(pos, head), 1) : 0; \
          pos = tmp )
#define page_list_for_each_safe_reverse(pos, tmp, head) \
    for ( pos = (head)->tail; \
          pos ? (tmp = page_list_prev(pos, head), 1) : 0; \
          pos = tmp )
#else
# define page_list_head                  list_head
# define PAGE_LIST_HEAD_INIT             LIST_HEAD_INIT
# define PAGE_LIST_HEAD                  LIST_HEAD
# define INIT_PAGE_LIST_HEAD             INIT_LIST_HEAD
# define INIT_PAGE_LIST_ENTRY            INIT_LIST_HEAD

static inline bool_t
page_list_empty(const struct page_list_head *head)
{
    return !!list_empty(head);
}
static inline struct page_info *
page_list_first(const struct page_list_head *head)
{
    return list_first_entry(head, struct page_info, list);
}
static inline struct page_info *
page_list_last(const struct page_list_head *head)
{
    return list_last_entry(head, struct page_info, list);
}
static inline struct page_info *
page_list_next(const struct page_info *page,
               const struct page_list_head *head)
{
    return list_entry(page->list.next, struct page_info, list);
}
static inline struct page_info *
page_list_prev(const struct page_info *page,
               const struct page_list_head *head)
{
    return list_entry(page->list.prev, struct page_info, list);
}
static inline void
page_list_add(struct page_info *page, struct page_list_head *head)
{
    list_add(&page->list, head);
}
static inline void
page_list_add_tail(struct page_info *page, struct page_list_head *head)
{
    list_add_tail(&page->list, head);
}
static inline void
page_list_del(struct page_info *page, struct page_list_head *head)
{
    list_del(&page->list);
}
static inline void
page_list_del2(struct page_info *page, struct page_list_head *head1,
               struct page_list_head *head2)
{
    list_del(&page->list);
}
static inline struct page_info *
page_list_remove_head(struct page_list_head *head)
{
    struct page_info *pg;

    if ( page_list_empty(head) )
        return NULL;

    pg = page_list_first(head);
    list_del(&pg->list);
    return pg;
}
static inline void
page_list_move(struct page_list_head *dst, struct page_list_head *src)
{
    if ( !list_empty(src) )
        list_replace_init(src, dst);
}
static inline void
page_list_splice(struct page_list_head *list, struct page_list_head *head)
{
    list_splice(list, head);
}

# define page_list_for_each(pos, head)   list_for_each_entry(pos, head, list)
# define page_list_for_each_safe(pos, tmp, head) \
    list_for_each_entry_safe(pos, tmp, head, list)
# define page_list_for_each_safe_reverse(pos, tmp, head) \
    list_for_each_entry_safe_reverse(pos, tmp, head, list)
#endif
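
/*
 * Iteration sketch (illustrative only): both implementations above expose
 * the same interface, so callers need not care whether entries are linked
 * via compressed pdx indices or regular list_heads.  Here some_condition()
 * is a hypothetical predicate and d some struct domain *:
 *
 *     struct page_info *pg, *tmp;
 *
 *     page_list_for_each_safe ( pg, tmp, &d->page_list )
 *         if ( some_condition(pg) )
 *             page_list_del(pg, &d->page_list);
 */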

static inline unsigned int get_order_from_bytes(paddr_t size)
{
    unsigned int order;

    size = (size - 1) >> PAGE_SHIFT;
    for ( order = 0; size; order++ )
        size >>= 1;

    return order;
}

static inline unsigned int get_order_from_pages(unsigned long nr_pages)
{
    unsigned int order;

    nr_pages--;
    for ( order = 0; nr_pages; order++ )
        nr_pages >>= 1;

    return order;
}
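
/*
 * Worked examples (assuming the usual 4k PAGE_SIZE): both helpers round up,
 * i.e. they return the smallest order such that 2^order pages cover the
 * request.
 *
 *     get_order_from_bytes(4096) == 0      exactly one page
 *     get_order_from_bytes(4097) == 1      rounds up to two pages
 *     get_order_from_pages(1)    == 0
 *     get_order_from_pages(3)    == 2      rounds up to four pages
 */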

void scrub_one_page(struct page_info *);

#ifndef arch_free_heap_page
#define arch_free_heap_page(d, pg) \
    page_list_del(pg, page_to_list(d, pg))
#endif

union add_to_physmap_extra {
    /*
     * XENMAPSPACE_gmfn: When deferring TLB flushes, a page reference needs
     * to be kept until after the flush, so the page can't get removed from
     * the domain (and re-used for another purpose) beforehand. By passing
     * non-NULL, the caller of xenmem_add_to_physmap_one() indicates it wants
     * to have ownership of such a reference transferred in the success case.
     */
    struct page_info **ppage;

    /* XENMAPSPACE_gmfn_foreign */
    domid_t foreign_domid;
};

int xenmem_add_to_physmap_one(struct domain *d, unsigned int space,
                              union add_to_physmap_extra extra,
                              unsigned long idx, gfn_t gfn);

int xenmem_add_to_physmap(struct domain *d, struct xen_add_to_physmap *xatp,
                          unsigned int start);

/* Return 0 on success, or negative on error. */
int __must_check guest_remove_page(struct domain *d, unsigned long gmfn);
int __must_check steal_page(struct domain *d, struct page_info *page,
                            unsigned int memflags);

#define RAM_TYPE_CONVENTIONAL 0x00000001
#define RAM_TYPE_RESERVED     0x00000002
#define RAM_TYPE_UNUSABLE     0x00000004
#define RAM_TYPE_ACPI         0x00000008
#define RAM_TYPE_UNKNOWN      0x00000010
/* TRUE if the whole page at @mfn is of the requested RAM type(s) above. */
int page_is_ram_type(unsigned long mfn, unsigned long mem_type);
/* Returns the page type(s). */
unsigned int page_get_ram_type(mfn_t mfn);
/* Prepare/destroy a ring for a dom0 helper. The helper will talk
 * with Xen on behalf of this domain. */
int prepare_ring_for_helper(struct domain *d, unsigned long gmfn,
                            struct page_info **_page, void **_va);
void destroy_ring_for_helper(void **_va, struct page_info *page);

/* Return the upper bound of MFNs, including hotplug memory. */
unsigned long get_upper_mfn_bound(void);

#include <asm/flushtlb.h>

static inline void accumulate_tlbflush(bool *need_tlbflush,
                                       const struct page_info *page,
                                       uint32_t *tlbflush_timestamp)
{
    if ( page->u.free.need_tlbflush &&
         page->tlbflush_timestamp <= tlbflush_current_time() &&
         (!*need_tlbflush ||
          page->tlbflush_timestamp > *tlbflush_timestamp) )
    {
        *need_tlbflush = true;
        *tlbflush_timestamp = page->tlbflush_timestamp;
    }
}

static inline void filtered_flush_tlb_mask(uint32_t tlbflush_timestamp)
{
    cpumask_t mask;

    cpumask_copy(&mask, &cpu_online_map);
    tlbflush_filter(&mask, tlbflush_timestamp);
    if ( !cpumask_empty(&mask) )
    {
        perfc_incr(need_flush_tlb_flush);
        arch_flush_tlb_mask(&mask);
    }
}
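
/*
 * Usage sketch (illustrative only): a caller recycling several pages
 * accumulates the newest relevant timestamp and issues one filtered flush
 * at the end, instead of one IPI per page:
 *
 *     bool need_tlbflush = false;
 *     uint32_t tlbflush_timestamp = 0;
 *
 *     ... for each page pg being recycled ...
 *         accumulate_tlbflush(&need_tlbflush, pg, &tlbflush_timestamp);
 *
 *     if ( need_tlbflush )
 *         filtered_flush_tlb_mask(tlbflush_timestamp);
 */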

enum XENSHARE_flags {
    SHARE_rw,
    SHARE_ro,
};
void share_xen_page_with_guest(struct page_info *page, struct domain *d,
                               enum XENSHARE_flags flags);

static inline void share_xen_page_with_privileged_guests(
    struct page_info *page, enum XENSHARE_flags flags)
{
    share_xen_page_with_guest(page, dom_xen, flags);
}

static inline void put_page_alloc_ref(struct page_info *page)
{
    /*
     * Whenever a page is assigned to a domain then the _PGC_allocated
     * bit is set and the reference count is set to at least 1. This
     * function clears that 'allocation reference' but it is unsafe to
     * do so to domheap pages without the caller holding an additional
     * reference. I.e. the allocation reference must never be the last
     * reference held.
     *
     * (It's safe for xenheap pages, because put_page() will not cause
     * them to be freed.)
     */
    if ( test_and_clear_bit(_PGC_allocated, &page->count_info) )
    {
        BUG_ON((page->count_info & (PGC_xen_heap | PGC_count_mask)) <= 1);
        put_page(page);
    }
}
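
/*
 * Safe-usage sketch (illustrative only): when dropping the allocation
 * reference on a domheap page, hold a reference of your own across the
 * call so the allocation reference is never the last one:
 *
 *     if ( !get_page(page, d) )
 *         return -EINVAL;
 *     put_page_alloc_ref(page);         cannot drop the final reference
 *     put_page(page);                   may now actually free the page
 */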

#endif /* __XEN_MM_H__ */