
#ifndef __ASM_X86_MM_H__
#define __ASM_X86_MM_H__

#include <xen/list.h>
#include <xen/spinlock.h>
#include <xen/rwlock.h>
#include <asm/io.h>
#include <asm/uaccess.h>
#include <asm/x86_emulate.h>

/*
 * Per-page-frame information.
 *
 * Every architecture must ensure the following:
 * 1. 'struct page_info' contains a 'struct page_list_entry list'.
 * 2. Provide a PFN_ORDER() macro for accessing the order of a free page.
 */
#define PFN_ORDER(_pfn) ((_pfn)->v.free.order)

#define PG_shift(idx) (BITS_PER_LONG - (idx))
#define PG_mask(x, idx) (x ## UL << PG_shift(idx))
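/*
 * Worked example (x86-64, where BITS_PER_LONG is 64): PG_shift(3) == 61, so
 * PG_mask(7, 3) == 7UL << 61, i.e. bits 61-63 -- which is how PGT_type_mask
 * below ends up occupying the top three bits of type_info.
 */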

/* The following page types are MUTUALLY EXCLUSIVE. */
#define PGT_none PG_mask(0, 3) /* no special uses of this page */
#define PGT_l1_page_table PG_mask(1, 3) /* using as an L1 page table? */
#define PGT_l2_page_table PG_mask(2, 3) /* using as an L2 page table? */
#define PGT_l3_page_table PG_mask(3, 3) /* using as an L3 page table? */
#define PGT_l4_page_table PG_mask(4, 3) /* using as an L4 page table? */
#define PGT_seg_desc_page PG_mask(5, 3) /* using this page in a GDT/LDT? */
#define PGT_shared_page PG_mask(6, 3) /* CoW sharable page */
#define PGT_writable_page PG_mask(7, 3) /* has writable mappings? */
#define PGT_type_mask PG_mask(7, 3) /* Bits 61-63. */

/* Page is locked? */
#define _PGT_locked PG_shift(4)
#define PGT_locked PG_mask(1, 4)
/* Owning guest has pinned this page to its current type? */
#define _PGT_pinned PG_shift(5)
#define PGT_pinned PG_mask(1, 5)
/* Has this page been validated for use as its current type? */
#define _PGT_validated PG_shift(6)
#define PGT_validated PG_mask(1, 6)
/* PAE only: is this an L2 page directory containing Xen-private mappings? */
#define _PGT_pae_xen_l2 PG_shift(7)
#define PGT_pae_xen_l2 PG_mask(1, 7)
/* Has this page been *partially* validated for use as its current type? */
#define _PGT_partial PG_shift(8)
#define PGT_partial PG_mask(1, 8)

/* Count of uses of this frame as its current type. */
#define PGT_count_width PG_shift(8)
#define PGT_count_mask ((1UL<<PGT_count_width)-1)

/* Are the 'type mask' bits identical? */
#define PGT_type_equal(x, y) (!(((x) ^ (y)) & PGT_type_mask))
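/*
 * Illustrative example: only the PGT_type_mask bits are compared, so two
 * type_info values that differ solely in flag or count bits compare equal,
 * e.g. PGT_type_equal(PGT_l1_page_table | PGT_pinned, PGT_l1_page_table | 2)
 * is true.
 */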

/* Cleared when the owning guest 'frees' this page. */
#define _PGC_allocated PG_shift(1)
#define PGC_allocated PG_mask(1, 1)
/* Page is Xen heap? */
#define _PGC_xen_heap PG_shift(2)
#define PGC_xen_heap PG_mask(1, 2)
/* Set when a page is in use as a page table. */
#define _PGC_page_table PG_shift(3)
#define PGC_page_table PG_mask(1, 3)
/* 3-bit PAT/PCD/PWT cache-attribute hint. */
#define PGC_cacheattr_base PG_shift(6)
#define PGC_cacheattr_mask PG_mask(7, 6)
/* Page is broken? */
#define _PGC_broken PG_shift(7)
#define PGC_broken PG_mask(1, 7)
/* Mutually-exclusive page states: { inuse, offlining, offlined, free }. */
#define PGC_state PG_mask(3, 9)
#define PGC_state_inuse PG_mask(0, 9)
#define PGC_state_offlining PG_mask(1, 9)
#define PGC_state_offlined PG_mask(2, 9)
#define PGC_state_free PG_mask(3, 9)
#define page_state_is(pg, st) (((pg)->count_info&PGC_state) == PGC_state_##st)
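/*
 * Usage sketch: page_state_is(pg, free) expands to
 *   ((pg)->count_info & PGC_state) == PGC_state_free
 * i.e. it tests only the two PGC_state bits, ignoring flags and the refcount.
 */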
/* Page is not reference counted */
#define _PGC_extra PG_shift(10)
#define PGC_extra PG_mask(1, 10)

/* Count of references to this frame. */
#define PGC_count_width PG_shift(10)
#define PGC_count_mask ((1UL<<PGC_count_width)-1)

/*
 * Page needs to be scrubbed. Since this bit can only be set on a page that is
 * free (i.e. in PGC_state_free), we can reuse the PGC_allocated bit.
 */
#define _PGC_need_scrub _PGC_allocated
#define PGC_need_scrub PGC_allocated

#ifndef CONFIG_BIGMEM
/*
 * This definition is solely for the use in struct page_info (and
 * struct page_list_head), intended to allow easy adjustment once x86-64
 * wants to support more than 16TB.
 * 'unsigned long' should be used for MFNs everywhere else.
 */
#define __pdx_t unsigned int

#undef page_list_entry
struct page_list_entry
{
    __pdx_t next, prev;
};
#else
#define __pdx_t unsigned long
#endif

struct page_sharing_info;

struct page_info
{
    union {
        /* Each frame can be threaded onto a doubly-linked list.
         *
         * For unused shadow pages, a list of free shadow pages;
         * for multi-page shadows, links to the other pages in this shadow;
         * for pinnable shadows, if pinned, a list of all pinned shadows
         * (see sh_type_is_pinnable() for the definition of "pinnable"
         * shadow types). N.B. a shadow may be both pinnable and multi-page.
         * In that case the pages are inserted in order in the list of
         * pinned shadows and walkers of that list must be prepared
         * to keep them all together during updates.
         */
        struct page_list_entry list;
        /* For non-pinnable single-page shadows, a higher entry that points
         * at us. */
        paddr_t up;

#ifdef CONFIG_MEM_SHARING
        /* For shared/sharable pages, we use a doubly-linked list
         * of all the {pfn,domain} pairs that map this page. We also include
         * an opaque handle, which is effectively a version, so that clients
         * of sharing share the version they expect to.
         * This list is allocated and freed when a page is shared/unshared.
         */
        struct page_sharing_info *sharing;
#endif
    };

    /* Reference count and various PGC_xxx flags and fields. */
    unsigned long count_info;

    /* Context-dependent fields follow... */
    union {

        /* Page is in use: ((count_info & PGC_count_mask) != 0). */
        struct {
            /* Type reference count and various PGT_xxx flags and fields. */
            unsigned long type_info;
        } inuse;

        /* Page is in use as a shadow: count_info == 0. */
        struct {
            unsigned long type:5;   /* What kind of shadow is this? */
            unsigned long pinned:1; /* Is the shadow pinned? */
            unsigned long head:1;   /* Is this the first page of the shadow? */
#define PAGE_SH_REFCOUNT_WIDTH (PGT_count_width - 7)
            unsigned long count:PAGE_SH_REFCOUNT_WIDTH; /* Reference count */
        } sh;

        /* Page is on a free list: ((count_info & PGC_count_mask) == 0). */
        union {
            struct {
                /*
                 * Index of the first *possibly* unscrubbed page in the buddy.
                 * One more bit than maximum possible order to accommodate
                 * INVALID_DIRTY_IDX.
                 */
#define INVALID_DIRTY_IDX ((1UL << (MAX_ORDER + 1)) - 1)
                unsigned int first_dirty;

                /* Do TLBs need flushing for safety before next page use? */
                bool need_tlbflush;

#define BUDDY_NOT_SCRUBBING 0
#define BUDDY_SCRUBBING 1
#define BUDDY_SCRUB_ABORT 2
                uint8_t scrub_state;
            };

            unsigned long val;
        } free;

    } u;

    union {

        /* Page is in use, but not as a shadow. */
        struct {
            /* Owner of this page (zero if page is anonymous). */
            __pdx_t _domain;
        } inuse;

        /* Page is in use as a shadow. */
        struct {
            /* GMFN of guest page we're a shadow of. */
            __pdx_t back;
        } sh;

        /* Page is on a free list. */
        struct {
            /* Order-size of the free chunk this page is the head of. */
            unsigned int order;
        } free;

    } v;

    union {
        /*
         * Timestamp from 'TLB clock', used to avoid extra safety flushes.
         * Only valid for: a) free pages, and b) pages with zero type count
         * (except page table pages when the guest is in shadow mode).
         */
        u32 tlbflush_timestamp;

        /*
         * When PGT_partial is true then the first two fields are valid and
         * indicate that PTEs in the range [0, @nr_validated_ptes) have been
         * validated. An extra page reference must be acquired (or not dropped)
         * whenever PGT_partial gets set, and it must be dropped when the flag
         * gets cleared. This is so that a get() leaving a page in partially
         * validated state (where the caller would drop the reference acquired
         * due to the getting of the type [apparently] failing [-ERESTART])
         * would not accidentally result in a page left with zero general
         * reference count, but non-zero type reference count (possible when
         * the partial get() is followed immediately by domain destruction).
         * Likewise, the ownership of the single type reference for partially
         * (in-)validated pages is tied to this flag, i.e. the instance
         * setting the flag must not drop that reference, whereas the instance
         * clearing it will have to.
         *
         * If partial_flags & PTF_partial_set is set, then the page at
         * @nr_validated_ptes had PGT_partial set as a result of an
         * operation on the current page. (That page may or may not
         * still have PGT_partial set.)
         *
         * Additionally, if PTF_partial_set is set, then the PTE at
         * @nr_validated_ptes holds a general reference count for the
         * page.
         *
         * This happens:
         * - During validation or de-validation, if the operation was
         *   interrupted
         * - During validation, if an invalid entry is encountered and
         *   validation is preemptible
         * - During validation, if PTF_partial_set was set on this
         *   entry to begin with (perhaps because it picked up a
         *   previous operation)
         *
         * When resuming validation, if PTF_partial_set is clear, then
         * a general reference must be re-acquired; if it is set, no
         * reference should be acquired.
         *
         * When resuming de-validation, if PTF_partial_set is clear,
         * no reference should be dropped; if it is set, a reference
         * should be dropped.
         *
         * NB that PTF_partial_set is defined in mm.c, the only place
         * where it is used.
         *
         * The 3rd field, @linear_pt_count, indicates
         * - by a positive value, how many same-level page table entries a page
         *   table has,
         * - by a negative value, in how many same-level page tables a page is
         *   in use.
         */
        struct {
            u16 nr_validated_ptes:PAGETABLE_ORDER + 1;
            u16 :16 - PAGETABLE_ORDER - 1 - 1;
            u16 partial_flags:1;
            s16 linear_pt_count;
        };

        /*
         * Guest pages with a shadow. This does not conflict with
         * tlbflush_timestamp since page table pages are explicitly not
         * tracked for TLB-flush avoidance when a guest runs in shadow mode.
         *
         * pagetable_dying is used for HVM domains only. The layout here has
         * to avoid re-use of the space used by linear_pt_count, which (only)
         * PV guests use.
         */
        struct {
            uint16_t shadow_flags;
#ifdef CONFIG_HVM
            bool pagetable_dying;
#endif
        };

        /* When in use as a shadow, next shadow in this hash chain. */
        __pdx_t next_shadow;
    };
};

#undef __pdx_t

#define is_xen_heap_page(page) ((page)->count_info & PGC_xen_heap)
#define is_xen_heap_mfn(mfn) \
    (mfn_valid(mfn) && is_xen_heap_page(mfn_to_page(mfn)))
#define is_xen_fixed_mfn(mfn) \
    (((mfn_to_maddr(mfn)) >= __pa(_stext)) && \
     ((mfn_to_maddr(mfn)) <= __pa(__2M_rwdata_end - 1)))

#define PRtype_info "016lx" /* should only be used for printk's */

/* The number of out-of-sync shadows we allow per vcpu (prime, please) */
#define SHADOW_OOS_PAGES 3

/* OOS fixup entries */
#define SHADOW_OOS_FIXUPS 2

#define page_get_owner(_p) \
    ((struct domain *)((_p)->v.inuse._domain ? \
                       pdx_to_virt((_p)->v.inuse._domain) : NULL))
#define page_set_owner(_p,_d) \
    ((_p)->v.inuse._domain = (_d) ? virt_to_pdx(_d) : 0)

#define maddr_get_owner(ma) (page_get_owner(maddr_to_page((ma))))

#define frame_table ((struct page_info *)FRAMETABLE_VIRT_START)
extern unsigned long max_page;
extern unsigned long total_pages;
void init_frametable(void);

#define PDX_GROUP_SHIFT L2_PAGETABLE_SHIFT

/* Convert between Xen-heap virtual addresses and page-info structures. */
static inline struct page_info *__virt_to_page(const void *v)
{
    unsigned long va = (unsigned long)v;

    ASSERT(va >= XEN_VIRT_START);
    ASSERT(va < DIRECTMAP_VIRT_END);
    if ( va < XEN_VIRT_END )
        va += DIRECTMAP_VIRT_START - XEN_VIRT_START + xen_phys_start;
    else
        ASSERT(va >= DIRECTMAP_VIRT_START);
    return frame_table + ((va - DIRECTMAP_VIRT_START) >> PAGE_SHIFT);
}

static inline void *__page_to_virt(const struct page_info *pg)
{
    ASSERT((unsigned long)pg - FRAMETABLE_VIRT_START < FRAMETABLE_SIZE);
    /*
     * (sizeof(*pg) & -sizeof(*pg)) selects the LS bit of sizeof(*pg). The
     * division and re-multiplication avoids one shift when sizeof(*pg) is a
     * power of two (otherwise there would be a right shift followed by a
     * left shift, which the compiler can't know it can fold into one).
     */
    return (void *)(DIRECTMAP_VIRT_START +
                    ((unsigned long)pg - FRAMETABLE_VIRT_START) /
                    (sizeof(*pg) / (sizeof(*pg) & -sizeof(*pg))) *
                    (PAGE_SIZE / (sizeof(*pg) & -sizeof(*pg))));
}
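
/*
 * Worked example (hypothetical size): if sizeof(*pg) were 64 bytes (a power
 * of two), then (sizeof(*pg) & -sizeof(*pg)) == 64, the divisor collapses to
 * 1, and the expression above reduces to
 *   DIRECTMAP_VIRT_START + page_index * PAGE_SIZE
 * where page_index == ((unsigned long)pg - FRAMETABLE_VIRT_START) / 64.
 */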

int devalidate_page(struct page_info *page, unsigned long type,
                    int preemptible);

void init_xen_pae_l2_slots(l2_pgentry_t *l2t, const struct domain *d);
void init_xen_l4_slots(l4_pgentry_t *l4t, mfn_t l4mfn,
                       const struct domain *d, mfn_t sl4mfn, bool ro_mpt);
bool fill_ro_mpt(mfn_t mfn);
void zap_ro_mpt(mfn_t mfn);

bool is_iomem_page(mfn_t mfn);

struct platform_bad_page {
    unsigned long mfn;
    unsigned int order;
};

const struct platform_bad_page *get_platform_badpages(unsigned int *array_size);

/* Per page locks:
 * page_lock() is used for pte serialization.
 *
 * All users of page lock for pte serialization live in mm.c, use it
 * to lock a page table page during pte updates, do not take other locks within
 * the critical section delimited by page_lock/unlock, and perform no
 * nesting.
 *
 * The use of PGT_locked in mem_sharing does not collide, since mem_sharing is
 * only supported for hvm guests, which do not have PV PTEs updated.
 */
int page_lock(struct page_info *page);
void page_unlock(struct page_info *page);
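/*
 * Minimal usage sketch (illustrative only; the real callers live in mm.c,
 * and a non-zero return from page_lock() is assumed to indicate success):
 *
 *     if ( page_lock(pg) )
 *     {
 *         ... update PTEs within this page table page ...
 *         page_unlock(pg);
 *     }
 */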

void put_page_type(struct page_info *page);
int get_page_type(struct page_info *page, unsigned long type);
int put_page_type_preemptible(struct page_info *page);
int get_page_type_preemptible(struct page_info *page, unsigned long type);
int put_old_guest_table(struct vcpu *);
int get_page_from_l1e(
    l1_pgentry_t l1e, struct domain *l1e_owner, struct domain *pg_owner);
void put_page_from_l1e(l1_pgentry_t l1e, struct domain *l1e_owner);

static inline struct page_info *get_page_from_mfn(mfn_t mfn, struct domain *d)
{
    struct page_info *page = mfn_to_page(mfn);

    if ( unlikely(!mfn_valid(mfn)) || unlikely(!get_page(page, d)) )
    {
        gdprintk(XENLOG_WARNING,
                 "Could not get page ref for mfn %"PRI_mfn"\n", mfn_x(mfn));
        return NULL;
    }

    return page;
}
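
/*
 * Typical usage sketch (illustrative only): the reference obtained here must
 * be dropped with put_page() once the caller is done with the page.
 *
 *     struct page_info *pg = get_page_from_mfn(mfn, d);
 *
 *     if ( !pg )
 *         return -EINVAL;
 *     ... access the page ...
 *     put_page(pg);
 */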

static inline void put_page_and_type(struct page_info *page)
{
    put_page_type(page);
    put_page(page);
}

static inline int put_page_and_type_preemptible(struct page_info *page)
{
    int rc = put_page_type_preemptible(page);

    if ( likely(rc == 0) )
        put_page(page);
    return rc;
}

static inline int get_page_and_type(struct page_info *page,
                                    struct domain *domain,
                                    unsigned long type)
{
    int rc = get_page(page, domain);

    if ( likely(rc) && unlikely(!get_page_type(page, type)) )
    {
        put_page(page);
        rc = 0;
    }

    return rc;
}
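
/*
 * Usage sketch (illustrative only): a successful get_page_and_type() pairs
 * with put_page_and_type(), which drops both the type and general references.
 *
 *     if ( !get_page_and_type(pg, d, PGT_writable_page) )
 *         return -EINVAL;
 *     ... write to the page ...
 *     put_page_and_type(pg);
 */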

#define ASSERT_PAGE_IS_TYPE(_p, _t) \
    ASSERT(((_p)->u.inuse.type_info & PGT_type_mask) == (_t)); \
    ASSERT(((_p)->u.inuse.type_info & PGT_count_mask) != 0)
#define ASSERT_PAGE_IS_DOMAIN(_p, _d) \
    ASSERT(((_p)->count_info & PGC_count_mask) != 0); \
    ASSERT(page_get_owner(_p) == (_d))

int check_descriptor(const struct domain *d, seg_desc_t *desc);

extern paddr_t mem_hotplug;

/******************************************************************************
 * With shadow pagetables, the different kinds of address start
 * to get confusing.
 *
 * Virtual addresses are what they usually are: the addresses that are used
 * to access memory while the guest is running. The MMU translates from
 * virtual addresses to machine addresses.
 *
 * (Pseudo-)physical addresses are the abstraction of physical memory the
 * guest uses for allocation and so forth. For the purposes of this code,
 * we can largely ignore them.
 *
 * Guest frame numbers (gfns) are the entries that the guest puts in its
 * pagetables. For normal paravirtual guests, they are actual frame numbers,
 * with the translation done by the guest.
 *
 * Machine frame numbers (mfns) are the entries that the hypervisor puts
 * in the shadow page tables.
 *
 * Elsewhere in the xen code base, the name "gmfn" is generally used to refer
 * to a "machine frame number, from the guest's perspective", or in other
 * words, pseudo-physical frame numbers. However, in the shadow code, the
 * term "gmfn" means "the mfn of a guest page"; this combines naturally with
 * other terms such as "smfn" (the mfn of a shadow page), gl2mfn (the mfn of a
 * guest L2 page), etc...
 */

/*
 * The MPT (machine->physical mapping table) is an array of word-sized
 * values, indexed on machine frame number. It is expected that guest OSes
 * will use it to store a "physical" frame number to give the appearance of
 * contiguous (or near contiguous) physical memory.
 */
#undef machine_to_phys_mapping
#define machine_to_phys_mapping ((unsigned long *)RDWR_MPT_VIRT_START)
#define INVALID_M2P_ENTRY (~0UL)
#define VALID_M2P(_e) (!((_e) & (1UL<<(BITS_PER_LONG-1))))
#define SHARED_M2P_ENTRY (~0UL - 1UL)
#define SHARED_M2P(_e) ((_e) == SHARED_M2P_ENTRY)
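/*
 * Lookup sketch (illustrative only): translating an MFN via the M2P table.
 *
 *     unsigned long entry = machine_to_phys_mapping[mfn];
 *
 *     if ( SHARED_M2P(entry) )
 *         ... page is shared, no single backing gpfn ...
 *     else if ( !VALID_M2P(entry) )
 *         ... no valid translation recorded ...
 *     else
 *         gpfn = entry;
 */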

#define compat_machine_to_phys_mapping ((unsigned int *)RDWR_COMPAT_MPT_VIRT_START)
#define _set_gpfn_from_mfn(mfn, pfn) ({ \
    struct domain *d = page_get_owner(mfn_to_page(_mfn(mfn))); \
    unsigned long entry = (d && (d == dom_cow)) ? \
        SHARED_M2P_ENTRY : (pfn); \
    ((void)((mfn) >= (RDWR_COMPAT_MPT_VIRT_END - RDWR_COMPAT_MPT_VIRT_START) / 4 || \
            (compat_machine_to_phys_mapping[(mfn)] = (unsigned int)(entry))), \
     machine_to_phys_mapping[(mfn)] = (entry)); \
    })

/*
 * Disable some users of set_gpfn_from_mfn() (e.g., free_heap_pages()) until
 * the machine_to_phys_mapping is actually set up.
 */
extern bool machine_to_phys_mapping_valid;
#define set_gpfn_from_mfn(mfn, pfn) do { \
    if ( machine_to_phys_mapping_valid ) \
        _set_gpfn_from_mfn(mfn, pfn); \
} while (0)

extern struct rangeset *mmio_ro_ranges;

#define get_gpfn_from_mfn(mfn) (machine_to_phys_mapping[(mfn)])

#define mfn_to_gmfn(_d, mfn) \
    ( (paging_mode_translate(_d)) \
      ? get_gpfn_from_mfn(mfn) \
      : (mfn) )

#define compat_pfn_to_cr3(pfn) (((unsigned)(pfn) << 12) | ((unsigned)(pfn) >> 20))
#define compat_cr3_to_pfn(cr3) (((unsigned)(cr3) >> 12) | ((unsigned)(cr3) << 20))
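/*
 * Worked example: for pfn 0x123456, compat_pfn_to_cr3() yields
 *   (0x123456 << 12) | (0x123456 >> 20) == 0x23456000 | 0x1 == 0x23456001
 * (in 32-bit arithmetic), and compat_cr3_to_pfn(0x23456001) recovers
 *   (0x23456001 >> 12) | (0x23456001 << 20) == 0x23456 | 0x100000 == 0x123456.
 */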

#ifdef MEMORY_GUARD
void memguard_guard_range(void *p, unsigned long l);
void memguard_unguard_range(void *p, unsigned long l);
#else
#define memguard_guard_range(_p,_l) ((void)0)
#define memguard_unguard_range(_p,_l) ((void)0)
#endif

void memguard_guard_stack(void *p);
void memguard_unguard_stack(void *p);

struct mmio_ro_emulate_ctxt {
    unsigned long cr2;
    unsigned int seg, bdf;
};

extern int mmio_ro_emulated_write(enum x86_segment seg,
                                  unsigned long offset,
                                  void *p_data,
                                  unsigned int bytes,
                                  struct x86_emulate_ctxt *ctxt);
extern int mmcfg_intercept_write(enum x86_segment seg,
                                 unsigned long offset,
                                 void *p_data,
                                 unsigned int bytes,
                                 struct x86_emulate_ctxt *ctxt);

int audit_adjust_pgtables(struct domain *d, int dir, int noisy);

extern int pagefault_by_memadd(unsigned long addr, struct cpu_user_regs *regs);
extern int handle_memadd_fault(unsigned long addr, struct cpu_user_regs *regs);

#ifndef NDEBUG

#define AUDIT_SHADOW_ALREADY_LOCKED ( 1u << 0 )
#define AUDIT_ERRORS_OK ( 1u << 1 )
#define AUDIT_QUIET ( 1u << 2 )

void _audit_domain(struct domain *d, int flags);
#define audit_domain(_d) _audit_domain((_d), AUDIT_ERRORS_OK)
void audit_domains(void);

#else

#define _audit_domain(_d, _f) ((void)0)
#define audit_domain(_d) ((void)0)
#define audit_domains() ((void)0)

#endif

void make_cr3(struct vcpu *v, mfn_t mfn);
void update_cr3(struct vcpu *v);
int vcpu_destroy_pagetables(struct vcpu *);
void *do_page_walk(struct vcpu *v, unsigned long addr);

/* Allocator functions for Xen pagetables. */
void *alloc_xen_pagetable(void);
void free_xen_pagetable(void *v);
mfn_t alloc_xen_pagetable_new(void);
void free_xen_pagetable_new(mfn_t mfn);

l1_pgentry_t *virt_to_xen_l1e(unsigned long v);

int __sync_local_execstate(void);

/* Arch-specific portion of memory_op hypercall. */
long arch_memory_op(unsigned long cmd, XEN_GUEST_HANDLE_PARAM(void) arg);
long subarch_memory_op(unsigned long cmd, XEN_GUEST_HANDLE_PARAM(void) arg);
int compat_arch_memory_op(unsigned long cmd, XEN_GUEST_HANDLE_PARAM(void));
int compat_subarch_memory_op(int op, XEN_GUEST_HANDLE_PARAM(void));

#define NIL(type) ((type *)-sizeof(type))
#define IS_NIL(ptr) (!((uintptr_t)(ptr) + sizeof(*(ptr))))
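/*
 * For example (illustrative only): NIL(struct page_info) is the non-NULL
 * sentinel (struct page_info *)-sizeof(struct page_info); adding the size
 * back wraps to zero, which is exactly what IS_NIL() tests for. A NULL
 * pointer of the same type does not satisfy IS_NIL(), so the two sentinels
 * remain distinguishable.
 */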

int create_perdomain_mapping(struct domain *, unsigned long va,
                             unsigned int nr, l1_pgentry_t **,
                             struct page_info **);
void destroy_perdomain_mapping(struct domain *, unsigned long va,
                               unsigned int nr);
void free_perdomain_mappings(struct domain *);

extern int memory_add(unsigned long spfn, unsigned long epfn, unsigned int pxm);

void domain_set_alloc_bitsize(struct domain *d);
unsigned int domain_clamp_alloc_bitsize(struct domain *d, unsigned int bits);

unsigned long domain_get_maximum_gpfn(struct domain *d);

/* Definition of an mm lock: spinlock with extra fields for debugging */
typedef struct mm_lock {
    spinlock_t lock;
    int unlock_level;
    int locker;                  /* processor which holds the lock */
    const char *locker_function; /* func that took it */
} mm_lock_t;

typedef struct mm_rwlock {
    percpu_rwlock_t lock;
    int unlock_level;
    int recurse_count;
    int locker;                  /* CPU that holds the write lock */
    const char *locker_function; /* func that took it */
} mm_rwlock_t;

#define arch_free_heap_page(d, pg) \
    page_list_del2(pg, page_to_list(d, pg), &(d)->arch.relmem_list)

extern const char zero_page[];

/* Build a 32bit PSE page table using 4MB pages. */
void write_32bit_pse_identmap(uint32_t *l2);

/*
 * x86 maps part of physical memory via the directmap region.
 * Return whether the input MFN falls in that range.
 */
static inline bool arch_mfn_in_directmap(unsigned long mfn)
{
    unsigned long eva = min(DIRECTMAP_VIRT_END, HYPERVISOR_VIRT_END);

    return mfn <= (virt_to_mfn(eva - 1) + 1);
}

int arch_acquire_resource(struct domain *d, unsigned int type,
                          unsigned int id, unsigned long frame,
                          unsigned int nr_frames, xen_pfn_t mfn_list[]);

#endif /* __ASM_X86_MM_H__ */