#ifndef __ASM_DOMAIN_H__
#define __ASM_DOMAIN_H__

#include <xen/mm.h>
#include <xen/radix-tree.h>
#include <asm/hvm/vcpu.h>
#include <asm/hvm/domain.h>
#include <asm/e820.h>
#include <asm/mce.h>
#include <asm/vpmu.h>
#include <asm/x86_emulate.h>
#include <public/vcpu.h>
#include <public/hvm/hvm_info_table.h>

#define has_32bit_shinfo(d) ((d)->arch.has_32bit_shinfo)

#define is_hvm_pv_evtchn_domain(d) (is_hvm_domain(d) && \
        (d)->arch.hvm.irq->callback_via_type == HVMIRQ_callback_vector)
#define is_hvm_pv_evtchn_vcpu(v) (is_hvm_pv_evtchn_domain(v->domain))
#define is_domain_direct_mapped(d) ((void)(d), 0)
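/*
 * Note: x86 has no direct-mapped domains, so the predicate above is a
 * constant 0; the (void)(d) merely keeps the argument evaluated and
 * type-checked.
 */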

#define VCPU_TRAP_NONE 0
#define VCPU_TRAP_NMI  1
#define VCPU_TRAP_MCE  2
#define VCPU_TRAP_LAST VCPU_TRAP_MCE

#define nmi_state async_exception_state(VCPU_TRAP_NMI)
#define mce_state async_exception_state(VCPU_TRAP_MCE)

#define nmi_pending nmi_state.pending
#define mce_pending mce_state.pending
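/*
 * Together with the async_exception_state() helper macro defined next to
 * struct arch_vcpu below, v->arch.nmi_pending expands to
 * v->arch.async_exception_state[VCPU_TRAP_NMI - 1].pending, i.e. the
 * VCPU_TRAP_* constants are 1-based indices into that array.
 */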

struct trap_bounce {
    uint32_t      error_code;
    uint8_t       flags; /* TBF_ */
    uint16_t      cs;
    unsigned long eip;
};

#define MAPHASH_ENTRIES 8
#define MAPHASH_HASHFN(pfn) ((pfn) & (MAPHASH_ENTRIES-1))
#define MAPHASHENT_NOTINUSE ((u32)~0U)
struct mapcache_vcpu {
    /* Shadow of mapcache_domain.epoch. */
    unsigned int shadow_epoch;

    /* Lock-free per-VCPU hash of recently-used mappings. */
    struct vcpu_maphash_entry {
        unsigned long mfn;
        uint32_t      idx;
        uint32_t      refcnt;
    } hash[MAPHASH_ENTRIES];
};
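/*
 * The hash function is a plain power-of-two modulus. Illustrative example
 * (not from the original header): MAPHASH_HASHFN(0x1234) == (0x1234 & 7)
 * == 4, so PFN 0x1234 is always cached in hash[4] of the mapping vCPU.
 */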

struct mapcache_domain {
    /* The number of array entries, and a cursor into the array. */
    unsigned int entries;
    unsigned int cursor;

    /* Protects map_domain_page(). */
    spinlock_t lock;

    /* Garbage mappings are flushed from TLBs in batches called 'epochs'. */
    unsigned int epoch;
    u32 tlbflush_timestamp;

    /* Which mappings are in use, and which are garbage to reap next epoch? */
    unsigned long *inuse;
    unsigned long *garbage;
};
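/*
 * Rough sketch of how these fields interact (the authoritative logic lives
 * in the map_domain_page() implementation, not in this header): a miss in
 * the per-vCPU hash takes 'lock' and scans 'inuse' from 'cursor' for a free
 * slot; once free slots run out, 'epoch' is bumped and the accumulated
 * 'garbage' mappings are flushed from the TLBs so their slots can be
 * recycled. mapcache_vcpu.shadow_epoch records the epoch a vCPU last
 * observed, allowing stale cached hash entries to be detected.
 */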

int mapcache_domain_init(struct domain *);
int mapcache_vcpu_init(struct vcpu *);
void mapcache_override_current(struct vcpu *);

/* x86/64: toggle guest between kernel and user modes. */
void toggle_guest_mode(struct vcpu *);
/* x86/64: toggle guest page tables between kernel and user modes. */
void toggle_guest_pt(struct vcpu *);

void cpuid_policy_updated(struct vcpu *v);

/*
 * Initialise a hypercall-transfer page. The given pointer must be mapped
 * in Xen virtual address space (accesses are not validated or checked).
 */
void init_hypercall_page(struct domain *d, void *);

/************************************************/
/*          shadow paging extension             */
/************************************************/
struct shadow_domain {
#ifdef CONFIG_SHADOW_PAGING
    unsigned int opt_flags;  /* runtime tunable optimizations on/off */
    struct page_list_head pinned_shadows;

    /* Memory allocation */
    struct page_list_head freelist;
    unsigned int total_pages; /* number of pages allocated */
    unsigned int free_pages;  /* number of pages on freelists */
    unsigned int p2m_pages;   /* number of pages allocated to p2m */

    /* 1-to-1 map for use when HVM vcpus have paging disabled */
    pagetable_t unpaged_pagetable;

    /* Reflects guest page-table dirty status: incremented by write
     * emulation and by the removal of write permissions. */
    atomic_t gtable_dirty_version;

    /* Shadow hashtable */
    struct page_info **hash_table;
    bool_t hash_walking;  /* Some function is walking the hash table */

    /* Fast MMIO path heuristic */
    bool has_fast_mmio_entries;

    /* OOS */
    bool_t oos_active;

#ifdef CONFIG_HVM
    /* Has this domain ever used HVMOP_pagetable_dying? */
    bool_t pagetable_dying_op;
#endif

#ifdef CONFIG_PV
    /* PV L1 Terminal Fault mitigation. */
    struct tasklet pv_l1tf_tasklet;
#endif /* CONFIG_PV */
#endif
};

struct shadow_vcpu {
#ifdef CONFIG_SHADOW_PAGING
    /* PAE guests: per-vcpu shadow top-level table */
    l3_pgentry_t l3table[4] __attribute__((__aligned__(32)));
    /* PAE guests: per-vcpu cache of the top-level *guest* entries */
    l3_pgentry_t gl3e[4] __attribute__((__aligned__(32)));
    /* Last MFN that we emulated a write to, used by the unshadow heuristics. */
    unsigned long last_emulated_mfn_for_unshadow;
    /* MFN of the last shadow that we shot a writeable mapping in */
    unsigned long last_writeable_pte_smfn;
#ifdef CONFIG_HVM
    /* Last frame number that we emulated a write to. */
    unsigned long last_emulated_frame;
    /* Last MFN that we successfully emulated a write to */
    unsigned long last_emulated_mfn;
#endif

    /* Shadow out-of-sync: pages that this vcpu has let go out of sync */
    mfn_t oos[SHADOW_OOS_PAGES];
    mfn_t oos_snapshot[SHADOW_OOS_PAGES];
    struct oos_fixup {
        int next;
        mfn_t smfn[SHADOW_OOS_FIXUPS];
        unsigned long off[SHADOW_OOS_FIXUPS];
    } oos_fixup[SHADOW_OOS_PAGES];

#ifdef CONFIG_HVM
    bool_t pagetable_dying;
#endif
#endif
};

/************************************************/
/*          hardware assisted paging            */
/************************************************/
struct hap_domain {
    struct page_list_head freelist;
    unsigned int total_pages; /* number of pages allocated */
    unsigned int free_pages;  /* number of pages on freelists */
    unsigned int p2m_pages;   /* number of pages allocated to p2m */
};

/************************************************/
/*         common paging data structure         */
/************************************************/
struct log_dirty_domain {
    /* log-dirty radix tree to record dirty pages */
    mfn_t        top;
    unsigned int allocs;
    unsigned int failed_allocs;

    /* log-dirty mode stats */
    unsigned int fault_count;
    unsigned int dirty_count;

    /* functions which are paging mode specific */
    const struct log_dirty_ops {
        int  (*enable)(struct domain *d, bool log_global);
        int  (*disable)(struct domain *d);
        void (*clean)(struct domain *d);
    } *ops;
};

struct paging_domain {
    /* paging lock */
    mm_lock_t lock;

    /* flags to control paging operation */
    u32 mode;
    /* Has the pool backing p2m allocations ever run out of memory? */
    bool_t p2m_alloc_failed;
    /* extension for shadow paging support */
    struct shadow_domain shadow;
    /* extension for hardware-assisted paging */
    struct hap_domain hap;
    /* log dirty support */
    struct log_dirty_domain log_dirty;

    /* preemption handling */
    struct {
        const struct domain *dom;
        unsigned int op;
        union {
            struct {
                unsigned long done:PADDR_BITS - PAGE_SHIFT;
                unsigned long i4:PAGETABLE_ORDER;
                unsigned long i3:PAGETABLE_ORDER;
            } log_dirty;
        };
    } preempt;

    /* alloc/free pages from the pool for paging-assistance structures
     * (used by p2m and log-dirty code for their tries) */
    struct page_info * (*alloc_page)(struct domain *d);
    void (*free_page)(struct domain *d, struct page_info *pg);
};

struct paging_vcpu {
    /* Pointers to mode-specific entry points. */
    const struct paging_mode *mode;
    /* Nested Virtualization: paging mode of nested guest */
    const struct paging_mode *nestedmode;
#ifdef CONFIG_HVM
    /* HVM guest: last emulated write was to a pagetable */
    unsigned int last_write_was_pt:1;
    /* HVM guest: last write emulation succeeded */
    unsigned int last_write_emul_ok:1;
#endif
    /* Translated guest: virtual TLB */
    struct shadow_vtlb *vtlb;
    spinlock_t vtlb_lock;

    /* paging support extension */
    struct shadow_vcpu shadow;
};

#define MAX_NESTEDP2M 10

#define MAX_ALTP2M     10 /* arbitrary */
#define INVALID_ALTP2M 0xffff
#define MAX_EPTP       (PAGE_SIZE / sizeof(uint64_t))
struct p2m_domain;
struct time_scale {
    int shift;
    u32 mul_frac;
};

struct pv_domain
{
    l1_pgentry_t **gdt_ldt_l1tab;

    atomic_t nr_l4_pages;

    /* Is a 32-bit PV guest? */
    bool is_32bit;
    /* XPTI active? */
    bool xpti;
    /* Use PCID feature? */
    bool pcid;
    /* Mitigate L1TF with shadow/crashing? */
    bool check_l1tf;

    /* map_domain_page() mapping cache. */
    struct mapcache_domain mapcache;

    struct cpuidmasks *cpuidmasks;
};

struct monitor_write_data {
    struct {
        unsigned int msr : 1;
        unsigned int cr0 : 1;
        unsigned int cr3 : 1;
        unsigned int cr4 : 1;
    } do_write;

    bool cr3_noflush;

    uint32_t msr;
    uint64_t value;
    uint64_t cr0;
    uint64_t cr3;
    uint64_t cr4;
};
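/*
 * Illustrative note (the consuming logic lives in the vm_event/monitor
 * code, not in this header): when a monitor subscriber intercepts a CR or
 * MSR write, the requested value can be parked here and the matching
 * do_write bit set, so the deferred write is applied on the vCPU's behalf
 * once the event has been handled.
 */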

struct arch_domain
{
    struct page_info *perdomain_l3_pg;

    unsigned int hv_compat_vstart;

    /* Maximum physical-address bitwidth supported by this guest. */
    unsigned int physaddr_bitsize;

    /* I/O-port admin-specified access capabilities. */
    struct rangeset *ioport_caps;
    uint32_t pci_cf8;
    uint8_t cmos_idx;

    union {
        struct pv_domain pv;
        struct hvm_domain hvm;
    };

    struct paging_domain paging;
    struct p2m_domain *p2m;
    /* To enforce lock ordering in the PoD code with respect to the
     * page_alloc lock */
    int page_alloc_unlock_level;

    /* Continuable domain_relinquish_resources(). */
    unsigned int rel_priv;
    struct page_list_head relmem_list;

    const struct arch_csw {
        void (*from)(struct vcpu *);
        void (*to)(struct vcpu *);
        void noreturn (*tail)(void);
    } *ctxt_switch;

#ifdef CONFIG_HVM
    /* nestedhvm: translate l2 guest physical to host physical */
    struct p2m_domain *nested_p2m[MAX_NESTEDP2M];
    mm_lock_t nested_p2m_lock;

    /* altp2m: allow multiple copies of host p2m */
    bool_t altp2m_active;
    struct p2m_domain *altp2m_p2m[MAX_ALTP2M];
    mm_lock_t altp2m_list_lock;
    uint64_t *altp2m_eptp;
    uint64_t *altp2m_visible_eptp;
#endif

    /* NB. protected by d->event_lock and by irq_desc[irq].lock */
    struct radix_tree_root irq_pirq;

    /* Is shared-info page in 32-bit format? */
    bool_t has_32bit_shinfo;

    /* Is PHYSDEVOP_eoi to automatically unmask the event channel? */
    bool_t auto_unmask;

    /*
     * The width of the FIP/FDP register in the FPU that needs to be
     * saved/restored during a context switch. This is needed because
     * the FPU can either: a) restore the 64-bit FIP/FDP and clear FCS
     * and FDS; or b) restore the 32-bit FIP/FDP (clearing the upper
     * 32-bits of FIP/FDP) and restore FCS/FDS.
     *
     * Which one is needed depends on the guest.
     *
     * This can be either: 8, 4 or 0. 0 means auto-detect the size
     * based on the width of FIP/FDP values that are written by the
     * guest.
     */
    uint8_t x87_fip_width;

    /* CPUID and MSR policy objects. */
    struct cpuid_policy *cpuid;
    struct msr_policy *msr;

    struct PITState vpit;

    /* TSC management (emulation, pv, scaling, stats) */
    int tsc_mode;         /* see include/asm-x86/time.h */
    bool_t vtsc;          /* tsc is emulated (may change after migrate) */
    s_time_t vtsc_last;   /* previous TSC value (guarantee monotonicity) */
    uint64_t vtsc_offset; /* adjustment for save/restore/migrate */
    uint32_t tsc_khz;     /* cached guest khz for certain emulated or
                             hardware TSC scaling cases */
    struct time_scale vtsc_to_ns; /* scaling for certain emulated or
                                     hardware TSC scaling cases */
    struct time_scale ns_to_vtsc; /* scaling for certain emulated or
                                     hardware TSC scaling cases */
    uint32_t incarnation; /* incremented every restore or live migrate
                             (possibly other cases in the future) */

    /* Pseudophysical e820 map (XENMEM_memory_map). */
    spinlock_t e820_lock;
    struct e820entry *e820;
    unsigned int nr_e820;

    /* RMID assigned to the domain for CMT */
    unsigned int psr_rmid;
    /* COS assigned to the domain for each socket */
    unsigned int *psr_cos_ids;

    /* Shared page for notifying that explicit PIRQ EOI is required. */
    unsigned long *pirq_eoi_map;
    unsigned long pirq_eoi_map_mfn;

    /* Arch-specific monitor options */
    struct {
        unsigned int write_ctrlreg_enabled : 4;
        unsigned int write_ctrlreg_sync : 4;
        unsigned int write_ctrlreg_onchangeonly : 4;
        unsigned int singlestep_enabled : 1;
        unsigned int software_breakpoint_enabled : 1;
        unsigned int debug_exception_enabled : 1;
        unsigned int debug_exception_sync : 1;
        unsigned int cpuid_enabled : 1;
        unsigned int descriptor_access_enabled : 1;
        unsigned int guest_request_userspace_enabled : 1;
        unsigned int emul_unimplemented_enabled : 1;
        /*
         * By default all events are sent.
         * This is used to filter out pagefaults.
         */
        unsigned int inguest_pagefault_disabled : 1;
        unsigned int control_register_values : 1;
        struct monitor_msr_bitmap *msr_bitmap;
        uint64_t write_ctrlreg_mask[4];
    } monitor;

    /* Mem_access emulation control */
    bool_t mem_access_emulate_each_rep;

    /* Emulated devices enabled bitmap. */
    uint32_t emulation_flags;
} __cacheline_aligned;

#ifdef CONFIG_HVM
#define X86_EMU_LAPIC    XEN_X86_EMU_LAPIC
#define X86_EMU_HPET     XEN_X86_EMU_HPET
#define X86_EMU_PM       XEN_X86_EMU_PM
#define X86_EMU_RTC      XEN_X86_EMU_RTC
#define X86_EMU_IOAPIC   XEN_X86_EMU_IOAPIC
#define X86_EMU_PIC      XEN_X86_EMU_PIC
#define X86_EMU_VGA      XEN_X86_EMU_VGA
#define X86_EMU_IOMMU    XEN_X86_EMU_IOMMU
#define X86_EMU_USE_PIRQ XEN_X86_EMU_USE_PIRQ
#define X86_EMU_VPCI     XEN_X86_EMU_VPCI
#else
#define X86_EMU_LAPIC    0
#define X86_EMU_HPET     0
#define X86_EMU_PM       0
#define X86_EMU_RTC      0
#define X86_EMU_IOAPIC   0
#define X86_EMU_PIC      0
#define X86_EMU_VGA      0
#define X86_EMU_IOMMU    0
#define X86_EMU_USE_PIRQ 0
#define X86_EMU_VPCI     0
#endif

#define X86_EMU_PIT      XEN_X86_EMU_PIT

/* This must match XEN_X86_EMU_ALL in xen.h */
#define X86_EMU_ALL      (X86_EMU_LAPIC | X86_EMU_HPET |   \
                          X86_EMU_PM | X86_EMU_RTC |       \
                          X86_EMU_IOAPIC | X86_EMU_PIC |   \
                          X86_EMU_VGA | X86_EMU_IOMMU |    \
                          X86_EMU_PIT | X86_EMU_USE_PIRQ | \
                          X86_EMU_VPCI)

#define has_vlapic(d)  (!!((d)->arch.emulation_flags & X86_EMU_LAPIC))
#define has_vhpet(d)   (!!((d)->arch.emulation_flags & X86_EMU_HPET))
#define has_vpm(d)     (!!((d)->arch.emulation_flags & X86_EMU_PM))
#define has_vrtc(d)    (!!((d)->arch.emulation_flags & X86_EMU_RTC))
#define has_vioapic(d) (!!((d)->arch.emulation_flags & X86_EMU_IOAPIC))
#define has_vpic(d)    (!!((d)->arch.emulation_flags & X86_EMU_PIC))
#define has_vvga(d)    (!!((d)->arch.emulation_flags & X86_EMU_VGA))
#define has_viommu(d)  (!!((d)->arch.emulation_flags & X86_EMU_IOMMU))
#define has_vpit(d)    (!!((d)->arch.emulation_flags & X86_EMU_PIT))
#define has_pirq(d)    (!!((d)->arch.emulation_flags & X86_EMU_USE_PIRQ))
#define has_vpci(d)    (!!((d)->arch.emulation_flags & X86_EMU_VPCI))
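/*
 * Usage sketch (illustrative only; vlapic_handle() is a hypothetical
 * callee, not a real Xen function):
 *
 *     if ( has_vlapic(d) )
 *         vlapic_handle(d);
 *
 * A domain whose emulation_flags equals X86_EMU_ALL satisfies every one of
 * the has_*() predicates above, while one with only X86_EMU_LAPIC set
 * satisfies has_vlapic() alone.
 */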

#define gdt_ldt_pt_idx(v) \
    ((v)->vcpu_id >> (PAGETABLE_ORDER - GDT_LDT_VCPU_SHIFT))
#define pv_gdt_ptes(v) \
    ((v)->domain->arch.pv.gdt_ldt_l1tab[gdt_ldt_pt_idx(v)] + \
     (((v)->vcpu_id << GDT_LDT_VCPU_SHIFT) & (L1_PAGETABLE_ENTRIES - 1)))
#define pv_ldt_ptes(v) (pv_gdt_ptes(v) + 16)
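/*
 * Reading the macros above (descriptive note, not in the original header):
 * pv_gdt_ptes(v) yields a pointer into the per-domain GDT/LDT L1 tables at
 * the first L1 entry reserved for this vCPU's GDT frames, and pv_ldt_ptes(v)
 * is simply 16 entries further on, i.e. each vCPU owns a block of 16 GDT
 * frame slots immediately followed by its LDT frame slots.
 */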

struct pv_vcpu
{
    /* map_domain_page() mapping cache. */
    struct mapcache_vcpu mapcache;

    unsigned int vgc_flags;

    struct trap_info *trap_ctxt;

    unsigned long gdt_frames[FIRST_RESERVED_GDT_PAGE];
    unsigned long ldt_base;
    unsigned int gdt_ents, ldt_ents;

    unsigned long kernel_ss, kernel_sp;
    unsigned long ctrlreg[8];

    unsigned long event_callback_eip;
    unsigned long failsafe_callback_eip;
    union {
        unsigned long syscall_callback_eip;
        struct {
            unsigned int event_callback_cs;
            unsigned int failsafe_callback_cs;
        };
    };

    unsigned long syscall32_callback_eip;
    unsigned long sysenter_callback_eip;
    unsigned short syscall32_callback_cs;
    unsigned short sysenter_callback_cs;
    bool_t syscall32_disables_events;
    bool_t sysenter_disables_events;

    /*
     * 64bit segment bases.
     *
     * FS and the active GS are always stale when the vCPU is in context, as
     * the guest can change them behind Xen's back with MOV SREG, or
     * WR{FS,GS}BASE on capable hardware.
     *
     * The inactive GS base is never stale, as guests can't use SWAPGS to
     * access it - all modification is performed by Xen either directly
     * (hypercall, #GP emulation), or indirectly (toggle_guest_mode()).
     *
     * The vCPU context switch path is optimised based on this fact, so any
     * path updating or swapping the inactive base must update the cached
     * value as well.
     *
     * Which GS base is active and inactive depends on whether the vCPU is in
     * user or kernel context.
     */
    unsigned long fs_base;
    unsigned long gs_base_kernel;
    unsigned long gs_base_user;
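    /*
     * Descriptive note (behaviour implemented outside this header): for a
     * 64-bit PV guest, toggle_guest_mode(), declared above, is the path that
     * flips the vCPU between kernel and user context, and is therefore also
     * the indirect way the active/inactive GS base roles get swapped, as
     * described in the comment above.
     */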

    /* Bounce information for propagating an exception to guest OS. */
    struct trap_bounce trap_bounce;

    /* I/O-port access bitmap. */
    XEN_GUEST_HANDLE(uint8) iobmp; /* Guest kernel vaddr of the bitmap. */
    unsigned int iobmp_limit; /* Number of ports represented in the bitmap. */
#define IOPL(val) MASK_INSR(val, X86_EFLAGS_IOPL)
    unsigned int iopl;        /* Current IOPL for this VCPU, shifted left by
                               * 12 to match the eflags register. */
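    /*
     * Worked example (illustrative only): with X86_EFLAGS_IOPL covering
     * bits 12-13 of EFLAGS, IOPL(3) evaluates to 0x3000, i.e. the value is
     * already positioned for merging into a guest EFLAGS image.
     */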

    /*
     * %dr7 bits the guest has set, but aren't loaded into hardware, and are
     * completely emulated.
     */
    uint32_t dr7_emul;

    /* Deferred VA-based update state. */
    bool_t need_update_runstate_area;
    struct vcpu_time_info pending_system_time;
};

struct arch_vcpu
{
    /*
     * guest context (mirroring struct vcpu_guest_context) common
     * between pv and hvm guests
     */

    void *fpu_ctxt;
    struct cpu_user_regs user_regs;

    /* Debug registers. */
    unsigned long dr[4];
    unsigned long dr7; /* Ideally int, but __vmread() needs long. */
    unsigned int dr6;

    /* other state */

    unsigned long flags; /* TF_ */

    struct vpmu_struct vpmu;

    struct {
        bool pending;
        uint8_t old_mask;
    } async_exception_state[VCPU_TRAP_LAST];
#define async_exception_state(t) async_exception_state[(t)-1]
    uint8_t async_exception_mask;

    /* Virtual Machine Extensions */
    union {
        struct pv_vcpu pv;
        struct hvm_vcpu hvm;
    };

    pagetable_t guest_table_user;      /* (MFN) x86/64 user-space pagetable */
    pagetable_t guest_table;           /* (MFN) guest notion of cr3 */
    struct page_info *old_guest_table; /* partially destructed pagetable */
    struct page_info *old_guest_ptpg;  /* containing page table of the */
                                       /* former, if any */
    bool old_guest_table_partial;      /* Are we dropping a type ref, or just
                                        * finishing up a partial de-validation? */
    /* guest_table holds a ref to the page, and also a type-count unless
     * shadow refcounts are in use */
    pagetable_t shadow_table[4];       /* (MFN) shadow(s) of guest */
    unsigned long cr3;                 /* (MA) value to install in HW CR3 */
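    /*
     * Illustrative note (behaviour implemented outside this header): for
     * 64-bit PV guests, guest_table and guest_table_user hold the kernel
     * and user views of the guest's page tables, and toggle_guest_pt()
     * declared above switches between them; cr3 caches the machine address
     * actually loaded into hardware CR3 for this vCPU.
     */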

    /*
     * The save area for Processor Extended States and the bitmask of the
     * XSAVE/XRSTOR features. They are used as follows: 1) when a vcpu that
     * has dirtied FPU/SSE state is scheduled out, we XSAVE the state here;
     * 2) in the #NM handler, we XRSTOR the state we previously XSAVE-ed.
     */
    struct xsave_struct *xsave_area;
    uint64_t xcr0;
    /* Accumulated eXtended features mask for XSAVE/XRSTOR use by Xen itself,
     * as we can never know whether the guest OS depends on content
     * preservation whenever it clears a feature flag (for example,
     * temporarily).
     * However, the processor cannot touch extended states before they are
     * explicitly enabled via xcr0.
     */
    uint64_t xcr0_accum;
    /* This variable determines whether nonlazy extended state has been used,
     * and thus should be saved/restored. */
    bool_t nonlazy_xstate_used;

    /* Restore all FPU state (lazy and non-lazy state) on context switch? */
    bool fully_eager_fpu;

    struct vmce vmce;

    struct paging_vcpu paging;

    uint32_t gdbsx_vcpu_event;

    /* A secondary copy of the vcpu time info. */
    XEN_GUEST_HANDLE(vcpu_time_info_t) time_info_guest;

    struct arch_vm_event *vm_event;

    struct vcpu_msrs *msrs;

    struct {
        bool next_interrupt_enabled;
    } monitor;
};

struct guest_memory_policy
{
    bool nested_guest_mode;
};

void update_guest_memory_policy(struct vcpu *v,
                                struct guest_memory_policy *policy);

void domain_cpu_policy_changed(struct domain *d);

bool update_runstate_area(struct vcpu *);
bool update_secondary_system_time(struct vcpu *,
                                  struct vcpu_time_info *);

void vcpu_show_execution_state(struct vcpu *);
void vcpu_show_registers(const struct vcpu *);
665
alloc_vcpu_guest_context(void)666 static inline struct vcpu_guest_context *alloc_vcpu_guest_context(void)
667 {
668 return vmalloc(sizeof(struct vcpu_guest_context));
669 }
670
free_vcpu_guest_context(struct vcpu_guest_context * vgc)671 static inline void free_vcpu_guest_context(struct vcpu_guest_context *vgc)
672 {
673 vfree(vgc);
674 }

void arch_vcpu_regs_init(struct vcpu *v);

struct vcpu_hvm_context;
int arch_set_info_hvm_guest(struct vcpu *v, const struct vcpu_hvm_context *ctx);

#ifdef CONFIG_PV
void pv_inject_event(const struct x86_event *event);
#else
static inline void pv_inject_event(const struct x86_event *event)
{
    ASSERT_UNREACHABLE();
}
#endif

static inline void pv_inject_hw_exception(unsigned int vector, int errcode)
{
    const struct x86_event event = {
        .vector = vector,
        .type = X86_EVENTTYPE_HW_EXCEPTION,
        .error_code = errcode,
    };

    pv_inject_event(&event);
}

static inline void pv_inject_page_fault(int errcode, unsigned long cr2)
{
    const struct x86_event event = {
        .vector = TRAP_page_fault,
        .type = X86_EVENTTYPE_HW_EXCEPTION,
        .error_code = errcode,
        .cr2 = cr2,
    };

    pv_inject_event(&event);
}

static inline void pv_inject_sw_interrupt(unsigned int vector)
{
    const struct x86_event event = {
        .vector = vector,
        .type = X86_EVENTTYPE_SW_INTERRUPT,
        .error_code = X86_EVENT_NO_EC,
    };

    pv_inject_event(&event);
}
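/*
 * Usage sketch (illustrative only; 'errcode' and 'cr2' below stand in for
 * caller-supplied values):
 *
 *     pv_inject_hw_exception(TRAP_gp_fault, 0);   inject #GP(0)
 *     pv_inject_page_fault(errcode, cr2);         inject #PF for 'cr2'
 *
 * Each wrapper simply fills in a struct x86_event and hands it to
 * pv_inject_event(), which performs the actual bounce-frame setup for PV
 * guests.
 */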

#define PV32_VM_ASSIST_MASK ((1UL << VMASST_TYPE_4gb_segments)        | \
                             (1UL << VMASST_TYPE_4gb_segments_notify) | \
                             (1UL << VMASST_TYPE_writable_pagetables) | \
                             (1UL << VMASST_TYPE_pae_extended_cr3)    | \
                             (1UL << VMASST_TYPE_architectural_iopl)  | \
                             (1UL << VMASST_TYPE_runstate_update_flag))
/*
 * Several of the entries in PV32_VM_ASSIST_MASK aren't really applicable to
 * 64-bit, but we can't make such requests start failing all of a sudden.
 */
#define PV64_VM_ASSIST_MASK (PV32_VM_ASSIST_MASK | \
                             (1UL << VMASST_TYPE_m2p_strict))
#define HVM_VM_ASSIST_MASK  (1UL << VMASST_TYPE_runstate_update_flag)

#define arch_vm_assist_valid_mask(d) \
    (is_hvm_domain(d) ? HVM_VM_ASSIST_MASK \
                      : is_pv_32bit_domain(d) ? PV32_VM_ASSIST_MASK \
                                              : PV64_VM_ASSIST_MASK)
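/*
 * Descriptive note: this is the per-domain-type filter applied to VM_ASSIST
 * requests, so e.g. an HVM domain can only enable
 * VMASST_TYPE_runstate_update_flag, while 64-bit PV additionally accepts
 * VMASST_TYPE_m2p_strict on top of the 32-bit PV set.
 */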

#endif /* __ASM_DOMAIN_H__ */

/*
 * Local variables:
 * mode: C
 * c-file-style: "BSD"
 * c-basic-offset: 4
 * tab-width: 4
 * indent-tabs-mode: nil
 * End:
 */