/xen/xen/include/asm-x86/hvm/
  nestedhvm.h
      47  bool_t nestedhvm_vcpu_in_guestmode(struct vcpu *v);
      62  bool_t access_r, bool_t access_w, bool_t access_x);
      66  bool_t access_r, bool_t access_w, bool_t access_x);
      69  unsigned long *nestedhvm_vcpu_iomap_get(bool_t ioport_80, bool_t ioport_ed);
  vcpu.h
      85  bool_t mmio_retry;
     102  bool_t nv_guestmode; /* vcpu in guestmode? */
     117  bool_t nv_flushp2m; /* True, when p2m table must be flushed */
     124  bool_t nv_vmentry_pending;
     125  bool_t nv_vmexit_pending;
     126  bool_t nv_vmswitch_in_progress; /* true during vmentry/vmexit emulation */
     131  bool_t nv_ioport80;
     132  bool_t nv_ioportED;
  hvm.h
      31  extern bool_t opt_hvm_fep;
      94  bool_t hap_supported;
     177  void (*set_rdtsc_exiting)(struct vcpu *v, bool_t);
     186  bool_t (*nhvm_vmcx_guest_intercepts_event)(
     189  bool_t (*nhvm_vmcx_hap_enabled)(struct vcpu *v);
     206  bool_t access_w, bool_t access_x);
     209  bool_t (*is_singlestep_supported)(void);
     214  bool_t (*altp2m_vcpu_emulate_ve)(struct vcpu *v);
     233  extern bool_t hvm_enabled;
     309  bool_t hvm_virtual_to_linear_addr(
          [all …]
  vlapic.h
      80  bool_t hw, regs;
     111  bool_t is_vlapic_lvtpc_enabled(struct vlapic *vlapic);
     117  int vlapic_ack_pending_irq(struct vcpu *v, int vector, bool_t force_ack);
     145  int short_hand, uint32_t dest, bool_t dest_mode);
     147  bool_t vlapic_match_dest(
     149  int short_hand, uint32_t dest, bool_t dest_mode);
  domain.h
     160  bool_t is_in_uc_mode;
     167  bool_t qemu_mapcache_invalidate;
     168  bool_t is_s3_suspended;
/xen/xen/include/asm-x86/hvm/svm/
  nestedsvm.h
      29  bool_t ns_gif;
      75  bool_t ns_hap_enabled;
     116  bool_t nsvm_vmcb_guest_intercepts_event(
     118  bool_t nsvm_vmcb_hap_enabled(struct vcpu *v);
     128  bool_t nestedsvm_gif_isset(struct vcpu *v);
     131  bool_t access_r, bool_t access_w, bool_t access_x);
/xen/xen/arch/x86/mm/hap/
  nested_hap.c
     136  bool_t access_r, bool_t access_w, bool_t access_x)  in nestedhap_walk_L1_p2m()
     153  bool_t access_r, bool_t access_w, bool_t access_x)  in nestedhap_walk_L0_p2m()
     194  bool_t access_r, bool_t access_w, bool_t access_x)  in nestedhvm_hap_nested_page_fault()
  nested_ept.c
      57  static bool_t nept_rsv_bits_check(ept_entry_t e, uint32_t level)  in nept_rsv_bits_check()
      83  static bool_t nept_emt_bits_check(ept_entry_t e, uint32_t level)  in nept_emt_bits_check()
      94  static bool_t nept_permission_check(uint32_t rwx_acc, uint32_t rwx_bits)  in nept_permission_check()
     100  static bool_t nept_non_present_check(ept_entry_t e)  in nept_non_present_check()
     121  static bool_t nept_rwx_bits_check(ept_entry_t e)  in nept_rwx_bits_check()
     137  static bool_t nept_misconfiguration_check(ept_entry_t e, uint32_t level)  in nept_misconfiguration_check()
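The nept_*_check() entries above are small bool_t predicates over an EPT entry or an access mask. As a minimal sketch of that shape, based only on the prototype shown at line 94 and assuming rwx_acc is the requested access and rwx_bits the rights the entry grants, a permission check only has to confirm that no requested right falls outside the granted set:

```c
/* Sketch of the permission-check idiom: true iff every requested
 * right (rwx_acc) is also present in the granted rights (rwx_bits). */
static bool_t nept_permission_check(uint32_t rwx_acc, uint32_t rwx_bits)
{
    return !(rwx_acc & ~rwx_bits);
}
```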
/xen/xen/include/asm-x86/
  mtrr.h
      63  bool_t overlapped;
      80  bool_t direct_mmio);
      89  extern bool_t mtrr_var_range_msr_set(struct domain *, struct mtrr_state *,
      91  extern bool_t mtrr_fix_range_msr_set(struct domain *, struct mtrr_state *,
      93  extern bool_t mtrr_def_type_msr_set(struct domain *, struct mtrr_state *,
     101  extern bool_t pat_msr_set(uint64_t *pat, uint64_t msr);
  domain.h
     118  bool_t oos_active;
     122  bool_t pagetable_dying_op;
     159  bool_t pagetable_dying;
     338  bool_t altp2m_active;
     349  bool_t has_32bit_shinfo;
     352  bool_t auto_unmask;
     427  bool_t mem_access_emulate_each_rep;
     516  bool_t syscall32_disables_events;
     517  bool_t sysenter_disables_events;
     558  bool_t need_update_runstate_area;
          [all …]
  vpmu.h
      47  int (*arch_vpmu_save)(struct vcpu *v, bool_t to_guest);
      48  int (*arch_vpmu_load)(struct vcpu *v, bool_t from_guest);
      97  static inline bool_t vpmu_is_set(const struct vpmu_struct *vpmu, const u32 mask)  in vpmu_is_set()
     101  static inline bool_t vpmu_are_all_set(const struct vpmu_struct *vpmu,  in vpmu_are_all_set()
     109  uint64_t supported, bool_t is_write);
     114  int vpmu_load(struct vcpu *v, bool_t from_guest);
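vpmu_is_set() and vpmu_are_all_set() are bool_t flag tests on the vPMU state. A minimal sketch of the first one, assuming struct vpmu_struct carries a u32 flags bitmask (that field is an assumption here, not visible in the hits above):

```c
/* Sketch: non-zero iff any of the bits in mask are set in the vPMU flags. */
static inline bool_t vpmu_is_set(const struct vpmu_struct *vpmu, const u32 mask)
{
    return !!(vpmu->flags & mask);
}
```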
  pci.h
      25  bool_t pci_mmcfg_decode(unsigned long mfn, unsigned int *seg,
      28  bool_t pci_ro_mmcfg_decode(unsigned long mfn, unsigned int *seg,
/xen/xen/include/xen/
  device_tree.h
      31  const bool_t not_available;
     136  static inline bool_t dt_irq_is_level_triggered(const struct dt_irq *irq)  in dt_irq_is_level_triggered()
     273  static inline bool_t dt_node_name_is_equal(const struct dt_device_node *np,  in dt_node_name_is_equal()
     279  static inline bool_t dt_node_path_is_equal(const struct dt_device_node *np,  in dt_node_path_is_equal()
     285  static inline bool_t
     356  bool_t dt_property_read_u32(const struct dt_device_node *np,
     366  bool_t dt_property_read_u64(const struct dt_device_node *np,
     407  bool_t dt_device_is_compatible(const struct dt_device_node *device,
     417  bool_t dt_machine_is_compatible(const char *compat);
     609  bool_t dt_device_is_available(const struct dt_device_node *device);
          [all …]
  kimage.h
      50  kimage_entry_t *kimage_entry_next(kimage_entry_t *entry, bool_t compat);
      51  mfn_t kimage_entry_mfn(kimage_entry_t *entry, bool_t compat);
      52  unsigned long kimage_entry_ind(kimage_entry_t *entry, bool_t compat);
      54  bool_t compat);
  rangeset.h
      55  bool_t __must_check rangeset_is_empty(
      65  bool_t __must_check rangeset_contains_range(
      67  bool_t __must_check rangeset_overlaps_range(
      91  bool_t __must_check rangeset_contains_singleton(
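The rangeset queries are declared bool_t __must_check, so callers are forced to act on the result. A hypothetical caller, sketched here only to show the idiom (check_port_range() and its arguments are invented, and the full rangeset_contains_range() parameter list is assumed rather than shown in the hits):

```c
#include <xen/errno.h>
#include <xen/rangeset.h>

/* Hypothetical helper: permit an access only if the whole
 * [first, last] range is covered by the domain's allowed set. */
static int check_port_range(struct rangeset *allowed,
                            unsigned long first, unsigned long last)
{
    /* __must_check: the bool_t result cannot silently be dropped. */
    if ( !rangeset_contains_range(allowed, first, last) )
        return -EPERM;

    return 0;
}
```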
  pci.h
      70  bool_t is_extfn;
      71  bool_t is_virtfn;
     150  bool_t __must_check pcidevs_locked(void);
     151  bool_t __must_check pcidevs_trylock(void);
     153  bool_t pci_known_segment(u16 seg);
     154  bool_t pci_device_detect(u16 seg, u8 bus, u8 dev, u8 func);
     208  bool_t pcie_aer_get_firmware_first(const struct pci_dev *);
  tasklet.h
      21  bool_t is_softirq;
      22  bool_t is_running;
      23  bool_t is_dead;
  keyhandler.h
      42  bool_t diagnostic);
      46  bool_t diagnostic);
  iommu.h
      50  static inline bool_t dfn_eq(dfn_t x, dfn_t y)  in dfn_eq()
      55  extern bool_t iommu_enable, iommu_enabled;
     101  extern bool_t iommu_debug;
     102  extern bool_t amd_iommu_perdev_intremap;
     176  bool_t iommu_has_feature(struct domain *d, enum iommu_feature feature);
     382  DECLARE_PER_CPU(bool_t, iommu_dont_flush_iotlb);
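dfn_eq() is one of the typesafe-address comparison helpers. Assuming the usual dfn_x() unwrapper generated by the dfn_t typesafe wrapper (an assumption, it does not appear in the hits above), the inline reduces to a plain comparison of the raw values:

```c
/* Sketch: compare two typesafe DFNs by unwrapping them first. */
static inline bool_t dfn_eq(dfn_t x, dfn_t y)
{
    return dfn_x(x) == dfn_x(y);
}
```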
  livepatch.h
      51  bool_t new_symbol;
      57  bool_t is_patch(const void *addr);
     172  static inline bool_t is_patch(const void *addr)  in is_patch()
/xen/xen/include/acpi/cpufreq/
  cpufreq.h
      25  extern bool_t cpufreq_verbose;
      45  bool_t no_turbo;
      46  bool_t turbo_disabled;
      70  bool_t resume; /* flag for cpufreq 1st run
      75  bool_t aperf_mperf; /* CPU has APERF/MPERF MSRs */
     109  bool_t (*handle_option)(const char *name, const char *value);
/xen/xen/include/asm-x86/hvm/vmx/
  vvmx.h
      49  bool_t enabled;
      92  bool_t nvmx_intercepts_exception(
      96  bool_t nvmx_ept_enabled(struct vcpu *v);
     106  bool_t access_r, bool_t access_w, bool_t access_x);
/xen/xen/drivers/passthrough/
  iommu.c
      30  bool_t __initdata iommu_enable = 1;
      31  bool_t __read_mostly iommu_enabled;
      32  bool_t __read_mostly force_iommu;
      33  bool_t __read_mostly iommu_verbose;
      35  bool_t __read_mostly iommu_crash_disable;
      47  bool_t __read_mostly iommu_debug;
      48  bool_t __read_mostly amd_iommu_perdev_intremap = 1;
      50  DEFINE_PER_CPU(bool_t, iommu_dont_flush_iotlb);
     463  bool_t force_intremap = force_iommu && iommu_intremap;  in iommu_setup()
     588  bool_t iommu_has_feature(struct domain *d, enum iommu_feature feature)  in iommu_has_feature()
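The definitions at the top of iommu.c follow the usual pattern for boot-time bool_t toggles: an annotated flag plus a command-line hook. A sketch of that idiom with an invented option name (the real IOMMU flags above are sub-options parsed out of the combined "iommu=" command-line string, not individual boolean_param() registrations):

```c
#include <xen/cache.h>
#include <xen/init.h>
#include <xen/types.h>

/* Sketch only: "example-feature" is a made-up option illustrating the
 * bool_t toggle idiom, not one of the real iommu= sub-options. */
static bool_t __read_mostly example_feature = 1;
boolean_param("example-feature", example_feature);
```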
/xen/xen/drivers/passthrough/vtd/
  extern.h
      29  extern bool_t rwbf_quirk;
     103  bool_t platform_supports_intremap(void);
     104  bool_t platform_supports_x2apic(void);
/xen/xen/arch/x86/hvm/
  nestedhvm.c
      30  bool_t
     173  nestedhvm_vcpu_iomap_get(bool_t port_80, bool_t port_ed)  in nestedhvm_vcpu_iomap_get()