Searched refs:vpmu (Results 1 – 10 of 10) sorted by relevance
/xen/xen/arch/x86/cpu/
vpmu.c
    100  struct vpmu_struct *vpmu;   in vpmu_lvtpc_update() local
    106  vpmu = vcpu_vpmu(curr);   in vpmu_lvtpc_update()
    120  struct vpmu_struct *vpmu;   in vpmu_do_msr() local
    133  vpmu = vcpu_vpmu(curr);   in vpmu_do_msr()
    182  struct vpmu_struct *vpmu;   in vpmu_do_interrupt() local
    364  vpmu->last_pcpu = pcpu;   in vpmu_save()
    425  if ( vpmu->arch_vpmu_ops && vpmu->arch_vpmu_ops->arch_vpmu_load )   in vpmu_load()
    453  ASSERT(!(vpmu->flags & ~VPMU_AVAILABLE) && !vpmu->context);   in vpmu_arch_initialise()
    569  if ( vpmu->arch_vpmu_ops && vpmu->arch_vpmu_ops->arch_vpmu_destroy )   in vpmu_arch_destroy()
    626  vpmu = vcpu_vpmu(v);   in pvpmu_init()
    [all …]
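Note: the matches at source lines 425 and 569 show vpmu.c dispatching into vendor-specific handlers through an arch_vpmu_ops table, checking both the table pointer and the individual hook before calling. A minimal standalone sketch of that pattern; the struct layouts and hook names here are invented for illustration and this is not Xen code:

    /* NULL-guarded ops-table dispatch, modelled on vpmu_load() and
     * vpmu_arch_destroy() above; types and hooks invented for illustration. */
    #include <stdio.h>

    struct vpmu_struct;

    struct arch_vpmu_ops {
        int  (*arch_vpmu_load)(struct vpmu_struct *vpmu);
        void (*arch_vpmu_destroy)(struct vpmu_struct *vpmu);
    };

    struct vpmu_struct {
        const struct arch_vpmu_ops *arch_vpmu_ops; /* vendor hook table */
    };

    static int demo_load(struct vpmu_struct *vpmu)
    {
        (void)vpmu;
        printf("vendor load hook called\n");
        return 0;
    }

    static const struct arch_vpmu_ops demo_ops = {
        .arch_vpmu_load = demo_load, /* arch_vpmu_destroy left NULL */
    };

    int main(void)
    {
        struct vpmu_struct vpmu = { .arch_vpmu_ops = &demo_ops };

        /* Both the table and the individual hook may be absent,
         * hence the double check before each indirect call. */
        if ( vpmu.arch_vpmu_ops && vpmu.arch_vpmu_ops->arch_vpmu_load )
            vpmu.arch_vpmu_ops->arch_vpmu_load(&vpmu);

        /* arch_vpmu_destroy is NULL here, so this call is skipped. */
        if ( vpmu.arch_vpmu_ops && vpmu.arch_vpmu_ops->arch_vpmu_destroy )
            vpmu.arch_vpmu_ops->arch_vpmu_destroy(&vpmu);

        return 0;
    }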
vpmu_amd.c
    100  #define is_msr_bitmap_on(vpmu) ((vpmu)->priv_context != NULL)   argument
    178  msr_bitmap_on(vpmu);   in amd_vpmu_set_msr_bitmap()
    192  msr_bitmap_off(vpmu);   in amd_vpmu_unset_msr_bitmap()
    226  ctxt = vpmu->context;   in amd_vpmu_load()
    242  ctxt = vpmu->context;   in amd_vpmu_load()
    317  ctxt = vpmu->context;   in amd_vpmu_save()
    439  xfree(vpmu->context);   in amd_vpmu_destroy()
    440  vpmu->context = NULL;   in amd_vpmu_destroy()
    441  vpmu->priv_context = NULL;   in amd_vpmu_destroy()
    446  vpmu_clear(vpmu);   in amd_vpmu_destroy()
    [all …]
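Note: two idioms are visible in the vpmu_amd.c matches: is_msr_bitmap_on() treats a non-NULL priv_context pointer as the "bitmap enabled" flag, and amd_vpmu_destroy() NULLs each pointer right after freeing it. A standalone sketch, with libc malloc/free standing in for Xen's xmalloc/xfree:

    /* Pointer-as-flag plus free-then-NULL, modelled loosely on the
     * vpmu_amd.c matches above; plain C, not Xen code. */
    #include <stdio.h>
    #include <stdlib.h>

    struct vpmu_struct {
        void *context;      /* saved counter state */
        void *priv_context; /* doubles as the "MSR bitmap on" indicator */
    };

    #define is_msr_bitmap_on(vpmu) ((vpmu)->priv_context != NULL)

    static void demo_destroy(struct vpmu_struct *vpmu)
    {
        free(vpmu->context);
        vpmu->context = NULL;      /* prevent double free / stale use */
        vpmu->priv_context = NULL; /* bitmap is now "off" by definition */
    }

    int main(void)
    {
        struct vpmu_struct vpmu = { malloc(64), malloc(64) };

        printf("bitmap on? %d\n", is_msr_bitmap_on(&vpmu)); /* prints 1 */
        free(vpmu.priv_context);
        vpmu.priv_context = NULL;
        printf("bitmap on? %d\n", is_msr_bitmap_on(&vpmu)); /* prints 0 */

        demo_destroy(&vpmu);
        return 0;
    }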
vpmu_intel.c
    401  vpmu_set(vpmu, VPMU_RUNNING);   in core2_vpmu_verify()
    476  vpmu->context = core2_vpmu_cxt;   in core2_vpmu_alloc_resource()
    477  vpmu->priv_context = p;   in core2_vpmu_alloc_resource()
    482  ASSERT(vpmu->xenpmu_data);   in core2_vpmu_alloc_resource()
    544  core2_vpmu_cxt = vpmu->context;   in core2_vpmu_do_wrmsr()
    689  vpmu_set(vpmu, VPMU_RUNNING);   in core2_vpmu_do_wrmsr()
    812  xfree(vpmu->context);   in core2_vpmu_destroy()
    813  vpmu->context = NULL;   in core2_vpmu_destroy()
    814  xfree(vpmu->priv_context);   in core2_vpmu_destroy()
    815  vpmu->priv_context = NULL;   in core2_vpmu_destroy()
    [all …]
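Note: core2_vpmu_alloc_resource() commits vpmu->context and vpmu->priv_context together, and core2_vpmu_destroy() frees and NULLs both. A sketch of that allocate-then-commit / free-then-NULL pairing; plain C with invented types, not the actual Xen logic:

    /* Allocate everything first, commit only on full success,
     * and free/NULL in pairs on teardown. */
    #include <stdlib.h>

    struct core2_vpmu_context { int dummy; };

    struct vpmu_struct {
        struct core2_vpmu_context *context;
        unsigned long *priv_context;
    };

    static int alloc_resource(struct vpmu_struct *vpmu)
    {
        struct core2_vpmu_context *cxt = calloc(1, sizeof(*cxt));
        unsigned long *p = calloc(1, sizeof(*p));

        if ( !cxt || !p )
        {
            free(cxt);
            free(p);
            return 0; /* failure: vpmu left untouched */
        }

        /* Commit only after both allocations succeeded. */
        vpmu->context = cxt;
        vpmu->priv_context = p;
        return 1;
    }

    static void destroy(struct vpmu_struct *vpmu)
    {
        /* Mirror of the core2_vpmu_destroy() lines above. */
        free(vpmu->context);
        vpmu->context = NULL;
        free(vpmu->priv_context);
        vpmu->priv_context = NULL;
    }

    int main(void)
    {
        struct vpmu_struct vpmu = { NULL, NULL };

        if ( alloc_resource(&vpmu) )
            destroy(&vpmu);
        return 0;
    }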
Makefile
    13  obj-y += vpmu.o vpmu_amd.o vpmu_intel.o
/xen/xen/include/asm-x86/
vpmu.h
    26  #define vcpu_vpmu(vcpu) (&(vcpu)->arch.vpmu)
    27  #define vpmu_vcpu(vpmu) container_of((vpmu), struct vcpu, arch.vpmu)   argument
    84  static inline void vpmu_set(struct vpmu_struct *vpmu, const u32 mask)   in vpmu_set() argument
    86  vpmu->flags |= mask;   in vpmu_set()
    88  static inline void vpmu_reset(struct vpmu_struct *vpmu, const u32 mask)   in vpmu_reset() argument
    90  vpmu->flags &= ~mask;   in vpmu_reset()
    92  static inline void vpmu_clear(struct vpmu_struct *vpmu)   in vpmu_clear() argument
    95  vpmu->flags &= VPMU_AVAILABLE;   in vpmu_clear()
    99  return !!(vpmu->flags & mask);   in vpmu_is_set()
    101  static inline bool_t vpmu_are_all_set(const struct vpmu_struct *vpmu,   in vpmu_are_all_set() argument
    [all …]
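Note: the vpmu.h matches show the flag-manipulation helpers almost in full: vpmu_set() ORs bits in, vpmu_reset() masks them out, vpmu_is_set() tests, and vpmu_clear() drops everything except availability. A compilable sketch with placeholder flag values (the real bit assignments live in vpmu.h and are not shown by this search):

    /* Bit-mask semantics mirror the vpmu.h lines above; the flag
     * values below are placeholders, not Xen's. */
    #include <assert.h>
    #include <stdint.h>

    #define VPMU_AVAILABLE 0x1  /* placeholder value */
    #define VPMU_RUNNING   0x2  /* placeholder value */

    struct vpmu_struct { uint32_t flags; };

    static inline void vpmu_set(struct vpmu_struct *vpmu, uint32_t mask)
    { vpmu->flags |= mask; }

    static inline void vpmu_reset(struct vpmu_struct *vpmu, uint32_t mask)
    { vpmu->flags &= ~mask; }

    static inline int vpmu_is_set(const struct vpmu_struct *vpmu, uint32_t mask)
    { return !!(vpmu->flags & mask); }

    /* vpmu_clear() keeps only the availability bit. */
    static inline void vpmu_clear(struct vpmu_struct *vpmu)
    { vpmu->flags &= VPMU_AVAILABLE; }

    int main(void)
    {
        struct vpmu_struct vpmu = { 0 };

        vpmu_set(&vpmu, VPMU_AVAILABLE | VPMU_RUNNING);
        assert(vpmu_is_set(&vpmu, VPMU_RUNNING));

        vpmu_clear(&vpmu); /* keeps VPMU_AVAILABLE only */
        assert(vpmu_is_set(&vpmu, VPMU_AVAILABLE));
        assert(!vpmu_is_set(&vpmu, VPMU_RUNNING));
        return 0;
    }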
domain.h
    581  struct vpmu_struct vpmu;   member
/xen/xen/arch/x86/oprofile/
op_model_ppro.c
    231  struct vpmu_struct *vpmu = vcpu_vpmu(v);   in ppro_allocate_msr() local
    237  vpmu->context = (void *)msr_content;   in ppro_allocate_msr()
    238  vpmu_clear(vpmu);   in ppro_allocate_msr()
    239  vpmu_set(vpmu, VPMU_PASSIVE_DOMAIN_ALLOCATED);   in ppro_allocate_msr()
    250  struct vpmu_struct *vpmu = vcpu_vpmu(v);   in ppro_free_msr() local
    252  if ( !vpmu_is_set(vpmu, VPMU_PASSIVE_DOMAIN_ALLOCATED) )   in ppro_free_msr()
    254  xfree(vpmu->context);   in ppro_free_msr()
    255  vpmu_reset(vpmu, VPMU_PASSIVE_DOMAIN_ALLOCATED);   in ppro_free_msr()
nmi_int.c
    43  struct vpmu_struct *vpmu = vcpu_vpmu(current);   in passive_domain_msr_op_checks() local
    51  if ( !vpmu_is_set(vpmu, VPMU_PASSIVE_DOMAIN_ALLOCATED) )   in passive_domain_msr_op_checks()
    81  struct vpmu_struct *vpmu = vcpu_vpmu(v);   in passive_domain_destroy() local
    82  if ( vpmu_is_set(vpmu, VPMU_PASSIVE_DOMAIN_ALLOCATED) )   in passive_domain_destroy()
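Note: both oprofile files gate on VPMU_PASSIVE_DOMAIN_ALLOCATED: ppro_allocate_msr() sets it after taking ownership of the context, while ppro_free_msr(), passive_domain_msr_op_checks(), and passive_domain_destroy() bail out unless it is set. A standalone sketch of that own-then-guard pattern, with a placeholder flag value and libc free instead of xfree:

    /* Ownership flag guards the free path, modelled on the
     * op_model_ppro.c / nmi_int.c matches above; not Xen code. */
    #include <stdlib.h>

    #define VPMU_PASSIVE_DOMAIN_ALLOCATED 0x1 /* placeholder bit */

    struct vpmu_struct { unsigned int flags; void *context; };

    static void allocate_msr(struct vpmu_struct *vpmu, void *ctxt)
    {
        vpmu->context = ctxt;
        vpmu->flags |= VPMU_PASSIVE_DOMAIN_ALLOCATED; /* mark ownership */
    }

    static void free_msr(struct vpmu_struct *vpmu)
    {
        /* Only free what this path actually allocated. */
        if ( !(vpmu->flags & VPMU_PASSIVE_DOMAIN_ALLOCATED) )
            return;
        free(vpmu->context);
        vpmu->context = NULL;
        vpmu->flags &= ~VPMU_PASSIVE_DOMAIN_ALLOCATED;
    }

    int main(void)
    {
        struct vpmu_struct vpmu = { 0, NULL };

        free_msr(&vpmu);                 /* no-op: flag not set */
        allocate_msr(&vpmu, malloc(32));
        free_msr(&vpmu);                 /* frees and clears the flag */
        return 0;
    }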
/xen/xen/arch/x86/
domain.c
    397  spin_lock_init(&v->arch.vpmu.vpmu_lock);   in arch_vcpu_create()
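Note: arch_vcpu_create() initialises the per-vCPU vpmu_lock once at vCPU creation, so later vpmu paths can lock it without checking for initialisation. A rough analogue using POSIX spinlocks; Xen's own spinlock API differs, so this is only illustrative:

    /* Init-once, lock-later, modelled on the domain.c match above.
     * Build with -lpthread on older glibc. */
    #include <pthread.h>

    struct vpmu_struct { pthread_spinlock_t vpmu_lock; };
    struct vcpu { struct vpmu_struct vpmu; };

    static int vcpu_create(struct vcpu *v)
    {
        /* Counterpart of spin_lock_init(&v->arch.vpmu.vpmu_lock). */
        return pthread_spin_init(&v->vpmu.vpmu_lock, PTHREAD_PROCESS_PRIVATE);
    }

    int main(void)
    {
        struct vcpu v;

        if ( vcpu_create(&v) != 0 )
            return 1;

        pthread_spin_lock(&v.vpmu.vpmu_lock);
        /* ... touch vpmu state under the lock ... */
        pthread_spin_unlock(&v.vpmu.vpmu_lock);
        pthread_spin_destroy(&v.vpmu.vpmu_lock);
        return 0;
    }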
/xen/docs/misc/
xen-command-line.pandoc
    2320  ### vpmu (x86)
    2370  As the virtualisation is not 100% safe, don't use the vpmu flag on …
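Note: the snippet above is the truncated `### vpmu (x86)` entry from xen-command-line.pandoc. As a hedged illustration only (the accepted values and the full safety warning are in that doc entry, not in this search output), passing the flag to the hypervisor from a GRUB2 setup might look like:

    # Hypothetical /etc/default/grub fragment; the variable name and
    # file layout vary by distro, so treat this as an assumption.
    GRUB_CMDLINE_XEN_DEFAULT="vpmu=on"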
Completed in 19 milliseconds