/*
 * vpmu.h: PMU virtualization for HVM domain.
 *
 * Copyright (c) 2007, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; If not, see <http://www.gnu.org/licenses/>.
 *
 * Author: Haitao Shan <haitao.shan@intel.com>
 */

#ifndef __ASM_X86_HVM_VPMU_H_
#define __ASM_X86_HVM_VPMU_H_

#include <public/pmu.h>

#define vcpu_vpmu(vcpu)   (&(vcpu)->arch.vpmu)
#define vpmu_vcpu(vpmu)   container_of((vpmu), struct vcpu, arch.vpmu)
#define vpmu_available(vcpu) vpmu_is_set(vcpu_vpmu(vcpu), VPMU_AVAILABLE)

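/*
 * Classes of MSR handled by the vendor-specific do_rdmsr()/do_wrmsr() hooks.
 * An illustrative mapping, based on the Intel core2 implementation:
 * COUNTER/CTRL for the fixed-function counters and their control MSR,
 * GLOBAL for the global status/control MSRs, and ARCH_COUNTER/ARCH_CTRL
 * for IA32_PMCx/IA32_PERFEVTSELx.
 */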
#define MSR_TYPE_COUNTER            0
#define MSR_TYPE_CTRL               1
#define MSR_TYPE_GLOBAL             2
#define MSR_TYPE_ARCH_COUNTER       3
#define MSR_TYPE_ARCH_CTRL          4

/* Start of PMU register bank */
#define vpmu_reg_pointer(ctxt, offset) ((void *)((uintptr_t)ctxt + \
                                                 (uintptr_t)ctxt->offset))
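/*
 * Usage sketch (assuming an Intel context whose 'fixed_counters' member
 * stores the byte offset of the fixed-counter bank within the context):
 *
 *   uint64_t *fixed_counters = vpmu_reg_pointer(core2_vpmu_cxt,
 *                                               fixed_counters);
 */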

/* Arch specific operations shared by all vpmus */
struct arch_vpmu_ops {
    int (*do_wrmsr)(unsigned int msr, uint64_t msr_content,
                    uint64_t supported);
    int (*do_rdmsr)(unsigned int msr, uint64_t *msr_content);
    int (*do_interrupt)(struct cpu_user_regs *regs);
    void (*arch_vpmu_destroy)(struct vcpu *v);
    int (*arch_vpmu_save)(struct vcpu *v, bool_t to_guest);
    int (*arch_vpmu_load)(struct vcpu *v, bool_t from_guest);
    void (*arch_vpmu_dump)(const struct vcpu *);
};
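/*
 * Each vendor implementation (Intel core2, AMD, Hygon) supplies its own
 * instance of these hooks; see the init/initialise declarations below.
 */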

int core2_vpmu_init(void);
int vmx_vpmu_initialise(struct vcpu *);
int amd_vpmu_init(void);
int hygon_vpmu_init(void);
int svm_vpmu_initialise(struct vcpu *);

struct vpmu_struct {
    u32 flags;
    u32 last_pcpu;
    u32 hw_lapic_lvtpc;
    void *context;      /* May be shared with PV guest */
    void *priv_context; /* hypervisor-only */
    const struct arch_vpmu_ops *arch_vpmu_ops;
    struct xen_pmu_data *xenpmu_data;
    spinlock_t vpmu_lock;
};

/* VPMU states */
#define VPMU_CONTEXT_ALLOCATED              0x1
#define VPMU_CONTEXT_LOADED                 0x2
#define VPMU_RUNNING                        0x4
#define VPMU_CONTEXT_SAVE                   0x8   /* Force context save */
#define VPMU_FROZEN                         0x10  /* Stop counters while VCPU is not running */
#define VPMU_PASSIVE_DOMAIN_ALLOCATED       0x20
/* PV(H) guests: VPMU registers are accessed by guest from shared page */
#define VPMU_CACHED                         0x40
#define VPMU_AVAILABLE                      0x80

/* Intel-specific VPMU features */
#define VPMU_CPU_HAS_DS                     0x100 /* Has Debug Store */
#define VPMU_CPU_HAS_BTS                    0x200 /* Has Branch Trace Store */

static inline void vpmu_set(struct vpmu_struct *vpmu, const u32 mask)
{
    vpmu->flags |= mask;
}
static inline void vpmu_reset(struct vpmu_struct *vpmu, const u32 mask)
{
    vpmu->flags &= ~mask;
}
static inline void vpmu_clear(struct vpmu_struct *vpmu)
{
    /* VPMU_AVAILABLE should be altered by get/put_vpmu(). */
    vpmu->flags &= VPMU_AVAILABLE;
}
static inline bool_t vpmu_is_set(const struct vpmu_struct *vpmu, const u32 mask)
{
    return !!(vpmu->flags & mask);
}
static inline bool_t vpmu_are_all_set(const struct vpmu_struct *vpmu,
                                      const u32 mask)
{
    return !!((vpmu->flags & mask) == mask);
}
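/*
 * A minimal sketch of the helpers above: set two state bits at once, then
 * check that both are present:
 *
 *   vpmu_set(vpmu, VPMU_CONTEXT_LOADED | VPMU_RUNNING);
 *   if ( vpmu_are_all_set(vpmu, VPMU_CONTEXT_LOADED | VPMU_RUNNING) )
 *       ...;
 */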

void vpmu_lvtpc_update(uint32_t val);
int vpmu_do_msr(unsigned int msr, uint64_t *msr_content,
                uint64_t supported, bool_t is_write);
void vpmu_do_interrupt(struct cpu_user_regs *regs);
void vpmu_initialise(struct vcpu *v);
void vpmu_destroy(struct vcpu *v);
void vpmu_save(struct vcpu *v);
int vpmu_load(struct vcpu *v, bool_t from_guest);
void vpmu_dump(struct vcpu *v);

static inline int vpmu_do_wrmsr(unsigned int msr, uint64_t msr_content,
                                uint64_t supported)
{
    return vpmu_do_msr(msr, &msr_content, supported, 1);
}
static inline int vpmu_do_rdmsr(unsigned int msr, uint64_t *msr_content)
{
    return vpmu_do_msr(msr, msr_content, 0, 0);
}
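/*
 * Note: the last argument of vpmu_do_msr() selects the direction, 1 for
 * WRMSR and 0 for RDMSR; the write path passes its value by reference so
 * both directions can share one signature.
 */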

extern unsigned int vpmu_mode;
extern unsigned int vpmu_features;
extern bool opt_rtm_abort;

/* Context switch */
static inline void vpmu_switch_from(struct vcpu *prev)
{
    if ( vpmu_mode & (XENPMU_MODE_SELF | XENPMU_MODE_HV) )
        vpmu_save(prev);
}

static inline void vpmu_switch_to(struct vcpu *next)
{
    if ( vpmu_mode & (XENPMU_MODE_SELF | XENPMU_MODE_HV) )
        vpmu_load(next, 0);
}
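/*
 * Expected call order on a context switch (a sketch; the callers live
 * outside this header): vpmu_switch_from(prev) as the outgoing vCPU is
 * descheduled, then vpmu_switch_to(next) once the incoming vCPU's state
 * is in place.
 */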

#endif /* __ASM_X86_HVM_VPMU_H_ */