/*
 * arch/x86/hvm/monitor.c
 *
 * Arch-specific hardware virtual machine event abstractions.
 *
 * Copyright (c) 2004, Intel Corporation.
 * Copyright (c) 2005, International Business Machines Corporation.
 * Copyright (c) 2008, Citrix Systems, Inc.
 * Copyright (c) 2016, Bitdefender S.R.L.
 * Copyright (c) 2016, Tamas K Lengyel (tamas@tklengyel.com)
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; If not, see <http://www.gnu.org/licenses/>.
 */

#include <xen/vm_event.h>
#include <xen/mem_access.h>
#include <xen/monitor.h>
#include <asm/hvm/monitor.h>
#include <asm/altp2m.h>
#include <asm/monitor.h>
#include <asm/p2m.h>
#include <asm/paging.h>
#include <asm/vm_event.h>
#include <public/vm_event.h>

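/*
 * Raise a WRITE_CTRLREG vm_event for a write to a monitored control
 * register. The event is skipped when "onchangeonly" is set and the value
 * is unchanged, or when every changed bit is covered by the configured
 * write_ctrlreg_mask. Returns true when the event was sent and the domain
 * has asked the monitor to supply control register values, so callers
 * typically defer committing the new value until the vm_event reply.
 */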
bool hvm_monitor_cr(unsigned int index, unsigned long value, unsigned long old)
{
    struct vcpu *curr = current;
    struct arch_domain *ad = &curr->domain->arch;
    unsigned int ctrlreg_bitmask = monitor_ctrlreg_bitmask(index);

    if ( (ad->monitor.write_ctrlreg_enabled & ctrlreg_bitmask) &&
         (!(ad->monitor.write_ctrlreg_onchangeonly & ctrlreg_bitmask) ||
          value != old) &&
         ((value ^ old) & ~ad->monitor.write_ctrlreg_mask[index]) )
    {
        bool sync = ad->monitor.write_ctrlreg_sync & ctrlreg_bitmask;

        vm_event_request_t req = {
            .reason = VM_EVENT_REASON_WRITE_CTRLREG,
            .u.write_ctrlreg.index = index,
            .u.write_ctrlreg.new_value = value,
            .u.write_ctrlreg.old_value = old
        };

        return monitor_traps(curr, sync, &req) >= 0 &&
               curr->domain->arch.monitor.control_register_values;
    }

    return false;
}

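/*
 * Returns true only if the event was delivered and the vCPU is now paused
 * waiting for the monitor's response.
 */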
bool hvm_monitor_emul_unimplemented(void)
{
    struct vcpu *curr = current;

    /*
     * Send a vm_event to the monitor to signal that the current
     * instruction couldn't be emulated.
     */
    vm_event_request_t req = {
        .reason = VM_EVENT_REASON_EMUL_UNIMPLEMENTED,
        .vcpu_id  = curr->vcpu_id,
    };

    return curr->domain->arch.monitor.emul_unimplemented_enabled &&
        monitor_traps(curr, true, &req) == 1;
}

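/*
 * Raise a MOV_TO_MSR vm_event for a write to a monitored MSR, optionally
 * only when the value actually changes. As with control register writes,
 * returns true when committing the value is deferred to the monitor's reply.
 */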
bool hvm_monitor_msr(unsigned int msr, uint64_t new_value, uint64_t old_value)
{
    struct vcpu *curr = current;

    if ( monitored_msr(curr->domain, msr) &&
         (!monitored_msr_onchangeonly(curr->domain, msr) ||
           new_value != old_value) )
    {
        vm_event_request_t req = {
            .reason = VM_EVENT_REASON_MOV_TO_MSR,
            .u.mov_to_msr.msr = msr,
            .u.mov_to_msr.new_value = new_value,
            .u.mov_to_msr.old_value = old_value
        };

        return monitor_traps(curr, 1, &req) >= 0 &&
               curr->domain->arch.monitor.control_register_values;
    }

    return false;
}

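/*
 * Report an intercepted access to one of the descriptor table registers
 * (GDTR/IDTR/LDTR/TR). On VMX the raw instruction information and exit
 * qualification are forwarded so the monitor can decode the access itself.
 * The event is always synchronous.
 */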
void hvm_monitor_descriptor_access(uint64_t exit_info,
                                   uint64_t vmx_exit_qualification,
                                   uint8_t descriptor, bool is_write)
{
    vm_event_request_t req = {
        .reason = VM_EVENT_REASON_DESCRIPTOR_ACCESS,
        .u.desc_access.descriptor = descriptor,
        .u.desc_access.is_write = is_write,
    };

    if ( cpu_has_vmx )
    {
        req.u.desc_access.arch.vmx.instr_info = exit_info;
        req.u.desc_access.arch.vmx.exit_qualification = vmx_exit_qualification;
    }

    monitor_traps(current, true, &req);
}

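/*
 * Translate the guest-linear address of the current instruction
 * (CS.base + rip) into a gfn, using an instruction-fetch pagewalk that
 * honours the vCPU's current privilege level.
 */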
static inline unsigned long gfn_of_rip(unsigned long rip)
{
    struct vcpu *curr = current;
    struct segment_register sreg;
    uint32_t pfec = PFEC_page_present | PFEC_insn_fetch;

    if ( hvm_get_cpl(curr) == 3 )
        pfec |= PFEC_user_mode;

    hvm_get_segment_register(curr, x86_seg_cs, &sreg);

    return paging_gva_to_gfn(curr, sreg.base + rip, &pfec);
}

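/*
 * hvm_monitor_debug() - common handling for software breakpoints,
 * single-step traps and debug exceptions. The return convention is
 * documented at the top of the function body; a hypothetical caller
 * (illustrative only, not an actual Xen call site) would use it roughly as:
 *
 *     int rc = hvm_monitor_debug(rip, HVM_MONITOR_SOFTWARE_BREAKPOINT,
 *                                trap_type, insn_len, 0);
 *     if ( rc < 0 )
 *         domain_crash(current->domain);  -- monitor/vm_event error
 *     else if ( rc == 0 )
 *         handle_event_normally();        -- event not monitored, or filtered
 *     -- rc > 0: the vCPU is paused; the monitor's reply completes handling
 */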
int hvm_monitor_debug(unsigned long rip, enum hvm_monitor_debug_type type,
                      unsigned int trap_type, unsigned int insn_length,
                      unsigned int pending_dbg)
{
    /*
     * rc < 0 error in monitor/vm_event, crash
     * !rc    continue normally
     * rc > 0 paused waiting for response, work here is done
     */
    struct vcpu *curr = current;
    struct arch_domain *ad = &curr->domain->arch;
    vm_event_request_t req = {};
    bool sync;

    switch ( type )
    {
    case HVM_MONITOR_SOFTWARE_BREAKPOINT:
        if ( !ad->monitor.software_breakpoint_enabled )
            return 0;
        req.reason = VM_EVENT_REASON_SOFTWARE_BREAKPOINT;
        req.u.software_breakpoint.gfn = gfn_of_rip(rip);
        req.u.software_breakpoint.type = trap_type;
        req.u.software_breakpoint.insn_length = insn_length;
        sync = true;
        break;

    case HVM_MONITOR_SINGLESTEP_BREAKPOINT:
        if ( !ad->monitor.singlestep_enabled )
            return 0;
        if ( curr->arch.hvm.fast_single_step.enabled )
        {
            p2m_altp2m_check(curr, curr->arch.hvm.fast_single_step.p2midx);
            curr->arch.hvm.single_step = false;
            curr->arch.hvm.fast_single_step.enabled = false;
            curr->arch.hvm.fast_single_step.p2midx = 0;
            return 0;
        }
        req.reason = VM_EVENT_REASON_SINGLESTEP;
        req.u.singlestep.gfn = gfn_of_rip(rip);
        sync = true;
        break;

    case HVM_MONITOR_DEBUG_EXCEPTION:
        if ( !ad->monitor.debug_exception_enabled )
            return 0;
        req.reason = VM_EVENT_REASON_DEBUG_EXCEPTION;
        req.u.debug_exception.gfn = gfn_of_rip(rip);
        req.u.debug_exception.pending_dbg = pending_dbg;
        req.u.debug_exception.type = trap_type;
        req.u.debug_exception.insn_length = insn_length;
        sync = !!ad->monitor.debug_exception_sync;
        break;

    default:
        return -EOPNOTSUPP;
    }

    return monitor_traps(curr, sync, &req);
}

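/*
 * Raise a synchronous CPUID vm_event, if enabled, carrying the leaf/subleaf
 * and instruction length. The return value is monitor_traps()'s, so the
 * caller can tell whether the vCPU was paused for a response.
 */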
int hvm_monitor_cpuid(unsigned long insn_length, unsigned int leaf,
                      unsigned int subleaf)
{
    struct vcpu *curr = current;
    struct arch_domain *ad = &curr->domain->arch;
    vm_event_request_t req = {};

    if ( !ad->monitor.cpuid_enabled )
        return 0;

    req.reason = VM_EVENT_REASON_CPUID;
    req.u.cpuid.insn_length = insn_length;
    req.u.cpuid.leaf = leaf;
    req.u.cpuid.subleaf = subleaf;

    return monitor_traps(curr, 1, &req);
}

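/*
 * Report a guest interrupt/exception (vector, type, error code and %cr2)
 * to the monitor. The event is sent synchronously and the result of sending
 * it is not checked.
 */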
void hvm_monitor_interrupt(unsigned int vector, unsigned int type,
                           unsigned int err, uint64_t cr2)
{
    vm_event_request_t req = {
        .reason = VM_EVENT_REASON_INTERRUPT,
        .u.interrupt.x86.vector = vector,
        .u.interrupt.x86.type = type,
        .u.interrupt.x86.error_code = err,
        .u.interrupt.x86.cr2 = cr2,
    };

    monitor_traps(current, 1, &req);
}

/*
 * Send memory access vm_events based on pfec. Returns true if the event was
 * sent, and false on p2m_get_mem_access() error, when there is no violation,
 * or when sending the event fails. Assumes the caller will enable/disable
 * arch.vm_event->send_event.
 */
bool hvm_monitor_check_p2m(unsigned long gla, gfn_t gfn, uint32_t pfec,
                           uint16_t kind)
{
    xenmem_access_t access;
    struct vcpu *curr = current;
    vm_event_request_t req = {};
    paddr_t gpa = (gfn_to_gaddr(gfn) | (gla & ~PAGE_MASK));
    int rc;

    ASSERT(curr->arch.vm_event->send_event);

    /*
     * p2m_get_mem_access() can fail for an invalid MFN and return -ESRCH,
     * in which case access must be restricted.
     */
    rc = p2m_get_mem_access(curr->domain, gfn, &access, altp2m_vcpu_idx(curr));

    if ( rc == -ESRCH )
        access = XENMEM_access_n;
    else if ( rc )
        return false;

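    /*
     * Work out which of the faulting access types are actually disallowed
     * by the page's current access rights; only those bits end up in
     * req.u.mem_access.flags and get reported to the monitor.
     */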
    switch ( access )
    {
    case XENMEM_access_x:
    case XENMEM_access_rx:
        if ( pfec & PFEC_write_access )
            req.u.mem_access.flags = MEM_ACCESS_R | MEM_ACCESS_W;
        break;

    case XENMEM_access_w:
    case XENMEM_access_rw:
        if ( pfec & PFEC_insn_fetch )
            req.u.mem_access.flags = MEM_ACCESS_X;
        break;

    case XENMEM_access_r:
    case XENMEM_access_n:
        if ( pfec & PFEC_write_access )
            req.u.mem_access.flags |= MEM_ACCESS_R | MEM_ACCESS_W;
        if ( pfec & PFEC_insn_fetch )
            req.u.mem_access.flags |= MEM_ACCESS_X;
        break;

    case XENMEM_access_wx:
    case XENMEM_access_rwx:
    case XENMEM_access_rx2rw:
    case XENMEM_access_n2rwx:
    case XENMEM_access_default:
        break;
    }

    if ( !req.u.mem_access.flags )
        return false; /* no violation */

    if ( kind == npfec_kind_with_gla )
        req.u.mem_access.flags |= MEM_ACCESS_FAULT_WITH_GLA |
                                  MEM_ACCESS_GLA_VALID;
    else if ( kind == npfec_kind_in_gpt )
        req.u.mem_access.flags |= MEM_ACCESS_FAULT_IN_GPT |
                                  MEM_ACCESS_GLA_VALID;

    req.reason = VM_EVENT_REASON_MEM_ACCESS;
    req.u.mem_access.gfn = gfn_x(gfn);
    req.u.mem_access.gla = gla;
    req.u.mem_access.offset = gpa & ~PAGE_MASK;

    return monitor_traps(curr, true, &req) >= 0;
}

/*
 * Local variables:
 * mode: C
 * c-file-style: "BSD"
 * c-basic-offset: 4
 * tab-width: 4
 * indent-tabs-mode: nil
 * End:
 */