/*
 * arch/x86/hvm/vm_event.c
 *
 * HVM vm_event handling routines
 *
 * Copyright (c) 2004, Intel Corporation.
 * Copyright (c) 2005, International Business Machines Corporation.
 * Copyright (c) 2008, Citrix Systems, Inc.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; If not, see <http://www.gnu.org/licenses/>.
 */
22
#include <xen/sched.h>
#include <xen/vm_event.h>
#include <asm/hvm/emulate.h>
#include <asm/hvm/support.h>
#include <asm/vm_event.h>
28
hvm_vm_event_set_registers(const struct vcpu * v)29 static void hvm_vm_event_set_registers(const struct vcpu *v)
30 {
31 ASSERT(v == current);
32
33 if ( unlikely(v->arch.vm_event->set_gprs) )
34 {
35 struct cpu_user_regs *regs = guest_cpu_user_regs();
36
37 regs->rax = v->arch.vm_event->gprs.rax;
38 regs->rbx = v->arch.vm_event->gprs.rbx;
39 regs->rcx = v->arch.vm_event->gprs.rcx;
40 regs->rdx = v->arch.vm_event->gprs.rdx;
41 regs->rsp = v->arch.vm_event->gprs.rsp;
42 regs->rbp = v->arch.vm_event->gprs.rbp;
43 regs->rsi = v->arch.vm_event->gprs.rsi;
44 regs->rdi = v->arch.vm_event->gprs.rdi;
45
46 regs->r8 = v->arch.vm_event->gprs.r8;
47 regs->r9 = v->arch.vm_event->gprs.r9;
48 regs->r10 = v->arch.vm_event->gprs.r10;
49 regs->r11 = v->arch.vm_event->gprs.r11;
50 regs->r12 = v->arch.vm_event->gprs.r12;
51 regs->r13 = v->arch.vm_event->gprs.r13;
52 regs->r14 = v->arch.vm_event->gprs.r14;
53 regs->r15 = v->arch.vm_event->gprs.r15;
54
55 regs->rflags = v->arch.vm_event->gprs.rflags;
56 regs->rip = v->arch.vm_event->gprs.rip;
57
58 v->arch.vm_event->set_gprs = false;
59 }
60 }
61
hvm_vm_event_do_resume(struct vcpu * v)62 void hvm_vm_event_do_resume(struct vcpu *v)
63 {
64 struct monitor_write_data *w;
65
66 ASSERT(v->arch.vm_event);
67
68 hvm_vm_event_set_registers(v);
69
70 w = &v->arch.vm_event->write_data;
71
72 if ( unlikely(v->arch.vm_event->emulate_flags) )
73 {
74 enum emul_kind kind = EMUL_KIND_NORMAL;
75
76 /*
77 * Please observe the order here to match the flag descriptions
78 * provided in public/vm_event.h
79 */
80 if ( v->arch.vm_event->emulate_flags &
81 VM_EVENT_FLAG_SET_EMUL_READ_DATA )
82 kind = EMUL_KIND_SET_CONTEXT_DATA;
83 else if ( v->arch.vm_event->emulate_flags &
84 VM_EVENT_FLAG_EMULATE_NOWRITE )
85 kind = EMUL_KIND_NOWRITE;
86 else if ( v->arch.vm_event->emulate_flags &
87 VM_EVENT_FLAG_SET_EMUL_INSN_DATA )
88 kind = EMUL_KIND_SET_CONTEXT_INSN;
89
90 hvm_emulate_one_vm_event(kind, TRAP_invalid_op,
91 X86_EVENT_NO_EC);
92
93 v->arch.vm_event->emulate_flags = 0;
94 }
95
96 if ( unlikely(w->do_write.cr0) )
97 {
98 if ( hvm_set_cr0(w->cr0, false) == X86EMUL_EXCEPTION )
99 hvm_inject_hw_exception(TRAP_gp_fault, 0);
100
101 w->do_write.cr0 = 0;
102 }
103
104 if ( unlikely(w->do_write.cr4) )
105 {
106 if ( hvm_set_cr4(w->cr4, false) == X86EMUL_EXCEPTION )
107 hvm_inject_hw_exception(TRAP_gp_fault, 0);
108
109 w->do_write.cr4 = 0;
110 }
111
112 if ( unlikely(w->do_write.cr3) )
113 {
114 if ( hvm_set_cr3(w->cr3, w->cr3_noflush, false) == X86EMUL_EXCEPTION )
115 hvm_inject_hw_exception(TRAP_gp_fault, 0);
116
117 w->do_write.cr3 = 0;
118 }
119
120 if ( unlikely(w->do_write.msr) )
121 {
122 if ( hvm_msr_write_intercept(w->msr, w->value, false) ==
123 X86EMUL_EXCEPTION )
124 hvm_inject_hw_exception(TRAP_gp_fault, 0);
125
126 w->do_write.msr = 0;
127 }
128
129 vm_event_sync_event(v, false);
130 }
131
/*
 * Local variables:
 * mode: C
 * c-file-style: "BSD"
 * c-basic-offset: 4
 * tab-width: 4
 * indent-tabs-mode: nil
 * End:
 */