1 /*
2  * support.h: HVM support routines used by VT-x and SVM.
3  *
4  * Leendert van Doorn, leendert@watson.ibm.com
5  * Copyright (c) 2005, International Business Machines Corporation.
6  *
7  * This program is free software; you can redistribute it and/or modify it
8  * under the terms and conditions of the GNU General Public License,
9  * version 2, as published by the Free Software Foundation.
10  *
11  * This program is distributed in the hope it will be useful, but WITHOUT
12  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
14  * more details.
15  *
16  * You should have received a copy of the GNU General Public License along with
17  * this program; If not, see <http://www.gnu.org/licenses/>.
18  */
19 
20 #ifndef __ASM_X86_HVM_SUPPORT_H__
21 #define __ASM_X86_HVM_SUPPORT_H__
22 
23 #include <xen/types.h>
24 #include <xen/sched.h>
25 #include <asm/hvm/save.h>
26 #include <asm/processor.h>
27 #include <asm/p2m.h>
28 
#ifndef NDEBUG
/*
 * Debug-message categories.  These are independent bit flags: several may
 * be enabled at once in opt_hvm_debug_level, and HVM_DBG_LOG() fires if
 * any requested bit is set.
 */
#define DBG_LEVEL_0                 (1 << 0)
#define DBG_LEVEL_1                 (1 << 1)
#define DBG_LEVEL_2                 (1 << 2)
#define DBG_LEVEL_3                 (1 << 3)
#define DBG_LEVEL_IO                (1 << 4)
#define DBG_LEVEL_VMMU              (1 << 5)
#define DBG_LEVEL_VLAPIC            (1 << 6)
#define DBG_LEVEL_VLAPIC_TIMER      (1 << 7)
#define DBG_LEVEL_VLAPIC_INTERRUPT  (1 << 8)
#define DBG_LEVEL_IOAPIC            (1 << 9)
#define DBG_LEVEL_HCALL             (1 << 10)
#define DBG_LEVEL_MSR               (1 << 11)

/* Bitmask of enabled DBG_LEVEL_* categories; defined elsewhere. */
extern unsigned int opt_hvm_debug_level;
/*
 * Emit a printk tagged with the current domain/vcpu IDs and the calling
 * function name, but only when one of the category bits in @level is
 * enabled in opt_hvm_debug_level.  A trailing newline is appended.
 */
#define HVM_DBG_LOG(level, _f, _a...)                                         \
    do {                                                                      \
        if ( unlikely((level) & opt_hvm_debug_level) )                        \
            printk("[HVM:%d.%d] <%s> " _f "\n",                               \
                   current->domain->domain_id, current->vcpu_id, __func__,    \
                   ## _a);                                                    \
    } while (0)
#else
/*
 * Release (NDEBUG) builds: logging compiles away entirely.  Note the
 * arguments are not evaluated, so they must have no required side effects.
 */
#define HVM_DBG_LOG(level, _f, _a...) do {} while (0)
#endif
54 
/*
 * NOTE(review): presumably the I/O-port intercept bitmap shared by the
 * VT-x/SVM code — definition and exact layout live elsewhere; confirm there.
 */
extern unsigned long hvm_io_bitmap[];
56 
/*
 * Outcome of a guest address translation or guest memory copy.  See the
 * comments above the hvm_copy_*() declarations below for how the failure
 * values are reported by those functions.
 */
enum hvm_translation_result {
    HVMTRANS_okay,              /* operation fully successful */
    HVMTRANS_bad_linear_to_gfn, /* linear address had no valid mapping to a
                                 * guest physical address */
    HVMTRANS_bad_gfn_to_mfn,    /* guest physical address did not map onto
                                 * ordinary machine memory */
    HVMTRANS_unhandleable,
    HVMTRANS_gfn_paged_out,     /* NOTE(review): presumably target gfn is
                                 * paged out — confirm against callers */
    HVMTRANS_gfn_shared,        /* NOTE(review): presumably target gfn is a
                                 * shared page — confirm against callers */
    HVMTRANS_need_retry,
};
66 
67 /*
68  * Copy to/from a guest physical address.
69  * Returns HVMTRANS_okay, else HVMTRANS_bad_gfn_to_mfn if the given physical
70  * address range does not map entirely onto ordinary machine memory.
71  */
72 enum hvm_translation_result hvm_copy_to_guest_phys(
73     paddr_t paddr, void *buf, unsigned int size, struct vcpu *v);
74 enum hvm_translation_result hvm_copy_from_guest_phys(
75     void *buf, paddr_t paddr, unsigned int size);
76 
77 /*
78  * Copy to/from a guest linear address. @pfec should include PFEC_user_mode
79  * if emulating a user-mode access (CPL=3). All other flags in @pfec are
80  * managed by the called function: it is therefore optional for the caller
81  * to set them.
82  *
83  * Returns:
84  *  HVMTRANS_okay: Copy was entirely successful.
85  *  HVMTRANS_bad_gfn_to_mfn: Some guest physical address did not map to
86  *                           ordinary machine memory.
87  *  HVMTRANS_bad_linear_to_gfn: Some guest linear address did not have a
88  *                              valid mapping to a guest physical address.
89  *                              The pagefault_info_t structure will be filled
90  *                              in if provided.
91  */
/*
 * Details of a failed linear-address translation, filled in (when the
 * caller provides one) on an HVMTRANS_bad_linear_to_gfn result — see the
 * hvm_copy_*_linear() declarations below.
 */
typedef struct pagefault_info
{
    unsigned long linear; /* faulting guest linear address */
    int ec;               /* page fault error code (PFEC_* style flags) */
} pagefault_info_t;
97 
98 enum hvm_translation_result hvm_copy_to_guest_linear(
99     unsigned long addr, void *buf, unsigned int size, uint32_t pfec,
100     pagefault_info_t *pfinfo);
101 enum hvm_translation_result hvm_copy_from_guest_linear(
102     void *buf, unsigned long addr, unsigned int size, uint32_t pfec,
103     pagefault_info_t *pfinfo);
104 
105 /*
106  * Get a reference on the page under an HVM physical or linear address.  If
107  * linear, a pagewalk is performed using pfec (fault details optionally in
108  * pfinfo).
109  * On success, returns HVMTRANS_okay with a reference taken on **_page.
110  */
111 enum hvm_translation_result hvm_translate_get_page(
112     struct vcpu *v, unsigned long addr, bool linear, uint32_t pfec,
113     pagefault_info_t *pfinfo, struct page_info **page_p,
114     gfn_t *gfn_p, p2m_type_t *p2mt_p);
115 
/* Return values of hvm_hypercall(). */
#define HVM_HCALL_completed  0 /* hypercall completed - no further action */
#define HVM_HCALL_preempted  1 /* hypercall preempted - re-execute VMCALL */
int hvm_hypercall(struct cpu_user_regs *regs);

/* Handle a guest HLT; @eflags is the guest's flags register at the HLT. */
void hvm_hlt(unsigned int eflags);
void hvm_triple_fault(void);

/*
 * NOTE(review): top bit of the 64-bit value, presumably OR'd into the TSS
 * address/limit parameter as an "already updated" marker — confirm against
 * the users of hvm_prepare_vm86_tss().
 */
#define VM86_TSS_UPDATED (1ULL << 63)
void hvm_prepare_vm86_tss(struct vcpu *v, uint32_t base, uint32_t limit);

/* Handle an intercepted guest RDTSC. */
void hvm_rdtsc_intercept(struct cpu_user_regs *regs);

/* Handle an intercepted guest XSETBV; returns an X86EMUL_* code. */
int __must_check hvm_handle_xsetbv(u32 index, u64 new_bv);

void hvm_shadow_handle_cd(struct vcpu *v, unsigned long value);
131 
132 /*
133  * These functions all return X86EMUL return codes.  For hvm_set_*(), the
134  * caller is responsible for injecting #GP[0] if X86EMUL_EXCEPTION is
135  * returned.
136  */
int hvm_set_efer(uint64_t value);
int hvm_set_cr0(unsigned long value, bool may_defer);
int hvm_set_cr3(unsigned long value, bool noflush, bool may_defer);
int hvm_set_cr4(unsigned long value, bool may_defer);
/*
 * Handle an intercepted descriptor-table register access.
 * NOTE(review): presumably @exit_info is the SVM form and
 * @vmx_exit_qualification the VT-x form of the exit data — confirm in the
 * vendor-specific callers.
 */
int hvm_descriptor_access_intercept(uint64_t exit_info,
                                    uint64_t vmx_exit_qualification,
                                    unsigned int descriptor, bool is_write);
/* Emulate MOV to/from control register @cr using guest GPR index @gpr. */
int hvm_mov_to_cr(unsigned int cr, unsigned int gpr);
int hvm_mov_from_cr(unsigned int cr, unsigned int gpr);
void hvm_ud_intercept(struct cpu_user_regs *);
147 
148 /*
149  * May return X86EMUL_EXCEPTION, at which point the caller is responsible for
150  * injecting a #GP fault.  Used to support speculative reads.
151  */
152 int __must_check hvm_msr_read_intercept(
153     unsigned int msr, uint64_t *msr_content);
154 int __must_check hvm_msr_write_intercept(
155     unsigned int msr, uint64_t msr_content, bool may_defer);
156 
157 #endif /* __ASM_X86_HVM_SUPPORT_H__ */
158 
159 /*
160  * Local variables:
161  * mode: C
162  * c-file-style: "BSD"
163  * c-basic-offset: 4
164  * tab-width: 4
165  * indent-tabs-mode: nil
166  * End:
167  */
168