1 #ifndef __ASM_DOMAIN_H__
2 #define __ASM_DOMAIN_H__
3 
4 #include <xen/cache.h>
5 #include <xen/sched.h>
6 #include <asm/page.h>
7 #include <asm/p2m.h>
8 #include <asm/vfp.h>
9 #include <asm/mmio.h>
10 #include <asm/gic.h>
11 #include <asm/vgic.h>
12 #include <asm/vpl011.h>
13 #include <public/hvm/params.h>
14 #include <xen/serial.h>
15 #include <xen/rbtree.h>
16 
/*
 * Per-domain HVM parameter block. On Arm only the generic
 * HVM_PARAM_* values (see public/hvm/params.h) are stored.
 */
struct hvm_domain
{
    uint64_t              params[HVM_NR_PARAMS];
};
21 
#ifdef CONFIG_ARM_64
/* Execution state of the guest: AArch32 or AArch64. */
enum domain_type {
    DOMAIN_32BIT,
    DOMAIN_64BIT,
};
#define is_32bit_domain(d) ((d)->arch.type == DOMAIN_32BIT)
#define is_64bit_domain(d) ((d)->arch.type == DOMAIN_64BIT)
#else
/* On an Arm32 hypervisor every domain is 32-bit. */
#define is_32bit_domain(d) (1)
#define is_64bit_domain(d) (0)
#endif

/* The hardware domain has always its memory direct mapped. */
#define is_domain_direct_mapped(d) ((d) == hardware_domain)
36 
/*
 * Virtualised generic timer. One instance is used for the guest's
 * physical timer and one for its virtual timer (see struct arch_vcpu).
 */
struct vtimer {
    struct vcpu *v;        /* vCPU this timer belongs to. */
    int irq;               /* Interrupt delivered to the guest on expiry. */
    struct timer timer;    /* Xen timer backing the emulation. */
    uint32_t ctl;          /* Guest-visible timer control register value. */
    uint64_t cval;         /* Guest-visible compare value. */
};
44 
/* Arm-specific per-domain state. */
struct arch_domain
{
#ifdef CONFIG_ARM_64
    /* AArch32 vs AArch64 guest (see enum domain_type). */
    enum domain_type type;
#endif

    /* Virtual MMU */
    struct p2m_domain p2m;

    /* HVM parameters (see struct hvm_domain). */
    struct hvm_domain hvm;

    /* Emulated MMIO region handlers (see asm/mmio.h). */
    struct vmmio vmmio;

    /* Continuable domain_relinquish_resources(). */
    unsigned int rel_priv;

    /* Base offset applied to the guest's virtual timer. */
    struct {
        uint64_t offset;
    } virt_timer_base;

    /* Virtual GIC distributor state. */
    struct vgic_dist vgic;

    /* Emulated UART (single buffered channel). */
    struct vuart {
#define VUART_BUF_SIZE 128
        char                        *buf;    /* Transmit buffer, VUART_BUF_SIZE bytes. */
        int                         idx;     /* Current fill index into buf. */
        const struct vuart_info     *info;   /* Backing UART description. */
        spinlock_t                  lock;    /* Protects buf/idx. */
    } vuart;

    /* Interrupt used for event channel notifications. */
    unsigned int evtchn_irq;
#ifdef CONFIG_ACPI
    /* ACPI tables exposed to the guest via EFI: blob, guest address, size. */
    void *efi_acpi_table;
    paddr_t efi_acpi_gpa;
    paddr_t efi_acpi_len;
#endif

    /* Monitor options */
    struct {
        uint8_t privileged_call_enabled : 1;
    } monitor;

#ifdef CONFIG_SBSA_VUART_CONSOLE
    /* Virtual SBSA-compatible PL011 UART. */
    struct vpl011 vpl011;
#endif

#ifdef CONFIG_TEE
    /* TEE-specific state (opaque outside the TEE code). */
    void *tee;
#endif
}  __cacheline_aligned;
95 
/* Arm-specific per-vCPU state. */
struct arch_vcpu
{
    /*
     * Callee-saved register context preserved across a hypervisor
     * context switch (AAPCS callee-saved GPRs plus fp/sp/pc).
     */
    struct {
#ifdef CONFIG_ARM_32
        register_t r4;
        register_t r5;
        register_t r6;
        register_t r7;
        register_t r8;
        register_t r9;
        register_t sl;
#else
        register_t x19;
        register_t x20;
        register_t x21;
        register_t x22;
        register_t x23;
        register_t x24;
        register_t x25;
        register_t x26;
        register_t x27;
        register_t x28;
#endif
        register_t fp;
        register_t sp;
        register_t pc;
    } saved_context;

    /* Hypervisor stack used while running this vCPU. */
    void *stack;

    /*
     * Points into ->stack, more convenient than doing pointer arith
     * all the time.
     */
    struct cpu_info *cpu_info;

    /* Fault Status */
#ifdef CONFIG_ARM_32
    uint32_t dfsr;
    uint32_t dfar, ifar;
#else
    uint64_t far;
    uint32_t esr;
#endif

    uint32_t ifsr; /* 32-bit guests only */
    uint32_t afsr0, afsr1;

    /* MMU */
    register_t vbar;
    register_t ttbcr;
    uint64_t ttbr0, ttbr1;

    uint32_t dacr; /* 32-bit guests only */
    uint64_t par;
#ifdef CONFIG_ARM_32
    uint32_t mair0, mair1;
    uint32_t amair0, amair1;
#else
    uint64_t mair;
    uint64_t amair;
#endif

    /* Control Registers */
    register_t sctlr;
    uint32_t actlr;
    uint32_t cpacr;

    uint32_t contextidr;
    register_t tpidr_el0;
    register_t tpidr_el1;
    register_t tpidrro_el0;

    /* HYP configuration */
    register_t hcr_el2;

    uint32_t teecr, teehbr; /* ThumbEE, 32-bit guests only */
#ifdef CONFIG_ARM_32
    /*
     * ARMv8 only supports a trivial implementation on Jazelle when in AArch32
     * mode and therefore has no extended control registers.
     */
    uint32_t joscr, jmcr;
#endif

    /* Floating point state. */
    struct vfp_state vfp;

    /* CP 15 */
    uint32_t csselr;
    register_t vmpidr;

    /* Holds gic context data */
    union gic_state_data gic;
    uint64_t lr_mask;   /* Bitmap of in-use GIC list registers. */

    /* Virtual GIC per-CPU state. */
    struct vgic_cpu vgic;

    /* Timer registers  */
    uint32_t cntkctl;

    struct vtimer phys_timer;
    struct vtimer virt_timer;
    bool   vtimer_initialized;

    /*
     * The full P2M may require some cleaning (e.g when emulating
     * set/way). As the action can take a long time, it requires
     * preemption. It is deferred until we return to guest, where we can
     * more easily check for softirqs and preempt the vCPU safely.
     */
    bool need_flush_to_ram;

}  __cacheline_aligned;
210 
/* Debug helpers: dump a vCPU's execution state / register state. */
void vcpu_show_execution_state(struct vcpu *);
void vcpu_show_registers(const struct vcpu *);
/* Switch a vCPU to AArch64 execution state. */
void vcpu_switch_to_aarch64_mode(struct vcpu *);
214 
/*
 * Due to a restriction of GICv3, the number of vCPUs in AFF0 is
 * limited to 16, so only the first 4 bits of AFF0 are legal. We use
 * the first two affinity levels here, allowing up to 4096 (16 * 256)
 * vCPUs, which is more than the number of PEs that a GIC-500 supports.
 *
 * Since we don't currently store the vCPU's topology (affinity) in
 * vMPIDR, we map the vcpuid to the vMPIDR linearly.
 */
/*
 * Convert a virtual MPIDR affinity value into a linear vCPU ID.
 *
 * Only Aff0 (low 4 bits used) and Aff1 are considered: the result is
 * Aff1:Aff0 packed into a 12-bit value.
 */
static inline unsigned int vaffinity_to_vcpuid(register_t vaff)
{
    register_t hwid = vaff & MPIDR_HWID_MASK;

    return MPIDR_AFFINITY_LEVEL(hwid, 0) |
           (MPIDR_AFFINITY_LEVEL(hwid, 1) << 4);
}
235 
vcpuid_to_vaffinity(unsigned int vcpuid)236 static inline register_t vcpuid_to_vaffinity(unsigned int vcpuid)
237 {
238     register_t vaff;
239 
240     /*
241      * Right now only AFF0 and AFF1 are supported in virtual affinity.
242      * Since only the first 4 bits in AFF0 are used in GICv3, the
243      * available bits are 12 (4+8).
244      */
245     BUILD_BUG_ON(!(MAX_VIRT_CPUS < ((1 << 12))));
246 
247     vaff = (vcpuid & 0x0f) << MPIDR_LEVEL_SHIFT(0);
248     vaff |= ((vcpuid >> 4) & MPIDR_LEVEL_MASK) << MPIDR_LEVEL_SHIFT(1);
249 
250     return vaff;
251 }
252 
alloc_vcpu_guest_context(void)253 static inline struct vcpu_guest_context *alloc_vcpu_guest_context(void)
254 {
255     return xmalloc(struct vcpu_guest_context);
256 }
257 
/* Release a vcpu_guest_context obtained from alloc_vcpu_guest_context(). */
static inline void free_vcpu_guest_context(struct vcpu_guest_context *vgc)
{
    xfree(vgc);
}
262 
arch_vcpu_block(struct vcpu * v)263 static inline void arch_vcpu_block(struct vcpu *v) {}
264 
265 #define arch_vm_assist_valid_mask(d) (1UL << VMASST_TYPE_runstate_update_flag)
266 
267 #endif /* __ASM_DOMAIN_H__ */
268 
269 /*
270  * Local variables:
271  * mode: C
272  * c-file-style: "BSD"
273  * c-basic-offset: 4
274  * tab-width: 4
275  * indent-tabs-mode: nil
276  * End:
277  */
278