1 /*
2 * xen/arch/arm/vtimer.c
3 *
4 * ARM Virtual Timer emulation support
5 *
6 * Ian Campbell <ian.campbell@citrix.com>
7 * Copyright (c) 2011 Citrix Systems.
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License as published by
11 * the Free Software Foundation; either version 2 of the License, or
12 * (at your option) any later version.
13 *
14 * This program is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 * GNU General Public License for more details.
18 */
19
20 #include <xen/lib.h>
21 #include <xen/perfc.h>
22 #include <xen/sched.h>
23 #include <xen/timer.h>
24
25 #include <asm/cpregs.h>
26 #include <asm/div64.h>
27 #include <asm/irq.h>
28 #include <asm/regs.h>
29 #include <asm/time.h>
30 #include <asm/vgic.h>
31 #include <asm/vreg.h>
32 #include <asm/regs.h>
33
34 /*
35 * Check if regs is allowed access, user_gate is tail end of a
36 * CNTKCTL_EL1_ bit name which gates user access
37 */
38 #define ACCESS_ALLOWED(regs, user_gate) \
39 ( !psr_mode_is_user(regs) || \
40 (READ_SYSREG(CNTKCTL_EL1) & CNTKCTL_EL1_##user_gate) )
41
phys_timer_expired(void * data)42 static void phys_timer_expired(void *data)
43 {
44 struct vtimer *t = data;
45 t->ctl |= CNTx_CTL_PENDING;
46 if ( !(t->ctl & CNTx_CTL_MASK) )
47 {
48 perfc_incr(vtimer_phys_inject);
49 vgic_inject_irq(t->v->domain, t->v, t->irq, true);
50 }
51 else
52 perfc_incr(vtimer_phys_masked);
53 }
54
/*
 * Xen timer callback: the backing timer armed in virt_timer_save() has
 * fired while the vCPU was descheduled.  Inject the virtual timer vIRQ.
 *
 * NOTE(review): the saved ctl shadow is written back to CNTV_CTL_EL0 in
 * virt_timer_restore(), so setting CNTx_CTL_MASK here presumably keeps
 * the hardware timer from immediately re-firing once the vCPU resumes,
 * leaving it to the guest's handler to unmask — confirm against callers.
 */
static void virt_timer_expired(void *data)
{
    struct vtimer *t = data;
    t->ctl |= CNTx_CTL_MASK;
    vgic_inject_irq(t->v->domain, t->v, t->irq, true);
    perfc_incr(vtimer_virt_inject);
}
62
/*
 * One-time per-domain virtual timer initialisation.
 *
 * Records the physical counter value at domain creation so the guest's
 * virtual counter (via CNTVOFF_EL2, programmed in virt_timer_restore())
 * starts from approximately zero, computes the wallclock offset in
 * seconds, advertises the timer frequency, and reserves the timer PPIs.
 * Always returns 0.
 */
int domain_vtimer_init(struct domain *d, struct xen_arch_domainconfig *config)
{
    d->arch.virt_timer_base.offset = READ_SYSREG64(CNTPCT_EL0);
    /* Seconds since Xen boot: ns / 10^9 (do_div divides in place). */
    d->time_offset.seconds = ticks_to_ns(d->arch.virt_timer_base.offset - boot_count);
    do_div(d->time_offset.seconds, 1000000000);

    /* Report the timer frequency discovered from the device tree. */
    config->clock_frequency = timer_dt_clock_frequency;

    /* At this stage vgic_reserve_virq can't fail */
    if ( is_hardware_domain(d) )
    {
        /* The hardware domain uses the host's real timer PPI numbers. */
        if ( !vgic_reserve_virq(d, timer_get_irq(TIMER_PHYS_SECURE_PPI)) )
            BUG();

        if ( !vgic_reserve_virq(d, timer_get_irq(TIMER_PHYS_NONSECURE_PPI)) )
            BUG();

        if ( !vgic_reserve_virq(d, timer_get_irq(TIMER_VIRT_PPI)) )
            BUG();
    }
    else
    {
        /* Other guests see the fixed PPIs of the virtual platform. */
        if ( !vgic_reserve_virq(d, GUEST_TIMER_PHYS_S_PPI) )
            BUG();

        if ( !vgic_reserve_virq(d, GUEST_TIMER_PHYS_NS_PPI) )
            BUG();

        if ( !vgic_reserve_virq(d, GUEST_TIMER_VIRT_PPI) )
            BUG();
    }

    return 0;
}
97
vcpu_vtimer_init(struct vcpu * v)98 int vcpu_vtimer_init(struct vcpu *v)
99 {
100 struct vtimer *t = &v->arch.phys_timer;
101 bool d0 = is_hardware_domain(v->domain);
102
103 /*
104 * Hardware domain uses the hardware interrupts, guests get the virtual
105 * platform.
106 */
107
108 init_timer(&t->timer, phys_timer_expired, t, v->processor);
109 t->ctl = 0;
110 t->irq = d0
111 ? timer_get_irq(TIMER_PHYS_NONSECURE_PPI)
112 : GUEST_TIMER_PHYS_NS_PPI;
113 t->v = v;
114
115 t = &v->arch.virt_timer;
116 init_timer(&t->timer, virt_timer_expired, t, v->processor);
117 t->ctl = 0;
118 t->irq = d0
119 ? timer_get_irq(TIMER_VIRT_PPI)
120 : GUEST_TIMER_VIRT_PPI;
121 t->v = v;
122
123 v->arch.vtimer_initialized = 1;
124
125 return 0;
126 }
127
vcpu_timer_destroy(struct vcpu * v)128 void vcpu_timer_destroy(struct vcpu *v)
129 {
130 if ( !v->arch.vtimer_initialized )
131 return;
132
133 kill_timer(&v->arch.virt_timer.timer);
134 kill_timer(&v->arch.phys_timer.timer);
135 }
136
/*
 * Context-switch out: save the vCPU's EL1 virtual timer state.
 *
 * Snapshots CNTV_CTL/CNTV_CVAL, disables the hardware timer, and — if the
 * timer was enabled and unmasked — arms a backing Xen timer so the expiry
 * is not lost while the vCPU is descheduled (see virt_timer_expired()).
 */
void virt_timer_save(struct vcpu *v)
{
    ASSERT(!is_idle_vcpu(v));

    v->arch.virt_timer.ctl = READ_SYSREG32(CNTV_CTL_EL0);
    /* Disable the hardware timer while this vCPU is off the CPU. */
    WRITE_SYSREG32(v->arch.virt_timer.ctl & ~CNTx_CTL_ENABLE, CNTV_CTL_EL0);
    v->arch.virt_timer.cval = READ_SYSREG64(CNTV_CVAL_EL0);
    if ( (v->arch.virt_timer.ctl & CNTx_CTL_ENABLE) &&
         !(v->arch.virt_timer.ctl & CNTx_CTL_MASK))
    {
        /* cval is guest virtual time: add the domain offset, rebase to boot. */
        set_timer(&v->arch.virt_timer.timer, ticks_to_ns(v->arch.virt_timer.cval +
                  v->domain->arch.virt_timer_base.offset - boot_count));
    }
}
151
/*
 * Context-switch in: restore the vCPU's EL1 virtual timer state.
 *
 * The hardware timer takes over again, so cancel the backing Xen timer,
 * migrate both softirq timers to the new physical CPU, and reprogram the
 * virtual counter offset and timer registers.
 */
void virt_timer_restore(struct vcpu *v)
{
    ASSERT(!is_idle_vcpu(v));

    stop_timer(&v->arch.virt_timer.timer);
    migrate_timer(&v->arch.virt_timer.timer, v->processor);
    migrate_timer(&v->arch.phys_timer.timer, v->processor);

    /* Set the per-domain counter offset before re-enabling the timer. */
    WRITE_SYSREG64(v->domain->arch.virt_timer_base.offset, CNTVOFF_EL2);
    WRITE_SYSREG64(v->arch.virt_timer.cval, CNTV_CVAL_EL0);
    WRITE_SYSREG32(v->arch.virt_timer.ctl, CNTV_CTL_EL0);
}
164
/*
 * Emulate CNTP_CTL(_EL0), the physical timer control register.
 *
 * Returns false if EL0 access is gated off by CNTKCTL_EL1 (the caller
 * then injects an undefined-instruction exception), true otherwise.
 */
static bool vtimer_cntp_ctl(struct cpu_user_regs *regs, uint32_t *r, bool read)
{
    struct vcpu *v = current;
    s_time_t expires;

    if ( !ACCESS_ALLOWED(regs, EL0PTEN) )
        return false;

    if ( read )
    {
        *r = v->arch.phys_timer.ctl;
    }
    else
    {
        /*
         * ISTATUS (PENDING) is read-only: drop the guest-written bit and
         * keep our latched value, but only while the timer stays enabled.
         */
        uint32_t ctl = *r & ~CNTx_CTL_PENDING;
        if ( ctl & CNTx_CTL_ENABLE )
            ctl |= v->arch.phys_timer.ctl & CNTx_CTL_PENDING;
        v->arch.phys_timer.ctl = ctl;

        /* (Re)arm or cancel the backing Xen timer to match ENABLE. */
        if ( v->arch.phys_timer.ctl & CNTx_CTL_ENABLE )
        {
            /*
             * If cval is before the point Xen started, expire timer
             * immediately.
             */
            expires = v->arch.phys_timer.cval > boot_count
                      ? ticks_to_ns(v->arch.phys_timer.cval - boot_count) : 0;
            set_timer(&v->arch.phys_timer.timer, expires);
        }
        else
            stop_timer(&v->arch.phys_timer.timer);
    }
    return true;
}
199
/*
 * Emulate CNTP_TVAL(_EL0), the physical timer down-counter view:
 * reads return cval - now (truncated to 32 bits); writes set
 * cval = now + sign-extended value.
 *
 * Returns false if EL0 access is gated off by CNTKCTL_EL1.
 */
static bool vtimer_cntp_tval(struct cpu_user_regs *regs, uint32_t *r,
                             bool read)
{
    struct vcpu *v = current;
    uint64_t cntpct;
    s_time_t expires;

    if ( !ACCESS_ALLOWED(regs, EL0PTEN) )
        return false;

    cntpct = get_cycles();

    if ( read )
    {
        *r = (uint32_t)((v->arch.phys_timer.cval - cntpct) & 0xffffffffull);
    }
    else
    {
        /* TVAL is signed: sign-extend before adding to the counter. */
        v->arch.phys_timer.cval = cntpct + (uint64_t)(int32_t)*r;
        if ( v->arch.phys_timer.ctl & CNTx_CTL_ENABLE )
        {
            /* A new compare value clears the latched pending state. */
            v->arch.phys_timer.ctl &= ~CNTx_CTL_PENDING;
            /*
             * If cval is before the point Xen started, expire timer
             * immediately.
             */
            expires = v->arch.phys_timer.cval > boot_count
                      ? ticks_to_ns(v->arch.phys_timer.cval - boot_count) : 0;
            set_timer(&v->arch.phys_timer.timer, expires);
        }
    }
    return true;
}
233
/*
 * Emulate CNTP_CVAL(_EL0), the 64-bit physical timer compare value.
 *
 * Returns false if EL0 access is gated off by CNTKCTL_EL1.
 */
static bool vtimer_cntp_cval(struct cpu_user_regs *regs, uint64_t *r,
                             bool read)
{
    struct vcpu *v = current;
    s_time_t expires;

    if ( !ACCESS_ALLOWED(regs, EL0PTEN) )
        return false;

    if ( read )
    {
        *r = v->arch.phys_timer.cval;
    }
    else
    {
        v->arch.phys_timer.cval = *r;
        if ( v->arch.phys_timer.ctl & CNTx_CTL_ENABLE )
        {
            /* A new compare value clears the latched pending state. */
            v->arch.phys_timer.ctl &= ~CNTx_CTL_PENDING;
            /*
             * If cval is before the point Xen started, expire timer
             * immediately.
             */
            expires = v->arch.phys_timer.cval > boot_count
                      ? ticks_to_ns(v->arch.phys_timer.cval - boot_count) : 0;
            set_timer(&v->arch.phys_timer.timer, expires);
        }
    }
    return true;
}
264
vtimer_emulate_cp32(struct cpu_user_regs * regs,union hsr hsr)265 static bool vtimer_emulate_cp32(struct cpu_user_regs *regs, union hsr hsr)
266 {
267 struct hsr_cp32 cp32 = hsr.cp32;
268
269 if ( cp32.read )
270 perfc_incr(vtimer_cp32_reads);
271 else
272 perfc_incr(vtimer_cp32_writes);
273
274 switch ( hsr.bits & HSR_CP32_REGS_MASK )
275 {
276 case HSR_CPREG32(CNTP_CTL):
277 return vreg_emulate_cp32(regs, hsr, vtimer_cntp_ctl);
278
279 case HSR_CPREG32(CNTP_TVAL):
280 return vreg_emulate_cp32(regs, hsr, vtimer_cntp_tval);
281
282 default:
283 return false;
284 }
285 }
286
vtimer_emulate_cp64(struct cpu_user_regs * regs,union hsr hsr)287 static bool vtimer_emulate_cp64(struct cpu_user_regs *regs, union hsr hsr)
288 {
289 struct hsr_cp64 cp64 = hsr.cp64;
290
291 if ( cp64.read )
292 perfc_incr(vtimer_cp64_reads);
293 else
294 perfc_incr(vtimer_cp64_writes);
295
296 switch ( hsr.bits & HSR_CP64_REGS_MASK )
297 {
298 case HSR_CPREG64(CNTP_CVAL):
299 return vreg_emulate_cp64(regs, hsr, vtimer_cntp_cval);
300
301 default:
302 return false;
303 }
304 }
305
#ifdef CONFIG_ARM_64
/* Dispatch a trapped system-register timer access (AArch64 guests). */
static bool vtimer_emulate_sysreg(struct cpu_user_regs *regs, union hsr hsr)
{
    if ( hsr.sysreg.read )
        perfc_incr(vtimer_sysreg_reads);
    else
        perfc_incr(vtimer_sysreg_writes);

    switch ( hsr.bits & HSR_SYSREG_REGS_MASK )
    {
    case HSR_SYSREG_CNTP_CTL_EL0:
        return vreg_emulate_sysreg32(regs, hsr, vtimer_cntp_ctl);
    case HSR_SYSREG_CNTP_TVAL_EL0:
        return vreg_emulate_sysreg32(regs, hsr, vtimer_cntp_tval);
    case HSR_SYSREG_CNTP_CVAL_EL0:
        return vreg_emulate_sysreg64(regs, hsr, vtimer_cntp_cval);

    default:
        /* Not a timer register we emulate. */
        return false;
    }
}
#endif
331
vtimer_emulate(struct cpu_user_regs * regs,union hsr hsr)332 bool vtimer_emulate(struct cpu_user_regs *regs, union hsr hsr)
333 {
334
335 switch (hsr.ec) {
336 case HSR_EC_CP15_32:
337 return vtimer_emulate_cp32(regs, hsr);
338 case HSR_EC_CP15_64:
339 return vtimer_emulate_cp64(regs, hsr);
340 #ifdef CONFIG_ARM_64
341 case HSR_EC_SYSREG:
342 return vtimer_emulate_sysreg(regs, hsr);
343 #endif
344 default:
345 return false;
346 }
347 }
348
/* Recompute and propagate one timer's virtual interrupt line level. */
static void vtimer_update_irq(struct vcpu *v, struct vtimer *vtimer,
                              uint32_t vtimer_ctl)
{
    bool line_high;

    /* Only ENABLE, ISTATUS (pending) and IMASK determine the line level. */
    vtimer_ctl &= (CNTx_CTL_ENABLE | CNTx_CTL_PENDING | CNTx_CTL_MASK);

    /* Asserted iff the timer is enabled and pending but not masked. */
    line_high = (vtimer_ctl == (CNTx_CTL_ENABLE | CNTx_CTL_PENDING));

    /*
     * This exists mostly to *lower* the virtual interrupt line when the
     * timer is no longer pending: an IRQ was already injected via SOFTIRQ
     * when the timer expired, so re-asserting a high line is a NOP.
     */
    vgic_inject_irq(v->domain, v, vtimer->irq, line_high);
}
368
369 /**
370 * vtimer_update_irqs() - update the virtual timers' IRQ lines after a guest run
371 * @vcpu: The VCPU to sync the timer state
372 *
373 * After returning from a guest, update the state of the timers' virtual
374 * interrupt lines, to model the level triggered interrupts correctly.
375 * If the guest has handled a timer interrupt, the virtual interrupt line
376 * needs to be lowered explicitly. vgic_inject_irq() takes care of that.
377 */
vtimer_update_irqs(struct vcpu * v)378 void vtimer_update_irqs(struct vcpu *v)
379 {
380 /*
381 * For the virtual timer we read the current state from the hardware.
382 * Technically we should keep the CNTx_CTL_MASK bit here, to catch if
383 * the timer interrupt is masked. However Xen *always* masks the timer
384 * upon entering the hypervisor, leaving it up to the guest to un-mask it.
385 * So we would always read a "low" level, despite the condition being
386 * actually "high". Ignoring the mask bit solves this (for now).
387 *
388 * TODO: The proper fix for this is to make vtimer vIRQ hardware mapped,
389 * but this requires reworking the arch timer to implement this.
390 */
391 vtimer_update_irq(v, &v->arch.virt_timer,
392 READ_SYSREG32(CNTV_CTL_EL0) & ~CNTx_CTL_MASK);
393
394 /* For the physical timer we rely on our emulated state. */
395 vtimer_update_irq(v, &v->arch.phys_timer, v->arch.phys_timer.ctl);
396 }
397
398 /*
399 * Local variables:
400 * mode: C
401 * c-file-style: "BSD"
402 * c-basic-offset: 4
403 * indent-tabs-mode: nil
404 * End:
405 */
406