/*
 * hvm/pmtimer.c: emulation of the ACPI PM timer
 *
 * Copyright (c) 2007, XenSource inc.
 * Copyright (c) 2006, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; If not, see <http://www.gnu.org/licenses/>.
 */

#include <asm/hvm/vpt.h>
#include <asm/hvm/io.h>
#include <asm/hvm/support.h>
#include <asm/acpi.h> /* for hvm_acpi_power_button prototype */
#include <public/hvm/params.h>

/* Slightly more readable port I/O addresses for the registers we intercept */
#define PM1a_STS_ADDR_V0 (ACPI_PM1A_EVT_BLK_ADDRESS_V0)
#define PM1a_EN_ADDR_V0  (ACPI_PM1A_EVT_BLK_ADDRESS_V0 + 2)
#define TMR_VAL_ADDR_V0  (ACPI_PM_TMR_BLK_ADDRESS_V0)
#define PM1a_STS_ADDR_V1 (ACPI_PM1A_EVT_BLK_ADDRESS_V1)
#define PM1a_EN_ADDR_V1  (ACPI_PM1A_EVT_BLK_ADDRESS_V1 + 2)
#define TMR_VAL_ADDR_V1  (ACPI_PM_TMR_BLK_ADDRESS_V1)

/* The interesting bits of the PM1a_STS register */
#define TMR_STS    (1 << 0)
#define GBL_STS    (1 << 5)
#define PWRBTN_STS (1 << 8)
#define SLPBTN_STS (1 << 9)

/* The same in PM1a_EN */
#define TMR_EN     (1 << 0)
#define GBL_EN     (1 << 5)
#define PWRBTN_EN  (1 << 8)
#define SLPBTN_EN  (1 << 9)

/* Mask of bits in PM1a_STS that can generate an SCI. */
#define SCI_MASK (TMR_STS|PWRBTN_STS|SLPBTN_STS|GBL_STS)

/* SCI IRQ number (must match SCI_INT number in ACPI FADT in hvmloader) */
#define SCI_IRQ 9

/* We provide a 32-bit counter (must match the TMR_VAL_EXT bit in the FADT) */
#define TMR_VAL_MASK  (0xffffffff)
#define TMR_VAL_MSB   (0x80000000)

/* Dispatch SCIs based on the PM1a_STS and PM1a_EN registers */
static void pmt_update_sci(PMTState *s)
{
    struct hvm_hw_acpi *acpi = &s->vcpu->domain->arch.hvm.acpi;

    ASSERT(spin_is_locked(&s->lock));

    if ( acpi->pm1a_en & acpi->pm1a_sts & SCI_MASK )
        hvm_isa_irq_assert(s->vcpu->domain, SCI_IRQ, NULL);
    else
        hvm_isa_irq_deassert(s->vcpu->domain, SCI_IRQ);
}

void hvm_acpi_power_button(struct domain *d)
{
    PMTState *s = &d->arch.hvm.pl_time->vpmt;

    if ( !has_vpm(d) )
        return;

    spin_lock(&s->lock);
    d->arch.hvm.acpi.pm1a_sts |= PWRBTN_STS;
    pmt_update_sci(s);
    spin_unlock(&s->lock);
}

void hvm_acpi_sleep_button(struct domain *d)
{
    PMTState *s = &d->arch.hvm.pl_time->vpmt;

    if ( !has_vpm(d) )
        return;

    spin_lock(&s->lock);
    d->arch.hvm.acpi.pm1a_sts |= SLPBTN_STS;
    pmt_update_sci(s);
    spin_unlock(&s->lock);
}

/* Set the correct value in the timer, accounting for time elapsed
 * since the last time we did that. */
static void pmt_update_time(PMTState *s)
{
    uint64_t curr_gtime, tmp;
    struct hvm_hw_acpi *acpi = &s->vcpu->domain->arch.hvm.acpi;
    uint32_t tmr_val = acpi->tmr_val, msb = tmr_val & TMR_VAL_MSB;

    ASSERT(spin_is_locked(&s->lock));

    /* Update the timer */
    curr_gtime = hvm_get_guest_time(s->vcpu);
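    /*
     * s->scale is the PM timer rate expressed as ticks per unit of guest
     * time in 32.32 fixed point (a reading of the arithmetic below,
     * assuming guest time advances at SYSTEM_TIME_HZ): the top 32 bits of
     * tmp are whole PM timer ticks to credit now, and the bottom 32 bits
     * are the fractional remainder carried over in not_accounted.
     */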
    tmp = ((curr_gtime - s->last_gtime) * s->scale) + s->not_accounted;
    s->not_accounted = (uint32_t)tmp;
    tmr_val += tmp >> 32;
    tmr_val &= TMR_VAL_MASK;
    s->last_gtime = curr_gtime;

    /* Update timer value atomically wrt lock-free reads in handle_pmt_io(). */
    write_atomic(&acpi->tmr_val, tmr_val);

    /* If the counter's MSB has changed, set the status bit */
    if ( (tmr_val & TMR_VAL_MSB) != msb )
    {
        acpi->pm1a_sts |= TMR_STS;
        pmt_update_sci(s);
    }
}

/* This function should be called soon after each time the MSB of the
 * pmtimer register rolls over, to make sure we update the status
 * registers and SCI at least once per rollover */
static void pmt_timer_callback(void *opaque)
{
    PMTState *s = opaque;
    uint32_t pmt_cycles_until_flip;
    uint64_t time_until_flip;

    spin_lock(&s->lock);

    /* Recalculate the timer and make sure we get an SCI if we need one */
    pmt_update_time(s);

    /* How close are we to the next MSB flip? */
    pmt_cycles_until_flip = TMR_VAL_MSB -
        (s->vcpu->domain->arch.hvm.acpi.tmr_val & (TMR_VAL_MSB - 1));

    /* Overall time between MSB flips */
    time_until_flip = (1000000000ULL << 23) / FREQUENCE_PMTIMER;

    /* Reduced appropriately */
    time_until_flip = (time_until_flip * pmt_cycles_until_flip) >> 23;
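    /*
     * Net effect: time_until_flip = pmt_cycles_until_flip * 10^9 /
     * FREQUENCE_PMTIMER nanoseconds; the <<23 / >>23 pair only preserves
     * precision in the integer division.  As a rough sketch, assuming the
     * standard ACPI rate of 3.579545MHz, one tick is ~279ns and a full
     * 2^31-tick MSB half-period comes to roughly ten minutes.
     */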

    /* Wake up again near the next bit-flip */
    set_timer(&s->timer, NOW() + time_until_flip + MILLISECS(1));

    spin_unlock(&s->lock);
}

/* Handle port I/O to the PM1a_STS and PM1a_EN registers */
static int handle_evt_io(
    int dir, unsigned int port, unsigned int bytes, uint32_t *val)
{
    struct vcpu *v = current;
    struct hvm_hw_acpi *acpi = &v->domain->arch.hvm.acpi;
    PMTState *s = &v->domain->arch.hvm.pl_time->vpmt;
    uint32_t addr, data, byte;
    int i;

    addr = port -
        ((v->domain->arch.hvm.params[
            HVM_PARAM_ACPI_IOPORTS_LOCATION] == 0) ?
         PM1a_STS_ADDR_V0 : PM1a_STS_ADDR_V1);
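    /*
     * addr is now the byte offset into the 4-byte PM1a event block:
     *   0, 1 - PM1a_STS (low, high byte); status bits are write-1-to-clear
     *   2, 3 - PM1a_EN  (low, high byte); ordinary read/write
     */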

    spin_lock(&s->lock);

    if ( dir == IOREQ_WRITE )
    {
        /* Handle this I/O one byte at a time */
        for ( i = bytes, data = *val;
              i > 0;
              i--, addr++, data >>= 8 )
        {
            byte = data & 0xff;
            switch ( addr )
            {
                /* PM1a_STS register bits are write-to-clear */
            case 0 /* PM1a_STS_ADDR */:
                acpi->pm1a_sts &= ~byte;
                break;
            case 1 /* PM1a_STS_ADDR + 1 */:
                acpi->pm1a_sts &= ~(byte << 8);
                break;
            case 2 /* PM1a_EN_ADDR */:
                acpi->pm1a_en = (acpi->pm1a_en & 0xff00) | byte;
                break;
            case 3 /* PM1a_EN_ADDR + 1 */:
                acpi->pm1a_en = (acpi->pm1a_en & 0xff) | (byte << 8);
                break;
            default:
                gdprintk(XENLOG_WARNING,
                         "Bad ACPI PM register write: %x bytes (%x) at %x\n",
                         bytes, *val, port);
            }
        }
        /* Fix up the SCI state to match the new register state */
        pmt_update_sci(s);
    }
    else /* dir == IOREQ_READ */
    {
        data = acpi->pm1a_sts | ((uint32_t)acpi->pm1a_en << 16);
        data >>= 8 * addr;
        if ( bytes == 1 ) data &= 0xff;
        else if ( bytes == 2 ) data &= 0xffff;
        *val = data;
    }

    spin_unlock(&s->lock);

    return X86EMUL_OKAY;
}


/* Handle port I/O to the TMR_VAL register */
static int handle_pmt_io(
    int dir, unsigned int port, unsigned int bytes, uint32_t *val)
{
    struct vcpu *v = current;
    struct hvm_hw_acpi *acpi = &v->domain->arch.hvm.acpi;
    PMTState *s = &v->domain->arch.hvm.pl_time->vpmt;

    if ( bytes != 4 || dir != IOREQ_READ )
    {
        gdprintk(XENLOG_WARNING, "HVM_PMT bad access\n");
        *val = ~0;
    }
    else if ( spin_trylock(&s->lock) )
    {
        /* We hold the lock: update timer value and return it. */
        pmt_update_time(s);
        *val = acpi->tmr_val;
        spin_unlock(&s->lock);
    }
    else
    {
        /*
         * Someone else is updating the timer: rather than do the work
         * again ourselves, wait for them to finish and then steal their
         * updated value with a lock-free atomic read.
         */
        spin_barrier(&s->lock);
        *val = read_atomic(&acpi->tmr_val);
    }

    return X86EMUL_OKAY;
}

static int acpi_save(struct vcpu *v, hvm_domain_context_t *h)
{
    struct domain *d = v->domain;
    struct hvm_hw_acpi *acpi = &d->arch.hvm.acpi;
    PMTState *s = &d->arch.hvm.pl_time->vpmt;
    uint32_t x, msb = acpi->tmr_val & TMR_VAL_MSB;
    int rc;

    if ( !has_vpm(d) )
        return 0;

    spin_lock(&s->lock);

    /*
     * Update the counter to the guest's current time.  Make sure it only
     * goes forwards.
     */
    x = (((s->vcpu->arch.hvm.guest_time ?: hvm_get_guest_time(s->vcpu)) -
          s->last_gtime) * s->scale) >> 32;
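    /*
     * If guest time appears to have gone backwards, the unsigned delta
     * above wraps and x comes out with its top bit set; the check below
     * then skips the update rather than jumping the counter.
     */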
    if ( x < 1UL<<31 )
        acpi->tmr_val += x;
    if ( (acpi->tmr_val & TMR_VAL_MSB) != msb )
        acpi->pm1a_sts |= TMR_STS;
    /* No point in setting the SCI here because we'll already have saved the
     * IRQ and *PIC state; we'll fix it up when we restore the domain */
    rc = hvm_save_entry(PMTIMER, 0, h, acpi);

    spin_unlock(&s->lock);

    return rc;
}

static int acpi_load(struct domain *d, hvm_domain_context_t *h)
{
    struct hvm_hw_acpi *acpi = &d->arch.hvm.acpi;
    PMTState *s = &d->arch.hvm.pl_time->vpmt;

    if ( !has_vpm(d) )
        return -ENODEV;

    spin_lock(&s->lock);

    /* Reload the registers */
    if ( hvm_load_entry(PMTIMER, h, acpi) )
    {
        spin_unlock(&s->lock);
        return -EINVAL;
    }

    /* Calculate future counter values from now. */
    s->last_gtime = hvm_get_guest_time(s->vcpu);
    s->not_accounted = 0;

    /* Set the SCI state from the registers */
    pmt_update_sci(s);

    spin_unlock(&s->lock);

    return 0;
}

HVM_REGISTER_SAVE_RESTORE(PMTIMER, acpi_save, acpi_load,
                          1, HVMSR_PER_DOM);

int pmtimer_change_ioport(struct domain *d, uint64_t version)
{
    uint64_t old_version;

    if ( !has_vpm(d) )
        return -ENODEV;

    /* Check that version is changing. */
    old_version = d->arch.hvm.params[HVM_PARAM_ACPI_IOPORTS_LOCATION];
    if ( version == old_version )
        return 0;

    /* Only allow changes between versions 0 and 1. */
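    /* (old_version itself is only ever 0 or 1, so the XOR is 1 exactly
     *  when flipping between the two.) */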
    if ( (version ^ old_version) != 1 )
        return -EINVAL;

    if ( version == 1 )
    {
        /* Moving from version 0 to version 1. */
        relocate_portio_handler(d, TMR_VAL_ADDR_V0, TMR_VAL_ADDR_V1, 4);
        relocate_portio_handler(d, PM1a_STS_ADDR_V0, PM1a_STS_ADDR_V1, 4);
    }
    else
    {
        /* Moving from version 1 to version 0. */
        relocate_portio_handler(d, TMR_VAL_ADDR_V1, TMR_VAL_ADDR_V0, 4);
        relocate_portio_handler(d, PM1a_STS_ADDR_V1, PM1a_STS_ADDR_V0, 4);
    }

    return 0;
}

void pmtimer_init(struct vcpu *v)
{
    PMTState *s = &v->domain->arch.hvm.pl_time->vpmt;

    if ( !has_vpm(v->domain) )
        return;

    spin_lock_init(&s->lock);

    s->scale = ((uint64_t)FREQUENCE_PMTIMER << 32) / SYSTEM_TIME_HZ;
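    /*
     * scale is thus PM timer ticks per unit of system time in 32.32 fixed
     * point.  As a rough sketch, assuming the standard ACPI rate of
     * 3579545Hz and nanosecond system time: 3579545 / 10^9 ~= 0.00358
     * ticks/ns, so scale ~= 0.00358 * 2^32, i.e. about 15.4 million.
     */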
    s->not_accounted = 0;
    s->vcpu = v;

    /* Intercept port I/O (need two handlers because PM1a_CNT is between
     * PM1a_EN and TMR_VAL and is handled by qemu) */
    register_portio_handler(v->domain, TMR_VAL_ADDR_V0, 4, handle_pmt_io);
    register_portio_handler(v->domain, PM1a_STS_ADDR_V0, 4, handle_evt_io);

    /* Set up callback to fire SCIs when the MSB of TMR_VAL changes */
    init_timer(&s->timer, pmt_timer_callback, s, v->processor);
    pmt_timer_callback(s);
}


void pmtimer_deinit(struct domain *d)
{
    PMTState *s = &d->arch.hvm.pl_time->vpmt;

    if ( !has_vpm(d) || !d->arch.hvm.pl_time || !s->vcpu )
        return;

    kill_timer(&s->timer);
}

void pmtimer_reset(struct domain *d)
{
    if ( !has_vpm(d) )
        return;

    /* Reset the counter. */
    d->arch.hvm.acpi.tmr_val = 0;
}

/*
 * Local variables:
 * mode: C
 * c-file-style: "BSD"
 * c-basic-offset: 4
 * tab-width: 4
 * indent-tabs-mode: nil
 * End:
 */