/******************************************************************************
 * arch/x86/hpet.c
 *
 * HPET management.
 */

#include <xen/errno.h>
#include <xen/time.h>
#include <xen/timer.h>
#include <xen/smp.h>
#include <xen/softirq.h>
#include <xen/irq.h>
#include <xen/numa.h>
#include <xen/param.h>
#include <xen/sched.h>
#include <asm/fixmap.h>
#include <asm/div64.h>
#include <asm/hpet.h>
#include <asm/msi.h>
#include <mach_apic.h>
#include <xen/cpuidle.h>

#define MAX_DELTA_NS MILLISECS(10*1000)
#define MIN_DELTA_NS MICROSECS(20)

#define HPET_EVT_USED_BIT    0
#define HPET_EVT_USED       (1 << HPET_EVT_USED_BIT)
#define HPET_EVT_DISABLE_BIT 1
#define HPET_EVT_DISABLE    (1 << HPET_EVT_DISABLE_BIT)
#define HPET_EVT_LEGACY_BIT  2
#define HPET_EVT_LEGACY     (1 << HPET_EVT_LEGACY_BIT)

struct hpet_event_channel
{
    unsigned long mult;
    int           shift;
    s_time_t      next_event;
    cpumask_var_t cpumask;
    spinlock_t    lock;
    void          (*event_handler)(struct hpet_event_channel *);

    unsigned int idx;   /* physical channel idx */
    unsigned int cpu;   /* msi target */
    struct msi_desc msi;/* msi state */
    unsigned int flags; /* HPET_EVT_x */
} __cacheline_aligned;
static struct hpet_event_channel *__read_mostly hpet_events;

/* msi hpet channels used for broadcast */
static unsigned int __read_mostly num_hpets_used;

DEFINE_PER_CPU(struct hpet_event_channel *, cpu_bc_channel);

unsigned long __initdata hpet_address;
u8 __initdata hpet_blockid;
u8 __initdata hpet_flags;

/*
 * force_hpet_broadcast: by default, legacy HPET broadcast will be stopped
 * if RTC interrupts are enabled.  Enable this option to always use legacy
 * HPET broadcast for deep C-states.
 */
static bool __initdata force_hpet_broadcast;
boolean_param("hpetbroadcast", force_hpet_broadcast);

/*
 * Calculate a multiplication factor for scaled math, which is used to convert
 * nanoseconds based values to clock ticks:
 *
 * clock_ticks = (nanoseconds * factor) >> shift.
 *
 * div_sc is the rearranged equation to calculate a factor from a given clock
 * ticks / nanoseconds ratio:
 *
 * factor = (clock_ticks << shift) / nanoseconds
 */
static inline unsigned long div_sc(unsigned long ticks, unsigned long nsec,
                                   int shift)
{
    uint64_t tmp = ((uint64_t)ticks) << shift;

    do_div(tmp, nsec);
    return (unsigned long) tmp;
}
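
/*
 * Worked example (illustrative values only; the real rate is read from
 * hardware at boot): with the common 14.31818 MHz HPET clock and
 * shift = 32,
 *
 *   factor = (14318180 << 32) / 1000000000 = 61496114 (truncated)
 *
 * so converting 1ms gives (1000000 * 61496114) >> 32 ~= 14318 ticks,
 * i.e. one millisecond's worth of ~14.318 MHz counter ticks.
 */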

/*
 * Convert nanoseconds based values to clock ticks:
 *
 * clock_ticks = (nanoseconds * factor) >> shift.
 */
static inline unsigned long ns2ticks(unsigned long nsec, int shift,
                                     unsigned long factor)
{
    uint64_t tmp = ((uint64_t)nsec * factor) >> shift;

    return (unsigned long) tmp;
}

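/*
 * Arm HPET channel @timer to fire @delta main counter ticks from now.  The
 * main counter is read back after writing the comparator so we can tell
 * whether the deadline may already have slipped past during the update.
 */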
static int hpet_next_event(unsigned long delta, int timer)
{
    uint32_t cnt, cmp;
    unsigned long flags;

    local_irq_save(flags);
    cnt = hpet_read32(HPET_COUNTER);
    cmp = cnt + delta;
    hpet_write32(cmp, HPET_Tn_CMP(timer));
    cmp = hpet_read32(HPET_COUNTER);
    local_irq_restore(flags);

    /* Are we within two ticks of the deadline passing? Then we may miss. */
    return ((cmp + 2 - cnt) > delta) ? -ETIME : 0;
}

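/*
 * Program channel @ch to fire at @expire, given the current time @now.
 * Unless @force is set, a deadline already in the past is rejected with
 * -ETIME; with @force, the delta is doubled until the comparator write
 * takes effect.
 */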
static int reprogram_hpet_evt_channel(
    struct hpet_event_channel *ch,
    s_time_t expire, s_time_t now, int force)
{
    int64_t delta;
    int ret;

    if ( (ch->flags & HPET_EVT_DISABLE) || (expire == 0) )
        return 0;

    if ( unlikely(expire < 0) )
    {
        printk(KERN_DEBUG "reprogram: expire < 0\n");
        return -ETIME;
    }

    delta = expire - now;
    if ( (delta <= 0) && !force )
        return -ETIME;

    ch->next_event = expire;

    if ( expire == STIME_MAX )
    {
        /* We assume it will take a long time for the timer to wrap. */
        hpet_write32(0, HPET_Tn_CMP(ch->idx));
        return 0;
    }

    delta = min_t(int64_t, delta, MAX_DELTA_NS);
    delta = max_t(int64_t, delta, MIN_DELTA_NS);
    delta = ns2ticks(delta, ch->shift, ch->mult);

    ret = hpet_next_event(delta, ch->idx);
    while ( ret && force )
    {
        delta += delta;
        ret = hpet_next_event(delta, ch->idx);
    }

    return ret;
}

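/*
 * Wake every CPU in @mask: the local CPU by raising TIMER_SOFTIRQ
 * directly, CPUs idling in MWAIT via cpuidle_wakeup_mwait() (which clears
 * them from @mask), and any remaining CPUs with a TIMER_SOFTIRQ IPI.
 */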
static void evt_do_broadcast(cpumask_t *mask)
{
    unsigned int cpu = smp_processor_id();

    if ( __cpumask_test_and_clear_cpu(cpu, mask) )
        raise_softirq(TIMER_SOFTIRQ);

    cpuidle_wakeup_mwait(mask);

    if ( !cpumask_empty(mask) )
        cpumask_raise_softirq(mask, TIMER_SOFTIRQ);
}

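/*
 * Broadcast event handler: wake all CPUs on this channel whose deadline
 * has passed, then reprogram the channel for the earliest remaining
 * deadline, retrying if that deadline itself expires under our feet.
 */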
static void handle_hpet_broadcast(struct hpet_event_channel *ch)
{
    cpumask_t mask;
    s_time_t now, next_event;
    unsigned int cpu;
    unsigned long flags;

    spin_lock_irqsave(&ch->lock, flags);

 again:
    ch->next_event = STIME_MAX;

    spin_unlock_irqrestore(&ch->lock, flags);

    next_event = STIME_MAX;
    cpumask_clear(&mask);
    now = NOW();

    /* find all expired events */
    for_each_cpu(cpu, ch->cpumask)
    {
        s_time_t deadline = ACCESS_ONCE(per_cpu(timer_deadline, cpu));

        if ( deadline <= now )
            __cpumask_set_cpu(cpu, &mask);
        else if ( deadline < next_event )
            next_event = deadline;
    }

    /* wakeup the cpus which have an expired event. */
    evt_do_broadcast(&mask);

    if ( next_event != STIME_MAX )
    {
        spin_lock_irqsave(&ch->lock, flags);

        if ( next_event < ch->next_event &&
             reprogram_hpet_evt_channel(ch, next_event, now, 0) )
            goto again;

        spin_unlock_irqrestore(&ch->lock, flags);
    }
}

static void hpet_interrupt_handler(int irq, void *data,
                                   struct cpu_user_regs *regs)
{
    struct hpet_event_channel *ch = data;

    this_cpu(irq_count)--;

    if ( !ch->event_handler )
    {
        printk(XENLOG_WARNING "Spurious HPET timer interrupt on HPET timer %u\n",
               ch->idx);
        return;
    }

    ch->event_handler(ch);
}

static void hpet_msi_unmask(struct irq_desc *desc)
{
    u32 cfg;
    struct hpet_event_channel *ch = desc->action->dev_id;

    cfg = hpet_read32(HPET_Tn_CFG(ch->idx));
    cfg |= HPET_TN_ENABLE;
    hpet_write32(cfg, HPET_Tn_CFG(ch->idx));
    ch->msi.msi_attrib.host_masked = 0;
}

static void hpet_msi_mask(struct irq_desc *desc)
{
    u32 cfg;
    struct hpet_event_channel *ch = desc->action->dev_id;

    cfg = hpet_read32(HPET_Tn_CFG(ch->idx));
    cfg &= ~HPET_TN_ENABLE;
    hpet_write32(cfg, HPET_Tn_CFG(ch->idx));
    ch->msi.msi_attrib.host_masked = 1;
}

static int hpet_msi_write(struct hpet_event_channel *ch, struct msi_msg *msg)
{
    ch->msi.msg = *msg;

    if ( iommu_intremap )
    {
        int rc = iommu_update_ire_from_msi(&ch->msi, msg);

        if ( rc )
            return rc;
    }

    hpet_write32(msg->data, HPET_Tn_ROUTE(ch->idx));
    hpet_write32(msg->address_lo, HPET_Tn_ROUTE(ch->idx) + 4);

    return 0;
}

static void __maybe_unused
hpet_msi_read(struct hpet_event_channel *ch, struct msi_msg *msg)
{
    msg->data = hpet_read32(HPET_Tn_ROUTE(ch->idx));
    msg->address_lo = hpet_read32(HPET_Tn_ROUTE(ch->idx) + 4);
    msg->address_hi = MSI_ADDR_BASE_HI;
    if ( iommu_intremap )
        iommu_read_msi_from_ire(&ch->msi, msg);
}

static unsigned int hpet_msi_startup(struct irq_desc *desc)
{
    hpet_msi_unmask(desc);
    return 0;
}

#define hpet_msi_shutdown hpet_msi_mask

static void hpet_msi_ack(struct irq_desc *desc)
{
    irq_complete_move(desc);
    move_native_irq(desc);
    ack_APIC_irq();
}

static void hpet_msi_set_affinity(struct irq_desc *desc, const cpumask_t *mask)
{
    struct hpet_event_channel *ch = desc->action->dev_id;
    struct msi_msg msg = ch->msi.msg;

    msg.dest32 = set_desc_affinity(desc, mask);
    if ( msg.dest32 == BAD_APICID )
        return;

    msg.data &= ~MSI_DATA_VECTOR_MASK;
    msg.data |= MSI_DATA_VECTOR(desc->arch.vector);
    msg.address_lo &= ~MSI_ADDR_DEST_ID_MASK;
    msg.address_lo |= MSI_ADDR_DEST_ID(msg.dest32);
    if ( msg.data != ch->msi.msg.data || msg.dest32 != ch->msi.msg.dest32 )
        hpet_msi_write(ch, &msg);
}

/*
 * IRQ chip for MSI HPET devices.
 */
static hw_irq_controller hpet_msi_type = {
    .typename = "HPET-MSI",
    .startup = hpet_msi_startup,
    .shutdown = hpet_msi_shutdown,
    .enable = hpet_msi_unmask,
    .disable = hpet_msi_mask,
    .ack = hpet_msi_ack,
    .set_affinity = hpet_msi_set_affinity,
};

static int __hpet_setup_msi_irq(struct irq_desc *desc)
{
    struct msi_msg msg;

    msi_compose_msg(desc->arch.vector, desc->arch.cpu_mask, &msg);
    return hpet_msi_write(desc->action->dev_id, &msg);
}

static int __init hpet_setup_msi_irq(struct hpet_event_channel *ch)
{
    int ret;
    u32 cfg = hpet_read32(HPET_Tn_CFG(ch->idx));
    irq_desc_t *desc = irq_to_desc(ch->msi.irq);

    if ( iommu_intremap )
    {
        ch->msi.hpet_id = hpet_blockid;
        ret = iommu_setup_hpet_msi(&ch->msi);
        if ( ret )
            return ret;
    }

    /* set HPET Tn as oneshot */
    cfg &= ~(HPET_TN_LEVEL | HPET_TN_PERIODIC);
    cfg |= HPET_TN_FSB | HPET_TN_32BIT;
    hpet_write32(cfg, HPET_Tn_CFG(ch->idx));

    desc->handler = &hpet_msi_type;
    ret = request_irq(ch->msi.irq, 0, hpet_interrupt_handler, "HPET", ch);
    if ( ret >= 0 )
        ret = __hpet_setup_msi_irq(desc);
    if ( ret < 0 )
    {
        if ( iommu_intremap )
            iommu_update_ire_from_msi(&ch->msi, NULL);
        return ret;
    }

    desc->msi_desc = &ch->msi;

    return 0;
}

static int __init hpet_assign_irq(struct hpet_event_channel *ch)
{
    int irq;

    if ( (irq = create_irq(NUMA_NO_NODE, false)) < 0 )
        return irq;

    ch->msi.irq = irq;
    if ( hpet_setup_msi_irq(ch) )
    {
        destroy_irq(irq);
        return -EINVAL;
    }

    return 0;
}

static void __init hpet_fsb_cap_lookup(void)
{
    u32 id;
    unsigned int i, num_chs;

    if ( unlikely(acpi_gbl_FADT.boot_flags & ACPI_FADT_NO_MSI) )
        return;

    id = hpet_read32(HPET_ID);

    num_chs = ((id & HPET_ID_NUMBER) >> HPET_ID_NUMBER_SHIFT);
    num_chs++; /* Value read out starts from 0 */

    hpet_events = xzalloc_array(struct hpet_event_channel, num_chs);
    if ( !hpet_events )
        return;

    for ( i = 0; i < num_chs && num_hpets_used < nr_cpu_ids; i++ )
    {
        struct hpet_event_channel *ch = &hpet_events[num_hpets_used];
        u32 cfg = hpet_read32(HPET_Tn_CFG(i));

        /* Only consider HPET timers with MSI (FSB delivery) support */
        if ( !(cfg & HPET_TN_FSB_CAP) )
            continue;

        if ( !zalloc_cpumask_var(&ch->cpumask) )
        {
            if ( !num_hpets_used )
            {
                xfree(hpet_events);
                hpet_events = NULL;
            }
            break;
        }

        ch->flags = 0;
        ch->idx = i;

        if ( hpet_assign_irq(ch) == 0 )
            num_hpets_used++;
    }

    printk(XENLOG_INFO "HPET: %u timers usable for broadcast (%u total)\n",
           num_hpets_used, num_chs);
}

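/*
 * Pick a broadcast channel for @cpu.  Without MSI channels the single
 * legacy channel is returned; with at least one channel per CPU each CPU
 * gets its own.  Otherwise prefer a currently unused channel, falling
 * back to sharing one selected round-robin.
 */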
static struct hpet_event_channel *hpet_get_channel(unsigned int cpu)
{
    static unsigned int next_channel;
    unsigned int i, next;
    struct hpet_event_channel *ch;

    if ( num_hpets_used == 0 )
        return hpet_events;

    if ( num_hpets_used >= nr_cpu_ids )
        return &hpet_events[cpu];

    do {
        next = next_channel;
        if ( (i = next + 1) == num_hpets_used )
            i = 0;
    } while ( cmpxchg(&next_channel, next, i) != next );

    /* try unused channel first */
    for ( i = next; i < next + num_hpets_used; i++ )
    {
        ch = &hpet_events[i % num_hpets_used];
        if ( !test_and_set_bit(HPET_EVT_USED_BIT, &ch->flags) )
        {
            ch->cpu = cpu;
            return ch;
        }
    }

    /* share an in-use channel */
    ch = &hpet_events[next];
    if ( !test_and_set_bit(HPET_EVT_USED_BIT, &ch->flags) )
        ch->cpu = cpu;

    return ch;
}

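/*
 * Retarget the channel's MSI at ch->cpu.  Entered with ch->lock held and
 * interrupts disabled; drops the lock, then runs the event handler if an
 * interrupt was missed while the MSI was masked.
 */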
static void set_channel_irq_affinity(struct hpet_event_channel *ch)
{
    struct irq_desc *desc = irq_to_desc(ch->msi.irq);

    ASSERT(!local_irq_is_enabled());
    spin_lock(&desc->lock);
    hpet_msi_mask(desc);
    hpet_msi_set_affinity(desc, cpumask_of(ch->cpu));
    hpet_msi_unmask(desc);
    spin_unlock(&desc->lock);

    spin_unlock(&ch->lock);

    /* We may have missed an interrupt due to the temporary masking. */
    if ( ch->event_handler && ch->next_event < NOW() )
        ch->event_handler(ch);
}

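/* Bind @cpu to broadcast channel @ch, taking ownership if it is free. */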
static void hpet_attach_channel(unsigned int cpu,
                                struct hpet_event_channel *ch)
{
    ASSERT(!local_irq_is_enabled());
    spin_lock(&ch->lock);

    per_cpu(cpu_bc_channel, cpu) = ch;

    /* try to be the channel owner again while holding the lock */
    if ( !test_and_set_bit(HPET_EVT_USED_BIT, &ch->flags) )
        ch->cpu = cpu;

    if ( ch->cpu != cpu )
        spin_unlock(&ch->lock);
    else
        set_channel_irq_affinity(ch);
}

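/*
 * Unbind @cpu from @ch.  An owning CPU hands the channel over to another
 * CPU still using it, or releases it entirely when none remains.
 */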
static void hpet_detach_channel(unsigned int cpu,
                                struct hpet_event_channel *ch)
{
    unsigned int next;

    spin_lock_irq(&ch->lock);

    ASSERT(ch == per_cpu(cpu_bc_channel, cpu));

    per_cpu(cpu_bc_channel, cpu) = NULL;

    if ( cpu != ch->cpu )
        spin_unlock_irq(&ch->lock);
    else if ( (next = cpumask_first(ch->cpumask)) >= nr_cpu_ids )
    {
        ch->cpu = -1;
        clear_bit(HPET_EVT_USED_BIT, &ch->flags);
        spin_unlock_irq(&ch->lock);
    }
    else
    {
        ch->cpu = next;
        set_channel_irq_affinity(ch);
        local_irq_enable();
    }
}

#include <asm/mc146818rtc.h>

void (*__read_mostly pv_rtc_handler)(uint8_t index, uint8_t value);

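/*
 * One-shot PV RTC hook.  In legacy replacement mode the HPET takes over
 * IRQ8 from the RTC, so once a guest enables RTC interrupts (PIE/AIE/UIE
 * in register B) the legacy broadcast cannot be relied upon: disable deep
 * C-states and unhook ourselves.
 */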
static void handle_rtc_once(uint8_t index, uint8_t value)
{
    if ( index != RTC_REG_B )
        return;

    /* RTC register B contains the PIE/AIE/UIE interrupt enable bits. */
    if ( value & (RTC_PIE | RTC_AIE | RTC_UIE) )
    {
        cpuidle_disable_deep_cstate();
        ACCESS_ONCE(pv_rtc_handler) = NULL;
    }
}

void __init hpet_broadcast_init(void)
{
    u64 hpet_rate = hpet_setup();
    u32 hpet_id, cfg;
    unsigned int i, n;

    if ( hpet_rate == 0 || hpet_broadcast_is_available() )
        return;

    cfg = hpet_read32(HPET_CFG);

    hpet_fsb_cap_lookup();
    if ( num_hpets_used > 0 )
    {
        /* Stop HPET legacy interrupts */
        cfg &= ~HPET_CFG_LEGACY;
        n = num_hpets_used;
    }
    else
    {
        hpet_id = hpet_read32(HPET_ID);
        if ( !(hpet_id & HPET_ID_LEGSUP) )
            return;

        if ( !hpet_events )
            hpet_events = xzalloc(struct hpet_event_channel);
        if ( !hpet_events || !zalloc_cpumask_var(&hpet_events->cpumask) )
            return;
        hpet_events->msi.irq = -1;

        /* Start HPET legacy interrupts */
        cfg |= HPET_CFG_LEGACY;
        n = 1;

        if ( !force_hpet_broadcast )
            pv_rtc_handler = handle_rtc_once;
    }

    hpet_write32(cfg, HPET_CFG);

    for ( i = 0; i < n; i++ )
    {
        if ( i == 0 && (cfg & HPET_CFG_LEGACY) )
        {
            /* set HPET T0 as oneshot */
            cfg = hpet_read32(HPET_Tn_CFG(0));
            cfg &= ~(HPET_TN_LEVEL | HPET_TN_PERIODIC);
            cfg |= HPET_TN_ENABLE | HPET_TN_32BIT;
            hpet_write32(cfg, HPET_Tn_CFG(0));
        }

        /*
         * The period is a femtoseconds value.  We need to calculate the
         * scaled math multiplication factor for nanosecond to hpet tick
         * conversion.
         */
        hpet_events[i].mult = div_sc((unsigned long)hpet_rate,
                                     1000000000ul, 32);
        hpet_events[i].shift = 32;
        hpet_events[i].next_event = STIME_MAX;
        spin_lock_init(&hpet_events[i].lock);
        smp_wmb();
        hpet_events[i].event_handler = handle_hpet_broadcast;

        hpet_events[i].msi.msi_attrib.maskbit = 1;
        hpet_events[i].msi.msi_attrib.pos = MSI_TYPE_HPET;
    }

    if ( !num_hpets_used )
        hpet_events->flags = HPET_EVT_LEGACY;
}

void hpet_broadcast_resume(void)
{
    u32 cfg;
    unsigned int i, n;

    if ( !hpet_events )
        return;

    hpet_resume(NULL);

    cfg = hpet_read32(HPET_CFG);

    if ( num_hpets_used > 0 )
    {
        /* Stop HPET legacy interrupts */
        cfg &= ~HPET_CFG_LEGACY;
        n = num_hpets_used;
    }
    else if ( hpet_events->flags & HPET_EVT_DISABLE )
        return;
    else
    {
        /* Start HPET legacy interrupts */
        cfg |= HPET_CFG_LEGACY;
        n = 1;
    }

    hpet_write32(cfg, HPET_CFG);

    for ( i = 0; i < n; i++ )
    {
        if ( hpet_events[i].msi.irq >= 0 )
            __hpet_setup_msi_irq(irq_to_desc(hpet_events[i].msi.irq));

        /* set HPET Tn as oneshot */
        cfg = hpet_read32(HPET_Tn_CFG(hpet_events[i].idx));
        cfg &= ~(HPET_TN_LEVEL | HPET_TN_PERIODIC);
        cfg |= HPET_TN_ENABLE | HPET_TN_32BIT;
        if ( !(hpet_events[i].flags & HPET_EVT_LEGACY) )
            cfg |= HPET_TN_FSB;
        hpet_write32(cfg, HPET_Tn_CFG(hpet_events[i].idx));

        hpet_events[i].next_event = STIME_MAX;
    }
}

void hpet_disable_legacy_broadcast(void)
{
    u32 cfg;
    unsigned long flags;

    if ( !hpet_events || !(hpet_events->flags & HPET_EVT_LEGACY) )
        return;

    spin_lock_irqsave(&hpet_events->lock, flags);

    hpet_events->flags |= HPET_EVT_DISABLE;

    /* disable HPET T0 */
    cfg = hpet_read32(HPET_Tn_CFG(0));
    cfg &= ~HPET_TN_ENABLE;
    hpet_write32(cfg, HPET_Tn_CFG(0));

    /* Stop HPET legacy interrupts */
    cfg = hpet_read32(HPET_CFG);
    cfg &= ~HPET_CFG_LEGACY;
    hpet_write32(cfg, HPET_CFG);

    spin_unlock_irqrestore(&hpet_events->lock, flags);

    smp_send_event_check_mask(&cpu_online_map);
}

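/*
 * Called on the idle entry path: the local APIC timer may stop in deep
 * C-states, so hand this CPU's timer deadline to its HPET broadcast
 * channel before sleeping.
 */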
void hpet_broadcast_enter(void)
{
    unsigned int cpu = smp_processor_id();
    struct hpet_event_channel *ch = per_cpu(cpu_bc_channel, cpu);
    s_time_t deadline = per_cpu(timer_deadline, cpu);

    if ( deadline == 0 )
        return;

    if ( !ch )
        ch = hpet_get_channel(cpu);

    ASSERT(!local_irq_is_enabled());

    if ( !(ch->flags & HPET_EVT_LEGACY) )
        hpet_attach_channel(cpu, ch);

    /* Disable LAPIC timer interrupts. */
    disable_APIC_timer();
    cpumask_set_cpu(cpu, ch->cpumask);

    spin_lock(&ch->lock);
    /*
     * Reprogram if current cpu expire time is nearer.  deadline is never
     * written by a remote cpu, so the value read earlier is still valid.
     */
    if ( deadline < ch->next_event )
        reprogram_hpet_evt_channel(ch, deadline, NOW(), 1);
    spin_unlock(&ch->lock);
}

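/*
 * Counterpart of hpet_broadcast_enter(), run after wakeup: re-enable the
 * local APIC timer, reprogram it with the pending deadline (raising the
 * timer softirq at once if it already passed), and leave the channel.
 */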
void hpet_broadcast_exit(void)
{
    unsigned int cpu = smp_processor_id();
    struct hpet_event_channel *ch = per_cpu(cpu_bc_channel, cpu);
    s_time_t deadline = per_cpu(timer_deadline, cpu);

    if ( deadline == 0 )
        return;

    if ( !ch )
        ch = hpet_get_channel(cpu);

    /* Reprogram the deadline; trigger timer work now if it has passed. */
    enable_APIC_timer();
    if ( !reprogram_timer(deadline) )
        raise_softirq(TIMER_SOFTIRQ);

    cpumask_clear_cpu(cpu, ch->cpumask);

    if ( !(ch->flags & HPET_EVT_LEGACY) )
        hpet_detach_channel(cpu, ch);
}

int hpet_broadcast_is_available(void)
{
    return ((hpet_events && (hpet_events->flags & HPET_EVT_LEGACY))
            || num_hpets_used > 0);
}

int hpet_legacy_irq_tick(void)
{
    this_cpu(irq_count)--;

    if ( !hpet_events ||
         (hpet_events->flags & (HPET_EVT_DISABLE|HPET_EVT_LEGACY)) !=
         HPET_EVT_LEGACY )
        return 0;
    hpet_events->event_handler(hpet_events);
    return 1;
}

static u32 *hpet_boot_cfg;

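/*
 * Map and sanity-check the HPET.  Returns the main counter frequency in
 * Hz (rounded to nearest), or 0 if no usable HPET is present.  The result
 * is latched, so subsequent calls are cheap.
 */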
u64 __init hpet_setup(void)
{
    static u64 __initdata hpet_rate;
    u32 hpet_id, hpet_period;
    unsigned int last;

    if ( hpet_rate )
        return hpet_rate;

    if ( hpet_address == 0 )
        return 0;

    set_fixmap_nocache(FIX_HPET_BASE, hpet_address);

    hpet_id = hpet_read32(HPET_ID);
    if ( (hpet_id & HPET_ID_REV) == 0 )
    {
        printk("BAD HPET revision id.\n");
        return 0;
    }

    /* Check for sane period (100ps <= period <= 100ns). */
    hpet_period = hpet_read32(HPET_PERIOD);
    if ( (hpet_period > 100000000) || (hpet_period < 100000) )
    {
        printk("BAD HPET period %u.\n", hpet_period);
        return 0;
    }

    last = (hpet_id & HPET_ID_NUMBER) >> HPET_ID_NUMBER_SHIFT;
    hpet_boot_cfg = xmalloc_array(u32, 2 + last);
    hpet_resume(hpet_boot_cfg);

    hpet_rate = 1000000000000000ULL; /* 10^15 */
    last = do_div(hpet_rate, hpet_period);

    return hpet_rate + (last * 2 > hpet_period);
}

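/*
 * (Re)initialise the HPET after boot or resume: stash the incoming
 * register state in @boot_cfg (when non-NULL) for later restoration,
 * clear reserved bits, disable every channel, and restart the main
 * counter.
 */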
void hpet_resume(u32 *boot_cfg)
{
    static u32 system_reset_latch;
    u32 hpet_id, cfg;
    unsigned int i, last;

    if ( system_reset_latch == system_reset_counter )
        return;
    system_reset_latch = system_reset_counter;

    cfg = hpet_read32(HPET_CFG);
    if ( boot_cfg )
        *boot_cfg = cfg;
    cfg &= ~(HPET_CFG_ENABLE | HPET_CFG_LEGACY);
    if ( cfg )
    {
        printk(XENLOG_WARNING
               "HPET: reserved bits %#x set in global config register\n",
               cfg);
        cfg = 0;
    }
    hpet_write32(cfg, HPET_CFG);

    hpet_id = hpet_read32(HPET_ID);
    last = (hpet_id & HPET_ID_NUMBER) >> HPET_ID_NUMBER_SHIFT;
    for ( i = 0; i <= last; ++i )
    {
        cfg = hpet_read32(HPET_Tn_CFG(i));
        if ( boot_cfg )
            boot_cfg[i + 1] = cfg;
        cfg &= ~HPET_TN_ENABLE;
        if ( cfg & HPET_TN_RESERVED )
        {
            printk(XENLOG_WARNING
                   "HPET: reserved bits %#x set in channel %u config register\n",
                   cfg & HPET_TN_RESERVED, i);
            cfg &= ~HPET_TN_RESERVED;
        }
        hpet_write32(cfg, HPET_Tn_CFG(i));
    }

    cfg = hpet_read32(HPET_CFG);
    cfg |= HPET_CFG_ENABLE;
    hpet_write32(cfg, HPET_CFG);
}

void hpet_disable(void)
{
    unsigned int i;
    u32 id;

    if ( !hpet_boot_cfg )
    {
        if ( hpet_broadcast_is_available() )
            hpet_disable_legacy_broadcast();
        return;
    }

    hpet_write32(*hpet_boot_cfg & ~HPET_CFG_ENABLE, HPET_CFG);

    id = hpet_read32(HPET_ID);
    for ( i = 0; i <= ((id & HPET_ID_NUMBER) >> HPET_ID_NUMBER_SHIFT); ++i )
        hpet_write32(hpet_boot_cfg[i + 1], HPET_Tn_CFG(i));

    if ( *hpet_boot_cfg & HPET_CFG_ENABLE )
        hpet_write32(*hpet_boot_cfg, HPET_CFG);
}