/******************************************************************************
 * timer.c
 *
 * Copyright (c) 2002-2003 Rolf Neugebauer
 * Copyright (c) 2002-2005 K A Fraser
 */

#include <xen/init.h>
#include <xen/types.h>
#include <xen/errno.h>
#include <xen/sched.h>
#include <xen/lib.h>
#include <xen/param.h>
#include <xen/smp.h>
#include <xen/perfc.h>
#include <xen/time.h>
#include <xen/softirq.h>
#include <xen/timer.h>
#include <xen/keyhandler.h>
#include <xen/percpu.h>
#include <xen/cpu.h>
#include <xen/rcupdate.h>
#include <xen/symbols.h>
#include <asm/system.h>
#include <asm/desc.h>
#include <asm/atomic.h>

/* We program the timer hardware this far behind the closest deadline. */
static unsigned int timer_slop __read_mostly = 50000; /* 50 us */
integer_param("timer_slop", timer_slop);

struct timers {
    spinlock_t        lock;
    struct timer    **heap;
    struct timer     *list;
    struct timer     *running;
    struct list_head  inactive;
} __cacheline_aligned;

static DEFINE_PER_CPU(struct timers, timers);

/* Protects lock-free access to per-timer cpu field against cpu offlining. */
static DEFINE_RCU_READ_LOCK(timer_cpu_read_lock);

DEFINE_PER_CPU(s_time_t, timer_deadline);

/****************************************************************************
 * HEAP OPERATIONS.
 *
 * Slot 0 of the heap is never a valid timer pointer, and instead holds the
 * heap metadata.
 */
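
/*
 * Illustrative layout (not a literal dump): a heap holding three timers
 * with room for seven looks like
 *
 *   heap[0]          -> { size = 3, limit = 7 }  (type-punned metadata)
 *   heap[1]          -> timer with the earliest ->expires (top of heap)
 *   heap[2], heap[3] -> children of heap[1]
 *
 * i.e. a conventional 1-based binary min-heap keyed on ->expires, where
 * the children of slot k live at slots 2k and 2k+1.
 */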

struct heap_metadata {
    uint16_t size, limit;
};

static struct heap_metadata *heap_metadata(struct timer **heap)
{
    /* Check that our type-punning doesn't overflow into heap[1] */
    BUILD_BUG_ON(sizeof(struct heap_metadata) > sizeof(struct timer *));

    return (struct heap_metadata *)&heap[0];
}

/* Sink down element @pos of @heap. */
static void down_heap(struct timer **heap, unsigned int pos)
{
    unsigned int sz = heap_metadata(heap)->size, nxt;
    struct timer *t = heap[pos];

    while ( (nxt = (pos << 1)) <= sz )
    {
        if ( ((nxt+1) <= sz) && (heap[nxt+1]->expires < heap[nxt]->expires) )
            nxt++;
        if ( heap[nxt]->expires > t->expires )
            break;
        heap[pos] = heap[nxt];
        heap[pos]->heap_offset = pos;
        pos = nxt;
    }

    heap[pos] = t;
    t->heap_offset = pos;
}

/* Float element @pos up @heap. */
static void up_heap(struct timer **heap, unsigned int pos)
{
    struct timer *t = heap[pos];

    while ( (pos > 1) && (t->expires < heap[pos>>1]->expires) )
    {
        heap[pos] = heap[pos>>1];
        heap[pos]->heap_offset = pos;
        pos >>= 1;
    }

    heap[pos] = t;
    t->heap_offset = pos;
}


/* Delete @t from @heap. Return TRUE if new top of heap. */
static int remove_from_heap(struct timer **heap, struct timer *t)
{
    unsigned int sz = heap_metadata(heap)->size;
    unsigned int pos = t->heap_offset;

    if ( unlikely(pos == sz) )
    {
        heap_metadata(heap)->size = sz - 1;
        goto out;
    }

    heap[pos] = heap[sz];
    heap[pos]->heap_offset = pos;

    heap_metadata(heap)->size = --sz;

    if ( (pos > 1) && (heap[pos]->expires < heap[pos>>1]->expires) )
        up_heap(heap, pos);
    else
        down_heap(heap, pos);

 out:
    return (pos == 1);
}


/* Add new entry @t to @heap. Return TRUE if new top of heap. */
static int add_to_heap(struct timer **heap, struct timer *t)
{
    unsigned int sz = heap_metadata(heap)->size;

    /* Fail if the heap is full. */
    if ( unlikely(sz == heap_metadata(heap)->limit) )
        return 0;

    heap_metadata(heap)->size = ++sz;
    heap[sz] = t;
    t->heap_offset = sz;
    up_heap(heap, sz);

    return (t->heap_offset == 1);
}
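
/*
 * Note on the return convention above: TRUE means the top of the heap
 * changed, i.e. this CPU's earliest heap deadline is no longer what it
 * was, so the caller must arrange for the hardware timer to be
 * reprogrammed (by raising TIMER_SOFTIRQ).
 */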


/****************************************************************************
 * LINKED LIST OPERATIONS.
 */

static int remove_from_list(struct timer **pprev, struct timer *t)
{
    struct timer *curr, **_pprev = pprev;

    while ( (curr = *_pprev) != t )
        _pprev = &curr->list_next;

    *_pprev = t->list_next;

    return (_pprev == pprev);
}

static int add_to_list(struct timer **pprev, struct timer *t)
{
    struct timer *curr, **_pprev = pprev;

    while ( ((curr = *_pprev) != NULL) && (curr->expires <= t->expires) )
        _pprev = &curr->list_next;

    t->list_next = curr;
    *_pprev = t;

    return (_pprev == pprev);
}
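
/*
 * The list is the overflow path for timers that did not fit in the heap:
 * it is kept sorted by ascending ->expires and, mirroring the heap
 * helpers, add/remove return TRUE when the list head (the earliest entry)
 * changed. timer_softirq_action() later promotes list entries back into
 * the (possibly grown) heap.
 */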


/****************************************************************************
 * TIMER OPERATIONS.
 */

static int remove_entry(struct timer *t)
{
    struct timers *timers = &per_cpu(timers, t->cpu);
    int rc;

    switch ( t->status )
    {
    case TIMER_STATUS_in_heap:
        rc = remove_from_heap(timers->heap, t);
        break;
    case TIMER_STATUS_in_list:
        rc = remove_from_list(&timers->list, t);
        break;
    default:
        rc = 0;
        BUG();
    }

    t->status = TIMER_STATUS_invalid;
    return rc;
}

static int add_entry(struct timer *t)
{
    struct timers *timers = &per_cpu(timers, t->cpu);
    int rc;

    ASSERT(t->status == TIMER_STATUS_invalid);

    /* Try to add to heap. t->heap_offset indicates whether we succeed. */
    t->heap_offset = 0;
    t->status = TIMER_STATUS_in_heap;
    rc = add_to_heap(timers->heap, t);
    if ( t->heap_offset != 0 )
        return rc;

    /* Fall back to adding to the slower linked list. */
    t->status = TIMER_STATUS_in_list;
    return add_to_list(&timers->list, t);
}

static inline void activate_timer(struct timer *timer)
{
    ASSERT(timer->status == TIMER_STATUS_inactive);
    timer->status = TIMER_STATUS_invalid;
    list_del(&timer->inactive);

    if ( add_entry(timer) )
        cpu_raise_softirq(timer->cpu, TIMER_SOFTIRQ);
}

static inline void deactivate_timer(struct timer *timer)
{
    if ( remove_entry(timer) )
        cpu_raise_softirq(timer->cpu, TIMER_SOFTIRQ);

    timer->status = TIMER_STATUS_inactive;
    list_add(&timer->inactive, &per_cpu(timers, timer->cpu).inactive);
}

static inline bool_t timer_lock(struct timer *timer)
{
    unsigned int cpu;

    rcu_read_lock(&timer_cpu_read_lock);

    for ( ; ; )
    {
        cpu = read_atomic(&timer->cpu);
        if ( unlikely(cpu == TIMER_CPU_status_killed) )
        {
            rcu_read_unlock(&timer_cpu_read_lock);
            return 0;
        }
        spin_lock(&per_cpu(timers, cpu).lock);
        if ( likely(timer->cpu == cpu) )
            break;
        spin_unlock(&per_cpu(timers, cpu).lock);
    }

    rcu_read_unlock(&timer_cpu_read_lock);
    return 1;
}
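
/*
 * Note the pattern above: sample timer->cpu, lock that CPU's queue, then
 * recheck the field in case the timer was migrated in between. The RCU
 * read section pairs with cpu offlining so that the sampled CPU's queue
 * is not torn down while we still dereference it.
 */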

#define timer_lock_irqsave(t, flags) ({         \
    bool_t __x;                                 \
    local_irq_save(flags);                      \
    if ( !(__x = timer_lock(t)) )               \
        local_irq_restore(flags);               \
    __x;                                        \
})

static inline void timer_unlock(struct timer *timer)
{
    spin_unlock(&per_cpu(timers, timer->cpu).lock);
}

#define timer_unlock_irqrestore(t, flags) ({    \
    timer_unlock(t);                            \
    local_irq_restore(flags);                   \
})
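
/*
 * Typical caller pattern (sketch; set_timer()/stop_timer() below follow
 * it). A FALSE return from timer_lock_irqsave() means the timer has been
 * killed, so the caller bails out without unlocking:
 *
 *     unsigned long flags;
 *
 *     if ( !timer_lock_irqsave(timer, flags) )
 *         return;
 *     ... operate on the timer ...
 *     timer_unlock_irqrestore(timer, flags);
 */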


static bool active_timer(const struct timer *timer)
{
    ASSERT(timer->status >= TIMER_STATUS_inactive);
    return timer_is_active(timer);
}


void init_timer(
    struct timer *timer,
    void (*function)(void *),
    void *data,
    unsigned int cpu)
{
    unsigned long flags;

    memset(timer, 0, sizeof(*timer));
    timer->function = function;
    timer->data = data;
    write_atomic(&timer->cpu, cpu);
    timer->status = TIMER_STATUS_inactive;
    if ( !timer_lock_irqsave(timer, flags) )
        BUG();
    list_add(&timer->inactive, &per_cpu(timers, cpu).inactive);
    timer_unlock_irqrestore(timer, flags);
}


void set_timer(struct timer *timer, s_time_t expires)
{
    unsigned long flags;

    if ( !timer_lock_irqsave(timer, flags) )
        return;

    if ( active_timer(timer) )
        deactivate_timer(timer);

    timer->expires = expires;

    activate_timer(timer);

    timer_unlock_irqrestore(timer, flags);
}


void stop_timer(struct timer *timer)
{
    unsigned long flags;

    if ( !timer_lock_irqsave(timer, flags) )
        return;

    if ( active_timer(timer) )
        deactivate_timer(timer);

    timer_unlock_irqrestore(timer, flags);
}

bool timer_expires_before(struct timer *timer, s_time_t t)
{
    unsigned long flags;
    bool ret;

    if ( !timer_lock_irqsave(timer, flags) )
        return false;

    ret = active_timer(timer) && timer->expires <= t;

    timer_unlock_irqrestore(timer, flags);

    return ret;
}

void migrate_timer(struct timer *timer, unsigned int new_cpu)
{
    unsigned int old_cpu;
    bool_t active;
    unsigned long flags;

    rcu_read_lock(&timer_cpu_read_lock);

    for ( ; ; )
    {
        old_cpu = read_atomic(&timer->cpu);
        if ( (old_cpu == new_cpu) || (old_cpu == TIMER_CPU_status_killed) )
        {
            rcu_read_unlock(&timer_cpu_read_lock);
            return;
        }

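        /*
         * Take the two queue locks in a globally consistent order (lowest
         * CPU number first) so that two concurrent migrations in opposite
         * directions cannot deadlock against each other.
         */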
        if ( old_cpu < new_cpu )
        {
            spin_lock_irqsave(&per_cpu(timers, old_cpu).lock, flags);
            spin_lock(&per_cpu(timers, new_cpu).lock);
        }
        else
        {
            spin_lock_irqsave(&per_cpu(timers, new_cpu).lock, flags);
            spin_lock(&per_cpu(timers, old_cpu).lock);
        }

        if ( likely(timer->cpu == old_cpu) )
            break;

        spin_unlock(&per_cpu(timers, old_cpu).lock);
        spin_unlock_irqrestore(&per_cpu(timers, new_cpu).lock, flags);
    }

    rcu_read_unlock(&timer_cpu_read_lock);

    active = active_timer(timer);
    if ( active )
        deactivate_timer(timer);

    list_del(&timer->inactive);
    write_atomic(&timer->cpu, new_cpu);
    list_add(&timer->inactive, &per_cpu(timers, new_cpu).inactive);

    if ( active )
        activate_timer(timer);

    spin_unlock(&per_cpu(timers, old_cpu).lock);
    spin_unlock_irqrestore(&per_cpu(timers, new_cpu).lock, flags);
}


void kill_timer(struct timer *timer)
{
    unsigned int old_cpu, cpu;
    unsigned long flags;

    BUG_ON(this_cpu(timers).running == timer);

    if ( !timer_lock_irqsave(timer, flags) )
        return;

    if ( active_timer(timer) )
        deactivate_timer(timer);

    list_del(&timer->inactive);
    timer->status = TIMER_STATUS_killed;
    old_cpu = timer->cpu;
    write_atomic(&timer->cpu, TIMER_CPU_status_killed);

    spin_unlock_irqrestore(&per_cpu(timers, old_cpu).lock, flags);

    for_each_online_cpu ( cpu )
        while ( per_cpu(timers, cpu).running == timer )
            cpu_relax();
}


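/*
 * Run a timer's handler. The queue lock is dropped around the callback so
 * the handler may itself set/stop timers; ts->running marks the in-flight
 * timer and is what kill_timer() spins on before returning.
 */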
static void execute_timer(struct timers *ts, struct timer *t)
{
    void (*fn)(void *) = t->function;
    void *data = t->data;

    t->status = TIMER_STATUS_inactive;
    list_add(&t->inactive, &ts->inactive);

    ts->running = t;
    spin_unlock_irq(&ts->lock);
    (*fn)(data);
    spin_lock_irq(&ts->lock);
    ts->running = NULL;
}


static void timer_softirq_action(void)
{
    struct timer *t, **heap, *next;
    struct timers *ts;
    s_time_t now, deadline;

    ts = &this_cpu(timers);
    heap = ts->heap;

    /* If we overflowed the heap, try to allocate a larger heap. */
    if ( unlikely(ts->list != NULL) )
    {
        /* old_limit == (2^n)-1; new_limit == (2^(n+4))-1 */
        unsigned int old_limit = heap_metadata(heap)->limit;
        unsigned int new_limit = ((old_limit + 1) << 4) - 1;
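        /* e.g. the limit grows 0 -> 15 -> 255 -> 4095 -> 65535, the last
         * being the largest value representable in the uint16_t metadata. */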
        struct timer **newheap = NULL;

        /* Don't grow the heap beyond what is representable in its metadata. */
        if ( new_limit == (typeof(heap_metadata(heap)->limit))new_limit &&
             new_limit + 1 )
            newheap = xmalloc_array(struct timer *, new_limit + 1);
        else
            printk_once(XENLOG_WARNING "CPU%u: timer heap limit reached\n",
                        smp_processor_id());
        if ( newheap != NULL )
        {
            spin_lock_irq(&ts->lock);
            memcpy(newheap, heap, (old_limit + 1) * sizeof(*heap));
            heap_metadata(newheap)->limit = new_limit;
            ts->heap = newheap;
            spin_unlock_irq(&ts->lock);
            if ( old_limit != 0 )
                xfree(heap);
            heap = newheap;
        }
    }

    spin_lock_irq(&ts->lock);

    now = NOW();

    /* Execute ready heap timers. */
    while ( (heap_metadata(heap)->size != 0) &&
            ((t = heap[1])->expires < now) )
    {
        remove_from_heap(heap, t);
        execute_timer(ts, t);
    }

    /* Execute ready list timers. */
    while ( ((t = ts->list) != NULL) && (t->expires < now) )
    {
        ts->list = t->list_next;
        execute_timer(ts, t);
    }

    /* Try to move timers from linked list to more efficient heap. */
    next = ts->list;
    ts->list = NULL;
    while ( unlikely((t = next) != NULL) )
    {
        next = t->list_next;
        t->status = TIMER_STATUS_invalid;
        add_entry(t);
    }

    /* Find earliest deadline from head of linked list and top of heap. */
    deadline = STIME_MAX;
    if ( heap_metadata(heap)->size != 0 )
        deadline = heap[1]->expires;
    if ( (ts->list != NULL) && (ts->list->expires < deadline) )
        deadline = ts->list->expires;
    now = NOW();
    this_cpu(timer_deadline) =
        (deadline == STIME_MAX) ? 0 : MAX(deadline, now + timer_slop);

    if ( !reprogram_timer(this_cpu(timer_deadline)) )
        raise_softirq(TIMER_SOFTIRQ);

    spin_unlock_irq(&ts->lock);
}

s_time_t align_timer(s_time_t firsttick, uint64_t period)
{
    if ( !period )
        return firsttick;

    return firsttick + (period - 1) - ((firsttick - 1) % period);
}
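
/*
 * Worked example: align_timer(1300, 500) = 1300 + 499 - (1299 % 500)
 * = 1300 + 499 - 299 = 1500, i.e. firsttick rounded up to the next
 * multiple of period; an already-aligned firsttick (e.g. 1500) is
 * returned unchanged.
 */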

static void dump_timer(struct timer *t, s_time_t now)
{
    printk("  ex=%12"PRId64"us timer=%p cb=%ps(%p)\n",
           (t->expires - now) / 1000, t, t->function, t->data);
}

static void dump_timerq(unsigned char key)
{
    struct timer *t;
    struct timers *ts;
    unsigned long flags;
    s_time_t now = NOW();
    unsigned int i, j;

    printk("Dumping timer queues:\n");

    for_each_online_cpu ( i )
    {
        ts = &per_cpu(timers, i);

        printk("CPU%02u:\n", i);
        spin_lock_irqsave(&ts->lock, flags);
        for ( j = 1; j <= heap_metadata(ts->heap)->size; j++ )
            dump_timer(ts->heap[j], now);
        for ( t = ts->list; t != NULL; t = t->list_next )
            dump_timer(t, now);
        spin_unlock_irqrestore(&ts->lock, flags);
    }
}

static void migrate_timers_from_cpu(unsigned int old_cpu)
{
    unsigned int new_cpu = cpumask_any(&cpu_online_map);
    struct timers *old_ts, *new_ts;
    struct timer *t;
    bool_t notify = 0;

    ASSERT(!cpu_online(old_cpu) && cpu_online(new_cpu));

    old_ts = &per_cpu(timers, old_cpu);
    new_ts = &per_cpu(timers, new_cpu);

    if ( old_cpu < new_cpu )
    {
        spin_lock_irq(&old_ts->lock);
        spin_lock(&new_ts->lock);
    }
    else
    {
        spin_lock_irq(&new_ts->lock);
        spin_lock(&old_ts->lock);
    }

    while ( (t = heap_metadata(old_ts->heap)->size
             ? old_ts->heap[1] : old_ts->list) != NULL )
    {
        remove_entry(t);
        write_atomic(&t->cpu, new_cpu);
        notify |= add_entry(t);
    }

    while ( !list_empty(&old_ts->inactive) )
    {
        t = list_entry(old_ts->inactive.next, struct timer, inactive);
        list_del(&t->inactive);
        write_atomic(&t->cpu, new_cpu);
        list_add(&t->inactive, &new_ts->inactive);
    }

    spin_unlock(&old_ts->lock);
    spin_unlock_irq(&new_ts->lock);

    if ( notify )
        cpu_raise_softirq(new_cpu, TIMER_SOFTIRQ);
}

/*
 * All CPUs initially share an empty dummy heap. Only those CPUs that
 * are brought online will be dynamically allocated their own heap.
 * The size/limit metadata are both 0 by virtue of living in .bss.
 */
static struct timer *dummy_heap[1];
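
/*
 * With limit == 0, add_to_heap() always fails on a freshly onlined CPU,
 * so its first timers land on ts->list; the next TIMER_SOFTIRQ run then
 * allocates a real heap (initial limit 15) via the growth path in
 * timer_softirq_action().
 */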

static void free_percpu_timers(unsigned int cpu)
{
    struct timers *ts = &per_cpu(timers, cpu);

    ASSERT(heap_metadata(ts->heap)->size == 0);
    if ( heap_metadata(ts->heap)->limit )
    {
        xfree(ts->heap);
        ts->heap = dummy_heap;
    }
    else
        ASSERT(ts->heap == dummy_heap);
}

static int cpu_callback(
    struct notifier_block *nfb, unsigned long action, void *hcpu)
{
    unsigned int cpu = (unsigned long)hcpu;
    struct timers *ts = &per_cpu(timers, cpu);

    switch ( action )
    {
    case CPU_UP_PREPARE:
        /* Only initialise ts once. */
        if ( !ts->heap )
        {
            INIT_LIST_HEAD(&ts->inactive);
            spin_lock_init(&ts->lock);
            ts->heap = dummy_heap;
        }
        break;

    case CPU_UP_CANCELED:
    case CPU_DEAD:
    case CPU_RESUME_FAILED:
        migrate_timers_from_cpu(cpu);

        if ( !park_offline_cpus && system_state != SYS_STATE_suspend )
            free_percpu_timers(cpu);
        break;

    case CPU_REMOVE:
        if ( park_offline_cpus )
            free_percpu_timers(cpu);
        break;

    default:
        break;
    }

    return NOTIFY_DONE;
}

static struct notifier_block cpu_nfb = {
    .notifier_call = cpu_callback,
    .priority = 99
};

void __init timer_init(void)
{
    void *cpu = (void *)(long)smp_processor_id();

    open_softirq(TIMER_SOFTIRQ, timer_softirq_action);

    cpu_callback(&cpu_nfb, CPU_UP_PREPARE, cpu);
    register_cpu_notifier(&cpu_nfb);

    register_keyhandler('a', dump_timerq, "dump timer queues", 1);
}

/*
 * Local variables:
 * mode: C
 * c-file-style: "BSD"
 * c-basic-offset: 4
 * tab-width: 4
 * indent-tabs-mode: nil
 * End:
 */