/*
 * arch/xtensa/kernel/time.c
 *
 * Timer and clock support.
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2005 Tensilica Inc.
 *
 * Chris Zankel <chris@zankel.net>
 */

#include <linux/clk.h>
#include <linux/of_clk.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/time.h>
#include <linux/clocksource.h>
#include <linux/clockchips.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/irq.h>
#include <linux/profile.h>
#include <linux/delay.h>
#include <linux/irqdomain.h>
#include <linux/sched_clock.h>

#include <asm/timex.h>
#include <asm/platform.h>

unsigned long ccount_freq;		/* ccount Hz */
EXPORT_SYMBOL(ccount_freq);

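/*
 * CCOUNT is the free-running Xtensa cycle counter. It backs both the
 * clocksource and the scheduler clock registered below.
 */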
static u64 ccount_read(struct clocksource *cs)
{
	return (u64)get_ccount();
}

static u64 notrace ccount_sched_clock_read(void)
{
	return get_ccount();
}

static struct clocksource ccount_clocksource = {
	.name = "ccount",
	.rating = 200,
	.read = ccount_read,
	.mask = CLOCKSOURCE_MASK(32),
	.flags = CLOCK_SOURCE_IS_CONTINUOUS,
};

struct ccount_timer {
	struct clock_event_device evt;
	int irq_enabled;
	char name[24];
};

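/*
 * Program the next event by writing the Linux timer CCOMPARE register
 * (set_linux_timer). If CCOUNT has already passed the requested
 * deadline, the unsigned subtraction below wraps and -ETIME is
 * returned.
 */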
static int ccount_timer_set_next_event(unsigned long delta,
				       struct clock_event_device *dev)
{
	unsigned long flags, next;
	int ret = 0;

	local_irq_save(flags);
	next = get_ccount() + delta;
	set_linux_timer(next);
	if (next - get_ccount() > delta)
		ret = -ETIME;
	local_irq_restore(flags);

	return ret;
}

/*
 * There is no way to disable the timer interrupt at the device level,
 * only at the intenable register itself. Since enable_irq/disable_irq
 * calls are nested, we need to make sure that these calls are
 * balanced.
 */
static int ccount_timer_shutdown(struct clock_event_device *evt)
{
	struct ccount_timer *timer =
		container_of(evt, struct ccount_timer, evt);

	if (timer->irq_enabled) {
		disable_irq_nosync(evt->irq);
		timer->irq_enabled = 0;
	}
	return 0;
}

static int ccount_timer_set_oneshot(struct clock_event_device *evt)
{
	struct ccount_timer *timer =
		container_of(evt, struct ccount_timer, evt);

	if (!timer->irq_enabled) {
		enable_irq(evt->irq);
		timer->irq_enabled = 1;
	}
	return 0;
}

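/*
 * Per-CPU clockevent device. Only oneshot mode is advertised, as a
 * CCOMPARE match is a single-shot event.
 */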
static DEFINE_PER_CPU(struct ccount_timer, ccount_timer) = {
	.evt = {
		.features = CLOCK_EVT_FEAT_ONESHOT,
		.rating = 300,
		.set_next_event = ccount_timer_set_next_event,
		.set_state_shutdown = ccount_timer_shutdown,
		.set_state_oneshot = ccount_timer_set_oneshot,
		.tick_resume = ccount_timer_set_oneshot,
	},
};

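/*
 * Timer interrupt handler. Rewriting the CCOMPARE register with its
 * current value acknowledges the interrupt before the clockevent
 * handler is invoked.
 */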
static irqreturn_t timer_interrupt(int irq, void *dev_id)
{
	struct clock_event_device *evt = &this_cpu_ptr(&ccount_timer)->evt;

	set_linux_timer(get_linux_timer());
	evt->event_handler(evt);

	/* Allow platform to do something useful (Wdog). */
	platform_heartbeat();

	return IRQ_HANDLED;
}

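/*
 * Register the per-CPU clockevent: map the Linux timer interrupt and
 * configure a minimum delta of 0xf ccount ticks.
 */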
void local_timer_setup(unsigned cpu)
{
	struct ccount_timer *timer = &per_cpu(ccount_timer, cpu);
	struct clock_event_device *clockevent = &timer->evt;

	timer->irq_enabled = 1;
	snprintf(timer->name, sizeof(timer->name), "ccount_clockevent_%u", cpu);
	clockevent->name = timer->name;
	clockevent->cpumask = cpumask_of(cpu);
	clockevent->irq = irq_create_mapping(NULL, LINUX_TIMER_INT);
	if (WARN(!clockevent->irq, "error: can't map timer irq"))
		return;
	clockevents_config_and_register(clockevent, ccount_freq,
					0xf, 0xffffffff);
}

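/*
 * Determine the ccount frequency: prefer the CPU input clock from the
 * device tree, otherwise fall back to the platform calibration hook.
 */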
#ifdef CONFIG_XTENSA_CALIBRATE_CCOUNT
#ifdef CONFIG_OF
static void __init calibrate_ccount(void)
{
	struct device_node *cpu;
	struct clk *clk;

	cpu = of_find_compatible_node(NULL, NULL, "cdns,xtensa-cpu");
	if (cpu) {
		clk = of_clk_get(cpu, 0);
		if (!IS_ERR(clk)) {
			ccount_freq = clk_get_rate(clk);
			return;
		} else {
			pr_warn("%s: CPU input clock not found\n",
				__func__);
		}
	} else {
		pr_warn("%s: CPU node not found in the device tree\n",
			__func__);
	}

	platform_calibrate_ccount();
}
#else
static inline void calibrate_ccount(void)
{
	platform_calibrate_ccount();
}
#endif
#endif

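/*
 * Early time initialization: determine ccount_freq, register the
 * clocksource, set up the boot CPU clockevent and sched_clock.
 */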
void __init time_init(void)
{
	int irq;

	of_clk_init(NULL);
#ifdef CONFIG_XTENSA_CALIBRATE_CCOUNT
	pr_info("Calibrating CPU frequency ");
	calibrate_ccount();
	pr_cont("%d.%02d MHz\n",
		(int)ccount_freq / 1000000,
		(int)(ccount_freq / 10000) % 100);
#else
	ccount_freq = CONFIG_XTENSA_CPU_CLOCK*1000000UL;
#endif
	WARN(!ccount_freq,
	     "%s: CPU clock frequency is not set up correctly\n",
	     __func__);
	clocksource_register_hz(&ccount_clocksource, ccount_freq);
	local_timer_setup(0);
	irq = this_cpu_ptr(&ccount_timer)->evt.irq;
	if (request_irq(irq, timer_interrupt, IRQF_TIMER, "timer", NULL))
		pr_err("Failed to request irq %d (timer)\n", irq);
	sched_clock_register(ccount_sched_clock_read, 32, ccount_freq);
	timer_probe();
}

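/*
 * The delay loop is preset from the known ccount frequency, so the
 * generic BogoMIPS calibration is skipped.
 */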
#ifndef CONFIG_GENERIC_CALIBRATE_DELAY
void calibrate_delay(void)
{
	loops_per_jiffy = ccount_freq / HZ;
	pr_info("Calibrating delay loop (skipped)... %lu.%02lu BogoMIPS preset\n",
		loops_per_jiffy / (1000000 / HZ),
		(loops_per_jiffy / (10000 / HZ)) % 100);
}
#endif