// SPDX-License-Identifier: GPL-2.0
/*
 * Watchdog support on powerpc systems.
 *
 * Copyright 2017, IBM Corporation.
 *
 * This uses code from arch/sparc/kernel/nmi.c and kernel/watchdog.c
 */

#define pr_fmt(fmt) "watchdog: " fmt

#include <linux/kernel.h>
#include <linux/param.h>
#include <linux/init.h>
#include <linux/percpu.h>
#include <linux/cpu.h>
#include <linux/nmi.h>
#include <linux/module.h>
#include <linux/export.h>
#include <linux/kprobes.h>
#include <linux/hardirq.h>
#include <linux/reboot.h>
#include <linux/slab.h>
#include <linux/kdebug.h>
#include <linux/sched/debug.h>
#include <linux/delay.h>
#include <linux/processor.h>
#include <linux/smp.h>

#include <asm/interrupt.h>
#include <asm/paca.h>
#include <asm/nmi.h>

/*
 * The powerpc watchdog ensures that each CPU is able to service timers.
 * The watchdog sets up a simple timer on each CPU to run once per timer
 * period, and updates a per-cpu timestamp and a "pending" cpumask. This is
 * the heartbeat.
 *
 * Then there are two systems to check that the heartbeat is still running.
 * The local soft-NMI, and the SMP checker.
 *
 * The soft-NMI checker can detect lockups on the local CPU. When interrupts
 * are disabled with local_irq_disable(), platforms that use soft-masking
 * can leave hardware interrupts enabled and handle them with a masked
 * interrupt handler. The masked handler can send the timer interrupt to the
 * watchdog's soft_nmi_interrupt(), which appears to Linux as an NMI
 * interrupt, and can be used to detect CPUs stuck with IRQs disabled.
 *
 * The soft-NMI checker will compare the heartbeat timestamp for this CPU
 * with the current time, and take action if the difference exceeds the
 * watchdog threshold.
 *
 * The limitation of the soft-NMI watchdog is that it does not work when
 * interrupts are hard disabled or otherwise not being serviced. This is
 * solved by also having an SMP watchdog where all CPUs check all other
 * CPUs' heartbeats.
 *
 * The SMP checker can detect lockups on other CPUs. A global "pending"
 * cpumask is kept, containing all CPUs which enable the watchdog. Each
 * CPU clears their pending bit in their heartbeat timer. When the bitmask
 * becomes empty, the last CPU to clear its pending bit updates a global
 * timestamp and refills the pending bitmask.
 *
 * In the heartbeat timer, if any CPU notices that the global timestamp has
 * not been updated for a period exceeding the watchdog threshold, then it
 * means the CPU(s) with their bit still set in the pending mask have had
 * their heartbeats stop, and action is taken.
 *
 * Some platforms implement true NMI IPIs, which can be used by the SMP
 * watchdog to detect an unresponsive CPU and pull it out of its stuck
 * state with the NMI IPI, to get crash/debug data from it. This way the
 * SMP watchdog can detect lockups even with hardware interrupts disabled.
 */

static cpumask_t wd_cpus_enabled __read_mostly;

static u64 wd_panic_timeout_tb __read_mostly; /* timebase ticks until panic */
static u64 wd_smp_panic_timeout_tb __read_mostly; /* panic other CPUs */

static u64 wd_timer_period_ms __read_mostly; /* interval between heartbeats */

static DEFINE_PER_CPU(struct hrtimer, wd_hrtimer);
static DEFINE_PER_CPU(u64, wd_timer_tb);

/* SMP checker bits */
static unsigned long __wd_smp_lock;
static cpumask_t wd_smp_cpus_pending;
static cpumask_t wd_smp_cpus_stuck;
static u64 wd_smp_last_reset_tb;

static inline void wd_smp_lock(unsigned long *flags)
{
	/*
	 * Avoid locking layers if possible.
	 * This may be called from low level interrupt handlers at some
	 * point in future.
	 */
	raw_local_irq_save(*flags);
	hard_irq_disable(); /* Make it soft-NMI safe */
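	/*
	 * If the lock is contended, spin with interrupts restored to their
	 * previous state, so this CPU is not blocked from servicing its own
	 * interrupts while it waits; re-disable and retry once the lock
	 * looks free.
	 */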
	while (unlikely(test_and_set_bit_lock(0, &__wd_smp_lock))) {
		raw_local_irq_restore(*flags);
		spin_until_cond(!test_bit(0, &__wd_smp_lock));
		raw_local_irq_save(*flags);
		hard_irq_disable();
	}
}

static inline void wd_smp_unlock(unsigned long *flags)
{
	clear_bit_unlock(0, &__wd_smp_lock);
	raw_local_irq_restore(*flags);
}

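/*
 * NMI IPI handler: runs on a stuck CPU at the request of the CPU that
 * detected the lockup, to dump the stuck CPU's state.
 */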
static void wd_lockup_ipi(struct pt_regs *regs)
{
	int cpu = raw_smp_processor_id();
	u64 tb = get_tb();

	pr_emerg("CPU %d Hard LOCKUP\n", cpu);
	pr_emerg("CPU %d TB:%lld, last heartbeat TB:%lld (%lldms ago)\n",
		 cpu, tb, per_cpu(wd_timer_tb, cpu),
		 tb_to_ns(tb - per_cpu(wd_timer_tb, cpu)) / 1000000);
	print_modules();
	print_irqtrace_events(current);
	if (regs)
		show_regs(regs);
	else
		dump_stack();

	/* Do not panic from here because that can recurse into NMI IPI layer */
}

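/* Callers must hold wd_smp_lock. */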
static void set_cpumask_stuck(const struct cpumask *cpumask, u64 tb)
{
	cpumask_or(&wd_smp_cpus_stuck, &wd_smp_cpus_stuck, cpumask);
	cpumask_andnot(&wd_smp_cpus_pending, &wd_smp_cpus_pending, cpumask);
	if (cpumask_empty(&wd_smp_cpus_pending)) {
		wd_smp_last_reset_tb = tb;
		cpumask_andnot(&wd_smp_cpus_pending,
			       &wd_cpus_enabled,
			       &wd_smp_cpus_stuck);
	}
}

static void set_cpu_stuck(int cpu, u64 tb)
{
	set_cpumask_stuck(cpumask_of(cpu), tb);
}

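/*
 * The SMP detector has fired: re-check the condition under the lock
 * (another CPU may have already handled it), report which CPUs missed
 * their heartbeat, try to get backtraces out of them with an NMI IPI,
 * and optionally panic.
 */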
static void watchdog_smp_panic(int cpu, u64 tb)
{
	unsigned long flags;
	int c;

	wd_smp_lock(&flags);
	/* Double check some things under lock */
	if ((s64)(tb - wd_smp_last_reset_tb) < (s64)wd_smp_panic_timeout_tb)
		goto out;
	if (cpumask_test_cpu(cpu, &wd_smp_cpus_pending))
		goto out;
	if (cpumask_weight(&wd_smp_cpus_pending) == 0)
		goto out;

	pr_emerg("CPU %d detected hard LOCKUP on other CPUs %*pbl\n",
		 cpu, cpumask_pr_args(&wd_smp_cpus_pending));
	pr_emerg("CPU %d TB:%lld, last SMP heartbeat TB:%lld (%lldms ago)\n",
		 cpu, tb, wd_smp_last_reset_tb,
		 tb_to_ns(tb - wd_smp_last_reset_tb) / 1000000);

	if (!sysctl_hardlockup_all_cpu_backtrace) {
		/*
		 * Try to trigger the stuck CPUs, unless we are going to
		 * get a backtrace on all of them anyway.
		 */
		for_each_cpu(c, &wd_smp_cpus_pending) {
			if (c == cpu)
				continue;
			smp_send_nmi_ipi(c, wd_lockup_ipi, 1000000);
		}
	}

	/* Take the stuck CPUs out of the watch group */
	set_cpumask_stuck(&wd_smp_cpus_pending, tb);

	wd_smp_unlock(&flags);

	if (sysctl_hardlockup_all_cpu_backtrace)
		trigger_allbutself_cpu_backtrace();

	/*
	 * Force flush any remote buffers that might be stuck in IRQ context
	 * and therefore could not run their irq_work.
	 */
	printk_trigger_flush();

	if (hardlockup_panic)
		nmi_panic(NULL, "Hard LOCKUP");

	return;

out:
	wd_smp_unlock(&flags);
}

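/*
 * Clear this CPU's bit in the pending mask. The common case takes no
 * lock; the lock is only needed when this CPU was previously marked
 * stuck, or when it is the last CPU to check in and must refill the
 * pending mask and update the global timestamp.
 */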
static void wd_smp_clear_cpu_pending(int cpu, u64 tb)
{
	if (!cpumask_test_cpu(cpu, &wd_smp_cpus_pending)) {
		if (unlikely(cpumask_test_cpu(cpu, &wd_smp_cpus_stuck))) {
			struct pt_regs *regs = get_irq_regs();
			unsigned long flags;

			wd_smp_lock(&flags);

			pr_emerg("CPU %d became unstuck TB:%lld\n",
				 cpu, tb);
			print_irqtrace_events(current);
			if (regs)
				show_regs(regs);
			else
				dump_stack();

			cpumask_clear_cpu(cpu, &wd_smp_cpus_stuck);
			wd_smp_unlock(&flags);
		}
		return;
	}
	cpumask_clear_cpu(cpu, &wd_smp_cpus_pending);
	if (cpumask_empty(&wd_smp_cpus_pending)) {
		unsigned long flags;

		wd_smp_lock(&flags);
		if (cpumask_empty(&wd_smp_cpus_pending)) {
			wd_smp_last_reset_tb = tb;
			cpumask_andnot(&wd_smp_cpus_pending,
				       &wd_cpus_enabled,
				       &wd_smp_cpus_stuck);
		}
		wd_smp_unlock(&flags);
	}
}

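/*
 * Per-CPU heartbeat: update this CPU's timestamp, clear its pending bit,
 * and check whether the SMP detector should fire.
 */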
static void watchdog_timer_interrupt(int cpu)
{
	u64 tb = get_tb();

	per_cpu(wd_timer_tb, cpu) = tb;

	wd_smp_clear_cpu_pending(cpu, tb);

	if ((s64)(tb - wd_smp_last_reset_tb) >= (s64)wd_smp_panic_timeout_tb)
		watchdog_smp_panic(cpu, tb);
}

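/*
 * Soft-NMI handler, reached from the masked decrementer path when a timer
 * interrupt arrives while interrupts are soft-disabled (see the comment at
 * the top of this file). Checks whether this CPU has missed its heartbeat
 * for longer than the watchdog threshold.
 */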
DEFINE_INTERRUPT_HANDLER_NMI(soft_nmi_interrupt)
{
	unsigned long flags;
	int cpu = raw_smp_processor_id();
	u64 tb;

	/* should only arrive from kernel, with irqs disabled */
	WARN_ON_ONCE(!arch_irq_disabled_regs(regs));

	if (!cpumask_test_cpu(cpu, &wd_cpus_enabled))
		return 0;

	__this_cpu_inc(irq_stat.soft_nmi_irqs);

	tb = get_tb();
	if (tb - per_cpu(wd_timer_tb, cpu) >= wd_panic_timeout_tb) {
		wd_smp_lock(&flags);
		if (cpumask_test_cpu(cpu, &wd_smp_cpus_stuck)) {
			wd_smp_unlock(&flags);
			return 0;
		}
		set_cpu_stuck(cpu, tb);

		pr_emerg("CPU %d self-detected hard LOCKUP @ %pS\n",
			 cpu, (void *)regs->nip);
		pr_emerg("CPU %d TB:%lld, last heartbeat TB:%lld (%lldms ago)\n",
			 cpu, tb, per_cpu(wd_timer_tb, cpu),
			 tb_to_ns(tb - per_cpu(wd_timer_tb, cpu)) / 1000000);
		print_modules();
		print_irqtrace_events(current);
		show_regs(regs);

		wd_smp_unlock(&flags);

		if (sysctl_hardlockup_all_cpu_backtrace)
			trigger_allbutself_cpu_backtrace();

		if (hardlockup_panic)
			nmi_panic(regs, "Hard LOCKUP");
	}
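
	/*
	 * Reload the decrementer so the next soft-NMI check fires a full
	 * timeout period from now, provided the value fits in a signed
	 * 32-bit decrementer register.
	 */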
	if (wd_panic_timeout_tb < 0x7fffffff)
		mtspr(SPRN_DEC, wd_panic_timeout_tb);

	return 0;
}

static enum hrtimer_restart watchdog_timer_fn(struct hrtimer *hrtimer)
{
	int cpu = smp_processor_id();

	if (!(watchdog_enabled & NMI_WATCHDOG_ENABLED))
		return HRTIMER_NORESTART;

	if (!cpumask_test_cpu(cpu, &watchdog_cpumask))
		return HRTIMER_NORESTART;

	watchdog_timer_interrupt(cpu);

	hrtimer_forward_now(hrtimer, ms_to_ktime(wd_timer_period_ms));

	return HRTIMER_RESTART;
}

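/*
 * Called from code that may spin with interrupts disabled for a long time,
 * to tell the watchdog the CPU is still making progress. Only do the
 * (potentially locking) pending-bit clear once a full timer period has
 * elapsed, so that frequent touches stay cheap.
 */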
void arch_touch_nmi_watchdog(void)
{
	unsigned long ticks = tb_ticks_per_usec * wd_timer_period_ms * 1000;
	int cpu = smp_processor_id();
	u64 tb = get_tb();

	if (tb - per_cpu(wd_timer_tb, cpu) >= ticks) {
		per_cpu(wd_timer_tb, cpu) = tb;
		wd_smp_clear_cpu_pending(cpu, tb);
	}
}
EXPORT_SYMBOL(arch_touch_nmi_watchdog);

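/*
 * Runs on the target CPU (via smp_call_function_single()): join the SMP
 * detector, then start the local heartbeat hrtimer. The first CPU to be
 * enabled initialises the global pending mask and timestamp.
 */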
static void start_watchdog(void *arg)
{
	struct hrtimer *hrtimer = this_cpu_ptr(&wd_hrtimer);
	int cpu = smp_processor_id();
	unsigned long flags;

	if (cpumask_test_cpu(cpu, &wd_cpus_enabled)) {
		WARN_ON(1);
		return;
	}

	if (!(watchdog_enabled & NMI_WATCHDOG_ENABLED))
		return;

	if (!cpumask_test_cpu(cpu, &watchdog_cpumask))
		return;

	wd_smp_lock(&flags);
	cpumask_set_cpu(cpu, &wd_cpus_enabled);
	if (cpumask_weight(&wd_cpus_enabled) == 1) {
		cpumask_set_cpu(cpu, &wd_smp_cpus_pending);
		wd_smp_last_reset_tb = get_tb();
	}
	wd_smp_unlock(&flags);

	*this_cpu_ptr(&wd_timer_tb) = get_tb();

	hrtimer_init(hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	hrtimer->function = watchdog_timer_fn;
	hrtimer_start(hrtimer, ms_to_ktime(wd_timer_period_ms),
		      HRTIMER_MODE_REL_PINNED);
}

static int start_watchdog_on_cpu(unsigned int cpu)
{
	return smp_call_function_single(cpu, start_watchdog, NULL, true);
}

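/*
 * Runs on the target CPU: cancel the heartbeat timer, leave the enabled
 * mask, and clear any pending bit so the SMP checker does not wait on a
 * departed CPU.
 */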
static void stop_watchdog(void *arg)
{
	struct hrtimer *hrtimer = this_cpu_ptr(&wd_hrtimer);
	int cpu = smp_processor_id();
	unsigned long flags;

	if (!cpumask_test_cpu(cpu, &wd_cpus_enabled))
		return; /* Can happen in CPU unplug case */

	hrtimer_cancel(hrtimer);

	wd_smp_lock(&flags);
	cpumask_clear_cpu(cpu, &wd_cpus_enabled);
	wd_smp_unlock(&flags);

	wd_smp_clear_cpu_pending(cpu, get_tb());
}

static int stop_watchdog_on_cpu(unsigned int cpu)
{
	return smp_call_function_single(cpu, stop_watchdog, NULL, true);
}

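/*
 * Worked example (assuming the default watchdog_thresh of 10 seconds and
 * a 512MHz timebase): wd_panic_timeout_tb = 5,120,000,000 ticks (10s),
 * wd_smp_panic_timeout_tb = 7,680,000,000 ticks (15s), and the heartbeat
 * timer fires every 4000ms.
 */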
static void watchdog_calc_timeouts(void)
{
	wd_panic_timeout_tb = watchdog_thresh * ppc_tb_freq;

	/* Have the SMP detector trigger a bit later */
	wd_smp_panic_timeout_tb = wd_panic_timeout_tb * 3 / 2;

	/* 2/5 is the factor that the perf based detector uses */
	wd_timer_period_ms = watchdog_thresh * 1000 * 2 / 5;
}

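/*
 * Arch hooks called by the core lockup detector when watchdog parameters
 * change: stop the watchdog on all enabled CPUs, then restart it (with
 * recalculated timeouts) on the requested CPUs.
 */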
void watchdog_nmi_stop(void)
{
	int cpu;

	for_each_cpu(cpu, &wd_cpus_enabled)
		stop_watchdog_on_cpu(cpu);
}

void watchdog_nmi_start(void)
{
	int cpu;

	watchdog_calc_timeouts();
	for_each_cpu_and(cpu, cpu_online_mask, &watchdog_cpumask)
		start_watchdog_on_cpu(cpu);
}

/*
 * Invoked from core watchdog init. Registers the CPU hotplug callbacks
 * so the watchdog is started and stopped as CPUs come online and go
 * offline; already-online CPUs are started via watchdog_nmi_start().
 */
int __init watchdog_nmi_probe(void)
{
	int err;

	err = cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN,
					"powerpc/watchdog:online",
					start_watchdog_on_cpu,
					stop_watchdog_on_cpu);
	if (err < 0) {
		pr_warn("could not be initialized\n");
		return err;
	}
	return 0;
}