/*
 * xen/arch/arm/smpboot.c
 *
 * Dummy smpboot support
 *
 * Copyright (c) 2011 Citrix Systems.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#include <xen/cpu.h>
#include <xen/cpumask.h>
#include <xen/delay.h>
#include <xen/domain_page.h>
#include <xen/errno.h>
#include <xen/init.h>
#include <xen/mm.h>
#include <xen/param.h>
#include <xen/sched.h>
#include <xen/smp.h>
#include <xen/softirq.h>
#include <xen/timer.h>
#include <xen/warning.h>
#include <xen/irq.h>
#include <xen/console.h>
#include <asm/cpuerrata.h>
#include <asm/gic.h>
#include <asm/procinfo.h>
#include <asm/psci.h>
#include <asm/acpi.h>

cpumask_t cpu_online_map;
cpumask_t cpu_present_map;
cpumask_t cpu_possible_map;

struct cpuinfo_arm cpu_data[NR_CPUS];

/* CPU logical map: map xen cpuid to an MPIDR */
register_t __cpu_logical_map[NR_CPUS] = { [0 ... NR_CPUS-1] = MPIDR_INVALID };

/* Fake one node for now. See also include/asm-arm/numa.h */
nodemask_t __read_mostly node_online_map = { { [0] = 1UL } };

/* Xen stack for bringing up the first CPU. */
static unsigned char __initdata cpu0_boot_stack[STACK_SIZE]
       __attribute__((__aligned__(STACK_SIZE)));

/* Boot cpu data */
struct init_info init_data =
{
    .stack = cpu0_boot_stack,
};

/* Shared state for coordinating CPU bringup */
unsigned long smp_up_cpu = MPIDR_INVALID;
/* Shared state for coordinating CPU teardown */
static bool cpu_is_dead;

/* ID of the PCPU we're running on */
DEFINE_PER_CPU(unsigned int, cpu_id);
/* XXX these seem awfully x86ish... */
/* representing HT siblings of each logical CPU */
DEFINE_PER_CPU_READ_MOSTLY(cpumask_var_t, cpu_sibling_mask);
/* representing HT and core siblings of each logical CPU */
DEFINE_PER_CPU_READ_MOSTLY(cpumask_var_t, cpu_core_mask);

/*
 * By default non-boot CPUs not identical to the boot CPU will be
 * parked.
 */
static bool __read_mostly opt_hmp_unsafe = false;
boolean_param("hmp-unsafe", opt_hmp_unsafe);

static void setup_cpu_sibling_map(int cpu)
{
    if ( !zalloc_cpumask_var(&per_cpu(cpu_sibling_mask, cpu)) ||
         !zalloc_cpumask_var(&per_cpu(cpu_core_mask, cpu)) )
        panic("No memory for CPU sibling/core maps\n");

    /* A CPU is a sibling with itself and is always on its own core. */
    cpumask_set_cpu(cpu, per_cpu(cpu_sibling_mask, cpu));
    cpumask_set_cpu(cpu, per_cpu(cpu_core_mask, cpu));
}

static void remove_cpu_sibling_map(int cpu)
{
    free_cpumask_var(per_cpu(cpu_sibling_mask, cpu));
    free_cpumask_var(per_cpu(cpu_core_mask, cpu));
}

void __init
smp_clear_cpu_maps (void)
{
    cpumask_clear(&cpu_possible_map);
    cpumask_clear(&cpu_online_map);
    cpumask_set_cpu(0, &cpu_online_map);
    cpumask_set_cpu(0, &cpu_possible_map);
    cpu_logical_map(0) = READ_SYSREG(MPIDR_EL1) & MPIDR_HWID_MASK;
}

/* Parse the device tree and build the logical map array containing
 * MPIDR values related to logical cpus.
 * Code based on Linux arch/arm/kernel/devtree.c
 */
static void __init dt_smp_init_cpus(void)
{
    register_t mpidr;
    struct dt_device_node *cpus = dt_find_node_by_path("/cpus");
    struct dt_device_node *cpu;
    unsigned int i, j;
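    /* Logical ids are handed out from 1; id 0 is reserved for the boot CPU. */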
    unsigned int cpuidx = 1;
    static register_t tmp_map[NR_CPUS] __initdata =
    {
        [0 ... NR_CPUS - 1] = MPIDR_INVALID
    };
    bool bootcpu_valid = false;
    int rc;

    mpidr = boot_cpu_data.mpidr.bits & MPIDR_HWID_MASK;

    if ( !cpus )
    {
        printk(XENLOG_WARNING "WARNING: Can't find /cpus in the device tree.\n"
               "Using only 1 CPU\n");
        return;
    }

    dt_for_each_child_node( cpus, cpu )
    {
        const __be32 *prop;
        u64 addr;
        u32 reg_len;
        register_t hwid;

        if ( !dt_device_type_is_equal(cpu, "cpu") )
            continue;

        if ( dt_n_size_cells(cpu) != 0 )
            printk(XENLOG_WARNING "cpu node `%s`: #size-cells %d\n",
                   dt_node_full_name(cpu), dt_n_size_cells(cpu));

        prop = dt_get_property(cpu, "reg", &reg_len);
        if ( !prop )
        {
            printk(XENLOG_WARNING "cpu node `%s`: has no reg property\n",
                   dt_node_full_name(cpu));
            continue;
        }

        if ( reg_len < dt_cells_to_size(dt_n_addr_cells(cpu)) )
        {
            printk(XENLOG_WARNING "cpu node `%s`: reg property too short\n",
                   dt_node_full_name(cpu));
            continue;
        }

        addr = dt_read_number(prop, dt_n_addr_cells(cpu));

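        /*
         * register_t is only 32 bits wide on arm32, so check that the 64-bit
         * value read from the DT was not truncated by the assignment below.
         */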
        hwid = addr;
        if ( hwid != addr )
        {
            printk(XENLOG_WARNING "cpu node `%s`: hwid overflow %"PRIx64"\n",
                   dt_node_full_name(cpu), addr);
            continue;
        }

        /*
         * 8 MSBs must be set to 0 in the DT since the reg property
         * defines the MPIDR[23:0]
         */
        if ( hwid & ~MPIDR_HWID_MASK )
        {
            printk(XENLOG_WARNING "cpu node `%s`: invalid hwid value (0x%"PRIregister")\n",
                   dt_node_full_name(cpu), hwid);
            continue;
        }

        /*
         * Duplicate MPIDRs are a recipe for disaster. Scan all initialized
         * entries and check for duplicates. If any is found, just skip the
         * node. The temp values are initialized to MPIDR_INVALID to avoid
         * matching valid MPIDR[23:0] values.
         */
        for ( j = 0; j < cpuidx; j++ )
        {
            if ( tmp_map[j] == hwid )
            {
                printk(XENLOG_WARNING
                       "cpu node `%s`: duplicate /cpu reg properties %"PRIregister" in the DT\n",
                       dt_node_full_name(cpu), hwid);
                break;
            }
        }
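        /* j < cpuidx here means the scan above found a duplicate: skip the node. */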
        if ( j != cpuidx )
            continue;

        /*
         * Build a stashed array of MPIDR values. The numbering scheme requires
         * that, if detected, the boot CPU must be assigned logical id 0. Other
         * CPUs get sequential indexes starting from 1. If a CPU node
         * with a reg property matching the boot CPU MPIDR is detected,
         * this is recorded so that the logical map built from the DT is
         * validated and can be used to set the map.
         */
        if ( hwid == mpidr )
        {
            i = 0;
            bootcpu_valid = true;
        }
        else
            i = cpuidx++;

        if ( cpuidx > NR_CPUS )
        {
            printk(XENLOG_WARNING
                   "DT /cpu %u node greater than max cores %u, capping them\n",
                   cpuidx, NR_CPUS);
            cpuidx = NR_CPUS;
            break;
        }

        if ( (rc = arch_cpu_init(i, cpu)) < 0 )
        {
            printk("cpu%d init failed (hwid %"PRIregister"): %d\n", i, hwid, rc);
            tmp_map[i] = MPIDR_INVALID;
        }
        else
            tmp_map[i] = hwid;
    }

    if ( !bootcpu_valid )
    {
        printk(XENLOG_WARNING "DT missing boot CPU MPIDR[23:0]\n"
               "Using only 1 CPU\n");
        return;
    }

    for ( i = 0; i < cpuidx; i++ )
    {
        if ( tmp_map[i] == MPIDR_INVALID )
            continue;
        cpumask_set_cpu(i, &cpu_possible_map);
        cpu_logical_map(i) = tmp_map[i];
    }
}
254
void __init smp_init_cpus(void)
{
    int rc;

    /* initialize PSCI and set a global variable */
    psci_init();

    if ( (rc = arch_smp_init()) < 0 )
    {
        printk(XENLOG_WARNING "SMP init failed (%d)\n"
               "Using only 1 CPU\n", rc);
        return;
    }

    if ( acpi_disabled )
        dt_smp_init_cpus();
    else
        acpi_smp_init_cpus();

    if ( opt_hmp_unsafe )
        warning_add("WARNING: HMP COMPUTING HAS BEEN ENABLED.\n"
                    "It has implications on the security and stability of the system,\n"
                    "unless the cpu affinity of all domains is specified.\n");
}

int __init
smp_get_max_cpus (void)
{
    int i, max_cpus = 0;

    for ( i = 0; i < nr_cpu_ids; i++ )
        if ( cpu_possible(i) )
            max_cpus++;

    return max_cpus;
}

void __init
smp_prepare_cpus(void)
{
    cpumask_copy(&cpu_present_map, &cpu_possible_map);

    setup_cpu_sibling_map(0);
}

/* Boot the current CPU */
void start_secondary(void)
{
    unsigned int cpuid = init_data.cpuid;

    memset(get_cpu_info(), 0, sizeof (struct cpu_info));

    set_processor_id(cpuid);

    identify_cpu(&current_cpu_data);
    processor_setup();

    init_traps();

    /*
     * Currently Xen assumes the platform has only one kind of CPU.
     * This assumption does not hold on big.LITTLE platforms and may
     * result in an unstable and insecure platform (unless cpu affinity
     * is manually specified for all domains). Better to park them for
     * now.
     */
    if ( !opt_hmp_unsafe &&
         current_cpu_data.midr.bits != boot_cpu_data.midr.bits )
    {
        printk(XENLOG_ERR "CPU%u MIDR (0x%x) does not match boot CPU MIDR (0x%x),\n"
               "disable cpu (see big.LITTLE.txt under docs/).\n",
               smp_processor_id(), current_cpu_data.midr.bits,
               boot_cpu_data.midr.bits);
        stop_cpu();
    }

    if ( dcache_line_bytes != read_dcache_line_bytes() )
    {
        printk(XENLOG_ERR "CPU%u dcache line size (%zu) does not match the boot CPU (%zu)\n",
               smp_processor_id(), read_dcache_line_bytes(),
               dcache_line_bytes);
        stop_cpu();
    }

    mmu_init_secondary_cpu();

    gic_init_secondary_cpu();

    init_secondary_IRQ();

    init_maintenance_interrupt();
    init_timer_interrupt();

    set_current(idle_vcpu[cpuid]);

    setup_cpu_sibling_map(cpuid);

    /* Run local notifiers */
    notify_cpu_starting(cpuid);
    /*
     * Ensure that previous writes are visible before marking the cpu as
     * online.
     */
    smp_wmb();

    /* Now report this CPU is up */
    cpumask_set_cpu(cpuid, &cpu_online_map);

    local_irq_enable();
    local_abort_enable();

    check_local_cpu_errata();

    printk(XENLOG_DEBUG "CPU %u booted.\n", smp_processor_id());

    startup_cpu_idle_loop();
}

/* Shut down the current CPU */
void __cpu_disable(void)
{
    unsigned int cpu = get_processor_id();

    local_irq_disable();
    gic_disable_cpu();
    /* Allow any queued timer interrupts to get serviced */
    local_irq_enable();
    mdelay(1);
    local_irq_disable();

    /* It's now safe to remove this processor from the online map */
    cpumask_clear_cpu(cpu, &cpu_online_map);

    smp_mb();

    /* Return to caller; eventually the IPI mechanism will unwind and the
     * scheduler will drop to the idle loop, which will call stop_cpu(). */
}

void stop_cpu(void)
{
    local_irq_disable();
    cpu_is_dead = true;
    /* Make sure the write happens before we sleep forever */
    dsb(sy);
    isb();
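    /*
     * If PSCI is available, CPU_OFF does not return on success; otherwise
     * fall through and sleep in the wfi loop below.
     */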
    call_psci_cpu_off();

    while ( 1 )
        wfi();
}

int __init cpu_up_send_sgi(int cpu)
{
    /* We don't know the GIC ID of the CPU until it has woken up, so just
     * signal everyone and rely on our own smp_up_cpu gate to ensure only
     * the one we want gets through. */
    send_SGI_allbutself(GIC_SGI_EVENT_CHECK);

    return 0;
}

/* Bring up a remote CPU */
int __cpu_up(unsigned int cpu)
{
    int rc;
    s_time_t deadline;

    printk("Bringing up CPU%d\n", cpu);

    rc = init_secondary_pagetables(cpu);
    if ( rc < 0 )
        return rc;

    console_start_sync(); /* Secondary may use early_printk */

    /* Tell the remote CPU which stack to boot on. */
    init_data.stack = idle_vcpu[cpu]->arch.stack;

    /* Tell the remote CPU what its logical CPU ID is. */
    init_data.cpuid = cpu;

    /* Open the gate for this CPU */
    smp_up_cpu = cpu_logical_map(cpu);
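    /*
     * The secondary CPU polls smp_up_cpu from its boot path with the MMU and
     * data cache still disabled, so clean the write out of the cache for it
     * to be observable.
     */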
    clean_dcache(smp_up_cpu);

    rc = arch_cpu_up(cpu);

    console_end_sync();

    if ( rc < 0 )
    {
        printk("Failed to bring up CPU%d\n", cpu);
        return rc;
    }

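    /* Wait up to one second for the secondary CPU to mark itself online. */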
    deadline = NOW() + MILLISECS(1000);

    while ( !cpu_online(cpu) && NOW() < deadline )
    {
        cpu_relax();
        process_pending_softirqs();
    }
    /*
     * Ensure that other cpus' initializations are visible before
     * proceeding. Corresponds to smp_wmb() in start_secondary.
     */
    smp_rmb();

    /*
     * Nuke start of day info before checking one last time if the CPU
     * actually came online. If it is not online it may still be
     * trying to come up and may show up later unexpectedly.
     *
     * This doesn't completely avoid the possibility of the supposedly
     * failed CPU trying to progress with another CPU's stack settings
     * etc, but better than nothing, hopefully.
     */
    init_data.stack = NULL;
    init_data.cpuid = ~0;
    smp_up_cpu = MPIDR_INVALID;
    clean_dcache(smp_up_cpu);

    if ( !cpu_online(cpu) )
    {
        printk("CPU%d never came online\n", cpu);
        return -EIO;
    }

    return 0;
}

/* Wait for a remote CPU to die */
void __cpu_die(unsigned int cpu)
{
    unsigned int i = 0;

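    /* Poll the flag set by stop_cpu() on the dying CPU, warning every second. */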
    while ( !cpu_is_dead )
    {
        mdelay(100);
        cpu_relax();
        process_pending_softirqs();
        if ( (++i % 10) == 0 )
            printk(KERN_ERR "CPU %u still not dead...\n", cpu);
        smp_mb();
    }
    cpu_is_dead = false;
    smp_mb();
}

static int cpu_smpboot_callback(struct notifier_block *nfb,
                                unsigned long action,
                                void *hcpu)
{
    unsigned int cpu = (unsigned long)hcpu;

    switch ( action )
    {
    case CPU_DEAD:
        remove_cpu_sibling_map(cpu);
        break;
    default:
        break;
    }

    return NOTIFY_DONE;
}

static struct notifier_block cpu_smpboot_nfb = {
    .notifier_call = cpu_smpboot_callback,
};

static int __init cpu_smpboot_notifier_init(void)
{
    register_cpu_notifier(&cpu_smpboot_nfb);

    return 0;
}
presmp_initcall(cpu_smpboot_notifier_init);

/*
 * Local variables:
 * mode: C
 * c-file-style: "BSD"
 * c-basic-offset: 4
 * indent-tabs-mode: nil
 * End:
 */