1 /*
2 * Intel Multiprocessor Specification 1.1 and 1.4
3 * compliant MP-table parsing routines.
4 *
5 * (c) 1995 Alan Cox, Building #3 <alan@redhat.com>
6 * (c) 1998, 1999, 2000 Ingo Molnar <mingo@redhat.com>
7 *
8 * Fixes
9 * Erich Boleyn : MP v1.4 and additional changes.
10 * Alan Cox : Added EBDA scanning
11 * Ingo Molnar : various cleanups and rewrites
12 * Maciej W. Rozycki: Bits for default MP configurations
13 * Paul Diefenbaugh: Added full ACPI support
14 */
15
16 #include <xen/types.h>
17 #include <xen/irq.h>
18 #include <xen/init.h>
19 #include <xen/acpi.h>
20 #include <xen/delay.h>
21 #include <xen/efi.h>
22 #include <xen/sched.h>
23
24 #include <xen/bitops.h>
25 #include <asm/smp.h>
26 #include <asm/acpi.h>
27 #include <asm/mtrr.h>
28 #include <asm/mpspec.h>
29 #include <asm/io_apic.h>
30 #include <asm/setup.h>
31
32 #include <mach_apic.h>
33 #include <mach_mpparse.h>
34 #include <bios_ebda.h>
35
36 /* Have we found an MP table */
37 bool __initdata smp_found_config;
38
39 /*
40 * Various Linux-internal data structures created from the
41 * MP-table.
42 */
43 unsigned char __read_mostly apic_version[MAX_APICS];
44 unsigned char __read_mostly mp_bus_id_to_type[MAX_MP_BUSSES];
45
46 /* I/O APIC entries */
47 struct mpc_config_ioapic __read_mostly mp_ioapics[MAX_IO_APICS];
48
49 /* # of MP IRQ source entries */
50 struct mpc_config_intsrc __read_mostly mp_irqs[MAX_IRQ_SOURCES];
51
52 /* MP IRQ source entries */
53 int __read_mostly mp_irq_entries;
54
55 bool __read_mostly pic_mode;
56 bool __read_mostly def_to_bigsmp;
57 unsigned long __read_mostly mp_lapic_addr;
58
59 /* Processor that is doing the boot up */
60 unsigned int __read_mostly boot_cpu_physical_apicid = BAD_APICID;
61
62 /* Internal processor count */
63 static unsigned int num_processors;
64 unsigned int __read_mostly disabled_cpus;
65
66 /* Bitmask of physically existing CPUs */
67 physid_mask_t phys_cpu_present_map;
68
69 /* Record whether CPUs haven't been added due to overflows. */
70 bool __read_mostly unaccounted_cpus;
71
/*
 * Finalize nr_cpu_ids from the command-line limit (max_cpus, 0 meaning
 * "no limit"), the CPUs discovered so far (num_processors) and those
 * the firmware reported disabled (disabled_cpus).
 */
void __init set_nr_cpu_ids(unsigned int max_cpus)
{
	unsigned int tot_cpus = num_processors + disabled_cpus;

	/* No explicit limit: allow everything the firmware reported. */
	if (!max_cpus)
		max_cpus = tot_cpus;
	if (max_cpus > NR_CPUS)
		max_cpus = NR_CPUS;
	else if (!max_cpus)
		/* Nothing reported at all - still need the boot CPU. */
		max_cpus = 1;
	printk(XENLOG_INFO "SMP: Allowing %u CPUs (%d hotplug CPUs)\n",
	       max_cpus, max_t(int, max_cpus - num_processors, 0));

	/*
	 * Without parking, IDs are needed only for CPUs actually brought
	 * up; with parking, disabled CPUs are accounted for as well.
	 */
	if (!park_offline_cpus)
		tot_cpus = max_cpus;
	nr_cpu_ids = min(tot_cpus, NR_CPUS + 0u);
	if (park_offline_cpus && nr_cpu_ids < num_processors)
		printk(XENLOG_WARNING "SMP: Cannot bring up %u further CPUs\n",
		       num_processors - nr_cpu_ids);

#ifndef nr_cpumask_bits
	nr_cpumask_bits = ROUNDUP(nr_cpu_ids, BITS_PER_LONG);
	printk(XENLOG_DEBUG "NR_CPUS:%u nr_cpumask_bits:%u\n",
	       NR_CPUS, nr_cpumask_bits);
#endif
}
98
/*
 * Estimate an upper bound for the number of sockets: derive it from the
 * highest APIC ID seen (phys_cpu_present_map) divided by cores and
 * threads per package, plus a similar contribution for any CPUs the
 * firmware reported disabled.
 */
void __init set_nr_sockets(void)
{
	nr_sockets = last_physid(phys_cpu_present_map)
		     / boot_cpu_data.x86_max_cores
		     / boot_cpu_data.x86_num_siblings + 1;
	if (disabled_cpus)
		nr_sockets += (disabled_cpus - 1)
			      / boot_cpu_data.x86_max_cores
			      / boot_cpu_data.x86_num_siblings + 1;
	printk(XENLOG_DEBUG "nr_sockets: %u\n", nr_sockets);
}
110
111 /*
112 * Intel MP BIOS table parsing routines:
113 */
114
115
116 /*
117 * Checksum an MP configuration block.
118 */
119
mpf_checksum(unsigned char * mp,int len)120 static int __init mpf_checksum(unsigned char *mp, int len)
121 {
122 int sum = 0;
123
124 while (len--)
125 sum += *mp++;
126
127 return sum & 0xFF;
128 }
129
130 /* Return xen's logical cpu_id of the new added cpu or <0 if error */
/*
 * Record one processor entry (from the MP table or the ACPI MADT).
 * Return xen's logical cpu_id of the new added cpu, or <0 on error:
 *  -EINVAL - entry marked disabled (counted in disabled_cpus at boot),
 *  -ENOSPC - no room left to account for further CPUs.
 */
static int MP_processor_info_x(struct mpc_config_processor *m,
			       u32 apicid, bool hotplug)
{
	int ver, cpu = 0;

	if (!(m->mpc_cpuflag & CPU_ENABLED)) {
		/* Only count firmware-disabled CPUs during boot. */
		if (!hotplug)
			++disabled_cpus;
		return -EINVAL;
	}

	if (m->mpc_cpuflag & CPU_BOOTPROCESSOR) {
		Dprintk(" Bootup CPU\n");
		boot_cpu_physical_apicid = apicid;
	}

	ver = m->mpc_apicver;

	/*
	 * Validate version
	 */
	if (ver == 0x0) {
		printk(KERN_WARNING "BIOS bug, APIC version is 0 for CPU#%u! "
		       "fixing up to 0x10. (tell your hw vendor)\n",
		       apicid);
		ver = 0x10;
	}
	apic_version[apicid] = ver;

	set_apicid(apicid, &phys_cpu_present_map);

	if (num_processors >= nr_cpu_ids) {
		printk_once(XENLOG_WARNING
			    "WARNING: NR_CPUS limit of %u reached - ignoring further processors\n",
			    nr_cpu_ids);
		unaccounted_cpus = true;
		return -ENOSPC;
	}

	/* The default (flat) APIC driver is limited to 8 CPUs. */
	if (num_processors >= 8 && hotplug
	    && genapic.name == apic_default.name) {
		printk_once(XENLOG_WARNING
			    "WARNING: CPUs limit of 8 reached - ignoring further processors\n");
		unaccounted_cpus = true;
		return -ENOSPC;
	}

	/* Boot cpu has been marked present in smp_prepare_boot_cpu */
	if (!(m->mpc_cpuflag & CPU_BOOTPROCESSOR)) {
		cpu = alloc_cpu_id();
		if (cpu < 0) {
			printk(KERN_WARNING "WARNING: Can't alloc cpu_id."
			       " Processor with apicid %u ignored\n", apicid);
			return cpu;
		}
		x86_cpu_to_apicid[cpu] = apicid;
		cpumask_set_cpu(cpu, &cpu_present_map);
	}

	if (++num_processors > 8) {
		/*
		 * No need for processor or APIC checks: physical delivery
		 * (bigsmp) mode should always work.
		 */
		def_to_bigsmp = true;
	}

	return cpu;
}
200
MP_processor_info(struct mpc_config_processor * m)201 static int MP_processor_info(struct mpc_config_processor *m)
202 {
203 return MP_processor_info_x(m, m->mpc_apicid, 0);
204 }
205
MP_bus_info(struct mpc_config_bus * m)206 static void __init MP_bus_info (struct mpc_config_bus *m)
207 {
208 char str[7];
209
210 memcpy(str, m->mpc_bustype, 6);
211 str[6] = 0;
212
213 #if 0 /* size of mpc_busid (8 bits) makes this check unnecessary */
214 if (m->mpc_busid >= MAX_MP_BUSSES) {
215 printk(KERN_WARNING "MP table busid value (%d) for bustype %s "
216 " is too large, max. supported is %d\n",
217 m->mpc_busid, str, MAX_MP_BUSSES - 1);
218 return;
219 }
220 #endif
221
222 if (strncmp(str, BUSTYPE_ISA, sizeof(BUSTYPE_ISA)-1) == 0) {
223 mp_bus_id_to_type[m->mpc_busid] = MP_BUS_ISA;
224 } else if (strncmp(str, BUSTYPE_EISA, sizeof(BUSTYPE_EISA)-1) == 0) {
225 mp_bus_id_to_type[m->mpc_busid] = MP_BUS_EISA;
226 } else if (strncmp(str, BUSTYPE_PCI, sizeof(BUSTYPE_PCI)-1) == 0) {
227 mp_bus_id_to_type[m->mpc_busid] = MP_BUS_PCI;
228 } else if (strncmp(str, BUSTYPE_MCA, sizeof(BUSTYPE_MCA)-1) == 0) {
229 mp_bus_id_to_type[m->mpc_busid] = MP_BUS_MCA;
230 } else if (strncmp(str, BUSTYPE_NEC98, sizeof(BUSTYPE_NEC98)-1) == 0) {
231 mp_bus_id_to_type[m->mpc_busid] = MP_BUS_NEC98;
232 } else {
233 printk(KERN_WARNING "Unknown bustype %s - ignoring\n", str);
234 }
235 }
236
/*
 * Record a usable IO-APIC entry from the MP configuration table in
 * mp_ioapics[].  Panics when more than MAX_IO_APICS are found; entries
 * with a bogus zero MMIO address are skipped.
 */
static void __init MP_ioapic_info (struct mpc_config_ioapic *m)
{
	if (!(m->mpc_flags & MPC_APIC_USABLE))
		return;

	printk(KERN_INFO "I/O APIC #%d Version %d at %#x.\n",
	       m->mpc_apicid, m->mpc_apicver, m->mpc_apicaddr);
	if (nr_ioapics >= MAX_IO_APICS) {
		printk(KERN_CRIT "Max # of I/O APICs (%d) exceeded (found %d).\n",
		       MAX_IO_APICS, nr_ioapics);
		panic("Recompile kernel with bigger MAX_IO_APICS\n");
	}
	if (!m->mpc_apicaddr) {
		printk(KERN_ERR "WARNING: bogus zero I/O APIC address"
		       " found in MP table, skipping!\n");
		return;
	}
	mp_ioapics[nr_ioapics] = *m;
	nr_ioapics++;
}
257
/*
 * Append an interrupt source entry to the fixed-size mp_irqs[] table.
 * Panics (rather than overflowing) when MAX_IRQ_SOURCES is reached.
 */
static void __init MP_intsrc_info (struct mpc_config_intsrc *m)
{
	mp_irqs [mp_irq_entries] = *m;
	Dprintk("Int: type %d, pol %d, trig %d, bus %d,"
		" IRQ %02x, APIC ID %x, APIC INT %02x\n",
		m->mpc_irqtype, m->mpc_irqflag & 3,
		(m->mpc_irqflag >> 2) & 3, m->mpc_srcbus,
		m->mpc_srcbusirq, m->mpc_dstapic, m->mpc_dstirq);
	if (++mp_irq_entries == MAX_IRQ_SOURCES)
		panic("Max # of irq sources exceeded\n");
}
269
/*
 * Sanity-check a local interrupt (LINTx) entry.  Nothing is stored;
 * the entry is only logged and checked against the universal LINT
 * wiring assumption below.
 */
static void __init MP_lintsrc_info (struct mpc_config_lintsrc *m)
{
	Dprintk("Lint: type %d, pol %d, trig %d, bus %d,"
		" IRQ %02x, APIC ID %x, APIC LINT %02x\n",
		m->mpc_irqtype, m->mpc_irqflag & 3,
		(m->mpc_irqflag >> 2) &3, m->mpc_srcbusid,
		m->mpc_srcbusirq, m->mpc_destapic, m->mpc_destapiclint);
	/*
	 * Well it seems all SMP boards in existence
	 * use ExtINT/LVT1 == LINT0 and
	 * NMI/LVT2 == LINT1 - the following check
	 * will show us if this assumptions is false.
	 * Until then we do not have to add baggage.
	 */
	if ((m->mpc_irqtype == mp_ExtINT) &&
	    (m->mpc_destapiclint != 0))
		BUG();
	if ((m->mpc_irqtype == mp_NMI) &&
	    (m->mpc_destapiclint != 1))
		BUG();
}
291
292 /*
293 * Read/parse the MPC
294 */
295
/*
 * Read/parse the MP configuration table: validate signature, checksum,
 * spec revision and local APIC address, then walk the variable-length
 * entry list, dispatching on each entry's type byte.
 * Returns the number of processors registered, or 0 on any error.
 */
static int __init smp_read_mpc(struct mp_config_table *mpc)
{
	char str[16];
	char oem[10];
	int count=sizeof(*mpc);
	unsigned char *mpt=((unsigned char *)mpc)+count;

	if (memcmp(mpc->mpc_signature,MPC_SIGNATURE,4)) {
		printk(KERN_ERR "SMP mptable: bad signature [%#x]!\n",
		       *(u32 *)mpc->mpc_signature);
		return 0;
	}
	/* All mpc_length bytes of the table must sum to zero (mod 256). */
	if (mpf_checksum((unsigned char *)mpc,mpc->mpc_length)) {
		printk(KERN_ERR "SMP mptable: checksum error!\n");
		return 0;
	}
	/* Only MP spec revisions 1.1 and 1.4 are supported. */
	if (mpc->mpc_spec!=0x01 && mpc->mpc_spec!=0x04) {
		printk(KERN_ERR "SMP mptable: bad table version (%d)!!\n",
		       mpc->mpc_spec);
		return 0;
	}
	if (!mpc->mpc_lapic) {
		printk(KERN_ERR "SMP mptable: null local APIC address!\n");
		return 0;
	}
	memcpy(oem,mpc->mpc_oem,8);
	oem[8]=0;
	printk(KERN_INFO "OEM ID: %s ",oem);

	memcpy(str,mpc->mpc_productid,12);
	str[12]=0;
	printk("Product ID: %s ",str);

	/* Let the platform quirk code look at the OEM/product strings. */
	mps_oem_check(mpc, oem, str);

	printk("APIC at: %#x\n", mpc->mpc_lapic);

	/*
	 * Save the local APIC address (it might be non-default) -- but only
	 * if we're not using ACPI.
	 */
	if (!acpi_lapic)
		mp_lapic_addr = mpc->mpc_lapic;

	/*
	 * Now process the configuration blocks.
	 */
	while (count < mpc->mpc_length) {
		switch(*mpt) {
			case MP_PROCESSOR:
			{
				struct mpc_config_processor *m=
					(struct mpc_config_processor *)mpt;

				mpt += sizeof(*m);
				count += sizeof(*m);

				/* ACPI may have already provided this data. */
				if (acpi_lapic)
					break;

				printk("Processor #%02x %u:%u APIC version %u%s\n",
				       m->mpc_apicid,
				       MASK_EXTR(m->mpc_cpufeature,
						 CPU_FAMILY_MASK),
				       MASK_EXTR(m->mpc_cpufeature,
						 CPU_MODEL_MASK),
				       m->mpc_apicver,
				       m->mpc_cpuflag & CPU_ENABLED
				       ? "" : " [disabled]");
				MP_processor_info(m);
				break;
			}
			case MP_BUS:
			{
				struct mpc_config_bus *m=
					(struct mpc_config_bus *)mpt;
				MP_bus_info(m);
				mpt += sizeof(*m);
				count += sizeof(*m);
				break;
			}
			case MP_IOAPIC:
			{
				struct mpc_config_ioapic *m=
					(struct mpc_config_ioapic *)mpt;
				MP_ioapic_info(m);
				mpt+=sizeof(*m);
				count+=sizeof(*m);
				break;
			}
			case MP_INTSRC:
			{
				struct mpc_config_intsrc *m=
					(struct mpc_config_intsrc *)mpt;

				MP_intsrc_info(m);
				mpt+=sizeof(*m);
				count+=sizeof(*m);
				break;
			}
			case MP_LINTSRC:
			{
				struct mpc_config_lintsrc *m=
					(struct mpc_config_lintsrc *)mpt;
				MP_lintsrc_info(m);
				mpt+=sizeof(*m);
				count+=sizeof(*m);
				break;
			}
			default:
			{
				/*
				 * Unknown entry type: its size is unknown
				 * too, so parsing cannot continue - skip
				 * the remainder of the table.
				 */
				count = mpc->mpc_length;
				break;
			}
		}
	}
	clustered_apic_check();
	if (!num_processors)
		printk(KERN_ERR "SMP mptable: no processors registered!\n");
	return num_processors;
}
418
ELCR_trigger(unsigned int irq)419 static int __init ELCR_trigger(unsigned int irq)
420 {
421 unsigned int port;
422
423 port = 0x4d0 + (irq >> 3);
424 return (inb(port) >> (irq & 7)) & 1;
425 }
426
/*
 * Synthesize MP interrupt source entries for the legacy ISA IRQs when
 * the BIOS provided one of the spec's default configurations (i.e. no
 * explicit IRQ entries).  For default type 5 (ISA+PCI) the ELCR is
 * consulted, if its contents look sane, to mark level-triggered lines.
 */
static void __init construct_default_ioirq_mptable(int mpc_default_type)
{
	struct mpc_config_intsrc intsrc;
	int i;
	int ELCR_fallback = 0;

	intsrc.mpc_type = MP_INTSRC;
	intsrc.mpc_irqflag = 0; /* conforming */
	intsrc.mpc_srcbus = 0;
	intsrc.mpc_dstapic = mp_ioapics[0].mpc_apicid;

	intsrc.mpc_irqtype = mp_INT;

	/*
	 * If true, we have an ISA/PCI system with no IRQ entries
	 * in the MP table. To prevent the PCI interrupts from being set up
	 * incorrectly, we try to use the ELCR. The sanity check to see if
	 * there is good ELCR data is very simple - IRQ0, 1, 2 and 13 can
	 * never be level sensitive, so we simply see if the ELCR agrees.
	 * If it does, we assume it's valid.
	 */
	if (mpc_default_type == 5) {
		printk(KERN_INFO "ISA/PCI bus type with no IRQ information... falling back to ELCR\n");

		if (ELCR_trigger(0) || ELCR_trigger(1) || ELCR_trigger(2) || ELCR_trigger(13))
			printk(KERN_WARNING "ELCR contains invalid data... not using ELCR\n");
		else {
			printk(KERN_INFO "Using ELCR to identify PCI interrupts\n");
			ELCR_fallback = 1;
		}
	}

	for (i = 0; platform_legacy_irq(i); i++) {
		switch (mpc_default_type) {
		case 2:
			if (i == 0 || i == 13)
				continue;	/* IRQ0 & IRQ13 not connected */
			/* fall through */
		default:
			if (i == 2)
				continue;	/* IRQ2 is never connected */
		}

		if (ELCR_fallback) {
			/*
			 * If the ELCR indicates a level-sensitive interrupt, we
			 * copy that information over to the MP table in the
			 * irqflag field (level sensitive, active high polarity).
			 */
			if (ELCR_trigger(i))
				intsrc.mpc_irqflag = 13;
			else
				intsrc.mpc_irqflag = 0;
		}

		intsrc.mpc_srcbusirq = i;
		intsrc.mpc_dstirq = i ? i : 2;	/* IRQ0 to INTIN2 */
		MP_intsrc_info(&intsrc);
	}

	/* Finally route the 8259A's output (ExtINT) to INTIN0. */
	intsrc.mpc_irqtype = mp_ExtINT;
	intsrc.mpc_srcbusirq = 0;
	intsrc.mpc_dstirq = 0;	/* 8259A to INTIN0 */
	MP_intsrc_info(&intsrc);
}
492
/*
 * Build an in-memory MP configuration for one of the MP spec's default
 * system types (mpc_default_type 1-7): two CPUs, the appropriate
 * ISA/EISA/MCA (and, for types > 4, PCI) bus, one IO-APIC at the
 * standard address, the default IRQ routing and LINT wiring.
 */
static inline void __init construct_default_ISA_mptable(int mpc_default_type)
{
	struct mpc_config_processor processor;
	struct mpc_config_bus bus;
	struct mpc_config_ioapic ioapic;
	struct mpc_config_lintsrc lintsrc;
	int linttypes[2] = { mp_ExtINT, mp_NMI };
	int i;

	/*
	 * local APIC has default address
	 */
	mp_lapic_addr = APIC_DEFAULT_PHYS_BASE;

	/*
	 * 2 CPUs, numbered 0 & 1.
	 */
	processor.mpc_type = MP_PROCESSOR;
	/* Either an integrated APIC or a discrete 82489DX. */
	processor.mpc_apicver = mpc_default_type > 4 ? 0x10 : 0x01;
	processor.mpc_cpuflag = CPU_ENABLED;
	processor.mpc_cpufeature = (boot_cpu_data.x86 << 8) |
				   (boot_cpu_data.x86_model << 4) |
				   boot_cpu_data.x86_mask;
	processor.mpc_featureflag =
		boot_cpu_data.x86_capability[cpufeat_word(X86_FEATURE_FPU)];
	processor.mpc_reserved[0] = 0;
	processor.mpc_reserved[1] = 0;
	for (i = 0; i < 2; i++) {
		processor.mpc_apicid = i;
		MP_processor_info(&processor);
	}

	/* Bus 0 depends on the default configuration type. */
	bus.mpc_type = MP_BUS;
	bus.mpc_busid = 0;
	switch (mpc_default_type) {
	default:
		printk("???\n");
		printk(KERN_ERR "Unknown standard configuration %d\n",
		       mpc_default_type);
		/* fall through */
	case 1:
	case 5:
		memcpy(bus.mpc_bustype, "ISA   ", 6);
		break;
	case 2:
	case 6:
	case 3:
		memcpy(bus.mpc_bustype, "EISA  ", 6);
		break;
	case 4:
	case 7:
		memcpy(bus.mpc_bustype, "MCA   ", 6);
	}
	MP_bus_info(&bus);
	/* Types above 4 additionally have a PCI bus (bus 1). */
	if (mpc_default_type > 4) {
		bus.mpc_busid = 1;
		memcpy(bus.mpc_bustype, "PCI   ", 6);
		MP_bus_info(&bus);
	}

	/* One IO-APIC, at the architecturally standard address. */
	ioapic.mpc_type = MP_IOAPIC;
	ioapic.mpc_apicid = 2;
	ioapic.mpc_apicver = mpc_default_type > 4 ? 0x10 : 0x01;
	ioapic.mpc_flags = MPC_APIC_USABLE;
	ioapic.mpc_apicaddr = 0xFEC00000;
	MP_ioapic_info(&ioapic);

	/*
	 * We set up most of the low 16 IO-APIC pins according to MPS rules.
	 */
	construct_default_ioirq_mptable(mpc_default_type);

	/* LINT0 = ExtINT, LINT1 = NMI, delivered to all local APICs. */
	lintsrc.mpc_type = MP_LINTSRC;
	lintsrc.mpc_irqflag = 0;		/* conforming */
	lintsrc.mpc_srcbusid = 0;
	lintsrc.mpc_srcbusirq = 0;
	lintsrc.mpc_destapic = MP_APIC_ALL;
	for (i = 0; i < 2; i++) {
		lintsrc.mpc_irqtype = linttypes[i];
		lintsrc.mpc_destapiclint = i;
		MP_lintsrc_info(&lintsrc);
	}
}
577
efi_unmap_mpf(void)578 static __init void efi_unmap_mpf(void)
579 {
580 if (efi_enabled(EFI_BOOT))
581 clear_fixmap(FIX_EFI_MPF);
582 }
583
584 static struct intel_mp_floating *__initdata mpf_found;
585
586 /*
587 * Scan the memory blocks for an SMP configuration block.
588 */
/*
 * Process the MP floating pointer located earlier (mpf_found): either
 * defer entirely to ACPI, build one of the spec's default
 * configurations, or parse the BIOS-provided MP configuration table.
 * NOTE(review): mpf_found is dereferenced unconditionally below -
 * callers are expected to invoke this only when a table was found.
 */
void __init get_smp_config (void)
{
	struct intel_mp_floating *mpf = mpf_found;

	/*
	 * ACPI supports both logical (e.g. Hyper-Threading) and physical
	 * processors, where MPS only supports physical.
	 */
	if (acpi_lapic && acpi_ioapic) {
		efi_unmap_mpf();
		printk(KERN_INFO "Using ACPI (MADT) for SMP configuration information\n");
		return;
	}
	else if (acpi_lapic)
		printk(KERN_INFO "Using ACPI for processor (LAPIC) configuration information\n");

	printk(KERN_INFO "Intel MultiProcessor Specification v1.%d\n", mpf->mpf_specification);
	/* Bit 7 of feature byte 2 selects IMCR/PIC vs virtual wire mode. */
	if (mpf->mpf_feature2 & (1<<7)) {
		printk(KERN_INFO "    IMCR and PIC compatibility mode.\n");
		pic_mode = true;
	} else {
		printk(KERN_INFO "    Virtual Wire compatibility mode.\n");
		pic_mode = false;
	}

	/*
	 * Now see if we need to read further.
	 */
	if (mpf->mpf_feature1 != 0) {
		/* Non-zero feature byte 1 names a default configuration. */
		printk(KERN_INFO "Default MP configuration #%d\n", mpf->mpf_feature1);
		construct_default_ISA_mptable(mpf->mpf_feature1);

	} else if (mpf->mpf_physptr) {

		/*
		 * Read the physical hardware table. Anything here will
		 * override the defaults.
		 */
		if (!smp_read_mpc((void *)(unsigned long)mpf->mpf_physptr)) {
			efi_unmap_mpf();
			smp_found_config = false;
			printk(KERN_ERR "BIOS bug, MP table errors detected!...\n");
			printk(KERN_ERR "... disabling SMP support. (tell your hw vendor)\n");
			return;
		}
		/*
		 * If there are no explicit MP IRQ entries, then we are
		 * broken.  We set up most of the low 16 IO-APIC pins to
		 * ISA defaults and hope it will work.
		 */
		if (!mp_irq_entries) {
			struct mpc_config_bus bus;

			printk(KERN_ERR "BIOS bug, no explicit IRQ entries, using default mptable. (tell your hw vendor)\n");

			bus.mpc_type = MP_BUS;
			bus.mpc_busid = 0;
			memcpy(bus.mpc_bustype, "ISA   ", 6);
			MP_bus_info(&bus);

			construct_default_ioirq_mptable(0);
		}

	} else
		/* The floating pointer must supply one or the other. */
		BUG();

	efi_unmap_mpf();

	printk(KERN_INFO "Processors: %d\n", num_processors);
	/*
	 * Only use the first configuration found.
	 */
}
663
/*
 * Scan a physical memory range for the "_MP_" floating pointer
 * structure (16-byte aligned, 16 bytes long, zero checksum, spec
 * revision 1.1 or 1.4).  On success record it in mpf_found, set
 * smp_found_config and return 1; return 0 if nothing was found.
 */
static int __init smp_scan_config (unsigned long base, unsigned long length)
{
	unsigned int *bp = maddr_to_virt(base);
	struct intel_mp_floating *mpf;

	Dprintk("Scan SMP from %p for %ld bytes.\n", bp,length);
	/* The structure is architecturally exactly 16 bytes. */
	if (sizeof(*mpf) != 16)
		printk("Error: MPF size\n");

	while (length > 0) {
		mpf = (struct intel_mp_floating *)bp;
		if ((*bp == SMP_MAGIC_IDENT) &&
		    (mpf->mpf_length == 1) &&
		    !mpf_checksum((unsigned char *)bp, 16) &&
		    ((mpf->mpf_specification == 1)
		     || (mpf->mpf_specification == 4)) ) {

			smp_found_config = true;
			printk(KERN_INFO "found SMP MP-table at %08lx\n",
			       virt_to_maddr(mpf));
#if 0
			reserve_bootmem(virt_to_maddr(mpf), PAGE_SIZE);
			if (mpf->mpf_physptr) {
				/*
				 * We cannot access to MPC table to compute
				 * table size yet, as only few megabytes from
				 * the bottom is mapped now.
				 * PC-9800's MPC table places on the very last
				 * of physical memory; so that simply reserving
				 * PAGE_SIZE from mpg->mpf_physptr yields BUG()
				 * in reserve_bootmem.
				 */
				unsigned long size = PAGE_SIZE;
				unsigned long end = max_low_pfn * PAGE_SIZE;
				if (mpf->mpf_physptr + size > end)
					size = end - mpf->mpf_physptr;
				reserve_bootmem(mpf->mpf_physptr, size);
			}
#endif
			mpf_found = mpf;
			return 1;
		}
		/* Advance by 16 bytes (bp is an unsigned int pointer). */
		bp += 4;
		length -= 16;
	}
	return 0;
}
711
efi_check_config(void)712 static void __init efi_check_config(void)
713 {
714 struct intel_mp_floating *mpf;
715
716 if (efi.mps == EFI_INVALID_TABLE_ADDR)
717 return;
718
719 __set_fixmap(FIX_EFI_MPF, PFN_DOWN(efi.mps), __PAGE_HYPERVISOR);
720 mpf = fix_to_virt(FIX_EFI_MPF) + ((long)efi.mps & (PAGE_SIZE-1));
721
722 if (memcmp(mpf->mpf_signature, "_MP_", 4) == 0 &&
723 mpf->mpf_length == 1 &&
724 mpf_checksum((void *)mpf, 16) &&
725 (mpf->mpf_specification == 1 || mpf->mpf_specification == 4)) {
726 smp_found_config = true;
727 printk(KERN_INFO "SMP MP-table at %08lx\n", efi.mps);
728 mpf_found = mpf;
729 }
730 else
731 efi_unmap_mpf();
732 }
733
/*
 * Locate the MP floating pointer structure, either via the EFI system
 * table or by scanning the legacy BIOS areas mandated by the MP spec.
 */
void __init find_smp_config (void)
{
	unsigned int address;

	if (efi_enabled(EFI_BOOT)) {
		efi_check_config();
		return;
	}

	/*
	 * FIXME: Linux assumes you have 640K of base ram..
	 * this continues the error...
	 *
	 * 1) Scan the bottom 1K for a signature
	 * 2) Scan the top 1K of base RAM
	 * 3) Scan the 64K of bios
	 */
	if (smp_scan_config(0x0,0x400) ||
	    smp_scan_config(639*0x400,0x400) ||
	    smp_scan_config(0xF0000,0x10000))
		return;
	/*
	 * If it is an SMP machine we should know now, unless the
	 * configuration is in an EISA/MCA bus machine with an
	 * extended bios data area.
	 *
	 * there is a real-mode segmented pointer pointing to the
	 * 4K EBDA area at 0x40E, calculate and scan it here.
	 *
	 * NOTE! There are Linux loaders that will corrupt the EBDA
	 * area, and as such this kind of SMP config may be less
	 * trustworthy, simply because the SMP table may have been
	 * stomped on during early boot. These loaders are buggy and
	 * should be fixed.
	 *
	 * MP1.4 SPEC states to only scan first 1K of 4K EBDA.
	 */

	address = get_bios_ebda();
	if (address)
		smp_scan_config(address, 0x400);
}
776
777 /* --------------------------------------------------------------------------
778 ACPI-based MP Configuration
779 -------------------------------------------------------------------------- */
780
781 #ifdef CONFIG_ACPI
782
mp_register_lapic_address(u64 address)783 void __init mp_register_lapic_address (
784 u64 address)
785 {
786 if (!x2apic_enabled) {
787 mp_lapic_addr = (unsigned long) address;
788 set_fixmap_nocache(FIX_APIC_BASE, mp_lapic_addr);
789 }
790
791 if (boot_cpu_physical_apicid == -1U)
792 boot_cpu_physical_apicid = get_apic_id();
793
794 Dprintk("Boot CPU = %d\n", boot_cpu_physical_apicid);
795 }
796
797
/*
 * Register a local APIC described by ACPI (MADT).  Returns the logical
 * cpu id assigned by MP_processor_info_x(), or -EINVAL for an
 * out-of-range APIC ID.
 */
int mp_register_lapic(u32 id, bool enabled, bool hotplug)
{
	struct mpc_config_processor processor = {
		.mpc_type = MP_PROCESSOR,
		/* Note: We don't fill in fields not consumed anywhere. */
		.mpc_apicid = id,
		.mpc_apicver = GET_APIC_VERSION(apic_read(APIC_LVR)),
		.mpc_cpuflag = (enabled ? CPU_ENABLED : 0) |
			       (id == boot_cpu_physical_apicid ?
				CPU_BOOTPROCESSOR : 0),
	};

	/* apic_version[] (indexed by APIC ID) only has MAX_APICS slots. */
	if (MAX_APICS <= id) {
		printk(KERN_WARNING "Processor #%u invalid (max %d)\n",
		       id, MAX_APICS);
		return -EINVAL;
	}

	return MP_processor_info_x(&processor, id, hotplug);
}
818
/*
 * Remove a local APIC from the bookkeeping (CPU hot-unplug path).
 * CPU 0 and the boot CPU can never be unregistered, and the recorded
 * cpu -> APIC ID mapping must still match the given apic_id.
 */
void mp_unregister_lapic(uint32_t apic_id, uint32_t cpu)
{
	if (cpu == 0 || apic_id == boot_cpu_physical_apicid ||
	    x86_cpu_to_apicid[cpu] != apic_id)
		return;

	physid_clear(apic_id, phys_cpu_present_map);
	x86_cpu_to_apicid[cpu] = BAD_APICID;
	cpumask_clear_cpu(cpu, &cpu_present_map);
}
832
833 #define MP_ISA_BUS 0
834 #define MP_MAX_IOAPIC_PIN 127
835
/* Per-IOAPIC GSI routing information, indexed like mp_ioapics[]. */
static struct mp_ioapic_routing {
	int gsi_base;	/* first GSI handled by this IOAPIC */
	int gsi_end;	/* last GSI handled by this IOAPIC (inclusive) */
	/* Bitmap of pins already programmed (see mp_register_gsi()). */
	unsigned long pin_programmed[BITS_TO_LONGS(MP_MAX_IOAPIC_PIN + 1)];
} mp_ioapic_routing[MAX_IO_APICS];
841
842
mp_find_ioapic(int gsi)843 static int mp_find_ioapic (
844 int gsi)
845 {
846 unsigned int i;
847
848 /* Find the IOAPIC that manages this GSI. */
849 for (i = 0; i < nr_ioapics; i++) {
850 if ((gsi >= mp_ioapic_routing[i].gsi_base)
851 && (gsi <= mp_ioapic_routing[i].gsi_end))
852 return i;
853 }
854
855 printk(KERN_ERR "ERROR: Unable to locate IOAPIC for GSI %d\n", gsi);
856
857 return -1;
858 }
859
860
mp_register_ioapic(u8 id,u32 address,u32 gsi_base)861 void __init mp_register_ioapic (
862 u8 id,
863 u32 address,
864 u32 gsi_base)
865 {
866 int idx = 0;
867 int tmpid;
868
869 if (nr_ioapics >= MAX_IO_APICS) {
870 printk(KERN_ERR "ERROR: Max # of I/O APICs (%d) exceeded "
871 "(found %d)\n", MAX_IO_APICS, nr_ioapics);
872 panic("Recompile kernel with bigger MAX_IO_APICS\n");
873 }
874 if (!address) {
875 printk(KERN_ERR "WARNING: Bogus (zero) I/O APIC address"
876 " found in MADT table, skipping!\n");
877 return;
878 }
879
880 idx = nr_ioapics++;
881
882 mp_ioapics[idx].mpc_type = MP_IOAPIC;
883 mp_ioapics[idx].mpc_flags = MPC_APIC_USABLE;
884 mp_ioapics[idx].mpc_apicaddr = address;
885
886 set_fixmap_nocache(FIX_IO_APIC_BASE_0 + idx, address);
887 if ((boot_cpu_data.x86_vendor == X86_VENDOR_INTEL)
888 && !APIC_XAPIC(apic_version[boot_cpu_physical_apicid]))
889 tmpid = io_apic_get_unique_id(idx, id);
890 else
891 tmpid = id;
892 if (tmpid == -1) {
893 nr_ioapics--;
894 return;
895 }
896 mp_ioapics[idx].mpc_apicid = tmpid;
897 mp_ioapics[idx].mpc_apicver = io_apic_get_version(idx);
898
899 /*
900 * Build basic GSI lookup table to facilitate gsi->io_apic lookups
901 * and to prevent reprogramming of IOAPIC pins (PCI GSIs).
902 */
903 mp_ioapic_routing[idx].gsi_base = gsi_base;
904 mp_ioapic_routing[idx].gsi_end = gsi_base +
905 io_apic_get_redir_entries(idx);
906
907 printk("IOAPIC[%d]: apic_id %d, version %d, address %#x, "
908 "GSI %d-%d\n", idx, mp_ioapics[idx].mpc_apicid,
909 mp_ioapics[idx].mpc_apicver, mp_ioapics[idx].mpc_apicaddr,
910 mp_ioapic_routing[idx].gsi_base,
911 mp_ioapic_routing[idx].gsi_end);
912
913 return;
914 }
915
highest_gsi(void)916 unsigned __init highest_gsi(void)
917 {
918 unsigned x, res = 0;
919 for (x = 0; x < nr_ioapics; x++)
920 if (res < mp_ioapic_routing[x].gsi_end)
921 res = mp_ioapic_routing[x].gsi_end;
922 return res;
923 }
924
/* First GSI routed by the given IO-APIC (index into mp_ioapics[]). */
unsigned int io_apic_gsi_base(unsigned int apic)
{
	return mp_ioapic_routing[apic].gsi_base;
}
929
mp_override_legacy_irq(u8 bus_irq,u8 polarity,u8 trigger,u32 gsi)930 void __init mp_override_legacy_irq (
931 u8 bus_irq,
932 u8 polarity,
933 u8 trigger,
934 u32 gsi)
935 {
936 struct mpc_config_intsrc intsrc;
937 int ioapic = -1;
938 int pin = -1;
939
940 /*
941 * Convert 'gsi' to 'ioapic.pin'.
942 */
943 ioapic = mp_find_ioapic(gsi);
944 if (ioapic < 0)
945 return;
946 pin = gsi - mp_ioapic_routing[ioapic].gsi_base;
947
948 /*
949 * TBD: This check is for faulty timer entries, where the override
950 * erroneously sets the trigger to level, resulting in a HUGE
951 * increase of timer interrupts!
952 */
953 if ((bus_irq == 0) && (trigger == 3))
954 trigger = 1;
955
956 intsrc.mpc_type = MP_INTSRC;
957 intsrc.mpc_irqtype = mp_INT;
958 intsrc.mpc_irqflag = (trigger << 2) | polarity;
959 intsrc.mpc_srcbus = MP_ISA_BUS;
960 intsrc.mpc_srcbusirq = bus_irq; /* IRQ */
961 intsrc.mpc_dstapic = mp_ioapics[ioapic].mpc_apicid; /* APIC ID */
962 intsrc.mpc_dstirq = pin; /* INTIN# */
963
964 Dprintk("Int: type %d, pol %d, trig %d, bus %d, irq %d, %d-%d\n",
965 intsrc.mpc_irqtype, intsrc.mpc_irqflag & 3,
966 (intsrc.mpc_irqflag >> 2) & 3, intsrc.mpc_srcbus,
967 intsrc.mpc_srcbusirq, intsrc.mpc_dstapic, intsrc.mpc_dstirq);
968
969 mp_irqs[mp_irq_entries] = intsrc;
970 if (++mp_irq_entries == MAX_IRQ_SOURCES)
971 panic("Max # of irq sources exceeded\n");
972
973 return;
974 }
975
/*
 * Fabricate MP interrupt source entries for the legacy ISA IRQs (0-15)
 * when booting from ACPI: identity-map each ISA IRQ to an IO-APIC pin
 * unless a MADT interrupt source override already claimed either the
 * IRQ or the pin.
 */
void __init mp_config_acpi_legacy_irqs (void)
{
	struct mpc_config_intsrc intsrc;
	int i = 0;
	int ioapic = -1;

	/*
	 * Fabricate the legacy ISA bus (bus #31).
	 */
	mp_bus_id_to_type[MP_ISA_BUS] = MP_BUS_ISA;
	Dprintk("Bus #%d is ISA\n", MP_ISA_BUS);

	/*
	 * Locate the IOAPIC that manages the ISA IRQs (0-15).
	 */
	ioapic = mp_find_ioapic(0);
	if (ioapic < 0)
		return;

	intsrc.mpc_type = MP_INTSRC;
	intsrc.mpc_irqflag = 0;					/* Conforming */
	intsrc.mpc_srcbus = MP_ISA_BUS;
	intsrc.mpc_dstapic = mp_ioapics[ioapic].mpc_apicid;

	/*
	 * Use the default configuration for the IRQs 0-15.  Unless
	 * overridden by (MADT) interrupt source override entries.
	 */
	for (i = 0; platform_legacy_irq(i); i++) {
		int idx;

		for (idx = 0; idx < mp_irq_entries; idx++) {
			struct mpc_config_intsrc *irq = mp_irqs + idx;

			/* Do we already have a mapping for this ISA IRQ? */
			if (irq->mpc_srcbus == MP_ISA_BUS && irq->mpc_srcbusirq == i)
				break;

			/* Do we already have a mapping for this IOAPIC pin */
			if ((irq->mpc_dstapic == intsrc.mpc_dstapic) &&
			    (irq->mpc_dstirq == i))
				break;
		}

		if (idx != mp_irq_entries) {
			printk(KERN_DEBUG "ACPI: IRQ%d used by override.\n", i);
			continue;			/* IRQ already used */
		}

		intsrc.mpc_irqtype = mp_INT;
		intsrc.mpc_srcbusirq = i;		   /* Identity mapped */
		intsrc.mpc_dstirq = i;

		Dprintk("Int: type %d, pol %d, trig %d, bus %d, irq %d, "
			"%d-%d\n", intsrc.mpc_irqtype, intsrc.mpc_irqflag & 3,
			(intsrc.mpc_irqflag >> 2) & 3, intsrc.mpc_srcbus,
			intsrc.mpc_srcbusirq, intsrc.mpc_dstapic,
			intsrc.mpc_dstirq);

		mp_irqs[mp_irq_entries] = intsrc;
		if (++mp_irq_entries == MAX_IRQ_SOURCES)
			panic("Max # of irq sources exceeded\n");
	}
}
1040
/*
 * Route a GSI through an IO-APIC with the given trigger mode and
 * polarity.  Returns 0/positive on success, -EINVAL for bad input,
 * -EEXIST if the IRQ is already in use or the pin already programmed.
 * On machines without IO-APICs, fall back to programming the ELCR.
 */
int mp_register_gsi (u32 gsi, int triggering, int polarity)
{
	int ioapic;
	int ioapic_pin;
	struct irq_desc * desc;
	unsigned long flags;

	/*
	 * Mapping between Global System Interrupts, which
	 * represent all possible interrupts, and IRQs
	 * assigned to actual devices.
	 */

#ifdef CONFIG_ACPI_BUS
	/* Don't set up the ACPI SCI because it's already set up */
	if (acpi_fadt.sci_int == gsi)
		return gsi;
#endif

	if (!nr_ioapics) {
		/* No IO-APIC: set the trigger mode in the 8259A's ELCR. */
		unsigned int port = 0x4d0 + (gsi >> 3);
		u8 val;

		if (!platform_legacy_irq(gsi))
			return -EINVAL;
		val = inb(port);
		if (triggering)
			val |= 1 << (gsi & 7);
		else
			val &= ~(1 << (gsi & 7));
		outb(val, port);
		return 0;
	}

	ioapic = mp_find_ioapic(gsi);
	if (ioapic < 0) {
		printk(KERN_WARNING "No IOAPIC for GSI %u\n", gsi);
		return -EINVAL;
	}

	ioapic_pin = gsi - mp_ioapic_routing[ioapic].gsi_base;

	/* Refuse to touch an IRQ something else is already using. */
	desc = irq_to_desc(gsi);
	spin_lock_irqsave(&desc->lock, flags);
	if (!(desc->status & IRQ_DISABLED) && desc->handler != &no_irq_type) {
		spin_unlock_irqrestore(&desc->lock, flags);
		return -EEXIST;
	}
	spin_unlock_irqrestore(&desc->lock, flags);

	/*
	 * Avoid pin reprogramming.  PRTs typically include entries
	 * with redundant pin->gsi mappings (but unique PCI devices);
	 * we only program the IOAPIC on the first.
	 */
	if (ioapic_pin > MP_MAX_IOAPIC_PIN) {
		printk(KERN_ERR "Invalid reference to IOAPIC pin "
		       "%d-%d\n", mp_ioapics[ioapic].mpc_apicid,
		       ioapic_pin);
		return -EINVAL;
	}
	if (test_and_set_bit(ioapic_pin,
			     mp_ioapic_routing[ioapic].pin_programmed)) {
		Dprintk(KERN_DEBUG "Pin %d-%d already programmed\n",
			mp_ioapics[ioapic].mpc_apicid, ioapic_pin);
		return -EEXIST;
	}

	return io_apic_set_pci_routing(ioapic, ioapic_pin, gsi,
				       triggering, polarity);
}
1112
1113 #endif /* CONFIG_ACPI */
1114