/*
 * boot.c - Architecture-Specific Low-Level ACPI Boot Support
 *
 * Copyright (C) 2001, 2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com>
 * Copyright (C) 2001 Jun Nakajima <jun.nakajima@intel.com>
 *
 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; If not, see <http://www.gnu.org/licenses/>.
 *
 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 */

#include <xen/errno.h>
#include <xen/init.h>
#include <xen/acpi.h>
#include <xen/irq.h>
#include <xen/mm.h>
#include <xen/param.h>
#include <xen/dmi.h>
#include <asm/fixmap.h>
#include <asm/page.h>
#include <asm/apic.h>
#include <asm/io_apic.h>
#include <asm/io.h>
#include <asm/mpspec.h>
#include <asm/processor.h>
#ifdef CONFIG_HPET_TIMER
#include <asm/hpet.h> /* for hpet_address */
#endif
#include <mach_apic.h>
#include <mach_mpparse.h>

#define PREFIX "ACPI: "

bool __initdata acpi_noirq;	/* skip ACPI IRQ initialization */
bool __initdata acpi_ht = true;	/* enable HT */

bool __initdata acpi_lapic;
bool __initdata acpi_ioapic;

/* acpi_skip_timer_override: Skip IRQ0 overrides. */
static bool __initdata acpi_skip_timer_override;
boolean_param("acpi_skip_timer_override", acpi_skip_timer_override);

static u64 acpi_lapic_addr __initdata = APIC_DEFAULT_PHYS_BASE;

/* --------------------------------------------------------------------------
			      Boot-time Configuration
   -------------------------------------------------------------------------- */

static int __init acpi_parse_madt(struct acpi_table_header *table)
{
	struct acpi_table_madt *madt;

	madt = (struct acpi_table_madt *)table;

	if (madt->address) {
		acpi_lapic_addr = (u64) madt->address;

		printk(KERN_DEBUG PREFIX "Local APIC address 0x%08x\n",
		       madt->address);
	}

	acpi_madt_oem_check(madt->header.oem_id, madt->header.oem_table_id);

	return 0;
}

static int __init
acpi_parse_x2apic(struct acpi_subtable_header *header, const unsigned long end)
{
	struct acpi_madt_local_x2apic *processor =
		container_of(header, struct acpi_madt_local_x2apic, header);
	bool enabled = false, log = false;

	if (BAD_MADT_ENTRY(processor, end))
		return -EINVAL;

	if ((processor->lapic_flags & ACPI_MADT_ENABLED) ||
	    processor->local_apic_id != 0xffffffff || opt_cpu_info) {
		acpi_table_print_madt_entry(header);
		log = true;
	}

	/* Record the local APIC ID only when it is enabled and fits. */
	if (processor->local_apic_id >= MAX_APICS ||
	    processor->uid >= MAX_MADT_ENTRIES) {
		if (log)
			printk("%sAPIC ID %#x and/or ACPI ID %#x beyond limit"
			       " - processor ignored\n",
			       processor->lapic_flags & ACPI_MADT_ENABLED
			       ? KERN_WARNING "WARNING: " : KERN_INFO,
			       processor->local_apic_id, processor->uid);
		unaccounted_cpus = true;
		/*
		 * Must not return an error here, to prevent
		 * acpi_table_parse_entries() from terminating early.
		 */
		return 0 /* -ENOSPC */;
	}
	if (processor->lapic_flags & ACPI_MADT_ENABLED) {
		x86_acpiid_to_apicid[processor->uid] =
			processor->local_apic_id;
		enabled = true;
	}

	/*
	 * We need to register disabled CPUs as well, so that they can be
	 * counted. This lets us size cpus_possible_map more accurately and
	 * avoid preallocating memory for all NR_CPUS when CPU hotplug is
	 * used.
	 */
	mp_register_lapic(processor->local_apic_id, enabled, 0);

	return 0;
}

static int __init
acpi_parse_lapic(struct acpi_subtable_header *header, const unsigned long end)
{
	struct acpi_madt_local_apic *processor =
		container_of(header, struct acpi_madt_local_apic, header);
	bool enabled = false;

	if (BAD_MADT_ENTRY(processor, end))
		return -EINVAL;

	if ((processor->lapic_flags & ACPI_MADT_ENABLED) ||
	    processor->id != 0xff || opt_cpu_info)
		acpi_table_print_madt_entry(header);

	/* Record the local APIC ID only when it is enabled. */
	if (processor->lapic_flags & ACPI_MADT_ENABLED) {
		x86_acpiid_to_apicid[processor->processor_id] = processor->id;
		enabled = true;
	}

	/*
	 * We need to register disabled CPUs as well, so that they can be
	 * counted. This lets us size cpus_possible_map more accurately and
	 * avoid preallocating memory for all NR_CPUS when CPU hotplug is
	 * used.
	 */
	mp_register_lapic(processor->id, enabled, 0);

	return 0;
}

static int __init
acpi_parse_lapic_addr_ovr(struct acpi_subtable_header *header,
			  const unsigned long end)
{
	struct acpi_madt_local_apic_override *lapic_addr_ovr =
		container_of(header, struct acpi_madt_local_apic_override,
			     header);

	if (BAD_MADT_ENTRY(lapic_addr_ovr, end))
		return -EINVAL;

	acpi_lapic_addr = lapic_addr_ovr->address;

	return 0;
}

static int __init
acpi_parse_x2apic_nmi(struct acpi_subtable_header *header,
		      const unsigned long end)
{
	struct acpi_madt_local_x2apic_nmi *x2apic_nmi =
		container_of(header, struct acpi_madt_local_x2apic_nmi,
			     header);

	if (BAD_MADT_ENTRY(x2apic_nmi, end))
		return -EINVAL;

	acpi_table_print_madt_entry(header);

	if (x2apic_nmi->lint != 1)
		printk(KERN_WARNING PREFIX "NMI not connected to LINT 1!\n");

	return 0;
}

static int __init
acpi_parse_lapic_nmi(struct acpi_subtable_header *header, const unsigned long end)
{
	struct acpi_madt_local_apic_nmi *lapic_nmi =
		container_of(header, struct acpi_madt_local_apic_nmi, header);

	if (BAD_MADT_ENTRY(lapic_nmi, end))
		return -EINVAL;

	acpi_table_print_madt_entry(header);

	if (lapic_nmi->lint != 1)
		printk(KERN_WARNING PREFIX "NMI not connected to LINT 1!\n");

	return 0;
}

static int __init
acpi_parse_ioapic(struct acpi_subtable_header *header, const unsigned long end)
{
	struct acpi_madt_io_apic *ioapic =
		container_of(header, struct acpi_madt_io_apic, header);

	if (BAD_MADT_ENTRY(ioapic, end))
		return -EINVAL;

	acpi_table_print_madt_entry(header);

	mp_register_ioapic(ioapic->id,
			   ioapic->address, ioapic->global_irq_base);

	return 0;
}

static int __init
acpi_parse_int_src_ovr(struct acpi_subtable_header *header,
		       const unsigned long end)
{
	struct acpi_madt_interrupt_override *intsrc =
		container_of(header, struct acpi_madt_interrupt_override,
			     header);

	if (BAD_MADT_ENTRY(intsrc, end))
		return -EINVAL;

	acpi_table_print_madt_entry(header);

	if (acpi_skip_timer_override &&
	    intsrc->source_irq == 0 && intsrc->global_irq == 2) {
		printk(PREFIX "BIOS IRQ0 pin2 override ignored.\n");
		return 0;
	}

	mp_override_legacy_irq(intsrc->source_irq,
			       ACPI_MADT_GET_POLARITY(intsrc->inti_flags),
			       ACPI_MADT_GET_TRIGGER(intsrc->inti_flags),
			       intsrc->global_irq);

	return 0;
}

static int __init
acpi_parse_nmi_src(struct acpi_subtable_header *header, const unsigned long end)
{
	struct acpi_madt_nmi_source *nmi_src =
		container_of(header, struct acpi_madt_nmi_source, header);

	if (BAD_MADT_ENTRY(nmi_src, end))
		return -EINVAL;

	acpi_table_print_madt_entry(header);

	/* TBD: Support NMI source entries? */

	return 0;
}

#ifdef CONFIG_HPET_TIMER

static int __init acpi_parse_hpet(struct acpi_table_header *table)
{
	struct acpi_table_hpet *hpet_tbl = (struct acpi_table_hpet *)table;

	if (hpet_tbl->address.space_id != ACPI_ADR_SPACE_SYSTEM_MEMORY) {
		printk(KERN_WARNING PREFIX "HPET timers must be located in "
		       "memory.\n");
		return -1;
	}

	/*
	 * Some BIOSes provide multiple HPET tables. Sometimes this is a BIOS
	 * bug; the intended way of supporting more than 1 HPET is to use AML
	 * entries.
	 *
	 * If someone finds a real system with two genuine HPET tables, perhaps
	 * they will be kind and implement support. Until then however, warn
	 * that we will ignore subsequent tables.
	 */
	if (hpet_address)
	{
		printk(KERN_WARNING PREFIX
		       "Found multiple HPET tables. Only using first\n");
		return -1;
	}

	hpet_address = hpet_tbl->address.address;
	hpet_blockid = hpet_tbl->sequence;
	hpet_flags = hpet_tbl->flags;
	printk(KERN_INFO PREFIX "HPET id: %#x base: %#lx\n",
	       hpet_tbl->id, hpet_address);

	return 0;
}
#else
#define acpi_parse_hpet NULL
#endif

static int __init acpi_invalidate_bgrt(struct acpi_table_header *table)
{
	struct acpi_table_bgrt *bgrt_tbl =
		container_of(table, struct acpi_table_bgrt, header);

	if (table->length < sizeof(*bgrt_tbl))
		return -1;

	if (bgrt_tbl->version == 1 && bgrt_tbl->image_address
	    && !page_is_ram_type(PFN_DOWN(bgrt_tbl->image_address),
				 RAM_TYPE_CONVENTIONAL))
		return 0;

	printk(KERN_INFO PREFIX "BGRT: invalidating v%d image at %#"PRIx64"\n",
	       bgrt_tbl->version, bgrt_tbl->image_address);
	bgrt_tbl->image_address = 0;
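	/*
	 * Per the ACPI BGRT definition, bit 0 of the status field is the
	 * "image displayed" flag; clearing it marks the image as no longer
	 * valid/shown.
	 */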
	bgrt_tbl->status &= ~1;

	return 0;
}

#define acpi_fadt_copy_address(dst, src, len) do {			\
	if (fadt->header.revision >= FADT2_REVISION_ID &&		\
	    fadt->header.length >= ACPI_FADT_V2_SIZE)			\
		acpi_sinfo.dst##_blk = fadt->x##src##_block;		\
	if (!acpi_sinfo.dst##_blk.address) {				\
		acpi_sinfo.dst##_blk.address = fadt->src##_block;	\
		acpi_sinfo.dst##_blk.space_id = ACPI_ADR_SPACE_SYSTEM_IO; \
		acpi_sinfo.dst##_blk.bit_width = fadt->len##_length << 3; \
		acpi_sinfo.dst##_blk.bit_offset = 0;			\
		acpi_sinfo.dst##_blk.access_width = fadt->len##_length;   \
	}								\
} while (0)
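
/*
 * A sketch of what this expands to for the first use below,
 * acpi_fadt_copy_address(pm1a_cnt, pm1a_control, pm1_control):
 *
 *	if (fadt->header.revision >= FADT2_REVISION_ID &&
 *	    fadt->header.length >= ACPI_FADT_V2_SIZE)
 *		acpi_sinfo.pm1a_cnt_blk = fadt->xpm1a_control_block;
 *	if (!acpi_sinfo.pm1a_cnt_blk.address) {
 *		acpi_sinfo.pm1a_cnt_blk.address = fadt->pm1a_control_block;
 *		acpi_sinfo.pm1a_cnt_blk.bit_width = fadt->pm1_control_length << 3;
 *		...
 *	}
 *
 * i.e. prefer the extended ("X") GAS-format register block and fall back
 * to the legacy FADT system-I/O port description when the extended entry
 * is absent or empty.
 */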

/* Get pm1x_cnt and pm1x_evt information for ACPI sleep */
static void __init
acpi_fadt_parse_sleep_info(struct acpi_table_fadt *fadt)
{
	struct acpi_table_facs *facs = NULL;
	uint64_t facs_pa;

	if (fadt->header.revision >= 5 &&
	    fadt->header.length >= ACPI_FADT_V5_SIZE) {
		acpi_sinfo.sleep_control = fadt->sleep_control;
		acpi_sinfo.sleep_status = fadt->sleep_status;

		printk(KERN_INFO PREFIX
		       "v5 SLEEP INFO: control[%d:%"PRIx64"],"
		       " status[%d:%"PRIx64"]\n",
		       acpi_sinfo.sleep_control.space_id,
		       acpi_sinfo.sleep_control.address,
		       acpi_sinfo.sleep_status.space_id,
		       acpi_sinfo.sleep_status.address);

		if ((fadt->sleep_control.address &&
		     (fadt->sleep_control.bit_offset ||
		      fadt->sleep_control.bit_width !=
		      fadt->sleep_control.access_width * 8)) ||
		    (fadt->sleep_status.address &&
		     (fadt->sleep_status.bit_offset ||
		      fadt->sleep_status.bit_width !=
		      fadt->sleep_status.access_width * 8))) {
			printk(KERN_WARNING PREFIX
			       "Invalid sleep control/status register data:"
			       " %#x:%#x:%#x %#x:%#x:%#x\n",
			       fadt->sleep_control.bit_offset,
			       fadt->sleep_control.bit_width,
			       fadt->sleep_control.access_width,
			       fadt->sleep_status.bit_offset,
			       fadt->sleep_status.bit_width,
			       fadt->sleep_status.access_width);
			fadt->sleep_control.address = 0;
			fadt->sleep_status.address = 0;
		}
	}

	if (fadt->flags & ACPI_FADT_HW_REDUCED)
		goto bad;

	acpi_fadt_copy_address(pm1a_cnt, pm1a_control, pm1_control);
	acpi_fadt_copy_address(pm1b_cnt, pm1b_control, pm1_control);
	acpi_fadt_copy_address(pm1a_evt, pm1a_event, pm1_event);
	acpi_fadt_copy_address(pm1b_evt, pm1b_event, pm1_event);

	printk(KERN_INFO PREFIX
	       "SLEEP INFO: pm1x_cnt[%d:%"PRIx64",%d:%"PRIx64"], "
	       "pm1x_evt[%d:%"PRIx64",%d:%"PRIx64"]\n",
	       acpi_sinfo.pm1a_cnt_blk.space_id,
	       acpi_sinfo.pm1a_cnt_blk.address,
	       acpi_sinfo.pm1b_cnt_blk.space_id,
	       acpi_sinfo.pm1b_cnt_blk.address,
	       acpi_sinfo.pm1a_evt_blk.space_id,
	       acpi_sinfo.pm1a_evt_blk.address,
	       acpi_sinfo.pm1b_evt_blk.space_id,
	       acpi_sinfo.pm1b_evt_blk.address);

	/* Now FACS... */
	facs_pa = ((fadt->header.revision >= FADT2_REVISION_ID)
		   ? fadt->Xfacs : (uint64_t)fadt->facs);
	if (fadt->facs && ((uint64_t)fadt->facs != facs_pa)) {
		printk(KERN_WARNING PREFIX
		       "32/64X FACS address mismatch in FADT - "
		       "%08x/%016"PRIx64", using 32\n",
		       fadt->facs, facs_pa);
		facs_pa = (uint64_t)fadt->facs;
	}
	if (!facs_pa)
		goto bad;

	facs = (struct acpi_table_facs *)
		__acpi_map_table(facs_pa, sizeof(struct acpi_table_facs));
	if (!facs)
		goto bad;

	if (strncmp(facs->signature, "FACS", 4)) {
		printk(KERN_ERR PREFIX "Invalid FACS signature %.4s\n",
		       facs->signature);
		goto bad;
	}

	if (facs->length < 24) {
		printk(KERN_ERR PREFIX "Invalid FACS table length: %#x\n",
		       facs->length);
		goto bad;
	}

	if (facs->length < 64)
		printk(KERN_WARNING PREFIX
		       "FACS is shorter than the ACPI spec allows: %#x\n",
		       facs->length);

	acpi_sinfo.wakeup_vector = facs_pa +
		offsetof(struct acpi_table_facs, firmware_waking_vector);
	acpi_sinfo.vector_width = 32;

	printk(KERN_INFO PREFIX
	       " wakeup_vec[%"PRIx64"], vec_size[%x]\n",
	       acpi_sinfo.wakeup_vector, acpi_sinfo.vector_width);
	return;
bad:
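	/*
	 * Zero out acpi_sinfo while preserving any sleep_control and
	 * sleep_status values already taken from a v5 FADT above: the
	 * first memset() clears everything up to sleep_control, the
	 * second clears everything after sleep_status.
	 */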
	memset(&acpi_sinfo, 0,
	       offsetof(struct acpi_sleep_info, sleep_control));
	memset(&acpi_sinfo.sleep_status + 1, 0,
	       (long)(&acpi_sinfo + 1) - (long)(&acpi_sinfo.sleep_status + 1));
}

static int __init acpi_parse_fadt(struct acpi_table_header *table)
{
	struct acpi_table_fadt *fadt = (struct acpi_table_fadt *)table;

#ifdef CONFIG_ACPI_INTERPRETER
	/* initialize sci_int early for INT_SRC_OVR MADT parsing */
	acpi_fadt.sci_int = fadt->sci_int;

	/* initialize rev and apic_phys_dest_mode for x86_64 genapic */
	acpi_fadt.revision = fadt->revision;
	acpi_fadt.force_apic_physical_destination_mode =
		fadt->force_apic_physical_destination_mode;
#endif

#ifdef CONFIG_X86_PM_TIMER
	/* detect the location of the ACPI PM Timer */
	if (fadt->header.revision >= FADT2_REVISION_ID &&
	    fadt->xpm_timer_block.space_id == ACPI_ADR_SPACE_SYSTEM_IO) {
		/* FADT rev. 2 */
		if (fadt->xpm_timer_block.access_width != 0 &&
		    ACPI_ACCESS_BIT_WIDTH(fadt->xpm_timer_block.access_width) != 32)
			printk(KERN_WARNING PREFIX "PM-Timer has invalid access width(%u)\n",
			       fadt->xpm_timer_block.access_width);
		else if (fadt->xpm_timer_block.bit_offset != 0)
			printk(KERN_WARNING PREFIX "PM-Timer has invalid bit offset(%u)\n",
			       fadt->xpm_timer_block.bit_offset);
		else {
			pmtmr_ioport = fadt->xpm_timer_block.address;
			pmtmr_width = fadt->xpm_timer_block.bit_width;
		}
	}
	/*
	 * "X" fields are optional extensions to the original V1.0
	 * fields, so we must selectively expand V1.0 fields if the
	 * corresponding X field is zero.
	 */
	if (!pmtmr_ioport) {
		pmtmr_ioport = fadt->pm_timer_block;
		pmtmr_width = fadt->pm_timer_length == 4 ? 32 : 0;
	}
	if (pmtmr_width < 32 && (fadt->flags & ACPI_FADT_32BIT_TIMER))
		printk(KERN_WARNING PREFIX "PM-Timer is too short\n");
	if (pmtmr_width > 24 && !(fadt->flags & ACPI_FADT_32BIT_TIMER))
		pmtmr_width = 24;
	if (pmtmr_ioport)
		printk(KERN_INFO PREFIX "PM-Timer IO Port: %#x (%u bits)\n",
		       pmtmr_ioport, pmtmr_width);
#endif

	acpi_smi_cmd = fadt->smi_command;
	acpi_enable_value = fadt->acpi_enable;
	acpi_disable_value = fadt->acpi_disable;

	acpi_fadt_parse_sleep_info(fadt);

	return 0;
}

/*
 * Parse LAPIC entries in MADT
 * returns 0 on success, < 0 on error
 */
static int __init acpi_parse_madt_lapic_entries(void)
{
	int count, x2count;

	if (!cpu_has_apic)
		return -ENODEV;

	/*
	 * Note that the LAPIC address is obtained from the MADT (32-bit value)
	 * and (optionally) overridden by a LAPIC_ADDR_OVR entry (64-bit value).
	 */

	count =
	    acpi_table_parse_madt(ACPI_MADT_TYPE_LOCAL_APIC_OVERRIDE,
				  acpi_parse_lapic_addr_ovr, 0);
	if (count < 0) {
		printk(KERN_ERR PREFIX
		       "Error parsing LAPIC address override entry\n");
		return count;
	}

	mp_register_lapic_address(acpi_lapic_addr);

	BUILD_BUG_ON(MAX_APICS != MAX_LOCAL_APIC);
	count = acpi_table_parse_madt(ACPI_MADT_TYPE_LOCAL_APIC,
				      acpi_parse_lapic, MAX_APICS);
	x2count = acpi_table_parse_madt(ACPI_MADT_TYPE_LOCAL_X2APIC,
					acpi_parse_x2apic, MAX_APICS);
	if (!count && !x2count) {
		printk(KERN_ERR PREFIX "No LAPIC entries present\n");
		/* TBD: Cleanup to allow fallback to MPS */
		return -ENODEV;
	} else if (count < 0 || x2count < 0) {
		printk(KERN_ERR PREFIX "Error parsing LAPIC entry\n");
		/* TBD: Cleanup to allow fallback to MPS */
		return count < 0 ? count : x2count;
	}

	count =
	    acpi_table_parse_madt(ACPI_MADT_TYPE_LOCAL_APIC_NMI,
				  acpi_parse_lapic_nmi, 0);
	x2count =
	    acpi_table_parse_madt(ACPI_MADT_TYPE_LOCAL_X2APIC_NMI,
				  acpi_parse_x2apic_nmi, 0);
	if (count < 0 || x2count < 0) {
		printk(KERN_ERR PREFIX "Error parsing LAPIC NMI entry\n");
		/* TBD: Cleanup to allow fallback to MPS */
		return count < 0 ? count : x2count;
	}
	return 0;
}

/*
 * Parse IOAPIC related entries in MADT
 * returns 0 on success, < 0 on error
 */
static int __init acpi_parse_madt_ioapic_entries(void)
{
	int count;

	/*
	 * The ACPI interpreter is required to complete interrupt setup,
	 * so if it is off, don't enumerate the IO-APICs with ACPI.
	 * If MPS is present, it will handle them;
	 * otherwise the system will stay in PIC mode.
	 */
	if (acpi_disabled || acpi_noirq) {
		return -ENODEV;
	}

	if (!cpu_has_apic)
		return -ENODEV;

	/*
	 * if "noapic" boot option, don't look for IO-APICs
	 */
	if (skip_ioapic_setup) {
		printk(KERN_INFO PREFIX "Skipping IOAPIC probe "
		       "due to 'noapic' option.\n");
		return -ENODEV;
	}

	count =
	    acpi_table_parse_madt(ACPI_MADT_TYPE_IO_APIC, acpi_parse_ioapic,
				  MAX_IO_APICS);
	if (!count) {
		printk(KERN_ERR PREFIX "No IOAPIC entries present\n");
		return -ENODEV;
	} else if (count < 0) {
		printk(KERN_ERR PREFIX "Error parsing IOAPIC entry\n");
		return count;
	}

	count =
	    acpi_table_parse_madt(ACPI_MADT_TYPE_INTERRUPT_OVERRIDE,
				  acpi_parse_int_src_ovr, MAX_IRQ_SOURCES);
	if (count < 0) {
		printk(KERN_ERR PREFIX
		       "Error parsing interrupt source override entry\n");
		/* TBD: Cleanup to allow fallback to MPS */
		return count;
	}

	/* Fill in identity legacy mappings where there is no override. */
	mp_config_acpi_legacy_irqs();

	count =
	    acpi_table_parse_madt(ACPI_MADT_TYPE_NMI_SOURCE,
				  acpi_parse_nmi_src, MAX_IRQ_SOURCES);
	if (count < 0) {
		printk(KERN_ERR PREFIX "Error parsing NMI SRC entry\n");
		/* TBD: Cleanup to allow fallback to MPS */
		return count;
	}

	return 0;
}

static void __init acpi_process_madt(void)
{
	int error;

	if (!acpi_table_parse(ACPI_SIG_MADT, acpi_parse_madt)) {

		/*
		 * Parse MADT LAPIC entries
		 */
		error = acpi_parse_madt_lapic_entries();
		if (!error) {
			acpi_lapic = true;
			generic_bigsmp_probe();

			/*
			 * Parse MADT IO-APIC entries
			 */
			error = acpi_parse_madt_ioapic_entries();
			if (!error) {
				acpi_ioapic = true;

				smp_found_config = true;
				clustered_apic_check();
			}
		}
		if (error == -EINVAL) {
			/*
			 * Dell Precision Workstation 410, 610 come here.
			 */
			printk(KERN_ERR PREFIX
			       "Invalid BIOS MADT, disabling ACPI\n");
			disable_acpi();
		}
	}
}

/*
 * acpi_boot_table_init() and acpi_boot_init()
 *  called from setup_arch(), always.
 *	1. checksums all tables
 *	2. enumerates lapics
 *	3. enumerates io-apics
 *
 * acpi_table_init() is separate to allow reading SRAT without
 * other side effects.
 *
 * side effects of acpi_boot_init:
 *	acpi_lapic = true if LAPIC found
 *	acpi_ioapic = true if IOAPIC found
 *	if (acpi_lapic && acpi_ioapic) smp_found_config = true;
 *	...
 *
 * return value: (currently ignored)
 *	0: success
 *	!0: failure
 */

int __init acpi_boot_table_init(void)
{
	int error;

	/*
	 * If acpi_disabled, bail out
	 * One exception: acpi=ht continues far enough to enumerate LAPICs
	 */
	if (acpi_disabled && !acpi_ht)
		return 1;

	/*
	 * Initialize the ACPI boot-time table parser.
	 */
	error = acpi_table_init();
	if (error) {
		disable_acpi();
		return error;
	}

	return 0;
}

int __init acpi_boot_init(void)
{
	/*
	 * If acpi_disabled, bail out
	 * One exception: acpi=ht continues far enough to enumerate LAPICs
	 */
	if (acpi_disabled && !acpi_ht)
		return 1;

	/*
	 * set sci_int and PM timer address
	 */
	acpi_table_parse(ACPI_SIG_FADT, acpi_parse_fadt);

	/*
	 * Process the Multiple APIC Description Table (MADT), if present
	 */
	acpi_process_madt();

	acpi_table_parse(ACPI_SIG_HPET, acpi_parse_hpet);

	acpi_mmcfg_init();

	acpi_iommu_init();

	erst_init();

	acpi_hest_init();

	acpi_table_parse(ACPI_SIG_BGRT, acpi_invalidate_bgrt);

	return 0;
}