/*
 * Copyright (c) 2006, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; If not, see <http://www.gnu.org/licenses/>.
 *
 * Copyright (C) Ashok Raj <ashok.raj@intel.com>
 * Copyright (C) Shaohua Li <shaohua.li@intel.com>
 * Copyright (C) Allen Kay <allen.m.kay@intel.com> - adapted to xen
 */

#include <xen/init.h>
#include <xen/bitmap.h>
#include <xen/errno.h>
#include <xen/kernel.h>
#include <xen/acpi.h>
#include <xen/mm.h>
#include <xen/param.h>
#include <xen/xmalloc.h>
#include <xen/pci.h>
#include <xen/pci_regs.h>
#include <asm/atomic.h>
#include <asm/e820.h>
#include <asm/string.h>
#include "dmar.h"
#include "iommu.h"
#include "extern.h"
#include "vtd.h"

#undef PREFIX
#define PREFIX VTDPREFIX "ACPI DMAR:"
#define DEBUG

#define MIN_SCOPE_LEN (sizeof(struct acpi_dmar_device_scope) + \
                       sizeof(struct acpi_dmar_pci_path))

LIST_HEAD_READ_MOSTLY(acpi_drhd_units);
LIST_HEAD_READ_MOSTLY(acpi_rmrr_units);
static LIST_HEAD_READ_MOSTLY(acpi_atsr_units);
static LIST_HEAD_READ_MOSTLY(acpi_rhsa_units);

static struct acpi_table_header *__read_mostly dmar_table;
static int __read_mostly dmar_flags;
static u64 __read_mostly igd_drhd_address;

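/*
 * Mark every bus in [sec_bus, sub_bus] in the scope's bus bitmap. The
 * bitmap is consulted later when matching a device's bus against a
 * bridge's device scope.
 */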
static void __init dmar_scope_add_buses(struct dmar_scope *scope, u16 sec_bus,
                                        u16 sub_bus)
{
    sub_bus &= 0xff;
    if ( sec_bus > sub_bus )
        return;

    while ( sec_bus <= sub_bus )
        set_bit(sec_bus++, scope->buses);
}

static int __init acpi_register_drhd_unit(struct acpi_drhd_unit *drhd)
{
    /*
     * Add INCLUDE_ALL units at the tail, so that a scan of the list
     * finds them only after all more specific units.
     */
    if ( drhd->include_all )
        list_add_tail(&drhd->list, &acpi_drhd_units);
    else
        list_add(&drhd->list, &acpi_drhd_units);
    return 0;
}

static int __init acpi_register_rmrr_unit(struct acpi_rmrr_unit *rmrr)
{
    list_add(&rmrr->list, &acpi_rmrr_units);
    return 0;
}

static void scope_devices_free(struct dmar_scope *scope)
{
    if ( !scope )
        return;

    scope->devices_cnt = 0;
    xfree(scope->devices);
    scope->devices = NULL;
}

static void __init disable_all_dmar_units(void)
{
    struct acpi_drhd_unit *drhd, *_drhd;
    struct acpi_rmrr_unit *rmrr, *_rmrr;
    struct acpi_atsr_unit *atsr, *_atsr;

    list_for_each_entry_safe ( drhd, _drhd, &acpi_drhd_units, list )
    {
        list_del(&drhd->list);
        scope_devices_free(&drhd->scope);
        iommu_free(drhd);
        xfree(drhd);
    }
    list_for_each_entry_safe ( rmrr, _rmrr, &acpi_rmrr_units, list )
    {
        list_del(&rmrr->list);
        scope_devices_free(&rmrr->scope);
        xfree(rmrr);
    }
    list_for_each_entry_safe ( atsr, _atsr, &acpi_atsr_units, list )
    {
        list_del(&atsr->list);
        scope_devices_free(&atsr->scope);
        xfree(atsr);
    }
}

static int acpi_ioapic_device_match(
    struct list_head *ioapic_list, unsigned int apic_id)
{
    struct acpi_ioapic_unit *ioapic;

    list_for_each_entry ( ioapic, ioapic_list, list )
        if ( ioapic->apic_id == apic_id )
            return 1;
    return 0;
}

struct acpi_drhd_unit *ioapic_to_drhd(unsigned int apic_id)
{
    struct acpi_drhd_unit *drhd;

    list_for_each_entry ( drhd, &acpi_drhd_units, list )
        if ( acpi_ioapic_device_match(&drhd->ioapic_list, apic_id) )
            return drhd;
    return NULL;
}

struct vtd_iommu *ioapic_to_iommu(unsigned int apic_id)
{
    struct acpi_drhd_unit *drhd;

    list_for_each_entry ( drhd, &acpi_drhd_units, list )
        if ( acpi_ioapic_device_match(&drhd->ioapic_list, apic_id) )
            return drhd->iommu;
    return NULL;
}

static bool_t acpi_hpet_device_match(
    struct list_head *list, unsigned int hpet_id)
{
    struct acpi_hpet_unit *hpet;

    list_for_each_entry ( hpet, list, list )
        if ( hpet->id == hpet_id )
            return 1;
    return 0;
}

struct acpi_drhd_unit *hpet_to_drhd(unsigned int hpet_id)
{
    struct acpi_drhd_unit *drhd;

    list_for_each_entry ( drhd, &acpi_drhd_units, list )
        if ( acpi_hpet_device_match(&drhd->hpet_list, hpet_id) )
            return drhd;
    return NULL;
}

struct vtd_iommu *hpet_to_iommu(unsigned int hpet_id)
{
    struct acpi_drhd_unit *drhd = hpet_to_drhd(hpet_id);

    return drhd ? drhd->iommu : NULL;
}

static int __init acpi_register_atsr_unit(struct acpi_atsr_unit *atsr)
{
    /*
     * Add ALL_PORTS units at the tail, so that a scan of the list
     * finds them only after all more specific units.
     */
    if ( atsr->all_ports )
        list_add_tail(&atsr->list, &acpi_atsr_units);
    else
        list_add(&atsr->list, &acpi_atsr_units);
    return 0;
}

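/*
 * Find the DRHD unit covering @pdev: first look for an explicit device
 * scope entry (virtual and extended functions are matched through their
 * originating bus/function), then for a bridge scope covering the device's
 * bus, and finally fall back to the segment's INCLUDE_ALL unit, if any.
 */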
struct acpi_drhd_unit *acpi_find_matched_drhd_unit(const struct pci_dev *pdev)
{
    u8 bus, devfn;
    struct acpi_drhd_unit *drhd;
    struct acpi_drhd_unit *include_all = NULL;
    int i;

    if ( pdev == NULL )
        return NULL;

    if ( pdev->info.is_virtfn )
    {
        bus = pdev->info.physfn.bus;
        devfn = !pdev->info.is_extfn ? pdev->info.physfn.devfn : 0;
    }
    else if ( pdev->info.is_extfn )
    {
        bus = pdev->bus;
        devfn = 0;
    }
    else
    {
        bus = pdev->bus;
        devfn = pdev->devfn;
    }

    list_for_each_entry ( drhd, &acpi_drhd_units, list )
    {
        if ( drhd->segment != pdev->seg )
            continue;

        for ( i = 0; i < drhd->scope.devices_cnt; i++ )
            if ( drhd->scope.devices[i] == PCI_BDF2(bus, devfn) )
                return drhd;

        if ( test_bit(bus, drhd->scope.buses) )
            return drhd;

        if ( drhd->include_all )
            include_all = drhd;
    }
    return include_all;
}

struct acpi_atsr_unit *acpi_find_matched_atsr_unit(const struct pci_dev *pdev)
{
    struct acpi_atsr_unit *atsr;
    struct acpi_atsr_unit *all_ports = NULL;

    list_for_each_entry ( atsr, &acpi_atsr_units, list )
    {
        if ( atsr->segment != pdev->seg )
            continue;

        if ( test_bit(pdev->bus, atsr->scope.buses) )
            return atsr;

        if ( atsr->all_ports )
            all_ports = atsr;
    }
    return all_ports;
}

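/*
 * An RHSA entry associates a remapping unit, identified by its register
 * base address, with a NUMA proximity domain.
 */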
struct acpi_rhsa_unit *drhd_to_rhsa(const struct acpi_drhd_unit *drhd)
{
    struct acpi_rhsa_unit *rhsa;

    if ( drhd == NULL )
        return NULL;

    list_for_each_entry ( rhsa, &acpi_rhsa_units, list )
    {
        if ( rhsa->address == drhd->address )
            return rhsa;
    }
    return NULL;
}

int is_igd_drhd(struct acpi_drhd_unit *drhd)
{
    return drhd && (drhd->address == igd_drhd_address);
}

/*
 * Count number of devices in device scope. Do not include PCI sub
 * hierarchies.
 */
static int __init scope_device_count(const void *start, const void *end)
{
    const struct acpi_dmar_device_scope *scope;
    int count = 0;

    while ( start < end )
    {
        scope = start;
        if ( scope->length < MIN_SCOPE_LEN )
        {
            printk(XENLOG_WARNING VTDPREFIX "Invalid device scope\n");
            return -EINVAL;
        }

        if ( scope->entry_type == ACPI_DMAR_SCOPE_TYPE_BRIDGE ||
             scope->entry_type == ACPI_DMAR_SCOPE_TYPE_ENDPOINT ||
             scope->entry_type == ACPI_DMAR_SCOPE_TYPE_IOAPIC ||
             scope->entry_type == ACPI_DMAR_SCOPE_TYPE_HPET )
            count++;

        start += scope->length;
    }

    return count;
}

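/*
 * Parse the device scope entries of one DRHD/RMRR/ATSR structure. Each
 * entry names a start bus and a path of (device, function) pairs; all but
 * the last hop are bridges, resolved by reading their secondary bus
 * numbers, and the resulting BDF is recorded in scope->devices[]. Bridge
 * entries additionally contribute their secondary..subordinate bus range
 * to the scope's bus bitmap.
 */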
static int __init acpi_parse_dev_scope(
    const void *start, const void *end, struct dmar_scope *scope,
    int type, u16 seg)
{
    struct acpi_ioapic_unit *acpi_ioapic_unit;
    const struct acpi_dmar_device_scope *acpi_scope;
    u16 bus, sub_bus, sec_bus;
    const struct acpi_dmar_pci_path *path;
    struct acpi_drhd_unit *drhd = type == DMAR_TYPE ?
        container_of(scope, struct acpi_drhd_unit, scope) : NULL;
    int depth, cnt, didx = 0, ret;

    if ( (cnt = scope_device_count(start, end)) < 0 )
        return cnt;

    if ( cnt > 0 )
    {
        scope->devices = xzalloc_array(u16, cnt);
        if ( !scope->devices )
            return -ENOMEM;
    }
    scope->devices_cnt = cnt;

    while ( start < end )
    {
        acpi_scope = start;
        path = (const void *)(acpi_scope + 1);
        depth = (acpi_scope->length - sizeof(*acpi_scope)) / sizeof(*path);
        bus = acpi_scope->bus;

        while ( --depth > 0 )
        {
            bus = pci_conf_read8(PCI_SBDF(seg, bus, path->dev, path->fn),
                                 PCI_SECONDARY_BUS);
            path++;
        }

        switch ( acpi_scope->entry_type )
        {
        case ACPI_DMAR_SCOPE_TYPE_BRIDGE:
            sec_bus = pci_conf_read8(PCI_SBDF(seg, bus, path->dev, path->fn),
                                     PCI_SECONDARY_BUS);
            sub_bus = pci_conf_read8(PCI_SBDF(seg, bus, path->dev, path->fn),
                                     PCI_SUBORDINATE_BUS);
            if ( iommu_verbose )
                printk(VTDPREFIX
                       " bridge: %04x:%02x:%02x.%u start=%x sec=%x sub=%x\n",
                       seg, bus, path->dev, path->fn,
                       acpi_scope->bus, sec_bus, sub_bus);

            dmar_scope_add_buses(scope, sec_bus, sub_bus);
            break;

        case ACPI_DMAR_SCOPE_TYPE_HPET:
            if ( iommu_verbose )
                printk(VTDPREFIX " MSI HPET: %04x:%02x:%02x.%u\n",
                       seg, bus, path->dev, path->fn);

            if ( drhd )
            {
                struct acpi_hpet_unit *acpi_hpet_unit;

                ret = -ENOMEM;
                acpi_hpet_unit = xmalloc(struct acpi_hpet_unit);
                if ( !acpi_hpet_unit )
                    goto out;
                acpi_hpet_unit->id = acpi_scope->enumeration_id;
                acpi_hpet_unit->bus = bus;
                acpi_hpet_unit->dev = path->dev;
                acpi_hpet_unit->func = path->fn;
                list_add(&acpi_hpet_unit->list, &drhd->hpet_list);
            }

            break;

        case ACPI_DMAR_SCOPE_TYPE_ENDPOINT:
            if ( iommu_verbose )
                printk(VTDPREFIX " endpoint: %04x:%02x:%02x.%u\n",
                       seg, bus, path->dev, path->fn);

            if ( drhd )
            {
                if ( (seg == 0) && (bus == 0) && (path->dev == 2) &&
                     (path->fn == 0) )
                    igd_drhd_address = drhd->address;
            }

            break;

        case ACPI_DMAR_SCOPE_TYPE_IOAPIC:
            if ( iommu_verbose )
                printk(VTDPREFIX " IOAPIC: %04x:%02x:%02x.%u\n",
                       seg, bus, path->dev, path->fn);

            if ( drhd )
            {
                ret = -ENOMEM;
                acpi_ioapic_unit = xmalloc(struct acpi_ioapic_unit);
                if ( !acpi_ioapic_unit )
                    goto out;
                acpi_ioapic_unit->apic_id = acpi_scope->enumeration_id;
                acpi_ioapic_unit->ioapic.bdf.bus = bus;
                acpi_ioapic_unit->ioapic.bdf.dev = path->dev;
                acpi_ioapic_unit->ioapic.bdf.func = path->fn;
                list_add(&acpi_ioapic_unit->list, &drhd->ioapic_list);
            }

            break;

        default:
            if ( iommu_verbose )
                printk(XENLOG_WARNING VTDPREFIX "Unknown scope type %#x\n",
                       acpi_scope->entry_type);
            start += acpi_scope->length;
            continue;
        }
        scope->devices[didx++] = PCI_BDF(bus, path->dev, path->fn);
        start += acpi_scope->length;
    }

    ret = 0;

 out:
    if ( ret )
        scope_devices_free(scope);

    return ret;
}

static int __init acpi_dmar_check_length(
    const struct acpi_dmar_header *h, unsigned int min_len)
{
    if ( h->length >= min_len )
        return 0;
    printk(XENLOG_ERR VTDPREFIX "Invalid ACPI DMAR entry length: %#x\n",
           h->length);
    return -EINVAL;
}

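/*
 * Build an acpi_drhd_unit from one DRHD entry: allocate its vtd_iommu,
 * parse the device scope, warn about scope devices that can't be detected
 * on the PCI bus, and register the unit. Only one INCLUDE_ALL unit is
 * accepted on segment 0.
 */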
static int __init
acpi_parse_one_drhd(struct acpi_dmar_header *header)
{
    struct acpi_dmar_hardware_unit *drhd =
        container_of(header, struct acpi_dmar_hardware_unit, header);
    void *dev_scope_start, *dev_scope_end;
    struct acpi_drhd_unit *dmaru;
    int ret;
    static int include_all = 0;

    if ( (ret = acpi_dmar_check_length(header, sizeof(*drhd))) != 0 )
        return ret;

    if ( !drhd->address || !(drhd->address + 1) )
        return -ENODEV;

    dmaru = xzalloc(struct acpi_drhd_unit);
    if ( !dmaru )
        return -ENOMEM;

    dmaru->address = drhd->address;
    dmaru->segment = drhd->segment;
    dmaru->include_all = drhd->flags & ACPI_DMAR_INCLUDE_ALL;
    INIT_LIST_HEAD(&dmaru->ioapic_list);
    INIT_LIST_HEAD(&dmaru->hpet_list);
    if ( iommu_verbose )
        printk(VTDPREFIX " dmaru->address = %"PRIx64"\n", dmaru->address);

    ret = iommu_alloc(dmaru);
    if ( ret )
        goto out;

    dev_scope_start = (void *)(drhd + 1);
    dev_scope_end = ((void *)drhd) + header->length;
    ret = acpi_parse_dev_scope(dev_scope_start, dev_scope_end,
                               &dmaru->scope, DMAR_TYPE, drhd->segment);

    if ( dmaru->include_all )
    {
        if ( iommu_verbose )
            printk(VTDPREFIX " flags: INCLUDE_ALL\n");
        /* Only allow one INCLUDE_ALL */
        if ( drhd->segment == 0 && include_all )
        {
            printk(XENLOG_WARNING VTDPREFIX
                   "Only one INCLUDE_ALL device scope is allowed\n");
            ret = -EINVAL;
        }
        if ( drhd->segment == 0 )
            include_all = 1;
    }

    if ( ret )
        goto out;
    else if ( force_iommu || dmaru->include_all )
        acpi_register_drhd_unit(dmaru);
    else
    {
        u8 b, d, f;
        unsigned int i = 0;
        union {
            const void *raw;
            const struct acpi_dmar_device_scope *scope;
        } p;

        /* Skip checking if segment is not accessible yet. */
        if ( !pci_known_segment(drhd->segment) )
            i = UINT_MAX;

        for ( p.raw = dev_scope_start; i < dmaru->scope.devices_cnt;
              i++, p.raw += p.scope->length )
        {
            if ( p.scope->entry_type == ACPI_DMAR_SCOPE_TYPE_IOAPIC ||
                 p.scope->entry_type == ACPI_DMAR_SCOPE_TYPE_HPET )
                continue;

            b = PCI_BUS(dmaru->scope.devices[i]);
            d = PCI_SLOT(dmaru->scope.devices[i]);
            f = PCI_FUNC(dmaru->scope.devices[i]);

            if ( !pci_device_detect(drhd->segment, b, d, f) )
                printk(XENLOG_WARNING VTDPREFIX
                       " Non-existent device (%04x:%02x:%02x.%u) in this DRHD's scope!\n",
                       drhd->segment, b, d, f);
        }

        acpi_register_drhd_unit(dmaru);
    }

 out:
    if ( ret )
    {
        scope_devices_free(&dmaru->scope);
        iommu_free(dmaru);
        xfree(dmaru);
    }

    return ret;
}

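/*
 * Returns 0 on success, a negative value on error, and 1 (after freeing
 * @rmrru) when the RMRR is ignored because none of the devices in its
 * scope can be detected on the PCI bus.
 */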
static int register_one_rmrr(struct acpi_rmrr_unit *rmrru)
{
    bool ignore = false;
    unsigned int i = 0;
    int ret = 0;

    /* Skip checking if segment is not accessible yet. */
    if ( !pci_known_segment(rmrru->segment) )
        i = UINT_MAX;

    for ( ; i < rmrru->scope.devices_cnt; i++ )
    {
        u8 b = PCI_BUS(rmrru->scope.devices[i]);
        u8 d = PCI_SLOT(rmrru->scope.devices[i]);
        u8 f = PCI_FUNC(rmrru->scope.devices[i]);

        if ( pci_device_detect(rmrru->segment, b, d, f) == 0 )
        {
            dprintk(XENLOG_WARNING VTDPREFIX,
                    " Non-existent device (%04x:%02x:%02x.%u) is reported"
                    " in RMRR [%"PRIx64",%"PRIx64"]'s scope!\n",
                    rmrru->segment, b, d, f,
                    rmrru->base_address, rmrru->end_address);
            ignore = true;
        }
        else
        {
            ignore = false;
            break;
        }
    }

    if ( ignore )
    {
        dprintk(XENLOG_WARNING VTDPREFIX,
                " Ignore RMRR [%"PRIx64",%"PRIx64"] as no device"
                " under its scope is PCI discoverable!\n",
                rmrru->base_address, rmrru->end_address);
        scope_devices_free(&rmrru->scope);
        xfree(rmrru);
        return 1;
    }
    else if ( rmrru->base_address > rmrru->end_address )
    {
        dprintk(XENLOG_WARNING VTDPREFIX,
                " RMRR [%"PRIx64",%"PRIx64"] is incorrect!\n",
                rmrru->base_address, rmrru->end_address);
        scope_devices_free(&rmrru->scope);
        xfree(rmrru);
        ret = -EFAULT;
    }
    else
    {
        if ( iommu_verbose )
            dprintk(VTDPREFIX, " RMRR: [%"PRIx64",%"PRIx64"]\n",
                    rmrru->base_address, rmrru->end_address);
        acpi_register_rmrr_unit(rmrru);
    }

    return ret;
}

static int __init
acpi_parse_one_rmrr(struct acpi_dmar_header *header)
{
    struct acpi_dmar_reserved_memory *rmrr =
        container_of(header, struct acpi_dmar_reserved_memory, header);
    struct acpi_rmrr_unit *rmrru;
    void *dev_scope_start, *dev_scope_end;
    u64 base_addr = rmrr->base_address, end_addr = rmrr->end_address;
    int ret;

    if ( (ret = acpi_dmar_check_length(header, sizeof(*rmrr))) != 0 )
        return ret;

    list_for_each_entry ( rmrru, &acpi_rmrr_units, list )
        if ( base_addr <= rmrru->end_address && rmrru->base_address <= end_addr )
        {
            printk(XENLOG_ERR VTDPREFIX
                   "Overlapping RMRRs [%"PRIx64",%"PRIx64"] and [%"PRIx64",%"PRIx64"]\n",
                   rmrru->base_address, rmrru->end_address,
                   base_addr, end_addr);
            return -EEXIST;
        }

    /*
     * This check is here simply to detect when RMRR values are
     * not properly represented in the system memory map and
     * inform the user.
     */
    if ( !e820_all_mapped(base_addr, end_addr + 1, E820_RESERVED) &&
         !e820_all_mapped(base_addr, end_addr + 1, E820_NVS) &&
         !e820_all_mapped(base_addr, end_addr + 1, E820_ACPI) )
        printk(XENLOG_WARNING VTDPREFIX
               " RMRR [%"PRIx64",%"PRIx64"] not in reserved memory;"
               " need \"iommu_inclusive_mapping=1\"?\n",
               base_addr, end_addr);

    rmrru = xzalloc(struct acpi_rmrr_unit);
    if ( !rmrru )
        return -ENOMEM;

    rmrru->base_address = base_addr;
    rmrru->end_address = end_addr;
    rmrru->segment = rmrr->segment;

    dev_scope_start = (void *)(rmrr + 1);
    dev_scope_end = ((void *)rmrr) + header->length;
    ret = acpi_parse_dev_scope(dev_scope_start, dev_scope_end,
                               &rmrru->scope, RMRR_TYPE, rmrr->segment);

    if ( !ret && (rmrru->scope.devices_cnt != 0) )
    {
        ret = register_one_rmrr(rmrru);
        /*
         * register_one_rmrr() returns greater than 0 when a specified
         * PCIe device cannot be detected. To prevent VT-d from being
         * disabled in such cases, reset the return value to 0 here.
         */
        if ( ret > 0 )
            ret = 0;
    }
    else
        xfree(rmrru);

    return ret;
}

static int __init
acpi_parse_one_atsr(struct acpi_dmar_header *header)
{
    struct acpi_dmar_atsr *atsr =
        container_of(header, struct acpi_dmar_atsr, header);
    struct acpi_atsr_unit *atsru;
    int ret;
    static int all_ports;
    void *dev_scope_start, *dev_scope_end;

    if ( (ret = acpi_dmar_check_length(header, sizeof(*atsr))) != 0 )
        return ret;

    atsru = xzalloc(struct acpi_atsr_unit);
    if ( !atsru )
        return -ENOMEM;

    atsru->segment = atsr->segment;
    atsru->all_ports = atsr->flags & ACPI_DMAR_ALL_PORTS;
    if ( iommu_verbose )
        printk(VTDPREFIX " atsru->all_ports: %x\n", atsru->all_ports);
    if ( !atsru->all_ports )
    {
        dev_scope_start = (void *)(atsr + 1);
        dev_scope_end = ((void *)atsr) + header->length;
        ret = acpi_parse_dev_scope(dev_scope_start, dev_scope_end,
                                   &atsru->scope, ATSR_TYPE, atsr->segment);
    }
    else
    {
        if ( iommu_verbose )
            printk(VTDPREFIX " flags: ALL_PORTS\n");
        /* Only allow one ALL_PORTS */
        if ( atsr->segment == 0 && all_ports )
        {
            printk(XENLOG_WARNING VTDPREFIX
                   "Only one ALL_PORTS device scope is allowed\n");
            ret = -EINVAL;
        }
        if ( atsr->segment == 0 )
            all_ports = 1;
    }

    if ( ret )
    {
        scope_devices_free(&atsru->scope);
        xfree(atsru);
    }
    else
        acpi_register_atsr_unit(atsru);
    return ret;
}

static int __init
acpi_parse_one_rhsa(struct acpi_dmar_header *header)
{
    struct acpi_dmar_rhsa *rhsa =
        container_of(header, struct acpi_dmar_rhsa, header);
    struct acpi_rhsa_unit *rhsau;
    int ret;

    if ( (ret = acpi_dmar_check_length(header, sizeof(*rhsa))) != 0 )
        return ret;

    rhsau = xzalloc(struct acpi_rhsa_unit);
    if ( !rhsau )
        return -ENOMEM;

    rhsau->address = rhsa->base_address;
    rhsau->proximity_domain = rhsa->proximity_domain;
    list_add_tail(&rhsau->list, &acpi_rhsa_units);
    if ( iommu_verbose )
        printk(VTDPREFIX
               " rhsau->address: %"PRIx64" rhsau->proximity_domain: %"PRIx32"\n",
               rhsau->address, rhsau->proximity_domain);

    return ret;
}

static int __init acpi_parse_dmar(struct acpi_table_header *table)
{
    struct acpi_table_dmar *dmar;
    struct acpi_dmar_header *entry_header;
    u8 dmar_host_address_width;
    int ret = 0;

    dmar = (struct acpi_table_dmar *)table;
    dmar_flags = dmar->flags;

    if ( !iommu_enable && !iommu_intremap )
    {
        ret = -EINVAL;
        goto out;
    }

    if ( !dmar->width )
    {
        printk(XENLOG_WARNING VTDPREFIX "Zero: Invalid DMAR width\n");
        ret = -EINVAL;
        goto out;
    }

    dmar_host_address_width = dmar->width + 1;
    if ( iommu_verbose )
        printk(VTDPREFIX "Host address width %d\n", dmar_host_address_width);

    entry_header = (void *)(dmar + 1);
    while ( ((unsigned long)entry_header) <
            (((unsigned long)dmar) + table->length) )
    {
        ret = acpi_dmar_check_length(entry_header, sizeof(*entry_header));
        if ( ret )
            break;

        switch ( entry_header->type )
        {
        case ACPI_DMAR_TYPE_HARDWARE_UNIT:
            if ( iommu_verbose )
                printk(VTDPREFIX "found ACPI_DMAR_DRHD:\n");
            ret = acpi_parse_one_drhd(entry_header);
            break;
        case ACPI_DMAR_TYPE_RESERVED_MEMORY:
            if ( iommu_verbose )
                printk(VTDPREFIX "found ACPI_DMAR_RMRR:\n");
            ret = acpi_parse_one_rmrr(entry_header);
            break;
        case ACPI_DMAR_TYPE_ATSR:
            if ( iommu_verbose )
                printk(VTDPREFIX "found ACPI_DMAR_ATSR:\n");
            ret = acpi_parse_one_atsr(entry_header);
            break;
        case ACPI_DMAR_HARDWARE_AFFINITY:
            if ( iommu_verbose )
                printk(VTDPREFIX "found ACPI_DMAR_RHSA:\n");
            ret = acpi_parse_one_rhsa(entry_header);
            break;
        default:
            dprintk(XENLOG_WARNING VTDPREFIX,
                    "Ignore unknown DMAR structure type (%#x)\n",
                    entry_header->type);
            break;
        }
        if ( ret )
            break;

        entry_header = ((void *)entry_header + entry_header->length);
    }

    if ( ret )
    {
        printk(XENLOG_WARNING
               "Failed to parse ACPI DMAR. Disabling VT-d.\n");
        disable_all_dmar_units();
    }

 out:
    /* Zap ACPI DMAR signature to prevent dom0 using vt-d HW. */
    acpi_dmar_zap();
    return ret;
}

#define MAX_USER_RMRR_PAGES 16
#define MAX_USER_RMRR 10

/* RMRR units derived from command line rmrr option. */
#define MAX_USER_RMRR_DEV 20
struct user_rmrr {
    unsigned long base_pfn, end_pfn;
    unsigned int dev_count;
    u32 sbdf[MAX_USER_RMRR_DEV];
};

static unsigned int __initdata nr_rmrr;
static struct user_rmrr __initdata user_rmrrs[MAX_USER_RMRR];

/* Macros for RMRR inclusive range formatting. */
#define ERMRRU_FMT "[%lx-%lx]"
#define ERMRRU_ARG(eru) eru.base_pfn, eru.end_pfn

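/*
 * Register the RMRR ranges specified on the command line, after checking
 * them for validity: base <= end, size below MAX_USER_RMRR_PAGES, no
 * overlap with already-registered RMRRs, every pfn backed by a valid mfn,
 * and all devices on the same segment.
 */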
static int __init add_user_rmrr(void)
{
    struct acpi_rmrr_unit *rmrr, *rmrru;
    unsigned int idx, seg, i;
    unsigned long base, end;
    bool overlap;

    for ( i = 0; i < nr_rmrr; i++ )
    {
        base = user_rmrrs[i].base_pfn;
        end = user_rmrrs[i].end_pfn;

        if ( base > end )
        {
            printk(XENLOG_ERR VTDPREFIX
                   "Invalid RMRR Range "ERMRRU_FMT"\n",
                   ERMRRU_ARG(user_rmrrs[i]));
            continue;
        }

        if ( (end - base) >= MAX_USER_RMRR_PAGES )
        {
            printk(XENLOG_ERR VTDPREFIX
                   "RMRR range "ERMRRU_FMT" exceeds "\
                   __stringify(MAX_USER_RMRR_PAGES)" pages\n",
                   ERMRRU_ARG(user_rmrrs[i]));
            continue;
        }

        overlap = false;
        list_for_each_entry ( rmrru, &acpi_rmrr_units, list )
        {
            if ( pfn_to_paddr(base) <= rmrru->end_address &&
                 rmrru->base_address <= pfn_to_paddr(end) )
            {
                printk(XENLOG_ERR VTDPREFIX
                       "Overlapping RMRRs: "ERMRRU_FMT" and [%lx-%lx]\n",
                       ERMRRU_ARG(user_rmrrs[i]),
                       paddr_to_pfn(rmrru->base_address),
                       paddr_to_pfn(rmrru->end_address));
                overlap = true;
                break;
            }
        }
        /* Don't add overlapping RMRR. */
        if ( overlap )
            continue;

        do
        {
            if ( !mfn_valid(_mfn(base)) )
            {
                printk(XENLOG_ERR VTDPREFIX
                       "Invalid pfn in RMRR range "ERMRRU_FMT"\n",
                       ERMRRU_ARG(user_rmrrs[i]));
                break;
            }
        } while ( base++ < end );

        /* Invalid pfn in range as the loop ended before end_pfn was reached. */
        if ( base <= end )
            continue;

        rmrr = xzalloc(struct acpi_rmrr_unit);
        if ( !rmrr )
            return -ENOMEM;

        rmrr->scope.devices = xmalloc_array(u16, user_rmrrs[i].dev_count);
        if ( !rmrr->scope.devices )
        {
            xfree(rmrr);
            return -ENOMEM;
        }

        seg = 0;
        for ( idx = 0; idx < user_rmrrs[i].dev_count; idx++ )
        {
            rmrr->scope.devices[idx] = user_rmrrs[i].sbdf[idx];
            seg |= PCI_SEG(user_rmrrs[i].sbdf[idx]);
        }
        if ( seg != PCI_SEG(user_rmrrs[i].sbdf[0]) )
        {
            printk(XENLOG_ERR VTDPREFIX
                   "Segments are not equal for RMRR range "ERMRRU_FMT"\n",
                   ERMRRU_ARG(user_rmrrs[i]));
            scope_devices_free(&rmrr->scope);
            xfree(rmrr);
            continue;
        }

        rmrr->segment = seg;
        rmrr->base_address = pfn_to_paddr(user_rmrrs[i].base_pfn);
        /* Align the end_address to the end of the page */
        rmrr->end_address = pfn_to_paddr(user_rmrrs[i].end_pfn) | ~PAGE_MASK;
        rmrr->scope.devices_cnt = user_rmrrs[i].dev_count;

        if ( register_one_rmrr(rmrr) )
            printk(XENLOG_ERR VTDPREFIX
                   "Could not register RMRR range "ERMRRU_FMT"\n",
                   ERMRRU_ARG(user_rmrrs[i]));
    }

    return 0;
}

#include <asm/tboot.h>
/* ACPI tables may not be DMA protected by tboot, so use DMAR copy */
/* SINIT saved in SinitMleData in TXT heap (which is DMA protected) */
#define parse_dmar_table(h) tboot_parse_dmar_table(h)

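/*
 * Map and parse the DMAR table (via parse_dmar_table(), which prefers the
 * DMA-protected copy when tboot/TXT is active), then give each IOMMU its
 * NUMA node from the matching RHSA entry and allocate its root page table
 * there.
 */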
int __init acpi_dmar_init(void)
{
    acpi_physical_address dmar_addr;
    acpi_native_uint dmar_len;
    const struct acpi_drhd_unit *drhd;
    int ret;

    if ( ACPI_SUCCESS(acpi_get_table_phys(ACPI_SIG_DMAR, 0,
                                          &dmar_addr, &dmar_len)) )
    {
        map_pages_to_xen((unsigned long)__va(dmar_addr), maddr_to_mfn(dmar_addr),
                         PFN_UP(dmar_addr + dmar_len) - PFN_DOWN(dmar_addr),
                         PAGE_HYPERVISOR);
        dmar_table = __va(dmar_addr);
    }

    ret = parse_dmar_table(acpi_parse_dmar);

    for_each_drhd_unit ( drhd )
    {
        const struct acpi_rhsa_unit *rhsa = drhd_to_rhsa(drhd);
        struct vtd_iommu *iommu = drhd->iommu;

        if ( ret )
            break;

        if ( rhsa )
            iommu->node = pxm_to_node(rhsa->proximity_domain);

        if ( !(iommu->root_maddr = alloc_pgtable_maddr(1, iommu->node)) )
            ret = -ENOMEM;
    }

    if ( !ret )
    {
        iommu_init_ops = &intel_iommu_init_ops;

        return add_user_rmrr();
    }

    return ret;
}

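/*
 * While Xen owns VT-d, the DMAR table is hidden from dom0 by rewriting its
 * signature: "DMAR" and "RMAD" are permutations of the same bytes, so the
 * table checksum is unaffected. acpi_dmar_reinstate() restores the real
 * signature whenever the table needs to be visible again.
 */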
void acpi_dmar_reinstate(void)
{
    uint32_t sig = 0x52414d44; /* "DMAR" */

    if ( dmar_table )
        write_atomic((uint32_t*)&dmar_table->signature[0], sig);
}

void acpi_dmar_zap(void)
{
    uint32_t sig = 0x44414d52; /* "RMAD" - doesn't alter table checksum */

    if ( dmar_table )
        write_atomic((uint32_t*)&dmar_table->signature[0], sig);
}

bool_t platform_supports_intremap(void)
{
    const unsigned int mask = ACPI_DMAR_INTR_REMAP;

    return (dmar_flags & mask) == ACPI_DMAR_INTR_REMAP;
}

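/*
 * x2APIC requires interrupt remapping, and firmware may ask the OS not to
 * enable x2APIC via the optional X2APIC_OPT_OUT flag, so insist on
 * INTR_REMAP being set and X2APIC_OPT_OUT being clear.
 */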
bool_t __init platform_supports_x2apic(void)
{
    const unsigned int mask = ACPI_DMAR_INTR_REMAP | ACPI_DMAR_X2APIC_OPT_OUT;

    return cpu_has_x2apic && ((dmar_flags & mask) == ACPI_DMAR_INTR_REMAP);
}

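/*
 * Report each RMRR range to @func once. for_each_rmrr_device() yields one
 * (unit, bdf) pair per device in a unit's scope, so once @func has
 * accepted a unit's range (positive return), the remaining devices of
 * that unit are skipped.
 */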
int intel_iommu_get_reserved_device_memory(iommu_grdm_t *func, void *ctxt)
{
    struct acpi_rmrr_unit *rmrr, *rmrr_cur = NULL;
    unsigned int i;
    u16 bdf;

    for_each_rmrr_device ( rmrr, bdf, i )
    {
        int rc;

        if ( rmrr == rmrr_cur )
            continue;

        rc = func(PFN_DOWN(rmrr->base_address),
                  PFN_UP(rmrr->end_address) - PFN_DOWN(rmrr->base_address),
                  PCI_SBDF2(rmrr->segment, bdf).sbdf, ctxt);

        if ( unlikely(rc < 0) )
            return rc;

        if ( rc )
            rmrr_cur = rmrr;
    }

    return 0;
}

/*
 * Parse the "rmrr" Xen command line option, and add the parsed devices and
 * regions to the acpi_rmrr_unit list, to be mapped just like RMRRs parsed
 * from ACPI.
 * Format:
 * rmrr=start<-end>=[s1]bdf1[,[s1]bdf2[,...]];start<-end>=[s2]bdf1[,[s2]bdf2[,...]]
 * If the segment of the first device is not specified,
 * segment zero will be used.
 * If other segments are not specified, the first device's segment will be
 * used.
 * If a segment is specified for other than the first device, and it does not
 * match the one specified for the first one, an error will be reported.
 */
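/*
 * Example (made-up values): "rmrr=860f3-860f6=0:0:1d.0,0:0:1d.1" reserves
 * pfns 0x860f3-0x860f6 on behalf of the two segment 0 devices 00:1d.0 and
 * 00:1d.1.
 */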
static int __init parse_rmrr_param(const char *str)
{
    const char *s = str, *cur, *stmp;
    unsigned int seg, bus, dev, func, dev_count;
    unsigned long start, end;

    do {
        if ( nr_rmrr >= MAX_USER_RMRR )
            return -E2BIG;

        start = simple_strtoul(cur = s, &s, 16);
        if ( cur == s )
            return -EINVAL;

        if ( *s == '-' )
        {
            end = simple_strtoul(cur = s + 1, &s, 16);
            if ( cur == s )
                return -EINVAL;
        }
        else
            end = start;

        user_rmrrs[nr_rmrr].base_pfn = start;
        user_rmrrs[nr_rmrr].end_pfn = end;

        if ( *s != '=' )
            continue;

        do {
            bool def_seg = false;

            stmp = parse_pci_seg(s + 1, &seg, &bus, &dev, &func, &def_seg);
            if ( !stmp )
                return -EINVAL;

            /*
             * Not specified.
             * The segment will be replaced with the one from the first
             * device.
             */
            if ( user_rmrrs[nr_rmrr].dev_count && def_seg )
                seg = PCI_SEG(user_rmrrs[nr_rmrr].sbdf[0]);

            /* Keep sbdfs even if they differ and later report an error. */
            dev_count = user_rmrrs[nr_rmrr].dev_count;
            user_rmrrs[nr_rmrr].sbdf[dev_count] =
                PCI_SBDF(seg, bus, dev, func).sbdf;

            user_rmrrs[nr_rmrr].dev_count++;
            s = stmp;
        } while ( *s == ',' &&
                  user_rmrrs[nr_rmrr].dev_count < MAX_USER_RMRR_DEV );

        if ( user_rmrrs[nr_rmrr].dev_count )
            nr_rmrr++;

    } while ( *s++ == ';' );

    return s[-1] ? -EINVAL : 0;
}
custom_param("rmrr", parse_rmrr_param);