/*
 * Copyright (C) 2007 Advanced Micro Devices, Inc.
 * Author: Leo Duran <leo.duran@amd.com>
 * Author: Wei Wang <wei.wang2@amd.com> - adapted to xen
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; If not, see <http://www.gnu.org/licenses/>.
 */

#include <xen/acpi.h>
#include <xen/pci.h>

#include "iommu.h"

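/*
 * Locate the IOMMU's standard PCI MSI capability and record where it lives,
 * for use when the IOMMU's interrupt is set up later on.
 */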
static int __init get_iommu_msi_capabilities(
    u16 seg, u8 bus, u8 dev, u8 func, struct amd_iommu *iommu)
{
    int pos;

    pos = pci_find_cap_offset(seg, bus, dev, func, PCI_CAP_ID_MSI);

    if ( !pos )
        return -ENODEV;

    AMD_IOMMU_DEBUG("Found MSI capability block at %#x\n", pos);

    iommu->msi.msi_attrib.type = PCI_CAP_ID_MSI;
    iommu->msi.msi_attrib.pos = pos;
    iommu->msi.msi_attrib.is_64 = 1;
    return 0;
}

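/*
 * Read the capability header at the offset reported by the IVHD block and
 * check that it identifies an IOMMU capability.
 */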
static int __init get_iommu_capabilities(
    u16 seg, u8 bus, u8 dev, u8 func, u16 cap_ptr, struct amd_iommu *iommu)
{
    u8 type;

    iommu->cap.header = pci_conf_read32(PCI_SBDF(seg, bus, dev, func), cap_ptr);
    type = get_field_from_reg_u32(iommu->cap.header, PCI_CAP_TYPE_MASK,
                                  PCI_CAP_TYPE_SHIFT);

    if ( type != PCI_CAP_TYPE_IOMMU )
        return -ENODEV;

    return 0;
}

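/*
 * Read the Extended Feature Register from the IOMMU's MMIO space and log the
 * advertised features, once per distinct feature set.
 */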
void __init get_iommu_features(struct amd_iommu *iommu)
{
    const struct amd_iommu *first;
    ASSERT( iommu->mmio_base );

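    /* No EFR support means there is no extended feature register to read. */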
    if ( !iommu_has_cap(iommu, PCI_CAP_EFRSUP_SHIFT) )
    {
        iommu->features.raw = 0;
        return;
    }

    iommu->features.raw =
        readq(iommu->mmio_base + IOMMU_EXT_FEATURE_MMIO_OFFSET);

    /* Don't log the same set of features over and over. */
    first = list_first_entry(&amd_iommu_head, struct amd_iommu, list);
    if ( iommu != first && iommu->features.raw == first->features.raw )
        return;

    printk("AMD-Vi: IOMMU Extended Features:\n");

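    /*
     * Decrementing the zero-initialised compound literal wraps the bitfield
     * to its maximum value: 1 for single-bit fields, larger for multi-bit
     * ones.  Multi-bit fields are therefore printed with their numeric
     * value, single-bit fields as plain flags (and only when set).
     */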
#define FEAT(fld, str) do {                                    \
    if ( --((union amd_iommu_ext_features){}).flds.fld > 1 )   \
        printk( "- " str ": %#x\n", iommu->features.flds.fld); \
    else if ( iommu->features.flds.fld )                       \
        printk( "- " str "\n");                                \
} while ( false )

    FEAT(pref_sup,           "Prefetch Pages Command");
    FEAT(ppr_sup,            "Peripheral Page Service Request");
    FEAT(xt_sup,             "x2APIC");
    FEAT(nx_sup,             "NX bit");
    FEAT(gappi_sup,          "Guest APIC Physical Processor Interrupt");
    FEAT(ia_sup,             "Invalidate All Command");
    FEAT(ga_sup,             "Guest APIC");
    FEAT(he_sup,             "Hardware Error Registers");
    FEAT(pc_sup,             "Performance Counters");
    FEAT(hats,               "Host Address Translation Size");

    if ( iommu->features.flds.gt_sup )
    {
        FEAT(gats,           "Guest Address Translation Size");
        FEAT(glx_sup,        "Guest CR3 Root Table Level");
        FEAT(pas_max,        "Maximum PASID");
    }

    FEAT(smif_sup,           "SMI Filter Register");
    FEAT(smif_rc,            "SMI Filter Register Count");
    FEAT(gam_sup,            "Guest Virtual APIC Modes");
    FEAT(dual_ppr_log_sup,   "Dual PPR Log");
    FEAT(dual_event_log_sup, "Dual Event Log");
    FEAT(sats_sup,           "Secure ATS");
    FEAT(us_sup,             "User / Supervisor Page Protection");
    FEAT(dev_tbl_seg_sup,    "Device Table Segmentation");
    FEAT(ppr_early_of_sup,   "PPR Log Overflow Early Warning");
    FEAT(ppr_auto_rsp_sup,   "PPR Automatic Response");
    FEAT(marc_sup,           "Memory Access Routing and Control");
    FEAT(blk_stop_mrk_sup,   "Block StopMark Message");
    FEAT(perf_opt_sup,       "Performance Optimization");
    FEAT(msi_cap_mmio_sup,   "MSI Capability MMIO Access");
    FEAT(gio_sup,            "Guest I/O Protection");
    FEAT(ha_sup,             "Host Access");
    FEAT(eph_sup,            "Enhanced PPR Handling");
    FEAT(attr_fw_sup,        "Attribute Forward");
    FEAT(hd_sup,             "Host Dirty");
    FEAT(inv_iotlb_type_sup, "Invalidate IOTLB Type");
    FEAT(viommu_sup,         "Virtualized IOMMU");
    FEAT(vm_guard_io_sup,    "VMGuard I/O Support");
    FEAT(vm_table_size,      "VM Table Size");
    FEAT(ga_update_dis_sup,  "Guest Access Bit Update Disable");

#undef FEAT
}

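/*
 * Instantiate a struct amd_iommu from a single IVHD block: validate the
 * block, record the IOMMU's location and capabilities, and add it to the
 * global list of detected IOMMUs.
 */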
int __init amd_iommu_detect_one_acpi(
    const struct acpi_ivrs_hardware *ivhd_block)
{
    struct amd_iommu *iommu;
    u8 bus, dev, func;
    int rt = 0;

    if ( ivhd_block->header.length < sizeof(*ivhd_block) )
    {
        AMD_IOMMU_DEBUG("Invalid IVHD Block Length!\n");
        return -ENODEV;
    }

    if ( !ivhd_block->header.device_id ||
         !ivhd_block->capability_offset || !ivhd_block->base_address )
    {
        AMD_IOMMU_DEBUG("Invalid IVHD Block!\n");
        return -ENODEV;
    }

    iommu = xzalloc(struct amd_iommu);
    if ( !iommu )
    {
        AMD_IOMMU_DEBUG("Error allocating amd_iommu\n");
        return -ENOMEM;
    }

    spin_lock_init(&iommu->lock);
    INIT_LIST_HEAD(&iommu->ats_devices);

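    /* Record the IOMMU's PCI location and MMIO base from the IVHD block. */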
    iommu->seg = ivhd_block->pci_segment_group;
    iommu->bdf = ivhd_block->header.device_id;
    iommu->cap_offset = ivhd_block->capability_offset;
    iommu->mmio_base_phys = ivhd_block->base_address;

    /* override IOMMU HT flags */
    iommu->ht_flags = ivhd_block->header.flags;

    bus = PCI_BUS(iommu->bdf);
    dev = PCI_SLOT(iommu->bdf);
    func = PCI_FUNC(iommu->bdf);

    rt = get_iommu_capabilities(iommu->seg, bus, dev, func,
                                iommu->cap_offset, iommu);
    if ( rt )
        goto out;

    rt = get_iommu_msi_capabilities(iommu->seg, bus, dev, func, iommu);
    if ( rt )
        goto out;

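    /*
     * Mark the IOMMU's own PCI config space read-only so the device cannot
     * be reprogrammed via config writes behind Xen's back.  A failure here
     * is logged but not fatal.
     */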
    rt = pci_ro_device(iommu->seg, bus, PCI_DEVFN(dev, func));
    if ( rt )
        printk(XENLOG_ERR
               "Could not mark config space of %04x:%02x:%02x.%u read-only (%d)\n",
               iommu->seg, bus, dev, func, rt);

    list_add_tail(&iommu->list, &amd_iommu_head);
    rt = 0;

 out:
    if ( rt )
        xfree(iommu);

    return rt;
}