1 /*
2 * mmconfig.c - Low-level direct PCI config space access via MMCONFIG
3 *
4 * This is an 64bit optimized version that always keeps the full mmconfig
5 * space mapped. This allows lockless config space operation.
6 *
7 * copied from Linux
8 */
9
10 #include <xen/init.h>
11 #include <xen/mm.h>
12 #include <xen/acpi.h>
13 #include <xen/xmalloc.h>
14 #include <xen/pci.h>
15 #include <xen/pci_regs.h>
16 #include <xen/iommu.h>
17 #include <xen/rangeset.h>
18
19 #include "mmconfig.h"
20
21 /* Static virtual mapping of the MMCONFIG aperture */
22 struct mmcfg_virt {
23 struct acpi_mcfg_allocation *cfg;
24 char __iomem *virt;
25 };
26 static struct mmcfg_virt *pci_mmcfg_virt;
27 static unsigned int mmcfg_pci_segment_shift;
28
get_virt(unsigned int seg,unsigned int * bus)29 static char __iomem *get_virt(unsigned int seg, unsigned int *bus)
30 {
31 struct acpi_mcfg_allocation *cfg;
32 int cfg_num;
33
34 for (cfg_num = 0; cfg_num < pci_mmcfg_config_num; cfg_num++) {
35 cfg = pci_mmcfg_virt[cfg_num].cfg;
36 if (cfg->pci_segment == seg &&
37 (cfg->start_bus_number <= *bus) &&
38 (cfg->end_bus_number >= *bus)) {
39 *bus -= cfg->start_bus_number;
40 return pci_mmcfg_virt[cfg_num].virt;
41 }
42 }
43
44 /* Fall back to type 0 */
45 return NULL;
46 }
47
pci_dev_base(unsigned int seg,unsigned int bus,unsigned int devfn)48 static char __iomem *pci_dev_base(unsigned int seg, unsigned int bus, unsigned int devfn)
49 {
50 char __iomem *addr;
51
52 addr = get_virt(seg, &bus);
53 if (!addr)
54 return NULL;
55 return addr + ((bus << 20) | (devfn << 12));
56 }
57
/*
 * Read @len (1, 2 or 4) bytes from config register @reg of device
 * (@seg, @bus, @devfn) via the MMCONFIG mapping.
 *
 * Returns 0 on success with the result in *value; -EINVAL (with
 * *value set to all-ones, mimicking a master abort) on out-of-range
 * arguments, an unmapped segment/bus, or an unsupported access width.
 */
int pci_mmcfg_read(unsigned int seg, unsigned int bus,
                   unsigned int devfn, int reg, int len, u32 *value)
{
    char __iomem *addr;

    /* Also reject negative reg, which the original check let through. */
    if (unlikely(bus > 255 || devfn > 255 || reg < 0 || reg > 4095))
        goto err;

    addr = pci_dev_base(seg, bus, devfn);
    if (!addr)
        goto err;

    switch (len) {
    case 1:
        *value = mmio_config_readb(addr + reg);
        break;
    case 2:
        *value = mmio_config_readw(addr + reg);
        break;
    case 4:
        *value = mmio_config_readl(addr + reg);
        break;
    default:
        /* Previously an invalid len returned 0 with *value untouched. */
        goto err;
    }

    return 0;

 err:
    *value = -1;
    return -EINVAL;
}
87
/*
 * Write the low @len (1, 2 or 4) bytes of @value to config register @reg
 * of device (@seg, @bus, @devfn) via the MMCONFIG mapping.
 *
 * Returns 0 on success; -EINVAL on out-of-range arguments, an unmapped
 * segment/bus, or an unsupported access width.
 */
int pci_mmcfg_write(unsigned int seg, unsigned int bus,
                    unsigned int devfn, int reg, int len, u32 value)
{
    char __iomem *addr;

    /* Also reject negative reg, which the original check let through. */
    if (unlikely(bus > 255 || devfn > 255 || reg < 0 || reg > 4095))
        return -EINVAL;

    addr = pci_dev_base(seg, bus, devfn);
    if (!addr)
        return -EINVAL;

    switch (len) {
    case 1:
        mmio_config_writeb(addr + reg, value);
        break;
    case 2:
        mmio_config_writew(addr + reg, value);
        break;
    case 4:
        mmio_config_writel(addr + reg, value);
        break;
    default:
        /* Previously an invalid len was silently reported as success. */
        return -EINVAL;
    }

    return 0;
}
115
mcfg_ioremap(const struct acpi_mcfg_allocation * cfg,unsigned long idx,unsigned int prot)116 static void __iomem *mcfg_ioremap(const struct acpi_mcfg_allocation *cfg,
117 unsigned long idx, unsigned int prot)
118 {
119 unsigned long virt, size;
120
121 virt = PCI_MCFG_VIRT_START + (idx << mmcfg_pci_segment_shift) +
122 (cfg->start_bus_number << 20);
123 size = (cfg->end_bus_number - cfg->start_bus_number + 1) << 20;
124 if (virt + size < virt || virt + size > PCI_MCFG_VIRT_END)
125 return NULL;
126
127 if (map_pages_to_xen(virt,
128 mfn_add(maddr_to_mfn(cfg->address),
129 (cfg->start_bus_number << (20 - PAGE_SHIFT))),
130 PFN_DOWN(size), prot))
131 return NULL;
132
133 return (void __iomem *) virt;
134 }
135
pci_mmcfg_arch_enable(unsigned int idx)136 int pci_mmcfg_arch_enable(unsigned int idx)
137 {
138 const typeof(pci_mmcfg_config[0]) *cfg = pci_mmcfg_virt[idx].cfg;
139 unsigned long start_mfn, end_mfn;
140
141 if (pci_mmcfg_virt[idx].virt)
142 return 0;
143 pci_mmcfg_virt[idx].virt = mcfg_ioremap(cfg, idx, PAGE_HYPERVISOR_UC);
144 if (!pci_mmcfg_virt[idx].virt) {
145 printk(KERN_ERR "PCI: Cannot map MCFG aperture for segment %04x\n",
146 cfg->pci_segment);
147 return -ENOMEM;
148 }
149 printk(KERN_INFO "PCI: Using MCFG for segment %04x bus %02x-%02x\n",
150 cfg->pci_segment, cfg->start_bus_number, cfg->end_bus_number);
151
152 start_mfn = PFN_DOWN(cfg->address) + PCI_BDF(cfg->start_bus_number, 0, 0);
153 end_mfn = PFN_DOWN(cfg->address) + PCI_BDF(cfg->end_bus_number, ~0, ~0);
154 if ( rangeset_add_range(mmio_ro_ranges, start_mfn, end_mfn) )
155 printk(XENLOG_ERR
156 "%04x:%02x-%02x: could not mark MCFG (mfns %lx-%lx) read-only\n",
157 cfg->pci_segment, cfg->start_bus_number, cfg->end_bus_number,
158 start_mfn, end_mfn);
159
160 return 0;
161 }
162
pci_mmcfg_arch_disable(unsigned int idx)163 void pci_mmcfg_arch_disable(unsigned int idx)
164 {
165 const typeof(pci_mmcfg_config[0]) *cfg = pci_mmcfg_virt[idx].cfg;
166
167 pci_mmcfg_virt[idx].virt = NULL;
168 /*
169 * Don't use destroy_xen_mappings() here, or make sure that at least
170 * the necessary L4 entries get populated (so that they get properly
171 * propagated to guest domains' page tables).
172 */
173 mcfg_ioremap(cfg, idx, 0);
174 printk(KERN_WARNING "PCI: Not using MCFG for segment %04x bus %02x-%02x\n",
175 cfg->pci_segment, cfg->start_bus_number, cfg->end_bus_number);
176 }
177
pci_mmcfg_decode(unsigned long mfn,unsigned int * seg,unsigned int * bdf)178 bool_t pci_mmcfg_decode(unsigned long mfn, unsigned int *seg,
179 unsigned int *bdf)
180 {
181 unsigned int idx;
182
183 for (idx = 0; idx < pci_mmcfg_config_num; ++idx) {
184 const struct acpi_mcfg_allocation *cfg = pci_mmcfg_virt[idx].cfg;
185
186 if (pci_mmcfg_virt[idx].virt &&
187 mfn >= PFN_DOWN(cfg->address) + PCI_BDF(cfg->start_bus_number,
188 0, 0) &&
189 mfn <= PFN_DOWN(cfg->address) + PCI_BDF(cfg->end_bus_number,
190 ~0, ~0)) {
191 *seg = cfg->pci_segment;
192 *bdf = mfn - PFN_DOWN(cfg->address);
193 return 1;
194 }
195 }
196
197 return 0;
198 }
199
pci_ro_mmcfg_decode(unsigned long mfn,unsigned int * seg,unsigned int * bdf)200 bool_t pci_ro_mmcfg_decode(unsigned long mfn, unsigned int *seg,
201 unsigned int *bdf)
202 {
203 const unsigned long *ro_map;
204
205 return pci_mmcfg_decode(mfn, seg, bdf) &&
206 ((ro_map = pci_get_ro_map(*seg)) == NULL ||
207 !test_bit(*bdf, ro_map));
208 }
209
pci_mmcfg_arch_init(void)210 int __init pci_mmcfg_arch_init(void)
211 {
212 int i;
213
214 if (pci_mmcfg_virt)
215 return 0;
216
217 pci_mmcfg_virt = xzalloc_array(struct mmcfg_virt, pci_mmcfg_config_num);
218 if (pci_mmcfg_virt == NULL) {
219 printk(KERN_ERR "PCI: Can not allocate memory for mmconfig structures\n");
220 pci_mmcfg_config_num = 0;
221 return 0;
222 }
223
224 for (i = 0; i < pci_mmcfg_config_num; ++i) {
225 pci_mmcfg_virt[i].cfg = &pci_mmcfg_config[i];
226 while (pci_mmcfg_config[i].end_bus_number >> mmcfg_pci_segment_shift)
227 ++mmcfg_pci_segment_shift;
228 }
229 mmcfg_pci_segment_shift += 20;
230 return 1;
231 }
232