/*
 * Copyright (c) 2006, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; If not, see <http://www.gnu.org/licenses/>.
 *
 * Copyright (C) Allen Kay <allen.m.kay@intel.com>
 */

#ifndef _IOMMU_H_
#define _IOMMU_H_

#include <xen/init.h>
#include <xen/page-defs.h>
#include <xen/spinlock.h>
#include <xen/pci.h>
#include <xen/typesafe.h>
#include <xen/mm.h>
#include <public/hvm/ioreq.h>
#include <public/domctl.h>
#include <asm/device.h>

TYPE_SAFE(uint64_t, dfn);
#define PRI_dfn     PRIx64
#define INVALID_DFN _dfn(~0ULL)

#ifndef dfn_t
#define dfn_t /* Grep fodder: dfn_t, _dfn() and dfn_x() are defined above */
#define _dfn
#define dfn_x
#undef dfn_t
#undef _dfn
#undef dfn_x
#endif

static inline dfn_t dfn_add(dfn_t dfn, unsigned long i)
{
    return _dfn(dfn_x(dfn) + i);
}

static inline bool_t dfn_eq(dfn_t x, dfn_t y)
{
    return dfn_x(x) == dfn_x(y);
}
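
/*
 * A minimal usage sketch of the typesafe helpers above (illustrative only;
 * 'dfn' and 'nr' are assumed to be in scope):
 *
 *     unsigned long i;
 *
 *     for ( i = 0; i < nr; i++ )
 *     {
 *         dfn_t cur = dfn_add(dfn, i);
 *
 *         if ( dfn_eq(cur, INVALID_DFN) )
 *             break;
 *         ...
 *     }
 */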

extern bool_t iommu_enable, iommu_enabled;
extern bool force_iommu, iommu_quarantine, iommu_verbose;

#ifdef CONFIG_X86
extern enum __packed iommu_intremap {
    /*
     * In order to allow traditional boolean uses of the iommu_intremap
     * variable, the "off" value has to come first (yielding a value of zero).
     */
    iommu_intremap_off,
    /*
     * Interrupt remapping enabled, but only able to generate interrupts
     * with an 8-bit APIC ID.
     */
    iommu_intremap_restricted,
    iommu_intremap_full,
} iommu_intremap;
extern bool iommu_igfx, iommu_qinval, iommu_snoop;
#else
# define iommu_intremap false
# define iommu_snoop false
#endif

#if defined(CONFIG_X86) && defined(CONFIG_HVM)
extern bool iommu_intpost;
#else
# define iommu_intpost false
#endif

#if defined(CONFIG_IOMMU_FORCE_PT_SHARE)
#define iommu_hap_pt_share true
#elif defined(CONFIG_HVM)
extern bool iommu_hap_pt_share;
#else
#define iommu_hap_pt_share false
#endif

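/*
 * clear_iommu_hap_pt_share() below relies on the tri-state above: when
 * iommu_hap_pt_share is a variable (CONFIG_HVM without
 * CONFIG_IOMMU_FORCE_PT_SHARE) it can be cleared at runtime; when it is the
 * constant true (CONFIG_IOMMU_FORCE_PT_SHARE) clearing it is a logic error;
 * when it is the constant false there is nothing to do.
 */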
static inline void clear_iommu_hap_pt_share(void)
{
#ifndef iommu_hap_pt_share
    iommu_hap_pt_share = false;
#elif iommu_hap_pt_share
    ASSERT_UNREACHABLE();
#endif
}

extern bool_t iommu_debug;
extern bool_t amd_iommu_perdev_intremap;

extern bool iommu_hwdom_strict, iommu_hwdom_passthrough, iommu_hwdom_inclusive;
extern int8_t iommu_hwdom_reserved;

extern unsigned int iommu_dev_iotlb_timeout;

int iommu_setup(void);
int iommu_hardware_setup(void);

int iommu_domain_init(struct domain *d, unsigned int opts);
void iommu_hwdom_init(struct domain *d);
void iommu_domain_destroy(struct domain *d);

void arch_iommu_domain_destroy(struct domain *d);
int arch_iommu_domain_init(struct domain *d);
void arch_iommu_check_autotranslated_hwdom(struct domain *d);
void arch_iommu_hwdom_init(struct domain *d);

/*
 * The following flags are passed to map operations and passed by lookup
 * operations.
 */
#define _IOMMUF_readable 0
#define IOMMUF_readable  (1u<<_IOMMUF_readable)
#define _IOMMUF_writable 1
#define IOMMUF_writable  (1u<<_IOMMUF_writable)

/*
 * flush_flags:
 *
 * IOMMU_FLUSHF_added    -> A new 'present' PTE has been inserted.
 * IOMMU_FLUSHF_modified -> An existing 'present' PTE has been modified
 *                          (whether the new PTE value is 'present' or not).
 *
 * These flags are passed back from map/unmap operations and passed into
 * flush operations.
 */
enum
{
    _IOMMU_FLUSHF_added,
    _IOMMU_FLUSHF_modified,
};
#define IOMMU_FLUSHF_added    (1u << _IOMMU_FLUSHF_added)
#define IOMMU_FLUSHF_modified (1u << _IOMMU_FLUSHF_modified)

int __must_check iommu_map(struct domain *d, dfn_t dfn, mfn_t mfn,
                           unsigned int page_order, unsigned int flags,
                           unsigned int *flush_flags);
int __must_check iommu_unmap(struct domain *d, dfn_t dfn,
                             unsigned int page_order,
                             unsigned int *flush_flags);

int __must_check iommu_legacy_map(struct domain *d, dfn_t dfn, mfn_t mfn,
                                  unsigned int page_order,
                                  unsigned int flags);
int __must_check iommu_legacy_unmap(struct domain *d, dfn_t dfn,
                                    unsigned int page_order);

int __must_check iommu_lookup_page(struct domain *d, dfn_t dfn, mfn_t *mfn,
                                   unsigned int *flags);

int __must_check iommu_iotlb_flush(struct domain *d, dfn_t dfn,
                                   unsigned int page_count,
                                   unsigned int flush_flags);
int __must_check iommu_iotlb_flush_all(struct domain *d,
                                       unsigned int flush_flags);

enum iommu_feature
{
    IOMMU_FEAT_COHERENT_WALK,
    IOMMU_FEAT_count
};

bool_t iommu_has_feature(struct domain *d, enum iommu_feature feature);

#ifdef CONFIG_HAS_PCI
struct pirq;
int hvm_do_IRQ_dpci(struct domain *, struct pirq *);
int pt_irq_create_bind(struct domain *, const struct xen_domctl_bind_pt_irq *);
int pt_irq_destroy_bind(struct domain *, const struct xen_domctl_bind_pt_irq *);

void hvm_dpci_isairq_eoi(struct domain *d, unsigned int isairq);
struct hvm_irq_dpci *domain_get_irq_dpci(const struct domain *);
void free_hvm_irq_dpci(struct hvm_irq_dpci *dpci);
#ifdef CONFIG_HVM
bool pt_irq_need_timer(uint32_t flags);
#else
static inline bool pt_irq_need_timer(unsigned int flags) { return false; }
#endif

struct msi_desc;
struct msi_msg;

int iommu_update_ire_from_msi(struct msi_desc *msi_desc, struct msi_msg *msg);
void iommu_read_msi_from_ire(struct msi_desc *msi_desc, struct msi_msg *msg);

#define PT_IRQ_TIME_OUT MILLISECS(8)
#endif /* HAS_PCI */

#ifdef CONFIG_HAS_DEVICE_TREE
#include <xen/device_tree.h>

int iommu_assign_dt_device(struct domain *d, struct dt_device_node *dev);
int iommu_deassign_dt_device(struct domain *d, struct dt_device_node *dev);
int iommu_dt_domain_init(struct domain *d);
int iommu_release_dt_devices(struct domain *d);

/*
 * Helper to add a master device to the IOMMU using the generic IOMMU DT
 * bindings.
 *
 * Return values:
 *  0 : device is protected by an IOMMU
 * <0 : device is not protected by an IOMMU, but must be (error condition)
 * >0 : device doesn't need to be protected by an IOMMU
 *      (IOMMU is not enabled/present or device is not connected to it).
 */
int iommu_add_dt_device(struct dt_device_node *np);
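
/*
 * A caller-side sketch of handling those return values (illustrative only;
 * 'np' is assumed to be in scope):
 *
 *     int rc = iommu_add_dt_device(np);
 *
 *     if ( rc < 0 )
 *         return rc;  // Device must be protected but cannot be.
 *     if ( rc > 0 )
 *         return 0;   // No IOMMU needed for this device.
 *     // rc == 0: device is protected; proceed with assignment.
 */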

int iommu_do_dt_domctl(struct xen_domctl *, struct domain *,
                       XEN_GUEST_HANDLE_PARAM(xen_domctl_t));

#endif /* HAS_DEVICE_TREE */

struct page_info;

/*
 * Any non-zero value returned from a callback of this type will cause the
 * function that was handed the callback to terminate its iteration.
 * Assigning meaning to these non-zero values is left to the top level
 * caller / callback pair.
 */
typedef int iommu_grdm_t(xen_pfn_t start, xen_ulong_t nr, u32 id, void *ctxt);

struct iommu_ops {
    int (*init)(struct domain *d);
    void (*hwdom_init)(struct domain *d);
    int (*quarantine_init)(struct domain *d);
    int (*add_device)(u8 devfn, device_t *dev);
    int (*enable_device)(device_t *dev);
    int (*remove_device)(u8 devfn, device_t *dev);
    int (*assign_device)(struct domain *, u8 devfn, device_t *dev, u32 flag);
    int (*reassign_device)(struct domain *s, struct domain *t,
                           u8 devfn, device_t *dev);
#ifdef CONFIG_HAS_PCI
    int (*get_device_group_id)(u16 seg, u8 bus, u8 devfn);
    int (*update_ire_from_msi)(struct msi_desc *msi_desc, struct msi_msg *msg);
    void (*read_msi_from_ire)(struct msi_desc *msi_desc, struct msi_msg *msg);
#endif /* HAS_PCI */

    void (*teardown)(struct domain *d);

    /*
     * This block of operations must be appropriately locked against each
     * other by the caller in order to have meaningful results.
     */
    int __must_check (*map_page)(struct domain *d, dfn_t dfn, mfn_t mfn,
                                 unsigned int flags,
                                 unsigned int *flush_flags);
    int __must_check (*unmap_page)(struct domain *d, dfn_t dfn,
                                   unsigned int *flush_flags);
    int __must_check (*lookup_page)(struct domain *d, dfn_t dfn, mfn_t *mfn,
                                    unsigned int *flags);

    void (*free_page_table)(struct page_info *);

#ifdef CONFIG_X86
    int (*enable_x2apic)(void);
    void (*disable_x2apic)(void);

    void (*update_ire_from_apic)(unsigned int apic, unsigned int reg,
                                 unsigned int value);
    unsigned int (*read_apic_from_ire)(unsigned int apic, unsigned int reg);

    int (*setup_hpet_msi)(struct msi_desc *);

    int (*adjust_irq_affinities)(void);
    void (*sync_cache)(const void *addr, unsigned int size);
#endif /* CONFIG_X86 */

    int __must_check (*suspend)(void);
    void (*resume)(void);
    void (*share_p2m)(struct domain *d);
    void (*crash_shutdown)(void);
    int __must_check (*iotlb_flush)(struct domain *d, dfn_t dfn,
                                    unsigned int page_count,
                                    unsigned int flush_flags);
    int __must_check (*iotlb_flush_all)(struct domain *d);
    int (*get_reserved_device_memory)(iommu_grdm_t *, void *);
    void (*dump_p2m_table)(struct domain *d);

#ifdef CONFIG_HAS_DEVICE_TREE
    /*
     * All IOMMU drivers which support the generic IOMMU DT bindings should
     * use this callback. It is the framework's way of providing the driver
     * with the DT IOMMU specifier which describes the IOMMU master
     * interfaces of that device (device IDs, etc).
     */
    int (*dt_xlate)(device_t *dev, const struct dt_phandle_args *args);
#endif
};

#include <asm/iommu.h>

#ifndef iommu_call
# define iommu_call(ops, fn, args...) ((ops)->fn(args))
# define iommu_vcall iommu_call
#endif
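
/*
 * A dispatch sketch (illustrative only): calls into the per-platform ops
 * table go through iommu_call()/iommu_vcall(), so an architecture may
 * replace the plain indirect call above with its own mechanism. Assuming
 * 'd', 'dfn' and 'mfn' are in scope:
 *
 *     const struct domain_iommu *hd = dom_iommu(d);
 *     unsigned int flush_flags = 0;
 *     int rc = iommu_call(hd->platform_ops, map_page, d, dfn, mfn,
 *                         IOMMUF_readable, &flush_flags);
 */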

struct domain_iommu {
    struct arch_iommu arch;

    /* iommu_ops */
    const struct iommu_ops *platform_ops;

#ifdef CONFIG_HAS_DEVICE_TREE
    /* List of DT devices assigned to this domain */
    struct list_head dt_devices;
#endif

#ifdef CONFIG_NUMA
    /* NUMA node to do IOMMU related allocations against. */
    nodeid_t node;
#endif

    /* Features supported by the IOMMU */
    DECLARE_BITMAP(features, IOMMU_FEAT_count);

    /* Does the guest share HAP mapping with the IOMMU? */
    bool hap_pt_share;

    /*
     * Does the guest require mappings to be synchronized, to maintain
     * the default dfn == pfn map? (See comment on dfn at the top of
     * include/xen/mm.h). Note that hap_pt_share == false does not
     * necessarily imply this is true.
     */
    bool need_sync;
};

#define dom_iommu(d)              (&(d)->iommu)
#define iommu_set_feature(d, f)   set_bit(f, dom_iommu(d)->features)
#define iommu_clear_feature(d, f) clear_bit(f, dom_iommu(d)->features)

/* Are we using the domain P2M table as its IOMMU pagetable? */
#define iommu_use_hap_pt(d)       (dom_iommu(d)->hap_pt_share)

/* Does the IOMMU pagetable need to be kept synchronized with the P2M? */
#ifdef CONFIG_HAS_PASSTHROUGH
#define need_iommu_pt_sync(d)     (dom_iommu(d)->need_sync)
#else
#define need_iommu_pt_sync(d)     ({ (void)(d); false; })
#endif
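
/*
 * A sketch of how callers typically use the two predicates above
 * (illustrative only):
 *
 *     if ( iommu_use_hap_pt(d) )
 *         ...;  // P2M is shared with the IOMMU; nothing extra to map.
 *     else if ( need_iommu_pt_sync(d) )
 *         ...;  // Mirror the P2M change via iommu_map()/iommu_unmap().
 */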

int __must_check iommu_suspend(void);
void iommu_resume(void);
void iommu_crash_shutdown(void);
int iommu_get_reserved_device_memory(iommu_grdm_t *, void *);

void iommu_share_p2m_table(struct domain *d);

#ifdef CONFIG_HAS_PCI
int iommu_do_pci_domctl(struct xen_domctl *, struct domain *d,
                        XEN_GUEST_HANDLE_PARAM(xen_domctl_t));
#endif

int iommu_do_domctl(struct xen_domctl *, struct domain *d,
                    XEN_GUEST_HANDLE_PARAM(xen_domctl_t));

void iommu_dev_iotlb_flush_timeout(struct domain *d, struct pci_dev *pdev);

/*
 * The purpose of the optional per-CPU iommu_dont_flush_iotlb flag is to
 * avoid unnecessary IOTLB flushes in the low level IOMMU code.
 *
 * iommu_map_page/iommu_unmap_page must flush the IOTLB, but sometimes
 * this operation can be really expensive. This flag will be set by the
 * caller to notify the low level IOMMU code to avoid the IOTLB flushes;
 * iommu_iotlb_flush/iommu_iotlb_flush_all will be called explicitly by
 * the caller instead.
 */
DECLARE_PER_CPU(bool_t, iommu_dont_flush_iotlb);
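
/*
 * A sketch of the intended usage pattern (illustrative only; 'd' and the
 * batch of mappings are assumed to be in scope):
 *
 *     this_cpu(iommu_dont_flush_iotlb) = 1;
 *     ... batch of iommu_legacy_map() / iommu_legacy_unmap() calls ...
 *     this_cpu(iommu_dont_flush_iotlb) = 0;
 *     ... then flush once, e.g. iommu_iotlb_flush_all(d, flush_flags) ...
 */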

extern struct spinlock iommu_pt_cleanup_lock;
extern struct page_list_head iommu_pt_cleanup_list;

#endif /* _IOMMU_H_ */

/*
 * Local variables:
 * mode: C
 * c-file-style: "BSD"
 * c-basic-offset: 4
 * indent-tabs-mode: nil
 * End:
 */