1 /*
2  * Copyright (c) 2006, Intel Corporation.
3  *
4  * This program is free software; you can redistribute it and/or modify it
5  * under the terms and conditions of the GNU General Public License,
6  * version 2, as published by the Free Software Foundation.
7  *
8  * This program is distributed in the hope it will be useful, but WITHOUT
9  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
11  * more details.
12  *
13  * You should have received a copy of the GNU General Public License along with
14  * this program; If not, see <http://www.gnu.org/licenses/>.
15  *
16  * Copyright (C) Ashok Raj <ashok.raj@intel.com>
17  */
18 
19 #ifndef _INTEL_IOMMU_H_
20 #define _INTEL_IOMMU_H_
21 
22 #include <xen/iommu.h>
23 #include <asm/msi.h>
24 
25 /*
26  * Intel IOMMU register specification per version 1.0 public spec.
27  */
28 
/* MMIO register offsets from a remapping unit's register base. */
#define    DMAR_VER_REG    0x0    /* Arch version supported by this IOMMU */
#define    DMAR_CAP_REG    0x8    /* Hardware supported capabilities */
#define    DMAR_ECAP_REG    0x10    /* Extended capabilities supported */
#define    DMAR_GCMD_REG    0x18    /* Global command register */
#define    DMAR_GSTS_REG    0x1c    /* Global status register */
#define    DMAR_RTADDR_REG    0x20    /* Root entry table */
#define    DMAR_CCMD_REG    0x28    /* Context command reg */
#define    DMAR_FSTS_REG    0x34    /* Fault Status register */
#define    DMAR_FECTL_REG    0x38    /* Fault control register */
#define    DMAR_FEDATA_REG    0x3c    /* Fault event interrupt data register */
#define    DMAR_FEADDR_REG    0x40    /* Fault event interrupt addr register */
#define    DMAR_FEUADDR_REG 0x44    /* Upper address register */
#define    DMAR_AFLOG_REG    0x58    /* Advanced Fault control */
#define    DMAR_PMEN_REG    0x64    /* Enable Protected Memory Region */
#define    DMAR_PLMBASE_REG 0x68    /* PMRR Low addr */
#define    DMAR_PLMLIMIT_REG 0x6c    /* PMRR low limit */
#define    DMAR_PHMBASE_REG 0x70    /* pmrr high base addr */
#define    DMAR_PHMLIMIT_REG 0x78    /* pmrr high limit */
#define    DMAR_IQH_REG    0x80    /* invalidation queue head */
#define    DMAR_IQT_REG    0x88    /* invalidation queue tail */
#define    DMAR_IQA_REG    0x90    /* invalidation queue addr */
#define    DMAR_IRTA_REG   0xB8    /* intr remap */

#define OFFSET_STRIDE        (9)
/* Register accessors: 'dmar' is the mapped register base (vtd_iommu.reg),
 * 'reg' one of the DMAR_*_REG offsets above. */
#define dmar_readl(dmar, reg) readl((dmar) + (reg))
#define dmar_readq(dmar, reg) readq((dmar) + (reg))
#define dmar_writel(dmar, reg, val) writel(val, (dmar) + (reg))
#define dmar_writeq(dmar, reg, val) writeq(val, (dmar) + (reg))

/* VER_REG fields: major version in bits 7:4, minor in bits 3:0. */
#define VER_MAJOR(v)        (((v) & 0xf0) >> 4)
#define VER_MINOR(v)        ((v) & 0x0f)
60 
61 /*
62  * Decoding Capability Register
63  */
64 #define cap_intr_post(c)       (((c) >> 59) & 1)
65 #define cap_read_drain(c)      (((c) >> 55) & 1)
66 #define cap_write_drain(c)     (((c) >> 54) & 1)
67 #define cap_max_amask_val(c)   (((c) >> 48) & 0x3f)
68 #define cap_num_fault_regs(c)  ((((c) >> 40) & 0xff) + 1)
69 #define cap_pgsel_inv(c)       (((c) >> 39) & 1)
70 
71 #define cap_super_page_val(c)  (((c) >> 34) & 0xf)
72 #define cap_super_offset(c)    (((find_first_bit(&cap_super_page_val(c), 4)) \
73                                  * OFFSET_STRIDE) + 21)
74 #define cap_sps_2mb(c)         ((c >> 34) & 1)
75 #define cap_sps_1gb(c)         ((c >> 35) & 1)
76 #define cap_sps_512gb(c)       ((c >> 36) & 1)
77 #define cap_sps_1tb(c)         ((c >> 37) & 1)
78 
79 #define cap_fault_reg_offset(c)    ((((c) >> 24) & 0x3ff) * 16)
80 
81 #define cap_isoch(c)        (((c) >> 23) & 1)
82 #define cap_qos(c)        (((c) >> 22) & 1)
83 #define cap_mgaw(c)        ((((c) >> 16) & 0x3f) + 1)
84 #define cap_sagaw(c)        (((c) >> 8) & 0x1f)
85 #define cap_caching_mode(c)    (((c) >> 7) & 1)
86 #define cap_phmr(c)        (((c) >> 6) & 1)
87 #define cap_plmr(c)        (((c) >> 5) & 1)
88 #define cap_rwbf(c)        (((c) >> 4) & 1)
89 #define cap_afl(c)        (((c) >> 3) & 1)
90 #define cap_ndoms(c)        (1 << (4 + 2 * ((c) & 0x7)))
91 
92 /*
93  * Extended Capability Register
94  */
95 
96 #define ecap_niotlb_iunits(e)    ((((e) >> 24) & 0xff) + 1)
97 #define ecap_iotlb_offset(e)     ((((e) >> 8) & 0x3ff) * 16)
98 #define ecap_coherent(e)         ((e >> 0) & 0x1)
99 #define ecap_queued_inval(e)     ((e >> 1) & 0x1)
100 #define ecap_dev_iotlb(e)        ((e >> 2) & 0x1)
101 #define ecap_intr_remap(e)       ((e >> 3) & 0x1)
102 #define ecap_eim(e)              ((e >> 4) & 0x1)
103 #define ecap_cache_hints(e)      ((e >> 5) & 0x1)
104 #define ecap_pass_thru(e)        ((e >> 6) & 0x1)
105 #define ecap_snp_ctl(e)          ((e >> 7) & 0x1)
106 
/* IOTLB_REG: invalidation-request granularities and field encoders.
 * Macro arguments are parenthesized so compound expressions are safe. */
#define DMA_TLB_FLUSH_GRANU_OFFSET  60
#define DMA_TLB_GLOBAL_FLUSH (((u64)1) << 60)
#define DMA_TLB_DSI_FLUSH (((u64)2) << 60)
#define DMA_TLB_PSI_FLUSH (((u64)3) << 60)
#define DMA_TLB_IIRG(x) (((x) >> 60) & 7)
#define DMA_TLB_IAIG(val) (((val) >> 57) & 7)
#define DMA_TLB_DID(x) (((u64)((x) & 0xffff)) << 32)

#define DMA_TLB_READ_DRAIN (((u64)1) << 49)
#define DMA_TLB_WRITE_DRAIN (((u64)1) << 48)
#define DMA_TLB_IVT (((u64)1) << 63)

/* IVA_REG encoders: 4K-aligned invalidation address and hint bit. */
#define DMA_TLB_IVA_ADDR(x) ((((u64)(x)) >> 12) << 12)
#define DMA_TLB_IVA_HINT(x) ((((u64)(x)) & 1) << 6)
122 
/* GCMD_REG */
/* Global command bits; each has a matching status bit at the same
 * position in GSTS_REG (see DMA_GSTS_* below). */
#define DMA_GCMD_TE     (((u64)1) << 31)
#define DMA_GCMD_SRTP   (((u64)1) << 30)
#define DMA_GCMD_SFL    (((u64)1) << 29)
#define DMA_GCMD_EAFL   (((u64)1) << 28)
#define DMA_GCMD_WBF    (((u64)1) << 27)
#define DMA_GCMD_QIE    (((u64)1) << 26)
#define DMA_GCMD_IRE    (((u64)1) << 25)
#define DMA_GCMD_SIRTP  (((u64)1) << 24)
#define DMA_GCMD_CFI    (((u64)1) << 23)
133 
/* GSTS_REG: status bits mirroring the DMA_GCMD_* commands above.
 * (Spacing normalized to match the rest of this block.) */
#define DMA_GSTS_TES    (((u64)1) << 31)
#define DMA_GSTS_RTPS   (((u64)1) << 30)
#define DMA_GSTS_FLS    (((u64)1) << 29)
#define DMA_GSTS_AFLS   (((u64)1) << 28)
#define DMA_GSTS_WBFS   (((u64)1) << 27)
#define DMA_GSTS_QIES   (((u64)1) << 26)
#define DMA_GSTS_IRES   (((u64)1) << 25)
#define DMA_GSTS_SIRTPS (((u64)1) << 24)
#define DMA_GSTS_CFIS   (((u64)1) << 23)

/* PMEN_REG: protected-memory enable (EPM) and its status (PRS). */
#define DMA_PMEN_EPM    (((u32)1) << 31)
#define DMA_PMEN_PRS    (((u32)1) << 0)
148 
/* CCMD_REG: context-cache invalidation command fields.
 * Macro arguments are parenthesized so compound expressions are safe. */
#define DMA_CCMD_INVL_GRANU_OFFSET  61
#define DMA_CCMD_ICC   (((u64)1) << 63)
#define DMA_CCMD_GLOBAL_INVL (((u64)1) << 61)
#define DMA_CCMD_DOMAIN_INVL (((u64)2) << 61)
#define DMA_CCMD_DEVICE_INVL (((u64)3) << 61)
#define DMA_CCMD_FM(m) (((u64)((m) & 0x3)) << 32)
/* Extract the requested invalidation granularity from a command value. */
#define DMA_CCMD_CIRG(x) ((((u64)3) << 61) & (x))
#define DMA_CCMD_MASK_NOBIT 0
#define DMA_CCMD_MASK_1BIT 1
#define DMA_CCMD_MASK_2BIT 2
#define DMA_CCMD_MASK_3BIT 3
#define DMA_CCMD_SID(s) (((u64)((s) & 0xffff)) << 16)
#define DMA_CCMD_DID(d) ((u64)((d) & 0xffff))

/* Actual invalidation granularity reported back by hardware (bits 60:59). */
#define DMA_CCMD_CAIG_MASK(x) (((u64)(x)) & ((u64)0x3 << 59))
165 
/* FECTL_REG: fault-event interrupt mask bit. */
#define DMA_FECTL_IM (((u64)1) << 31)

/* FSTS_REG: fault status bits. */
#define DMA_FSTS_PFO ((u64)1 << 0)
#define DMA_FSTS_PPF ((u64)1 << 1)
#define DMA_FSTS_AFO ((u64)1 << 2)
#define DMA_FSTS_APF ((u64)1 << 3)
#define DMA_FSTS_IQE ((u64)1 << 4)
#define DMA_FSTS_ICE ((u64)1 << 5)
#define DMA_FSTS_ITE ((u64)1 << 6)
/* Parenthesized so the mask composes correctly inside larger expressions
 * (the unparenthesized OR chain broke under '&' and similar operators). */
#define DMA_FSTS_FAULTS    (DMA_FSTS_PFO | DMA_FSTS_PPF | DMA_FSTS_AFO | \
                            DMA_FSTS_APF | DMA_FSTS_IQE | DMA_FSTS_ICE | \
                            DMA_FSTS_ITE)
#define dma_fsts_fault_record_index(s) (((s) >> 8) & 0xff)

/* FRCD_REG, 32 bits access */
#define DMA_FRCD_F (((u64)1) << 31)
#define dma_frcd_type(d) (((d) >> 30) & 1)
#define dma_frcd_fault_reason(c) ((c) & 0xff)
#define dma_frcd_source_id(c) ((c) & 0xffff)
#define dma_frcd_page_addr(d) ((d) & (((u64)-1) << 12)) /* low 64 bit */
186 
187 /*
188  * 0: Present
189  * 1-11: Reserved
190  * 12-63: Context Ptr (12 - (haw-1))
191  * 64-127: Reserved
192  */
193 struct root_entry {
194     u64    val;
195     u64    rsvd1;
196 };
197 #define root_present(root)    ((root).val & 1)
198 #define set_root_present(root) do {(root).val |= 1;} while(0)
199 #define get_context_addr(root) ((root).val & PAGE_MASK_4K)
200 #define set_root_value(root, value) \
201     do {(root).val |= ((value) & PAGE_MASK_4K);} while(0)
202 
203 struct context_entry {
204     u64 lo;
205     u64 hi;
206 };
207 #define ROOT_ENTRY_NR (PAGE_SIZE_4K/sizeof(struct root_entry))
208 #define context_present(c) ((c).lo & 1)
209 #define context_fault_disable(c) (((c).lo >> 1) & 1)
210 #define context_translation_type(c) (((c).lo >> 2) & 3)
211 #define context_address_root(c) ((c).lo & PAGE_MASK_4K)
212 #define context_address_width(c) ((c).hi &  7)
213 #define context_domain_id(c) (((c).hi >> 8) & ((1 << 16) - 1))
214 
215 #define context_set_present(c) do {(c).lo |= 1;} while(0)
216 #define context_clear_present(c) do {(c).lo &= ~1;} while(0)
217 #define context_set_fault_enable(c) \
218     do {(c).lo &= (((u64)-1) << 2) | 1;} while(0)
219 
220 #define context_set_translation_type(c, val) do { \
221         (c).lo &= (((u64)-1) << 4) | 3; \
222         (c).lo |= (val & 3) << 2; \
223     } while(0)
224 #define CONTEXT_TT_MULTI_LEVEL 0
225 #define CONTEXT_TT_DEV_IOTLB   1
226 #define CONTEXT_TT_PASS_THRU   2
227 
228 #define context_set_address_root(c, val) \
229     do {(c).lo &= 0xfff; (c).lo |= (val) & PAGE_MASK_4K ;} while(0)
230 #define context_set_address_width(c, val) \
231     do {(c).hi &= 0xfffffff8; (c).hi |= (val) & 7;} while(0)
232 #define context_clear_entry(c) do {(c).lo = 0; (c).hi = 0;} while(0)
233 
/* page table handling: 9 address bits are translated per table level,
 * starting at bit 12.  AGAW value a covers (30 + 9a) address bits with
 * (a + 2) levels.  Arguments are parenthesized so compound expressions
 * may be passed safely. */
#define LEVEL_STRIDE       (9)
#define LEVEL_MASK         ((1 << LEVEL_STRIDE) - 1)
#define PTE_NUM            (1 << LEVEL_STRIDE)
#define level_to_agaw(val) ((val) - 2)
#define agaw_to_level(val) ((val) + 2)
#define agaw_to_width(val) (30 + (val) * LEVEL_STRIDE)
#define width_to_agaw(w)   (((w) - 30) / LEVEL_STRIDE)
#define level_to_offset_bits(l) (12 + ((l) - 1) * LEVEL_STRIDE)
#define address_level_offset(addr, level) \
            (((addr) >> level_to_offset_bits(level)) & LEVEL_MASK)
#define offset_level_address(offset, level) \
            ((u64)(offset) << level_to_offset_bits(level))
#define level_mask(l) (((u64)(-1)) << level_to_offset_bits(l))
/* Widened to u64: a plain int 1 shifted by >= 31 bits (levels >= 4,
 * offset bits >= 39) is undefined behaviour. */
#define level_size(l) ((u64)1 << level_to_offset_bits(l))
#define align_to_level(addr, l) (((addr) + level_size(l) - 1) & level_mask(l))
250 
251 /*
252  * 0: readable
253  * 1: writable
254  * 2-6: reserved
255  * 7: super page
256  * 8-11: available
 * 12-63: Host physical address
258  */
/* Second-level (DMA) page-table entry; layout documented above. */
struct dma_pte {
    u64 val;
};
#define DMA_PTE_READ (1)
#define DMA_PTE_WRITE (2)
#define DMA_PTE_PROT (DMA_PTE_READ | DMA_PTE_WRITE)
#define DMA_PTE_SP   (1 << 7)    /* super page */
#define DMA_PTE_SNP  (1 << 11)   /* snoop */
#define dma_clear_pte(p)    do {(p).val = 0;} while(0)
#define dma_set_pte_readable(p) do {(p).val |= DMA_PTE_READ;} while(0)
#define dma_set_pte_writable(p) do {(p).val |= DMA_PTE_WRITE;} while(0)
#define dma_set_pte_superpage(p) do {(p).val |= DMA_PTE_SP;} while(0)
#define dma_set_pte_snp(p)  do {(p).val |= DMA_PTE_SNP;} while(0)
/* Replaces (rather than ORs) the R/W permission bits. */
#define dma_set_pte_prot(p, prot) do { \
        (p).val = ((p).val & ~DMA_PTE_PROT) | ((prot) & DMA_PTE_PROT); \
    } while (0)
#define dma_pte_prot(p) ((p).val & DMA_PTE_PROT)
#define dma_pte_read(p) (dma_pte_prot(p) & DMA_PTE_READ)
#define dma_pte_write(p) (dma_pte_prot(p) & DMA_PTE_WRITE)
#define dma_pte_addr(p) ((p).val & PADDR_MASK & PAGE_MASK_4K)
/* NOTE(review): ORs the address in without clearing the old one —
 * assumes the address bits are zero beforehand. */
#define dma_set_pte_addr(p, addr) do {\
            (p).val |= ((addr) & PAGE_MASK_4K); } while (0)
/* An entry is present iff either the read or write bit is set. */
#define dma_pte_present(p) (((p).val & DMA_PTE_PROT) != 0)
#define dma_pte_superpage(p) (((p).val & DMA_PTE_SP) != 0)
283 
/* interrupt remap entry (IRTE, 128 bits) */
struct iremap_entry {
  union {
    /* Raw 128-bit view of the whole entry. */
    __uint128_t val;
    /* Raw low/high 64-bit halves. */
    struct { u64 lo, hi; };
    /* Remapped-interrupt format. */
    struct {
        u16 p       : 1,   /* present (see iremap_present()) */
            fpd     : 1,   /* fault processing disable
                            * (see iremap_fault_disable()) */
            dm      : 1,
            rh      : 1,
            tm      : 1,
            dlm     : 3,
            avail   : 4,
            res_1   : 3,
            im      : 1;
        u8  vector;
        u8  res_2;
        u32 dst;
        u16 sid;           /* source (requester) id */
        u16 sq      : 2,
            svt     : 2,
            res_3   : 12;
        u32 res_4;
    } remap;
    /* Posted-interrupt format. */
    struct {
        u16 p       : 1,
            fpd     : 1,
            res_1   : 6,
            avail   : 4,
            res_2   : 2,
            urg     : 1,
            im      : 1;
        u8  vector;
        u8  res_3;
        u32 res_4   : 6,
            pda_l   : 26;  /* descriptor address, low part (PDA_LOW_BIT) */
        u16 sid;
        u16 sq      : 2,
            svt     : 2,
            res_5   : 12;
        u32 pda_h;         /* descriptor address, high 32 bits */
    } post;
  };
};
328 
329 /*
330  * Posted-interrupt descriptor address is 64 bits with 64-byte aligned, only
331  * the upper 26 bits of lest significiant 32 bits is available.
332  */
333 #define PDA_LOW_BIT    26
334 
/* Max intr remapping table page order is 8, as max number of IRTEs is 64K */
#define IREMAP_PAGE_ORDER  8

/*
 * VTd engine handles 4K page, while CPU may have different page size on
 * different arch. E.g. 16K on IPF.
 */
#define IREMAP_ARCH_PAGE_ORDER  (IREMAP_PAGE_ORDER + PAGE_SHIFT_4K - PAGE_SHIFT)
#define IREMAP_ARCH_PAGE_NR     ( IREMAP_ARCH_PAGE_ORDER < 0 ?  \
                                1 :                             \
                                1 << IREMAP_ARCH_PAGE_ORDER )

/* Each entry is 16 bytes, so 2^8 entries per 4K page */
#define IREMAP_ENTRY_ORDER  ( PAGE_SHIFT - 4 )
/* NOTE(review): the total uses the fixed 4K-page entry order (8) rather
 * than IREMAP_ENTRY_ORDER — equivalent only when PAGE_SHIFT == 12;
 * confirm for architectures with larger pages. */
#define IREMAP_ENTRY_NR     ( 1 << ( IREMAP_PAGE_ORDER + 8 ) )

/* 'v' is an iremap_entry; bit 0 of the low half is Present, bit 1 FPD. */
#define iremap_present(v) ((v).lo & 1)
#define iremap_fault_disable(v) (((v).lo >> 1) & 1)

#define iremap_set_present(v) do {(v).lo |= 1;} while(0)
#define iremap_clear_present(v) do {(v).lo &= ~1;} while(0)
356 
357 /*
358  * Get the intr remap entry:
359  * maddr   - machine addr of the table
360  * index   - index of the entry
361  * entries - return addr of the page holding this entry, need unmap it
362  * entry   - return required entry
363  */
364 #define GET_IREMAP_ENTRY(maddr, index, entries, entry)                        \
365 do {                                                                          \
366     entries = (struct iremap_entry *)map_vtd_domain_page(                     \
367               (maddr) + (( (index) >> IREMAP_ENTRY_ORDER ) << PAGE_SHIFT ) ); \
368     entry = &entries[(index) % (1 << IREMAP_ENTRY_ORDER)];                    \
369 } while(0)
370 
/* queue invalidation entry (128-bit descriptor; see TYPE_INVAL_* below
 * for the values carried in each lo.type field) */
struct qinval_entry {
    union {
        /* Raw low/high halves. */
        struct {
            u64 lo;
            u64 hi;
        }val;
        /* Context-cache invalidate descriptor (TYPE_INVAL_CONTEXT). */
        struct {
            struct {
                u64 type    : 4,
                    granu   : 2,
                    res_1   : 10,
                    did     : 16,
                    sid     : 16,
                    fm      : 2,
                    res_2   : 14;
            }lo;
            struct {
                u64 res;
            }hi;
        }cc_inv_dsc;
        /* IOTLB invalidate descriptor (TYPE_INVAL_IOTLB). */
        struct {
            struct {
                u64 type    : 4,
                    granu   : 2,
                    dw      : 1,
                    dr      : 1,
                    res_1   : 8,
                    did     : 16,
                    res_2   : 32;
            }lo;
            struct {
                u64 am      : 6,
                    ih      : 1,
                    res_1   : 5,
                    addr    : 52;
            }hi;
        }iotlb_inv_dsc;
        /* Device-IOTLB invalidate descriptor (TYPE_INVAL_DEVICE_IOTLB). */
        struct {
            struct {
                u64 type    : 4,
                    res_1   : 12,
                    max_invs_pend: 5,
                    res_2   : 11,
                    sid     : 16,
                    res_3   : 16;
            }lo;
            struct {
                u64 size    : 1,
                    res_1   : 11,
                    addr    : 52;
            }hi;
        }dev_iotlb_inv_dsc;
        /* Interrupt-entry-cache invalidate descriptor (TYPE_INVAL_IEC). */
        struct {
            struct {
                u64 type    : 4,
                    granu   : 1,
                    res_1   : 22,
                    im      : 5,
                    iidx    : 16,
                    res_2   : 16;
            }lo;
            struct {
                u64 res;
            }hi;
        }iec_inv_dsc;
        /* Invalidation-wait descriptor (TYPE_INVAL_WAIT): writes sdata
         * to saddr when preceding invalidations complete. */
        struct {
            struct {
                u64 type    : 4,
                    iflag   : 1,
                    sw      : 1,
                    fn      : 1,
                    res_1   : 25,
                    sdata   : 32;
            }lo;
            struct {
                u64 saddr;
            }hi;
        }inv_wait_dsc;
    }q;
};
452 
/* Order of queue invalidation pages(max is 8) */
#define QINVAL_PAGE_ORDER   2

#define QINVAL_ARCH_PAGE_ORDER  (QINVAL_PAGE_ORDER + PAGE_SHIFT_4K - PAGE_SHIFT)
#define QINVAL_ARCH_PAGE_NR     ( QINVAL_ARCH_PAGE_ORDER < 0 ?  \
                                1 :                             \
                                1 << QINVAL_ARCH_PAGE_ORDER )

/* Each entry is 16 bytes, so 2^8 entries per page */
#define QINVAL_ENTRY_ORDER  ( PAGE_SHIFT - 4 )
#define QINVAL_ENTRY_NR     (1 << (QINVAL_PAGE_ORDER + 8))

/* Status data flag */
#define QINVAL_STAT_INIT  0
#define QINVAL_STAT_DONE  1

/* Queue invalidation head/tail shift */
#define QINVAL_INDEX_SHIFT 4

/* 'v' is a descriptor's inner val struct (qinval_entry.q.val). */
#define qinval_present(v) ((v).lo & 1)
#define qinval_fault_disable(v) (((v).lo >> 1) & 1)

#define qinval_set_present(v) do {(v).lo |= 1;} while(0)
#define qinval_clear_present(v) do {(v).lo &= ~1;} while(0)

#define RESERVED_VAL        0

/* Invalidation descriptor types (qinval_entry.q.*.lo.type values). */
#define TYPE_INVAL_CONTEXT      0x1
#define TYPE_INVAL_IOTLB        0x2
#define TYPE_INVAL_DEVICE_IOTLB 0x3
#define TYPE_INVAL_IEC          0x4
#define TYPE_INVAL_WAIT         0x5

#define NOTIFY_TYPE_POLL        1
#define NOTIFY_TYPE_INTR        1
/* NOTE(review): "INTERRUTP" is a long-standing typo for "INTERRUPT";
 * renaming it would break existing users of this header. */
#define INTERRUTP_FLAG          1
#define STATUS_WRITE            1
#define FENCE_FLAG              1

/* Interrupt-entry-cache invalidation granularity (iec_inv_dsc.lo.granu). */
#define IEC_GLOBAL_INVL         0
#define IEC_INDEX_INVL          1
#define IRTA_EIME               (((u64)1) << 11)

/* 2^(IRTA_REG_TABLE_SIZE + 1) = IREMAP_ENTRY_NR */
#define IRTA_REG_TABLE_SIZE     ( IREMAP_PAGE_ORDER + 7 )

#define VTD_PAGE_TABLE_LEVEL_3  3
#define VTD_PAGE_TABLE_LEVEL_4  4

#define MAX_IOMMU_REGS 0xc0
503 
504 extern struct list_head acpi_drhd_units;
505 extern struct list_head acpi_rmrr_units;
506 extern struct list_head acpi_ioapic_units;
507 
/* Per-unit state for one VT-d remapping hardware unit. */
struct vtd_iommu {
    struct list_head list;
    void __iomem *reg; /* Pointer to hardware regs, virtual addr */
    u32	index;         /* Sequence number of iommu */
    u32 nr_pt_levels;  /* DMA page-table levels (cf. VTD_PAGE_TABLE_LEVEL_*) */
    u64	cap;           /* DMAR_CAP_REG contents (presumably cached at
                        * init — confirm) */
    u64	ecap;          /* DMAR_ECAP_REG contents (same caveat) */
    spinlock_t lock; /* protect context, domain ids */
    spinlock_t register_lock; /* protect iommu register handling */
    u64 root_maddr; /* root entry machine address */
    nodeid_t node;             /* NUMA node of this unit */
    struct msi_desc msi;       /* fault-event interrupt */
    struct acpi_drhd_unit *drhd; /* ACPI DRHD entry backing this unit */

    uint64_t qinval_maddr;   /* queue invalidation page machine address */

    struct {
        uint64_t maddr;   /* interrupt remap table machine address */
        unsigned int num; /* total num of used interrupt remap entry */
        spinlock_t lock;  /* lock for irq remapping table */
    } intremap;

    /* Invalidation hooks for the context cache and IOTLB.
     * NOTE(review): presumably switched between register-based and
     * queued implementations at init — confirm at the assignment site. */
    struct {
        int __must_check (*context)(struct vtd_iommu *iommu, u16 did,
                                    u16 source_id, u8 function_mask, u64 type,
                                    bool non_present_entry_flush);
        int __must_check (*iotlb)(struct vtd_iommu *iommu, u16 did, u64 addr,
                                  unsigned int size_order, u64 type,
                                  bool flush_non_present_entry,
                                  bool flush_dev_iotlb);
    } flush;

    struct list_head ats_devices;
    unsigned long *domid_bitmap;  /* domain id bitmap */
    u16 *domid_map;               /* domain id mapping array */
};
544 
/* Conditional debug logging: emits a VT-d-prefixed dprintk() warning
 * only when the global 'iommu_debug' flag is set. */
#define INTEL_IOMMU_DEBUG(fmt, args...) \
    do  \
    {   \
        if ( iommu_debug )  \
            dprintk(XENLOG_WARNING VTDPREFIX, fmt, ## args);    \
    } while(0)
551 
552 #endif
553