/xen/xen/include/acpi/
actbl2.h | 124 u16 length; 254 u16 length; 337 u16 type; 338 u16 length; 386 u16 segment; 399 u16 segment; 414 u16 segment; 484 u16 length; 532 u16 vlan; 579 u16 length; [all …]
actbl3.h | 110 u16 version; 169 u16 type; 218 u16 type; 277 u16 length; 372 u16 node_id; 478 u16 command; 479 u16 status; 499 u16 length; 500 u16 flags; 576 u16 status; [all …]
actbl1.h | 384 u16 type; 385 u16 source_id; 427 u16 reserved1; 433 u16 device; 434 u16 function; 436 u16 reserved2; 498 u16 reserved1; 513 u16 reserved1; 598 u16 revision; 828 u16 spi_count; [all …]
actbl.h | 211 u16 sci_interrupt; /* System vector of SCI interrupt */ 233 u16 C2latency; /* Worst case HW latency to enter/exit C2 state */ 234 u16 C3latency; /* Worst case HW latency to enter/exit C3 state */ 235 u16 flush_size; /* Processor's memory cache line width, in bytes */ 236 u16 flush_stride; /* Number of flush strides that need to be read */ 242 u16 boot_flags; /* IA-PC Boot Architecture Flags (see below for individual flags) */ 247 u16 arm_boot_flags; /* ARM-Specific Boot Flags (see below for individual flags) (ACPI 5.1) */ 330 #define ACPI_FADT_OFFSET(f) (u16) ACPI_OFFSET (struct acpi_table_fadt, f)

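The ACPI_FADT_OFFSET() macro at the end of the actbl.h entry turns a FADT field name into its byte offset, narrowed to u16. A minimal sketch of how that works, assuming ACPI_OFFSET() is essentially offsetof(); the two-field struct below is a stand-in, not the real struct acpi_table_fadt:

    #include <stddef.h>
    #include <stdint.h>

    /* Stand-in for the real FADT layout; just enough to exercise the macro. */
    struct acpi_table_fadt_example {
        uint32_t facs;               /* placeholder 32-bit field */
        uint16_t sci_interrupt;      /* "System vector of SCI interrupt" */
    };

    /* ACPICA's ACPI_OFFSET() boils down to offsetof(); the cast to a 16-bit
     * value matches the snippet above. */
    #define ACPI_OFFSET(d, f)    offsetof(d, f)
    #define ACPI_FADT_OFFSET(f)  (uint16_t) ACPI_OFFSET(struct acpi_table_fadt_example, f)

    /* ACPI_FADT_OFFSET(sci_interrupt) evaluates to 4 with this layout. */
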
/xen/xen/include/asm-x86/
edd.h | 39 u16 length; 40 u16 info_flags; 45 u16 bytes_per_sector; 47 u16 key; /* = 0xBEDD */ 50 u16 reserved3; 55 u16 base_address; 56 u16 reserved1; 84 u16 reserved2; 97 u16 id; 99 u16 reserved1; [all …]

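The /* = 0xBEDD */ comment in edd.h marks the signature the firmware writes into the key field of an INT 13h AH=48h device-parameter buffer when device-path information is present. A hedged sketch of the kind of validity check a consumer might perform; the struct is a trimmed stand-in, not the full structure from edd.h:

    #include <stdint.h>

    /* Trimmed stand-in for the EDD device-parameter buffer; only the fields
     * needed for the check are shown. */
    struct edd_params_example {
        uint16_t length;
        uint16_t info_flags;
        /* ... geometry and interface fields elided ... */
        uint16_t key;            /* = 0xBEDD when device-path info follows */
    };

    static inline int edd_has_device_path(const struct edd_params_example *p)
    {
        return p->key == 0xBEDD; /* signature defined by the EDD spec */
    }
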
/xen/xen/drivers/passthrough/vtd/
dmar.h | 32 u16 info; 34 u16 func: 3, 45 u16 bdf; 47 u16 func: 3, 56 u16 *devices; /* devices owned by this unit */ 64 u16 segment; 76 u16 segment; 83 u16 segment; (see the BDF sketch after this listing)
extern.h | 50 int iommu_flush_iec_index(struct vtd_iommu *iommu, u8 im, u16 iidx); 63 int dev_invalidate_iotlb(struct vtd_iommu *iommu, u16 did, 68 u16 did, u16 size, u64 addr);
qinval.c | 76 u16 did, u16 source_id, in queue_invalidate_context_sync() 111 u16 did, u8 am, u8 ih, in queue_invalidate_iotlb_sync() 211 struct pci_dev *pdev, u16 did) in dev_invalidate_sync() 239 u16 did, u16 size, u64 addr) in qinval_device_iotlb_sync() 273 u8 granu, u8 im, u16 iidx) in queue_invalidate_iec_sync() 316 int iommu_flush_iec_index(struct vtd_iommu *iommu, u8 im, u16 iidx) in iommu_flush_iec_index() 321 static int __must_check flush_context_qi(struct vtd_iommu *iommu, u16 did, in flush_context_qi() 322 u16 sid, u8 fm, u64 type, in flush_context_qi() 345 static int __must_check flush_iotlb_qi(struct vtd_iommu *iommu, u16 did, in flush_iotlb_qi()
iommu.h | 290 u16 p : 1, 302 u16 sid; 303 u16 sq : 2, 309 u16 p : 1, 320 u16 sid; 321 u16 sq : 2, 531 int __must_check (*context)(struct vtd_iommu *iommu, u16 did, 532 u16 source_id, u8 function_mask, u64 type, 534 int __must_check (*iotlb)(struct vtd_iommu *iommu, u16 did, u64 addr, 542 u16 *domid_map; /* domain id mapping array */

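The dmar.h entry above packs a PCI bus/device/function into 16 bits and picks it apart with bitfields (the "u16 func: 3," lines). A minimal sketch of that layout, assuming the conventional 3/5/8 split and low-to-high bitfield allocation; the union name is illustrative, not Xen's:

    #include <stdint.h>

    typedef uint16_t u16;

    /* Illustrative BDF layout: function in the low 3 bits, device (slot) in
     * the next 5, bus number in the top 8.  Bitfield allocation order is
     * implementation-defined, so real code must match its compiler's ABI. */
    union example_bdf {
        u16 raw;
        struct {
            u16 func: 3,
                dev:  5,
                bus:  8;
        };
    };

    /* raw == 0x00fb decodes as bus 0x00, dev 0x1f, func 0x3 (0000:00:1f.3). */
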
/xen/xen/include/asm-arm/arm32/
io.h | 34 static inline void __raw_writew(u16 val, volatile void __iomem *addr) in __raw_writew() 37 : "+Q" (*(volatile u16 __force *)addr) in __raw_writew() 57 static inline u16 __raw_readw(const volatile void __iomem *addr) in __raw_readw() 59 u16 val; in __raw_readw() 61 : "+Q" (*(volatile u16 __force *)addr), in __raw_readw() 79 #define readw_relaxed(c) ({ u16 __r = le16_to_cpu((__force __le16) \ 85 #define writew_relaxed(v,c) __raw_writew((__force u16) cpu_to_le16(v),c) 89 #define readw(c) ({ u16 __v = readw_relaxed(c); __iormb(); __v; })

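The io.h entry shows the usual three-layer construction of the 16-bit MMIO accessors: a raw load/store, a _relaxed wrapper that fixes the little-endian device byte order, and readw()/writew() adding the ordering barrier on top. A compressed sketch of that layering; every example_ name is a stand-in for Xen's __raw_readw(), le16_to_cpu() and __iormb(), not the real definition:

    #include <stdint.h>

    typedef uint16_t u16;

    /* Stand-ins for the real helpers in the header above. */
    #define example_iormb()         __asm__ volatile ( "dmb" ::: "memory" )
    #define example_le16_to_cpu(x)  (x)      /* assuming a little-endian CPU */

    static inline u16 example_raw_readw(const volatile void *addr)
    {
        /* The real accessor issues an ldrh via inline assembly. */
        return *(const volatile u16 *)addr;
    }

    /* Relaxed: correct byte order, but no ordering against other accesses. */
    #define example_readw_relaxed(c)  example_le16_to_cpu(example_raw_readw(c))

    /* Ordered: the barrier keeps later accesses from overtaking the read. */
    #define example_readw(c) \
        ({ u16 __v = example_readw_relaxed(c); example_iormb(); __v; })
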
/xen/xen/drivers/passthrough/amd/
iommu_acpi.c | 108 u16 seg, u16 bdf, u16 cap_offset) in find_iommu_from_bdf_cap() 146 u16 seg, u16 bdf, unsigned long base, in reserve_unity_map_for_device() 214 u16 req; in register_exclusion_range_for_device() 258 u16 req; in register_exclusion_range_for_iommu_devices() 297 u16 bdf; in parse_ivmd_device_select() 419 u16 pad_length, u16 header_length, u16 block_length) in parse_ivhd_device_padding() 433 u16 bdf; in parse_ivhd_device_select() 450 u16 header_length, u16 block_length, struct amd_iommu *iommu) in parse_ivhd_device_range() 496 u16 header_length, u16 block_length, struct amd_iommu *iommu) in parse_ivhd_device_alias() 531 u16 header_length, u16 block_length, struct amd_iommu *iommu) in parse_ivhd_device_alias_range() [all …]
iommu.h | 80 u16 seg; 81 u16 bdf; 84 u16 cap_offset; 134 struct ivrs_mappings *get_ivrs_mappings(u16 seg); 135 int iterate_ivrs_mappings(int (*)(u16 seg, struct ivrs_mappings *)); 286 u16 bdf, seg; 289 u16 *pin_2_idx; 297 u16 bdf, seg, id;
iommu_cmd.c | 101 u64 io_addr, u16 domain_id, u16 order) in invalidate_iommu_pages() 157 u16 maxpend, u32 pasid, u16 queueid, in invalidate_iotlb_pages() 158 u64 io_addr, u16 dev_id, u16 order) in invalidate_iotlb_pages() 227 u16 device_id) in invalidate_dev_table_entry() 245 static void invalidate_interrupt_table(struct amd_iommu *iommu, u16 device_id) in invalidate_interrupt_table()

/xen/xen/include/xen/
pci.h | 153 bool_t pci_known_segment(u16 seg); 154 bool_t pci_device_detect(u16 seg, u8 bus, u8 dev, u8 func); 156 enum pdev_type pdev_type(u16 seg, u8 bus, u8 devfn); 157 int find_upstream_bridge(u16 seg, u8 *bus, u8 *devfn, u8 *secbus); 166 int pci_add_segment(u16 seg); 167 const unsigned long *pci_get_ro_map(u16 seg); 168 int pci_add_device(u16 seg, u8 bus, u8 devfn, 170 int pci_remove_device(u16 seg, u8 bus, u8 devfn); 177 void pci_check_disable_device(u16 seg, u8 bus, u8 devfn); 191 int pci_find_cap_offset(u16 seg, u8 bus, u8 dev, u8 func, u8 cap); [all …]

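Every prototype in the pci.h entry addresses a device by a u16 PCI segment plus u8 bus/device/function. A hypothetical caller of pci_find_cap_offset() as declared in the snippet; the BDF values and the MSI capability ID 0x05 (PCI_CAP_ID_MSI in the PCI spec) are purely illustrative:

    /* Assumes the declarations from xen/pci.h above; illustration only. */
    u16 seg = 0;
    u8 bus = 0x00, dev = 0x1f, func = 0x3;
    int pos = pci_find_cap_offset(seg, bus, dev, func, 0x05 /* MSI */);

    if ( pos )
    {
        /* Capability found at config-space offset 'pos'. */
    }
    else
    {
        /* Device 0000:00:1f.3 exposes no MSI capability. */
    }
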
/xen/tools/libxc/
xc_dom_decompress_unsafe_lzo1x.c | 12 typedef uint16_t u16; typedef 19 static inline u16 be16_to_cpup(const u16 *p) in be16_to_cpup() 21 u16 v = *p; in be16_to_cpup()

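The decompressor shim typedefs u16 locally and converts big-endian stream values with be16_to_cpup(). A minimal sketch of such a helper, done byte-wise so it works regardless of host endianness; the implementation in the file itself may differ:

    #include <stdint.h>

    typedef uint16_t u16;

    /* Load a big-endian u16 and return it in host byte order. */
    static inline u16 be16_to_cpup(const u16 *p)
    {
        const uint8_t *b = (const uint8_t *)p;

        return (u16)((b[0] << 8) | b[1]);
    }
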
/xen/xen/xsm/flask/ss/
avtab.h | 30 u16 source_type; /* source type */ 31 u16 target_type; /* target type */ 32 u16 target_class; /* target object class */ 43 u16 specified; /* what field is specified */ 60 u16 mask; /* mask to compute hash func */
avtab.c | 31 static inline int avtab_hash(struct avtab_key *keyp, u16 mask) in avtab_hash() 67 u16 specified = key->specified & ~(AVTAB_ENABLED|AVTAB_ENABLED_OLD); in avtab_insert() 108 u16 specified = key->specified & ~(AVTAB_ENABLED|AVTAB_ENABLED_OLD); in avtab_insert_nonunique() 140 u16 specified = key->specified & ~(AVTAB_ENABLED|AVTAB_ENABLED_OLD); in avtab_search() 266 u16 mask = 0; in avtab_alloc() 346 u16 enabled; in avtab_read_item() 381 key.source_type = (u16)val; in avtab_read_item() 388 key.target_type = (u16)val; in avtab_read_item() 395 key.target_class = (u16)val; in avtab_read_item() 416 for ( i = 0; i < sizeof(spec_order)/sizeof(u16); i++ ) in avtab_read_item() [all …]

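avtab_hash() reduces the three u16 fields of an access-vector key to a bucket index, masked by the u16 mask the table keeps ("mask to compute hash func"). A hedged sketch of that shape; the shift constants follow the classic SELinux scheme and may not match Xen's exact function:

    #include <stdint.h>

    typedef uint16_t u16;

    struct avtab_key {
        u16 source_type;     /* source type */
        u16 target_type;     /* target type */
        u16 target_class;    /* target object class */
        u16 specified;       /* what field is specified */
    };

    /* 'mask' is the number of hash slots minus one, so the result is a valid
     * bucket index for a power-of-two sized table. */
    static inline int avtab_hash(const struct avtab_key *keyp, u16 mask)
    {
        return (keyp->target_class +
                (keyp->target_type << 2) +
                (keyp->source_type << 9)) & mask;
    }
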
/xen/xen/include/asm-x86/guest/
hyperv-tlfs.h | 587 u16 host_es_selector; 588 u16 host_cs_selector; 589 u16 host_ss_selector; 590 u16 host_ds_selector; 591 u16 host_fs_selector; 592 u16 host_gs_selector; 593 u16 host_tr_selector; 595 u16 padding16_1; 617 u16 guest_es_selector; 618 u16 guest_cs_selector; [all …]

/xen/xen/xsm/flask/include/
security.h | 68 int security_compute_av(u32 ssid, u32 tsid, u16 tclass, u32 requested, 71 int security_transition_sid(u32 ssid, u32 tsid, u16 tclass, u32 *out_sid); 73 int security_member_sid(u32 ssid, u32 tsid, u16 tclass, u32 *out_sid); 75 int security_change_sid(u32 ssid, u32 tsid, u16 tclass, u32 *out_sid); 94 u16 tclass);
avc.h | 85 void avc_audit(u32 ssid, u32 tsid, u16 tclass, u32 requested, 88 int avc_has_perm_noaudit(u32 ssid, u32 tsid, u16 tclass, u32 requested, 91 int avc_has_perm(u32 ssid, u32 tsid, u16 tclass, u32 requested,

/xen/xen/include/asm-arm/arm64/
io.h | 35 static inline void __raw_writew(u16 val, volatile void __iomem *addr) in __raw_writew() 60 static inline u16 __raw_readw(const volatile void __iomem *addr) in __raw_readw() 62 u16 val; in __raw_readw() 102 #define readw_relaxed(c) ({ u16 __v = le16_to_cpu((__force __le16)__raw_readw(c)); __v; }) 107 #define writew_relaxed(v,c) ((void)__raw_writew((__force u16)cpu_to_le16(v),(c))) 117 #define readw(c) ({ u16 __v = readw_relaxed(c); __iormb(); __v; })

/xen/xen/arch/x86/x86_64/
gdbstub.c | 131 case 18: regs->cs = (u16)val; break; in gdb_arch_write_reg() 132 case 19: regs->ss = (u16)val; break; in gdb_arch_write_reg() 133 case 20: regs->ds = (u16)val; break; in gdb_arch_write_reg() 134 case 21: regs->es = (u16)val; break; in gdb_arch_write_reg() 135 case 22: regs->fs = (u16)val; break; in gdb_arch_write_reg() 136 case 23: regs->gs = (u16)val; break; in gdb_arch_write_reg()

/xen/xen/arch/x86/pv/
hypercall.c | 288 *(u16 *)(p+ 1) = 0x5341; /* push %r11 */ in pv_ring3_init_hypercall_page() 292 *(u16 *)(p+ 9) = 0x050f; /* syscall */ in pv_ring3_init_hypercall_page() 298 *(u16 *)(p+ 1) = 0x5341; /* push %r11 */ in pv_ring3_init_hypercall_page() 301 *(u16 *)(p+ 8) = 0x050f; /* syscall */ in pv_ring3_init_hypercall_page() 302 *(u16 *)(p+10) = 0x5b41; /* pop %r11 */ in pv_ring3_init_hypercall_page() 325 *(u16 *)(p+ 6) = (HYPERCALL_VECTOR << 8) | 0xcd; /* int $xx */ in pv_ring1_init_hypercall_page() 332 *(u16 *)(p+ 5) = (HYPERCALL_VECTOR << 8) | 0xcd; /* int $xx */ in pv_ring1_init_hypercall_page()

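The u16 stores in the hypercall.c entry are two-byte instruction encodings written as one little-endian store each: 0x5341 lands in memory as 0x41 0x53 (push %r11), 0x050f as 0x0f 0x05 (syscall), and (HYPERCALL_VECTOR << 8) | 0xcd puts the vector byte right after the 0xcd int opcode. A standalone sketch of the same trick; the function name and buffer are hypothetical:

    #include <stdint.h>
    #include <string.h>

    /* Emit "push %r11; syscall" at p the way the hypercall page stub does:
     * each two-byte instruction is stored as one u16.  x86 is little-endian,
     * so the low byte of the constant is written first. */
    static void emit_push_r11_syscall(uint8_t *p)
    {
        const uint16_t push_r11 = 0x5341;     /* bytes 0x41 0x53: push %r11 */
        const uint16_t syscall_insn = 0x050f; /* bytes 0x0f 0x05: syscall   */

        memcpy(p, &push_r11, sizeof(push_r11));
        memcpy(p + 2, &syscall_insn, sizeof(syscall_insn));
    }
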
/xen/xen/xsm/flask/
avc.c | 71 u16 tclass; 100 static inline int avc_hash(u32 ssid, u32 tsid, u16 tclass) in avc_hash() 152 static void avc_dump_av(struct avc_dump_buf *buf, u16 tclass, u32 av) in avc_dump_av() 365 u16 tclass, struct av_decision *avd) in avc_node_populate() 373 static inline struct avc_node *avc_search_node(u32 ssid, u32 tsid, u16 tclass) in avc_search_node() 410 static struct avc_node *avc_lookup(u32 ssid, u32 tsid, u16 tclass) in avc_lookup() 468 static struct avc_node *avc_insert(u32 ssid, u32 tsid, u16 tclass, in avc_insert() 529 void avc_audit(u32 ssid, u32 tsid, u16 tclass, u32 requested, in avc_audit() 616 static int avc_update_node(u32 perms, u32 ssid, u32 tsid, u16 tclass, in avc_update_node() 722 int avc_has_perm_noaudit(u32 ssid, u32 tsid, u16 tclass, u32 requested, in avc_has_perm_noaudit() [all …]

/xen/xen/arch/x86/boot/
cmdline.c | 44 u16 boot_vid_mode; 45 u16 vesa_width; 46 u16 vesa_height; 47 u16 vesa_depth; 248 static u16 rows2vmode(unsigned int rows) in rows2vmode()

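cmdline.c keeps the requested VESA geometry in three u16 fields. A hedged sketch of parsing a "<width>x<height>x<depth>" triple (the format of Xen's vga=gfx-... option) into such fields; the helper name is hypothetical, and the real early-boot code uses its own string routines rather than strtoul():

    #include <stdint.h>
    #include <stdlib.h>

    typedef uint16_t u16;

    /* Hypothetical helper: fill vesa_width/height/depth-style fields from a
     * "1280x1024x16"-style string.  Returns 0 on success, -1 on a malformed
     * specification. */
    static int parse_wxhxd(const char *s, u16 *w, u16 *h, u16 *d)
    {
        char *end;

        *w = strtoul(s, &end, 10);
        if ( *end++ != 'x' )
            return -1;
        *h = strtoul(end, &end, 10);
        if ( *end++ != 'x' )
            return -1;
        *d = strtoul(end, &end, 10);

        return *end ? -1 : 0;
    }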