/xen/xen/arch/arm/
README.LinuxPrimitives
     33 linux/arch/arm64/lib/memchr.S xen/arch/arm/arm64/lib/memchr.S
     34 linux/arch/arm64/lib/memcmp.S xen/arch/arm/arm64/lib/memcmp.S
     35 linux/arch/arm64/lib/memcpy.S xen/arch/arm/arm64/lib/memcpy.S
     36 linux/arch/arm64/lib/memmove.S xen/arch/arm/arm64/lib/memmove.S
     37 linux/arch/arm64/lib/memset.S xen/arch/arm/arm64/lib/memset.S
     40 diff -u linux/arch/arm64/lib/$i xen/arch/arm/arm64/lib/$i
     47 linux/arch/arm64/lib/strchr.S xen/arch/arm/arm64/lib/strchr.S
     48 linux/arch/arm64/lib/strcmp.S xen/arch/arm/arm64/lib/strcmp.S
     55 diff -u linux/arch/arm64/lib/$i xen/arch/arm/arm64/lib/$i
    104 diff -u linux/arch/arm/lib/$i xen/arch/arm/arm32/lib/$i
    [all …]

domain.c
    126 p->arch.jmcr = READ_CP32(JMCR); in ctxt_switch_from()
    151 p->arch.dfar = READ_CP32(DFAR); in ctxt_switch_from()
    565 if ( v->arch.stack == NULL ) in arch_vcpu_create()
    568 v->arch.cpu_info = (struct cpu_info *)(v->arch.stack in arch_vcpu_create()
    571 memset(v->arch.cpu_info, 0, sizeof(*v->arch.cpu_info)); in arch_vcpu_create()
    573 v->arch.saved_context.sp = (register_t)v->arch.cpu_info; in arch_vcpu_create()
    615 v->arch.hcr_el2 |= HCR_RW; in vcpu_switch_to_aarch64_mode()
    902 v->arch.sctlr = ctxt->sctlr; in arch_set_info_guest()
    903 v->arch.ttbr0 = ctxt->ttbr0; in arch_set_info_guest()
    904 v->arch.ttbr1 = ctxt->ttbr1; in arch_set_info_guest()
    [all …]

vtimer.c
    115 t = &v->arch.virt_timer; in vcpu_vtimer_init()
    123 v->arch.vtimer_initialized = 1; in vcpu_vtimer_init()
    130 if ( !v->arch.vtimer_initialized ) in vcpu_timer_destroy()
    133 kill_timer(&v->arch.virt_timer.timer); in vcpu_timer_destroy()
    134 kill_timer(&v->arch.phys_timer.timer); in vcpu_timer_destroy()
    147 set_timer(&v->arch.virt_timer.timer, ticks_to_ns(v->arch.virt_timer.cval + in virt_timer_save()
    175 *r = v->arch.phys_timer.ctl; in vtimer_cntp_ctl()
    182 v->arch.phys_timer.ctl = ctl; in vtimer_cntp_ctl()
    245 *r = v->arch.phys_timer.cval; in vtimer_cntp_cval()
    249 v->arch.phys_timer.cval = *r; in vtimer_cntp_cval()
    [all …]

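One detail worth noting in these hits: vcpu_timer_destroy() starts by testing vtimer_initialized, which vcpu_vtimer_init() only sets once setup has finished, so tearing down a vCPU whose timers were never initialised is a safe no-op. A minimal standalone sketch of that guard pattern, with a stand-in timer type and kill_timer() in place of Xen's real timer API:

    #include <stdbool.h>

    struct timer { bool armed; };                           /* stand-in type */
    void kill_timer(struct timer *t) { t->armed = false; }  /* stand-in API */

    struct arch_vcpu {
        bool vtimer_initialized;        /* set by init only on full success */
        struct timer virt_timer, phys_timer;
    };

    void vcpu_timer_destroy(struct arch_vcpu *v)
    {
        if ( !v->vtimer_initialized )
            return;                 /* init never completed: nothing to undo */
        kill_timer(&v->virt_timer);
        kill_timer(&v->phys_timer);
    }
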
vgic.c
     99 switch ( d->arch.vgic.version ) in domain_vgic_register()
    125 d->arch.vgic.ctlr = 0; in domain_vgic_init()
    138 d->arch.vgic.nr_spis = nr_spis; in domain_vgic_init()
    142 d->arch.vgic.shared_irqs = in domain_vgic_init()
    147 d->arch.vgic.pending_irqs = in domain_vgic_init()
    163 d->arch.vgic.allocated_irqs = in domain_vgic_init()
    177 d->arch.vgic.handler = ops; in register_vgic_ops()
    198 if ( d->arch.vgic.handler ) in domain_vgic_free()
    200 xfree(d->arch.vgic.shared_irqs); in domain_vgic_free()
    201 xfree(d->arch.vgic.pending_irqs); in domain_vgic_free()
    [all …]

vuart.c
     60 if ( !d->arch.vuart.info ) in domain_vuart_init()
     63 spin_lock_init(&d->arch.vuart.lock); in domain_vuart_init()
     64 d->arch.vuart.idx = 0; in domain_vuart_init()
     67 if ( !d->arch.vuart.buf ) in domain_vuart_init()
     71 d->arch.vuart.info->base_addr, in domain_vuart_init()
     72 d->arch.vuart.info->size, in domain_vuart_init()
     83 xfree(d->arch.vuart.buf); in domain_vuart_free()
     89 struct vuart *uart = &d->arch.vuart; in vuart_print_char()
    119 if ( offset == d->arch.vuart.info->status_off ) in vuart_mmio_read()
    121 *r = d->arch.vuart.info->status; in vuart_mmio_read()
    [all …]

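The vuart_mmio_read() hits show the emulated UART decoding a guest read purely by offset against the platform-described status register. A compact sketch of that dispatch; the vuart_info field names follow the fragments above, everything else is an assumption:

    #include <stdint.h>

    struct vuart_info {
        uint64_t base_addr;    /* guest-physical base of the region */
        uint64_t size;
        uint64_t status_off;   /* offset of the status register */
        uint32_t status;       /* value to present to the guest */
    };

    /* Handle a read at 'offset' bytes from base_addr. */
    uint32_t vuart_mmio_read(const struct vuart_info *info, uint64_t offset)
    {
        if ( offset == info->status_off )
            return info->status;    /* e.g. "transmitter ready" bits */
        return 0;                   /* other registers read as zero here */
    }
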
/xen/xen/arch/arm/arm32/
vfp.c
      8 v->arch.vfp.fpexc = READ_CP32(FPEXC); in vfp_save_state()
     12 v->arch.vfp.fpscr = READ_CP32(FPSCR); in vfp_save_state()
     16 v->arch.vfp.fpinst = READ_CP32(FPINST); in vfp_save_state()
     18 if ( v->arch.vfp.fpexc & FPEXC_FP2V ) in vfp_save_state()
     26 : "=Q" (*v->arch.vfp.fpregs1) : "r" (v->arch.vfp.fpregs1)); in vfp_save_state()
     33 : "=Q" (*v->arch.vfp.fpregs2) : "r" (v->arch.vfp.fpregs2)); in vfp_save_state()
     46 : : "Q" (*v->arch.vfp.fpregs1), "r" (v->arch.vfp.fpregs1)); in vfp_restore_state()
     52 : : "Q" (*v->arch.vfp.fpregs2), "r" (v->arch.vfp.fpregs2)); in vfp_restore_state()
     54 if ( v->arch.vfp.fpexc & FPEXC_EX ) in vfp_restore_state()
     61 WRITE_CP32(v->arch.vfp.fpscr, FPSCR); in vfp_restore_state()
    [all …]

/xen/xen/arch/x86/
i387.c
     28 ASSERT(v->arch.xsave_area); in fpu_xrstor()
     43 const typeof(v->arch.xsave_area->fpu_sse) *fpu_ctxt = v->arch.fpu_ctxt; in fpu_fxrstor()
    141 ASSERT(v->arch.xsave_area); in fpu_xsave()
    156 typeof(v->arch.xsave_area->fpu_sse) *fpu_ctxt = v->arch.fpu_ctxt; in fpu_fxsave()
    210 if ( !v->arch.fully_eager_fpu && !v->arch.nonlazy_xstate_used ) in vcpu_restore_fpu_nonlazy()
    223 if ( v->arch.fully_eager_fpu || (v->arch.xsave_area && xstate_all(v)) ) in vcpu_restore_fpu_nonlazy()
    317 if ( v->arch.xsave_area ) in vcpu_init_fpu()
    318 v->arch.fpu_ctxt = &v->arch.xsave_area->fpu_sse; in vcpu_init_fpu()
    322 v->arch.fpu_ctxt = _xzalloc(sizeof(v->arch.xsave_area->fpu_sse), in vcpu_init_fpu()
    326 typeof(v->arch.xsave_area->fpu_sse) *fpu_sse = v->arch.fpu_ctxt; in vcpu_init_fpu()
    [all …]

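The vcpu_init_fpu() hits capture the layout decision behind fpu_ctxt: when an XSAVE area exists, the legacy FXSAVE region inside it is reused directly, and only otherwise is a separate zeroed, aligned buffer allocated. A user-space sketch of that aliasing, with aligned_alloc()/memset() standing in for Xen's _xzalloc() and a fixed 512-byte stand-in for the fpu_sse image:

    #include <stdlib.h>
    #include <string.h>

    struct fpu_sse { unsigned char x[512]; };   /* legacy FXSAVE image size */
    struct xsave_area { struct fpu_sse fpu_sse; /* extended state follows */ };

    struct vcpu_arch {
        struct xsave_area *xsave_area;   /* NULL when XSAVE is unavailable */
        void *fpu_ctxt;
    };

    int vcpu_init_fpu(struct vcpu_arch *arch)
    {
        if ( arch->xsave_area )
            arch->fpu_ctxt = &arch->xsave_area->fpu_sse;  /* alias, no alloc */
        else
        {
            /* stand-in for _xzalloc(size, 16): zeroed, 16-byte aligned */
            arch->fpu_ctxt = aligned_alloc(16, sizeof(struct fpu_sse));
            if ( !arch->fpu_ctxt )
                return -1;
            memset(arch->fpu_ctxt, 0, sizeof(struct fpu_sse));
        }
        return 0;
    }

Aliasing into the XSAVE area keeps a single canonical copy of the SSE state, so the FXSAVE- and XSAVE-based save/restore paths above all operate on the same bytes.
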
domain.c
     361 memset(&v->arch.user_regs, 0, sizeof(v->arch.user_regs)); in arch_vcpu_regs_init()
     364 memset(v->arch.dr, 0, sizeof(v->arch.dr)); in arch_vcpu_regs_init()
     425 xfree(v->arch.msrs); in arch_vcpu_create()
     663 xfree(d->arch.msr); in arch_domain_create()
     678 xfree(d->arch.msr); in arch_domain_destroy()
     940 v->arch.pv.iopl = v->arch.user_regs.eflags & X86_EFLAGS_IOPL; in arch_set_info_guest()
     956 v->arch.pv.ldt_base = v->arch.pv.ldt_ents in arch_set_info_guest()
    1019 v->arch.pv.ctrlreg[4] = pv_fixup_guest_cr4(v, v->arch.pv.ctrlreg[4]); in arch_set_info_guest()
    1021 memset(v->arch.dr, 0, sizeof(v->arch.dr)); in arch_set_info_guest()
    1382 ? n->arch.pv.gs_base_kernel : n->arch.pv.gs_base_user; in load_segments()
    [all …]

irq.c
     542 if ( desc->arch.move_in_progress || desc->arch.move_cleanup_count ) in _assign_irq_vector()
     601 cpumask_and(desc->arch.old_cpu_mask, desc->arch.cpu_mask, in _assign_irq_vector()
     603 desc->arch.old_vector = desc->arch.vector; in _assign_irq_vector()
     809 cpumask_and(desc->arch.old_cpu_mask, desc->arch.old_cpu_mask, in send_cleanup_vector()
     811 desc->arch.move_cleanup_count = cpumask_weight(desc->arch.old_cpu_mask); in send_cleanup_vector()
    1242 pirq->arch.irq = irq; in set_domain_irq_pirq()
    1247 pirq->arch.irq = 0; in clear_domain_irq_pirq()
    1322 if ( pirq->arch.irq )
    1937 desc->arch.vector, desc->arch.old_vector, in do_IRQ()
    2201 desc->arch.used_vectors = &pdev->arch.used_vectors; in map_domain_pirq()
    [all …]

/xen/xen/arch/x86/hvm/
vm_event.c
     33 if ( unlikely(v->arch.vm_event->set_gprs) ) in hvm_vm_event_set_registers()
     37 regs->rax = v->arch.vm_event->gprs.rax; in hvm_vm_event_set_registers()
     38 regs->rbx = v->arch.vm_event->gprs.rbx; in hvm_vm_event_set_registers()
     39 regs->rcx = v->arch.vm_event->gprs.rcx; in hvm_vm_event_set_registers()
     40 regs->rdx = v->arch.vm_event->gprs.rdx; in hvm_vm_event_set_registers()
     46 regs->r8 = v->arch.vm_event->gprs.r8; in hvm_vm_event_set_registers()
     47 regs->r9 = v->arch.vm_event->gprs.r9; in hvm_vm_event_set_registers()
     58 v->arch.vm_event->set_gprs = false; in hvm_vm_event_set_registers()
     66 ASSERT(v->arch.vm_event); in hvm_vm_event_do_resume()
     70 w = &v->arch.vm_event->write_data; in hvm_vm_event_do_resume()
    [all …]

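hvm_vm_event_set_registers() is a consume-once handoff: a monitor queues replacement GPR values, the set_gprs flag gates whether they are applied, and the flag is cleared after the copy. A trimmed sketch with stand-in types (only four registers shown; the real structure covers the full GPR set):

    #include <stdbool.h>
    #include <stdint.h>

    struct monitor_gprs { uint64_t rax, rbx, rcx, rdx; };

    struct vm_event_vcpu {
        bool set_gprs;               /* a monitor queued an override */
        struct monitor_gprs gprs;
    };

    struct cpu_regs { uint64_t rax, rbx, rcx, rdx; };

    void vm_event_set_registers(struct vm_event_vcpu *ev, struct cpu_regs *regs)
    {
        if ( !ev->set_gprs )
            return;
        regs->rax = ev->gprs.rax;
        regs->rbx = ev->gprs.rbx;
        regs->rcx = ev->gprs.rcx;
        regs->rdx = ev->gprs.rdx;
        ev->set_gprs = false;        /* consume the override exactly once */
    }
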
hvm.c
    448 v->arch.hvm.msr_tsc_adjust += v->arch.hvm.cache_tsc_offset - tsc_offset; in hvm_set_guest_tsc_msr()
    455 v->arch.hvm.cache_tsc_offset += tsc_adjust - v->arch.hvm.msr_tsc_adjust; in hvm_set_guest_tsc_adjust()
    566 if ( unlikely(v->arch.vm_event) && v->arch.monitor.next_interrupt_enabled ) in hvm_do_resume()
    648 if ( !d->arch.hvm.pl_time || !d->arch.hvm.irq || in hvm_domain_initialise()
    649 !d->arch.hvm.params || !d->arch.hvm.io_handler ) in hvm_domain_initialise()
    720 XFREE(d->arch.hvm.irq); in hvm_domain_initialise()
    779 XFREE(d->arch.hvm.irq); in hvm_domain_destroy()
    851 .dr0 = v->arch.dr[0], in hvm_save_cpu_ctxt()
    855 .dr6 = v->arch.dr6, in hvm_save_cpu_ctxt()
    856 .dr7 = v->arch.dr7, in hvm_save_cpu_ctxt()
    [all …]

/xen/xen/arch/x86/pv/
domain.c
    139 v->arch.guest_table_user = v->arch.guest_table; in setup_compat_l4()
    197 if ( d->arch.pv.pcid ) in pv_make_cr4()
    206 if ( d->arch.vtsc || (v->arch.pv.ctrlreg[4] & X86_CR4_TSD) ) in pv_make_cr4()
    248 d->arch.x87_fip_width = 4; in switch_compat()
    250 d->arch.pv.xpti = false; in switch_compat()
    251 d->arch.pv.pcid = false; in switch_compat()
    256 d->arch.pv.is_32bit = d->arch.has_32bit_shinfo = false; in switch_compat()
    387 d->arch.pv.pcid = d->arch.pv.xpti; in pv_domain_initialise()
    391 d->arch.pv.pcid = !d->arch.pv.xpti; in pv_domain_initialise()
    428 cr3 = v->arch.cr3; in _toggle_guest_pt()
    [all …]

callback.c
     33 struct trap_info *t = &curr->arch.pv.trap_ctxt[TRAP_nmi]; in register_guest_nmi_callback()
     50 curr->arch.nmi_pending = true; in register_guest_nmi_callback()
     74 curr->arch.pv.event_callback_eip = reg->address; in register_guest_callback()
     78 curr->arch.pv.failsafe_callback_eip = reg->address; in register_guest_callback()
     86 curr->arch.pv.syscall_callback_eip = reg->address; in register_guest_callback()
     95 curr->arch.pv.syscall32_disables_events = in register_guest_callback()
    101 curr->arch.pv.sysenter_disables_events = in register_guest_callback()
    232 curr->arch.pv.syscall32_disables_events = in compat_register_guest_callback()
    239 curr->arch.pv.sysenter_disables_events = in compat_register_guest_callback()
    350 struct trap_info *dst = curr->arch.pv.trap_ctxt; in do_set_trap_table()
    [all …]

misc-hypercalls.c
     45 v->arch.pv.ctrlreg[0] |= X86_CR0_TS; in do_fpu_taskswitch()
     50 v->arch.pv.ctrlreg[0] &= ~X86_CR0_TS; in do_fpu_taskswitch()
     74 v->arch.dr[reg] = value; in set_debugreg()
     88 if ( v->arch.pv.ctrlreg[4] & X86_CR4_DE ) in set_debugreg()
    104 v->arch.dr6 = value; in set_debugreg()
    110 if ( v->arch.pv.ctrlreg[4] & X86_CR4_DE ) in set_debugreg()
    141 if ( !(v->arch.pv.ctrlreg[4] & X86_CR4_DE) ) in set_debugreg()
    147 v->arch.pv.dr7_emul = io_enable; in set_debugreg()
    155 if ( (v == curr) && !(v->arch.dr7 & DR7_ACTIVE_MASK) ) in set_debugreg()
    160 v->arch.pv.dr7_emul = 0; in set_debugreg()
    [all …]

/xen/tools/include/xen-foreign/
mkheader.py
      7 arch = sys.argv[1]; variable
    133 """ % (arch, sys.argv[0], fileid, fileid)
    135 if arch in header:
    136 output += header[arch];
    152 replace = define + "_" + arch;
    181 output += "typedef struct %s_%s %s_%s_t;\n" % (struct, arch, struct, arch);
    185 if arch in footer:
    186 output += footer[arch];
    193 replace = define + "_" + arch.upper();
    195 replace = define + "_" + arch;
    [all …]

/xen/xen/arch/x86/mm/hap/
hap.c
    253 d->arch.paging.hap.free_pages--; in hap_alloc()
    266 d->arch.paging.hap.free_pages++; in hap_free()
    282 d->arch.paging.hap.p2m_pages++; in hap_alloc_p2m_page()
    316 d->arch.paging.hap.p2m_pages--; in hap_free_p2m_page()
    317 d->arch.paging.hap.total_pages++; in hap_free_p2m_page()
    451 if ( d->arch.paging.mode != 0 ) in hap_enable()
    514 d->arch.altp2m_active = 0; in hap_enable()
    531 d->arch.altp2m_active = 0; in hap_final_teardown()
    533 if ( d->arch.altp2m_eptp ) in hap_final_teardown()
    602 XFREE(d->arch.hvm.dirty_vram); in hap_teardown()
    [all …]

/xen/xen/arch/x86/x86_64/
asm-offsets.c
     56 OFFSET(VCPU_thread_flags, struct vcpu, arch.flags); in __dummy__()
     63 struct vcpu, arch.pv.syscall32_disables_events); in __dummy__()
     67 struct vcpu, arch.pv.sysenter_disables_events); in __dummy__()
     68 OFFSET(VCPU_trap_ctxt, struct vcpu, arch.pv.trap_ctxt); in __dummy__()
     69 OFFSET(VCPU_kernel_sp, struct vcpu, arch.pv.kernel_sp); in __dummy__()
     70 OFFSET(VCPU_kernel_ss, struct vcpu, arch.pv.kernel_ss); in __dummy__()
     71 OFFSET(VCPU_iopl, struct vcpu, arch.pv.iopl); in __dummy__()
     73 OFFSET(VCPU_cr3, struct vcpu, arch.cr3); in __dummy__()
     74 OFFSET(VCPU_arch_msrs, struct vcpu, arch.msrs); in __dummy__()
     75 OFFSET(VCPU_nmi_pending, struct vcpu, arch.nmi_pending); in __dummy__()
    [all …]

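asm-offsets.c is never executed: each OFFSET() line plants a struct-member offset as literal text in the compiler's assembly output, and the build scrapes those markers into a header that assembly code can #include. A compilable sketch of the trick; the macro spelling follows the common Linux/Xen shape but is illustrative rather than Xen's exact definition:

    #include <stddef.h>

    /* Emit "->SYM <value>" into the generated .s file as an .ascii marker. */
    #define DEFINE(sym, val) \
        asm volatile ( "\n.ascii \"->" #sym " %0\"" : : "i" (val) )
    #define OFFSET(sym, str, mem) DEFINE(sym, offsetof(str, mem))

    struct vcpu_arch { long flags; long cr3; };
    struct vcpu { int id; struct vcpu_arch arch; };

    void __dummy__(void)
    {
        OFFSET(VCPU_thread_flags, struct vcpu, arch.flags);
        OFFSET(VCPU_cr3, struct vcpu, arch.cr3);
    }

Compiling this with cc -S and grepping the output for "->" recovers the numeric offsets without linking or running anything, which is why the containing function can be a __dummy__() that is never called.
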
/xen/xen/arch/x86/cpu/mcheck/
vmce.c
     65 v->arch.vmce.mcg_status = 0; in vmce_init_vcpu()
     71 spin_lock_init(&v->arch.vmce.lock); in vmce_init_vcpu()
     92 v->arch.vmce.mcg_cap = ctxt->caps; in vmce_restore_vcpu()
    184 spin_lock(&cur->arch.vmce.lock); in vmce_rdmsr()
    196 *val = cur->arch.vmce.mcg_cap; in vmce_rdmsr()
    231 spin_unlock(&cur->arch.vmce.lock); in vmce_rdmsr()
    313 spin_lock(&cur->arch.vmce.lock); in vmce_wrmsr()
    351 spin_unlock(&cur->arch.vmce.lock); in vmce_wrmsr()
    359 .caps = v->arch.vmce.mcg_cap, in vmce_save_vcpu_ctxt()
    447 spin_lock(&v->arch.vmce.lock); in vcpu_fill_mc_msrs()
    [all …]

/xen/xen/arch/arm/arm64/
vfp.c
     27 : "=Q" (*v->arch.vfp.fpregs) : "r" (v->arch.vfp.fpregs)); in vfp_save_state()
     29 v->arch.vfp.fpsr = READ_SYSREG32(FPSR); in vfp_save_state()
     30 v->arch.vfp.fpcr = READ_SYSREG32(FPCR); in vfp_save_state()
     32 v->arch.vfp.fpexc32_el2 = READ_SYSREG32(FPEXC32_EL2); in vfp_save_state()
     56 : : "Q" (*v->arch.vfp.fpregs), "r" (v->arch.vfp.fpregs)); in vfp_restore_state()
     58 WRITE_SYSREG32(v->arch.vfp.fpsr, FPSR); in vfp_restore_state()
     59 WRITE_SYSREG32(v->arch.vfp.fpcr, FPCR); in vfp_restore_state()
     61 WRITE_SYSREG32(v->arch.vfp.fpexc32_el2, FPEXC32_EL2); in vfp_restore_state()

/xen/xen/arch/x86/mm/
paging.c
     58 page = d->arch.paging.alloc_page(d); in paging_new_log_dirty_page()
     65 d->arch.paging.log_dirty.allocs++; in paging_new_log_dirty_page()
    106 d->arch.paging.log_dirty.allocs--; in paging_free_log_dirty_page()
    123 if ( !d->arch.paging.preempt.dom ) in paging_free_log_dirty_bitmap()
    199 d->arch.paging.preempt.dom = NULL; in paging_free_log_dirty_bitmap()
    370 mfn = d->arch.paging.log_dirty.top; in paging_mfn_is_dirty()
    433 if ( !d->arch.paging.preempt.dom ) in paging_log_dirty_op()
    573 d->arch.paging.preempt.dom = NULL; in paging_log_dirty_op()
    628 d->arch.paging.log_dirty.ops = ops; in paging_log_dirty_init()
    642 mm_lock_init(&d->arch.paging.lock); in paging_domain_init()
    [all …]

/xen/xen/arch/x86/hvm/vmx/
vmx.c
     406 d->arch.ctxt_switch = &csw; in vmx_domain_initialise()
     615 v->arch.hvm.flag_dr_dirty = 0; in vmx_save_dr()
     633 v->arch.hvm.flag_dr_dirty = 1; in __restore_debug_registers()
     700 v->arch.guest_table = in vmx_restore_cr0_cr3()
    1233 if ( v->domain->arch.vtsc ) in vmx_setup_tsc_scaling()
    1473 v->arch.hvm.hw_cr[0] = in vmx_update_guest_cr()
    1488 v->arch.hvm.hw_cr[4] |= v->arch.hvm.guest_cr[4]; in vmx_update_guest_cr()
    1542 v->arch.hvm.vmx.cr4_host_mask |= v->arch.hvm.vmx.vmx_realmode ? in vmx_update_guest_cr()
    3684 v->arch.hvm.guest_cr[4] &= v->arch.hvm.vmx.cr4_host_mask; in vmx_vmexit_handler()
    3685 v->arch.hvm.guest_cr[4] |= (v->arch.hvm.hw_cr[4] & in vmx_vmexit_handler()
    [all …]

/xen/xen/include/asm-arm/
grant_table.h
     54 (gt)->arch.shared_gfn = xmalloc_array(gfn_t, ngf_); \
     55 (gt)->arch.status_gfn = xmalloc_array(gfn_t, nsf_); \
     56 if ( (gt)->arch.shared_gfn && (gt)->arch.status_gfn ) \
     59 (gt)->arch.shared_gfn[ngf_] = INVALID_GFN; \
     61 (gt)->arch.status_gfn[nsf_] = INVALID_GFN; \
     65 (gt)->arch.shared_gfn ? 0 : -ENOMEM; \
     70 XFREE((gt)->arch.shared_gfn); \
     71 XFREE((gt)->arch.status_gfn); \
     76 ((st) ? (gt)->arch.status_gfn : (gt)->arch.shared_gfn)[idx] = \
     86 (((i) >= nr_grant_frames(t)) ? INVALID_GFN : (t)->arch.shared_gfn[i])
    [all …]

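These macro fragments follow an allocate-and-poison pattern: both GFN arrays are allocated, every slot is preset to INVALID_GFN so unmapped frames read back as invalid, and failure of either allocation frees both. A standalone sketch of that shape, with malloc()/free() standing in for Xen's xmalloc_array()/XFREE() and a stand-in gfn_t/INVALID_GFN:

    #include <errno.h>
    #include <stdint.h>
    #include <stdlib.h>

    typedef uint64_t gfn_t;
    #define INVALID_GFN ((gfn_t)~0ULL)

    struct arch_grant_table {
        gfn_t *shared_gfn;
        gfn_t *status_gfn;
    };

    int gnttab_init_arch(struct arch_grant_table *gt, size_t ngf, size_t nsf)
    {
        gt->shared_gfn = malloc(ngf * sizeof(gfn_t));
        gt->status_gfn = malloc(nsf * sizeof(gfn_t));
        if ( gt->shared_gfn && gt->status_gfn )
        {
            /* Poison every slot so lookups of unmapped frames fail cleanly. */
            for ( size_t i = 0; i < ngf; i++ )
                gt->shared_gfn[i] = INVALID_GFN;
            for ( size_t i = 0; i < nsf; i++ )
                gt->status_gfn[i] = INVALID_GFN;
            return 0;
        }
        free(gt->shared_gfn);          /* free(NULL) is a no-op */
        free(gt->status_gfn);
        gt->shared_gfn = gt->status_gfn = NULL;
        return -ENOMEM;
    }
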
/xen/xen/arch/x86/mm/shadow/
common.c
      64 d->arch.paging.shadow.oos_active = 0; in shadow_domain_init()
    2419 if ( v->arch.paging.mode ) in sh_update_paging_modes()
    2442 v->arch.guest_table = d->arch.paging.shadow.unpaged_pagetable; in sh_update_paging_modes()
    2448 v->arch.paging.mode = in sh_update_paging_modes()
    2454 v->arch.paging.mode = in sh_update_paging_modes()
    2460 v->arch.paging.mode = in sh_update_paging_modes()
    2579 d->arch.paging.mode = new_mode; in sh_new_mode()
    2739 if ( v->arch.paging.vtlb ) in shadow_teardown()
    2794 if ( d->arch.hvm.dirty_vram ) in shadow_teardown()
    2882 if ( d->arch.paging.mode == 0 ) in shadow_one_bit_enable()
    [all …]

/xen/xen/include/asm-x86/
shared.h
      5 (u32 *)&(d)->shared_info->native.arch.nmi_reason : \
      6 (u32 *)&(d)->shared_info->compat.arch.nmi_reason)
     12 d->shared_info->native.arch.field : \
     13 d->shared_info->compat.arch.field; \
     19 d->shared_info->native.arch.field = val; \
     21 d->shared_info->compat.arch.field = val; \
     28 v->vcpu_info->native.arch.field : \
     29 v->vcpu_info->compat.arch.field; \
     35 v->vcpu_info->native.arch.field = val; \
     37 v->vcpu_info->compat.arch.field = val; \

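These macros all repeat one idiom: the shared_info/vcpu_info pages exist in a native and a compat layout, and every field access selects the variant matching the guest's word size at run time. A sketch of that selection for a single field, assuming the selector is the guest's 32-bit shared-info flag as in Xen's has_32bit_shinfo(); the two layouts here are simplified stand-ins:

    #include <stdint.h>

    /* Same field, different offsets in the two ABI layouts. */
    struct arch_native { uint64_t pad; uint32_t nmi_reason; };
    struct arch_compat { uint32_t pad; uint32_t nmi_reason; };

    union shared_info {
        struct { struct arch_native arch; } native;
        struct { struct arch_compat arch; } compat;
    };

    struct domain {
        int has_32bit_shinfo;             /* has_32bit_shinfo(d) stand-in */
        union shared_info *shared_info;
    };

    /* One-field equivalent of the accessor macros above. */
    uint32_t get_nmi_reason(const struct domain *d)
    {
        return !d->has_32bit_shinfo
               ? d->shared_info->native.arch.nmi_reason
               : d->shared_info->compat.arch.nmi_reason;
    }

Because both structs overlay the same page through one union, only the offset arithmetic differs; picking the wrong variant would read the right page at the wrong offset.
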
/xen/xen/include/
xlat.lst
     10 ? pmu_amd_ctxt arch-x86/pmu.h
     11 ? pmu_arch arch-x86/pmu.h
     12 ? pmu_cntr_pair arch-x86/pmu.h
     13 ? pmu_intel_ctxt arch-x86/pmu.h
     14 ? pmu_regs arch-x86/pmu.h
     15 ! cpu_user_regs arch-x86/xen-@arch@.h
     16 ! trap_info arch-x86/xen.h
     18 ? mc arch-x86/xen-mca.h
     24 ? mcinfo_msr arch-x86/xen-mca.h
     26 ! mc_fetch arch-x86/xen-mca.h
    [all …]