/xen/xen/arch/x86/pv/
mm.c
    33  l1_pgentry_t *map_guest_l1e(unsigned long linear, mfn_t *gl1mfn)  in map_guest_l1e() argument
    40  if ( unlikely(!__addr_ok(linear)) )  in map_guest_l1e()
    45  &__linear_l2_table[l2_linear_offset(linear)],  in map_guest_l1e()
    55  return (l1_pgentry_t *)map_domain_page(*gl1mfn) + l1_table_offset(linear);  in map_guest_l1e()
    62  static l1_pgentry_t guest_get_eff_kern_l1e(unsigned long linear)  in guest_get_eff_kern_l1e() argument
    71  l1e = guest_get_eff_l1e(linear);  in guest_get_eff_kern_l1e()
    90  unsigned long linear = curr->arch.pv.ldt_base + offset;  in pv_map_ldt_shadow_page() local
   107  linear = (uint32_t)linear;  in pv_map_ldt_shadow_page()
   109  gl1e = guest_get_eff_kern_l1e(linear);  in pv_map_ldt_shadow_page()
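map_guest_l1e() maps the guest L1 entry covering a linear address and hands back the MFN of the containing L1 table through *gl1mfn; the pointer comes from map_domain_page() (line 55) and must be unmapped by the caller. A minimal caller sketch against these in-tree APIs (the wrapper name and error code are illustrative, not Xen's):

    /* Sketch: snapshot the guest L1 entry for 'linear', then drop the
     * mapping.  Assumes map_guest_l1e() returns NULL when the address is
     * out of range or no L1 table is present (see lines 40/45 above). */
    static int read_guest_l1e(unsigned long linear, l1_pgentry_t *out)
    {
        mfn_t gl1mfn;
        l1_pgentry_t *pl1e = map_guest_l1e(linear, &gl1mfn);

        if ( !pl1e )
            return -ENOENT;          /* illustrative error code */

        *out = *pl1e;                /* copy the PTE while mapped */
        unmap_domain_page(pl1e);     /* balance the map in the helper */
        return 0;
    }

steal_linear_address() in grant_table.c below follows the same map/inspect/unmap shape.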
mm.h
     4  l1_pgentry_t *map_guest_l1e(unsigned long linear, mfn_t *gl1mfn);
     9  static inline l1_pgentry_t guest_get_eff_l1e(unsigned long linear)  in guest_get_eff_l1e() argument
    16  if ( unlikely(!__addr_ok(linear)) ||  in guest_get_eff_l1e()
    18  &__linear_l1_table[l1_linear_offset(linear)],  in guest_get_eff_l1e()
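guest_get_eff_l1e() reads through __linear_l1_table, i.e. the recursive ("linear") page-table mapping: one top-level slot points back at the top-level table itself, so the L1 entry for any virtual address sits at a fixed, computable virtual offset. A standalone sketch of the index arithmetic, assuming the usual 4-level x86-64 constants (Xen's real macros additionally mask off the non-canonical address bits):

    #define PAGE_SHIFT       12
    #define PAGETABLE_ORDER  9    /* 9 index bits per level */

    /* Index of the L1 entry mapping 'va' inside the linear L1 table:
     * just the virtual page frame number (canonical masking omitted). */
    static inline unsigned long l1_linear_offset(unsigned long va)
    {
        return va >> PAGE_SHIFT;
    }

    /* One level up: the L2 entry's index drops nine more bits. */
    static inline unsigned long l2_linear_offset(unsigned long va)
    {
        return va >> (PAGE_SHIFT + PAGETABLE_ORDER);
    }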
grant_table.c
   138  static bool steal_linear_address(unsigned long linear, l1_pgentry_t *out)  in steal_linear_address() argument
   149  pl1e = map_guest_l1e(linear, &gl1mfn);  in steal_linear_address()
   153  "Could not find L1 PTE for linear %"PRIx64"\n", linear);  in steal_linear_address()
/xen/docs/features/
intel_psr_mba.pandoc
    55  linear.
    57  Non-linear mode: input delay values are powers-of-two from zero to the
    60  response of throttling value is non-linear.
    62  For linear mode, it shows the decimal value. For non-linear mode, it shows
   164  Show system/domain runtime MBA throttling value. For linear mode,
   184  bool linear;
   191  - Member `linear`
   193  `linear` means the response of delay value is linear or not.
   211  linear mode, the max throttling value (MBA_MAX) and so on.
   227  'psr-mab-show'. For linear mode, the decimal value is shown.
   [all …]
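The fragments above describe the two Memory Bandwidth Allocation response modes: in linear mode the throttle value steps uniformly up to MBA_MAX, while in non-linear mode only zero and power-of-two delay values are meaningful. A standalone validity-check sketch in the spirit of mba_sanitize_thrtl() from psr.c below (struct and function names here are illustrative, and any linear-mode granularity check is omitted):

    #include <stdbool.h>

    struct mba_feat {
        unsigned int thrtl_max;  /* maximum delay value (MBA_MAX) */
        bool linear;             /* linear vs. power-of-two response */
    };

    static bool mba_thrtl_valid(const struct mba_feat *feat, unsigned int val)
    {
        if ( val > feat->thrtl_max )
            return false;
        if ( feat->linear )
            return true;                        /* uniform steps */
        return val == 0 || !(val & (val - 1));  /* powers of two only */
    }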
migration.pandoc
   108  guests not using the linear p2m layout
   123  2015-12-11  2  Xen 4.7  Support of linear p2m list
/xen/xen/arch/x86/
smpboot.c
   674  unsigned long linear = (unsigned long)ptr, pfn;  in clone_mapping() local
   688  if ( linear < XEN_VIRT_START ||  in clone_mapping()
   689  (linear >= XEN_VIRT_END && linear < DIRECTMAP_VIRT_START) )  in clone_mapping()
   693  l3_table_offset(linear);  in clone_mapping()
   705  pl2e = l3e_to_l2e(*pl3e) + l2_table_offset(linear);  in clone_mapping()
   716  pl1e = l2e_to_l1e(*pl2e) + l1_table_offset(linear);  in clone_mapping()
   730  l4e_write(&rpt[root_table_offset(linear)],  in clone_mapping()
   734  pl3e = l4e_to_l3e(rpt[root_table_offset(linear)]);  in clone_mapping()
   736  pl3e += l3_table_offset(linear);  in clone_mapping()
   752  pl2e += l2_table_offset(linear);  in clone_mapping()
   [all …]
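clone_mapping() walks one linear address down the 4-level tree, taking a 9-bit index at each level (root_table_offset, l3_table_offset, and so on). The index arithmetic is fixed by the x86-64 page-table format; a self-contained worked example:

    #include <stdio.h>

    #define PAGE_SHIFT 12
    #define PT_ORDER   9    /* 512 entries per table */

    /* 9-bit index at paging level 'lvl' (0 = L1 ... 3 = L4/root). */
    #define PT_INDEX(va, lvl)  (((va) >> (PAGE_SHIFT + (lvl) * PT_ORDER)) & 0x1ff)

    int main(void)
    {
        unsigned long va = 0xffff800012345000UL;  /* arbitrary example */

        /* Same descent as clone_mapping(): root slot, then L3/L2/L1. */
        printf("L4=%lu L3=%lu L2=%lu L1=%lu\n",
               PT_INDEX(va, 3), PT_INDEX(va, 2),
               PT_INDEX(va, 1), PT_INDEX(va, 0));
        return 0;
    }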
psr.c
   103  bool linear;  member
   400  feat->mba.linear = true;  in mba_init_feature()
   415  feat->cos_max, feat->mba.thrtl_max, feat->mba.linear);  in mba_init_feature()
   505  if ( feat->mba.linear )  in mba_get_feat_info()
   530  if ( feat->mba.linear )  in mba_sanitize_thrtl()
/xen/xen/include/asm-x86/hvm/svm/
svm.h
    39  static inline void svm_invlpga(unsigned long linear, uint32_t asid)  in svm_invlpga() argument
    45  "a" (linear), "c" (asid));  in svm_invlpga()
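svm_invlpga() wraps AMD's INVLPGA, which invalidates the TLB entry for the linear address in rAX within the guest TLB tagged by the ASID in ECX, matching the "a"/"c" constraints visible at line 45. A standalone equivalent of such a wrapper (raw opcode bytes are a common way to spell the instruction for older assemblers; whether Xen's version adds a "memory" clobber is not visible above, so treat this as a sketch):

    #include <stdint.h>

    /* INVLPGA: implicit operands rAX (linear address), ECX (ASID).
     * Privileged; only meaningful at CPL0 on AMD hardware. */
    static inline void invlpga(unsigned long linear, uint32_t asid)
    {
        asm volatile ( ".byte 0x0f,0x01,0xdf"   /* invlpga */
                       :
                       : "a" (linear), "c" (asid)
                       : "memory" );
    }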
/xen/xen/include/asm-x86/hvm/
support.h
    94  unsigned long linear;  member
   112  struct vcpu *v, unsigned long addr, bool linear, uint32_t pfec,
hvm.h
   159  void (*invlpg)(struct vcpu *v, unsigned long linear);
   519  static inline void hvm_invlpg(struct vcpu *v, unsigned long linear)  in hvm_invlpg() argument
   521  alternative_vcall(hvm_funcs.invlpg, v, linear);  in hvm_invlpg()
   714  static inline void hvm_invlpg(const struct vcpu *v, unsigned long linear)  in hvm_invlpg() argument
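hvm_invlpg() shows the vendor-dispatch shape: a hook table (hvm_funcs) filled in by SVM or VMX at boot, invoked through alternative_vcall(), Xen's patched indirect call. Stripped of the patching machinery, the pattern is an ordinary function-pointer ops structure; a minimal sketch with illustrative names:

    struct vcpu;  /* opaque for the sketch */

    /* Per-vendor hook table; svm/vmx each install their own handler. */
    struct hvm_ops {
        void (*invlpg)(struct vcpu *v, unsigned long linear);
    };

    static struct hvm_ops hvm_ops;  /* filled in once at start of day */

    static inline void hvm_invlpg(struct vcpu *v, unsigned long linear)
    {
        /* Xen patches this indirect call into a direct one
         * (alternative_vcall); the semantics are identical. */
        hvm_ops.invlpg(v, linear);
    }

The svm.c and vmx.c entries below show the two implementations this dispatches to.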
/xen/xen/arch/arm/
guestcopy.c
    31  bool linear, bool write)  in translate_get_page() argument
    36  if ( linear )  in translate_get_page()
/xen/xen/arch/x86/hvm/
emulate.c
   575  unsigned long linear, unsigned int bytes, uint32_t pfec,  in hvmemul_map_linear_addr() argument
   581  (linear >> PAGE_SHIFT) + 1;  in hvmemul_map_linear_addr()
   609  unsigned long addr = i ? (linear + (i << PAGE_SHIFT)) & PAGE_MASK : linear;  in hvmemul_map_linear_addr()
   626  ASSERT(pfinfo.linear == addr);  in hvmemul_map_linear_addr()
   694  return mapping + (linear & ~PAGE_MASK);  in hvmemul_map_linear_addr()
   708  void *mapping, unsigned long linear, unsigned int bytes,  in hvmemul_unmap_linear_addr() argument
   713  (linear >> PAGE_SHIFT) + 1;  in hvmemul_unmap_linear_addr()
   837  unsigned long *linear)  in hvmemul_virtual_to_linear() argument
   845  *linear = offset;  in hvmemul_virtual_to_linear()
   887  *linear += (reps - 1) * bytes_per_rep;  in hvmemul_virtual_to_linear()
   [all …]
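The expressions at lines 581 and 713 compute how many page frames the byte range [linear, linear + bytes) touches: last frame minus first frame, plus one. A self-contained worked example of that arithmetic (assumes bytes >= 1):

    #include <stdio.h>

    #define PAGE_SHIFT 12

    static unsigned int pages_spanned(unsigned long linear, unsigned int bytes)
    {
        return ((linear + bytes - 1) >> PAGE_SHIFT)
               - (linear >> PAGE_SHIFT) + 1;
    }

    int main(void)
    {
        printf("%u\n", pages_spanned(0x1ff8, 8));  /* ends at the boundary: 1 */
        printf("%u\n", pages_spanned(0x1ff9, 8));  /* crosses into the next: 2 */
        return 0;
    }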
hvm.c
  3032  hvm_inject_page_fault(pfinfo.ec, pfinfo.linear);  in hvm_task_switch()
  3072  hvm_inject_page_fault(pfinfo.ec, pfinfo.linear);  in hvm_task_switch()
  3079  hvm_inject_page_fault(pfinfo.ec, pfinfo.linear);  in hvm_task_switch()
  3128  hvm_inject_page_fault(pfinfo.ec, pfinfo.linear);  in hvm_task_switch()
  3169  hvm_inject_page_fault(pfinfo.ec, pfinfo.linear);  in hvm_task_switch()
  3188  struct vcpu *v, unsigned long addr, bool linear, uint32_t pfec,  in hvm_translate_get_page() argument
  3196  if ( linear )  in hvm_translate_get_page()
  3210  pfinfo->linear = addr;  in hvm_translate_get_page()
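The run of hvm_inject_page_fault(pfinfo.ec, pfinfo.linear) calls in hvm_task_switch() shows the contract of the guest-copy helpers: on a failed translation they fill a pagefault_info with the faulting linear address and error code (lines 3196-3210), and the caller reflects that fault into the guest. A condensed sketch of the calling pattern, assuming the HVMTRANS_* return convention of Xen's copy helpers (the wrapper and its error codes are illustrative):

    /* Sketch: read guest memory at a linear address; on a translation
     * fault, hand the fault back to the guest. */
    static int read_guest(void *buf, unsigned long addr, unsigned int size)
    {
        pagefault_info_t pfinfo;
        int rc = hvm_copy_from_guest_linear(buf, addr, size, 0, &pfinfo);

        if ( rc == HVMTRANS_bad_linear_to_gfn )
        {
            /* pfinfo describes the fault the guest should observe. */
            hvm_inject_page_fault(pfinfo.ec, pfinfo.linear);
            return -EFAULT;  /* illustrative */
        }
        return rc == HVMTRANS_okay ? 0 : -EIO;  /* illustrative */
    }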
/xen/docs/misc/
xl-psr.pandoc
   195  into Class of Service (COS). MBA provides two THRTL mode. One is linear mode
   196  and the other is non-linear mode.
   198  In the linear mode the input precision is defined as 100-(THRTL_MAX). Values
   202  If linear values are not supported then input delay values are powers-of-two
   215  * Non-linear mode: Giving one domain a THRTL of 0xC and the other domain's 0
   248  For linear mode, it shows the decimal value. For non-linear mode, it shows
netif-staging-grants.pandoc
    13  mapped region to describe header/linear region of packet buffers. This document
    27  The proposal is to leverage the already implicit copy from and to packet linear
   177  [ *Linux specific*: This structure emcompasses a linear data region which
   182  region (linear part of the skb) *only* from the first slot.
   189  [ *Linux-specific*: does a copy for the linear region (<=128 bytes) and maps the
   242  grefs with either linear or full packet. This allows us to replace step 27)
   270  linear region. Hence on the case of the first region it is replaced by a memcpy
   370  22) *Linux-specific*: Copy (from first slot gref) up to 256 bytes to the linear
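Several of these fragments describe the same split: a small, bounded copy of the packet head into the skb's linear region, with the rest of the buffer grant-mapped instead of copied. A rough standalone sketch of that decision; every name here is hypothetical, and the 128-byte bound is just the figure quoted at line 189 (this is not the Linux netback code):

    #include <string.h>
    #include <stddef.h>

    #define LINEAR_COPY_MAX 128  /* head bytes worth copying, per the doc */

    /* Hypothetical: copy up to LINEAR_COPY_MAX bytes of the first slot
     * into the linear area; the caller grant-maps whatever remains. */
    static size_t fill_linear_region(char *linear_area,
                                     const char *first_slot, size_t slot_len)
    {
        size_t n = slot_len < LINEAR_COPY_MAX ? slot_len : LINEAR_COPY_MAX;

        memcpy(linear_area, first_slot, n);
        return n;  /* bytes now resident in the linear region */
    }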
/xen/xen/arch/x86/mm/shadow/
none.c
    40  static bool _invlpg(struct vcpu *v, unsigned long linear)  in _invlpg() argument
hvm.c
    79  unsigned long *linear)  in hvm_translate_virtual_addr() argument
    90  hvm_get_seg_reg(x86_seg_cs, sh_ctxt), linear);  in hvm_translate_virtual_addr()
   136  x86_emul_pagefault(pfinfo.ec, pfinfo.linear, &sh_ctxt->ctxt);  in hvm_read()
multi.c
  3523  static bool sh_invlpg(struct vcpu *v, unsigned long linear)  in sh_invlpg() argument
  3546  sh_linear_l4_table(v)[shadow_l4_linear_offset(linear)])  in sh_invlpg()
  3553  + shadow_l3_linear_offset(linear)),  in sh_invlpg()
  3563  if ( !(l3e_get_flags(v->arch.paging.shadow.l3table[shadow_l3_linear_offset(linear)])  in sh_invlpg()
  3572  sh_linear_l2_table(v) + shadow_l2_linear_offset(linear),  in sh_invlpg()
  3616  + shadow_l2_linear_offset(linear),  in sh_invlpg()
  3638  sl1 = sh_linear_l1_table(v) + shadow_l1_linear_offset(linear);  in sh_invlpg()
/xen/tools/libxl/
libxl_psr.c
   488  xl_info->u.mba.linear = xc_info->mba.linear;  in libxl__xc_hw_info_to_libxl_hw_info()
/xen/xen/arch/x86/boot/
wakeup.S
    70  orb $0x40, %bh   # Use linear frame buffer
/xen/xen/arch/x86/hvm/svm/
svm.c
  1246  event->cr2 = pfinfo.linear;  in svm_emul_swint_injection()
  2374  struct vcpu *v, unsigned long linear, uint32_t asid)  in svm_invlpga_intercept() argument
  2376  svm_invlpga(linear,  in svm_invlpga_intercept()
  2382  static void svm_invlpg_intercept(unsigned long linear)  in svm_invlpg_intercept() argument
  2384  HVMTRACE_LONG_2D(INVLPG, 0, TRC_PAR_LONG(linear));  in svm_invlpg_intercept()
  2385  paging_invlpg(current, linear);  in svm_invlpg_intercept()
  2398  static void svm_invlpg(struct vcpu *v, unsigned long linear)  in svm_invlpg() argument
  2400  svm_asid_g_invlpg(v, linear);  in svm_invlpg()
/xen/tools/xl/
xl_psr.c
   337  if (type == LIBXL_PSR_CBM_TYPE_MBA_THRTL && info->u.mba.linear)  in psr_print_one_domain_val_type()
   625  info[i].u.mba.linear ? "Enabled" : "Disabled");  in psr_mba_hwinfo()
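xl's output matches the documentation above: MBA throttle values print as decimal when the hardware reports linear mode and as hexadecimal otherwise (line 337 picks the branch). A condensed standalone sketch of that choice (the function name is illustrative):

    #include <stdio.h>
    #include <stdbool.h>
    #include <inttypes.h>

    static void print_mba_thrtl(uint64_t val, bool linear)
    {
        if (linear)
            printf("%" PRIu64 "\n", val);   /* linear mode: decimal */
        else
            printf("%#" PRIx64 "\n", val);  /* non-linear mode: hex */
    }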
/xen/xen/include/asm-x86/
paging.h
   127  unsigned long linear);
/xen/tools/libxc/
xc_psr.c
   375  hw_info->mba.linear = sysctl.u.psr_alloc.u.mba_info.flags &  in xc_psr_get_hw_info()
/xen/xen/arch/x86/hvm/vmx/
vmx.c
    79  static void vmx_invlpg(struct vcpu *v, unsigned long linear);
  2627  static void vmx_invlpg_intercept(unsigned long linear)  in vmx_invlpg_intercept() argument
  2629  HVMTRACE_LONG_2D(INVLPG, /*invlpga=*/ 0, TRC_PAR_LONG(linear));  in vmx_invlpg_intercept()
  2630  paging_invlpg(current, linear);  in vmx_invlpg_intercept()
  2633  static void vmx_invlpg(struct vcpu *v, unsigned long linear)  in vmx_invlpg() argument
  2636  vpid_sync_vcpu_gva(v, linear);  in vmx_invlpg()