Lines matching refs:pv, grouped by the containing function:

map_shinfo():
    19  ctx->x86.pv.shinfo = xc_map_foreign_range(
    21  if ( !ctx->x86.pv.shinfo )

copy_mfns_from_guest():
    40  if ( ctx->x86.pv.width == sizeof(unsigned long) )

map_p2m_leaves():
    85  ctx->x86.pv.p2m = xc_map_foreign_pages(xch, ctx->domid, PROT_READ,
    87  if ( !ctx->x86.pv.p2m )
    93  ctx->save.p2m_size = ctx->x86.pv.max_pfn + 1;
    94  ctx->x86.pv.p2m_frames = n_mfns;
    95  ctx->x86.pv.p2m_pfns = malloc(n_mfns * sizeof(*mfns));
    96  if ( !ctx->x86.pv.p2m_pfns )
   114  ctx->x86.pv.p2m_pfns[x] = mfn_to_pfn(ctx, mfns[x]);

map_p2m_tree():
   147  fpp = PAGE_SIZE / ctx->x86.pv.width;
   148  fll_entries = (ctx->x86.pv.max_pfn / (fpp * fpp)) + 1;
   151  ERROR("max_pfn %#lx too large for p2m tree", ctx->x86.pv.max_pfn);
   155  fll_mfn = GET_FIELD(ctx->x86.pv.shinfo, arch.pfn_to_mfn_frame_list_list,
   156                      ctx->x86.pv.width);
   157  if ( fll_mfn == 0 || fll_mfn > ctx->x86.pv.max_mfn )
   192  if ( local_fll[x] == 0 || local_fll[x] > ctx->x86.pv.max_mfn )
   216  if ( max_pfn < ctx->x86.pv.max_pfn )
   218  ctx->x86.pv.max_pfn = max_pfn;
   219  fll_entries = (ctx->x86.pv.max_pfn / (fpp * fpp)) + 1;
   221  ctx->x86.pv.p2m_frames = (ctx->x86.pv.max_pfn + fpp) / fpp;
   222  DPRINTF("max_pfn %#lx, p2m_frames %d", ctx->x86.pv.max_pfn,
   223          ctx->x86.pv.p2m_frames);
   224  fl_entries = (ctx->x86.pv.max_pfn / fpp) + 1;
   252  if ( local_fl[x] == 0 || local_fl[x] > ctx->x86.pv.max_mfn )

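As an illustrative aside (not part of the listing): with the usual 4096-byte PAGE_SIZE and an 8-byte guest width, the sizing expressions above work out as follows; max_pfn here is a made-up value for a 1 GiB 64-bit PV guest.

    #include <stdio.h>

    /* Worked example of the p2m-tree sizing arithmetic listed above.
     * Assumptions: PAGE_SIZE is 4096, ctx->x86.pv.width is 8 (64-bit PV),
     * and max_pfn is a hypothetical value for a 1 GiB guest. */
    int main(void)
    {
        unsigned long max_pfn = 0x3ffff;                  /* 262144 pages - 1    */
        unsigned int fpp = 4096 / 8;                      /* PFNs per frame: 512 */
        unsigned int fll_entries = (max_pfn / (fpp * fpp)) + 1;   /* = 1   */
        unsigned int fl_entries  = (max_pfn / fpp) + 1;           /* = 512 */
        unsigned int p2m_frames  = (max_pfn + fpp) / fpp;         /* = 512 */

        printf("fpp=%u fll_entries=%u fl_entries=%u p2m_frames=%u\n",
               fpp, fll_entries, fl_entries, p2m_frames);
        return 0;
    }

In other words, such a guest needs a single frame-list-list entry, 512 frame-list entries, and 512 p2m leaf pages.
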
get_p2m_generation():
   284  p2m_generation = GET_FIELD(ctx->x86.pv.shinfo, arch.p2m_generation,
   285  ctx->x86.pv.width);
   287  rc = (p2m_generation == ctx->x86.pv.p2m_generation) ? 0 : -1;
   288  ctx->x86.pv.p2m_generation = p2m_generation;

map_p2m_list():
   325  if ( p2m_mfn > ctx->x86.pv.max_mfn )
   334  p2m_vaddr = GET_FIELD(ctx->x86.pv.shinfo, arch.p2m_vaddr,
   335  ctx->x86.pv.width);
   336  fpp = PAGE_SIZE / ctx->x86.pv.width;
   337  ctx->x86.pv.p2m_frames = ctx->x86.pv.max_pfn / fpp + 1;
   338  p2m_end = p2m_vaddr + ctx->x86.pv.p2m_frames * PAGE_SIZE - 1;
   340  if ( ctx->x86.pv.width == 8 )
   371  DPRINTF("max_pfn %#lx, p2m_frames %d", ctx->x86.pv.max_pfn,
   372  ctx->x86.pv.p2m_frames);
   385  for ( level = ctx->x86.pv.levels; level > 0; level-- )
   410  if ( mfn == 0 || mfn > ctx->x86.pv.max_mfn )
   435  if ( max_pfn < ctx->x86.pv.max_pfn )
   437  ctx->x86.pv.max_pfn = max_pfn;
   438  ctx->x86.pv.p2m_frames = (ctx->x86.pv.max_pfn + fpp) / fpp;
   439  p2m_end = p2m_vaddr + ctx->x86.pv.p2m_frames * PAGE_SIZE - 1;

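For the linear p2m variant above, a similar back-of-the-envelope sketch; p2m_vaddr and max_pfn are hypothetical values, PAGE_SIZE is assumed to be 4096 and the guest width 8.

    #include <stdio.h>
    #include <stdint.h>

    /* Worked example of the linear-p2m range computed in map_p2m_list().
     * p2m_vaddr and max_pfn are made-up values; width assumed to be 8. */
    int main(void)
    {
        uint64_t p2m_vaddr = 0xffff800000000000ULL;   /* hypothetical guest VA     */
        uint64_t max_pfn   = 0x3ffff;                 /* 1 GiB guest               */
        unsigned int fpp   = 4096 / 8;                /* 512 p2m entries per page  */
        unsigned int p2m_frames = max_pfn / fpp + 1;  /* = 512                     */
        uint64_t p2m_end = p2m_vaddr + (uint64_t)p2m_frames * 4096 - 1;

        printf("p2m_frames=%u, p2m spans %#llx-%#llx\n", p2m_frames,
               (unsigned long long)p2m_vaddr, (unsigned long long)p2m_end);
        return 0;
    }

The walk at line 385 over ctx->x86.pv.levels presumably maps the guest page-table pages that cover this virtual range.
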
map_p2m():
   469  ctx->x86.pv.p2m_generation = ~0ULL;
   470  ctx->x86.pv.max_pfn = GET_FIELD(ctx->x86.pv.shinfo, arch.max_pfn,
   471  ctx->x86.pv.width) - 1;
   472  p2m_cr3 = GET_FIELD(ctx->x86.pv.shinfo, arch.p2m_cr3, ctx->x86.pv.width);

write_one_vcpu_basic():
   506  mfn = GET_FIELD(&vcpu, user_regs.edx, ctx->x86.pv.width);
   515  ctx->x86.pv.width);
   518  gdt_count = GET_FIELD(&vcpu, gdt_ents, ctx->x86.pv.width);
   531  mfn = GET_FIELD(&vcpu, gdt_frames[i], ctx->x86.pv.width);
   540  ctx->x86.pv.width);
   544  mfn = cr3_to_mfn(ctx, GET_FIELD(&vcpu, ctrlreg[3], ctx->x86.pv.width));
   553  SET_FIELD(&vcpu, ctrlreg[3], mfn_to_cr3(ctx, pfn), ctx->x86.pv.width);
   556  if ( ctx->x86.pv.levels == 4 && vcpu.x64.ctrlreg[1] )
   570  if ( ctx->x86.pv.width == 8 )

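The repeated GET_FIELD(..., ctx->x86.pv.width) uses above select between the guest's 32-bit compat and 64-bit register layouts. A minimal illustrative accessor in that spirit follows; the union and names are made up, not the Xen macro or types.

    #include <stdint.h>
    #include <stdio.h>

    /* Illustrative only: width-dependent field access in the spirit of
     * GET_FIELD(&vcpu, user_regs.edx, width).  The union layout here is a
     * stand-in, not the real vcpu context structure. */
    union demo_vcpu {
        struct { uint32_t edx; } x32;
        struct { uint64_t edx; } x64;
    };

    static uint64_t demo_get_edx(const union demo_vcpu *v, unsigned int width)
    {
        return (width == 8) ? v->x64.edx : v->x32.edx;
    }

    int main(void)
    {
        union demo_vcpu v = { .x64.edx = 0x1234 };
        printf("%#llx\n", (unsigned long long)demo_get_edx(&v, 8));
        return 0;
    }
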
write_x86_pv_info():
   797  .guest_width = ctx->x86.pv.width,
   798  .pt_levels = ctx->x86.pv.levels,

write_x86_pv_p2m_frames():
   817  size_t datasz = ctx->x86.pv.p2m_frames * sizeof(uint64_t);
   820  .end_pfn = ctx->x86.pv.max_pfn,
   829  if ( sizeof(uint64_t) != sizeof(*ctx->x86.pv.p2m_pfns) )
   838  for ( i = 0; i < ctx->x86.pv.p2m_frames; ++i )
   839  data[i] = ctx->x86.pv.p2m_pfns[i];
   842  data = (uint64_t *)ctx->x86.pv.p2m_pfns;
   846  if ( data != (uint64_t *)ctx->x86.pv.p2m_pfns )

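Lines 829-846 show a widen-or-alias pattern: the record wants uint64_t PFNs, so the saved array is either reused directly or copied into a temporary 64-bit buffer that is freed afterwards. A self-contained sketch of that pattern, with a hypothetical helper name:

    #include <stdint.h>
    #include <stdlib.h>

    /* Sketch of the widen-or-alias pattern above: return a uint64_t view of
     * 'count' PFNs, setting *allocated when a temporary copy was needed.
     * The helper name is made up for illustration. */
    static uint64_t *pfns_as_u64(unsigned long *pfns, size_t count,
                                 int *allocated)
    {
        uint64_t *data;
        size_t i;

        if ( sizeof(uint64_t) == sizeof(*pfns) )
        {
            *allocated = 0;
            return (uint64_t *)pfns;       /* same width: alias in place */
        }

        data = malloc(count * sizeof(*data));
        if ( !data )
            return NULL;

        for ( i = 0; i < count; ++i )
            data[i] = pfns[i];             /* widen each narrower entry */

        *allocated = 1;
        return data;
    }

    int main(void)
    {
        unsigned long pfns[2] = { 7, 42 };
        int allocated;
        uint64_t *v = pfns_as_u64(pfns, 2, &allocated);

        if ( allocated )
            free(v);
        return 0;
    }
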
write_shared_info():
   860  .data = ctx->x86.pv.shinfo,

normalise_pagetable():
   879  if ( ctx->x86.pv.levels == 4 )
   911  if ( pte_to_frame(src[i]) == ctx->x86.pv.compat_m2p_mfn0 )

x86_pv_pfn_to_gfn():
   992  assert(pfn <= ctx->x86.pv.max_pfn);
   994  return xc_pfn_to_mfn(pfn, ctx->x86.pv.p2m, ctx->x86.pv.width);

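Line 994's xc_pfn_to_mfn(pfn, p2m, width) reads the mapped p2m with the guest's own entry width. A hedged sketch of what such a width-dependent lookup amounts to (not the libxc code):

    #include <stdint.h>

    /* Illustrative width-dependent p2m lookup: 64-bit guests store 8-byte
     * entries, 32-bit compat guests store 4-byte entries.  This mirrors the
     * xc_pfn_to_mfn(pfn, p2m, width) call above but is not the library code. */
    static uint64_t demo_pfn_to_mfn(const void *p2m, uint64_t pfn,
                                    unsigned int width)
    {
        if ( width == sizeof(uint64_t) )
            return ((const uint64_t *)p2m)[pfn];
        else
            return ((const uint32_t *)p2m)[pfn];
    }

    int main(void)
    {
        uint64_t p2m[4] = { 100, 101, 102, 103 };   /* toy p2m: pfn -> mfn */
        return demo_pfn_to_mfn(p2m, 2, sizeof(uint64_t)) == 102 ? 0 : 1;
    }
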
x86_pv_check_vm_state():
  1113  if ( ctx->x86.pv.p2m_generation == ~0ULL )

x86_pv_cleanup():
  1121  free(ctx->x86.pv.p2m_pfns);
  1123  if ( ctx->x86.pv.p2m )
  1124  munmap(ctx->x86.pv.p2m, ctx->x86.pv.p2m_frames * PAGE_SIZE);
  1126  if ( ctx->x86.pv.shinfo )
  1127  munmap(ctx->x86.pv.shinfo, PAGE_SIZE);
  1129  if ( ctx->x86.pv.m2p )
  1130  munmap(ctx->x86.pv.m2p, ctx->x86.pv.nr_m2p_frames * PAGE_SIZE);