| /linux/tools/testing/selftests/kvm/x86_64/ |
| hyperv_clock.c |
     16   volatile s64 tsc_offset;   member
     79   return mul_u64_u64_shr64(rdtsc(), tsc_page->tsc_scale) + tsc_page->tsc_offset;   in get_tscpage_ts()
    101   u64 tsc_scale, tsc_offset;   in guest_main() (local)
    116   GUEST_ASSERT(tsc_page->tsc_offset == 0);   in guest_main()
    130   tsc_offset = tsc_page->tsc_offset;   in guest_main()
    137   GUEST_ASSERT(tsc_page->tsc_offset != tsc_offset);   in guest_main()
    147   tsc_offset = tsc_page->tsc_offset;   in guest_main()
    150   GUEST_ASSERT(tsc_page->tsc_offset == tsc_offset);   in guest_main()
    168   GUEST_ASSERT(tsc_page->tsc_offset == 0);   in guest_main()
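The selftest's read path at hyperv_clock.c:79 is the Hyper-V TSC-page formula: reference time in 100 ns units = ((tsc * tsc_scale) >> 64) + tsc_offset. Below is a minimal standalone sketch of that computation, not the selftest itself; the trimmed struct layout and the mul_u64_u64_shr64 helper are reproduced here as assumptions, and real readers such as hv_read_tsc_page_tsc() additionally re-check tsc_sequence around the reads.

```c
/* Hedged sketch: derive the Hyper-V reference time from a TSC page sample. */
#include <stdint.h>

struct tsc_page_sketch {                 /* trimmed layout; assumption */
	volatile uint32_t tsc_sequence;  /* real readers re-check this */
	uint32_t reserved1;
	volatile uint64_t tsc_scale;     /* 64.64 fixed-point multiplier */
	volatile int64_t tsc_offset;
};

/* high 64 bits of the 128-bit product, like the selftest helper */
static uint64_t mul_u64_u64_shr64(uint64_t a, uint64_t b)
{
	return (uint64_t)(((unsigned __int128)a * b) >> 64);
}

/* reference time in 100 ns units: (tsc * scale) >> 64 + offset */
static uint64_t tsc_page_to_ref_time(const struct tsc_page_sketch *p, uint64_t tsc)
{
	return mul_u64_u64_shr64(tsc, p->tsc_scale) + (uint64_t)p->tsc_offset;
}
```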
|
| /linux/tools/testing/selftests/kvm/ |
| system_counter_offset_test.c |
     22   uint64_t tsc_offset;   member
     43   KVM_VCPU_TSC_OFFSET, &test->tsc_offset, true);   in setup_system_counter()
     53   return rdtsc() + test->tsc_offset;   in host_read_guest_system_counter()
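setup_system_counter() at line 43 programs the guest TSC offset through the per-vCPU device-attribute interface, and line 53 shows the host-side expectation of rdtsc() + tsc_offset. A hedged sketch of the same operation with raw ioctls follows; it assumes a kernel exposing KVM_CAP_VCPU_ATTRIBUTES and the KVM_VCPU_TSC_CTRL / KVM_VCPU_TSC_OFFSET attribute, and omits error handling.

```c
/* Hedged sketch: set a vCPU's TSC offset from the host, as the selftest
 * does through its helper at line 43.  vcpu_fd is an open KVM vCPU fd. */
#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

static int set_guest_tsc_offset(int vcpu_fd, uint64_t offset)
{
	struct kvm_device_attr attr = {
		.group = KVM_VCPU_TSC_CTRL,
		.attr  = KVM_VCPU_TSC_OFFSET,
		.addr  = (uint64_t)(uintptr_t)&offset,
	};

	/* afterwards the guest's RDTSC observes roughly host rdtsc() + offset */
	return ioctl(vcpu_fd, KVM_SET_DEVICE_ATTR, &attr);
}
```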
|
| /linux/tools/perf/util/intel-pt-decoder/ |
| intel-pt-decoder.c |
   2269   vmcs_info->tsc_offset);   in intel_pt_translate_vm_tsc()
   2273   uint64_t tsc_offset)   in intel_pt_translate_vm_tsc_offset() (argument)
   2277   .tsc_offset = tsc_offset   in intel_pt_translate_vm_tsc_offset()
   2350   uint64_t tsc_offset = 0;   in intel_pt_vm_tm_corr_tsc() (local)
   2395   tsc_offset = host_tsc - expected_tsc;   in intel_pt_vm_tm_corr_tsc()
   2415   if (!tsc_offset)   in intel_pt_vm_tm_corr_tsc()
   2425   (tsc_offset & SEVEN_BYTES) ==   in intel_pt_vm_tm_corr_tsc()
   2466   if (assign && (vmcs_info->tsc_offset != tsc_offset ||   in intel_pt_vm_tm_corr_tsc()
   2468   bool print = vmcs_info->tsc_offset != tsc_offset;   in intel_pt_vm_tm_corr_tsc()
   2470   vmcs_info->tsc_offset = tsc_offset;   in intel_pt_vm_tm_corr_tsc()
   [all …]
|
| intel-pt-decoder.h |
    207   uint64_t tsc_offset;   member
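The decoder entries above all revolve around one architectural relationship: with VMX/SVM TSC offsetting, the guest observes guest_tsc = host_tsc + TSC_OFFSET. The sketch below shows only that underlying arithmetic; the helper names are invented here, and the perf decoder keeps its own sign convention for the per-VMCS tsc_offset it stores (see line 2395), so this is not the perf code itself.

```c
/* Illustrative only: the hardware relationship that VM time correlation
 * has to invert.  Unsigned wraparound is intentional. */
#include <stdint.h>

static inline uint64_t host_to_guest_tsc(uint64_t host_tsc, uint64_t vmx_tsc_offset)
{
	return host_tsc + vmx_tsc_offset;   /* what the guest's RDTSC returns */
}

static inline uint64_t guest_to_host_tsc(uint64_t guest_tsc, uint64_t vmx_tsc_offset)
{
	return guest_tsc - vmx_tsc_offset;  /* map a guest timestamp back to host time */
}
```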
|
| /linux/arch/x86/kvm/svm/ |
| nested.c |
    176   dst->tsc_offset = from->tsc_offset;   in copy_vmcb_control_area()
    556   vcpu->arch.tsc_offset = kvm_calc_nested_tsc_offset(   in nested_vmcb02_prepare_control()
    558   svm->nested.ctl.tsc_offset,   in nested_vmcb02_prepare_control()
    561   svm->vmcb->control.tsc_offset = vcpu->arch.tsc_offset;   in nested_vmcb02_prepare_control()
    845   svm->vcpu.arch.tsc_offset = svm->vcpu.arch.l1_tsc_offset;   in nested_svm_vmexit()
    846   if (svm->vmcb->control.tsc_offset != svm->vcpu.arch.tsc_offset) {   in nested_svm_vmexit()
    847   svm->vmcb->control.tsc_offset = svm->vcpu.arch.tsc_offset;   in nested_svm_vmexit()
|
| svm.c |
   1135   return svm->nested.ctl.tsc_offset;   in svm_get_l2_tsc_offset()
   1149   svm->vmcb01.ptr->control.tsc_offset = vcpu->arch.l1_tsc_offset;   in svm_write_tsc_offset()
   1150   svm->vmcb->control.tsc_offset = offset;   in svm_write_tsc_offset()
   3238   pr_err("%-20s%016llx\n", "tsc_offset:", control->tsc_offset);   in dump_vmcb()
|
| /linux/include/clocksource/ |
| hyperv_timer.h |
     70   offset = READ_ONCE(tsc_pg->tsc_offset);   in hv_read_tsc_page_tsc()
|
| /linux/arch/x86/kvm/vmx/ |
| vmcs12.h |
     43   u64 tsc_offset;   member
    226   CHECK_OFFSET(tsc_offset, 88);   in vmx_check_vmcs12_offsets()
|
| vmcs12.c |
     38   FIELD64(TSC_OFFSET, tsc_offset),
|
| evmcs.c |
     71   EVMCS1_FIELD(TSC_OFFSET, tsc_offset,
|
| nested.c |
   1691   vmcs12->tsc_offset = evmcs->tsc_offset;   in copy_enlightened_to_vmcs12()
   2514   vcpu->arch.tsc_offset = kvm_calc_nested_tsc_offset(   in prepare_vmcs02()
   2523   vmcs_write64(TSC_OFFSET, vcpu->arch.tsc_offset);   in prepare_vmcs02()
   3472   vcpu->arch.tsc_offset -= vmcs12->tsc_offset;   in nested_vmx_enter_non_root_mode()
   4520   vcpu->arch.tsc_offset = vcpu->arch.l1_tsc_offset;   in nested_vmx_vmexit()
   4559   vmcs_write64(TSC_OFFSET, vcpu->arch.tsc_offset);   in nested_vmx_vmexit()
|
| vmx.c |
   1695   return vmcs12->tsc_offset;   in vmx_get_l2_tsc_offset()
|
| /linux/tools/testing/selftests/kvm/include/x86_64/ |
| evmcs.h |
    123   u64 tsc_offset;   member
    335   *value = current_evmcs->tsc_offset;   in evmcs_vmread()
    722   current_evmcs->tsc_offset = value;   in evmcs_vmwrite()
|
| svm.h |
     72   u64 tsc_offset;   member
|
| /linux/arch/x86/kvm/ |
| debugfs.c |
     34   *val = vcpu->arch.tsc_offset;   in vcpu_get_tsc_offset()
|
| hyperv.c |
    552   + hv->tsc_ref.tsc_offset;   in get_time_ref_counter()
   1093   tsc_ref->tsc_offset = hv_clock->system_time;   in compute_tsc_page_parameters()
   1094   do_div(tsc_ref->tsc_offset, 100);   in compute_tsc_page_parameters()
   1095   tsc_ref->tsc_offset -=   in compute_tsc_page_parameters()
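Lines 1093-1095 pick the TSC-page offset so that the Hyper-V reference counter agrees with kvmclock at the (tsc_timestamp, system_time) reference point: tsc_offset = system_time / 100 - ((tsc_timestamp * tsc_scale) >> 64). A hedged standalone sketch of that relation is below; the kernel code itself uses do_div() and its own 128-bit multiply helpers.

```c
/* Hedged sketch of the constraint compute_tsc_page_parameters() enforces:
 * (tsc_timestamp * tsc_scale) >> 64 + tsc_offset == system_time / 100,
 * i.e. the 100 ns Hyper-V reference counter matches kvmclock (ns) at the
 * kvmclock reference point. */
#include <stdint.h>

static int64_t tsc_page_offset(uint64_t tsc_timestamp, uint64_t system_time_ns,
			       uint64_t tsc_scale)
{
	uint64_t scaled = (uint64_t)(((unsigned __int128)tsc_timestamp * tsc_scale) >> 64);

	return (int64_t)(system_time_ns / 100 - scaled);
}
```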
|
| x86.c |
   2438   vcpu->arch.tsc_offset = kvm_calc_nested_tsc_offset(   in kvm_vcpu_write_tsc_offset()
   2443   vcpu->arch.tsc_offset = l1_offset;   in kvm_vcpu_write_tsc_offset()
   2445   static_call(kvm_x86_write_tsc_offset)(vcpu, vcpu->arch.tsc_offset);   in kvm_vcpu_write_tsc_offset()
   2590   u64 tsc_offset = vcpu->arch.l1_tsc_offset;   in adjust_tsc_offset_guest() (local)
   2591   kvm_vcpu_write_tsc_offset(vcpu, tsc_offset + adjustment);   in adjust_tsc_offset_guest()
   3801   offset = vcpu->arch.tsc_offset;   in kvm_get_msr_common()
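kvm_vcpu_write_tsc_offset() at line 2438 derives the offset actually programmed into hardware while L2 runs via kvm_calc_nested_tsc_offset(): L1's offset is first scaled by the TSC multiplier L1 set for L2, then L2's own offset is added. A hedged sketch of that combination follows; the 48-bit fixed-point layout and the helper names are illustrative assumptions, not the KVM implementation.

```c
/* Hedged sketch of the nested TSC offset combination used when entering L2:
 * hardware applies a single offset, so L1's offset must be pre-scaled by
 * L2's TSC multiplier before L2's own offset is added. */
#include <stdint.h>

#define TSC_RATIO_FRAC_BITS 48   /* assumption: SVM-style fixed point */

/* (a * b) >> shift with signed 64-bit a, via a 128-bit intermediate */
static int64_t mul_s64_u64_shr_sketch(int64_t a, uint64_t b, unsigned int shift)
{
	return (int64_t)(((__int128)a * (__int128)b) >> shift);
}

static uint64_t nested_tsc_offset(uint64_t l1_offset, uint64_t l2_offset,
				  uint64_t l2_multiplier, uint64_t default_ratio)
{
	uint64_t offset;

	/* No scaling requested for L2: L1's offset passes through unchanged. */
	if (l2_multiplier == default_ratio)
		offset = l1_offset;
	else
		offset = (uint64_t)mul_s64_u64_shr_sketch((int64_t)l1_offset,
							  l2_multiplier,
							  TSC_RATIO_FRAC_BITS);

	return offset + l2_offset;
}
```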
|
| /linux/arch/x86/include/asm/ |
| svm.h |
    127   u64 tsc_offset;   member
|
| hyperv-tlfs.h |
    474   u64 tsc_offset;   member
|
| kvm_host.h |
    756   u64 tsc_offset; /* current tsc offset */   member
|
| /linux/include/asm-generic/ |
| hyperv-tlfs.h |
    102   volatile s64 tsc_offset;   member
|
| /linux/Documentation/virt/kvm/ |
| nested-vmx.rst |
    101   u64 tsc_offset;
|
| /linux/tools/perf/util/ |
| intel-pt.c |
    314   v->tsc_offset = dflt_tsc_offset;   in intel_pt_findnew_vmcs()
   3610   u64 tsc_offset, vmcs;   in intel_pt_parse_vm_tm_corr_arg() (local)
   3619   tsc_offset = strtoull(p, &p, 0);   in intel_pt_parse_vm_tm_corr_arg()
   3624   pt->dflt_tsc_offset = tsc_offset;   in intel_pt_parse_vm_tm_corr_arg()
   3635   vmcs_info = intel_pt_findnew_vmcs(&pt->vmcs_info, vmcs, tsc_offset);   in intel_pt_parse_vm_tm_corr_arg()
|