/linux/tools/testing/selftests/kvm/x86_64/

hyperv_features.c
    107  while (msr->idx) {                       in guest_msr()
    109  if (!msr->write)                         in guest_msr()
    112  do_wrmsr(msr->idx, msr->write_val);      in guest_msr()
    217  msr->write = 0;                          in guest_test_msrs_access()
    222  msr->write = 0;                          in guest_test_msrs_access()
    232  msr->write = 1;                          in guest_test_msrs_access()
    238  msr->write = 0;                          in guest_test_msrs_access()
    243  msr->write = 0;                          in guest_test_msrs_access()
    468  msr->idx = 0;                            in guest_test_msrs_access()
    474  if (msr->idx)                            in guest_test_msrs_access()
    [all …]

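These hits are the guest side of the Hyper-V feature test: the host fills in an msr_data descriptor and the guest loops over it, reading or writing the selected MSR. A minimal sketch of that loop; the struct fields and helper names are modeled on the hits above, and the host/guest hand-off protocol is an assumption:

    #include <stdbool.h>
    #include <stdint.h>

    struct msr_data {
        uint32_t idx;        /* MSR index to exercise; 0 ends the run (assumed) */
        bool     write;      /* true = wrmsr, false = rdmsr */
        uint64_t write_val;  /* value used for the write case */
    };

    static inline uint64_t do_rdmsr(uint32_t idx)
    {
        uint32_t lo, hi;
        __asm__ volatile("rdmsr" : "=a"(lo), "=d"(hi) : "c"(idx));
        return ((uint64_t)hi << 32) | lo;
    }

    static inline void do_wrmsr(uint32_t idx, uint64_t val)
    {
        __asm__ volatile("wrmsr" : : "c"(idx),
                         "a"((uint32_t)val), "d"((uint32_t)(val >> 32)));
    }

    static void guest_msr(struct msr_data *msr)
    {
        while (msr->idx) {
            if (msr->write)
                do_wrmsr(msr->idx, msr->write_val);
            else
                do_rdmsr(msr->idx);
            /* host updates *msr between iterations (assumed protocol) */
        }
    }
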
userspace_msr_exit_test.c
    430  run->msr.index, msr_index);              in process_rdmsr()
    432  switch (run->msr.index) {                in process_rdmsr()
    434  run->msr.data = 0;                       in process_rdmsr()
    437  run->msr.error = 1;                      in process_rdmsr()
    443  run->msr.data = MSR_FS_BASE;             in process_rdmsr()
    467  switch (run->msr.index) {                in process_wrmsr()
    469  if (run->msr.data != 0)                  in process_wrmsr()
    470  run->msr.error = 1;                      in process_wrmsr()
    473  if (run->msr.data != 1)                  in process_wrmsr()
    474  run->msr.error = 1;                      in process_wrmsr()
    [all …]

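These are the user-space halves of KVM's MSR-filtering exits: when a filtered rdmsr/wrmsr traps out as KVM_EXIT_X86_RDMSR or KVM_EXIT_X86_WRMSR, the VMM answers through the run->msr fields. A hedged sketch of a read handler; the index handled is a placeholder, but the run->msr fields are the real kvm_run ABI:

    #include <linux/kvm.h>

    static void handle_rdmsr_exit(struct kvm_run *run)
    {
        switch (run->msr.index) {
        case 0x4b564d00:            /* placeholder index, for illustration */
            run->msr.data = 0;      /* value returned to the guest */
            break;
        default:
            run->msr.error = 1;     /* non-zero: guest gets a #GP instead */
            break;
        }
    }
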
kvm_pv_test.c
     69  #define TEST_MSR(msr) { .idx = msr, .name = #msr }               argument
     71  #define PR_MSR(msr) ucall(UCALL_PR_MSR, 1, msr)                  argument
     90  static void test_msr(struct msr_data *msr)                      in test_msr() argument
     92  PR_MSR(msr);                                                    in test_msr()
     93  do_rdmsr(msr->idx);                                             in test_msr()
     97  do_wrmsr(msr->idx, 0);                                          in test_msr()
    156  struct msr_data *msr = (struct msr_data *)uc->args[0];          in pr_msr() local
    158  pr_info("testing msr: %s (%#x)\n", msr->name, msr->idx);        in pr_msr()

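TEST_MSR() uses the preprocessor's # stringizing operator so each table entry carries both an MSR's numeric index and its source-level name. A self-contained illustration of the same pattern; the MSR constant's value is real (KVM's PV MSR ABI), the rest is ours:

    #include <stdint.h>
    #include <stdio.h>

    struct msr_data { uint32_t idx; const char *name; };

    #define TEST_MSR(msr) { .idx = msr, .name = #msr }
    #define MSR_KVM_SYSTEM_TIME 0x12

    static const struct msr_data msrs_to_test[] = {
        TEST_MSR(MSR_KVM_SYSTEM_TIME),
    };

    int main(void)
    {
        printf("testing msr: %s (%#x)\n",
               msrs_to_test[0].name, msrs_to_test[0].idx);
        return 0;
    }
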
/linux/arch/x86/kernel/cpu/

perfctr-watchdog.c
    51  if (msr >= MSR_F15H_PERF_CTR)            in nmi_perfctr_msr_to_bit()
    53  return msr - MSR_K7_PERFCTR0;            in nmi_perfctr_msr_to_bit()
    60  return msr - MSR_P6_PERFCTR0;            in nmi_perfctr_msr_to_bit()
    62  return msr - MSR_KNC_PERFCTR0;           in nmi_perfctr_msr_to_bit()
    64  return msr - MSR_P4_BPU_PERFCTR0;        in nmi_perfctr_msr_to_bit()
    84  if (msr >= MSR_F15H_PERF_CTL)            in nmi_evntsel_msr_to_bit()
    86  return msr - MSR_K7_EVNTSEL0;            in nmi_evntsel_msr_to_bit()
    93  return msr - MSR_P6_EVNTSEL0;            in nmi_evntsel_msr_to_bit()
    95  return msr - MSR_KNC_EVNTSEL0;           in nmi_evntsel_msr_to_bit()
    97  return msr - MSR_P4_BSU_ESCR0;           in nmi_evntsel_msr_to_bit()
    [all …]

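The watchdog tracks in-use performance-counter MSRs with a bitmap, so each family's counter MSR is reduced to a bit index by subtracting that family's base register. A hedged single-family sketch; the K7 base value matches arch/x86's msr-index.h, the bounds check is ours:

    #define MSR_K7_PERFCTR0 0xc0010004   /* per arch/x86 msr-index.h */

    static int perfctr_msr_to_bit_sketch(unsigned int msr)
    {
        /* AMD K7-style counters sit contiguously above the base MSR */
        if (msr >= MSR_K7_PERFCTR0 && msr < MSR_K7_PERFCTR0 + 4)
            return msr - MSR_K7_PERFCTR0;
        return -1;   /* not a counter this watchdog manages */
    }
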
feat_ctl.c
    110  u64 msr;                                                    in init_ia32_feat_ctl() local
    112  if (rdmsrl_safe(MSR_IA32_FEAT_CTL, &msr)) {                 in init_ia32_feat_ctl()
    132  if (msr & FEAT_CTL_LOCKED)                                  in init_ia32_feat_ctl()
    139  msr = FEAT_CTL_LOCKED;                                      in init_ia32_feat_ctl()
    147  msr |= FEAT_CTL_VMX_ENABLED_OUTSIDE_SMX;                    in init_ia32_feat_ctl()
    150  msr |= FEAT_CTL_VMX_ENABLED_INSIDE_SMX;                     in init_ia32_feat_ctl()
    154  msr |= FEAT_CTL_SGX_ENABLED;                                in init_ia32_feat_ctl()
    156  msr |= FEAT_CTL_SGX_LC_ENABLED;                             in init_ia32_feat_ctl()
    159  wrmsrl(MSR_IA32_FEAT_CTL, msr);                             in init_ia32_feat_ctl()
    167  if ( (tboot && !(msr & FEAT_CTL_VMX_ENABLED_INSIDE_SMX)) || in init_ia32_feat_ctl()
    [all …]

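IA32_FEAT_CTL is write-once: after the lock bit is set, the enable bits cannot change until reset, so the kernel enables VMX/SGX and locks the MSR itself if firmware did not. A trimmed, hedged sketch of that read-modify-lock flow using the real accessors and bit names (the real function also handles SMX, SGX, and tboot cases):

    #include <asm/msr.h>         /* rdmsrl_safe(), wrmsrl() */
    #include <asm/msr-index.h>   /* MSR_IA32_FEAT_CTL, FEAT_CTL_* */

    static void feat_ctl_lock_sketch(void)
    {
        u64 msr;

        if (rdmsrl_safe(MSR_IA32_FEAT_CTL, &msr))
            return;   /* read faulted: no feature control on this CPU */
        if (msr & FEAT_CTL_LOCKED)
            return;   /* firmware already locked it; read-only until reset */

        msr = FEAT_CTL_LOCKED | FEAT_CTL_VMX_ENABLED_OUTSIDE_SMX;
        wrmsrl(MSR_IA32_FEAT_CTL, msr);   /* enable VMX, then lock */
    }
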
/linux/arch/x86/include/asm/

msr.h
     14  struct msr {                             struct
     26  struct msr reg;                          argument
     27  struct msr *msrs;
    111  u64 __val = __rdmsr((msr)); \
    117  __wrmsr(msr, low, high)
    127  val = __rdmsr(msr);                      in native_read_msr()
    160  __wrmsr(msr, low, high);                 in native_write_msr()
    276  #define rdmsrl(msr, val) \               argument
    327  struct msr *msrs_alloc(void);
    367  struct msr *msrs)                        in rdmsr_on_cpus()
    [all …]

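This header is the core x86 MSR API: rdmsrl()/wrmsrl() move a full 64-bit value, and the _safe variants return an error instead of oopsing when the access raises #GP. A hedged usage sketch; the MSR named is real, the wrapper function is ours:

    #include <asm/msr.h>   /* rdmsrl_safe(), wrmsrl() */

    static int read_platform_info_sketch(u64 *val)
    {
        /* _safe: returns non-zero instead of faulting if this
         * CPU model does not implement the MSR */
        return rdmsrl_safe(MSR_PLATFORM_INFO, val);
    }
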
msr-trace.h
     3  #define TRACE_SYSTEM msr
     6  #define TRACE_INCLUDE_FILE msr-trace
    22  TP_PROTO(unsigned msr, u64 val, int failed),
    23  TP_ARGS(msr, val, failed),
    25  __field( unsigned, msr )
    30  __entry->msr = msr;
    35  __entry->msr,
    41  TP_PROTO(unsigned msr, u64 val, int failed),
    42  TP_ARGS(msr, val, failed)
    47  TP_ARGS(msr, val, failed)
    [all …]

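The repeated TP_PROTO/TP_ARGS lines are the tracepoint-class idiom: one DECLARE_EVENT_CLASS carries the (msr, val, failed) payload and the individual read/write events are cheap DEFINE_EVENT stampings of it. A hedged sketch of that shape, with class and event names modeled on (not copied from) the header:

    DECLARE_EVENT_CLASS(msr_class_sketch,
        TP_PROTO(unsigned msr, u64 val, int failed),
        TP_ARGS(msr, val, failed),
        TP_STRUCT__entry(
            __field(unsigned, msr)
            __field(u64, val)
            __field(int, failed)
        ),
        TP_fast_assign(
            __entry->msr = msr;
            __entry->val = val;
            __entry->failed = failed;
        ),
        TP_printk("%x, value %llx%s", __entry->msr,
                  __entry->val, __entry->failed ? " #GP" : "")
    );

    DEFINE_EVENT(msr_class_sketch, read_msr_sketch,
        TP_PROTO(unsigned msr, u64 val, int failed),
        TP_ARGS(msr, val, failed)
    );
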
/linux/arch/powerpc/kvm/

book3s_hv_tm.c
     19  u64 msr = vcpu->arch.shregs.msr;         in emulate_tx_failure() local
     25  if (msr & MSR_PR) {                      in emulate_tx_failure()
     45  u64 msr = vcpu->arch.shregs.msr;         in kvmhv_p9_tm_emulation() local
    111  msr = (msr & ~MSR_TS_MASK) | MSR_TS_T;   in kvmhv_p9_tm_emulation()
    112  vcpu->arch.shregs.msr = msr;             in kvmhv_p9_tm_emulation()
    147  if (!(msr & MSR_TM)) {                   in kvmhv_p9_tm_emulation()
    161  msr = (msr & ~MSR_TS_MASK) | MSR_TS_T;   in kvmhv_p9_tm_emulation()
    164  msr = (msr & ~MSR_TS_MASK) | MSR_TS_S;   in kvmhv_p9_tm_emulation()
    166  vcpu->arch.shregs.msr = msr;             in kvmhv_p9_tm_emulation()
    179  if (!(msr & MSR_TM)) {                   in kvmhv_p9_tm_emulation()
    [all …]

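On PowerPC the MSR's transactional-state (TS) field is two bits wide, so the emulation always clears the whole field before installing the new state rather than OR-ing over a stale value. The same move as a hedged helper (illustrative, not from the file):

    static inline u64 msr_set_ts_sketch(u64 msr, u64 ts)
    {
        return (msr & ~MSR_TS_MASK) | ts;   /* replace the 2-bit TS field */
    }
    /* usage: msr = msr_set_ts_sketch(msr, MSR_TS_T);  -> transactional
     *        msr = msr_set_ts_sketch(msr, MSR_TS_S);  -> suspended   */
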
book3s_hv_tm_builtin.c
    23  u64 newmsr, msr, bescr;                              in kvmhv_p9_tm_emulation_early() local
    45  vcpu->arch.shregs.msr = newmsr;                      in kvmhv_p9_tm_emulation_early()
    52  msr = vcpu->arch.shregs.msr;                         in kvmhv_p9_tm_emulation_early()
    57  ((msr & MSR_PR) && !(mfspr(SPRN_FSCR) & FSCR_EBB)))  in kvmhv_p9_tm_emulation_early()
    67  msr = (msr & ~MSR_TS_MASK) | MSR_TS_T;               in kvmhv_p9_tm_emulation_early()
    68  vcpu->arch.shregs.msr = msr;                         in kvmhv_p9_tm_emulation_early()
    77  msr = vcpu->arch.shregs.msr;                         in kvmhv_p9_tm_emulation_early()
    82  newmsr = (newmsr & ~MSR_LE) | (msr & MSR_LE);        in kvmhv_p9_tm_emulation_early()
    84  vcpu->arch.shregs.msr = newmsr;                      in kvmhv_p9_tm_emulation_early()
    90  msr = vcpu->arch.shregs.msr;                         in kvmhv_p9_tm_emulation_early()
    [all …]

/linux/arch/x86/lib/

msr.c
     9  struct msr *msrs_alloc(void)                 in msrs_alloc()
    11  struct msr *msrs = NULL;                     in msrs_alloc()
    13  msrs = alloc_percpu(struct msr);             in msrs_alloc()
    23  void msrs_free(struct msr *msrs)             in msrs_free()
    39  static int msr_read(u32 msr, struct msr *m)  in msr_read() argument
    44  err = rdmsrl_safe(msr, &val);                in msr_read()
    57  static int msr_write(u32 msr, struct msr *m) in msr_write() argument
    59  return wrmsrl_safe(msr, m->q);               in msr_write()
    64  struct msr m, m1;                            in __flip_bit()
    70  err = msr_read(msr, &m);                     in __flip_bit()
    [all …]

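__flip_bit() is a guarded read-modify-write: read the MSR safely, toggle one bit, and write back through the faulting-tolerant path. A hedged condensation of that core; error codes and the real function's change-detection shortcut are simplified:

    #include <linux/bits.h>    /* BIT_ULL() */
    #include <linux/errno.h>
    #include <linux/types.h>
    #include <asm/msr.h>       /* rdmsrl_safe(), wrmsrl_safe() */

    static int set_msr_bit_sketch(u32 msr, u8 bit, bool on)
    {
        u64 val;

        if (rdmsrl_safe(msr, &val))
            return -EIO;                 /* read faulted */
        if (on)
            val |= BIT_ULL(bit);
        else
            val &= ~BIT_ULL(bit);
        return wrmsrl_safe(msr, val);    /* non-zero if the write #GPs */
    }
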
/linux/tools/power/x86/turbostat/

turbostat.c
    2354  base_cpu, msr, msr & 0x2 ? "EN" : "DIS");                                in dump_nhm_platform_info()
    3952  msr = (msr >> 30) & 1;                                                   in check_tcc_offset()
    4237  cpu, msr, ((msr) & 0x1) ? "EN" : "Dis", ((msr) & 0x2) ? "EN" : "Dis");   in print_hwp()
    4244  cpu, msr, ((msr) & 0x1) ? "" : "No-", ((msr) & 0x2) ? "" : "No-");       in print_hwp()
    4762  cpu, msr, (msr >> 63) & 1 ? "" : "UN");                                  in print_rapl()
    4788  cpu, msr, (msr >> 31) & 1 ? "" : "UN");                                  in print_rapl()
    4802  cpu, msr, (msr >> 31) & 1 ? "" : "UN");                                  in print_rapl()
    4814  cpu, msr, (msr >> 31) & 1 ? "" : "UN");                                  in print_rapl()
    5095  base_cpu, msr, msr & FEAT_CTL_LOCKED ? "" : "UN-", msr & (1 << 18) ? "SGX" : "");  in decode_feature_control_msr()
    5125  base_cpu, msr, msr & (0 << 0) ? "No-" : "", msr & (1 << 0) ? "No-" : "", in decode_misc_feature_control()
    [all …]

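turbostat's decode helpers all follow one pattern: read the raw 64-bit MSR, then shift-and-mask individual fields into a human-readable line. A hedged example for one real field (bit 63 of MSR_PKG_POWER_LIMIT is the lock bit per the SDM; the helper itself is ours):

    #include <stdint.h>
    #include <stdio.h>

    static void print_rapl_lock_sketch(int cpu, uint64_t msr)
    {
        printf("cpu%d: MSR_PKG_POWER_LIMIT: 0x%016llx (%slocked)\n",
               cpu, (unsigned long long)msr,
               ((msr >> 63) & 1) ? "" : "UN");
    }
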
/linux/arch/x86/kvm/

mtrr.c
     29  switch (msr) {                                                           in msr_mtrr_valid()
     59  if (!msr_mtrr_valid(msr))                                                in kvm_mtrr_valid()
     62  if (msr == MSR_IA32_CR_PAT) {                                            in kvm_mtrr_valid()
     68  } else if (msr >= MSR_MTRRfix64K_00000 && msr <= MSR_MTRRfix4K_F8000) {  in kvm_mtrr_valid()
     76  WARN_ON(!(msr >= 0x200 && msr < 0x200 + 2 * KVM_NR_VAR_MTRR));           in kvm_mtrr_valid()
     79  if ((msr & 1) == 0) {                                                    in kvm_mtrr_valid()
    184  switch (msr) {                                                           in fixed_msr_to_seg_unit()
    346  index = (msr - 0x200) / 2;                                               in set_var_mtrr_msr()
    389  update_mtrr(vcpu, msr);                                                  in kvm_mtrr_set_msr()
    398  if (msr == MSR_MTRRcap) {                                                in kvm_mtrr_get_msr()
    [all …]

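Variable-range MTRRs are laid out as base/mask register pairs starting at MSR 0x200 (MTRRphysBase0 per the SDM), which is why the code derives the range index with (msr - 0x200) / 2 and the register's role from the low bit. A hedged decode helper:

    #include <stdbool.h>
    #include <stdint.h>

    static void decode_var_mtrr_sketch(uint32_t msr,
                                       uint32_t *index, bool *is_mask)
    {
        *index   = (msr - 0x200) / 2;   /* which variable-range pair */
        *is_mask = msr & 1;             /* even = PHYSBASE, odd = PHYSMASK */
    }
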
/linux/arch/m68k/bvme6000/

config.c
    169  unsigned char msr;                                 in bvme6000_timer_int() local
    172  msr = rtc->msr & 0xc0;                             in bvme6000_timer_int()
    173  rtc->msr = msr | 0x20;    /* Ack the interrupt */  in bvme6000_timer_int()
    194  unsigned char msr = rtc->msr & 0xc0;               in bvme6000_sched_init() local
    214  rtc->msr = msr;                                    in bvme6000_sched_init()
    236  unsigned char msr, msb;                            in bvme6000_read_clk() local
    242  msr = rtc->msr & 0xc0;                             in bvme6000_read_clk()
    247  t1int = rtc->msr & 0x20;                           in bvme6000_read_clk()
    262  rtc->msr = msr;                                    in bvme6000_read_clk()
    289  unsigned char msr = rtc->msr & 0xc0;               in bvme6000_hwclk() local
    [all …]

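Note the different meaning here: on this m68k board "msr" is the RTC chip's mode/status register, not an x86 model-specific register. Every function saves the top two mode bits, does its work, and restores them. A hedged sketch of that save/ack/restore idiom; the 0xc0/0x20 masks follow the driver and have not been checked against the chip datasheet:

    struct rtc_chip_sketch {
        volatile unsigned char msr;   /* mode/status register */
    };

    static void rtc_ack_timer_sketch(struct rtc_chip_sketch *rtc)
    {
        unsigned char msr = rtc->msr & 0xc0;   /* save mode-select bits */

        rtc->msr = msr | 0x20;                 /* ack the timer interrupt */
        /* ... handle the tick ... */
        rtc->msr = msr;                        /* restore the saved mode */
    }
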
rtc.c
     42  unsigned char msr;                       in rtc_ioctl() local
     52  msr = rtc->msr & 0xc0;                   in rtc_ioctl()
     53  rtc->msr = 0x40;                         in rtc_ioctl()
     66  rtc->msr = msr;                          in rtc_ioctl()
    108  msr = rtc->msr & 0xc0;                   in rtc_ioctl()
    109  rtc->msr = 0x40;                         in rtc_ioctl()
    123  rtc->msr = msr;                          in rtc_ioctl()

/linux/arch/x86/xen/

pmu.c
    134  if ((msr >= MSR_F15H_PERF_CTL &&                         in is_amd_pmu_msr()
    136  (msr >= MSR_K7_EVNTSEL0 &&                               in is_amd_pmu_msr()
    205  switch (msr) {                                           in xen_intel_pmu_emulate()
    265  ((msr >= MSR_K7_EVNTSEL0) && (msr <= MSR_K7_PERFCTR3)))  in xen_amd_pmu_emulate()
    266  msr = get_fam15h_addr(msr);                              in xen_amd_pmu_emulate()
    270  if (msr == amd_ctrls_base + off) {                       in xen_amd_pmu_emulate()
    296  if (is_amd_pmu_msr(msr)) {                               in pmu_msr_read()
    319  if (is_amd_pmu_msr(msr)) {                               in pmu_msr_write()
    345  uint32_t msr;                                            in xen_amd_read_pmc() local
    366  uint32_t msr;                                            in xen_intel_read_pmc() local
    [all …]

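Xen's PV PMU intercepts MSR accesses and first classifies the index by range: the Fam15h PERF_CTL/CTR block or the legacy K7 block means "AMD PMU register". A hedged version of that classification; the constants match arch/x86's msr-index.h, but the upper bounds here are illustrative, while the real code sizes them from the detected counter count:

    #include <stdbool.h>

    #define MSR_F15H_PERF_CTL 0xc0010200
    #define MSR_K7_EVNTSEL0   0xc0010000
    #define MSR_K7_PERFCTR3   0xc0010007

    static bool is_amd_pmu_msr_sketch(unsigned int msr)
    {
        return (msr >= MSR_F15H_PERF_CTL &&
                msr <  MSR_F15H_PERF_CTL + 12) ||   /* 6 ctl/ctr pairs */
               (msr >= MSR_K7_EVNTSEL0 && msr <= MSR_K7_PERFCTR3);
    }
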
/linux/arch/microblaze/kernel/

process.c
     71  local_save_flags(childregs->msr);                  in copy_thread()
     72  ti->cpu_context.msr = childregs->msr & ~MSR_IE;    in copy_thread()
     82  childregs->msr |= MSR_UMS;                         in copy_thread()
     94  childregs->msr &= ~MSR_EIP;                        in copy_thread()
     95  childregs->msr |= MSR_IE;                          in copy_thread()
     96  childregs->msr &= ~MSR_VM;                         in copy_thread()
     97  childregs->msr |= MSR_VMS;                         in copy_thread()
    100  ti->cpu_context.msr = (childregs->msr|MSR_VM);     in copy_thread()
    102  ti->cpu_context.msr &= ~MSR_IE;                    in copy_thread()
    127  regs->msr |= MSR_UMS;                              in start_thread()
    [all …]

/linux/arch/powerpc/kernel/

signal_64.c
    125  unsigned long msr = regs->msr;                                    in __unsafe_setup_sigcontext() local
    142  msr |= MSR_VEC;                                                   in __unsafe_setup_sigcontext()
    158  msr &= ~MSR_VSX;                                                  in __unsafe_setup_sigcontext()
    171  msr |= MSR_VSX;                                                   in __unsafe_setup_sigcontext()
    232  msr |= tsk->thread.ckpt_regs.msr & (MSR_FP | MSR_VEC | MSR_VSX);  in setup_tm_sigcontexts()
    258  msr |= MSR_VEC;                                                   in setup_tm_sigcontexts()
    280  if (msr & MSR_FP)                                                 in setup_tm_sigcontexts()
    305  msr |= MSR_VSX;                                                   in setup_tm_sigcontexts()
    568  regs_set_return_msr(regs, regs->msr | (msr & MSR_TS_MASK));       in restore_tm_sigcontexts()
    857  unsigned long msr = regs->msr;                                    in handle_rt_signal64() local
    [all …]

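The MSR value written into a signal frame doubles as a manifest: MSR_FP/MSR_VEC/MSR_VSX are set only for register sets that were actually flushed into the sigcontext, so sigreturn knows what to restore. A hedged sketch of that bookkeeping (illustrative helper, not the file's code):

    static unsigned long sigframe_msr_sketch(unsigned long msr,
                                             bool saved_vec, bool saved_vsx)
    {
        msr &= ~(MSR_VEC | MSR_VSX);   /* start with nothing advertised */
        if (saved_vec)
            msr |= MSR_VEC;            /* Altivec regs are in the frame */
        if (saved_vsx)
            msr |= MSR_VSX;            /* VSX doubleword halves saved too */
        return msr;
    }
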
signal_32.c
    264  unsigned long msr = regs->msr;           in __unsafe_save_user_regs() local
    276  msr |= MSR_VEC;                          in __unsafe_save_user_regs()
    295  msr &= ~MSR_VSX;                         in __unsafe_save_user_regs()
    305  msr |= MSR_VSX;                          in __unsafe_save_user_regs()
    315  msr |= MSR_SPE;                          in __unsafe_save_user_regs()
    392  msr |= MSR_VEC;                          in save_tm_user_regs_unsafe()
    402  if (msr & MSR_VEC)                       in save_tm_user_regs_unsafe()
    410  if (msr & MSR_FP)                        in save_tm_user_regs_unsafe()
    428  msr |= MSR_VSX;                          in save_tm_user_regs_unsafe()
    733  unsigned long msr = regs->msr;           in handle_rt_signal32() local
    [all …]

cpu_setup_power.c
     19  u64 msr;                                 in init_hvmode_206() local
     21  msr = mfmsr();                           in init_hvmode_206()
     22  if (msr & MSR_HV)                        in init_hvmode_206()
    146  u64 msr;                                 in __restore_cpu_power7() local
    148  msr = mfmsr();                           in __restore_cpu_power7()
    176  u64 msr;                                 in __restore_cpu_power8() local
    182  msr = mfmsr();                           in __restore_cpu_power8()
    214  u64 msr;                                 in __restore_cpu_power9() local
    219  msr = mfmsr();                           in __restore_cpu_power9()
    254  u64 msr;                                 in __restore_cpu_power10() local
    [all …]

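Each restore path starts by sampling the PowerPC machine state register with mfmsr() and checking MSR_HV, since hypervisor-only SPRs may only be touched when the CPU is actually in hypervisor mode. A minimal hedged expression of that guard (illustrative, not the file's exact code):

    static bool cpu_in_hv_mode_sketch(void)
    {
        return !!(mfmsr() & MSR_HV);   /* mfmsr() reads the current MSR */
    }
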
process.c
    158  unsigned long msr;                                                  in __giveup_fpu() local
    161  msr = tsk->thread.regs->msr;                                        in __giveup_fpu()
    164  msr &= ~MSR_VSX;                                                    in __giveup_fpu()
    244  msr = tsk->thread.regs->msr;                                        in __giveup_altivec()
    245  msr &= ~MSR_VEC;                                                    in __giveup_altivec()
    247  msr &= ~MSR_VSX;                                                    in __giveup_altivec()
    307  unsigned long msr = tsk->thread.regs->msr;                          in __giveup_vsx() local
    313  WARN_ON((msr & MSR_VSX) && !((msr & MSR_FP) && (msr & MSR_VEC)));   in __giveup_vsx()
    316  if (msr & MSR_FP)                                                   in __giveup_vsx()
    525  msr = regs->msr;                                                    in restore_math()
    [all …]

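The WARN_ON at line 313 encodes an architectural invariant: VSX state is a superset of the FP and Altivec register files, so MSR_VSX must never be live without both MSR_FP and MSR_VEC. The same check as a hedged predicate:

    /* illustrative predicate for the invariant __giveup_vsx() asserts */
    static bool msr_vsx_consistent_sketch(unsigned long msr)
    {
        return !(msr & MSR_VSX) ||
               ((msr & MSR_FP) && (msr & MSR_VEC));
    }
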
/linux/arch/x86/events/

probe.c
    19  perf_msr_probe(struct perf_msr *msr, int cnt, bool zero, void *data)  in perf_msr_probe() argument
    29  if (!msr[bit].no_check) {                                             in perf_msr_probe()
    30  struct attribute_group *grp = msr[bit].grp;                           in perf_msr_probe()
    40  if (!msr[bit].msr)                                                    in perf_msr_probe()
    43  if (msr[bit].test && !msr[bit].test(bit, data))                       in perf_msr_probe()
    46  if (rdmsrl_safe(msr[bit].msr, &val))                                  in perf_msr_probe()
    49  mask = msr[bit].mask;                                                 in perf_msr_probe()

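perf_msr_probe() decides which MSR-backed perf events to expose: an event survives only if it has an MSR at all, passes its optional model-specific test, and the MSR reads back cleanly (and non-zero, unless zero values are allowed). A hedged condensation of that filter; the struct is simplified relative to the real struct perf_msr:

    #include <linux/bits.h>
    #include <linux/types.h>
    #include <asm/msr.h>

    struct perf_msr_sketch {
        u64  msr;                          /* MSR index, 0 if none */
        bool (*test)(int idx, void *data); /* optional model test */
        u64  mask;                         /* bits that must read non-zero */
    };

    static u64 probe_avail_sketch(struct perf_msr_sketch *msr, int cnt,
                                  bool zero, void *data)
    {
        u64 avail = 0;
        int bit;

        for (bit = 0; bit < cnt; bit++) {
            u64 val;

            if (!msr[bit].msr)
                continue;                        /* nothing to probe */
            if (msr[bit].test && !msr[bit].test(bit, data))
                continue;                        /* model test said no */
            if (rdmsrl_safe(msr[bit].msr, &val))
                continue;                        /* #GP: MSR absent */
            if (!zero && !(val & msr[bit].mask))
                continue;                        /* reads as zero */
            avail |= BIT_ULL(bit);               /* event is usable */
        }
        return avail;
    }
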
/linux/arch/arm64/kvm/hyp/nvhe/

hyp-init.S
     87  msr tpidr_el2, x1
     93  msr mair_el2, x1
     96  msr hcr_el2, x1
     99  msr vttbr_el2, x1
    102  msr vtcr_el2, x1
    109  msr ttbr0_el2, x2
    116  msr tcr_el2, x0
    136  msr vbar_el2, x0
    202  msr elr_el2, x1
    204  msr spsr_el2, x0
    [all …]

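Here msr is not a variable at all: it is the AArch64 instruction that writes a system register (its read counterpart is mrs), and these lines are EL2 setup writing the hypervisor's translation and vector-table registers. A hedged, user-space-runnable illustration of the instruction pair, using an EL0-accessible register rather than the privileged EL2 ones above:

    #include <stdint.h>

    static inline uint64_t read_tpidr_el0(void)
    {
        uint64_t v;
        /* mrs: system register -> general-purpose register */
        __asm__ volatile("mrs %0, tpidr_el0" : "=r"(v));
        return v;
    }

    static inline void write_tpidr_el0(uint64_t v)
    {
        /* msr: general-purpose register -> system register */
        __asm__ volatile("msr tpidr_el0, %0" : : "r"(v));
    }
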
/linux/tools/power/x86/x86_energy_perf_policy/

x86_energy_perf_policy.c
     691  retval = pread(fd, msr, sizeof(*msr), offset);  in get_msr()
     779  unsigned long long msr;                         in read_hwp_cap() local
     814  unsigned long long msr;                         in read_hwp_request() local
     917  unsigned long long msr;                         in print_pkg_msrs() local
     929  pkg, msr,                                       in print_pkg_msrs()
     936  pkg, msr,                                       in print_pkg_msrs()
    1169  unsigned long long msr;                         in enable_hwp_on_cpu() local
    1182  unsigned long long msr;                         in update_cpu_msrs() local
    1332  unsigned long long msr;                         in verify_hwp_is_enabled() local
    1339  if ((msr & 1) == 0) {                           in verify_hwp_is_enabled()
    [all …]

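The pread() at line 691 is the standard user-space MSR interface: the msr driver exposes /dev/cpu/<n>/msr, addressed by seeking to the MSR index and reading eight bytes (root privileges, and usually the msr kernel module, required). A hedged self-contained reader in the same style:

    #include <fcntl.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <unistd.h>

    /* read one MSR on one CPU via /dev/cpu/<cpu>/msr; 0 on success */
    static int get_msr_sketch(int cpu, uint32_t idx, uint64_t *val)
    {
        char path[64];
        int fd;
        ssize_t got;

        snprintf(path, sizeof(path), "/dev/cpu/%d/msr", cpu);
        fd = open(path, O_RDONLY);
        if (fd < 0)
            return -1;
        got = pread(fd, val, sizeof(*val), idx);   /* offset = MSR index */
        close(fd);
        return got == sizeof(*val) ? 0 : -1;
    }
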
/linux/arch/powerpc/kernel/ptrace/

ptrace-tm.c
     39  task->thread.ckpt_regs.msr &= ~MSR_DEBUGCHANGE;        in set_user_ckpt_msr()
     40  task->thread.ckpt_regs.msr |= msr & MSR_DEBUGCHANGE;   in set_user_ckpt_msr()
     63  if (!MSR_TM_ACTIVE(target->thread.regs->msr))          in tm_cgpr_active()
     97  if (!MSR_TM_ACTIVE(target->thread.regs->msr))          in tm_cgpr_get()
    144  if (!MSR_TM_ACTIVE(target->thread.regs->msr))          in tm_cgpr_set()
    205  if (!MSR_TM_ACTIVE(target->thread.regs->msr))          in tm_cfpr_active()
    238  if (!MSR_TM_ACTIVE(target->thread.regs->msr))          in tm_cfpr_get()
    283  if (!MSR_TM_ACTIVE(target->thread.regs->msr))          in tm_cfpr_set()
    317  if (!MSR_TM_ACTIVE(target->thread.regs->msr))          in tm_cvmx_active()
    354  if (!MSR_TM_ACTIVE(target->thread.regs->msr))          in tm_cvmx_get()
    [all …]

/linux/arch/x86/kvm/svm/

pmu.c
     64  static enum index msr_to_index(u32 msr)            in msr_to_index() argument
     66  switch (msr) {                                     in msr_to_index()
    103  switch (msr) {                                     in get_gp_pmc_amd()
    134  return &pmu->gp_counters[msr_to_index(msr)];       in get_gp_pmc_amd()
    219  pmc = get_gp_pmc_amd(pmu, msr, PMU_TYPE_COUNTER);  in amd_msr_idx_to_pmc()
    229  u32 msr = msr_info->index;                         in amd_pmu_get_msr() local
    232  pmc = get_gp_pmc_amd(pmu, msr, PMU_TYPE_COUNTER);  in amd_pmu_get_msr()
    238  pmc = get_gp_pmc_amd(pmu, msr, PMU_TYPE_EVNTSEL);  in amd_pmu_get_msr()
    251  u32 msr = msr_info->index;                         in amd_pmu_set_msr() local
    255  pmc = get_gp_pmc_amd(pmu, msr, PMU_TYPE_COUNTER);  in amd_pmu_set_msr()
    [all …]

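KVM's AMD vPMU maps a trapped MSR index back to a counter slot: Fam15h PERF_CTLn and PERF_CTRn interleave at even/odd offsets from the base, so the slot number falls out of simple arithmetic. A hedged helper for just that mapping (the real code also handles the legacy K7 layout):

    #include <stdint.h>

    #define MSR_F15H_PERF_CTL 0xc0010200   /* per arch/x86 msr-index.h */

    /* which of the Fam15h ctl/ctr pairs does this MSR belong to? */
    static inline unsigned int f15h_msr_to_slot_sketch(uint32_t msr)
    {
        return (msr - MSR_F15H_PERF_CTL) / 2;   /* even = CTL, odd = CTR */
    }
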