Lines matching refs: vcpu

17 static void kvm_pmu_create_perf_event(struct kvm_vcpu *vcpu, u64 select_idx);
18 static void kvm_pmu_update_pmc_chained(struct kvm_vcpu *vcpu, u64 select_idx);
19 static void kvm_pmu_stop_counter(struct kvm_vcpu *vcpu, struct kvm_pmc *pmc);
43 static bool kvm_pmu_idx_is_64bit(struct kvm_vcpu *vcpu, u64 select_idx) in kvm_pmu_idx_is_64bit() argument
46 __vcpu_sys_reg(vcpu, PMCR_EL0) & ARMV8_PMU_PMCR_LC); in kvm_pmu_idx_is_64bit()
66 struct kvm_vcpu *vcpu = kvm_pmc_to_vcpu(pmc); in kvm_pmu_pmc_is_chained() local
68 return test_bit(pmc->idx >> 1, vcpu->arch.pmu.chained); in kvm_pmu_pmc_is_chained()
108 static bool kvm_pmu_idx_has_chain_evtype(struct kvm_vcpu *vcpu, u64 select_idx) in kvm_pmu_idx_has_chain_evtype() argument
118 eventsel = __vcpu_sys_reg(vcpu, reg) & kvm_pmu_event_mask(vcpu->kvm); in kvm_pmu_idx_has_chain_evtype()
128 static u64 kvm_pmu_get_pair_counter_value(struct kvm_vcpu *vcpu, in kvm_pmu_get_pair_counter_value() argument
137 counter = __vcpu_sys_reg(vcpu, reg); in kvm_pmu_get_pair_counter_value()
138 counter_high = __vcpu_sys_reg(vcpu, reg + 1); in kvm_pmu_get_pair_counter_value()
144 counter = __vcpu_sys_reg(vcpu, reg); in kvm_pmu_get_pair_counter_value()
163 u64 kvm_pmu_get_counter_value(struct kvm_vcpu *vcpu, u64 select_idx) in kvm_pmu_get_counter_value() argument
166 struct kvm_pmu *pmu = &vcpu->arch.pmu; in kvm_pmu_get_counter_value()
169 counter = kvm_pmu_get_pair_counter_value(vcpu, pmc); in kvm_pmu_get_counter_value()
186 void kvm_pmu_set_counter_value(struct kvm_vcpu *vcpu, u64 select_idx, u64 val) in kvm_pmu_set_counter_value() argument
192 __vcpu_sys_reg(vcpu, reg) += (s64)val - kvm_pmu_get_counter_value(vcpu, select_idx); in kvm_pmu_set_counter_value()
195 kvm_pmu_create_perf_event(vcpu, select_idx); in kvm_pmu_set_counter_value()
218 static void kvm_pmu_stop_counter(struct kvm_vcpu *vcpu, struct kvm_pmc *pmc) in kvm_pmu_stop_counter() argument
226 counter = kvm_pmu_get_pair_counter_value(vcpu, pmc); in kvm_pmu_stop_counter()
236 __vcpu_sys_reg(vcpu, reg) = val; in kvm_pmu_stop_counter()
239 __vcpu_sys_reg(vcpu, reg + 1) = upper_32_bits(counter); in kvm_pmu_stop_counter()
249 void kvm_pmu_vcpu_init(struct kvm_vcpu *vcpu) in kvm_pmu_vcpu_init() argument
252 struct kvm_pmu *pmu = &vcpu->arch.pmu; in kvm_pmu_vcpu_init()
263 void kvm_pmu_vcpu_reset(struct kvm_vcpu *vcpu) in kvm_pmu_vcpu_reset() argument
265 unsigned long mask = kvm_pmu_valid_counter_mask(vcpu); in kvm_pmu_vcpu_reset()
266 struct kvm_pmu *pmu = &vcpu->arch.pmu; in kvm_pmu_vcpu_reset()
270 kvm_pmu_stop_counter(vcpu, &pmu->pmc[i]); in kvm_pmu_vcpu_reset()
272 bitmap_zero(vcpu->arch.pmu.chained, ARMV8_PMU_MAX_COUNTER_PAIRS); in kvm_pmu_vcpu_reset()
280 void kvm_pmu_vcpu_destroy(struct kvm_vcpu *vcpu) in kvm_pmu_vcpu_destroy() argument
283 struct kvm_pmu *pmu = &vcpu->arch.pmu; in kvm_pmu_vcpu_destroy()
287 irq_work_sync(&vcpu->arch.pmu.overflow_work); in kvm_pmu_vcpu_destroy()
290 u64 kvm_pmu_valid_counter_mask(struct kvm_vcpu *vcpu) in kvm_pmu_valid_counter_mask() argument
292 u64 val = __vcpu_sys_reg(vcpu, PMCR_EL0) >> ARMV8_PMU_PMCR_N_SHIFT; in kvm_pmu_valid_counter_mask()
308 void kvm_pmu_enable_counter_mask(struct kvm_vcpu *vcpu, u64 val) in kvm_pmu_enable_counter_mask() argument
311 struct kvm_pmu *pmu = &vcpu->arch.pmu; in kvm_pmu_enable_counter_mask()
314 if (!(__vcpu_sys_reg(vcpu, PMCR_EL0) & ARMV8_PMU_PMCR_E) || !val) in kvm_pmu_enable_counter_mask()
324 kvm_pmu_update_pmc_chained(vcpu, i); in kvm_pmu_enable_counter_mask()
325 kvm_pmu_create_perf_event(vcpu, i); in kvm_pmu_enable_counter_mask()
343 void kvm_pmu_disable_counter_mask(struct kvm_vcpu *vcpu, u64 val) in kvm_pmu_disable_counter_mask() argument
346 struct kvm_pmu *pmu = &vcpu->arch.pmu; in kvm_pmu_disable_counter_mask()
359 kvm_pmu_update_pmc_chained(vcpu, i); in kvm_pmu_disable_counter_mask()
360 kvm_pmu_create_perf_event(vcpu, i); in kvm_pmu_disable_counter_mask()
368 static u64 kvm_pmu_overflow_status(struct kvm_vcpu *vcpu) in kvm_pmu_overflow_status() argument
372 if ((__vcpu_sys_reg(vcpu, PMCR_EL0) & ARMV8_PMU_PMCR_E)) { in kvm_pmu_overflow_status()
373 reg = __vcpu_sys_reg(vcpu, PMOVSSET_EL0); in kvm_pmu_overflow_status()
374 reg &= __vcpu_sys_reg(vcpu, PMCNTENSET_EL0); in kvm_pmu_overflow_status()
375 reg &= __vcpu_sys_reg(vcpu, PMINTENSET_EL1); in kvm_pmu_overflow_status()
381 static void kvm_pmu_update_state(struct kvm_vcpu *vcpu) in kvm_pmu_update_state() argument
383 struct kvm_pmu *pmu = &vcpu->arch.pmu; in kvm_pmu_update_state()
386 if (!kvm_vcpu_has_pmu(vcpu)) in kvm_pmu_update_state()
389 overflow = !!kvm_pmu_overflow_status(vcpu); in kvm_pmu_update_state()
395 if (likely(irqchip_in_kernel(vcpu->kvm))) { in kvm_pmu_update_state()
396 int ret = kvm_vgic_inject_irq(vcpu->kvm, vcpu->vcpu_id, in kvm_pmu_update_state()
402 bool kvm_pmu_should_notify_user(struct kvm_vcpu *vcpu) in kvm_pmu_should_notify_user() argument
404 struct kvm_pmu *pmu = &vcpu->arch.pmu; in kvm_pmu_should_notify_user()
405 struct kvm_sync_regs *sregs = &vcpu->run->s.regs; in kvm_pmu_should_notify_user()
408 if (likely(irqchip_in_kernel(vcpu->kvm))) in kvm_pmu_should_notify_user()
417 void kvm_pmu_update_run(struct kvm_vcpu *vcpu) in kvm_pmu_update_run() argument
419 struct kvm_sync_regs *regs = &vcpu->run->s.regs; in kvm_pmu_update_run()
423 if (vcpu->arch.pmu.irq_level) in kvm_pmu_update_run()
434 void kvm_pmu_flush_hwstate(struct kvm_vcpu *vcpu) in kvm_pmu_flush_hwstate() argument
436 kvm_pmu_update_state(vcpu); in kvm_pmu_flush_hwstate()
446 void kvm_pmu_sync_hwstate(struct kvm_vcpu *vcpu) in kvm_pmu_sync_hwstate() argument
448 kvm_pmu_update_state(vcpu); in kvm_pmu_sync_hwstate()
458 struct kvm_vcpu *vcpu; in kvm_pmu_perf_overflow_notify_vcpu() local
462 vcpu = kvm_pmc_to_vcpu(pmu->pmc); in kvm_pmu_perf_overflow_notify_vcpu()
464 kvm_vcpu_kick(vcpu); in kvm_pmu_perf_overflow_notify_vcpu()
476 struct kvm_vcpu *vcpu = kvm_pmc_to_vcpu(pmc); in kvm_pmu_perf_overflow() local
488 if (!kvm_pmu_idx_is_64bit(vcpu, pmc->idx)) in kvm_pmu_perf_overflow()
495 __vcpu_sys_reg(vcpu, PMOVSSET_EL0) |= BIT(idx); in kvm_pmu_perf_overflow()
497 if (kvm_pmu_overflow_status(vcpu)) { in kvm_pmu_perf_overflow()
498 kvm_make_request(KVM_REQ_IRQ_PENDING, vcpu); in kvm_pmu_perf_overflow()
501 kvm_vcpu_kick(vcpu); in kvm_pmu_perf_overflow()
503 irq_work_queue(&vcpu->arch.pmu.overflow_work); in kvm_pmu_perf_overflow()
514 void kvm_pmu_software_increment(struct kvm_vcpu *vcpu, u64 val) in kvm_pmu_software_increment() argument
516 struct kvm_pmu *pmu = &vcpu->arch.pmu; in kvm_pmu_software_increment()
519 if (!(__vcpu_sys_reg(vcpu, PMCR_EL0) & ARMV8_PMU_PMCR_E)) in kvm_pmu_software_increment()
523 val &= __vcpu_sys_reg(vcpu, PMCNTENSET_EL0); in kvm_pmu_software_increment()
532 type = __vcpu_sys_reg(vcpu, PMEVTYPER0_EL0 + i); in kvm_pmu_software_increment()
533 type &= kvm_pmu_event_mask(vcpu->kvm); in kvm_pmu_software_increment()
538 reg = __vcpu_sys_reg(vcpu, PMEVCNTR0_EL0 + i) + 1; in kvm_pmu_software_increment()
540 __vcpu_sys_reg(vcpu, PMEVCNTR0_EL0 + i) = reg; in kvm_pmu_software_increment()
547 reg = __vcpu_sys_reg(vcpu, PMEVCNTR0_EL0 + i + 1) + 1; in kvm_pmu_software_increment()
549 __vcpu_sys_reg(vcpu, PMEVCNTR0_EL0 + i + 1) = reg; in kvm_pmu_software_increment()
551 __vcpu_sys_reg(vcpu, PMOVSSET_EL0) |= BIT(i + 1); in kvm_pmu_software_increment()
554 __vcpu_sys_reg(vcpu, PMOVSSET_EL0) |= BIT(i); in kvm_pmu_software_increment()
564 void kvm_pmu_handle_pmcr(struct kvm_vcpu *vcpu, u64 val) in kvm_pmu_handle_pmcr() argument
569 kvm_pmu_enable_counter_mask(vcpu, in kvm_pmu_handle_pmcr()
570 __vcpu_sys_reg(vcpu, PMCNTENSET_EL0)); in kvm_pmu_handle_pmcr()
572 kvm_pmu_disable_counter_mask(vcpu, in kvm_pmu_handle_pmcr()
573 __vcpu_sys_reg(vcpu, PMCNTENSET_EL0)); in kvm_pmu_handle_pmcr()
577 kvm_pmu_set_counter_value(vcpu, ARMV8_PMU_CYCLE_IDX, 0); in kvm_pmu_handle_pmcr()
580 unsigned long mask = kvm_pmu_valid_counter_mask(vcpu); in kvm_pmu_handle_pmcr()
583 kvm_pmu_set_counter_value(vcpu, i, 0); in kvm_pmu_handle_pmcr()
587 static bool kvm_pmu_counter_is_enabled(struct kvm_vcpu *vcpu, u64 select_idx) in kvm_pmu_counter_is_enabled() argument
589 return (__vcpu_sys_reg(vcpu, PMCR_EL0) & ARMV8_PMU_PMCR_E) && in kvm_pmu_counter_is_enabled()
590 (__vcpu_sys_reg(vcpu, PMCNTENSET_EL0) & BIT(select_idx)); in kvm_pmu_counter_is_enabled()
598 static void kvm_pmu_create_perf_event(struct kvm_vcpu *vcpu, u64 select_idx) in kvm_pmu_create_perf_event() argument
600 struct kvm_pmu *pmu = &vcpu->arch.pmu; in kvm_pmu_create_perf_event()
615 data = __vcpu_sys_reg(vcpu, reg); in kvm_pmu_create_perf_event()
617 kvm_pmu_stop_counter(vcpu, pmc); in kvm_pmu_create_perf_event()
621 eventsel = data & kvm_pmu_event_mask(vcpu->kvm); in kvm_pmu_create_perf_event()
631 if (vcpu->kvm->arch.pmu_filter && in kvm_pmu_create_perf_event()
632 !test_bit(eventsel, vcpu->kvm->arch.pmu_filter)) in kvm_pmu_create_perf_event()
639 attr.disabled = !kvm_pmu_counter_is_enabled(vcpu, pmc->idx); in kvm_pmu_create_perf_event()
646 counter = kvm_pmu_get_pair_counter_value(vcpu, pmc); in kvm_pmu_create_perf_event()
662 if (kvm_pmu_idx_is_64bit(vcpu, pmc->idx)) in kvm_pmu_create_perf_event()
688 static void kvm_pmu_update_pmc_chained(struct kvm_vcpu *vcpu, u64 select_idx) in kvm_pmu_update_pmc_chained() argument
690 struct kvm_pmu *pmu = &vcpu->arch.pmu; in kvm_pmu_update_pmc_chained()
695 new_state = kvm_pmu_idx_has_chain_evtype(vcpu, pmc->idx) && in kvm_pmu_update_pmc_chained()
696 kvm_pmu_counter_is_enabled(vcpu, pmc->idx | 0x1); in kvm_pmu_update_pmc_chained()
702 kvm_pmu_stop_counter(vcpu, canonical_pmc); in kvm_pmu_update_pmc_chained()
708 kvm_pmu_stop_counter(vcpu, kvm_pmu_get_alternate_pmc(pmc)); in kvm_pmu_update_pmc_chained()
709 set_bit(pmc->idx >> 1, vcpu->arch.pmu.chained); in kvm_pmu_update_pmc_chained()
712 clear_bit(pmc->idx >> 1, vcpu->arch.pmu.chained); in kvm_pmu_update_pmc_chained()
725 void kvm_pmu_set_counter_event_type(struct kvm_vcpu *vcpu, u64 data, in kvm_pmu_set_counter_event_type() argument
732 mask |= kvm_pmu_event_mask(vcpu->kvm); in kvm_pmu_set_counter_event_type()
737 __vcpu_sys_reg(vcpu, reg) = data & mask; in kvm_pmu_set_counter_event_type()
739 kvm_pmu_update_pmc_chained(vcpu, select_idx); in kvm_pmu_set_counter_event_type()
740 kvm_pmu_create_perf_event(vcpu, select_idx); in kvm_pmu_set_counter_event_type()
795 u64 kvm_pmu_get_pmceid(struct kvm_vcpu *vcpu, bool pmceid1) in kvm_pmu_get_pmceid() argument
797 unsigned long *bmap = vcpu->kvm->arch.pmu_filter; in kvm_pmu_get_pmceid()
810 if (vcpu->kvm->arch.pmuver >= ID_AA64DFR0_PMUVER_8_4) in kvm_pmu_get_pmceid()
818 nr_events = kvm_pmu_event_mask(vcpu->kvm) + 1; in kvm_pmu_get_pmceid()
834 int kvm_arm_pmu_v3_enable(struct kvm_vcpu *vcpu) in kvm_arm_pmu_v3_enable() argument
836 if (!kvm_vcpu_has_pmu(vcpu)) in kvm_arm_pmu_v3_enable()
839 if (!vcpu->arch.pmu.created) in kvm_arm_pmu_v3_enable()
847 if (irqchip_in_kernel(vcpu->kvm)) { in kvm_arm_pmu_v3_enable()
848 int irq = vcpu->arch.pmu.irq_num; in kvm_arm_pmu_v3_enable()
855 if (!irq_is_ppi(irq) && !vgic_valid_spi(vcpu->kvm, irq)) in kvm_arm_pmu_v3_enable()
857 } else if (kvm_arm_pmu_irq_initialized(vcpu)) { in kvm_arm_pmu_v3_enable()
862 kvm_make_request(KVM_REQ_RELOAD_PMU, vcpu); in kvm_arm_pmu_v3_enable()
867 static int kvm_arm_pmu_v3_init(struct kvm_vcpu *vcpu) in kvm_arm_pmu_v3_init() argument
869 if (irqchip_in_kernel(vcpu->kvm)) { in kvm_arm_pmu_v3_init()
877 if (!vgic_initialized(vcpu->kvm)) in kvm_arm_pmu_v3_init()
880 if (!kvm_arm_pmu_irq_initialized(vcpu)) in kvm_arm_pmu_v3_init()
883 ret = kvm_vgic_set_owner(vcpu, vcpu->arch.pmu.irq_num, in kvm_arm_pmu_v3_init()
884 &vcpu->arch.pmu); in kvm_arm_pmu_v3_init()
889 init_irq_work(&vcpu->arch.pmu.overflow_work, in kvm_arm_pmu_v3_init()
892 vcpu->arch.pmu.created = true; in kvm_arm_pmu_v3_init()
904 struct kvm_vcpu *vcpu; in pmu_irq_is_valid() local
906 kvm_for_each_vcpu(i, vcpu, kvm) { in pmu_irq_is_valid()
907 if (!kvm_arm_pmu_irq_initialized(vcpu)) in pmu_irq_is_valid()
911 if (vcpu->arch.pmu.irq_num != irq) in pmu_irq_is_valid()
914 if (vcpu->arch.pmu.irq_num == irq) in pmu_irq_is_valid()
922 int kvm_arm_pmu_v3_set_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr) in kvm_arm_pmu_v3_set_attr() argument
924 if (!kvm_vcpu_has_pmu(vcpu)) in kvm_arm_pmu_v3_set_attr()
927 if (vcpu->arch.pmu.created) in kvm_arm_pmu_v3_set_attr()
930 if (!vcpu->kvm->arch.pmuver) in kvm_arm_pmu_v3_set_attr()
931 vcpu->kvm->arch.pmuver = kvm_pmu_probe_pmuver(); in kvm_arm_pmu_v3_set_attr()
933 if (vcpu->kvm->arch.pmuver == ID_AA64DFR0_PMUVER_IMP_DEF) in kvm_arm_pmu_v3_set_attr()
941 if (!irqchip_in_kernel(vcpu->kvm)) in kvm_arm_pmu_v3_set_attr()
951 if (!pmu_irq_is_valid(vcpu->kvm, irq)) in kvm_arm_pmu_v3_set_attr()
954 if (kvm_arm_pmu_irq_initialized(vcpu)) in kvm_arm_pmu_v3_set_attr()
958 vcpu->arch.pmu.irq_num = irq; in kvm_arm_pmu_v3_set_attr()
966 nr_events = kvm_pmu_event_mask(vcpu->kvm) + 1; in kvm_arm_pmu_v3_set_attr()
978 mutex_lock(&vcpu->kvm->lock); in kvm_arm_pmu_v3_set_attr()
980 if (!vcpu->kvm->arch.pmu_filter) { in kvm_arm_pmu_v3_set_attr()
981 vcpu->kvm->arch.pmu_filter = bitmap_alloc(nr_events, GFP_KERNEL_ACCOUNT); in kvm_arm_pmu_v3_set_attr()
982 if (!vcpu->kvm->arch.pmu_filter) { in kvm_arm_pmu_v3_set_attr()
983 mutex_unlock(&vcpu->kvm->lock); in kvm_arm_pmu_v3_set_attr()
994 bitmap_zero(vcpu->kvm->arch.pmu_filter, nr_events); in kvm_arm_pmu_v3_set_attr()
996 bitmap_fill(vcpu->kvm->arch.pmu_filter, nr_events); in kvm_arm_pmu_v3_set_attr()
1000 bitmap_set(vcpu->kvm->arch.pmu_filter, filter.base_event, filter.nevents); in kvm_arm_pmu_v3_set_attr()
1002 bitmap_clear(vcpu->kvm->arch.pmu_filter, filter.base_event, filter.nevents); in kvm_arm_pmu_v3_set_attr()
1004 mutex_unlock(&vcpu->kvm->lock); in kvm_arm_pmu_v3_set_attr()
1009 return kvm_arm_pmu_v3_init(vcpu); in kvm_arm_pmu_v3_set_attr()
1015 int kvm_arm_pmu_v3_get_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr) in kvm_arm_pmu_v3_get_attr() argument
1022 if (!irqchip_in_kernel(vcpu->kvm)) in kvm_arm_pmu_v3_get_attr()
1025 if (!kvm_vcpu_has_pmu(vcpu)) in kvm_arm_pmu_v3_get_attr()
1028 if (!kvm_arm_pmu_irq_initialized(vcpu)) in kvm_arm_pmu_v3_get_attr()
1031 irq = vcpu->arch.pmu.irq_num; in kvm_arm_pmu_v3_get_attr()
1039 int kvm_arm_pmu_v3_has_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr) in kvm_arm_pmu_v3_has_attr() argument
1045 if (kvm_vcpu_has_pmu(vcpu)) in kvm_arm_pmu_v3_has_attr()