Lines matching refs:pmc (references to the identifier pmc in the KVM arm64 PMU emulation code)
19 static void kvm_pmu_stop_counter(struct kvm_vcpu *vcpu, struct kvm_pmc *pmc);
49 static struct kvm_vcpu *kvm_pmc_to_vcpu(struct kvm_pmc *pmc)
54 pmc -= pmc->idx;
55 pmu = container_of(pmc, struct kvm_pmu, pmc[0]);
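
The hits at lines 49-55 are kvm_pmc_to_vcpu(), which recovers the owning vcpu from a bare counter pointer: pmc->idx is the counter's position in the pmu->pmc[] array, so stepping back by idx lands on pmc[0], and container_of() then walks out to the embedding structures. Below is a minimal standalone model of that pointer arithmetic; the struct layouts are simplified stand-ins (the real chain also passes through kvm_vcpu_arch), not the kernel's definitions.

#include <stddef.h>
#include <stdio.h>

/* Simplified stand-ins for the kernel structures; the layout is assumed. */
struct pmc  { int idx; };
struct pmu  { struct pmc pmc[4]; };
struct vcpu { int id; struct pmu pmu; };

/* Same idea as the kernel's container_of(): member pointer -> enclosing struct. */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

/* Mirror of lines 54-55: since pmc->idx is the counter's slot in pmu->pmc[],
 * "pmc -= pmc->idx" lands on pmc[0], and container_of() then recovers the
 * embedding pmu and finally the vcpu. */
static struct vcpu *pmc_to_vcpu(struct pmc *pmc)
{
	struct pmu *pmu;

	pmc -= pmc->idx;
	pmu = container_of(pmc, struct pmu, pmc[0]);
	return container_of(pmu, struct vcpu, pmu);
}

int main(void)
{
	struct vcpu v = { .id = 7 };

	for (int i = 0; i < 4; i++)
		v.pmu.pmc[i].idx = i;

	/* Any counter in the array resolves back to the same vcpu. */
	printf("%d %d\n", pmc_to_vcpu(&v.pmu.pmc[0])->id,
	       pmc_to_vcpu(&v.pmu.pmc[3])->id);
	return 0;
}
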
61 * kvm_pmu_pmc_is_chained - determine if the pmc is chained
62 * @pmc: The PMU counter pointer
64 static bool kvm_pmu_pmc_is_chained(struct kvm_pmc *pmc)
66 struct kvm_vcpu *vcpu = kvm_pmc_to_vcpu(pmc);
68 return test_bit(pmc->idx >> 1, vcpu->arch.pmu.chained);
81 * kvm_pmu_get_canonical_pmc - obtain the canonical pmc
82 * @pmc: The PMU counter pointer
87 static struct kvm_pmc *kvm_pmu_get_canonical_pmc(struct kvm_pmc *pmc)
89 if (kvm_pmu_pmc_is_chained(pmc) &&
90 kvm_pmu_idx_is_high_counter(pmc->idx))
91 return pmc - 1;
93 return pmc;
95 static struct kvm_pmc *kvm_pmu_get_alternate_pmc(struct kvm_pmc *pmc)
97 if (kvm_pmu_idx_is_high_counter(pmc->idx))
98 return pmc - 1;
100 return pmc + 1;
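
Lines 61-100 cover the chained-counter plumbing: ARMv8 event counters pair up even/odd, the pair state lives in one bit per pair (hence idx >> 1 into vcpu->arch.pmu.chained), the even "canonical" counter is the one that owns the perf event, and the "alternate" helper returns the other half of the pair. A standalone sketch of just that index arithmetic, operating on counter indices rather than kvm_pmc pointers and using a plain unsigned long for the bitmap:

#include <stdbool.h>
#include <stdio.h>

/* One bit per even/odd counter pair, as in vcpu->arch.pmu.chained. */
static unsigned long chained_bitmap;

static bool idx_is_high_counter(unsigned int idx)
{
	return idx & 1;				/* odd index = high half of a pair */
}

static bool pair_is_chained(unsigned int idx)
{
	return chained_bitmap & (1UL << (idx >> 1));	/* test_bit(idx >> 1, ...) */
}

/* The canonical counter is the low (even) one; it owns the perf event. */
static unsigned int canonical_idx(unsigned int idx)
{
	if (pair_is_chained(idx) && idx_is_high_counter(idx))
		return idx - 1;
	return idx;
}

/* The alternate counter is simply the other half of the pair. */
static unsigned int alternate_idx(unsigned int idx)
{
	return idx_is_high_counter(idx) ? idx - 1 : idx + 1;
}

int main(void)
{
	chained_bitmap |= 1UL << (3 >> 1);	/* mark counters 2/3 as a chained pair */

	printf("canonical(3)=%u alternate(2)=%u chained(2)=%d\n",
	       canonical_idx(3), alternate_idx(2), pair_is_chained(2));
	return 0;
}
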
126 * @pmc: The PMU counter pointer
129 struct kvm_pmc *pmc)
133 if (kvm_pmu_pmc_is_chained(pmc)) {
134 pmc = kvm_pmu_get_canonical_pmc(pmc);
135 reg = PMEVCNTR0_EL0 + pmc->idx;
142 reg = (pmc->idx == ARMV8_PMU_CYCLE_IDX)
143 ? PMCCNTR_EL0 : PMEVCNTR0_EL0 + pmc->idx;
151 if (pmc->perf_event)
152 counter += perf_event_read_value(pmc->perf_event, &enabled,
167 struct kvm_pmc *pmc = &pmu->pmc[select_idx];
169 counter = kvm_pmu_get_pair_counter_value(vcpu, pmc);
171 if (kvm_pmu_pmc_is_chained(pmc) &&
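
Lines 126-171 read a counter back: for a chained pair the low and high 32-bit register values are combined into one 64-bit quantity and the backing perf event's accumulated count is added on top (lines 151-152), after which kvm_pmu_get_counter_value() hands back either the upper or the lower 32 bits depending on which index of the pair was requested. A sketch of that widening/narrowing arithmetic with hypothetical register values; the cycle-counter special case is left out:

#include <stdint.h>
#include <stdio.h>

static uint32_t lower_32_bits(uint64_t v) { return (uint32_t)v; }
static uint32_t upper_32_bits(uint64_t v) { return (uint32_t)(v >> 32); }

/* Chained read (lines 133-143): the low and high PMEVCNTR halves become one
 * 64-bit value; the perf event's accumulated delta is added on top. */
static uint64_t pair_counter_value(uint64_t reg_lo, uint64_t reg_hi,
				   uint64_t perf_delta)
{
	uint64_t counter = lower_32_bits(reg_lo) |
			   ((uint64_t)lower_32_bits(reg_hi) << 32);

	return counter + perf_delta;
}

/* Per-index view (lines 167-171): the odd/high counter of a chained pair
 * reports the upper 32 bits, the even/low one the lower 32 bits. */
static uint64_t counter_value(unsigned int idx, uint64_t pair_value)
{
	return (idx & 1) ? upper_32_bits(pair_value) : lower_32_bits(pair_value);
}

int main(void)
{
	/* Low half near wrap-around plus a small perf delta carries into the high half. */
	uint64_t pair = pair_counter_value(0xfffffff0, 0x00000001, 0x20);

	printf("pair=%#llx low=%#llx high=%#llx\n",
	       (unsigned long long)pair,
	       (unsigned long long)counter_value(2, pair),
	       (unsigned long long)counter_value(3, pair));
	return 0;
}
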
200 * @pmc: The PMU counter pointer
202 static void kvm_pmu_release_perf_event(struct kvm_pmc *pmc)
204 pmc = kvm_pmu_get_canonical_pmc(pmc);
205 if (pmc->perf_event) {
206 perf_event_disable(pmc->perf_event);
207 perf_event_release_kernel(pmc->perf_event);
208 pmc->perf_event = NULL;
214 * @pmc: The PMU counter pointer
218 static void kvm_pmu_stop_counter(struct kvm_vcpu *vcpu, struct kvm_pmc *pmc)
222 pmc = kvm_pmu_get_canonical_pmc(pmc);
223 if (!pmc->perf_event)
226 counter = kvm_pmu_get_pair_counter_value(vcpu, pmc);
228 if (pmc->idx == ARMV8_PMU_CYCLE_IDX) {
232 reg = PMEVCNTR0_EL0 + pmc->idx;
238 if (kvm_pmu_pmc_is_chained(pmc))
241 kvm_pmu_release_perf_event(pmc);
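
kvm_pmu_stop_counter() (lines 218-241) folds the live perf state back into the guest's emulated registers before dropping the event: sample the pair value, write it into PMCCNTR/PMEVCNTR (both halves when the pair is chained), and only then release the perf event so nothing already counted is lost. The ordering is sketched below over a plain array standing in for the vcpu sys-reg file; the index is assumed to already be the canonical (even) one, and the cycle-counter branch is omitted:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define NR_COUNTERS 4

/* Emulated PMEVCNTR<n> registers, one slot per counter (stand-in for the
 * vcpu's sys-reg file). */
static uint64_t evcntr[NR_COUNTERS];

static uint32_t lower_32_bits(uint64_t v) { return (uint32_t)v; }
static uint32_t upper_32_bits(uint64_t v) { return (uint32_t)(v >> 32); }

/* Sequencing mirrored from lines 222-241: sample first, write the value back
 * into the emulated registers, then drop the perf event. */
static void stop_counter(unsigned int idx, bool chained, uint64_t sampled,
			 bool *have_event)
{
	if (!*have_event)
		return;

	evcntr[idx] = lower_32_bits(sampled);
	if (chained)
		evcntr[idx + 1] = upper_32_bits(sampled);

	*have_event = false;		/* stands in for kvm_pmu_release_perf_event() */
}

int main(void)
{
	bool have_event = true;

	stop_counter(2, true, 0x300000042ull, &have_event);
	printf("evcntr[2]=%#llx evcntr[3]=%#llx event=%d\n",
	       (unsigned long long)evcntr[2],
	       (unsigned long long)evcntr[3], have_event);
	return 0;
}
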
255 pmu->pmc[i].idx = i;
270 kvm_pmu_stop_counter(vcpu, &pmu->pmc[i]);
286 kvm_pmu_release_perf_event(&pmu->pmc[i]);
312 struct kvm_pmc *pmc;
321 pmc = &pmu->pmc[i];
327 /* At this point, pmc must be the canonical */
328 if (pmc->perf_event) {
329 perf_event_enable(pmc->perf_event);
330 if (pmc->perf_event->state != PERF_EVENT_STATE_ACTIVE)
347 struct kvm_pmc *pmc;
356 pmc = &pmu->pmc[i];
362 /* At this point, pmc must be the canonical */
363 if (pmc->perf_event)
364 perf_event_disable(pmc->perf_event);
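
In the enable/disable paths (lines 312-364), each set bit of the guest mask leads to a pmc and then to pmc->perf_event, but, as the "must be the canonical" comments indicate, only the canonical counter of a chained pair owns a perf event, so the high half has nothing to enable or disable directly. A sketch of that mask walk, with hypothetical callbacks standing in for perf_event_enable()/perf_event_disable() and the chain-state update and event-creation steps elided:

#include <stdbool.h>
#include <stdio.h>

#define NR_COUNTERS 6

static bool chained[NR_COUNTERS / 2];	/* one flag per even/odd pair */

/* Stand-ins for perf_event_enable()/perf_event_disable() on the canonical
 * counter's event. */
static void event_enable(unsigned int idx)  { printf("enable %u\n", idx); }
static void event_disable(unsigned int idx) { printf("disable %u\n", idx); }

/* Walk the guest's enable/disable mask; the odd (high) counter of a chained
 * pair has no event of its own, so only the even half is touched. */
static void apply_mask(unsigned long mask, bool enable)
{
	for (unsigned int i = 0; i < NR_COUNTERS; i++) {
		if (!(mask & (1UL << i)))
			continue;
		if (chained[i >> 1] && (i & 1))
			continue;	/* high half of a pair: no perf event */
		if (enable)
			event_enable(i);
		else
			event_disable(i);
	}
}

int main(void)
{
	chained[1] = true;		/* counters 2 and 3 form a chained pair */
	apply_mask(0x0f, true);		/* touches events for counters 0, 1 and 2 only */
	apply_mask(0x0f, false);
	return 0;
}
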
463 vcpu = kvm_pmc_to_vcpu(pmu->pmc);
475 struct kvm_pmc *pmc = perf_event->overflow_handler_context;
477 struct kvm_vcpu *vcpu = kvm_pmc_to_vcpu(pmc);
478 int idx = pmc->idx;
489 if (!kvm_pmu_idx_is_64bit(vcpu, pmc->idx))
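
The check at line 489 sits in the overflow handler's re-arming logic: the next sample period is the two's-complement distance from the value the event has reached to the counter's overflow point, truncated to 32 bits unless the counter is architecturally 64-bit. The same formula reappears when the event is first created (lines 646-662). The arithmetic in isolation, with GENMASK(31, 0) written out as a constant:

#include <stdint.h>
#include <stdio.h>

/* Distance to the overflow point: -count in two's complement, masked to
 * 32 bits unless the counter is a full 64-bit one. */
static uint64_t next_sample_period(uint64_t count, int is_64bit)
{
	uint64_t period = -count;	/* unsigned negation == 2^64 - count */

	if (!is_64bit)
		period &= 0xffffffffULL;	/* GENMASK(31, 0) */

	return period;
}

int main(void)
{
	/* A 32-bit event counter sitting at 0xfffffff0 overflows in 0x10 ticks. */
	printf("%#llx\n", (unsigned long long)next_sample_period(0xfffffff0, 0));
	/* The same value in a 64-bit counter has much further to go. */
	printf("%#llx\n", (unsigned long long)next_sample_period(0xfffffff0, 1));
	return 0;
}
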
546 if (kvm_pmu_pmc_is_chained(&pmu->pmc[i])) {
601 struct kvm_pmc *pmc;
611 pmc = kvm_pmu_get_canonical_pmc(&pmu->pmc[select_idx]);
613 reg = (pmc->idx == ARMV8_PMU_CYCLE_IDX)
614 ? PMCCFILTR_EL0 : PMEVTYPER0_EL0 + pmc->idx;
617 kvm_pmu_stop_counter(vcpu, pmc);
618 if (pmc->idx == ARMV8_PMU_CYCLE_IDX)
639 attr.disabled = !kvm_pmu_counter_is_enabled(vcpu, pmc->idx);
646 counter = kvm_pmu_get_pair_counter_value(vcpu, pmc);
648 if (kvm_pmu_pmc_is_chained(pmc)) {
659 pmc + 1);
662 if (kvm_pmu_idx_is_64bit(vcpu, pmc->idx))
668 kvm_pmu_perf_overflow, pmc);
677 pmc->perf_event = event;
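
The fragments from kvm_pmu_create_perf_event() (lines 601-677) show the overall flow: take the canonical pmc, stop any existing event, fill a perf_event_attr whose disabled flag follows the guest's enable bit (line 639) and whose sample_period is the distance to the counter's overflow point, then register a kernel counter whose overflow callback receives the pmc as context (pmc + 1 for a chained pair, line 659, since overflow is reported on the high counter). The standalone sketch below only fills those attribute fields; fill_attr() is a hypothetical helper, and the event-type/filter decoding is elided:

#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <linux/perf_event.h>	/* struct perf_event_attr (UAPI header) */

/* Derive the attribute fields the listing shows coming from guest state:
 * "disabled" from the counter-enable bit, "sample_period" from the distance
 * to overflow (a 64-bit span for a chained pair or 64-bit counter, a 32-bit
 * span otherwise). */
static void fill_attr(struct perf_event_attr *attr, uint64_t counter,
		      int enabled, int is_64bit_span)
{
	memset(attr, 0, sizeof(*attr));
	attr->size = sizeof(*attr);
	attr->disabled = !enabled;
	attr->sample_period = is_64bit_span ? -counter
					    : (-counter) & 0xffffffffULL;
}

int main(void)
{
	struct perf_event_attr attr;

	/* A 32-bit counter currently at 0xfffffff0, enabled by the guest. */
	fill_attr(&attr, 0xfffffff0ull, 1, 0);
	printf("disabled=%llu sample_period=%#llx\n",
	       (unsigned long long)attr.disabled,
	       (unsigned long long)attr.sample_period);

	/* In the kernel, this attr would now be passed to
	 * perf_event_create_kernel_counter() together with the overflow
	 * callback and the pmc (pmc + 1 for a chained pair) as context. */
	return 0;
}
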
691 struct kvm_pmc *pmc = &pmu->pmc[select_idx], *canonical_pmc;
694 old_state = kvm_pmu_pmc_is_chained(pmc);
695 new_state = kvm_pmu_idx_has_chain_evtype(vcpu, pmc->idx) &&
696 kvm_pmu_counter_is_enabled(vcpu, pmc->idx | 0x1);
701 canonical_pmc = kvm_pmu_get_canonical_pmc(pmc);
708 kvm_pmu_stop_counter(vcpu, kvm_pmu_get_alternate_pmc(pmc));
709 set_bit(pmc->idx >> 1, vcpu->arch.pmu.chained);
712 clear_bit(pmc->idx >> 1, vcpu->arch.pmu.chained);
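
The final group (lines 691-712) is kvm_pmu_update_pmc_chained(), which reconciles the per-pair chained bit whenever the guest reprograms or enables counters: the pair should be chained when the low counter counts the CHAIN event and the odd counter of the pair is enabled; on any transition the affected counters are stopped first (tearing down their perf events while preserving their values) and only then is the pair bit flipped. A reduced, index-only sketch of that transition, with the stop step replaced by a hypothetical callback and the exact stop order simplified:

#include <stdbool.h>
#include <stdio.h>

static unsigned long chained_bitmap;

/* Stand-in for kvm_pmu_stop_counter(): in the kernel this also releases the
 * counter's perf event so it can be recreated in the new mode. */
static void stop_counter(unsigned int idx)
{
	printf("stop counter %u\n", idx);
}

/* Mirror of lines 694-712: recompute whether the pair containing @idx should
 * be chained, and on a transition stop the affected halves before flipping
 * the pair's bit. */
static void update_pair_chained(unsigned int idx, bool has_chain_evtype,
				bool high_counter_enabled)
{
	bool old_state = chained_bitmap & (1UL << (idx >> 1));
	bool new_state = has_chain_evtype && high_counter_enabled;

	if (old_state == new_state)
		return;

	stop_counter(idx & ~1u);		/* canonical (even) half */
	if (new_state) {
		stop_counter(idx | 1u);		/* alternate (odd) half too */
		chained_bitmap |= 1UL << (idx >> 1);
		return;
	}
	chained_bitmap &= ~(1UL << (idx >> 1));
}

int main(void)
{
	update_pair_chained(2, true, true);	/* promote counters 2/3 to chained */
	update_pair_chained(2, false, false);	/* and demote them again */
	printf("chained bitmap: %#lx\n", chained_bitmap);
	return 0;
}
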