Lines Matching refs:pmc

65  *     3. Global PMC Index (named pmc): pmc is an index specific to PMU
66  *        code. Each pmc, stored in kvm_pmc.idx field, is unique across
68  *        between pmc and perf counters is as the following:
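
The fragments above appear to be from KVM's x86 vPMU code, where the global pmc index spans both gp and fixed counters; the later lines that test pmc->idx == 32 and subtract INTEL_PMC_IDX_FIXED suggest fixed counters start at index 32. A minimal standalone sketch of that split, assuming INTEL_PMC_IDX_FIXED is 32 as in the upstream perf headers (the idx value in main() is purely illustrative):

/* Standalone sketch, not KVM code: classify a global pmc index. */
#include <stdio.h>

#define INTEL_PMC_IDX_FIXED 32  /* first fixed-counter index, per perf headers */

int main(void)
{
        unsigned int idx = 33;  /* hypothetical value: second fixed counter */

        if (idx >= INTEL_PMC_IDX_FIXED)
                printf("idx %u -> fixed counter %u\n", idx, idx - INTEL_PMC_IDX_FIXED);
        else
                printf("idx %u -> gp counter %u\n", idx, idx);
        return 0;
}
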
96 static inline void __kvm_perf_overflow(struct kvm_pmc *pmc, bool in_pmi)
98 struct kvm_pmu *pmu = pmc_to_pmu(pmc);
101 if (pmc->perf_event && pmc->perf_event->attr.precise_ip) {
116 __set_bit(pmc->idx, (unsigned long *)&pmu->global_status);
119 if (pmc->intr && !skip_pmi)
120 kvm_make_request(KVM_REQ_PMI, pmc->vcpu);
127 struct kvm_pmc *pmc = perf_event->overflow_handler_context;
134 if (test_and_set_bit(pmc->idx, pmc_to_pmu(pmc)->reprogram_pmi))
137 __kvm_perf_overflow(pmc, true);
139 kvm_make_request(KVM_REQ_PMU, pmc->vcpu);
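
The two overflow paths above record the overflow in pmu->global_status and, when the counter has interrupts enabled, request a PMI; the perf callback first test-and-sets the counter's bit in reprogram_pmi, so a counter that is already queued for reprogramming is not processed again. A simplified userspace model of that coalescing pattern (the struct and field names below are invented stand-ins, not KVM's):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct toy_pmu {
        uint64_t global_status;      /* guest-visible overflow bits */
        uint64_t reprogram_pending;  /* models the reprogram_pmi bitmap */
};

static bool toy_overflow(struct toy_pmu *pmu, unsigned int idx)
{
        uint64_t bit = 1ULL << idx;

        if (pmu->reprogram_pending & bit)  /* already queued: coalesce */
                return false;
        pmu->reprogram_pending |= bit;     /* models test_and_set_bit() */
        pmu->global_status |= bit;
        return true;                       /* caller would raise the PMI */
}

int main(void)
{
        struct toy_pmu pmu = { 0, 0 };
        bool first = toy_overflow(&pmu, 3);
        bool second = toy_overflow(&pmu, 3);

        printf("first=%d second=%d status=%#llx\n",
               first, second, (unsigned long long)pmu.global_status);
        return 0;
}
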
142 static u64 pmc_get_pebs_precise_level(struct kvm_pmc *pmc)
151 if ((pmc->idx == 0 && x86_match_cpu(vmx_pebs_pdist_cpu)) ||
152 (pmc->idx == 32 && x86_match_cpu(vmx_pebs_pdir_cpu)))
164 static int pmc_reprogram_counter(struct kvm_pmc *pmc, u32 type, u64 config,
168 struct kvm_pmu *pmu = pmc_to_pmu(pmc);
180 bool pebs = test_bit(pmc->idx, (unsigned long *)&pmu->pebs_enable);
182 attr.sample_period = get_sample_period(pmc, pmc->counter);
185 guest_cpuid_is_intel(pmc->vcpu)) {
200 attr.precise_ip = pmc_get_pebs_precise_level(pmc);
204 kvm_perf_overflow, pmc);
206 pr_debug_ratelimited("kvm_pmu: event creation failed %ld for pmc->idx = %d\n",
207 PTR_ERR(event), pmc->idx);
211 pmc->perf_event = event;
212 pmc_to_pmu(pmc)->event_count++;
213 pmc->is_paused = false;
214 pmc->intr = intr || pebs;
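
pmc_reprogram_counter() derives the host perf sample period from the guest counter via get_sample_period() and raises attr.precise_ip when the counter's bit is set in pebs_enable. Since a guest PMC counts up and overflows at its width, the period is essentially the distance to the next overflow. A hedged sketch of that arithmetic with a plain integer in place of struct kvm_pmc (the 48-bit width and sample value are illustrative; this models rather than copies KVM's helper):

#include <stdint.h>
#include <stdio.h>

static uint64_t width_mask(unsigned int width)   /* plays the role of pmc_bitmask() */
{
        return (width >= 64) ? ~0ULL : (1ULL << width) - 1;
}

static uint64_t sample_period(uint64_t counter, unsigned int width)
{
        /* distance from the current value to the counter's overflow point */
        uint64_t period = (0ULL - counter) & width_mask(width);

        return period ? period : width_mask(width) + 1;  /* counter == 0 case */
}

int main(void)
{
        /* counter preloaded 1000 ticks below overflow -> period of 1000 */
        printf("%llu\n",
               (unsigned long long)sample_period(width_mask(48) - 999, 48));
        return 0;
}
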
218 static void pmc_pause_counter(struct kvm_pmc *pmc)
220 u64 counter = pmc->counter;
222 if (!pmc->perf_event || pmc->is_paused)
226 counter += perf_event_pause(pmc->perf_event, true);
227 pmc->counter = counter & pmc_bitmask(pmc);
228 pmc->is_paused = true;
231 static bool pmc_resume_counter(struct kvm_pmc *pmc)
233 if (!pmc->perf_event)
237 if (is_sampling_event(pmc->perf_event) &&
238 perf_event_period(pmc->perf_event,
239 get_sample_period(pmc, pmc->counter)))
242 if (test_bit(pmc->idx, (unsigned long *)&pmc_to_pmu(pmc)->pebs_enable) !=
243 (!!pmc->perf_event->attr.precise_ip))
247 perf_event_enable(pmc->perf_event);
248 pmc->is_paused = false;
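
pmc_pause_counter() folds the host event's accumulated delta into the guest-visible counter and wraps it to the counter width, while pmc_resume_counter() reuses the existing perf event only if its period can be refreshed and its precise_ip setting still agrees with the counter's PEBS enable bit; otherwise the caller releases and recreates the event. A small sketch of those two decisions, with plain values standing in for struct kvm_pmc and struct perf_event:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Pause side: add what the host event counted, wrap to the counter width. */
static uint64_t pause_accumulate(uint64_t guest_counter, uint64_t host_delta,
                                 uint64_t mask)
{
        return (guest_counter + host_delta) & mask;
}

/* Resume side: reuse only if the period update worked and the event's
 * PEBS-ness still matches what the guest currently asks for. */
static bool can_reuse_event(bool period_refresh_ok, bool event_is_pebs,
                            bool counter_wants_pebs)
{
        return period_refresh_ok && (event_is_pebs == counter_wants_pebs);
}

int main(void)
{
        uint64_t mask48 = (1ULL << 48) - 1;

        printf("counter=%llu reuse=%d\n",
               (unsigned long long)pause_accumulate(mask48 - 5, 10, mask48),
               can_reuse_event(true, true, false));
        return 0;
}
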
253 static void pmc_release_perf_event(struct kvm_pmc *pmc)
255 if (pmc->perf_event) {
256 perf_event_release_kernel(pmc->perf_event);
257 pmc->perf_event = NULL;
258 pmc->current_config = 0;
259 pmc_to_pmu(pmc)->event_count--;
263 static void pmc_stop_counter(struct kvm_pmc *pmc)
265 if (pmc->perf_event) {
266 pmc->counter = pmc_read_counter(pmc);
267 pmc_release_perf_event(pmc);
377 static bool check_pmu_event_filter(struct kvm_pmc *pmc)
380 struct kvm *kvm = pmc->vcpu->kvm;
386 if (pmc_is_gp(pmc))
387 return is_gp_event_allowed(filter, pmc->eventsel);
389 return is_fixed_event_allowed(filter, pmc->idx);
392 static bool pmc_event_is_allowed(struct kvm_pmc *pmc)
394 return pmc_is_globally_enabled(pmc) && pmc_speculative_in_use(pmc) &&
395 static_call(kvm_x86_pmu_hw_event_available)(pmc) &&
396 check_pmu_event_filter(pmc);
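
pmc_event_is_allowed() is a conjunction of independent gates: the counter must be globally enabled, its guest configuration must mark it in use, the event must be available on host hardware, and it must pass the user-supplied event filter, where gp counters are matched by event select and fixed counters by index. A toy version of that filter dispatch (the filter layout below is invented for the sketch and is not KVM's kvm_x86_pmu_event_filter):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Invented toy filter: an allow-list of event selects for gp counters and a
 * bitmap of permitted fixed-counter indices. */
struct toy_filter {
        const uint64_t *allowed_eventsels;
        unsigned int nevents;
        uint32_t fixed_bitmap;
};

static bool toy_event_allowed(const struct toy_filter *f, bool is_gp,
                              uint64_t eventsel, unsigned int fixed_idx)
{
        unsigned int i;

        if (!is_gp)
                return f->fixed_bitmap & (1u << fixed_idx);

        for (i = 0; i < f->nevents; i++)
                if (f->allowed_eventsels[i] == (eventsel & 0xFFFF))
                        return true;
        return false;
}

int main(void)
{
        const uint64_t allow[] = { 0x00c0 };      /* e.g. instructions retired */
        struct toy_filter f = { allow, 1, 0x2 };  /* only fixed counter 1 */

        printf("%d %d\n",
               toy_event_allowed(&f, true, 0x4300c0, 0), /* gp: matches event+umask */
               toy_event_allowed(&f, false, 0, 0));      /* fixed counter 0: denied */
        return 0;
}
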
399 static void reprogram_counter(struct kvm_pmc *pmc)
401 struct kvm_pmu *pmu = pmc_to_pmu(pmc);
402 u64 eventsel = pmc->eventsel;
406 pmc_pause_counter(pmc);
408 if (!pmc_event_is_allowed(pmc))
411 if (pmc->counter < pmc->prev_counter)
412 __kvm_perf_overflow(pmc, false);
417 if (pmc_is_fixed(pmc)) {
419 pmc->idx - INTEL_PMC_IDX_FIXED);
429 if (pmc->current_config == new_config && pmc_resume_counter(pmc))
432 pmc_release_perf_event(pmc);
434 pmc->current_config = new_config;
442 if (pmc_reprogram_counter(pmc, PERF_TYPE_RAW,
450 clear_bit(pmc->idx, (unsigned long *)&pmc_to_pmu(pmc)->reprogram_pmi);
451 pmc->prev_counter = 0;
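
reprogram_counter() pauses the counter, bails out if the event is not allowed, raises the emulated overflow when the counter moved backwards past prev_counter, and then compares the freshly computed configuration against the cached current_config: if nothing changed and the old event can simply be resumed, the expensive release-and-recreate path is skipped. A toy model of that caching decision (names are invented stand-ins):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct toy_counter {
        uint64_t current_config;  /* last configuration actually programmed */
        bool has_event;
};

static bool toy_reprogram(struct toy_counter *c, uint64_t new_config,
                          bool resume_ok)
{
        if (c->has_event && c->current_config == new_config && resume_ok)
                return false;         /* reused the existing host event */

        c->has_event = true;          /* models release + recreate */
        c->current_config = new_config;
        return true;
}

int main(void)
{
        struct toy_counter c = { 0, false };
        bool created = toy_reprogram(&c, 0x4300c0, true);
        bool created_again = toy_reprogram(&c, 0x4300c0, true);

        printf("created=%d created_again=%d\n", created, created_again);
        return 0;
}
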
460 struct kvm_pmc *pmc = static_call(kvm_x86_pmu_pmc_idx_to_pmc)(pmu, bit);
462 if (unlikely(!pmc)) {
467 reprogram_counter(pmc);
523 struct kvm_pmc *pmc;
532 pmc = static_call(kvm_x86_pmu_rdpmc_ecx_to_pmc)(vcpu, idx, &mask);
533 if (!pmc)
541 *data = pmc_read_counter(pmc) & mask;
570 struct kvm_pmc *pmc = static_call(kvm_x86_pmu_msr_idx_to_pmc)(vcpu, msr);
572 if (pmc)
573 __set_bit(pmc->idx, pmu->pmc_in_use);
663 struct kvm_pmc *pmc;
671 pmc = static_call(kvm_x86_pmu_pmc_idx_to_pmc)(pmu, i);
672 if (!pmc)
675 pmc_stop_counter(pmc);
676 pmc->counter = 0;
678 if (pmc_is_gp(pmc))
679 pmc->eventsel = 0;
722 struct kvm_pmc *pmc = NULL;
732 pmc = static_call(kvm_x86_pmu_pmc_idx_to_pmc)(pmu, i);
734 if (pmc && pmc->perf_event && !pmc_speculative_in_use(pmc))
735 pmc_stop_counter(pmc);
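
The earlier __set_bit(pmc->idx, pmu->pmc_in_use) fragment marks a counter as recently touched whenever its MSR is accessed, and the cleanup loop above stops the host events of counters that no longer look in use, so idle guest counters do not keep host PMU resources pinned. A toy model of that lazy-release pass (the bitmaps and struct are invented for the sketch):

#include <stdint.h>
#include <stdio.h>

struct toy_pmu_state {
        uint64_t in_use;     /* set on guest MSR access */
        uint64_t has_event;  /* counters currently backed by a host event */
};

static unsigned int toy_cleanup(struct toy_pmu_state *pmu)
{
        uint64_t idle = pmu->has_event & ~pmu->in_use;
        unsigned int i, released = 0;

        for (i = 0; i < 64; i++)
                if (idle & (1ULL << i)) {
                        pmu->has_event &= ~(1ULL << i);  /* models pmc_stop_counter() */
                        released++;
                }

        pmu->in_use = 0;  /* start tracking the next window */
        return released;
}

int main(void)
{
        struct toy_pmu_state pmu = { 0x1, 0x3 };

        printf("released %u\n", toy_cleanup(&pmu));  /* counter 1 was idle */
        return 0;
}
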
748 static void kvm_pmu_incr_counter(struct kvm_pmc *pmc)
750 pmc->prev_counter = pmc->counter;
751 pmc->counter = (pmc->counter + 1) & pmc_bitmask(pmc);
752 kvm_pmu_request_counter_reprogram(pmc);
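
kvm_pmu_incr_counter() advances a counter for an instruction KVM emulates itself: it saves prev_counter, adds one modulo the counter width, and queues a reprogram; reprogram_counter() later treats counter < prev_counter as proof that the increment wrapped and delivers the overflow. The arithmetic in isolation, with a plain integer in place of the pmc:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static bool incr_and_check_overflow(uint64_t *counter, uint64_t mask)
{
        uint64_t prev = *counter;

        *counter = (*counter + 1) & mask;
        return *counter < prev;  /* wrapped -> emulated overflow */
}

int main(void)
{
        uint64_t mask48 = (1ULL << 48) - 1;
        uint64_t counter = mask48;  /* one increment away from wrapping */
        bool first = incr_and_check_overflow(&counter, mask48);
        bool second = incr_and_check_overflow(&counter, mask48);

        printf("first=%d second=%d\n", first, second);
        return 0;
}
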
755 static inline bool eventsel_match_perf_hw_id(struct kvm_pmc *pmc,
758 return !((pmc->eventsel ^ perf_get_hw_event_config(perf_hw_id)) &
762 static inline bool cpl_is_matched(struct kvm_pmc *pmc)
767 if (pmc_is_gp(pmc)) {
768 config = pmc->eventsel;
772 config = fixed_ctrl_field(pmc_to_pmu(pmc)->fixed_ctr_ctrl,
773 pmc->idx - INTEL_PMC_IDX_FIXED);
778 return (static_call(kvm_x86_get_cpl)(pmc->vcpu) == 0) ? select_os : select_user;
784 struct kvm_pmc *pmc;
788 pmc = static_call(kvm_x86_pmu_pmc_idx_to_pmc)(pmu, i);
790 if (!pmc || !pmc_event_is_allowed(pmc))
794 if (eventsel_match_perf_hw_id(pmc, perf_hw_id) && cpl_is_matched(pmc))
795 kvm_pmu_incr_counter(pmc);
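
kvm_pmu_trigger_event() only bumps counters whose programming matches the emulated event: eventsel_match_perf_hw_id() XORs the guest event select against the reference encoding and keeps only the event-select/unit-mask fields, and cpl_is_matched() reads the OS/USR enables from the event-select bits for gp counters or from the counter's 4-bit field in the fixed-counter control for fixed ones. A simplified model of both predicates (the bit positions follow the architectural MSR layouts; the 0xFFFF mask is a simplification of the mask KVM actually applies):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define EVENTSEL_USR    (1ULL << 16)  /* count at CPL > 0 */
#define EVENTSEL_OS     (1ULL << 17)  /* count at CPL 0 */

/* Same hardware event? Keep only the event-select and unit-mask fields. */
static bool same_hw_event(uint64_t guest_eventsel, uint64_t ref_config)
{
        return ((guest_eventsel ^ ref_config) & 0xFFFFULL) == 0;
}

/* Does this gp counter count at the current privilege level? */
static bool gp_cpl_matched(uint64_t eventsel, bool cpl0)
{
        return cpl0 ? !!(eventsel & EVENTSEL_OS) : !!(eventsel & EVENTSEL_USR);
}

/* Fixed counters: bit 0 of the per-counter nibble enables OS, bit 1 USR. */
static bool fixed_cpl_matched(uint64_t fixed_ctr_ctrl, unsigned int fixed_idx,
                              bool cpl0)
{
        uint64_t field = (fixed_ctr_ctrl >> (fixed_idx * 4)) & 0xf;

        return cpl0 ? !!(field & 0x1) : !!(field & 0x2);
}

int main(void)
{
        uint64_t eventsel = 0x4300c0;  /* hypothetical: EN|OS|USR, event 0xc0 */

        printf("%d %d %d\n", same_hw_event(eventsel, 0x00c0),
               gp_cpl_matched(eventsel, true),
               fixed_cpl_matched(0x0b0, 1, false));  /* counter 1 nibble = 0xb */
        return 0;
}
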