Lines matching refs:pmu (KVM Intel vPMU emulation, arch/x86/kvm/vmx/pmu_intel.c)
21 #include "pmu.h"
71 static void reprogram_fixed_counters(struct kvm_pmu *pmu, u64 data)
74 u64 old_fixed_ctr_ctrl = pmu->fixed_ctr_ctrl;
77 pmu->fixed_ctr_ctrl = data;
78 for (i = 0; i < pmu->nr_arch_fixed_counters; i++) {
85 pmc = get_fixed_pmc(pmu, MSR_CORE_PERF_FIXED_CTR0 + i);
87 __set_bit(INTEL_PMC_IDX_FIXED + i, pmu->pmc_in_use);
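The reprogram_fixed_counters() fragments above cover the IA32_FIXED_CTR_CTRL path: each fixed counter owns a 4-bit control field (OS/USR enable, AnyThread, PMI) in that MSR, and only counters whose field changed relative to the cached old value need to be reprogrammed. A minimal stand-alone sketch of the field extraction and change check, with helper names that are illustrative rather than the kernel's:

    #include <stdint.h>
    #include <stdio.h>

    /* Each fixed counter owns 4 control bits in IA32_FIXED_CTR_CTRL. */
    static uint64_t fixed_ctrl_field(uint64_t ctrl, int idx)
    {
        return (ctrl >> (idx * 4)) & 0xf;
    }

    int main(void)
    {
        uint64_t old_ctrl = 0x0b0;  /* counter 1: OS+USR enable, PMI */
        uint64_t new_ctrl = 0x0b3;  /* counter 0 newly enabled for OS+USR */
        int nr_fixed = 3;

        for (int i = 0; i < nr_fixed; i++) {
            if (fixed_ctrl_field(old_ctrl, i) != fixed_ctrl_field(new_ctrl, i))
                printf("fixed counter %d needs reprogramming\n", i);
        }
        return 0;
    }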
92 static struct kvm_pmc *intel_pmc_idx_to_pmc(struct kvm_pmu *pmu, int pmc_idx)
95 return get_gp_pmc(pmu, MSR_P6_EVNTSEL0 + pmc_idx,
100 return get_fixed_pmc(pmu, idx + MSR_CORE_PERF_FIXED_CTR0);
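intel_pmc_idx_to_pmc() resolves a flat counter index using the same layout as IA32_PERF_GLOBAL_CTRL: general-purpose counters occupy the low indices and fixed counters start at INTEL_PMC_IDX_FIXED (bit 32). A hedged sketch of that split, with placeholder arrays standing in for pmu->gp_counters and pmu->fixed_counters:

    #include <stddef.h>
    #include <stdbool.h>

    #define PMC_IDX_FIXED 32  /* mirrors INTEL_PMC_IDX_FIXED for this sketch */

    struct pmc { int idx; bool fixed; };

    static struct pmc gp[8], fixed[3];  /* placeholder counter arrays */

    static struct pmc *idx_to_pmc(int pmc_idx, int nr_gp, int nr_fixed)
    {
        if (pmc_idx < PMC_IDX_FIXED)
            return pmc_idx < nr_gp ? &gp[pmc_idx] : NULL;
        pmc_idx -= PMC_IDX_FIXED;
        return pmc_idx < nr_fixed ? &fixed[pmc_idx] : NULL;
    }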
106 struct kvm_pmu *pmu = pmc_to_pmu(pmc);
122 return pmu->available_event_types & BIT(i);
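The available_event_types check relies on CPUID.0AH:EBX, where a set bit means the corresponding architectural event is not available; the refresh code later in this listing stores the complement (~entry->ebx) truncated to the number of events the CPU enumerates. A small sketch of the same test, assuming nr_events comes from CPUID.0AH EAX[31:24] (currently well below 32):

    #include <stdint.h>
    #include <stdbool.h>

    static bool arch_event_available(uint32_t cpuid_0a_ebx, int nr_events, int i)
    {
        uint32_t available = ~cpuid_0a_ebx & ((1u << nr_events) - 1);

        return available & (1u << i);
    }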
130 struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
135 return fixed ? idx < pmu->nr_arch_fixed_counters
136 : idx < pmu->nr_arch_gp_counters;
142 struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
149 counters = pmu->fixed_counters;
150 num_counters = pmu->nr_arch_fixed_counters;
152 counters = pmu->gp_counters;
153 num_counters = pmu->nr_arch_gp_counters;
157 *mask &= pmu->counter_bitmask[fixed ? KVM_PMC_FIXED : KVM_PMC_GP];
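The two RDPMC helpers decode the guest's ECX as the SDM defines it: bit 30 selects the fixed-counter set, the low bits index into that set, and the value returned to the guest is truncated to the counter's bit width. A compact sketch of both the validation and the masking:

    #include <stdint.h>
    #include <stdbool.h>

    /* ECX bit 30: fixed-counter set; low bits: index within the set. */
    static bool rdpmc_idx_ok(uint32_t ecx, int nr_gp, int nr_fixed)
    {
        bool fixed = ecx & (1u << 30);
        uint32_t idx = ecx & ~(3u << 30);

        return fixed ? idx < (uint32_t)nr_fixed : idx < (uint32_t)nr_gp;
    }

    /* Reads are reported at the emulated counter width, not 64 bits. */
    static uint64_t rdpmc_mask(uint64_t raw, int bit_width)
    {
        return raw & (((uint64_t)1 << bit_width) - 1);
    }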
174 static inline struct kvm_pmc *get_fw_gp_pmc(struct kvm_pmu *pmu, u32 msr)
176 if (!fw_writes_is_enabled(pmu_to_vcpu(pmu)))
179 return get_gp_pmc(pmu, msr, MSR_IA32_PMC0);
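get_fw_gp_pmc() only resolves the MSR_IA32_PMC0-based full-width counter aliases when the guest has been granted FW_WRITE in IA32_PERF_CAPABILITIES (bit 13); otherwise only the legacy MSR_IA32_PERFCTR0 range is visible. A one-line sketch of that gate (macro name here is illustrative):

    #include <stdint.h>
    #include <stdbool.h>

    #define PERF_CAP_FW_WRITES (1ULL << 13)  /* IA32_PERF_CAPABILITIES.FW_WRITE */

    static bool fw_writes_enabled(uint64_t guest_perf_capabilities)
    {
        return guest_perf_capabilities & PERF_CAP_FW_WRITES;
    }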
202 struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
208 return kvm_pmu_has_perf_global_ctrl(pmu);
221 ret = get_gp_pmc(pmu, msr, MSR_IA32_PERFCTR0) ||
222 get_gp_pmc(pmu, msr, MSR_P6_EVNTSEL0) ||
223 get_fixed_pmc(pmu, msr) || get_fw_gp_pmc(pmu, msr) ||
233 struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
236 pmc = get_fixed_pmc(pmu, msr);
237 pmc = pmc ? pmc : get_gp_pmc(pmu, msr, MSR_P6_EVNTSEL0);
238 pmc = pmc ? pmc : get_gp_pmc(pmu, msr, MSR_IA32_PERFCTR0);
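intel_is_valid_msr() and intel_msr_idx_to_pmc() both reduce to range checks against the per-counter MSR bases: fixed counters at MSR_CORE_PERF_FIXED_CTR0, event selects at MSR_P6_EVNTSEL0, counters at MSR_IA32_PERFCTR0 (plus the full-width aliases when enabled). A generic base-plus-index lookup sketch using the SDM MSR numbers:

    #include <stdint.h>
    #include <stdbool.h>

    #define MSR_P6_EVNTSEL0          0x186
    #define MSR_IA32_PERFCTR0        0x0c1
    #define MSR_CORE_PERF_FIXED_CTR0 0x309

    /* A counter MSR resolves to "base + counter index"; membership in any
     * known range is what makes the MSR valid for the vPMU. */
    static bool in_range(uint32_t msr, uint32_t base, int nr)
    {
        return msr >= base && msr < base + (uint32_t)nr;
    }

    static bool pmu_msr_valid(uint32_t msr, int nr_gp, int nr_fixed)
    {
        return in_range(msr, MSR_P6_EVNTSEL0, nr_gp) ||
               in_range(msr, MSR_IA32_PERFCTR0, nr_gp) ||
               in_range(msr, MSR_CORE_PERF_FIXED_CTR0, nr_fixed);
    }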
257 struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
289 __set_bit(INTEL_PMC_IDX_FIXED_VLBR, pmu->pmc_in_use);
301 pmu->event_count++;
302 __set_bit(INTEL_PMC_IDX_FIXED_VLBR, pmu->pmc_in_use);
350 struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
356 msr_info->data = pmu->fixed_ctr_ctrl;
359 msr_info->data = pmu->pebs_enable;
362 msr_info->data = pmu->ds_area;
365 msr_info->data = pmu->pebs_data_cfg;
368 if ((pmc = get_gp_pmc(pmu, msr, MSR_IA32_PERFCTR0)) ||
369 (pmc = get_gp_pmc(pmu, msr, MSR_IA32_PMC0))) {
372 val & pmu->counter_bitmask[KVM_PMC_GP];
374 } else if ((pmc = get_fixed_pmc(pmu, msr))) {
377 val & pmu->counter_bitmask[KVM_PMC_FIXED];
379 } else if ((pmc = get_gp_pmc(pmu, msr, MSR_P6_EVNTSEL0))) {
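On the read side, intel_pmu_get_msr() returns the cached pmu fields for control MSRs (FIXED_CTR_CTRL, PEBS_ENABLE, DS_AREA, PEBS_DATA_CFG) and masks counter reads down to the emulated width. A sketch of the counter case, assuming counter_bitmask was built as ((1ull << bit_width) - 1):

    #include <stdint.h>

    /* GP width comes from CPUID.0AH EAX[23:16], fixed width from EDX[12:5]. */
    static uint64_t read_counter(uint64_t raw, int bit_width)
    {
        return raw & (((uint64_t)1 << bit_width) - 1);
    }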
393 struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
401 if (data & pmu->fixed_ctr_ctrl_mask)
404 if (pmu->fixed_ctr_ctrl != data)
405 reprogram_fixed_counters(pmu, data);
408 if (data & pmu->pebs_enable_mask)
411 if (pmu->pebs_enable != data) {
412 diff = pmu->pebs_enable ^ data;
413 pmu->pebs_enable = data;
414 reprogram_counters(pmu, diff);
421 pmu->ds_area = data;
424 if (data & pmu->pebs_data_cfg_mask)
427 pmu->pebs_data_cfg = data;
430 if ((pmc = get_gp_pmc(pmu, msr, MSR_IA32_PERFCTR0)) ||
431 (pmc = get_gp_pmc(pmu, msr, MSR_IA32_PMC0))) {
433 (data & ~pmu->counter_bitmask[KVM_PMC_GP]))
442 } else if ((pmc = get_fixed_pmc(pmu, msr))) {
446 } else if ((pmc = get_gp_pmc(pmu, msr, MSR_P6_EVNTSEL0))) {
447 reserved_bits = pmu->reserved_bits;
449 (pmu->raw_event_mask & HSW_IN_TX_CHECKPOINTED))
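The write side is the mirror image: a write that sets any bit flagged in the corresponding reserved mask (fixed_ctr_ctrl_mask, pebs_enable_mask, pebs_data_cfg_mask, ...) is refused, and an accepted write that changes state only reprograms the counters whose bits actually flipped. A sketch of that pattern for a PEBS_ENABLE-style MSR, with illustrative names:

    #include <stdint.h>

    /* Returns non-zero when the guest set unsupported bits (the caller would
     * inject #GP); on success records which counter bits changed. */
    static int write_enable_msr(uint64_t *cur, uint64_t data,
                                uint64_t reserved_mask, uint64_t *diff_out)
    {
        if (data & reserved_mask)
            return 1;
        *diff_out = *cur ^ data;  /* counters whose state changed */
        *cur = data;
        return 0;
    }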
469 static void setup_fixed_pmc_eventsel(struct kvm_pmu *pmu)
475 for (i = 0; i < pmu->nr_arch_fixed_counters; i++) {
477 struct kvm_pmc *pmc = &pmu->fixed_counters[index];
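setup_fixed_pmc_eventsel() records, for each fixed counter, the eventsel/umask of the architectural event it hardwires, so fixed counters can share the generic reprogramming path. The standard architectural encodings look like this (table name is illustrative):

    #include <stdint.h>

    struct arch_event { uint8_t eventsel; uint8_t unit_mask; };

    static const struct arch_event fixed_pmc_event[] = {
        { 0xc0, 0x00 },  /* FIXED_CTR0: instructions retired */
        { 0x3c, 0x00 },  /* FIXED_CTR1: unhalted core cycles */
        { 0x3c, 0x01 },  /* FIXED_CTR2: unhalted reference cycles */
    };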
487 struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
496 pmu->nr_arch_gp_counters = 0;
497 pmu->nr_arch_fixed_counters = 0;
498 pmu->counter_bitmask[KVM_PMC_GP] = 0;
499 pmu->counter_bitmask[KVM_PMC_FIXED] = 0;
500 pmu->version = 0;
501 pmu->reserved_bits = 0xffffffff00200000ull;
502 pmu->raw_event_mask = X86_RAW_EVENT_MASK;
503 pmu->global_ctrl_mask = ~0ull;
504 pmu->global_status_mask = ~0ull;
505 pmu->fixed_ctr_ctrl_mask = ~0ull;
506 pmu->pebs_enable_mask = ~0ull;
507 pmu->pebs_data_cfg_mask = ~0ull;
525 pmu->version = eax.split.version_id;
526 if (!pmu->version)
529 pmu->nr_arch_gp_counters = min_t(int, eax.split.num_counters,
533 pmu->counter_bitmask[KVM_PMC_GP] = ((u64)1 << eax.split.bit_width) - 1;
536 pmu->available_event_types = ~entry->ebx &
539 if (pmu->version == 1) {
540 pmu->nr_arch_fixed_counters = 0;
542 pmu->nr_arch_fixed_counters = min_t(int, edx.split.num_counters_fixed,
546 pmu->counter_bitmask[KVM_PMC_FIXED] =
548 setup_fixed_pmc_eventsel(pmu);
551 for (i = 0; i < pmu->nr_arch_fixed_counters; i++)
552 pmu->fixed_ctr_ctrl_mask &= ~(0xbull << (i * 4));
553 counter_mask = ~(((1ull << pmu->nr_arch_gp_counters) - 1) |
554 (((1ull << pmu->nr_arch_fixed_counters) - 1) << INTEL_PMC_IDX_FIXED));
555 pmu->global_ctrl_mask = counter_mask;
562 pmu->global_status_mask = pmu->global_ctrl_mask
566 pmu->global_status_mask &=
573 pmu->reserved_bits ^= HSW_IN_TX;
574 pmu->raw_event_mask |= (HSW_IN_TX|HSW_IN_TX_CHECKPOINTED);
577 bitmap_set(pmu->all_valid_pmc_idx,
578 0, pmu->nr_arch_gp_counters);
579 bitmap_set(pmu->all_valid_pmc_idx,
580 INTEL_PMC_MAX_GENERIC, pmu->nr_arch_fixed_counters);
590 bitmap_set(pmu->all_valid_pmc_idx, INTEL_PMC_IDX_FIXED_VLBR, 1);
594 pmu->pebs_enable_mask = counter_mask;
595 pmu->reserved_bits &= ~ICL_EVENTSEL_ADAPTIVE;
596 for (i = 0; i < pmu->nr_arch_fixed_counters; i++) {
597 pmu->fixed_ctr_ctrl_mask &=
600 pmu->pebs_data_cfg_mask = ~0xff00000full;
602 pmu->pebs_enable_mask =
603 ~((1ull << pmu->nr_arch_gp_counters) - 1);
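Most of the refresh fragments compute reserved-bit masks from the counter counts advertised in CPUID.0AH. Valid IA32_PERF_GLOBAL_CTRL bits are the GP counters at bits 0..nr_gp-1 plus the fixed counters starting at bit 32, and the IA32_PEBS_ENABLE mask is either that same set (adaptive/baseline PEBS) or the GP bits only. A sketch of both derivations, mirroring the shape of the code rather than its exact feature checks:

    #include <stdint.h>
    #include <stdbool.h>

    #define PMC_IDX_FIXED 32  /* fixed counters start at bit 32 of GLOBAL_CTRL */

    /* Reserved-bit mask for IA32_PERF_GLOBAL_CTRL: clear every bit that names
     * a real counter; anything left over must not be set by the guest. */
    static uint64_t global_ctrl_reserved(int nr_gp, int nr_fixed)
    {
        uint64_t valid = ((1ull << nr_gp) - 1) |
                         (((1ull << nr_fixed) - 1) << PMC_IDX_FIXED);

        return ~valid;
    }

    /* IA32_PEBS_ENABLE reserved bits: with adaptive/baseline PEBS every
     * existing counter may carry PEBS, otherwise only the GP counters can. */
    static uint64_t pebs_enable_reserved(bool adaptive_pebs, int nr_gp, int nr_fixed)
    {
        if (adaptive_pebs)
            return global_ctrl_reserved(nr_gp, nr_fixed);
        return ~((1ull << nr_gp) - 1);
    }

With 8 GP and 3 fixed counters, for example, only bits 0-7 and 32-34 remain writable in IA32_PERF_GLOBAL_CTRL.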
611 struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
615 pmu->gp_counters[i].type = KVM_PMC_GP;
616 pmu->gp_counters[i].vcpu = vcpu;
617 pmu->gp_counters[i].idx = i;
618 pmu->gp_counters[i].current_config = 0;
622 pmu->fixed_counters[i].type = KVM_PMC_FIXED;
623 pmu->fixed_counters[i].vcpu = vcpu;
624 pmu->fixed_counters[i].idx = i + INTEL_PMC_IDX_FIXED;
625 pmu->fixed_counters[i].current_config = 0;
639 * Emulate LBR_On_PMI behavior for 1 < pmu.version < 4.
707 * pmu resources (e.g. LBR) that were assigned to the guest. This is
711 * confirm that the pmu features enabled to the guest are not reclaimed
717 struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
724 if (test_bit(INTEL_PMC_IDX_FIXED_VLBR, pmu->pmc_in_use))
731 __clear_bit(INTEL_PMC_IDX_FIXED_VLBR, pmu->pmc_in_use);
748 void intel_pmu_cross_mapped_check(struct kvm_pmu *pmu)
753 for_each_set_bit(bit, (unsigned long *)&pmu->global_ctrl,
755 pmc = intel_pmc_idx_to_pmc(pmu, bit);
767 pmu->host_cross_mapped_mask |= BIT_ULL(hw_idx);
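intel_pmu_cross_mapped_check() walks the counters the guest enabled in GLOBAL_CTRL and records which host hardware counter backs each one, so a PMI arriving on a different host index can still be attributed to the right guest counter. A sketch of that set-bit walk; get_hw_idx() stands in for the perf-event query the kernel performs:

    #include <stdint.h>

    static uint64_t cross_mapped_mask(uint64_t guest_global_ctrl,
                                      int (*get_hw_idx)(int guest_idx))
    {
        uint64_t mask = 0;

        while (guest_global_ctrl) {
            int bit = __builtin_ctzll(guest_global_ctrl);
            int hw_idx = get_hw_idx(bit);

            /* Only counters that landed on a different host index matter. */
            if (hw_idx >= 0 && hw_idx != bit)
                mask |= 1ull << hw_idx;
            guest_global_ctrl &= guest_global_ctrl - 1;  /* clear lowest bit */
        }
        return mask;
    }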