Lines Matching refs:pmu

19 #include "pmu.h"
38 static void reprogram_fixed_counters(struct kvm_pmu *pmu, u64 data)
42 for (i = 0; i < pmu->nr_arch_fixed_counters; i++) {
44 u8 old_ctrl = fixed_ctrl_field(pmu->fixed_ctr_ctrl, i);
47 pmc = get_fixed_pmc(pmu, MSR_CORE_PERF_FIXED_CTR0 + i);
52 __set_bit(INTEL_PMC_IDX_FIXED + i, pmu->pmc_in_use);
56 pmu->fixed_ctr_ctrl = data;
60 static void global_ctrl_changed(struct kvm_pmu *pmu, u64 data)
63 u64 diff = pmu->global_ctrl ^ data;
65 pmu->global_ctrl = data;
68 reprogram_counter(pmu, bit);
73 struct kvm_pmu *pmu = pmc_to_pmu(pmc);
81 && (pmu->available_event_types & (1 << i)))
105 struct kvm_pmu *pmu = pmc_to_pmu(pmc);
107 if (pmu->version < 2)
110 return test_bit(pmc->idx, (unsigned long *)&pmu->global_ctrl);
113 static struct kvm_pmc *intel_pmc_idx_to_pmc(struct kvm_pmu *pmu, int pmc_idx)
116 return get_gp_pmc(pmu, MSR_P6_EVNTSEL0 + pmc_idx,
121 return get_fixed_pmc(pmu, idx + MSR_CORE_PERF_FIXED_CTR0);
128 struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
133 return (!fixed && idx >= pmu->nr_arch_gp_counters) ||
134 (fixed && idx >= pmu->nr_arch_fixed_counters);
140 struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
147 counters = pmu->fixed_counters;
148 num_counters = pmu->nr_arch_fixed_counters;
150 counters = pmu->gp_counters;
151 num_counters = pmu->nr_arch_gp_counters;
155 *mask &= pmu->counter_bitmask[fixed ? KVM_PMC_FIXED : KVM_PMC_GP];
172 static inline struct kvm_pmc *get_fw_gp_pmc(struct kvm_pmu *pmu, u32 msr)
174 if (!fw_writes_is_enabled(pmu_to_vcpu(pmu)))
177 return get_gp_pmc(pmu, msr, MSR_IA32_PMC0);
182 struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
190 ret = pmu->version > 1;
193 ret = get_gp_pmc(pmu, msr, MSR_IA32_PERFCTR0) ||
194 get_gp_pmc(pmu, msr, MSR_P6_EVNTSEL0) ||
195 get_fixed_pmc(pmu, msr) || get_fw_gp_pmc(pmu, msr);
204 struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
207 pmc = get_fixed_pmc(pmu, msr);
208 pmc = pmc ? pmc : get_gp_pmc(pmu, msr, MSR_P6_EVNTSEL0);
209 pmc = pmc ? pmc : get_gp_pmc(pmu, msr, MSR_IA32_PERFCTR0);
216 struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
222 msr_info->data = pmu->fixed_ctr_ctrl;
225 msr_info->data = pmu->global_status;
228 msr_info->data = pmu->global_ctrl;
231 msr_info->data = pmu->global_ovf_ctrl;
234 if ((pmc = get_gp_pmc(pmu, msr, MSR_IA32_PERFCTR0)) ||
235 (pmc = get_gp_pmc(pmu, msr, MSR_IA32_PMC0))) {
238 val & pmu->counter_bitmask[KVM_PMC_GP];
240 } else if ((pmc = get_fixed_pmc(pmu, msr))) {
243 val & pmu->counter_bitmask[KVM_PMC_FIXED];
245 } else if ((pmc = get_gp_pmc(pmu, msr, MSR_P6_EVNTSEL0))) {
256 struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
263 if (pmu->fixed_ctr_ctrl == data)
265 if (!(data & pmu->fixed_ctr_ctrl_mask)) {
266 reprogram_fixed_counters(pmu, data);
272 pmu->global_status = data;
277 if (pmu->global_ctrl == data)
279 if (kvm_valid_perf_global_ctrl(pmu, data)) {
280 global_ctrl_changed(pmu, data);
285 if (!(data & pmu->global_ovf_ctrl_mask)) {
287 pmu->global_status &= ~data;
288 pmu->global_ovf_ctrl = data;
293 if ((pmc = get_gp_pmc(pmu, msr, MSR_IA32_PERFCTR0)) ||
294 (pmc = get_gp_pmc(pmu, msr, MSR_IA32_PMC0))) {
296 (data & ~pmu->counter_bitmask[KVM_PMC_GP]))
306 } else if ((pmc = get_fixed_pmc(pmu, msr))) {
312 } else if ((pmc = get_gp_pmc(pmu, msr, MSR_P6_EVNTSEL0))) {
315 if (!(data & pmu->reserved_bits)) {
327 struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
334 pmu->nr_arch_gp_counters = 0;
335 pmu->nr_arch_fixed_counters = 0;
336 pmu->counter_bitmask[KVM_PMC_GP] = 0;
337 pmu->counter_bitmask[KVM_PMC_FIXED] = 0;
338 pmu->version = 0;
339 pmu->reserved_bits = 0xffffffff00200000ull;
340 pmu->raw_event_mask = X86_RAW_EVENT_MASK;
341 pmu->global_ctrl_mask = ~0ull;
342 pmu->global_ovf_ctrl_mask = ~0ull;
343 pmu->fixed_ctr_ctrl_mask = ~0ull;
351 pmu->version = eax.split.version_id;
352 if (!pmu->version)
357 pmu->nr_arch_gp_counters = min_t(int, eax.split.num_counters,
360 pmu->counter_bitmask[KVM_PMC_GP] = ((u64)1 << eax.split.bit_width) - 1;
362 pmu->available_event_types = ~entry->ebx &
365 if (pmu->version == 1) {
366 pmu->nr_arch_fixed_counters = 0;
368 pmu->nr_arch_fixed_counters =
373 pmu->counter_bitmask[KVM_PMC_FIXED] =
377 for (i = 0; i < pmu->nr_arch_fixed_counters; i++)
378 pmu->fixed_ctr_ctrl_mask &= ~(0xbull << (i * 4));
379 pmu->global_ctrl = ((1ull << pmu->nr_arch_gp_counters) - 1) |
380 (((1ull << pmu->nr_arch_fixed_counters) - 1) << INTEL_PMC_IDX_FIXED);
381 pmu->global_ctrl_mask = ~pmu->global_ctrl;
382 pmu->global_ovf_ctrl_mask = pmu->global_ctrl_mask
386 pmu->global_ovf_ctrl_mask &=
393 pmu->reserved_bits ^= HSW_IN_TX|HSW_IN_TX_CHECKPOINTED;
395 bitmap_set(pmu->all_valid_pmc_idx,
396 0, pmu->nr_arch_gp_counters);
397 bitmap_set(pmu->all_valid_pmc_idx,
398 INTEL_PMC_MAX_GENERIC, pmu->nr_arch_fixed_counters);
406 struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
409 pmu->gp_counters[i].type = KVM_PMC_GP;
410 pmu->gp_counters[i].vcpu = vcpu;
411 pmu->gp_counters[i].idx = i;
412 pmu->gp_counters[i].current_config = 0;
416 pmu->fixed_counters[i].type = KVM_PMC_FIXED;
417 pmu->fixed_counters[i].vcpu = vcpu;
418 pmu->fixed_counters[i].idx = i + INTEL_PMC_IDX_FIXED;
419 pmu->fixed_counters[i].current_config = 0;
427 struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
432 pmc = &pmu->gp_counters[i];
439 pmc = &pmu->fixed_counters[i];
445 pmu->fixed_ctr_ctrl = pmu->global_ctrl = pmu->global_status =
446 pmu->global_ovf_ctrl = 0;
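
For reference, a minimal standalone sketch of the bit layouts the matched lines manipulate: the 4-bit per-counter fields of IA32_FIXED_CTR_CTRL (lines 44 and 378 above) and the way IA32_PERF_GLOBAL_CTRL is composed from the GP and fixed counter counts (lines 379-380 above). The fixed_ctrl_field() helper and INTEL_PMC_IDX_FIXED (32) are restated here from the upstream headers rather than taken from the matched lines, and the counter counts are made-up example values, so treat this as an illustration rather than kernel code.

/*
 * Sketch of the fixed-counter control and global-control bit layouts,
 * mirroring lines 44, 378 and 379-380 of the listing above.
 * Assumption: INTEL_PMC_IDX_FIXED is 32, as in the upstream headers.
 */
#include <stdint.h>
#include <stdio.h>

#define INTEL_PMC_IDX_FIXED	32	/* fixed counters start at bit 32 of GLOBAL_CTRL */

/* 4-bit control field for fixed counter @idx in IA32_FIXED_CTR_CTRL (cf. line 44). */
static uint8_t fixed_ctrl_field(uint64_t ctrl_reg, int idx)
{
	return (ctrl_reg >> (idx * 4)) & 0xf;
}

int main(void)
{
	int nr_gp = 8, nr_fixed = 3;	/* example counter counts, not from the listing */
	uint64_t fixed_ctr_ctrl_mask = ~0ull;
	uint64_t global_ctrl;
	int i;

	/* Cf. line 378: clear the writable bits (0xb = OS + USR enable + PMI) per counter. */
	for (i = 0; i < nr_fixed; i++)
		fixed_ctr_ctrl_mask &= ~(0xbull << (i * 4));

	/* Cf. lines 379-380: one enable bit per GP counter, fixed counters above bit 32. */
	global_ctrl = ((1ull << nr_gp) - 1) |
		      (((1ull << nr_fixed) - 1) << INTEL_PMC_IDX_FIXED);

	printf("fixed_ctr_ctrl_mask = %#018llx\n",
	       (unsigned long long)fixed_ctr_ctrl_mask);
	printf("global_ctrl         = %#018llx\n",
	       (unsigned long long)global_ctrl);
	printf("IA32_FIXED_CTR_CTRL=0xb0 -> fixed ctr 1 field = %#x\n",
	       fixed_ctrl_field(0xb0, 1));	/* prints 0xb */

	return 0;
}

With nr_gp = 8 and nr_fixed = 3 this prints a fixed_ctr_ctrl_mask of 0xfffffffffffff444 and a global_ctrl of 0x00000007000000ff, which matches the masking and reprogramming decisions visible in the intel_pmu_refresh() and intel_pmu_set_msr() lines above.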