Lines Matching refs:pmc (references to struct kvm_pmc in KVM's Intel vPMU code, arch/x86/kvm/vmx/pmu_intel.c)
35 /* mapping between fixed pmc index and intel_arch_events array */
45 struct kvm_pmc *pmc;
47 pmc = get_fixed_pmc(pmu, MSR_CORE_PERF_FIXED_CTR0 + i);
53 reprogram_fixed_counter(pmc, new_ctrl, i);
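Lines 45-53 come from the fixed-counter reprogramming loop: a write to IA32_FIXED_CTR_CTRL is split into 4-bit per-counter control fields, and a counter is reprogrammed only when its field actually changed. Below is a minimal userspace sketch of that decoding; fixed_ctrl_field() mirrors the kernel helper of the same name, while NR_FIXED and the sample MSR values are made up.

#include <stdint.h>
#include <stdio.h>

#define NR_FIXED 3

/* Mirrors the kernel's fixed_ctrl_field(): 4 control bits per counter. */
static uint8_t fixed_ctrl_field(uint64_t ctrl, int idx)
{
        return (ctrl >> (idx * 4)) & 0xf;
}

int main(void)
{
        uint64_t old_ctrl = 0x0b0;      /* counter 1 enabled (ring 0+3, PMI) */
        uint64_t new_ctrl = 0x0bb;      /* counter 0 newly enabled as well */

        for (int i = 0; i < NR_FIXED; i++) {
                uint8_t o = fixed_ctrl_field(old_ctrl, i);
                uint8_t n = fixed_ctrl_field(new_ctrl, i);

                if (o == n)
                        continue;       /* unchanged: skip the reprogram */
                printf("fixed ctr %d: ctrl %#x -> %#x, reprogram\n", i, o, n);
        }
        return 0;
}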
71 static unsigned int intel_pmc_perf_hw_id(struct kvm_pmc *pmc)
73 struct kvm_pmu *pmu = pmc_to_pmu(pmc);
74 u8 event_select = pmc->eventsel & ARCH_PERFMON_EVENTSEL_EVENT;
75 u8 unit_mask = (pmc->eventsel & ARCH_PERFMON_EVENTSEL_UMASK) >> 8;
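intel_pmc_perf_hw_id() (lines 71-75) maps a raw IA32_PERFEVTSELx value to a perf hardware event by pulling out the 8-bit event select and unit mask and matching the pair against intel_arch_events[]. A sketch of that decode, assuming the standard mask values; the two-entry table is a made-up stand-in for intel_arch_events[].

#include <stdint.h>
#include <stdio.h>

#define ARCH_PERFMON_EVENTSEL_EVENT  0x000000ffULL
#define ARCH_PERFMON_EVENTSEL_UMASK  0x0000ff00ULL

struct arch_event { uint8_t select, umask; const char *name; };

static const struct arch_event events[] = {
        { 0x3c, 0x00, "cpu-cycles" },      /* unhalted core cycles */
        { 0xc0, 0x00, "instructions" },    /* instructions retired */
};

int main(void)
{
        uint64_t eventsel = 0x4300c0;      /* EN | USR | OS + event 0xc0 */
        uint8_t event_select = eventsel & ARCH_PERFMON_EVENTSEL_EVENT;
        uint8_t unit_mask = (eventsel & ARCH_PERFMON_EVENTSEL_UMASK) >> 8;

        for (size_t i = 0; i < sizeof(events) / sizeof(events[0]); i++)
                if (events[i].select == event_select &&
                    events[i].umask == unit_mask)
                        printf("matched %s\n", events[i].name);
        return 0;
}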
103 static bool intel_pmc_is_enabled(struct kvm_pmc *pmc)
105 struct kvm_pmu *pmu = pmc_to_pmu(pmc);
110 return test_bit(pmc->idx, (unsigned long *)&pmu->global_ctrl);
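intel_pmc_is_enabled() (lines 103-110) simply tests the counter's bit in the guest's IA32_PERF_GLOBAL_CTRL image; GP counters occupy the low bits and fixed counters start at bit 32 (INTEL_PMC_IDX_FIXED). A plain-C stand-in for that test_bit() check:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define INTEL_PMC_IDX_FIXED 32

static bool pmc_enabled(uint64_t global_ctrl, unsigned int idx)
{
        return (global_ctrl >> idx) & 1;
}

int main(void)
{
        /* GP counter 0 and fixed counter 1 enabled. */
        uint64_t global_ctrl = (1ULL << 0) | (1ULL << (INTEL_PMC_IDX_FIXED + 1));

        printf("gp0: %d fixed1: %d gp1: %d\n",
               pmc_enabled(global_ctrl, 0),
               pmc_enabled(global_ctrl, INTEL_PMC_IDX_FIXED + 1),
               pmc_enabled(global_ctrl, 1));
        return 0;
}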
205 struct kvm_pmc *pmc;
207 pmc = get_fixed_pmc(pmu, msr);
208 pmc = pmc ? pmc : get_gp_pmc(pmu, msr, MSR_P6_EVNTSEL0);
209 pmc = pmc ? pmc : get_gp_pmc(pmu, msr, MSR_IA32_PERFCTR0);
211 return pmc;
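Lines 205-211 (intel_msr_idx_to_pmc()) resolve an MSR index to a PMC by trying each MSR range in turn and taking the first hit: fixed counters, then legacy event selects, then GP counters. A simplified sketch of the same fall-through, where in_range() is a hypothetical stand-in for get_fixed_pmc()/get_gp_pmc() and the counter counts are assumed:

#include <stdint.h>
#include <stdio.h>

#define MSR_CORE_PERF_FIXED_CTR0 0x309
#define MSR_P6_EVNTSEL0          0x186
#define MSR_IA32_PERFCTR0        0x0c1
#define NR_GP    4
#define NR_FIXED 3

static int in_range(uint32_t msr, uint32_t base, int nr)
{
        return msr >= base && msr < base + nr ? (int)(msr - base) : -1;
}

int main(void)
{
        uint32_t msr = 0x187;           /* EVNTSEL1 */
        int idx;

        idx = in_range(msr, MSR_CORE_PERF_FIXED_CTR0, NR_FIXED);
        if (idx < 0)
                idx = in_range(msr, MSR_P6_EVNTSEL0, NR_GP);
        if (idx < 0)
                idx = in_range(msr, MSR_IA32_PERFCTR0, NR_GP);

        printf("msr %#x -> pmc index %d\n", (unsigned)msr, idx);
        return 0;
}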
217 struct kvm_pmc *pmc;
234 if ((pmc = get_gp_pmc(pmu, msr, MSR_IA32_PERFCTR0)) ||
235 (pmc = get_gp_pmc(pmu, msr, MSR_IA32_PMC0))) {
236 u64 val = pmc_read_counter(pmc);
240 } else if ((pmc = get_fixed_pmc(pmu, msr))) {
241 u64 val = pmc_read_counter(pmc);
245 } else if ((pmc = get_gp_pmc(pmu, msr, MSR_P6_EVNTSEL0))) {
246 msr_info->data = pmc->eventsel;
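In the read path (intel_pmu_get_msr(), lines 217-246) the matched lines show only the raw 64-bit read; in the surrounding unmatched lines that value is masked with pmu->counter_bitmask[] so the guest never sees more counter width than it was advertised. A sketch of that truncation, with a 48-bit GP counter width as an assumed example:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        int gp_width = 48;                           /* from guest CPUID leaf 0xA */
        uint64_t counter_bitmask = (1ULL << gp_width) - 1;
        uint64_t raw = 0xffff800000001234ULL;        /* internal 64-bit value */

        printf("guest sees %#llx\n",
               (unsigned long long)(raw & counter_bitmask));
        return 0;
}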
257 struct kvm_pmc *pmc;
293 if ((pmc = get_gp_pmc(pmu, msr, MSR_IA32_PERFCTR0)) ||
294 (pmc = get_gp_pmc(pmu, msr, MSR_IA32_PMC0))) {
301 pmc->counter += data - pmc_read_counter(pmc);
302 if (pmc->perf_event)
303 perf_event_period(pmc->perf_event,
304 get_sample_period(pmc, data));
306 } else if ((pmc = get_fixed_pmc(pmu, msr))) {
307 pmc->counter += data - pmc_read_counter(pmc);
308 if (pmc->perf_event)
309 perf_event_period(pmc->perf_event,
310 get_sample_period(pmc, data));
312 } else if ((pmc = get_gp_pmc(pmu, msr, MSR_P6_EVNTSEL0))) {
313 if (data == pmc->eventsel)
316 reprogram_gp_counter(pmc, data);
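The write path (intel_pmu_set_msr(), lines 257-316) uses two idioms worth noting: pmc->counter += data - pmc_read_counter(pmc) on lines 301 and 307 rewrites a counter by adjusting the stored base rather than stopping the perf event, and line 313 skips reprogram_gp_counter() entirely when the written eventsel is unchanged. A sketch of the base-adjustment idiom, where hw_delta is a hypothetical stand-in for the live perf_event count:

#include <stdint.h>
#include <stdio.h>

static uint64_t counter_base;   /* pmc->counter in the kernel code */
static uint64_t hw_delta;       /* what the backing perf_event has counted */

static uint64_t pmc_read(void)          /* ~pmc_read_counter() */
{
        return counter_base + hw_delta;
}

static void pmc_write(uint64_t data)    /* the idiom from lines 301/307 */
{
        counter_base += data - pmc_read();
}

int main(void)
{
        hw_delta = 500;                 /* event has been running a while */
        pmc_write(0x1000);              /* guest writes the counter MSR */
        hw_delta += 200;                /* event keeps counting undisturbed */
        printf("guest now reads %#llx\n", (unsigned long long)pmc_read());
        return 0;
}

After the write, the next read returns 0x1000 plus whatever the event counted since, without the event ever being torn down; perf_event_period() is still called (lines 303/309) so the sampling period tracks the new value.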
428 struct kvm_pmc *pmc = NULL;
432 pmc = &pmu->gp_counters[i];
434 pmc_stop_counter(pmc);
435 pmc->counter = pmc->eventsel = 0;
439 pmc = &pmu->fixed_counters[i];
441 pmc_stop_counter(pmc);
442 pmc->counter = 0;
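Finally, intel_pmu_reset() (lines 428-442) stops every counter and zeroes its state; only GP counters clear an eventsel, since fixed counters are controlled through the shared IA32_FIXED_CTR_CTRL MSR instead. A simplified sketch of that pattern (the structures and sizes here are made up):

#include <stdint.h>

struct pmc { uint64_t counter, eventsel; };

struct pmu {
        struct pmc gp[8], fixed[3];
        int nr_gp, nr_fixed;
        uint64_t fixed_ctr_ctrl, global_ctrl, global_status;
};

static void pmc_stop(struct pmc *pmc)
{
        (void)pmc;      /* would release the backing perf_event */
}

static void pmu_reset(struct pmu *pmu)
{
        for (int i = 0; i < pmu->nr_gp; i++) {
                pmc_stop(&pmu->gp[i]);
                pmu->gp[i].counter = pmu->gp[i].eventsel = 0;
        }
        for (int i = 0; i < pmu->nr_fixed; i++) {
                pmc_stop(&pmu->fixed[i]);
                pmu->fixed[i].counter = 0;
        }
        pmu->fixed_ctr_ctrl = pmu->global_ctrl = pmu->global_status = 0;
}

int main(void)
{
        struct pmu pmu = { .nr_gp = 4, .nr_fixed = 3 };
        pmu_reset(&pmu);
        return 0;
}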