Lines matching defs:idxd — cross-reference listing over the Linux kernel's IDXD perfmon PMU driver (drivers/dma/idxd/perfmon.c). Each entry below is the source line number followed by the matching line; the line numbers are referenced by the notes interspersed in this listing.

6 #include "idxd.h"
126 struct idxd_device *idxd = idxd_pmu->idxd;
130 hwc->config_base = ioread64(CNTRCFG_REG(idxd, idx));
131 hwc->event_base = ioread64(CNTRCFG_REG(idxd, idx));
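Lines 126-131 sit inside perfmon_assign_hw_event(), which records the chosen counter index and latches that counter's MMIO config register into the perf-core bookkeeping. A minimal sketch of the surrounding function, reconstructed from the fragments above (struct hw_perf_event comes from <linux/perf_event.h>); note that both bases read CNTRCFG_REG, exactly as lines 130-131 show:

    static void perfmon_assign_hw_event(struct idxd_pmu *idxd_pmu,
                                        struct perf_event *event, int idx)
    {
        struct idxd_device *idxd = idxd_pmu->idxd;
        struct hw_perf_event *hwc = &event->hw;

        hwc->idx = idx;
        /* both bases latch the counter's MMIO config register */
        hwc->config_base = ioread64(CNTRCFG_REG(idxd, idx));
        hwc->event_base = ioread64(CNTRCFG_REG(idxd, idx));
    }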
200 struct idxd_device *idxd;
203 idxd = event_to_idxd(event);
216 if (event->pmu != &idxd->idxd_pmu->pmu)
219 event->hw.event_base = ioread64(PERFMON_TABLE_OFFSET(idxd));
220 event->cpu = idxd->idxd_pmu->cpu;
225 ret = perfmon_validate_group(idxd->idxd_pmu, event);
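Lines 200-225 are the heart of the .event_init callback: resolve the owning device, reject events that belong to a different PMU, record the perfmon table offset and the designated CPU, then validate non-trivial groups. A hedged sketch of the flow; the type and sampling checks are the standard pattern for counter-only PMUs and are assumptions here:

    static int perfmon_pmu_event_init(struct perf_event *event)
    {
        struct idxd_device *idxd;
        int ret = 0;

        idxd = event_to_idxd(event);
        event->hw.idx = -1;

        if (event->attr.type != event->pmu->type)
            return -ENOENT;

        /* sampling not supported on this counter-only PMU (assumption) */
        if (event->attr.sample_period)
            return -EINVAL;

        if (event->pmu != &idxd->idxd_pmu->pmu)
            return -EINVAL;

        event->hw.event_base = ioread64(PERFMON_TABLE_OFFSET(idxd));
        event->cpu = idxd->idxd_pmu->cpu;
        event->hw.config = event->attr.config;

        if (event->group_leader != event)
            /* non-trivial groups must fit the available counters */
            ret = perfmon_validate_group(idxd->idxd_pmu, event);

        return ret;
    }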
233 struct idxd_device *idxd;
236 idxd = event_to_idxd(event);
238 return ioread64(CNTRDATA_REG(idxd, cntr));
243 struct idxd_device *idxd = event_to_idxd(event);
245 int shift = 64 - idxd->idxd_pmu->counter_width;
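Lines 233-245 pair the raw counter read (line 238) with the delta computation. Because the hardware counter is only counter_width bits wide, both raw values are shifted up by 64 - counter_width before subtracting, so a wrap of the narrow counter falls out of two's-complement arithmetic; the logical shift back down recovers the delta. A sketch of the usual perf update loop built on that shift (the local64 prev_count bookkeeping follows the common perf-driver pattern):

    static void perfmon_pmu_event_update(struct perf_event *event)
    {
        struct idxd_device *idxd = event_to_idxd(event);
        u64 prev_raw_count, new_raw_count, delta, p, n;
        int shift = 64 - idxd->idxd_pmu->counter_width;
        struct hw_perf_event *hwc = &event->hw;

        do {
            prev_raw_count = local64_read(&hwc->prev_count);
            new_raw_count = perfmon_pmu_read_counter(event);
        } while (local64_cmpxchg(&hwc->prev_count, prev_raw_count,
                                 new_raw_count) != prev_raw_count);

        n = (new_raw_count << shift);
        p = (prev_raw_count << shift);

        delta = ((n - p) >> shift);

        local64_add(delta, &event->count);
    }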
261 void perfmon_counter_overflow(struct idxd_device *idxd)
267 n_counters = min(idxd->idxd_pmu->n_counters, OVERFLOW_SIZE);
269 ovfstatus = ioread32(OVFSTATUS_REG(idxd));
285 event = idxd->idxd_pmu->event_list[i];
289 iowrite32(ovfstatus_clear, OVFSTATUS_REG(idxd));
292 ovfstatus = ioread32(OVFSTATUS_REG(idxd));
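Lines 261-292 outline the overflow service routine: read OVFSTATUS, walk the set bits across at most OVERFLOW_SIZE counters, update each overflowed event, write the serviced bit back to clear it, then re-read to catch counters that overflowed meanwhile. A sketch of that loop, assuming for_each_set_bit() from <linux/bitops.h>:

    void perfmon_counter_overflow(struct idxd_device *idxd)
    {
        int i, n_counters, max_loop = OVERFLOW_SIZE;
        struct perf_event *event;
        unsigned long ovfstatus;

        n_counters = min(idxd->idxd_pmu->n_counters, OVERFLOW_SIZE);

        ovfstatus = ioread32(OVFSTATUS_REG(idxd));

        /*
         * Re-read after clearing so counters that overflow while the
         * handler runs are not lost; max_loop bounds the retries.
         */
        while (ovfstatus && max_loop--) {
            for_each_set_bit(i, &ovfstatus, n_counters) {
                unsigned long ovfstatus_clear = 0;

                /* update event->count for the overflowed counter */
                event = idxd->idxd_pmu->event_list[i];
                perfmon_pmu_event_update(event);

                /* write the serviced bit back to OVFSTATUS (line 289) */
                set_bit(i, &ovfstatus_clear);
                iowrite32(ovfstatus_clear, OVFSTATUS_REG(idxd));
            }

            ovfstatus = ioread32(OVFSTATUS_REG(idxd));
        }
    }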
302 static inline void perfmon_reset_config(struct idxd_device *idxd)
304 iowrite32(CONFIG_RESET, PERFRST_REG(idxd));
305 iowrite32(0, OVFSTATUS_REG(idxd));
306 iowrite32(0, PERFFRZ_REG(idxd));
309 static inline void perfmon_reset_counters(struct idxd_device *idxd)
311 iowrite32(CNTR_RESET, PERFRST_REG(idxd));
314 static inline void perfmon_reset(struct idxd_device *idxd)
316 perfmon_reset_config(idxd);
317 perfmon_reset_counters(idxd);
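Lines 302-317 are shown in full; as a reading aid, the composition at device bring-up (the call site is line 581, later in this listing) is:

    /*
     * Bring the perfmon block to a known state before reading
     * capabilities (intent per the fragments above):
     *   PERFRST   <- CONFIG_RESET   reset event/filter configuration
     *   OVFSTATUS <- 0              clear stale overflow status
     *   PERFFRZ   <- 0              leave no counters frozen
     *   PERFRST   <- CNTR_RESET     zero the counters themselves
     */
    perfmon_reset(idxd);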
327 struct idxd_device *idxd;
330 idxd = event_to_idxd(event);
348 if (flt_wq && test_bit(FLT_WQ, &idxd->idxd_pmu->supported_filters))
349 iowrite32(flt_wq, FLTCFG_REG(idxd, cntr, FLT_WQ));
350 if (flt_tc && test_bit(FLT_TC, &idxd->idxd_pmu->supported_filters))
351 iowrite32(flt_tc, FLTCFG_REG(idxd, cntr, FLT_TC));
352 if (flt_pg_sz && test_bit(FLT_PG_SZ, &idxd->idxd_pmu->supported_filters))
353 iowrite32(flt_pg_sz, FLTCFG_REG(idxd, cntr, FLT_PG_SZ));
354 if (flt_xfer_sz && test_bit(FLT_XFER_SZ, &idxd->idxd_pmu->supported_filters))
355 iowrite32(flt_xfer_sz, FLTCFG_REG(idxd, cntr, FLT_XFER_SZ));
356 if (flt_eng && test_bit(FLT_ENG, &idxd->idxd_pmu->supported_filters))
357 iowrite32(flt_eng, FLTCFG_REG(idxd, cntr, FLT_ENG));
360 cntrdata = ioread64(CNTRDATA_REG(idxd, cntr));
369 iowrite64(cntr_cfg, CNTRCFG_REG(idxd, cntr));
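Lines 327-369 come from the event-start path: each filter value is programmed into FLTCFG only when the device advertises it in supported_filters, the counter's current value is sampled as the starting point, and finally the counter-config register is armed. A sketch; the union layouts and the CNTRCFG_* shift/enable macros are assumptions, everything else follows the fragments:

    static void perfmon_pmu_event_start(struct perf_event *event, int mode)
    {
        u32 flt_wq, flt_tc, flt_pg_sz, flt_xfer_sz, flt_eng;
        u64 cntr_cfg, cntrdata, event_enc, event_cat;
        struct hw_perf_event *hwc = &event->hw;
        union filter_cfg flt_cfg;    /* layout assumed: wq/tc/pg_sz/xfer_sz/eng */
        union event_cfg event_cfg;   /* layout assumed: event_cat/event_enc */
        struct idxd_device *idxd;
        int cntr;

        idxd = event_to_idxd(event);

        event->hw.idx = hwc->idx;
        cntr = hwc->idx;

        /* event category/encoding and filters arrive via config/config1 */
        event_cfg.val = event->attr.config;
        event_cat = event_cfg.event_cat;
        event_enc = event_cfg.event_enc;

        flt_cfg.val = event->attr.config1;
        flt_wq = flt_cfg.wq;
        flt_tc = flt_cfg.tc;
        flt_pg_sz = flt_cfg.pg_sz;
        flt_xfer_sz = flt_cfg.xfer_sz;
        flt_eng = flt_cfg.eng;

        /* program only the filters this device supports (lines 348-357) */
        if (flt_wq && test_bit(FLT_WQ, &idxd->idxd_pmu->supported_filters))
            iowrite32(flt_wq, FLTCFG_REG(idxd, cntr, FLT_WQ));
        /* ... likewise for FLT_TC, FLT_PG_SZ, FLT_XFER_SZ, FLT_ENG ... */

        /* sample the counter's current value as the starting point (line 360) */
        cntrdata = ioread64(CNTRDATA_REG(idxd, cntr));
        local64_set(&event->hw.prev_count, cntrdata);

        /* arm the counter: event select, overflow IRQ, enable (line 369) */
        cntr_cfg = event_cat << CNTRCFG_CATEGORY_SHIFT;
        cntr_cfg |= event_enc << CNTRCFG_EVENT_SHIFT;
        cntr_cfg |= CNTRCFG_IRQ_OVERFLOW | CNTRCFG_ENABLE;
        iowrite64(cntr_cfg, CNTRCFG_REG(idxd, cntr));
    }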
375 struct idxd_device *idxd;
379 idxd = event_to_idxd(event);
382 for (i = 0; i < idxd->idxd_pmu->n_events; i++) {
383 if (event != idxd->idxd_pmu->event_list[i])
386 for (++i; i < idxd->idxd_pmu->n_events; i++)
387 idxd->idxd_pmu->event_list[i - 1] = idxd->idxd_pmu->event_list[i];
388 --idxd->idxd_pmu->n_events;
392 cntr_cfg = ioread64(CNTRCFG_REG(idxd, cntr));
394 iowrite64(cntr_cfg, CNTRCFG_REG(idxd, cntr));
400 clear_bit(cntr, idxd->idxd_pmu->used_mask);
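Lines 375-400 show the stop path: compact the PMU's event_list by shifting later entries down, clear the enable bit in the counter's config register, fold in a final count update, and release the counter in used_mask. Sketch (CNTRCFG_ENABLE is an assumed macro name):

    static void perfmon_pmu_event_stop(struct perf_event *event, int mode)
    {
        struct hw_perf_event *hwc = &event->hw;
        struct idxd_device *idxd;
        int i, cntr = hwc->idx;
        u64 cntr_cfg;

        idxd = event_to_idxd(event);

        /* remove this event from the event list, compacting the tail */
        for (i = 0; i < idxd->idxd_pmu->n_events; i++) {
            if (event != idxd->idxd_pmu->event_list[i])
                continue;

            for (++i; i < idxd->idxd_pmu->n_events; i++)
                idxd->idxd_pmu->event_list[i - 1] = idxd->idxd_pmu->event_list[i];
            --idxd->idxd_pmu->n_events;
            break;
        }

        /* drop the enable bit in the counter's config register (lines 392-394) */
        cntr_cfg = ioread64(CNTRCFG_REG(idxd, cntr));
        cntr_cfg &= ~CNTRCFG_ENABLE;
        iowrite64(cntr_cfg, CNTRCFG_REG(idxd, cntr));

        if (mode == PERF_EF_UPDATE)
            perfmon_pmu_event_update(event);

        event->hw.idx = -1;
        clear_bit(cntr, idxd->idxd_pmu->used_mask);
    }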
410 struct idxd_device *idxd = event_to_idxd(event);
411 struct idxd_pmu *idxd_pmu = idxd->idxd_pmu;
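Lines 410-411 open the .add callback, which pairs with the start/stop paths above: collect the event into the PMU's list, pick a free counter, latch its registers via perfmon_assign_hw_event(), and optionally start counting. The helper names perfmon_collect_events() and perfmon_assign_event() below are assumptions; the rest follows the standard perf add contract:

    static int perfmon_pmu_event_add(struct perf_event *event, int flags)
    {
        struct idxd_device *idxd = event_to_idxd(event);
        struct idxd_pmu *idxd_pmu = idxd->idxd_pmu;
        struct hw_perf_event *hwc = &event->hw;
        int idx, n;

        n = perfmon_collect_events(idxd_pmu, event, false);   /* name assumed */
        if (n < 0)
            return n;

        hwc->state = PERF_HES_UPTODATE | PERF_HES_STOPPED;
        if (!(flags & PERF_EF_START))
            hwc->state |= PERF_HES_ARCH;

        idx = perfmon_assign_event(idxd_pmu, event);           /* name assumed */
        if (idx < 0)
            return idx;

        perfmon_assign_hw_event(idxd_pmu, event, idx);

        if (flags & PERF_EF_START)
            perfmon_pmu_event_start(event, 0);

        idxd_pmu->n_events = n;

        return 0;
    }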
437 static void enable_perfmon_pmu(struct idxd_device *idxd)
439 iowrite32(COUNTER_UNFREEZE, PERFFRZ_REG(idxd));
442 static void disable_perfmon_pmu(struct idxd_device *idxd)
444 iowrite32(COUNTER_FREEZE, PERFFRZ_REG(idxd));
449 struct idxd_device *idxd = pmu_to_idxd(pmu);
451 enable_perfmon_pmu(idxd);
456 struct idxd_device *idxd = pmu_to_idxd(pmu);
458 disable_perfmon_pmu(idxd);
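Lines 437-458 are the PMU-wide freeze/unfreeze pair and the callbacks that call them; the perf core invokes .pmu_disable/.pmu_enable around counter reprogramming so register updates happen while counting is frozen. Sketch of the wiring (callback names assumed):

    static void perfmon_pmu_enable(struct pmu *pmu)
    {
        struct idxd_device *idxd = pmu_to_idxd(pmu);

        enable_perfmon_pmu(idxd);
    }

    static void perfmon_pmu_disable(struct pmu *pmu)
    {
        struct idxd_device *idxd = pmu_to_idxd(pmu);

        disable_perfmon_pmu(idxd);
    }

    /* wired up at registration time, e.g.: */
    idxd_pmu->pmu.pmu_enable  = perfmon_pmu_enable;
    idxd_pmu->pmu.pmu_disable = perfmon_pmu_disable;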
494 void perfmon_pmu_remove(struct idxd_device *idxd)
496 if (!idxd->idxd_pmu)
499 cpuhp_state_remove_instance(cpuhp_slot, &idxd->idxd_pmu->cpuhp_node);
500 perf_pmu_unregister(&idxd->idxd_pmu->pmu);
501 kfree(idxd->idxd_pmu);
502 idxd->idxd_pmu = NULL;
543 int perfmon_pmu_init(struct idxd_device *idxd)
559 if (idxd->perfmon_offset == 0)
566 idxd_pmu->idxd = idxd;
567 idxd->idxd_pmu = idxd_pmu;
569 if (idxd->data->type == IDXD_TYPE_DSA) {
570 rc = sprintf(idxd_pmu->name, "dsa%d", idxd->id);
573 } else if (idxd->data->type == IDXD_TYPE_IAX) {
574 rc = sprintf(idxd_pmu->name, "iax%d", idxd->id);
581 perfmon_reset(idxd);
583 perfcap.bits = ioread64(PERFCAP_REG(idxd));
632 perf_pmu_unregister(&idxd->idxd_pmu->pmu);
639 idxd->idxd_pmu = NULL;
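Lines 543-583, together with the 632/639 error path, trace perfmon_pmu_init(): bail out when the device exposes no perfmon register block, allocate and link the idxd_pmu, derive the PMU name from the device type and instance id, reset the hardware, then read PERFCAP to discover the counter geometry. A condensed sketch; the perfcap union and the exact unwind labels are assumptions:

    int perfmon_pmu_init(struct idxd_device *idxd)
    {
        union idxd_perfcap perfcap;   /* field layout assumed */
        struct idxd_pmu *idxd_pmu;
        int rc = -ENODEV;

        /* no perfmon register block advertised for this device (line 559) */
        if (idxd->perfmon_offset == 0)
            return -ENODEV;

        idxd_pmu = kzalloc(sizeof(*idxd_pmu), GFP_KERNEL);
        if (!idxd_pmu)
            return -ENOMEM;

        idxd_pmu->idxd = idxd;
        idxd->idxd_pmu = idxd_pmu;

        /* PMU name tracks the device type and instance id (lines 569-574) */
        if (idxd->data->type == IDXD_TYPE_DSA)
            rc = sprintf(idxd_pmu->name, "dsa%d", idxd->id);
        else if (idxd->data->type == IDXD_TYPE_IAX)
            rc = sprintf(idxd_pmu->name, "iax%d", idxd->id);
        if (rc < 0)
            goto free;

        perfmon_reset(idxd);

        perfcap.bits = ioread64(PERFCAP_REG(idxd));
        /*
         * ... populate counter_width, n_counters and supported_filters
         * from perfcap, register the pmu (unregistered at line 632 on
         * failure) and add the cpuhp instance ...
         */
        return 0;

    free:
        kfree(idxd_pmu);
        idxd->idxd_pmu = NULL;   /* mirrors line 639 */
        return rc;
    }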
647 "driver/dma/idxd/perf:online",