Lines Matching refs:tx2_pmu

112 struct tx2_uncore_pmu *tx2_pmu);
254 struct tx2_uncore_pmu *tx2_pmu;
256 tx2_pmu = pmu_to_tx2_pmu(dev_get_drvdata(dev));
257 return cpumap_print_to_pagebuf(true, buf, cpumask_of(tx2_pmu->cpu));
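
Lines 254-257 above are the body of the sysfs "cpumask" attribute that perf tooling reads to find the collecting CPU. A minimal reconstruction; the show-function signature is the standard device-attribute prototype, assumed here:

/* Sketch around lines 254-257: sysfs "cpumask" read hook. */
static ssize_t cpumask_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct tx2_uncore_pmu *tx2_pmu;

	tx2_pmu = pmu_to_tx2_pmu(dev_get_drvdata(dev));
	return cpumap_print_to_pagebuf(true, buf, cpumask_of(tx2_pmu->cpu));
}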
304 static int alloc_counter(struct tx2_uncore_pmu *tx2_pmu)
308 counter = find_first_zero_bit(tx2_pmu->active_counters,
309 tx2_pmu->max_counters);
310 if (counter == tx2_pmu->max_counters)
313 set_bit(counter, tx2_pmu->active_counters);
317 static inline void free_counter(struct tx2_uncore_pmu *tx2_pmu, int counter)
319 clear_bit(counter, tx2_pmu->active_counters);
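
Lines 304-319 show a simple bitmap allocator for the hardware counters. A minimal sketch reconstructed around those fragments; the -ENOSPC error path is assumed:

/* Sketch of lines 304-319: active_counters is assumed to be a
 * DECLARE_BITMAP() member of struct tx2_uncore_pmu.
 */
static int alloc_counter(struct tx2_uncore_pmu *tx2_pmu)
{
	int counter;

	counter = find_first_zero_bit(tx2_pmu->active_counters,
			tx2_pmu->max_counters);
	if (counter == tx2_pmu->max_counters)
		return -ENOSPC;		/* all counters in use (assumed) */

	set_bit(counter, tx2_pmu->active_counters);
	return counter;
}

static inline void free_counter(struct tx2_uncore_pmu *tx2_pmu, int counter)
{
	clear_bit(counter, tx2_pmu->active_counters);
}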
323 struct tx2_uncore_pmu *tx2_pmu)
328 tx2_pmu = pmu_to_tx2_pmu(event->pmu);
329 cmask = tx2_pmu->counters_mask;
332 hwc->config_base = (unsigned long)tx2_pmu->base
334 hwc->event_base = (unsigned long)tx2_pmu->base
339 struct tx2_uncore_pmu *tx2_pmu)
344 tx2_pmu = pmu_to_tx2_pmu(event->pmu);
345 cmask = tx2_pmu->counters_mask;
347 hwc->config_base = (unsigned long)tx2_pmu->base
350 hwc->event_base = (unsigned long)tx2_pmu->base
355 struct tx2_uncore_pmu *tx2_pmu)
360 cmask = tx2_pmu->counters_mask;
362 hwc->config_base = (unsigned long)tx2_pmu->base
364 hwc->event_base = (unsigned long)tx2_pmu->base;
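
The three init_cntr_base_* variants (lines 321-364) all derive per-counter MMIO addresses from the block's base mapping and stash them in hw_perf_event. A hedged sketch of the shared pattern; CTL_OFFSET, DATA_OFFSET and the 8-byte stride are illustrative placeholders, only GET_COUNTERID() appears in the listing:

/* Sketch of the pattern at lines 321-364; register offsets are placeholders. */
static void init_cntr_base(struct perf_event *event,
		struct tx2_uncore_pmu *tx2_pmu)
{
	struct hw_perf_event *hwc = &event->hw;
	u32 cmask = tx2_pmu->counters_mask;

	/* control register of the assigned counter */
	hwc->config_base = (unsigned long)tx2_pmu->base
		+ CTL_OFFSET + (8 * GET_COUNTERID(event, cmask));
	/* data (count) register of the assigned counter */
	hwc->event_base = (unsigned long)tx2_pmu->base
		+ DATA_OFFSET + (8 * GET_COUNTERID(event, cmask));
}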
371 struct tx2_uncore_pmu *tx2_pmu;
373 tx2_pmu = pmu_to_tx2_pmu(event->pmu);
374 emask = tx2_pmu->events_mask;
392 struct tx2_uncore_pmu *tx2_pmu;
395 tx2_pmu = pmu_to_tx2_pmu(event->pmu);
396 cmask = tx2_pmu->counters_mask;
397 emask = tx2_pmu->events_mask;
417 struct tx2_uncore_pmu *tx2_pmu;
420 tx2_pmu = pmu_to_tx2_pmu(event->pmu);
421 cmask = tx2_pmu->counters_mask;
434 struct tx2_uncore_pmu *tx2_pmu;
436 tx2_pmu = pmu_to_tx2_pmu(event->pmu);
437 emask = tx2_pmu->events_mask;
467 struct tx2_uncore_pmu *tx2_pmu;
472 tx2_pmu = pmu_to_tx2_pmu(event->pmu);
473 type = tx2_pmu->type;
474 cmask = tx2_pmu->counters_mask;
475 emask = tx2_pmu->events_mask;
476 prorate_factor = tx2_pmu->prorate_factor;
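
Lines 467-476 belong to the counter read-out path: the raw delta is scaled by prorate_factor (the number of L3 tiles or DMC channels the one visible counter stands in for). A rough sketch of that accumulation, assuming a 32-bit rolling counter and eliding the CCPI2 special case:

/* Sketch around lines 472-476: rollover-safe delta, then proration. */
static void tx2_uncore_event_update(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	struct tx2_uncore_pmu *tx2_pmu = pmu_to_tx2_pmu(event->pmu);
	u32 prorate_factor = tx2_pmu->prorate_factor;
	u64 prev, delta, new;

	new = readl((void __iomem *)hwc->event_base);	/* assumed 32-bit read */
	prev = local64_xchg(&hwc->prev_count, new);

	/* handle rollover of the 32-bit hardware counter */
	delta = (u32)(((1ULL << 32) - prev) + new);

	/* scale the single counter up to the whole node */
	local64_add(delta * prorate_factor, &event->count);
}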
575 struct tx2_uncore_pmu *tx2_pmu;
592 tx2_pmu = pmu_to_tx2_pmu(event->pmu);
593 if (tx2_pmu->cpu >= nr_cpu_ids)
595 event->cpu = tx2_pmu->cpu;
597 if (event->attr.config >= tx2_pmu->max_events)
604 if (!tx2_uncore_validate_event_group(event, tx2_pmu->max_counters))
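
The event_init fragments (lines 575-604) are the usual uncore validation steps: pin the event to the PMU's designated CPU, bounds-check the event id, and verify the group fits in the available counters. Sketch; the sampling and per-task rejections are standard uncore boilerplate, assumed here:

/* Sketch of lines 592-604 plus assumed boilerplate checks. */
static int tx2_uncore_event_init(struct perf_event *event)
{
	struct tx2_uncore_pmu *tx2_pmu = pmu_to_tx2_pmu(event->pmu);

	/* uncore counters do not sample and are not per-task (assumed) */
	if (is_sampling_event(event) || event->attach_state & PERF_ATTACH_TASK)
		return -EINVAL;

	if (tx2_pmu->cpu >= nr_cpu_ids)
		return -EINVAL;
	event->cpu = tx2_pmu->cpu;

	if (event->attr.config >= tx2_pmu->max_events)
		return -EINVAL;

	if (!tx2_uncore_validate_event_group(event, tx2_pmu->max_counters))
		return -EINVAL;

	return 0;
}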
613 struct tx2_uncore_pmu *tx2_pmu;
616 tx2_pmu = pmu_to_tx2_pmu(event->pmu);
618 tx2_pmu->start_event(event, flags);
622 if (!tx2_pmu->hrtimer_callback)
626 if (bitmap_weight(tx2_pmu->active_counters,
627 tx2_pmu->max_counters) == 1) {
628 hrtimer_start(&tx2_pmu->hrtimer,
629 ns_to_ktime(tx2_pmu->hrtimer_interval),
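
Lines 613-629 start the hardware counter and, because these PMUs have no overflow interrupt, arm a polling hrtimer exactly once, when the first counter becomes active (bitmap_weight() == 1), so the timer only runs while something is counting. Sketch; the HRTIMER_MODE_REL_PINNED mode is an assumption:

/* Sketch of lines 616-629. */
static void tx2_uncore_event_start(struct perf_event *event, int flags)
{
	struct tx2_uncore_pmu *tx2_pmu = pmu_to_tx2_pmu(event->pmu);

	event->hw.state = 0;
	tx2_pmu->start_event(event, flags);
	perf_event_update_userpage(event);

	/* no polling timer on this block (CCPI2): done */
	if (!tx2_pmu->hrtimer_callback)
		return;

	/* first active counter: start polling */
	if (bitmap_weight(tx2_pmu->active_counters,
			tx2_pmu->max_counters) == 1)
		hrtimer_start(&tx2_pmu->hrtimer,
			ns_to_ktime(tx2_pmu->hrtimer_interval),
			HRTIMER_MODE_REL_PINNED);	/* mode assumed */
}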
637 struct tx2_uncore_pmu *tx2_pmu;
642 tx2_pmu = pmu_to_tx2_pmu(event->pmu);
643 tx2_pmu->stop_event(event);
655 struct tx2_uncore_pmu *tx2_pmu;
657 tx2_pmu = pmu_to_tx2_pmu(event->pmu);
660 hwc->idx = alloc_counter(tx2_pmu);
664 tx2_pmu->events[hwc->idx] = event;
666 tx2_pmu->init_cntr_base(event, tx2_pmu);
677 struct tx2_uncore_pmu *tx2_pmu = pmu_to_tx2_pmu(event->pmu);
681 cmask = tx2_pmu->counters_mask;
685 free_counter(tx2_pmu, GET_COUNTERID(event, cmask));
688 tx2_pmu->events[hwc->idx] = NULL;
691 if (!tx2_pmu->hrtimer_callback)
694 if (bitmap_empty(tx2_pmu->active_counters, tx2_pmu->max_counters))
695 hrtimer_cancel(&tx2_pmu->hrtimer);
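
The matching teardown at lines 677-695: del frees the counter back to the bitmap and cancels the hrtimer once the last active counter is gone, mirroring the start-on-first logic above. Sketch:

/* Sketch of lines 677-695. */
static void tx2_uncore_event_del(struct perf_event *event, int flags)
{
	struct tx2_uncore_pmu *tx2_pmu = pmu_to_tx2_pmu(event->pmu);
	struct hw_perf_event *hwc = &event->hw;
	u32 cmask = tx2_pmu->counters_mask;

	tx2_uncore_event_stop(event, PERF_EF_UPDATE);	/* assumed stop call */

	/* clear the assigned counter */
	free_counter(tx2_pmu, GET_COUNTERID(event, cmask));

	tx2_pmu->events[hwc->idx] = NULL;
	hwc->idx = -1;

	if (!tx2_pmu->hrtimer_callback)
		return;

	/* last active counter gone: stop polling */
	if (bitmap_empty(tx2_pmu->active_counters, tx2_pmu->max_counters))
		hrtimer_cancel(&tx2_pmu->hrtimer);
}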
705 struct tx2_uncore_pmu *tx2_pmu;
708 tx2_pmu = container_of(timer, struct tx2_uncore_pmu, hrtimer);
709 max_counters = tx2_pmu->max_counters;
711 if (bitmap_empty(tx2_pmu->active_counters, max_counters))
714 for_each_set_bit(idx, tx2_pmu->active_counters, max_counters) {
715 struct perf_event *event = tx2_pmu->events[idx];
719 hrtimer_forward_now(timer, ns_to_ktime(tx2_pmu->hrtimer_interval));
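
The polling callback (lines 705-719) walks every active counter, folds its current value into the perf event, and re-arms itself; hrtimer_interval must be short enough that a 32-bit counter cannot wrap twice between polls. Sketch (the per-event update call is assumed to be the read-out path above):

/* Sketch of lines 708-719. */
static enum hrtimer_restart tx2_hrtimer_callback(struct hrtimer *timer)
{
	struct tx2_uncore_pmu *tx2_pmu =
		container_of(timer, struct tx2_uncore_pmu, hrtimer);
	int max_counters = tx2_pmu->max_counters;
	int idx;

	if (bitmap_empty(tx2_pmu->active_counters, max_counters))
		return HRTIMER_NORESTART;

	for_each_set_bit(idx, tx2_pmu->active_counters, max_counters) {
		struct perf_event *event = tx2_pmu->events[idx];

		tx2_uncore_event_update(event);	/* assumed callee */
	}
	hrtimer_forward_now(timer, ns_to_ktime(tx2_pmu->hrtimer_interval));
	return HRTIMER_RESTART;
}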
724 struct tx2_uncore_pmu *tx2_pmu)
726 struct device *dev = tx2_pmu->dev;
727 char *name = tx2_pmu->name;
730 tx2_pmu->pmu = (struct pmu) {
732 .attr_groups = tx2_pmu->attr_groups,
743 tx2_pmu->pmu.name = devm_kasprintf(dev, GFP_KERNEL,
746 return perf_pmu_register(&tx2_pmu->pmu, tx2_pmu->pmu.name, -1);
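
Lines 724-746 fill in the struct pmu and register it. The callback wiring below is inferred from function names elsewhere in this listing and should be read as an assumption:

/* Sketch of lines 726-746; callback names inferred from this listing. */
static int tx2_uncore_pmu_register(struct tx2_uncore_pmu *tx2_pmu)
{
	struct device *dev = tx2_pmu->dev;
	char *name = tx2_pmu->name;

	tx2_pmu->pmu = (struct pmu) {
		.attr_groups	= tx2_pmu->attr_groups,
		.task_ctx_nr	= perf_invalid_context,
		.event_init	= tx2_uncore_event_init,
		.add		= tx2_uncore_event_add,
		.del		= tx2_uncore_event_del,
		.start		= tx2_uncore_event_start,
		.stop		= tx2_uncore_event_stop,
		.read		= tx2_uncore_event_read,
	};

	tx2_pmu->pmu.name = devm_kasprintf(dev, GFP_KERNEL,
			"%s", name);

	return perf_pmu_register(&tx2_pmu->pmu, tx2_pmu->pmu.name, -1);
}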
749 static int tx2_uncore_pmu_add_dev(struct tx2_uncore_pmu *tx2_pmu)
753 cpu = cpumask_any_and(cpumask_of_node(tx2_pmu->node),
756 tx2_pmu->cpu = cpu;
758 if (tx2_pmu->hrtimer_callback) {
759 hrtimer_init(&tx2_pmu->hrtimer,
761 tx2_pmu->hrtimer.function = tx2_pmu->hrtimer_callback;
764 ret = tx2_uncore_pmu_register(tx2_pmu);
766 dev_err(tx2_pmu->dev, "%s PMU: Failed to init driver\n",
767 tx2_pmu->name);
774 &tx2_pmu->hpnode);
776 dev_err(tx2_pmu->dev, "Error %d registering hotplug", ret);
781 list_add(&tx2_pmu->entry, &tx2_pmus);
783 dev_dbg(tx2_pmu->dev, "%s PMU UNCORE registered\n",
784 tx2_pmu->pmu.name);
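
tx2_uncore_pmu_add_dev (lines 749-784) binds the PMU to one online CPU on its NUMA node, prepares the optional polling timer, registers the PMU, and hooks it into CPU hotplug. Sketch; the cpuhp state constant is an assumption:

/* Sketch of lines 749-784. */
static int tx2_uncore_pmu_add_dev(struct tx2_uncore_pmu *tx2_pmu)
{
	int ret, cpu;

	/* collecting CPU: any online CPU on the PMU's own node */
	cpu = cpumask_any_and(cpumask_of_node(tx2_pmu->node),
			cpu_online_mask);
	tx2_pmu->cpu = cpu;

	if (tx2_pmu->hrtimer_callback) {
		hrtimer_init(&tx2_pmu->hrtimer,
				CLOCK_MONOTONIC, HRTIMER_MODE_REL);
		tx2_pmu->hrtimer.function = tx2_pmu->hrtimer_callback;
	}

	ret = tx2_uncore_pmu_register(tx2_pmu);
	if (ret) {
		dev_err(tx2_pmu->dev, "%s PMU: Failed to init driver\n",
				tx2_pmu->name);
		return -ENODEV;
	}

	/* state name assumed; the listing only shows &tx2_pmu->hpnode */
	ret = cpuhp_state_add_instance(
			CPUHP_AP_PERF_ARM_CAVIUM_TX2_UNCORE_ONLINE,
			&tx2_pmu->hpnode);
	if (ret) {
		dev_err(tx2_pmu->dev, "Error %d registering hotplug", ret);
		return ret;
	}

	list_add(&tx2_pmu->entry, &tx2_pmus);
	dev_dbg(tx2_pmu->dev, "%s PMU UNCORE registered\n",
			tx2_pmu->pmu.name);
	return ret;
}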
791 struct tx2_uncore_pmu *tx2_pmu;
823 tx2_pmu = devm_kzalloc(dev, sizeof(*tx2_pmu), GFP_KERNEL);
824 if (!tx2_pmu)
827 tx2_pmu->dev = dev;
828 tx2_pmu->type = type;
829 tx2_pmu->base = base;
830 tx2_pmu->node = dev_to_node(dev);
831 INIT_LIST_HEAD(&tx2_pmu->entry);
833 switch (tx2_pmu->type) {
835 tx2_pmu->max_counters = TX2_PMU_DMC_L3C_MAX_COUNTERS;
836 tx2_pmu->counters_mask = 0x3;
837 tx2_pmu->prorate_factor = TX2_PMU_L3_TILES;
838 tx2_pmu->max_events = L3_EVENT_MAX;
839 tx2_pmu->events_mask = 0x1f;
840 tx2_pmu->hrtimer_interval = TX2_PMU_HRTIMER_INTERVAL;
841 tx2_pmu->hrtimer_callback = tx2_hrtimer_callback;
842 tx2_pmu->attr_groups = l3c_pmu_attr_groups;
843 tx2_pmu->name = devm_kasprintf(dev, GFP_KERNEL,
844 "uncore_l3c_%d", tx2_pmu->node);
845 tx2_pmu->init_cntr_base = init_cntr_base_l3c;
846 tx2_pmu->start_event = uncore_start_event_l3c;
847 tx2_pmu->stop_event = uncore_stop_event_l3c;
850 tx2_pmu->max_counters = TX2_PMU_DMC_L3C_MAX_COUNTERS;
851 tx2_pmu->counters_mask = 0x3;
852 tx2_pmu->prorate_factor = TX2_PMU_DMC_CHANNELS;
853 tx2_pmu->max_events = DMC_EVENT_MAX;
854 tx2_pmu->events_mask = 0x1f;
855 tx2_pmu->hrtimer_interval = TX2_PMU_HRTIMER_INTERVAL;
856 tx2_pmu->hrtimer_callback = tx2_hrtimer_callback;
857 tx2_pmu->attr_groups = dmc_pmu_attr_groups;
858 tx2_pmu->name = devm_kasprintf(dev, GFP_KERNEL,
859 "uncore_dmc_%d", tx2_pmu->node);
860 tx2_pmu->init_cntr_base = init_cntr_base_dmc;
861 tx2_pmu->start_event = uncore_start_event_dmc;
862 tx2_pmu->stop_event = uncore_stop_event_dmc;
866 tx2_pmu->max_counters = TX2_PMU_CCPI2_MAX_COUNTERS;
867 tx2_pmu->counters_mask = 0x7;
868 tx2_pmu->prorate_factor = 1;
869 tx2_pmu->max_events = CCPI2_EVENT_MAX;
870 tx2_pmu->events_mask = 0x1ff;
871 tx2_pmu->attr_groups = ccpi2_pmu_attr_groups;
872 tx2_pmu->name = devm_kasprintf(dev, GFP_KERNEL,
873 "uncore_ccpi2_%d", tx2_pmu->node);
874 tx2_pmu->init_cntr_base = init_cntr_base_ccpi2;
875 tx2_pmu->start_event = uncore_start_event_ccpi2;
876 tx2_pmu->stop_event = uncore_stop_event_ccpi2;
877 tx2_pmu->hrtimer_callback = NULL;
880 devm_kfree(dev, tx2_pmu);
884 return tx2_pmu;
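
The switch at lines 833-877 is pure per-block configuration: the same fields are filled for L3C, DMC and CCPI2, differing in counter count, masks, proration factor, and whether a polling timer is needed (CCPI2 sets hrtimer_callback to NULL). The case labels and break statements are missing from the listing; assuming the obvious shape:

/* Sketch of the switch shape at lines 833-880; PMU_TYPE_* labels assumed. */
switch (tx2_pmu->type) {
case PMU_TYPE_L3C:
	/* fields as listed at lines 835-847 */
	break;
case PMU_TYPE_DMC:
	/* fields as listed at lines 850-862 */
	break;
case PMU_TYPE_CCPI2:
	/* fields as listed at lines 866-876; no polling timer,
	 * presumably because its counters are too wide to wrap quickly */
	tx2_pmu->hrtimer_callback = NULL;
	break;
default:
	devm_kfree(dev, tx2_pmu);
	return NULL;
}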
891 struct tx2_uncore_pmu *tx2_pmu;
901 tx2_pmu = tx2_uncore_pmu_init_dev((struct device *)data,
904 if (!tx2_pmu)
907 if (tx2_uncore_pmu_add_dev(tx2_pmu)) {
917 struct tx2_uncore_pmu *tx2_pmu;
919 tx2_pmu = hlist_entry_safe(hpnode,
925 if ((tx2_pmu->cpu >= nr_cpu_ids) &&
926 (tx2_pmu->node == cpu_to_node(cpu)))
927 tx2_pmu->cpu = cpu;
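
The hotplug-online callback (lines 917-927) only adopts a CPU if the PMU is currently orphaned (cpu >= nr_cpu_ids) and the incoming CPU sits on the same node. Reconstructed; the return value is assumed:

/* Sketch of lines 917-927. */
static int tx2_uncore_pmu_online_cpu(unsigned int cpu,
		struct hlist_node *hpnode)
{
	struct tx2_uncore_pmu *tx2_pmu;

	tx2_pmu = hlist_entry_safe(hpnode,
			struct tx2_uncore_pmu, hpnode);

	/* adopt this CPU only if none is assigned and the node matches */
	if ((tx2_pmu->cpu >= nr_cpu_ids) &&
		(tx2_pmu->node == cpu_to_node(cpu)))
		tx2_pmu->cpu = cpu;

	return 0;
}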
936 struct tx2_uncore_pmu *tx2_pmu;
939 tx2_pmu = hlist_entry_safe(hpnode,
942 if (cpu != tx2_pmu->cpu)
945 if (tx2_pmu->hrtimer_callback)
946 hrtimer_cancel(&tx2_pmu->hrtimer);
951 cpumask_of_node(tx2_pmu->node),
954 tx2_pmu->cpu = new_cpu;
957 perf_pmu_migrate_context(&tx2_pmu->pmu, cpu, new_cpu);
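
On hotplug-out of the collecting CPU (lines 936-957) the driver cancels any polling timer, elects a replacement on the same node, and migrates the perf context so events keep counting. Sketch; the outgoing CPU must be excluded from the election, and the on-stack cpumask below is illustrative only:

/* Sketch of lines 939-957. */
static int tx2_uncore_pmu_offline_cpu(unsigned int cpu,
		struct hlist_node *hpnode)
{
	struct tx2_uncore_pmu *tx2_pmu;
	cpumask_t online;
	int new_cpu;

	tx2_pmu = hlist_entry_safe(hpnode,
			struct tx2_uncore_pmu, hpnode);
	if (cpu != tx2_pmu->cpu)
		return 0;	/* not our collecting CPU */

	if (tx2_pmu->hrtimer_callback)
		hrtimer_cancel(&tx2_pmu->hrtimer);

	/* pick another online CPU on the same node, excluding ourselves */
	cpumask_copy(&online, cpu_online_mask);
	cpumask_clear_cpu(cpu, &online);
	new_cpu = cpumask_any_and(
			cpumask_of_node(tx2_pmu->node),
			&online);

	tx2_pmu->cpu = new_cpu;
	if (new_cpu >= nr_cpu_ids)
		return 0;	/* node has no other online CPU */
	perf_pmu_migrate_context(&tx2_pmu->pmu, cpu, new_cpu);

	return 0;
}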
998 struct tx2_uncore_pmu *tx2_pmu, *temp;
1002 list_for_each_entry_safe(tx2_pmu, temp, &tx2_pmus, entry) {
1003 if (tx2_pmu->node == dev_to_node(dev)) {
1006 &tx2_pmu->hpnode);
1007 perf_pmu_unregister(&tx2_pmu->pmu);
1008 list_del(&tx2_pmu->entry);
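
Finally, removal (lines 998-1008) walks the global tx2_pmus list and tears down every PMU that belongs to the departing device's node. Sketch; the surrounding driver-model callback and the cpuhp state constant are assumptions:

/* Sketch of lines 1002-1008, inside the (assumed) remove callback. */
list_for_each_entry_safe(tx2_pmu, temp, &tx2_pmus, entry) {
	if (tx2_pmu->node == dev_to_node(dev)) {
		cpuhp_state_remove_instance_nocalls(
			CPUHP_AP_PERF_ARM_CAVIUM_TX2_UNCORE_ONLINE,
			&tx2_pmu->hpnode);
		perf_pmu_unregister(&tx2_pmu->pmu);
		list_del(&tx2_pmu->entry);
	}
}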