Lines matching refs:tx2_pmu (cross-reference listing for the ThunderX2 uncore PMU driver, drivers/perf/thunderx2_pmu.c). The number opening each line is its line number in that file; hedged sketches of the enclosing code are interleaved after each cluster.

112 			struct tx2_uncore_pmu *tx2_pmu);
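
Line 112 is the tail of one of the per-type hooks inside struct tx2_uncore_pmu. Every field the rest of this listing touches can be read back out of the references below; here is a sketch of the structure they imply. Field order and the TX2_PMU_MAX_COUNTERS sizing are assumptions, not the driver's authoritative layout:

struct tx2_uncore_pmu {
        struct hlist_node hpnode;       /* CPU-hotplug instance (line 774) */
        struct list_head entry;         /* on the global tx2_pmus list (781) */
        struct pmu pmu;
        char *name;
        int node;
        int cpu;                        /* CPU all events are counted on */
        u32 max_counters;
        u32 counters_mask;
        u32 max_events;
        u32 events_mask;
        u32 prorate_factor;
        u64 hrtimer_interval;
        void __iomem *base;             /* MMIO window of this block */
        DECLARE_BITMAP(active_counters, TX2_PMU_MAX_COUNTERS);
        struct perf_event *events[TX2_PMU_MAX_COUNTERS];
        struct device *dev;
        struct hrtimer hrtimer;
        const struct attribute_group **attr_groups;
        enum tx2_uncore_type type;
        /* per-type hooks; NULL hrtimer_callback disables polling */
        enum hrtimer_restart (*hrtimer_callback)(struct hrtimer *cb);
        void (*init_cntr_base)(struct perf_event *event,
                        struct tx2_uncore_pmu *tx2_pmu);
        void (*stop_event)(struct perf_event *event);
        void (*start_event)(struct perf_event *event, int flags);
};
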
254 struct tx2_uncore_pmu *tx2_pmu;
256 tx2_pmu = pmu_to_tx2_pmu(dev_get_drvdata(dev));
257 return cpumap_print_to_pagebuf(true, buf, cpumask_of(tx2_pmu->cpu));
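
Lines 254-257 are the body of the standard uncore cpumask sysfs attribute: uncore counters are socket-wide, so perf opens every event on exactly one CPU, and the driver advertises which one. A minimal sketch of the whole routine; the DEVICE_ATTR_RO boilerplate around the listed body is assumed from the usual perf-driver pattern:

/* Sketch: expose the one CPU this uncore PMU counts on. */
static ssize_t cpumask_show(struct device *dev,
                            struct device_attribute *attr, char *buf)
{
        struct tx2_uncore_pmu *tx2_pmu;

        tx2_pmu = pmu_to_tx2_pmu(dev_get_drvdata(dev));
        /* prints a mask containing exactly tx2_pmu->cpu */
        return cpumap_print_to_pagebuf(true, buf, cpumask_of(tx2_pmu->cpu));
}
static DEVICE_ATTR_RO(cpumask);
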
304 static int alloc_counter(struct tx2_uncore_pmu *tx2_pmu)
308 counter = find_first_zero_bit(tx2_pmu->active_counters,
309 tx2_pmu->max_counters);
310 if (counter == tx2_pmu->max_counters)
313 set_bit(counter, tx2_pmu->active_counters);
317 static inline void free_counter(struct tx2_uncore_pmu *tx2_pmu, int counter)
319 clear_bit(counter, tx2_pmu->active_counters);
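
Counter slots are handed out from a plain bitmap: find_first_zero_bit() picks the lowest free index and set_bit() claims it. The listing elides the error path; -ENOSPC is assumed here as the conventional return when every counter is busy:

/* Sketch: allocate/release a hardware counter index. */
static int alloc_counter(struct tx2_uncore_pmu *tx2_pmu)
{
        int counter;

        counter = find_first_zero_bit(tx2_pmu->active_counters,
                        tx2_pmu->max_counters);
        if (counter == tx2_pmu->max_counters)
                return -ENOSPC;         /* all counters in use (assumed) */

        set_bit(counter, tx2_pmu->active_counters);
        return counter;
}

static inline void free_counter(struct tx2_uncore_pmu *tx2_pmu, int counter)
{
        clear_bit(counter, tx2_pmu->active_counters);
}
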
323 struct tx2_uncore_pmu *tx2_pmu)
328 tx2_pmu = pmu_to_tx2_pmu(event->pmu);
329 cmask = tx2_pmu->counters_mask;
332 hwc->config_base = (unsigned long)tx2_pmu->base
334 hwc->event_base = (unsigned long)tx2_pmu->base
339 struct tx2_uncore_pmu *tx2_pmu)
344 tx2_pmu = pmu_to_tx2_pmu(event->pmu);
345 cmask = tx2_pmu->counters_mask;
347 hwc->config_base = (unsigned long)tx2_pmu->base
350 hwc->event_base = (unsigned long)tx2_pmu->base
355 struct tx2_uncore_pmu *tx2_pmu)
360 cmask = tx2_pmu->counters_mask;
362 hwc->config_base = (unsigned long)tx2_pmu->base
364 hwc->event_base = (unsigned long)tx2_pmu->base;
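
The init_cntr_base_* helpers (lines 322-364) precompute the MMIO addresses of a counter's control and data registers and cache them in the hw_perf_event, so the hot start/stop/read paths do no address arithmetic. A sketch of the L3C flavour; the L3C_COUNTER_CTL/L3C_COUNTER_DATA offsets and the 8-byte per-counter stride are taken on trust and should be read as illustrative. GET_COUNTERID is the driver's own macro (see line 685):

static void init_cntr_base_l3c(struct perf_event *event,
                struct tx2_uncore_pmu *tx2_pmu)
{
        struct hw_perf_event *hwc = &event->hw;
        u32 cmask = tx2_pmu->counters_mask;

        /* control and data registers sit 8 bytes apart per counter */
        hwc->config_base = (unsigned long)tx2_pmu->base
                + L3C_COUNTER_CTL + (8 * GET_COUNTERID(event, cmask));
        hwc->event_base = (unsigned long)tx2_pmu->base
                + L3C_COUNTER_DATA + (8 * GET_COUNTERID(event, cmask));
}
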
371 struct tx2_uncore_pmu *tx2_pmu;
373 tx2_pmu = pmu_to_tx2_pmu(event->pmu);
374 emask = tx2_pmu->events_mask;
392 struct tx2_uncore_pmu *tx2_pmu;
395 tx2_pmu = pmu_to_tx2_pmu(event->pmu);
396 cmask = tx2_pmu->counters_mask;
397 emask = tx2_pmu->events_mask;
417 struct tx2_uncore_pmu *tx2_pmu;
420 tx2_pmu = pmu_to_tx2_pmu(event->pmu);
421 cmask = tx2_pmu->counters_mask;
434 struct tx2_uncore_pmu *tx2_pmu;
436 tx2_pmu = pmu_to_tx2_pmu(event->pmu);
437 emask = tx2_pmu->events_mask;
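
The start/stop helpers (lines 370-437) program the cached registers directly: starting writes the event id into the control register and zeroes both the data register and the cached prev_count; stopping clears the control register. A condensed L3C sketch. The GET_EVENTID helper and the bit position of the event field are assumptions inferred from the masks above, and plain writel() stands in for the driver's own MMIO wrappers:

static void uncore_start_event_l3c(struct perf_event *event, int flags)
{
        struct hw_perf_event *hwc = &event->hw;
        struct tx2_uncore_pmu *tx2_pmu = pmu_to_tx2_pmu(event->pmu);
        u32 emask = tx2_pmu->events_mask;

        /* event id selects what this counter counts (shift assumed) */
        writel(GET_EVENTID(event, emask) << 3,
               (void __iomem *)hwc->config_base);
        local64_set(&hwc->prev_count, 0);
        writel(0, (void __iomem *)hwc->event_base);
}

static void uncore_stop_event_l3c(struct perf_event *event)
{
        struct hw_perf_event *hwc = &event->hw;

        /* a zero control register stops the counter */
        writel(0, (void __iomem *)hwc->config_base);
}
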
467 struct tx2_uncore_pmu *tx2_pmu;
472 tx2_pmu = pmu_to_tx2_pmu(event->pmu);
473 type = tx2_pmu->type;
474 cmask = tx2_pmu->counters_mask;
475 emask = tx2_pmu->events_mask;
476 prorate_factor = tx2_pmu->prorate_factor;
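
Lines 467-476 open tx2_uncore_event_update(). Only one L3C tile (or one DMC channel) is actually sampled, so each delta is scaled by prorate_factor (TX2_PMU_L3_TILES or TX2_PMU_DMC_CHANNELS, per lines 839 and 854) to estimate the socket-wide count. A sketch of the 32-bit DMC/L3C case; it assumes at most one counter wrap between two polls, which is what the hrtimer interval is there to guarantee, and readl() again stands in for the driver's wrapper:

static void tx2_uncore_event_update(struct perf_event *event)
{
        struct hw_perf_event *hwc = &event->hw;
        struct tx2_uncore_pmu *tx2_pmu = pmu_to_tx2_pmu(event->pmu);
        u64 prev, delta, new;

        new = readl((void __iomem *)hwc->event_base);
        prev = local64_xchg(&hwc->prev_count, new);

        /* 32-bit counter: fold in at most one rollover */
        delta = (u32)(((1ULL << 32) - prev) + new);
        local64_add(delta * tx2_pmu->prorate_factor, &event->count);
}
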
575 struct tx2_uncore_pmu *tx2_pmu;
592 tx2_pmu = pmu_to_tx2_pmu(event->pmu);
593 if (tx2_pmu->cpu >= nr_cpu_ids)
595 event->cpu = tx2_pmu->cpu;
597 if (event->attr.config >= tx2_pmu->max_events)
604 if (!tx2_uncore_validate_event_group(event, tx2_pmu->max_counters))
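
The event_init fragments (lines 575-604) show the validation gates: a PMU whose node has lost all CPUs (cpu >= nr_cpu_ids) rejects new events, accepted events are pinned to the PMU's CPU, the raw config is bounds-checked, and the whole group must fit the counter budget. A sketch; the -EINVAL values and the omitted sampling/task-mode rejections of the real function are assumptions:

static int tx2_uncore_event_init(struct perf_event *event)
{
        struct tx2_uncore_pmu *tx2_pmu = pmu_to_tx2_pmu(event->pmu);

        if (tx2_pmu->cpu >= nr_cpu_ids)
                return -EINVAL;         /* no online CPU on this node */
        event->cpu = tx2_pmu->cpu;      /* uncore: count on one CPU only */

        if (event->attr.config >= tx2_pmu->max_events)
                return -EINVAL;         /* unknown event id */

        if (!tx2_uncore_validate_event_group(event, tx2_pmu->max_counters))
                return -EINVAL;         /* group demands too many counters */

        return 0;
}
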
613 struct tx2_uncore_pmu *tx2_pmu;
616 tx2_pmu = pmu_to_tx2_pmu(event->pmu);
618 tx2_pmu->start_event(event, flags);
622 if (!tx2_pmu->hrtimer_callback)
626 if (bitmap_weight(tx2_pmu->active_counters,
627 tx2_pmu->max_counters) == 1) {
628 hrtimer_start(&tx2_pmu->hrtimer,
629 ns_to_ktime(tx2_pmu->hrtimer_interval),
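
tx2_uncore_event_start (lines 613-629) defers to the per-type start_event hook, then arms the poll timer. DMC and L3C counters are narrow and raise no overflow interrupt, so a periodic hrtimer folds them into 64 bits; CCPI2 leaves hrtimer_callback NULL and skips this. Arming only when bitmap_weight() == 1 keeps it to one timer per PMU regardless of how many counters run. A sketch; the hwc->state handling and perf_event_update_userpage() placement follow the standard pmu::start contract rather than the listing:

static void tx2_uncore_event_start(struct perf_event *event, int flags)
{
        struct hw_perf_event *hwc = &event->hw;
        struct tx2_uncore_pmu *tx2_pmu = pmu_to_tx2_pmu(event->pmu);

        hwc->state = 0;
        tx2_pmu->start_event(event, flags);
        perf_event_update_userpage(event);

        /* no overflow IRQ: poll, unless this PMU type opts out */
        if (!tx2_pmu->hrtimer_callback)
                return;

        /* arm the timer when the first counter goes live */
        if (bitmap_weight(tx2_pmu->active_counters,
                          tx2_pmu->max_counters) == 1)
                hrtimer_start(&tx2_pmu->hrtimer,
                              ns_to_ktime(tx2_pmu->hrtimer_interval),
                              HRTIMER_MODE_REL);
}
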
637 struct tx2_uncore_pmu *tx2_pmu;
642 tx2_pmu = pmu_to_tx2_pmu(event->pmu);
643 tx2_pmu->stop_event(event);
655 struct tx2_uncore_pmu *tx2_pmu;
657 tx2_pmu = pmu_to_tx2_pmu(event->pmu);
660 hwc->idx = alloc_counter(tx2_pmu);
664 tx2_pmu->events[hwc->idx] = event;
666 tx2_pmu->init_cntr_base(event, tx2_pmu);
677 struct tx2_uncore_pmu *tx2_pmu = pmu_to_tx2_pmu(event->pmu);
681 cmask = tx2_pmu->counters_mask;
685 free_counter(tx2_pmu, GET_COUNTERID(event, cmask));
688 tx2_pmu->events[hwc->idx] = NULL;
691 if (!tx2_pmu->hrtimer_callback)
694 if (bitmap_empty(tx2_pmu->active_counters, tx2_pmu->max_counters))
695 hrtimer_cancel(&tx2_pmu->hrtimer);
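
Lines 655-695 are the add/del pair that brackets a counter's lifetime: add claims a bitmap slot, records the event, and caches the register addresses; del folds in the final count, releases the slot, and cancels the poll timer once the last counter is gone. A condensed sketch, with the PERF_EF_START/PERF_HES_* handling and the -EAGAIN return assumed from the standard pmu::add contract:

static int tx2_uncore_event_add(struct perf_event *event, int flags)
{
        struct hw_perf_event *hwc = &event->hw;
        struct tx2_uncore_pmu *tx2_pmu = pmu_to_tx2_pmu(event->pmu);

        hwc->idx = alloc_counter(tx2_pmu);
        if (hwc->idx < 0)
                return -EAGAIN;         /* no free counter */

        tx2_pmu->events[hwc->idx] = event;
        tx2_pmu->init_cntr_base(event, tx2_pmu); /* cache MMIO addresses */

        hwc->state = PERF_HES_UPTODATE | PERF_HES_STOPPED;
        if (flags & PERF_EF_START)
                tx2_uncore_event_start(event, flags);
        return 0;
}

static void tx2_uncore_event_del(struct perf_event *event, int flags)
{
        struct hw_perf_event *hwc = &event->hw;
        struct tx2_uncore_pmu *tx2_pmu = pmu_to_tx2_pmu(event->pmu);
        u32 cmask = tx2_pmu->counters_mask;

        tx2_uncore_event_stop(event, PERF_EF_UPDATE);
        free_counter(tx2_pmu, GET_COUNTERID(event, cmask));

        perf_event_update_userpage(event);
        tx2_pmu->events[hwc->idx] = NULL;
        hwc->idx = -1;

        if (!tx2_pmu->hrtimer_callback)
                return;
        /* last counter out cancels the poll timer */
        if (bitmap_empty(tx2_pmu->active_counters, tx2_pmu->max_counters))
                hrtimer_cancel(&tx2_pmu->hrtimer);
}
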
705 struct tx2_uncore_pmu *tx2_pmu;
708 tx2_pmu = container_of(timer, struct tx2_uncore_pmu, hrtimer);
709 max_counters = tx2_pmu->max_counters;
711 if (bitmap_empty(tx2_pmu->active_counters, max_counters))
714 for_each_set_bit(idx, tx2_pmu->active_counters, max_counters) {
715 struct perf_event *event = tx2_pmu->events[idx];
719 hrtimer_forward_now(timer, ns_to_ktime(tx2_pmu->hrtimer_interval));
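
The poll callback (lines 705-719) walks the active-counter bitmap, updates every live event, and re-arms itself; it lets the timer die if the bitmap emptied in the meantime. A sketch assembled from those fragments:

static enum hrtimer_restart tx2_hrtimer_callback(struct hrtimer *timer)
{
        struct tx2_uncore_pmu *tx2_pmu;
        int max_counters, idx;

        tx2_pmu = container_of(timer, struct tx2_uncore_pmu, hrtimer);
        max_counters = tx2_pmu->max_counters;

        if (bitmap_empty(tx2_pmu->active_counters, max_counters))
                return HRTIMER_NORESTART;       /* nothing left to poll */

        for_each_set_bit(idx, tx2_pmu->active_counters, max_counters)
                tx2_uncore_event_update(tx2_pmu->events[idx]);

        hrtimer_forward_now(timer, ns_to_ktime(tx2_pmu->hrtimer_interval));
        return HRTIMER_RESTART;
}
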
724 struct tx2_uncore_pmu *tx2_pmu)
726 struct device *dev = tx2_pmu->dev;
727 char *name = tx2_pmu->name;
730 tx2_pmu->pmu = (struct pmu) {
732 .attr_groups = tx2_pmu->attr_groups,
743 tx2_pmu->pmu.name = devm_kasprintf(dev, GFP_KERNEL,
746 return perf_pmu_register(&tx2_pmu->pmu, tx2_pmu->pmu.name, -1);
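
tx2_uncore_pmu_register (lines 723-746) fills a struct pmu and hands it to perf core; the -1 passed to perf_pmu_register() asks for a dynamically assigned PMU type id. A skeleton sketch: only callbacks implied by the listing are wired up, and the capability flags and read hook of the real driver are omitted:

static int tx2_uncore_pmu_register(struct tx2_uncore_pmu *tx2_pmu)
{
        struct device *dev = tx2_pmu->dev;
        char *name = tx2_pmu->name;

        tx2_pmu->pmu = (struct pmu) {
                .attr_groups    = tx2_pmu->attr_groups,
                .task_ctx_nr    = perf_invalid_context, /* uncore: no task events */
                .event_init     = tx2_uncore_event_init,
                .add            = tx2_uncore_event_add,
                .del            = tx2_uncore_event_del,
                .start          = tx2_uncore_event_start,
                .stop           = tx2_uncore_event_stop,
        };

        tx2_pmu->pmu.name = devm_kasprintf(dev, GFP_KERNEL, "%s", name);
        return perf_pmu_register(&tx2_pmu->pmu, tx2_pmu->pmu.name, -1);
}
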
749 static int tx2_uncore_pmu_add_dev(struct tx2_uncore_pmu *tx2_pmu)
753 cpu = cpumask_any_and(cpumask_of_node(tx2_pmu->node),
756 tx2_pmu->cpu = cpu;
758 if (tx2_pmu->hrtimer_callback) {
759 hrtimer_init(&tx2_pmu->hrtimer,
761 tx2_pmu->hrtimer.function = tx2_pmu->hrtimer_callback;
764 ret = tx2_uncore_pmu_register(tx2_pmu);
766 dev_err(tx2_pmu->dev, "%s PMU: Failed to init driver\n",
767 tx2_pmu->name);
774 &tx2_pmu->hpnode);
776 dev_err(tx2_pmu->dev, "Error %d registering hotplug", ret);
781 list_add(&tx2_pmu->entry, &tx2_pmus);
783 dev_dbg(tx2_pmu->dev, "%s PMU UNCORE registered\n",
784 tx2_pmu->pmu.name);
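
tx2_uncore_pmu_add_dev (lines 749-784) brings one PMU online: pick any online CPU on the PMU's NUMA node, set up the poll timer if this type uses one, register with perf, and subscribe to CPU hotplug so the counting CPU can follow topology changes. A sketch; the -ENODEV conversion and the exact hotplug state id are assumptions consistent with this driver's era:

static int tx2_uncore_pmu_add_dev(struct tx2_uncore_pmu *tx2_pmu)
{
        int ret, cpu;

        /* count on any online CPU of the PMU's NUMA node */
        cpu = cpumask_any_and(cpumask_of_node(tx2_pmu->node),
                        cpu_online_mask);
        tx2_pmu->cpu = cpu;

        if (tx2_pmu->hrtimer_callback) {
                hrtimer_init(&tx2_pmu->hrtimer,
                                CLOCK_MONOTONIC, HRTIMER_MODE_REL);
                tx2_pmu->hrtimer.function = tx2_pmu->hrtimer_callback;
        }

        ret = tx2_uncore_pmu_register(tx2_pmu);
        if (ret) {
                dev_err(tx2_pmu->dev, "%s PMU: Failed to init driver\n",
                        tx2_pmu->name);
                return -ENODEV;
        }

        /* follow the counting CPU across hotplug events */
        ret = cpuhp_state_add_instance(
                        CPUHP_AP_PERF_ARM_CAVIUM_TX2_UNCORE_ONLINE,
                        &tx2_pmu->hpnode);
        if (ret) {
                dev_err(tx2_pmu->dev, "Error %d registering hotplug", ret);
                return ret;
        }

        /* keep a list so the remove path can find us */
        list_add(&tx2_pmu->entry, &tx2_pmus);

        dev_dbg(tx2_pmu->dev, "%s PMU UNCORE registered\n",
                tx2_pmu->pmu.name);
        return 0;
}
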
791 struct tx2_uncore_pmu *tx2_pmu;
825 tx2_pmu = devm_kzalloc(dev, sizeof(*tx2_pmu), GFP_KERNEL);
826 if (!tx2_pmu)
829 tx2_pmu->dev = dev;
830 tx2_pmu->type = type;
831 tx2_pmu->base = base;
832 tx2_pmu->node = dev_to_node(dev);
833 INIT_LIST_HEAD(&tx2_pmu->entry);
835 switch (tx2_pmu->type) {
837 tx2_pmu->max_counters = TX2_PMU_DMC_L3C_MAX_COUNTERS;
838 tx2_pmu->counters_mask = 0x3;
839 tx2_pmu->prorate_factor = TX2_PMU_L3_TILES;
840 tx2_pmu->max_events = L3_EVENT_MAX;
841 tx2_pmu->events_mask = 0x1f;
842 tx2_pmu->hrtimer_interval = TX2_PMU_HRTIMER_INTERVAL;
843 tx2_pmu->hrtimer_callback = tx2_hrtimer_callback;
844 tx2_pmu->attr_groups = l3c_pmu_attr_groups;
845 tx2_pmu->name = devm_kasprintf(dev, GFP_KERNEL,
846 "uncore_l3c_%d", tx2_pmu->node);
847 tx2_pmu->init_cntr_base = init_cntr_base_l3c;
848 tx2_pmu->start_event = uncore_start_event_l3c;
849 tx2_pmu->stop_event = uncore_stop_event_l3c;
852 tx2_pmu->max_counters = TX2_PMU_DMC_L3C_MAX_COUNTERS;
853 tx2_pmu->counters_mask = 0x3;
854 tx2_pmu->prorate_factor = TX2_PMU_DMC_CHANNELS;
855 tx2_pmu->max_events = DMC_EVENT_MAX;
856 tx2_pmu->events_mask = 0x1f;
857 tx2_pmu->hrtimer_interval = TX2_PMU_HRTIMER_INTERVAL;
858 tx2_pmu->hrtimer_callback = tx2_hrtimer_callback;
859 tx2_pmu->attr_groups = dmc_pmu_attr_groups;
860 tx2_pmu->name = devm_kasprintf(dev, GFP_KERNEL,
861 "uncore_dmc_%d", tx2_pmu->node);
862 tx2_pmu->init_cntr_base = init_cntr_base_dmc;
863 tx2_pmu->start_event = uncore_start_event_dmc;
864 tx2_pmu->stop_event = uncore_stop_event_dmc;
868 tx2_pmu->max_counters = TX2_PMU_CCPI2_MAX_COUNTERS;
869 tx2_pmu->counters_mask = 0x7;
870 tx2_pmu->prorate_factor = 1;
871 tx2_pmu->max_events = CCPI2_EVENT_MAX;
872 tx2_pmu->events_mask = 0x1ff;
873 tx2_pmu->attr_groups = ccpi2_pmu_attr_groups;
874 tx2_pmu->name = devm_kasprintf(dev, GFP_KERNEL,
875 "uncore_ccpi2_%d", tx2_pmu->node);
876 tx2_pmu->init_cntr_base = init_cntr_base_ccpi2;
877 tx2_pmu->start_event = uncore_start_event_ccpi2;
878 tx2_pmu->stop_event = uncore_stop_event_ccpi2;
879 tx2_pmu->hrtimer_callback = NULL;
882 devm_kfree(dev, tx2_pmu);
886 return tx2_pmu;
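
The switch (lines 835-879) is the whole per-block configuration. L3C and DMC share a small counter block (mask 0x3, implying four counters) with 5-bit event ids, are prorated by tile or channel count, and are polled; CCPI2 exposes a larger block (mask 0x7, implying eight counters) with 9-bit event ids and neither prorates nor polls, presumably because its counters are wide enough not to wrap. The listing hides how the switch closes; a sketch of the assumed default arm, matching the devm_kfree at line 882:

        default:
                devm_kfree(dev, tx2_pmu);       /* unknown PMU type: undo alloc */
                return NULL;
        }

        return tx2_pmu;
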
892 struct tx2_uncore_pmu *tx2_pmu;
905 tx2_pmu = tx2_uncore_pmu_init_dev((struct device *)data,
908 if (!tx2_pmu)
911 if (tx2_uncore_pmu_add_dev(tx2_pmu)) {
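
Lines 892-911 sit in the acpi_walk_namespace callback that stitches the init and registration helpers together. A sketch: the get_tx2_pmu_type() lookup and PMU_TYPE_INVALID sentinel are from the driver, while the acpi_bus_get_device() call matches the kernel era this listing appears to come from (newer trees resolve the handle differently):

static acpi_status tx2_uncore_pmu_add(acpi_handle handle, u32 level,
                void *data, void **return_value)
{
        struct tx2_uncore_pmu *tx2_pmu;
        struct acpi_device *adev;
        enum tx2_uncore_type type;

        if (acpi_bus_get_device(handle, &adev))
                return AE_OK;           /* no device here: keep walking */

        type = get_tx2_pmu_type(adev);
        if (type == PMU_TYPE_INVALID)
                return AE_OK;           /* not one of ours */

        tx2_pmu = tx2_uncore_pmu_init_dev((struct device *)data,
                        handle, adev, type);
        if (!tx2_pmu)
                return AE_ERROR;

        if (tx2_uncore_pmu_add_dev(tx2_pmu))
                return AE_ERROR;        /* registration failed */

        return AE_OK;
}
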
921 struct tx2_uncore_pmu *tx2_pmu;
923 tx2_pmu = hlist_entry_safe(hpnode,
929 if ((tx2_pmu->cpu >= nr_cpu_ids) &&
930 (tx2_pmu->node == cpu_to_node(cpu)))
931 tx2_pmu->cpu = cpu;
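
The online hook (lines 921-931) re-adopts orphaned PMUs: cpu >= nr_cpu_ids marks a PMU whose node previously lost all its CPUs, and the first CPU of that node to come back claims it. A sketch built directly from those lines:

static int tx2_uncore_pmu_online_cpu(unsigned int cpu,
                struct hlist_node *hpnode)
{
        struct tx2_uncore_pmu *tx2_pmu;

        tx2_pmu = hlist_entry_safe(hpnode,
                        struct tx2_uncore_pmu, hpnode);

        /* adopt only if this PMU is orphaned and cpu is on its node */
        if ((tx2_pmu->cpu >= nr_cpu_ids) &&
            (tx2_pmu->node == cpu_to_node(cpu)))
                tx2_pmu->cpu = cpu;

        return 0;
}
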
940 struct tx2_uncore_pmu *tx2_pmu;
943 tx2_pmu = hlist_entry_safe(hpnode,
946 if (cpu != tx2_pmu->cpu)
949 if (tx2_pmu->hrtimer_callback)
950 hrtimer_cancel(&tx2_pmu->hrtimer);
955 cpumask_of_node(tx2_pmu->node),
958 tx2_pmu->cpu = new_cpu;
961 perf_pmu_migrate_context(&tx2_pmu->pmu, cpu, new_cpu);
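
The offline hook (lines 940-961) keeps counting alive across hotplug: if the departing CPU is the one carrying this PMU, quiesce any poll timer, pick another online CPU on the same node, and migrate the perf context there. If the node goes empty, cpu lands at nr_cpu_ids and the online hook above re-adopts later. A sketch; the temporary on-stack mask mirrors what the fragments suggest and is an assumption:

static int tx2_uncore_pmu_offline_cpu(unsigned int cpu,
                struct hlist_node *hpnode)
{
        struct tx2_uncore_pmu *tx2_pmu;
        cpumask_t online_minus_cpu;
        unsigned int new_cpu;

        tx2_pmu = hlist_entry_safe(hpnode,
                        struct tx2_uncore_pmu, hpnode);

        if (cpu != tx2_pmu->cpu)
                return 0;               /* some other CPU went down */

        /* quiesce the poll timer before the context moves */
        if (tx2_pmu->hrtimer_callback)
                hrtimer_cancel(&tx2_pmu->hrtimer);

        /* prefer another online CPU on the same NUMA node */
        cpumask_copy(&online_minus_cpu, cpu_online_mask);
        cpumask_clear_cpu(cpu, &online_minus_cpu);
        new_cpu = cpumask_any_and(cpumask_of_node(tx2_pmu->node),
                        &online_minus_cpu);

        tx2_pmu->cpu = new_cpu;
        if (new_cpu >= nr_cpu_ids)
                return 0;               /* node empty: wait for online hook */

        perf_pmu_migrate_context(&tx2_pmu->pmu, cpu, new_cpu);
        return 0;
}
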
1002 struct tx2_uncore_pmu *tx2_pmu, *temp;
1006 list_for_each_entry_safe(tx2_pmu, temp, &tx2_pmus, entry) {
1007 if (tx2_pmu->node == dev_to_node(dev)) {
1010 &tx2_pmu->hpnode);
1011 perf_pmu_unregister(&tx2_pmu->pmu);
1012 list_del(&tx2_pmu->entry);
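
The final cluster (lines 1002-1012) is the remove path, which tears down every PMU registered for the departing device's NUMA node in reverse order of add_dev. A sketch; the platform_device signature and the _nocalls hotplug removal variant are assumptions consistent with the add path above:

static int tx2_uncore_remove(struct platform_device *pdev)
{
        struct tx2_uncore_pmu *tx2_pmu, *temp;
        struct device *dev = &pdev->dev;

        if (!list_empty(&tx2_pmus)) {
                list_for_each_entry_safe(tx2_pmu, temp, &tx2_pmus, entry) {
                        if (tx2_pmu->node == dev_to_node(dev)) {
                                cpuhp_state_remove_instance_nocalls(
                                        CPUHP_AP_PERF_ARM_CAVIUM_TX2_UNCORE_ONLINE,
                                        &tx2_pmu->hpnode);
                                perf_pmu_unregister(&tx2_pmu->pmu);
                                list_del(&tx2_pmu->entry);
                        }
                }
        }
        return 0;
}
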