Lines matching defs:info (uses of struct cxl_pmu_info across the CXL PMU perf driver)

124 static int cxl_pmu_parse_caps(struct device *dev, struct cxl_pmu_info *info)
127 void __iomem *base = info->base;
140 info->num_counters = FIELD_GET(CXL_PMU_CAP_NUM_COUNTERS_MSK, val) + 1;
141 info->counter_width = FIELD_GET(CXL_PMU_CAP_COUNTER_WIDTH_MSK, val);
142 info->num_event_capabilities = FIELD_GET(CXL_PMU_CAP_NUM_EVN_CAP_REG_SUP_MSK, val) + 1;
144 info->filter_hdm = FIELD_GET(CXL_PMU_CAP_FILTERS_SUP_MSK, val) & CXL_PMU_FILTER_HDM;
146 info->irq = FIELD_GET(CXL_PMU_CAP_MSI_N_MSK, val);
148 info->irq = -1;
151 for (i = 0; i < info->num_counters; i++) {
160 set_bit(i, info->conf_counter_bm);
181 list_add(&pmu_ev->node, &info->event_caps_fixed);
189 if (!bitmap_empty(info->conf_counter_bm, CXL_PMU_MAX_COUNTERS)) {
194 info->num_event_capabilities) {
204 list_add(&pmu_ev->node, &info->event_caps_configurable);
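
Lines 124-204 above are the capability parse: each field of the 64-bit capability register is pulled out with FIELD_GET(), "+1"-encoded counts are widened, the MSI vector number is captured (with info->irq set to -1 when none is advertised), and each counter lands either on the event_caps_fixed list or, via conf_counter_bm, on event_caps_configurable. A minimal userspace model of the FIELD_GET() step; the field positions below are illustrative assumptions, not the real CXL register layout:

    #include <stdint.h>
    #include <stdio.h>

    /* Contiguous bit mask from bit l to bit h, as the kernel's GENMASK_ULL(). */
    #define GENMASK_ULL(h, l) \
            ((~0ULL << (l)) & (~0ULL >> (63 - (h))))

    /* Illustrative field layout -- an assumption, not the CXL register map. */
    #define CAP_NUM_COUNTERS_MSK  GENMASK_ULL(5, 0)
    #define CAP_COUNTER_WIDTH_MSK GENMASK_ULL(15, 8)

    /* Shift-and-mask extraction, equivalent to the kernel's FIELD_GET(). */
    static uint64_t field_get(uint64_t mask, uint64_t val)
    {
            return (val & mask) >> __builtin_ctzll(mask);
    }

    int main(void)
    {
            uint64_t val = 0x2003;  /* pretend readq() of the capability register */

            /* Counts are "+1"-encoded, since a zero value would be meaningless. */
            printf("counters: %llu\n",
                   (unsigned long long)field_get(CAP_NUM_COUNTERS_MSK, val) + 1);
            printf("width:    %llu\n",
                   (unsigned long long)field_get(CAP_COUNTER_WIDTH_MSK, val));
            return 0;
    }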
266 struct cxl_pmu_info *info = dev_get_drvdata(dev);
272 if (!info->filter_hdm &&
437 static struct cxl_pmu_ev_cap *cxl_pmu_find_fixed_counter_ev_cap(struct cxl_pmu_info *info,
442 list_for_each_entry(pmu_ev, &info->event_caps_fixed, node) {
454 static struct cxl_pmu_ev_cap *cxl_pmu_find_config_counter_ev_cap(struct cxl_pmu_info *info,
459 list_for_each_entry(pmu_ev, &info->event_caps_configurable, node) {
479 struct cxl_pmu_info *info = dev_get_drvdata(dev);
484 if (!IS_ERR(cxl_pmu_find_fixed_counter_ev_cap(info, vid, gid, msk)))
487 if (!IS_ERR(cxl_pmu_find_config_counter_ev_cap(info, vid, gid, msk)))
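
Lines 437-459 are the two lookup helpers, and lines 479-487 (apparently a sysfs is_visible callback) probe both with IS_ERR(), which implies a miss is reported as an ERR_PTR() rather than NULL. The naming suggests the fixed-counter walk wants an exact (vid, gid, msk) match while the configurable walk accepts any subset of the supported mask; a compilable sketch of that matching rule, with struct ev_cap standing in for the driver's struct cxl_pmu_ev_cap:

    #include <stddef.h>
    #include <stdint.h>

    struct ev_cap {                 /* stand-in for struct cxl_pmu_ev_cap */
            uint16_t vid, gid;
            uint32_t msk;
            struct ev_cap *next;
    };

    /* Fixed counters count one exact event: require msk to match exactly. */
    static struct ev_cap *find_fixed(struct ev_cap *l, uint16_t vid,
                                     uint16_t gid, uint32_t msk)
    {
            for (; l; l = l->next)
                    if (l->vid == vid && l->gid == gid && l->msk == msk)
                            return l;
            return NULL;            /* the driver returns an ERR_PTR() here */
    }

    /* Configurable counters may count any subset of the supported mask. */
    static struct ev_cap *find_config(struct ev_cap *l, uint16_t vid,
                                      uint16_t gid, uint32_t msk)
    {
            for (; l; l = l->next)
                    if (l->vid == vid && l->gid == gid && !(msk & ~l->msk))
                            return l;
            return NULL;
    }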
502 struct cxl_pmu_info *info = dev_get_drvdata(dev);
504 return cpumap_print_to_pagebuf(true, buf, cpumask_of(info->on_cpu));
528 struct cxl_pmu_info *info = pmu_to_cxl_pmu_info(event->pmu);
539 pmu_ev = cxl_pmu_find_fixed_counter_ev_cap(info, vid, gid, mask);
543 if (!test_bit(pmu_ev->counter_idx, info->used_counter_bm)) {
550 pmu_ev = cxl_pmu_find_config_counter_ev_cap(info, vid, gid, mask);
555 bitmap_andnot(configurable_and_free, info->conf_counter_bm,
556 info->used_counter_bm, CXL_PMU_MAX_COUNTERS);
571 struct cxl_pmu_info *info = pmu_to_cxl_pmu_info(event->pmu);
591 event->cpu = info->on_cpu;
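
Counter selection (lines 528-556) tries a fixed-counter match first, usable only while its bit is clear in used_counter_bm, and otherwise takes bitmap_andnot() of conf_counter_bm and used_counter_bm to find a counter that is both configurable and free. Line 591 then pins the event to the PMU's home CPU, the usual arrangement for an uncore-style PMU that cannot follow tasks. The free-counter computation, modelled with a plain 64-bit word in place of the kernel bitmap:

    #include <stdint.h>

    /* conf_bm: counters that are configurable; used_bm: counters in use.
     * Returns the lowest free configurable counter, or -1 when all are
     * busy (the driver fails event scheduling in that case). */
    static int pick_free_configurable(uint64_t conf_bm, uint64_t used_bm)
    {
            uint64_t free_bm = conf_bm & ~used_bm;  /* bitmap_andnot() */

            return free_bm ? __builtin_ctzll(free_bm) : -1;
    }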
598 struct cxl_pmu_info *info = pmu_to_cxl_pmu_info(pmu);
599 void __iomem *base = info->base;
607 struct cxl_pmu_info *info = pmu_to_cxl_pmu_info(pmu);
608 void __iomem *base = info->base;
622 struct cxl_pmu_info *info = pmu_to_cxl_pmu_info(event->pmu);
624 void __iomem *base = info->base;
641 if (info->filter_hdm) {
659 if (test_bit(hwc->idx, info->conf_counter_bm)) {
686 struct cxl_pmu_info *info = pmu_to_cxl_pmu_info(event->pmu);
687 void __iomem *base = info->base;
694 struct cxl_pmu_info *info = pmu_to_cxl_pmu_info(event->pmu);
707 delta = (new_cnt - prev_cnt) & GENMASK_ULL(info->counter_width - 1, 0);
708 if (overflow && delta < GENMASK_ULL(info->counter_width - 1, 0))
709 delta += (1UL << info->counter_width);
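
Lines 686-709 compute how far a counter advanced between two reads. The subtraction is masked to counter_width bits, so a counter that wrapped from near-maximum to a small value still yields the correct small delta; if an overflow was signalled but the masked delta is too small to have covered a wrap on its own, one full counter period is added back. A self-contained version with a worked wrap case (the width >= 64 guard is an addition here, since shifting a 64-bit type by 64 is undefined):

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Advance of a free-running 'width'-bit counter between two samples.
     * 'overflow' is true when the overflow interrupt fired for this counter. */
    static uint64_t counter_delta(uint64_t new_cnt, uint64_t prev_cnt,
                                  unsigned int width, bool overflow)
    {
            uint64_t mask = width >= 64 ? ~0ULL : (1ULL << width) - 1;
            uint64_t delta = (new_cnt - prev_cnt) & mask;

            /* Overflow was seen but the masked delta alone would not show
             * it: account for one full trip around the counter. */
            if (overflow && delta < mask && width < 64)
                    delta += mask + 1;
            return delta;
    }

    int main(void)
    {
            /* 16-bit counter wraps from 0xFFF0 to 0x0010: expect 0x20 (32). */
            printf("%llu\n", (unsigned long long)
                   counter_delta(0x0010, 0xFFF0, 16, false));
            return 0;
    }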
721 struct cxl_pmu_info *info = pmu_to_cxl_pmu_info(event->pmu);
722 void __iomem *base = info->base;
740 struct cxl_pmu_info *info = pmu_to_cxl_pmu_info(event->pmu);
755 info->hw_events[idx] = event;
756 set_bit(idx, info->used_counter_bm);
766 struct cxl_pmu_info *info = pmu_to_cxl_pmu_info(event->pmu);
770 clear_bit(hwc->idx, info->used_counter_bm);
771 info->hw_events[hwc->idx] = NULL;
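
The add/del pair (lines 740-771) keeps two structures in lock-step: hw_events[] maps a counter index back to its perf_event (the interrupt handler below relies on this), and used_counter_bm marks the index busy for the selection logic above. Claim and release mirror each other, as the matched lines show:

    /* add: publish the event, then mark the counter busy */
    info->hw_events[idx] = event;
    set_bit(idx, info->used_counter_bm);

    /* del: mark the counter free, then drop the back-pointer */
    clear_bit(hwc->idx, info->used_counter_bm);
    info->hw_events[hwc->idx] = NULL;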
777 struct cxl_pmu_info *info = data;
778 void __iomem *base = info->base;
790 for_each_set_bit(i, overflowedbm, info->num_counters) {
791 struct perf_event *event = info->hw_events[i];
794 dev_dbg(info->pmu.dev,
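
The overflow handler (lines 777-794) snapshots the overflow status into a bitmap and visits each set bit with for_each_set_bit(), bounded by num_counters, resolving each index through hw_events[] before updating and logging. The loop shape, open-coded for a model PMU with at most 64 counters:

    #include <stdint.h>

    /* Open-coded equivalent of for_each_set_bit() over an overflow word. */
    static void handle_overflows(uint64_t status, unsigned int num_counters)
    {
            while (status) {
                    unsigned int i = __builtin_ctzll(status);

                    if (i >= num_counters)
                            break;
                    status &= status - 1;   /* clear the lowest set bit */
                    /* look up hw_events[i], update the event, and
                     * acknowledge the hardware status bit here */
            }
    }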
809 struct cxl_pmu_info *info = _info;
811 perf_pmu_unregister(&info->pmu);
816 struct cxl_pmu_info *info = _info;
818 cpuhp_state_remove_instance_nocalls(cxl_pmu_cpuhp_state_num, &info->node);
825 struct cxl_pmu_info *info;
830 info = devm_kzalloc(dev, sizeof(*info), GFP_KERNEL);
831 if (!info)
834 dev_set_drvdata(dev, info);
835 INIT_LIST_HEAD(&info->event_caps_fixed);
836 INIT_LIST_HEAD(&info->event_caps_configurable);
838 info->base = pmu->base;
840 info->on_cpu = -1;
841 rc = cxl_pmu_parse_caps(dev, info);
845 info->hw_events = devm_kcalloc(dev, sizeof(*info->hw_events),
846 info->num_counters, GFP_KERNEL);
847 if (!info->hw_events)
859 info->pmu = (struct pmu) {
876 if (info->irq <= 0)
879 rc = pci_irq_vector(pdev, info->irq);
889 irq_name, info);
892 info->irq = irq;
894 rc = cpuhp_state_add_instance(cxl_pmu_cpuhp_state_num, &info->node);
898 rc = devm_add_action_or_reset(dev, cxl_pmu_cpuhp_remove, info);
902 rc = perf_pmu_register(&info->pmu, info->pmu.name, -1);
906 rc = devm_add_action_or_reset(dev, cxl_pmu_perf_unregister, info);
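
Probe (lines 825-906) is fully devm-managed: the info structure and the hw_events[] array are devm-allocated, and the cpuhp instance and PMU registration are undone by the two action callbacks at lines 809-818, which devm runs in reverse order of registration, so the PMU is unregistered before its supporting state disappears. The ordering-sensitive tail of probe, reconstructed from the matched lines (the intervening error checks are assumed to follow the usual "if (rc) return rc" shape):

    rc = cpuhp_state_add_instance(cxl_pmu_cpuhp_state_num, &info->node);
    if (rc)
            return rc;

    rc = devm_add_action_or_reset(dev, cxl_pmu_cpuhp_remove, info);
    if (rc)
            return rc;

    /* Registered after the cpuhp action, so it is torn down before it. */
    rc = perf_pmu_register(&info->pmu, info->pmu.name, -1);
    if (rc)
            return rc;

    return devm_add_action_or_reset(dev, cxl_pmu_perf_unregister, info);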
921 struct cxl_pmu_info *info = hlist_entry_safe(node, struct cxl_pmu_info, node);
923 if (info->on_cpu != -1)
926 info->on_cpu = cpu;
931 WARN_ON(irq_set_affinity(info->irq, cpumask_of(cpu)));
938 struct cxl_pmu_info *info = hlist_entry_safe(node, struct cxl_pmu_info, node);
941 if (info->on_cpu != cpu)
944 info->on_cpu = -1;
947 dev_err(info->pmu.dev, "Unable to find a suitable CPU\n");
951 perf_pmu_migrate_context(&info->pmu, cpu, target);
952 info->on_cpu = target;
957 WARN_ON(irq_set_affinity(info->irq, cpumask_of(target)));
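
The hotplug callbacks (lines 921-957) implement the usual uncore home-CPU scheme: the online callback claims the first CPU to come up and points the MSI at it with irq_set_affinity(), and the offline callback, when the home CPU leaves, picks a replacement, migrates the perf context, and re-targets the interrupt. A reconstruction of the offline path consistent with the matched lines; the function name and the cpumask_any_but() call are assumptions filled in from common perf-driver practice:

    static int cxl_pmu_offline_cpu(unsigned int cpu, struct hlist_node *node)
    {
            struct cxl_pmu_info *info =
                    hlist_entry_safe(node, struct cxl_pmu_info, node);
            unsigned int target;

            /* Nothing to do unless the departing CPU owns this PMU. */
            if (info->on_cpu != cpu)
                    return 0;

            info->on_cpu = -1;
            target = cpumask_any_but(cpu_online_mask, cpu);
            if (target >= nr_cpu_ids) {
                    dev_err(info->pmu.dev, "Unable to find a suitable CPU\n");
                    return 0;
            }

            /* Move active events and the interrupt to the new home CPU. */
            perf_pmu_migrate_context(&info->pmu, cpu, target);
            info->on_cpu = target;
            WARN_ON(irq_set_affinity(info->irq, cpumask_of(target)));

            return 0;
    }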