Lines Matching defs:nd_pmu
122 struct nvdimm_pmu *nd_pmu;
124 nd_pmu = container_of(pmu, struct nvdimm_pmu, pmu);
126 return cpumap_print_to_pagebuf(true, buf, cpumask_of(nd_pmu->cpu));
131 struct nvdimm_pmu *nd_pmu;
136 nd_pmu = hlist_entry_safe(node, struct nvdimm_pmu, node);
138 /* Clear it, in case given cpu is set in nd_pmu->arch_cpumask */
139 cpumask_test_and_clear_cpu(cpu, &nd_pmu->arch_cpumask);
145 if (cpu != nd_pmu->cpu)
148 /* Check for any active cpu in nd_pmu->arch_cpumask */
149 target = cpumask_any(&nd_pmu->arch_cpumask);
152 * In case we don't have any active cpu in nd_pmu->arch_cpumask,
160 nd_pmu->cpu = target;
164 perf_pmu_migrate_context(&nd_pmu->pmu, cpu, target);
171 struct nvdimm_pmu *nd_pmu;
173 nd_pmu = hlist_entry_safe(node, struct nvdimm_pmu, node);
175 if (nd_pmu->cpu >= nr_cpu_ids)
176 nd_pmu->cpu = cpu;
181 static int create_cpumask_attr_group(struct nvdimm_pmu *nd_pmu)
213 nd_pmu->pmu.attr_groups[NVDIMM_PMU_CPUMASK_ATTR] = nvdimm_pmu_cpumask_group;
217 static int nvdimm_pmu_cpu_hotplug_init(struct nvdimm_pmu *nd_pmu)
226 * Check for any active cpu in nd_pmu->arch_cpumask.
228 if (!cpumask_empty(&nd_pmu->arch_cpumask)) {
229 nd_pmu->cpu = cpumask_any(&nd_pmu->arch_cpumask);
232 nodeid = dev_to_node(nd_pmu->dev);
234 nd_pmu->cpu = cpumask_any(cpumask);
243 nd_pmu->cpuhp_state = rc;
246 rc = cpuhp_state_add_instance_nocalls(nd_pmu->cpuhp_state, &nd_pmu->node);
248 cpuhp_remove_multi_state(nd_pmu->cpuhp_state);
253 rc = create_cpumask_attr_group(nd_pmu);
255 cpuhp_state_remove_instance_nocalls(nd_pmu->cpuhp_state, &nd_pmu->node);
256 cpuhp_remove_multi_state(nd_pmu->cpuhp_state);
263 static void nvdimm_pmu_free_hotplug_memory(struct nvdimm_pmu *nd_pmu)
265 cpuhp_state_remove_instance_nocalls(nd_pmu->cpuhp_state, &nd_pmu->node);
266 cpuhp_remove_multi_state(nd_pmu->cpuhp_state);
268 if (nd_pmu->pmu.attr_groups[NVDIMM_PMU_CPUMASK_ATTR])
269 kfree(nd_pmu->pmu.attr_groups[NVDIMM_PMU_CPUMASK_ATTR]->attrs);
270 kfree(nd_pmu->pmu.attr_groups[NVDIMM_PMU_CPUMASK_ATTR]);
273 int register_nvdimm_pmu(struct nvdimm_pmu *nd_pmu, struct platform_device *pdev)
277 if (!nd_pmu || !pdev)
281 if (WARN_ON_ONCE(!(nd_pmu->pmu.event_init && nd_pmu->pmu.add &&
282 nd_pmu->pmu.del && nd_pmu->pmu.read && nd_pmu->pmu.name)))
285 nd_pmu->pmu.attr_groups = kzalloc((NVDIMM_PMU_NULL_ATTR + 1) *
287 if (!nd_pmu->pmu.attr_groups)
294 nd_pmu->dev = &pdev->dev;
297 nd_pmu->pmu.attr_groups[NVDIMM_PMU_FORMAT_ATTR] = &nvdimm_pmu_format_group;
298 nd_pmu->pmu.attr_groups[NVDIMM_PMU_EVENT_ATTR] = &nvdimm_pmu_events_group;
299 nd_pmu->pmu.attr_groups[NVDIMM_PMU_NULL_ATTR] = NULL;
302 rc = nvdimm_pmu_cpu_hotplug_init(nd_pmu);
304 pr_info("cpu hotplug feature failed for device: %s\n", nd_pmu->pmu.name);
305 kfree(nd_pmu->pmu.attr_groups);
309 rc = perf_pmu_register(&nd_pmu->pmu, nd_pmu->pmu.name, -1);
311 nvdimm_pmu_free_hotplug_memory(nd_pmu);
312 kfree(nd_pmu->pmu.attr_groups);
317 nd_pmu->pmu.name);
323 void unregister_nvdimm_pmu(struct nvdimm_pmu *nd_pmu)
325 perf_pmu_unregister(&nd_pmu->pmu);
326 nvdimm_pmu_free_hotplug_memory(nd_pmu);
327 kfree(nd_pmu->pmu.attr_groups);
328 kfree(nd_pmu);