// SPDX-License-Identifier: GPL-2.0-only
/*
 * ACPI probing code for ARM performance counters.
 *
 * Copyright (C) 2017 ARM Ltd.
 */

#include <linux/acpi.h>
#include <linux/cpumask.h>
#include <linux/init.h>
#include <linux/irq.h>
#include <linux/irqdesc.h>
#include <linux/percpu.h>
#include <linux/perf/arm_pmu.h>

#include <asm/cputype.h>

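/*
 * Per-CPU bookkeeping: the PMU instance probed for each CPU (if any), and the
 * Linux IRQ parsed from that CPU's MADT GICC entry (0 when none is described).
 */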
static DEFINE_PER_CPU(struct arm_pmu *, probed_pmus);
static DEFINE_PER_CPU(int, pmu_irqs);

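/*
 * Map the GSI from this CPU's MADT GICC entry to a Linux IRQ. Returns the
 * IRQ number, 0 if no (valid) interrupt is described, or a negative errno
 * from acpi_register_gsi() on failure.
 */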
static int arm_pmu_acpi_register_irq(int cpu)
{
	struct acpi_madt_generic_interrupt *gicc;
	int gsi, trigger;

	gicc = acpi_cpu_get_madt_gicc(cpu);

	gsi = gicc->performance_interrupt;

	/*
	 * Per the ACPI spec, the MADT cannot describe a PMU that doesn't
	 * have an interrupt. QEMU advertises this by using a GSI of zero,
	 * which is not known to be valid on any hardware despite being
	 * valid per the spec. Take the pragmatic approach and reject a
	 * GSI of zero for now.
	 */
	if (!gsi)
		return 0;

	if (gicc->flags & ACPI_MADT_PERFORMANCE_IRQ_MODE)
		trigger = ACPI_EDGE_SENSITIVE;
	else
		trigger = ACPI_LEVEL_SENSITIVE;

	/*
	 * Helpfully, the MADT GICC doesn't have a polarity flag for the
	 * "performance interrupt". Luckily, on compliant GICs the polarity is
	 * a fixed value in HW (for both SPIs and PPIs) that we cannot change
	 * from SW.
	 *
	 * Here we pass in ACPI_ACTIVE_HIGH to keep the core code happy. This
	 * may not match the real polarity, but that should not matter.
	 *
	 * Other interrupt controllers are not supported with ACPI.
	 */
	return acpi_register_gsi(NULL, gsi, trigger, ACPI_ACTIVE_HIGH);
}

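/*
 * Undo arm_pmu_acpi_register_irq(): release the GSI described by this CPU's
 * MADT GICC entry, if it had one.
 */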
static void arm_pmu_acpi_unregister_irq(int cpu)
{
	struct acpi_madt_generic_interrupt *gicc;
	int gsi;

	gicc = acpi_cpu_get_madt_gicc(cpu);

	gsi = gicc->performance_interrupt;
	if (gsi)
		acpi_unregister_gsi(gsi);
}

#if IS_ENABLED(CONFIG_ARM_SPE_PMU)
static struct resource spe_resources[] = {
	{
		/* irq */
		.flags          = IORESOURCE_IRQ,
	}
};

static struct platform_device spe_dev = {
	.name = ARMV8_SPE_PDEV_NAME,
	.id = -1,
	.resource = spe_resources,
	.num_resources = ARRAY_SIZE(spe_resources)
};

/*
 * For lack of a better place, hook the normal PMU MADT walk
 * and create a SPE device if we detect a recent MADT with
 * a homogeneous PPI mapping.
 */
static void arm_spe_acpi_register_device(void)
{
	int cpu, hetid, irq, ret;
	bool first = true;
	u16 gsi = 0;

	/*
	 * Sanity check all the GICC tables for the same interrupt number.
	 * For now, we only support homogeneous ACPI/SPE machines.
	 */
	for_each_possible_cpu(cpu) {
		struct acpi_madt_generic_interrupt *gicc;

		gicc = acpi_cpu_get_madt_gicc(cpu);
		if (gicc->header.length < ACPI_MADT_GICC_SPE)
			return;

		if (first) {
			gsi = gicc->spe_interrupt;
			if (!gsi)
				return;
			hetid = find_acpi_cpu_topology_hetero_id(cpu);
			first = false;
		} else if ((gsi != gicc->spe_interrupt) ||
			   (hetid != find_acpi_cpu_topology_hetero_id(cpu))) {
			pr_warn("ACPI: SPE must be homogeneous\n");
			return;
		}
	}

	irq = acpi_register_gsi(NULL, gsi, ACPI_LEVEL_SENSITIVE,
				ACPI_ACTIVE_HIGH);
	if (irq < 0) {
		pr_warn("ACPI: SPE Unable to register interrupt: %d\n", gsi);
		return;
	}

	spe_resources[0].start = irq;
	ret = platform_device_register(&spe_dev);
	if (ret < 0) {
		pr_warn("ACPI: SPE: Unable to register device\n");
		acpi_unregister_gsi(gsi);
	}
}
#else
static inline void arm_spe_acpi_register_device(void)
{
}
#endif /* CONFIG_ARM_SPE_PMU */

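/*
 * Parse and register the PMU interrupt for every possible CPU at boot,
 * stashing the Linux IRQ in pmu_irqs so the hotplug callback can associate it
 * with a PMU later. On error, unwind what we have registered so far, taking
 * care to unregister each GSI only once when CPUs share it (e.g. PPIs).
 */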
static int arm_pmu_acpi_parse_irqs(void)
{
	int irq, cpu, irq_cpu, err;

	for_each_possible_cpu(cpu) {
		irq = arm_pmu_acpi_register_irq(cpu);
		if (irq < 0) {
			err = irq;
			pr_warn("Unable to parse ACPI PMU IRQ for CPU%d: %d\n",
				cpu, err);
			goto out_err;
		} else if (irq == 0) {
			pr_warn("No ACPI PMU IRQ for CPU%d\n", cpu);
		}

		/*
		 * Log and request the IRQ so the core arm_pmu code can manage
		 * it. We'll have to sanity-check IRQs later when we associate
		 * them with their PMUs.
		 */
		per_cpu(pmu_irqs, cpu) = irq;
		armpmu_request_irq(irq, cpu);
	}

	return 0;

out_err:
	for_each_possible_cpu(cpu) {
		irq = per_cpu(pmu_irqs, cpu);
		if (!irq)
			continue;

		arm_pmu_acpi_unregister_irq(cpu);

		/*
		 * Blat all copies of the IRQ so that we only unregister the
		 * corresponding GSI once (e.g. when we have PPIs).
		 */
		for_each_possible_cpu(irq_cpu) {
			if (per_cpu(pmu_irqs, irq_cpu) == irq)
				per_cpu(pmu_irqs, irq_cpu) = 0;
		}
	}

	return err;
}

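/*
 * Find the PMU already allocated for this CPU's MIDR (i.e. another CPU of the
 * same type has been probed before us), or allocate a fresh one. This runs
 * from the CPU starting hotplug callback, hence the atomic allocation.
 */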
static struct arm_pmu *arm_pmu_acpi_find_alloc_pmu(void)
{
	unsigned long cpuid = read_cpuid_id();
	struct arm_pmu *pmu;
	int cpu;

	for_each_possible_cpu(cpu) {
		pmu = per_cpu(probed_pmus, cpu);
		if (!pmu || pmu->acpi_cpuid != cpuid)
			continue;

		return pmu;
	}

	pmu = armpmu_alloc_atomic();
	if (!pmu) {
		pr_warn("Unable to allocate PMU for CPU%d\n",
			smp_processor_id());
		return NULL;
	}

	pmu->acpi_cpuid = cpuid;

	return pmu;
}

/*
 * Check whether the new IRQ is compatible with those already associated with
 * the PMU (e.g. we don't have mismatched PPIs).
 */
static bool pmu_irq_matches(struct arm_pmu *pmu, int irq)
{
	struct pmu_hw_events __percpu *hw_events = pmu->hw_events;
	int cpu;

	if (!irq)
		return true;

	for_each_cpu(cpu, &pmu->supported_cpus) {
		int other_irq = per_cpu(hw_events->irq, cpu);
		if (!other_irq)
			continue;

		if (irq == other_irq)
			continue;
		if (!irq_is_percpu_devid(irq) && !irq_is_percpu_devid(other_irq))
			continue;

		pr_warn("mismatched PPIs detected\n");
		return false;
	}

	return true;
}

/*
 * This must run before the common arm_pmu hotplug logic, so that we can
 * associate a CPU and its interrupt before the common code tries to manage the
 * affinity and so on.
 *
 * Note that hotplug events are serialized, so we cannot race with another CPU
 * coming up. The perf core won't open events while a hotplug event is in
 * progress.
 */
static int arm_pmu_acpi_cpu_starting(unsigned int cpu)
{
	struct arm_pmu *pmu;
	struct pmu_hw_events __percpu *hw_events;
	int irq;

	/* If we've already probed this CPU, we have nothing to do */
	if (per_cpu(probed_pmus, cpu))
		return 0;

	irq = per_cpu(pmu_irqs, cpu);

	pmu = arm_pmu_acpi_find_alloc_pmu();
	if (!pmu)
		return -ENOMEM;

	per_cpu(probed_pmus, cpu) = pmu;

	if (pmu_irq_matches(pmu, irq)) {
		hw_events = pmu->hw_events;
		per_cpu(hw_events->irq, cpu) = irq;
	}

	cpumask_set_cpu(cpu, &pmu->supported_cpus);

	/*
	 * Ideally, we'd probe the PMU here when we find the first matching
	 * CPU. We can't do that for several reasons; see the comment in
	 * arm_pmu_acpi_init().
	 *
	 * So for the time being, we're done.
	 */
	return 0;
}

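/*
 * Entry point for a CPU PMU driver probing via ACPI (e.g. the arm64 PMUv3
 * driver): run init_fn on each PMU instance discovered so far and register it
 * with perf, giving each instance a "<base name>_<index>" name.
 */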
int arm_pmu_acpi_probe(armpmu_init_fn init_fn)
{
	int pmu_idx = 0;
	int cpu, ret;

	/*
	 * Initialise and register the set of PMUs which we know about right
	 * now. Ideally we'd do this in arm_pmu_acpi_cpu_starting() so that we
	 * could handle late hotplug, but this may lead to deadlock since we
	 * might try to register a hotplug notifier instance from within a
	 * hotplug notifier.
	 *
	 * There's also the problem of having access to the right init_fn,
	 * without tying this too deeply into the "real" PMU driver.
	 *
	 * For the moment, as with the platform/DT case, we need at least one
	 * of a PMU's CPUs to be online at probe time.
	 */
	for_each_possible_cpu(cpu) {
		struct arm_pmu *pmu = per_cpu(probed_pmus, cpu);
		char *base_name;

		if (!pmu || pmu->name)
			continue;

		ret = init_fn(pmu);
		if (ret == -ENODEV) {
			/* PMU not handled by this driver, or not present */
			continue;
		} else if (ret) {
			pr_warn("Unable to initialise PMU for CPU%d\n", cpu);
			return ret;
		}

		base_name = pmu->name;
		pmu->name = kasprintf(GFP_KERNEL, "%s_%d", base_name, pmu_idx++);
		if (!pmu->name) {
			pr_warn("Unable to allocate PMU name for CPU%d\n", cpu);
			return -ENOMEM;
		}

		ret = armpmu_register(pmu);
		if (ret) {
			pr_warn("Failed to register PMU for CPU%d\n", cpu);
			kfree(pmu->name);
			return ret;
		}
	}

	return 0;
}

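/*
 * Boot-time setup: register the SPE platform device (if the MADT describes
 * one), parse the per-CPU PMU interrupts, and install the CPU starting
 * hotplug callback that ties each CPU to a PMU instance.
 */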
static int arm_pmu_acpi_init(void)
{
	int ret;

	if (acpi_disabled)
		return 0;

	arm_spe_acpi_register_device();

	ret = arm_pmu_acpi_parse_irqs();
	if (ret)
		return ret;

	ret = cpuhp_setup_state(CPUHP_AP_PERF_ARM_ACPI_STARTING,
				"perf/arm/pmu_acpi:starting",
				arm_pmu_acpi_cpu_starting, NULL);

	return ret;
}
subsys_initcall(arm_pmu_acpi_init)