// SPDX-License-Identifier: GPL-2.0-only
/*
 * ACPI probing code for ARM performance counters.
 *
 * Copyright (C) 2017 ARM Ltd.
 */

#include <linux/acpi.h>
#include <linux/cpumask.h>
#include <linux/init.h>
#include <linux/irq.h>
#include <linux/irqdesc.h>
#include <linux/percpu.h>
#include <linux/perf/arm_pmu.h>

#include <asm/cpu.h>
#include <asm/cputype.h>

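/*
 * Per-CPU bookkeeping populated during probing: the PMU (if any) a CPU has
 * been associated with, and the Linux IRQ parsed from its MADT GICC entry.
 */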
static DEFINE_PER_CPU(struct arm_pmu *, probed_pmus);
static DEFINE_PER_CPU(int, pmu_irqs);

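/*
 * Map the PMU GSI from a CPU's MADT GICC entry to a Linux IRQ. Returns the
 * IRQ number, 0 if no valid GSI is advertised, or a negative errno on failure.
 */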
static int arm_pmu_acpi_register_irq(int cpu)
{
	struct acpi_madt_generic_interrupt *gicc;
	int gsi, trigger;

	gicc = acpi_cpu_get_madt_gicc(cpu);

	gsi = gicc->performance_interrupt;

	/*
	 * Per the ACPI spec, the MADT cannot describe a PMU that doesn't
	 * have an interrupt. QEMU advertises this by using a GSI of zero,
	 * which is not known to be valid on any hardware despite being
	 * valid per the spec. Take the pragmatic approach and reject a
	 * GSI of zero for now.
	 */
	if (!gsi)
		return 0;

	if (gicc->flags & ACPI_MADT_PERFORMANCE_IRQ_MODE)
		trigger = ACPI_EDGE_SENSITIVE;
	else
		trigger = ACPI_LEVEL_SENSITIVE;

	/*
	 * Helpfully, the MADT GICC doesn't have a polarity flag for the
	 * "performance interrupt". Luckily, on compliant GICs the polarity is
	 * a fixed value in HW (for both SPIs and PPIs) that we cannot change
	 * from SW.
	 *
	 * Here we pass in ACPI_ACTIVE_HIGH to keep the core code happy. This
	 * may not match the real polarity, but that should not matter.
	 *
	 * Other interrupt controllers are not supported with ACPI.
	 */
	return acpi_register_gsi(NULL, gsi, trigger, ACPI_ACTIVE_HIGH);
}

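/* Undo arm_pmu_acpi_register_irq() for a CPU whose GICC advertises a GSI. */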
static void arm_pmu_acpi_unregister_irq(int cpu)
{
	struct acpi_madt_generic_interrupt *gicc;
	int gsi;

	gicc = acpi_cpu_get_madt_gicc(cpu);

	gsi = gicc->performance_interrupt;
	if (gsi)
		acpi_unregister_gsi(gsi);
}

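/*
 * Register a platform device for a per-CPU profiling agent (e.g. SPE or TRBE)
 * whose interrupt is described by the MADT GICC entries. The mapping must be
 * homogeneous: every CPU has to report the same GSI and hetero ID. A GICC
 * entry too short to carry the field causes the device to be skipped, or
 * rejected if a GSI has already been seen.
 */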
static int __maybe_unused
arm_acpi_register_pmu_device(struct platform_device *pdev, u8 len,
			     u16 (*parse_gsi)(struct acpi_madt_generic_interrupt *))
{
	int cpu, this_hetid, hetid, irq, ret;
	u16 this_gsi = 0, gsi = 0;

	/*
	 * The platform device must have exactly one IORESOURCE_IRQ
	 * resource to hold the GSI interrupt.
	 */
	if (pdev->num_resources != 1)
		return -ENXIO;

	if (pdev->resource[0].flags != IORESOURCE_IRQ)
		return -ENXIO;

	/*
	 * Sanity check all the GICC tables for the same interrupt
	 * number. For now, only support homogeneous ACPI machines.
	 */
	for_each_possible_cpu(cpu) {
		struct acpi_madt_generic_interrupt *gicc;

		gicc = acpi_cpu_get_madt_gicc(cpu);
		if (gicc->header.length < len)
			return gsi ? -ENXIO : 0;

		this_gsi = parse_gsi(gicc);
		this_hetid = find_acpi_cpu_topology_hetero_id(cpu);
		if (!gsi) {
			hetid = this_hetid;
			gsi = this_gsi;
		} else if (hetid != this_hetid || gsi != this_gsi) {
			pr_warn("ACPI: %s: must be homogeneous\n", pdev->name);
			return -ENXIO;
		}
	}

	if (!this_gsi)
		return 0;

	irq = acpi_register_gsi(NULL, gsi, ACPI_LEVEL_SENSITIVE, ACPI_ACTIVE_HIGH);
	if (irq < 0) {
		pr_warn("ACPI: %s Unable to register interrupt: %d\n", pdev->name, gsi);
		return -ENXIO;
	}

	pdev->resource[0].start = irq;
	ret = platform_device_register(pdev);
	if (ret)
		acpi_unregister_gsi(gsi);

	return ret;
}

#if IS_ENABLED(CONFIG_ARM_SPE_PMU)
static struct resource spe_resources[] = {
	{
		/* irq */
		.flags          = IORESOURCE_IRQ,
	}
};

static struct platform_device spe_dev = {
	.name = ARMV8_SPE_PDEV_NAME,
	.id = -1,
	.resource = spe_resources,
	.num_resources = ARRAY_SIZE(spe_resources)
};

static u16 arm_spe_parse_gsi(struct acpi_madt_generic_interrupt *gicc)
{
	return gicc->spe_interrupt;
}

/*
 * For lack of a better place, hook the normal PMU MADT walk
 * and create a SPE device if we detect a recent MADT with
 * a homogeneous PPI mapping.
 */
static void arm_spe_acpi_register_device(void)
{
	int ret = arm_acpi_register_pmu_device(&spe_dev, ACPI_MADT_GICC_SPE,
					       arm_spe_parse_gsi);
	if (ret)
		pr_warn("ACPI: SPE: Unable to register device\n");
}
#else
static inline void arm_spe_acpi_register_device(void)
{
}
#endif /* CONFIG_ARM_SPE_PMU */

#if IS_ENABLED(CONFIG_CORESIGHT_TRBE)
static struct resource trbe_resources[] = {
	{
		/* irq */
		.flags          = IORESOURCE_IRQ,
	}
};

static struct platform_device trbe_dev = {
	.name = ARMV8_TRBE_PDEV_NAME,
	.id = -1,
	.resource = trbe_resources,
	.num_resources = ARRAY_SIZE(trbe_resources)
};

static u16 arm_trbe_parse_gsi(struct acpi_madt_generic_interrupt *gicc)
{
	return gicc->trbe_interrupt;
}

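/*
 * As for SPE, create a TRBE device if the MADT describes a recent, homogeneous
 * PPI mapping for it.
 */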
static void arm_trbe_acpi_register_device(void)
{
	int ret = arm_acpi_register_pmu_device(&trbe_dev, ACPI_MADT_GICC_TRBE,
					       arm_trbe_parse_gsi);
	if (ret)
		pr_warn("ACPI: TRBE: Unable to register device\n");
}
#else
static inline void arm_trbe_acpi_register_device(void)
{
}
#endif /* CONFIG_CORESIGHT_TRBE */

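/*
 * Map and request the PMU IRQ for every possible CPU, recording it in
 * pmu_irqs. On failure, unwind what has been done so far, taking care to
 * unregister each shared GSI only once.
 */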
static int arm_pmu_acpi_parse_irqs(void)
{
	int irq, cpu, irq_cpu, err;

	for_each_possible_cpu(cpu) {
		irq = arm_pmu_acpi_register_irq(cpu);
		if (irq < 0) {
			err = irq;
			pr_warn("Unable to parse ACPI PMU IRQ for CPU%d: %d\n",
				cpu, err);
			goto out_err;
		} else if (irq == 0) {
			pr_warn("No ACPI PMU IRQ for CPU%d\n", cpu);
		}

		/*
		 * Log and request the IRQ so the core arm_pmu code can manage
		 * it. We'll have to sanity-check IRQs later when we associate
		 * them with their PMUs.
		 */
		per_cpu(pmu_irqs, cpu) = irq;
		err = armpmu_request_irq(irq, cpu);
		if (err)
			goto out_err;
	}

	return 0;

out_err:
	for_each_possible_cpu(cpu) {
		irq = per_cpu(pmu_irqs, cpu);
		if (!irq)
			continue;

		arm_pmu_acpi_unregister_irq(cpu);

		/*
		 * Blat all copies of the IRQ so that we only unregister the
		 * corresponding GSI once (e.g. when we have PPIs).
		 */
		for_each_possible_cpu(irq_cpu) {
			if (per_cpu(pmu_irqs, irq_cpu) == irq)
				per_cpu(pmu_irqs, irq_cpu) = 0;
		}
	}

	return err;
}

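/*
 * Find an already-probed PMU whose MIDR matches that of the current CPU.
 * Called from the STARTING hotplug callback, i.e. on the CPU being brought up.
 */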
static struct arm_pmu *arm_pmu_acpi_find_pmu(void)
{
	unsigned long cpuid = read_cpuid_id();
	struct arm_pmu *pmu;
	int cpu;

	for_each_possible_cpu(cpu) {
		pmu = per_cpu(probed_pmus, cpu);
		if (!pmu || pmu->acpi_cpuid != cpuid)
			continue;

		return pmu;
	}

	return NULL;
}

/*
 * Check whether the new IRQ is compatible with those already associated with
 * the PMU (e.g. we don't have mismatched PPIs).
 */
static bool pmu_irq_matches(struct arm_pmu *pmu, int irq)
{
	struct pmu_hw_events __percpu *hw_events = pmu->hw_events;
	int cpu;

	if (!irq)
		return true;

	for_each_cpu(cpu, &pmu->supported_cpus) {
		int other_irq = per_cpu(hw_events->irq, cpu);
		if (!other_irq)
			continue;

		if (irq == other_irq)
			continue;
		if (!irq_is_percpu_devid(irq) && !irq_is_percpu_devid(other_irq))
			continue;

		pr_warn("mismatched PPIs detected\n");
		return false;
	}

	return true;
}

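/*
 * Associate a CPU with a PMU: record the probed PMU for the CPU, wire up the
 * CPU's IRQ in the PMU's hw_events (if compatible), and mark the CPU as
 * supported by the PMU.
 */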
static void arm_pmu_acpi_associate_pmu_cpu(struct arm_pmu *pmu,
					   unsigned int cpu)
{
	int irq = per_cpu(pmu_irqs, cpu);

	per_cpu(probed_pmus, cpu) = pmu;

	if (pmu_irq_matches(pmu, irq)) {
		struct pmu_hw_events __percpu *hw_events;
		hw_events = pmu->hw_events;
		per_cpu(hw_events->irq, cpu) = irq;
	}

	cpumask_set_cpu(cpu, &pmu->supported_cpus);
}

/*
 * This must run before the common arm_pmu hotplug logic, so that we can
 * associate a CPU and its interrupt before the common code tries to manage the
 * affinity and so on.
 *
 * Note that hotplug events are serialized, so we cannot race with another CPU
 * coming up. The perf core won't open events while a hotplug event is in
 * progress.
 */
static int arm_pmu_acpi_cpu_starting(unsigned int cpu)
{
	struct arm_pmu *pmu;

	/* If we've already probed this CPU, we have nothing to do */
	if (per_cpu(probed_pmus, cpu))
		return 0;

	pmu = arm_pmu_acpi_find_pmu();
	if (!pmu) {
		pr_warn_ratelimited("Unable to associate CPU%d with a PMU\n",
				    cpu);
		return 0;
	}

	arm_pmu_acpi_associate_pmu_cpu(pmu, cpu);
	return 0;
}

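/* Associate every online CPU whose MIDR matches @cpuid with @pmu. */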
static void arm_pmu_acpi_probe_matching_cpus(struct arm_pmu *pmu,
					     unsigned long cpuid)
{
	int cpu;

	for_each_online_cpu(cpu) {
		unsigned long cpu_cpuid = per_cpu(cpu_data, cpu).reg_midr;

		if (cpu_cpuid == cpuid)
			arm_pmu_acpi_associate_pmu_cpu(pmu, cpu);
	}
}

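/*
 * Probe and register the CPU PMUs described by ACPI. PMU interrupts are
 * parsed from the MADT up front; a PMU instance is then allocated, initialised
 * via @init_fn and registered for each distinct MIDR found among the online
 * CPUs, with a "_<n>" suffix appended to keep the instance names unique.
 */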
int arm_pmu_acpi_probe(armpmu_init_fn init_fn)
{
	int pmu_idx = 0;
	unsigned int cpu;
	int ret;

	ret = arm_pmu_acpi_parse_irqs();
	if (ret)
		return ret;

	ret = cpuhp_setup_state_nocalls(CPUHP_AP_PERF_ARM_ACPI_STARTING,
					"perf/arm/pmu_acpi:starting",
					arm_pmu_acpi_cpu_starting, NULL);
	if (ret)
		return ret;

	/*
	 * Initialise and register the set of PMUs which we know about right
	 * now. Ideally we'd do this in arm_pmu_acpi_cpu_starting() so that we
	 * could handle late hotplug, but this may lead to deadlock since we
	 * might try to register a hotplug notifier instance from within a
	 * hotplug notifier.
	 *
	 * There's also the problem of having access to the right init_fn,
	 * without tying this too deeply into the "real" PMU driver.
	 *
	 * For the moment, as with the platform/DT case, we need at least one
	 * of a PMU's CPUs to be online at probe time.
	 */
	for_each_online_cpu(cpu) {
		struct arm_pmu *pmu = per_cpu(probed_pmus, cpu);
		unsigned long cpuid;
		char *base_name;

		/* If we've already probed this CPU, we have nothing to do */
		if (pmu)
			continue;

		pmu = armpmu_alloc();
		if (!pmu) {
			pr_warn("Unable to allocate PMU for CPU%d\n",
				cpu);
			return -ENOMEM;
		}

		cpuid = per_cpu(cpu_data, cpu).reg_midr;
		pmu->acpi_cpuid = cpuid;

		arm_pmu_acpi_probe_matching_cpus(pmu, cpuid);

		ret = init_fn(pmu);
		if (ret == -ENODEV) {
			/* PMU not handled by this driver, or not present */
			continue;
		} else if (ret) {
			pr_warn("Unable to initialise PMU for CPU%d\n", cpu);
			return ret;
		}

		base_name = pmu->name;
		pmu->name = kasprintf(GFP_KERNEL, "%s_%d", base_name, pmu_idx++);
		if (!pmu->name) {
			pr_warn("Unable to allocate PMU name for CPU%d\n", cpu);
			return -ENOMEM;
		}

		ret = armpmu_register(pmu);
		if (ret) {
			pr_warn("Failed to register PMU for CPU%d\n", cpu);
			kfree(pmu->name);
			return ret;
		}
	}

	return ret;
}

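/*
 * When booting with ACPI, register the SPE and TRBE platform devices early;
 * the CPU PMUs themselves are probed separately via arm_pmu_acpi_probe().
 */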
static int arm_pmu_acpi_init(void)
{
	if (acpi_disabled)
		return 0;

	arm_spe_acpi_register_device();
	arm_trbe_acpi_register_device();

	return 0;
}
subsys_initcall(arm_pmu_acpi_init)