162306a36Sopenharmony_ci// SPDX-License-Identifier: GPL-2.0
262306a36Sopenharmony_ci/*
362306a36Sopenharmony_ci * Arch specific cpu topology information
462306a36Sopenharmony_ci *
562306a36Sopenharmony_ci * Copyright (C) 2016, ARM Ltd.
662306a36Sopenharmony_ci * Written by: Juri Lelli, ARM Ltd.
762306a36Sopenharmony_ci */
862306a36Sopenharmony_ci
962306a36Sopenharmony_ci#include <linux/acpi.h>
1062306a36Sopenharmony_ci#include <linux/cacheinfo.h>
1162306a36Sopenharmony_ci#include <linux/cpu.h>
1262306a36Sopenharmony_ci#include <linux/cpufreq.h>
1362306a36Sopenharmony_ci#include <linux/device.h>
1462306a36Sopenharmony_ci#include <linux/of.h>
1562306a36Sopenharmony_ci#include <linux/slab.h>
1662306a36Sopenharmony_ci#include <linux/sched/topology.h>
1762306a36Sopenharmony_ci#include <linux/cpuset.h>
1862306a36Sopenharmony_ci#include <linux/cpumask.h>
1962306a36Sopenharmony_ci#include <linux/init.h>
2062306a36Sopenharmony_ci#include <linux/rcupdate.h>
2162306a36Sopenharmony_ci#include <linux/sched.h>
2262306a36Sopenharmony_ci
2362306a36Sopenharmony_ci#define CREATE_TRACE_POINTS
2462306a36Sopenharmony_ci#include <trace/events/thermal_pressure.h>
2562306a36Sopenharmony_ci
/* Per-CPU pointer to the active counter-based scale-freq source; RCU protected */
static DEFINE_PER_CPU(struct scale_freq_data __rcu *, sft_data);
/* CPUs that currently have a counter-based scale-freq source registered */
static struct cpumask scale_freq_counters_mask;
/* Cached result of topology_scale_freq_invariant(); avoids redundant rebuilds */
static bool scale_freq_invariant;
/* Per-CPU max-frequency factor in MHz (stays 1 until a real value is parsed) */
static DEFINE_PER_CPU(u32, freq_factor) = 1;
3062306a36Sopenharmony_ci
/*
 * Return true if every CPU in @cpus has a counter-based scale-freq source
 * registered, i.e. @cpus is a subset of scale_freq_counters_mask.
 */
static bool supports_scale_freq_counters(const struct cpumask *cpus)
{
	return cpumask_subset(cpus, &scale_freq_counters_mask);
}
3562306a36Sopenharmony_ci
/*
 * Frequency invariance is available when either cpufreq provides it or
 * all online CPUs are covered by counter-based scale-freq sources.
 */
bool topology_scale_freq_invariant(void)
{
	return cpufreq_supports_freq_invariance() ||
	       supports_scale_freq_counters(cpu_online_mask);
}
4162306a36Sopenharmony_ci
/*
 * Update the cached invariance state after a scale-freq source was set
 * (@status == true) or cleared (@status == false), rebuilding sched
 * domains only when the effective support state actually changed.
 */
static void update_scale_freq_invariant(bool status)
{
	/* No transition in the requested direction: nothing to do. */
	if (scale_freq_invariant == status)
		return;

	/*
	 * Task scheduler behavior depends on frequency invariance support,
	 * either cpufreq or counter driven. If the support status changes as
	 * a result of counter initialisation and use, retrigger the build of
	 * scheduling domains to ensure the information is propagated properly.
	 */
	if (topology_scale_freq_invariant() == status) {
		scale_freq_invariant = status;
		rebuild_sched_domains_energy();
	}
}
5862306a36Sopenharmony_ci
/**
 * topology_set_scale_freq_source - register a counter-based scale-freq source
 * @data: source descriptor (source id and set_freq_scale() callback)
 * @cpus: CPUs this source can provide frequency scaling for
 *
 * Installs @data as the per-CPU scale-freq source for each CPU in @cpus,
 * except where an SCALE_FREQ_SOURCE_ARCH source is already registered,
 * which always takes precedence.
 */
void topology_set_scale_freq_source(struct scale_freq_data *data,
				    const struct cpumask *cpus)
{
	struct scale_freq_data *sfd;
	int cpu;

	/*
	 * Avoid calling rebuild_sched_domains() unnecessarily if FIE is
	 * supported by cpufreq.
	 */
	if (cpumask_empty(&scale_freq_counters_mask))
		scale_freq_invariant = topology_scale_freq_invariant();

	rcu_read_lock();

	for_each_cpu(cpu, cpus) {
		sfd = rcu_dereference(*per_cpu_ptr(&sft_data, cpu));

		/* Use ARCH provided counters whenever possible */
		if (!sfd || sfd->source != SCALE_FREQ_SOURCE_ARCH) {
			rcu_assign_pointer(per_cpu(sft_data, cpu), data);
			cpumask_set_cpu(cpu, &scale_freq_counters_mask);
		}
	}

	rcu_read_unlock();

	update_scale_freq_invariant(true);
}
EXPORT_SYMBOL_GPL(topology_set_scale_freq_source);
8962306a36Sopenharmony_ci
/**
 * topology_clear_scale_freq_source - unregister a counter-based scale-freq source
 * @source: id of the source to remove
 * @cpus: CPUs to remove the source from
 *
 * Clears the per-CPU scale-freq pointer for each CPU in @cpus whose
 * registered source matches @source, then waits for a grace period so no
 * CPU can still be using the old data.
 */
void topology_clear_scale_freq_source(enum scale_freq_source source,
				      const struct cpumask *cpus)
{
	struct scale_freq_data *sfd;
	int cpu;

	rcu_read_lock();

	for_each_cpu(cpu, cpus) {
		sfd = rcu_dereference(*per_cpu_ptr(&sft_data, cpu));

		/* Only clear entries owned by the requested source. */
		if (sfd && sfd->source == source) {
			rcu_assign_pointer(per_cpu(sft_data, cpu), NULL);
			cpumask_clear_cpu(cpu, &scale_freq_counters_mask);
		}
	}

	rcu_read_unlock();

	/*
	 * Make sure all references to previous sft_data are dropped to avoid
	 * use-after-free races.
	 */
	synchronize_rcu();

	update_scale_freq_invariant(false);
}
EXPORT_SYMBOL_GPL(topology_clear_scale_freq_source);
11862306a36Sopenharmony_ci
/*
 * Invoke this CPU's counter-based set_freq_scale() callback, if one is
 * registered. Uses rcu_dereference_sched(), so the caller is expected to
 * be in a preemption-disabled (sched-RCU) context — presumably the arch
 * tick path; confirm at call sites.
 */
void topology_scale_freq_tick(void)
{
	struct scale_freq_data *sfd = rcu_dereference_sched(*this_cpu_ptr(&sft_data));

	if (sfd)
		sfd->set_freq_scale();
}
12662306a36Sopenharmony_ci
/* Per-CPU frequency scale factor; SCHED_CAPACITY_SCALE means running at max */
DEFINE_PER_CPU(unsigned long, arch_freq_scale) = SCHED_CAPACITY_SCALE;
EXPORT_PER_CPU_SYMBOL_GPL(arch_freq_scale);
12962306a36Sopenharmony_ci
13062306a36Sopenharmony_civoid topology_set_freq_scale(const struct cpumask *cpus, unsigned long cur_freq,
13162306a36Sopenharmony_ci			     unsigned long max_freq)
13262306a36Sopenharmony_ci{
13362306a36Sopenharmony_ci	unsigned long scale;
13462306a36Sopenharmony_ci	int i;
13562306a36Sopenharmony_ci
13662306a36Sopenharmony_ci	if (WARN_ON_ONCE(!cur_freq || !max_freq))
13762306a36Sopenharmony_ci		return;
13862306a36Sopenharmony_ci
13962306a36Sopenharmony_ci	/*
14062306a36Sopenharmony_ci	 * If the use of counters for FIE is enabled, just return as we don't
14162306a36Sopenharmony_ci	 * want to update the scale factor with information from CPUFREQ.
14262306a36Sopenharmony_ci	 * Instead the scale factor will be updated from arch_scale_freq_tick.
14362306a36Sopenharmony_ci	 */
14462306a36Sopenharmony_ci	if (supports_scale_freq_counters(cpus))
14562306a36Sopenharmony_ci		return;
14662306a36Sopenharmony_ci
14762306a36Sopenharmony_ci	scale = (cur_freq << SCHED_CAPACITY_SHIFT) / max_freq;
14862306a36Sopenharmony_ci
14962306a36Sopenharmony_ci	for_each_cpu(i, cpus)
15062306a36Sopenharmony_ci		per_cpu(arch_freq_scale, i) = scale;
15162306a36Sopenharmony_ci}
15262306a36Sopenharmony_ci
/* Per-CPU capacity; SCHED_CAPACITY_SCALE is the capacity of the biggest CPU */
DEFINE_PER_CPU(unsigned long, cpu_scale) = SCHED_CAPACITY_SCALE;
EXPORT_PER_CPU_SYMBOL_GPL(cpu_scale);

/* Set the normalized capacity of @cpu (see topology_normalize_cpu_scale()). */
void topology_set_cpu_scale(unsigned int cpu, unsigned long capacity)
{
	per_cpu(cpu_scale, cpu) = capacity;
}
16062306a36Sopenharmony_ci
/* Per-CPU capacity lost to thermal capping (max_capacity - capped capacity) */
DEFINE_PER_CPU(unsigned long, thermal_pressure);

/**
 * topology_update_thermal_pressure() - Update thermal pressure for CPUs
 * @cpus        : The related CPUs for which capacity has been reduced
 * @capped_freq : The maximum allowed frequency that CPUs can run at
 *
 * Update the value of thermal pressure for all @cpus in the mask. The
 * cpumask should include all (online+offline) affected CPUs, to avoid
 * operating on stale data when hot-plug is used for some CPUs. The
 * @capped_freq reflects the currently allowed max CPUs frequency due to
 * thermal capping. It might be also a boost frequency value, which is bigger
 * than the internal 'freq_factor' max frequency. In such case the pressure
 * value should simply be removed, since this is an indication that there is
 * no thermal throttling. The @capped_freq must be provided in kHz.
 */
void topology_update_thermal_pressure(const struct cpumask *cpus,
				      unsigned long capped_freq)
{
	unsigned long max_capacity, capacity, th_pressure;
	u32 max_freq;
	int cpu;

	/* All CPUs in @cpus are assumed to share capacity and max frequency. */
	cpu = cpumask_first(cpus);
	max_capacity = arch_scale_cpu_capacity(cpu);
	max_freq = per_cpu(freq_factor, cpu);

	/* Convert to MHz scale which is used in 'freq_factor' */
	capped_freq /= 1000;

	/*
	 * Handle properly the boost frequencies, which should simply clean
	 * the thermal pressure value.
	 */
	if (max_freq <= capped_freq)
		capacity = max_capacity;
	else
		capacity = mult_frac(max_capacity, capped_freq, max_freq);

	th_pressure = max_capacity - capacity;

	trace_thermal_pressure_update(cpu, th_pressure);

	/* WRITE_ONCE pairs with lockless readers of thermal_pressure. */
	for_each_cpu(cpu, cpus)
		WRITE_ONCE(per_cpu(thermal_pressure, cpu), th_pressure);
}
EXPORT_SYMBOL_GPL(topology_update_thermal_pressure);
20862306a36Sopenharmony_ci
20962306a36Sopenharmony_cistatic ssize_t cpu_capacity_show(struct device *dev,
21062306a36Sopenharmony_ci				 struct device_attribute *attr,
21162306a36Sopenharmony_ci				 char *buf)
21262306a36Sopenharmony_ci{
21362306a36Sopenharmony_ci	struct cpu *cpu = container_of(dev, struct cpu, dev);
21462306a36Sopenharmony_ci
21562306a36Sopenharmony_ci	return sysfs_emit(buf, "%lu\n", topology_get_cpu_scale(cpu->dev.id));
21662306a36Sopenharmony_ci}
21762306a36Sopenharmony_ci
/* Deferred sched-domain rebuild; queued from cpufreq/capacity update paths */
static void update_topology_flags_workfn(struct work_struct *work);
static DECLARE_WORK(update_topology_flags_work, update_topology_flags_workfn);

/* Read-only sysfs attribute backed by cpu_capacity_show() */
static DEVICE_ATTR_RO(cpu_capacity);
22262306a36Sopenharmony_ci
22362306a36Sopenharmony_cistatic int register_cpu_capacity_sysctl(void)
22462306a36Sopenharmony_ci{
22562306a36Sopenharmony_ci	int i;
22662306a36Sopenharmony_ci	struct device *cpu;
22762306a36Sopenharmony_ci
22862306a36Sopenharmony_ci	for_each_possible_cpu(i) {
22962306a36Sopenharmony_ci		cpu = get_cpu_device(i);
23062306a36Sopenharmony_ci		if (!cpu) {
23162306a36Sopenharmony_ci			pr_err("%s: too early to get CPU%d device!\n",
23262306a36Sopenharmony_ci			       __func__, i);
23362306a36Sopenharmony_ci			continue;
23462306a36Sopenharmony_ci		}
23562306a36Sopenharmony_ci		device_create_file(cpu, &dev_attr_cpu_capacity);
23662306a36Sopenharmony_ci	}
23762306a36Sopenharmony_ci
23862306a36Sopenharmony_ci	return 0;
23962306a36Sopenharmony_ci}
24062306a36Sopenharmony_cisubsys_initcall(register_cpu_capacity_sysctl);
24162306a36Sopenharmony_ci
/* Non-zero while update_topology_flags_workfn() is rebuilding sched domains */
static int update_topology;

/*
 * Report whether a sched-domain rebuild triggered from this file is in
 * progress (the flag is set around rebuild_sched_domains() in
 * update_topology_flags_workfn()).
 */
int topology_update_cpu_topology(void)
{
	return update_topology;
}
24862306a36Sopenharmony_ci
/*
 * Updating the sched_domains can't be done directly from cpufreq callbacks
 * due to locking, so queue the work for later.
 */
static void update_topology_flags_workfn(struct work_struct *work)
{
	/* Flag the rebuild so topology_update_cpu_topology() reports it. */
	update_topology = 1;
	rebuild_sched_domains();
	pr_debug("sched_domain hierarchy rebuilt, flags updated\n");
	update_topology = 0;
}
26062306a36Sopenharmony_ci
/* Raw capacities parsed from DT/CPPC, indexed by cpu; freed after normalizing */
static u32 *raw_capacity;

/*
 * Release the raw capacity buffer. Returns 0 (int rather than void) so it
 * can double as an initcall when CONFIG_CPU_FREQ is disabled — see the
 * core_initcall() in the #else branch below.
 */
static int free_raw_capacity(void)
{
	kfree(raw_capacity);
	raw_capacity = NULL;

	return 0;
}
27062306a36Sopenharmony_ci
27162306a36Sopenharmony_civoid topology_normalize_cpu_scale(void)
27262306a36Sopenharmony_ci{
27362306a36Sopenharmony_ci	u64 capacity;
27462306a36Sopenharmony_ci	u64 capacity_scale;
27562306a36Sopenharmony_ci	int cpu;
27662306a36Sopenharmony_ci
27762306a36Sopenharmony_ci	if (!raw_capacity)
27862306a36Sopenharmony_ci		return;
27962306a36Sopenharmony_ci
28062306a36Sopenharmony_ci	capacity_scale = 1;
28162306a36Sopenharmony_ci	for_each_possible_cpu(cpu) {
28262306a36Sopenharmony_ci		capacity = raw_capacity[cpu] * per_cpu(freq_factor, cpu);
28362306a36Sopenharmony_ci		capacity_scale = max(capacity, capacity_scale);
28462306a36Sopenharmony_ci	}
28562306a36Sopenharmony_ci
28662306a36Sopenharmony_ci	pr_debug("cpu_capacity: capacity_scale=%llu\n", capacity_scale);
28762306a36Sopenharmony_ci	for_each_possible_cpu(cpu) {
28862306a36Sopenharmony_ci		capacity = raw_capacity[cpu] * per_cpu(freq_factor, cpu);
28962306a36Sopenharmony_ci		capacity = div64_u64(capacity << SCHED_CAPACITY_SHIFT,
29062306a36Sopenharmony_ci			capacity_scale);
29162306a36Sopenharmony_ci		topology_set_cpu_scale(cpu, capacity);
29262306a36Sopenharmony_ci		pr_debug("cpu_capacity: CPU%d cpu_capacity=%lu\n",
29362306a36Sopenharmony_ci			cpu, topology_get_cpu_scale(cpu));
29462306a36Sopenharmony_ci	}
29562306a36Sopenharmony_ci}
29662306a36Sopenharmony_ci
/*
 * Parse the "capacity-dmips-mhz" property of @cpu_node into raw_capacity[cpu]
 * and, when a CPU clock is available, record its rate (kHz) in freq_factor.
 *
 * Returns true if the capacity was parsed. Once parsing has failed for any
 * CPU (missing property or allocation failure), all further calls fail too
 * and the partially-filled raw_capacity buffer is discarded, so all CPUs
 * fall back to the default capacity.
 */
bool __init topology_parse_cpu_capacity(struct device_node *cpu_node, int cpu)
{
	struct clk *cpu_clk;
	static bool cap_parsing_failed;	/* sticky across calls */
	int ret;
	u32 cpu_capacity;

	if (cap_parsing_failed)
		return false;

	ret = of_property_read_u32(cpu_node, "capacity-dmips-mhz",
				   &cpu_capacity);
	if (!ret) {
		/* Lazily allocate the buffer on the first successful parse. */
		if (!raw_capacity) {
			raw_capacity = kcalloc(num_possible_cpus(),
					       sizeof(*raw_capacity),
					       GFP_KERNEL);
			if (!raw_capacity) {
				cap_parsing_failed = true;
				return false;
			}
		}
		raw_capacity[cpu] = cpu_capacity;
		pr_debug("cpu_capacity: %pOF cpu_capacity=%u (raw)\n",
			cpu_node, raw_capacity[cpu]);

		/*
		 * Update freq_factor for calculating early boot cpu capacities.
		 * For non-clk CPU DVFS mechanism, there's no way to get the
		 * frequency value now, assuming they are running at the same
		 * frequency (by keeping the initial freq_factor value).
		 */
		cpu_clk = of_clk_get(cpu_node, 0);
		if (!PTR_ERR_OR_ZERO(cpu_clk)) {
			per_cpu(freq_factor, cpu) =
				clk_get_rate(cpu_clk) / 1000;
			clk_put(cpu_clk);
		}
	} else {
		/* Property missing: if some CPUs had it, the data is partial. */
		if (raw_capacity) {
			pr_err("cpu_capacity: missing %pOF raw capacity\n",
				cpu_node);
			pr_err("cpu_capacity: partial information: fallback to 1024 for all CPUs\n");
		}
		cap_parsing_failed = true;
		free_raw_capacity();
	}

	return !ret;
}
34762306a36Sopenharmony_ci
34862306a36Sopenharmony_ci#ifdef CONFIG_ACPI_CPPC_LIB
34962306a36Sopenharmony_ci#include <acpi/cppc_acpi.h>
35062306a36Sopenharmony_ci
/*
 * Initialize CPU capacities from ACPI CPPC highest-performance values.
 * All-or-nothing: if any CPU lacks valid performance caps, the partial
 * data is discarded and all CPUs keep the default capacity.
 */
void topology_init_cpu_capacity_cppc(void)
{
	struct cppc_perf_caps perf_caps;
	int cpu;

	if (likely(!acpi_cpc_valid()))
		return;

	raw_capacity = kcalloc(num_possible_cpus(), sizeof(*raw_capacity),
			       GFP_KERNEL);
	if (!raw_capacity)
		return;

	for_each_possible_cpu(cpu) {
		/* highest_perf must be a sane upper bound to be usable. */
		if (!cppc_get_perf_caps(cpu, &perf_caps) &&
		    (perf_caps.highest_perf >= perf_caps.nominal_perf) &&
		    (perf_caps.highest_perf >= perf_caps.lowest_perf)) {
			raw_capacity[cpu] = perf_caps.highest_perf;
			pr_debug("cpu_capacity: CPU%d cpu_capacity=%u (raw).\n",
				 cpu, raw_capacity[cpu]);
			continue;
		}

		pr_err("cpu_capacity: CPU%d missing/invalid highest performance.\n", cpu);
		pr_err("cpu_capacity: partial information: fallback to 1024 for all CPUs\n");
		goto exit;
	}

	topology_normalize_cpu_scale();
	schedule_work(&update_topology_flags_work);
	pr_debug("cpu_capacity: cpu_capacity initialization done\n");

	/* Success path falls through: raw_capacity is freed either way. */
exit:
	free_raw_capacity();
}
38662306a36Sopenharmony_ci#endif
38762306a36Sopenharmony_ci
38862306a36Sopenharmony_ci#ifdef CONFIG_CPU_FREQ
/* CPUs whose cpufreq policy has not been seen yet during capacity init */
static cpumask_var_t cpus_to_visit;
/* Tears down the cpufreq notifier once every CPU's policy has been visited */
static void parsing_done_workfn(struct work_struct *work);
static DECLARE_WORK(parsing_done_work, parsing_done_workfn);
39262306a36Sopenharmony_ci
/*
 * cpufreq policy notifier: record each policy's max frequency in
 * freq_factor and, once every possible CPU's policy has been seen,
 * normalize capacities and schedule a sched-domain rebuild plus the
 * notifier teardown.
 */
static int
init_cpu_capacity_callback(struct notifier_block *nb,
			   unsigned long val,
			   void *data)
{
	struct cpufreq_policy *policy = data;
	int cpu;

	/* Nothing to do if DT capacity parsing never produced data. */
	if (!raw_capacity)
		return 0;

	if (val != CPUFREQ_CREATE_POLICY)
		return 0;

	pr_debug("cpu_capacity: init cpu capacity for CPUs [%*pbl] (to_visit=%*pbl)\n",
		 cpumask_pr_args(policy->related_cpus),
		 cpumask_pr_args(cpus_to_visit));

	cpumask_andnot(cpus_to_visit, cpus_to_visit, policy->related_cpus);

	/* Record this policy's max frequency in MHz for normalization. */
	for_each_cpu(cpu, policy->related_cpus)
		per_cpu(freq_factor, cpu) = policy->cpuinfo.max_freq / 1000;

	if (cpumask_empty(cpus_to_visit)) {
		topology_normalize_cpu_scale();
		schedule_work(&update_topology_flags_work);
		free_raw_capacity();
		pr_debug("cpu_capacity: parsing done\n");
		/* Unregistering from inside the notifier would deadlock;
		 * defer it to a workqueue. */
		schedule_work(&parsing_done_work);
	}

	return 0;
}
42662306a36Sopenharmony_ci
/* Notifier registered on the cpufreq policy chain during boot */
static struct notifier_block init_cpu_capacity_notifier = {
	.notifier_call = init_cpu_capacity_callback,
};
43062306a36Sopenharmony_ci
/*
 * Register the cpufreq policy notifier that finishes DT-based capacity
 * initialization. Skipped on ACPI systems and when no raw capacities
 * were parsed. Returns 0 on success or a negative errno.
 */
static int __init register_cpufreq_notifier(void)
{
	int ret;

	/*
	 * On ACPI-based systems skip registering cpufreq notifier as cpufreq
	 * information is not needed for cpu capacity initialization.
	 */
	if (!acpi_disabled || !raw_capacity)
		return -EINVAL;

	if (!alloc_cpumask_var(&cpus_to_visit, GFP_KERNEL))
		return -ENOMEM;

	/* Start with every possible CPU pending a policy notification. */
	cpumask_copy(cpus_to_visit, cpu_possible_mask);

	ret = cpufreq_register_notifier(&init_cpu_capacity_notifier,
					CPUFREQ_POLICY_NOTIFIER);

	if (ret)
		free_cpumask_var(cpus_to_visit);

	return ret;
}
core_initcall(register_cpufreq_notifier);
45662306a36Sopenharmony_ci
/*
 * Deferred teardown once capacity parsing is complete: unregister the
 * cpufreq notifier and release the visit mask.
 */
static void parsing_done_workfn(struct work_struct *work)
{
	cpufreq_unregister_notifier(&init_cpu_capacity_notifier,
					 CPUFREQ_POLICY_NOTIFIER);
	free_cpumask_var(cpus_to_visit);
}
46362306a36Sopenharmony_ci
46462306a36Sopenharmony_ci#else
46562306a36Sopenharmony_cicore_initcall(free_raw_capacity);
46662306a36Sopenharmony_ci#endif
46762306a36Sopenharmony_ci
46862306a36Sopenharmony_ci#if defined(CONFIG_ARM64) || defined(CONFIG_RISCV)
/*
 * This function returns the logical cpu number of the node.
 * There are basically three kinds of return values:
 * (1) logical cpu number which is >= 0.
 * (2) -ENODEV when the device tree(DT) node is valid and found in the DT but
 * there is no possible logical CPU in the kernel to match. This happens
 * when CONFIG_NR_CPUS is configured to be smaller than the number of
 * CPU nodes in DT. We need to just ignore this case.
 * (3) -1 if the node does not exist in the device tree
 */
static int __init get_cpu_for_node(struct device_node *node)
{
	struct device_node *cpu_node;
	int cpu;

	/* The cpu-map entry references the CPU node via a "cpu" phandle. */
	cpu_node = of_parse_phandle(node, "cpu", 0);
	if (!cpu_node)
		return -1;

	cpu = of_cpu_node_to_id(cpu_node);
	if (cpu >= 0)
		topology_parse_cpu_capacity(cpu_node, cpu);
	else
		pr_info("CPU node for %pOF exist but the possible cpu range is :%*pbl\n",
			cpu_node, cpumask_pr_args(cpu_possible_mask));

	of_node_put(cpu_node);
	return cpu;
}
49862306a36Sopenharmony_ci
/*
 * Parse one cpu-map "core" node: walk its threadN children (SMT) and fill
 * in cpu_topology for each mapped CPU. A core may either contain threads
 * or map directly to a CPU — having both is an error. Returns 0 or -EINVAL.
 */
static int __init parse_core(struct device_node *core, int package_id,
			     int cluster_id, int core_id)
{
	char name[20];
	bool leaf = true;	/* no threadN children found so far */
	int i = 0;
	int cpu;
	struct device_node *t;

	do {
		snprintf(name, sizeof(name), "thread%d", i);
		t = of_get_child_by_name(core, name);
		if (t) {
			leaf = false;
			cpu = get_cpu_for_node(t);
			if (cpu >= 0) {
				cpu_topology[cpu].package_id = package_id;
				cpu_topology[cpu].cluster_id = cluster_id;
				cpu_topology[cpu].core_id = core_id;
				cpu_topology[cpu].thread_id = i;
			} else if (cpu != -ENODEV) {
				/* -ENODEV (CPU beyond NR_CPUS) is ignored. */
				pr_err("%pOF: Can't get CPU for thread\n", t);
				of_node_put(t);
				return -EINVAL;
			}
			of_node_put(t);
		}
		i++;
	} while (t);

	cpu = get_cpu_for_node(core);
	if (cpu >= 0) {
		/* A core with threads must not also map to a CPU directly. */
		if (!leaf) {
			pr_err("%pOF: Core has both threads and CPU\n",
			       core);
			return -EINVAL;
		}

		cpu_topology[cpu].package_id = package_id;
		cpu_topology[cpu].cluster_id = cluster_id;
		cpu_topology[cpu].core_id = core_id;
	} else if (leaf && cpu != -ENODEV) {
		pr_err("%pOF: Can't get CPU for leaf core\n", core);
		return -EINVAL;
	}

	return 0;
}
54762306a36Sopenharmony_ci
/*
 * Recursively parse one cpu-map "cluster" node at @depth: descend into
 * clusterN children first, then parse coreN children, which are only
 * allowed in leaf clusters below the root. Returns 0 or a negative errno.
 */
static int __init parse_cluster(struct device_node *cluster, int package_id,
				int cluster_id, int depth)
{
	char name[20];
	bool leaf = true;	/* no clusterN children found */
	bool has_cores = false;
	struct device_node *c;
	int core_id = 0;
	int i, ret;

	/*
	 * First check for child clusters; we currently ignore any
	 * information about the nesting of clusters and present the
	 * scheduler with a flat list of them.
	 */
	i = 0;
	do {
		snprintf(name, sizeof(name), "cluster%d", i);
		c = of_get_child_by_name(cluster, name);
		if (c) {
			leaf = false;
			ret = parse_cluster(c, package_id, i, depth + 1);
			if (depth > 0)
				pr_warn("Topology for clusters of clusters not yet supported\n");
			of_node_put(c);
			if (ret != 0)
				return ret;
		}
		i++;
	} while (c);

	/* Now check for cores */
	i = 0;
	do {
		snprintf(name, sizeof(name), "core%d", i);
		c = of_get_child_by_name(cluster, name);
		if (c) {
			has_cores = true;

			/* Cores directly under the cpu-map root are invalid. */
			if (depth == 0) {
				pr_err("%pOF: cpu-map children should be clusters\n",
				       c);
				of_node_put(c);
				return -EINVAL;
			}

			if (leaf) {
				ret = parse_core(c, package_id, cluster_id,
						 core_id++);
			} else {
				pr_err("%pOF: Non-leaf cluster with core %s\n",
				       cluster, name);
				ret = -EINVAL;
			}

			of_node_put(c);
			if (ret != 0)
				return ret;
		}
		i++;
	} while (c);

	if (leaf && !has_cores)
		pr_warn("%pOF: empty cluster\n", cluster);

	return 0;
}
61562306a36Sopenharmony_ci
/*
 * Parse socketN children of the cpu-map node, one package per socket.
 * Older DTs without socket nodes treat the cpu-map itself as a single
 * package-0 cluster. Returns 0 or a negative errno from parse_cluster().
 */
static int __init parse_socket(struct device_node *socket)
{
	char name[20];
	struct device_node *c;
	bool has_socket = false;
	int package_id = 0, ret;

	do {
		snprintf(name, sizeof(name), "socket%d", package_id);
		c = of_get_child_by_name(socket, name);
		if (c) {
			has_socket = true;
			ret = parse_cluster(c, package_id, -1, 0);
			of_node_put(c);
			if (ret != 0)
				return ret;
		}
		package_id++;
	} while (c);

	/* No socket nodes: fall back to treating cpu-map as package 0. */
	if (!has_socket)
		ret = parse_cluster(socket, 0, -1, 0);

	return ret;
}
64162306a36Sopenharmony_ci
64262306a36Sopenharmony_cistatic int __init parse_dt_topology(void)
64362306a36Sopenharmony_ci{
64462306a36Sopenharmony_ci	struct device_node *cn, *map;
64562306a36Sopenharmony_ci	int ret = 0;
64662306a36Sopenharmony_ci	int cpu;
64762306a36Sopenharmony_ci
64862306a36Sopenharmony_ci	cn = of_find_node_by_path("/cpus");
64962306a36Sopenharmony_ci	if (!cn) {
65062306a36Sopenharmony_ci		pr_err("No CPU information found in DT\n");
65162306a36Sopenharmony_ci		return 0;
65262306a36Sopenharmony_ci	}
65362306a36Sopenharmony_ci
65462306a36Sopenharmony_ci	/*
65562306a36Sopenharmony_ci	 * When topology is provided cpu-map is essentially a root
65662306a36Sopenharmony_ci	 * cluster with restricted subnodes.
65762306a36Sopenharmony_ci	 */
65862306a36Sopenharmony_ci	map = of_get_child_by_name(cn, "cpu-map");
65962306a36Sopenharmony_ci	if (!map)
66062306a36Sopenharmony_ci		goto out;
66162306a36Sopenharmony_ci
66262306a36Sopenharmony_ci	ret = parse_socket(map);
66362306a36Sopenharmony_ci	if (ret != 0)
66462306a36Sopenharmony_ci		goto out_map;
66562306a36Sopenharmony_ci
66662306a36Sopenharmony_ci	topology_normalize_cpu_scale();
66762306a36Sopenharmony_ci
66862306a36Sopenharmony_ci	/*
66962306a36Sopenharmony_ci	 * Check that all cores are in the topology; the SMP code will
67062306a36Sopenharmony_ci	 * only mark cores described in the DT as possible.
67162306a36Sopenharmony_ci	 */
67262306a36Sopenharmony_ci	for_each_possible_cpu(cpu)
67362306a36Sopenharmony_ci		if (cpu_topology[cpu].package_id < 0) {
67462306a36Sopenharmony_ci			ret = -EINVAL;
67562306a36Sopenharmony_ci			break;
67662306a36Sopenharmony_ci		}
67762306a36Sopenharmony_ci
67862306a36Sopenharmony_ciout_map:
67962306a36Sopenharmony_ci	of_node_put(map);
68062306a36Sopenharmony_ciout:
68162306a36Sopenharmony_ci	of_node_put(cn);
68262306a36Sopenharmony_ci	return ret;
68362306a36Sopenharmony_ci}
68462306a36Sopenharmony_ci#endif
68562306a36Sopenharmony_ci
68662306a36Sopenharmony_ci/*
68762306a36Sopenharmony_ci * cpu topology table
68862306a36Sopenharmony_ci */
68962306a36Sopenharmony_cistruct cpu_topology cpu_topology[NR_CPUS];
69062306a36Sopenharmony_ciEXPORT_SYMBOL_GPL(cpu_topology);
69162306a36Sopenharmony_ci
69262306a36Sopenharmony_ciconst struct cpumask *cpu_coregroup_mask(int cpu)
69362306a36Sopenharmony_ci{
69462306a36Sopenharmony_ci	const cpumask_t *core_mask = cpumask_of_node(cpu_to_node(cpu));
69562306a36Sopenharmony_ci
69662306a36Sopenharmony_ci	/* Find the smaller of NUMA, core or LLC siblings */
69762306a36Sopenharmony_ci	if (cpumask_subset(&cpu_topology[cpu].core_sibling, core_mask)) {
69862306a36Sopenharmony_ci		/* not numa in package, lets use the package siblings */
69962306a36Sopenharmony_ci		core_mask = &cpu_topology[cpu].core_sibling;
70062306a36Sopenharmony_ci	}
70162306a36Sopenharmony_ci
70262306a36Sopenharmony_ci	if (last_level_cache_is_valid(cpu)) {
70362306a36Sopenharmony_ci		if (cpumask_subset(&cpu_topology[cpu].llc_sibling, core_mask))
70462306a36Sopenharmony_ci			core_mask = &cpu_topology[cpu].llc_sibling;
70562306a36Sopenharmony_ci	}
70662306a36Sopenharmony_ci
70762306a36Sopenharmony_ci	/*
70862306a36Sopenharmony_ci	 * For systems with no shared cpu-side LLC but with clusters defined,
70962306a36Sopenharmony_ci	 * extend core_mask to cluster_siblings. The sched domain builder will
71062306a36Sopenharmony_ci	 * then remove MC as redundant with CLS if SCHED_CLUSTER is enabled.
71162306a36Sopenharmony_ci	 */
71262306a36Sopenharmony_ci	if (IS_ENABLED(CONFIG_SCHED_CLUSTER) &&
71362306a36Sopenharmony_ci	    cpumask_subset(core_mask, &cpu_topology[cpu].cluster_sibling))
71462306a36Sopenharmony_ci		core_mask = &cpu_topology[cpu].cluster_sibling;
71562306a36Sopenharmony_ci
71662306a36Sopenharmony_ci	return core_mask;
71762306a36Sopenharmony_ci}
71862306a36Sopenharmony_ci
71962306a36Sopenharmony_ciconst struct cpumask *cpu_clustergroup_mask(int cpu)
72062306a36Sopenharmony_ci{
72162306a36Sopenharmony_ci	/*
72262306a36Sopenharmony_ci	 * Forbid cpu_clustergroup_mask() to span more or the same CPUs as
72362306a36Sopenharmony_ci	 * cpu_coregroup_mask().
72462306a36Sopenharmony_ci	 */
72562306a36Sopenharmony_ci	if (cpumask_subset(cpu_coregroup_mask(cpu),
72662306a36Sopenharmony_ci			   &cpu_topology[cpu].cluster_sibling))
72762306a36Sopenharmony_ci		return topology_sibling_cpumask(cpu);
72862306a36Sopenharmony_ci
72962306a36Sopenharmony_ci	return &cpu_topology[cpu].cluster_sibling;
73062306a36Sopenharmony_ci}
73162306a36Sopenharmony_ci
73262306a36Sopenharmony_civoid update_siblings_masks(unsigned int cpuid)
73362306a36Sopenharmony_ci{
73462306a36Sopenharmony_ci	struct cpu_topology *cpu_topo, *cpuid_topo = &cpu_topology[cpuid];
73562306a36Sopenharmony_ci	int cpu, ret;
73662306a36Sopenharmony_ci
73762306a36Sopenharmony_ci	ret = detect_cache_attributes(cpuid);
73862306a36Sopenharmony_ci	if (ret && ret != -ENOENT)
73962306a36Sopenharmony_ci		pr_info("Early cacheinfo allocation failed, ret = %d\n", ret);
74062306a36Sopenharmony_ci
74162306a36Sopenharmony_ci	/* update core and thread sibling masks */
74262306a36Sopenharmony_ci	for_each_online_cpu(cpu) {
74362306a36Sopenharmony_ci		cpu_topo = &cpu_topology[cpu];
74462306a36Sopenharmony_ci
74562306a36Sopenharmony_ci		if (last_level_cache_is_shared(cpu, cpuid)) {
74662306a36Sopenharmony_ci			cpumask_set_cpu(cpu, &cpuid_topo->llc_sibling);
74762306a36Sopenharmony_ci			cpumask_set_cpu(cpuid, &cpu_topo->llc_sibling);
74862306a36Sopenharmony_ci		}
74962306a36Sopenharmony_ci
75062306a36Sopenharmony_ci		if (cpuid_topo->package_id != cpu_topo->package_id)
75162306a36Sopenharmony_ci			continue;
75262306a36Sopenharmony_ci
75362306a36Sopenharmony_ci		cpumask_set_cpu(cpuid, &cpu_topo->core_sibling);
75462306a36Sopenharmony_ci		cpumask_set_cpu(cpu, &cpuid_topo->core_sibling);
75562306a36Sopenharmony_ci
75662306a36Sopenharmony_ci		if (cpuid_topo->cluster_id != cpu_topo->cluster_id)
75762306a36Sopenharmony_ci			continue;
75862306a36Sopenharmony_ci
75962306a36Sopenharmony_ci		if (cpuid_topo->cluster_id >= 0) {
76062306a36Sopenharmony_ci			cpumask_set_cpu(cpu, &cpuid_topo->cluster_sibling);
76162306a36Sopenharmony_ci			cpumask_set_cpu(cpuid, &cpu_topo->cluster_sibling);
76262306a36Sopenharmony_ci		}
76362306a36Sopenharmony_ci
76462306a36Sopenharmony_ci		if (cpuid_topo->core_id != cpu_topo->core_id)
76562306a36Sopenharmony_ci			continue;
76662306a36Sopenharmony_ci
76762306a36Sopenharmony_ci		cpumask_set_cpu(cpuid, &cpu_topo->thread_sibling);
76862306a36Sopenharmony_ci		cpumask_set_cpu(cpu, &cpuid_topo->thread_sibling);
76962306a36Sopenharmony_ci	}
77062306a36Sopenharmony_ci}
77162306a36Sopenharmony_ci
77262306a36Sopenharmony_cistatic void clear_cpu_topology(int cpu)
77362306a36Sopenharmony_ci{
77462306a36Sopenharmony_ci	struct cpu_topology *cpu_topo = &cpu_topology[cpu];
77562306a36Sopenharmony_ci
77662306a36Sopenharmony_ci	cpumask_clear(&cpu_topo->llc_sibling);
77762306a36Sopenharmony_ci	cpumask_set_cpu(cpu, &cpu_topo->llc_sibling);
77862306a36Sopenharmony_ci
77962306a36Sopenharmony_ci	cpumask_clear(&cpu_topo->cluster_sibling);
78062306a36Sopenharmony_ci	cpumask_set_cpu(cpu, &cpu_topo->cluster_sibling);
78162306a36Sopenharmony_ci
78262306a36Sopenharmony_ci	cpumask_clear(&cpu_topo->core_sibling);
78362306a36Sopenharmony_ci	cpumask_set_cpu(cpu, &cpu_topo->core_sibling);
78462306a36Sopenharmony_ci	cpumask_clear(&cpu_topo->thread_sibling);
78562306a36Sopenharmony_ci	cpumask_set_cpu(cpu, &cpu_topo->thread_sibling);
78662306a36Sopenharmony_ci}
78762306a36Sopenharmony_ci
78862306a36Sopenharmony_civoid __init reset_cpu_topology(void)
78962306a36Sopenharmony_ci{
79062306a36Sopenharmony_ci	unsigned int cpu;
79162306a36Sopenharmony_ci
79262306a36Sopenharmony_ci	for_each_possible_cpu(cpu) {
79362306a36Sopenharmony_ci		struct cpu_topology *cpu_topo = &cpu_topology[cpu];
79462306a36Sopenharmony_ci
79562306a36Sopenharmony_ci		cpu_topo->thread_id = -1;
79662306a36Sopenharmony_ci		cpu_topo->core_id = -1;
79762306a36Sopenharmony_ci		cpu_topo->cluster_id = -1;
79862306a36Sopenharmony_ci		cpu_topo->package_id = -1;
79962306a36Sopenharmony_ci
80062306a36Sopenharmony_ci		clear_cpu_topology(cpu);
80162306a36Sopenharmony_ci	}
80262306a36Sopenharmony_ci}
80362306a36Sopenharmony_ci
80462306a36Sopenharmony_civoid remove_cpu_topology(unsigned int cpu)
80562306a36Sopenharmony_ci{
80662306a36Sopenharmony_ci	int sibling;
80762306a36Sopenharmony_ci
80862306a36Sopenharmony_ci	for_each_cpu(sibling, topology_core_cpumask(cpu))
80962306a36Sopenharmony_ci		cpumask_clear_cpu(cpu, topology_core_cpumask(sibling));
81062306a36Sopenharmony_ci	for_each_cpu(sibling, topology_sibling_cpumask(cpu))
81162306a36Sopenharmony_ci		cpumask_clear_cpu(cpu, topology_sibling_cpumask(sibling));
81262306a36Sopenharmony_ci	for_each_cpu(sibling, topology_cluster_cpumask(cpu))
81362306a36Sopenharmony_ci		cpumask_clear_cpu(cpu, topology_cluster_cpumask(sibling));
81462306a36Sopenharmony_ci	for_each_cpu(sibling, topology_llc_cpumask(cpu))
81562306a36Sopenharmony_ci		cpumask_clear_cpu(cpu, topology_llc_cpumask(sibling));
81662306a36Sopenharmony_ci
81762306a36Sopenharmony_ci	clear_cpu_topology(cpu);
81862306a36Sopenharmony_ci}
81962306a36Sopenharmony_ci
82062306a36Sopenharmony_ci__weak int __init parse_acpi_topology(void)
82162306a36Sopenharmony_ci{
82262306a36Sopenharmony_ci	return 0;
82362306a36Sopenharmony_ci}
82462306a36Sopenharmony_ci
82562306a36Sopenharmony_ci#if defined(CONFIG_ARM64) || defined(CONFIG_RISCV)
82662306a36Sopenharmony_civoid __init init_cpu_topology(void)
82762306a36Sopenharmony_ci{
82862306a36Sopenharmony_ci	int cpu, ret;
82962306a36Sopenharmony_ci
83062306a36Sopenharmony_ci	reset_cpu_topology();
83162306a36Sopenharmony_ci	ret = parse_acpi_topology();
83262306a36Sopenharmony_ci	if (!ret)
83362306a36Sopenharmony_ci		ret = of_have_populated_dt() && parse_dt_topology();
83462306a36Sopenharmony_ci
83562306a36Sopenharmony_ci	if (ret) {
83662306a36Sopenharmony_ci		/*
83762306a36Sopenharmony_ci		 * Discard anything that was parsed if we hit an error so we
83862306a36Sopenharmony_ci		 * don't use partial information. But do not return yet to give
83962306a36Sopenharmony_ci		 * arch-specific early cache level detection a chance to run.
84062306a36Sopenharmony_ci		 */
84162306a36Sopenharmony_ci		reset_cpu_topology();
84262306a36Sopenharmony_ci	}
84362306a36Sopenharmony_ci
84462306a36Sopenharmony_ci	for_each_possible_cpu(cpu) {
84562306a36Sopenharmony_ci		ret = fetch_cache_info(cpu);
84662306a36Sopenharmony_ci		if (!ret)
84762306a36Sopenharmony_ci			continue;
84862306a36Sopenharmony_ci		else if (ret != -ENOENT)
84962306a36Sopenharmony_ci			pr_err("Early cacheinfo failed, ret = %d\n", ret);
85062306a36Sopenharmony_ci		return;
85162306a36Sopenharmony_ci	}
85262306a36Sopenharmony_ci}
85362306a36Sopenharmony_ci
85462306a36Sopenharmony_civoid store_cpu_topology(unsigned int cpuid)
85562306a36Sopenharmony_ci{
85662306a36Sopenharmony_ci	struct cpu_topology *cpuid_topo = &cpu_topology[cpuid];
85762306a36Sopenharmony_ci
85862306a36Sopenharmony_ci	if (cpuid_topo->package_id != -1)
85962306a36Sopenharmony_ci		goto topology_populated;
86062306a36Sopenharmony_ci
86162306a36Sopenharmony_ci	cpuid_topo->thread_id = -1;
86262306a36Sopenharmony_ci	cpuid_topo->core_id = cpuid;
86362306a36Sopenharmony_ci	cpuid_topo->package_id = cpu_to_node(cpuid);
86462306a36Sopenharmony_ci
86562306a36Sopenharmony_ci	pr_debug("CPU%u: package %d core %d thread %d\n",
86662306a36Sopenharmony_ci		 cpuid, cpuid_topo->package_id, cpuid_topo->core_id,
86762306a36Sopenharmony_ci		 cpuid_topo->thread_id);
86862306a36Sopenharmony_ci
86962306a36Sopenharmony_citopology_populated:
87062306a36Sopenharmony_ci	update_siblings_masks(cpuid);
87162306a36Sopenharmony_ci}
87262306a36Sopenharmony_ci#endif
873