// SPDX-License-Identifier: GPL-2.0
/*
 * Arch specific cpu topology information
 *
 * Copyright (C) 2016, ARM Ltd.
 * Written by: Juri Lelli, ARM Ltd.
 */

#include <linux/acpi.h>
#include <linux/cpu.h>
#include <linux/cpufreq.h>
#include <linux/device.h>
#include <linux/of.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/sched/topology.h>
#include <linux/cpuset.h>
#include <linux/cpumask.h>
#include <linux/init.h>
#include <linux/percpu.h>
#include <linux/sched.h>
#include <linux/smp.h>

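/*
 * Frequency-invariant load tracking is supported when either cpufreq can
 * provide frequency invariance or the architecture exposes suitable
 * activity counters (see arch_freq_counters_available() below).
 */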
bool topology_scale_freq_invariant(void)
{
	return cpufreq_supports_freq_invariance() ||
	       arch_freq_counters_available(cpu_online_mask);
}

__weak bool arch_freq_counters_available(const struct cpumask *cpus)
{
	return false;
}

DEFINE_PER_CPU(unsigned long, freq_scale) = SCHED_CAPACITY_SCALE;

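/*
 * Update the per-CPU frequency scale factor for the CPUs in @cpus:
 *
 *	freq_scale = (cur_freq << SCHED_CAPACITY_SHIFT) / max_freq
 *
 * e.g. 1000000 kHz current vs 2000000 kHz maximum gives
 * (1000000 * 1024) / 2000000 = 512 (illustrative figures). This helper is
 * typically wired up as the cpufreq core's arch_set_freq_scale() hook.
 */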
void topology_set_freq_scale(const struct cpumask *cpus, unsigned long cur_freq,
			     unsigned long max_freq)
{
	unsigned long scale;
	int i;

	if (WARN_ON_ONCE(!cur_freq || !max_freq))
		return;

	/*
	 * If counter-based FIE is in use, return early: the scale factor
	 * must not be overwritten with cpufreq information, as it is
	 * updated from arch_scale_freq_tick() instead.
	 */
	if (arch_freq_counters_available(cpus))
		return;

	scale = (cur_freq << SCHED_CAPACITY_SHIFT) / max_freq;

	for_each_cpu(i, cpus)
		per_cpu(freq_scale, i) = scale;
}

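/*
 * Per-CPU compute capacity, normalized so that the most capable CPU in the
 * system ends up at SCHED_CAPACITY_SCALE (1024). Readers use
 * topology_get_cpu_scale().
 */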
DEFINE_PER_CPU(unsigned long, cpu_scale) = SCHED_CAPACITY_SCALE;

void topology_set_cpu_scale(unsigned int cpu, unsigned long capacity)
{
	per_cpu(cpu_scale, cpu) = capacity;
}

DEFINE_PER_CPU(unsigned long, thermal_pressure);

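/*
 * Record the capacity currently lost to thermal capping for each CPU in
 * @cpus. The value is read locklessly by the scheduler's thermal pressure
 * accounting, hence the WRITE_ONCE().
 */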
void topology_set_thermal_pressure(const struct cpumask *cpus,
			       unsigned long th_pressure)
{
	int cpu;

	for_each_cpu(cpu, cpus)
		WRITE_ONCE(per_cpu(thermal_pressure, cpu), th_pressure);
}

static ssize_t cpu_capacity_show(struct device *dev,
				 struct device_attribute *attr,
				 char *buf)
{
	struct cpu *cpu = container_of(dev, struct cpu, dev);

	return sysfs_emit(buf, "%lu\n", topology_get_cpu_scale(cpu->dev.id));
}

static void update_topology_flags_workfn(struct work_struct *work);
static DECLARE_WORK(update_topology_flags_work, update_topology_flags_workfn);

static DEVICE_ATTR_RO(cpu_capacity);

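/*
 * Expose the normalized capacity of each possible CPU as a read-only
 * sysfs attribute, e.g. /sys/devices/system/cpu/cpu0/cpu_capacity.
 */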
static int register_cpu_capacity_sysctl(void)
{
	int i;
	struct device *cpu;

	for_each_possible_cpu(i) {
		cpu = get_cpu_device(i);
		if (!cpu) {
			pr_err("%s: too early to get CPU%d device!\n",
			       __func__, i);
			continue;
		}
		device_create_file(cpu, &dev_attr_cpu_capacity);
	}

	return 0;
}
subsys_initcall(register_cpu_capacity_sysctl);

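/*
 * Flag set around rebuild_sched_domains() below so that
 * topology_update_cpu_topology() reports a pending topology update to its
 * callers while the sched_domain hierarchy is being rebuilt.
 */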
static int update_topology;

int topology_update_cpu_topology(void)
{
	return update_topology;
}

/*
 * Updating the sched_domains can't be done directly from cpufreq callbacks
 * due to locking, so queue the work for later.
 */
static void update_topology_flags_workfn(struct work_struct *work)
{
	update_topology = 1;
	rebuild_sched_domains();
	pr_debug("sched_domain hierarchy rebuilt, flags updated\n");
	update_topology = 0;
}

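/*
 * freq_factor holds a per-CPU frequency term (1 until a real value is
 * known), and raw_capacity holds the raw "capacity-dmips-mhz" values read
 * from the device tree. They are combined and normalized by
 * topology_normalize_cpu_scale() and are only needed at boot.
 */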
static DEFINE_PER_CPU(u32, freq_factor) = 1;
static u32 *raw_capacity;

static int free_raw_capacity(void)
{
	kfree(raw_capacity);
	raw_capacity = NULL;

	return 0;
}

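/*
 * Normalize the raw capacities: each CPU's capacity is raw_capacity *
 * freq_factor, scaled so the largest result in the system becomes
 * SCHED_CAPACITY_SCALE (1024). For example (illustrative numbers only),
 * raw capacities of 1024/446 with maximum frequencies of 2000/1500 give
 * 2048000 and 669000, which normalize to 1024 and 334.
 */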
void topology_normalize_cpu_scale(void)
{
	u64 capacity;
	u64 capacity_scale;
	int cpu;

	if (!raw_capacity)
		return;

	capacity_scale = 1;
	for_each_possible_cpu(cpu) {
		capacity = raw_capacity[cpu] * per_cpu(freq_factor, cpu);
		capacity_scale = max(capacity, capacity_scale);
	}

	pr_debug("cpu_capacity: capacity_scale=%llu\n", capacity_scale);
	for_each_possible_cpu(cpu) {
		capacity = raw_capacity[cpu] * per_cpu(freq_factor, cpu);
		capacity = div64_u64(capacity << SCHED_CAPACITY_SHIFT,
			capacity_scale);
		topology_set_cpu_scale(cpu, capacity);
		pr_debug("cpu_capacity: CPU%d cpu_capacity=%lu\n",
			cpu, topology_get_cpu_scale(cpu));
	}
}

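/*
 * Parse the "capacity-dmips-mhz" property of a CPU node, e.g. (an
 * illustrative DT fragment):
 *
 *	cpu@0 {
 *		compatible = "arm,cortex-a53";
 *		capacity-dmips-mhz = <446>;
 *	};
 *
 * Returns true if a raw capacity was recorded for @cpu, false otherwise.
 * If any CPU lacks the property, parsing is abandoned and all CPUs fall
 * back to the default capacity.
 */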
bool __init topology_parse_cpu_capacity(struct device_node *cpu_node, int cpu)
{
	struct clk *cpu_clk;
	static bool cap_parsing_failed;
	int ret;
	u32 cpu_capacity;

	if (cap_parsing_failed)
		return false;

	ret = of_property_read_u32(cpu_node, "capacity-dmips-mhz",
				   &cpu_capacity);
	if (!ret) {
		if (!raw_capacity) {
			raw_capacity = kcalloc(num_possible_cpus(),
					       sizeof(*raw_capacity),
					       GFP_KERNEL);
			if (!raw_capacity) {
				cap_parsing_failed = true;
				return false;
			}
		}
		raw_capacity[cpu] = cpu_capacity;
		pr_debug("cpu_capacity: %pOF cpu_capacity=%u (raw)\n",
			cpu_node, raw_capacity[cpu]);

		/*
		 * Update freq_factor for calculating early boot CPU
		 * capacities. For CPUs whose DVFS is not clk-based there is
		 * no way to get the frequency yet, so assume they all run
		 * at the same frequency (by keeping the initial freq_factor
		 * value).
		 */
		cpu_clk = of_clk_get(cpu_node, 0);
		if (!PTR_ERR_OR_ZERO(cpu_clk)) {
			per_cpu(freq_factor, cpu) =
				clk_get_rate(cpu_clk) / 1000;
			clk_put(cpu_clk);
		}
	} else {
		if (raw_capacity) {
			pr_err("cpu_capacity: missing %pOF raw capacity\n",
				cpu_node);
			pr_err("cpu_capacity: partial information: fallback to 1024 for all CPUs\n");
		}
		cap_parsing_failed = true;
		free_raw_capacity();
	}

	return !ret;
}

#ifdef CONFIG_CPU_FREQ
static cpumask_var_t cpus_to_visit;
static void parsing_done_workfn(struct work_struct *work);
static DECLARE_WORK(parsing_done_work, parsing_done_workfn);

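/*
 * cpufreq policy notifier: when a policy is created, record the maximum
 * frequency of its CPUs and tick them off cpus_to_visit. Once every
 * possible CPU has been covered, normalize the capacities, schedule a
 * sched_domain flags update and tear the notifier down.
 */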
static int
init_cpu_capacity_callback(struct notifier_block *nb,
			   unsigned long val,
			   void *data)
{
	struct cpufreq_policy *policy = data;
	int cpu;

	if (!raw_capacity)
		return 0;

	if (val != CPUFREQ_CREATE_POLICY)
		return 0;

	pr_debug("cpu_capacity: init cpu capacity for CPUs [%*pbl] (to_visit=%*pbl)\n",
		 cpumask_pr_args(policy->related_cpus),
		 cpumask_pr_args(cpus_to_visit));

	cpumask_andnot(cpus_to_visit, cpus_to_visit, policy->related_cpus);

	for_each_cpu(cpu, policy->related_cpus)
		per_cpu(freq_factor, cpu) = policy->cpuinfo.max_freq / 1000;

	if (cpumask_empty(cpus_to_visit)) {
		topology_normalize_cpu_scale();
		schedule_work(&update_topology_flags_work);
		free_raw_capacity();
		pr_debug("cpu_capacity: parsing done\n");
		schedule_work(&parsing_done_work);
	}

	return 0;
}

static struct notifier_block init_cpu_capacity_notifier = {
	.notifier_call = init_cpu_capacity_callback,
};

static int __init register_cpufreq_notifier(void)
{
	int ret;

	/*
	 * On ACPI-based systems the default cpu capacity must be used until
	 * the necessary code to parse it is in place, so skip registering
	 * the cpufreq notifier there.
	 */
	if (!acpi_disabled || !raw_capacity)
		return -EINVAL;

	if (!alloc_cpumask_var(&cpus_to_visit, GFP_KERNEL))
		return -ENOMEM;

	cpumask_copy(cpus_to_visit, cpu_possible_mask);

	ret = cpufreq_register_notifier(&init_cpu_capacity_notifier,
					CPUFREQ_POLICY_NOTIFIER);

	if (ret)
		free_cpumask_var(cpus_to_visit);

	return ret;
}
core_initcall(register_cpufreq_notifier);

static void parsing_done_workfn(struct work_struct *work)
{
	cpufreq_unregister_notifier(&init_cpu_capacity_notifier,
					 CPUFREQ_POLICY_NOTIFIER);
	free_cpumask_var(cpus_to_visit);
}

#else
core_initcall(free_raw_capacity);
#endif

#if defined(CONFIG_ARM64) || defined(CONFIG_RISCV)
/*
 * This function returns the logical CPU number of the given node.
 * There are basically three kinds of return values:
 * (1) logical CPU number, which is >= 0.
 * (2) -ENODEV when the device tree (DT) node is valid and found in the DT
 * but there is no possible logical CPU in the kernel to match. This happens
 * when CONFIG_NR_CPUS is configured to be smaller than the number of
 * CPU nodes in the DT. This case should simply be ignored.
 * (3) -1 if the node does not exist in the device tree.
 */
static int __init get_cpu_for_node(struct device_node *node)
{
	struct device_node *cpu_node;
	int cpu;

	cpu_node = of_parse_phandle(node, "cpu", 0);
	if (!cpu_node)
		return -1;

	cpu = of_cpu_node_to_id(cpu_node);
	if (cpu >= 0)
		topology_parse_cpu_capacity(cpu_node, cpu);
	else
		pr_info("CPU node for %pOF exists but the possible cpu range is: %*pbl\n",
			cpu_node, cpumask_pr_args(cpu_possible_mask));

	of_node_put(cpu_node);
	return cpu;
}

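/*
 * Parse a "coreN" node of the cpu-map: walk its "threadN" children (SMT)
 * if any, otherwise treat the core itself as a leaf, and fill in
 * package/core/thread ids for every CPU found.
 */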
static int __init parse_core(struct device_node *core, int package_id,
			     int core_id)
{
	char name[20];
	bool leaf = true;
	int i = 0;
	int cpu;
	struct device_node *t;

	do {
		snprintf(name, sizeof(name), "thread%d", i);
		t = of_get_child_by_name(core, name);
		if (t) {
			leaf = false;
			cpu = get_cpu_for_node(t);
			if (cpu >= 0) {
				cpu_topology[cpu].package_id = package_id;
				cpu_topology[cpu].core_id = core_id;
				cpu_topology[cpu].thread_id = i;
			} else if (cpu != -ENODEV) {
				pr_err("%pOF: Can't get CPU for thread\n", t);
				of_node_put(t);
				return -EINVAL;
			}
			of_node_put(t);
		}
		i++;
	} while (t);

	cpu = get_cpu_for_node(core);
	if (cpu >= 0) {
		if (!leaf) {
			pr_err("%pOF: Core has both threads and CPU\n",
			       core);
			return -EINVAL;
		}

		cpu_topology[cpu].package_id = package_id;
		cpu_topology[cpu].core_id = core_id;
	} else if (leaf && cpu != -ENODEV) {
		pr_err("%pOF: Can't get CPU for leaf core\n", core);
		return -EINVAL;
	}

	return 0;
}

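/*
 * Parse a "clusterN" node (or the cpu-map root), recursing into nested
 * clusters and then into "coreN" leaves. A typical cpu-map looks roughly
 * like this (node and phandle names are illustrative):
 *
 *	cpu-map {
 *		cluster0 {
 *			core0 { cpu = <&cpu0>; };
 *			core1 { cpu = <&cpu1>; };
 *		};
 *		cluster1 {
 *			core0 { cpu = <&cpu2>; };
 *		};
 *	};
 *
 * Nesting information is currently discarded; each leaf cluster simply
 * gets its own package_id.
 */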
static int __init parse_cluster(struct device_node *cluster, int depth)
{
	char name[20];
	bool leaf = true;
	bool has_cores = false;
	struct device_node *c;
	static int package_id __initdata;
	int core_id = 0;
	int i, ret;

	/*
	 * First check for child clusters; we currently ignore any
	 * information about the nesting of clusters and present the
	 * scheduler with a flat list of them.
	 */
	i = 0;
	do {
		snprintf(name, sizeof(name), "cluster%d", i);
		c = of_get_child_by_name(cluster, name);
		if (c) {
			leaf = false;
			ret = parse_cluster(c, depth + 1);
			of_node_put(c);
			if (ret != 0)
				return ret;
		}
		i++;
	} while (c);

	/* Now check for cores */
	i = 0;
	do {
		snprintf(name, sizeof(name), "core%d", i);
		c = of_get_child_by_name(cluster, name);
		if (c) {
			has_cores = true;

			if (depth == 0) {
				pr_err("%pOF: cpu-map children should be clusters\n",
				       c);
				of_node_put(c);
				return -EINVAL;
			}

			if (leaf) {
				ret = parse_core(c, package_id, core_id++);
			} else {
				pr_err("%pOF: Non-leaf cluster with core %s\n",
				       cluster, name);
				ret = -EINVAL;
			}

			of_node_put(c);
			if (ret != 0)
				return ret;
		}
		i++;
	} while (c);

	if (leaf && !has_cores)
		pr_warn("%pOF: empty cluster\n", cluster);

	if (leaf)
		package_id++;

	return 0;
}

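/*
 * Build the CPU topology from the /cpus/cpu-map device tree node and
 * normalize the parsed capacities. Returns 0 on success (or when no
 * cpu-map is present) and a negative error if the map is malformed or
 * does not cover every possible CPU.
 */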
static int __init parse_dt_topology(void)
{
	struct device_node *cn, *map;
	int ret = 0;
	int cpu;

	cn = of_find_node_by_path("/cpus");
	if (!cn) {
		pr_err("No CPU information found in DT\n");
		return 0;
	}

	/*
	 * When topology is provided, cpu-map is essentially a root
	 * cluster with restricted subnodes.
	 */
	map = of_get_child_by_name(cn, "cpu-map");
	if (!map)
		goto out;

	ret = parse_cluster(map, 0);
	if (ret != 0)
		goto out_map;

	topology_normalize_cpu_scale();

	/*
	 * Check that all cores are in the topology; the SMP code will
	 * only mark cores described in the DT as possible.
	 */
	for_each_possible_cpu(cpu)
		if (cpu_topology[cpu].package_id == -1)
			ret = -EINVAL;

out_map:
	of_node_put(map);
out:
	of_node_put(cn);
	return ret;
}
#endif

/*
 * cpu topology table
 */
struct cpu_topology cpu_topology[NR_CPUS];
EXPORT_SYMBOL_GPL(cpu_topology);

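/*
 * Return the CPUs @cpu shares a core group with, typically used for the
 * MC scheduling level: the smallest of its NUMA node, package and LLC
 * sibling masks.
 */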
const struct cpumask *cpu_coregroup_mask(int cpu)
{
	const cpumask_t *core_mask = cpumask_of_node(cpu_to_node(cpu));

	/* Find the smallest of NUMA, core or LLC siblings */
	if (cpumask_subset(&cpu_topology[cpu].core_sibling, core_mask)) {
		/* not NUMA in package, let's use the package siblings */
		core_mask = &cpu_topology[cpu].core_sibling;
	}
	if (cpu_topology[cpu].llc_id != -1) {
		if (cpumask_subset(&cpu_topology[cpu].llc_sibling, core_mask))
			core_mask = &cpu_topology[cpu].llc_sibling;
	}

	return core_mask;
}

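/*
 * Add @cpuid to the LLC, core (package) and thread sibling masks of every
 * online CPU it shares those levels with, and vice versa. Called once a
 * CPU's topology has been stored (e.g. as it comes online).
 */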
void update_siblings_masks(unsigned int cpuid)
{
	struct cpu_topology *cpu_topo, *cpuid_topo = &cpu_topology[cpuid];
	int cpu;

	/* update core and thread sibling masks */
	for_each_online_cpu(cpu) {
		cpu_topo = &cpu_topology[cpu];

		if (cpu_topo->llc_id != -1 && cpuid_topo->llc_id == cpu_topo->llc_id) {
			cpumask_set_cpu(cpu, &cpuid_topo->llc_sibling);
			cpumask_set_cpu(cpuid, &cpu_topo->llc_sibling);
		}

		if (cpuid_topo->package_id != cpu_topo->package_id)
			continue;

		cpumask_set_cpu(cpuid, &cpu_topo->core_sibling);
		cpumask_set_cpu(cpu, &cpuid_topo->core_sibling);

		if (cpuid_topo->core_id != cpu_topo->core_id)
			continue;

		cpumask_set_cpu(cpuid, &cpu_topo->thread_sibling);
		cpumask_set_cpu(cpu, &cpuid_topo->thread_sibling);
	}
}

static void clear_cpu_topology(int cpu)
{
	struct cpu_topology *cpu_topo = &cpu_topology[cpu];

	cpumask_clear(&cpu_topo->llc_sibling);
	cpumask_set_cpu(cpu, &cpu_topo->llc_sibling);

	cpumask_clear(&cpu_topo->core_sibling);
	cpumask_set_cpu(cpu, &cpu_topo->core_sibling);
	cpumask_clear(&cpu_topo->thread_sibling);
	cpumask_set_cpu(cpu, &cpu_topo->thread_sibling);
}

void __init reset_cpu_topology(void)
{
	unsigned int cpu;

	for_each_possible_cpu(cpu) {
		struct cpu_topology *cpu_topo = &cpu_topology[cpu];

		cpu_topo->thread_id = -1;
		cpu_topo->core_id = -1;
		cpu_topo->package_id = -1;
		cpu_topo->llc_id = -1;

		clear_cpu_topology(cpu);
	}
}

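/*
 * Remove @cpu from every sibling mask it appears in and reset its own
 * masks; used when the CPU is taken down (hotplug).
 */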
void remove_cpu_topology(unsigned int cpu)
{
	int sibling;

	for_each_cpu(sibling, topology_core_cpumask(cpu))
		cpumask_clear_cpu(cpu, topology_core_cpumask(sibling));
	for_each_cpu(sibling, topology_sibling_cpumask(cpu))
		cpumask_clear_cpu(cpu, topology_sibling_cpumask(sibling));
	for_each_cpu(sibling, topology_llc_cpumask(cpu))
		cpumask_clear_cpu(cpu, topology_llc_cpumask(sibling));

	clear_cpu_topology(cpu);
}

__weak int __init parse_acpi_topology(void)
{
	return 0;
}

#if defined(CONFIG_ARM64) || defined(CONFIG_RISCV)
void __init init_cpu_topology(void)
{
	reset_cpu_topology();

	/*
	 * Discard anything that was parsed if we hit an error so we
	 * don't use partial information.
	 */
	if (parse_acpi_topology())
		reset_cpu_topology();
	else if (of_have_populated_dt() && parse_dt_topology())
		reset_cpu_topology();
}

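/*
 * Record the topology of @cpuid if it has not been populated by ACPI/DT
 * parsing: fall back to one core per CPU, with the package derived from
 * the NUMA node, then update the sibling masks either way.
 */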
void store_cpu_topology(unsigned int cpuid)
{
	struct cpu_topology *cpuid_topo = &cpu_topology[cpuid];

	if (cpuid_topo->package_id != -1)
		goto topology_populated;

	cpuid_topo->thread_id = -1;
	cpuid_topo->core_id = cpuid;
	cpuid_topo->package_id = cpu_to_node(cpuid);

	pr_debug("CPU%u: package %d core %d thread %d\n",
		 cpuid, cpuid_topo->package_id, cpuid_topo->core_id,
		 cpuid_topo->thread_id);

topology_populated:
	update_siblings_masks(cpuid);
}
#endif