// SPDX-License-Identifier: GPL-2.0
/*
 * PM domains for CPUs via genpd - managed by cpuidle-psci.
 *
 * Copyright (C) 2019 Linaro Ltd.
 * Author: Ulf Hansson <ulf.hansson@linaro.org>
 *
 */

#define pr_fmt(fmt) "CPUidle PSCI: " fmt

#include <linux/cpu.h>
#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/platform_device.h>
#include <linux/pm_domain.h>
#include <linux/pm_runtime.h>
#include <linux/psci.h>
#include <linux/slab.h>
#include <linux/string.h>

#include "cpuidle-psci.h"

struct psci_pd_provider {
	struct list_head link;
	struct device_node *node;
};

static LIST_HEAD(psci_pd_providers);
static bool psci_pd_allow_domain_state;

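/*
 * genpd ->power_off callback: record the domain idle state selected by genpd,
 * so the last CPU entering idle can pass it to the PSCI firmware via
 * psci_set_domain_state(). Powering off is refused until sync_state has run
 * and set psci_pd_allow_domain_state, as consumers may still be probing.
 */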
static int psci_pd_power_off(struct generic_pm_domain *pd)
{
	struct genpd_power_state *state = &pd->states[pd->state_idx];
	u32 *pd_state;

	if (!state->data)
		return 0;

	if (!psci_pd_allow_domain_state)
		return -EBUSY;

	/* OSI mode is enabled, set the corresponding domain state. */
	pd_state = state->data;
	psci_set_domain_state(*pd_state);

	return 0;
}

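/*
 * Translate each domain-idle-state DT node into its PSCI composite state
 * parameter and stash it in the corresponding genpd state's ->data. On
 * failure, free the buffers already allocated for the preceding states.
 */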
static int psci_pd_parse_state_nodes(struct genpd_power_state *states,
				     int state_count)
{
	int i, ret;
	u32 psci_state, *psci_state_buf;

	for (i = 0; i < state_count; i++) {
		ret = psci_dt_parse_state_node(to_of_node(states[i].fwnode),
					&psci_state);
		if (ret)
			goto free_state;

		psci_state_buf = kmalloc(sizeof(u32), GFP_KERNEL);
		if (!psci_state_buf) {
			ret = -ENOMEM;
			goto free_state;
		}
		*psci_state_buf = psci_state;
		states[i].data = psci_state_buf;
	}

	return 0;

free_state:
	i--;
	for (; i >= 0; i--)
		kfree(states[i].data);
	return ret;
}

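/*
 * Parse the domain idle states from DT and annotate them with the PSCI
 * composite state parameters. On failure the states array allocated by
 * of_genpd_parse_idle_states() is freed before returning.
 */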
static int psci_pd_parse_states(struct device_node *np,
			struct genpd_power_state **states, int *state_count)
{
	int ret;

	/* Parse the domain idle states. */
	ret = of_genpd_parse_idle_states(np, states, state_count);
	if (ret)
		return ret;

	/* Fill out the PSCI specifics for each found state. */
	ret = psci_pd_parse_state_nodes(*states, *state_count);
	if (ret)
		kfree(*states);

	return ret;
}

static void psci_pd_free_states(struct genpd_power_state *states,
				unsigned int state_count)
{
	int i;

	for (i = 0; i < state_count; i++)
		kfree(states[i].data);
	kfree(states);
}

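/*
 * Allocate and initialize a genpd for one CPU PM domain node: parse its idle
 * states, register it with genpd and expose it as an OF power-domain
 * provider. The provider is tracked in psci_pd_providers so it can be torn
 * down again by psci_pd_remove().
 */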
static int psci_pd_init(struct device_node *np, bool use_osi)
{
	struct generic_pm_domain *pd;
	struct psci_pd_provider *pd_provider;
	struct dev_power_governor *pd_gov;
	struct genpd_power_state *states = NULL;
	int ret = -ENOMEM, state_count = 0;

	pd = kzalloc(sizeof(*pd), GFP_KERNEL);
	if (!pd)
		goto out;

	pd_provider = kzalloc(sizeof(*pd_provider), GFP_KERNEL);
	if (!pd_provider)
		goto free_pd;

	pd->name = kasprintf(GFP_KERNEL, "%pOF", np);
	if (!pd->name)
		goto free_pd_prov;

	/*
	 * Parse the domain idle states and let genpd manage the state selection
	 * for those that are compatible with "domain-idle-state".
	 */
	ret = psci_pd_parse_states(np, &states, &state_count);
	if (ret)
		goto free_name;

	pd->free_states = psci_pd_free_states;
	pd->name = kbasename(pd->name);
	pd->states = states;
	pd->state_count = state_count;
	pd->flags |= GENPD_FLAG_IRQ_SAFE | GENPD_FLAG_CPU_DOMAIN;

	/* Allow power off when OSI has been successfully enabled. */
	if (use_osi)
		pd->power_off = psci_pd_power_off;
	else
		pd->flags |= GENPD_FLAG_ALWAYS_ON;

	/* Use the CPU PM domain governor if there are states for it to manage. */
	pd_gov = state_count > 0 ? &pm_domain_cpu_gov : NULL;

	ret = pm_genpd_init(pd, pd_gov, false);
	if (ret) {
		psci_pd_free_states(states, state_count);
		goto free_name;
	}

	ret = of_genpd_add_provider_simple(np, pd);
	if (ret)
		goto remove_pd;

	pd_provider->node = of_node_get(np);
	list_add(&pd_provider->link, &psci_pd_providers);

	pr_debug("init PM domain %s\n", pd->name);
	return 0;

remove_pd:
	pm_genpd_remove(pd);
free_name:
	kfree(pd->name);
free_pd_prov:
	kfree(pd_provider);
free_pd:
	kfree(pd);
out:
	pr_err("failed to init PM domain ret=%d %pOF\n", ret, np);
	return ret;
}

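/*
 * Undo psci_pd_init() for every registered provider, in reverse order of
 * creation: unregister the OF provider, remove and free the genpd, and drop
 * the reference held on the domain's device node.
 */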
static void psci_pd_remove(void)
{
	struct psci_pd_provider *pd_provider, *it;
	struct generic_pm_domain *genpd;

	list_for_each_entry_safe_reverse(pd_provider, it,
					 &psci_pd_providers, link) {
		of_genpd_del_provider(pd_provider->node);

		genpd = of_genpd_remove_last(pd_provider->node);
		if (!IS_ERR(genpd))
			kfree(genpd);

		of_node_put(pd_provider->node);
		list_del(&pd_provider->link);
		kfree(pd_provider);
	}
}

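/*
 * Walk the child domain nodes and hook each one up as a subdomain of the
 * parent domain referenced by its "power-domains" phandle, thereby mirroring
 * the hierarchical CPU topology described in DT.
 */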
static int psci_pd_init_topology(struct device_node *np)
{
	struct device_node *node;
	struct of_phandle_args child, parent;
	int ret;

	for_each_child_of_node(np, node) {
		if (of_parse_phandle_with_args(node, "power-domains",
					"#power-domain-cells", 0, &parent))
			continue;

		child.np = node;
		child.args_count = 0;
		ret = of_genpd_add_subdomain(&parent, &child);
		of_node_put(parent.np);
		if (ret) {
			of_node_put(node);
			return ret;
		}
	}

	return 0;
}

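/*
 * Try to switch the PSCI firmware to OS-initiated (OSI) mode. Returns true
 * only if the firmware advertises OSI support and the switch succeeds.
 */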
static bool psci_pd_try_set_osi_mode(void)
{
	int ret;

	if (!psci_has_osi_support())
		return false;

	ret = psci_set_osi_mode(true);
	if (ret) {
		pr_warn("failed to enable OSI mode: %d\n", ret);
		return false;
	}

	return true;
}

static void psci_cpuidle_domain_sync_state(struct device *dev)
{
	/*
	 * All devices have now been attached/probed to the PM domain topology,
	 * hence it's fine to allow domain states to be picked.
	 */
	psci_pd_allow_domain_state = true;
}

static const struct of_device_id psci_of_match[] = {
	{ .compatible = "arm,psci-1.0" },
	{}
};

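/*
 * Create a genpd provider for each child node of the PSCI node that carries
 * "#power-domain-cells", then link the domains into the hierarchical CPU
 * topology. If no such child exists, or if setup fails, fall back to
 * Platform Coordinated mode by disabling OSI again.
 */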
static int psci_cpuidle_domain_probe(struct platform_device *pdev)
{
	struct device_node *np = pdev->dev.of_node;
	struct device_node *node;
	bool use_osi;
	int ret = 0, pd_count = 0;

	if (!np)
		return -ENODEV;

	/* If OSI mode is supported, let's try to enable it. */
	use_osi = psci_pd_try_set_osi_mode();

	/*
	 * Parse child nodes for the "#power-domain-cells" property and
	 * initialize a genpd/genpd-of-provider pair when it's found.
	 */
	for_each_child_of_node(np, node) {
		if (!of_find_property(node, "#power-domain-cells", NULL))
			continue;

		ret = psci_pd_init(node, use_osi);
		if (ret)
			goto put_node;

		pd_count++;
	}

	/* Bail out if not using the hierarchical CPU topology. */
	if (!pd_count)
		goto no_pd;

	/* Link genpd masters/subdomains to model the CPU topology. */
	ret = psci_pd_init_topology(np);
	if (ret)
		goto remove_pd;

	pr_info("Initialized CPU PM domain topology\n");
	return 0;

put_node:
	of_node_put(node);
remove_pd:
	psci_pd_remove();
	pr_err("failed to create CPU PM domains ret=%d\n", ret);
no_pd:
	if (use_osi)
		psci_set_osi_mode(false);
	return ret;
}

static struct platform_driver psci_cpuidle_domain_driver = {
	.probe  = psci_cpuidle_domain_probe,
	.driver = {
		.name = "psci-cpuidle-domain",
		.of_match_table = psci_of_match,
		.sync_state = psci_cpuidle_domain_sync_state,
	},
};

static int __init psci_idle_init_domains(void)
{
	return platform_driver_register(&psci_cpuidle_domain_driver);
}
subsys_initcall(psci_idle_init_domains);

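/*
 * Attach a CPU device to its "psci" PM domain. Runtime PM is marked IRQ-safe
 * so the idle path may use it, and the domain is kept powered on for online
 * CPUs by taking a runtime PM reference.
 */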
struct device *psci_dt_attach_cpu(int cpu)
{
	struct device *dev;

	dev = dev_pm_domain_attach_by_name(get_cpu_device(cpu), "psci");
	if (IS_ERR_OR_NULL(dev))
		return dev;

	pm_runtime_irq_safe(dev);
	if (cpu_online(cpu))
		pm_runtime_get_sync(dev);

	return dev;
}

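/* Detach a CPU device previously attached by psci_dt_attach_cpu(). */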
void psci_dt_detach_cpu(struct device *dev)
{
	if (IS_ERR_OR_NULL(dev))
		return;

	dev_pm_domain_detach(dev, false);
}