// SPDX-License-Identifier: GPL-2.0
/*
 *  cpuidle-powernv - idle state cpuidle driver.
 *  Adapted from drivers/cpuidle/cpuidle-pseries
 *
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/moduleparam.h>
#include <linux/cpuidle.h>
#include <linux/cpu.h>
#include <linux/notifier.h>
#include <linux/clockchips.h>
#include <linux/of.h>
#include <linux/slab.h>

#include <asm/machdep.h>
#include <asm/firmware.h>
#include <asm/opal.h>
#include <asm/runlatch.h>
#include <asm/cpuidle.h>
/*
 * Expose only those hardware idle states via the cpuidle framework
 * that have latency values below POWERNV_THRESHOLD_LATENCY_NS.
 */
#define POWERNV_THRESHOLD_LATENCY_NS 200000

static struct cpuidle_driver powernv_idle_driver = {
	.name             = "powernv_idle",
	.owner            = THIS_MODULE,
};

static int max_idle_state __read_mostly;
static struct cpuidle_state *cpuidle_state_table __read_mostly;

struct stop_psscr_table {
	u64 val;
	u64 mask;
};

static struct stop_psscr_table stop_psscr_table[CPUIDLE_STATE_MAX] __read_mostly;

static u64 default_snooze_timeout __read_mostly;
static bool snooze_timeout_en __read_mostly;

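/*
 * get_snooze_timeout - pick a polling budget for the snooze state.
 *
 * Returns the target residency of the next enabled deeper idle state,
 * converted from microseconds to timebase ticks, so that snooze polls
 * no longer than it would take to amortize entering that state. Falls
 * back to default_snooze_timeout when no deeper state is enabled.
 */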
static u64 get_snooze_timeout(struct cpuidle_device *dev,
			      struct cpuidle_driver *drv,
			      int index)
{
	int i;

	if (unlikely(!snooze_timeout_en))
		return default_snooze_timeout;

	for (i = index + 1; i < drv->state_count; i++) {
		if (dev->states_usage[i].disable)
			continue;

		return drv->states[i].target_residency * tb_ticks_per_usec;
	}

	return default_snooze_timeout;
}

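/*
 * snooze_loop - busy-wait polling idle state.
 *
 * Spins with thread priority lowered (HMT_very_low) and
 * TIF_POLLING_NRFLAG set, so the scheduler can wake this CPU without
 * sending an IPI, until either a task becomes runnable or the snooze
 * timeout expires.
 */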
static int snooze_loop(struct cpuidle_device *dev,
			struct cpuidle_driver *drv,
			int index)
{
	u64 snooze_exit_time;

	set_thread_flag(TIF_POLLING_NRFLAG);

	local_irq_enable();

	snooze_exit_time = get_tb() + get_snooze_timeout(dev, drv, index);
	ppc64_runlatch_off();
	HMT_very_low();
	while (!need_resched()) {
		if (likely(snooze_timeout_en) && get_tb() > snooze_exit_time) {
			/*
			 * Task has not woken up but we are exiting the polling
			 * loop anyway. Require a barrier after polling is
			 * cleared to order subsequent test of need_resched().
			 */
			clear_thread_flag(TIF_POLLING_NRFLAG);
			smp_mb();
			break;
		}
	}

	HMT_medium();
	ppc64_runlatch_on();
	clear_thread_flag(TIF_POLLING_NRFLAG);

	local_irq_disable();

	return index;
}

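/*
 * nap_loop - enter the POWER7/POWER8 "nap" hardware idle state.
 */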
static int nap_loop(struct cpuidle_device *dev,
			struct cpuidle_driver *drv,
			int index)
{
	power7_idle_type(PNV_THREAD_NAP);

	return index;
}

/* Register for fastsleep only in oneshot mode of broadcast */
#ifdef CONFIG_TICK_ONESHOT
static int fastsleep_loop(struct cpuidle_device *dev,
				struct cpuidle_driver *drv,
				int index)
{
	unsigned long old_lpcr = mfspr(SPRN_LPCR);
	unsigned long new_lpcr;

	if (unlikely(system_state < SYSTEM_RUNNING))
		return index;

	new_lpcr = old_lpcr;
	/*
	 * Do not exit powersave upon decrementer as we've set up the timer
	 * offload.
	 */
	new_lpcr &= ~LPCR_PECE1;

	mtspr(SPRN_LPCR, new_lpcr);

	power7_idle_type(PNV_THREAD_SLEEP);

	mtspr(SPRN_LPCR, old_lpcr);

	return index;
}
#endif

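/*
 * stop_loop - enter an ISA v3.0 "stop" idle state, using the PSSCR
 * value and mask recorded for this state in stop_psscr_table[].
 */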
static int stop_loop(struct cpuidle_device *dev,
		     struct cpuidle_driver *drv,
		     int index)
{
	arch300_idle_type(stop_psscr_table[index].val,
			  stop_psscr_table[index].mask);
	return index;
}

/*
 * States for dedicated partition case.
 */
static struct cpuidle_state powernv_states[CPUIDLE_STATE_MAX] = {
	{ /* Snooze */
		.name = "snooze",
		.desc = "snooze",
		.exit_latency = 0,
		.target_residency = 0,
		.enter = snooze_loop },
};

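/*
 * CPU hotplug callbacks: enable the per-CPU cpuidle device when a CPU
 * comes online and disable it when the CPU is torn down, under the
 * cpuidle pause/resume lock.
 */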
static int powernv_cpuidle_cpu_online(unsigned int cpu)
{
	struct cpuidle_device *dev = per_cpu(cpuidle_devices, cpu);

	if (dev && cpuidle_get_driver()) {
		cpuidle_pause_and_lock();
		cpuidle_enable_device(dev);
		cpuidle_resume_and_unlock();
	}
	return 0;
}

static int powernv_cpuidle_cpu_dead(unsigned int cpu)
{
	struct cpuidle_device *dev = per_cpu(cpuidle_devices, cpu);

	if (dev && cpuidle_get_driver()) {
		cpuidle_pause_and_lock();
		cpuidle_disable_device(dev);
		cpuidle_resume_and_unlock();
	}
	return 0;
}

/*
 * powernv_cpuidle_driver_init()
 */
static int powernv_cpuidle_driver_init(void)
{
	int idle_state;
	struct cpuidle_driver *drv = &powernv_idle_driver;

	drv->state_count = 0;

	for (idle_state = 0; idle_state < max_idle_state; ++idle_state) {
		/* Is the state not enabled? */
		if (cpuidle_state_table[idle_state].enter == NULL)
			continue;

		drv->states[drv->state_count] =	/* structure copy */
			cpuidle_state_table[idle_state];

		drv->state_count += 1;
	}

	/*
	 * On the PowerNV platform cpu_present may be less than cpu_possible in
	 * cases when firmware detects the CPU, but it is not available to the
	 * OS.  If CONFIG_HOTPLUG_CPU=n, then such CPUs are not hotpluggable at
	 * run time and hence cpu_devices are not created for those CPUs by the
	 * generic topology_init().
	 *
	 * drv->cpumask defaults to cpu_possible_mask in
	 * __cpuidle_driver_init().  This breaks cpuidle on PowerNV where
	 * cpu_devices are not created for CPUs in cpu_possible_mask that
	 * cannot be hot-added later at run time.
	 *
	 * Trying cpuidle_register_device() on a CPU without a cpu_device is
	 * incorrect, so pass a correct CPU mask to the generic cpuidle driver.
	 */

	drv->cpumask = (struct cpumask *)cpu_present_mask;

	return 0;
}

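/*
 * add_powernv_state - fill in one cpuidle state table entry and record
 * the matching PSSCR value/mask used by stop_loop() for stop states.
 */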
static inline void add_powernv_state(int index, const char *name,
				     unsigned int flags,
				     int (*idle_fn)(struct cpuidle_device *,
						    struct cpuidle_driver *,
						    int),
				     unsigned int target_residency,
				     unsigned int exit_latency,
				     u64 psscr_val, u64 psscr_mask)
{
	strlcpy(powernv_states[index].name, name, CPUIDLE_NAME_LEN);
	strlcpy(powernv_states[index].desc, name, CPUIDLE_NAME_LEN);
	powernv_states[index].flags = flags;
	powernv_states[index].target_residency = target_residency;
	powernv_states[index].exit_latency = exit_latency;
	powernv_states[index].enter = idle_fn;
	/* For power8 and below psscr_* will be 0 */
	stop_psscr_table[index].val = psscr_val;
	stop_psscr_table[index].mask = psscr_mask;
}

extern u32 pnv_get_supported_cpuidle_states(void);
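/*
 * powernv_add_idle_states - populate powernv_states[] from the
 * platform idle states discovered in the device tree. Returns the
 * number of usable cpuidle states, including the statically defined
 * snooze state at index 0.
 */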
static int powernv_add_idle_states(void)
{
	int nr_idle_states = 1; /* Snooze */
	int dt_idle_states;
	u32 has_stop_states = 0;
	int i;
	u32 supported_flags = pnv_get_supported_cpuidle_states();

	/* Currently we have snooze statically defined */
	if (nr_pnv_idle_states <= 0) {
		pr_warn("cpuidle-powernv: Only Snooze is available\n");
		goto out;
	}

	/* TODO: Count only states which are eligible for cpuidle */
	dt_idle_states = nr_pnv_idle_states;

	/*
	 * Since snooze is used as the first idle state, the maximum number
	 * of idle states allowed is CPUIDLE_STATE_MAX - 1
	 */
	if (nr_pnv_idle_states > CPUIDLE_STATE_MAX - 1) {
		pr_warn("cpuidle-powernv: discovered more idle states than allowed\n");
		dt_idle_states = CPUIDLE_STATE_MAX - 1;
	}

	/*
	 * If the idle states use the stop instruction, probe for the PSSCR
	 * values and PSSCR mask which are necessary to specify the required
	 * stop level.
	 */
	has_stop_states = (pnv_idle_states[0].flags &
			   (OPAL_PM_STOP_INST_FAST | OPAL_PM_STOP_INST_DEEP));

	for (i = 0; i < dt_idle_states; i++) {
		unsigned int exit_latency, target_residency;
		bool stops_timebase = false;
		struct pnv_idle_states_t *state = &pnv_idle_states[i];

		/*
		 * Skip the platform idle state whose flag isn't in
		 * the supported_cpuidle_states flag mask.
		 */
		if ((state->flags & supported_flags) != state->flags)
			continue;
		/*
		 * If an idle state has exit latency beyond
		 * POWERNV_THRESHOLD_LATENCY_NS then don't use it
		 * in cpu-idle.
		 */
		if (state->latency_ns > POWERNV_THRESHOLD_LATENCY_NS)
			continue;
		/*
		 * Firmware passes residency and latency values in ns.
		 * cpuidle expects them in us.
		 */
		exit_latency = DIV_ROUND_UP(state->latency_ns, 1000);
		target_residency = DIV_ROUND_UP(state->residency_ns, 1000);

		if (has_stop_states && !(state->valid))
			continue;

		if (state->flags & OPAL_PM_TIMEBASE_STOP)
			stops_timebase = true;

		if (state->flags & OPAL_PM_NAP_ENABLED) {
			/* Add NAP state */
			add_powernv_state(nr_idle_states, "Nap",
					  CPUIDLE_FLAG_NONE, nap_loop,
					  target_residency, exit_latency, 0, 0);
		} else if (has_stop_states && !stops_timebase) {
			add_powernv_state(nr_idle_states, state->name,
					  CPUIDLE_FLAG_NONE, stop_loop,
					  target_residency, exit_latency,
					  state->psscr_val,
					  state->psscr_mask);
		}

		/*
		 * All cpuidle states with CPUIDLE_FLAG_TIMER_STOP set must come
		 * within this config dependency check.
		 */
#ifdef CONFIG_TICK_ONESHOT
		else if (state->flags & OPAL_PM_SLEEP_ENABLED ||
			 state->flags & OPAL_PM_SLEEP_ENABLED_ER1) {
			/* Add FASTSLEEP state */
			add_powernv_state(nr_idle_states, "FastSleep",
					  CPUIDLE_FLAG_TIMER_STOP,
					  fastsleep_loop,
					  target_residency, exit_latency, 0, 0);
		} else if (has_stop_states && stops_timebase) {
			add_powernv_state(nr_idle_states, state->name,
					  CPUIDLE_FLAG_TIMER_STOP, stop_loop,
					  target_residency, exit_latency,
					  state->psscr_val,
					  state->psscr_mask);
		}
#endif
		else
			continue;
		nr_idle_states++;
	}
out:
	return nr_idle_states;
}

/*
 * powernv_idle_probe()
 * Choose state table for shared versus dedicated partition
 */
static int powernv_idle_probe(void)
{
	if (cpuidle_disable != IDLE_NO_OVERRIDE)
		return -ENODEV;

	if (firmware_has_feature(FW_FEATURE_OPAL)) {
		cpuidle_state_table = powernv_states;
		/* Device tree can indicate more idle states */
		max_idle_state = powernv_add_idle_states();
		default_snooze_timeout = TICK_USEC * tb_ticks_per_usec;
		if (max_idle_state > 1)
			snooze_timeout_en = true;
	} else
		return -ENODEV;

	return 0;
}

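/*
 * Module init: probe the platform, register the cpuidle driver and
 * hook up the CPU online/dead hotplug callbacks.
 */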
static int __init powernv_processor_idle_init(void)
{
	int retval;

	retval = powernv_idle_probe();
	if (retval)
		return retval;

	powernv_cpuidle_driver_init();
	retval = cpuidle_register(&powernv_idle_driver, NULL);
	if (retval) {
		printk(KERN_DEBUG "Registration of powernv driver failed.\n");
		return retval;
	}

	retval = cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN,
					   "cpuidle/powernv:online",
					   powernv_cpuidle_cpu_online, NULL);
	WARN_ON(retval < 0);
	retval = cpuhp_setup_state_nocalls(CPUHP_CPUIDLE_DEAD,
					   "cpuidle/powernv:dead", NULL,
					   powernv_cpuidle_cpu_dead);
	WARN_ON(retval < 0);
	printk(KERN_DEBUG "powernv_idle_driver registered\n");
	return 0;
}

device_initcall(powernv_processor_idle_init);