1// SPDX-License-Identifier: GPL-2.0-only
2/*
3 * OMAP4+ CPU idle Routines
4 *
5 * Copyright (C) 2011-2013 Texas Instruments, Inc.
6 * Santosh Shilimkar <santosh.shilimkar@ti.com>
7 * Rajendra Nayak <rnayak@ti.com>
8 */
9
10#include <linux/sched.h>
11#include <linux/cpuidle.h>
12#include <linux/cpu_pm.h>
13#include <linux/export.h>
14#include <linux/tick.h>
15
16#include <asm/cpuidle.h>
17
18#include "common.h"
19#include "pm.h"
20#include "prm.h"
21#include "soc.h"
22#include "clockdomain.h"
23
24#define MAX_CPUS	2
25
/* Machine specific information */
struct idle_statedata {
	u32 cpu_state;		/* target power state for the CPUx powerdomain */
	u32 mpu_logic_state;	/* MPU powerdomain logic retention state */
	u32 mpu_state;		/* target power state for the MPU (cluster) powerdomain */
	u32 mpu_state_vote;	/* count of online CPUs that voted for this MPU state
				 * (used only by the OMAP5 SMP path, under mpu_lock) */
};
33
/*
 * OMAP4 C-state table, indexed by cpuidle state index; must stay in
 * sync with omap4_idle_driver.states[].
 */
static struct idle_statedata omap4_idle_data[] = {
	{
		/* C1: CPUx ON, MPUSS ON (simple WFI) */
		.cpu_state = PWRDM_POWER_ON,
		.mpu_state = PWRDM_POWER_ON,
		.mpu_logic_state = PWRDM_POWER_RET,
	},
	{
		/* C2: CPUx OFF, MPUSS CSWR (retention with logic retained) */
		.cpu_state = PWRDM_POWER_OFF,
		.mpu_state = PWRDM_POWER_RET,
		.mpu_logic_state = PWRDM_POWER_RET,
	},
	{
		/* C3: CPUx OFF, MPUSS OSWR (retention with logic off) */
		.cpu_state = PWRDM_POWER_OFF,
		.mpu_state = PWRDM_POWER_RET,
		.mpu_logic_state = PWRDM_POWER_OFF,
	},
};
51
/*
 * OMAP5 C-state table, indexed by cpuidle state index; must stay in
 * sync with omap5_idle_driver.states[].
 */
static struct idle_statedata omap5_idle_data[] = {
	{
		/* C1: CPUx WFI, MPUSS ON */
		.cpu_state = PWRDM_POWER_ON,
		.mpu_state = PWRDM_POWER_ON,
		.mpu_logic_state = PWRDM_POWER_ON,
	},
	{
		/* C2: CPUx CSWR, MPUSS CSWR */
		.cpu_state = PWRDM_POWER_RET,
		.mpu_state = PWRDM_POWER_RET,
		.mpu_logic_state = PWRDM_POWER_RET,
	},
};
64
/* Powerdomain handles: MPU cluster domain plus one per-CPU domain */
static struct powerdomain *mpu_pd, *cpu_pd[MAX_CPUS];
/* Per-CPU clockdomains, used to deny/allow idle around CPU1 wakeup */
static struct clockdomain *cpu_clkdm[MAX_CPUS];

/* Barrier synchronizing both CPUs on exit/abort of a coupled attempt */
static atomic_t abort_barrier;
/* Set by a CPU once it has completed its low-power entry attempt */
static bool cpu_done[MAX_CPUS];
/* Active C-state table; defaults to OMAP4, switched in omap4_idle_init() */
static struct idle_statedata *state_ptr = &omap4_idle_data[0];
/* Protects mpu_state_vote accounting in omap_enter_idle_smp() */
static DEFINE_RAW_SPINLOCK(mpu_lock);
72
73/* Private functions */
74
75/**
76 * omap_enter_idle_[simple/coupled] - OMAP4PLUS cpuidle entry functions
77 * @dev: cpuidle device
78 * @drv: cpuidle driver
79 * @index: the index of state to be entered
80 *
81 * Called from the CPUidle framework to program the device to the
82 * specified low power state selected by the governor.
83 * Returns the amount of time spent in the low power state.
84 */
85static int omap_enter_idle_simple(struct cpuidle_device *dev,
86			struct cpuidle_driver *drv,
87			int index)
88{
89	omap_do_wfi();
90	return index;
91}
92
/*
 * omap_enter_idle_smp - OMAP5 idle entry with per-cluster voting
 *
 * Each CPU entering this state takes a vote under mpu_lock; only when
 * every online CPU has voted is the MPU powerdomain programmed for the
 * low-power target (the cluster can only reach it with all CPUs idle).
 * On wakeup, the first CPU out (vote count still equal to the number of
 * online CPUs) restores the MPU domain to ON before dropping its vote.
 */
static int omap_enter_idle_smp(struct cpuidle_device *dev,
			       struct cpuidle_driver *drv,
			       int index)
{
	struct idle_statedata *cx = state_ptr + index;
	unsigned long flag;

	raw_spin_lock_irqsave(&mpu_lock, flag);
	cx->mpu_state_vote++;
	if (cx->mpu_state_vote == num_online_cpus()) {
		/* Last CPU in: program MPU logic retention and power state */
		pwrdm_set_logic_retst(mpu_pd, cx->mpu_logic_state);
		omap_set_pwrdm_state(mpu_pd, cx->mpu_state);
	}
	raw_spin_unlock_irqrestore(&mpu_lock, flag);

	omap4_enter_lowpower(dev->cpu, cx->cpu_state);

	raw_spin_lock_irqsave(&mpu_lock, flag);
	/* First CPU out: bring the MPU domain back to ON */
	if (cx->mpu_state_vote == num_online_cpus())
		omap_set_pwrdm_state(mpu_pd, PWRDM_POWER_ON);
	cx->mpu_state_vote--;
	raw_spin_unlock_irqrestore(&mpu_lock, flag);

	return index;
}
118
/*
 * omap_enter_idle_coupled - OMAP4 coupled idle entry (C2/C3)
 *
 * Both CPUs enter here together via CPUIDLE_FLAG_COUPLED.  CPU0 is the
 * cluster master: it programs the MPU powerdomain, runs the cluster PM
 * notifiers, and wakes CPU1 back up afterwards.  The statement ordering
 * below (broadcast tick, cpu_pm notifiers, powerdomain programming,
 * errata handling) is deliberate — do not reorder.
 */
static int omap_enter_idle_coupled(struct cpuidle_device *dev,
			struct cpuidle_driver *drv,
			int index)
{
	struct idle_statedata *cx = state_ptr + index;
	u32 mpuss_can_lose_context = 0;
	int error;

	/*
	 * CPU0 has to wait and stay ON until CPU1 is OFF state.
	 * This is necessary to honour hardware recommendation
	 * of triggering all the possible low power modes once CPU1 is
	 * out of coherency and in OFF mode.
	 */
	if (dev->cpu == 0 && cpumask_test_cpu(1, cpu_online_mask)) {
		while (pwrdm_read_pwrst(cpu_pd[1]) != PWRDM_POWER_OFF) {
			cpu_relax();

			/*
			 * CPU1 could have already entered & exited idle
			 * without hitting off because of a wakeup
			 * or a failed attempt to hit off mode.  Check for
			 * that here, otherwise we could spin forever
			 * waiting for CPU1 off.
			 */
			if (cpu_done[1])
			    goto fail;

		}
	}

	/* Full context loss only in OSWR: MPU retention with logic off */
	mpuss_can_lose_context = (cx->mpu_state == PWRDM_POWER_RET) &&
				 (cx->mpu_logic_state == PWRDM_POWER_OFF);

	/* Enter broadcast mode for periodic timers */
	RCU_NONIDLE(tick_broadcast_enable());

	/* Enter broadcast mode for one-shot timers */
	RCU_NONIDLE(tick_broadcast_enter());

	/*
	 * Call idle CPU PM enter notifier chain so that
	 * VFP and per CPU interrupt context is saved.
	 */
	error = cpu_pm_enter();
	if (error)
		goto cpu_pm_out;

	if (dev->cpu == 0) {
		pwrdm_set_logic_retst(mpu_pd, cx->mpu_logic_state);
		RCU_NONIDLE(omap_set_pwrdm_state(mpu_pd, cx->mpu_state));

		/*
		 * Call idle CPU cluster PM enter notifier chain
		 * to save GIC and wakeupgen context.
		 */
		if (mpuss_can_lose_context) {
			error = cpu_cluster_pm_enter();
			if (error) {
				/*
				 * Cluster notifiers refused: fall back to the
				 * safe C1 state and reprogram MPU accordingly.
				 */
				index = 0;
				cx = state_ptr + index;
				pwrdm_set_logic_retst(mpu_pd, cx->mpu_logic_state);
				RCU_NONIDLE(omap_set_pwrdm_state(mpu_pd, cx->mpu_state));
				mpuss_can_lose_context = 0;
			}
		}
	}

	omap4_enter_lowpower(dev->cpu, cx->cpu_state);
	cpu_done[dev->cpu] = true;

	/* Wakeup CPU1 only if it is not offlined */
	if (dev->cpu == 0 && cpumask_test_cpu(1, cpu_online_mask)) {

		if (IS_PM44XX_ERRATUM(PM_OMAP4_ROM_SMP_BOOT_ERRATUM_GICD) &&
		    mpuss_can_lose_context)
			gic_dist_disable();

		/* Keep CPU1's clockdomain active while forcing it back ON */
		RCU_NONIDLE(clkdm_deny_idle(cpu_clkdm[1]));
		RCU_NONIDLE(omap_set_pwrdm_state(cpu_pd[1], PWRDM_POWER_ON));
		RCU_NONIDLE(clkdm_allow_idle(cpu_clkdm[1]));

		if (IS_PM44XX_ERRATUM(PM_OMAP4_ROM_SMP_BOOT_ERRATUM_GICD) &&
		    mpuss_can_lose_context) {
			/* Wait until CPU1 (via ROM code) re-enables the GIC */
			while (gic_dist_disabled()) {
				udelay(1);
				cpu_relax();
			}
			gic_timer_retrigger();
		}
	}

	/*
	 * Call idle CPU cluster PM exit notifier chain
	 * to restore GIC and wakeupgen context.
	 */
	if (dev->cpu == 0 && mpuss_can_lose_context)
		cpu_cluster_pm_exit();

	/*
	 * Call idle CPU PM exit notifier chain to restore
	 * VFP and per CPU IRQ context.
	 */
	cpu_pm_exit();

cpu_pm_out:
	/* Pairs with tick_broadcast_enter() above; skipped on the fail path,
	 * where broadcast mode was never entered. */
	RCU_NONIDLE(tick_broadcast_exit());

fail:
	/* Both CPUs resynchronize here, whether the attempt succeeded or not */
	cpuidle_coupled_parallel_barrier(dev, &abort_barrier);
	cpu_done[dev->cpu] = false;

	return index;
}
233
/*
 * OMAP4 cpuidle driver.  States must stay in index-sync with
 * omap4_idle_data[]; safe_state_index 0 falls back to plain WFI.
 */
static struct cpuidle_driver omap4_idle_driver = {
	.name				= "omap4_idle",
	.owner				= THIS_MODULE,
	.states = {
		{
			/* C1 - CPU0 ON + CPU1 ON + MPU ON */
			.exit_latency = 2 + 2,
			.target_residency = 5,
			.enter = omap_enter_idle_simple,
			.name = "C1",
			.desc = "CPUx ON, MPUSS ON"
		},
		{
			/* C2 - CPU0 OFF + CPU1 OFF + MPU CSWR */
			.exit_latency = 328 + 440,
			.target_residency = 960,
			.flags = CPUIDLE_FLAG_COUPLED,
			.enter = omap_enter_idle_coupled,
			.name = "C2",
			.desc = "CPUx OFF, MPUSS CSWR",
		},
		{
			/* C3 - CPU0 OFF + CPU1 OFF + MPU OSWR */
			.exit_latency = 460 + 518,
			.target_residency = 1100,
			.flags = CPUIDLE_FLAG_COUPLED,
			.enter = omap_enter_idle_coupled,
			.name = "C3",
			.desc = "CPUx OFF, MPUSS OSWR",
		},
	},
	.state_count = ARRAY_SIZE(omap4_idle_data),
	.safe_state_index = 0,
};
268
/*
 * OMAP5 cpuidle driver.  States must stay in index-sync with
 * omap5_idle_data[]; C2 uses the voting SMP path instead of the
 * coupled machinery.
 */
static struct cpuidle_driver omap5_idle_driver = {
	.name				= "omap5_idle",
	.owner				= THIS_MODULE,
	.states = {
		{
			/* C1 - CPU0 ON + CPU1 ON + MPU ON */
			.exit_latency = 2 + 2,
			.target_residency = 5,
			.enter = omap_enter_idle_simple,
			.name = "C1",
			.desc = "CPUx WFI, MPUSS ON"
		},
		{
			/* C2 - CPU0 RET + CPU1 RET + MPU CSWR */
			.exit_latency = 48 + 60,
			.target_residency = 100,
			.flags = CPUIDLE_FLAG_TIMER_STOP,
			.enter = omap_enter_idle_smp,
			.name = "C2",
			.desc = "CPUx CSWR, MPUSS CSWR",
		},
	},
	.state_count = ARRAY_SIZE(omap5_idle_data),
	.safe_state_index = 0,
};
294
295/* Public functions */
296
297/**
298 * omap4_idle_init - Init routine for OMAP4+ idle
299 *
300 * Registers the OMAP4+ specific cpuidle driver to the cpuidle
301 * framework with the valid set of states.
302 */
303int __init omap4_idle_init(void)
304{
305	struct cpuidle_driver *idle_driver;
306
307	if (soc_is_omap54xx()) {
308		state_ptr = &omap5_idle_data[0];
309		idle_driver = &omap5_idle_driver;
310	} else {
311		state_ptr = &omap4_idle_data[0];
312		idle_driver = &omap4_idle_driver;
313	}
314
315	mpu_pd = pwrdm_lookup("mpu_pwrdm");
316	cpu_pd[0] = pwrdm_lookup("cpu0_pwrdm");
317	cpu_pd[1] = pwrdm_lookup("cpu1_pwrdm");
318	if ((!mpu_pd) || (!cpu_pd[0]) || (!cpu_pd[1]))
319		return -ENODEV;
320
321	cpu_clkdm[0] = clkdm_lookup("mpu0_clkdm");
322	cpu_clkdm[1] = clkdm_lookup("mpu1_clkdm");
323	if (!cpu_clkdm[0] || !cpu_clkdm[1])
324		return -ENODEV;
325
326	return cpuidle_register(idle_driver, cpu_online_mask);
327}
328