1// SPDX-License-Identifier: GPL-2.0-only
2/*
3 * linux/arch/arm/mach-omap2/cpuidle34xx.c
4 *
5 * OMAP3 CPU IDLE Routines
6 *
7 * Copyright (C) 2008 Texas Instruments, Inc.
8 * Rajendra Nayak <rnayak@ti.com>
9 *
10 * Copyright (C) 2007 Texas Instruments, Inc.
11 * Karthik Dasu <karthik-dp@ti.com>
12 *
13 * Copyright (C) 2006 Nokia Corporation
14 * Tony Lindgren <tony@atomide.com>
15 *
16 * Copyright (C) 2005 Texas Instruments, Inc.
17 * Richard Woodruff <r-woodruff2@ti.com>
18 *
19 * Based on pm.c for omap2
20 */
21
22#include <linux/sched.h>
23#include <linux/cpuidle.h>
24#include <linux/export.h>
25#include <linux/cpu_pm.h>
26#include <asm/cpuidle.h>
27
28#include "powerdomain.h"
29#include "clockdomain.h"
30
31#include "pm.h"
32#include "control.h"
33#include "common.h"
34#include "soc.h"
35
/* Mach specific information to be recorded in the C-state driver_data */
struct omap3_idle_statedata {
	/* Target MPU powerdomain state for this C-state (PWRDM_POWER_*) */
	u8 mpu_state;
	/* Target CORE powerdomain state for this C-state (PWRDM_POWER_*) */
	u8 core_state;
	/* Shallowest PER powerdomain state allowed while in this C-state */
	u8 per_min_state;
	/* OMAP_CPUIDLE_CX_* flag bits (see below) */
	u8 flags;
};
43
/* Powerdomain handles, looked up once in omap3_idle_init() */
static struct powerdomain *mpu_pd, *core_pd, *per_pd, *cam_pd;
45
46/*
47 * Possible flag bits for struct omap3_idle_statedata.flags:
48 *
49 * OMAP_CPUIDLE_CX_NO_CLKDM_IDLE: don't allow the MPU clockdomain to go
50 *    inactive.  This in turn prevents the MPU DPLL from entering autoidle
51 *    mode, so wakeup latency is greatly reduced, at the cost of additional
52 *    energy consumption.  This also prevents the CORE clockdomain from
53 *    entering idle.
54 */
55#define OMAP_CPUIDLE_CX_NO_CLKDM_IDLE		BIT(0)
56
57/*
58 * Prevent PER OFF if CORE is not in RETention or OFF as this would
59 * disable PER wakeups completely.
60 */
61static struct omap3_idle_statedata omap3_idle_data[] = {
62	{
63		.mpu_state = PWRDM_POWER_ON,
64		.core_state = PWRDM_POWER_ON,
65		/* In C1 do not allow PER state lower than CORE state */
66		.per_min_state = PWRDM_POWER_ON,
67		.flags = OMAP_CPUIDLE_CX_NO_CLKDM_IDLE,
68	},
69	{
70		.mpu_state = PWRDM_POWER_ON,
71		.core_state = PWRDM_POWER_ON,
72		.per_min_state = PWRDM_POWER_RET,
73	},
74	{
75		.mpu_state = PWRDM_POWER_RET,
76		.core_state = PWRDM_POWER_ON,
77		.per_min_state = PWRDM_POWER_RET,
78	},
79	{
80		.mpu_state = PWRDM_POWER_OFF,
81		.core_state = PWRDM_POWER_ON,
82		.per_min_state = PWRDM_POWER_RET,
83	},
84	{
85		.mpu_state = PWRDM_POWER_RET,
86		.core_state = PWRDM_POWER_RET,
87		.per_min_state = PWRDM_POWER_OFF,
88	},
89	{
90		.mpu_state = PWRDM_POWER_OFF,
91		.core_state = PWRDM_POWER_RET,
92		.per_min_state = PWRDM_POWER_OFF,
93	},
94	{
95		.mpu_state = PWRDM_POWER_OFF,
96		.core_state = PWRDM_POWER_OFF,
97		.per_min_state = PWRDM_POWER_OFF,
98	},
99};
100
101/**
102 * omap3_enter_idle - Programs OMAP3 to enter the specified state
103 * @dev: cpuidle device
104 * @drv: cpuidle driver
105 * @index: the index of state to be entered
106 */
107static int omap3_enter_idle(struct cpuidle_device *dev,
108			    struct cpuidle_driver *drv,
109			    int index)
110{
111	struct omap3_idle_statedata *cx = &omap3_idle_data[index];
112	int error;
113
114	if (omap_irq_pending() || need_resched())
115		goto return_sleep_time;
116
117	/* Deny idle for C1 */
118	if (cx->flags & OMAP_CPUIDLE_CX_NO_CLKDM_IDLE) {
119		clkdm_deny_idle(mpu_pd->pwrdm_clkdms[0]);
120	} else {
121		pwrdm_set_next_pwrst(mpu_pd, cx->mpu_state);
122		pwrdm_set_next_pwrst(core_pd, cx->core_state);
123	}
124
125	/*
126	 * Call idle CPU PM enter notifier chain so that
127	 * VFP context is saved.
128	 */
129	if (cx->mpu_state == PWRDM_POWER_OFF) {
130		error = cpu_pm_enter();
131		if (error)
132			goto out_clkdm_set;
133	}
134
135	/* Execute ARM wfi */
136	omap_sram_idle();
137
138	/*
139	 * Call idle CPU PM enter notifier chain to restore
140	 * VFP context.
141	 */
142	if (cx->mpu_state == PWRDM_POWER_OFF &&
143	    pwrdm_read_prev_pwrst(mpu_pd) == PWRDM_POWER_OFF)
144		cpu_pm_exit();
145
146out_clkdm_set:
147	/* Re-allow idle for C1 */
148	if (cx->flags & OMAP_CPUIDLE_CX_NO_CLKDM_IDLE)
149		clkdm_allow_idle(mpu_pd->pwrdm_clkdms[0]);
150
151return_sleep_time:
152
153	return index;
154}
155
156/**
157 * next_valid_state - Find next valid C-state
158 * @dev: cpuidle device
159 * @drv: cpuidle driver
160 * @index: Index of currently selected c-state
161 *
162 * If the state corresponding to index is valid, index is returned back
163 * to the caller. Else, this function searches for a lower c-state which is
164 * still valid (as defined in omap3_power_states[]) and returns its index.
165 *
166 * A state is valid if the 'valid' field is enabled and
167 * if it satisfies the enable_off_mode condition.
168 */
169static int next_valid_state(struct cpuidle_device *dev,
170			    struct cpuidle_driver *drv, int index)
171{
172	struct omap3_idle_statedata *cx = &omap3_idle_data[index];
173	u32 mpu_deepest_state = PWRDM_POWER_RET;
174	u32 core_deepest_state = PWRDM_POWER_RET;
175	int idx;
176	int next_index = 0; /* C1 is the default value */
177
178	if (enable_off_mode) {
179		mpu_deepest_state = PWRDM_POWER_OFF;
180		/*
181		 * Erratum i583: valable for ES rev < Es1.2 on 3630.
182		 * CORE OFF mode is not supported in a stable form, restrict
183		 * instead the CORE state to RET.
184		 */
185		if (!IS_PM34XX_ERRATUM(PM_SDRC_WAKEUP_ERRATUM_i583))
186			core_deepest_state = PWRDM_POWER_OFF;
187	}
188
189	/* Check if current state is valid */
190	if ((cx->mpu_state >= mpu_deepest_state) &&
191	    (cx->core_state >= core_deepest_state))
192		return index;
193
194	/*
195	 * Drop to next valid state.
196	 * Start search from the next (lower) state.
197	 */
198	for (idx = index - 1; idx >= 0; idx--) {
199		cx = &omap3_idle_data[idx];
200		if ((cx->mpu_state >= mpu_deepest_state) &&
201		    (cx->core_state >= core_deepest_state)) {
202			next_index = idx;
203			break;
204		}
205	}
206
207	return next_index;
208}
209
210/**
211 * omap3_enter_idle_bm - Checks for any bus activity
212 * @dev: cpuidle device
213 * @drv: cpuidle driver
214 * @index: array index of target state to be programmed
215 *
216 * This function checks for any pending activity and then programs
217 * the device to the specified or a safer state.
218 */
219static int omap3_enter_idle_bm(struct cpuidle_device *dev,
220			       struct cpuidle_driver *drv,
221			       int index)
222{
223	int new_state_idx, ret;
224	u8 per_next_state, per_saved_state;
225	struct omap3_idle_statedata *cx;
226
227	/*
228	 * Use only C1 if CAM is active.
229	 * CAM does not have wakeup capability in OMAP3.
230	 */
231	if (pwrdm_read_pwrst(cam_pd) == PWRDM_POWER_ON)
232		new_state_idx = drv->safe_state_index;
233	else
234		new_state_idx = next_valid_state(dev, drv, index);
235
236	/*
237	 * FIXME: we currently manage device-specific idle states
238	 *        for PER and CORE in combination with CPU-specific
239	 *        idle states.  This is wrong, and device-specific
240	 *        idle management needs to be separated out into
241	 *        its own code.
242	 */
243
244	/* Program PER state */
245	cx = &omap3_idle_data[new_state_idx];
246
247	per_next_state = pwrdm_read_next_pwrst(per_pd);
248	per_saved_state = per_next_state;
249	if (per_next_state < cx->per_min_state) {
250		per_next_state = cx->per_min_state;
251		pwrdm_set_next_pwrst(per_pd, per_next_state);
252	}
253
254	ret = omap3_enter_idle(dev, drv, new_state_idx);
255
256	/* Restore original PER state if it was modified */
257	if (per_next_state != per_saved_state)
258		pwrdm_set_next_pwrst(per_pd, per_saved_state);
259
260	return ret;
261}
262
/*
 * Default OMAP3 C-state table.  exit_latency and target_residency are
 * in microseconds per the cpuidle framework convention; each
 * exit_latency is written as (sleep latency + wakeup latency).
 * Entries must stay in the same order as omap3_idle_data[] above.
 */
static struct cpuidle_driver omap3_idle_driver = {
	.name             = "omap3_idle",
	.owner            = THIS_MODULE,
	.states = {
		{
			.enter		  = omap3_enter_idle_bm,
			.exit_latency	  = 2 + 2,
			.target_residency = 5,
			.name		  = "C1",
			.desc		  = "MPU ON + CORE ON",
		},
		{
			.enter		  = omap3_enter_idle_bm,
			.exit_latency	  = 10 + 10,
			.target_residency = 30,
			.name		  = "C2",
			.desc		  = "MPU ON + CORE ON",
		},
		{
			.enter		  = omap3_enter_idle_bm,
			.exit_latency	  = 50 + 50,
			.target_residency = 300,
			.name		  = "C3",
			.desc		  = "MPU RET + CORE ON",
		},
		{
			.enter		  = omap3_enter_idle_bm,
			.exit_latency	  = 1500 + 1800,
			.target_residency = 4000,
			.name		  = "C4",
			.desc		  = "MPU OFF + CORE ON",
		},
		{
			.enter		  = omap3_enter_idle_bm,
			.exit_latency	  = 2500 + 7500,
			.target_residency = 12000,
			.name		  = "C5",
			.desc		  = "MPU RET + CORE RET",
		},
		{
			.enter		  = omap3_enter_idle_bm,
			.exit_latency	  = 3000 + 8500,
			.target_residency = 15000,
			.name		  = "C6",
			.desc		  = "MPU OFF + CORE RET",
		},
		{
			.enter		  = omap3_enter_idle_bm,
			.exit_latency	  = 10000 + 30000,
			.target_residency = 30000,
			.name		  = "C7",
			.desc		  = "MPU OFF + CORE OFF",
		},
	},
	/* Deliberately sized by the shared C-state data table */
	.state_count = ARRAY_SIZE(omap3_idle_data),
	.safe_state_index = 0,
};
320
321/*
322 * Numbers based on measurements made in October 2009 for PM optimized kernel
323 * with CPU freq enabled on device Nokia N900. Assumes OPP2 (main idle OPP,
324 * and worst case latencies).
325 */
326static struct cpuidle_driver omap3430_idle_driver = {
327	.name             = "omap3430_idle",
328	.owner            = THIS_MODULE,
329	.states = {
330		{
331			.enter		  = omap3_enter_idle_bm,
332			.exit_latency	  = 110 + 162,
333			.target_residency = 5,
334			.name		  = "C1",
335			.desc		  = "MPU ON + CORE ON",
336		},
337		{
338			.enter		  = omap3_enter_idle_bm,
339			.exit_latency	  = 106 + 180,
340			.target_residency = 309,
341			.name		  = "C2",
342			.desc		  = "MPU ON + CORE ON",
343		},
344		{
345			.enter		  = omap3_enter_idle_bm,
346			.exit_latency	  = 107 + 410,
347			.target_residency = 46057,
348			.name		  = "C3",
349			.desc		  = "MPU RET + CORE ON",
350		},
351		{
352			.enter		  = omap3_enter_idle_bm,
353			.exit_latency	  = 121 + 3374,
354			.target_residency = 46057,
355			.name		  = "C4",
356			.desc		  = "MPU OFF + CORE ON",
357		},
358		{
359			.enter		  = omap3_enter_idle_bm,
360			.exit_latency	  = 855 + 1146,
361			.target_residency = 46057,
362			.name		  = "C5",
363			.desc		  = "MPU RET + CORE RET",
364		},
365		{
366			.enter		  = omap3_enter_idle_bm,
367			.exit_latency	  = 7580 + 4134,
368			.target_residency = 484329,
369			.name		  = "C6",
370			.desc		  = "MPU OFF + CORE RET",
371		},
372		{
373			.enter		  = omap3_enter_idle_bm,
374			.exit_latency	  = 7505 + 15274,
375			.target_residency = 484329,
376			.name		  = "C7",
377			.desc		  = "MPU OFF + CORE OFF",
378		},
379	},
380	.state_count = ARRAY_SIZE(omap3_idle_data),
381	.safe_state_index = 0,
382};
383
384/* Public functions */
385
386/**
387 * omap3_idle_init - Init routine for OMAP3 idle
388 *
389 * Registers the OMAP3 specific cpuidle driver to the cpuidle
390 * framework with the valid set of states.
391 */
392int __init omap3_idle_init(void)
393{
394	mpu_pd = pwrdm_lookup("mpu_pwrdm");
395	core_pd = pwrdm_lookup("core_pwrdm");
396	per_pd = pwrdm_lookup("per_pwrdm");
397	cam_pd = pwrdm_lookup("cam_pwrdm");
398
399	if (!mpu_pd || !core_pd || !per_pd || !cam_pd)
400		return -ENODEV;
401
402	if (cpu_is_omap3430())
403		return cpuidle_register(&omap3430_idle_driver, NULL);
404	else
405		return cpuidle_register(&omap3_idle_driver, NULL);
406}
407