// SPDX-License-Identifier: GPL-2.0-only
/*
 * menu.c - the menu idle governor
 *
 * Copyright (C) 2006-2007 Adam Belay <abelay@novell.com>
 * Copyright (C) 2009 Intel Corporation
 * Author:
 *        Arjan van de Ven <arjan@linux.intel.com>
 */

#include <linux/kernel.h>
#include <linux/cpuidle.h>
#include <linux/time.h>
#include <linux/ktime.h>
#include <linux/hrtimer.h>
#include <linux/tick.h>
#include <linux/sched.h>
#include <linux/sched/loadavg.h>
#include <linux/sched/stat.h>
#include <linux/math64.h>

#define BUCKETS 12
#define INTERVAL_SHIFT 3
#define INTERVALS (1UL << INTERVAL_SHIFT)
#define RESOLUTION 1024
#define DECAY 8
#define MAX_INTERESTING (50000 * NSEC_PER_USEC)

/*
 * Concepts and ideas behind the menu governor
 *
 * For the menu governor, there are 3 decision factors for picking a C
 * state:
 * 1) Energy break even point
 * 2) Performance impact
 * 3) Latency tolerance (from pmqos infrastructure)
 * These three factors are treated independently.
 *
 * Energy break even point
 * -----------------------
 * C state entry and exit have an energy cost, and a certain amount of time in
 * the C state is required to actually break even on this cost. CPUIDLE
 * provides us this duration in the "target_residency" field. So all that we
 * need is a good prediction of how long we'll be idle. Like the traditional
 * menu governor, we start with the actual known "next timer event" time.
 *
 * Since there are other sources of wakeups (interrupts for example) than
 * the next timer event, this estimation is rather optimistic. To get a
 * more realistic estimate, a correction factor is applied to the estimate,
 * that is based on historic behavior. For example, if in the past the actual
 * duration always was 50% of the next timer tick, the correction factor will
 * be 0.5.
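 *
 * As an illustration (the numbers are examples, not measurements):
 * the factor is kept in fixed point, with RESOLUTION * DECAY
 * (1024 * 8 = 8192) representing 1.0, so a correction factor of 0.5
 * is stored as 4096 and the prediction becomes
 * next_timer_ns * 4096 / 8192 = next_timer_ns / 2.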
 *
 * menu uses a running average for this correction factor; however, it uses a
 * set of factors, not just a single factor. This stems from the realization
 * that the ratio is dependent on the order of magnitude of the expected
 * duration; if we expect 500 milliseconds of idle time the likelihood of
 * getting an interrupt very early is much higher than if we expect 50
 * microseconds of idle time. A second independent factor that has a big
 * impact on the actual factor is whether there is (disk) IO outstanding
 * or not. (As a special twist, we consider every sleep longer than 50
 * milliseconds as perfect; there are no power gains for sleeping longer
 * than this.)
 *
 * For these two reasons we keep an array of 12 independent factors, that gets
 * indexed based on the magnitude of the expected duration as well as the
 * "is IO outstanding" property.
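 *
 * For example, with BUCKETS = 12 there are 6 buckets per IO group:
 * an expected duration of 300 microseconds with IO outstanding maps
 * to bucket 6 + 2 = 8, while the same duration with no IO pending
 * maps to bucket 2 (see which_bucket() below).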
 *
 * Repeatable-interval-detector
 * ----------------------------
 * There are some cases where "next timer" is a completely unusable predictor:
 * Those cases where the interval is fixed, for example due to hardware
 * interrupt mitigation, but also due to fixed transfer rate devices such as
 * mice.
 * For this, we use a different predictor: We track the duration of the last 8
 * intervals and if the standard deviation of these 8 intervals is below a
 * threshold value, we use the average of these intervals as prediction.
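 *
 * For example, if the last 8 observed intervals were all close to
 * 1500 microseconds while the next timer is 10 milliseconds away,
 * the roughly 1500 us average wins, because menu_select() uses the
 * smaller of the two estimates.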
 *
 * Limiting Performance Impact
 * ---------------------------
 * C states, especially those with large exit latencies, can have a really
 * noticeable impact on workloads, which is not acceptable for most sysadmins,
 * and in addition, less performance has a power price of its own.
 *
 * As a general rule of thumb, menu assumes that the following heuristic
 * holds:
 *     The busier the system, the less impact of C states is acceptable
 *
 * This rule-of-thumb is implemented using a performance-multiplier:
 * If the exit latency times the performance multiplier is longer than
 * the predicted duration, the C state is not considered a candidate
 * for selection due to a too high performance impact. So the higher
 * this multiplier is, the longer we need to be idle to pick a deep C
 * state, and thus the less likely a busy CPU will hit such a deep
 * C state.
 *
 * One factor is used in determining this multiplier:
 * a value of 10 is added for each task that is waiting for
 * IO on this CPU.
 * (this value is experimentally determined)
 *
 * The iowait value gives a CPU-local, instantaneous input to the
 * decision. The factor may look low, but realize that iowait is
 * also already represented in the system load average.
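 *
 * For example (illustrative numbers): with 2 tasks waiting for IO,
 * the multiplier is 1 + 10 * 2 = 21, so a predicted idle time of
 * 420 microseconds caps the acceptable exit latency at 420 / 21 = 20
 * microseconds, ruling out any C state with a larger exit latency on
 * that busy CPU.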
 *
 */

struct menu_device {
	int             needs_update;
	int             tick_wakeup;

	u64		next_timer_ns;
	unsigned int	bucket;
	unsigned int	correction_factor[BUCKETS];
	unsigned int	intervals[INTERVALS];
	int		interval_ptr;
};
static inline int which_bucket(u64 duration_ns, unsigned long nr_iowaiters)
{
	int bucket = 0;

	/*
	 * We keep two groups of stats; one with IO pending,
	 * one without.
	 * This allows us to calculate
	 * E(duration)|iowait
	 */
	if (nr_iowaiters)
		bucket = BUCKETS/2;

	if (duration_ns < 10ULL * NSEC_PER_USEC)
		return bucket;
	if (duration_ns < 100ULL * NSEC_PER_USEC)
		return bucket + 1;
	if (duration_ns < 1000ULL * NSEC_PER_USEC)
		return bucket + 2;
	if (duration_ns < 10000ULL * NSEC_PER_USEC)
		return bucket + 3;
	if (duration_ns < 100000ULL * NSEC_PER_USEC)
		return bucket + 4;
	return bucket + 5;
}

/*
 * Return a multiplier for the exit latency that is intended
 * to take performance requirements into account.
 * The more performance critical we estimate the system
 * to be, the higher this multiplier, and thus the higher
 * the barrier to go to an expensive C state.
 */
static inline int performance_multiplier(unsigned long nr_iowaiters)
{
	/* for IO wait tasks (per cpu!) we add 10x each */
	return 1 + 10 * nr_iowaiters;
}

static DEFINE_PER_CPU(struct menu_device, menu_devices);

static void menu_update(struct cpuidle_driver *drv, struct cpuidle_device *dev);

/*
 * Try detecting repeating patterns by keeping track of the last 8
 * intervals, and checking if the standard deviation of that set
 * of points is below a threshold. If it is... then use the
 * average of these 8 points as the estimated value.
 */
static unsigned int get_typical_interval(struct menu_device *data,
					 unsigned int predicted_us)
{
	int i, divisor;
	unsigned int min, max, thresh, avg;
	uint64_t sum, variance;

	thresh = INT_MAX; /* Discard outliers above this value */

again:

	/* First calculate the average of past intervals */
	min = UINT_MAX;
	max = 0;
	sum = 0;
	divisor = 0;
	for (i = 0; i < INTERVALS; i++) {
		unsigned int value = data->intervals[i];
		if (value <= thresh) {
			sum += value;
			divisor++;
			if (value > max)
				max = value;

			if (value < min)
				min = value;
		}
	}

	/*
	 * If the result of the computation is going to be discarded anyway,
	 * avoid the computation altogether.
	 */
	if (min >= predicted_us)
		return UINT_MAX;

	if (divisor == INTERVALS)
		avg = sum >> INTERVAL_SHIFT;
	else
		avg = div_u64(sum, divisor);

	/* Then try to determine variance */
	variance = 0;
	for (i = 0; i < INTERVALS; i++) {
		unsigned int value = data->intervals[i];
		if (value <= thresh) {
			int64_t diff = (int64_t)value - avg;
			variance += diff * diff;
		}
	}
	if (divisor == INTERVALS)
		variance >>= INTERVAL_SHIFT;
	else
		do_div(variance, divisor);

	/*
	 * The typical interval is obtained when standard deviation is
	 * small (stddev <= 20 us, variance <= 400 us^2) or standard
	 * deviation is small compared to the average interval (avg >
	 * 6*stddev, avg^2 > 36*variance). The average is smaller than
	 * UINT_MAX aka U32_MAX, so computing its square does not
	 * overflow a u64. We simply reject this candidate average if
	 * the standard deviation is greater than 715 s (which is
	 * rather unlikely).
	 *
	 * Use this result only if there is no timer to wake us up sooner.
	 */
	if (likely(variance <= U64_MAX/36)) {
		if ((((u64)avg*avg > variance*36) && (divisor * 4 >= INTERVALS * 3))
							|| variance <= 400) {
			return avg;
		}
	}
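
	/*
	 * Illustrative numbers: avg = 100 us with variance = 256 us^2
	 * (stddev = 16 us) passes the variance <= 400 check above, while
	 * avg = 1000 us with variance = 16384 us^2 (stddev = 128 us)
	 * passes the avg^2 > 36 * variance check instead, provided all
	 * 8 samples were kept.
	 */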

	/*
	 * If we have outliers to the upside in our distribution, discard
	 * those by setting the threshold to exclude these outliers, then
	 * calculate the average and standard deviation again. Once we get
	 * down to the bottom 3/4 of our samples, stop excluding samples.
	 *
	 * This can deal with workloads that have long pauses interspersed
	 * with sporadic activity with a bunch of short pauses.
	 */
	if ((divisor * 4) <= INTERVALS * 3)
		return UINT_MAX;

	thresh = max - 1;
	goto again;
}

/**
 * menu_select - selects the next idle state to enter
 * @drv: cpuidle driver containing state data
 * @dev: the CPU
 * @stop_tick: indication on whether or not to stop the tick
 */
static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev,
		       bool *stop_tick)
{
	struct menu_device *data = this_cpu_ptr(&menu_devices);
	s64 latency_req = cpuidle_governor_latency_req(dev->cpu);
	unsigned int predicted_us;
	u64 predicted_ns;
	u64 interactivity_req;
	unsigned long nr_iowaiters;
	ktime_t delta_next;
	int i, idx;

	if (data->needs_update) {
		menu_update(drv, dev);
		data->needs_update = 0;
	}

	/* determine the expected residency time, round up */
	data->next_timer_ns = tick_nohz_get_sleep_length(&delta_next);

	nr_iowaiters = nr_iowait_cpu(dev->cpu);
	data->bucket = which_bucket(data->next_timer_ns, nr_iowaiters);

	if (unlikely(drv->state_count <= 1 || latency_req == 0) ||
	    ((data->next_timer_ns < drv->states[1].target_residency_ns ||
	      latency_req < drv->states[1].exit_latency_ns) &&
	     !dev->states_usage[0].disable)) {
		/*
		 * In this case state[0] will be used no matter what, so return
		 * it right away and keep the tick running if state[0] is a
		 * polling one.
		 */
		*stop_tick = !(drv->states[0].flags & CPUIDLE_FLAG_POLLING);
		return 0;
	}

	/*
	 * Round to the nearest microsecond (a remainder of at least half
	 * a microsecond rounds up).
	 */
	predicted_us = div_u64(data->next_timer_ns *
			       data->correction_factor[data->bucket] +
			       (RESOLUTION * DECAY * NSEC_PER_USEC) / 2,
			       RESOLUTION * DECAY * NSEC_PER_USEC);
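
	/*
	 * Worked example with made-up numbers: next_timer_ns = 1,000,000
	 * (1 ms) and a stored factor of 4096 (0.5 in fixed point) give
	 * (1000000 * 4096 + 4096000) / 8192000 = 500 us, where 8192000
	 * is RESOLUTION * DECAY * NSEC_PER_USEC.
	 */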
	/* Use the lowest expected idle interval to pick the idle state. */
	predicted_ns = (u64)min(predicted_us,
				get_typical_interval(data, predicted_us)) *
				NSEC_PER_USEC;

	if (tick_nohz_tick_stopped()) {
		/*
		 * If the tick is already stopped, the cost of possible short
		 * idle duration misprediction is much higher, because the CPU
		 * may be stuck in a shallow idle state for a long time as a
		 * result of it.  In that case say we might mispredict and use
		 * the known time till the closest timer event for the idle
		 * state selection.
		 */
		if (predicted_ns < TICK_NSEC)
			predicted_ns = delta_next;
	} else {
		/*
		 * Use the performance multiplier and the user-configurable
		 * latency_req to determine the maximum exit latency.
		 */
		interactivity_req = div64_u64(predicted_ns,
					      performance_multiplier(nr_iowaiters));
		if (latency_req > interactivity_req)
			latency_req = interactivity_req;
	}

	/*
	 * Find the idle state with the lowest power while satisfying
	 * our constraints.
	 */
	idx = -1;
	for (i = 0; i < drv->state_count; i++) {
		struct cpuidle_state *s = &drv->states[i];

		if (dev->states_usage[i].disable)
			continue;

		if (idx == -1)
			idx = i; /* first enabled state */

		if (s->target_residency_ns > predicted_ns) {
			/*
			 * Use a physical idle state, not busy polling, unless
			 * a timer is going to trigger soon enough.
			 */
			if ((drv->states[idx].flags & CPUIDLE_FLAG_POLLING) &&
			    s->exit_latency_ns <= latency_req &&
			    s->target_residency_ns <= data->next_timer_ns) {
				predicted_ns = s->target_residency_ns;
				idx = i;
				break;
			}
			if (predicted_ns < TICK_NSEC)
				break;

			if (!tick_nohz_tick_stopped()) {
				/*
				 * If the state selected so far is shallow,
				 * waking up early won't hurt, so retain the
				 * tick in that case and let the governor run
				 * again in the next iteration of the loop.
				 */
				predicted_ns = drv->states[idx].target_residency_ns;
				break;
			}

			/*
			 * If the state selected so far is shallow and this
			 * state's target residency matches the time till the
			 * closest timer event, select this one to avoid getting
			 * stuck in the shallow one for too long.
			 */
			if (drv->states[idx].target_residency_ns < TICK_NSEC &&
			    s->target_residency_ns <= delta_next)
				idx = i;

			return idx;
		}
		if (s->exit_latency_ns > latency_req)
			break;

		idx = i;
	}

	if (idx == -1)
		idx = 0; /* No states enabled. Must use 0. */

	/*
	 * Don't stop the tick if the selected state is a polling one or if the
	 * expected idle duration is shorter than the tick period length.
	 */
	if (((drv->states[idx].flags & CPUIDLE_FLAG_POLLING) ||
	     predicted_ns < TICK_NSEC) && !tick_nohz_tick_stopped()) {
		*stop_tick = false;

		if (idx > 0 && drv->states[idx].target_residency_ns > delta_next) {
			/*
			 * The tick is not going to be stopped and the target
			 * residency of the state to be returned is not within
			 * the time until the next timer event including the
			 * tick, so try to correct that.
			 */
			for (i = idx - 1; i >= 0; i--) {
				if (dev->states_usage[i].disable)
					continue;

				idx = i;
				if (drv->states[i].target_residency_ns <= delta_next)
					break;
			}
		}
	}

	return idx;
}

/**
 * menu_reflect - records that data structures need update
 * @dev: the CPU
 * @index: the index of actual entered state
 *
 * NOTE: it's important to be fast here because this operation will add to
 *       the overall exit latency.
 */
static void menu_reflect(struct cpuidle_device *dev, int index)
{
	struct menu_device *data = this_cpu_ptr(&menu_devices);

	dev->last_state_idx = index;
	data->needs_update = 1;
	data->tick_wakeup = tick_nohz_idle_got_tick();
}

/**
 * menu_update - attempts to guess what happened after entry
 * @drv: cpuidle driver containing state data
 * @dev: the CPU
 */
static void menu_update(struct cpuidle_driver *drv, struct cpuidle_device *dev)
{
	struct menu_device *data = this_cpu_ptr(&menu_devices);
	int last_idx = dev->last_state_idx;
	struct cpuidle_state *target = &drv->states[last_idx];
	u64 measured_ns;
	unsigned int new_factor;

	/*
	 * Try to figure out how much time passed between entry to low
	 * power state and occurrence of the wakeup event.
	 *
	 * If the entered idle state didn't support residency measurements,
	 * we use the measured value anyway if it is short, and truncate
	 * it to the whole expected time if it is long.
	 *
	 * Any measured amount of time will include the exit latency.
	 * Since we are interested in when the wakeup began, not when it
	 * was completed, we must subtract the exit latency. However, if
	 * the measured amount of time is less than the exit latency,
	 * assume the state was never reached and the exit latency is 0.
	 */

	if (data->tick_wakeup && data->next_timer_ns > TICK_NSEC) {
		/*
		 * The nohz code said that there wouldn't be any events within
		 * the tick boundary (if the tick was stopped), but the idle
		 * duration predictor had a differing opinion.  Since the CPU
		 * was woken up by a tick (that wasn't stopped after all), the
		 * predictor was not quite right, so assume that the CPU could
		 * have been idle long (but not forever) to help the idle
		 * duration predictor do a better job next time.
		 */
		measured_ns = 9 * MAX_INTERESTING / 10;
	} else if ((drv->states[last_idx].flags & CPUIDLE_FLAG_POLLING) &&
		   dev->poll_time_limit) {
		/*
		 * The CPU exited the "polling" state due to a time limit, so
		 * the idle duration prediction leading to the selection of that
		 * state was inaccurate.  If a better prediction had been made,
		 * the CPU might have been woken up from idle by the next timer.
		 * Assume that to be the case.
		 */
		measured_ns = data->next_timer_ns;
	} else {
		/* measured value */
		measured_ns = dev->last_residency_ns;

		/* Deduct exit latency */
		if (measured_ns > 2 * target->exit_latency_ns)
			measured_ns -= target->exit_latency_ns;
		else
			measured_ns /= 2;
	}
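
	/*
	 * For instance (illustrative numbers): a measured residency of
	 * 30 us with a 10 us exit latency is counted as 20 us of idle
	 * time, while a measured 15 us is simply halved to 7 us, since
	 * the state may never have been fully entered.
	 */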

	/* Make sure our coefficients do not exceed unity */
	if (measured_ns > data->next_timer_ns)
		measured_ns = data->next_timer_ns;

	/* Update our correction ratio */
	new_factor = data->correction_factor[data->bucket];
	new_factor -= new_factor / DECAY;

	if (data->next_timer_ns > 0 && measured_ns < MAX_INTERESTING)
		new_factor += div64_u64(RESOLUTION * measured_ns,
					data->next_timer_ns);
	else
		/*
		 * we were idle so long that we count it as a perfect
		 * prediction
		 */
		new_factor += RESOLUTION;
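
	/*
	 * This is an exponentially weighted moving average with a decay
	 * of 1/DECAY per sample: if measured_ns is consistently half of
	 * next_timer_ns, new_factor converges to RESOLUTION * DECAY / 2
	 * (4096), i.e. a correction factor of 0.5.
	 */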

	/*
	 * We don't want 0 as factor; we always want at least
	 * a tiny bit of estimated time. Fortunately, due to rounding,
	 * new_factor will stay nonzero regardless of measured_ns values
	 * and the compiler can eliminate this test as long as DECAY > 1.
	 */
	if (DECAY == 1 && unlikely(new_factor == 0))
		new_factor = 1;

	data->correction_factor[data->bucket] = new_factor;

	/* update the repeating-pattern data */
	data->intervals[data->interval_ptr++] = ktime_to_us(measured_ns);
	if (data->interval_ptr >= INTERVALS)
		data->interval_ptr = 0;
}

/**
 * menu_enable_device - scans a CPU's states and does setup
 * @drv: cpuidle driver
 * @dev: the CPU
 */
static int menu_enable_device(struct cpuidle_driver *drv,
				struct cpuidle_device *dev)
{
	struct menu_device *data = &per_cpu(menu_devices, dev->cpu);
	int i;

	memset(data, 0, sizeof(struct menu_device));

	/*
	 * If the correction factor is 0 (e.g. first-time init or CPU
	 * hotplug), we actually want to start out with a unity factor.
	 */
	for (i = 0; i < BUCKETS; i++)
		data->correction_factor[i] = RESOLUTION * DECAY;

	return 0;
}

static struct cpuidle_governor menu_governor = {
	.name =		"menu",
	.rating =	20,
	.enable =	menu_enable_device,
	.select =	menu_select,
	.reflect =	menu_reflect,
};

/**
 * init_menu - initializes the governor
 */
static int __init init_menu(void)
{
	return cpuidle_register_governor(&menu_governor);
}

postcore_initcall(init_menu);