162306a36Sopenharmony_ci// SPDX-License-Identifier: GPL-2.0-only
262306a36Sopenharmony_ci/*
362306a36Sopenharmony_ci * menu.c - the menu idle governor
462306a36Sopenharmony_ci *
562306a36Sopenharmony_ci * Copyright (C) 2006-2007 Adam Belay <abelay@novell.com>
662306a36Sopenharmony_ci * Copyright (C) 2009 Intel Corporation
762306a36Sopenharmony_ci * Author:
862306a36Sopenharmony_ci *        Arjan van de Ven <arjan@linux.intel.com>
962306a36Sopenharmony_ci */
1062306a36Sopenharmony_ci
1162306a36Sopenharmony_ci#include <linux/kernel.h>
1262306a36Sopenharmony_ci#include <linux/cpuidle.h>
1362306a36Sopenharmony_ci#include <linux/time.h>
1462306a36Sopenharmony_ci#include <linux/ktime.h>
1562306a36Sopenharmony_ci#include <linux/hrtimer.h>
1662306a36Sopenharmony_ci#include <linux/tick.h>
1762306a36Sopenharmony_ci#include <linux/sched.h>
1862306a36Sopenharmony_ci#include <linux/sched/loadavg.h>
1962306a36Sopenharmony_ci#include <linux/sched/stat.h>
2062306a36Sopenharmony_ci#include <linux/math64.h>
2162306a36Sopenharmony_ci
2262306a36Sopenharmony_ci#include "gov.h"
2362306a36Sopenharmony_ci
2462306a36Sopenharmony_ci#define BUCKETS 12
2562306a36Sopenharmony_ci#define INTERVAL_SHIFT 3
2662306a36Sopenharmony_ci#define INTERVALS (1UL << INTERVAL_SHIFT)
2762306a36Sopenharmony_ci#define RESOLUTION 1024
2862306a36Sopenharmony_ci#define DECAY 8
2962306a36Sopenharmony_ci#define MAX_INTERESTING (50000 * NSEC_PER_USEC)
3062306a36Sopenharmony_ci
3162306a36Sopenharmony_ci/*
3262306a36Sopenharmony_ci * Concepts and ideas behind the menu governor
3362306a36Sopenharmony_ci *
3462306a36Sopenharmony_ci * For the menu governor, there are 3 decision factors for picking a C
3562306a36Sopenharmony_ci * state:
3662306a36Sopenharmony_ci * 1) Energy break even point
3762306a36Sopenharmony_ci * 2) Performance impact
3862306a36Sopenharmony_ci * 3) Latency tolerance (from pmqos infrastructure)
3962306a36Sopenharmony_ci * These three factors are treated independently.
4062306a36Sopenharmony_ci *
4162306a36Sopenharmony_ci * Energy break even point
4262306a36Sopenharmony_ci * -----------------------
4362306a36Sopenharmony_ci * C state entry and exit have an energy cost, and a certain amount of time in
4462306a36Sopenharmony_ci * the  C state is required to actually break even on this cost. CPUIDLE
4562306a36Sopenharmony_ci * provides us this duration in the "target_residency" field. So all that we
4662306a36Sopenharmony_ci * need is a good prediction of how long we'll be idle. Like the traditional
4762306a36Sopenharmony_ci * menu governor, we start with the actual known "next timer event" time.
4862306a36Sopenharmony_ci *
 * Since there are other sources of wakeups (interrupts for example) than
5062306a36Sopenharmony_ci * the next timer event, this estimation is rather optimistic. To get a
5162306a36Sopenharmony_ci * more realistic estimate, a correction factor is applied to the estimate,
5262306a36Sopenharmony_ci * that is based on historic behavior. For example, if in the past the actual
5362306a36Sopenharmony_ci * duration always was 50% of the next timer tick, the correction factor will
5462306a36Sopenharmony_ci * be 0.5.
5562306a36Sopenharmony_ci *
5662306a36Sopenharmony_ci * menu uses a running average for this correction factor, however it uses a
5762306a36Sopenharmony_ci * set of factors, not just a single factor. This stems from the realization
5862306a36Sopenharmony_ci * that the ratio is dependent on the order of magnitude of the expected
5962306a36Sopenharmony_ci * duration; if we expect 500 milliseconds of idle time the likelihood of
6062306a36Sopenharmony_ci * getting an interrupt very early is much higher than if we expect 50 micro
6162306a36Sopenharmony_ci * seconds of idle time. A second independent factor that has big impact on
6262306a36Sopenharmony_ci * the actual factor is if there is (disk) IO outstanding or not.
6362306a36Sopenharmony_ci * (as a special twist, we consider every sleep longer than 50 milliseconds
6462306a36Sopenharmony_ci * as perfect; there are no power gains for sleeping longer than this)
6562306a36Sopenharmony_ci *
6662306a36Sopenharmony_ci * For these two reasons we keep an array of 12 independent factors, that gets
6762306a36Sopenharmony_ci * indexed based on the magnitude of the expected duration as well as the
6862306a36Sopenharmony_ci * "is IO outstanding" property.
6962306a36Sopenharmony_ci *
7062306a36Sopenharmony_ci * Repeatable-interval-detector
7162306a36Sopenharmony_ci * ----------------------------
7262306a36Sopenharmony_ci * There are some cases where "next timer" is a completely unusable predictor:
7362306a36Sopenharmony_ci * Those cases where the interval is fixed, for example due to hardware
7462306a36Sopenharmony_ci * interrupt mitigation, but also due to fixed transfer rate devices such as
7562306a36Sopenharmony_ci * mice.
7662306a36Sopenharmony_ci * For this, we use a different predictor: We track the duration of the last 8
 * intervals and if the standard deviation of these 8 intervals is below a
7862306a36Sopenharmony_ci * threshold value, we use the average of these intervals as prediction.
7962306a36Sopenharmony_ci *
8062306a36Sopenharmony_ci * Limiting Performance Impact
8162306a36Sopenharmony_ci * ---------------------------
8262306a36Sopenharmony_ci * C states, especially those with large exit latencies, can have a real
8362306a36Sopenharmony_ci * noticeable impact on workloads, which is not acceptable for most sysadmins,
8462306a36Sopenharmony_ci * and in addition, less performance has a power price of its own.
8562306a36Sopenharmony_ci *
8662306a36Sopenharmony_ci * As a general rule of thumb, menu assumes that the following heuristic
8762306a36Sopenharmony_ci * holds:
8862306a36Sopenharmony_ci *     The busier the system, the less impact of C states is acceptable
8962306a36Sopenharmony_ci *
9062306a36Sopenharmony_ci * This rule-of-thumb is implemented using a performance-multiplier:
9162306a36Sopenharmony_ci * If the exit latency times the performance multiplier is longer than
9262306a36Sopenharmony_ci * the predicted duration, the C state is not considered a candidate
9362306a36Sopenharmony_ci * for selection due to a too high performance impact. So the higher
9462306a36Sopenharmony_ci * this multiplier is, the longer we need to be idle to pick a deep C
9562306a36Sopenharmony_ci * state, and thus the less likely a busy CPU will hit such a deep
9662306a36Sopenharmony_ci * C state.
9762306a36Sopenharmony_ci *
 * Two factors are used in determining this multiplier:
9962306a36Sopenharmony_ci * a value of 10 is added for each point of "per cpu load average" we have.
10062306a36Sopenharmony_ci * a value of 5 points is added for each process that is waiting for
10162306a36Sopenharmony_ci * IO on this CPU.
10262306a36Sopenharmony_ci * (these values are experimentally determined)
10362306a36Sopenharmony_ci *
10462306a36Sopenharmony_ci * The load average factor gives a longer term (few seconds) input to the
 * decision, while the iowait value gives a cpu local instantaneous input.
10662306a36Sopenharmony_ci * The iowait factor may look low, but realize that this is also already
10762306a36Sopenharmony_ci * represented in the system load average.
10862306a36Sopenharmony_ci *
10962306a36Sopenharmony_ci */
11062306a36Sopenharmony_ci
/*
 * Per-CPU state of the menu governor.  All fields are written and read
 * only on the owning CPU (via this_cpu_ptr()/per_cpu()), so no locking
 * is used.
 */
struct menu_device {
	int             needs_update;	/* set by menu_reflect(); consumed by next menu_select() */
	int             tick_wakeup;	/* last wakeup was the scheduler tick (tick_nohz_idle_got_tick()) */

	u64		next_timer_ns;	/* time till closest timer, sampled at selection time */
	unsigned int	bucket;		/* correction_factor[] index used for the last selection */
	unsigned int	correction_factor[BUCKETS];	/* observed/predicted ratio, scaled by RESOLUTION*DECAY */
	unsigned int	intervals[INTERVALS];		/* ring buffer of recent measured idle durations, in us */
	int		interval_ptr;	/* next write position in intervals[] */
};
12162306a36Sopenharmony_ci
12262306a36Sopenharmony_cistatic inline int which_bucket(u64 duration_ns, unsigned int nr_iowaiters)
12362306a36Sopenharmony_ci{
12462306a36Sopenharmony_ci	int bucket = 0;
12562306a36Sopenharmony_ci
12662306a36Sopenharmony_ci	/*
12762306a36Sopenharmony_ci	 * We keep two groups of stats; one with no
12862306a36Sopenharmony_ci	 * IO pending, one without.
12962306a36Sopenharmony_ci	 * This allows us to calculate
13062306a36Sopenharmony_ci	 * E(duration)|iowait
13162306a36Sopenharmony_ci	 */
13262306a36Sopenharmony_ci	if (nr_iowaiters)
13362306a36Sopenharmony_ci		bucket = BUCKETS/2;
13462306a36Sopenharmony_ci
13562306a36Sopenharmony_ci	if (duration_ns < 10ULL * NSEC_PER_USEC)
13662306a36Sopenharmony_ci		return bucket;
13762306a36Sopenharmony_ci	if (duration_ns < 100ULL * NSEC_PER_USEC)
13862306a36Sopenharmony_ci		return bucket + 1;
13962306a36Sopenharmony_ci	if (duration_ns < 1000ULL * NSEC_PER_USEC)
14062306a36Sopenharmony_ci		return bucket + 2;
14162306a36Sopenharmony_ci	if (duration_ns < 10000ULL * NSEC_PER_USEC)
14262306a36Sopenharmony_ci		return bucket + 3;
14362306a36Sopenharmony_ci	if (duration_ns < 100000ULL * NSEC_PER_USEC)
14462306a36Sopenharmony_ci		return bucket + 4;
14562306a36Sopenharmony_ci	return bucket + 5;
14662306a36Sopenharmony_ci}
14762306a36Sopenharmony_ci
/*
 * Return a multiplier for the exit latency that is intended
 * to take performance requirements into account.
 * The more performance critical we estimate the system
 * to be, the higher this multiplier, and thus the higher
 * the barrier to go to an expensive C state.
 */
static inline int performance_multiplier(unsigned int nr_iowaiters)
{
	/*
	 * Start from unity and add 10x for every task waiting for I/O on
	 * this CPU (per-CPU count, not system-wide).
	 */
	unsigned int mult = 1;

	mult += 10 * nr_iowaiters;
	return mult;
}
16062306a36Sopenharmony_ci
/* One governor instance per CPU; accessed only from the owning CPU. */
static DEFINE_PER_CPU(struct menu_device, menu_devices);

/* Forward declaration: menu_update() runs lazily from menu_select(). */
static void menu_update(struct cpuidle_driver *drv, struct cpuidle_device *dev);
16462306a36Sopenharmony_ci
16562306a36Sopenharmony_ci/*
16662306a36Sopenharmony_ci * Try detecting repeating patterns by keeping track of the last 8
16762306a36Sopenharmony_ci * intervals, and checking if the standard deviation of that set
16862306a36Sopenharmony_ci * of points is below a threshold. If it is... then use the
16962306a36Sopenharmony_ci * average of these 8 points as the estimated value.
17062306a36Sopenharmony_ci */
17162306a36Sopenharmony_cistatic unsigned int get_typical_interval(struct menu_device *data)
17262306a36Sopenharmony_ci{
17362306a36Sopenharmony_ci	int i, divisor;
17462306a36Sopenharmony_ci	unsigned int min, max, thresh, avg;
17562306a36Sopenharmony_ci	uint64_t sum, variance;
17662306a36Sopenharmony_ci
17762306a36Sopenharmony_ci	thresh = INT_MAX; /* Discard outliers above this value */
17862306a36Sopenharmony_ci
17962306a36Sopenharmony_ciagain:
18062306a36Sopenharmony_ci
18162306a36Sopenharmony_ci	/* First calculate the average of past intervals */
18262306a36Sopenharmony_ci	min = UINT_MAX;
18362306a36Sopenharmony_ci	max = 0;
18462306a36Sopenharmony_ci	sum = 0;
18562306a36Sopenharmony_ci	divisor = 0;
18662306a36Sopenharmony_ci	for (i = 0; i < INTERVALS; i++) {
18762306a36Sopenharmony_ci		unsigned int value = data->intervals[i];
18862306a36Sopenharmony_ci		if (value <= thresh) {
18962306a36Sopenharmony_ci			sum += value;
19062306a36Sopenharmony_ci			divisor++;
19162306a36Sopenharmony_ci			if (value > max)
19262306a36Sopenharmony_ci				max = value;
19362306a36Sopenharmony_ci
19462306a36Sopenharmony_ci			if (value < min)
19562306a36Sopenharmony_ci				min = value;
19662306a36Sopenharmony_ci		}
19762306a36Sopenharmony_ci	}
19862306a36Sopenharmony_ci
19962306a36Sopenharmony_ci	if (!max)
20062306a36Sopenharmony_ci		return UINT_MAX;
20162306a36Sopenharmony_ci
20262306a36Sopenharmony_ci	if (divisor == INTERVALS)
20362306a36Sopenharmony_ci		avg = sum >> INTERVAL_SHIFT;
20462306a36Sopenharmony_ci	else
20562306a36Sopenharmony_ci		avg = div_u64(sum, divisor);
20662306a36Sopenharmony_ci
20762306a36Sopenharmony_ci	/* Then try to determine variance */
20862306a36Sopenharmony_ci	variance = 0;
20962306a36Sopenharmony_ci	for (i = 0; i < INTERVALS; i++) {
21062306a36Sopenharmony_ci		unsigned int value = data->intervals[i];
21162306a36Sopenharmony_ci		if (value <= thresh) {
21262306a36Sopenharmony_ci			int64_t diff = (int64_t)value - avg;
21362306a36Sopenharmony_ci			variance += diff * diff;
21462306a36Sopenharmony_ci		}
21562306a36Sopenharmony_ci	}
21662306a36Sopenharmony_ci	if (divisor == INTERVALS)
21762306a36Sopenharmony_ci		variance >>= INTERVAL_SHIFT;
21862306a36Sopenharmony_ci	else
21962306a36Sopenharmony_ci		do_div(variance, divisor);
22062306a36Sopenharmony_ci
22162306a36Sopenharmony_ci	/*
22262306a36Sopenharmony_ci	 * The typical interval is obtained when standard deviation is
22362306a36Sopenharmony_ci	 * small (stddev <= 20 us, variance <= 400 us^2) or standard
22462306a36Sopenharmony_ci	 * deviation is small compared to the average interval (avg >
22562306a36Sopenharmony_ci	 * 6*stddev, avg^2 > 36*variance). The average is smaller than
22662306a36Sopenharmony_ci	 * UINT_MAX aka U32_MAX, so computing its square does not
22762306a36Sopenharmony_ci	 * overflow a u64. We simply reject this candidate average if
22862306a36Sopenharmony_ci	 * the standard deviation is greater than 715 s (which is
22962306a36Sopenharmony_ci	 * rather unlikely).
23062306a36Sopenharmony_ci	 *
23162306a36Sopenharmony_ci	 * Use this result only if there is no timer to wake us up sooner.
23262306a36Sopenharmony_ci	 */
23362306a36Sopenharmony_ci	if (likely(variance <= U64_MAX/36)) {
23462306a36Sopenharmony_ci		if ((((u64)avg*avg > variance*36) && (divisor * 4 >= INTERVALS * 3))
23562306a36Sopenharmony_ci							|| variance <= 400) {
23662306a36Sopenharmony_ci			return avg;
23762306a36Sopenharmony_ci		}
23862306a36Sopenharmony_ci	}
23962306a36Sopenharmony_ci
24062306a36Sopenharmony_ci	/*
24162306a36Sopenharmony_ci	 * If we have outliers to the upside in our distribution, discard
24262306a36Sopenharmony_ci	 * those by setting the threshold to exclude these outliers, then
24362306a36Sopenharmony_ci	 * calculate the average and standard deviation again. Once we get
24462306a36Sopenharmony_ci	 * down to the bottom 3/4 of our samples, stop excluding samples.
24562306a36Sopenharmony_ci	 *
24662306a36Sopenharmony_ci	 * This can deal with workloads that have long pauses interspersed
24762306a36Sopenharmony_ci	 * with sporadic activity with a bunch of short pauses.
24862306a36Sopenharmony_ci	 */
24962306a36Sopenharmony_ci	if ((divisor * 4) <= INTERVALS * 3)
25062306a36Sopenharmony_ci		return UINT_MAX;
25162306a36Sopenharmony_ci
25262306a36Sopenharmony_ci	thresh = max - 1;
25362306a36Sopenharmony_ci	goto again;
25462306a36Sopenharmony_ci}
25562306a36Sopenharmony_ci
/**
 * menu_select - selects the next idle state to enter
 * @drv: cpuidle driver containing state data
 * @dev: the CPU
 * @stop_tick: indication on whether or not to stop the tick
 *
 * Return: the index of the idle state to enter.  *@stop_tick is set to
 * tell the caller whether stopping the scheduler tick is worthwhile for
 * the selected state.
 */
static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev,
		       bool *stop_tick)
{
	struct menu_device *data = this_cpu_ptr(&menu_devices);
	s64 latency_req = cpuidle_governor_latency_req(dev->cpu);
	u64 predicted_ns;
	u64 interactivity_req;
	unsigned int nr_iowaiters;
	ktime_t delta, delta_tick;
	int i, idx;

	/* Fold the outcome of the previous idle period into the statistics. */
	if (data->needs_update) {
		menu_update(drv, dev);
		data->needs_update = 0;
	}

	nr_iowaiters = nr_iowait_cpu(dev->cpu);

	/* Find the shortest expected idle interval. */
	predicted_ns = get_typical_interval(data) * NSEC_PER_USEC;
	if (predicted_ns > RESIDENCY_THRESHOLD_NS) {
		unsigned int timer_us;

		/* Determine the time till the closest timer. */
		delta = tick_nohz_get_sleep_length(&delta_tick);
		if (unlikely(delta < 0)) {
			delta = 0;
			delta_tick = 0;
		}

		data->next_timer_ns = delta;
		data->bucket = which_bucket(data->next_timer_ns, nr_iowaiters);

		/*
		 * Scale the timer distance by this bucket's historic
		 * observed/predicted ratio (correction factor).
		 * Round up the result for half microseconds.
		 */
		timer_us = div_u64((RESOLUTION * DECAY * NSEC_PER_USEC) / 2 +
					data->next_timer_ns *
						data->correction_factor[data->bucket],
				   RESOLUTION * DECAY * NSEC_PER_USEC);
		/* Use the lowest expected idle interval to pick the idle state. */
		predicted_ns = min((u64)timer_us * NSEC_PER_USEC, predicted_ns);
	} else {
		/*
		 * Because the next timer event is not going to be determined
		 * in this case, assume that without the tick the closest timer
		 * will be in distant future and that the closest tick will occur
		 * after 1/2 of the tick period.
		 */
		data->next_timer_ns = KTIME_MAX;
		delta_tick = TICK_NSEC / 2;
		data->bucket = which_bucket(KTIME_MAX, nr_iowaiters);
	}

	/*
	 * Fast path: if there is only one state, latency tolerance is zero,
	 * or state 1 is ruled out by both residency/latency while state 0
	 * is enabled, state 0 is the only possible choice.
	 */
	if (unlikely(drv->state_count <= 1 || latency_req == 0) ||
	    ((data->next_timer_ns < drv->states[1].target_residency_ns ||
	      latency_req < drv->states[1].exit_latency_ns) &&
	     !dev->states_usage[0].disable)) {
		/*
		 * In this case state[0] will be used no matter what, so return
		 * it right away and keep the tick running if state[0] is a
		 * polling one.
		 */
		*stop_tick = !(drv->states[0].flags & CPUIDLE_FLAG_POLLING);
		return 0;
	}

	if (tick_nohz_tick_stopped()) {
		/*
		 * If the tick is already stopped, the cost of possible short
		 * idle duration misprediction is much higher, because the CPU
		 * may be stuck in a shallow idle state for a long time as a
		 * result of it.  In that case say we might mispredict and use
		 * the known time till the closest timer event for the idle
		 * state selection.
		 */
		if (predicted_ns < TICK_NSEC)
			predicted_ns = data->next_timer_ns;
	} else {
		/*
		 * Use the performance multiplier and the user-configurable
		 * latency_req to determine the maximum exit latency.
		 */
		interactivity_req = div64_u64(predicted_ns,
					      performance_multiplier(nr_iowaiters));
		if (latency_req > interactivity_req)
			latency_req = interactivity_req;
	}

	/*
	 * Find the idle state with the lowest power while satisfying
	 * our constraints.  States are assumed ordered from shallowest
	 * to deepest.
	 */
	idx = -1;
	for (i = 0; i < drv->state_count; i++) {
		struct cpuidle_state *s = &drv->states[i];

		if (dev->states_usage[i].disable)
			continue;

		if (idx == -1)
			idx = i; /* first enabled state */

		if (s->target_residency_ns > predicted_ns) {
			/*
			 * Use a physical idle state, not busy polling, unless
			 * a timer is going to trigger soon enough.
			 */
			if ((drv->states[idx].flags & CPUIDLE_FLAG_POLLING) &&
			    s->exit_latency_ns <= latency_req &&
			    s->target_residency_ns <= data->next_timer_ns) {
				predicted_ns = s->target_residency_ns;
				idx = i;
				break;
			}
			if (predicted_ns < TICK_NSEC)
				break;

			if (!tick_nohz_tick_stopped()) {
				/*
				 * If the state selected so far is shallow,
				 * waking up early won't hurt, so retain the
				 * tick in that case and let the governor run
				 * again in the next iteration of the loop.
				 */
				predicted_ns = drv->states[idx].target_residency_ns;
				break;
			}

			/*
			 * If the state selected so far is shallow and this
			 * state's target residency matches the time till the
			 * closest timer event, select this one to avoid getting
			 * stuck in the shallow one for too long.
			 */
			if (drv->states[idx].target_residency_ns < TICK_NSEC &&
			    s->target_residency_ns <= delta_tick)
				idx = i;

			return idx;
		}
		if (s->exit_latency_ns > latency_req)
			break;

		idx = i;
	}

	if (idx == -1)
		idx = 0; /* No states enabled. Must use 0. */

	/*
	 * Don't stop the tick if the selected state is a polling one or if the
	 * expected idle duration is shorter than the tick period length.
	 */
	if (((drv->states[idx].flags & CPUIDLE_FLAG_POLLING) ||
	     predicted_ns < TICK_NSEC) && !tick_nohz_tick_stopped()) {
		*stop_tick = false;

		if (idx > 0 && drv->states[idx].target_residency_ns > delta_tick) {
			/*
			 * The tick is not going to be stopped and the target
			 * residency of the state to be returned is not within
			 * the time until the next timer event including the
			 * tick, so try to correct that.
			 */
			for (i = idx - 1; i >= 0; i--) {
				if (dev->states_usage[i].disable)
					continue;

				idx = i;
				if (drv->states[i].target_residency_ns <= delta_tick)
					break;
			}
		}
	}

	return idx;
}
43862306a36Sopenharmony_ci
43962306a36Sopenharmony_ci/**
44062306a36Sopenharmony_ci * menu_reflect - records that data structures need update
44162306a36Sopenharmony_ci * @dev: the CPU
44262306a36Sopenharmony_ci * @index: the index of actual entered state
44362306a36Sopenharmony_ci *
44462306a36Sopenharmony_ci * NOTE: it's important to be fast here because this operation will add to
44562306a36Sopenharmony_ci *       the overall exit latency.
44662306a36Sopenharmony_ci */
44762306a36Sopenharmony_cistatic void menu_reflect(struct cpuidle_device *dev, int index)
44862306a36Sopenharmony_ci{
44962306a36Sopenharmony_ci	struct menu_device *data = this_cpu_ptr(&menu_devices);
45062306a36Sopenharmony_ci
45162306a36Sopenharmony_ci	dev->last_state_idx = index;
45262306a36Sopenharmony_ci	data->needs_update = 1;
45362306a36Sopenharmony_ci	data->tick_wakeup = tick_nohz_idle_got_tick();
45462306a36Sopenharmony_ci}
45562306a36Sopenharmony_ci
/**
 * menu_update - attempts to guess what happened after entry
 * @drv: cpuidle driver containing state data
 * @dev: the CPU
 *
 * Runs lazily from menu_select() (via data->needs_update) rather than on
 * the wakeup path itself, and relies on next_timer_ns/bucket recorded by
 * the previous menu_select() call.
 */
static void menu_update(struct cpuidle_driver *drv, struct cpuidle_device *dev)
{
	struct menu_device *data = this_cpu_ptr(&menu_devices);
	int last_idx = dev->last_state_idx;
	struct cpuidle_state *target = &drv->states[last_idx];
	u64 measured_ns;
	unsigned int new_factor;

	/*
	 * Try to figure out how much time passed between entry to low
	 * power state and occurrence of the wakeup event.
	 *
	 * If the entered idle state didn't support residency measurements,
	 * we use them anyway if they are short, and if long,
	 * truncate to the whole expected time.
	 *
	 * Any measured amount of time will include the exit latency.
	 * Since we are interested in when the wakeup begun, not when it
	 * was completed, we must subtract the exit latency. However, if
	 * the measured amount of time is less than the exit latency,
	 * assume the state was never reached and the exit latency is 0.
	 */

	if (data->tick_wakeup && data->next_timer_ns > TICK_NSEC) {
		/*
		 * The nohz code said that there wouldn't be any events within
		 * the tick boundary (if the tick was stopped), but the idle
		 * duration predictor had a differing opinion.  Since the CPU
		 * was woken up by a tick (that wasn't stopped after all), the
		 * predictor was not quite right, so assume that the CPU could
		 * have been idle long (but not forever) to help the idle
		 * duration predictor do a better job next time.
		 */
		measured_ns = 9 * MAX_INTERESTING / 10;
	} else if ((drv->states[last_idx].flags & CPUIDLE_FLAG_POLLING) &&
		   dev->poll_time_limit) {
		/*
		 * The CPU exited the "polling" state due to a time limit, so
		 * the idle duration prediction leading to the selection of that
		 * state was inaccurate.  If a better prediction had been made,
		 * the CPU might have been woken up from idle by the next timer.
		 * Assume that to be the case.
		 */
		measured_ns = data->next_timer_ns;
	} else {
		/* measured value */
		measured_ns = dev->last_residency_ns;

		/* Deduct exit latency */
		if (measured_ns > 2 * target->exit_latency_ns)
			measured_ns -= target->exit_latency_ns;
		else
			measured_ns /= 2;
	}

	/* Make sure our coefficients do not exceed unity */
	if (measured_ns > data->next_timer_ns)
		measured_ns = data->next_timer_ns;

	/*
	 * Update our correction ratio: exponentially decaying average,
	 * losing 1/DECAY of the old value per update.
	 */
	new_factor = data->correction_factor[data->bucket];
	new_factor -= new_factor / DECAY;

	if (data->next_timer_ns > 0 && measured_ns < MAX_INTERESTING)
		new_factor += div64_u64(RESOLUTION * measured_ns,
					data->next_timer_ns);
	else
		/*
		 * we were idle so long that we count it as a perfect
		 * prediction
		 */
		new_factor += RESOLUTION;

	/*
	 * We don't want 0 as factor; we always want at least
	 * a tiny bit of estimated time. Fortunately, due to rounding,
	 * new_factor will stay nonzero regardless of measured_us values
	 * and the compiler can eliminate this test as long as DECAY > 1.
	 */
	if (DECAY == 1 && unlikely(new_factor == 0))
		new_factor = 1;

	data->correction_factor[data->bucket] = new_factor;

	/* update the repeating-pattern data (stored in microseconds) */
	data->intervals[data->interval_ptr++] = ktime_to_us(measured_ns);
	if (data->interval_ptr >= INTERVALS)
		data->interval_ptr = 0;
}
55062306a36Sopenharmony_ci
55162306a36Sopenharmony_ci/**
55262306a36Sopenharmony_ci * menu_enable_device - scans a CPU's states and does setup
55362306a36Sopenharmony_ci * @drv: cpuidle driver
55462306a36Sopenharmony_ci * @dev: the CPU
55562306a36Sopenharmony_ci */
55662306a36Sopenharmony_cistatic int menu_enable_device(struct cpuidle_driver *drv,
55762306a36Sopenharmony_ci				struct cpuidle_device *dev)
55862306a36Sopenharmony_ci{
55962306a36Sopenharmony_ci	struct menu_device *data = &per_cpu(menu_devices, dev->cpu);
56062306a36Sopenharmony_ci	int i;
56162306a36Sopenharmony_ci
56262306a36Sopenharmony_ci	memset(data, 0, sizeof(struct menu_device));
56362306a36Sopenharmony_ci
56462306a36Sopenharmony_ci	/*
56562306a36Sopenharmony_ci	 * if the correction factor is 0 (eg first time init or cpu hotplug
56662306a36Sopenharmony_ci	 * etc), we actually want to start out with a unity factor.
56762306a36Sopenharmony_ci	 */
56862306a36Sopenharmony_ci	for(i = 0; i < BUCKETS; i++)
56962306a36Sopenharmony_ci		data->correction_factor[i] = RESOLUTION * DECAY;
57062306a36Sopenharmony_ci
57162306a36Sopenharmony_ci	return 0;
57262306a36Sopenharmony_ci}
57362306a36Sopenharmony_ci
/*
 * Governor registration data.  The rating is used by the cpuidle core
 * when choosing a default governor; presumably 20 ranks menu above the
 * simpler governors — NOTE(review): confirm against the other governors'
 * ratings in this tree.
 */
static struct cpuidle_governor menu_governor = {
	.name =		"menu",
	.rating =	20,
	.enable =	menu_enable_device,
	.select =	menu_select,
	.reflect =	menu_reflect,
};
58162306a36Sopenharmony_ci
/**
 * init_menu - initializes the governor
 *
 * Return: 0 on success or a negative error code from
 * cpuidle_register_governor().
 */
static int __init init_menu(void)
{
	return cpuidle_register_governor(&menu_governor);
}

/* Registered at postcore level, before device/driver initcalls run. */
postcore_initcall(init_menu);
591