Lines Matching defs:period
718 * The idea is to set a period in which each task runs once.
721 * this period because otherwise the slices get too small.
735 * We calculate the wall-time slice from the period by taking a part
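
The fragments at 718-735 describe how CFS picks a scheduling period long enough for every runnable task to run once, stretching it when slices would otherwise get too small, and then hands each task a wall-time slice proportional to its weight. The following is a minimal userspace sketch of that idea only, not the kernel's implementation: the constants, helper names (sched_period_ns, sched_slice_ns) and example weights are illustrative assumptions.

#include <stdint.h>
#include <stdio.h>

#define TARGET_LATENCY_NS  6000000ULL   /* assumed ~6ms target period */
#define MIN_GRANULARITY_NS  750000ULL   /* assumed ~0.75ms minimum slice */

/* Stretch the period once per-task slices would drop below the minimum. */
static uint64_t sched_period_ns(unsigned long nr_running)
{
	if (nr_running * MIN_GRANULARITY_NS > TARGET_LATENCY_NS)
		return nr_running * MIN_GRANULARITY_NS;
	return TARGET_LATENCY_NS;
}

/* Each task's wall-time slice is its weighted share of the period. */
static uint64_t sched_slice_ns(unsigned long nr_running,
			       unsigned long weight, unsigned long total_weight)
{
	return sched_period_ns(nr_running) * weight / total_weight;
}

int main(void)
{
	/* Two equal-weight tasks share a 6ms period; ten tasks stretch it to 7.5ms. */
	printf("2 tasks: %llu ns each\n",
	       (unsigned long long)sched_slice_ns(2, 1024, 2 * 1024));
	printf("10 tasks: %llu ns each\n",
	       (unsigned long long)sched_slice_ns(10, 1024, 10 * 1024));
	return 0;
}
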
1081 * Mark the end of the wait period if dequeueing a
1106 * We are starting a new run period:
1117 * Approximate time to scan a full NUMA task in ms. The task scan period is
1209 unsigned long period = smin;
1212 /* Scale the maximum scan period with the amount of shared memory. */
1219 period *= refcount_read(&ng->refcount);
1220 period *= shared + 1;
1221 period /= private + shared + 1;
1225 return max(smin, period);
1237 /* Scale the maximum scan period with the amount of shared memory. */
1242 unsigned long period = smax;
1244 period *= refcount_read(&ng->refcount);
1245 period *= shared + 1;
1246 period /= private + shared + 1;
1248 smax = max(smax, period);
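
Lines 1209-1248 show the task's NUMA scan period being scaled by the numa_group reference count and by the shared/private fault split, then clamped against the smin/smax bounds. Below is a self-contained sketch of that scaling; only the arithmetic mirrors the matched lines, while the helper name and the example numbers are assumptions.

#include <stdio.h>

/*
 * A larger sharing group and a larger share of "shared" faults stretch
 * the scan period; the result is clamped so it never drops below the
 * given floor.
 */
static unsigned long scale_scan_period(unsigned long floor,
				       unsigned long group_refcount,
				       unsigned long shared,
				       unsigned long private)
{
	unsigned long period = floor;

	/* Scale the scan period with the amount of shared memory. */
	period *= group_refcount;
	period *= shared + 1;
	period /= private + shared + 1;

	/* Never scan faster than the floor allows. */
	return period > floor ? period : floor;
}

int main(void)
{
	/* Mostly private faults: the result stays at the 1000 floor. */
	printf("%lu\n", scale_scan_period(1000, 2, 1, 9));
	/* Mostly shared faults in a four-task group: the period grows to ~3636. */
	printf("%lu\n", scale_scan_period(1000, 4, 9, 1));
	return 0;
}
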
1508 * independent, is then given by P(n)^2, provided our sample period
2236 * When adapting the scan rate, the period is divided into NUMA_PERIOD_SLOTS
2238 * period will be for the next scan window. If local/(local+remote) ratio is
2240 * the scan period will decrease. Aim for 70% local accesses.
2246 * Increase the scan period (slow down scanning) if the majority of
2249 * Otherwise, decrease the scan period.
2276 * Prepare to scale scan period relative to the current period.
2277 * == NUMA_PERIOD_THRESHOLD scan period stays the same
2278 * < NUMA_PERIOD_THRESHOLD scan period decreases (scan faster)
2279 * >= NUMA_PERIOD_THRESHOLD scan period increases (scan slower)
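
Lines 2236-2279 describe adapting the scan period from the local/(local+remote) fault ratio, slot by slot, aiming for roughly 70% local accesses. Here is a simplified sketch, assuming NUMA_PERIOD_SLOTS = 10 and NUMA_PERIOD_THRESHOLD = 7 to match that 70% target; the kernel's slot arithmetic is more involved (it also considers the private/shared split), so treat this only as the shape of the heuristic.

#include <stdio.h>

#define NUMA_PERIOD_SLOTS	10
#define NUMA_PERIOD_THRESHOLD	7

/*
 * Carve the period into slots.  A local-access ratio above the threshold
 * grows the period (scan slower), a lower ratio shrinks it (scan faster),
 * and at the threshold it stays put.
 */
static int adapt_scan_period(int period_ms, long local, long remote)
{
	int slot_ms = period_ms / NUMA_PERIOD_SLOTS;
	int ratio = (int)(local * NUMA_PERIOD_SLOTS / (local + remote + 1));
	int diff_slots = ratio - NUMA_PERIOD_THRESHOLD;

	if (slot_ms == 0)
		slot_ms = 1;

	return period_ms + diff_slots * slot_ms;
}

int main(void)
{
	printf("%d\n", adapt_scan_period(1000, 90, 10));  /* 90% local: 1100, scan slower */
	printf("%d\n", adapt_scan_period(1000, 40, 60));  /* 40% local: 600, scan faster  */
	return 0;
}
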
2323 * decays those on a 32ms period, which is orders of magnitude off
2324 * from the dozens-of-seconds NUMA balancing period. Use the scheduler
2327 static u64 numa_get_avg_runtime(struct task_struct *p, u64 *period)
2336 *period = now - p->last_task_numa_placement;
2339 if (unlikely((s64)*period < 0)) {
2340 *period = 0;
2344 *period = LOAD_AVG_MAX;
2453 u64 runtime, period;
2470 runtime = numa_get_avg_runtime(p, &period);
2507 f_weight = div64_u64(runtime << 16, period + 1);
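
Lines 2327-2344 sample how much the task actually ran since the last NUMA placement pass and over what wall-clock period, and line 2507 turns that pair into a 16.16 fixed-point weight (runtime << 16 / (period + 1)) so fault counts are normalized by CPU use rather than raw fault volume. The sketch below uses assumed field names and scaffolding; only last_task_numa_placement, the negative-period guard, and the shift/divide come from the matched lines.

#include <stdint.h>
#include <stdio.h>

struct numa_sample {
	uint64_t last_placement_ns;	/* timestamp of the previous placement pass */
	uint64_t last_runtime_ns;	/* accumulated runtime seen at that pass */
};

/* Return the runtime consumed since the last pass; report the elapsed period. */
static uint64_t avg_runtime(struct numa_sample *s, uint64_t now_ns,
			    uint64_t sum_exec_runtime_ns, uint64_t *period)
{
	uint64_t delta = sum_exec_runtime_ns - s->last_runtime_ns;

	*period = now_ns - s->last_placement_ns;
	/* Clock went backwards: avoid a bogus, huge period. */
	if ((int64_t)*period < 0)
		*period = 0;

	s->last_runtime_ns = sum_exec_runtime_ns;
	s->last_placement_ns = now_ns;
	return delta;
}

int main(void)
{
	struct numa_sample s = { .last_placement_ns = 1000000, .last_runtime_ns = 400000 };
	uint64_t period, runtime, f_weight;

	runtime = avg_runtime(&s, 3000000, 1400000, &period);
	/* Ran 1ms of a 2ms window: ~0.5 in 16.16 fixed point (65536 would be 1.0).
	 * The "+ 1" guards against dividing by a zero-length period. */
	f_weight = (runtime << 16) / (period + 1);
	printf("f_weight = %llu\n", (unsigned long long)f_weight);
	return 0;
}
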
3021 u64 period, now;
3037 period = (u64)curr->numa_scan_period * NSEC_PER_MSEC;
3039 if (now > curr->node_stamp + period) {
3043 curr->node_stamp += period;
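
Lines 3021-3043 are the tick-time trigger: the per-task scan period is kept in milliseconds, converted to nanoseconds, and once that much time has passed since node_stamp the stamp advances and the next scan is kicked off. A small sketch of that check; the struct and helper names are assumptions, the period conversion and stamp handling follow the matched lines.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define NSEC_PER_MSEC 1000000ULL

struct numa_scan_state {
	uint64_t node_stamp;		/* ns timestamp of the last scan kick */
	unsigned int scan_period_ms;	/* current per-task scan period */
};

static bool numa_tick(struct numa_scan_state *st, uint64_t now_ns)
{
	uint64_t period = (uint64_t)st->scan_period_ms * NSEC_PER_MSEC;

	if (now_ns <= st->node_stamp + period)
		return false;		/* not time to scan yet */

	st->node_stamp += period;	/* advance by one full period */
	return true;			/* caller would queue the scan work */
}

int main(void)
{
	struct numa_scan_state st = { .node_stamp = 0, .scan_period_ms = 1000 };

	printf("%d\n", numa_tick(&st, 500 * NSEC_PER_MSEC));   /* 0: too early */
	printf("%d\n", numa_tick(&st, 1500 * NSEC_PER_MSEC));  /* 1: period elapsed */
	return 0;
}
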
4336 * The 'current' period is already promised to the current tasks,
4771 * don't let the period tick interfere with the hrtick preemption
4821 * default period for cfs group bandwidth.
4990 /* This will start the period timer if necessary */
4995 * entire period. We additionally needed to make sure that any
5198 * period the timer is deactivated until scheduling resumes; cfs_b->idle is
5224 /* mark as potentially idle for the upcoming period */
5245 * While we are ensured activity in the period following an
5260 /* minimum remaining period time to redistribute slack quota */
5266 * Are we near the end of the current quota period?
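
Lines 5198-5266 concern the bandwidth period timer going idle and slack quota being returned near the end of a period: leftover runtime is only worth redistributing if the period timer is not about to refill everything anyway. A hedged sketch of that "is a refresh imminent?" test; the 2ms floor, the names, and the structure here are assumptions for illustration, not the kernel's constants.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define NSEC_PER_MSEC		1000000LL
#define MIN_SLACK_EXPIRATION	(2 * NSEC_PER_MSEC)	/* assumed minimum remaining period time */

/* True when the current quota period is about to end (refresh imminent). */
static bool refresh_within(int64_t period_remaining_ns, int64_t min_expire_ns)
{
	return period_remaining_ns < min_expire_ns;
}

static bool should_redistribute_slack(int64_t period_remaining_ns,
				      int64_t slack_runtime_ns)
{
	/* Nothing meaningful to give back. */
	if (slack_runtime_ns <= 0)
		return false;
	/* Refill is imminent; redistribution would be wasted work. */
	if (refresh_within(period_remaining_ns, MIN_SLACK_EXPIRATION))
		return false;
	return true;
}

int main(void)
{
	printf("%d\n", should_redistribute_slack(50 * NSEC_PER_MSEC, NSEC_PER_MSEC)); /* 1 */
	printf("%d\n", should_redistribute_slack(1 * NSEC_PER_MSEC, NSEC_PER_MSEC));  /* 0 */
	return 0;
}
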
5469 overrun = hrtimer_forward_now(timer, cfs_b->period);
5477 u64 new, old = ktime_to_ns(cfs_b->period);
5480 * Grow period by a factor of 2 to avoid losing precision.
5481 * Precision loss in the quota/period ratio can cause __cfs_schedulable
5486 cfs_b->period = ns_to_ktime(new);
5489 pr_warn_ratelimited("cfs_period_timer[cpu%d]: period too short, scaling up (new cfs_period_us = %lld, "
5494 pr_warn_ratelimited("cfs_period_timer[cpu%d]: period too short, but cannot scale up without losing "
5517 cfs_b->period = ns_to_ktime(default_cfs_period());
5543 hrtimer_forward_now(&cfs_b->period_timer, cfs_b->period);
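
Lines 5469-5543 are the recovery path for a cfs period that is too short: the period timer keeps overrunning, so the period and quota are both doubled to preserve their ratio, or a rate-limited warning is emitted when scaling further is not possible. A standalone sketch of that decision; the 1-second cap and the struct layout are assumptions, the grow-by-two logic follows the matched lines.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define MAX_CFS_PERIOD_NS 1000000000ULL	/* assumed upper bound on the period */

struct cfs_bandwidth_sketch {
	uint64_t period_ns;
	uint64_t quota_ns;
};

static bool scale_up_period(struct cfs_bandwidth_sketch *b)
{
	uint64_t new_period = b->period_ns * 2;

	if (new_period >= MAX_CFS_PERIOD_NS)
		return false;	/* cannot scale up without losing precision/range */

	/* Grow period and quota together so the quota/period ratio is unchanged. */
	b->period_ns = new_period;
	b->quota_ns *= 2;
	return true;
}

int main(void)
{
	struct cfs_bandwidth_sketch b = { .period_ns = 100000000ULL, .quota_ns = 50000000ULL };

	if (scale_up_period(&b))
		printf("scaled up: period=%llu ns quota=%llu ns\n",
		       (unsigned long long)b.period_ns,
		       (unsigned long long)b.quota_ns);
	else
		printf("period too short, but cannot scale up further\n");
	return 0;
}
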
6694 * has just got a big task running since a long sleep period. At the same time