// SPDX-License-Identifier: GPL-2.0-or-later
/* sched.c - SPU scheduler.
 *
 * Copyright (C) IBM 2005
 * Author: Mark Nutter <mnutter@us.ibm.com>
 *
 * 2006-03-31	NUMA domains added.
 */

#undef DEBUG

#include <linux/errno.h>
#include <linux/sched/signal.h>
#include <linux/sched/loadavg.h>
#include <linux/sched/rt.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/completion.h>
#include <linux/vmalloc.h>
#include <linux/smp.h>
#include <linux/stddef.h>
#include <linux/unistd.h>
#include <linux/numa.h>
#include <linux/mutex.h>
#include <linux/notifier.h>
#include <linux/kthread.h>
#include <linux/pid_namespace.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>

#include <asm/io.h>
#include <asm/mmu_context.h>
#include <asm/spu.h>
#include <asm/spu_csa.h>
#include <asm/spu_priv1.h>
#include "spufs.h"
#define CREATE_TRACE_POINTS
#include "sputrace.h"

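/*
 * The runqueue: one list per priority level, plus a bitmap with a bit
 * set for every non-empty list.  Picking the best waiting context is a
 * find_first_bit() over the bitmap (a lower numeric priority means a
 * higher scheduling priority) followed by taking the head of that
 * list; see grab_runnable_context() below.  nr_waiting counts the
 * contexts currently queued here.
 */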
struct spu_prio_array {
	DECLARE_BITMAP(bitmap, MAX_PRIO);
	struct list_head runq[MAX_PRIO];
	spinlock_t runq_lock;
	int nr_waiting;
};

static unsigned long spu_avenrun[3];
static struct spu_prio_array *spu_prio;
static struct task_struct *spusched_task;
static struct timer_list spusched_timer;
static struct timer_list spuloadavg_timer;

/*
 * Priority of a normal, non-rt, non-niced process (aka nice level 0).
 */
#define NORMAL_PRIO		120

/*
 * Frequency of the spu scheduler tick.  By default we do one SPU scheduler
 * tick for every 10 CPU scheduler ticks.
 */
#define SPUSCHED_TICK		(10)

/*
 * These are the 'tuning knobs' of the scheduler:
 *
 * Minimum timeslice is 5 msecs (or 1 spu scheduler tick, whichever is
 * larger), default timeslice is 100 msecs, maximum timeslice is 800 msecs.
 */
#define MIN_SPU_TIMESLICE	max(5 * HZ / (1000 * SPUSCHED_TICK), 1)
#define DEF_SPU_TIMESLICE	(100 * HZ / (1000 * SPUSCHED_TICK))
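
/*
 * Worked example, assuming HZ=1000: one spu scheduler tick spans
 * SPUSCHED_TICK = 10 jiffies = 10 msecs, so DEF_SPU_TIMESLICE comes out
 * as 100 * 1000 / (1000 * 10) = 10 ticks (100 msecs), and
 * MIN_SPU_TIMESLICE as max(5000 / 10000, 1) = 1 tick (10 msecs).
 */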

#define SCALE_PRIO(x, prio) \
	max(x * (MAX_PRIO - prio) / (MAX_USER_PRIO / 2), MIN_SPU_TIMESLICE)

/*
 * scale user-nice values [ -20 ... 0 ... 19 ] to time slice values:
 * [800ms ... 100ms ... 5ms]
 *
 * The higher a thread's priority, the bigger timeslices
 * it gets during one round of execution. But even the lowest
 * priority thread gets MIN_SPU_TIMESLICE worth of execution time.
 */
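/*
 * Worked numbers, assuming the usual MAX_PRIO of 140 and MAX_USER_PRIO
 * of 40: nice -20 maps to prio 100, giving
 * max(4 * DEF_SPU_TIMESLICE * (140 - 100) / 20, MIN_SPU_TIMESLICE) =
 * 8 * DEF_SPU_TIMESLICE, i.e. ~800 msecs; nice 0 (prio 120) gives
 * DEF_SPU_TIMESLICE * 20 / 20, i.e. ~100 msecs; nice 19 (prio 139)
 * gives DEF_SPU_TIMESLICE / 20, i.e. ~5 msecs.
 */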
void spu_set_timeslice(struct spu_context *ctx)
{
	if (ctx->prio < NORMAL_PRIO)
		ctx->time_slice = SCALE_PRIO(DEF_SPU_TIMESLICE * 4, ctx->prio);
	else
		ctx->time_slice = SCALE_PRIO(DEF_SPU_TIMESLICE, ctx->prio);
}

/*
 * Update scheduling information from the owning thread.
 */
void __spu_update_sched_info(struct spu_context *ctx)
{
	/*
	 * assert that the context is not on the runqueue, so it is safe
	 * to change its scheduling parameters.
	 */
	BUG_ON(!list_empty(&ctx->rq));

	/*
	 * 32-bit assignments are atomic on powerpc, and we don't care about
	 * memory ordering here because retrieving the controlling thread is
	 * by definition racy.
	 */
	ctx->tid = current->pid;

	/*
	 * We do our own priority calculations, so we normally want
	 * ->static_prio to start with. Unfortunately this field
	 * contains junk for threads with a realtime scheduling
	 * policy so we have to look at ->prio in this case.
	 */
	if (rt_prio(current->prio))
		ctx->prio = current->prio;
	else
		ctx->prio = current->static_prio;
	ctx->policy = current->policy;

	/*
	 * TODO: the context may be loaded, so we may need to activate
	 * it again on a different node. But it shouldn't hurt anything
	 * to update its parameters, because we know that the scheduler
	 * is not actively looking at this field, since it is not on the
	 * runqueue. The context will be rescheduled on the proper node
	 * if it is timesliced or preempted.
	 */
	cpumask_copy(&ctx->cpus_allowed, current->cpus_ptr);

	/* Save the current cpu id for spu interrupt routing. */
	ctx->last_ran = raw_smp_processor_id();
}

void spu_update_sched_info(struct spu_context *ctx)
{
	int node;

	if (ctx->state == SPU_STATE_RUNNABLE) {
		node = ctx->spu->node;

		/*
		 * Take list_mutex to sync with find_victim().
		 */
		mutex_lock(&cbe_spu_info[node].list_mutex);
		__spu_update_sched_info(ctx);
		mutex_unlock(&cbe_spu_info[node].list_mutex);
	} else {
		__spu_update_sched_info(ctx);
	}
}

static int __node_allowed(struct spu_context *ctx, int node)
{
	if (nr_cpus_node(node)) {
		const struct cpumask *mask = cpumask_of_node(node);

		if (cpumask_intersects(mask, &ctx->cpus_allowed))
			return 1;
	}

	return 0;
}

static int node_allowed(struct spu_context *ctx, int node)
{
	int rval;

	spin_lock(&spu_prio->runq_lock);
	rval = __node_allowed(ctx, node);
	spin_unlock(&spu_prio->runq_lock);

	return rval;
}

void do_notify_spus_active(void)
{
	int node;

	/*
	 * Wake up the active spu_contexts.
	 *
	 * When the awakened processes see their "notify_active" flag is set,
	 * they will call spu_switch_notify().
	 */
	for_each_online_node(node) {
		struct spu *spu;

		mutex_lock(&cbe_spu_info[node].list_mutex);
		list_for_each_entry(spu, &cbe_spu_info[node].spus, cbe_list) {
			if (spu->alloc_state != SPU_FREE) {
				struct spu_context *ctx = spu->ctx;
				set_bit(SPU_SCHED_NOTIFY_ACTIVE,
					&ctx->sched_flags);
				mb();
				wake_up_all(&ctx->stop_wq);
			}
		}
		mutex_unlock(&cbe_spu_info[node].list_mutex);
	}
}

/**
 * spu_bind_context - bind spu context to physical spu
 * @spu:	physical spu to bind to
 * @ctx:	context to bind
 */
static void spu_bind_context(struct spu *spu, struct spu_context *ctx)
{
	spu_context_trace(spu_bind_context__enter, ctx, spu);

	spuctx_switch_state(ctx, SPU_UTIL_SYSTEM);

	if (ctx->flags & SPU_CREATE_NOSCHED)
		atomic_inc(&cbe_spu_info[spu->node].reserved_spus);

	ctx->stats.slb_flt_base = spu->stats.slb_flt;
	ctx->stats.class2_intr_base = spu->stats.class2_intr;

	spu_associate_mm(spu, ctx->owner);

	spin_lock_irq(&spu->register_lock);
	spu->ctx = ctx;
	spu->flags = 0;
	ctx->spu = spu;
	ctx->ops = &spu_hw_ops;
	spu->pid = current->pid;
	spu->tgid = current->tgid;
	spu->ibox_callback = spufs_ibox_callback;
	spu->wbox_callback = spufs_wbox_callback;
	spu->stop_callback = spufs_stop_callback;
	spu->mfc_callback = spufs_mfc_callback;
	spin_unlock_irq(&spu->register_lock);

	spu_unmap_mappings(ctx);

	spu_switch_log_notify(spu, ctx, SWITCH_LOG_START, 0);
	spu_restore(&ctx->csa, spu);
	spu->timestamp = jiffies;
	spu_switch_notify(spu, ctx);
	ctx->state = SPU_STATE_RUNNABLE;

	spuctx_switch_state(ctx, SPU_UTIL_USER);
}

/*
 * Must be used with the list_mutex held.
 */
static inline int sched_spu(struct spu *spu)
{
	BUG_ON(!mutex_is_locked(&cbe_spu_info[spu->node].list_mutex));

	return (!spu->ctx || !(spu->ctx->flags & SPU_CREATE_NOSCHED));
}

static void aff_merge_remaining_ctxs(struct spu_gang *gang)
{
	struct spu_context *ctx;

	list_for_each_entry(ctx, &gang->aff_list_head, aff_list) {
		if (list_empty(&ctx->aff_list))
			list_add(&ctx->aff_list, &gang->aff_list_head);
	}
	gang->aff_flags |= AFF_MERGED;
}

static void aff_set_offsets(struct spu_gang *gang)
{
	struct spu_context *ctx;
	int offset;

	offset = -1;
	list_for_each_entry_reverse(ctx, &gang->aff_ref_ctx->aff_list,
								aff_list) {
		if (&ctx->aff_list == &gang->aff_list_head)
			break;
		ctx->aff_offset = offset--;
	}

	offset = 0;
	list_for_each_entry(ctx, gang->aff_ref_ctx->aff_list.prev, aff_list) {
		if (&ctx->aff_list == &gang->aff_list_head)
			break;
		ctx->aff_offset = offset++;
	}

	gang->aff_flags |= AFF_OFFSETS_SET;
}

static struct spu *aff_ref_location(struct spu_context *ctx, int mem_aff,
		 int group_size, int lowest_offset)
{
	struct spu *spu;
	int node, n;

	/*
	 * TODO: A better algorithm could be used to find a good spu to be
	 *       used as reference location for the ctxs chain.
	 */
	node = cpu_to_node(raw_smp_processor_id());
	for (n = 0; n < MAX_NUMNODES; n++, node++) {
		/*
		 * "available_spus" counts how many spus are not potentially
		 * going to be used by other affinity gangs whose reference
		 * context is already in place. Although this code seeks to
		 * avoid having affinity gangs with a summed amount of
		 * contexts bigger than the amount of spus in the node,
		 * this may happen sporadically. In this case, available_spus
		 * becomes negative, which is harmless.
		 */
		int available_spus;

		node = (node < MAX_NUMNODES) ? node : 0;
		if (!node_allowed(ctx, node))
			continue;

		available_spus = 0;
		mutex_lock(&cbe_spu_info[node].list_mutex);
		list_for_each_entry(spu, &cbe_spu_info[node].spus, cbe_list) {
			if (spu->ctx && spu->ctx->gang && !spu->ctx->aff_offset
					&& spu->ctx->gang->aff_ref_spu)
				available_spus -= spu->ctx->gang->contexts;
			available_spus++;
		}
		if (available_spus < ctx->gang->contexts) {
			mutex_unlock(&cbe_spu_info[node].list_mutex);
			continue;
		}

		list_for_each_entry(spu, &cbe_spu_info[node].spus, cbe_list) {
			if ((!mem_aff || spu->has_mem_affinity) &&
							sched_spu(spu)) {
				mutex_unlock(&cbe_spu_info[node].list_mutex);
				return spu;
			}
		}
		mutex_unlock(&cbe_spu_info[node].list_mutex);
	}
	return NULL;
}

static void aff_set_ref_point_location(struct spu_gang *gang)
{
	int mem_aff, gs, lowest_offset;
	struct spu_context *ctx;
	struct spu *tmp;

	mem_aff = gang->aff_ref_ctx->flags & SPU_CREATE_AFFINITY_MEM;
	lowest_offset = 0;
	gs = 0;

	list_for_each_entry(tmp, &gang->aff_list_head, aff_list)
		gs++;

	list_for_each_entry_reverse(ctx, &gang->aff_ref_ctx->aff_list,
								aff_list) {
		if (&ctx->aff_list == &gang->aff_list_head)
			break;
		lowest_offset = ctx->aff_offset;
	}

	gang->aff_ref_spu = aff_ref_location(gang->aff_ref_ctx, mem_aff, gs,
							lowest_offset);
}

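/**
 * ctx_location - resolve an affinity offset to a physical spu
 * @ref:	affinity reference spu to walk from
 * @offset:	signed distance from @ref in the gang's affinity list
 * @node:	node the resulting spu must live on
 *
 * Walks the affinity list starting at @ref, forward for a non-negative
 * @offset and backward for a negative one, counting only spus that are
 * schedulable (see sched_spu()).  Must be called with the node's
 * list_mutex held.
 */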
static struct spu *ctx_location(struct spu *ref, int offset, int node)
{
	struct spu *spu;

	spu = NULL;
	if (offset >= 0) {
		list_for_each_entry(spu, ref->aff_list.prev, aff_list) {
			BUG_ON(spu->node != node);
			if (offset == 0)
				break;
			if (sched_spu(spu))
				offset--;
		}
	} else {
		list_for_each_entry_reverse(spu, ref->aff_list.next, aff_list) {
			BUG_ON(spu->node != node);
			if (offset == 0)
				break;
			if (sched_spu(spu))
				offset++;
		}
	}

	return spu;
}

/*
 * has_affinity is called each time a context is going to be scheduled.
 * It returns non-zero if the gang has an affinity reference spu on
 * which the context must run.
 */
static int has_affinity(struct spu_context *ctx)
{
	struct spu_gang *gang = ctx->gang;

	if (list_empty(&ctx->aff_list))
		return 0;

	if (atomic_read(&ctx->gang->aff_sched_count) == 0)
		ctx->gang->aff_ref_spu = NULL;

	if (!gang->aff_ref_spu) {
		if (!(gang->aff_flags & AFF_MERGED))
			aff_merge_remaining_ctxs(gang);
		if (!(gang->aff_flags & AFF_OFFSETS_SET))
			aff_set_offsets(gang);
		aff_set_ref_point_location(gang);
	}

	return gang->aff_ref_spu != NULL;
}

/**
 * spu_unbind_context - unbind spu context from physical spu
 * @spu:	physical spu to unbind from
 * @ctx:	context to unbind
 */
static void spu_unbind_context(struct spu *spu, struct spu_context *ctx)
{
	u32 status;

	spu_context_trace(spu_unbind_context__enter, ctx, spu);

	spuctx_switch_state(ctx, SPU_UTIL_SYSTEM);

	if (spu->ctx->flags & SPU_CREATE_NOSCHED)
		atomic_dec(&cbe_spu_info[spu->node].reserved_spus);

	if (ctx->gang)
		/*
		 * If ctx->gang->aff_sched_count is positive, SPU affinity is
		 * being considered in this gang. Using atomic_dec_if_positive
		 * allows us to skip an explicit check for affinity in this gang.
		 */
		atomic_dec_if_positive(&ctx->gang->aff_sched_count);

	spu_switch_notify(spu, NULL);
	spu_unmap_mappings(ctx);
	spu_save(&ctx->csa, spu);
	spu_switch_log_notify(spu, ctx, SWITCH_LOG_STOP, 0);

	spin_lock_irq(&spu->register_lock);
	spu->timestamp = jiffies;
	ctx->state = SPU_STATE_SAVED;
	spu->ibox_callback = NULL;
	spu->wbox_callback = NULL;
	spu->stop_callback = NULL;
	spu->mfc_callback = NULL;
	spu->pid = 0;
	spu->tgid = 0;
	ctx->ops = &spu_backing_ops;
	spu->flags = 0;
	spu->ctx = NULL;
	spin_unlock_irq(&spu->register_lock);

	spu_associate_mm(spu, NULL);

	ctx->stats.slb_flt +=
		(spu->stats.slb_flt - ctx->stats.slb_flt_base);
	ctx->stats.class2_intr +=
		(spu->stats.class2_intr - ctx->stats.class2_intr_base);

	/* This maps the underlying spu state to idle */
	spuctx_switch_state(ctx, SPU_UTIL_IDLE_LOADED);
	ctx->spu = NULL;

	if (spu_stopped(ctx, &status))
		wake_up_all(&ctx->stop_wq);
}

/**
 * __spu_add_to_rq - add a context to the runqueue
 * @ctx:       context to add
 */
static void __spu_add_to_rq(struct spu_context *ctx)
{
	/*
	 * Unfortunately this code path can be called from multiple threads
	 * on behalf of a single context due to the way the problem state
	 * mmap support works.
	 *
	 * Fortunately we need to wake up all these threads at the same time
	 * and can simply skip the runqueue addition for all but the first
	 * thread getting into this codepath.
	 *
	 * It's still quite hacky, and long-term we should proxy all other
	 * threads through the owner thread so that spu_run is in control
	 * of all the scheduling activity for a given context.
	 */
	if (list_empty(&ctx->rq)) {
		list_add_tail(&ctx->rq, &spu_prio->runq[ctx->prio]);
		set_bit(ctx->prio, spu_prio->bitmap);
		if (!spu_prio->nr_waiting++)
			mod_timer(&spusched_timer, jiffies + SPUSCHED_TICK);
	}
}

static void spu_add_to_rq(struct spu_context *ctx)
{
	spin_lock(&spu_prio->runq_lock);
	__spu_add_to_rq(ctx);
	spin_unlock(&spu_prio->runq_lock);
}

static void __spu_del_from_rq(struct spu_context *ctx)
{
	int prio = ctx->prio;

	if (!list_empty(&ctx->rq)) {
		if (!--spu_prio->nr_waiting)
			del_timer(&spusched_timer);
		list_del_init(&ctx->rq);

		if (list_empty(&spu_prio->runq[prio]))
			clear_bit(prio, spu_prio->bitmap);
	}
}

void spu_del_from_rq(struct spu_context *ctx)
{
	spin_lock(&spu_prio->runq_lock);
	__spu_del_from_rq(ctx);
	spin_unlock(&spu_prio->runq_lock);
}

static void spu_prio_wait(struct spu_context *ctx)
{
	DEFINE_WAIT(wait);

	/*
	 * The caller must explicitly wait for a context to be loaded
	 * if the nosched flag is set.  If NOSCHED is not set, the caller
	 * queues the context and waits for an spu event or error.
	 */
	BUG_ON(!(ctx->flags & SPU_CREATE_NOSCHED));

	spin_lock(&spu_prio->runq_lock);
	prepare_to_wait_exclusive(&ctx->stop_wq, &wait, TASK_INTERRUPTIBLE);
	if (!signal_pending(current)) {
		__spu_add_to_rq(ctx);
		spin_unlock(&spu_prio->runq_lock);
		mutex_unlock(&ctx->state_mutex);
		schedule();
		mutex_lock(&ctx->state_mutex);
		spin_lock(&spu_prio->runq_lock);
		__spu_del_from_rq(ctx);
	}
	spin_unlock(&spu_prio->runq_lock);
	__set_current_state(TASK_RUNNING);
	remove_wait_queue(&ctx->stop_wq, &wait);
}

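/**
 * spu_get_idle - find and reserve an idle spu for a context
 * @ctx:	spu context to find an spu for
 *
 * If @ctx belongs to a gang with affinity, try the spu dictated by the
 * gang's reference location first.  Otherwise scan the nodes, starting
 * with the local one, for any free spu that @ctx is allowed to run on.
 * On success the spu is marked SPU_USED and its channels are
 * initialized; returns %NULL if no suitable spu is idle.
 */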
static struct spu *spu_get_idle(struct spu_context *ctx)
{
	struct spu *spu, *aff_ref_spu;
	int node, n;

	spu_context_nospu_trace(spu_get_idle__enter, ctx);

	if (ctx->gang) {
		mutex_lock(&ctx->gang->aff_mutex);
		if (has_affinity(ctx)) {
			aff_ref_spu = ctx->gang->aff_ref_spu;
			atomic_inc(&ctx->gang->aff_sched_count);
			mutex_unlock(&ctx->gang->aff_mutex);
			node = aff_ref_spu->node;

			mutex_lock(&cbe_spu_info[node].list_mutex);
			spu = ctx_location(aff_ref_spu, ctx->aff_offset, node);
			if (spu && spu->alloc_state == SPU_FREE)
				goto found;
			mutex_unlock(&cbe_spu_info[node].list_mutex);

			atomic_dec(&ctx->gang->aff_sched_count);
			goto not_found;
		}
		mutex_unlock(&ctx->gang->aff_mutex);
	}
	node = cpu_to_node(raw_smp_processor_id());
	for (n = 0; n < MAX_NUMNODES; n++, node++) {
		node = (node < MAX_NUMNODES) ? node : 0;
		if (!node_allowed(ctx, node))
			continue;

		mutex_lock(&cbe_spu_info[node].list_mutex);
		list_for_each_entry(spu, &cbe_spu_info[node].spus, cbe_list) {
			if (spu->alloc_state == SPU_FREE)
				goto found;
		}
		mutex_unlock(&cbe_spu_info[node].list_mutex);
	}

 not_found:
	spu_context_nospu_trace(spu_get_idle__not_found, ctx);
	return NULL;

 found:
	spu->alloc_state = SPU_USED;
	mutex_unlock(&cbe_spu_info[node].list_mutex);
	spu_context_trace(spu_get_idle__found, ctx, spu);
	spu_init_channels(spu);
	return spu;
}

/**
 * find_victim - find a lower priority context to preempt
 * @ctx:	candidate context for running
 *
 * Returns the freed physical spu to run the new context on.
 */
static struct spu *find_victim(struct spu_context *ctx)
{
	struct spu_context *victim = NULL;
	struct spu *spu;
	int node, n;

	spu_context_nospu_trace(spu_find_victim__enter, ctx);

	/*
	 * Look for a possible preemption candidate on the local node first.
	 * If there is no candidate look at the other nodes.  This isn't
	 * exactly fair, but so far the whole spu scheduler tries to keep
	 * a strong node affinity.  We might want to fine-tune this in
	 * the future.
	 */
 restart:
	node = cpu_to_node(raw_smp_processor_id());
	for (n = 0; n < MAX_NUMNODES; n++, node++) {
		node = (node < MAX_NUMNODES) ? node : 0;
		if (!node_allowed(ctx, node))
			continue;

		mutex_lock(&cbe_spu_info[node].list_mutex);
		list_for_each_entry(spu, &cbe_spu_info[node].spus, cbe_list) {
			struct spu_context *tmp = spu->ctx;

			if (tmp && tmp->prio > ctx->prio &&
			    !(tmp->flags & SPU_CREATE_NOSCHED) &&
			    (!victim || tmp->prio > victim->prio)) {
				victim = spu->ctx;
			}
		}
		if (victim)
			get_spu_context(victim);
		mutex_unlock(&cbe_spu_info[node].list_mutex);

		if (victim) {
			/*
			 * This nests ctx->state_mutex, but we always lock
			 * higher priority contexts before lower priority
			 * ones, so this is safe until we introduce
			 * priority inheritance schemes.
			 *
			 * XXX if the highest priority context is locked,
			 * this can loop a long time.  Might be better to
			 * look at another context or give up after X retries.
			 */
			if (!mutex_trylock(&victim->state_mutex)) {
				put_spu_context(victim);
				victim = NULL;
				goto restart;
			}

			spu = victim->spu;
			if (!spu || victim->prio <= ctx->prio) {
				/*
				 * This race can happen because we've dropped
				 * the active list mutex.  Not a problem, just
				 * restart the search.
				 */
				mutex_unlock(&victim->state_mutex);
				put_spu_context(victim);
				victim = NULL;
				goto restart;
			}

			spu_context_trace(__spu_deactivate__unload, ctx, spu);

			mutex_lock(&cbe_spu_info[node].list_mutex);
			cbe_spu_info[node].nr_active--;
			spu_unbind_context(spu, victim);
			mutex_unlock(&cbe_spu_info[node].list_mutex);

			victim->stats.invol_ctx_switch++;
			spu->stats.invol_ctx_switch++;
			if (test_bit(SPU_SCHED_SPU_RUN, &victim->sched_flags))
				spu_add_to_rq(victim);

			mutex_unlock(&victim->state_mutex);
			put_spu_context(victim);

			return spu;
		}
	}

	return NULL;
}

static void __spu_schedule(struct spu *spu, struct spu_context *ctx)
{
	int node = spu->node;
	int success = 0;

	spu_set_timeslice(ctx);

	mutex_lock(&cbe_spu_info[node].list_mutex);
	if (spu->ctx == NULL) {
		spu_bind_context(spu, ctx);
		cbe_spu_info[node].nr_active++;
		spu->alloc_state = SPU_USED;
		success = 1;
	}
	mutex_unlock(&cbe_spu_info[node].list_mutex);

	if (success)
		wake_up_all(&ctx->run_wq);
	else
		spu_add_to_rq(ctx);
}

static void spu_schedule(struct spu *spu, struct spu_context *ctx)
{
	/* not a candidate for interruptible because it's called either
	   from the scheduler thread or from spu_deactivate */
	mutex_lock(&ctx->state_mutex);
	if (ctx->state == SPU_STATE_SAVED)
		__spu_schedule(spu, ctx);
	spu_release(ctx);
}

/**
 * spu_unschedule - remove a context from a spu, and possibly release it.
 * @spu:	The SPU to unschedule from
 * @ctx:	The context currently scheduled on the SPU
 * @free_spu:	Whether to free the SPU for other contexts
 *
 * Unbinds the context @ctx from the SPU @spu. If @free_spu is non-zero, the
 * SPU is made available for other contexts (i.e., it may be returned by
 * spu_get_idle). If this is zero, the caller is expected to schedule another
 * context to this spu.
 *
 * Should be called with ctx->state_mutex held.
 */
static void spu_unschedule(struct spu *spu, struct spu_context *ctx,
		int free_spu)
{
	int node = spu->node;

	mutex_lock(&cbe_spu_info[node].list_mutex);
	cbe_spu_info[node].nr_active--;
	if (free_spu)
		spu->alloc_state = SPU_FREE;
	spu_unbind_context(spu, ctx);
	ctx->stats.invol_ctx_switch++;
	spu->stats.invol_ctx_switch++;
	mutex_unlock(&cbe_spu_info[node].list_mutex);
}

/**
 * spu_activate - find a free spu for a context and execute it
 * @ctx:	spu context to schedule
 * @flags:	flags (currently ignored)
 *
 * Tries to find a free spu to run @ctx.  If no free spu is available
 * add the context to the runqueue so it gets woken up once an spu
 * is available.
 */
int spu_activate(struct spu_context *ctx, unsigned long flags)
{
	struct spu *spu;

	/*
	 * If there are multiple threads waiting for a single context
	 * only one actually binds the context while the others will
	 * only be able to acquire the state_mutex once the context
	 * already is in runnable state.
	 */
	if (ctx->spu)
		return 0;

spu_activate_top:
	if (signal_pending(current))
		return -ERESTARTSYS;

	spu = spu_get_idle(ctx);
	/*
	 * If this is a realtime thread we try to get it running by
	 * preempting a lower priority thread.
	 */
	if (!spu && rt_prio(ctx->prio))
		spu = find_victim(ctx);
	if (spu) {
		unsigned long runcntl;

		runcntl = ctx->ops->runcntl_read(ctx);
		__spu_schedule(spu, ctx);
		if (runcntl & SPU_RUNCNTL_RUNNABLE)
			spuctx_switch_state(ctx, SPU_UTIL_USER);

		return 0;
	}

	if (ctx->flags & SPU_CREATE_NOSCHED) {
		spu_prio_wait(ctx);
		goto spu_activate_top;
	}

	spu_add_to_rq(ctx);

	return 0;
}

/**
 * grab_runnable_context - try to find a runnable context
 *
 * Remove the highest priority context on the runqueue and return it
 * to the caller.  Returns %NULL if no runnable context was found.
 */
static struct spu_context *grab_runnable_context(int prio, int node)
{
	struct spu_context *ctx;
	int best;

	spin_lock(&spu_prio->runq_lock);
	best = find_first_bit(spu_prio->bitmap, prio);
	while (best < prio) {
		struct list_head *rq = &spu_prio->runq[best];

		list_for_each_entry(ctx, rq, rq) {
			/* XXX(hch): check for affinity here as well */
			if (__node_allowed(ctx, node)) {
				__spu_del_from_rq(ctx);
				goto found;
			}
		}
		best++;
	}
	ctx = NULL;
 found:
	spin_unlock(&spu_prio->runq_lock);
	return ctx;
}

static int __spu_deactivate(struct spu_context *ctx, int force, int max_prio)
{
	struct spu *spu = ctx->spu;
	struct spu_context *new = NULL;

	if (spu) {
		new = grab_runnable_context(max_prio, spu->node);
		if (new || force) {
			spu_unschedule(spu, ctx, new == NULL);
			if (new) {
				if (new->flags & SPU_CREATE_NOSCHED)
					wake_up(&new->stop_wq);
				else {
					spu_release(ctx);
					spu_schedule(spu, new);
					/* this one can't easily be made
					   interruptible */
					mutex_lock(&ctx->state_mutex);
				}
			}
		}
	}

	return new != NULL;
}

/**
 * spu_deactivate - unbind a context from its physical spu
 * @ctx:	spu context to unbind
 *
 * Unbind @ctx from the physical spu it is running on and schedule
 * the highest priority context to run on the freed physical spu.
 */
void spu_deactivate(struct spu_context *ctx)
{
	spu_context_nospu_trace(spu_deactivate__enter, ctx);
	__spu_deactivate(ctx, 1, MAX_PRIO);
}

/**
 * spu_yield -	yield a physical spu if others are waiting
 * @ctx:	spu context to yield
 *
 * Check if there is a higher priority context waiting and if yes
 * unbind @ctx from the physical spu and schedule the highest
 * priority context to run on the freed physical spu instead.
 */
void spu_yield(struct spu_context *ctx)
{
	spu_context_nospu_trace(spu_yield__enter, ctx);
	if (!(ctx->flags & SPU_CREATE_NOSCHED)) {
		mutex_lock(&ctx->state_mutex);
		__spu_deactivate(ctx, 0, MAX_PRIO);
		mutex_unlock(&ctx->state_mutex);
	}
}

static noinline void spusched_tick(struct spu_context *ctx)
{
	struct spu_context *new = NULL;
	struct spu *spu = NULL;

	if (spu_acquire(ctx))
		BUG();	/* a kernel thread never has signals pending */

	if (ctx->state != SPU_STATE_RUNNABLE)
		goto out;
	if (ctx->flags & SPU_CREATE_NOSCHED)
		goto out;
	if (ctx->policy == SCHED_FIFO)
		goto out;

	if (--ctx->time_slice && test_bit(SPU_SCHED_SPU_RUN, &ctx->sched_flags))
		goto out;

	spu = ctx->spu;

	spu_context_trace(spusched_tick__preempt, ctx, spu);

	new = grab_runnable_context(ctx->prio + 1, spu->node);
	if (new) {
		spu_unschedule(spu, ctx, 0);
		if (test_bit(SPU_SCHED_SPU_RUN, &ctx->sched_flags))
			spu_add_to_rq(ctx);
	} else {
		spu_context_nospu_trace(spusched_tick__newslice, ctx);
		if (!ctx->time_slice)
			ctx->time_slice++;
	}
out:
	spu_release(ctx);

	if (new)
		spu_schedule(spu, new);
}

/**
 * count_active_contexts - count the number of active contexts
 *
 * Return the number of contexts currently running or waiting to run.
 *
 * Note that we don't take runq_lock / list_mutex here.  Reading
 * a single 32-bit value is atomic on powerpc, and we don't care
 * about memory ordering issues here.
 */
static unsigned long count_active_contexts(void)
{
	int nr_active = 0, node;

	for (node = 0; node < MAX_NUMNODES; node++)
		nr_active += cbe_spu_info[node].nr_active;
	nr_active += spu_prio->nr_waiting;

	return nr_active;
}

/**
 * spu_calc_load - update the avenrun load estimates.
 *
 * No locking against reading these values from userspace, as for
 * the CPU loadavg code.
 */
static void spu_calc_load(void)
{
	unsigned long active_tasks; /* fixed-point */

	active_tasks = count_active_contexts() * FIXED_1;
	spu_avenrun[0] = calc_load(spu_avenrun[0], EXP_1, active_tasks);
	spu_avenrun[1] = calc_load(spu_avenrun[1], EXP_5, active_tasks);
	spu_avenrun[2] = calc_load(spu_avenrun[2], EXP_15, active_tasks);
}
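
/*
 * Rough worked example, assuming the generic fixed-point constants
 * (FIXED_1 = 1 << 11 = 2048, EXP_1 = 1884): every LOAD_FREQ interval
 * the update amounts to
 *
 *	load = (load * EXP_1 + active_tasks * (FIXED_1 - EXP_1)) / FIXED_1
 *
 * so with no active contexts the 1-minute average decays by a factor
 * of 1884/2048 (about 0.92) every 5 seconds, just like the CPU loadavg.
 */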

static void spusched_wake(struct timer_list *unused)
{
	mod_timer(&spusched_timer, jiffies + SPUSCHED_TICK);
	wake_up_process(spusched_task);
}

static void spuloadavg_wake(struct timer_list *unused)
{
	mod_timer(&spuloadavg_timer, jiffies + LOAD_FREQ);
	spu_calc_load();
}

static int spusched_thread(void *unused)
{
	struct spu *spu;
	int node;

	while (!kthread_should_stop()) {
		set_current_state(TASK_INTERRUPTIBLE);
		schedule();
		for (node = 0; node < MAX_NUMNODES; node++) {
			struct mutex *mtx = &cbe_spu_info[node].list_mutex;

			mutex_lock(mtx);
			list_for_each_entry(spu, &cbe_spu_info[node].spus,
					cbe_list) {
				struct spu_context *ctx = spu->ctx;

				if (ctx) {
					get_spu_context(ctx);
					mutex_unlock(mtx);
					spusched_tick(ctx);
					mutex_lock(mtx);
					put_spu_context(ctx);
				}
			}
			mutex_unlock(mtx);
		}
	}

	return 0;
}

void spuctx_switch_state(struct spu_context *ctx,
		enum spu_utilization_state new_state)
{
	unsigned long long curtime;
	signed long long delta;
	struct spu *spu;
	enum spu_utilization_state old_state;
	int node;

	curtime = ktime_get_ns();
	delta = curtime - ctx->stats.tstamp;

	WARN_ON(!mutex_is_locked(&ctx->state_mutex));
	WARN_ON(delta < 0);

	spu = ctx->spu;
	old_state = ctx->stats.util_state;
	ctx->stats.util_state = new_state;
	ctx->stats.tstamp = curtime;

	/*
	 * Update the physical SPU utilization statistics.
	 */
	if (spu) {
		ctx->stats.times[old_state] += delta;
		spu->stats.times[old_state] += delta;
		spu->stats.util_state = new_state;
		spu->stats.tstamp = curtime;
		node = spu->node;
		if (old_state == SPU_UTIL_USER)
			atomic_dec(&cbe_spu_info[node].busy_spus);
		if (new_state == SPU_UTIL_USER)
			atomic_inc(&cbe_spu_info[node].busy_spus);
	}
}

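/*
 * The FIXED_1/200 fudge below rounds each fixed-point average to the
 * nearest hundredth before LOAD_INT()/LOAD_FRAC() split it into integer
 * and two-digit fractional parts, mirroring /proc/loadavg.  E.g. a raw
 * value of 1024 (half of FIXED_1, assuming FIXED_1 = 2048) prints as
 * "0.50".
 */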
static int show_spu_loadavg(struct seq_file *s, void *private)
{
	int a, b, c;

	a = spu_avenrun[0] + (FIXED_1/200);
	b = spu_avenrun[1] + (FIXED_1/200);
	c = spu_avenrun[2] + (FIXED_1/200);

	/*
	 * Note that last_pid doesn't really make much sense for the
	 * SPU loadavg (it even seems very odd on the CPU side...),
	 * but we include it here to have a 100% compatible interface.
	 */
	seq_printf(s, "%d.%02d %d.%02d %d.%02d %ld/%d %d\n",
		LOAD_INT(a), LOAD_FRAC(a),
		LOAD_INT(b), LOAD_FRAC(b),
		LOAD_INT(c), LOAD_FRAC(c),
		count_active_contexts(),
		atomic_read(&nr_spu_contexts),
		idr_get_cursor(&task_active_pid_ns(current)->idr) - 1);
	return 0;
}

int __init spu_sched_init(void)
{
	struct proc_dir_entry *entry;
	int err = -ENOMEM, i;

	spu_prio = kzalloc(sizeof(struct spu_prio_array), GFP_KERNEL);
	if (!spu_prio)
		goto out;

	for (i = 0; i < MAX_PRIO; i++) {
		INIT_LIST_HEAD(&spu_prio->runq[i]);
		__clear_bit(i, spu_prio->bitmap);
	}
	spin_lock_init(&spu_prio->runq_lock);

	timer_setup(&spusched_timer, spusched_wake, 0);
	timer_setup(&spuloadavg_timer, spuloadavg_wake, 0);

	spusched_task = kthread_run(spusched_thread, NULL, "spusched");
	if (IS_ERR(spusched_task)) {
		err = PTR_ERR(spusched_task);
		goto out_free_spu_prio;
	}

	mod_timer(&spuloadavg_timer, 0);

	entry = proc_create_single("spu_loadavg", 0, NULL, show_spu_loadavg);
	if (!entry)
		goto out_stop_kthread;

	pr_debug("spusched: tick: %d, min ticks: %d, default ticks: %d\n",
			SPUSCHED_TICK, MIN_SPU_TIMESLICE, DEF_SPU_TIMESLICE);
	return 0;

 out_stop_kthread:
	kthread_stop(spusched_task);
 out_free_spu_prio:
	kfree(spu_prio);
 out:
	return err;
}

void spu_sched_exit(void)
{
	struct spu *spu;
	int node;

	remove_proc_entry("spu_loadavg", NULL);

	del_timer_sync(&spusched_timer);
	del_timer_sync(&spuloadavg_timer);
	kthread_stop(spusched_task);

	for (node = 0; node < MAX_NUMNODES; node++) {
		mutex_lock(&cbe_spu_info[node].list_mutex);
		list_for_each_entry(spu, &cbe_spu_info[node].spus, cbe_list)
			if (spu->alloc_state != SPU_FREE)
				spu->alloc_state = SPU_FREE;
		mutex_unlock(&cbe_spu_info[node].list_mutex);
	}
	kfree(spu_prio);
}