// SPDX-License-Identifier: GPL-2.0-or-later
/* sched.c - SPU scheduler.
 *
 * Copyright (C) IBM 2005
 * Author: Mark Nutter <mnutter@us.ibm.com>
 *
 * 2006-03-31	NUMA domains added.
 */

#undef DEBUG

#include <linux/errno.h>
#include <linux/sched/signal.h>
#include <linux/sched/loadavg.h>
#include <linux/sched/rt.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/completion.h>
#include <linux/vmalloc.h>
#include <linux/smp.h>
#include <linux/stddef.h>
#include <linux/unistd.h>
#include <linux/numa.h>
#include <linux/mutex.h>
#include <linux/notifier.h>
#include <linux/kthread.h>
#include <linux/pid_namespace.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>

#include <asm/io.h>
#include <asm/mmu_context.h>
#include <asm/spu.h>
#include <asm/spu_csa.h>
#include <asm/spu_priv1.h>
#include "spufs.h"
#define CREATE_TRACE_POINTS
#include "sputrace.h"

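/*
 * The run queue mirrors the old O(1) CPU scheduler's design: one list
 * per priority level plus a bitmap of the non-empty levels, so that
 * grab_runnable_context() can find the highest-priority waiting
 * context with a single find_first_bit() instead of scanning all
 * MAX_PRIO lists.
 */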
struct spu_prio_array {
	DECLARE_BITMAP(bitmap, MAX_PRIO);
	struct list_head runq[MAX_PRIO];
	spinlock_t runq_lock;
	int nr_waiting;
};

static unsigned long spu_avenrun[3];
static struct spu_prio_array *spu_prio;
static struct task_struct *spusched_task;
static struct timer_list spusched_timer;
static struct timer_list spuloadavg_timer;
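
/*
 * Lock ordering, as observed from the code paths below (not a rule
 * documented in the original source): ctx->state_mutex is taken
 * outside cbe_spu_info[node].list_mutex (see spu_schedule()), which
 * is why find_victim() has to drop list_mutex and mutex_trylock() the
 * victim's state_mutex.  spu_prio->runq_lock is a leaf spinlock,
 * taken with state_mutex already held in spu_prio_wait().
 */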

/*
 * Priority of a normal, non-rt, non-niced process (aka nice level 0).
 */
#define NORMAL_PRIO		120

/*
 * Frequency of the spu scheduler tick.  By default we do one SPU scheduler
 * tick for every 10 CPU scheduler ticks.
 */
#define SPUSCHED_TICK		(10)

/*
 * These are the 'tuning knobs' of the scheduler:
 *
 * Minimum timeslice is 5 msecs (or 1 spu scheduler tick, whichever is
 * larger), default timeslice is 100 msecs, maximum timeslice is 800 msecs.
 */
#define MIN_SPU_TIMESLICE	max(5 * HZ / (1000 * SPUSCHED_TICK), 1)
#define DEF_SPU_TIMESLICE	(100 * HZ / (1000 * SPUSCHED_TICK))

#define SCALE_PRIO(x, prio) \
	max(x * (MAX_PRIO - prio) / (NICE_WIDTH / 2), MIN_SPU_TIMESLICE)

/*
 * scale user-nice values [ -20 ... 0 ... 19 ] to time slice values:
 * [800ms ... 100ms ... 5ms]
 *
 * The higher a thread's priority, the bigger timeslices
 * it gets during one round of execution. But even the lowest
 * priority thread gets MIN_SPU_TIMESLICE worth of execution time.
 */
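/*
 * Worked example, for illustration only, assuming HZ=1000 (so one spu
 * scheduler tick is SPUSCHED_TICK = 10 jiffies = 10ms and
 * DEF_SPU_TIMESLICE evaluates to 10 ticks) and taking MAX_PRIO = 140
 * and NICE_WIDTH = 40, their usual values:
 *
 *	nice -20 (static_prio 100):  SCALE_PRIO(40, 100)
 *		= max(40 * (140 - 100) / 20, 1) = 80 ticks = 800ms
 *	nice   0 (static_prio 120):  SCALE_PRIO(10, 120)
 *		= max(10 * (140 - 120) / 20, 1) = 10 ticks = 100ms
 *	nice +19 (static_prio 139):  SCALE_PRIO(10, 139)
 *		= max(10 * (140 - 139) / 20, 1) =  1 tick  =  10ms
 *
 * i.e. the 5ms floor documented above rounds up to one whole tick.
 */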
void spu_set_timeslice(struct spu_context *ctx)
{
	if (ctx->prio < NORMAL_PRIO)
		ctx->time_slice = SCALE_PRIO(DEF_SPU_TIMESLICE * 4, ctx->prio);
	else
		ctx->time_slice = SCALE_PRIO(DEF_SPU_TIMESLICE, ctx->prio);
}

/*
 * Update scheduling information from the owning thread.
 */
void __spu_update_sched_info(struct spu_context *ctx)
{
	/*
	 * assert that the context is not on the runqueue, so it is safe
	 * to change its scheduling parameters.
	 */
	BUG_ON(!list_empty(&ctx->rq));

	/*
	 * 32-bit assignments are atomic on powerpc, and we don't care about
	 * memory ordering here because retrieving the controlling thread is
	 * by definition racy.
	 */
	ctx->tid = current->pid;

	/*
	 * We do our own priority calculations, so we normally want
	 * ->static_prio to start with. Unfortunately this field
	 * contains junk for threads with a realtime scheduling
	 * policy so we have to look at ->prio in this case.
	 */
	if (rt_prio(current->prio))
		ctx->prio = current->prio;
	else
		ctx->prio = current->static_prio;
	ctx->policy = current->policy;

	/*
	 * XXX: the context may be loaded, so we may need to activate
	 * it again on a different node. But it shouldn't hurt anything
	 * to update its parameters, because we know that the scheduler
	 * is not actively looking at this field, since it is not on the
	 * runqueue. The context will be rescheduled on the proper node
	 * if it is timesliced or preempted.
	 */
	cpumask_copy(&ctx->cpus_allowed, current->cpus_ptr);

	/* Save the current cpu id for spu interrupt routing. */
	ctx->last_ran = raw_smp_processor_id();
}

void spu_update_sched_info(struct spu_context *ctx)
{
	int node;

	if (ctx->state == SPU_STATE_RUNNABLE) {
		node = ctx->spu->node;

		/*
		 * Take list_mutex to sync with find_victim().
		 */
		mutex_lock(&cbe_spu_info[node].list_mutex);
		__spu_update_sched_info(ctx);
		mutex_unlock(&cbe_spu_info[node].list_mutex);
	} else {
		__spu_update_sched_info(ctx);
	}
}

static int __node_allowed(struct spu_context *ctx, int node)
{
	if (nr_cpus_node(node)) {
		const struct cpumask *mask = cpumask_of_node(node);

		if (cpumask_intersects(mask, &ctx->cpus_allowed))
			return 1;
	}

	return 0;
}

static int node_allowed(struct spu_context *ctx, int node)
{
	int rval;

	spin_lock(&spu_prio->runq_lock);
	rval = __node_allowed(ctx, node);
	spin_unlock(&spu_prio->runq_lock);

	return rval;
}

void do_notify_spus_active(void)
{
	int node;

	/*
	 * Wake up the active spu_contexts.
	 */
	for_each_online_node(node) {
		struct spu *spu;

		mutex_lock(&cbe_spu_info[node].list_mutex);
		list_for_each_entry(spu, &cbe_spu_info[node].spus, cbe_list) {
			if (spu->alloc_state != SPU_FREE) {
				struct spu_context *ctx = spu->ctx;
				set_bit(SPU_SCHED_NOTIFY_ACTIVE,
					&ctx->sched_flags);
				mb();
				wake_up_all(&ctx->stop_wq);
			}
		}
		mutex_unlock(&cbe_spu_info[node].list_mutex);
	}
}

/**
 * spu_bind_context - bind spu context to physical spu
 * @spu:	physical spu to bind to
 * @ctx:	context to bind
 */
static void spu_bind_context(struct spu *spu, struct spu_context *ctx)
{
	spu_context_trace(spu_bind_context__enter, ctx, spu);

	spuctx_switch_state(ctx, SPU_UTIL_SYSTEM);

	if (ctx->flags & SPU_CREATE_NOSCHED)
		atomic_inc(&cbe_spu_info[spu->node].reserved_spus);

	ctx->stats.slb_flt_base = spu->stats.slb_flt;
	ctx->stats.class2_intr_base = spu->stats.class2_intr;

	spu_associate_mm(spu, ctx->owner);

	spin_lock_irq(&spu->register_lock);
	spu->ctx = ctx;
	spu->flags = 0;
	ctx->spu = spu;
	ctx->ops = &spu_hw_ops;
	spu->pid = current->pid;
	spu->tgid = current->tgid;
	spu->ibox_callback = spufs_ibox_callback;
	spu->wbox_callback = spufs_wbox_callback;
	spu->stop_callback = spufs_stop_callback;
	spu->mfc_callback = spufs_mfc_callback;
	spin_unlock_irq(&spu->register_lock);

	spu_unmap_mappings(ctx);

	spu_switch_log_notify(spu, ctx, SWITCH_LOG_START, 0);
	spu_restore(&ctx->csa, spu);
	spu->timestamp = jiffies;
	ctx->state = SPU_STATE_RUNNABLE;

	spuctx_switch_state(ctx, SPU_UTIL_USER);
}

/*
 * Must be used with the list_mutex held.
 */
static inline int sched_spu(struct spu *spu)
{
	BUG_ON(!mutex_is_locked(&cbe_spu_info[spu->node].list_mutex));

	return (!spu->ctx || !(spu->ctx->flags & SPU_CREATE_NOSCHED));
}

static void aff_merge_remaining_ctxs(struct spu_gang *gang)
{
	struct spu_context *ctx;

	list_for_each_entry(ctx, &gang->aff_list_head, aff_list) {
		if (list_empty(&ctx->aff_list))
			list_add(&ctx->aff_list, &gang->aff_list_head);
	}
	gang->aff_flags |= AFF_MERGED;
}

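/*
 * Illustrative example (not from the original source): for a gang
 * whose aff_list order is A - B - REF - C - D, with REF being
 * aff_ref_ctx, the two loops below assign aff_offset -2 to A, -1 to B,
 * 0 to REF, 1 to C and 2 to D, i.e. each context's signed distance
 * from the reference context.
 */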
static void aff_set_offsets(struct spu_gang *gang)
{
	struct spu_context *ctx;
	int offset;

	offset = -1;
	list_for_each_entry_reverse(ctx, &gang->aff_ref_ctx->aff_list,
								aff_list) {
		if (&ctx->aff_list == &gang->aff_list_head)
			break;
		ctx->aff_offset = offset--;
	}

	offset = 0;
	list_for_each_entry(ctx, gang->aff_ref_ctx->aff_list.prev, aff_list) {
		if (&ctx->aff_list == &gang->aff_list_head)
			break;
		ctx->aff_offset = offset++;
	}

	gang->aff_flags |= AFF_OFFSETS_SET;
}

static struct spu *aff_ref_location(struct spu_context *ctx, int mem_aff,
		 int group_size, int lowest_offset)
{
	struct spu *spu;
	int node, n;

	/*
	 * TODO: A better algorithm could be used to find a good spu to be
	 *       used as reference location for the ctxs chain.
	 */
	node = cpu_to_node(raw_smp_processor_id());
	for (n = 0; n < MAX_NUMNODES; n++, node++) {
		/*
		 * "available_spus" counts how many spus are not potentially
		 * going to be used by other affinity gangs whose reference
		 * context is already in place. Although this code seeks to
		 * avoid having affinity gangs with a total number of
		 * contexts bigger than the number of spus in the node,
		 * this may happen sporadically. In this case, available_spus
		 * becomes negative, which is harmless.
		 */
		int available_spus;

		node = (node < MAX_NUMNODES) ? node : 0;
		if (!node_allowed(ctx, node))
			continue;

		available_spus = 0;
		mutex_lock(&cbe_spu_info[node].list_mutex);
		list_for_each_entry(spu, &cbe_spu_info[node].spus, cbe_list) {
			if (spu->ctx && spu->ctx->gang && !spu->ctx->aff_offset
					&& spu->ctx->gang->aff_ref_spu)
				available_spus -= spu->ctx->gang->contexts;
			available_spus++;
		}
		if (available_spus < ctx->gang->contexts) {
			mutex_unlock(&cbe_spu_info[node].list_mutex);
			continue;
		}

		list_for_each_entry(spu, &cbe_spu_info[node].spus, cbe_list) {
			if ((!mem_aff || spu->has_mem_affinity) &&
							sched_spu(spu)) {
				mutex_unlock(&cbe_spu_info[node].list_mutex);
				return spu;
			}
		}
		mutex_unlock(&cbe_spu_info[node].list_mutex);
	}
	return NULL;
}

static void aff_set_ref_point_location(struct spu_gang *gang)
{
	int mem_aff, gs, lowest_offset;
	struct spu_context *tmp, *ctx;

	mem_aff = gang->aff_ref_ctx->flags & SPU_CREATE_AFFINITY_MEM;
	lowest_offset = 0;
	gs = 0;

	list_for_each_entry(tmp, &gang->aff_list_head, aff_list)
		gs++;

	list_for_each_entry_reverse(ctx, &gang->aff_ref_ctx->aff_list,
								aff_list) {
		if (&ctx->aff_list == &gang->aff_list_head)
			break;
		lowest_offset = ctx->aff_offset;
	}

	gang->aff_ref_spu = aff_ref_location(gang->aff_ref_ctx, mem_aff, gs,
							lowest_offset);
}

static struct spu *ctx_location(struct spu *ref, int offset, int node)
{
	struct spu *spu;

	spu = NULL;
	if (offset >= 0) {
		list_for_each_entry(spu, ref->aff_list.prev, aff_list) {
			BUG_ON(spu->node != node);
			if (offset == 0)
				break;
			if (sched_spu(spu))
				offset--;
		}
	} else {
		list_for_each_entry_reverse(spu, ref->aff_list.next, aff_list) {
			BUG_ON(spu->node != node);
			if (offset == 0)
				break;
			if (sched_spu(spu))
				offset++;
		}
	}

	return spu;
}

/*
 * has_affinity is called each time a context is going to be scheduled.
 * It returns non-zero if the gang has a reference spu to use for
 * affinity placement, setting one up first if necessary.
 */
static int has_affinity(struct spu_context *ctx)
{
	struct spu_gang *gang = ctx->gang;

	if (list_empty(&ctx->aff_list))
		return 0;

	if (atomic_read(&ctx->gang->aff_sched_count) == 0)
		ctx->gang->aff_ref_spu = NULL;

	if (!gang->aff_ref_spu) {
		if (!(gang->aff_flags & AFF_MERGED))
			aff_merge_remaining_ctxs(gang);
		if (!(gang->aff_flags & AFF_OFFSETS_SET))
			aff_set_offsets(gang);
		aff_set_ref_point_location(gang);
	}

	return gang->aff_ref_spu != NULL;
}

/**
 * spu_unbind_context - unbind spu context from physical spu
 * @spu:	physical spu to unbind from
 * @ctx:	context to unbind
 */
static void spu_unbind_context(struct spu *spu, struct spu_context *ctx)
{
	u32 status;

	spu_context_trace(spu_unbind_context__enter, ctx, spu);

	spuctx_switch_state(ctx, SPU_UTIL_SYSTEM);

	if (spu->ctx->flags & SPU_CREATE_NOSCHED)
		atomic_dec(&cbe_spu_info[spu->node].reserved_spus);

	if (ctx->gang)
		/*
		 * If ctx->gang->aff_sched_count is positive, SPU affinity is
		 * being considered in this gang. Using atomic_dec_if_positive
		 * allows us to skip an explicit check for affinity in this
		 * gang.
		 */
		atomic_dec_if_positive(&ctx->gang->aff_sched_count);

	spu_unmap_mappings(ctx);
	spu_save(&ctx->csa, spu);
	spu_switch_log_notify(spu, ctx, SWITCH_LOG_STOP, 0);

	spin_lock_irq(&spu->register_lock);
	spu->timestamp = jiffies;
	ctx->state = SPU_STATE_SAVED;
	spu->ibox_callback = NULL;
	spu->wbox_callback = NULL;
	spu->stop_callback = NULL;
	spu->mfc_callback = NULL;
	spu->pid = 0;
	spu->tgid = 0;
	ctx->ops = &spu_backing_ops;
	spu->flags = 0;
	spu->ctx = NULL;
	spin_unlock_irq(&spu->register_lock);

	spu_associate_mm(spu, NULL);

	ctx->stats.slb_flt +=
		(spu->stats.slb_flt - ctx->stats.slb_flt_base);
	ctx->stats.class2_intr +=
		(spu->stats.class2_intr - ctx->stats.class2_intr_base);

	/* This maps the underlying spu state to idle */
	spuctx_switch_state(ctx, SPU_UTIL_IDLE_LOADED);
	ctx->spu = NULL;

	if (spu_stopped(ctx, &status))
		wake_up_all(&ctx->stop_wq);
}

/**
 * __spu_add_to_rq - add a context to the runqueue
 * @ctx:       context to add
 */
static void __spu_add_to_rq(struct spu_context *ctx)
{
	/*
	 * Unfortunately this code path can be called from multiple threads
	 * on behalf of a single context due to the way the problem state
	 * mmap support works.
	 *
	 * Fortunately we need to wake up all these threads at the same time
	 * and can simply skip the runqueue addition for all but the first
	 * thread getting into this codepath.
	 *
	 * It's still quite hacky, and long-term we should proxy all other
	 * threads through the owner thread so that spu_run is in control
	 * of all the scheduling activity for a given context.
	 */
	if (list_empty(&ctx->rq)) {
		list_add_tail(&ctx->rq, &spu_prio->runq[ctx->prio]);
		set_bit(ctx->prio, spu_prio->bitmap);
		if (!spu_prio->nr_waiting++)
			mod_timer(&spusched_timer, jiffies + SPUSCHED_TICK);
	}
}

static void spu_add_to_rq(struct spu_context *ctx)
{
	spin_lock(&spu_prio->runq_lock);
	__spu_add_to_rq(ctx);
	spin_unlock(&spu_prio->runq_lock);
}

static void __spu_del_from_rq(struct spu_context *ctx)
{
	int prio = ctx->prio;

	if (!list_empty(&ctx->rq)) {
		if (!--spu_prio->nr_waiting)
			del_timer(&spusched_timer);
		list_del_init(&ctx->rq);

		if (list_empty(&spu_prio->runq[prio]))
			clear_bit(prio, spu_prio->bitmap);
	}
}

void spu_del_from_rq(struct spu_context *ctx)
{
	spin_lock(&spu_prio->runq_lock);
	__spu_del_from_rq(ctx);
	spin_unlock(&spu_prio->runq_lock);
}

static void spu_prio_wait(struct spu_context *ctx)
{
	DEFINE_WAIT(wait);

	/*
	 * The caller must explicitly wait for a context to be loaded
	 * if the nosched flag is set.  If NOSCHED is not set, the caller
	 * queues the context and waits for an spu event or error.
	 */
	BUG_ON(!(ctx->flags & SPU_CREATE_NOSCHED));

	spin_lock(&spu_prio->runq_lock);
	prepare_to_wait_exclusive(&ctx->stop_wq, &wait, TASK_INTERRUPTIBLE);
	if (!signal_pending(current)) {
		__spu_add_to_rq(ctx);
		spin_unlock(&spu_prio->runq_lock);
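		/*
		 * Drop the state_mutex across schedule() so that spu_run()
		 * and the scheduler thread can make progress while we
		 * sleep; it is re-acquired below before the context is
		 * touched again.
		 */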
		mutex_unlock(&ctx->state_mutex);
		schedule();
		mutex_lock(&ctx->state_mutex);
		spin_lock(&spu_prio->runq_lock);
		__spu_del_from_rq(ctx);
	}
	spin_unlock(&spu_prio->runq_lock);
	__set_current_state(TASK_RUNNING);
	remove_wait_queue(&ctx->stop_wq, &wait);
}

static struct spu *spu_get_idle(struct spu_context *ctx)
{
	struct spu *spu, *aff_ref_spu;
	int node, n;

	spu_context_nospu_trace(spu_get_idle__enter, ctx);

	if (ctx->gang) {
		mutex_lock(&ctx->gang->aff_mutex);
		if (has_affinity(ctx)) {
			aff_ref_spu = ctx->gang->aff_ref_spu;
			atomic_inc(&ctx->gang->aff_sched_count);
			mutex_unlock(&ctx->gang->aff_mutex);
			node = aff_ref_spu->node;

			mutex_lock(&cbe_spu_info[node].list_mutex);
			spu = ctx_location(aff_ref_spu, ctx->aff_offset, node);
			if (spu && spu->alloc_state == SPU_FREE)
				goto found;
			mutex_unlock(&cbe_spu_info[node].list_mutex);

			atomic_dec(&ctx->gang->aff_sched_count);
			goto not_found;
		}
		mutex_unlock(&ctx->gang->aff_mutex);
	}
	node = cpu_to_node(raw_smp_processor_id());
	for (n = 0; n < MAX_NUMNODES; n++, node++) {
		node = (node < MAX_NUMNODES) ? node : 0;
		if (!node_allowed(ctx, node))
			continue;

		mutex_lock(&cbe_spu_info[node].list_mutex);
		list_for_each_entry(spu, &cbe_spu_info[node].spus, cbe_list) {
			if (spu->alloc_state == SPU_FREE)
				goto found;
		}
		mutex_unlock(&cbe_spu_info[node].list_mutex);
	}

 not_found:
	spu_context_nospu_trace(spu_get_idle__not_found, ctx);
	return NULL;

 found:
	spu->alloc_state = SPU_USED;
	mutex_unlock(&cbe_spu_info[node].list_mutex);
	spu_context_trace(spu_get_idle__found, ctx, spu);
	spu_init_channels(spu);
	return spu;
}

/**
 * find_victim - find a lower priority context to preempt
 * @ctx:	candidate context for running
 *
 * Returns the freed physical spu to run the new context on.
 */
static struct spu *find_victim(struct spu_context *ctx)
{
	struct spu_context *victim = NULL;
	struct spu *spu;
	int node, n;

	spu_context_nospu_trace(spu_find_victim__enter, ctx);

	/*
	 * Look for a possible preemption candidate on the local node first.
	 * If there is no candidate look at the other nodes.  This isn't
	 * exactly fair, but so far the whole spu scheduler tries to keep
	 * a strong node affinity.  We might want to fine-tune this in
	 * the future.
	 */
 restart:
	node = cpu_to_node(raw_smp_processor_id());
	for (n = 0; n < MAX_NUMNODES; n++, node++) {
		node = (node < MAX_NUMNODES) ? node : 0;
		if (!node_allowed(ctx, node))
			continue;

		mutex_lock(&cbe_spu_info[node].list_mutex);
		list_for_each_entry(spu, &cbe_spu_info[node].spus, cbe_list) {
			struct spu_context *tmp = spu->ctx;

			if (tmp && tmp->prio > ctx->prio &&
			    !(tmp->flags & SPU_CREATE_NOSCHED) &&
			    (!victim || tmp->prio > victim->prio)) {
				victim = spu->ctx;
			}
		}
		if (victim)
			get_spu_context(victim);
		mutex_unlock(&cbe_spu_info[node].list_mutex);

		if (victim) {
			/*
			 * This nests ctx->state_mutex, but we always lock
			 * higher priority contexts before lower priority
			 * ones, so this is safe until we introduce
			 * priority inheritance schemes.
			 *
			 * XXX if the highest priority context is locked,
			 * this can loop a long time.  Might be better to
			 * look at another context or give up after X retries.
			 */
			if (!mutex_trylock(&victim->state_mutex)) {
				put_spu_context(victim);
				victim = NULL;
				goto restart;
			}

			spu = victim->spu;
			if (!spu || victim->prio <= ctx->prio) {
				/*
				 * This race can happen because we've dropped
				 * the active list mutex.  Not a problem, just
				 * restart the search.
				 */
				mutex_unlock(&victim->state_mutex);
				put_spu_context(victim);
				victim = NULL;
				goto restart;
			}

			spu_context_trace(__spu_deactivate__unload, ctx, spu);

			mutex_lock(&cbe_spu_info[node].list_mutex);
			cbe_spu_info[node].nr_active--;
			spu_unbind_context(spu, victim);
			mutex_unlock(&cbe_spu_info[node].list_mutex);

			victim->stats.invol_ctx_switch++;
			spu->stats.invol_ctx_switch++;
			if (test_bit(SPU_SCHED_SPU_RUN, &victim->sched_flags))
				spu_add_to_rq(victim);

			mutex_unlock(&victim->state_mutex);
			put_spu_context(victim);

			return spu;
		}
	}

	return NULL;
}

static void __spu_schedule(struct spu *spu, struct spu_context *ctx)
{
	int node = spu->node;
	int success = 0;

	spu_set_timeslice(ctx);

	mutex_lock(&cbe_spu_info[node].list_mutex);
	if (spu->ctx == NULL) {
		spu_bind_context(spu, ctx);
		cbe_spu_info[node].nr_active++;
		spu->alloc_state = SPU_USED;
		success = 1;
	}
	mutex_unlock(&cbe_spu_info[node].list_mutex);

	if (success)
		wake_up_all(&ctx->run_wq);
	else
		spu_add_to_rq(ctx);
}

static void spu_schedule(struct spu *spu, struct spu_context *ctx)
{
	/* not a candidate for interruptible because it's called either
	   from the scheduler thread or from spu_deactivate */
	mutex_lock(&ctx->state_mutex);
	if (ctx->state == SPU_STATE_SAVED)
		__spu_schedule(spu, ctx);
	spu_release(ctx);
}

/**
 * spu_unschedule - remove a context from a spu, and possibly release it.
 * @spu:	The SPU to unschedule from
 * @ctx:	The context currently scheduled on the SPU
 * @free_spu:	Whether to free the SPU for other contexts
 *
 * Unbinds the context @ctx from the SPU @spu. If @free_spu is non-zero, the
 * SPU is made available for other contexts (i.e., may be returned by
 * spu_get_idle). If this is zero, the caller is expected to schedule another
 * context to this spu.
 *
 * Should be called with ctx->state_mutex held.
 */
static void spu_unschedule(struct spu *spu, struct spu_context *ctx,
		int free_spu)
{
	int node = spu->node;

	mutex_lock(&cbe_spu_info[node].list_mutex);
	cbe_spu_info[node].nr_active--;
	if (free_spu)
		spu->alloc_state = SPU_FREE;
	spu_unbind_context(spu, ctx);
	ctx->stats.invol_ctx_switch++;
	spu->stats.invol_ctx_switch++;
	mutex_unlock(&cbe_spu_info[node].list_mutex);
}

/**
 * spu_activate - find a free spu for a context and execute it
 * @ctx:	spu context to schedule
 * @flags:	flags (currently ignored)
 *
 * Tries to find a free spu to run @ctx.  If no free spu is available
 * add the context to the runqueue so it gets woken up once an spu
 * is available.
 */
int spu_activate(struct spu_context *ctx, unsigned long flags)
{
	struct spu *spu;

	/*
	 * If there are multiple threads waiting for a single context
	 * only one actually binds the context while the others will
	 * only be able to acquire the state_mutex once the context
	 * already is in runnable state.
	 */
	if (ctx->spu)
		return 0;

spu_activate_top:
	if (signal_pending(current))
		return -ERESTARTSYS;

	spu = spu_get_idle(ctx);
	/*
	 * If this is a realtime thread we try to get it running by
	 * preempting a lower priority thread.
	 */
	if (!spu && rt_prio(ctx->prio))
		spu = find_victim(ctx);
	if (spu) {
		unsigned long runcntl;

		runcntl = ctx->ops->runcntl_read(ctx);
		__spu_schedule(spu, ctx);
		if (runcntl & SPU_RUNCNTL_RUNNABLE)
			spuctx_switch_state(ctx, SPU_UTIL_USER);

		return 0;
	}

	if (ctx->flags & SPU_CREATE_NOSCHED) {
		spu_prio_wait(ctx);
		goto spu_activate_top;
	}

	spu_add_to_rq(ctx);

	return 0;
}

/**
 * grab_runnable_context - try to find a runnable context
 *
 * Remove the highest priority context on the runqueue and return it
 * to the caller.  Returns %NULL if no runnable context was found.
 */
static struct spu_context *grab_runnable_context(int prio, int node)
{
	struct spu_context *ctx;
	int best;

	spin_lock(&spu_prio->runq_lock);
	best = find_first_bit(spu_prio->bitmap, prio);
	while (best < prio) {
		struct list_head *rq = &spu_prio->runq[best];

		list_for_each_entry(ctx, rq, rq) {
			/* XXX(hch): check for affinity here as well */
			if (__node_allowed(ctx, node)) {
				__spu_del_from_rq(ctx);
				goto found;
			}
		}
		best++;
	}
	ctx = NULL;
 found:
	spin_unlock(&spu_prio->runq_lock);
	return ctx;
}

static int __spu_deactivate(struct spu_context *ctx, int force, int max_prio)
{
	struct spu *spu = ctx->spu;
	struct spu_context *new = NULL;

	if (spu) {
		new = grab_runnable_context(max_prio, spu->node);
		if (new || force) {
			spu_unschedule(spu, ctx, new == NULL);
			if (new) {
				if (new->flags & SPU_CREATE_NOSCHED)
					wake_up(&new->stop_wq);
				else {
					spu_release(ctx);
					spu_schedule(spu, new);
					/* this one can't easily be made
					   interruptible */
					mutex_lock(&ctx->state_mutex);
				}
			}
		}
	}

	return new != NULL;
}

/**
 * spu_deactivate - unbind a context from its physical spu
 * @ctx:	spu context to unbind
 *
 * Unbind @ctx from the physical spu it is running on and schedule
 * the highest priority context to run on the freed physical spu.
 */
void spu_deactivate(struct spu_context *ctx)
{
	spu_context_nospu_trace(spu_deactivate__enter, ctx);
	__spu_deactivate(ctx, 1, MAX_PRIO);
}

/**
 * spu_yield -	yield a physical spu if others are waiting
 * @ctx:	spu context to yield
 *
 * Check if there is a higher priority context waiting and if so,
 * unbind @ctx from the physical spu and schedule the highest
 * priority context to run on the freed physical spu instead.
 */
void spu_yield(struct spu_context *ctx)
{
	spu_context_nospu_trace(spu_yield__enter, ctx);
	if (!(ctx->flags & SPU_CREATE_NOSCHED)) {
		mutex_lock(&ctx->state_mutex);
		__spu_deactivate(ctx, 0, MAX_PRIO);
		mutex_unlock(&ctx->state_mutex);
	}
}

static noinline void spusched_tick(struct spu_context *ctx)
{
	struct spu_context *new = NULL;
	struct spu *spu = NULL;

	if (spu_acquire(ctx))
		BUG();	/* a kernel thread never has signals pending */

	if (ctx->state != SPU_STATE_RUNNABLE)
		goto out;
	if (ctx->flags & SPU_CREATE_NOSCHED)
		goto out;
	if (ctx->policy == SCHED_FIFO)
		goto out;

	if (--ctx->time_slice && test_bit(SPU_SCHED_SPU_RUN, &ctx->sched_flags))
		goto out;

	spu = ctx->spu;

	spu_context_trace(spusched_tick__preempt, ctx, spu);

	new = grab_runnable_context(ctx->prio + 1, spu->node);
	if (new) {
		spu_unschedule(spu, ctx, 0);
		if (test_bit(SPU_SCHED_SPU_RUN, &ctx->sched_flags))
			spu_add_to_rq(ctx);
	} else {
		spu_context_nospu_trace(spusched_tick__newslice, ctx);
		if (!ctx->time_slice)
			ctx->time_slice++;
	}
out:
	spu_release(ctx);

	if (new)
		spu_schedule(spu, new);
}

/**
 * count_active_contexts - count the number of active contexts
 *
 * Return the number of contexts currently running or waiting to run.
 *
 * Note that we don't take runq_lock / list_mutex here.  Reading
 * a single 32-bit value is atomic on powerpc, and we don't care
 * about memory ordering issues here.
 */
static unsigned long count_active_contexts(void)
{
	int nr_active = 0, node;

	for (node = 0; node < MAX_NUMNODES; node++)
		nr_active += cbe_spu_info[node].nr_active;
	nr_active += spu_prio->nr_waiting;

	return nr_active;
}

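/*
 * For reference (generic kernel loadavg math, not SPU-specific):
 * calc_load() below computes a fixed-point exponential moving average,
 *
 *	load(t) = (load(t-1) * exp + active * (FIXED_1 - exp)) / FIXED_1
 *
 * with exp chosen from EXP_1/EXP_5/EXP_15 so that the average decays
 * over 1, 5 and 15 minutes when resampled every LOAD_FREQ ticks.
 */
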
/**
 * spu_calc_load - update the avenrun load estimates.
 *
 * No locking against reading these values from userspace, as for
 * the CPU loadavg code.
 */
static void spu_calc_load(void)
{
	unsigned long active_tasks; /* fixed-point */

	active_tasks = count_active_contexts() * FIXED_1;
	spu_avenrun[0] = calc_load(spu_avenrun[0], EXP_1, active_tasks);
	spu_avenrun[1] = calc_load(spu_avenrun[1], EXP_5, active_tasks);
	spu_avenrun[2] = calc_load(spu_avenrun[2], EXP_15, active_tasks);
}

static void spusched_wake(struct timer_list *unused)
{
	mod_timer(&spusched_timer, jiffies + SPUSCHED_TICK);
	wake_up_process(spusched_task);
}

static void spuloadavg_wake(struct timer_list *unused)
{
	mod_timer(&spuloadavg_timer, jiffies + LOAD_FREQ);
	spu_calc_load();
}

static int spusched_thread(void *unused)
{
	struct spu *spu;
	int node;

	while (!kthread_should_stop()) {
		set_current_state(TASK_INTERRUPTIBLE);
		schedule();
		for (node = 0; node < MAX_NUMNODES; node++) {
			struct mutex *mtx = &cbe_spu_info[node].list_mutex;

			mutex_lock(mtx);
			list_for_each_entry(spu, &cbe_spu_info[node].spus,
					cbe_list) {
				struct spu_context *ctx = spu->ctx;

				if (ctx) {
					get_spu_context(ctx);
					mutex_unlock(mtx);
					spusched_tick(ctx);
					mutex_lock(mtx);
					put_spu_context(ctx);
				}
			}
			mutex_unlock(mtx);
		}
	}

	return 0;
}

void spuctx_switch_state(struct spu_context *ctx,
		enum spu_utilization_state new_state)
{
	unsigned long long curtime;
	signed long long delta;
	struct spu *spu;
	enum spu_utilization_state old_state;
	int node;

	curtime = ktime_get_ns();
	delta = curtime - ctx->stats.tstamp;

	WARN_ON(!mutex_is_locked(&ctx->state_mutex));
	WARN_ON(delta < 0);

	spu = ctx->spu;
	old_state = ctx->stats.util_state;
	ctx->stats.util_state = new_state;
	ctx->stats.tstamp = curtime;

	/*
	 * Update the physical SPU utilization statistics.
	 */
	if (spu) {
		ctx->stats.times[old_state] += delta;
		spu->stats.times[old_state] += delta;
		spu->stats.util_state = new_state;
		spu->stats.tstamp = curtime;
		node = spu->node;
		if (old_state == SPU_UTIL_USER)
			atomic_dec(&cbe_spu_info[node].busy_spus);
		if (new_state == SPU_UTIL_USER)
			atomic_inc(&cbe_spu_info[node].busy_spus);
	}
}

#ifdef CONFIG_PROC_FS
static int show_spu_loadavg(struct seq_file *s, void *private)
{
	int a, b, c;

	a = spu_avenrun[0] + (FIXED_1/200);
	b = spu_avenrun[1] + (FIXED_1/200);
	c = spu_avenrun[2] + (FIXED_1/200);

	/*
	 * Note that last_pid doesn't really make much sense for the
	 * SPU loadavg (it even seems very odd on the CPU side...),
	 * but we include it here to have a 100% compatible interface.
	 */
	seq_printf(s, "%d.%02d %d.%02d %d.%02d %ld/%d %d\n",
		LOAD_INT(a), LOAD_FRAC(a),
		LOAD_INT(b), LOAD_FRAC(b),
		LOAD_INT(c), LOAD_FRAC(c),
		count_active_contexts(),
		atomic_read(&nr_spu_contexts),
		idr_get_cursor(&task_active_pid_ns(current)->idr) - 1);
	return 0;
}
#endif

int __init spu_sched_init(void)
{
	struct proc_dir_entry *entry;
	int err = -ENOMEM, i;

	spu_prio = kzalloc(sizeof(struct spu_prio_array), GFP_KERNEL);
	if (!spu_prio)
		goto out;

	for (i = 0; i < MAX_PRIO; i++) {
		INIT_LIST_HEAD(&spu_prio->runq[i]);
		__clear_bit(i, spu_prio->bitmap);
	}
	spin_lock_init(&spu_prio->runq_lock);

	timer_setup(&spusched_timer, spusched_wake, 0);
	timer_setup(&spuloadavg_timer, spuloadavg_wake, 0);

	spusched_task = kthread_run(spusched_thread, NULL, "spusched");
	if (IS_ERR(spusched_task)) {
		err = PTR_ERR(spusched_task);
		goto out_free_spu_prio;
	}

	mod_timer(&spuloadavg_timer, 0);

	entry = proc_create_single("spu_loadavg", 0, NULL, show_spu_loadavg);
	if (!entry)
		goto out_stop_kthread;

	pr_debug("spusched: tick: %d, min ticks: %d, default ticks: %d\n",
			SPUSCHED_TICK, MIN_SPU_TIMESLICE, DEF_SPU_TIMESLICE);
	return 0;

 out_stop_kthread:
	kthread_stop(spusched_task);
 out_free_spu_prio:
	kfree(spu_prio);
 out:
	return err;
}

void spu_sched_exit(void)
{
	struct spu *spu;
	int node;

	remove_proc_entry("spu_loadavg", NULL);

	del_timer_sync(&spusched_timer);
	del_timer_sync(&spuloadavg_timer);
	kthread_stop(spusched_task);

	for (node = 0; node < MAX_NUMNODES; node++) {
		mutex_lock(&cbe_spu_info[node].list_mutex);
		list_for_each_entry(spu, &cbe_spu_info[node].spus, cbe_list)
			if (spu->alloc_state != SPU_FREE)
				spu->alloc_state = SPU_FREE;
		mutex_unlock(&cbe_spu_info[node].list_mutex);
	}
	kfree(spu_prio);
}