/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1994, 95, 96, 97, 98, 99, 2003, 06 by Ralf Baechle
 * Copyright (C) 1996 by Paul M. Antoine
 * Copyright (C) 1999 Silicon Graphics
 * Kevin D. Kissell, kevink@mips.org and Carsten Langgaard, carstenl@mips.com
 * Copyright (C) 2000 MIPS Technologies, Inc.
 */
#ifndef _ASM_SWITCH_TO_H
#define _ASM_SWITCH_TO_H

#include <asm/cpu-features.h>
#include <asm/watch.h>
#include <asm/dsp.h>
#include <asm/cop2.h>
#include <asm/fpu.h>

struct task_struct;

/**
 * resume - resume execution of a task
 * @prev:	The task previously executed.
 * @next:	The task to begin executing.
 * @next_ti:	task_thread_info(next).
 *
 * This function is used whilst scheduling to save the context of prev & load
 * the context of next. Returns prev.
 */
extern asmlinkage struct task_struct *resume(struct task_struct *prev,
		struct task_struct *next, struct thread_info *next_ti);

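/*
 * State of the software LL/SC emulation used on CPUs without native
 * ll/sc support: ll_bit records whether an emulated LL is pending and
 * ll_task which task issued it.  See __clear_software_ll_bit() below
 * for how this state is handled across a context switch.
 */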
extern unsigned int ll_bit;
extern struct task_struct *ll_task;

#ifdef CONFIG_MIPS_MT_FPAFF

/*
 * Handle the scheduler resume end of FPU affinity management.  We do this
 * inline to try to keep the overhead down. If we have been forced to run on
 * a "CPU" with an FPU because of a previous high level of FP computation,
 * but did not actually use the FPU during the most recent time-slice (CU1
 * isn't set), we undo the restriction on cpus_mask.
 *
 * We're not calling set_cpus_allowed() here, because we have no need to
 * force prompt migration - we're already switching the current CPU to a
 * different thread.
 */

#define __mips_mt_fpaff_switch_to(prev)					\
do {									\
	struct thread_info *__prev_ti = task_thread_info(prev);	\
									\
	if (cpu_has_fpu &&						\
	    test_ti_thread_flag(__prev_ti, TIF_FPUBOUND) &&		\
	    (!(KSTK_STATUS(prev) & ST0_CU1))) {				\
		clear_ti_thread_flag(__prev_ti, TIF_FPUBOUND);		\
		prev->cpus_mask = prev->thread.user_cpus_allowed;	\
	}								\
	next->thread.emulated_fp = 0;					\
} while (0)

#else
#define __mips_mt_fpaff_switch_to(prev) do { (void) (prev); } while (0)
#endif
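/*
 * Note that the CONFIG_MIPS_MT_FPAFF variant above refers to `next'
 * directly; that identifier comes from the enclosing switch_to()
 * expansion below, so the macro is only meaningful when used from
 * within switch_to().
 */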

/*
 * Clear LLBit during context switches on MIPSr5+ such that eretnc can be used
 * unconditionally when returning to userland in entry.S.
 */
#define __clear_r5_hw_ll_bit() do {					\
	if (cpu_has_mips_r5 || cpu_has_mips_r6)				\
		write_c0_lladdr(0);					\
} while (0)

#define __clear_software_ll_bit() do {					\
	if (!__builtin_constant_p(cpu_has_llsc) || !cpu_has_llsc)	\
		ll_bit = 0;						\
} while (0)
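/*
 * Whether LLBit lives in hardware (r5 and newer) or in the ll_bit
 * variable used by the software emulation, clearing it on every switch
 * ensures that an SC executed by the incoming task cannot succeed on
 * the strength of an LL performed before the context switch.
 */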

/*
 * Check FCSR for any unmasked exceptions pending set with `ptrace',
 * clear them and send a signal.
 */
#ifdef CONFIG_MIPS_FP_SUPPORT
# define __sanitize_fcr31(next)						\
do {									\
	unsigned long fcr31 = mask_fcr31_x(next->thread.fpu.fcr31);	\
	void __user *pc;						\
									\
	if (unlikely(fcr31)) {						\
		pc = (void __user *)task_pt_regs(next)->cp0_epc;	\
		next->thread.fpu.fcr31 &= ~fcr31;			\
		force_fcr31_sig(fcr31, pc, next);			\
	}								\
} while (0)
#else
# define __sanitize_fcr31(next)
#endif
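/*
 * mask_fcr31_x() reduces FCSR to the Cause bits whose corresponding
 * Enable bit is set (with Unimplemented Operation always treated as
 * enabled), i.e. exceptions that would be raised as soon as the FPU
 * context becomes live again.  In normal operation none can be pending
 * at this point, hence the "set with ptrace" wording above.
 */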

/*
 * For newly created kernel threads switch_to() will return to
 * ret_from_kernel_thread, newly created user threads to ret_from_fork.
 * That is, everything following resume() will be skipped for new threads.
 * So everything that matters to new threads should be placed before resume().
 */
#define switch_to(prev, next, last)					\
do {									\
	__mips_mt_fpaff_switch_to(prev);				\
	lose_fpu_inatomic(1, prev);					\
	if (tsk_used_math(next))					\
		__sanitize_fcr31(next);					\
	if (cpu_has_dsp) {						\
		__save_dsp(prev);					\
		__restore_dsp(next);					\
	}								\
	if (cop2_present) {						\
		u32 status = read_c0_status();				\
									\
		set_c0_status(ST0_CU2);					\
		if ((KSTK_STATUS(prev) & ST0_CU2)) {			\
			if (cop2_lazy_restore)				\
				KSTK_STATUS(prev) &= ~ST0_CU2;		\
			cop2_save(prev);				\
		}							\
		if (KSTK_STATUS(next) & ST0_CU2 &&			\
		    !cop2_lazy_restore) {				\
			cop2_restore(next);				\
		}							\
		write_c0_status(status);				\
	}								\
	__clear_r5_hw_ll_bit();						\
	__clear_software_ll_bit();					\
	if (cpu_has_userlocal)						\
		write_c0_userlocal(task_thread_info(next)->tp_value);	\
	__restore_watch(next);						\
	(last) = resume(prev, next, task_thread_info(next));		\
} while (0)
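/*
 * A rough sketch of how the generic scheduler core uses this macro; the
 * `prev'/`next' task pointers here come from the caller, not from this
 * header:
 *
 *	switch_to(prev, next, prev);
 *
 * When the outgoing task is eventually scheduled back in, execution
 * continues after this line on its own kernel stack, with `prev' (the
 * `last' argument) now pointing at whichever task ran immediately
 * before it.
 */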

#endif /* _ASM_SWITCH_TO_H */