/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (C) 1994 Linus Torvalds
 *
 * Pentium III FXSR, SSE support
 * General FPU state handling cleanups
 *	Gareth Hughes <gareth@valinux.com>, May 2000
 * x86-64 work by Andi Kleen 2002
 */

#ifndef _ASM_X86_FPU_API_H
#define _ASM_X86_FPU_API_H
#include <linux/bottom_half.h>

#include <asm/fpu/types.h>

/*
 * Use kernel_fpu_begin/end() if you intend to use the FPU in kernel context.
 * It disables preemption, so be careful if you intend to use it for long
 * periods of time.
 * If you intend to use the FPU in irq/softirq context, check first with
 * irq_fpu_usable() whether that is possible.
 */
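
/*
 * A minimal usage sketch of the pattern described above (illustrative only;
 * the actual SIMD/FPU work is left out and is not part of this API):
 *
 *	if (irq_fpu_usable()) {
 *		kernel_fpu_begin();
 *		... use SIMD/FPU instructions on kernel data ...
 *		kernel_fpu_end();
 *	}
 *
 * As noted above, the irq_fpu_usable() check is only needed for callers that
 * may run in irq/softirq context.
 */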

/* Kernel FPU states to initialize in kernel_fpu_begin_mask() */
#define KFPU_387	_BITUL(0)	/* 387 state will be initialized */
#define KFPU_MXCSR	_BITUL(1)	/* MXCSR will be initialized */

extern void kernel_fpu_begin_mask(unsigned int kfpu_mask);
extern void kernel_fpu_end(void);
extern bool irq_fpu_usable(void);
extern void fpregs_mark_activate(void);

/* Code that is unaware of kernel_fpu_begin_mask() can use this */
static inline void kernel_fpu_begin(void)
{
#ifdef CONFIG_X86_64
	/*
	 * Any 64-bit code that uses 387 instructions must explicitly request
	 * KFPU_387.
	 */
	kernel_fpu_begin_mask(KFPU_MXCSR);
#else
	/*
	 * 32-bit kernel code may use 387 operations as well as SSE2, etc,
	 * as long as it checks that the CPU has the required capability.
	 */
	kernel_fpu_begin_mask(KFPU_387 | KFPU_MXCSR);
#endif
}
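
/*
 * Sketch of the 64-bit exception mentioned above (the caller and the x87 work
 * are hypothetical): 64-bit code that executes 387/x87 instructions must
 * request KFPU_387 explicitly and pair the call with kernel_fpu_end():
 *
 *	kernel_fpu_begin_mask(KFPU_387 | KFPU_MXCSR);
 *	... code that executes x87/387 instructions ...
 *	kernel_fpu_end();
 */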

/*
 * Use fpregs_lock() while editing the CPU's FPU registers or fpu->fpstate.
 * A context switch will (and a softirq might) save the CPU's FPU registers to
 * fpu->fpstate.regs and set TIF_NEED_FPU_LOAD, leaving the CPU's FPU registers
 * in a random state.
 *
 * local_bh_disable() protects against both preemption and soft interrupts
 * on !RT kernels.
 *
 * On RT kernels local_bh_disable() is not sufficient because it only
 * serializes soft interrupt related sections via a local lock, but stays
 * preemptible. Disabling preemption is the right choice here as bottom
 * half processing is always in thread context on RT kernels so it
 * implicitly prevents bottom half processing as well.
 *
 * Disabling preemption also serializes against kernel_fpu_begin().
 */
static inline void fpregs_lock(void)
{
	if (!IS_ENABLED(CONFIG_PREEMPT_RT))
		local_bh_disable();
	else
		preempt_disable();
}

static inline void fpregs_unlock(void)
{
	if (!IS_ENABLED(CONFIG_PREEMPT_RT))
		local_bh_enable();
	else
		preempt_enable();
}
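
/*
 * A minimal sketch of the locking pattern described above (the fpstate edit
 * itself is hypothetical and depends entirely on the caller):
 *
 *	fpregs_lock();
 *	if (test_thread_flag(TIF_NEED_FPU_LOAD))
 *		... the registers are not live, edit fpu->fpstate.regs ...
 *	else
 *		... the registers are live, edit them directly ...
 *	fpregs_unlock();
 */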

/*
 * FPU state gets lazily restored before returning to userspace. So when in
 * the kernel, the valid FPU state may be kept in the buffer. This function
 * will force-restore the full FPU state to the registers early if needed,
 * and lock them against being automatically saved/restored. The FPU state
 * can then be modified safely in the registers, before unlocking with
 * fpregs_unlock().
 */
void fpregs_lock_and_load(void);
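
/*
 * Sketch of the intended call sequence (the register modification itself is
 * hypothetical and caller specific):
 *
 *	fpregs_lock_and_load();
 *	... the task's FPU state is now live in the registers, modify it ...
 *	fpregs_unlock();
 */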

#ifdef CONFIG_X86_DEBUG_FPU
extern void fpregs_assert_state_consistent(void);
#else
static inline void fpregs_assert_state_consistent(void) { }
#endif

/*
 * Load the task FPU state before returning to userspace.
 */
extern void switch_fpu_return(void);

/*
 * Query the presence of one or more xfeatures. Works on any legacy CPU as well.
 *
 * If 'feature_name' is set then put a human-readable description of
 * the feature there as well - this can be used to print error (or success)
 * messages.
 */
extern int cpu_has_xfeatures(u64 xfeatures_mask, const char **feature_name);
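
/*
 * Typical use, as a sketch (the particular mask and the error handling are
 * illustrative; XFEATURE_MASK_SSE and XFEATURE_MASK_YMM are the xstate
 * feature masks provided by the FPU headers):
 *
 *	const char *feature_name;
 *
 *	if (!cpu_has_xfeatures(XFEATURE_MASK_SSE | XFEATURE_MASK_YMM,
 *			       &feature_name)) {
 *		pr_info("CPU feature '%s' is not supported\n", feature_name);
 *		return -ENODEV;
 *	}
 */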

/* Trap handling */
extern int  fpu__exception_code(struct fpu *fpu, int trap_nr);
extern void fpu_sync_fpstate(struct fpu *fpu);
extern void fpu_reset_from_exception_fixup(void);

/* Boot, hotplug and resume */
extern void fpu__init_cpu(void);
extern void fpu__init_system(void);
extern void fpu__init_check_bugs(void);
extern void fpu__resume_cpu(void);

#ifdef CONFIG_MATH_EMULATION
extern void fpstate_init_soft(struct swregs_state *soft);
#else
static inline void fpstate_init_soft(struct swregs_state *soft) {}
#endif

/* State tracking */
DECLARE_PER_CPU(struct fpu *, fpu_fpregs_owner_ctx);

/* Process cleanup */
#ifdef CONFIG_X86_64
extern void fpstate_free(struct fpu *fpu);
#else
static inline void fpstate_free(struct fpu *fpu) { }
#endif

/* fpstate-related functions which are exported to KVM */
extern void fpstate_clear_xstate_component(struct fpstate *fps, unsigned int xfeature);

extern u64 xstate_get_guest_group_perm(void);

/* KVM specific functions */
extern bool fpu_alloc_guest_fpstate(struct fpu_guest *gfpu);
extern void fpu_free_guest_fpstate(struct fpu_guest *gfpu);
extern int fpu_swap_kvm_fpstate(struct fpu_guest *gfpu, bool enter_guest);
extern int fpu_enable_guest_xfd_features(struct fpu_guest *guest_fpu, u64 xfeatures);

#ifdef CONFIG_X86_64
extern void fpu_update_guest_xfd(struct fpu_guest *guest_fpu, u64 xfd);
extern void fpu_sync_guest_vmexit_xfd_state(void);
#else
static inline void fpu_update_guest_xfd(struct fpu_guest *guest_fpu, u64 xfd) { }
static inline void fpu_sync_guest_vmexit_xfd_state(void) { }
#endif

extern void fpu_copy_guest_fpstate_to_uabi(struct fpu_guest *gfpu, void *buf,
					   unsigned int size, u64 xfeatures, u32 pkru);
extern int fpu_copy_uabi_to_guest_fpstate(struct fpu_guest *gfpu, const void *buf, u64 xcr0, u32 *vpkru);

static inline void fpstate_set_confidential(struct fpu_guest *gfpu)
{
	gfpu->fpstate->is_confidential = true;
}

static inline bool fpstate_is_confidential(struct fpu_guest *gfpu)
{
	return gfpu->fpstate->is_confidential;
}

/* prctl */
extern long fpu_xstate_prctl(int option, unsigned long arg2);

extern void fpu_idle_fpregs(void);

#endif /* _ASM_X86_FPU_API_H */