Search results for MSR_FP (PowerPC, linux-5.10 and linux-6.6 trees)

/kernel/linux/linux-6.6/arch/powerpc/kernel/

process.c
  126  if (cpu_has_feature(CPU_FTR_VSX) && (bits & MSR_FP))    in msr_check_and_set()
  144  if (cpu_has_feature(CPU_FTR_VSX) && (bits & MSR_FP))    in __msr_check_and_clear()
  159  msr &= ~(MSR_FP|MSR_FE0|MSR_FE1);    in __giveup_fpu()
  169  msr_check_and_set(MSR_FP);    in giveup_fpu()
  171  msr_check_and_clear(MSR_FP);    in giveup_fpu()
  191  if (tsk->thread.regs->msr & MSR_FP) {    in flush_fp_to_thread()
  213  cpumsr = msr_check_and_set(MSR_FP);    in enable_kernel_fp()
  215  if (current->thread.regs && (current->thread.regs->msr & MSR_FP)) {    in enable_kernel_fp()
  308  * MSR_FP and MSR_VEC    in __giveup_vsx()
  310  WARN_ON((msr & MSR_VSX) && !((msr & MSR_FP)    in __giveup_vsx()
  [all...]

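The process.c hits all show one bracket pattern: raise MSR_FP in the CPU's
MSR, move the floating-point state, then drop the bit again. Lines 126/144
also show that on VSX-capable CPUs a request for MSR_FP is widened to cover
MSR_VSX, since VSX state overlaps the FP registers. A minimal C paraphrase
of the give-up path, assembled from the matched lines (illustrative and
simplified, not the kernel's exact code):

    /* Sketch of giveup_fpu() as reflected by lines 159/169/171. */
    static void giveup_fpu_sketch(struct task_struct *tsk)
    {
            msr_check_and_set(MSR_FP);      /* line 169: make FP usable */
            __giveup_fpu(tsk);              /* save FP regs; clear MSR_FP|MSR_FE0|MSR_FE1
                                               from the task's saved MSR (line 159) */
            msr_check_and_clear(MSR_FP);    /* line 171: FP off again */
    }
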
fpu.S
   98  ori r5,r5,MSR_FP|MSR_RI
  100  ori r5,r5,MSR_FP
  113  ori r9,r9,MSR_FP    /* enable FP for current */
  119  ori r12,r12,MSR_FP

signal_64.c
  237  msr |= tsk->thread.ckpt_regs.msr & (MSR_FP | MSR_VEC | MSR_VSX);    in setup_tm_sigcontexts()
  285  if (msr & MSR_FP)    in setup_tm_sigcontexts()
  387  regs_set_return_msr(regs, regs->msr & ~(MSR_FP | MSR_FE0 | MSR_FE1 | MSR_VEC | MSR_VSX));    in __unsafe_restore_sigcontext()
  506  regs_set_return_msr(regs, regs->msr & ~(MSR_FP | MSR_FE0 | MSR_FE1 | MSR_VEC | MSR_VSX));    in restore_tm_sigcontexts()
  599  msr_check_and_set(msr & (MSR_FP | MSR_VEC));    in restore_tm_sigcontexts()
  600  if (msr & MSR_FP) {    in restore_tm_sigcontexts()
  602  regs_set_return_msr(regs, regs->msr | (MSR_FP | tsk->thread.fpexc_mode));    in restore_tm_sigcontexts()

signal_32.c
  418  if (msr & MSR_FP)    in save_tm_user_regs_unsafe()
  536  regs_set_return_msr(regs, regs->msr & ~(MSR_FP | MSR_FE0 | MSR_FE1));    in restore_user_regs()
  617  regs_set_return_msr(regs, regs->msr & ~(MSR_FP | MSR_FE0 | MSR_FE1));    in restore_tm_user_regs()
  700  msr_check_and_set(msr & (MSR_FP | MSR_VEC));    in restore_tm_user_regs()
  701  if (msr & MSR_FP) {    in restore_tm_user_regs()
  703  regs_set_return_msr(regs, regs->msr | (MSR_FP | current->thread.fpexc_mode));    in restore_tm_user_regs()

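Both signal paths apply the same two-step rule on sigreturn: first strip the
FP bits from the return MSR, then re-enable them only if the (checkpointed)
context had them live. A sketch of that flow, assuming load_fp_state() as
the restore step (simplified from the matched 6.6 lines):

    /* Sketch of the restore_tm_* flow; msr is the checkpointed MSR. */
    static void sigreturn_fp_sketch(struct pt_regs *regs, unsigned long msr)
    {
            /* Default to "no FP": clear facility and FP-exception bits. */
            regs_set_return_msr(regs, regs->msr &
                            ~(MSR_FP | MSR_FE0 | MSR_FE1 | MSR_VEC | MSR_VSX));

            /* Re-enable only what the saved context actually used. */
            msr_check_and_set(msr & (MSR_FP | MSR_VEC));
            if (msr & MSR_FP) {
                    load_fp_state(&current->thread.fp_state);
                    regs_set_return_msr(regs, regs->msr |
                                    (MSR_FP | current->thread.fpexc_mode));
            }
    }
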
tm.S
  135  ori r15, r15, MSR_FP
  387  ori r5, r5, MSR_FP

vector.S
  131  andi. r5,r12,MSR_FP
  204  ori r11,r10,MSR_FP

/kernel/linux/linux-5.10/arch/powerpc/kernel/

process.c
  127  if (cpu_has_feature(CPU_FTR_VSX) && (bits & MSR_FP))    in msr_check_and_set()
  145  if (cpu_has_feature(CPU_FTR_VSX) && (bits & MSR_FP))    in __msr_check_and_clear()
  160  msr &= ~(MSR_FP|MSR_FE0|MSR_FE1);    in __giveup_fpu()
  170  msr_check_and_set(MSR_FP);    in giveup_fpu()
  172  msr_check_and_clear(MSR_FP);    in giveup_fpu()
  192  if (tsk->thread.regs->msr & MSR_FP) {    in flush_fp_to_thread()
  214  cpumsr = msr_check_and_set(MSR_FP);    in enable_kernel_fp()
  216  if (current->thread.regs && (current->thread.regs->msr & MSR_FP)) {    in enable_kernel_fp()
  309  * MSR_FP and MSR_VEC    in __giveup_vsx()
  311  WARN_ON((msr & MSR_VSX) && !((msr & MSR_FP)    in __giveup_vsx()
  [all...]

fpu.S
   96  ori r5,r5,MSR_FP
  111  ori r9,r9,MSR_FP    /* enable FP for current */
  117  ori r12,r12,MSR_FP

signal_64.c
  223  msr |= tsk->thread.ckpt_regs.msr & (MSR_FP | MSR_VEC | MSR_VSX);    in setup_tm_sigcontexts()
  271  if (msr & MSR_FP)    in setup_tm_sigcontexts()
  369  regs->msr &= ~(MSR_FP | MSR_FE0 | MSR_FE1 | MSR_VEC | MSR_VSX);    in restore_sigcontext()
  487  regs->msr &= ~(MSR_FP | MSR_FE0 | MSR_FE1 | MSR_VEC | MSR_VSX);    in restore_tm_sigcontexts()
  580  msr_check_and_set(msr & (MSR_FP | MSR_VEC));    in restore_tm_sigcontexts()
  581  if (msr & MSR_FP) {    in restore_tm_sigcontexts()
  583  regs->msr |= (MSR_FP | tsk->thread.fpexc_mode);    in restore_tm_sigcontexts()

syscall_64.c
  217  unsigned long mathflags = MSR_FP;    in syscall_exit_prepare()
  296  unsigned long mathflags = MSR_FP;    in interrupt_exit_user_prepare()

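On the 5.10 exit-to-user paths, MSR_FP seeds a "mathflags" mask that is then
widened to whatever vector facilities the CPU has; if any of those bits are
missing from the return MSR, user math state is reloaded. A paraphrase of
that check (restore_math() and the feature tests are real kernel interfaces;
the surrounding code is simplified):

    unsigned long mathflags = MSR_FP;

    if (cpu_has_feature(CPU_FTR_VSX))
            mathflags |= MSR_VEC | MSR_VSX;    /* VSX implies VMX as well */
    else if (cpu_has_feature(CPU_FTR_ALTIVEC))
            mathflags |= MSR_VEC;

    /* Reload FP/VMX/VSX lazily if the return MSR lacks any of them. */
    if ((regs->msr & mathflags) != mathflags)
            restore_math(regs);
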
vector.S
  126  andi. r5,r12,MSR_FP
  186  ori r11,r10,MSR_FP

signal_32.c
  413  if (msr & MSR_FP) {    in save_tm_user_regs()
  556  regs->msr &= ~(MSR_FP | MSR_FE0 | MSR_FE1);    in restore_user_regs()
  644  regs->msr &= ~(MSR_FP | MSR_FE0 | MSR_FE1);    in restore_tm_user_regs()
  723  msr_check_and_set(msr & (MSR_FP | MSR_VEC));    in restore_tm_user_regs()
  724  if (msr & MSR_FP) {    in restore_tm_user_regs()
  726  regs->msr |= (MSR_FP | current->thread.fpexc_mode);    in restore_tm_user_regs()

tm.S
  135  ori r15, r15, MSR_FP
  390  ori r5, r5, MSR_FP

/kernel/linux/linux-5.10/arch/powerpc/lib/

ldstfp.S
   23  ori r7, r6, MSR_FP
   47  ori r7, r6, MSR_FP
  213  ori r7, r6, MSR_FP
  228  ori r7, r6, MSR_FP

/kernel/linux/linux-6.6/arch/powerpc/lib/

ldstfp.S
   23  ori r7, r6, MSR_FP
   47  ori r7, r6, MSR_FP
  213  ori r7, r6, MSR_FP
  228  ori r7, r6, MSR_FP

/kernel/linux/linux-5.10/arch/powerpc/include/asm/

switch_to.h
   46  msr_check_and_clear(MSR_FP);    in disable_kernel_fp()
   72  msr_check_and_clear(MSR_FP|MSR_VEC|MSR_VSX);    in disable_kernel_vsx()

/kernel/linux/linux-6.6/arch/powerpc/include/asm/

switch_to.h
   46  msr_check_and_clear(MSR_FP);    in disable_kernel_fp()
   81  msr_check_and_clear(MSR_FP|MSR_VEC|MSR_VSX);    in disable_kernel_vsx()

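These headers pair each enable_kernel_*() helper with a disable_kernel_*()
that simply clears the MSR bits again. The usual calling pattern when kernel
code wants to execute FP instructions looks like this (do_fp_work() is a
hypothetical stand-in; the enable/disable helpers are the real interfaces):

    preempt_disable();      /* FP state is per-CPU while we use it */
    enable_kernel_fp();
    do_fp_work();           /* hypothetical FP-using kernel code */
    disable_kernel_fp();    /* msr_check_and_clear(MSR_FP), line 46 */
    preempt_enable();
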
/kernel/linux/linux-5.10/arch/powerpc/kvm/

book3s_pr.c
  172  kvmppc_giveup_ext(vcpu, MSR_FP | MSR_VEC | MSR_VSX);    in kvmppc_core_vcpu_put_pr()
  361  (MSR_FP | MSR_VEC | MSR_VSX);    in kvmppc_handle_lost_math_exts()
  366  if (ext_diff == MSR_FP)    in kvmppc_handle_lost_math_exts()
  563  if (kvmppc_get_msr(vcpu) & MSR_FP)    in kvmppc_set_msr_pr()
  564  kvmppc_handle_ext(vcpu, BOOK3S_INTERRUPT_FP_UNAVAIL, MSR_FP);    in kvmppc_set_msr_pr()
  829  msr |= MSR_FP | MSR_VEC;    in kvmppc_giveup_ext()
  839  if (msr & MSR_FP) {    in kvmppc_giveup_ext()
  845  if (t->regs->msr & MSR_FP)    in kvmppc_giveup_ext()
  910  msr = MSR_FP | MSR_VEC | MSR_VSX;    in kvmppc_handle_ext()
  922  if (msr & MSR_FP) {    in kvmppc_handle_ext()
  [all...]

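The Book3S-PR matches show how guest math state is flushed back when the
vcpu is scheduled out: VSX cannot be released on its own, so the mask is
widened first, and each facility is only flushed if it is still live in the
host thread. A simplified paraphrase of kvmppc_giveup_ext() built from the
matched lines (the vcpu copy-out step is elided):

    static void giveup_ext_sketch(struct kvm_vcpu *vcpu, ulong msr)
    {
            struct thread_struct *t = &current->thread;

            if (msr & MSR_VSX)
                    msr |= MSR_FP | MSR_VEC;    /* VSX spans the FP/VMX regs (line 829) */

            if (msr & MSR_FP) {
                    if (t->regs->msr & MSR_FP)  /* still live on this CPU? (line 845) */
                            giveup_fpu(current);
                    /* ... copy t->fp_state into the vcpu (elided) ... */
            }
            /* MSR_VEC is handled the same way (elided). */
    }
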
tm.S
   44  ori r8, r8, MSR_FP
  238  ori r5, r5, MSR_FP

emulate_loadstore.c
   30  if (!(kvmppc_get_msr(vcpu) & MSR_FP)) {    in kvmppc_check_fp_disabled()
  246  MSR_FP);    in kvmppc_emulate_loadstore()

booke.c
  124  * It also set the MSR_FP in thread so that host know
  134  if (!(current->thread.regs->msr & MSR_FP)) {    in kvmppc_load_guest_fp()
  139  current->thread.regs->msr |= MSR_FP;    in kvmppc_load_guest_fp()
  151  if (current->thread.regs->msr & MSR_FP)    in kvmppc_save_guest_fp()
  162  vcpu->arch.shadow_msr &= ~MSR_FP;    in kvmppc_vcpu_sync_fpu()
  163  vcpu->arch.shadow_msr |= vcpu->arch.shared->msr & MSR_FP;    in kvmppc_vcpu_sync_fpu()
  754  * Since we can't trap on MSR_FP in GS-mode, we consider the guest    in kvmppc_vcpu_run()

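booke.c documents the opposite constraint: in GS-mode the host cannot trap
guest FP use, so guest FP state is loaded eagerly around vcpu_run, and
MSR_FP in the host thread's MSR marks it as live. A simplified paraphrase
of kvmppc_load_guest_fp() from the matched lines (the fp_save_area
bookkeeping is elided):

    static void load_guest_fp_sketch(struct kvm_vcpu *vcpu)
    {
            if (!(current->thread.regs->msr & MSR_FP)) {    /* not loaded yet */
                    enable_kernel_fp();
                    load_fp_state(&vcpu->arch.fp);          /* guest FP regs -> CPU */
                    current->thread.regs->msr |= MSR_FP;    /* host knows (line 139) */
            }
    }
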
/kernel/linux/linux-6.6/arch/powerpc/kvm/

book3s_pr.c
  184  kvmppc_giveup_ext(vcpu, MSR_FP | MSR_VEC | MSR_VSX);    in kvmppc_core_vcpu_put_pr()
  368  (MSR_FP | MSR_VEC | MSR_VSX);    in kvmppc_handle_lost_math_exts()
  373  if (ext_diff == MSR_FP)    in kvmppc_handle_lost_math_exts()
  547  if (kvmppc_get_msr(vcpu) & MSR_FP)    in kvmppc_set_msr_pr()
  548  kvmppc_handle_ext(vcpu, BOOK3S_INTERRUPT_FP_UNAVAIL, MSR_FP);    in kvmppc_set_msr_pr()
  813  msr |= MSR_FP | MSR_VEC;    in kvmppc_giveup_ext()
  823  if (msr & MSR_FP) {    in kvmppc_giveup_ext()
  829  if (t->regs->msr & MSR_FP)    in kvmppc_giveup_ext()
  894  msr = MSR_FP | MSR_VEC | MSR_VSX;    in kvmppc_handle_ext()
  906  if (msr & MSR_FP) {    in kvmppc_handle_ext()
  [all...]

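kvmppc_handle_ext() is the load-side counterpart: when the guest sets MSR_FP
(see kvmppc_set_msr_pr(), lines 547-548), the facility is faulted in and the
guest's FP state is loaded onto the CPU. A sketch of that direction built
from the matched lines (availability bookkeeping elided):

    static int handle_ext_sketch(struct kvm_vcpu *vcpu, ulong msr)
    {
            if (msr == MSR_VSX)
                    msr = MSR_FP | MSR_VEC | MSR_VSX;   /* line 894: VSX needs all three */

            if (msr & MSR_FP) {
                    enable_kernel_fp();
                    load_fp_state(&vcpu->arch.fp);      /* guest FP state -> CPU */
            }
            /* mark the facility available to the guest (elided) */
            return 0;
    }
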
emulate_loadstore.c
   30  if (!(kvmppc_get_msr(vcpu) & MSR_FP)) {    in kvmppc_check_fp_disabled()
  247  MSR_FP);    in kvmppc_emulate_loadstore()

tm.S
   44  ori r8, r8, MSR_FP
  238  ori r5, r5, MSR_FP

booke.c
  150  * It also set the MSR_FP in thread so that host know
  160  if (!(current->thread.regs->msr & MSR_FP)) {    in kvmppc_load_guest_fp()
  165  current->thread.regs->msr |= MSR_FP;    in kvmppc_load_guest_fp()
  177  if (current->thread.regs->msr & MSR_FP)    in kvmppc_save_guest_fp()
  188  vcpu->arch.shadow_msr &= ~MSR_FP;    in kvmppc_vcpu_sync_fpu()
  189  vcpu->arch.shadow_msr |= vcpu->arch.shared->msr & MSR_FP;    in kvmppc_vcpu_sync_fpu()
  782  * Since we can't trap on MSR_FP in GS-mode, we consider the guest    in kvmppc_vcpu_run()