/kernel/linux/linux-5.10/arch/powerpc/kvm/
book3s_hv_tm.c
    67    (newmsr & MSR_TM)));                          in kvmhv_p9_tm_emulation()
    114   (newmsr & MSR_TM)));                          in kvmhv_p9_tm_emulation()
    135   if (!(msr & MSR_TM)) {                        in kvmhv_p9_tm_emulation()
    165   if (!(msr & MSR_TM)) {                        in kvmhv_p9_tm_emulation()
    203   if (!(msr & MSR_TM)) {                        in kvmhv_p9_tm_emulation()

book3s_hv_tm_builtin.c
    42    if (!(MSR_TM_TRANSACTIONAL(newmsr) && (newmsr & MSR_TM)))    in kvmhv_p9_tm_emulation_early()
    79    if (!(MSR_TM_TRANSACTIONAL(newmsr) && (newmsr & MSR_TM)))    in kvmhv_p9_tm_emulation_early()
    95    if (!(vcpu->arch.hfscr & HFSCR_TM) || !(msr & MSR_TM))       in kvmhv_p9_tm_emulation_early()

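Note on the two HV emulation files above: both apply the same layered guard before emulating a TM instruction. The hypervisor facility bit (HFSCR_TM) and the guest's MSR[TM] must be set, and a target MSR is only accepted when it describes a consistent transactional state. A minimal user-space sketch of those checks, with plain integers standing in for vcpu state; the HFSCR_TM bit position below is illustrative, not taken from reg.h:

    #include <stdbool.h>
    #include <stdint.h>

    /* Bit layout per reg.h: MSR[TM] is bit 32; the two-bit TS (transaction
     * state) field sits directly above it. */
    #define MSR_TM      (1ULL << 32)
    #define MSR_TS_S    (1ULL << 33)              /* suspended */
    #define MSR_TS_T    (1ULL << 34)              /* transactional */
    #define MSR_TS_MASK (MSR_TS_T | MSR_TS_S)
    #define HFSCR_TM    (1ULL << 5)               /* illustrative bit position */

    #define MSR_TM_TRANSACTIONAL(x) (((x) & MSR_TS_MASK) == MSR_TS_T)

    /* Mirrors the guards seen in kvmhv_p9_tm_emulation_early(): bail out
     * when the facility is off, and only treat the instruction as a TM
     * event when the target MSR describes an active transaction. */
    static bool tm_emulation_allowed(uint64_t hfscr, uint64_t msr, uint64_t newmsr)
    {
        if (!(hfscr & HFSCR_TM) || !(msr & MSR_TM))
            return false;
        return MSR_TM_TRANSACTIONAL(newmsr) && (newmsr & MSR_TM);
    }

    int main(void)
    {
        return tm_emulation_allowed(HFSCR_TM, MSR_TM, MSR_TM | MSR_TS_T) ? 0 : 1;
    }
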
book3s_emulate.c
    279   if (((cur_msr & MSR_TM) == 0) &&              in kvmppc_core_emulate_op_pr()
    280       ((srr1 & MSR_TM) == 0) &&                 in kvmppc_core_emulate_op_pr()
    495   if (!(kvmppc_get_msr(vcpu) & MSR_TM)) {       in kvmppc_core_emulate_op_pr()
    533   if (!(kvmppc_get_msr(vcpu) & MSR_TM)) {       in kvmppc_core_emulate_op_pr()
    559   if (!(kvmppc_get_msr(vcpu) & MSR_TM)) {       in kvmppc_core_emulate_op_pr()
    593   if (!(kvmppc_get_msr(vcpu) & MSR_TM)) {       in kvmppc_core_emulate_op_pr()
    789   if (!(kvmppc_get_msr(vcpu) & MSR_TM)) {       in kvmppc_core_emulate_mtspr_pr()
    967   if (!(kvmppc_get_msr(vcpu) & MSR_TM)) {       in kvmppc_core_emulate_mfspr_pr()

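The PR-KVM emulator repeats one pattern at every TM entry point above: if the guest never enabled MSR[TM], the instruction is not emulated and the guest is handed an interrupt instead. A hedged sketch of that shape; the injection helper here is a hypothetical stand-in, not the kernel's actual call:

    #include <stdint.h>

    #define MSR_TM (1ULL << 32)

    enum emulate_result { EMULATE_DONE, EMULATE_AGAIN };

    /* Hypothetical stand-in for the kernel's interrupt-injection path. */
    static void inject_facility_unavailable(void) { }

    /* Shape of the guard repeated in kvmppc_core_emulate_op_pr() and the
     * mtspr/mfspr paths: no MSR[TM], no TM emulation. */
    static enum emulate_result emulate_tm_insn(uint64_t guest_msr)
    {
        if (!(guest_msr & MSR_TM)) {
            inject_facility_unavailable();
            return EMULATE_AGAIN;
        }
        /* ... emulate treclaim./trechkpt./tabort. or a TM SPR access ... */
        return EMULATE_DONE;
    }

    int main(void)
    {
        return emulate_tm_insn(MSR_TM) == EMULATE_DONE ? 0 : 1;
    }
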
book3s_pr.c
    232         MSR_TM | MSR_TS_MASK;                   in kvmppc_recalc_shadow_msr()
    251   smsr &= ~MSR_TM;                              in kvmppc_recalc_shadow_msr()
    395   if (kvmppc_get_msr(vcpu) & MSR_TM) {          in kvmppc_restore_tm_pr()
    407   if (kvmppc_get_msr(vcpu) & MSR_TM) {          in kvmppc_restore_tm_pr()
    567   if (kvmppc_get_msr(vcpu) & MSR_TM)            in kvmppc_set_msr_pr()
    1019  guest_fac_enabled = kvmppc_get_msr(vcpu) & MSR_TM;    in kvmppc_handle_fac()
    1045  /* Since we disabled MSR_TM at privilege state, the mfspr instruction    in kvmppc_handle_fac()

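book3s_pr.c maintains a shadow MSR, the value actually loaded into hardware while the guest runs, and the hits show both directions: MSR_TM | MSR_TS_MASK are among the guest bits let through, but MSR_TM is stripped again while the guest runs privileged, so TM operations trap into the host for emulation (the comment hit at line 1045 points at the same design). A condensed, illustrative model; other shadow-MSR bits are omitted and this is not the kernel function:

    #include <stdint.h>

    #define MSR_PR      (1ULL << 14)              /* problem (user) state */
    #define MSR_TM      (1ULL << 32)
    #define MSR_TS_MASK ((1ULL << 34) | (1ULL << 33))

    /* Condensed from the kvmppc_recalc_shadow_msr() hits: pass the TM
     * bits through, then knock MSR_TM back out for a privileged guest
     * so every TM operation traps to the host. */
    static uint64_t recalc_shadow_msr(uint64_t guest_msr)
    {
        uint64_t smsr = guest_msr & (MSR_TM | MSR_TS_MASK);
        if (!(guest_msr & MSR_PR))
            smsr &= ~MSR_TM;
        return smsr;
    }

    int main(void)
    {
        return recalc_shadow_msr(MSR_TM | MSR_PR) == MSR_TM ? 0 : 1;
    }
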
tm.S
    235   li r6, MSR_TM >> 32

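The lone assembly hit is worth a note: li takes only a 16-bit signed immediate, and MSR_TM sits at bit 32, so the mask cannot be loaded in one instruction. The code loads the pre-shifted constant (MSR_TM >> 32, i.e. 1) and a 32-bit left shift in the surrounding code rebuilds the mask in the register. The same arithmetic in C:

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
        uint64_t msr_tm  = 1ULL << 32;    /* __MASK(MSR_TM_LG), MSR_TM_LG == 32 */
        uint64_t imm     = msr_tm >> 32;  /* == 1, fits li's 16-bit immediate */
        uint64_t rebuilt = imm << 32;     /* what the following shift reconstructs */
        assert(rebuilt == msr_tm);
        return 0;
    }
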
book3s_hv.c
    4353  (current->thread.regs->msr & MSR_TM)) {       in kvmppc_vcpu_run_hv()
    4360  mtmsr(mfmsr() | MSR_TM);                      in kvmppc_vcpu_run_hv()
    4364  current->thread.regs->msr &= ~MSR_TM;         in kvmppc_vcpu_run_hv()

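The book3s_hv.c hits show the host side of the handoff: if the host thread was using TM, kvmppc_vcpu_run_hv() re-enables the facility in the hardware MSR (mtmsr(mfmsr() | MSR_TM)) so the thread's transactional state can be saved, then clears MSR_TM from the thread's saved MSR so nothing touches that state while the guest owns the TM hardware. A user-space model of that sequence, with a plain variable standing in for mfmsr()/mtmsr(); this illustrates the bookkeeping, it is not privileged code:

    #include <stdint.h>

    #define MSR_TM (1ULL << 32)

    static uint64_t hw_msr;               /* stands in for mfmsr()/mtmsr() */
    static uint64_t thread_msr;           /* current->thread.regs->msr */

    /* Sequence from the kvmppc_vcpu_run_hv() hits above. */
    static void save_host_tm(void)
    {
        if (thread_msr & MSR_TM) {
            hw_msr |= MSR_TM;             /* mtmsr(mfmsr() | MSR_TM); */
            /* ... save the checkpointed register state here ... */
            thread_msr &= ~MSR_TM;
        }
    }

    int main(void)
    {
        thread_msr = MSR_TM;
        save_host_tm();
        return (thread_msr & MSR_TM) ? 1 : 0;    /* 0: TM state handed off */
    }
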
/kernel/linux/linux-6.6/arch/powerpc/kvm/
book3s_hv_tm_builtin.c
    42    if (!(MSR_TM_TRANSACTIONAL(newmsr) && (newmsr & MSR_TM)))    in kvmhv_p9_tm_emulation_early()
    79    if (!(MSR_TM_TRANSACTIONAL(newmsr) && (newmsr & MSR_TM)))    in kvmhv_p9_tm_emulation_early()
    95    if (!(vcpu->arch.hfscr & HFSCR_TM) || !(msr & MSR_TM))       in kvmhv_p9_tm_emulation_early()

book3s_hv_tm.c
    76    (newmsr & MSR_TM)));                          in kvmhv_p9_tm_emulation()
    124   (newmsr & MSR_TM)));                          in kvmhv_p9_tm_emulation()
    147   if (!(msr & MSR_TM)) {                        in kvmhv_p9_tm_emulation()
    179   if (!(msr & MSR_TM)) {                        in kvmhv_p9_tm_emulation()
    219   if (!(msr & MSR_TM)) {                        in kvmhv_p9_tm_emulation()

book3s_emulate.c
    275   if (((cur_msr & MSR_TM) == 0) &&              in kvmppc_core_emulate_op_pr()
    276       ((srr1 & MSR_TM) == 0) &&                 in kvmppc_core_emulate_op_pr()
    491   if (!(kvmppc_get_msr(vcpu) & MSR_TM)) {       in kvmppc_core_emulate_op_pr()
    529   if (!(kvmppc_get_msr(vcpu) & MSR_TM)) {       in kvmppc_core_emulate_op_pr()
    555   if (!(kvmppc_get_msr(vcpu) & MSR_TM)) {       in kvmppc_core_emulate_op_pr()
    589   if (!(kvmppc_get_msr(vcpu) & MSR_TM)) {       in kvmppc_core_emulate_op_pr()
    785   if (!(kvmppc_get_msr(vcpu) & MSR_TM)) {       in kvmppc_core_emulate_mtspr_pr()
    966   if (!(kvmppc_get_msr(vcpu) & MSR_TM)) {       in kvmppc_core_emulate_mfspr_pr()

book3s_pr.c
    239         MSR_TM | MSR_TS_MASK;                   in kvmppc_recalc_shadow_msr()
    258   smsr &= ~MSR_TM;                              in kvmppc_recalc_shadow_msr()
    402   if (kvmppc_get_msr(vcpu) & MSR_TM) {          in kvmppc_restore_tm_pr()
    414   if (kvmppc_get_msr(vcpu) & MSR_TM) {          in kvmppc_restore_tm_pr()
    551   if (kvmppc_get_msr(vcpu) & MSR_TM)            in kvmppc_set_msr_pr()
    1003  guest_fac_enabled = kvmppc_get_msr(vcpu) & MSR_TM;    in kvmppc_handle_fac()
    1029  /* Since we disabled MSR_TM at privilege state, the mfspr instruction    in kvmppc_handle_fac()

tm.S
    235   li r6, MSR_TM >> 32

book3s_hv_p9_entry.c
    510   msr_needed |= MSR_TM;                         in kvmppc_msr_hard_disable_set_facilities()

book3s_hv.c
    4827  (current->thread.regs->msr & MSR_TM)) {       in kvmppc_vcpu_run_hv()
    4862  msr |= MSR_TM;                                in kvmppc_vcpu_run_hv()

/kernel/linux/linux-6.6/arch/powerpc/kernel/
syscall.c
    101   mtmsr(mfmsr() | MSR_TM);                      in system_call_exception()

process.c
    930   return tsk && tsk->thread.regs && (tsk->thread.regs->msr & MSR_TM);    in tm_enabled()
    1031  if (!(thread->regs->msr & MSR_TM))            in tm_recheckpoint()
    1100  prev->thread.regs->msr &= ~MSR_TM;            in __switch_to_tm()
    1208  if (usermsr & MSR_TM) {                       in kvmppc_save_user_regs()
    1212  current->thread.regs->msr &= ~MSR_TM;         in kvmppc_save_user_regs()
    1513  {MSR_TM, "E"},
    1526  if (val & (MSR_TM | MSR_TS_S | MSR_TS_T)) {   in print_tm_bits()

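process.c supplies the predicate the rest of the lazy-TM handling builds on, visible in full in the first hit: a task's TM state is live only if the task exists, has a user register frame, and has MSR[TM] set in it. The same three-part test, lifted into a standalone sketch with minimal stand-ins for the kernel structures it touches:

    #include <stdbool.h>
    #include <stdint.h>

    #define MSR_TM (1ULL << 32)

    /* Minimal stand-ins for the kernel structures the predicate reads. */
    struct pt_regs      { uint64_t msr; };
    struct thread_struct { struct pt_regs *regs; };
    struct task_struct  { struct thread_struct thread; };

    /* Same three-part test as tm_enabled() in the hit above. */
    static bool tm_enabled(struct task_struct *tsk)
    {
        return tsk && tsk->thread.regs && (tsk->thread.regs->msr & MSR_TM);
    }

    int main(void)
    {
        struct pt_regs regs = { .msr = MSR_TM };
        struct task_struct task = { .thread = { .regs = &regs } };
        return tm_enabled(&task) ? 0 : 1;     /* returns 0: TM enabled */
    }
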
tm.S
    53    li r3, MSR_TM >> 32
    64    li r3, MSR_TM >> 32

signal_64.c
    594   regs_set_return_msr(regs, regs->msr | MSR_TM);    in restore_tm_sigcontexts()

traps.c
    1730  regs_set_return_msr(regs, regs->msr | MSR_TM);    in tm_unavailable()

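Set side by side with the linux-5.10 hits further down, the signal_64.c and traps.c entries show an interface change rather than a behavioural one: 5.10 writes the saved MSR image directly, while 6.6 routes the identical update through the regs_set_return_msr() accessor. The two forms, as they appear in the hits:

    /* linux-5.10 (restore_tm_sigcontexts, tm_unavailable): */
    regs->msr |= MSR_TM;

    /* linux-6.6, same call sites: */
    regs_set_return_msr(regs, regs->msr | MSR_TM);
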
/kernel/linux/linux-5.10/arch/powerpc/kernel/
tm.S
    53    li r3, MSR_TM >> 32
    64    li r3, MSR_TM >> 32

process.c
    912   return tsk && tsk->thread.regs && (tsk->thread.regs->msr & MSR_TM);    in tm_enabled()
    1013  if (!(thread->regs->msr & MSR_TM))            in tm_recheckpoint()
    1082  prev->thread.regs->msr &= ~MSR_TM;            in __switch_to_tm()
    1444  {MSR_TM, "E"},
    1457  if (val & (MSR_TM | MSR_TS_S | MSR_TS_T)) {   in print_tm_bits()

signal_64.c
    575   regs->msr |= MSR_TM;                          in restore_tm_sigcontexts()

traps.c
    1723  regs->msr |= MSR_TM;                          in tm_unavailable()

/kernel/linux/linux-5.10/arch/powerpc/include/asm/
reg.h
    118   #define MSR_TM  __MASK(MSR_TM_LG)    /* Transactional Mem Available */

/kernel/linux/linux-6.6/arch/powerpc/include/asm/
reg.h
    115   #define MSR_TM  __MASK(MSR_TM_LG)    /* Transactional Mem Available */

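Both trees construct the constant the same way: __MASK turns the bit number MSR_TM_LG (32) into a mask, i.e. 1UL shifted left by 32. A compilable restatement, assuming a 64-bit unsigned long as on ppc64:

    #include <stdio.h>

    #define __MASK(X)  (1UL << (X))       /* as in reg.h */
    #define MSR_TM_LG  32
    #define MSR_TM     __MASK(MSR_TM_LG)

    int main(void)
    {
        printf("MSR_TM = 0x%016lx\n", (unsigned long)MSR_TM);
        /* prints: MSR_TM = 0x0000000100000000 */
        return 0;
    }
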
/kernel/linux/linux-5.10/arch/powerpc/xmon/
xmon.c
    1978  if (msr & MSR_TM) {                           in dump_207_sprs()
|