// SPDX-License-Identifier: GPL-2.0-only
/*
 * Debug and Guest Debug support
 *
 * Copyright (C) 2015 - Linaro Ltd
 * Author: Alex Bennée <alex.bennee@linaro.org>
 */

#include <linux/kvm_host.h>
#include <linux/hw_breakpoint.h>

#include <asm/debug-monitors.h>
#include <asm/kvm_asm.h>
#include <asm/kvm_arm.h>
#include <asm/kvm_emulate.h>

#include "trace.h"

/* These are the bits of MDSCR_EL1 we may manipulate */
#define MDSCR_EL1_DEBUG_MASK	(DBG_MDSCR_SS | \
				DBG_MDSCR_KDE | \
				DBG_MDSCR_MDE)

static DEFINE_PER_CPU(u32, mdcr_el2);

/*
 * save/restore_guest_debug_regs
 *
 * For some debug operations we need to tweak some guest registers. As
 * a result we need to save the state of those registers before we
 * make those modifications.
 *
 * Guest access to MDSCR_EL1 is trapped by the hypervisor and handled
 * after we have restored the preserved value to the main context.
 */
static void save_guest_debug_regs(struct kvm_vcpu *vcpu)
{
	u64 val = vcpu_read_sys_reg(vcpu, MDSCR_EL1);

	vcpu->arch.guest_debug_preserved.mdscr_el1 = val;

	trace_kvm_arm_set_dreg32("Saved MDSCR_EL1",
				 vcpu->arch.guest_debug_preserved.mdscr_el1);
}

static void restore_guest_debug_regs(struct kvm_vcpu *vcpu)
{
	u64 val = vcpu->arch.guest_debug_preserved.mdscr_el1;

	vcpu_write_sys_reg(vcpu, val, MDSCR_EL1);

	trace_kvm_arm_set_dreg32("Restored MDSCR_EL1",
				 vcpu_read_sys_reg(vcpu, MDSCR_EL1));
}

/**
 * kvm_arm_init_debug - grab what we need for debug
 *
 * Currently the sole task of this function is to retrieve the initial
 * value of mdcr_el2 so we can preserve MDCR_EL2.HPMN, which has
 * presumably been set up by some knowledgeable bootcode.
 *
 * It is called once per-cpu during CPU hyp initialisation.
 */
void kvm_arm_init_debug(void)
{
	__this_cpu_write(mdcr_el2, kvm_call_hyp_ret(__kvm_get_mdcr_el2));
}

/**
 * kvm_arm_setup_mdcr_el2 - configure vcpu mdcr_el2 value
 *
 * @vcpu:	the vcpu pointer
 *
 * This ensures we will trap access to:
 * - Performance monitors (MDCR_EL2_TPM/MDCR_EL2_TPMCR)
 * - Debug ROM Address (MDCR_EL2_TDRA)
 * - OS related registers (MDCR_EL2_TDOSA)
 * - Statistical profiler (MDCR_EL2_TPMS/MDCR_EL2_E2PB)
 * - Self-hosted Trace Filter controls (MDCR_EL2_TTRF)
 */
static void kvm_arm_setup_mdcr_el2(struct kvm_vcpu *vcpu)
{
	/*
	 * This also clears MDCR_EL2_E2PB_MASK to disable guest access
	 * to the profiling buffer.
	 */
	vcpu->arch.mdcr_el2 = __this_cpu_read(mdcr_el2) & MDCR_EL2_HPMN_MASK;
	vcpu->arch.mdcr_el2 |= (MDCR_EL2_TPM |
				MDCR_EL2_TPMS |
				MDCR_EL2_TTRF |
				MDCR_EL2_TPMCR |
				MDCR_EL2_TDRA |
				MDCR_EL2_TDOSA);

	/* Is the VM being debugged by userspace? */
	if (vcpu->guest_debug)
		/* Route all software debug exceptions to EL2 */
		vcpu->arch.mdcr_el2 |= MDCR_EL2_TDE;

	/*
	 * Trap debug register access when one of the following is true:
	 * - Userspace is using the hardware to debug the guest
	 *   (KVM_GUESTDBG_USE_HW is set).
	 * - The guest is not using debug (KVM_ARM64_DEBUG_DIRTY is clear).
	 */
	if ((vcpu->guest_debug & KVM_GUESTDBG_USE_HW) ||
	    !(vcpu->arch.flags & KVM_ARM64_DEBUG_DIRTY))
		vcpu->arch.mdcr_el2 |= MDCR_EL2_TDA;

	trace_kvm_arm_set_dreg32("MDCR_EL2", vcpu->arch.mdcr_el2);
}
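
/*
 * Illustrative sketch, not part of this file: the vcpu->guest_debug
 * checks above are driven from userspace through the KVM_SET_GUEST_DEBUG
 * ioctl. Assuming a hypothetical open vcpu fd, a VMM might request
 * single-stepping roughly like so:
 *
 *	struct kvm_guest_debug dbg = {
 *		.control = KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_SINGLESTEP,
 *	};
 *	if (ioctl(vcpu_fd, KVM_SET_GUEST_DEBUG, &dbg) < 0)
 *		err(1, "KVM_SET_GUEST_DEBUG");
 *
 * Once set, the next KVM_RUN enters the guest with MDCR_EL2.TDE routing
 * debug exceptions to EL2, and control returns to userspace with a
 * KVM_EXIT_DEBUG exit after the step completes.
 */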

/**
 * kvm_arm_vcpu_init_debug - setup vcpu debug traps
 *
 * @vcpu:	the vcpu pointer
 *
 * Set vcpu initial mdcr_el2 value.
 */
void kvm_arm_vcpu_init_debug(struct kvm_vcpu *vcpu)
{
	preempt_disable();
	kvm_arm_setup_mdcr_el2(vcpu);
	preempt_enable();
}

/**
 * kvm_arm_reset_debug_ptr - reset the debug ptr to point to the vcpu state
 *
 * @vcpu:	the vcpu pointer
 */
void kvm_arm_reset_debug_ptr(struct kvm_vcpu *vcpu)
{
	vcpu->arch.debug_ptr = &vcpu->arch.vcpu_debug_state;
}

/**
 * kvm_arm_setup_debug - set up debug related stuff
 *
 * @vcpu:	the vcpu pointer
 *
 * This is called before each entry into the hypervisor to set up any
 * debug related registers.
 *
 * Additionally, KVM only traps guest accesses to the debug registers if
 * the guest is not actively using them (see the KVM_ARM64_DEBUG_DIRTY
 * flag on vcpu->arch.flags). Since the guest must not interfere
 * with the hardware state when debugging the guest, we must ensure that
 * trapping is enabled whenever we are debugging the guest using the
 * debug registers.
 */
void kvm_arm_setup_debug(struct kvm_vcpu *vcpu)
{
	unsigned long mdscr, orig_mdcr_el2 = vcpu->arch.mdcr_el2;

	trace_kvm_arm_setup_debug(vcpu, vcpu->guest_debug);

	kvm_arm_setup_mdcr_el2(vcpu);

	/* Is Guest debugging in effect? */
	if (vcpu->guest_debug) {
		/* Save guest debug state */
		save_guest_debug_regs(vcpu);

		/*
		 * Single Step (ARM ARM D2.12.3 The software step state
		 * machine)
		 *
		 * If we are doing Single Step we need to manipulate
		 * the guest's MDSCR_EL1.SS and PSTATE.SS. Once the
		 * step has occurred the hypervisor will trap the
		 * debug exception and we return to userspace.
		 *
		 * If the guest attempts to single step its userspace
		 * we would have to deal with a trapped exception
		 * while in the guest kernel. Because this would be
		 * hard to unwind we suppress the guest's ability to
		 * do so by masking MDSCR_EL1.SS.
		 *
		 * This confuses guest debuggers which use single-step
		 * behind the scenes, but everything returns to normal
		 * once the host is no longer debugging the system.
		 */
		if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP) {
			*vcpu_cpsr(vcpu) |= DBG_SPSR_SS;
			mdscr = vcpu_read_sys_reg(vcpu, MDSCR_EL1);
			mdscr |= DBG_MDSCR_SS;
			vcpu_write_sys_reg(vcpu, mdscr, MDSCR_EL1);
		} else {
			mdscr = vcpu_read_sys_reg(vcpu, MDSCR_EL1);
			mdscr &= ~DBG_MDSCR_SS;
			vcpu_write_sys_reg(vcpu, mdscr, MDSCR_EL1);
		}

		trace_kvm_arm_set_dreg32("SPSR_EL2", *vcpu_cpsr(vcpu));

		/*
		 * HW Breakpoints and watchpoints
		 *
		 * We simply switch the debug_ptr to point to our new
		 * external_debug_state which has been populated by the
		 * debug ioctl. The existing KVM_ARM64_DEBUG_DIRTY
		 * mechanism ensures the registers are updated on the
		 * world switch.
		 */
		if (vcpu->guest_debug & KVM_GUESTDBG_USE_HW) {
			/* Enable breakpoints/watchpoints */
			mdscr = vcpu_read_sys_reg(vcpu, MDSCR_EL1);
			mdscr |= DBG_MDSCR_MDE;
			vcpu_write_sys_reg(vcpu, mdscr, MDSCR_EL1);

			vcpu->arch.debug_ptr = &vcpu->arch.external_debug_state;
			vcpu->arch.flags |= KVM_ARM64_DEBUG_DIRTY;

			trace_kvm_arm_set_regset("BKPTS", get_num_brps(),
						 &vcpu->arch.debug_ptr->dbg_bcr[0],
						 &vcpu->arch.debug_ptr->dbg_bvr[0]);

			trace_kvm_arm_set_regset("WAPTS", get_num_wrps(),
						 &vcpu->arch.debug_ptr->dbg_wcr[0],
						 &vcpu->arch.debug_ptr->dbg_wvr[0]);
		}
	}

	BUG_ON(!vcpu->guest_debug &&
		vcpu->arch.debug_ptr != &vcpu->arch.vcpu_debug_state);

	/* If KDE or MDE are set, perform a full save/restore cycle. */
	if (vcpu_read_sys_reg(vcpu, MDSCR_EL1) & (DBG_MDSCR_KDE | DBG_MDSCR_MDE))
		vcpu->arch.flags |= KVM_ARM64_DEBUG_DIRTY;

	/* Write mdcr_el2 changes since vcpu_load() on VHE systems */
	if (has_vhe() && orig_mdcr_el2 != vcpu->arch.mdcr_el2)
		write_sysreg(vcpu->arch.mdcr_el2, mdcr_el2);

	trace_kvm_arm_set_dreg32("MDSCR_EL1", vcpu_read_sys_reg(vcpu, MDSCR_EL1));
}
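
/*
 * Illustrative sketch, not part of this file: external_debug_state is
 * populated from the same KVM_SET_GUEST_DEBUG ioctl. Assuming a
 * hypothetical vcpu_fd and guest_addr, a VMM could install one hardware
 * breakpoint roughly like this (DBGBCR field layout per the ARM ARM:
 * E is bit [0], PMC is bits [2:1], BAS is bits [8:5]):
 *
 *	struct kvm_guest_debug dbg = {
 *		.control = KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_HW,
 *	};
 *	dbg.arch.dbg_bvr[0] = guest_addr;	// address to match
 *	dbg.arch.dbg_bcr[0] = (0xf << 5) |	// BAS: match the instruction
 *			      (0x3 << 1) |	// PMC: EL1 and EL0
 *			      0x1;		// E: breakpoint enabled
 *	ioctl(vcpu_fd, KVM_SET_GUEST_DEBUG, &dbg);
 *
 * kvm_arm_setup_debug() then points debug_ptr at these values, and the
 * KVM_ARM64_DEBUG_DIRTY flag gets them loaded on the next world switch.
 */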

void kvm_arm_clear_debug(struct kvm_vcpu *vcpu)
{
	trace_kvm_arm_clear_debug(vcpu->guest_debug);

	if (vcpu->guest_debug) {
		restore_guest_debug_regs(vcpu);

		/*
		 * If we were using HW debug we need to restore the
		 * debug_ptr to the guest debug state.
		 */
		if (vcpu->guest_debug & KVM_GUESTDBG_USE_HW) {
			kvm_arm_reset_debug_ptr(vcpu);

			trace_kvm_arm_set_regset("BKPTS", get_num_brps(),
						 &vcpu->arch.debug_ptr->dbg_bcr[0],
						 &vcpu->arch.debug_ptr->dbg_bvr[0]);

			trace_kvm_arm_set_regset("WAPTS", get_num_wrps(),
						 &vcpu->arch.debug_ptr->dbg_wcr[0],
						 &vcpu->arch.debug_ptr->dbg_wvr[0]);
		}
	}
}
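
/*
 * Illustrative sketch, not part of this file: on the userspace side the
 * trapped debug exception surfaces as a KVM_EXIT_DEBUG exit from KVM_RUN.
 * Assuming a hypothetical vcpu_fd and an mmap'd kvm_run structure named
 * run:
 *
 *	ioctl(vcpu_fd, KVM_RUN, 0);
 *	if (run->exit_reason == KVM_EXIT_DEBUG) {
 *		// run->debug.arch.hsr holds the exception syndrome; a
 *		// debugger would decode it here and decide whether to
 *		// step again or clear guest_debug.
 *	}
 *
 * kvm_arm_clear_debug() runs on the way back out of the guest, restoring
 * the saved MDSCR_EL1 and re-pointing debug_ptr at the vcpu's own state.
 */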