// SPDX-License-Identifier: GPL-2.0-only
/*
 * Debug and Guest Debug support
 *
 * Copyright (C) 2015 - Linaro Ltd
 * Author: Alex Bennée <alex.bennee@linaro.org>
 */

#include <linux/kvm_host.h>
#include <linux/hw_breakpoint.h>

#include <asm/debug-monitors.h>
#include <asm/kvm_asm.h>
#include <asm/kvm_arm.h>
#include <asm/kvm_emulate.h>

#include "trace.h"

/* These are the bits of MDSCR_EL1 we may manipulate */
#define MDSCR_EL1_DEBUG_MASK	(DBG_MDSCR_SS | \
				DBG_MDSCR_KDE | \
				DBG_MDSCR_MDE)

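/*
 * Per-CPU copy of the boot-time MDCR_EL2 value, captured by
 * kvm_arm_init_debug() so that the MDCR_EL2.HPMN field programmed by
 * the bootcode is preserved when a vcpu's mdcr_el2 is built.
 */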
static DEFINE_PER_CPU(u64, mdcr_el2);

/**
 * save/restore_guest_debug_regs
 *
 * For some debug operations we need to tweak some guest registers. As
 * a result we need to save the state of those registers before we
 * make those modifications.
 *
 * Guest access to MDSCR_EL1 is trapped by the hypervisor and handled
 * after we have restored the preserved value to the main context.
 */
static void save_guest_debug_regs(struct kvm_vcpu *vcpu)
{
	u64 val = vcpu_read_sys_reg(vcpu, MDSCR_EL1);

	vcpu->arch.guest_debug_preserved.mdscr_el1 = val;

	trace_kvm_arm_set_dreg32("Saved MDSCR_EL1",
				vcpu->arch.guest_debug_preserved.mdscr_el1);
}

static void restore_guest_debug_regs(struct kvm_vcpu *vcpu)
{
	u64 val = vcpu->arch.guest_debug_preserved.mdscr_el1;

	vcpu_write_sys_reg(vcpu, val, MDSCR_EL1);

	trace_kvm_arm_set_dreg32("Restored MDSCR_EL1",
				vcpu_read_sys_reg(vcpu, MDSCR_EL1));
}

/**
 * kvm_arm_init_debug - grab what we need for debug
 *
 * Currently the sole task of this function is to retrieve the initial
 * value of mdcr_el2 so we can preserve MDCR_EL2.HPMN which has
 * presumably been set-up by some knowledgeable bootcode.
 *
 * It is called once per-cpu during CPU hyp initialisation.
 */

void kvm_arm_init_debug(void)
{
	__this_cpu_write(mdcr_el2, kvm_call_hyp_ret(__kvm_get_mdcr_el2));
}

/**
 * kvm_arm_setup_mdcr_el2 - configure vcpu mdcr_el2 value
 *
 * @vcpu:	the vcpu pointer
 *
 * This ensures we will trap access to:
 *  - Performance monitors (MDCR_EL2_TPM/MDCR_EL2_TPMCR)
 *  - Debug ROM Address (MDCR_EL2_TDRA)
 *  - OS related registers (MDCR_EL2_TDOSA)
 *  - Statistical profiler (MDCR_EL2_TPMS/MDCR_EL2_E2PB)
 *  - Self-hosted Trace Filter controls (MDCR_EL2_TTRF)
 *  - Self-hosted Trace (MDCR_EL2_TTRF/MDCR_EL2_E2TB)
 */
static void kvm_arm_setup_mdcr_el2(struct kvm_vcpu *vcpu)
{
	/*
	 * This also clears MDCR_EL2_E2PB_MASK and MDCR_EL2_E2TB_MASK
	 * to disable guest access to the profiling and trace buffers
	 */
	vcpu->arch.mdcr_el2 = __this_cpu_read(mdcr_el2) & MDCR_EL2_HPMN_MASK;
	vcpu->arch.mdcr_el2 |= (MDCR_EL2_TPM |
				MDCR_EL2_TPMS |
				MDCR_EL2_TTRF |
				MDCR_EL2_TPMCR |
				MDCR_EL2_TDRA |
				MDCR_EL2_TDOSA);

	/* Is the VM being debugged by userspace? */
	if (vcpu->guest_debug)
		/* Route all software debug exceptions to EL2 */
		vcpu->arch.mdcr_el2 |= MDCR_EL2_TDE;

	/*
	 * Trap debug register access when one of the following is true:
	 *  - Userspace is using the hardware to debug the guest
	 *    (KVM_GUESTDBG_USE_HW is set).
	 *  - The guest is not using debug (KVM_ARM64_DEBUG_DIRTY is clear).
	 */
	if ((vcpu->guest_debug & KVM_GUESTDBG_USE_HW) ||
	    !(vcpu->arch.flags & KVM_ARM64_DEBUG_DIRTY))
		vcpu->arch.mdcr_el2 |= MDCR_EL2_TDA;

	trace_kvm_arm_set_dreg32("MDCR_EL2", vcpu->arch.mdcr_el2);
}

/**
 * kvm_arm_vcpu_init_debug - setup vcpu debug traps
 *
 * @vcpu:	the vcpu pointer
 *
 * Set vcpu initial mdcr_el2 value.
 */
void kvm_arm_vcpu_init_debug(struct kvm_vcpu *vcpu)
{
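	/*
	 * kvm_arm_setup_mdcr_el2() reads this CPU's cached mdcr_el2 value,
	 * so keep preemption disabled while it runs.
	 */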
	preempt_disable();
	kvm_arm_setup_mdcr_el2(vcpu);
	preempt_enable();
}

/**
 * kvm_arm_reset_debug_ptr - reset the debug ptr to point to the vcpu state
 */

void kvm_arm_reset_debug_ptr(struct kvm_vcpu *vcpu)
{
	vcpu->arch.debug_ptr = &vcpu->arch.vcpu_debug_state;
}

/**
 * kvm_arm_setup_debug - set up debug related stuff
 *
 * @vcpu:	the vcpu pointer
 *
 * This is called before each entry into the hypervisor to setup any
 * debug related registers.
 *
 * Additionally, KVM only traps guest accesses to the debug registers if
 * the guest is not actively using them (see the KVM_ARM64_DEBUG_DIRTY
 * flag on vcpu->arch.flags). Since the guest must not interfere
 * with the hardware state when debugging the guest, we must ensure that
 * trapping is enabled whenever we are debugging the guest using the
 * debug registers.
 */

void kvm_arm_setup_debug(struct kvm_vcpu *vcpu)
{
	unsigned long mdscr, orig_mdcr_el2 = vcpu->arch.mdcr_el2;

	trace_kvm_arm_setup_debug(vcpu, vcpu->guest_debug);

	kvm_arm_setup_mdcr_el2(vcpu);

	/* Is Guest debugging in effect? */
	if (vcpu->guest_debug) {
		/* Save guest debug state */
		save_guest_debug_regs(vcpu);

		/*
		 * Single Step (ARM ARM D2.12.3 The software step state
		 * machine)
		 *
		 * If we are doing Single Step we need to manipulate
		 * the guest's MDSCR_EL1.SS and PSTATE.SS. Once the
		 * step has occurred the hypervisor will trap the
		 * debug exception and we return to userspace.
		 *
		 * If the guest attempts to single step its userspace
		 * we would have to deal with a trapped exception
		 * while in the guest kernel. Because this would be
		 * hard to unwind we suppress the guest's ability to
		 * do so by masking MDSCR_EL1.SS.
		 *
		 * This confuses guest debuggers which use
		 * single-step behind the scenes but everything
		 * returns to normal once the host is no longer
		 * debugging the system.
		 */
		if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP) {
			*vcpu_cpsr(vcpu) |= DBG_SPSR_SS;
			mdscr = vcpu_read_sys_reg(vcpu, MDSCR_EL1);
			mdscr |= DBG_MDSCR_SS;
			vcpu_write_sys_reg(vcpu, mdscr, MDSCR_EL1);
		} else {
			mdscr = vcpu_read_sys_reg(vcpu, MDSCR_EL1);
			mdscr &= ~DBG_MDSCR_SS;
			vcpu_write_sys_reg(vcpu, mdscr, MDSCR_EL1);
		}

		trace_kvm_arm_set_dreg32("SPSR_EL2", *vcpu_cpsr(vcpu));

		/*
		 * HW Breakpoints and watchpoints
		 *
		 * We simply switch the debug_ptr to point to our new
		 * external_debug_state which has been populated by the
		 * debug ioctl. The existing KVM_ARM64_DEBUG_DIRTY
		 * mechanism ensures the registers are updated on the
		 * world switch.
		 */
		if (vcpu->guest_debug & KVM_GUESTDBG_USE_HW) {
			/* Enable breakpoints/watchpoints */
			mdscr = vcpu_read_sys_reg(vcpu, MDSCR_EL1);
			mdscr |= DBG_MDSCR_MDE;
			vcpu_write_sys_reg(vcpu, mdscr, MDSCR_EL1);

			vcpu->arch.debug_ptr = &vcpu->arch.external_debug_state;
			vcpu->arch.flags |= KVM_ARM64_DEBUG_DIRTY;

			trace_kvm_arm_set_regset("BKPTS", get_num_brps(),
						 &vcpu->arch.debug_ptr->dbg_bcr[0],
						 &vcpu->arch.debug_ptr->dbg_bvr[0]);

			trace_kvm_arm_set_regset("WAPTS", get_num_wrps(),
						 &vcpu->arch.debug_ptr->dbg_wcr[0],
						 &vcpu->arch.debug_ptr->dbg_wvr[0]);
		}
	}

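	/*
	 * Without userspace debugging in effect, debug_ptr must still point
	 * at the vcpu's own debug state.
	 */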
	BUG_ON(!vcpu->guest_debug &&
		vcpu->arch.debug_ptr != &vcpu->arch.vcpu_debug_state);

	/* If KDE or MDE are set, perform a full save/restore cycle. */
	if (vcpu_read_sys_reg(vcpu, MDSCR_EL1) & (DBG_MDSCR_KDE | DBG_MDSCR_MDE))
		vcpu->arch.flags |= KVM_ARM64_DEBUG_DIRTY;

	/* Write mdcr_el2 changes since vcpu_load on VHE systems */
	if (has_vhe() && orig_mdcr_el2 != vcpu->arch.mdcr_el2)
		write_sysreg(vcpu->arch.mdcr_el2, mdcr_el2);

	trace_kvm_arm_set_dreg32("MDSCR_EL1", vcpu_read_sys_reg(vcpu, MDSCR_EL1));
}

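/*
 * kvm_arm_clear_debug - undo the effects of kvm_arm_setup_debug
 *
 * Restore the guest's saved MDSCR_EL1 and, if hardware-assisted debug
 * was in use, point debug_ptr back at the vcpu's own debug state.
 */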
void kvm_arm_clear_debug(struct kvm_vcpu *vcpu)
{
	trace_kvm_arm_clear_debug(vcpu->guest_debug);

	if (vcpu->guest_debug) {
		restore_guest_debug_regs(vcpu);

		/*
		 * If we were using HW debug we need to restore the
		 * debug_ptr to the guest debug state.
		 */
		if (vcpu->guest_debug & KVM_GUESTDBG_USE_HW) {
			kvm_arm_reset_debug_ptr(vcpu);

			trace_kvm_arm_set_regset("BKPTS", get_num_brps(),
						 &vcpu->arch.debug_ptr->dbg_bcr[0],
						 &vcpu->arch.debug_ptr->dbg_bvr[0]);

			trace_kvm_arm_set_regset("WAPTS", get_num_wrps(),
						 &vcpu->arch.debug_ptr->dbg_wcr[0],
						 &vcpu->arch.debug_ptr->dbg_wvr[0]);
		}
	}
}

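/*
 * On non-VHE systems, work out whether the host's SPE and/or TRBE state
 * may need to be saved around the guest run, and record that in the
 * vcpu's debug state flags.
 */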
void kvm_arch_vcpu_load_debug_state_flags(struct kvm_vcpu *vcpu)
{
	u64 dfr0;

	/* For VHE, there is nothing to do */
	if (has_vhe())
		return;

	dfr0 = read_sysreg(id_aa64dfr0_el1);
	/*
	 * If SPE is present on this CPU and is available at current EL,
	 * we may need to check if the host state needs to be saved.
	 */
	if (cpuid_feature_extract_unsigned_field(dfr0, ID_AA64DFR0_PMSVER_SHIFT) &&
	    !(read_sysreg_s(SYS_PMBIDR_EL1) & BIT(SYS_PMBIDR_EL1_P_SHIFT)))
		vcpu->arch.flags |= KVM_ARM64_DEBUG_STATE_SAVE_SPE;

	/* Check if we have TRBE implemented and available at the host */
	if (cpuid_feature_extract_unsigned_field(dfr0, ID_AA64DFR0_TRBE_SHIFT) &&
	    !(read_sysreg_s(SYS_TRBIDR_EL1) & TRBIDR_PROG))
		vcpu->arch.flags |= KVM_ARM64_DEBUG_STATE_SAVE_TRBE;
}

void kvm_arch_vcpu_put_debug_state_flags(struct kvm_vcpu *vcpu)
{
	vcpu->arch.flags &= ~(KVM_ARM64_DEBUG_STATE_SAVE_SPE |
			      KVM_ARM64_DEBUG_STATE_SAVE_TRBE);
}