// SPDX-License-Identifier: GPL-2.0-only
/*
 * Kernel-based Virtual Machine driver for Linux
 *
 * Macros and functions to access KVM PTEs (also known as SPTEs)
 *
 * Copyright (C) 2006 Qumranet, Inc.
 * Copyright 2020 Red Hat, Inc. and/or its affiliates.
 */


#include <linux/kvm_host.h>
#include "mmu.h"
#include "mmu_internal.h"
#include "x86.h"
#include "spte.h"

#include <asm/e820/api.h>
#include <asm/vmx.h>

static bool __read_mostly enable_mmio_caching = true;
module_param_named(mmio_caching, enable_mmio_caching, bool, 0444);

u64 __read_mostly shadow_host_writable_mask;
u64 __read_mostly shadow_mmu_writable_mask;
u64 __read_mostly shadow_nx_mask;
u64 __read_mostly shadow_x_mask; /* mutually exclusive with nx_mask */
u64 __read_mostly shadow_user_mask;
u64 __read_mostly shadow_accessed_mask;
u64 __read_mostly shadow_dirty_mask;
u64 __read_mostly shadow_mmio_value;
u64 __read_mostly shadow_mmio_mask;
u64 __read_mostly shadow_mmio_access_mask;
u64 __read_mostly shadow_present_mask;
u64 __read_mostly shadow_me_mask;
u64 __read_mostly shadow_acc_track_mask;

u64 __read_mostly shadow_nonpresent_or_rsvd_mask;
u64 __read_mostly shadow_nonpresent_or_rsvd_lower_gfn_mask;

u8 __read_mostly shadow_phys_bits;

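/*
 * The MMIO generation (taken from the memslots generation) is stored in the
 * SPTE split across two discontiguous bit ranges, the "low" and "high" gen
 * fields, because the bits in between are needed for other SPTE state.  The
 * exact bit positions come from the MMIO_SPTE_GEN_{LOW,HIGH}_* definitions.
 */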
static u64 generation_mmio_spte_mask(u64 gen)
{
        u64 mask;

        WARN_ON(gen & ~MMIO_SPTE_GEN_MASK);

        mask = (gen << MMIO_SPTE_GEN_LOW_SHIFT) & MMIO_SPTE_GEN_LOW_MASK;
        mask |= (gen << MMIO_SPTE_GEN_HIGH_SHIFT) & MMIO_SPTE_GEN_HIGH_MASK;
        return mask;
}

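/*
 * Construct the SPTE that caches an emulated MMIO access: it combines the
 * current MMIO generation, the MMIO signal value, the (masked) access bits
 * and the GPA.  When the L1TF mitigation is active, the GPA bits that
 * collide with shadow_nonpresent_or_rsvd_mask are stashed in the bits just
 * above it; the decode side (see get_mmio_spte_gfn()) shifts them back down
 * to recover the original GFN.
 */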
u64 make_mmio_spte(struct kvm_vcpu *vcpu, u64 gfn, unsigned int access)
{
        u64 gen = kvm_vcpu_memslots(vcpu)->generation & MMIO_SPTE_GEN_MASK;
        u64 spte = generation_mmio_spte_mask(gen);
        u64 gpa = gfn << PAGE_SHIFT;

        WARN_ON_ONCE(!shadow_mmio_value);

        access &= shadow_mmio_access_mask;
        spte |= shadow_mmio_value | access;
        spte |= gpa | shadow_nonpresent_or_rsvd_mask;
        spte |= (gpa & shadow_nonpresent_or_rsvd_mask)
                << SHADOW_NONPRESENT_OR_RSVD_MASK_LEN;

        return spte;
}

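/*
 * Decide whether a PFN should be treated as MMIO for the purpose of memtype
 * selection.  PFNs backed by a reserved struct page are usually MMIO, with
 * the PAT-related exception documented below; PFNs with no struct page at
 * all are treated as MMIO unless the raw e820 map says they are RAM.
 */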
static bool kvm_is_mmio_pfn(kvm_pfn_t pfn)
{
        if (pfn_valid(pfn))
                return !is_zero_pfn(pfn) && PageReserved(pfn_to_page(pfn)) &&
                        /*
                         * Some reserved pages, such as those from NVDIMM
                         * DAX devices, are not meant for MMIO, and can be
                         * mapped with cached memory type for better
                         * performance. However, the above check misidentifies
                         * those pages as MMIO, and results in KVM mapping
                         * them with UC memory type, which would hurt
                         * performance. Therefore, we also check the host
                         * memory type and only treat UC/UC-/WC pages as MMIO.
                         */
                        (!pat_enabled() || pat_pfn_immune_to_uc_mtrr(pfn));

        return !e820__mapped_raw_any(pfn_to_hpa(pfn),
                                     pfn_to_hpa(pfn + 1) - 1,
                                     E820_TYPE_RAM);
}

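/*
 * Build a leaf SPTE for the given pfn/gfn and access permissions.  Returns
 * true if the gfn had to be write-protected (i.e. the caller asked for a
 * writable mapping but unsync'ing the existing shadow pages was not
 * allowed); the constructed SPTE is returned via @new_spte either way.
 */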
bool make_spte(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
               struct kvm_memory_slot *slot,
               unsigned int pte_access, gfn_t gfn, kvm_pfn_t pfn,
               u64 old_spte, bool prefetch, bool can_unsync,
               bool host_writable, u64 *new_spte)
{
        int level = sp->role.level;
        u64 spte = SPTE_MMU_PRESENT_MASK;
        bool wrprot = false;

        if (sp->role.ad_disabled)
                spte |= SPTE_TDP_AD_DISABLED_MASK;
        else if (kvm_vcpu_ad_need_write_protect(vcpu))
                spte |= SPTE_TDP_AD_WRPROT_ONLY_MASK;

        /*
         * For the EPT case, shadow_present_mask is 0 if hardware
         * supports exec-only page table entries. In that case,
         * ACC_USER_MASK and shadow_user_mask are used to represent
         * read access. See FNAME(gpte_access) in paging_tmpl.h.
         */
        spte |= shadow_present_mask;
        if (!prefetch)
                spte |= spte_shadow_accessed_mask(spte);

        if (level > PG_LEVEL_4K && (pte_access & ACC_EXEC_MASK) &&
            is_nx_huge_page_enabled()) {
                pte_access &= ~ACC_EXEC_MASK;
        }

        if (pte_access & ACC_EXEC_MASK)
                spte |= shadow_x_mask;
        else
                spte |= shadow_nx_mask;

        if (pte_access & ACC_USER_MASK)
                spte |= shadow_user_mask;

        if (level > PG_LEVEL_4K)
                spte |= PT_PAGE_SIZE_MASK;
        if (tdp_enabled)
                spte |= static_call(kvm_x86_get_mt_mask)(vcpu, gfn,
                        kvm_is_mmio_pfn(pfn));

        if (host_writable)
                spte |= shadow_host_writable_mask;
        else
                pte_access &= ~ACC_WRITE_MASK;

        if (!kvm_is_mmio_pfn(pfn))
                spte |= shadow_me_mask;

        spte |= (u64)pfn << PAGE_SHIFT;

        if (pte_access & ACC_WRITE_MASK) {
                spte |= PT_WRITABLE_MASK | shadow_mmu_writable_mask;

                /*
                 * Optimization: for pte sync, if spte was writable the hash
                 * lookup is unnecessary (and expensive). Write protection is
                 * the responsibility of kvm_mmu_get_page / kvm_mmu_sync_roots.
                 * The same reasoning applies to dirty page accounting.
                 */
                if (is_writable_pte(old_spte))
                        goto out;

                /*
                 * Unsync shadow pages that are reachable by the new, writable
                 * SPTE. Write-protect the SPTE if the page can't be unsync'd,
                 * e.g. it's write-tracked (upper-level SPs) or has one or more
                 * shadow pages and unsync'ing pages is not allowed.
                 */
                if (mmu_try_to_unsync_pages(vcpu, slot, gfn, can_unsync, prefetch)) {
                        pgprintk("%s: found shadow page for %llx, marking ro\n",
                                 __func__, gfn);
                        wrprot = true;
                        pte_access &= ~ACC_WRITE_MASK;
                        spte &= ~(PT_WRITABLE_MASK | shadow_mmu_writable_mask);
                }
        }

        if (pte_access & ACC_WRITE_MASK)
                spte |= spte_shadow_dirty_mask(spte);

out:
        if (prefetch)
                spte = mark_spte_for_access_track(spte);

        WARN_ONCE(is_rsvd_spte(&vcpu->arch.mmu->shadow_zero_check, spte, level),
                  "spte = 0x%llx, level = %d, rsvd bits = 0x%llx", spte, level,
                  get_rsvd_bits(&vcpu->arch.mmu->shadow_zero_check, spte, level));

        if ((spte & PT_WRITABLE_MASK) && kvm_slot_dirty_track_enabled(slot)) {
                /* Enforced by kvm_mmu_hugepage_adjust. */
                WARN_ON(level > PG_LEVEL_4K);
                mark_page_dirty_in_slot(vcpu->kvm, slot, gfn);
        }

        *new_spte = spte;
        return wrprot;
}

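/*
 * Build a non-leaf SPTE that points at a child page table.  Non-leaf SPTEs
 * are created readable/writable/executable; the effective permissions of a
 * translation are governed by the leaf SPTE.
 */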
u64 make_nonleaf_spte(u64 *child_pt, bool ad_disabled)
{
        u64 spte = SPTE_MMU_PRESENT_MASK;

        spte |= __pa(child_pt) | shadow_present_mask | PT_WRITABLE_MASK |
                shadow_user_mask | shadow_x_mask | shadow_me_mask;

        if (ad_disabled)
                spte |= SPTE_TDP_AD_DISABLED_MASK;
        else
                spte |= shadow_accessed_mask;

        return spte;
}

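/*
 * Rebuild an SPTE when the mmu_notifier reports that the backing pfn has
 * changed: keep the old attributes, swap in the new pfn, drop writability
 * and mark the SPTE for access tracking so the next access takes a fault.
 */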
u64 kvm_mmu_changed_pte_notifier_make_spte(u64 old_spte, kvm_pfn_t new_pfn)
{
        u64 new_spte;

        new_spte = old_spte & ~PT64_BASE_ADDR_MASK;
        new_spte |= (u64)new_pfn << PAGE_SHIFT;

        new_spte &= ~PT_WRITABLE_MASK;
        new_spte &= ~shadow_host_writable_mask;

        new_spte = mark_spte_for_access_track(new_spte);

        return new_spte;
}

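/*
 * Query the number of physical address bits KVM should treat as usable in
 * SPTEs.  CPUID.80000008H:EAX[7:0] reports MAXPHYADDR directly (typically
 * somewhere between 36 and 52 depending on the CPU).
 */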
static u8 kvm_get_shadow_phys_bits(void)
{
        /*
         * boot_cpu_data.x86_phys_bits is reduced when MKTME or SME are detected
         * in CPU detection code, but the processor treats those reduced bits as
         * 'keyID' thus they are not reserved bits. Therefore KVM needs to look at
         * the physical address bits reported by CPUID.
         */
        if (likely(boot_cpu_data.extended_cpuid_level >= 0x80000008))
                return cpuid_eax(0x80000008) & 0xff;

        /*
         * Quite weird to have VMX or SVM but not MAXPHYADDR; probably a VM with
         * custom CPUID. Proceed with whatever the kernel found since these features
         * aren't virtualizable (SME/SEV also require CPUIDs higher than 0x80000008).
         */
        return boot_cpu_data.x86_phys_bits;
}

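/*
 * Convert an SPTE into an access-tracking SPTE.  If A/D bits are in use,
 * clearing the Accessed bit is sufficient.  Otherwise the permission bits
 * covered by SHADOW_ACC_TRACK_SAVED_BITS_MASK are stashed at a higher bit
 * position and shadow_acc_track_mask is cleared, so that the next access
 * faults and the fast page fault path can restore the original bits.
 */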
u64 mark_spte_for_access_track(u64 spte)
{
        if (spte_ad_enabled(spte))
                return spte & ~shadow_accessed_mask;

        if (is_access_track_spte(spte))
                return spte;

        /*
         * Making an Access Tracking PTE will result in removal of write access
         * from the PTE. So, verify that we will be able to restore the write
         * access in the fast page fault path later on.
         */
        WARN_ONCE((spte & PT_WRITABLE_MASK) &&
                  !spte_can_locklessly_be_made_writable(spte),
                  "kvm: Writable SPTE is not locklessly dirty-trackable\n");

        WARN_ONCE(spte & (SHADOW_ACC_TRACK_SAVED_BITS_MASK <<
                          SHADOW_ACC_TRACK_SAVED_BITS_SHIFT),
                  "kvm: Access Tracking saved bit locations are not zero\n");

        spte |= (spte & SHADOW_ACC_TRACK_SAVED_BITS_MASK) <<
                SHADOW_ACC_TRACK_SAVED_BITS_SHIFT;
        spte &= ~shadow_acc_track_mask;

        return spte;
}

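/*
 * Set the value/mask pair used to tag and recognize MMIO SPTEs.  Any
 * configuration that could be confused with the L1TF mitigation bits or
 * with a REMOVED_SPTE disables MMIO caching (mmio_value = 0) instead of
 * risking a mis-identified SPTE.
 */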
void kvm_mmu_set_mmio_spte_mask(u64 mmio_value, u64 mmio_mask, u64 access_mask)
{
        BUG_ON((u64)(unsigned)access_mask != access_mask);
        WARN_ON(mmio_value & shadow_nonpresent_or_rsvd_lower_gfn_mask);

        if (!enable_mmio_caching)
                mmio_value = 0;

        /*
         * Disable MMIO caching if the MMIO value collides with the bits that
         * are used to hold the relocated GFN when the L1TF mitigation is
         * enabled. This should never fire as there is no known hardware that
         * can trigger this condition, e.g. SME/SEV CPUs that require a custom
         * MMIO value are not susceptible to L1TF.
         */
        if (WARN_ON(mmio_value & (shadow_nonpresent_or_rsvd_mask <<
                                  SHADOW_NONPRESENT_OR_RSVD_MASK_LEN)))
                mmio_value = 0;

        /*
         * The masked MMIO value must obviously match itself and a removed SPTE
         * must not get a false positive. Removed SPTEs and MMIO SPTEs should
         * never collide as MMIO must set some RWX bits, and removed SPTEs must
         * not set any RWX bits.
         */
        if (WARN_ON((mmio_value & mmio_mask) != mmio_value) ||
            WARN_ON(mmio_value && (REMOVED_SPTE & mmio_mask) == mmio_value))
                mmio_value = 0;

        shadow_mmio_value = mmio_value;
        shadow_mmio_mask = mmio_mask;
        shadow_mmio_access_mask = access_mask;
}
EXPORT_SYMBOL_GPL(kvm_mmu_set_mmio_spte_mask);

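/*
 * Illustrative only: the VMX code is expected to call this once EPT support
 * has been probed, along the lines of (names taken from vmx.c, shown here
 * as an assumption rather than a verbatim quote):
 *
 *	if (enable_ept)
 *		kvm_mmu_set_ept_masks(enable_ept_ad_bits,
 *				      cpu_has_vmx_ept_execute_only());
 */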
void kvm_mmu_set_ept_masks(bool has_ad_bits, bool has_exec_only)
{
        shadow_user_mask = VMX_EPT_READABLE_MASK;
        shadow_accessed_mask = has_ad_bits ? VMX_EPT_ACCESS_BIT : 0ull;
        shadow_dirty_mask = has_ad_bits ? VMX_EPT_DIRTY_BIT : 0ull;
        shadow_nx_mask = 0ull;
        shadow_x_mask = VMX_EPT_EXECUTABLE_MASK;
        shadow_present_mask = has_exec_only ? 0ull : VMX_EPT_READABLE_MASK;
        shadow_acc_track_mask = VMX_EPT_RWX_MASK;
        shadow_me_mask = 0ull;

        shadow_host_writable_mask = EPT_SPTE_HOST_WRITABLE;
        shadow_mmu_writable_mask = EPT_SPTE_MMU_WRITABLE;

        /*
         * EPT Misconfigurations are generated if the value of bits 2:0
         * of an EPT paging-structure entry is 110b (write/execute).
         */
        kvm_mmu_set_mmio_spte_mask(VMX_EPT_MISCONFIG_WX_VALUE,
                                   VMX_EPT_RWX_MASK, 0);
}
EXPORT_SYMBOL_GPL(kvm_mmu_set_ept_masks);

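/*
 * Reset every SPTE mask to the legacy/default (non-EPT) values and
 * recompute the L1TF mitigation masks.  Vendor code may subsequently
 * override the EPT-specific masks via kvm_mmu_set_ept_masks().
 */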
void kvm_mmu_reset_all_pte_masks(void)
{
        u8 low_phys_bits;
        u64 mask;

        shadow_phys_bits = kvm_get_shadow_phys_bits();

        /*
         * If the CPU has 46 or fewer physical address bits, then set an
         * appropriate mask to guard against L1TF attacks. Otherwise, it is
         * assumed that the CPU is not vulnerable to L1TF.
         *
         * Some Intel CPUs address the L1 cache using more PA bits than are
         * reported by CPUID. Use the PA width of the L1 cache when possible
         * to achieve more effective mitigation, e.g. if system RAM overlaps
         * the most significant bits of legal physical address space.
         */
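        /*
         * Worked example (assuming SHADOW_NONPRESENT_OR_RSVD_MASK_LEN == 5):
         * with x86_cache_bits == 46, low_phys_bits becomes 41 and
         * shadow_nonpresent_or_rsvd_mask covers PA bits 45:41.  Those bits
         * are set in MMIO SPTEs (see make_mmio_spte()), so a speculatively
         * loaded non-present translation points outside addressable RAM.
         */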
        shadow_nonpresent_or_rsvd_mask = 0;
        low_phys_bits = boot_cpu_data.x86_phys_bits;
        if (boot_cpu_has_bug(X86_BUG_L1TF) &&
            !WARN_ON_ONCE(boot_cpu_data.x86_cache_bits >=
                          52 - SHADOW_NONPRESENT_OR_RSVD_MASK_LEN)) {
                low_phys_bits = boot_cpu_data.x86_cache_bits
                        - SHADOW_NONPRESENT_OR_RSVD_MASK_LEN;
                shadow_nonpresent_or_rsvd_mask =
                        rsvd_bits(low_phys_bits, boot_cpu_data.x86_cache_bits - 1);
        }

        shadow_nonpresent_or_rsvd_lower_gfn_mask =
                GENMASK_ULL(low_phys_bits - 1, PAGE_SHIFT);

        shadow_user_mask = PT_USER_MASK;
        shadow_accessed_mask = PT_ACCESSED_MASK;
        shadow_dirty_mask = PT_DIRTY_MASK;
        shadow_nx_mask = PT64_NX_MASK;
        shadow_x_mask = 0;
        shadow_present_mask = PT_PRESENT_MASK;
        shadow_acc_track_mask = 0;
        shadow_me_mask = sme_me_mask;

        shadow_host_writable_mask = DEFAULT_SPTE_HOST_WRITEABLE;
        shadow_mmu_writable_mask = DEFAULT_SPTE_MMU_WRITEABLE;

        /*
         * Set a reserved PA bit in MMIO SPTEs to generate page faults with
         * PFEC.RSVD=1 on MMIO accesses. 64-bit PTEs (PAE, x86-64, and EPT
         * paging) support a maximum of 52 bits of PA, i.e. if the CPU supports
         * 52-bit physical addresses then there are no reserved PA bits in the
         * PTEs and so the reserved PA approach must be disabled.
         */
        if (shadow_phys_bits < 52)
                mask = BIT_ULL(51) | PT_PRESENT_MASK;
        else
                mask = 0;

        kvm_mmu_set_mmio_spte_mask(mask, mask, ACC_WRITE_MASK | ACC_USER_MASK);
}