#include <xen/cpu.h>
#include <xen/cpumask.h>
#include <xen/init.h>
#include <xen/mm.h>
#include <xen/param.h>
#include <xen/sizes.h>
#include <xen/smp.h>
#include <xen/spinlock.h>
#include <xen/vmap.h>
#include <xen/warning.h>
#include <xen/notifier.h>
#include <asm/cpufeature.h>
#include <asm/cpuerrata.h>
#include <asm/insn.h>
#include <asm/psci.h>

/* Override macros from asm/page.h to make them work with mfn_t */
#undef virt_to_mfn
#define virt_to_mfn(va) _mfn(__virt_to_mfn(va))

/* Hardening Branch predictor code for Arm64 */
#ifdef CONFIG_ARM64_HARDEN_BRANCH_PREDICTOR

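/*
 * The AArch64 exception vector table is 2KB: 16 entries of 32
 * instructions (128 bytes) each.
 */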
#define VECTOR_TABLE_SIZE SZ_2K

/*
 * Number of available slots for the hardened vector tables (this
 * should be kept in sync with arch/arm64/bpi.S).
 */
#define NR_BPI_HYP_VECS 4

extern char __bp_harden_hyp_vecs_start[], __bp_harden_hyp_vecs_end[];

/*
 * Key for each slot. This is used to find whether a specific workaround
 * has already been assigned a slot.
 *
 * The key is the virtual address of the vector workaround.
 */
static uintptr_t bp_harden_slot_key[NR_BPI_HYP_VECS];

/*
 * [hyp_vec_start, hyp_vec_end) corresponds to the first 31 instructions
 * of each vector. The last (i.e. 32nd) instruction is used to branch to
 * the original entry.
 *
 * Those instructions will be copied on each vector to harden them.
 */
static bool copy_hyp_vect_bpi(unsigned int slot, const char *hyp_vec_start,
                              const char *hyp_vec_end)
{
    void *dst_remapped;
    const void *dst = __bp_harden_hyp_vecs_start + slot * VECTOR_TABLE_SIZE;
    unsigned int i;
    mfn_t dst_mfn = virt_to_mfn(dst);

    BUG_ON(((hyp_vec_end - hyp_vec_start) / 4) > 31);

    /*
     * Vectors are part of the text section, which is mapped read-only.
     * So re-map the vector table to be able to update the vectors.
     */
    dst_remapped = __vmap(&dst_mfn,
                          1UL << get_order_from_bytes(VECTOR_TABLE_SIZE),
                          1, 1, PAGE_HYPERVISOR, VMAP_DEFAULT);
    if ( !dst_remapped )
        return false;

    dst_remapped += (vaddr_t)dst & ~PAGE_MASK;

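    /*
     * Each vector entry is 0x80 bytes (32 instructions), so stamp the
     * hardened sequence at the start of all 16 entries of the 2KB table.
     */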
    for ( i = 0; i < VECTOR_TABLE_SIZE; i += 0x80 )
    {
        memcpy(dst_remapped + i, hyp_vec_start, hyp_vec_end - hyp_vec_start);
    }

    clean_dcache_va_range(dst_remapped, VECTOR_TABLE_SIZE);
    invalidate_icache();

    vunmap((void *)((vaddr_t)dst_remapped & PAGE_MASK));

    return true;
}

static bool __maybe_unused
install_bp_hardening_vec(const struct arm_cpu_capabilities *entry,
                         const char *hyp_vec_start,
                         const char *hyp_vec_end,
                         const char *desc)
{
    static int last_slot = -1;
    static DEFINE_SPINLOCK(bp_lock);
    unsigned int i, slot = -1;
    bool ret = true;

    /*
     * Enable callbacks are called on every CPU based on the
     * capabilities. So double-check whether the CPU matches the
     * entry.
     */
    if ( !entry->matches(entry) )
        return true;

    printk(XENLOG_INFO "CPU%u will %s on exception entry\n",
           smp_processor_id(), desc);

    /*
     * No need to install the hardened vector when the processor has
     * ID_AA64PFR0_EL1.CSV2 set.
     */
    if ( cpu_data[smp_processor_id()].pfr64.csv2 )
        return true;

    spin_lock(&bp_lock);

    /*
     * Look up whether the hardening vector had a slot already
     * assigned.
     */
    for ( i = 0; i < NR_BPI_HYP_VECS; i++ )
    {
        if ( bp_harden_slot_key[i] == (uintptr_t)hyp_vec_start )
        {
            slot = i;
            break;
        }
    }

    if ( slot == -1 )
    {
        last_slot++;
        /* Check we don't overrun the number of slots available. */
        BUG_ON(NR_BPI_HYP_VECS <= last_slot);

        slot = last_slot;
        ret = copy_hyp_vect_bpi(slot, hyp_vec_start, hyp_vec_end);

        /* Only update the slot if the copy succeeded. */
        if ( ret )
            bp_harden_slot_key[slot] = (uintptr_t)hyp_vec_start;
    }

    if ( ret )
    {
        /* Install the new vector table. */
        WRITE_SYSREG((vaddr_t)(__bp_harden_hyp_vecs_start + slot * VECTOR_TABLE_SIZE),
                     VBAR_EL2);
        isb();
    }

    spin_unlock(&bp_lock);

    return ret;
}

extern char __smccc_workaround_1_smc_start[], __smccc_workaround_1_smc_end[];

static int enable_smccc_arch_workaround_1(void *data)
{
    struct arm_smccc_res res;
    static bool warned = false;
    const struct arm_cpu_capabilities *entry = data;

    /*
     * Enable callbacks are called on every CPU based on the
     * capabilities. So double-check whether the CPU matches the
     * entry.
     */
    if ( !entry->matches(entry) )
        return 0;

    if ( smccc_ver < SMCCC_VERSION(1, 1) )
        goto warn;

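    /*
     * Probe the firmware: per SMCCC v1.1, ARCH_FEATURES returns a
     * negative value when ARCH_WORKAROUND_1 is not implemented.
     */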
    arm_smccc_1_1_smc(ARM_SMCCC_ARCH_FEATURES_FID,
                      ARM_SMCCC_ARCH_WORKAROUND_1_FID, &res);
    /* The return value is in the lower 32-bits. */
    if ( (int)res.a0 < 0 )
        goto warn;

    return !install_bp_hardening_vec(entry, __smccc_workaround_1_smc_start,
                                     __smccc_workaround_1_smc_end,
                                     "call ARM_SMCCC_ARCH_WORKAROUND_1");

warn:
    if ( !warned )
    {
        ASSERT(system_state < SYS_STATE_active);
        warning_add("No support for ARM_SMCCC_ARCH_WORKAROUND_1.\n"
                    "Please update your firmware.\n");
        warned = true;
    }

    return 0;
}

#endif /* CONFIG_ARM64_HARDEN_BRANCH_PREDICTOR */

/* Hardening Branch predictor code for Arm32 */
#ifdef CONFIG_ARM32_HARDEN_BRANCH_PREDICTOR

/*
 * Per-CPU vector tables to use when returning to the guests. They will
 * only be used on platforms that require the branch predictor to be
 * hardened.
 */
DEFINE_PER_CPU_READ_MOSTLY(const char *, bp_harden_vecs);

extern char hyp_traps_vector_bp_inv[];
extern char hyp_traps_vector_ic_inv[];

static void __maybe_unused
install_bp_hardening_vecs(const struct arm_cpu_capabilities *entry,
                          const char *hyp_vecs, const char *desc)
{
    /*
     * Enable callbacks are called on every CPU based on the
     * capabilities. So double-check whether the CPU matches the
     * entry.
     */
    if ( !entry->matches(entry) )
        return;

    printk(XENLOG_INFO "CPU%u will %s on guest exit\n",
           smp_processor_id(), desc);
    this_cpu(bp_harden_vecs) = hyp_vecs;
}


static int enable_bp_inv_hardening(void *data)
{
    install_bp_hardening_vecs(data, hyp_traps_vector_bp_inv,
                              "execute BPIALL");
    return 0;
}

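/*
 * Cortex-A15 uses an I-cache invalidation (ICIALLU) instead, as BPIALL
 * is not sufficient to invalidate the branch predictor on that core.
 */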
static int enable_ic_inv_hardening(void *data)
{
    install_bp_hardening_vecs(data, hyp_traps_vector_ic_inv,
                              "execute ICIALLU");
    return 0;
}

#endif

#ifdef CONFIG_ARM_SSBD

enum ssbd_state ssbd_state = ARM_SSBD_RUNTIME;

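/*
 * Parse the "spec-ctrl=" command line option. For example,
 * "spec-ctrl=ssbd=force-disable" turns the SSBD mitigation off,
 * "ssbd=runtime" (the default) lets it be flipped at runtime via
 * ARCH_WORKAROUND_2, and "ssbd=force-enable" keeps it always on.
 */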
static int __init parse_spec_ctrl(const char *s)
{
    const char *ss;
    int rc = 0;

    do {
        ss = strchr(s, ',');
        if ( !ss )
            ss = strchr(s, '\0');

        if ( !strncmp(s, "ssbd=", 5) )
        {
            s += 5;

            if ( !cmdline_strcmp(s, "force-disable") )
                ssbd_state = ARM_SSBD_FORCE_DISABLE;
            else if ( !cmdline_strcmp(s, "runtime") )
                ssbd_state = ARM_SSBD_RUNTIME;
            else if ( !cmdline_strcmp(s, "force-enable") )
                ssbd_state = ARM_SSBD_FORCE_ENABLE;
            else
                rc = -EINVAL;
        }
        else
            rc = -EINVAL;

        s = ss + 1;
    } while ( *ss );

    return rc;
}
custom_param("spec-ctrl", parse_spec_ctrl);

/* Arm64 only for now as for Arm32 the workaround is currently handled in C. */
#ifdef CONFIG_ARM_64
void __init arm_enable_wa2_handling(const struct alt_instr *alt,
                                    const uint32_t *origptr,
                                    uint32_t *updptr, int nr_inst)
{
    BUG_ON(nr_inst != 1);

    /*
     * Only allow mitigation on guest ARCH_WORKAROUND_2 if the SSBD
     * state allows it to be flipped.
     */
    if ( get_ssbd_state() == ARM_SSBD_RUNTIME )
        *updptr = aarch64_insn_gen_nop();
}
#endif


/*
 * Assembly code may use the variable directly, so we need to make sure
 * it fits in a register.
 */
DEFINE_PER_CPU_READ_MOSTLY(register_t, ssbd_callback_required);

static bool has_ssbd_mitigation(const struct arm_cpu_capabilities *entry)
{
    struct arm_smccc_res res;
    bool required;

    if ( smccc_ver < SMCCC_VERSION(1, 1) )
        return false;

    arm_smccc_1_1_smc(ARM_SMCCC_ARCH_FEATURES_FID,
                      ARM_SMCCC_ARCH_WORKAROUND_2_FID, &res);

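    /*
     * Per SMCCC v1.1, ARCH_FEATURES(ARCH_WORKAROUND_2) returns 0 when
     * the workaround is implemented and required, 1 when it is
     * implemented but not required on this CPU, and a negative value
     * when it is either unsupported or not required at all.
     */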
    switch ( (int)res.a0 )
    {
    case ARM_SMCCC_NOT_SUPPORTED:
        ssbd_state = ARM_SSBD_UNKNOWN;
        return false;

    case ARM_SMCCC_NOT_REQUIRED:
        ssbd_state = ARM_SSBD_MITIGATED;
        return false;

    case ARM_SMCCC_SUCCESS:
        required = true;
        break;

    case 1: /* Mitigation not required on this CPU. */
        required = false;
        break;

    default:
        ASSERT_UNREACHABLE();
        return false;
    }


    switch ( ssbd_state )
    {
    case ARM_SSBD_FORCE_DISABLE:
        printk_once("%s disabled from command-line\n", entry->desc);

        arm_smccc_1_1_smc(ARM_SMCCC_ARCH_WORKAROUND_2_FID, 0, NULL);
        required = false;
        break;

    case ARM_SSBD_RUNTIME:
        if ( required )
        {
            this_cpu(ssbd_callback_required) = 1;
            arm_smccc_1_1_smc(ARM_SMCCC_ARCH_WORKAROUND_2_FID, 1, NULL);
        }

        break;

    case ARM_SSBD_FORCE_ENABLE:
        printk_once("%s forced from command-line\n", entry->desc);

        arm_smccc_1_1_smc(ARM_SMCCC_ARCH_WORKAROUND_2_FID, 1, NULL);
        required = true;
        break;

    default:
        ASSERT_UNREACHABLE();
        return false;
    }

    return required;
}
#endif

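/*
 * The min/max arguments of MIDR_RANGE() encode "rXpY" as
 * (X << MIDR_VARIANT_SHIFT) | Y, i.e. the variant in the upper bits
 * and the revision in the lower bits.
 */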
#define MIDR_RANGE(model, min, max)     \
    .matches = is_affected_midr_range,  \
    .midr_model = model,                \
    .midr_range_min = min,              \
    .midr_range_max = max

#define MIDR_ALL_VERSIONS(model)        \
    .matches = is_affected_midr_range,  \
    .midr_model = model,                \
    .midr_range_min = 0,                \
    .midr_range_max = (MIDR_VARIANT_MASK | MIDR_REVISION_MASK)

static bool __maybe_unused
is_affected_midr_range(const struct arm_cpu_capabilities *entry)
{
    return MIDR_IS_CPU_MODEL_RANGE(current_cpu_data.midr.bits, entry->midr_model,
                                   entry->midr_range_min,
                                   entry->midr_range_max);
}

static const struct arm_cpu_capabilities arm_errata[] = {
    {
        /* Cortex-A15 r0p4 */
        .desc = "ARM erratum 766422",
        .capability = ARM32_WORKAROUND_766422,
        MIDR_RANGE(MIDR_CORTEX_A15, 0x04, 0x04),
    },
#if defined(CONFIG_ARM64_ERRATUM_827319) || \
    defined(CONFIG_ARM64_ERRATUM_824069)
    {
        /* Cortex-A53 r0p[012] */
        .desc = "ARM errata 827319, 824069",
        .capability = ARM64_WORKAROUND_CLEAN_CACHE,
        MIDR_RANGE(MIDR_CORTEX_A53, 0x00, 0x02),
    },
#endif
#ifdef CONFIG_ARM64_ERRATUM_819472
    {
        /* Cortex-A53 r0p[01] */
        .desc = "ARM erratum 819472",
        .capability = ARM64_WORKAROUND_CLEAN_CACHE,
        MIDR_RANGE(MIDR_CORTEX_A53, 0x00, 0x01),
    },
#endif
#ifdef CONFIG_ARM64_ERRATUM_832075
    {
        /* Cortex-A57 r0p0 - r1p2 */
        .desc = "ARM erratum 832075",
        .capability = ARM64_WORKAROUND_DEVICE_LOAD_ACQUIRE,
        MIDR_RANGE(MIDR_CORTEX_A57, 0x00,
                   (1 << MIDR_VARIANT_SHIFT) | 2),
    },
#endif
#ifdef CONFIG_ARM64_ERRATUM_834220
    {
        /* Cortex-A57 r0p0 - r1p2 */
        .desc = "ARM erratum 834220",
        .capability = ARM64_WORKAROUND_834220,
        MIDR_RANGE(MIDR_CORTEX_A57, 0x00,
                   (1 << MIDR_VARIANT_SHIFT) | 2),
    },
#endif
#ifdef CONFIG_ARM64_HARDEN_BRANCH_PREDICTOR
    {
        .capability = ARM_HARDEN_BRANCH_PREDICTOR,
        MIDR_ALL_VERSIONS(MIDR_CORTEX_A57),
        .enable = enable_smccc_arch_workaround_1,
    },
    {
        .capability = ARM_HARDEN_BRANCH_PREDICTOR,
        MIDR_ALL_VERSIONS(MIDR_CORTEX_A72),
        .enable = enable_smccc_arch_workaround_1,
    },
    {
        .capability = ARM_HARDEN_BRANCH_PREDICTOR,
        MIDR_ALL_VERSIONS(MIDR_CORTEX_A73),
        .enable = enable_smccc_arch_workaround_1,
    },
    {
        .capability = ARM_HARDEN_BRANCH_PREDICTOR,
        MIDR_ALL_VERSIONS(MIDR_CORTEX_A75),
        .enable = enable_smccc_arch_workaround_1,
    },
#endif
#ifdef CONFIG_ARM32_HARDEN_BRANCH_PREDICTOR
    {
        .capability = ARM_HARDEN_BRANCH_PREDICTOR,
        MIDR_ALL_VERSIONS(MIDR_CORTEX_A12),
        .enable = enable_bp_inv_hardening,
    },
    {
        .capability = ARM_HARDEN_BRANCH_PREDICTOR,
        MIDR_ALL_VERSIONS(MIDR_CORTEX_A17),
        .enable = enable_bp_inv_hardening,
    },
    {
        .capability = ARM_HARDEN_BRANCH_PREDICTOR,
        MIDR_ALL_VERSIONS(MIDR_CORTEX_A15),
        .enable = enable_ic_inv_hardening,
    },
#endif
#ifdef CONFIG_ARM_SSBD
    {
        .desc = "Speculative Store Bypass Disabled",
        .capability = ARM_SSBD,
        .matches = has_ssbd_mitigation,
    },
#endif
    {
        /* Neoverse N1 r0p0 - r2p0 */
        .desc = "ARM erratum 1165522",
        .capability = ARM64_WORKAROUND_AT_SPECULATE,
        MIDR_RANGE(MIDR_NEOVERSE_N1, 0, 2 << MIDR_VARIANT_SHIFT),
    },
    {
        /* Cortex-A76 r0p0 - r2p0 */
        .desc = "ARM erratum 1165522",
        .capability = ARM64_WORKAROUND_AT_SPECULATE,
        MIDR_RANGE(MIDR_CORTEX_A76, 0, 2 << MIDR_VARIANT_SHIFT),
    },
    {
        .desc = "ARM erratum 1319537",
        .capability = ARM64_WORKAROUND_AT_SPECULATE,
        MIDR_ALL_VERSIONS(MIDR_CORTEX_A72),
    },
    {
        .desc = "ARM erratum 1319367",
        .capability = ARM64_WORKAROUND_AT_SPECULATE,
        MIDR_ALL_VERSIONS(MIDR_CORTEX_A57),
    },
    {},
};


void check_local_cpu_errata(void)
{
    update_cpu_capabilities(arm_errata, "enabled workaround for");
}

void __init enable_errata_workarounds(void)
{
    enable_cpu_capabilities(arm_errata);
}

static int cpu_errata_callback(struct notifier_block *nfb,
                               unsigned long action,
                               void *hcpu)
{
    int rc = 0;

    switch ( action )
    {
    case CPU_STARTING:
        /*
         * At CPU_STARTING phase no notifier shall return an error, because the
         * system is designed with the assumption that starting a CPU cannot
         * fail at this point. If an error happens here it will cause Xen to hit
         * the BUG_ON() in notify_cpu_starting(). In future, either this
         * notifier/enabling capabilities should be fixed to always return
         * success/void or notify_cpu_starting() and other common code should be
         * fixed to expect an error at CPU_STARTING phase.
         */
        ASSERT(system_state != SYS_STATE_boot);
        rc = enable_nonboot_cpu_caps(arm_errata);
        break;
    default:
        break;
    }

    return !rc ? NOTIFY_DONE : notifier_from_errno(rc);
}

static struct notifier_block cpu_errata_nfb = {
    .notifier_call = cpu_errata_callback,
};

static int __init cpu_errata_notifier_init(void)
{
    register_cpu_notifier(&cpu_errata_nfb);

    return 0;
}
/*
 * Initialization has to be done at the init rather than the presmp_init
 * phase, because the callback should only execute after the secondary
 * CPUs have initially been booted (i.e. in hotplug scenarios, when the
 * system state is no longer boot). On boot, the enabling of errata
 * workarounds is triggered by the boot CPU from start_xen().
 */
__initcall(cpu_errata_notifier_init);

/*
 * Local variables:
 * mode: C
 * c-file-style: "BSD"
 * c-basic-offset: 4
 * indent-tabs-mode: nil
 * End:
 */