1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Handle detection, reporting and mitigation of Spectre v1, v2, v3a and v4, as
4  * detailed at:
5  *
6  *   https://developer.arm.com/support/arm-security-updates/speculative-processor-vulnerability
7  *
8  * This code was originally written hastily under an awful lot of stress and so
9  * aspects of it are somewhat hacky. Unfortunately, changing anything in here
10  * instantly makes me feel ill. Thanks, Jann. Thann.
11  *
12  * Copyright (C) 2018 ARM Ltd, All Rights Reserved.
13  * Copyright (C) 2020 Google LLC
14  *
15  * "If there's something strange in your neighbourhood, who you gonna call?"
16  *
17  * Authors: Will Deacon <will@kernel.org> and Marc Zyngier <maz@kernel.org>
18  */
19 
20 #include <linux/arm-smccc.h>
21 #include <linux/cpu.h>
22 #include <linux/device.h>
23 #include <linux/nospec.h>
24 #include <linux/prctl.h>
25 #include <linux/sched/task_stack.h>
26 
27 #include <asm/insn.h>
28 #include <asm/spectre.h>
29 #include <asm/traps.h>
30 #include <asm/virt.h>
31 
32 /*
33  * We try to ensure that the mitigation state can never change as the result of
34  * onlining a late CPU.
35  */
/*
 * Monotonically raise *oldp to @new. The state may only escalate; a @new
 * value less than or equal to the current state is ignored. Escalation is
 * also refused (with a warning) once the system capabilities have been
 * finalized, since the reported state may already have been consumed.
 */
static void update_mitigation_state(enum mitigation_state *oldp,
				    enum mitigation_state new)
{
	enum mitigation_state state;

	/* Lock-free update: retry if *oldp changed underneath us. */
	do {
		state = READ_ONCE(*oldp);
		if (new <= state)
			break;

		/* Userspace almost certainly can't deal with this. */
		if (WARN_ON(system_capabilities_finalized()))
			break;
	} while (cmpxchg_relaxed(oldp, state, new) != state);
}
51 
52 /*
53  * Spectre v1.
54  *
55  * The kernel can't protect userspace for this one: it's each person for
56  * themselves. Advertise what we're doing and be done with it.
57  */
cpu_show_spectre_v1(struct device * dev,struct device_attribute * attr,char * buf)58 ssize_t cpu_show_spectre_v1(struct device *dev, struct device_attribute *attr,
59 			    char *buf)
60 {
61 	return sprintf(buf, "Mitigation: __user pointer sanitization\n");
62 }
63 
64 /*
65  * Spectre v2.
66  *
67  * This one sucks. A CPU is either:
68  *
69  * - Mitigated in hardware and advertised by ID_AA64PFR0_EL1.CSV2.
70  * - Mitigated in hardware and listed in our "safe list".
71  * - Mitigated in software by firmware.
72  * - Mitigated in software by a CPU-specific dance in the kernel and a
73  *   firmware call at EL2.
74  * - Vulnerable.
75  *
76  * It's not unlikely for different CPUs in a big.LITTLE system to fall into
77  * different camps.
78  */
/* System-wide Spectre-v2 state, only raised via update_mitigation_state(). */
static enum mitigation_state spectre_v2_state;

/* Set when "nospectre_v2" appears on the kernel command line. */
static bool __read_mostly __nospectre_v2;
static int __init parse_spectre_v2_param(char *str)
{
	/* The parameter takes no value; its presence alone disables the mitigation. */
	__nospectre_v2 = true;
	return 0;
}
early_param("nospectre_v2", parse_spectre_v2_param);
88 
spectre_v2_mitigations_off(void)89 static bool spectre_v2_mitigations_off(void)
90 {
91 	bool ret = __nospectre_v2 || cpu_mitigations_off();
92 
93 	if (ret)
94 		pr_info_once("spectre-v2 mitigation disabled by command line option\n");
95 
96 	return ret;
97 }
98 
cpu_show_spectre_v2(struct device * dev,struct device_attribute * attr,char * buf)99 ssize_t cpu_show_spectre_v2(struct device *dev, struct device_attribute *attr,
100 			    char *buf)
101 {
102 	switch (spectre_v2_state) {
103 	case SPECTRE_UNAFFECTED:
104 		return sprintf(buf, "Not affected\n");
105 	case SPECTRE_MITIGATED:
106 		return sprintf(buf, "Mitigation: Branch predictor hardening\n");
107 	case SPECTRE_VULNERABLE:
108 		fallthrough;
109 	default:
110 		return sprintf(buf, "Vulnerable\n");
111 	}
112 }
113 
/*
 * Hardware view of Spectre-v2 for the current CPU: unaffected if it
 * advertises CSV2 in ID_AA64PFR0_EL1 or appears in the known-safe MIDR
 * list below; otherwise vulnerable. This check alone never returns
 * SPECTRE_MITIGATED — software/firmware mitigation is handled elsewhere.
 */
static enum mitigation_state spectre_v2_get_cpu_hw_mitigation_state(void)
{
	u64 pfr0;
	static const struct midr_range spectre_v2_safe_list[] = {
		MIDR_ALL_VERSIONS(MIDR_CORTEX_A35),
		MIDR_ALL_VERSIONS(MIDR_CORTEX_A53),
		MIDR_ALL_VERSIONS(MIDR_CORTEX_A55),
		MIDR_ALL_VERSIONS(MIDR_BRAHMA_B53),
		MIDR_ALL_VERSIONS(MIDR_HISI_TSV110),
		MIDR_ALL_VERSIONS(MIDR_QCOM_KRYO_2XX_SILVER),
		MIDR_ALL_VERSIONS(MIDR_QCOM_KRYO_3XX_SILVER),
		MIDR_ALL_VERSIONS(MIDR_QCOM_KRYO_4XX_SILVER),
		{ /* sentinel */ }
	};

	/* If the CPU has CSV2 set, we're safe */
	pfr0 = read_cpuid(ID_AA64PFR0_EL1);
	if (cpuid_feature_extract_unsigned_field(pfr0, ID_AA64PFR0_CSV2_SHIFT))
		return SPECTRE_UNAFFECTED;

	/* Alternatively, we have a list of unaffected CPUs */
	if (is_midr_in_range_list(read_cpuid_id(), spectre_v2_safe_list))
		return SPECTRE_UNAFFECTED;

	return SPECTRE_VULNERABLE;
}
140 
spectre_v2_get_cpu_fw_mitigation_state(void)141 static enum mitigation_state spectre_v2_get_cpu_fw_mitigation_state(void)
142 {
143 	int ret;
144 	struct arm_smccc_res res;
145 
146 	arm_smccc_1_1_invoke(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
147 			     ARM_SMCCC_ARCH_WORKAROUND_1, &res);
148 
149 	ret = res.a0;
150 	switch (ret) {
151 	case SMCCC_RET_SUCCESS:
152 		return SPECTRE_MITIGATED;
153 	case SMCCC_ARCH_WORKAROUND_RET_UNAFFECTED:
154 		return SPECTRE_UNAFFECTED;
155 	default:
156 		fallthrough;
157 	case SMCCC_RET_NOT_SUPPORTED:
158 		return SPECTRE_VULNERABLE;
159 	}
160 }
161 
has_spectre_v2(const struct arm64_cpu_capabilities * entry,int scope)162 bool has_spectre_v2(const struct arm64_cpu_capabilities *entry, int scope)
163 {
164 	WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());
165 
166 	if (spectre_v2_get_cpu_hw_mitigation_state() == SPECTRE_UNAFFECTED)
167 		return false;
168 
169 	if (spectre_v2_get_cpu_fw_mitigation_state() == SPECTRE_UNAFFECTED)
170 		return false;
171 
172 	return true;
173 }
174 
/* Report the system-wide Spectre-v2 mitigation state. */
enum mitigation_state arm64_get_spectre_v2_state(void)
{
	return spectre_v2_state;
}
179 
/* Per-CPU branch-predictor hardening callback and hyp vector slot. */
DEFINE_PER_CPU_READ_MOSTLY(struct bp_hardening_data, bp_hardening_data);

/*
 * Install @fn as this CPU's branch-predictor hardening callback and,
 * when hyp mode is available, select the Spectre "direct" hyp vector
 * slot for it.
 */
static void install_bp_hardening_cb(bp_hardening_cb_t fn)
{
	__this_cpu_write(bp_hardening_data.fn, fn);

	/*
	 * Vinz Clortho takes the hyp_vecs start/end "keys" at
	 * the door when we're a guest. Skip the hyp-vectors work.
	 */
	if (!is_hyp_mode_available())
		return;

	__this_cpu_write(bp_hardening_data.slot, HYP_VECTOR_SPECTRE_DIRECT);
}
195 
/* Invoke the firmware ARCH_WORKAROUND_1 mitigation via the SMC conduit. */
static void call_smc_arch_workaround_1(void)
{
	arm_smccc_1_1_smc(ARM_SMCCC_ARCH_WORKAROUND_1, NULL);
}

/* Invoke the firmware ARCH_WORKAROUND_1 mitigation via the HVC conduit. */
static void call_hvc_arch_workaround_1(void)
{
	arm_smccc_1_1_hvc(ARM_SMCCC_ARCH_WORKAROUND_1, NULL);
}
205 
/*
 * CPU-specific Spectre-v2 workaround for Qualcomm Falkor parts: execute
 * sixteen benign "bl" branches to overwrite the return-address predictor
 * state, preserving and restoring the real link register (x30) around
 * the sequence.
 */
static void qcom_link_stack_sanitisation(void)
{
	u64 tmp;

	asm volatile("mov	%0, x30		\n"
		     ".rept	16		\n"
		     "bl	. + 4		\n"
		     ".endr			\n"
		     "mov	x30, %0		\n"
		     : "=&r" (tmp));
}
217 
spectre_v2_get_sw_mitigation_cb(void)218 static bp_hardening_cb_t spectre_v2_get_sw_mitigation_cb(void)
219 {
220 	u32 midr = read_cpuid_id();
221 	if (((midr & MIDR_CPU_MODEL_MASK) != MIDR_QCOM_FALKOR) &&
222 	    ((midr & MIDR_CPU_MODEL_MASK) != MIDR_QCOM_FALKOR_V1))
223 		return NULL;
224 
225 	return qcom_link_stack_sanitisation;
226 }
227 
/*
 * Enable the firmware-backed Spectre-v2 mitigation on this CPU, if
 * firmware implements ARCH_WORKAROUND_1 and the mitigation hasn't been
 * disabled on the command line. Installs a per-CPU hardening callback
 * that reaches firmware via the detected SMCCC conduit, unless a
 * CPU-specific software workaround takes precedence.
 */
static enum mitigation_state spectre_v2_enable_fw_mitigation(void)
{
	bp_hardening_cb_t cb;
	enum mitigation_state state;

	state = spectre_v2_get_cpu_fw_mitigation_state();
	if (state != SPECTRE_MITIGATED)
		return state;

	/* Honour "nospectre_v2" / "mitigations=off". */
	if (spectre_v2_mitigations_off())
		return SPECTRE_VULNERABLE;

	/* Pick the callback that matches the SMCCC conduit. */
	switch (arm_smccc_1_1_get_conduit()) {
	case SMCCC_CONDUIT_HVC:
		cb = call_hvc_arch_workaround_1;
		break;

	case SMCCC_CONDUIT_SMC:
		cb = call_smc_arch_workaround_1;
		break;

	default:
		/* No usable conduit: we cannot reach firmware. */
		return SPECTRE_VULNERABLE;
	}

	/*
	 * Prefer a CPU-specific workaround if it exists. Note that we
	 * still rely on firmware for the mitigation at EL2.
	 */
	cb = spectre_v2_get_sw_mitigation_cb() ?: cb;
	install_bp_hardening_cb(cb);
	return SPECTRE_MITIGATED;
}
261 
/*
 * Per-CPU Spectre-v2 mitigation entry point: if the CPU isn't safe in
 * hardware, try the firmware mitigation, then fold the outcome into the
 * system-wide state.
 */
void spectre_v2_enable_mitigation(const struct arm64_cpu_capabilities *__unused)
{
	enum mitigation_state state;

	/* We read this CPU's registers/per-CPU data, so must not migrate. */
	WARN_ON(preemptible());

	state = spectre_v2_get_cpu_hw_mitigation_state();
	if (state == SPECTRE_VULNERABLE)
		state = spectre_v2_enable_fw_mitigation();

	update_mitigation_state(&spectre_v2_state, state);
}
274 
275 /*
276  * Spectre-v3a.
277  *
278  * Phew, there's not an awful lot to do here! We just instruct EL2 to use
279  * an indirect trampoline for the hyp vectors so that guests can't read
280  * VBAR_EL2 to defeat randomisation of the hypervisor VA layout.
281  */
/*
 * cpufeature matcher: a CPU is affected by Spectre-v3a iff its MIDR is
 * in the unsafe list below. Must run on the CPU in question with
 * preemption disabled.
 */
bool has_spectre_v3a(const struct arm64_cpu_capabilities *entry, int scope)
{
	static const struct midr_range spectre_v3a_unsafe_list[] = {
		MIDR_ALL_VERSIONS(MIDR_CORTEX_A57),
		MIDR_ALL_VERSIONS(MIDR_CORTEX_A72),
		{},
	};

	WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());
	return is_midr_in_range_list(read_cpuid_id(), spectre_v3a_unsafe_list);
}
293 
/*
 * On affected CPUs, shift this CPU's hyp vector slot to the indirect
 * trampoline variant so guests can't defeat hyp VA randomisation by
 * reading VBAR_EL2.
 */
void spectre_v3a_enable_mitigation(const struct arm64_cpu_capabilities *__unused)
{
	struct bp_hardening_data *data = this_cpu_ptr(&bp_hardening_data);

	if (this_cpu_has_cap(ARM64_SPECTRE_V3A))
		data->slot += HYP_VECTOR_INDIRECT;
}
301 
302 /*
303  * Spectre v4.
304  *
305  * If you thought Spectre v2 was nasty, wait until you see this mess. A CPU is
306  * either:
307  *
308  * - Mitigated in hardware and listed in our "safe list".
309  * - Mitigated in hardware via PSTATE.SSBS.
310  * - Mitigated in software by firmware (sometimes referred to as SSBD).
311  *
312  * Wait, that doesn't sound so bad, does it? Keep reading...
313  *
314  * A major source of headaches is that the software mitigation is enabled both
315  * on a per-task basis, but can also be forced on for the kernel, necessitating
316  * both context-switch *and* entry/exit hooks. To make it even worse, some CPUs
317  * allow EL0 to toggle SSBS directly, which can end up with the prctl() state
318  * being stale when re-entering the kernel. The usual big.LITTLE caveats apply,
319  * so you can have systems that have both firmware and SSBS mitigations. This
320  * means we actually have to reject late onlining of CPUs with mitigations if
321  * all of the currently onlined CPUs are safelisted, as the mitigation tends to
322  * be opt-in for userspace. Yes, really, the cure is worse than the disease.
323  *
324  * The only good part is that if the firmware mitigation is present, then it is
325  * present for all CPUs, meaning we don't have to worry about late onlining of a
326  * vulnerable CPU if one of the boot CPUs is using the firmware mitigation.
327  *
328  * Give me a VAX-11/780 any day of the week...
329  */
/* System-wide Spectre-v4 state, only raised via update_mitigation_state(). */
static enum mitigation_state spectre_v4_state;

/* This is the per-cpu state tracking whether we need to talk to firmware */
DEFINE_PER_CPU_READ_MOSTLY(u64, arm64_ssbd_callback_required);

/* Requested mitigation behaviour, as selected by "ssbd=" below. */
enum spectre_v4_policy {
	SPECTRE_V4_POLICY_MITIGATION_DYNAMIC,	/* "ssbd=kernel" */
	SPECTRE_V4_POLICY_MITIGATION_ENABLED,	/* "ssbd=force-on" */
	SPECTRE_V4_POLICY_MITIGATION_DISABLED,	/* "ssbd=force-off" */
};

/* Defaults to DYNAMIC (enum value 0) when no parameter is given. */
static enum spectre_v4_policy __read_mostly __spectre_v4_policy;

/* Mapping of "ssbd=" command-line values to mitigation policies. */
static const struct spectre_v4_param {
	const char		*str;
	enum spectre_v4_policy	policy;
} spectre_v4_params[] = {
	{ "force-on",	SPECTRE_V4_POLICY_MITIGATION_ENABLED, },
	{ "force-off",	SPECTRE_V4_POLICY_MITIGATION_DISABLED, },
	{ "kernel",	SPECTRE_V4_POLICY_MITIGATION_DYNAMIC, },
};
parse_spectre_v4_param(char * str)351 static int __init parse_spectre_v4_param(char *str)
352 {
353 	int i;
354 
355 	if (!str || !str[0])
356 		return -EINVAL;
357 
358 	for (i = 0; i < ARRAY_SIZE(spectre_v4_params); i++) {
359 		const struct spectre_v4_param *param = &spectre_v4_params[i];
360 
361 		if (strncmp(str, param->str, strlen(param->str)))
362 			continue;
363 
364 		__spectre_v4_policy = param->policy;
365 		return 0;
366 	}
367 
368 	return -EINVAL;
369 }
370 early_param("ssbd", parse_spectre_v4_param);
371 
372 /*
373  * Because this was all written in a rush by people working in different silos,
374  * we've ended up with multiple command line options to control the same thing.
375  * Wrap these up in some helpers, which prefer disabling the mitigation if faced
376  * with contradictory parameters. The mitigation is always either "off",
377  * "dynamic" or "on".
378  */
spectre_v4_mitigations_off(void)379 static bool spectre_v4_mitigations_off(void)
380 {
381 	bool ret = cpu_mitigations_off() ||
382 		   __spectre_v4_policy == SPECTRE_V4_POLICY_MITIGATION_DISABLED;
383 
384 	if (ret)
385 		pr_info_once("spectre-v4 mitigation disabled by command-line option\n");
386 
387 	return ret;
388 }
389 
390 /* Do we need to toggle the mitigation state on entry to/exit from the kernel? */
spectre_v4_mitigations_dynamic(void)391 static bool spectre_v4_mitigations_dynamic(void)
392 {
393 	return !spectre_v4_mitigations_off() &&
394 	       __spectre_v4_policy == SPECTRE_V4_POLICY_MITIGATION_DYNAMIC;
395 }
396 
spectre_v4_mitigations_on(void)397 static bool spectre_v4_mitigations_on(void)
398 {
399 	return !spectre_v4_mitigations_off() &&
400 	       __spectre_v4_policy == SPECTRE_V4_POLICY_MITIGATION_ENABLED;
401 }
402 
cpu_show_spec_store_bypass(struct device * dev,struct device_attribute * attr,char * buf)403 ssize_t cpu_show_spec_store_bypass(struct device *dev,
404 				   struct device_attribute *attr, char *buf)
405 {
406 	switch (spectre_v4_state) {
407 	case SPECTRE_UNAFFECTED:
408 		return sprintf(buf, "Not affected\n");
409 	case SPECTRE_MITIGATED:
410 		return sprintf(buf, "Mitigation: Speculative Store Bypass disabled via prctl\n");
411 	case SPECTRE_VULNERABLE:
412 		fallthrough;
413 	default:
414 		return sprintf(buf, "Vulnerable\n");
415 	}
416 }
417 
/* Report the system-wide Spectre-v4 mitigation state. */
enum mitigation_state arm64_get_spectre_v4_state(void)
{
	return spectre_v4_state;
}
422 
/*
 * Hardware view of Spectre-v4 for the current CPU: safelisted parts are
 * unaffected, CPUs with the SSBS capability can be mitigated in hardware,
 * and everything else is vulnerable (pending the separate firmware check).
 */
static enum mitigation_state spectre_v4_get_cpu_hw_mitigation_state(void)
{
	static const struct midr_range spectre_v4_safe_list[] = {
		MIDR_ALL_VERSIONS(MIDR_CORTEX_A35),
		MIDR_ALL_VERSIONS(MIDR_CORTEX_A53),
		MIDR_ALL_VERSIONS(MIDR_CORTEX_A55),
		MIDR_ALL_VERSIONS(MIDR_BRAHMA_B53),
		MIDR_ALL_VERSIONS(MIDR_QCOM_KRYO_3XX_SILVER),
		MIDR_ALL_VERSIONS(MIDR_QCOM_KRYO_4XX_SILVER),
		{ /* sentinel */ },
	};

	if (is_midr_in_range_list(read_cpuid_id(), spectre_v4_safe_list))
		return SPECTRE_UNAFFECTED;

	/* CPU features are detected first */
	if (this_cpu_has_cap(ARM64_SSBS))
		return SPECTRE_MITIGATED;

	return SPECTRE_VULNERABLE;
}
444 
spectre_v4_get_cpu_fw_mitigation_state(void)445 static enum mitigation_state spectre_v4_get_cpu_fw_mitigation_state(void)
446 {
447 	int ret;
448 	struct arm_smccc_res res;
449 
450 	arm_smccc_1_1_invoke(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
451 			     ARM_SMCCC_ARCH_WORKAROUND_2, &res);
452 
453 	ret = res.a0;
454 	switch (ret) {
455 	case SMCCC_RET_SUCCESS:
456 		return SPECTRE_MITIGATED;
457 	case SMCCC_ARCH_WORKAROUND_RET_UNAFFECTED:
458 		fallthrough;
459 	case SMCCC_RET_NOT_REQUIRED:
460 		return SPECTRE_UNAFFECTED;
461 	default:
462 		fallthrough;
463 	case SMCCC_RET_NOT_SUPPORTED:
464 		return SPECTRE_VULNERABLE;
465 	}
466 }
467 
has_spectre_v4(const struct arm64_cpu_capabilities * cap,int scope)468 bool has_spectre_v4(const struct arm64_cpu_capabilities *cap, int scope)
469 {
470 	enum mitigation_state state;
471 
472 	WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());
473 
474 	state = spectre_v4_get_cpu_hw_mitigation_state();
475 	if (state == SPECTRE_VULNERABLE)
476 		state = spectre_v4_get_cpu_fw_mitigation_state();
477 
478 	return state != SPECTRE_UNAFFECTED;
479 }
480 
/*
 * Undef hook handler paired with ssbs_emulation_hook below: applies the
 * trapped instruction's effect to the saved PSTATE.SSBS bit. Only handles
 * kernel-mode traps; user-mode traps are passed on (return 1).
 */
static int ssbs_emulation_handler(struct pt_regs *regs, u32 instr)
{
	if (user_mode(regs))
		return 1;

	/* The instruction's immediate bit selects set vs clear of SSBS. */
	if (instr & BIT(PSTATE_Imm_shift))
		regs->pstate |= PSR_SSBS_BIT;
	else
		regs->pstate &= ~PSR_SSBS_BIT;

	/* Step over the emulated 4-byte instruction. */
	arm64_skip_faulting_instruction(regs, 4);
	return 0;
}
494 
/*
 * Match the MSR-immediate encoding targeting PSTATE.SSBS, with the
 * immediate bit masked out so both the set and clear forms are caught,
 * and route it to the emulation handler above.
 */
static struct undef_hook ssbs_emulation_hook = {
	.instr_mask	= ~(1U << PSTATE_Imm_shift),
	.instr_val	= 0xd500401f | PSTATE_SSBS,
	.fn		= ssbs_emulation_handler,
};
500 
/*
 * Enable the SSBS-based Spectre-v4 mitigation on this CPU: register the
 * SSBS emulation hook (once, system-wide) and program PSTATE.SSBS and
 * SCTLR_EL1.DSSBS according to the command-line policy.
 */
static enum mitigation_state spectre_v4_enable_hw_mitigation(void)
{
	static bool undef_hook_registered = false;
	static DEFINE_RAW_SPINLOCK(hook_lock);
	enum mitigation_state state;

	/*
	 * If the system is mitigated but this CPU doesn't have SSBS, then
	 * we must be on the safelist and there's nothing more to do.
	 */
	state = spectre_v4_get_cpu_hw_mitigation_state();
	if (state != SPECTRE_MITIGATED || !this_cpu_has_cap(ARM64_SSBS))
		return state;

	/* Serialise hook registration so it happens exactly once. */
	raw_spin_lock(&hook_lock);
	if (!undef_hook_registered) {
		register_undef_hook(&ssbs_emulation_hook);
		undef_hook_registered = true;
	}
	raw_spin_unlock(&hook_lock);

	/* Mitigation disabled by policy: set DSSBS and PSTATE.SSBS. */
	if (spectre_v4_mitigations_off()) {
		sysreg_clear_set(sctlr_el1, 0, SCTLR_ELx_DSSBS);
		set_pstate_ssbs(1);
		return SPECTRE_VULNERABLE;
	}

	/* SCTLR_EL1.DSSBS was initialised to 0 during boot */
	set_pstate_ssbs(0);
	return SPECTRE_MITIGATED;
}
532 
533 /*
534  * Patch a branch over the Spectre-v4 mitigation code with a NOP so that
535  * we fallthrough and check whether firmware needs to be called on this CPU.
536  */
/*
 * Patch a branch over the Spectre-v4 mitigation code with a NOP so that
 * we fallthrough and check whether firmware needs to be called on this CPU.
 */
void __init spectre_v4_patch_fw_mitigation_enable(struct alt_instr *alt,
						  __le32 *origptr,
						  __le32 *updptr, int nr_inst)
{
	BUG_ON(nr_inst != 1); /* Branch -> NOP */

	/* Mitigation off: leave the branch so the firmware path is skipped. */
	if (spectre_v4_mitigations_off())
		return;

	/* All CPUs have SSBS: no need for the firmware call path either. */
	if (cpus_have_final_cap(ARM64_SSBS))
		return;

	/* Dynamic policy: NOP the branch so the per-CPU firmware check runs. */
	if (spectre_v4_mitigations_dynamic())
		*updptr = cpu_to_le32(aarch64_insn_gen_nop());
}
552 
553 /*
554  * Patch a NOP in the Spectre-v4 mitigation code with an SMC/HVC instruction
555  * to call into firmware to adjust the mitigation state.
556  */
/*
 * Patch a NOP in the Spectre-v4 mitigation code with an SMC/HVC instruction
 * to call into firmware to adjust the mitigation state.
 */
void __init spectre_v4_patch_fw_mitigation_conduit(struct alt_instr *alt,
						   __le32 *origptr,
						   __le32 *updptr, int nr_inst)
{
	u32 insn;

	BUG_ON(nr_inst != 1); /* NOP -> HVC/SMC */

	/* Pick the instruction matching the SMCCC conduit; no conduit, no patch. */
	switch (arm_smccc_1_1_get_conduit()) {
	case SMCCC_CONDUIT_HVC:
		insn = aarch64_insn_get_hvc_value();
		break;
	case SMCCC_CONDUIT_SMC:
		insn = aarch64_insn_get_smc_value();
		break;
	default:
		return;
	}

	*updptr = cpu_to_le32(insn);
}
578 
/*
 * Enable the firmware-backed Spectre-v4 mitigation (ARCH_WORKAROUND_2)
 * on this CPU, honouring the command-line policy. Under a dynamic policy,
 * also flag that the kernel entry/exit code must call firmware here.
 */
static enum mitigation_state spectre_v4_enable_fw_mitigation(void)
{
	enum mitigation_state state;

	state = spectre_v4_get_cpu_fw_mitigation_state();
	if (state != SPECTRE_MITIGATED)
		return state;

	/* Policy says off: explicitly ask firmware to disable the workaround. */
	if (spectre_v4_mitigations_off()) {
		arm_smccc_1_1_invoke(ARM_SMCCC_ARCH_WORKAROUND_2, false, NULL);
		return SPECTRE_VULNERABLE;
	}

	arm_smccc_1_1_invoke(ARM_SMCCC_ARCH_WORKAROUND_2, true, NULL);

	/* Dynamic policy: entry/exit code toggles the mitigation via firmware. */
	if (spectre_v4_mitigations_dynamic())
		__this_cpu_write(arm64_ssbd_callback_required, 1);

	return SPECTRE_MITIGATED;
}
599 
/*
 * Per-CPU Spectre-v4 mitigation entry point: prefer the SSBS hardware
 * mitigation, fall back to firmware, then fold the outcome into the
 * system-wide state.
 */
void spectre_v4_enable_mitigation(const struct arm64_cpu_capabilities *__unused)
{
	enum mitigation_state state;

	/* We program this CPU's state/registers, so must not migrate. */
	WARN_ON(preemptible());

	state = spectre_v4_enable_hw_mitigation();
	if (state == SPECTRE_VULNERABLE)
		state = spectre_v4_enable_fw_mitigation();

	update_mitigation_state(&spectre_v4_state, state);
}
612 
/*
 * Set or clear the SSBS bit in the saved PSTATE of @regs, using the
 * AArch32 bit position for compat tasks.
 */
static void __update_pstate_ssbs(struct pt_regs *regs, bool state)
{
	u64 mask = compat_user_mode(regs) ? PSR_AA32_SSBS_BIT : PSR_SSBS_BIT;

	regs->pstate &= ~mask;
	if (state)
		regs->pstate |= mask;
}
622 
/*
 * Recompute the saved PSTATE.SSBS bit for @tsk from the global policy
 * and the task's TIF_SSBD flag. Kernel threads never get the dynamic
 * per-task treatment.
 */
void spectre_v4_enable_task_mitigation(struct task_struct *tsk)
{
	struct pt_regs *regs = task_pt_regs(tsk);
	bool ssbs = false, kthread = tsk->flags & PF_KTHREAD;

	if (spectre_v4_mitigations_off())
		ssbs = true;	/* globally off: leave speculation enabled */
	else if (spectre_v4_mitigations_dynamic() && !kthread)
		ssbs = !test_tsk_thread_flag(tsk, TIF_SSBD);

	__update_pstate_ssbs(regs, ssbs);
}
635 
636 /*
637  * The Spectre-v4 mitigation can be controlled via a prctl() from userspace.
638  * This is interesting because the "speculation disabled" behaviour can be
639  * configured so that it is preserved across exec(), which means that the
640  * prctl() may be necessary even when PSTATE.SSBS can be toggled directly
641  * from userspace.
642  */
/* Mark @task as wanting the SSB mitigation; drops any pending noexec state. */
static void ssbd_prctl_enable_mitigation(struct task_struct *task)
{
	task_clear_spec_ssb_noexec(task);
	task_set_spec_ssb_disable(task);
	set_tsk_thread_flag(task, TIF_SSBD);
}

/* Mark @task as not wanting the SSB mitigation; drops any pending noexec state. */
static void ssbd_prctl_disable_mitigation(struct task_struct *task)
{
	task_clear_spec_ssb_noexec(task);
	task_clear_spec_ssb_disable(task);
	clear_tsk_thread_flag(task, TIF_SSBD);
}
656 
/*
 * Apply a PR_SPEC_STORE_BYPASS prctl() request to @task. Requests that
 * contradict a forced global policy ("ssbd=force-on"/"force-off",
 * "mitigations=off") or an earlier PR_SPEC_FORCE_DISABLE on the task are
 * rejected with -EPERM; unknown controls get -ERANGE. On success the
 * task's saved PSTATE.SSBS is refreshed to match the new state.
 */
static int ssbd_prctl_set(struct task_struct *task, unsigned long ctrl)
{
	switch (ctrl) {
	case PR_SPEC_ENABLE:
		/* Enable speculation: disable mitigation */
		/*
		 * Force disabled speculation prevents it from being
		 * re-enabled.
		 */
		if (task_spec_ssb_force_disable(task))
			return -EPERM;

		/*
		 * If the mitigation is forced on, then speculation is forced
		 * off and we again prevent it from being re-enabled.
		 */
		if (spectre_v4_mitigations_on())
			return -EPERM;

		ssbd_prctl_disable_mitigation(task);
		break;
	case PR_SPEC_FORCE_DISABLE:
		/* Force disable speculation: force enable mitigation */
		/*
		 * If the mitigation is forced off, then speculation is forced
		 * on and we prevent it from being disabled.
		 */
		if (spectre_v4_mitigations_off())
			return -EPERM;

		task_set_spec_ssb_force_disable(task);
		fallthrough;
	case PR_SPEC_DISABLE:
		/* Disable speculation: enable mitigation */
		/* Same as PR_SPEC_FORCE_DISABLE */
		if (spectre_v4_mitigations_off())
			return -EPERM;

		ssbd_prctl_enable_mitigation(task);
		break;
	case PR_SPEC_DISABLE_NOEXEC:
		/* Disable speculation until execve(): enable mitigation */
		/*
		 * If the mitigation state is forced one way or the other, then
		 * we must fail now before we try to toggle it on execve().
		 */
		if (task_spec_ssb_force_disable(task) ||
		    spectre_v4_mitigations_off() ||
		    spectre_v4_mitigations_on()) {
			return -EPERM;
		}

		ssbd_prctl_enable_mitigation(task);
		task_set_spec_ssb_noexec(task);
		break;
	default:
		return -ERANGE;
	}

	/* Propagate the new state into the task's saved PSTATE.SSBS. */
	spectre_v4_enable_task_mitigation(task);
	return 0;
}
719 
arch_prctl_spec_ctrl_set(struct task_struct * task,unsigned long which,unsigned long ctrl)720 int arch_prctl_spec_ctrl_set(struct task_struct *task, unsigned long which,
721 			     unsigned long ctrl)
722 {
723 	switch (which) {
724 	case PR_SPEC_STORE_BYPASS:
725 		return ssbd_prctl_set(task, ctrl);
726 	default:
727 		return -ENODEV;
728 	}
729 }
730 
/*
 * Report the PR_SPEC_STORE_BYPASS state for @task: the global mitigation
 * state answers directly unless the policy is dynamic, in which case the
 * task's own flags are consulted.
 */
static int ssbd_prctl_get(struct task_struct *task)
{
	switch (spectre_v4_state) {
	case SPECTRE_UNAFFECTED:
		return PR_SPEC_NOT_AFFECTED;
	case SPECTRE_MITIGATED:
		/* Forced on: no per-task control, nothing to report per task. */
		if (spectre_v4_mitigations_on())
			return PR_SPEC_NOT_AFFECTED;

		if (spectre_v4_mitigations_dynamic())
			break;

		/* Mitigations are disabled, so we're vulnerable. */
		fallthrough;
	case SPECTRE_VULNERABLE:
		fallthrough;
	default:
		return PR_SPEC_ENABLE;
	}

	/* Check the mitigation state for this task */
	if (task_spec_ssb_force_disable(task))
		return PR_SPEC_PRCTL | PR_SPEC_FORCE_DISABLE;

	if (task_spec_ssb_noexec(task))
		return PR_SPEC_PRCTL | PR_SPEC_DISABLE_NOEXEC;

	if (task_spec_ssb_disable(task))
		return PR_SPEC_PRCTL | PR_SPEC_DISABLE;

	return PR_SPEC_PRCTL | PR_SPEC_ENABLE;
}
763 
arch_prctl_spec_ctrl_get(struct task_struct * task,unsigned long which)764 int arch_prctl_spec_ctrl_get(struct task_struct *task, unsigned long which)
765 {
766 	switch (which) {
767 	case PR_SPEC_STORE_BYPASS:
768 		return ssbd_prctl_get(task);
769 	default:
770 		return -ENODEV;
771 	}
772 }
773