/******************************************************************************
 * include/asm-x86/spec_ctrl.h
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; If not, see <http://www.gnu.org/licenses/>.
 *
 * Copyright (c) 2017-2018 Citrix Systems Ltd.
 */

#ifndef __X86_SPEC_CTRL_H__
#define __X86_SPEC_CTRL_H__

/* Encoding of cpuinfo.spec_ctrl_flags */
#define SCF_use_shadow (1 << 0)
#define SCF_ist_wrmsr  (1 << 1)
#define SCF_ist_rsb    (1 << 2)
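
/*
 * SCF_use_shadow: restore paths should load shadow_spec_ctrl into
 *                 MSR_SPEC_CTRL, rather than Xen's default value.
 * SCF_ist_wrmsr:  IST entry/exit paths need to write MSR_SPEC_CTRL.
 * SCF_ist_rsb:    IST entry paths need to overwrite the RSB.
 */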

#ifndef __ASSEMBLY__

#include <asm/alternative.h>
#include <asm/current.h>
#include <asm/msr-index.h>

void init_speculation_mitigations(void);

extern bool opt_ibpb;
extern bool opt_ssbd;
extern int8_t opt_eager_fpu;
extern int8_t opt_l1d_flush;
extern bool opt_branch_harden;

extern bool bsp_delay_spec_ctrl;
extern uint8_t default_xen_spec_ctrl;
extern uint8_t default_spec_ctrl_flags;

extern int8_t opt_xpti_hwdom, opt_xpti_domu;

extern int8_t opt_pv_l1tf_hwdom, opt_pv_l1tf_domu;

/*
 * The L1D address mask, which might be wider than reported in CPUID, and the
 * system physical address above which there are believed to be no cacheable
 * memory regions, thus unable to leak data via the L1TF vulnerability.
 */
extern paddr_t l1tf_addr_mask, l1tf_safe_maddr;

extern uint64_t default_xen_mcu_opt_ctrl;

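/*
 * Seed this CPU's live spec_ctrl state from the boot-time defaults.  Expected
 * to run once per CPU during bringup, before any guest code is executed.
 */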
static inline void init_shadow_spec_ctrl_state(void)
{
    struct cpu_info *info = get_cpu_info();

    info->shadow_spec_ctrl = 0;
    info->xen_spec_ctrl = default_xen_spec_ctrl;
    info->spec_ctrl_flags = default_spec_ctrl_flags;

    /*
     * For least latency, the VERW selector should be a writeable data
     * descriptor resident in the cache.  __HYPERVISOR_DS32 shares a cache
     * line with __HYPERVISOR_CS, so is expected to be very cache-hot.
     */
    info->verw_sel = __HYPERVISOR_DS32;
}

/* WARNING! `ret`, `call *`, `jmp *` not safe after this call. */
static always_inline void spec_ctrl_enter_idle(struct cpu_info *info)
{
    uint32_t val = 0;

    /*
     * Branch Target Injection:
     *
     * Latch the new shadow value, then enable shadowing, then update the MSR.
     * There are no SMP issues here; only local processor ordering concerns.
     */
    info->shadow_spec_ctrl = val;
    barrier();
    info->spec_ctrl_flags |= SCF_use_shadow;
    barrier();
    alternative_input("", "wrmsr", X86_FEATURE_SC_MSR_IDLE,
                      "a" (val), "c" (MSR_SPEC_CTRL), "d" (0));
    barrier();

    /*
     * Microarchitectural Store Buffer Data Sampling:
     *
     * On vulnerable systems, store buffer entries are statically partitioned
     * between active threads.  When entering idle, our store buffer entries
     * are re-partitioned to allow the other threads to use them.
     *
     * Flush the buffers to ensure that no sensitive data of ours can be
     * leaked by a sibling after it gets our store buffer entries.
     *
     * Note: VERW must be encoded with a memory operand, as it is only that
     * form which causes a flush.
     */
    alternative_input("", "verw %[sel]", X86_FEATURE_SC_VERW_IDLE,
                      [sel] "m" (info->verw_sel));
}

/* WARNING! `ret`, `call *`, `jmp *` not safe before this call. */
static always_inline void spec_ctrl_exit_idle(struct cpu_info *info)
{
    uint32_t val = info->xen_spec_ctrl;

    /*
     * Branch Target Injection:
     *
     * Disable shadowing before updating the MSR.  There are no SMP issues
     * here; only local processor ordering concerns.
     */
    info->spec_ctrl_flags &= ~SCF_use_shadow;
    barrier();
    alternative_input("", "wrmsr", X86_FEATURE_SC_MSR_IDLE,
                      "a" (val), "c" (MSR_SPEC_CTRL), "d" (0));
    barrier();

    /*
     * Microarchitectural Store Buffer Data Sampling:
     *
     * On vulnerable systems, store buffer entries are statically partitioned
     * between active threads.  When exiting idle, the other threads' store
     * buffer entries are re-partitioned to give us some.
     *
     * We now have store buffer entries with stale data from sibling threads.
     * A flush, if necessary, will be performed on the return-to-guest path.
     */
}
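
/*
 * Illustrative caller sketch (local_irq_disable() and safe_halt() stand in
 * for whatever the real idle path uses): the enter/exit pair is expected to
 * bracket the halt, with no `ret`, `call *` or `jmp *` executed in between:
 *
 *     struct cpu_info *info = get_cpu_info();
 *
 *     local_irq_disable();
 *     spec_ctrl_enter_idle(info);
 *     safe_halt();
 *     spec_ctrl_exit_idle(info);
 */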

#endif /* __ASSEMBLY__ */
#endif /* !__X86_SPEC_CTRL_H__ */

/*
 * Local variables:
 * mode: C
 * c-file-style: "BSD"
 * c-basic-offset: 4
 * tab-width: 4
 * indent-tabs-mode: nil
 * End:
 */