/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __X86_KERNEL_FPU_XSTATE_H
#define __X86_KERNEL_FPU_XSTATE_H

#include <asm/cpufeature.h>
#include <asm/fpu/xstate.h>
#include <asm/fpu/xcr.h>

#ifdef CONFIG_X86_64
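/*
 * Per-CPU shadow of the last value written to MSR_IA32_XFD on this CPU;
 * xfd_update_state() below uses it to avoid redundant WRMSRs.
 */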
DECLARE_PER_CPU(u64, xfd_state);
#endif

static inline void xstate_init_xcomp_bv(struct xregs_state *xsave, u64 mask)
{
	/*
	 * XRSTORS requires these bits set in xcomp_bv, or it will
	 * trigger #GP:
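	 *
	 * For example, mask = XFEATURE_MASK_FP | XFEATURE_MASK_SSE (bits 0
	 * and 1) yields xcomp_bv = 0x8000000000000003, where bit 63 is
	 * XCOMP_BV_COMPACTED_FORMAT.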
	 */
	if (cpu_feature_enabled(X86_FEATURE_XSAVES))
		xsave->header.xcomp_bv = mask | XCOMP_BV_COMPACTED_FORMAT;
}

static inline u64 xstate_get_host_group_perm(void)
{
	/* Pairs with WRITE_ONCE() in xstate_request_perm() */
	return READ_ONCE(current->group_leader->thread.fpu.perm.__state_perm);
}

enum xstate_copy_mode {
	XSTATE_COPY_FP,
	XSTATE_COPY_FX,
	XSTATE_COPY_XSAVE,
};

struct membuf;
extern void __copy_xstate_to_uabi_buf(struct membuf to, struct fpstate *fpstate,
				      u32 pkru_val, enum xstate_copy_mode copy_mode);
extern void copy_xstate_to_uabi_buf(struct membuf to, struct task_struct *tsk,
				    enum xstate_copy_mode mode);
extern int copy_uabi_from_kernel_to_xstate(struct fpstate *fpstate, const void *kbuf);
extern int copy_sigframe_from_user_to_xstate(struct fpstate *fpstate, const void __user *ubuf);

extern void fpu__init_cpu_xstate(void);
extern void fpu__init_system_xstate(unsigned int legacy_size);

extern void *get_xsave_addr(struct xregs_state *xsave, int xfeature_nr);

static inline u64 xfeatures_mask_supervisor(void)
{
	return fpu_kernel_cfg.max_features & XFEATURE_MASK_SUPERVISOR_SUPPORTED;
}

static inline u64 xfeatures_mask_independent(void)
{
	if (!cpu_feature_enabled(X86_FEATURE_ARCH_LBR))
		return XFEATURE_MASK_INDEPENDENT & ~XFEATURE_MASK_LBR;

	return XFEATURE_MASK_INDEPENDENT;
}

/* XSAVE/XRSTOR wrapper functions */

#ifdef CONFIG_X86_64
#define REX_PREFIX	"0x48, "
#else
#define REX_PREFIX
#endif

/* These macros all use (%edi)/(%rdi) as the single memory argument. */
#define XSAVE		".byte " REX_PREFIX "0x0f,0xae,0x27"
#define XSAVEOPT	".byte " REX_PREFIX "0x0f,0xae,0x37"
#define XSAVES		".byte " REX_PREFIX "0x0f,0xc7,0x2f"
#define XRSTOR		".byte " REX_PREFIX "0x0f,0xae,0x2f"
#define XRSTORS		".byte " REX_PREFIX "0x0f,0xc7,0x1f"
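/*
 * The final byte of each opcode is the ModRM: mod=00/rm=111 selects
 * (%edi)/(%rdi), and the reg field picks the operation: 0x0f,0xae /4 is
 * XSAVE, /6 is XSAVEOPT, /5 is XRSTOR; 0x0f,0xc7 /5 is XSAVES and /3 is
 * XRSTORS.
 */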

/*
 * After this @err contains 0 on success or the trap number when the
 * operation raises an exception.
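 *
 * The "D" constraint pins @st to (%edi)/(%rdi), matching the ModRM byte
 * hardcoded in the opcodes above; the feature mask is passed in EDX:EAX.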
 */
#define XSTATE_OP(op, st, lmask, hmask, err)				\
	asm volatile("1:" op "\n\t"					\
		     "xor %[err], %[err]\n"				\
		     "2:\n\t"						\
		     _ASM_EXTABLE_TYPE(1b, 2b, EX_TYPE_FAULT_MCE_SAFE)	\
		     : [err] "=a" (err)					\
		     : "D" (st), "m" (*st), "a" (lmask), "d" (hmask)	\
		     : "memory")
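
/*
 * A minimal usage sketch, mirroring the real callers further down:
 *
 *	u32 lmask = mask, hmask = mask >> 32;
 *	int err;
 *
 *	XSTATE_OP(XSAVE, buf, lmask, hmask, err);
 *	if (err)
 *		...	handle the trap number left in @err
 */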

/*
 * If XSAVES is enabled, it replaces XSAVEOPT because it supports the
 * compacted format and supervisor states in addition to XSAVEOPT's
 * modified optimization.
 *
 * Otherwise, if XSAVEOPT is enabled, XSAVEOPT replaces XSAVE because
 * XSAVEOPT supports the modified optimization which XSAVE does not.
 *
 * We use XSAVE as a fallback.
 *
 * The 661 label is defined in the ALTERNATIVE* macros as the address of the
 * original instruction which gets replaced. We use it here as the address
 * of the instruction at which an exception might be raised.
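 *
 * On a fault, the fixup at label 4 sets @err to -2 and resumes at
 * label 3, so the caller gets an error code instead of an unhandled
 * exception.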
 */
#define XSTATE_XSAVE(st, lmask, hmask, err)				\
	asm volatile(ALTERNATIVE_2(XSAVE,				\
				   XSAVEOPT, X86_FEATURE_XSAVEOPT,	\
				   XSAVES,   X86_FEATURE_XSAVES)	\
		     "\n"						\
		     "xor %[err], %[err]\n"				\
		     "3:\n"						\
		     ".pushsection .fixup,\"ax\"\n"			\
		     "4: movl $-2, %[err]\n"				\
		     "jmp 3b\n"						\
		     ".popsection\n"					\
		     _ASM_EXTABLE(661b, 4b)				\
		     : [err] "=r" (err)					\
		     : "D" (st), "m" (*st), "a" (lmask), "d" (hmask)	\
		     : "memory")

/*
 * Use XRSTORS to restore context if it is enabled. XRSTORS supports the
 * compacted XSAVE area format.
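 *
 * There is no error return here: a fault during the restore is handled
 * by the EX_TYPE_FPU_RESTORE fixup, which resets the FPU state to a
 * sane (init) state instead of reporting the failure to the caller.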
 */
#define XSTATE_XRESTORE(st, lmask, hmask)				\
	asm volatile(ALTERNATIVE(XRSTOR,				\
				 XRSTORS, X86_FEATURE_XSAVES)		\
		     "\n"						\
		     "3:\n"						\
		     _ASM_EXTABLE_TYPE(661b, 3b, EX_TYPE_FPU_RESTORE)	\
		     :							\
		     : "D" (st), "m" (*st), "a" (lmask), "d" (hmask)	\
		     : "memory")

#if defined(CONFIG_X86_64) && defined(CONFIG_X86_DEBUG_FPU)
extern void xfd_validate_state(struct fpstate *fpstate, u64 mask, bool rstor);
#else
static inline void xfd_validate_state(struct fpstate *fpstate, u64 mask, bool rstor) { }
#endif

#ifdef CONFIG_X86_64
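/*
 * Keep MSR_IA32_XFD in sync with the fpstate's XFD value. The per-CPU
 * xfd_state cache lets us skip the (expensive) WRMSR when the value on
 * this CPU is already correct.
 */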
static inline void xfd_update_state(struct fpstate *fpstate)
{
	if (fpu_state_size_dynamic()) {
		u64 xfd = fpstate->xfd;

		if (__this_cpu_read(xfd_state) != xfd) {
			wrmsrl(MSR_IA32_XFD, xfd);
			__this_cpu_write(xfd_state, xfd);
		}
	}
}
#else
static inline void xfd_update_state(struct fpstate *fpstate) { }
#endif

/*
 * Save processor xstate to xsave area.
 *
 * Uses XSAVE, XSAVEOPT or XSAVES depending on the CPU features and
 * command line options. The choice is permanent until the next reboot.
 */
static inline void os_xsave(struct fpstate *fpstate)
{
	u64 mask = fpstate->xfeatures;
	u32 lmask = mask;
	u32 hmask = mask >> 32;
	int err;

	WARN_ON_FPU(!alternatives_patched);
	xfd_validate_state(fpstate, mask, false);

	XSTATE_XSAVE(&fpstate->regs.xsave, lmask, hmask, err);

	/* We should never fault when copying to a kernel buffer: */
	WARN_ON_FPU(err);
}

/*
 * Restore processor xstate from xsave area.
 *
 * Uses XRSTORS when XSAVES is used, XRSTOR otherwise.
 */
static inline void os_xrstor(struct fpstate *fpstate, u64 mask)
{
	u32 lmask = mask;
	u32 hmask = mask >> 32;

	xfd_validate_state(fpstate, mask, true);
	XSTATE_XRESTORE(&fpstate->regs.xsave, lmask, hmask);
}

/* Restore of supervisor state. Does not require XFD handling. */
static inline void os_xrstor_supervisor(struct fpstate *fpstate)
{
	u64 mask = xfeatures_mask_supervisor();
	u32 lmask = mask;
	u32 hmask = mask >> 32;

	XSTATE_XRESTORE(&fpstate->regs.xsave, lmask, hmask);
}

/*
 * XSAVE itself always writes all requested xfeatures.  Removing features
 * from the request bitmap reduces the features which are written.
 * Generate a mask of features which must be written to a sigframe.  The
 * unset features can be optimized away and not written.
 *
 * This optimization is user-visible.  Only use it for states where
 * uninitialized sigframe contents are tolerable, like dynamic features.
 *
 * Users of buffers produced with this optimization must check XSTATE_BV
 * to determine which features have been optimized out.
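 *
 * For example, a task which never touched a dynamic feature such as AMX
 * TILEDATA gets that state optimized out of its sigframe; the signal
 * handler then sees the corresponding XSTATE_BV bit clear and must treat
 * the state as being in its init configuration.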
 */
static inline u64 xfeatures_need_sigframe_write(void)
{
	u64 xfeatures_to_write;

	/* In-use features must be written: */
	xfeatures_to_write = xfeatures_in_use();

	/* Also write all non-optimizable sigframe features: */
	xfeatures_to_write |= XFEATURE_MASK_USER_SUPPORTED &
			      ~XFEATURE_MASK_SIGFRAME_INITOPT;

	return xfeatures_to_write;
}

/*
 * Save xstate to the user space xsave area.
 *
 * We don't use the modified optimization because xrstor/xrstors might
 * track a different application.
 *
 * We don't use the compacted format, for backward compatibility with
 * old applications which don't understand the compacted format of the
 * xsave area.
 *
 * The caller has to zero buf::header before calling this because XSAVE*
 * does not touch the reserved fields in the header.
 */
static inline int xsave_to_user_sigframe(struct xregs_state __user *buf)
{
	/*
	 * Include the features which are not xsaved/rstored by the kernel
	 * internally, e.g. PKRU. That's user space ABI and also required
	 * to allow the signal handler to modify PKRU.
	 */
	struct fpstate *fpstate = current->thread.fpu.fpstate;
	u64 mask = fpstate->user_xfeatures;
	u32 lmask;
	u32 hmask;
	int err;

	/* Optimize away writing unnecessary xfeatures: */
	if (fpu_state_size_dynamic())
		mask &= xfeatures_need_sigframe_write();

	lmask = mask;
	hmask = mask >> 32;
	xfd_validate_state(fpstate, mask, false);

	stac();
	XSTATE_OP(XSAVE, buf, lmask, hmask, err);
	clac();

	return err;
}

/*
 * Restore xstate from the user space xsave area.
 */
static inline int xrstor_from_user_sigframe(struct xregs_state __user *buf, u64 mask)
{
	struct xregs_state *xstate = ((__force struct xregs_state *)buf);
	u32 lmask = mask;
	u32 hmask = mask >> 32;
	int err;

	xfd_validate_state(current->thread.fpu.fpstate, mask, true);

	stac();
	XSTATE_OP(XRSTOR, xstate, lmask, hmask, err);
	clac();

	return err;
}

/*
 * Restore xstate from the kernel space xsave area; return an error code
 * instead of raising an exception.
 */
static inline int os_xrstor_safe(struct fpstate *fpstate, u64 mask)
{
	struct xregs_state *xstate = &fpstate->regs.xsave;
	u32 lmask = mask;
	u32 hmask = mask >> 32;
	int err;

	/* Ensure that XFD is up to date */
	xfd_update_state(fpstate);

	if (cpu_feature_enabled(X86_FEATURE_XSAVES))
		XSTATE_OP(XRSTORS, xstate, lmask, hmask, err);
	else
		XSTATE_OP(XRSTOR, xstate, lmask, hmask, err);

	return err;
}

#endif /* __X86_KERNEL_FPU_XSTATE_H */