1 /* SPDX-License-Identifier: GPL-2.0-only */
2 /*
3  * Copyright (C) 2012 Regents of the University of California
4  *
5  * This file was copied from include/asm-generic/uaccess.h
6  */
7 
8 #ifndef _ASM_RISCV_UACCESS_H
9 #define _ASM_RISCV_UACCESS_H
10 
11 #include <asm/pgtable.h>		/* for TASK_SIZE */
12 
13 /*
14  * User space memory access functions
15  */
16 #ifdef CONFIG_MMU
17 #include <linux/errno.h>
18 #include <linux/compiler.h>
19 #include <linux/thread_info.h>
20 #include <asm/byteorder.h>
21 #include <asm/extable.h>
22 #include <asm/asm.h>
23 
/*
 * Open/close the user-memory access window by setting/clearing the SUM
 * (permit Supervisor User Memory access) bit in the sstatus CSR.  The
 * "memory" clobber stops the compiler from moving memory accesses into
 * or out of the window.
 */
#define __enable_user_access()							\
	__asm__ __volatile__ ("csrs sstatus, %0" : : "r" (SR_SUM) : "memory")
#define __disable_user_access()							\
	__asm__ __volatile__ ("csrc sstatus, %0" : : "r" (SR_SUM) : "memory")
28 
29 /**
30  * access_ok: - Checks if a user space pointer is valid
31  * @addr: User space pointer to start of block to check
32  * @size: Size of block to check
33  *
34  * Context: User context only.  This function may sleep.
35  *
36  * Checks if a pointer to a block of memory in user space is valid.
37  *
38  * Returns true (nonzero) if the memory block may be valid, false (zero)
39  * if it is definitely invalid.
40  *
41  * Note that, depending on architecture, this function probably just
42  * checks that the pointer is in the user space range - after calling
43  * this function, memory access functions may still return -EFAULT.
44  */
#define access_ok(addr, size) ({					\
	__chk_user_ptr(addr);						\
	/* __force strips the __user qualifier for the range check */	\
	likely(__access_ok((unsigned long __force)(addr), (size)));	\
})
49 
50 /*
51  * Ensure that the range [addr, addr+size) is within the process's
52  * address space
53  */
__access_ok(unsigned long addr,unsigned long size)54 static inline int __access_ok(unsigned long addr, unsigned long size)
55 {
56 	return size <= TASK_SIZE && addr <= TASK_SIZE - size;
57 }
58 
59 /*
60  * The exception table consists of pairs of addresses: the first is the
61  * address of an instruction that is allowed to fault, and the second is
62  * the address at which the program should continue.  No registers are
63  * modified, so it is entirely up to the continuation code to figure out
64  * what to do.
65  *
66  * All the routines below use bits of fixup code that are out of line
67  * with the main instruction path.  This means when everything is well,
68  * we don't even have to jump over them.  Further, they do not intrude
69  * on our cache or tlb entries.
70  */
71 
/*
 * Word indices used when a 64-bit quantity is accessed as two 32-bit
 * halves on RV32: low word first, high word second.  This assumes a
 * little-endian data layout — confirm against <asm/byteorder.h>.
 */
#define __LSW	0
#define __MSW	1
74 
75 /*
76  * The "__xxx" versions of the user access functions do not verify the address
77  * space - it must have been done previously with a separate "access_ok()"
78  * call.
79  */
80 
/*
 * Load a single 1/2/4/8-byte value from user memory with fault fixup.
 *
 * @insn: load mnemonic ("lb", "lh", "lw" or "ld")
 * @x:    lvalue receiving the loaded value (zeroed on fault)
 * @ptr:  user pointer to load from
 * @err:  lvalue set to -EFAULT on fault, left unchanged on success
 *        (hence the "+r" read-modify constraint; caller must have
 *        initialized it)
 *
 * A fault on the load at label 1 is routed by the __ex_table entry to
 * the fixup at label 3, which sets err, zeroes the result and jumps
 * back to label 2; "jump" uses %2 (__tmp) as its scratch register.
 */
#define __get_user_asm(insn, x, ptr, err)			\
do {								\
	uintptr_t __tmp;					\
	__typeof__(x) __x;					\
	__asm__ __volatile__ (					\
		"1:\n"						\
		"	" insn " %1, %3\n"			\
		"2:\n"						\
		"	.section .fixup,\"ax\"\n"		\
		"	.balign 4\n"				\
		"3:\n"						\
		"	li %0, %4\n"				\
		"	li %1, 0\n"				\
		"	jump 2b, %2\n"				\
		"	.previous\n"				\
		"	.section __ex_table,\"a\"\n"		\
		"	.balign " RISCV_SZPTR "\n"			\
		"	" RISCV_PTR " 1b, 3b\n"			\
		"	.previous"				\
		: "+r" (err), "=&r" (__x), "=r" (__tmp)		\
		: "m" (*(ptr)), "i" (-EFAULT));			\
	(x) = __x;						\
} while (0)
104 
#ifdef CONFIG_64BIT
/* RV64: an 8-byte user load is a single "ld". */
#define __get_user_8(x, ptr, err) \
	__get_user_asm("ld", x, ptr, err)
#else /* !CONFIG_64BIT */
/*
 * RV32: an 8-byte user load is performed as two 32-bit loads (labels 1
 * and 2).  Either load may fault, so both have __ex_table entries that
 * point at the shared fixup (label 4), which sets err and zeroes both
 * halves.  The halves are recombined according to __LSW/__MSW word
 * order.  The (x)-(x) expression yields the promoted arithmetic type
 * of x, so the u64 result is narrowed exactly as a plain assignment
 * to x would narrow it.
 */
#define __get_user_8(x, ptr, err)				\
do {								\
	u32 __user *__ptr = (u32 __user *)(ptr);		\
	u32 __lo, __hi;						\
	uintptr_t __tmp;					\
	__asm__ __volatile__ (					\
		"1:\n"						\
		"	lw %1, %4\n"				\
		"2:\n"						\
		"	lw %2, %5\n"				\
		"3:\n"						\
		"	.section .fixup,\"ax\"\n"		\
		"	.balign 4\n"				\
		"4:\n"						\
		"	li %0, %6\n"				\
		"	li %1, 0\n"				\
		"	li %2, 0\n"				\
		"	jump 3b, %3\n"				\
		"	.previous\n"				\
		"	.section __ex_table,\"a\"\n"		\
		"	.balign " RISCV_SZPTR "\n"			\
		"	" RISCV_PTR " 1b, 4b\n"			\
		"	" RISCV_PTR " 2b, 4b\n"			\
		"	.previous"				\
		: "+r" (err), "=&r" (__lo), "=r" (__hi),	\
			"=r" (__tmp)				\
		: "m" (__ptr[__LSW]), "m" (__ptr[__MSW]),	\
			"i" (-EFAULT));				\
	(x) = (__typeof__(x))((__typeof__((x)-(x)))(		\
		(((u64)__hi << 32) | __lo)));			\
} while (0)
#endif /* CONFIG_64BIT */
141 
/*
 * Dispatch on the pointee size to the matching load-with-fixup macro.
 * Any size other than 1/2/4/8 is a compile-time error.  No address
 * check and no SUM window management is done here; callers handle both.
 */
#define __get_user_nocheck(x, __gu_ptr, __gu_err)		\
do {								\
	switch (sizeof(*__gu_ptr)) {				\
	case 1:							\
		__get_user_asm("lb", (x), __gu_ptr, __gu_err);	\
		break;						\
	case 2:							\
		__get_user_asm("lh", (x), __gu_ptr, __gu_err);	\
		break;						\
	case 4:							\
		__get_user_asm("lw", (x), __gu_ptr, __gu_err);	\
		break;						\
	case 8:							\
		__get_user_8((x), __gu_ptr, __gu_err);	\
		break;						\
	default:						\
		BUILD_BUG();					\
	}							\
} while (0)
161 
162 /**
163  * __get_user: - Get a simple variable from user space, with less checking.
164  * @x:   Variable to store result.
165  * @ptr: Source address, in user space.
166  *
167  * Context: User context only.  This function may sleep.
168  *
169  * This macro copies a single simple variable from user space to kernel
170  * space.  It supports simple types like char and int, but not larger
171  * data types like structures or arrays.
172  *
173  * @ptr must have pointer-to-simple-variable type, and the result of
174  * dereferencing @ptr must be assignable to @x without a cast.
175  *
176  * Caller must check the pointer with access_ok() before calling this
177  * function.
178  *
179  * Returns zero on success, or -EFAULT on error.
180  * On error, the variable @x is set to zero.
181  */
#define __get_user(x, ptr)					\
({								\
	const __typeof__(*(ptr)) __user *__gu_ptr = (ptr);	\
	long __gu_err = 0;					\
								\
	__chk_user_ptr(__gu_ptr);				\
								\
	/* Keep the SUM window open only around the access. */	\
	__enable_user_access();					\
	__get_user_nocheck(x, __gu_ptr, __gu_err);		\
	__disable_user_access();				\
								\
	__gu_err;						\
})
195 
196 /**
197  * get_user: - Get a simple variable from user space.
198  * @x:   Variable to store result.
199  * @ptr: Source address, in user space.
200  *
201  * Context: User context only.  This function may sleep.
202  *
203  * This macro copies a single simple variable from user space to kernel
204  * space.  It supports simple types like char and int, but not larger
205  * data types like structures or arrays.
206  *
207  * @ptr must have pointer-to-simple-variable type, and the result of
208  * dereferencing @ptr must be assignable to @x without a cast.
209  *
210  * Returns zero on success, or -EFAULT on error.
211  * On error, the variable @x is set to zero.
212  */
#define get_user(x, ptr)					\
({								\
	const __typeof__(*(ptr)) __user *__gu_p = (ptr);	\
	might_fault();						\
	!access_ok(__gu_p, sizeof(*__gu_p)) ?			\
		((x) = 0, -EFAULT) :				\
		__get_user((x), __gu_p);			\
})
221 
/*
 * Store a single 1/2/4/8-byte value to user memory with fault fixup.
 *
 * @insn: store mnemonic ("sb", "sh", "sw" or "sd")
 * @x:    value to store (copied into __x before the access)
 * @ptr:  user pointer to store to
 * @err:  lvalue set to -EFAULT on fault, left unchanged on success
 *
 * The "rJ" constraint with the %z modifier lets a constant-zero value
 * be emitted as the zero register.  A fault on the store at label 1 is
 * routed by the __ex_table entry to the fixup at label 3, which sets
 * err and jumps back to label 2 using %1 (__tmp) as scratch.
 */
#define __put_user_asm(insn, x, ptr, err)			\
do {								\
	uintptr_t __tmp;					\
	__typeof__(*(ptr)) __x = x;				\
	__asm__ __volatile__ (					\
		"1:\n"						\
		"	" insn " %z3, %2\n"			\
		"2:\n"						\
		"	.section .fixup,\"ax\"\n"		\
		"	.balign 4\n"				\
		"3:\n"						\
		"	li %0, %4\n"				\
		"	jump 2b, %1\n"				\
		"	.previous\n"				\
		"	.section __ex_table,\"a\"\n"		\
		"	.balign " RISCV_SZPTR "\n"			\
		"	" RISCV_PTR " 1b, 3b\n"			\
		"	.previous"				\
		: "+r" (err), "=r" (__tmp), "=m" (*(ptr))	\
		: "rJ" (__x), "i" (-EFAULT));			\
} while (0)
243 
#ifdef CONFIG_64BIT
/* RV64: an 8-byte user store is a single "sd". */
#define __put_user_8(x, ptr, err) \
	__put_user_asm("sd", x, ptr, err)
#else /* !CONFIG_64BIT */
/*
 * RV32: an 8-byte user store is performed as two 32-bit stores (labels
 * 1 and 2), low word then high word per __LSW/__MSW.  Either store may
 * fault, so both have __ex_table entries pointing at the shared fixup
 * (label 4), which just sets err — unlike the load path there is no
 * destination to zero.  The (x)-(x) cast converts x through its own
 * promoted arithmetic type before widening to u64.
 */
#define __put_user_8(x, ptr, err)				\
do {								\
	u32 __user *__ptr = (u32 __user *)(ptr);		\
	u64 __x = (__typeof__((x)-(x)))(x);			\
	uintptr_t __tmp;					\
	__asm__ __volatile__ (					\
		"1:\n"						\
		"	sw %z4, %2\n"				\
		"2:\n"						\
		"	sw %z5, %3\n"				\
		"3:\n"						\
		"	.section .fixup,\"ax\"\n"		\
		"	.balign 4\n"				\
		"4:\n"						\
		"	li %0, %6\n"				\
		"	jump 3b, %1\n"				\
		"	.previous\n"				\
		"	.section __ex_table,\"a\"\n"		\
		"	.balign " RISCV_SZPTR "\n"			\
		"	" RISCV_PTR " 1b, 4b\n"			\
		"	" RISCV_PTR " 2b, 4b\n"			\
		"	.previous"				\
		: "+r" (err), "=r" (__tmp),			\
			"=m" (__ptr[__LSW]),			\
			"=m" (__ptr[__MSW])			\
		: "rJ" (__x), "rJ" (__x >> 32), "i" (-EFAULT));	\
} while (0)
#endif /* CONFIG_64BIT */
276 
/*
 * Dispatch on the pointee size to the matching store-with-fixup macro.
 * Any size other than 1/2/4/8 is a compile-time error.  No address
 * check and no SUM window management is done here; callers handle both.
 *
 * The pointer parameter was previously named __gu_ptr (a copy-paste
 * from the get-user side); renamed __pu_ptr for consistency with
 * __pu_err.  Macro parameters are positional, so callers are unaffected.
 */
#define __put_user_nocheck(x, __pu_ptr, __pu_err)		\
do {								\
	switch (sizeof(*__pu_ptr)) {				\
	case 1:							\
		__put_user_asm("sb", (x), __pu_ptr, __pu_err);	\
		break;						\
	case 2:							\
		__put_user_asm("sh", (x), __pu_ptr, __pu_err);	\
		break;						\
	case 4:							\
		__put_user_asm("sw", (x), __pu_ptr, __pu_err);	\
		break;						\
	case 8:							\
		__put_user_8((x), __pu_ptr, __pu_err);		\
		break;						\
	default:						\
		BUILD_BUG();					\
	}							\
} while (0)
296 
297 /**
298  * __put_user: - Write a simple value into user space, with less checking.
299  * @x:   Value to copy to user space.
300  * @ptr: Destination address, in user space.
301  *
302  * Context: User context only.  This function may sleep.
303  *
304  * This macro copies a single simple value from kernel space to user
305  * space.  It supports simple types like char and int, but not larger
306  * data types like structures or arrays.
307  *
308  * @ptr must have pointer-to-simple-variable type, and @x must be assignable
309  * to the result of dereferencing @ptr. The value of @x is copied to avoid
310  * re-ordering where @x is evaluated inside the block that enables user-space
311  * access (thus bypassing user space protection if @x is a function).
312  *
313  * Caller must check the pointer with access_ok() before calling this
314  * function.
315  *
316  * Returns zero on success, or -EFAULT on error.
317  */
#define __put_user(x, ptr)					\
({								\
	__typeof__(*(ptr)) __user *__gu_ptr = (ptr);		\
	/* Evaluate x before opening the user-access window	\
	 * (see the kernel-doc above for why).			\
	 */							\
	__typeof__(*__gu_ptr) __val = (x);			\
	long __pu_err = 0;					\
								\
	__chk_user_ptr(__gu_ptr);				\
								\
	__enable_user_access();					\
	__put_user_nocheck(__val, __gu_ptr, __pu_err);		\
	__disable_user_access();				\
								\
	__pu_err;						\
})
332 
333 /**
334  * put_user: - Write a simple value into user space.
335  * @x:   Value to copy to user space.
336  * @ptr: Destination address, in user space.
337  *
338  * Context: User context only.  This function may sleep.
339  *
340  * This macro copies a single simple value from kernel space to user
341  * space.  It supports simple types like char and int, but not larger
342  * data types like structures or arrays.
343  *
344  * @ptr must have pointer-to-simple-variable type, and @x must be assignable
345  * to the result of dereferencing @ptr.
346  *
347  * Returns zero on success, or -EFAULT on error.
348  */
#define put_user(x, ptr)					\
({								\
	__typeof__(*(ptr)) __user *__pu_p = (ptr);		\
	might_fault();						\
	!access_ok(__pu_p, sizeof(*__pu_p)) ?			\
		-EFAULT :					\
		__put_user((x), __pu_p);			\
})
357 
358 
/*
 * Bulk user copy primitives, implemented out of line in assembly.
 * Presumably they return the number of bytes that could NOT be copied
 * (the usual raw_copy_{to,from}_user contract) — confirm against the
 * assembly implementation.
 */
unsigned long __must_check __asm_copy_to_user(void __user *to,
	const void *from, unsigned long n);
unsigned long __must_check __asm_copy_from_user(void *to,
	const void __user *from, unsigned long n);
363 
364 static inline unsigned long
raw_copy_from_user(void * to,const void __user * from,unsigned long n)365 raw_copy_from_user(void *to, const void __user *from, unsigned long n)
366 {
367 	return __asm_copy_from_user(to, from, n);
368 }
369 
370 static inline unsigned long
raw_copy_to_user(void __user * to,const void * from,unsigned long n)371 raw_copy_to_user(void __user *to, const void *from, unsigned long n)
372 {
373 	return __asm_copy_to_user(to, from, n);
374 }
375 
/* Bounded user-string helpers, implemented out of line. */
extern long strncpy_from_user(char *dest, const char __user *src, long count);

extern long __must_check strnlen_user(const char __user *str, long n);

/* Zero @n bytes of user memory; no access_ok() check (see clear_user()). */
extern
unsigned long __must_check __clear_user(void __user *addr, unsigned long n);
382 
383 static inline
clear_user(void __user * to,unsigned long n)384 unsigned long __must_check clear_user(void __user *to, unsigned long n)
385 {
386 	might_fault();
387 	return access_ok(to, n) ?
388 		__clear_user(to, n) : n;
389 }
390 
/*
 * Atomic compare-and-exchange on user memory, with a fixup for
 * userspace faults.  Faults set "err" to -EFAULT; err is written
 * unconditionally (0 on the non-faulting path).  The previous value of
 * *ptr is returned whether or not the exchange happened.
 *
 * @lrb/@scb: ordering suffixes pasted onto the sc/lr mnemonics
 * respectively (e.g. ".aqrl").
 *
 * NOTE(review): the __ex_table entry covers label 1b (the exit label,
 * i.e. the first instruction *after* the LR/SC loop) rather than 0b,
 * where the faulting lr actually sits — verify this against the
 * exception-table lookup semantics.
 */
#define __cmpxchg_user(ptr, old, new, err, size, lrb, scb)	\
({								\
	__typeof__(ptr) __ptr = (ptr);				\
	__typeof__(*(ptr)) __old = (old);			\
	__typeof__(*(ptr)) __new = (new);			\
	__typeof__(*(ptr)) __ret;				\
	__typeof__(err) __err = 0;				\
	register unsigned int __rc;				\
	__enable_user_access();					\
	switch (size) {						\
	case 4:							\
		__asm__ __volatile__ (				\
		"0:\n"						\
		"	lr.w" #scb " %[ret], %[ptr]\n"		\
		"	bne          %[ret], %z[old], 1f\n"	\
		"	sc.w" #lrb " %[rc], %z[new], %[ptr]\n"	\
		"	bnez         %[rc], 0b\n"		\
		"1:\n"						\
		".section .fixup,\"ax\"\n"			\
		".balign 4\n"					\
		"2:\n"						\
		"	li %[err], %[efault]\n"			\
		"	jump 1b, %[rc]\n"			\
		".previous\n"					\
		".section __ex_table,\"a\"\n"			\
		".balign " RISCV_SZPTR "\n"			\
		"	" RISCV_PTR " 1b, 2b\n"			\
		".previous\n"					\
			: [ret] "=&r" (__ret),			\
			  [rc]  "=&r" (__rc),			\
			  [ptr] "+A" (*__ptr),			\
			  [err] "=&r" (__err)			\
			: [old] "rJ" (__old),			\
			  [new] "rJ" (__new),			\
			  [efault] "i" (-EFAULT));		\
		break;						\
	case 8:							\
		__asm__ __volatile__ (				\
		"0:\n"						\
		"	lr.d" #scb " %[ret], %[ptr]\n"		\
		"	bne          %[ret], %z[old], 1f\n"	\
		"	sc.d" #lrb " %[rc], %z[new], %[ptr]\n"	\
		"	bnez         %[rc], 0b\n"		\
		"1:\n"						\
		".section .fixup,\"ax\"\n"			\
		".balign 4\n"					\
		"2:\n"						\
		"	li %[err], %[efault]\n"			\
		"	jump 1b, %[rc]\n"			\
		".previous\n"					\
		".section __ex_table,\"a\"\n"			\
		".balign " RISCV_SZPTR "\n"			\
		"	" RISCV_PTR " 1b, 2b\n"			\
		".previous\n"					\
			: [ret] "=&r" (__ret),			\
			  [rc]  "=&r" (__rc),			\
			  [ptr] "+A" (*__ptr),			\
			  [err] "=&r" (__err)			\
			: [old] "rJ" (__old),			\
			  [new] "rJ" (__new),			\
			  [efault] "i" (-EFAULT));		\
		break;						\
	default:						\
		BUILD_BUG();					\
	}							\
	__disable_user_access();				\
	(err) = __err;						\
	__ret;							\
})
465 
#define HAVE_GET_KERNEL_NOFAULT

/*
 * Non-faulting kernel-space read: load *(type *)src into *(type *)dst,
 * branching to err_label on a fault instead of oopsing.
 *
 * Fix: __kr_err must start at 0.  __get_user_nocheck() passes it to
 * inline asm as a "+r" read-modify operand and only writes -EFAULT on
 * the fault path, so an uninitialized value would survive a successful
 * access and could spuriously branch to err_label.
 */
#define __get_kernel_nofault(dst, src, type, err_label)			\
do {									\
	long __kr_err = 0;						\
									\
	__get_user_nocheck(*((type *)(dst)), (type *)(src), __kr_err);	\
	if (unlikely(__kr_err))						\
		goto err_label;						\
} while (0)
476 
/*
 * Non-faulting kernel-space write: store *(type *)src into
 * *(type *)dst, branching to err_label on a fault instead of oopsing.
 *
 * Fix: __kr_err must start at 0.  __put_user_nocheck() passes it to
 * inline asm as a "+r" read-modify operand and only writes -EFAULT on
 * the fault path, so an uninitialized value would survive a successful
 * access and could spuriously branch to err_label.
 */
#define __put_kernel_nofault(dst, src, type, err_label)			\
do {									\
	long __kr_err = 0;						\
									\
	__put_user_nocheck(*((type *)(src)), (type *)(dst), __kr_err);	\
	if (unlikely(__kr_err))						\
		goto err_label;						\
} while (0)
485 
486 #else /* CONFIG_MMU */
487 #include <asm-generic/uaccess.h>
488 #endif /* CONFIG_MMU */
489 #endif /* _ASM_RISCV_UACCESS_H */
490