1 // SPDX-License-Identifier: GPL-2.0
2 // Copyright (C) 2020 ARM Limited
3 
4 #include <fcntl.h>
5 #include <sched.h>
6 #include <signal.h>
7 #include <stdio.h>
8 #include <stdlib.h>
9 #include <unistd.h>
10 
11 #include <linux/auxvec.h>
12 #include <sys/auxv.h>
13 #include <sys/mman.h>
14 #include <sys/prctl.h>
15 
16 #include <asm/hwcap.h>
17 
18 #include "kselftest.h"
19 #include "mte_common_util.h"
20 #include "mte_def.h"
21 
22 #define INIT_BUFFER_SIZE       256
23 
24 struct mte_fault_cxt cur_mte_cxt;
25 static unsigned int mte_cur_mode;
26 static unsigned int mte_cur_pstate_tco;
27 
mte_default_handler(int signum,siginfo_t * si,void * uc)28 void mte_default_handler(int signum, siginfo_t *si, void *uc)
29 {
30 	unsigned long addr = (unsigned long)si->si_addr;
31 
32 	if (signum == SIGSEGV) {
33 #ifdef DEBUG
34 		ksft_print_msg("INFO: SIGSEGV signal at pc=%lx, fault addr=%lx, si_code=%lx\n",
35 				((ucontext_t *)uc)->uc_mcontext.pc, addr, si->si_code);
36 #endif
37 		if (si->si_code == SEGV_MTEAERR) {
38 			if (cur_mte_cxt.trig_si_code == si->si_code)
39 				cur_mte_cxt.fault_valid = true;
40 			return;
41 		}
42 		/* Compare the context for precise error */
43 		else if (si->si_code == SEGV_MTESERR) {
44 			if (cur_mte_cxt.trig_si_code == si->si_code &&
45 			    ((cur_mte_cxt.trig_range >= 0 &&
46 			      addr >= MT_CLEAR_TAG(cur_mte_cxt.trig_addr) &&
47 			      addr <= (MT_CLEAR_TAG(cur_mte_cxt.trig_addr) + cur_mte_cxt.trig_range)) ||
48 			     (cur_mte_cxt.trig_range < 0 &&
49 			      addr <= MT_CLEAR_TAG(cur_mte_cxt.trig_addr) &&
50 			      addr >= (MT_CLEAR_TAG(cur_mte_cxt.trig_addr) + cur_mte_cxt.trig_range)))) {
51 				cur_mte_cxt.fault_valid = true;
52 				/* Adjust the pc by 4 */
53 				((ucontext_t *)uc)->uc_mcontext.pc += 4;
54 			} else {
55 				ksft_print_msg("Invalid MTE synchronous exception caught!\n");
56 				exit(1);
57 			}
58 		} else {
59 			ksft_print_msg("Unknown SIGSEGV exception caught!\n");
60 			exit(1);
61 		}
62 	} else if (signum == SIGBUS) {
63 		ksft_print_msg("INFO: SIGBUS signal at pc=%lx, fault addr=%lx, si_code=%lx\n",
64 				((ucontext_t *)uc)->uc_mcontext.pc, addr, si->si_code);
65 		if ((cur_mte_cxt.trig_range >= 0 &&
66 		     addr >= MT_CLEAR_TAG(cur_mte_cxt.trig_addr) &&
67 		     addr <= (MT_CLEAR_TAG(cur_mte_cxt.trig_addr) + cur_mte_cxt.trig_range)) ||
68 		    (cur_mte_cxt.trig_range < 0 &&
69 		     addr <= MT_CLEAR_TAG(cur_mte_cxt.trig_addr) &&
70 		     addr >= (MT_CLEAR_TAG(cur_mte_cxt.trig_addr) + cur_mte_cxt.trig_range))) {
71 			cur_mte_cxt.fault_valid = true;
72 			/* Adjust the pc by 4 */
73 			((ucontext_t *)uc)->uc_mcontext.pc += 4;
74 		}
75 	}
76 }
77 
/*
 * Install @handler as a three-argument (SA_SIGINFO) handler for @signal.
 * The whole struct is zero-initialized so no field (e.g. sa_restorer on
 * glibc) is left indeterminate, and sigaction() failures are reported
 * instead of being silently ignored.
 */
void mte_register_signal(int signal, void (*handler)(int, siginfo_t *, void *))
{
	struct sigaction sa = { 0 };

	sa.sa_sigaction = handler;
	sa.sa_flags = SA_SIGINFO;
	sigemptyset(&sa.sa_mask);
	if (sigaction(signal, &sa, NULL))
		perror("sigaction");
}
87 
/*
 * Called after triggering a tag-check fault.  Yields the CPU, presumably so
 * a pending asynchronous MTE fault gets delivered before the caller
 * inspects cur_mte_cxt — confirm against the test flow.
 */
void mte_wait_after_trig(void)
{
	sched_yield();
}
92 
mte_insert_tags(void * ptr,size_t size)93 void *mte_insert_tags(void *ptr, size_t size)
94 {
95 	void *tag_ptr;
96 	int align_size;
97 
98 	if (!ptr || (unsigned long)(ptr) & MT_ALIGN_GRANULE) {
99 		ksft_print_msg("FAIL: Addr=%lx: invalid\n", ptr);
100 		return NULL;
101 	}
102 	align_size = MT_ALIGN_UP(size);
103 	tag_ptr = mte_insert_random_tag(ptr);
104 	mte_set_tag_address_range(tag_ptr, align_size);
105 	return tag_ptr;
106 }
107 
mte_clear_tags(void * ptr,size_t size)108 void mte_clear_tags(void *ptr, size_t size)
109 {
110 	if (!ptr || (unsigned long)(ptr) & MT_ALIGN_GRANULE) {
111 		ksft_print_msg("FAIL: Addr=%lx: invalid\n", ptr);
112 		return;
113 	}
114 	size = MT_ALIGN_UP(size);
115 	ptr = (void *)MT_CLEAR_TAG((unsigned long)ptr);
116 	mte_clear_tag_address_range(ptr, size);
117 }
118 
__mte_allocate_memory_range(size_t size,int mem_type,int mapping,size_t range_before,size_t range_after,bool tags,int fd)119 static void *__mte_allocate_memory_range(size_t size, int mem_type, int mapping,
120 					 size_t range_before, size_t range_after,
121 					 bool tags, int fd)
122 {
123 	void *ptr;
124 	int prot_flag, map_flag;
125 	size_t entire_size = size + range_before + range_after;
126 
127 	if (mem_type != USE_MALLOC && mem_type != USE_MMAP &&
128 	    mem_type != USE_MPROTECT) {
129 		ksft_print_msg("FAIL: Invalid allocate request\n");
130 		return NULL;
131 	}
132 	if (mem_type == USE_MALLOC)
133 		return malloc(entire_size) + range_before;
134 
135 	prot_flag = PROT_READ | PROT_WRITE;
136 	if (mem_type == USE_MMAP)
137 		prot_flag |= PROT_MTE;
138 
139 	map_flag = mapping;
140 	if (fd == -1)
141 		map_flag = MAP_ANONYMOUS | map_flag;
142 	if (!(mapping & MAP_SHARED))
143 		map_flag |= MAP_PRIVATE;
144 	ptr = mmap(NULL, entire_size, prot_flag, map_flag, fd, 0);
145 	if (ptr == MAP_FAILED) {
146 		ksft_print_msg("FAIL: mmap allocation\n");
147 		return NULL;
148 	}
149 	if (mem_type == USE_MPROTECT) {
150 		if (mprotect(ptr, entire_size, prot_flag | PROT_MTE)) {
151 			munmap(ptr, size);
152 			ksft_print_msg("FAIL: mprotect PROT_MTE property\n");
153 			return NULL;
154 		}
155 	}
156 	if (tags)
157 		ptr = mte_insert_tags(ptr + range_before, size);
158 	return ptr;
159 }
160 
mte_allocate_memory_tag_range(size_t size,int mem_type,int mapping,size_t range_before,size_t range_after)161 void *mte_allocate_memory_tag_range(size_t size, int mem_type, int mapping,
162 				    size_t range_before, size_t range_after)
163 {
164 	return __mte_allocate_memory_range(size, mem_type, mapping, range_before,
165 					   range_after, true, -1);
166 }
167 
/* Anonymous allocation with no guard ranges; tagging is optional. */
void *mte_allocate_memory(size_t size, int mem_type, int mapping, bool tags)
{
	return __mte_allocate_memory_range(size, mem_type, mapping,
					   0, 0, tags, -1);
}
172 
mte_allocate_file_memory(size_t size,int mem_type,int mapping,bool tags,int fd)173 void *mte_allocate_file_memory(size_t size, int mem_type, int mapping, bool tags, int fd)
174 {
175 	int index;
176 	char buffer[INIT_BUFFER_SIZE];
177 
178 	if (mem_type != USE_MPROTECT && mem_type != USE_MMAP) {
179 		ksft_print_msg("FAIL: Invalid mmap file request\n");
180 		return NULL;
181 	}
182 	/* Initialize the file for mappable size */
183 	lseek(fd, 0, SEEK_SET);
184 	for (index = INIT_BUFFER_SIZE; index < size; index += INIT_BUFFER_SIZE) {
185 		if (write(fd, buffer, INIT_BUFFER_SIZE) != INIT_BUFFER_SIZE) {
186 			perror("initialising buffer");
187 			return NULL;
188 		}
189 	}
190 	index -= INIT_BUFFER_SIZE;
191 	if (write(fd, buffer, size - index) != size - index) {
192 		perror("initialising buffer");
193 		return NULL;
194 	}
195 	return __mte_allocate_memory_range(size, mem_type, mapping, 0, 0, tags, fd);
196 }
197 
mte_allocate_file_memory_tag_range(size_t size,int mem_type,int mapping,size_t range_before,size_t range_after,int fd)198 void *mte_allocate_file_memory_tag_range(size_t size, int mem_type, int mapping,
199 					 size_t range_before, size_t range_after, int fd)
200 {
201 	int index;
202 	char buffer[INIT_BUFFER_SIZE];
203 	int map_size = size + range_before + range_after;
204 
205 	if (mem_type != USE_MPROTECT && mem_type != USE_MMAP) {
206 		ksft_print_msg("FAIL: Invalid mmap file request\n");
207 		return NULL;
208 	}
209 	/* Initialize the file for mappable size */
210 	lseek(fd, 0, SEEK_SET);
211 	for (index = INIT_BUFFER_SIZE; index < map_size; index += INIT_BUFFER_SIZE)
212 		if (write(fd, buffer, INIT_BUFFER_SIZE) != INIT_BUFFER_SIZE) {
213 			perror("initialising buffer");
214 			return NULL;
215 		}
216 	index -= INIT_BUFFER_SIZE;
217 	if (write(fd, buffer, map_size - index) != map_size - index) {
218 		perror("initialising buffer");
219 		return NULL;
220 	}
221 	return __mte_allocate_memory_range(size, mem_type, mapping, range_before,
222 					   range_after, true, fd);
223 }
224 
__mte_free_memory_range(void * ptr,size_t size,int mem_type,size_t range_before,size_t range_after,bool tags)225 static void __mte_free_memory_range(void *ptr, size_t size, int mem_type,
226 				    size_t range_before, size_t range_after, bool tags)
227 {
228 	switch (mem_type) {
229 	case USE_MALLOC:
230 		free(ptr - range_before);
231 		break;
232 	case USE_MMAP:
233 	case USE_MPROTECT:
234 		if (tags)
235 			mte_clear_tags(ptr, size);
236 		munmap(ptr - range_before, size + range_before + range_after);
237 		break;
238 	default:
239 		ksft_print_msg("FAIL: Invalid free request\n");
240 		break;
241 	}
242 }
243 
mte_free_memory_tag_range(void * ptr,size_t size,int mem_type,size_t range_before,size_t range_after)244 void mte_free_memory_tag_range(void *ptr, size_t size, int mem_type,
245 			       size_t range_before, size_t range_after)
246 {
247 	__mte_free_memory_range(ptr, size, mem_type, range_before, range_after, true);
248 }
249 
/* Free an allocation that has no guard ranges; tag clearing is optional. */
void mte_free_memory(void *ptr, size_t size, int mem_type, bool tags)
{
	__mte_free_memory_range(ptr, size, mem_type, 0, 0, tags);
}
254 
/*
 * Record the fault the next test step expects: the trigger address and
 * range checked by mte_default_handler(), and the si_code matching @mode
 * (0 for modes that raise no signal).  Clears any previous fault result.
 */
void mte_initialize_current_context(int mode, uintptr_t ptr, ssize_t range)
{
	cur_mte_cxt.fault_valid = false;
	cur_mte_cxt.trig_addr = ptr;
	cur_mte_cxt.trig_range = range;

	switch (mode) {
	case MTE_SYNC_ERR:
		cur_mte_cxt.trig_si_code = SEGV_MTESERR;
		break;
	case MTE_ASYNC_ERR:
		cur_mte_cxt.trig_si_code = SEGV_MTEAERR;
		break;
	default:
		cur_mte_cxt.trig_si_code = 0;
		break;
	}
}
267 
mte_switch_mode(int mte_option,unsigned long incl_mask)268 int mte_switch_mode(int mte_option, unsigned long incl_mask)
269 {
270 	unsigned long en = 0;
271 
272 	if (!(mte_option == MTE_SYNC_ERR || mte_option == MTE_ASYNC_ERR ||
273 	      mte_option == MTE_NONE_ERR || incl_mask <= MTE_ALLOW_NON_ZERO_TAG)) {
274 		ksft_print_msg("FAIL: Invalid mte config option\n");
275 		return -EINVAL;
276 	}
277 	en = PR_TAGGED_ADDR_ENABLE;
278 	if (mte_option == MTE_SYNC_ERR)
279 		en |= PR_MTE_TCF_SYNC;
280 	else if (mte_option == MTE_ASYNC_ERR)
281 		en |= PR_MTE_TCF_ASYNC;
282 	else if (mte_option == MTE_NONE_ERR)
283 		en |= PR_MTE_TCF_NONE;
284 
285 	en |= (incl_mask << PR_MTE_TAG_SHIFT);
286 	/* Enable address tagging ABI, mte error reporting mode and tag inclusion mask. */
287 	if (prctl(PR_SET_TAGGED_ADDR_CTRL, en, 0, 0, 0) != 0) {
288 		ksft_print_msg("FAIL:prctl PR_SET_TAGGED_ADDR_CTRL for mte mode\n");
289 		return -EINVAL;
290 	}
291 	return 0;
292 }
293 
mte_default_setup(void)294 int mte_default_setup(void)
295 {
296 	unsigned long hwcaps2 = getauxval(AT_HWCAP2);
297 	unsigned long en = 0;
298 	int ret;
299 
300 	if (!(hwcaps2 & HWCAP2_MTE)) {
301 		ksft_print_msg("SKIP: MTE features unavailable\n");
302 		return KSFT_SKIP;
303 	}
304 	/* Get current mte mode */
305 	ret = prctl(PR_GET_TAGGED_ADDR_CTRL, en, 0, 0, 0);
306 	if (ret < 0) {
307 		ksft_print_msg("FAIL:prctl PR_GET_TAGGED_ADDR_CTRL with error =%d\n", ret);
308 		return KSFT_FAIL;
309 	}
310 	if (ret & PR_MTE_TCF_SYNC)
311 		mte_cur_mode = MTE_SYNC_ERR;
312 	else if (ret & PR_MTE_TCF_ASYNC)
313 		mte_cur_mode = MTE_ASYNC_ERR;
314 	else if (ret & PR_MTE_TCF_NONE)
315 		mte_cur_mode = MTE_NONE_ERR;
316 
317 	mte_cur_pstate_tco = mte_get_pstate_tco();
318 	/* Disable PSTATE.TCO */
319 	mte_disable_pstate_tco();
320 	return 0;
321 }
322 
mte_restore_setup(void)323 void mte_restore_setup(void)
324 {
325 	mte_switch_mode(mte_cur_mode, MTE_ALLOW_NON_ZERO_TAG);
326 	if (mte_cur_pstate_tco == MT_PSTATE_TCO_EN)
327 		mte_enable_pstate_tco();
328 	else if (mte_cur_pstate_tco == MT_PSTATE_TCO_DIS)
329 		mte_disable_pstate_tco();
330 }
331 
/*
 * Create an anonymous temporary file in tmpfs (/dev/shm) and return its
 * file descriptor; the name is unlinked immediately so the open fd is the
 * only reference.  Returns 0 on failure — callers test for 0, so the
 * convention is kept even though 0 is technically a valid fd.
 */
int create_temp_file(void)
{
	char template[] = "/dev/shm/tmp_XXXXXX";
	int fd = mkstemp(template);

	if (fd == -1) {
		perror(template);
		ksft_print_msg("FAIL: Unable to open temporary file\n");
		return 0;
	}
	/* Unlink now; the inode stays alive until fd is closed. */
	unlink(template);
	return fd;
}
347