// SPDX-License-Identifier: GPL-2.0
// Copyright (C) 2020 ARM Limited

#define _GNU_SOURCE

#include <errno.h>
#include <fcntl.h>
#include <signal.h>
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <ucontext.h>
#include <unistd.h>
#include <sys/mman.h>

#include "kselftest.h"
#include "mte_common_util.h"
#include "mte_def.h"

static size_t page_sz;

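/*
 * Check kernel accesses (read() from a temp file) to a tagged user buffer:
 * first with a matching tag, then after re-tagging the second page so the
 * kernel's copy into user memory hits a tag mismatch.
 */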
static int check_usermem_access_fault(int mem_type, int mode, int mapping)
{
	int fd, i, err;
	char val = 'A';
	size_t len, read_len;
	void *ptr, *ptr_next;

	err = KSFT_FAIL;
	len = 2 * page_sz;
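	/* Select the tag-check-fault mode (sync or async) and allow non-zero tags */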
	mte_switch_mode(mode, MTE_ALLOW_NON_ZERO_TAG);
	fd = create_temp_file();
	if (fd == -1)
		return KSFT_FAIL;
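	/* Fill the temp file with 'val' so both pages have a known pattern */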
	for (i = 0; i < len; i++)
		if (write(fd, &val, sizeof(val)) != sizeof(val)) {
			close(fd);
			return KSFT_FAIL;
		}
	lseek(fd, 0, SEEK_SET);
	ptr = mte_allocate_memory(len, mem_type, mapping, true);
	if (check_allocated_memory(ptr, len, mem_type, true) != KSFT_PASS) {
		close(fd);
		return KSFT_FAIL;
	}
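	/* Initialise cur_mte_cxt so any tag fault on this range is recorded */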
	mte_initialize_current_context(mode, (uintptr_t)ptr, len);
	/* Copy from file into buffer with valid tag */
	read_len = read(fd, ptr, len);
	mte_wait_after_trig();
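	/* With a matching tag the read must neither fault nor come up short */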
	if (cur_mte_cxt.fault_valid || read_len < len)
		goto usermem_acc_err;
	/* Verify same pattern is read */
	for (i = 0; i < len; i++)
		if (*(char *)(ptr + i) != val)
			break;
	if (i < len)
		goto usermem_acc_err;

	/* Re-tag the second page with a different tag value */
	ptr_next = (void *)((unsigned long)ptr + page_sz);
	ptr_next = mte_insert_new_tag(ptr_next);
	mte_set_tag_address_range(ptr_next, page_sz);

	lseek(fd, 0, SEEK_SET);
	/* Copy from file again; the pointer's tag no longer matches the second page */
	read_len = read(fd, ptr, len);
	mte_wait_after_trig();
	/*
	 * A kernel access to user memory with a mismatched tag should make
	 * read() fail (return a short count) without raising a fault in sync
	 * mode, but may not fail at all in async mode, as per the MTE
	 * userspace support implemented in the arm64 kernel.
	 */
	if (mode == MTE_SYNC_ERR &&
	    !cur_mte_cxt.fault_valid && read_len < len) {
		err = KSFT_PASS;
	} else if (mode == MTE_ASYNC_ERR &&
		   !cur_mte_cxt.fault_valid && read_len == len) {
		err = KSFT_PASS;
	}
usermem_acc_err:
	mte_free_memory((void *)ptr, len, mem_type, true);
	close(fd);
	return err;
}

int main(int argc, char *argv[])
{
	int err;

	page_sz = getpagesize();
	if (!page_sz) {
		ksft_print_msg("ERR: Unable to get page size\n");
		return KSFT_FAIL;
	}
	err = mte_default_setup();
	if (err)
		return err;

	/* Register signal handlers */
	mte_register_signal(SIGSEGV, mte_default_handler);

	/* Set test plan */
	ksft_set_plan(4);

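	/* Exercise sync and async tag-check modes over private and shared mmap buffers */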
	evaluate_test(check_usermem_access_fault(USE_MMAP, MTE_SYNC_ERR, MAP_PRIVATE),
		"Check memory access from kernel in sync mode, private mapping and mmap memory\n");
	evaluate_test(check_usermem_access_fault(USE_MMAP, MTE_SYNC_ERR, MAP_SHARED),
		"Check memory access from kernel in sync mode, shared mapping and mmap memory\n");

	evaluate_test(check_usermem_access_fault(USE_MMAP, MTE_ASYNC_ERR, MAP_PRIVATE),
		"Check memory access from kernel in async mode, private mapping and mmap memory\n");
	evaluate_test(check_usermem_access_fault(USE_MMAP, MTE_ASYNC_ERR, MAP_SHARED),
		"Check memory access from kernel in async mode, shared mapping and mmap memory\n");

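	/* Restore the original MTE settings and report the result counts */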
	mte_restore_setup();
	ksft_print_cnts();
	return ksft_get_fail_cnt() == 0 ? KSFT_PASS : KSFT_FAIL;
}