#include <xen/init.h>
#include <xen/list.h>
#include <xen/perfc.h>
#include <xen/rcupdate.h>
#include <xen/sort.h>
#include <xen/spinlock.h>
#include <asm/uaccess.h>
#include <xen/domain_page.h>
#include <xen/virtual_region.h>
#include <xen/livepatch.h>
#include <xen/warning.h>
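
/*
 * Exception table entries store both the faulting address (addr) and the
 * continuation address (cont) as offsets relative to the location of the
 * field itself, keeping the tables position independent.  EX_FIELD()
 * converts such a self-relative offset back into an absolute address by
 * adding the field's own address to the value it holds.
 */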
#define EX_FIELD(ptr, field) ((unsigned long)&(ptr)->field + (ptr)->field)

static inline unsigned long ex_addr(const struct exception_table_entry *x)
{
    return EX_FIELD(x, addr);
}

static inline unsigned long ex_cont(const struct exception_table_entry *x)
{
    return EX_FIELD(x, cont);
}
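
/*
 * sort() comparator: order entries by the absolute faulting address they
 * apply to.  Explicit comparisons are used rather than returning the
 * difference of the two addresses, since that difference may not fit in
 * the int that sort() expects.
 */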
static int init_or_livepatch cmp_ex(const void *a, const void *b)
{
    const struct exception_table_entry *l = a, *r = b;
    unsigned long lip = ex_addr(l);
    unsigned long rip = ex_addr(r);

    /* avoid overflow */
    if (lip > rip)
        return 1;
    if (lip < rip)
        return -1;
    return 0;
}
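
/*
 * sort() swap callback.  Because each field is self-relative, moving an
 * entry changes the base its offsets are computed against, so the stored
 * values must be rebased by the distance (delta) each entry moves.  The
 * #ifndef allows an arch to supply its own implementation.
 */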
#ifndef swap_ex
static void init_or_livepatch swap_ex(void *a, void *b, int size)
{
    struct exception_table_entry *l = a, *r = b, tmp;
    long delta = b - a;

    tmp = *l;
    l->addr = r->addr + delta;
    l->cont = r->cont + delta;
    r->addr = tmp.addr - delta;
    r->cont = tmp.cont - delta;
}
#endif
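
/*
 * Sort a single exception table by faulting address, so that lookups can
 * binary-search it.  Tagged init_or_livepatch: used both at boot and when
 * loading livepatch payloads, which carry their own tables.
 */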
void init_or_livepatch sort_exception_table(struct exception_table_entry *start,
                                 const struct exception_table_entry *stop)
{
    sort(start, stop - start,
         sizeof(struct exception_table_entry), cmp_ex, swap_ex);
}

void __init sort_exception_tables(void)
{
    sort_exception_table(__start___ex_table, __stop___ex_table);
    sort_exception_table(__start___pre_ex_table, __stop___pre_ex_table);
}
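
/*
 * Binary search of a sorted exception table over the inclusive range
 * [first, last].  Returns the continuation address for a matching
 * faulting address, or 0 if the table has no entry for it.
 */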
static unsigned long
search_one_extable(const struct exception_table_entry *first,
                   const struct exception_table_entry *last,
                   unsigned long value)
{
    const struct exception_table_entry *mid;
    long diff;

    while ( first <= last )
    {
        mid = (last - first) / 2 + first;
        diff = ex_addr(mid) - value;
        if (diff == 0)
            return ex_cont(mid);
        else if (diff < 0)
            first = mid+1;
        else
            last = mid-1;
    }
    return 0;
}
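
/*
 * Find the fixup address for a faulting %rip.  The common case consults
 * the exception table of the virtual region (core Xen, init text, or a
 * livepatch) containing %rip.  Failing that, a fault in the second half
 * of this CPU's stub buffer (where fault-prone emulation stubs are
 * placed) is recovered via the caller's table entry for the return
 * address found on the stack; the %rsp checks sanity-check that the
 * stack pointer lies between the exception frame and this CPU's cpu_info
 * block before it is dereferenced.
 */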
unsigned long
search_exception_table(const struct cpu_user_regs *regs)
{
    const struct virtual_region *region = find_text_region(regs->rip);
    unsigned long stub = this_cpu(stubs.addr);

    if ( region && region->ex )
        return search_one_extable(region->ex, region->ex_end - 1, regs->rip);

    if ( regs->rip >= stub + STUB_BUF_SIZE / 2 &&
         regs->rip < stub + STUB_BUF_SIZE &&
         regs->rsp > (unsigned long)regs &&
         regs->rsp < (unsigned long)get_cpu_info() )
    {
        unsigned long retptr = *(unsigned long *)regs->rsp;

        region = find_text_region(retptr);
        retptr = region && region->ex
                 ? search_one_extable(region->ex, region->ex_end - 1, retptr)
                 : 0;
        if ( retptr )
        {
            /*
             * Put trap number and error code on the stack (in place of the
             * original return address) for recovery code to pick up.
             */
            union stub_exception_token token = {
                .fields.ec = regs->error_code,
                .fields.trapnr = regs->entry_vector,
            };

            *(unsigned long *)regs->rsp = token.raw;
            return retptr;
        }
    }

    return 0;
}

#ifndef NDEBUG
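/*
 * Boot-time sanity check of the stub recovery path: copy short
 * instruction sequences which fault in different ways into this CPU's
 * stub buffer, call them, and verify that the fixup hands back the
 * expected trap number and error code token.
 */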
static int __init stub_selftest(void)
{
    static const struct {
        uint8_t opc[4];
        uint64_t rax;
        union stub_exception_token res;
    } tests[] __initconst = {
        { .opc = { 0x0f, 0xb9, 0xc3, 0xc3 }, /* ud1 */
          .res.fields.trapnr = TRAP_invalid_op },
        { .opc = { 0x90, 0x02, 0x00, 0xc3 }, /* nop; add (%rax),%al */
          .rax = 0x0123456789abcdef,
          .res.fields.trapnr = TRAP_gp_fault },
        { .opc = { 0x02, 0x04, 0x04, 0xc3 }, /* add (%rsp,%rax),%al */
          .rax = 0xfedcba9876543210,
          .res.fields.trapnr = TRAP_stack_error },
        { .opc = { 0xcc, 0xc3, 0xc3, 0xc3 }, /* int3 */
          .res.fields.trapnr = TRAP_int3 },
    };
    unsigned long addr = this_cpu(stubs.addr) + STUB_BUF_SIZE / 2;
    unsigned int i;
    bool fail = false;

    printk("Running stub recovery selftests...\n");

    for ( i = 0; i < ARRAY_SIZE(tests); ++i )
    {
        uint8_t *ptr = map_domain_page(_mfn(this_cpu(stubs.mfn))) +
            (addr & ~PAGE_MASK);
        union stub_exception_token res = { .raw = ~0 };

        memset(ptr, 0xcc, STUB_BUF_SIZE / 2);
        memcpy(ptr, tests[i].opc, ARRAY_SIZE(tests[i].opc));
        unmap_domain_page(ptr);
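
        /*
         * Call the stub.  If it faults, search_exception_table() replaces
         * the return address on the stack with the exception token and
         * resumes at .Lfix, which pops that token into res and rejoins
         * the normal path at .Lret.
         */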
        asm volatile ( "INDIRECT_CALL %[stb]\n"
                       ".Lret%=:\n\t"
                       ".pushsection .fixup,\"ax\"\n"
                       ".Lfix%=:\n\t"
                       "pop %[exn]\n\t"
                       "jmp .Lret%=\n\t"
                       ".popsection\n\t"
                       _ASM_EXTABLE(.Lret%=, .Lfix%=)
                       : [exn] "+m" (res) ASM_CALL_CONSTRAINT
                       : [stb] "r" (addr), "a" (tests[i].rax));

        if ( res.raw != tests[i].res.raw )
        {
            printk("Selftest %u failed: Opc %*ph "
                   "expected %u[%04x], got %u[%04x]\n",
                   i, (int)ARRAY_SIZE(tests[i].opc), tests[i].opc,
                   tests[i].res.fields.trapnr, tests[i].res.fields.ec,
                   res.fields.trapnr, res.fields.ec);

            fail = true;
        }
    }

    if ( fail )
        warning_add("SELFTEST FAILURE: CORRECT BEHAVIOR CANNOT BE GUARANTEED\n");

    return 0;
}
__initcall(stub_selftest);
#endif
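
/*
 * Lookup in the pre-exception table, consulted before normal exception
 * handling for instructions (such as the IRET returning to guest context)
 * which are annotated in the __pre_ex_table section.  Returns the fixup
 * address, or 0 if none is registered.
 */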
unsigned long
search_pre_exception_table(struct cpu_user_regs *regs)
{
    unsigned long addr = regs->rip;
    unsigned long fixup = search_one_extable(
        __start___pre_ex_table, __stop___pre_ex_table-1, addr);
    if ( fixup )
    {
        dprintk(XENLOG_INFO, "Pre-exception: %p -> %p\n", _p(addr), _p(fixup));
        perfc_incr(exception_fixed);
    }
    return fixup;
}