// SPDX-License-Identifier: GPL-2.0
/*
 * This is for all the tests related to logic bugs (e.g. bad dereferences,
 * bad alignment, bad loops, bad locking, bad scheduling, deep stacks, and
 * lockups) along with other things that don't fit well into existing LKDTM
 * test source files.
 */
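
/*
 * Each test below is normally triggered by writing its name to the
 * lkdtm debugfs interface (assuming debugfs is mounted in the usual
 * location), e.g.:
 *
 *	echo EXCEPTION > /sys/kernel/debug/provoke-crash/DIRECT
 */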
#include "lkdtm.h"
#include <linux/list.h>
#include <linux/sched.h>
#include <linux/sched/signal.h>
#include <linux/sched/task_stack.h>
#include <linux/uaccess.h>
#include <linux/slab.h>

#if IS_ENABLED(CONFIG_X86_32) && !IS_ENABLED(CONFIG_UML)
#include <asm/desc.h>
#endif

struct lkdtm_list {
	struct list_head node;
};

/*
 * Make sure our attempts to overrun the kernel stack don't trigger
 * a compiler warning when CONFIG_FRAME_WARN is set. Then make sure we
 * recurse past the end of THREAD_SIZE by default.
 */
#if defined(CONFIG_FRAME_WARN) && (CONFIG_FRAME_WARN > 0)
#define REC_STACK_SIZE (_AC(CONFIG_FRAME_WARN, UL) / 2)
#else
#define REC_STACK_SIZE (THREAD_SIZE / 8)
#endif
#define REC_NUM_DEFAULT ((THREAD_SIZE / REC_STACK_SIZE) * 2)
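
/*
 * Worked example with illustrative (not universal) values: assuming a
 * 16kB THREAD_SIZE and CONFIG_FRAME_WARN=1024, each recursion frame is
 * REC_STACK_SIZE = 1024 / 2 = 512 bytes, and REC_NUM_DEFAULT =
 * (16384 / 512) * 2 = 64 calls, i.e. about twice as many frames as the
 * stack can hold.
 */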

static int recur_count = REC_NUM_DEFAULT;

static DEFINE_SPINLOCK(lock_me_up);

/*
 * Make sure compiler does not optimize this function or stack frame away:
 * - function marked noinline
 * - stack variables are marked volatile
 * - stack variables are written (memset()) and read (pr_info())
 * - function has external effects (pr_info())
 */
static int noinline recursive_loop(int remaining)
{
	volatile char buf[REC_STACK_SIZE];

	memset((void *)buf, remaining & 0xFF, sizeof(buf));
	pr_info("loop %d/%d ...\n", (int)buf[remaining % sizeof(buf)],
		recur_count);
	if (!remaining)
		return 0;
	else
		return recursive_loop(remaining - 1);
}

/* If the depth is negative, use the default, otherwise keep parameter. */
void __init lkdtm_bugs_init(int *recur_param)
{
	if (*recur_param < 0)
		*recur_param = recur_count;
	else
		recur_count = *recur_param;
}

void lkdtm_PANIC(void)
{
	panic("dumptest");
}

void lkdtm_BUG(void)
{
	BUG();
}

static int warn_counter;

void lkdtm_WARNING(void)
{
	WARN_ON(++warn_counter);
}

void lkdtm_WARNING_MESSAGE(void)
{
	WARN(1, "Warning message trigger count: %d\n", ++warn_counter);
}

void lkdtm_EXCEPTION(void)
{
	*((volatile int *) 0) = 0;
}

void lkdtm_LOOP(void)
{
	for (;;)
		;
}

void lkdtm_EXHAUST_STACK(void)
{
	pr_info("Calling function with %lu frame size to depth %d ...\n",
		REC_STACK_SIZE, recur_count);
	recursive_loop(recur_count);
	pr_info("FAIL: survived without exhausting stack?!\n");
}

static noinline void __lkdtm_CORRUPT_STACK(void *stack)
{
	memset(stack, '\xff', 64);
}

/* This should trip the stack canary, not corrupt the return address. */
noinline void lkdtm_CORRUPT_STACK(void)
{
	/* Use default char array length that triggers stack protection. */
	char data[8] __aligned(sizeof(void *));

	pr_info("Corrupting stack containing char array ...\n");
	__lkdtm_CORRUPT_STACK((void *)&data);
}
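
/*
 * Background, assuming GCC's default heuristics: -fstack-protector only
 * instruments functions containing char arrays of at least
 * "--param ssp-buffer-size" bytes (8 by default), hence the
 * char data[8] above; -fstack-protector-strong additionally covers
 * address-taken locals such as the union used in the next test.
 */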

/* Same as above but will only get a canary with -fstack-protector-strong */
noinline void lkdtm_CORRUPT_STACK_STRONG(void)
{
	union {
		unsigned short shorts[4];
		unsigned long *ptr;
	} data __aligned(sizeof(void *));

	pr_info("Corrupting stack containing union ...\n");
	__lkdtm_CORRUPT_STACK((void *)&data);
}

static pid_t stack_pid;
static unsigned long stack_addr;

void lkdtm_REPORT_STACK(void)
{
	volatile uintptr_t magic;
	pid_t pid = task_pid_nr(current);

	if (pid != stack_pid) {
		pr_info("Starting stack offset tracking for pid %d\n", pid);
		stack_pid = pid;
		stack_addr = (uintptr_t)&magic;
	}

	pr_info("Stack offset: %d\n", (int)(stack_addr - (uintptr_t)&magic));
}
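
/*
 * Minimal usage sketch (assuming the standard lkdtm debugfs trigger,
 * written from the same shell so the pid stays constant): with kernel
 * stack offset randomization active, the reported offset should vary
 * between writes; without it, it should stay 0.
 *
 *	echo REPORT_STACK > /sys/kernel/debug/provoke-crash/DIRECT
 *	echo REPORT_STACK > /sys/kernel/debug/provoke-crash/DIRECT
 */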

static pid_t stack_canary_pid;
static unsigned long stack_canary;
static unsigned long stack_canary_offset;

static noinline void __lkdtm_REPORT_STACK_CANARY(void *stack)
{
	int i = 0;
	pid_t pid = task_pid_nr(current);
	unsigned long *canary = (unsigned long *)stack;
	unsigned long current_offset = 0, init_offset = 0;

	/* Do our best to find the canary in a 16 word window ... */
	for (i = 1; i < 16; i++) {
		canary = (unsigned long *)stack + i;
#ifdef CONFIG_STACKPROTECTOR
		if (*canary == current->stack_canary)
			current_offset = i;
		if (*canary == init_task.stack_canary)
			init_offset = i;
#endif
	}

	if (current_offset == 0) {
		/*
		 * If the canary doesn't match what's in the task_struct,
		 * we're either using a global canary or the stack frame
		 * layout changed.
		 */
		if (init_offset != 0) {
			pr_err("FAIL: global stack canary found at offset %ld (canary for pid %d matches init_task's)!\n",
			       init_offset, pid);
		} else {
			pr_warn("FAIL: did not correctly locate stack canary :(\n");
			pr_expected_config(CONFIG_STACKPROTECTOR);
		}

		return;
	} else if (init_offset != 0) {
		pr_warn("WARNING: found both current and init_task canaries nearby?!\n");
	}

	canary = (unsigned long *)stack + current_offset;
	if (stack_canary_pid == 0) {
		stack_canary = *canary;
		stack_canary_pid = pid;
		stack_canary_offset = current_offset;
		pr_info("Recorded stack canary for pid %d at offset %ld\n",
			stack_canary_pid, stack_canary_offset);
	} else if (pid == stack_canary_pid) {
		pr_warn("ERROR: saw pid %d again -- please use a new pid\n", pid);
	} else {
		if (current_offset != stack_canary_offset) {
			pr_warn("ERROR: canary offset changed from %ld to %ld!?\n",
				stack_canary_offset, current_offset);
			return;
		}

		if (*canary == stack_canary) {
			pr_warn("FAIL: canary identical for pid %d and pid %d at offset %ld!\n",
				stack_canary_pid, pid, current_offset);
		} else {
			pr_info("ok: stack canaries differ between pid %d and pid %d at offset %ld.\n",
				stack_canary_pid, pid, current_offset);
			/* Reset the test. */
			stack_canary_pid = 0;
		}
	}
}

void lkdtm_REPORT_STACK_CANARY(void)
{
	/* Use default char array length that triggers stack protection. */
	char data[8] __aligned(sizeof(void *)) = { };

	__lkdtm_REPORT_STACK_CANARY((void *)&data);
}
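
/*
 * Usage sketch: the canary comparison above needs writes from two
 * different pids, e.g. two separate shell invocations (assuming the
 * standard lkdtm debugfs trigger):
 *
 *	sh -c 'echo REPORT_STACK_CANARY > /sys/kernel/debug/provoke-crash/DIRECT'
 *	sh -c 'echo REPORT_STACK_CANARY > /sys/kernel/debug/provoke-crash/DIRECT'
 */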

void lkdtm_UNALIGNED_LOAD_STORE_WRITE(void)
{
	static u8 data[5] __attribute__((aligned(4))) = {1, 2, 3, 4, 5};
	u32 *p;
	u32 val = 0x12345678;

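	/*
	 * "data" is 4-byte aligned, so "data + 1" is guaranteed to be a
	 * misaligned address for the 4-byte load and store below.
	 */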
	p = (u32 *)(data + 1);
	if (*p == 0)
		val = 0x87654321;
	*p = val;

	if (IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS))
		pr_err("XFAIL: arch has CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS\n");
}

void lkdtm_SOFTLOCKUP(void)
{
	preempt_disable();
	for (;;)
		cpu_relax();
}

void lkdtm_HARDLOCKUP(void)
{
	local_irq_disable();
	for (;;)
		cpu_relax();
}

void lkdtm_SPINLOCKUP(void)
{
	/*
	 * Must be called twice to trigger: the first call returns while
	 * still holding the lock, so a second call deadlocks trying to
	 * take it again.
	 */
	spin_lock(&lock_me_up);
	/* Let sparse know we intended to exit holding the lock. */
	__release(&lock_me_up);
}

void lkdtm_HUNG_TASK(void)
{
	set_current_state(TASK_UNINTERRUPTIBLE);
	schedule();
}

volatile unsigned int huge = INT_MAX - 2;
volatile unsigned int ignored;
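
/*
 * Worked example for the signed test below: "huge" is INT_MAX - 2
 * (0x7ffffffd), so the first addition (+1) yields INT_MAX - 1 and is
 * well-defined, while the second (+4) steps past INT_MAX, which is
 * undefined behavior for a signed int. Unsigned arithmetic, by
 * contrast, is defined to wrap modulo 2^32.
 */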

void lkdtm_OVERFLOW_SIGNED(void)
{
	int value;

	value = huge;
	pr_info("Normal signed addition ...\n");
	value += 1;
	ignored = value;

	pr_info("Overflowing signed addition ...\n");
	value += 4;
	ignored = value;
}

void lkdtm_OVERFLOW_UNSIGNED(void)
{
	unsigned int value;

	value = huge;
	pr_info("Normal unsigned addition ...\n");
	value += 1;
	ignored = value;

	pr_info("Overflowing unsigned addition ...\n");
	value += 4;
	ignored = value;
}

/* Intentionally using old-style flex array definition of 1 byte. */
struct array_bounds_flex_array {
	int one;
	int two;
	char data[1];
};

struct array_bounds {
	int one;
	int two;
	char data[8];
	int three;
};

void lkdtm_ARRAY_BOUNDS(void)
{
	struct array_bounds_flex_array *not_checked;
	struct array_bounds *checked;
	volatile int i;

	not_checked = kmalloc(sizeof(*not_checked) * 2, GFP_KERNEL);
	checked = kmalloc(sizeof(*checked) * 2, GFP_KERNEL);
	if (!not_checked || !checked) {
		kfree(not_checked);
		kfree(checked);
		return;
	}

	pr_info("Array access within bounds ...\n");
	/* For both, touch all bytes in the actual member size. */
	for (i = 0; i < sizeof(checked->data); i++)
		checked->data[i] = 'A';
	/*
	 * For the uninstrumented flex array member, also touch 1 byte
	 * beyond to verify it is correctly uninstrumented.
	 */
	for (i = 0; i < sizeof(not_checked->data) + 1; i++)
		not_checked->data[i] = 'A';

	pr_info("Array access beyond bounds ...\n");
	for (i = 0; i < sizeof(checked->data) + 1; i++)
		checked->data[i] = 'B';

	kfree(not_checked);
	kfree(checked);
	pr_err("FAIL: survived array bounds overflow!\n");
	pr_expected_config(CONFIG_UBSAN_BOUNDS);
}

void lkdtm_CORRUPT_LIST_ADD(void)
{
	/*
	 * Initially, an empty list via LIST_HEAD:
	 *	test_head.next = &test_head
	 *	test_head.prev = &test_head
	 */
	LIST_HEAD(test_head);
	struct lkdtm_list good, bad;
	void *target[2] = { };
	void *redirection = &target;

	pr_info("attempting good list addition\n");

	/*
	 * Adding to the list performs these actions:
	 *	test_head.next->prev = &good.node
	 *	good.node.next = test_head.next
	 *	good.node.prev = &test_head
	 *	test_head.next = &good.node
	 */
	list_add(&good.node, &test_head);

	pr_info("attempting corrupted list addition\n");
	/*
	 * In simulating this "write what where" primitive, the "what" is
	 * &bad.node, and the "where" is the address held by
	 * "redirection".
	 */
	test_head.next = redirection;
	list_add(&bad.node, &test_head);
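
	/*
	 * If CONFIG_DEBUG_LIST is missing, the corrupted list_add()
	 * above performs redirection->prev = &bad.node, i.e. it writes
	 * &bad.node into target[1], which the check below detects.
	 */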

	if (target[0] == NULL && target[1] == NULL)
		pr_err("Overwrite did not happen, but no BUG?!\n");
	else {
		pr_err("list_add() corruption not detected!\n");
		pr_expected_config(CONFIG_DEBUG_LIST);
	}
}

void lkdtm_CORRUPT_LIST_DEL(void)
{
	LIST_HEAD(test_head);
	struct lkdtm_list item;
	void *target[2] = { };
	void *redirection = &target;

	list_add(&item.node, &test_head);

	pr_info("attempting good list removal\n");
	list_del(&item.node);

	pr_info("attempting corrupted list removal\n");
	list_add(&item.node, &test_head);

	/* As with the list_add() test above, this corrupts "next". */
	item.node.next = redirection;
	list_del(&item.node);
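
	/*
	 * Without CONFIG_DEBUG_LIST, the list_del() above follows the
	 * corrupted "next" and performs redirection->prev =
	 * item.node.prev, writing &test_head into target[1].
	 */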

	if (target[0] == NULL && target[1] == NULL)
		pr_err("Overwrite did not happen, but no BUG?!\n");
	else {
		pr_err("list_del() corruption not detected!\n");
		pr_expected_config(CONFIG_DEBUG_LIST);
	}
}

/* Test that VMAP_STACK is actually allocating with a leading guard page */
void lkdtm_STACK_GUARD_PAGE_LEADING(void)
{
	const unsigned char *stack = task_stack_page(current);
	const unsigned char *ptr = stack - 1;
	volatile unsigned char byte;

	pr_info("attempting bad read from page below current stack\n");

	byte = *ptr;

	pr_err("FAIL: accessed page before stack! (byte: %x)\n", byte);
}

/* Test that VMAP_STACK is actually allocating with a trailing guard page */
void lkdtm_STACK_GUARD_PAGE_TRAILING(void)
{
	const unsigned char *stack = task_stack_page(current);
	const unsigned char *ptr = stack + THREAD_SIZE;
	volatile unsigned char byte;

	pr_info("attempting bad read from page above current stack\n");

	byte = *ptr;

	pr_err("FAIL: accessed page after stack! (byte: %x)\n", byte);
}

void lkdtm_UNSET_SMEP(void)
{
#if IS_ENABLED(CONFIG_X86_64) && !IS_ENABLED(CONFIG_UML)
#define MOV_CR4_DEPTH	64
	void (*direct_write_cr4)(unsigned long val);
	unsigned char *insn;
	unsigned long cr4;
	int i;

	cr4 = native_read_cr4();

	if ((cr4 & X86_CR4_SMEP) != X86_CR4_SMEP) {
		pr_err("FAIL: SMEP not in use\n");
		return;
	}
	cr4 &= ~(X86_CR4_SMEP);

	pr_info("trying to clear SMEP normally\n");
	native_write_cr4(cr4);
	if (cr4 == native_read_cr4()) {
		pr_err("FAIL: pinning SMEP failed!\n");
		cr4 |= X86_CR4_SMEP;
		pr_info("restoring SMEP\n");
		native_write_cr4(cr4);
		return;
	}
	pr_info("ok: SMEP did not get cleared\n");

	/*
	 * To test the post-write pinning verification we need to call
	 * directly into the middle of native_write_cr4() where the
	 * cr4 write happens, skipping any pinning. This searches for
	 * the cr4 writing instruction.
	 */
	insn = (unsigned char *)native_write_cr4;
	for (i = 0; i < MOV_CR4_DEPTH; i++) {
		/* mov %rdi, %cr4 */
		if (insn[i] == 0x0f && insn[i+1] == 0x22 && insn[i+2] == 0xe7)
			break;
		/* mov %rdi,%rax; mov %rax, %cr4 */
		if (insn[i] == 0x48 && insn[i+1] == 0x89 &&
		    insn[i+2] == 0xf8 && insn[i+3] == 0x0f &&
		    insn[i+4] == 0x22 && insn[i+5] == 0xe0)
			break;
	}
	if (i >= MOV_CR4_DEPTH) {
		pr_info("ok: cannot locate cr4 writing call gadget\n");
		return;
	}
	direct_write_cr4 = (void *)(insn + i);

	pr_info("trying to clear SMEP with call gadget\n");
	direct_write_cr4(cr4);
	if (native_read_cr4() & X86_CR4_SMEP) {
		pr_info("ok: SMEP removal was reverted\n");
	} else {
		pr_err("FAIL: cleared SMEP not detected!\n");
		cr4 |= X86_CR4_SMEP;
		pr_info("restoring SMEP\n");
		native_write_cr4(cr4);
	}
#else
	pr_err("XFAIL: this test is x86_64-only\n");
#endif
}

void lkdtm_DOUBLE_FAULT(void)
{
#if IS_ENABLED(CONFIG_X86_32) && !IS_ENABLED(CONFIG_UML)
	/*
	 * Trigger #DF by setting the stack limit to zero. This clobbers
	 * a GDT TLS slot, which is okay because the current task will die
	 * anyway due to the double fault.
	 */
	struct desc_struct d = {
		.type = 3,	/* expand-up, writable, accessed data */
		.p = 1,		/* present */
		.d = 1,		/* 32-bit */
		.g = 0,		/* limit in bytes */
		.s = 1,		/* not system */
	};

	local_irq_disable();
	write_gdt_entry(get_cpu_gdt_rw(smp_processor_id()),
			GDT_ENTRY_TLS_MIN, &d, DESCTYPE_S);

	/*
	 * Put our zero-limit segment in SS and then trigger a fault. The
	 * 4-byte access to (%esp) will fault with #SS, and the attempt to
	 * deliver the fault will recursively cause #SS and result in #DF.
	 * This whole process happens while NMIs and MCEs are blocked by the
	 * MOV SS window. This is nice because an NMI with an invalid SS
	 * would also double-fault, resulting in the NMI or MCE being lost.
	 */
	asm volatile ("movw %0, %%ss; addl $0, (%%esp)" ::
		      "r" ((unsigned short)(GDT_ENTRY_TLS_MIN << 3)));

	pr_err("FAIL: tried to double fault but didn't die\n");
#else
	pr_err("XFAIL: this test is ia32-only\n");
#endif
}

#ifdef CONFIG_ARM64
static noinline void change_pac_parameters(void)
{
	if (IS_ENABLED(CONFIG_ARM64_PTR_AUTH_KERNEL)) {
		/* Reset the keys of the current task. */
		ptrauth_thread_init_kernel(current);
		ptrauth_thread_switch_kernel(current);
	}
}
#endif

noinline void lkdtm_CORRUPT_PAC(void)
{
#ifdef CONFIG_ARM64
#define CORRUPT_PAC_ITERATE	10
	int i;

	if (!IS_ENABLED(CONFIG_ARM64_PTR_AUTH_KERNEL))
		pr_err("FAIL: kernel not built with CONFIG_ARM64_PTR_AUTH_KERNEL\n");

	if (!system_supports_address_auth()) {
		pr_err("FAIL: CPU lacks pointer authentication feature\n");
		return;
	}

	pr_info("changing PAC parameters to force function return failure...\n");
	/*
	 * The PAC is a hash computed from the input keys, the return
	 * address, and the stack pointer. Since the PAC holds relatively
	 * few bits, collisions are possible, so iterate a few times to
	 * reduce the chance that a stale PAC happens to still validate.
	 */
	for (i = 0; i < CORRUPT_PAC_ITERATE; i++)
		change_pac_parameters();

	pr_err("FAIL: survived PAC changes! Kernel may be unstable from here\n");
#else
	pr_err("XFAIL: this test is arm64-only\n");
#endif
}
