// SPDX-License-Identifier: GPL-2.0
/*
 * arch/sh/kernel/vsyscall/vsyscall.c
 *
 *  Copyright (C) 2006 Paul Mundt
 *
 * vDSO randomization
 * Copyright(C) 2005-2006, Red Hat, Inc., Ingo Molnar
 */
#include <linux/mm.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/gfp.h>
#include <linux/module.h>
#include <linux/elf.h>
#include <linux/sched.h>
#include <linux/err.h>

/*
 * Should the kernel map a VDSO page into processes and pass its
 * address down to glibc upon exec()?
 */
unsigned int __read_mostly vdso_enabled = 1;
EXPORT_SYMBOL_GPL(vdso_enabled);

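/*
 * Handler for the "vdso=" kernel command line option; the parsed value
 * is stored in vdso_enabled (0 disables the vDSO, non-zero enables it).
 */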
static int __init vdso_setup(char *s)
{
	vdso_enabled = simple_strtoul(s, NULL, 0);
	return 1;
}
__setup("vdso=", vdso_setup);

/*
 * These symbols are defined by vsyscall.o to mark the bounds
 * of the ELF DSO images included therein.
 */
extern const char vsyscall_trapa_start, vsyscall_trapa_end;
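/* The single page backing the vsyscall mapping, populated in vsyscall_init(). */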
static struct page *syscall_pages[1];

int __init vsyscall_init(void)
{
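	/* Allocate one zeroed page to hold the vsyscall code image. */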
	void *syscall_page = (void *)get_zeroed_page(GFP_ATOMIC);
	syscall_pages[0] = virt_to_page(syscall_page);

	/*
	 * XXX: Map this page to a fixmap entry if we get around
	 * to adding the page to ELF core dumps
	 */

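	/*
	 * Copy the vsyscall DSO image, delimited by the linker-provided
	 * symbols above, into the freshly allocated page.
	 */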
	memcpy(syscall_page,
	       &vsyscall_trapa_start,
	       &vsyscall_trapa_end - &vsyscall_trapa_start);

	return 0;
}

/* Setup a VMA at program startup for the vsyscall page */
int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
{
	struct mm_struct *mm = current->mm;
	unsigned long addr;
	int ret;

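	/* Take the mmap write lock; give up if interrupted by a fatal signal. */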
	if (mmap_write_lock_killable(mm))
		return -EINTR;

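	/*
	 * Have the generic get_unmapped_area() pick a free address for a
	 * single page; failures come back encoded in the returned value.
	 */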
	addr = get_unmapped_area(NULL, 0, PAGE_SIZE, 0, 0);
	if (IS_ERR_VALUE(addr)) {
		ret = addr;
		goto up_fail;
	}

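	/*
	 * Install a special mapping backed by syscall_pages: readable and
	 * executable, with the VM_MAY* bits allowing later mprotect() changes.
	 */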
	ret = install_special_mapping(mm, addr, PAGE_SIZE,
				      VM_READ | VM_EXEC |
				      VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC,
				      syscall_pages);
	if (unlikely(ret))
		goto up_fail;

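	/*
	 * Remember the vDSO base address so it can be passed down to
	 * userspace (see the comment at the top of this file) and
	 * recognized by arch_vma_name() below.
	 */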
	current->mm->context.vdso = (void *)addr;

up_fail:
	mmap_write_unlock(mm);
	return ret;
}

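/* Report the vsyscall mapping as "[vdso]" in /proc/<pid>/maps. */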
const char *arch_vma_name(struct vm_area_struct *vma)
{
	if (vma->vm_mm && vma->vm_start == (long)vma->vm_mm->context.vdso)
		return "[vdso]";

	return NULL;
}