// SPDX-License-Identifier: GPL-2.0

#include <linux/spinlock.h>
#include <linux/percpu.h>
#include <linux/kallsyms.h>
#include <linux/kcore.h>
#include <linux/pgtable.h>

#include <asm/cpu_entry_area.h>
#include <asm/fixmap.h>
#include <asm/desc.h>

static DEFINE_PER_CPU_PAGE_ALIGNED(struct entry_stack_page, entry_stack_storage);

#ifdef CONFIG_X86_64
static DEFINE_PER_CPU_PAGE_ALIGNED(struct exception_stacks, exception_stacks);
DEFINE_PER_CPU(struct cea_exception_stacks*, cea_exception_stacks);
#endif

#ifdef CONFIG_X86_32
DECLARE_PER_CPU_PAGE_ALIGNED(struct doublefault_stack, doublefault_stack);
#endif

/* Is called from entry code, so must be noinstr */
noinstr struct cpu_entry_area *get_cpu_entry_area(int cpu)
{
	unsigned long va = CPU_ENTRY_AREA_PER_CPU + cpu * CPU_ENTRY_AREA_SIZE;
	BUILD_BUG_ON(sizeof(struct cpu_entry_area) % PAGE_SIZE != 0);

	return (struct cpu_entry_area *) va;
}
EXPORT_SYMBOL(get_cpu_entry_area);

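/*
 * The lookup in get_cpu_entry_area() above is pure arithmetic: each CPU owns
 * a fixed-size, page-aligned slot in one contiguous virtual range, so
 * consecutive CPUs map to consecutive slots.  For example:
 *
 *	struct cpu_entry_area *cea0 = get_cpu_entry_area(0);
 *	struct cpu_entry_area *cea1 = get_cpu_entry_area(1);
 *
 *	(unsigned long)cea1 - (unsigned long)cea0 == CPU_ENTRY_AREA_SIZE
 */
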
void cea_set_pte(void *cea_vaddr, phys_addr_t pa, pgprot_t flags)
{
	unsigned long va = (unsigned long) cea_vaddr;
	pte_t pte = pfn_pte(pa >> PAGE_SHIFT, flags);

	/*
	 * The cpu_entry_area is shared between the user and kernel
	 * page tables.  All of its ptes can safely be global.
	 * _PAGE_GLOBAL gets reused to help indicate PROT_NONE for
	 * non-present PTEs, so be careful not to set it in that
	 * case to avoid confusion.
	 */
	if (boot_cpu_has(X86_FEATURE_PGE) &&
	    (pgprot_val(flags) & _PAGE_PRESENT))
		pte = pte_set_flags(pte, _PAGE_GLOBAL);

	set_pte_vaddr(va, pte);
}

static void __init
cea_map_percpu_pages(void *cea_vaddr, void *ptr, int pages, pgprot_t prot)
{
	for ( ; pages; pages--, cea_vaddr += PAGE_SIZE, ptr += PAGE_SIZE)
		cea_set_pte(cea_vaddr, per_cpu_ptr_to_phys(ptr), prot);
}
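
/*
 * cea_set_pte() installs a single PTE at a cpu_entry_area virtual address;
 * cea_map_percpu_pages() walks a per-cpu object one page at a time,
 * resolving each page to its physical address.  A typical use, as done in
 * setup_cpu_entry_area() below, maps one per-cpu page read-write:
 *
 *	cea_map_percpu_pages(&cea->entry_stack_page,
 *			     per_cpu_ptr(&entry_stack_storage, cpu), 1,
 *			     PAGE_KERNEL);
 */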

static void __init percpu_setup_debug_store(unsigned int cpu)
{
#ifdef CONFIG_CPU_SUP_INTEL
	unsigned int npages;
	void *cea;

	if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL)
		return;

	cea = &get_cpu_entry_area(cpu)->cpu_debug_store;
	npages = sizeof(struct debug_store) / PAGE_SIZE;
	BUILD_BUG_ON(sizeof(struct debug_store) % PAGE_SIZE != 0);
	cea_map_percpu_pages(cea, &per_cpu(cpu_debug_store, cpu), npages,
			     PAGE_KERNEL);

	cea = &get_cpu_entry_area(cpu)->cpu_debug_buffers;
	/*
	 * Force the population of PMDs for not yet allocated per cpu
	 * memory like debug store buffers.
	 */
	npages = sizeof(struct debug_store_buffers) / PAGE_SIZE;
	for (; npages; npages--, cea += PAGE_SIZE)
		cea_set_pte(cea, 0, PAGE_NONE);
#endif
}
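
/*
 * Note on the PAGE_NONE loop in percpu_setup_debug_store() above: installing
 * non-present placeholder PTEs forces the intermediate page-table pages for
 * the debug-store buffer range to be allocated now.  As the comment there
 * notes, the buffers themselves are allocated and mapped later by the
 * perf/DS code, which then only has to rewrite existing PTEs and never needs
 * to allocate page tables from that context.
 */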

#ifdef CONFIG_X86_64

#define cea_map_stack(name) do {					\
	npages = sizeof(estacks->name## _stack) / PAGE_SIZE;		\
	cea_map_percpu_pages(cea->estacks.name## _stack,		\
			estacks->name## _stack, npages, PAGE_KERNEL);	\
	} while (0)
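
/*
 * For example, cea_map_stack(DF) expands (via token pasting) to:
 *
 *	npages = sizeof(estacks->DF_stack) / PAGE_SIZE;
 *	cea_map_percpu_pages(cea->estacks.DF_stack,
 *			estacks->DF_stack, npages, PAGE_KERNEL);
 *
 * i.e. it maps this CPU's real DF_stack storage into the DF_stack slot of
 * the cpu_entry_area, leaving the surrounding guard pages unmapped.
 */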

static void __init percpu_setup_exception_stacks(unsigned int cpu)
{
	struct exception_stacks *estacks = per_cpu_ptr(&exception_stacks, cpu);
	struct cpu_entry_area *cea = get_cpu_entry_area(cpu);
	unsigned int npages;

	BUILD_BUG_ON(sizeof(exception_stacks) % PAGE_SIZE != 0);

	per_cpu(cea_exception_stacks, cpu) = &cea->estacks;

	/*
	 * The exception stack mappings in the per-cpu area are protected
	 * by guard pages, so each stack must be mapped separately.
	 */
	cea_map_stack(DF);
	cea_map_stack(NMI);
	cea_map_stack(DB);
	cea_map_stack(MCE);

	if (IS_ENABLED(CONFIG_AMD_MEM_ENCRYPT)) {
		if (cc_platform_has(CC_ATTR_GUEST_STATE_ENCRYPT)) {
			cea_map_stack(VC);
			cea_map_stack(VC2);
		}
	}
}
#else
static inline void percpu_setup_exception_stacks(unsigned int cpu)
{
	struct cpu_entry_area *cea = get_cpu_entry_area(cpu);

	cea_map_percpu_pages(&cea->doublefault_stack,
			     &per_cpu(doublefault_stack, cpu), 1, PAGE_KERNEL);
}
#endif

/* Setup the fixmap mappings only once per-processor */
static void __init setup_cpu_entry_area(unsigned int cpu)
{
	struct cpu_entry_area *cea = get_cpu_entry_area(cpu);
#ifdef CONFIG_X86_64
	/* On 64-bit systems, we use a read-only fixmap GDT and TSS. */
	pgprot_t gdt_prot = PAGE_KERNEL_RO;
	pgprot_t tss_prot = PAGE_KERNEL_RO;
#else
	/*
	 * On native 32-bit systems, the GDT cannot be read-only because
	 * our double fault handler uses a task gate, and entering through
	 * a task gate needs to change an available TSS to busy.  If the
	 * GDT is read-only, that will triple fault.  The TSS cannot be
	 * read-only because the CPU writes to it on task switches.
	 *
	 * On Xen PV, the GDT must be read-only because the hypervisor
	 * requires it.
	 */
	pgprot_t gdt_prot = boot_cpu_has(X86_FEATURE_XENPV) ?
		PAGE_KERNEL_RO : PAGE_KERNEL;
	pgprot_t tss_prot = PAGE_KERNEL;
#endif

	cea_set_pte(&cea->gdt, get_cpu_gdt_paddr(cpu), gdt_prot);

	cea_map_percpu_pages(&cea->entry_stack_page,
			     per_cpu_ptr(&entry_stack_storage, cpu), 1,
			     PAGE_KERNEL);

	/*
	 * The Intel SDM says (Volume 3, 7.2.1):
	 *
	 *  Avoid placing a page boundary in the part of the TSS that the
	 *  processor reads during a task switch (the first 104 bytes). The
	 *  processor may not correctly perform address translations if a
	 *  boundary occurs in this area. During a task switch, the processor
	 *  reads and writes into the first 104 bytes of each TSS (using
	 *  contiguous physical addresses beginning with the physical address
	 *  of the first byte of the TSS). So, after TSS access begins, if
	 *  part of the 104 bytes is not physically contiguous, the processor
	 *  will access incorrect information without generating a page-fault
	 *  exception.
	 *
	 * There are also a lot of errata involving the TSS spanning a page
	 * boundary.  Assert that we're not doing that.
	 */
	BUILD_BUG_ON((offsetof(struct tss_struct, x86_tss) ^
		      offsetofend(struct tss_struct, x86_tss)) & PAGE_MASK);
	BUILD_BUG_ON(sizeof(struct tss_struct) % PAGE_SIZE != 0);
	/*
	 * VMX changes the host TR limit to 0x67 after a VM exit.  This is
	 * okay, since 0x67 covers the size of struct x86_hw_tss.  Make sure
	 * that this is correct.
	 */
	BUILD_BUG_ON(offsetof(struct tss_struct, x86_tss) != 0);
	BUILD_BUG_ON(sizeof(struct x86_hw_tss) != 0x68);
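
	/*
	 * How the checks above work: XORing the start and end offsets of
	 * x86_tss clears every bit in which the two agree, so bits survive
	 * in the PAGE_MASK range only if the offsets fall in different
	 * pages, i.e. only if the hardware TSS crosses a page boundary.
	 * And 0x68 == 104 bytes, matching both the SDM quote above and the
	 * 0x67 TR limit (offset of the last valid byte) restored by VMX.
	 */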

	cea_map_percpu_pages(&cea->tss, &per_cpu(cpu_tss_rw, cpu),
			     sizeof(struct tss_struct) / PAGE_SIZE, tss_prot);

#ifdef CONFIG_X86_32
	per_cpu(cpu_entry_area, cpu) = cea;
#endif

	percpu_setup_exception_stacks(cpu);

	percpu_setup_debug_store(cpu);
}

static __init void setup_cpu_entry_area_ptes(void)
{
#ifdef CONFIG_X86_32
	unsigned long start, end;

	/* The +1 is for the readonly IDT: */
	BUILD_BUG_ON((CPU_ENTRY_AREA_PAGES+1)*PAGE_SIZE != CPU_ENTRY_AREA_MAP_SIZE);
	BUILD_BUG_ON(CPU_ENTRY_AREA_TOTAL_SIZE != CPU_ENTRY_AREA_MAP_SIZE);
	BUG_ON(CPU_ENTRY_AREA_BASE & ~PMD_MASK);

	start = CPU_ENTRY_AREA_BASE;
	end = start + CPU_ENTRY_AREA_MAP_SIZE;

	/* Careful here: start + PMD_SIZE might wrap around */
	for (; start < end && start >= CPU_ENTRY_AREA_BASE; start += PMD_SIZE)
		populate_extra_pte(start);
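	/*
	 * "start + PMD_SIZE" can wrap past the top of the 32-bit address
	 * space on the last iteration, because the cpu_entry_area sits just
	 * below the fixmap at the top of the kernel virtual range; the
	 * "start >= CPU_ENTRY_AREA_BASE" test turns such a wrap into loop
	 * termination instead of continuing from low addresses.
	 * populate_extra_pte() only allocates the intermediate page-table
	 * pages here; the PTEs themselves are written per CPU later, by
	 * setup_cpu_entry_area().
	 */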
#endif
}

void __init setup_cpu_entry_areas(void)
{
	unsigned int cpu;

	setup_cpu_entry_area_ptes();

	for_each_possible_cpu(cpu)
		setup_cpu_entry_area(cpu);

	/*
	 * This is the last essential update to swapper_pg_dir which needs
	 * to be synchronized to initial_page_table on 32-bit.
	 */
	sync_initial_page_table();
}