// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * This file contains the routines setting up the linux page tables.
 *  -- paulus
 *
 *  Derived from arch/ppc/mm/init.c:
 *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 *  Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au)
 *  and Cort Dougan (PReP) (cort@cs.nmt.edu)
 *    Copyright (C) 1996 Paul Mackerras
 *
 *  Derived from "arch/i386/mm/init.c"
 *    Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>
#include <linux/init.h>
#include <linux/highmem.h>
#include <linux/memblock.h>
#include <linux/slab.h>
#include <linux/set_memory.h>

#include <asm/pgalloc.h>
#include <asm/fixmap.h>
#include <asm/setup.h>
#include <asm/sections.h>
#include <asm/early_ioremap.h>

#include <mm/mmu_decl.h>

static u8 early_fixmap_pagetable[FIXMAP_PTE_SIZE] __page_aligned_data;

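/*
 * Point every PMD entry covering the fixmap range at the static
 * early_fixmap_pagetable, so fixmap/early_ioremap() mappings can be
 * installed before the memblock allocator is up.
 */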
notrace void __init early_ioremap_init(void)
{
	unsigned long addr = ALIGN_DOWN(FIXADDR_START, PGDIR_SIZE);
	pte_t *ptep = (pte_t *)early_fixmap_pagetable;
	pmd_t *pmdp = pmd_off_k(addr);

	/*
	 * The s32 cast keeps the loop bound correct when addr steps past
	 * FIXADDR_TOP near the top of the address space and the
	 * subtraction wraps.
	 */
	for (; (s32)(FIXADDR_TOP - addr) > 0;
	     addr += PGDIR_SIZE, ptep += PTRS_PER_PTE, pmdp++)
		pmd_populate_kernel(&init_mm, pmdp, ptep);

	early_ioremap_setup();
}

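/*
 * Boot-time page table allocator: grab a naturally aligned block from
 * memblock, and panic on failure since there is no way to recover this
 * early in boot.
 */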
static void __init *early_alloc_pgtable(unsigned long size)
{
	void *ptr = memblock_alloc(size, size);

	if (!ptr)
		panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
		      __func__, size, size);

	return ptr;
}

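/*
 * Early counterpart of pte_alloc_kernel(): if the PMD entry is empty,
 * back it with a fresh PTE fragment from memblock, then return the PTE
 * entry for va.
 */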
pte_t __init *early_pte_alloc_kernel(pmd_t *pmdp, unsigned long va)
{
	if (pmd_none(*pmdp)) {
		pte_t *ptep = early_alloc_pgtable(PTE_FRAG_SIZE);

		pmd_populate_kernel(&init_mm, pmdp, ptep);
	}
	return pte_offset_kernel(pmdp, va);
}

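/*
 * Install a single kernel page mapping of va -> pa with the given
 * protection, taking the PTE page from the slab-backed allocator once
 * it is available and from memblock before that.
 */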
int __ref map_kernel_page(unsigned long va, phys_addr_t pa, pgprot_t prot)
{
	pmd_t *pd;
	pte_t *pg;
	int err = -ENOMEM;

	/* Use upper 10 bits of VA to index the first level map */
	pd = pmd_off_k(va);
	/* Use middle 10 bits of VA to index the second-level map */
	if (likely(slab_is_available()))
		pg = pte_alloc_kernel(pd, va);
	else
		pg = early_pte_alloc_kernel(pd, va);
	if (pg) {
		err = 0;
		/* The PTE should never be already set nor present in the
		 * hash table
		 */
		BUG_ON((pte_present(*pg) | pte_hashpte(*pg)) && pgprot_val(prot));
		set_pte_at(&init_mm, va, pg, pfn_pte(pa >> PAGE_SHIFT, prot));
	}
	smp_wmb();
	return err;
}

/*
 * Map in a chunk of physical memory starting at offset, up to top.
 * Kernel text is mapped PAGE_KERNEL_TEXT, everything else PAGE_KERNEL.
 */
static void __init __mapin_ram_chunk(unsigned long offset, unsigned long top)
{
	unsigned long v, s;
	phys_addr_t p;
	bool ktext;

	s = offset;
	v = PAGE_OFFSET + s;
	p = memstart_addr + s;
	for (; s < top; s += PAGE_SIZE) {
		ktext = core_kernel_text(v);
		map_kernel_page(v, p, ktext ? PAGE_KERNEL_TEXT : PAGE_KERNEL);
		v += PAGE_SIZE;
		p += PAGE_SIZE;
	}
}

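/*
 * Map all of lowmem into the kernel linear mapping. mmu_mapin_ram() may
 * cover the start of each range with block mappings (e.g. BATs) and
 * returns the first address left for us to map page by page.
 */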
void __init mapin_ram(void)
{
	phys_addr_t base, end;
	u64 i;

	for_each_mem_range(i, &base, &end) {
		phys_addr_t top = min(end, total_lowmem);

		if (base >= top)
			continue;
		base = mmu_mapin_ram(base, top);
		__mapin_ram_chunk(base, top);
	}
}

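/*
 * Strip execute permission from the init text region once it has been
 * freed: block-mapped kernels delegate to the MMU-specific helper,
 * otherwise the pages are simply remapped as PAGE_KERNEL.
 */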
void mark_initmem_nx(void)
{
	unsigned long numpages = PFN_UP((unsigned long)_einittext) -
				 PFN_DOWN((unsigned long)_sinittext);

	if (v_block_mapped((unsigned long)_sinittext))
		mmu_mark_initmem_nx();
	else
		set_memory_attr((unsigned long)_sinittext, numpages, PAGE_KERNEL);
}

#ifdef CONFIG_STRICT_KERNEL_RWX
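/*
 * Make kernel text read-only/executable and .rodata (up to __init_begin,
 * so NOTES and EXCEPTION_TABLE are covered too) read-only, then have
 * ptdump check that nothing was left both writable and executable.
 */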
void mark_rodata_ro(void)
{
	unsigned long numpages;

	if (v_block_mapped((unsigned long)_stext + 1)) {
		mmu_mark_rodata_ro();
		ptdump_check_wx();
		return;
	}

	numpages = PFN_UP((unsigned long)_etext) -
		   PFN_DOWN((unsigned long)_stext);

	set_memory_attr((unsigned long)_stext, numpages, PAGE_KERNEL_ROX);
	/*
	 * mark .rodata as read only. Use __init_begin rather than __end_rodata
	 * to cover NOTES and EXCEPTION_TABLE.
	 */
	numpages = PFN_UP((unsigned long)__init_begin) -
		   PFN_DOWN((unsigned long)__start_rodata);

	set_memory_attr((unsigned long)__start_rodata, numpages, PAGE_KERNEL_RO);

	// mark_initmem_nx() should have already run by now
	ptdump_check_wx();
}
#endif

#if defined(CONFIG_ARCH_SUPPORTS_DEBUG_PAGEALLOC) && defined(CONFIG_DEBUG_PAGEALLOC)
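/*
 * DEBUG_PAGEALLOC hook: map or unmap lowmem pages so that stale
 * accesses to freed pages fault immediately. Highmem pages have no
 * permanent kernel mapping to modify, so they are skipped.
 */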
void __kernel_map_pages(struct page *page, int numpages, int enable)
{
	unsigned long addr = (unsigned long)page_address(page);

	if (PageHighMem(page))
		return;

	if (enable)
		set_memory_attr(addr, numpages, PAGE_KERNEL);
	else
		set_memory_attr(addr, numpages, __pgprot(0));
}
#endif /* CONFIG_DEBUG_PAGEALLOC */