// SPDX-License-Identifier: GPL-2.0-only
/*
 *  linux/arch/arm/mm/init.c
 *
 *  Copyright (C) 1995-2005 Russell King
 */
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/swap.h>
#include <linux/init.h>
#include <linux/mman.h>
#include <linux/sched/signal.h>
#include <linux/sched/task.h>
#include <linux/export.h>
#include <linux/nodemask.h>
#include <linux/initrd.h>
#include <linux/of_fdt.h>
#include <linux/highmem.h>
#include <linux/gfp.h>
#include <linux/memblock.h>
#include <linux/dma-map-ops.h>
#include <linux/sizes.h>
#include <linux/stop_machine.h>
#include <linux/swiotlb.h>

#include <asm/cp15.h>
#include <asm/mach-types.h>
#include <asm/memblock.h>
#include <asm/memory.h>
#include <asm/prom.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <asm/set_memory.h>
#include <asm/system_info.h>
#include <asm/tlb.h>
#include <asm/fixmap.h>
#include <asm/ptdump.h>

#include <asm/mach/arch.h>
#include <asm/mach/map.h>

#include "mm.h"

#ifdef CONFIG_CPU_CP15_MMU
unsigned long __init __clear_cr(unsigned long mask)
{
	cr_alignment = cr_alignment & ~mask;
	return cr_alignment;
}
#endif

#ifdef CONFIG_BLK_DEV_INITRD
static int __init parse_tag_initrd(const struct tag *tag)
{
	pr_warn("ATAG_INITRD is deprecated; "
		"please update your bootloader.\n");
	phys_initrd_start = __virt_to_phys(tag->u.initrd.start);
	phys_initrd_size = tag->u.initrd.size;
	return 0;
}

__tagtable(ATAG_INITRD, parse_tag_initrd);

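/*
 * Note: unlike ATAG_INITRD above, which passes a virtual address (hence the
 * __virt_to_phys() conversion), ATAG_INITRD2 is taken to carry the initrd's
 * physical address directly, so it is used as-is below.
 */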
static int __init parse_tag_initrd2(const struct tag *tag)
{
	phys_initrd_start = tag->u.initrd.start;
	phys_initrd_size = tag->u.initrd.size;
	return 0;
}

__tagtable(ATAG_INITRD2, parse_tag_initrd2);
#endif

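/*
 * Note: min, max_low and max_high are page frame numbers; lowmem ends at the
 * current memblock limit and highmem (if any) extends to the end of DRAM.
 */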
static void __init find_limits(unsigned long *min, unsigned long *max_low,
			       unsigned long *max_high)
{
	*max_low = PFN_DOWN(memblock_get_current_limit());
	*min = PFN_UP(memblock_start_of_DRAM());
	*max_high = PFN_DOWN(memblock_end_of_DRAM());
}

#ifdef CONFIG_ZONE_DMA

phys_addr_t arm_dma_zone_size __read_mostly;
EXPORT_SYMBOL(arm_dma_zone_size);

/*
 * The DMA mask corresponding to the maximum bus address allocatable
 * using GFP_DMA. The default here places no restriction on DMA
 * allocations. This must be the smallest DMA mask in the system,
 * so a successful GFP_DMA allocation will always satisfy this.
 */
phys_addr_t arm_dma_limit;
unsigned long arm_dma_pfn_limit;
#endif

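/*
 * Note: when the machine_desc does not specify a dma_zone_size, arm_dma_limit
 * defaults to the full 32-bit range, i.e. GFP_DMA allocations are not
 * restricted any further than ordinary lowmem allocations.
 */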
void __init setup_dma_zone(const struct machine_desc *mdesc)
{
#ifdef CONFIG_ZONE_DMA
	if (mdesc->dma_zone_size) {
		arm_dma_zone_size = mdesc->dma_zone_size;
		arm_dma_limit = PHYS_OFFSET + arm_dma_zone_size - 1;
	} else
		arm_dma_limit = 0xffffffff;
	arm_dma_pfn_limit = arm_dma_limit >> PAGE_SHIFT;
#endif
}

static void __init zone_sizes_init(unsigned long min, unsigned long max_low,
				   unsigned long max_high)
{
	unsigned long max_zone_pfn[MAX_NR_ZONES] = { 0 };

#ifdef CONFIG_ZONE_DMA
	max_zone_pfn[ZONE_DMA] = min(arm_dma_pfn_limit, max_low);
#endif
	max_zone_pfn[ZONE_NORMAL] = max_low;
#ifdef CONFIG_HIGHMEM
	max_zone_pfn[ZONE_HIGHMEM] = max_high;
#endif
	free_area_init(max_zone_pfn);
}

#ifdef CONFIG_HAVE_ARCH_PFN_VALID
int pfn_valid(unsigned long pfn)
{
	phys_addr_t addr = __pfn_to_phys(pfn);
	unsigned long pageblock_size = PAGE_SIZE * pageblock_nr_pages;

	if (__phys_to_pfn(addr) != pfn)
		return 0;

	/*
	 * If the address is less than pageblock_size bytes away from a
	 * present memory chunk, there will still be a memory map entry for it
	 * because we round the freed memory map to pageblock boundaries.
	 */
	if (memblock_overlaps_region(&memblock.memory,
				     ALIGN_DOWN(addr, pageblock_size),
				     pageblock_size))
		return 1;

	return 0;
}
EXPORT_SYMBOL(pfn_valid);
#endif

static bool arm_memblock_steal_permitted = true;

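/*
 * "Steal" a block of memory for exclusive platform use: allocate it from
 * memblock, then drop it from the memory map entirely (free + remove) so the
 * kernel never maps or manages those pages again.
 */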
phys_addr_t __init arm_memblock_steal(phys_addr_t size, phys_addr_t align)
{
	phys_addr_t phys;

	BUG_ON(!arm_memblock_steal_permitted);

	phys = memblock_phys_alloc(size, align);
	if (!phys)
		panic("Failed to steal %pa bytes at %pS\n",
		      &size, (void *)_RET_IP_);

	memblock_phys_free(phys, size);
	memblock_remove(phys, size);

	return phys;
}

static void __init arm_initrd_init(void)
{
#ifdef CONFIG_BLK_DEV_INITRD
	phys_addr_t start;
	unsigned long size;

	initrd_start = initrd_end = 0;

	if (!phys_initrd_size)
		return;

	/*
	 * Round the memory region to page boundaries as per free_initrd_mem().
	 * This allows us to detect whether the pages overlapping the initrd
	 * are in use, but more importantly, reserves the entire set of pages
	 * as we don't want these pages allocated for other purposes.
	 */
	start = round_down(phys_initrd_start, PAGE_SIZE);
	size = phys_initrd_size + (phys_initrd_start - start);
	size = round_up(size, PAGE_SIZE);

	if (!memblock_is_region_memory(start, size)) {
		pr_err("INITRD: 0x%08llx+0x%08lx is not a memory region - disabling initrd\n",
		       (u64)start, size);
		return;
	}

	if (memblock_is_region_reserved(start, size)) {
		pr_err("INITRD: 0x%08llx+0x%08lx overlaps in-use memory region - disabling initrd\n",
		       (u64)start, size);
		return;
	}

	memblock_reserve(start, size);

	/* Now convert initrd to virtual addresses */
	initrd_start = __phys_to_virt(phys_initrd_start);
	initrd_end = initrd_start + phys_initrd_size;
#endif
}

#ifdef CONFIG_CPU_ICACHE_MISMATCH_WORKAROUND
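/*
 * Note: the mrc below reads the Cache Type Register; its low four bits
 * (IminLine) encode the smallest I-cache line size as log2 of the number of
 * words, so 1 << (IminLine + 2) is that line size in bytes.  icache_size is
 * kept at the smallest value seen across all CPUs.
 */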
void check_cpu_icache_size(int cpuid)
{
	u32 size, ctr;

	asm("mrc p15, 0, %0, c0, c0, 1" : "=r" (ctr));

	size = 1 << ((ctr & 0xf) + 2);
	if (cpuid != 0 && icache_size != size)
		pr_info("CPU%u: detected I-Cache line size mismatch, workaround enabled\n",
			cpuid);
	if (icache_size > size)
		icache_size = size;
}
#endif

void __init arm_memblock_init(const struct machine_desc *mdesc)
{
	/* Register the kernel text, kernel data and initrd with memblock. */
	memblock_reserve(__pa(KERNEL_START), KERNEL_END - KERNEL_START);

	arm_initrd_init();

	arm_mm_memblock_reserve();

	/* reserve any platform specific memblock areas */
	if (mdesc->reserve)
		mdesc->reserve();

	early_init_fdt_scan_reserved_mem();

	/* reserve memory for DMA contiguous allocations */
	dma_contiguous_reserve(arm_dma_limit);

	arm_memblock_steal_permitted = false;
	memblock_dump_all();
}

void __init bootmem_init(void)
{
	memblock_allow_resize();

	find_limits(&min_low_pfn, &max_low_pfn, &max_pfn);

	early_memtest((phys_addr_t)min_low_pfn << PAGE_SHIFT,
		      (phys_addr_t)max_low_pfn << PAGE_SHIFT);

	/*
	 * sparse_init() tries to allocate memory from memblock, so must be
	 * done after the fixed reservations
	 */
	sparse_init();

	/*
	 * Now free the memory - free_area_init needs
	 * the sparse mem_map arrays initialized by sparse_init()
	 * for memmap_init_zone(), otherwise all PFNs are invalid.
	 */
	zone_sizes_init(min_low_pfn, max_low_pfn, max_pfn);
}

/*
 * Poison init memory with an undefined instruction (ARM) or a branch to an
 * undefined instruction (Thumb).
 */
static inline void poison_init_mem(void *s, size_t count)
{
	u32 *p = (u32 *)s;
	for (; count != 0; count -= 4)
		*p++ = 0xe7fddef0;
}

static void __init free_highpages(void)
{
#ifdef CONFIG_HIGHMEM
	unsigned long max_low = max_low_pfn;
	phys_addr_t range_start, range_end;
	u64 i;

	/* set highmem page free */
	for_each_free_mem_range(i, NUMA_NO_NODE, MEMBLOCK_NONE,
				&range_start, &range_end, NULL) {
		unsigned long start = PFN_UP(range_start);
		unsigned long end = PFN_DOWN(range_end);

		/* Ignore complete lowmem entries */
		if (end <= max_low)
			continue;

		/* Truncate partial highmem entries */
		if (start < max_low)
			start = max_low;

		for (; start < end; start++)
			free_highmem_page(pfn_to_page(start));
	}
#endif
}

/*
 * mem_init() marks the free areas in the mem_map and tells us how much
 * memory is free.  This is done after various parts of the system have
 * claimed their memory after the kernel image.
 */
void __init mem_init(void)
{
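	/*
	 * Note: with LPAE, RAM may extend above the 32-bit DMA limit, in
	 * which case a swiotlb bounce buffer is needed for devices with
	 * narrower DMA masks; otherwise no bounce buffer is set up.
	 */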
#ifdef CONFIG_ARM_LPAE
	if (swiotlb_force == SWIOTLB_FORCE ||
	    max_pfn > arm_dma_pfn_limit)
		swiotlb_init(1);
	else
		swiotlb_force = SWIOTLB_NO_FORCE;
#endif

	set_max_mapnr(pfn_to_page(max_pfn) - mem_map);

	/* this will put all unused low memory onto the freelists */
	memblock_free_all();

#ifdef CONFIG_SA1111
	/* now that our DMA memory is actually so designated, we can free it */
	free_reserved_area(__va(PHYS_OFFSET), swapper_pg_dir, -1, NULL);
#endif

	free_highpages();

	/*
	 * Check boundaries twice: Some fundamental inconsistencies can
	 * be detected at build time already.
	 */
#ifdef CONFIG_MMU
	BUILD_BUG_ON(TASK_SIZE > MODULES_VADDR);
	BUG_ON(TASK_SIZE > MODULES_VADDR);
#endif

#ifdef CONFIG_HIGHMEM
	BUILD_BUG_ON(PKMAP_BASE + LAST_PKMAP * PAGE_SIZE > PAGE_OFFSET);
	BUG_ON(PKMAP_BASE + LAST_PKMAP * PAGE_SIZE > PAGE_OFFSET);
#endif
}

#ifdef CONFIG_STRICT_KERNEL_RWX
struct section_perm {
	const char *name;
	unsigned long start;
	unsigned long end;
	pmdval_t mask;
	pmdval_t prot;
	pmdval_t clear;
};

/* First section-aligned location at or after __start_rodata. */
extern char __start_rodata_section_aligned[];

static struct section_perm nx_perms[] = {
	/* Make page tables, etc. before _stext RW (set NX). */
	{
		.name = "pre-text NX",
		.start = PAGE_OFFSET,
		.end = (unsigned long)_stext,
		.mask = ~PMD_SECT_XN,
		.prot = PMD_SECT_XN,
	},
	/* Make init RW (set NX). */
	{
		.name = "init NX",
		.start = (unsigned long)__init_begin,
		.end = (unsigned long)_sdata,
		.mask = ~PMD_SECT_XN,
		.prot = PMD_SECT_XN,
	},
	/* Make rodata NX (set RO in ro_perms below). */
	{
		.name = "rodata NX",
		.start = (unsigned long)__start_rodata_section_aligned,
		.end = (unsigned long)__init_begin,
		.mask = ~PMD_SECT_XN,
		.prot = PMD_SECT_XN,
	},
};

static struct section_perm ro_perms[] = {
	/* Make kernel code and rodata RX (set RO). */
	{
		.name = "text/rodata RO",
		.start = (unsigned long)_stext,
		.end = (unsigned long)__init_begin,
#ifdef CONFIG_ARM_LPAE
		.mask = ~(L_PMD_SECT_RDONLY | PMD_SECT_AP2),
		.prot = L_PMD_SECT_RDONLY | PMD_SECT_AP2,
#else
		.mask = ~(PMD_SECT_APX | PMD_SECT_AP_WRITE),
		.prot = PMD_SECT_APX | PMD_SECT_AP_WRITE,
		.clear = PMD_SECT_AP_WRITE,
#endif
	},
};

/*
 * Updates section permissions only for the current mm (sections are
 * copied into each mm). During startup, this is the init_mm. It is only
 * safe to call this with preemption disabled, as under stop_machine().
 */
static inline void section_update(unsigned long addr, pmdval_t mask,
				  pmdval_t prot, struct mm_struct *mm)
{
	pmd_t *pmd;

	pmd = pmd_offset(pud_offset(p4d_offset(pgd_offset(mm, addr), addr), addr), addr);

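	/*
	 * Note: with the classic 2-level tables, Linux folds two 1MiB
	 * hardware sections into each pmd, so update entry 0 or 1 depending
	 * on which section 'addr' falls in; with LPAE each pmd maps a single
	 * 2MiB block.
	 */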
#ifdef CONFIG_ARM_LPAE
	pmd[0] = __pmd((pmd_val(pmd[0]) & mask) | prot);
#else
	if (addr & SECTION_SIZE)
		pmd[1] = __pmd((pmd_val(pmd[1]) & mask) | prot);
	else
		pmd[0] = __pmd((pmd_val(pmd[0]) & mask) | prot);
#endif
	flush_pmd_entry(pmd);
	local_flush_tlb_kernel_range(addr, addr + SECTION_SIZE);
}

/* Make sure extended page tables are in use. */
static inline bool arch_has_strict_perms(void)
{
	if (cpu_architecture() < CPU_ARCH_ARMv6)
		return false;

	return !!(get_cr() & CR_XP);
}

static void set_section_perms(struct section_perm *perms, int n, bool set,
			      struct mm_struct *mm)
{
	size_t i;
	unsigned long addr;

	if (!arch_has_strict_perms())
		return;

	for (i = 0; i < n; i++) {
		if (!IS_ALIGNED(perms[i].start, SECTION_SIZE) ||
		    !IS_ALIGNED(perms[i].end, SECTION_SIZE)) {
			pr_err("BUG: %s section %lx-%lx not aligned to %lx\n",
				perms[i].name, perms[i].start, perms[i].end,
				SECTION_SIZE);
			continue;
		}

		for (addr = perms[i].start;
		     addr < perms[i].end;
		     addr += SECTION_SIZE)
			section_update(addr, perms[i].mask,
				       set ? perms[i].prot : perms[i].clear, mm);
	}
}

/*
 * update_sections_early() is intended to be called only through the
 * stop_machine framework and executed by only one CPU while all other CPUs
 * will spin and wait, so no locking is required in this function.
 */
static void update_sections_early(struct section_perm perms[], int n)
{
	struct task_struct *t, *s;

	for_each_process(t) {
		if (t->flags & PF_KTHREAD)
			continue;
		for_each_thread(t, s)
			if (s->mm)
				set_section_perms(perms, n, true, s->mm);
	}
	set_section_perms(perms, n, true, current->active_mm);
	set_section_perms(perms, n, true, &init_mm);
}

static int __fix_kernmem_perms(void *unused)
{
	update_sections_early(nx_perms, ARRAY_SIZE(nx_perms));
	return 0;
}

static void fix_kernmem_perms(void)
{
	stop_machine(__fix_kernmem_perms, NULL, NULL);
}

static int __mark_rodata_ro(void *unused)
{
	update_sections_early(ro_perms, ARRAY_SIZE(ro_perms));
	return 0;
}

void mark_rodata_ro(void)
{
	stop_machine(__mark_rodata_ro, NULL, NULL);
	debug_checkwx();
}

#else
static inline void fix_kernmem_perms(void) { }
#endif /* CONFIG_STRICT_KERNEL_RWX */

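/*
 * Free the .init sections.  They are poisoned first so that a stale jump
 * into freed init code traps on an undefined instruction rather than
 * executing whatever the pages are later reused for.
 */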
void free_initmem(void)
{
	fix_kernmem_perms();

	poison_init_mem(__init_begin, __init_end - __init_begin);
	if (!machine_is_integrator() && !machine_is_cintegrator())
		free_initmem_default(-1);
}

#ifdef CONFIG_BLK_DEV_INITRD
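/*
 * Note: the start/end rounding below mirrors the page rounding done when the
 * initrd region was reserved in arm_initrd_init(), so the entire reserved
 * range is poisoned and returned to the page allocator.
 */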
void free_initrd_mem(unsigned long start, unsigned long end)
{
	if (start == initrd_start)
		start = round_down(start, PAGE_SIZE);
	if (end == initrd_end)
		end = round_up(end, PAGE_SIZE);

	poison_init_mem((void *)start, PAGE_ALIGN(end) - start);
	free_reserved_area((void *)start, (void *)end, -1, "initrd");
}
#endif