/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * kexec for arm64
 *
 * Copyright (C) Linaro.
 * Copyright (C) Huawei Futurewei Technologies.
 * Copyright (C) 2021, Microsoft Corporation.
 * Pasha Tatashin <pasha.tatashin@soleen.com>
 */

#include <linux/kexec.h>
#include <linux/linkage.h>

#include <asm/assembler.h>
#include <asm/kexec.h>
#include <asm/page.h>
#include <asm/sysreg.h>
#include <asm/virt.h>

.macro turn_off_mmu tmp1, tmp2
	mov_q	\tmp1, INIT_SCTLR_EL1_MMU_OFF
	pre_disable_mmu_workaround
	msr	sctlr_el1, \tmp1
	isb
.endm

.section ".kexec_relocate.text", "ax"
/*
 * arm64_relocate_new_kernel - Put a 2nd stage image in place and boot it.
 *
 * The memory that the old kernel occupies may be overwritten when copying the
 * new image to its final location. To ensure that the
 * arm64_relocate_new_kernel routine that performs the copy is not itself
 * overwritten, all code and data needed by arm64_relocate_new_kernel must
 * live between the symbols arm64_relocate_new_kernel and
 * arm64_relocate_new_kernel_end. The machine_kexec() routine copies
 * arm64_relocate_new_kernel to kexec-safe memory that has been set aside to
 * be preserved during the copy operation.
 */
SYM_CODE_START(arm64_relocate_new_kernel)
	/* Set up the list loop variables. */
	ldr	x18, [x0, #KIMAGE_ARCH_ZERO_PAGE] /* x18 = zero page for BBM */
	ldr	x17, [x0, #KIMAGE_ARCH_TTBR1]	/* x17 = linear map copy */
	ldr	x16, [x0, #KIMAGE_HEAD]		/* x16 = kimage_head */
	ldr	x22, [x0, #KIMAGE_ARCH_PHYS_OFFSET] /* x22 = phys_offset */
	raw_dcache_line_size x15, x1		/* x15 = dcache line size */
	/*
	 * Install the copy of the linear map: break to the zero page first,
	 * then make the new tables live (Break-Before-Make).
	 */
	break_before_make_ttbr_switch	x18, x17, x1, x2
.Lloop:
	and	x12, x16, PAGE_MASK		/* x12 = addr */
	sub	x12, x12, x22			/* Convert x12 to virt */
	/* Test the entry flags. */
.Ltest_source:
	tbz	x16, IND_SOURCE_BIT, .Ltest_indirection

	/* Copy the page, then clean+invalidate the dest page to PoC. */
	mov	x19, x13		/* save dest; copy_page advances x13 */
	copy_page x13, x12, x1, x2, x3, x4, x5, x6, x7, x8
	add	x1, x19, #PAGE_SIZE
	dcache_by_myline_op civac, sy, x19, x1, x15, x20
	b	.Lnext
.Ltest_indirection:
	tbz	x16, IND_INDIRECTION_BIT, .Ltest_destination
	mov	x14, x12			/* ptr = addr */
	b	.Lnext
.Ltest_destination:
	tbz	x16, IND_DESTINATION_BIT, .Lnext
	mov	x13, x12			/* dest = addr */
.Lnext:
	ldr	x16, [x14], #8			/* entry = *ptr++ */
	tbz	x16, IND_DONE_BIT, .Lloop	/* while (!(entry & DONE)) */
	/* Wait for writes from copy_page to finish, drop stale insns. */
	dsb	nsh
	ic	iallu
	dsb	nsh
	isb
	ldr	x4, [x0, #KIMAGE_START]		/* relocation start */
	ldr	x1, [x0, #KIMAGE_ARCH_EL2_VECTORS] /* el2 vectors, 0 if el1 */
	ldr	x0, [x0, #KIMAGE_ARCH_DTB_MEM]	/* dtb address */
	turn_off_mmu x12, x13

	/* Start new image. */
	cbz	x1, .Lel1
	mov	x1, x4				/* relocation start */
	mov	x2, x0				/* dtb address */
	mov	x3, xzr
	mov	x4, xzr
	mov	x0, #HVC_SOFT_RESTART
	hvc	#0				/* Jumps from el2 */
.Lel1:	/* Boot protocol: x0 = dtb, x1/x2/x3 = 0 (x1 is already 0 here). */
	mov	x2, xzr
	mov	x3, xzr
	br	x4				/* Jumps from el1 */
SYM_CODE_END(arm64_relocate_new_kernel)
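
/*
 * For reference: the copy loop above is the assembly counterpart of the
 * kimage entry-list walk that the generic kexec code performs in C. Below is
 * a minimal, hedged C sketch of that walk, not the kernel's implementation.
 * The IND_* values match include/linux/kexec.h; PAGE_SIZE assumes 4K pages;
 * copy_one_page() and list_virt() are hypothetical helpers standing in for
 * the copy_page macro and the phys-to-virt conversion done with x22 above.
 *
 *	#define PAGE_SIZE	4096UL
 *	#define PAGE_MASK	(~(PAGE_SIZE - 1))
 *
 *	#define IND_DESTINATION	0x1UL	// entry sets the dest page
 *	#define IND_INDIRECTION	0x2UL	// entry points at next list page
 *	#define IND_DONE	0x4UL	// entry terminates the list
 *	#define IND_SOURCE	0x8UL	// entry is a page to copy to dest
 *
 *	// Hypothetical helpers (not kernel APIs).
 *	extern unsigned long *list_virt(unsigned long addr);
 *	extern void copy_one_page(unsigned long dest, unsigned long src);
 *
 *	static void walk_kimage_list(unsigned long head)
 *	{
 *		unsigned long entry = head;	// first entry: indirection/DONE
 *		unsigned long *ptr = NULL;	// set by first IND_INDIRECTION
 *		unsigned long dest = 0;
 *
 *		while (!(entry & IND_DONE)) {		// tbz ... IND_DONE_BIT
 *			unsigned long addr = entry & PAGE_MASK;
 *
 *			if (entry & IND_SOURCE) {	// .Ltest_source
 *				copy_one_page(dest, addr);
 *				dest += PAGE_SIZE;	// copy_page advances x13
 *			} else if (entry & IND_INDIRECTION) {
 *				ptr = list_virt(addr);	// .Ltest_indirection
 *			} else if (entry & IND_DESTINATION) {
 *				dest = addr;		// .Ltest_destination
 *			}
 *			entry = *ptr++;			// .Lnext
 *		}
 *	}
 *
 * The test order mirrors the assembly, which checks IND_SOURCE first since
 * source pages make up nearly all of the list.
 */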