// SPDX-License-Identifier: GPL-2.0
/*
 * s390 code for kexec_file_load system call
 *
 * Copyright IBM Corp. 2018
 *
 * Author(s): Philipp Rudo <prudo@linux.vnet.ibm.com>
 */

#define pr_fmt(fmt)	"kexec: " fmt

#include <linux/elf.h>
#include <linux/errno.h>
#include <linux/kexec.h>
#include <linux/module_signature.h>
#include <linux/verification.h>
#include <linux/vmalloc.h>
#include <asm/boot_data.h>
#include <asm/ipl.h>
#include <asm/setup.h>

const struct kexec_file_ops * const kexec_file_loaders[] = {
	&s390_kexec_elf_ops,
	&s390_kexec_image_ops,
	NULL,
};

#ifdef CONFIG_KEXEC_SIG
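/*
 * Verify the signature appended to the kernel image: the image is expected
 * to end with a PKCS#7 blob, a struct module_signature and the
 * MODULE_SIG_STRING marker. The signature is checked against the platform
 * keyring. Verification is skipped when the system was not IPLed in secure
 * mode.
 */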
int s390_verify_sig(const char *kernel, unsigned long kernel_len)
{
	const unsigned long marker_len = sizeof(MODULE_SIG_STRING) - 1;
	struct module_signature *ms;
	unsigned long sig_len;

	/* Skip signature verification when not secure IPLed. */
	if (!ipl_secure_flag)
		return 0;

	if (marker_len > kernel_len)
		return -EKEYREJECTED;

	if (memcmp(kernel + kernel_len - marker_len, MODULE_SIG_STRING,
		   marker_len))
		return -EKEYREJECTED;
	kernel_len -= marker_len;

	ms = (void *)kernel + kernel_len - sizeof(*ms);
	kernel_len -= sizeof(*ms);

	sig_len = be32_to_cpu(ms->sig_len);
	if (sig_len >= kernel_len)
		return -EKEYREJECTED;
	kernel_len -= sig_len;

	if (ms->id_type != PKEY_ID_PKCS7)
		return -EKEYREJECTED;

	if (ms->algo != 0 ||
	    ms->hash != 0 ||
	    ms->signer_len != 0 ||
	    ms->key_id_len != 0 ||
	    ms->__pad[0] != 0 ||
	    ms->__pad[1] != 0 ||
	    ms->__pad[2] != 0) {
		return -EBADMSG;
	}

	return verify_pkcs7_signature(kernel, kernel_len,
				      kernel + kernel_len, sig_len,
				      VERIFY_USE_PLATFORM_KEYRING,
				      VERIFYING_MODULE_SIGNATURE,
				      NULL, NULL);
}
#endif /* CONFIG_KEXEC_SIG */

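/*
 * Tell the purgatory code where to jump to and what kind of kernel is
 * being loaded; for a crash kernel also pass the location and size of the
 * crash memory region.
 */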
static int kexec_file_update_purgatory(struct kimage *image,
				       struct s390_load_data *data)
{
	u64 entry, type;
	int ret;

	if (image->type == KEXEC_TYPE_CRASH) {
		entry = STARTUP_KDUMP_OFFSET;
		type = KEXEC_TYPE_CRASH;
	} else {
		entry = STARTUP_NORMAL_OFFSET;
		type = KEXEC_TYPE_DEFAULT;
	}

	ret = kexec_purgatory_get_set_symbol(image, "kernel_entry", &entry,
					     sizeof(entry), false);
	if (ret)
		return ret;

	ret = kexec_purgatory_get_set_symbol(image, "kernel_type", &type,
					     sizeof(type), false);
	if (ret)
		return ret;

	if (image->type == KEXEC_TYPE_CRASH) {
		u64 crash_size;

		ret = kexec_purgatory_get_set_symbol(image, "crash_start",
						     &crashk_res.start,
						     sizeof(crashk_res.start),
						     false);
		if (ret)
			return ret;

		crash_size = crashk_res.end - crashk_res.start + 1;
		ret = kexec_purgatory_get_set_symbol(image, "crash_size",
						     &crash_size,
						     sizeof(crash_size),
						     false);
	}
	return ret;
}

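/*
 * Load the purgatory code page aligned behind the components loaded so far
 * and fill in the symbols it needs at run time.
 */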
static int kexec_file_add_purgatory(struct kimage *image,
				    struct s390_load_data *data)
{
	struct kexec_buf buf;
	int ret;

	buf.image = image;

	data->memsz = ALIGN(data->memsz, PAGE_SIZE);
	buf.mem = data->memsz;
	if (image->type == KEXEC_TYPE_CRASH)
		buf.mem += crashk_res.start;

	ret = kexec_load_purgatory(image, &buf);
	if (ret)
		return ret;
	data->memsz += buf.memsz;

	return kexec_file_update_purgatory(image, data);
}

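/*
 * Place the initrd page aligned behind the components loaded so far,
 * record its location and size in the parameter area and add it to the
 * IPL report.
 */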
static int kexec_file_add_initrd(struct kimage *image,
				 struct s390_load_data *data)
{
	struct kexec_buf buf;
	int ret;

	buf.image = image;

	buf.buffer = image->initrd_buf;
	buf.bufsz = image->initrd_buf_len;

	data->memsz = ALIGN(data->memsz, PAGE_SIZE);
	buf.mem = data->memsz;
	if (image->type == KEXEC_TYPE_CRASH)
		buf.mem += crashk_res.start;
	buf.memsz = buf.bufsz;

	data->parm->initrd_start = data->memsz;
	data->parm->initrd_size = buf.memsz;
	data->memsz += buf.memsz;

	ret = kexec_add_buffer(&buf);
	if (ret)
		return ret;

	return ipl_report_add_component(data->report, &buf, 0, 0);
}

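/*
 * Copy the platform certificates into the IPL report, place the finished
 * report behind the other components and make the new kernel's lowcore
 * ipl_parmblock_ptr point to it.
 */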
static int kexec_file_add_ipl_report(struct kimage *image,
				     struct s390_load_data *data)
{
	__u32 *lc_ipl_parmblock_ptr;
	unsigned int len, ncerts;
	struct kexec_buf buf;
	unsigned long addr;
	void *ptr, *end;
	int ret;

	buf.image = image;

	data->memsz = ALIGN(data->memsz, PAGE_SIZE);
	buf.mem = data->memsz;
	if (image->type == KEXEC_TYPE_CRASH)
		buf.mem += crashk_res.start;

	ptr = (void *)ipl_cert_list_addr;
	end = ptr + ipl_cert_list_size;
	ncerts = 0;
	while (ptr < end) {
		ncerts++;
		len = *(unsigned int *)ptr;
		ptr += sizeof(len);
		ptr += len;
	}

	addr = data->memsz + data->report->size;
	addr += ncerts * sizeof(struct ipl_rb_certificate_entry);
	ptr = (void *)ipl_cert_list_addr;
	while (ptr < end) {
		len = *(unsigned int *)ptr;
		ptr += sizeof(len);
		ipl_report_add_certificate(data->report, ptr, addr, len);
		addr += len;
		ptr += len;
	}

	ret = -ENOMEM;
	buf.buffer = ipl_report_finish(data->report);
	if (!buf.buffer)
		goto out;
	buf.bufsz = data->report->size;
	buf.memsz = buf.bufsz;
	image->arch.ipl_buf = buf.buffer;

	data->memsz += buf.memsz;

	lc_ipl_parmblock_ptr =
		data->kernel_buf + offsetof(struct lowcore, ipl_parmblock_ptr);
	*lc_ipl_parmblock_ptr = (__u32)buf.mem;

	ret = kexec_add_buffer(&buf);
out:
	return ret;
}

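/*
 * Common helper for the s390 kexec_file image loaders: let add_kernel()
 * place the kernel, copy the command line into the parameter area and add
 * initrd, purgatory and IPL report to the image.
 */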
void *kexec_file_add_components(struct kimage *image,
				int (*add_kernel)(struct kimage *image,
						  struct s390_load_data *data))
{
	unsigned long max_command_line_size = LEGACY_COMMAND_LINE_SIZE;
	struct s390_load_data data = {0};
	unsigned long minsize;
	int ret;

	data.report = ipl_report_init(&ipl_block);
	if (IS_ERR(data.report))
		return data.report;

	ret = add_kernel(image, &data);
	if (ret)
		goto out;

	ret = -EINVAL;
	minsize = PARMAREA + offsetof(struct parmarea, command_line);
	if (image->kernel_buf_len < minsize)
		goto out;

	if (data.parm->max_command_line_size)
		max_command_line_size = data.parm->max_command_line_size;

	if (minsize + max_command_line_size < minsize)
		goto out;

	if (image->kernel_buf_len < minsize + max_command_line_size)
		goto out;

	if (image->cmdline_buf_len >= max_command_line_size)
		goto out;

	memcpy(data.parm->command_line, image->cmdline_buf,
	       image->cmdline_buf_len);

	if (image->type == KEXEC_TYPE_CRASH) {
		data.parm->oldmem_base = crashk_res.start;
		data.parm->oldmem_size = crashk_res.end - crashk_res.start + 1;
	}

	if (image->initrd_buf) {
		ret = kexec_file_add_initrd(image, &data);
		if (ret)
			goto out;
	}

	ret = kexec_file_add_purgatory(image, &data);
	if (ret)
		goto out;

	if (data.kernel_mem == 0) {
		unsigned long restart_psw = 0x0008000080000000UL;

		restart_psw += image->start;
		memcpy(data.kernel_buf, &restart_psw, sizeof(restart_psw));
		image->start = 0;
	}

	ret = kexec_file_add_ipl_report(image, &data);
out:
	ipl_report_free(data.report);
	return ERR_PTR(ret);
}

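/*
 * Apply the RELA relocations of one purgatory section; R_390_PLT32DBL
 * relocations are resolved like R_390_PC32DBL.
 */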
int arch_kexec_apply_relocations_add(struct purgatory_info *pi,
				     Elf_Shdr *section,
				     const Elf_Shdr *relsec,
				     const Elf_Shdr *symtab)
{
	const char *strtab, *name, *shstrtab;
	const Elf_Shdr *sechdrs;
	Elf_Rela *relas;
	int i, r_type;
	int ret;

	/* String & section header string table */
	sechdrs = (void *)pi->ehdr + pi->ehdr->e_shoff;
	strtab = (char *)pi->ehdr + sechdrs[symtab->sh_link].sh_offset;
	shstrtab = (char *)pi->ehdr + sechdrs[pi->ehdr->e_shstrndx].sh_offset;

	relas = (void *)pi->ehdr + relsec->sh_offset;

	for (i = 0; i < relsec->sh_size / sizeof(*relas); i++) {
		const Elf_Sym *sym;	/* symbol to relocate */
		unsigned long addr;	/* final location after relocation */
		unsigned long val;	/* relocated symbol value */
		void *loc;		/* tmp location to modify */

		sym = (void *)pi->ehdr + symtab->sh_offset;
		sym += ELF64_R_SYM(relas[i].r_info);

		if (sym->st_name)
			name = strtab + sym->st_name;
		else
			name = shstrtab + sechdrs[sym->st_shndx].sh_name;

		if (sym->st_shndx == SHN_UNDEF) {
			pr_err("Undefined symbol: %s\n", name);
			return -ENOEXEC;
		}

		if (sym->st_shndx == SHN_COMMON) {
			pr_err("symbol '%s' in common section\n", name);
			return -ENOEXEC;
		}

		if (sym->st_shndx >= pi->ehdr->e_shnum &&
		    sym->st_shndx != SHN_ABS) {
			pr_err("Invalid section %d for symbol %s\n",
			       sym->st_shndx, name);
			return -ENOEXEC;
		}

		loc = pi->purgatory_buf;
		loc += section->sh_offset;
		loc += relas[i].r_offset;

		val = sym->st_value;
		if (sym->st_shndx != SHN_ABS)
			val += pi->sechdrs[sym->st_shndx].sh_addr;
		val += relas[i].r_addend;

		addr = section->sh_addr + relas[i].r_offset;

		r_type = ELF64_R_TYPE(relas[i].r_info);

		if (r_type == R_390_PLT32DBL)
			r_type = R_390_PC32DBL;

		ret = arch_kexec_do_relocs(r_type, loc, val, addr);
		if (ret) {
			pr_err("Unknown rela relocation: %d\n", r_type);
			return -ENOEXEC;
		}
	}
	return 0;
}

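/* Free the vmalloc'ed IPL report buffer and run the common post-load cleanup. */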
int arch_kimage_file_post_load_cleanup(struct kimage *image)
{
	vfree(image->arch.ipl_buf);
	image->arch.ipl_buf = NULL;

	return kexec_image_post_load_cleanup_default(image);
}