1 /*
2 * Copyright (c) 2016 Oracle and/or its affiliates. All rights reserved.
3 *
4 */
5
6 #include <xen/cpu.h>
7 #include <xen/elf.h>
8 #include <xen/err.h>
9 #include <xen/guest_access.h>
10 #include <xen/keyhandler.h>
11 #include <xen/lib.h>
12 #include <xen/list.h>
13 #include <xen/mm.h>
14 #include <xen/sched.h>
15 #include <xen/smp.h>
16 #include <xen/softirq.h>
17 #include <xen/spinlock.h>
18 #include <xen/string.h>
19 #include <xen/symbols.h>
20 #include <xen/tasklet.h>
21 #include <xen/version.h>
22 #include <xen/virtual_region.h>
23 #include <xen/vmap.h>
24 #include <xen/wait.h>
25 #include <xen/livepatch_elf.h>
26 #include <xen/livepatch.h>
27 #include <xen/livepatch_payload.h>
28
29 #include <asm/alternative.h>
30 #include <asm/event.h>
31
32 #define is_hook_enabled(hook) ({ (hook) && *(hook); })
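/*
 * Note on the double indirection: a hook section stores a pointer to the
 * hook function, and the payload keeps a pointer to that section slot, so
 * both levels must be non-NULL before a hook may be called. Illustrative
 * sketch (hypothetical names, not part of this file):
 *
 *     int (**hook)(struct payload *) = sec->load_addr;
 *     if ( is_hook_enabled(hook) )
 *         rc = (*hook)(payload);
 */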
33
34 /*
35 * Protects against payload_list operations and also allows only one
36 * caller in schedule_work.
37 */
38 static DEFINE_SPINLOCK(payload_lock);
39 static LIST_HEAD(payload_list);
40
41 /*
42 * Patches which have been applied. Need RCU in case we crash (and then
43 * traps code would iterate via applied_list) when adding entries on the list.
44 */
45 static DEFINE_RCU_READ_LOCK(rcu_applied_lock);
46 static LIST_HEAD(applied_list);
47
48 static unsigned int payload_cnt;
49 static unsigned int payload_version = 1;
50
51 /* Defines an outstanding patching action. */
52 struct livepatch_work
53 {
54 atomic_t semaphore; /* Used to rendezvous CPUs in
55 check_for_livepatch_work. */
56 uint32_t timeout; /* Timeout to do the operation. */
57 struct payload *data; /* The payload on which to act. */
58 volatile bool_t do_work; /* Signals work to do. */
59 volatile bool_t ready; /* Signals all CPUs synchronized. */
60 unsigned int cmd; /* Action request: LIVEPATCH_ACTION_* */
61 };
62
63 /* There can be only one outstanding patching action. */
64 static struct livepatch_work livepatch_work;
65
66 /*
67 * Indicate whether the CPU needs to consult the livepatch_work structure.
68 * We want a per-cpu data structure, otherwise check_for_livepatch_work
69 * would hammer the global livepatch_work structure on every guest VMEXIT.
70 * Having a per-cpu variable lessens the load.
71 */
72 static DEFINE_PER_CPU(bool_t, work_to_do);
73 static DEFINE_PER_CPU(struct tasklet, livepatch_tasklet);
74
75 static int get_name(const struct xen_livepatch_name *name, char *n)
76 {
77 if ( !name->size || name->size > XEN_LIVEPATCH_NAME_SIZE )
78 return -EINVAL;
79
80 if ( name->pad[0] || name->pad[1] || name->pad[2] )
81 return -EINVAL;
82
83 if ( copy_from_guest(n, name->name, name->size) )
84 return -EFAULT;
85
86 if ( n[name->size - 1] )
87 return -EINVAL;
88
89 return 0;
90 }
91
92 static int verify_payload(const struct xen_sysctl_livepatch_upload *upload, char *n)
93 {
94 if ( get_name(&upload->name, n) )
95 return -EINVAL;
96
97 if ( !upload->size )
98 return -EINVAL;
99
100 if ( upload->size > LIVEPATCH_MAX_SIZE )
101 return -EINVAL;
102
103 if ( !guest_handle_okay(upload->payload, upload->size) )
104 return -EFAULT;
105
106 return 0;
107 }
108
109 bool_t is_patch(const void *ptr)
110 {
111 const struct payload *data;
112 bool_t r = 0;
113
114 /*
115 * Only RCU locking since this list is only ever changed during apply
116 * or revert context. And in case it dies there we need a safe list.
117 */
118 rcu_read_lock(&rcu_applied_lock);
119 list_for_each_entry_rcu ( data, &applied_list, applied_list )
120 {
121 if ( (ptr >= data->rw_addr &&
122 ptr < (data->rw_addr + data->rw_size)) ||
123 (ptr >= data->ro_addr &&
124 ptr < (data->ro_addr + data->ro_size)) ||
125 (ptr >= data->text_addr &&
126 ptr < (data->text_addr + data->text_size)) )
127 {
128 r = 1;
129 break;
130 }
131
132 }
133 rcu_read_unlock(&rcu_applied_lock);
134
135 return r;
136 }
137
138 unsigned long livepatch_symbols_lookup_by_name(const char *symname)
139 {
140 const struct payload *data;
141
142 ASSERT(spin_is_locked(&payload_lock));
143 list_for_each_entry ( data, &payload_list, list )
144 {
145 unsigned int i;
146
147 for ( i = 0; i < data->nsyms; i++ )
148 {
149 if ( !data->symtab[i].new_symbol )
150 continue;
151
152 if ( !strcmp(data->symtab[i].name, symname) )
153 return data->symtab[i].value;
154 }
155 }
156
157 return 0;
158 }
159
160 static const char *livepatch_symbols_lookup(unsigned long addr,
161 unsigned long *symbolsize,
162 unsigned long *offset,
163 char *namebuf)
164 {
165 const struct payload *data;
166 unsigned int i, best;
167 const void *va = (const void *)addr;
168 const char *n = NULL;
169
170 /*
171 * Only RCU locking since this list is only ever changed during apply
172 * or revert context. And in case it dies there we need a safe list.
173 */
174 rcu_read_lock(&rcu_applied_lock);
175 list_for_each_entry_rcu ( data, &applied_list, applied_list )
176 {
177 if ( va < data->text_addr ||
178 va >= (data->text_addr + data->text_size) )
179 continue;
180
181 best = UINT_MAX;
182
183 for ( i = 0; i < data->nsyms; i++ )
184 {
185 if ( data->symtab[i].value <= addr &&
186 (best == UINT_MAX ||
187 data->symtab[best].value < data->symtab[i].value) )
188 best = i;
189 }
190
191 if ( best == UINT_MAX )
192 break;
193
194 if ( symbolsize )
195 *symbolsize = data->symtab[best].size;
196 if ( offset )
197 *offset = addr - data->symtab[best].value;
198 if ( namebuf )
199 strlcpy(namebuf, data->name, KSYM_NAME_LEN);
200
201 n = data->symtab[best].name;
202 break;
203 }
204 rcu_read_unlock(&rcu_applied_lock);
205
206 return n;
207 }
208
209 /* Lookup function's old address if not already resolved. */
210 static int resolve_old_address(struct livepatch_func *f,
211 const struct livepatch_elf *elf)
212 {
213 if ( f->old_addr )
214 return 0;
215
216 f->old_addr = (void *)symbols_lookup_by_name(f->name);
217 if ( !f->old_addr )
218 {
219 f->old_addr = (void *)livepatch_symbols_lookup_by_name(f->name);
220 if ( !f->old_addr )
221 {
222 printk(XENLOG_ERR LIVEPATCH "%s: Could not resolve old address of %s\n",
223 elf->name, f->name);
224 return -ENOENT;
225 }
226 }
227 dprintk(XENLOG_DEBUG, LIVEPATCH "%s: Resolved old address %s => %p\n",
228 elf->name, f->name, f->old_addr);
229
230 return 0;
231 }
232
233 static struct payload *find_payload(const char *name)
234 {
235 struct payload *data, *found = NULL;
236
237 ASSERT(spin_is_locked(&payload_lock));
238 list_for_each_entry ( data, &payload_list, list )
239 {
240 if ( !strcmp(data->name, name) )
241 {
242 found = data;
243 break;
244 }
245 }
246
247 return found;
248 }
249
250 /*
251 * Functions related to XEN_SYSCTL_LIVEPATCH_UPLOAD (see livepatch_upload), and
252 * freeing payload (XEN_SYSCTL_LIVEPATCH_ACTION:LIVEPATCH_ACTION_UNLOAD).
253 */
254
255 static void free_payload_data(struct payload *payload)
256 {
257 /* Set to zero until "move_payload". */
258 if ( !payload->pages )
259 return;
260
261 vfree((void *)payload->text_addr);
262
263 payload->pages = 0;
264 }
265
266 /*
267 * calc_section computes the size (taking into account section alignment).
268 *
269 * Furthermore the offset is set with the offset from the start of the virtual
270 * address space for the payload (using passed in size). This is used in
271 * move_payload to figure out the destination location (load_addr).
272 */
273 static void calc_section(const struct livepatch_elf_sec *sec, size_t *size,
274 unsigned int *offset)
275 {
276 const Elf_Shdr *s = sec->sec;
277 size_t align_size;
278
279 align_size = ROUNDUP(*size, s->sh_addralign);
280 *offset = align_size;
281 *size = s->sh_size + align_size;
282 }
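/*
 * Worked example (values are illustrative): with a running *size of 10
 * and a section having sh_addralign = 8 and sh_size = 20, ROUNDUP(10, 8)
 * yields 16, so *offset becomes 16 and the new running *size becomes 36.
 * Each region (text/rw/ro) is laid out this way independently, and the
 * offsets are later added to the region's base address in move_payload.
 */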
283
284 static int move_payload(struct payload *payload, struct livepatch_elf *elf)
285 {
286 void *text_buf, *ro_buf, *rw_buf;
287 unsigned int i, rw_buf_sec, rw_buf_cnt = 0;
288 size_t size = 0;
289 unsigned int *offset;
290 int rc = 0;
291
292 offset = xmalloc_array(unsigned int, elf->hdr->e_shnum);
293 if ( !offset )
294 return -ENOMEM;
295
296 /* Compute size of different regions. */
297 for ( i = 1; i < elf->hdr->e_shnum; i++ )
298 {
299 /*
300 * Do nothing. These are .rel.text, .rel.*, .symtab, .strtab,
301 * and .shstrtab. For the non-relocatable ones we allocate and copy
302 * these via other means - and the .rel sections we can ignore as we
303 * only use them once during loading.
304 *
305 * Also ignore sections with zero size. Those can be, for example,
306 * .data or .bss.
307 */
308 if ( livepatch_elf_ignore_section(elf->sec[i].sec) )
309 offset[i] = UINT_MAX;
310 else if ( (elf->sec[i].sec->sh_flags & SHF_EXECINSTR) &&
311 !(elf->sec[i].sec->sh_flags & SHF_WRITE) )
312 calc_section(&elf->sec[i], &payload->text_size, &offset[i]);
313 else if ( !(elf->sec[i].sec->sh_flags & SHF_EXECINSTR) &&
314 (elf->sec[i].sec->sh_flags & SHF_WRITE) )
315 calc_section(&elf->sec[i], &payload->rw_size, &offset[i]);
316 else if ( !(elf->sec[i].sec->sh_flags & SHF_EXECINSTR) &&
317 !(elf->sec[i].sec->sh_flags & SHF_WRITE) )
318 calc_section(&elf->sec[i], &payload->ro_size, &offset[i]);
319 else
320 {
321 dprintk(XENLOG_DEBUG, LIVEPATCH "%s: Not supporting %s section!\n",
322 elf->name, elf->sec[i].name);
323 rc = -EOPNOTSUPP;
324 goto out;
325 }
326 }
327
328 /*
329 * Total of all three regions - RX, RW, and RO. We have to keep
330 * them on separate pages, so we PAGE_ALIGN the RX and RW regions;
331 * the last one will by default fall on its own page.
333 */
334 size = PAGE_ALIGN(payload->text_size) + PAGE_ALIGN(payload->rw_size) +
335 payload->ro_size;
336
337 size = PFN_UP(size); /* Nr of pages. */
338 text_buf = vmalloc_xen(size * PAGE_SIZE);
339 if ( !text_buf )
340 {
341 printk(XENLOG_ERR LIVEPATCH "%s: Could not allocate memory for payload\n",
342 elf->name);
343 rc = -ENOMEM;
344 goto out;
345 }
346 rw_buf = text_buf + PAGE_ALIGN(payload->text_size);
347 ro_buf = rw_buf + PAGE_ALIGN(payload->rw_size);
348
349 payload->pages = size;
350 payload->text_addr = text_buf;
351 payload->rw_addr = rw_buf;
352 payload->ro_addr = ro_buf;
353
354 for ( i = 1; i < elf->hdr->e_shnum; i++ )
355 {
356 if ( !livepatch_elf_ignore_section(elf->sec[i].sec) )
357 {
358 void *buf;
359
360 if ( elf->sec[i].sec->sh_flags & SHF_EXECINSTR )
361 buf = text_buf;
362 else if ( elf->sec[i].sec->sh_flags & SHF_WRITE )
363 {
364 buf = rw_buf;
365 rw_buf_sec = i;
366 rw_buf_cnt++;
367 }
368 else
369 buf = ro_buf;
370
371 ASSERT(offset[i] != UINT_MAX);
372
373 elf->sec[i].load_addr = buf + offset[i];
374
375 /* Don't copy NOBITS - such as BSS. */
376 if ( elf->sec[i].sec->sh_type != SHT_NOBITS )
377 {
378 memcpy(elf->sec[i].load_addr, elf->sec[i].data,
379 elf->sec[i].sec->sh_size);
380 dprintk(XENLOG_DEBUG, LIVEPATCH "%s: Loaded %s at %p\n",
381 elf->name, elf->sec[i].name, elf->sec[i].load_addr);
382 }
383 else
384 memset(elf->sec[i].load_addr, 0, elf->sec[i].sec->sh_size);
385 }
386 }
387
388 /*
389 * Only one RW section with non-zero size: .livepatch.funcs,
390 * or only RO sections.
391 */
392 if ( !rw_buf_cnt || (rw_buf_cnt == 1 &&
393 !strcmp(elf->sec[rw_buf_sec].name, ELF_LIVEPATCH_FUNC)) )
394 payload->safe_to_reapply = true;
395 out:
396 xfree(offset);
397
398 return rc;
399 }
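/*
 * Resulting layout sketch (sizes are examples only):
 *
 *   text_addr -> [ executable sections, text_size, padded to PAGE_ALIGN ]
 *   rw_addr   -> [ writable sections,   rw_size,   padded to PAGE_ALIGN ]
 *   ro_addr   -> [ read-only sections,  ro_size,   tail of the allocation ]
 *
 * Keeping the regions on separate pages is what lets secure_payload apply
 * distinct RX/RW/RO permissions per region.
 */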
400
401 static int secure_payload(struct payload *payload, struct livepatch_elf *elf)
402 {
403 int rc = 0;
404 unsigned int text_pages, rw_pages, ro_pages;
405
406 text_pages = PFN_UP(payload->text_size);
407
408 if ( text_pages )
409 {
410 rc = arch_livepatch_secure(payload->text_addr, text_pages, LIVEPATCH_VA_RX);
411 if ( rc )
412 return rc;
413 }
414 rw_pages = PFN_UP(payload->rw_size);
415 if ( rw_pages )
416 {
417 rc = arch_livepatch_secure(payload->rw_addr, rw_pages, LIVEPATCH_VA_RW);
418 if ( rc )
419 return rc;
420 }
421
422 ro_pages = PFN_UP(payload->ro_size);
423 if ( ro_pages )
424 rc = arch_livepatch_secure(payload->ro_addr, ro_pages, LIVEPATCH_VA_RO);
425
426 ASSERT(ro_pages + rw_pages + text_pages == payload->pages);
427
428 return rc;
429 }
430
431 static bool section_ok(const struct livepatch_elf *elf,
432 const struct livepatch_elf_sec *sec, size_t sz)
433 {
434 if ( !elf || !sec )
435 return false;
436
437 if ( sec->sec->sh_size % sz )
438 {
439 printk(XENLOG_ERR LIVEPATCH "%s: Wrong size %"PRIuElfWord" of %s (must be multiple of %zu)\n",
440 elf->name, sec->sec->sh_size, sec->name, sz);
441 return false;
442 }
443
444 return true;
445 }
446
447 static int xen_build_id_dep(const struct payload *payload)
448 {
449 const void *id = NULL;
450 unsigned int len = 0;
451 int rc;
452
453 ASSERT(payload->xen_dep.len);
454 ASSERT(payload->xen_dep.p);
455
456 rc = xen_build_id(&id, &len);
457 if ( rc )
458 return rc;
459
460 if ( payload->xen_dep.len != len || memcmp(id, payload->xen_dep.p, len) ) {
461 printk(XENLOG_ERR LIVEPATCH "%s: check against hypervisor build-id failed\n",
462 payload->name);
463 return -EINVAL;
464 }
465
466 return 0;
467 }
468
469 static int check_special_sections(const struct livepatch_elf *elf)
470 {
471 unsigned int i;
472 static const char *const names[] = { ELF_LIVEPATCH_DEPENDS,
473 ELF_LIVEPATCH_XEN_DEPENDS,
474 ELF_BUILD_ID_NOTE};
475 DECLARE_BITMAP(found, ARRAY_SIZE(names)) = { 0 };
476
477 for ( i = 0; i < ARRAY_SIZE(names); i++ )
478 {
479 const struct livepatch_elf_sec *sec;
480
481 sec = livepatch_elf_sec_by_name(elf, names[i]);
482 if ( !sec )
483 {
484 printk(XENLOG_ERR LIVEPATCH "%s: %s is missing\n",
485 elf->name, names[i]);
486 return -EINVAL;
487 }
488
489 if ( !sec->sec->sh_size )
490 {
491 printk(XENLOG_ERR LIVEPATCH "%s: %s is empty\n",
492 elf->name, names[i]);
493 return -EINVAL;
494 }
495
496 if ( test_and_set_bit(i, found) )
497 {
498 printk(XENLOG_ERR LIVEPATCH "%s: %s was seen more than once\n",
499 elf->name, names[i]);
500 return -EINVAL;
501 }
502 }
503
504 return 0;
505 }
506
507 static int check_patching_sections(const struct livepatch_elf *elf)
508 {
509 unsigned int i;
510 static const char *const names[] = { ELF_LIVEPATCH_FUNC,
511 ELF_LIVEPATCH_LOAD_HOOKS,
512 ELF_LIVEPATCH_UNLOAD_HOOKS,
513 ELF_LIVEPATCH_PREAPPLY_HOOK,
514 ELF_LIVEPATCH_APPLY_HOOK,
515 ELF_LIVEPATCH_POSTAPPLY_HOOK,
516 ELF_LIVEPATCH_PREREVERT_HOOK,
517 ELF_LIVEPATCH_REVERT_HOOK,
518 ELF_LIVEPATCH_POSTREVERT_HOOK};
519 DECLARE_BITMAP(found, ARRAY_SIZE(names)) = { 0 };
520
521 /*
522 * The patching sections are optional, but at least one
523 * must be present. Otherwise, there is nothing to do.
524 * All the existing sections must not be empty and must
525 * be present at most once.
526 */
527 for ( i = 0; i < ARRAY_SIZE(names); i++ )
528 {
529 const struct livepatch_elf_sec *sec;
530
531 sec = livepatch_elf_sec_by_name(elf, names[i]);
532 if ( !sec )
533 {
534 dprintk(XENLOG_DEBUG, LIVEPATCH "%s: %s is missing\n",
535 elf->name, names[i]);
536 continue; /* This section is optional */
537 }
538
539 if ( !sec->sec->sh_size )
540 {
541 printk(XENLOG_ERR LIVEPATCH "%s: %s is empty\n",
542 elf->name, names[i]);
543 return -EINVAL;
544 }
545
546 if ( test_and_set_bit(i, found) )
547 {
548 printk(XENLOG_ERR LIVEPATCH "%s: %s was seen more than once\n",
549 elf->name, names[i]);
550 return -EINVAL;
551 }
552 }
553
554 /* Checking if at least one section is present. */
555 if ( bitmap_empty(found, ARRAY_SIZE(names)) )
556 {
557 printk(XENLOG_ERR LIVEPATCH "%s: Nothing to patch. Aborting...\n",
558 elf->name);
559 return -EINVAL;
560 }
561
562 return 0;
563 }
564
565 static inline int livepatch_verify_expectation_fn(const struct livepatch_func *func)
566 {
567 const livepatch_expectation_t *exp = &func->expect;
568
569 /* Ignore disabled expectations. */
570 if ( !exp->enabled )
571 return 0;
572
573 /* There is nothing to expect */
574 if ( !func->old_addr )
575 return -EFAULT;
576
577 if ( exp->len > sizeof(exp->data) )
578 return -EOVERFLOW;
579
580 if ( exp->rsv )
581 return -EINVAL;
582
583 /* Incorrect expectation */
584 if ( func->old_size < exp->len )
585 return -ERANGE;
586
587 if ( memcmp(func->old_addr, exp->data, exp->len) )
588 {
589 printk(XENLOG_ERR LIVEPATCH "%s: expectation failed: expected:%*phN, actual:%*phN\n",
590 func->name, exp->len, exp->data, exp->len, func->old_addr);
591 return -EINVAL;
592 }
593
594 return 0;
595 }
596
597 static inline int livepatch_check_expectations(const struct payload *payload)
598 {
599 int i, rc;
600
601 printk(XENLOG_INFO LIVEPATCH "%s: Verifying enabled expectations for all functions\n",
602 payload->name);
603
604 for ( i = 0; i < payload->nfuncs; i++ )
605 {
606 const struct livepatch_func *func = &(payload->funcs[i]);
607
608 rc = livepatch_verify_expectation_fn(func);
609 if ( rc )
610 {
611 printk(XENLOG_ERR LIVEPATCH "%s: expectations of %s failed (rc=%d), aborting!\n",
612 payload->name, func->name ?: "unknown", rc);
613 return rc;
614 }
615 }
616
617 return 0;
618 }
619
620 /*
621 * Look up the specified section and, if it exists, assign its address to the specified hook.
622 * Perform section pointer and size validation: single hook sections must contain a
623 * single pointer only.
624 */
625 #define LIVEPATCH_ASSIGN_SINGLE_HOOK(elf, hook, section_name) do { \
626 const struct livepatch_elf_sec *__sec = livepatch_elf_sec_by_name(elf, section_name); \
627 if ( !__sec ) \
628 break; \
629 if ( !section_ok(elf, __sec, sizeof(*hook)) || __sec->sec->sh_size != sizeof(*hook) ) \
630 return -EINVAL; \
631 hook = __sec->load_addr; \
632 } while (0)
633
634 /*
635 * Look up the specified section and, if it exists, assign its address to the specified hook.
636 * Perform section pointer and size validation: multi hook sections must contain an
637 * array whose size must be a multiple of the array's item size.
638 */
639 #define LIVEPATCH_ASSIGN_MULTI_HOOK(elf, hook, nhooks, section_name) do { \
640 const struct livepatch_elf_sec *__sec = livepatch_elf_sec_by_name(elf, section_name); \
641 if ( !__sec ) \
642 break; \
643 if ( !section_ok(elf, __sec, sizeof(*hook)) ) \
644 return -EINVAL; \
645 hook = __sec->load_addr; \
646 nhooks = __sec->sec->sh_size / sizeof(*hook); \
647 } while (0)
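/*
 * For reference, a payload typically populates these hook sections via
 * helper macros from livepatch_payload.h - a sketch, assuming the
 * LIVEPATCH_LOAD_HOOK macro and a hypothetical hook function:
 *
 *     static void my_load_hook(void) { ... }
 *     LIVEPATCH_LOAD_HOOK(my_load_hook);  // emits a function pointer into
 *                                         // the ELF_LIVEPATCH_LOAD_HOOKS section
 *
 * The section contents are thus arrays of function pointers, which is what
 * section_ok() validates against sizeof(*hook) above.
 */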
648
649 static int prepare_payload(struct payload *payload,
650 struct livepatch_elf *elf)
651 {
652 const struct livepatch_elf_sec *sec;
653 unsigned int i;
654 struct livepatch_func *f;
655 struct virtual_region *region;
656 const Elf_Note *n;
657
658 sec = livepatch_elf_sec_by_name(elf, ELF_LIVEPATCH_FUNC);
659 if ( sec )
660 {
661 if ( !section_ok(elf, sec, sizeof(*payload->funcs)) )
662 return -EINVAL;
663
664 payload->funcs = sec->load_addr;
665 payload->nfuncs = sec->sec->sh_size / sizeof(*payload->funcs);
666
667 for ( i = 0; i < payload->nfuncs; i++ )
668 {
669 int rc;
670
671 f = &(payload->funcs[i]);
672
673 if ( f->version != LIVEPATCH_PAYLOAD_VERSION )
674 {
675 printk(XENLOG_ERR LIVEPATCH "%s: Wrong version (%u). Expected %d\n",
676 elf->name, f->version, LIVEPATCH_PAYLOAD_VERSION);
677 return -EOPNOTSUPP;
678 }
679
680 /* 'old_addr', 'new_addr', 'new_size' can all be zero. */
681 if ( !f->old_size )
682 {
683 printk(XENLOG_ERR LIVEPATCH "%s: Address or size fields are zero\n",
684 elf->name);
685 return -EINVAL;
686 }
687
688 rc = arch_livepatch_verify_func(f);
689 if ( rc )
690 return rc;
691
692 rc = resolve_old_address(f, elf);
693 if ( rc )
694 return rc;
695
696 rc = livepatch_verify_distance(f);
697 if ( rc )
698 return rc;
699 }
700 }
701
702 LIVEPATCH_ASSIGN_MULTI_HOOK(elf, payload->load_funcs, payload->n_load_funcs, ELF_LIVEPATCH_LOAD_HOOKS);
703 LIVEPATCH_ASSIGN_MULTI_HOOK(elf, payload->unload_funcs, payload->n_unload_funcs, ELF_LIVEPATCH_UNLOAD_HOOKS);
704
705 LIVEPATCH_ASSIGN_SINGLE_HOOK(elf, payload->hooks.apply.pre, ELF_LIVEPATCH_PREAPPLY_HOOK);
706 LIVEPATCH_ASSIGN_SINGLE_HOOK(elf, payload->hooks.apply.action, ELF_LIVEPATCH_APPLY_HOOK);
707 LIVEPATCH_ASSIGN_SINGLE_HOOK(elf, payload->hooks.apply.post, ELF_LIVEPATCH_POSTAPPLY_HOOK);
708
709 LIVEPATCH_ASSIGN_SINGLE_HOOK(elf, payload->hooks.revert.pre, ELF_LIVEPATCH_PREREVERT_HOOK);
710 LIVEPATCH_ASSIGN_SINGLE_HOOK(elf, payload->hooks.revert.action, ELF_LIVEPATCH_REVERT_HOOK);
711 LIVEPATCH_ASSIGN_SINGLE_HOOK(elf, payload->hooks.revert.post, ELF_LIVEPATCH_POSTREVERT_HOOK);
712
713 sec = livepatch_elf_sec_by_name(elf, ELF_BUILD_ID_NOTE);
714 if ( sec )
715 {
716 const struct payload *data;
717
718 n = sec->load_addr;
719
720 if ( sec->sec->sh_size <= sizeof(*n) )
721 return -EINVAL;
722
723 if ( xen_build_id_check(n, sec->sec->sh_size,
724 &payload->id.p, &payload->id.len) )
725 return -EINVAL;
726
727 if ( !payload->id.len || !payload->id.p )
728 return -EINVAL;
729
730 /* Make sure it is not a duplicate. */
731 list_for_each_entry ( data, &payload_list, list )
732 {
733 /* No way _this_ payload is on the list. */
734 ASSERT(data != payload);
735 if ( data->id.len == payload->id.len &&
736 !memcmp(data->id.p, payload->id.p, data->id.len) )
737 {
738 dprintk(XENLOG_DEBUG, LIVEPATCH "%s: Already loaded as %s!\n",
739 elf->name, data->name);
740 return -EEXIST;
741 }
742 }
743 }
744
745 sec = livepatch_elf_sec_by_name(elf, ELF_LIVEPATCH_DEPENDS);
746 if ( sec )
747 {
748 n = sec->load_addr;
749
750 if ( sec->sec->sh_size <= sizeof(*n) )
751 return -EINVAL;
752
753 if ( xen_build_id_check(n, sec->sec->sh_size,
754 &payload->dep.p, &payload->dep.len) )
755 return -EINVAL;
756
757 if ( !payload->dep.len || !payload->dep.p )
758 return -EINVAL;
759 }
760
761 sec = livepatch_elf_sec_by_name(elf, ELF_LIVEPATCH_XEN_DEPENDS);
762 if ( sec )
763 {
764 n = sec->load_addr;
765
766 if ( sec->sec->sh_size <= sizeof(*n) )
767 return -EINVAL;
768
769 if ( xen_build_id_check(n, sec->sec->sh_size,
770 &payload->xen_dep.p, &payload->xen_dep.len) )
771 return -EINVAL;
772
773 if ( !payload->xen_dep.len || !payload->xen_dep.p )
774 return -EINVAL;
775 }
776
777 /* Setup the virtual region with proper data. */
778 region = &payload->region;
779
780 region->symbols_lookup = livepatch_symbols_lookup;
781 region->start = payload->text_addr;
782 region->end = payload->text_addr + payload->text_size;
783
784 /* Optional sections. */
785 for ( i = 0; i < BUGFRAME_NR; i++ )
786 {
787 char str[14];
788
789 snprintf(str, sizeof(str), ".bug_frames.%u", i);
790 sec = livepatch_elf_sec_by_name(elf, str);
791 if ( !sec )
792 continue;
793
794 if ( !section_ok(elf, sec, sizeof(*region->frame[i].bugs)) )
795 return -EINVAL;
796
797 region->frame[i].bugs = sec->load_addr;
798 region->frame[i].n_bugs = sec->sec->sh_size /
799 sizeof(*region->frame[i].bugs);
800 }
801
802 sec = livepatch_elf_sec_by_name(elf, ".altinstructions");
803 if ( sec )
804 {
805 #ifdef CONFIG_HAS_ALTERNATIVE
806 struct alt_instr *a, *start, *end;
807
808 if ( !section_ok(elf, sec, sizeof(*a)) )
809 return -EINVAL;
810
811 start = sec->load_addr;
812 end = sec->load_addr + sec->sec->sh_size;
813
814 for ( a = start; a < end; a++ )
815 {
816 const void *instr = ALT_ORIG_PTR(a);
817 const void *replacement = ALT_REPL_PTR(a);
818
819 if ( instr < region->start || instr >= region->end ||
820 replacement < region->start || replacement >= region->end )
821 {
822 printk(XENLOG_ERR LIVEPATCH "%s Alt patching outside payload: %p\n",
823 elf->name, instr);
824 return -EINVAL;
825 }
826 }
827 apply_alternatives(start, end);
828 #else
829 printk(XENLOG_ERR LIVEPATCH "%s: We don't support alternative patching\n",
830 elf->name);
831 return -EOPNOTSUPP;
832 #endif
833 }
834
835 sec = livepatch_elf_sec_by_name(elf, ".ex_table");
836 if ( sec )
837 {
838 #ifdef CONFIG_HAS_EX_TABLE
839 struct exception_table_entry *s, *e;
840
841 if ( !section_ok(elf, sec, sizeof(*region->ex)) )
842 return -EINVAL;
843
844 s = sec->load_addr;
845 e = sec->load_addr + sec->sec->sh_size;
846
847 sort_exception_table(s, e);
848
849 region->ex = s;
850 region->ex_end = e;
851 #else
852 printk(XENLOG_ERR LIVEPATCH "%s: We don't support .ex_table\n",
853 elf->name);
854 return -EOPNOTSUPP;
855 #endif
856 }
857
858 sec = livepatch_elf_sec_by_name(elf, ".modinfo");
859 if ( sec )
860 {
861 if ( !section_ok(elf, sec, sizeof(*payload->metadata.data)) )
862 return -EINVAL;
863
864 payload->metadata.data = sec->load_addr;
865 payload->metadata.len = sec->sec->sh_size;
866
867 /* The metadata is required to consist of NUL-terminated strings. */
868 if ( payload->metadata.data[payload->metadata.len - 1] != '\0' )
869 {
870 printk(XENLOG_ERR LIVEPATCH "%s: Incorrect metadata format detected\n", payload->name);
871 return -EINVAL;
872 }
873 }
874
875 return 0;
876 }
877
878 static bool_t is_payload_symbol(const struct livepatch_elf *elf,
879 const struct livepatch_elf_sym *sym)
880 {
881 if ( sym->sym->st_shndx == SHN_UNDEF ||
882 sym->sym->st_shndx >= elf->hdr->e_shnum )
883 return 0;
884
885 /*
886 * The payload is not a final image as we dynamically link against it.
887 * As such the linker has left symbols we don't care about and which
888 * binutils would have removed had it been a final image. Hence we:
889 * - For SHF_ALLOC - ignore symbols referring to sections that are not
890 * loaded.
891 */
892 if ( !(elf->sec[sym->sym->st_shndx].sec->sh_flags & SHF_ALLOC) )
893 return 0;
894
895 /* - And ignore empty symbols (\0). */
896 if ( *sym->name == '\0' )
897 return 0;
898
899 /*
900 * - For SHF_MERGE - ignore local symbols referring to mergeable sections.
901 * (ld squashes them all in one section and discards the symbols) when
902 * those symbols start with '.L' (like .LCx). Those are intermediate
903 * artifacts of assembly.
904 *
905 * See elf_link_input_bfd and _bfd_elf_is_local_label_name in binutils.
906 */
907 if ( (elf->sec[sym->sym->st_shndx].sec->sh_flags & SHF_MERGE) &&
908 !strncmp(sym->name, ".L", 2) )
909 return 0;
910
911 return arch_livepatch_symbol_ok(elf, sym);
912 }
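/*
 * Example of what the filtering above keeps and drops (illustrative):
 * a local label such as ".LC0", emitted for a string literal in a
 * mergeable .rodata.str1.1 section, is dropped; a payload function
 * symbol such as "xen_hello_world" in a SHF_ALLOC .text section is kept.
 */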
913
914 static int build_symbol_table(struct payload *payload,
915 const struct livepatch_elf *elf)
916 {
917 unsigned int i, j, nsyms = 0;
918 size_t strtab_len = 0;
919 struct livepatch_symbol *symtab;
920 char *strtab;
921
922 /* Recall that symbol @0 is always NULL. */
923 for ( i = 1; i < elf->nsym; i++ )
924 {
925 if ( is_payload_symbol(elf, elf->sym + i) )
926 {
927 nsyms++;
928 strtab_len += strlen(elf->sym[i].name) + 1;
929 }
930 }
931
932 symtab = xzalloc_array(struct livepatch_symbol, nsyms);
933 strtab = xzalloc_array(char, strtab_len);
934
935 if ( !strtab || !symtab )
936 {
937 xfree(strtab);
938 xfree(symtab);
939 return -ENOMEM;
940 }
941
942 nsyms = 0;
943 strtab_len = 0;
944 for ( i = 1; i < elf->nsym; i++ )
945 {
946 if ( is_payload_symbol(elf, elf->sym + i) )
947 {
948 symtab[nsyms].name = strtab + strtab_len;
949 symtab[nsyms].size = elf->sym[i].sym->st_size;
950 symtab[nsyms].value = elf->sym[i].sym->st_value;
951 symtab[nsyms].new_symbol = 0; /* May be overwritten below. */
952 strtab_len += strlcpy(strtab + strtab_len, elf->sym[i].name,
953 KSYM_NAME_LEN) + 1;
954 nsyms++;
955 }
956 }
957
958 for ( i = 0; i < nsyms; i++ )
959 {
960 bool_t found = 0;
961
962 for ( j = 0; j < payload->nfuncs; j++ )
963 {
964 if ( symtab[i].value == (unsigned long)payload->funcs[j].new_addr )
965 {
966 found = 1;
967 break;
968 }
969 }
970
971 if ( !found )
972 {
973 if ( symbols_lookup_by_name(symtab[i].name) ||
974 livepatch_symbols_lookup_by_name(symtab[i].name) )
975 {
976 printk(XENLOG_ERR LIVEPATCH "%s: duplicate new symbol: %s\n",
977 elf->name, symtab[i].name);
978 xfree(symtab);
979 xfree(strtab);
980 return -EEXIST;
981 }
982 symtab[i].new_symbol = 1;
983 dprintk(XENLOG_DEBUG, LIVEPATCH "%s: new symbol %s\n",
984 elf->name, symtab[i].name);
985 }
986 else
987 {
988 /* new_symbol is not set. */
989 dprintk(XENLOG_DEBUG, LIVEPATCH "%s: overriding symbol %s\n",
990 elf->name, symtab[i].name);
991 }
992 }
993
994 payload->symtab = symtab;
995 payload->strtab = strtab;
996 payload->nsyms = nsyms;
997
998 return 0;
999 }
1000
1001 static void free_payload(struct payload *data)
1002 {
1003 ASSERT(spin_is_locked(&payload_lock));
1004 list_del(&data->list);
1005 payload_cnt--;
1006 payload_version++;
1007 free_payload_data(data);
1008 xfree((void *)data->symtab);
1009 xfree((void *)data->strtab);
1010 xfree(data);
1011 }
1012
1013 static int load_payload_data(struct payload *payload, void *raw, size_t len)
1014 {
1015 struct livepatch_elf elf = { .name = payload->name, .len = len };
1016 int rc = 0;
1017
1018 rc = livepatch_elf_load(&elf, raw);
1019 if ( rc )
1020 goto out;
1021
1022 rc = move_payload(payload, &elf);
1023 if ( rc )
1024 goto out;
1025
1026 rc = livepatch_elf_resolve_symbols(&elf);
1027 if ( rc )
1028 goto out;
1029
1030 rc = livepatch_elf_perform_relocs(&elf);
1031 if ( rc )
1032 goto out;
1033
1034 rc = check_special_sections(&elf);
1035 if ( rc )
1036 goto out;
1037
1038 rc = check_patching_sections(&elf);
1039 if ( rc )
1040 goto out;
1041
1042 rc = prepare_payload(payload, &elf);
1043 if ( rc )
1044 goto out;
1045
1046 rc = xen_build_id_dep(payload);
1047 if ( rc )
1048 goto out;
1049
1050 rc = build_symbol_table(payload, &elf);
1051 if ( rc )
1052 goto out;
1053
1054 rc = secure_payload(payload, &elf);
1055
1056 out:
1057 if ( rc )
1058 free_payload_data(payload);
1059
1060 /* Free our temporary data structure. */
1061 livepatch_elf_free(&elf);
1062
1063 return rc;
1064 }
1065
1066 static int livepatch_upload(struct xen_sysctl_livepatch_upload *upload)
1067 {
1068 struct payload *data, *found;
1069 char n[XEN_LIVEPATCH_NAME_SIZE];
1070 void *raw_data;
1071 int rc;
1072
1073 rc = verify_payload(upload, n);
1074 if ( rc )
1075 return rc;
1076
1077 data = xzalloc(struct payload);
1078 raw_data = vmalloc(upload->size);
1079
1080 spin_lock(&payload_lock);
1081
1082 found = find_payload(n);
1083 if ( IS_ERR(found) )
1084 rc = PTR_ERR(found);
1085 else if ( found )
1086 rc = -EEXIST;
1087 else if ( !data || !raw_data )
1088 rc = -ENOMEM;
1089 else if ( __copy_from_guest(raw_data, upload->payload, upload->size) )
1090 rc = -EFAULT;
1091 else
1092 {
1093 memcpy(data->name, n, strlen(n));
1094
1095 rc = load_payload_data(data, raw_data, upload->size);
1096 if ( rc )
1097 goto out;
1098
1099 data->state = LIVEPATCH_STATE_CHECKED;
1100 INIT_LIST_HEAD(&data->list);
1101 INIT_LIST_HEAD(&data->applied_list);
1102
1103 list_add_tail(&data->list, &payload_list);
1104 payload_cnt++;
1105 payload_version++;
1106 }
1107
1108 out:
1109 spin_unlock(&payload_lock);
1110
1111 vfree(raw_data);
1112
1113 if ( rc && data )
1114 {
1115 xfree((void *)data->symtab);
1116 xfree((void *)data->strtab);
1117 xfree(data);
1118 }
1119
1120 return rc;
1121 }
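/*
 * From the toolstack this is reached via the sysctl - e.g. through
 * xc_livepatch_upload() or the xen-livepatch utility (a sketch, not
 * verified against any particular tools version):
 *
 *     xen-livepatch upload myfix /path/to/myfix.livepatch
 *
 * which fills struct xen_sysctl_livepatch_upload with the name, the size,
 * and a guest handle to the raw ELF payload consumed above.
 */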
1122
1123 static int livepatch_get(struct xen_sysctl_livepatch_get *get)
1124 {
1125 struct payload *data;
1126 int rc;
1127 char n[XEN_LIVEPATCH_NAME_SIZE];
1128
1129 rc = get_name(&get->name, n);
1130 if ( rc )
1131 return rc;
1132
1133 spin_lock(&payload_lock);
1134
1135 data = find_payload(n);
1136 if ( IS_ERR_OR_NULL(data) )
1137 {
1138 spin_unlock(&payload_lock);
1139
1140 if ( !data )
1141 return -ENOENT;
1142
1143 return PTR_ERR(data);
1144 }
1145
1146 get->status.state = data->state;
1147 get->status.rc = data->rc;
1148
1149 spin_unlock(&payload_lock);
1150
1151 return 0;
1152 }
1153
1154 static int livepatch_list(struct xen_sysctl_livepatch_list *list)
1155 {
1156 struct xen_livepatch_status status;
1157 struct payload *data;
1158 unsigned int idx = 0, i = 0;
1159 int rc = 0;
1160
1161 if ( list->nr > 1024 )
1162 return -E2BIG;
1163
1164 if ( list->pad )
1165 return -EINVAL;
1166
1167 if ( list->nr &&
1168 (!guest_handle_okay(list->status, list->nr) ||
1169 !guest_handle_okay(list->len, list->nr) ||
1170 !guest_handle_okay(list->metadata_len, list->nr)) )
1171 return -EINVAL;
1172
1173 spin_lock(&payload_lock);
1174 if ( list->idx >= payload_cnt && payload_cnt )
1175 {
1176 spin_unlock(&payload_lock);
1177 return -EINVAL;
1178 }
1179
1180 list->name_total_size = 0;
1181 list->metadata_total_size = 0;
1182 if ( list->nr )
1183 {
1184 uint64_t name_offset = 0, metadata_offset = 0;
1185
1186 list_for_each_entry( data, &payload_list, list )
1187 {
1188 uint32_t name_len, metadata_len;
1189
1190 if ( list->idx > i++ )
1191 continue;
1192
1193 status.state = data->state;
1194 status.rc = data->rc;
1195
1196 name_len = strlen(data->name) + 1;
1197 list->name_total_size += name_len;
1198
1199 metadata_len = data->metadata.len;
1200 list->metadata_total_size += metadata_len;
1201
1202 if ( !guest_handle_subrange_okay(list->name, name_offset,
1203 name_offset + name_len - 1) ||
1204 !guest_handle_subrange_okay(list->metadata, metadata_offset,
1205 metadata_offset + metadata_len - 1) )
1206 {
1207 rc = -EINVAL;
1208 break;
1209 }
1210
1211 /* N.B. 'idx' != 'i'. */
1212 if ( __copy_to_guest_offset(list->name, name_offset,
1213 data->name, name_len) ||
1214 __copy_to_guest_offset(list->len, idx, &name_len, 1) ||
1215 __copy_to_guest_offset(list->status, idx, &status, 1) ||
1216 __copy_to_guest_offset(list->metadata, metadata_offset,
1217 data->metadata.data, metadata_len) ||
1218 __copy_to_guest_offset(list->metadata_len, idx, &metadata_len, 1) )
1219 {
1220 rc = -EFAULT;
1221 break;
1222 }
1223
1224 idx++;
1225 name_offset += name_len;
1226 metadata_offset += metadata_len;
1227
1228 if ( (idx >= list->nr) || hypercall_preempt_check() )
1229 break;
1230 }
1231 }
1232 else
1233 {
1234 list_for_each_entry( data, &payload_list, list )
1235 {
1236 list->name_total_size += strlen(data->name) + 1;
1237 list->metadata_total_size += data->metadata.len;
1238 }
1239 }
1240 list->nr = payload_cnt - i; /* Remaining amount. */
1241 list->version = payload_version;
1242 spin_unlock(&payload_lock);
1243
1244 /* And how many we have processed. */
1245 return rc ? : idx;
1246 }
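/*
 * A consumer is expected to loop over this call, advancing 'idx' by the
 * number of entries processed, until the remaining count reaches zero,
 * restarting if 'version' changes between calls. A rough sketch of that
 * loop ('do_list_sysctl' is hypothetical):
 *
 *     for ( idx = 0; ; idx += rc )
 *     {
 *         list.idx = idx;
 *         rc = do_list_sysctl(&list);  // returns entries processed
 *         if ( rc < 0 || !list.nr )    // list.nr is the remaining amount
 *             break;
 *     }
 */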
1247
1248 /*
1249 * The following functions get the CPUs into an appropriate state and
1250 * apply (or revert) each of the payload's functions. This is needed
1251 * for XEN_SYSCTL_LIVEPATCH_ACTION operation (see livepatch_action).
1252 */
1253
1254 static inline void livepatch_display_metadata(const struct livepatch_metadata *metadata)
1255 {
1256 const char *str;
1257
1258 if ( metadata && metadata->data && metadata->len > 0 )
1259 {
1260 printk(XENLOG_INFO LIVEPATCH "module metadata:\n");
1261 for ( str = metadata->data; str < (metadata->data + metadata->len); str += (strlen(str) + 1) )
1262 printk(XENLOG_INFO LIVEPATCH " %s\n", str);
1263 }
1264
1265 }
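/*
 * The .modinfo section is a packed sequence of NUL-terminated "key=value"
 * strings, for example (example content only):
 *
 *     "version=1\0author=developer\0description=fix for XSA-NNN\0"
 *
 * which the loop above prints one string per line.
 */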
1266
1267 static int apply_payload(struct payload *data)
1268 {
1269 unsigned int i;
1270 int rc;
1271
1272 rc = arch_livepatch_safety_check();
1273 if ( rc )
1274 {
1275 printk(XENLOG_ERR LIVEPATCH "%s: Safety checks failed: %d\n",
1276 data->name, rc);
1277 return rc;
1278 }
1279
1280 printk(XENLOG_INFO LIVEPATCH "%s: Applying %u functions\n",
1281 data->name, data->nfuncs);
1282
1283 rc = arch_livepatch_quiesce();
1284 if ( rc )
1285 {
1286 printk(XENLOG_ERR LIVEPATCH "%s: unable to quiesce!\n", data->name);
1287 return rc;
1288 }
1289
1290 /*
1291 * Since we are running with IRQs disabled and the hooks may call common
1292 * code - which expects certain spinlocks to run with IRQs enabled - we
1293 * temporarily disable the spinlock IRQ state checks.
1294 */
1295 spin_debug_disable();
1296 for ( i = 0; i < data->n_load_funcs; i++ )
1297 data->load_funcs[i]();
1298 spin_debug_enable();
1299
1300 ASSERT(!local_irq_is_enabled());
1301
1302 for ( i = 0; i < data->nfuncs; i++ )
1303 common_livepatch_apply(&data->funcs[i]);
1304
1305 arch_livepatch_revive();
1306
1307 livepatch_display_metadata(&data->metadata);
1308
1309 return 0;
1310 }
1311
1312 static inline void apply_payload_tail(struct payload *data)
1313 {
1314 /*
1315 * We need RCU variant (which has barriers) in case we crash here.
1316 * The applied_list is iterated by the trap code.
1317 */
1318 list_add_tail_rcu(&data->applied_list, &applied_list);
1319 register_virtual_region(&data->region);
1320
1321 data->state = LIVEPATCH_STATE_APPLIED;
1322 }
1323
1324 static int revert_payload(struct payload *data)
1325 {
1326 unsigned int i;
1327 int rc;
1328
1329 printk(XENLOG_INFO LIVEPATCH "%s: Reverting\n", data->name);
1330
1331 rc = arch_livepatch_quiesce();
1332 if ( rc )
1333 {
1334 printk(XENLOG_ERR LIVEPATCH "%s: unable to quiesce!\n", data->name);
1335 return rc;
1336 }
1337
1338 for ( i = 0; i < data->nfuncs; i++ )
1339 common_livepatch_revert(&data->funcs[i]);
1340
1341 /*
1342 * Since we are running with IRQs disabled and the hooks may call common
1343 * code - which expects certain spinlocks to run with IRQs enabled - we
1344 * temporarily disable the spinlock IRQ state checks.
1345 */
1346 spin_debug_disable();
1347 for ( i = 0; i < data->n_unload_funcs; i++ )
1348 data->unload_funcs[i]();
1349 spin_debug_enable();
1350
1351 ASSERT(!local_irq_is_enabled());
1352
1353 arch_livepatch_revive();
1354 return 0;
1355 }
1356
1357 static inline void revert_payload_tail(struct payload *data)
1358 {
1359
1360 /*
1361 * We need RCU variant (which has barriers) in case we crash here.
1362 * The applied_list is iterated by the trap code.
1363 */
1364 list_del_rcu(&data->applied_list);
1365 unregister_virtual_region(&data->region);
1366
1367 data->reverted = true;
1368 data->state = LIVEPATCH_STATE_CHECKED;
1369 }
1370
1371 /*
1372 * Check if an action has applied the same state to all of the payload's functions consistently.
1373 */
1374 static inline bool was_action_consistent(const struct payload *data, livepatch_func_state_t expected_state)
1375 {
1376 int i;
1377
1378 for ( i = 0; i < data->nfuncs; i++ )
1379 {
1380 struct livepatch_func *f = &(data->funcs[i]);
1381
1382 if ( f->applied != expected_state )
1383 {
1384 printk(XENLOG_ERR LIVEPATCH "%s: Payload has a function: '%s' with inconsistent applied state.\n",
1385 data->name, f->name ?: "noname");
1386
1387 return false;
1388 }
1389 }
1390
1391 return true;
1392 }
1393
1394 /*
1395 * This function is executed having all other CPUs with no deep stack (we may
1396 * have cpu_idle on it) and IRQs disabled.
1397 */
1398 static void livepatch_do_action(void)
1399 {
1400 int rc;
1401 struct payload *data, *other, *tmp;
1402
1403 data = livepatch_work.data;
1404 /*
1405 * This function and the transition from asm to C code should be the only
1406 * one on any stack. No need to lock the payload list or applied list.
1407 */
1408 switch ( livepatch_work.cmd )
1409 {
1410 case LIVEPATCH_ACTION_APPLY:
1411 if ( is_hook_enabled(data->hooks.apply.action) )
1412 {
1413 printk(XENLOG_INFO LIVEPATCH "%s: Calling apply action hook function\n", data->name);
1414
1415 rc = (*data->hooks.apply.action)(data);
1416 }
1417 else
1418 rc = apply_payload(data);
1419
1420 if ( !was_action_consistent(data, rc ? LIVEPATCH_FUNC_NOT_APPLIED : LIVEPATCH_FUNC_APPLIED) )
1421 panic("livepatch: partially applied payload '%s'!\n", data->name);
1422
1423 if ( rc == 0 )
1424 apply_payload_tail(data);
1425 break;
1426
1427 case LIVEPATCH_ACTION_REVERT:
1428 if ( is_hook_enabled(data->hooks.revert.action) )
1429 {
1430 printk(XENLOG_INFO LIVEPATCH "%s: Calling revert action hook function\n", data->name);
1431
1432 rc = (*data->hooks.revert.action)(data);
1433 }
1434 else
1435 rc = revert_payload(data);
1436
1437 if ( !was_action_consistent(data, rc ? LIVEPATCH_FUNC_APPLIED : LIVEPATCH_FUNC_NOT_APPLIED) )
1438 panic("livepatch: partially reverted payload '%s'!\n", data->name);
1439
1440 if ( rc == 0 )
1441 revert_payload_tail(data);
1442 break;
1443
1444 case LIVEPATCH_ACTION_REPLACE:
1445 rc = 0;
1446 /*
1447 * N.B: Use the 'applied_list' member, not 'list'. We also abuse
1448 * the 'normal' list iterator as the list is an RCU one.
1449 */
1450 list_for_each_entry_safe_reverse ( other, tmp, &applied_list, applied_list )
1451 {
1452 if ( is_hook_enabled(other->hooks.revert.action) )
1453 {
1454 printk(XENLOG_INFO LIVEPATCH "%s: Calling revert action hook function\n", other->name);
1455
1456 other->rc = (*other->hooks.revert.action)(other);
1457 }
1458 else
1459 other->rc = revert_payload(other);
1460
1461 if ( !was_action_consistent(other, other->rc
1462 ? LIVEPATCH_FUNC_APPLIED
1463 : LIVEPATCH_FUNC_NOT_APPLIED) )
1464 panic("livepatch: partially reverted payload '%s'!\n", other->name);
1465
1466 if ( other->rc == 0 )
1467 revert_payload_tail(other);
1468 else
1469 {
1470 rc = -EINVAL;
1471 break;
1472 }
1473 }
1474
1475 if ( rc == 0 )
1476 {
1477 /*
1478 * Make sure all expectation requirements are met.
1479 * Beware all the payloads are reverted at this point.
1480 * If expectations are not met the system is left in a
1481 * completely UNPATCHED state!
1482 */
1483 rc = livepatch_check_expectations(data);
1484 if ( rc )
1485 {
1486 printk(XENLOG_ERR LIVEPATCH "%s: SYSTEM MIGHT BE INSECURE: "
1487 "Replace action has been aborted after reverting ALL payloads!\n", data->name);
1488 break;
1489 }
1490
1491 if ( is_hook_enabled(data->hooks.apply.action) )
1492 {
1493 printk(XENLOG_INFO LIVEPATCH "%s: Calling apply action hook function\n", data->name);
1494
1495 rc = (*data->hooks.apply.action)(data);
1496 }
1497 else
1498 rc = apply_payload(data);
1499
1500 if ( !was_action_consistent(data, rc ? LIVEPATCH_FUNC_NOT_APPLIED : LIVEPATCH_FUNC_APPLIED) )
1501 panic("livepatch: partially applied payload '%s'!\n", data->name);
1502
1503 if ( rc == 0 )
1504 apply_payload_tail(data);
1505 }
1506 break;
1507
1508 default:
1509 rc = -EINVAL; /* Make GCC5 happy. */
1510 ASSERT_UNREACHABLE();
1511 break;
1512 }
1513
1514 /* We must set rc as livepatch_action sets it to -EAGAIN when kicking off. */
1515 data->rc = rc;
1516 }
1517
1518 static bool_t is_work_scheduled(const struct payload *data)
1519 {
1520 ASSERT(spin_is_locked(&payload_lock));
1521
1522 return livepatch_work.do_work && livepatch_work.data == data;
1523 }
1524
1525 /*
1526 * Check if payload has any of the vetoing, non-atomic hooks assigned.
1527 * A vetoing, non-atomic hook may perform an operation that changes the
1528 * hypervisor state and may not be guaranteed to succeed. The result of
1529 * such an operation may be returned and may change the livepatch workflow.
1530 * Such hooks may require additional cleanup actions performed by other
1531 * hooks. Thus they are not suitable for replace action.
1532 */
1533 static inline bool has_payload_any_vetoing_hooks(const struct payload *payload)
1534 {
1535 return is_hook_enabled(payload->hooks.apply.pre) ||
1536 is_hook_enabled(payload->hooks.apply.post) ||
1537 is_hook_enabled(payload->hooks.revert.pre) ||
1538 is_hook_enabled(payload->hooks.revert.post);
1539 }
1540
1541 /*
1542 * Checks if any of the already applied livepatches has any vetoing,
1543 * non-atomic hooks assigned.
1544 */
1545 static inline bool livepatch_applied_have_vetoing_hooks(void)
1546 {
1547 struct payload *p;
1548
1549 list_for_each_entry ( p, &applied_list, applied_list )
1550 {
1551 if ( has_payload_any_vetoing_hooks(p) )
1552 return true;
1553 }
1554
1555 return false;
1556 }
1557
1558 static int schedule_work(struct payload *data, uint32_t cmd, uint32_t timeout)
1559 {
1560 ASSERT(spin_is_locked(&payload_lock));
1561
1562 /* Fail if an operation is already scheduled. */
1563 if ( livepatch_work.do_work )
1564 return -EBUSY;
1565
1566 if ( !get_cpu_maps() )
1567 {
1568 printk(XENLOG_ERR LIVEPATCH "%s: unable to get cpu_maps lock!\n",
1569 data->name);
1570 return -EBUSY;
1571 }
1572
1573 livepatch_work.cmd = cmd;
1574 livepatch_work.data = data;
1575 livepatch_work.timeout = timeout ?: MILLISECS(30);
1576
1577 dprintk(XENLOG_DEBUG, LIVEPATCH "%s: timeout is %"PRIu32"ns\n",
1578 data->name, livepatch_work.timeout);
1579
1580 atomic_set(&livepatch_work.semaphore, -1);
1581
1582 livepatch_work.ready = 0;
1583
1584 smp_wmb();
1585
1586 livepatch_work.do_work = 1;
1587 tasklet_schedule_on_cpu(&this_cpu(livepatch_tasklet), smp_processor_id());
1588
1589 put_cpu_maps();
1590
1591 return 0;
1592 }
1593
1594 static void tasklet_fn(void *unused)
1595 {
1596 this_cpu(work_to_do) = 1;
1597 }
1598
1599 static int livepatch_spin(atomic_t *counter, s_time_t timeout,
1600 unsigned int cpus, const char *s)
1601 {
1602 int rc = 0;
1603
1604 while ( atomic_read(counter) != cpus && NOW() < timeout )
1605 cpu_relax();
1606
1607 /* Log & abort. */
1608 if ( atomic_read(counter) != cpus )
1609 {
1610 printk(XENLOG_ERR LIVEPATCH "%s: Timed out on semaphore in %s quiesce phase %u/%u\n",
1611 livepatch_work.data->name, s, atomic_read(counter), cpus);
1612 rc = -EBUSY;
1613 livepatch_work.data->rc = rc;
1614 smp_wmb();
1615 livepatch_work.do_work = 0;
1616 }
1617
1618 return rc;
1619 }
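/*
 * Rough sketch of the rendezvous this implements together with
 * check_for_livepatch_work (layout is illustrative):
 *
 *   master (inc wins: -1 -> 0)           other CPUs (inc: 1, 2, ...)
 *   ------                               ----------
 *   livepatch_spin("CPU"): wait until
 *     semaphore == cpus                  spin until 'ready' is set
 *   semaphore = 0; ready = 1             disable IRQs, inc semaphore
 *   livepatch_spin("IRQ"): wait again    spin until do_work == 0
 *   patch; do_work = 0                   flush pipeline, restore IRQs
 *
 * Either spin phase timing out aborts the whole operation with -EBUSY.
 */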
1620
1621 /*
1622 * The main function which manages the work of quiescing the system and
1623 * patching code.
1624 */
1625 void check_for_livepatch_work(void)
1626 {
1627 #define ACTION(x) [LIVEPATCH_ACTION_##x] = #x
1628 static const char *const names[] = {
1629 ACTION(APPLY),
1630 ACTION(REVERT),
1631 ACTION(REPLACE),
1632 };
1633 #undef ACTION
1634 unsigned int cpu = smp_processor_id();
1635 s_time_t timeout;
1636 unsigned long flags;
1637
1638 /* Only do any work when invoked in truly idle state. */
1639 if ( system_state != SYS_STATE_active ||
1640 !is_idle_domain(current->sched_unit->domain) )
1641 return;
1642
1643 /* Fast path: no work to do. */
1644 if ( !per_cpu(work_to_do, cpu) )
1645 return;
1646
1647 smp_rmb();
1648 /* In case we aborted, other CPUs can skip right away. */
1649 if ( !livepatch_work.do_work )
1650 {
1651 per_cpu(work_to_do, cpu) = 0;
1652 return;
1653 }
1654
1655 ASSERT(local_irq_is_enabled());
1656
1657 /* Set at -1, so will go up to num_online_cpus - 1. */
1658 if ( atomic_inc_and_test(&livepatch_work.semaphore) )
1659 {
1660 struct payload *p;
1661 unsigned int cpus, i;
1662 bool action_done = false;
1663
1664 p = livepatch_work.data;
1665 if ( !get_cpu_maps() )
1666 {
1667 printk(XENLOG_ERR LIVEPATCH "%s: CPU%u - unable to get cpu_maps lock!\n",
1668 p->name, cpu);
1669 per_cpu(work_to_do, cpu) = 0;
1670 livepatch_work.data->rc = -EBUSY;
1671 smp_wmb();
1672 livepatch_work.do_work = 0;
1673 /*
1674 * Do NOT decrement livepatch_work.semaphore - as that may cause
1675 * another CPU (which may at this point be ready to increment it)
1676 * to assume the role of master and then needlessly time out
1677 * (as do_work is zero).
1678 */
1679 return;
1680 }
1681 /* "Mask" NMIs. */
1682 arch_livepatch_mask();
1683
1684 barrier(); /* MUST do it after get_cpu_maps. */
1685 cpus = num_online_cpus() - 1;
1686
1687 if ( cpus )
1688 {
1689 dprintk(XENLOG_DEBUG, LIVEPATCH "%s: CPU%u - IPIing the other %u CPUs\n",
1690 p->name, cpu, cpus);
1691 for_each_online_cpu ( i )
1692 if ( i != cpu )
1693 tasklet_schedule_on_cpu(&per_cpu(livepatch_tasklet, i), i);
1694 }
1695
1696 timeout = livepatch_work.timeout + NOW();
1697 if ( livepatch_spin(&livepatch_work.semaphore, timeout, cpus, "CPU") )
1698 goto abort;
1699
1700 /* All CPUs are waiting, now signal to disable IRQs. */
1701 atomic_set(&livepatch_work.semaphore, 0);
1702 /*
1703 * MUST have a barrier after semaphore so that the other CPUs don't
1704 * leak out of the 'Wait for all CPUs to rendezvous' loop and increment
1705 * 'semaphore' before we set it to zero.
1706 */
1707 smp_wmb();
1708 livepatch_work.ready = 1;
1709
1710 if ( !livepatch_spin(&livepatch_work.semaphore, timeout, cpus, "IRQ") )
1711 {
1712 local_irq_save(flags);
1713 /* Do the patching. */
1714 livepatch_do_action();
1715 /* Serialize and flush out the CPU via CPUID instruction (on x86). */
1716 arch_livepatch_post_action();
1717 action_done = true;
1718 local_irq_restore(flags);
1719 }
1720
1721 abort:
1722 arch_livepatch_unmask();
1723
1724 per_cpu(work_to_do, cpu) = 0;
1725 livepatch_work.do_work = 0;
1726
1727 /* put_cpu_maps has a barrier(). */
1728 put_cpu_maps();
1729
1730 if ( action_done )
1731 {
1732 switch ( livepatch_work.cmd )
1733 {
1734 case LIVEPATCH_ACTION_REVERT:
1735 if ( is_hook_enabled(p->hooks.revert.post) )
1736 {
1737 printk(XENLOG_INFO LIVEPATCH "%s: Calling post-revert hook function with rc=%d\n",
1738 p->name, p->rc);
1739
1740 (*p->hooks.revert.post)(p);
1741 }
1742 break;
1743
1744 case LIVEPATCH_ACTION_APPLY:
1745 if ( is_hook_enabled(p->hooks.apply.post) )
1746 {
1747 printk(XENLOG_INFO LIVEPATCH "%s: Calling post-apply hook function with rc=%d\n",
1748 p->name, p->rc);
1749
1750 (*p->hooks.apply.post)(p);
1751 }
1752 break;
1753
1754 case LIVEPATCH_ACTION_REPLACE:
1755 if ( has_payload_any_vetoing_hooks(p) )
1756 {
1757 /* It should be impossible to get here since livepatch_action() guards against that. */
1758 panic(LIVEPATCH "%s: REPLACE action is not supported on livepatches with vetoing hooks!\n",
1759 p->name);
1760 ASSERT_UNREACHABLE();
1761 }
1762 default:
1763 break;
1764 }
1765 }
1766
1767 printk(XENLOG_INFO LIVEPATCH "%s finished %s with rc=%d\n",
1768 p->name, names[livepatch_work.cmd], p->rc);
1769 }
1770 else
1771 {
1772 /* Wait for all CPUs to rendezvous. */
1773 while ( livepatch_work.do_work && !livepatch_work.ready )
1774 cpu_relax();
1775
1776 /* Disable IRQs and signal. */
1777 local_irq_save(flags);
1778 /*
1779 * We re-use the semaphore, so it MUST have been reset by the master
1780 * before we exit the loop above.
1781 */
1782 atomic_inc(&livepatch_work.semaphore);
1783
1784 /* Wait for patching to complete. */
1785 while ( livepatch_work.do_work )
1786 cpu_relax();
1787
1788 /* To flush out pipeline. */
1789 arch_livepatch_post_action();
1790 local_irq_restore(flags);
1791
1792 per_cpu(work_to_do, cpu) = 0;
1793 }
1794 }
1795
1796 /*
1797 * Only allow a dependent payload to be applied on top of the correct
1798 * build-id.
1799 *
1800 * This enforces a stacking order - the first payload MUST be against the
1801 * hypervisor, the second against the first payload, and so on.
1802 *
1803 * Unless the 'internal' parameter is used - in which case we only
1804 * check against the hypervisor.
1805 */
1806 static int build_id_dep(struct payload *payload, bool_t internal)
1807 {
1808 const void *id = NULL;
1809 unsigned int len = 0;
1810 int rc;
1811 const char *name = "hypervisor";
1812
1813 ASSERT(payload->dep.len && payload->dep.p);
1814
1815 /* First time user is against hypervisor. */
1816 if ( internal )
1817 {
1818 rc = xen_build_id(&id, &len);
1819 if ( rc )
1820 return rc;
1821 }
1822 else
1823 {
1824 /* We should be against the last applied one. */
1825 const struct payload *data;
1826
1827 data = list_last_entry(&applied_list, struct payload, applied_list);
1828
1829 id = data->id.p;
1830 len = data->id.len;
1831 name = data->name;
1832 }
1833
1834 if ( payload->dep.len != len ||
1835 memcmp(id, payload->dep.p, len) )
1836 {
1837 printk(XENLOG_ERR LIVEPATCH "%s: check against %s build-id failed\n",
1838 payload->name, name);
1839 return -EINVAL;
1840 }
1841
1842 return 0;
1843 }
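/*
 * Stacking example (hypothetical build-ids): with hypervisor build-id H,
 * the first payload A must carry dep == H; once A is applied, payload B
 * must carry dep == A's build-id, and so on. Applying with the
 * LIVEPATCH_ACTION_APPLY_NODEPS flag (see livepatch_action) skips this
 * chain check entirely.
 */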
1844
1845 static int livepatch_action(struct xen_sysctl_livepatch_action *action)
1846 {
1847 struct payload *data;
1848 char n[XEN_LIVEPATCH_NAME_SIZE];
1849 int rc;
1850
1851 if ( action->pad )
1852 return -EINVAL;
1853
1854 rc = get_name(&action->name, n);
1855 if ( rc )
1856 return rc;
1857
1858 spin_lock(&payload_lock);
1859
1860 data = find_payload(n);
1861 if ( IS_ERR_OR_NULL(data) )
1862 {
1863 spin_unlock(&payload_lock);
1864
1865 if ( !data )
1866 return -ENOENT;
1867
1868 return PTR_ERR(data);
1869 }
1870
1871 if ( is_work_scheduled(data) )
1872 {
1873 rc = -EBUSY;
1874 goto out;
1875 }
1876
1877 switch ( action->cmd )
1878 {
1879 case LIVEPATCH_ACTION_UNLOAD:
1880 if ( data->state == LIVEPATCH_STATE_CHECKED )
1881 {
1882 free_payload(data);
1883 /* No touching 'data' from here on! */
1884 data = NULL;
1885 }
1886 else
1887 rc = -EINVAL;
1888 break;
1889
1890 case LIVEPATCH_ACTION_REVERT:
1891 if ( data->state == LIVEPATCH_STATE_APPLIED )
1892 {
1893 const struct payload *p;
1894
1895 p = list_last_entry(&applied_list, struct payload, applied_list);
1896 ASSERT(p);
1897 /* We should be the last applied one. */
1898 if ( p != data )
1899 {
1900 printk(XENLOG_ERR LIVEPATCH "%s: can't unload. Top is %s\n",
1901 data->name, p->name);
1902 rc = -EBUSY;
1903 break;
1904 }
1905
1906 if ( is_hook_enabled(data->hooks.revert.pre) )
1907 {
1908 printk(XENLOG_INFO LIVEPATCH "%s: Calling pre-revert hook function\n", data->name);
1909
1910 rc = (*data->hooks.revert.pre)(data);
1911 if ( rc )
1912 {
1913 printk(XENLOG_ERR LIVEPATCH "%s: pre-revert hook failed (rc=%d), aborting!\n",
1914 data->name, rc);
1915 data->rc = rc;
1916 break;
1917 }
1918 }
1919
1920 data->rc = -EAGAIN;
1921 rc = schedule_work(data, action->cmd, action->timeout);
1922 }
1923 break;
1924
1925 case LIVEPATCH_ACTION_APPLY:
1926 if ( data->state == LIVEPATCH_STATE_CHECKED )
1927 {
1928 /*
1929 * It is unsafe to apply a reverted payload as the .data (or .bss)
1930 * may not be in pristine condition. Hence we MUST unload and then
1931 * apply the patch again. Unless the payload has only one
1932 * RW section (.livepatch.funcs).
1933 */
1934 if ( data->reverted && !data->safe_to_reapply )
1935 {
1936 printk(XENLOG_ERR LIVEPATCH "%s: can't revert as payload has .data. Please unload\n",
1937 data->name);
1938 data->rc = -EINVAL;
1939 break;
1940 }
1941
1942 /*
1943 * Check if action is issued with nodeps flags to ignore module
1944 * stack dependencies.
1945 */
1946 if ( !(action->flags & LIVEPATCH_ACTION_APPLY_NODEPS) )
1947 {
1948 rc = build_id_dep(data, !!list_empty(&applied_list));
1949 if ( rc )
1950 break;
1951 }
1952
1953 /* Make sure all expectation requirements are met. */
1954 rc = livepatch_check_expectations(data);
1955 if ( rc )
1956 break;
1957
1958 if ( is_hook_enabled(data->hooks.apply.pre) )
1959 {
1960 printk(XENLOG_INFO LIVEPATCH "%s: Calling pre-apply hook function\n", data->name);
1961
1962 rc = (*data->hooks.apply.pre)(data);
1963 if ( rc )
1964 {
1965 printk(XENLOG_ERR LIVEPATCH "%s: pre-apply hook failed (rc=%d), aborting!\n",
1966 data->name, rc);
1967 data->rc = rc;
1968 break;
1969 }
1970 }
1971
1972 data->rc = -EAGAIN;
1973 rc = schedule_work(data, action->cmd, action->timeout);
1974 }
1975 break;
1976
1977 case LIVEPATCH_ACTION_REPLACE:
1978 if ( data->state == LIVEPATCH_STATE_CHECKED )
1979 {
1980 rc = build_id_dep(data, 1 /* against hypervisor. */);
1981 if ( rc )
1982 break;
1983
1984 /*
1985 * REPLACE action is not supported on livepatches with vetoing hooks.
1986 * Vetoing hooks usually perform mutating actions on the system and
1987 * typically exist in pairs (pre- hook doing an action and post- hook
1988 * undoing the action). Coalescing all hooks from all applied modules
1989 * cannot be performed without inspecting potential dependencies between
1990 * the mutating hooks and hence cannot be performed automatically by
1991 * the replace action. Also, the replace action cannot safely assume a
1992 * successful revert of all the modules with vetoing hooks. When one
1993 * of the hooks fails due to not meeting certain conditions, the whole
1994 * replace operation must be reverted with all previous pre- and
1995 * post- hooks re-executed (which cannot be guaranteed to succeed).
1996 * The simplest response to this complication is to disallow the replace
1997 * action on modules with vetoing hooks.
1998 */
1999 if ( has_payload_any_vetoing_hooks(data) || livepatch_applied_have_vetoing_hooks() )
2000 {
2001 printk(XENLOG_ERR LIVEPATCH "%s: REPLACE action is not supported on livepatches with vetoing hooks!\n",
2002 data->name);
2003 rc = -EOPNOTSUPP;
2004 break;
2005 }
2006
2007 data->rc = -EAGAIN;
2008 rc = schedule_work(data, action->cmd, action->timeout);
2009 }
2010 break;
2011
2012 default:
2013 rc = -EOPNOTSUPP;
2014 break;
2015 }
2016
2017 out:
2018 spin_unlock(&payload_lock);
2019
2020 return rc;
2021 }
2022
2023 int livepatch_op(struct xen_sysctl_livepatch_op *livepatch)
2024 {
2025 int rc;
2026
2027 if ( livepatch->pad )
2028 return -EINVAL;
2029
2030 switch ( livepatch->cmd )
2031 {
2032 case XEN_SYSCTL_LIVEPATCH_UPLOAD:
2033 rc = livepatch_upload(&livepatch->u.upload);
2034 break;
2035
2036 case XEN_SYSCTL_LIVEPATCH_GET:
2037 rc = livepatch_get(&livepatch->u.get);
2038 break;
2039
2040 case XEN_SYSCTL_LIVEPATCH_LIST:
2041 rc = livepatch_list(&livepatch->u.list);
2042 break;
2043
2044 case XEN_SYSCTL_LIVEPATCH_ACTION:
2045 rc = livepatch_action(&livepatch->u.action);
2046 break;
2047
2048 default:
2049 rc = -EOPNOTSUPP;
2050 break;
2051 }
2052
2053 return rc;
2054 }
2055
2056 static const char *state2str(unsigned int state)
2057 {
2058 #define STATE(x) [LIVEPATCH_STATE_##x] = #x
2059 static const char *const names[] = {
2060 STATE(CHECKED),
2061 STATE(APPLIED),
2062 };
2063 #undef STATE
2064
2065 if ( state >= ARRAY_SIZE(names) || !names[state] )
2066 return "unknown";
2067
2068 return names[state];
2069 }
2070
2071 static void livepatch_printall(unsigned char key)
2072 {
2073 struct payload *data;
2074 const void *binary_id = NULL;
2075 unsigned int len = 0;
2076 unsigned int i;
2077
2078 printk("'%c' pressed - Dumping all livepatch patches\n", key);
2079
2080 if ( !xen_build_id(&binary_id, &len) )
2081 printk("build-id: %*phN\n", len, binary_id);
2082
2083 if ( !spin_trylock(&payload_lock) )
2084 {
2085 printk("Lock held. Try again.\n");
2086 return;
2087 }
2088
2089 list_for_each_entry ( data, &payload_list, list )
2090 {
2091 printk(" name=%s state=%s(%d) %p (.data=%p, .rodata=%p) using %u pages.\n",
2092 data->name, state2str(data->state), data->state, data->text_addr,
2093 data->rw_addr, data->ro_addr, data->pages);
2094
2095 livepatch_display_metadata(&data->metadata);
2096
2097 for ( i = 0; i < data->nfuncs; i++ )
2098 {
2099 struct livepatch_func *f = &(data->funcs[i]);
2100 printk(" %s patch %p(%u) with %p (%u)\n",
2101 f->name, f->old_addr, f->old_size, f->new_addr, f->new_size);
2102
2103 if ( i && !(i % 64) )
2104 {
2105 spin_unlock(&payload_lock);
2106 process_pending_softirqs();
2107 if ( !spin_trylock(&payload_lock) )
2108 {
2109 printk("Couldn't reacquire lock. Try again.\n");
2110 return;
2111 }
2112 }
2113 }
2114 if ( data->id.len )
2115 printk("build-id=%*phN\n", data->id.len, data->id.p);
2116
2117 if ( data->dep.len )
2118 printk("depend-on=%*phN\n", data->dep.len, data->dep.p);
2119
2120 if ( data->xen_dep.len )
2121 printk("depend-on-xen=%*phN\n", data->xen_dep.len, data->xen_dep.p);
2122 }
2123
2124 spin_unlock(&payload_lock);
2125 }
2126
2127 static int cpu_callback(
2128 struct notifier_block *nfb, unsigned long action, void *hcpu)
2129 {
2130 unsigned int cpu = (unsigned long)hcpu;
2131
2132 if ( action == CPU_UP_PREPARE )
2133 tasklet_init(&per_cpu(livepatch_tasklet, cpu), tasklet_fn, NULL);
2134
2135 return NOTIFY_DONE;
2136 }
2137
2138 static struct notifier_block cpu_nfb = {
2139 .notifier_call = cpu_callback
2140 };
2141
2142 static int __init livepatch_init(void)
2143 {
2144 unsigned int cpu;
2145
2146 for_each_online_cpu ( cpu )
2147 {
2148 void *hcpu = (void *)(long)cpu;
2149
2150 cpu_callback(&cpu_nfb, CPU_UP_PREPARE, hcpu);
2151 }
2152
2153 register_cpu_notifier(&cpu_nfb);
2154
2155 register_keyhandler('x', livepatch_printall, "print livepatch info", 1);
2156
2157 arch_livepatch_init();
2158 return 0;
2159 }
2160 __initcall(livepatch_init);
2161
2162 /*
2163 * Local variables:
2164 * mode: C
2165 * c-file-style: "BSD"
2166 * c-basic-offset: 4
2167 * tab-width: 4
2168 * indent-tabs-mode: nil
2169 * End:
2170 */
2171