/*
 * Intel CPU Microcode Update Driver for Linux
 *
 * Copyright (C) 2000-2006 Tigran Aivazian <tigran@aivazian.fsnet.co.uk>
 *               2006      Shaohua Li <shaohua.li@intel.com>
 *
 * This driver allows upgrading microcode on Intel processors
 * belonging to the IA-32 family - PentiumPro, Pentium II,
 * Pentium III, Xeon, Pentium 4, etc.
 *
 * Reference: Section 8.11 of Volume 3a, IA-32 Intel® Architecture
 * Software Developer's Manual
 * Order Number 253668 or free download from:
 *
 * http://developer.intel.com/design/pentium4/manuals/253668.htm
 *
 * For more information, go to http://www.urbanmyth.org/microcode
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <xen/cpu.h>
#include <xen/earlycpio.h>
#include <xen/err.h>
#include <xen/guest_access.h>
#include <xen/init.h>
#include <xen/param.h>
#include <xen/spinlock.h>
#include <xen/stop_machine.h>
#include <xen/watchdog.h>

#include <asm/apic.h>
#include <asm/delay.h>
#include <asm/nmi.h>
#include <asm/processor.h>
#include <asm/setup.h>

#include "private.h"

/*
 * Before performing a late microcode update on any thread, we rendezvous
 * all CPUs in stop_machine context.  The timeout for the CPU rendezvous
 * is 30ms, matching the timeout used by live patching.
 */
#define MICROCODE_CALLIN_TIMEOUT_US 30000

/*
 * The timeout for each thread to complete the update is set to 1s.  This
 * is a conservative choice accounting for all possible interference.
 */
#define MICROCODE_UPDATE_TIMEOUT_US 1000000
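
/*
 * MICROCODE_CALLIN_TIMEOUT_US bounds the wait for all CPUs to call in
 * (wait_cpu_callin()), while MICROCODE_UPDATE_TIMEOUT_US bounds each
 * wait_cpu_callout() interval in control_thread_fn().
 */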

static module_t __initdata ucode_mod;
static signed int __initdata ucode_mod_idx;
static bool_t __initdata ucode_mod_forced;
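/* Number of online cores; computed in microcode_update_helper(). */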
static unsigned int nr_cores;

/*
 * These states help to coordinate CPUs during loading an update.
 *
 * The semantics of each state are as follows:
 *  - LOADING_PREPARE: initial state of 'loading_state'.
 *  - LOADING_CALLIN: CPUs are allowed to call in.
 *  - LOADING_ENTER: all CPUs have called in. Initiate ucode loading.
 *  - LOADING_EXIT: ucode loading is done or aborted.
 */
static enum {
    LOADING_PREPARE,
    LOADING_CALLIN,
    LOADING_ENTER,
    LOADING_EXIT,
} loading_state;

/*
 * If we scan the initramfs.cpio for the early microcode blob and find it,
 * then 'ucode_blob' will contain the pointer to, and the size of, said
 * blob. It is allocated from Xen's heap memory.
 */
struct ucode_mod_blob {
    const void *data;
    size_t size;
};

static struct ucode_mod_blob __initdata ucode_blob;
/*
 * By default we will NOT parse the multiboot modules to see if there is a
 * cpio image with the microcode images.
 */
static bool_t __initdata ucode_scan;

/* By default, ucode loading is done in the NMI handler. */
static bool ucode_in_nmi = true;

/* Protected by microcode_mutex */
static struct microcode_patch *microcode_cache;

void __init microcode_set_module(unsigned int idx)
{
    ucode_mod_idx = idx;
    ucode_mod_forced = 1;
}

/*
 * The format is '[<integer>|scan=<bool>, nmi=<bool>]'. Both options are
 * optional. If EFI has forced which of the multiboot payloads is to be
 * used, only nmi=<bool> is parsed.
 */
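/*
 * For example (following the grammar above):
 *   ucode=2          - use multiboot module 2 as the microcode blob
 *   ucode=-1         - a negative index counts back from the last module
 *   ucode=scan,nmi=0 - scan modules for a cpio archive; apply outside #NMI
 */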
static int __init parse_ucode(const char *s)
{
    const char *ss;
    int val, rc = 0;

    do {
        ss = strchr(s, ',');
        if ( !ss )
            ss = strchr(s, '\0');

        if ( (val = parse_boolean("nmi", s, ss)) >= 0 )
            ucode_in_nmi = val;
        else if ( !ucode_mod_forced ) /* Not forced by EFI */
        {
            if ( (val = parse_boolean("scan", s, ss)) >= 0 )
                ucode_scan = val;
            else
            {
                const char *q;

                ucode_mod_idx = simple_strtol(s, &q, 0);
                if ( q != ss )
                    rc = -EINVAL;
            }
        }

        s = ss + 1;
    } while ( *ss );

    return rc;
}
custom_param("ucode", parse_ucode);

void __init microcode_scan_module(
    unsigned long *module_map,
    const multiboot_info_t *mbi)
{
    module_t *mod = (module_t *)__va(mbi->mods_addr);
    uint64_t *_blob_start;
    unsigned long _blob_size;
    struct cpio_data cd;
    long offset;
    const char *p = NULL;
    int i;

    ucode_blob.size = 0;
    if ( !ucode_scan )
        return;

    if ( boot_cpu_data.x86_vendor == X86_VENDOR_AMD )
        p = "kernel/x86/microcode/AuthenticAMD.bin";
    else if ( boot_cpu_data.x86_vendor == X86_VENDOR_INTEL )
        p = "kernel/x86/microcode/GenuineIntel.bin";
    else
        return;

    /*
     * Try all modules and see which one could be the microcode blob.
     */
    for ( i = 1 /* Ignore dom0 kernel */; i < mbi->mods_count; i++ )
    {
        if ( !test_bit(i, module_map) )
            continue;

        _blob_start = bootstrap_map(&mod[i]);
        _blob_size = mod[i].mod_end;
        if ( !_blob_start )
        {
            printk("Could not map multiboot module #%d (size: %lu)\n",
                   i, _blob_size);
            continue;
        }
        cd.data = NULL;
        cd.size = 0;
        cd = find_cpio_data(p, _blob_start, _blob_size, &offset /* ignore */);
        if ( cd.data )
        {
            ucode_blob.size = cd.size;
            ucode_blob.data = cd.data;
            break;
        }
        bootstrap_map(NULL);
    }
}

void __init microcode_grab_module(
    unsigned long *module_map,
    const multiboot_info_t *mbi)
{
    module_t *mod = (module_t *)__va(mbi->mods_addr);

    if ( ucode_mod_idx < 0 )
        ucode_mod_idx += mbi->mods_count;
    if ( ucode_mod_idx <= 0 || ucode_mod_idx >= mbi->mods_count ||
         !__test_and_clear_bit(ucode_mod_idx, module_map) )
        goto scan;
    ucode_mod = mod[ucode_mod_idx];
scan:
    if ( ucode_scan )
        microcode_scan_module(module_map, mbi);
}
static const struct microcode_ops __read_mostly *microcode_ops;

static DEFINE_SPINLOCK(microcode_mutex);

DEFINE_PER_CPU(struct cpu_signature, cpu_sig);
/* Store the error code of the work done in the NMI handler. */
static DEFINE_PER_CPU(int, loading_err);

/*
 * Count the CPUs that have entered the rendezvous, that have exited it,
 * and that have succeeded in updating microcode, respectively, during a
 * late microcode update.
 *
 * Note that a bitmap is used for callin so that a CPU may set its bit
 * multiple times, which is required by the busy-loop in #NMI handling.
 */
static cpumask_t cpu_callin_map;
static atomic_t cpu_out, cpu_updated;
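/*
 * The patch being applied during the current rendezvous. Outside of an
 * update window it is parked on ZERO_BLOCK_PTR rather than NULL, so that
 * an unintended use is more likely to be caught quickly.
 */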
static const struct microcode_patch *nmi_patch = ZERO_BLOCK_PTR;

/*
 * Return a patch that covers the current CPU. If there are multiple
 * patches, return the one with the highest revision number. Return an
 * error pointer if an error occurs during parsing, and NULL if no
 * matching patch is found.
 */
static struct microcode_patch *parse_blob(const char *buf, size_t len)
{
    microcode_ops->collect_cpu_info();

    return microcode_ops->cpu_request_microcode(buf, len);
}

static void microcode_free_patch(struct microcode_patch *patch)
{
    xfree(patch);
}

/* Return true if the cache gets updated. Otherwise, return false. */
static bool microcode_update_cache(struct microcode_patch *patch)
{
    ASSERT(spin_is_locked(&microcode_mutex));

    if ( !microcode_cache )
        microcode_cache = patch;
    else if ( microcode_ops->compare_patch(patch,
                                           microcode_cache) == NEW_UCODE )
    {
        microcode_free_patch(microcode_cache);
        microcode_cache = patch;
    }
    else
    {
        microcode_free_patch(patch);
        return false;
    }

    return true;
}

/* Wait for a condition to be met with a timeout (us). */
static int wait_for_condition(bool (*func)(unsigned int data),
                              unsigned int data, unsigned int timeout)
{
    while ( !func(data) )
    {
        if ( !timeout-- )
        {
            printk("CPU%u: Timeout in %pS\n",
                   smp_processor_id(), __builtin_return_address(0));
            return -EBUSY;
        }
        udelay(1);
    }

    return 0;
}

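/* Predicates for wait_for_condition(); @nr is the CPU count to reach. */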
static bool wait_cpu_callin(unsigned int nr)
{
    return cpumask_weight(&cpu_callin_map) >= nr;
}

static bool wait_cpu_callout(unsigned int nr)
{
    return atomic_read(&cpu_out) >= nr;
}

/*
 * Load a microcode update on the current CPU.
 *
 * If no patch is provided, the cached patch will be loaded. Microcode
 * updates during AP bringup and CPU resume fall into this case.
 */
static int microcode_update_cpu(const struct microcode_patch *patch)
{
    int err;

    microcode_ops->collect_cpu_info();

    spin_lock(&microcode_mutex);
    if ( patch )
        err = microcode_ops->apply_microcode(patch);
    else if ( microcode_cache )
    {
        err = microcode_ops->apply_microcode(microcode_cache);
        if ( err == -EIO )
        {
            microcode_free_patch(microcode_cache);
            microcode_cache = NULL;
        }
    }
    else
        /* No patch to update with. */
        err = -ENOENT;
    spin_unlock(&microcode_mutex);

    return err;
}

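/*
 * Wait until 'loading_state' reaches @state.  Returns false if
 * LOADING_EXIT is observed while waiting for a different state, which
 * signals that the update has finished or been aborted.
 */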
static bool wait_for_state(typeof(loading_state) state)
{
    typeof(loading_state) cur_state;

    while ( (cur_state = ACCESS_ONCE(loading_state)) != state )
    {
        if ( cur_state == LOADING_EXIT )
            return false;
        cpu_relax();
    }

    return true;
}

static void set_state(typeof(loading_state) state)
{
    ACCESS_ONCE(loading_state) = state;
}

static int secondary_nmi_work(void)
{
    cpumask_set_cpu(smp_processor_id(), &cpu_callin_map);

    return wait_for_state(LOADING_EXIT) ? 0 : -EBUSY;
}

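/*
 * Primary thread: call in, wait for the control thread to move the state
 * to LOADING_ENTER, then apply the patch and account for the result.
 */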
static int primary_thread_work(const struct microcode_patch *patch)
{
    int ret;

    cpumask_set_cpu(smp_processor_id(), &cpu_callin_map);

    if ( !wait_for_state(LOADING_ENTER) )
        return -EBUSY;

    ret = microcode_ops->apply_microcode(patch);
    if ( !ret )
        atomic_inc(&cpu_updated);
    atomic_inc(&cpu_out);

    return ret;
}

static int microcode_nmi_callback(const struct cpu_user_regs *regs, int cpu)
{
    unsigned int primary = cpumask_first(this_cpu(cpu_sibling_mask));
    int ret;

    /* System-generated NMI, leave to the main handler. */
    if ( ACCESS_ONCE(loading_state) != LOADING_CALLIN )
        return 0;

    /*
     * Primary threads load ucode in the NMI handler only if ucode_in_nmi
     * is true. Secondary threads are expected to stay in the NMI handler
     * regardless of ucode_in_nmi.
     */
    if ( cpu == cpumask_first(&cpu_online_map) ||
         (!ucode_in_nmi && cpu == primary) )
        return 0;

    if ( cpu == primary )
        ret = primary_thread_work(nmi_patch);
    else
        ret = secondary_nmi_work();
    this_cpu(loading_err) = ret;

    return 0;
}

static int secondary_thread_fn(void)
{
    if ( !wait_for_state(LOADING_CALLIN) )
        return -EBUSY;

    self_nmi();

    /*
     * Wait until ucode loading is done, in case the NMI does not arrive
     * synchronously; otherwise a not-yet-updated CPU signature might be
     * copied below.
     */
    if ( unlikely(!wait_for_state(LOADING_EXIT)) )
        ASSERT_UNREACHABLE();

    /* Copy the update revision from the primary thread. */
    this_cpu(cpu_sig).rev =
        per_cpu(cpu_sig, cpumask_first(this_cpu(cpu_sibling_mask))).rev;

    return this_cpu(loading_err);
}

static int primary_thread_fn(const struct microcode_patch *patch)
{
    if ( !wait_for_state(LOADING_CALLIN) )
        return -EBUSY;

    if ( ucode_in_nmi )
    {
        self_nmi();

        /*
         * Wait until ucode loading is done, in case the NMI does not
         * arrive synchronously; otherwise a not-yet-updated error might
         * be returned below.
         */
        if ( unlikely(!wait_for_state(LOADING_EXIT)) )
            ASSERT_UNREACHABLE();

        return this_cpu(loading_err);
    }

    return primary_thread_work(patch);
}

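/*
 * Control thread (the first online CPU): orchestrates the rendezvous.
 * Rough sequence: disable the watchdog -> install the NMI callback ->
 * LOADING_CALLIN -> wait for all CPUs to call in -> apply the update
 * locally -> LOADING_ENTER -> wait for every primary thread to finish ->
 * LOADING_EXIT -> restore the NMI callback and the watchdog.
 */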
static int control_thread_fn(const struct microcode_patch *patch)
{
    unsigned int cpu = smp_processor_id(), done;
    unsigned long tick;
    int ret;
    nmi_callback_t *saved_nmi_callback;

    /*
     * We intend to keep interrupts disabled for a long time, which may
     * lead to a watchdog timeout.
     */
    watchdog_disable();

    nmi_patch = patch;
    smp_wmb();
    saved_nmi_callback = set_nmi_callback(microcode_nmi_callback);

    /* Allow threads to call in. */
    set_state(LOADING_CALLIN);

    cpumask_set_cpu(cpu, &cpu_callin_map);

    /* Wait for all threads to call in. */
    ret = wait_for_condition(wait_cpu_callin, num_online_cpus(),
                             MICROCODE_CALLIN_TIMEOUT_US);
    if ( ret )
    {
        set_state(LOADING_EXIT);
        return ret;
    }

    /* The control thread loads ucode first, while others are in the NMI handler. */
    ret = microcode_ops->apply_microcode(patch);
    if ( !ret )
        atomic_inc(&cpu_updated);
    atomic_inc(&cpu_out);

    if ( ret == -EIO )
    {
        printk(XENLOG_ERR
               "Late loading aborted: CPU%u failed to update ucode\n", cpu);
        set_state(LOADING_EXIT);
        return ret;
    }

    /* Let primary threads load the given ucode update. */
    set_state(LOADING_ENTER);

    tick = rdtsc_ordered();
    /* Wait for primary threads to finish the update. */
    while ( (done = atomic_read(&cpu_out)) != nr_cores )
    {
        /*
         * During each timeout interval, at least one CPU is expected to
         * finish its update. Otherwise, something has gone wrong.
         *
         * Note that RDTSC (in wait_for_condition()) is safe for threads to
         * execute while waiting for completion of loading an update.
         */
        if ( wait_for_condition(wait_cpu_callout, (done + 1),
                                MICROCODE_UPDATE_TIMEOUT_US) )
            panic("Timeout while waiting for microcode update (finished %u/%u)",
                  done, nr_cores);

        /* Print a warning message once if a long time is spent here. */
        if ( tick && rdtsc_ordered() - tick >= cpu_khz * 1000 )
        {
            printk(XENLOG_WARNING
                   "WARNING: UPDATING MICROCODE HAS CONSUMED MORE THAN 1 SECOND!\n");
            tick = 0;
        }
    }

    /* Mark loading as done to unblock other threads. */
    set_state(LOADING_EXIT);

    set_nmi_callback(saved_nmi_callback);
    smp_wmb();
    nmi_patch = ZERO_BLOCK_PTR;

    watchdog_enable();

    return ret;
}

static int do_microcode_update(void *patch)
{
    unsigned int cpu = smp_processor_id();
    int ret;

    /*
     * The control thread sets the state to coordinate ucode loading.
     * Primary threads load the given ucode patch. Secondary threads just
     * wait for the completion of the ucode loading process.
     */
    if ( cpu == cpumask_first(&cpu_online_map) )
        ret = control_thread_fn(patch);
    else if ( cpu == cpumask_first(this_cpu(cpu_sibling_mask)) )
        ret = primary_thread_fn(patch);
    else
        ret = secondary_thread_fn();

    return ret;
}

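/*
 * Scratch copy of a guest-provided blob, handed from microcode_update()
 * to microcode_update_helper() via continue_hypercall_on_cpu().
 */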
struct ucode_buf {
    unsigned int len;
    char buffer[];
};

static long microcode_update_helper(void *data)
{
    int ret;
    struct ucode_buf *buffer = data;
    unsigned int cpu, updated;
    struct microcode_patch *patch;

    /* cpu_online_map must not change during the update. */
    if ( !get_cpu_maps() )
    {
        xfree(buffer);
        return -EBUSY;
    }

    /*
     * CPUs other than the first online CPU send a fake (self) NMI to
     * rendezvous in the NMI handler. But a fake NMI to nmi_cpu may
     * trigger unknown_nmi_error(). This check ensures that nmi_cpu
     * won't receive a fake NMI.
     */
    if ( unlikely(cpumask_first(&cpu_online_map) != nmi_cpu) )
    {
        xfree(buffer);
        printk(XENLOG_WARNING
               "CPU%u is expected to lead ucode loading (but got CPU%u)\n",
               nmi_cpu, cpumask_first(&cpu_online_map));
        return -EPERM;
    }

    patch = parse_blob(buffer->buffer, buffer->len);
    xfree(buffer);
    if ( IS_ERR(patch) )
    {
        ret = PTR_ERR(patch);
        printk(XENLOG_WARNING "Parsing microcode blob error %d\n", ret);
        goto put;
    }

    if ( !patch )
    {
        printk(XENLOG_WARNING "microcode: couldn't find any matching ucode in "
                              "the provided blob!\n");
        ret = -ENOENT;
        goto put;
    }

    /*
     * If microcode_cache exists, all CPUs in the system should have at
     * least that ucode revision.
     */
    spin_lock(&microcode_mutex);
    if ( microcode_cache &&
         microcode_ops->compare_patch(patch, microcode_cache) != NEW_UCODE )
    {
        spin_unlock(&microcode_mutex);
        printk(XENLOG_WARNING "microcode: couldn't find any newer revision "
                              "in the provided blob!\n");
        microcode_free_patch(patch);
        ret = -ENOENT;

        goto put;
    }
    spin_unlock(&microcode_mutex);

    cpumask_clear(&cpu_callin_map);
    atomic_set(&cpu_out, 0);
    atomic_set(&cpu_updated, 0);
    loading_state = LOADING_PREPARE;

    /* Calculate the number of online CPU cores. */
    nr_cores = 0;
    for_each_online_cpu(cpu)
        if ( cpu == cpumask_first(per_cpu(cpu_sibling_mask, cpu)) )
            nr_cores++;

    printk(XENLOG_INFO "%u cores are to update their microcode\n", nr_cores);

    /*
     * Late loading dance. Why the heavy-handed stop_machine effort?
     *
     * - HT siblings must be idle and not execute other code while the other
     *   sibling is loading microcode, in order to avoid any negative
     *   interactions caused by the loading.
     *
     * - In addition, microcode updates on the cores must be serialized until
     *   this requirement can be relaxed in the future. Right now, this is
     *   conservative and good.
     */
    ret = stop_machine_run(do_microcode_update, patch, NR_CPUS);

    updated = atomic_read(&cpu_updated);
    if ( updated > 0 )
    {
        spin_lock(&microcode_mutex);
        microcode_update_cache(patch);
        spin_unlock(&microcode_mutex);
    }
    else
        microcode_free_patch(patch);

    if ( updated && updated != nr_cores )
        printk(XENLOG_ERR "ERROR: Updating microcode succeeded on %u cores and failed\n"
               XENLOG_ERR "on other %u cores. A system with differing microcode\n"
               XENLOG_ERR "revisions is considered unstable. Please reboot and do not\n"
               XENLOG_ERR "load the microcode that triggers this warning!\n",
               updated, nr_cores - updated);

 put:
    put_cpu_maps();
    return ret;
}

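/*
 * Hypercall entry point: copy the guest-provided blob into Xen heap
 * memory, then defer the real work to microcode_update_helper() via
 * continue_hypercall_on_cpu().
 */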
int microcode_update(XEN_GUEST_HANDLE(const_void) buf, unsigned long len)
{
    int ret;
    struct ucode_buf *buffer;

    if ( len != (uint32_t)len )
        return -E2BIG;

    if ( microcode_ops == NULL )
        return -EINVAL;

    buffer = xmalloc_flex_struct(struct ucode_buf, buffer, len);
    if ( !buffer )
        return -ENOMEM;

    ret = copy_from_guest(buffer->buffer, buf, len);
    if ( ret )
    {
        xfree(buffer);
        return -EFAULT;
    }
    buffer->len = len;

    return continue_hypercall_on_cpu(smp_processor_id(),
                                     microcode_update_helper, buffer);
}

static int __init microcode_init(void)
{
    /*
     * At this point, all CPUs should have updated their microcode via the
     * early_microcode_* paths, so free the microcode blob.
     */
    if ( ucode_blob.size )
    {
        bootstrap_map(NULL);
        ucode_blob.size = 0;
        ucode_blob.data = NULL;
    }
    else if ( ucode_mod.mod_end )
    {
        bootstrap_map(NULL);
        ucode_mod.mod_end = 0;
    }

    return 0;
}
__initcall(microcode_init);

/* Load a cached update on the current CPU. */
int microcode_update_one(void)
{
    if ( !microcode_ops )
        return -EOPNOTSUPP;

    microcode_ops->collect_cpu_info();

    return microcode_update_cpu(NULL);
}

/* The BSP calls this function to parse the ucode blob and then apply an update. */
static int __init early_microcode_update_cpu(void)
{
    int rc = 0;
    const void *data = NULL;
    size_t len;
    struct microcode_patch *patch;

    if ( ucode_blob.size )
    {
        len = ucode_blob.size;
        data = ucode_blob.data;
    }
    else if ( ucode_mod.mod_end )
    {
        len = ucode_mod.mod_end;
        data = bootstrap_map(&ucode_mod);
    }

    if ( !data )
        return -ENOMEM;

    patch = parse_blob(data, len);
    if ( IS_ERR(patch) )
    {
        printk(XENLOG_WARNING "Parsing microcode blob error %ld\n",
               PTR_ERR(patch));
        return PTR_ERR(patch);
    }

    if ( !patch )
        return -ENOENT;

    spin_lock(&microcode_mutex);
    rc = microcode_update_cache(patch);
    spin_unlock(&microcode_mutex);
    ASSERT(rc);

    return microcode_update_one();
}

int __init early_microcode_init(void)
{
    const struct cpuinfo_x86 *c = &boot_cpu_data;
    int rc = 0;

    switch ( c->x86_vendor )
    {
    case X86_VENDOR_AMD:
        if ( c->x86 >= 0x10 )
            microcode_ops = &amd_ucode_ops;
        break;

    case X86_VENDOR_INTEL:
        if ( c->x86 >= 6 )
            microcode_ops = &intel_ucode_ops;
        break;
    }

    if ( !microcode_ops )
    {
        printk(XENLOG_WARNING "Microcode loading not available\n");
        return -ENODEV;
    }

    microcode_ops->collect_cpu_info();

    if ( ucode_mod.mod_end || ucode_blob.size )
        rc = early_microcode_update_cpu();

    return rc;
}