/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_MICROCODE_H
#define _ASM_X86_MICROCODE_H

#include <asm/cpu.h>
#include <linux/earlycpio.h>
#include <linux/initrd.h>

struct ucode_patch {
	struct list_head plist;
	void *data;		/* Intel uses only this one */
	u32 patch_id;		/* AMD: patch ID of this blob */
	u16 equiv_cpu;		/* AMD: equivalence ID the patch applies to */
};

extern struct list_head microcode_cache;

struct cpu_signature {
	unsigned int sig;	/* CPUID signature */
	unsigned int pf;	/* processor flags (Intel platform ID) */
	unsigned int rev;	/* currently loaded microcode revision */
};

struct device;

enum ucode_state {
	UCODE_OK	= 0,	/* success, nothing (more) to do */
	UCODE_NEW,		/* newer microcode was found and cached */
	UCODE_UPDATED,		/* microcode was applied successfully */
	UCODE_NFOUND,		/* no matching microcode was found */
	UCODE_ERROR,		/* loading or parsing failed */
};

struct microcode_ops {
	enum ucode_state (*request_microcode_user) (int cpu,
				const void __user *buf, size_t size);

	enum ucode_state (*request_microcode_fw) (int cpu, struct device *,
						  bool refresh_fw);

	void (*microcode_fini_cpu) (int cpu);

	/*
	 * The generic 'microcode_core' part guarantees that the
	 * callbacks below are invoked on the target CPU.
	 * See also the "Synchronization" section in microcode_core.c.
	 */
	enum ucode_state (*apply_microcode) (int cpu);
	int (*collect_cpu_info) (int cpu, struct cpu_signature *csig);
};
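
/*
 * Illustrative sketch only (not part of this header): a vendor driver
 * typically implements the callbacks above and hands the filled-in ops
 * structure back to microcode_core via its init_*_microcode() hook.
 * The callback names below are hypothetical.
 *
 *	static struct microcode_ops microcode_foo_ops = {
 *		.request_microcode_user	= foo_request_microcode_user,
 *		.request_microcode_fw	= foo_request_microcode_fw,
 *		.collect_cpu_info	= foo_collect_cpu_info,
 *		.apply_microcode	= foo_apply_microcode,
 *		.microcode_fini_cpu	= foo_microcode_fini_cpu,
 *	};
 *
 *	struct microcode_ops * __init init_foo_microcode(void)
 *	{
 *		return &microcode_foo_ops;
 *	}
 */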

struct ucode_cpu_info {
	struct cpu_signature	cpu_sig;	/* signature of this CPU */
	int			valid;		/* cpu_sig has been collected */
	void			*mc;		/* microcode blob matching this CPU */
};
extern struct ucode_cpu_info ucode_cpu_info[];
struct cpio_data find_microcode_in_initrd(const char *path, bool use_pa);

#ifdef CONFIG_MICROCODE_INTEL
extern struct microcode_ops * __init init_intel_microcode(void);
#else
static inline struct microcode_ops * __init init_intel_microcode(void)
{
	return NULL;
}
#endif /* CONFIG_MICROCODE_INTEL */

#ifdef CONFIG_MICROCODE_AMD
extern struct microcode_ops * __init init_amd_microcode(void);
extern void __exit exit_amd_microcode(void);
#else
static inline struct microcode_ops * __init init_amd_microcode(void)
{
	return NULL;
}
static inline void __exit exit_amd_microcode(void) {}
#endif
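
/*
 * Illustrative sketch only: at init time the core driver selects the
 * vendor-specific ops based on the boot CPU's vendor, roughly along
 * these lines (the real logic lives in microcode_core.c):
 *
 *	struct cpuinfo_x86 *c = &boot_cpu_data;
 *	struct microcode_ops *ops = NULL;
 *
 *	if (c->x86_vendor == X86_VENDOR_INTEL)
 *		ops = init_intel_microcode();
 *	else if (c->x86_vendor == X86_VENDOR_AMD)
 *		ops = init_amd_microcode();
 *
 * A NULL result means there is no microcode loader for this vendor.
 */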

#define MAX_UCODE_COUNT 128

#define QCHAR(a, b, c, d) ((a) + ((b) << 8) + ((c) << 16) + ((d) << 24))
#define CPUID_INTEL1 QCHAR('G', 'e', 'n', 'u')
#define CPUID_INTEL2 QCHAR('i', 'n', 'e', 'I')
#define CPUID_INTEL3 QCHAR('n', 't', 'e', 'l')
#define CPUID_AMD1 QCHAR('A', 'u', 't', 'h')
#define CPUID_AMD2 QCHAR('e', 'n', 't', 'i')
#define CPUID_AMD3 QCHAR('c', 'A', 'M', 'D')

/*
 * CPUID leaf 0 returns the vendor string in EBX, EDX, ECX order
 * ("Genu" in EBX, "ineI" in EDX, "ntel" in ECX), which is why the
 * comparison below XORs EBX, EDX and ECX against the first, second
 * and third string chunk respectively.
 */
#define CPUID_IS(a, b, c, ebx, ecx, edx)	\
		(!((ebx ^ (a))|(edx ^ (b))|(ecx ^ (c))))

/*
 * During the early microcode loading phase on the BSP, boot_cpu_data is not
 * set up yet, so x86_cpuid_vendor() is used to get the vendor ID of the BSP.
 *
 * In the 32-bit AP case, accessing boot_cpu_data would require a linear
 * address. To keep the code simple, x86_cpuid_vendor() is used for the APs
 * as well.
 *
 * x86_cpuid_vendor() reads the vendor information directly from CPUID.
 */
static inline int x86_cpuid_vendor(void)
{
	u32 eax = 0x00000000;
	u32 ebx, ecx = 0, edx;

	native_cpuid(&eax, &ebx, &ecx, &edx);

	if (CPUID_IS(CPUID_INTEL1, CPUID_INTEL2, CPUID_INTEL3, ebx, ecx, edx))
		return X86_VENDOR_INTEL;

	if (CPUID_IS(CPUID_AMD1, CPUID_AMD2, CPUID_AMD3, ebx, ecx, edx))
		return X86_VENDOR_AMD;

	return X86_VENDOR_UNKNOWN;
}

static inline unsigned int x86_cpuid_family(void)
{
	u32 eax = 0x00000001;
	u32 ebx, ecx = 0, edx;

	native_cpuid(&eax, &ebx, &ecx, &edx);

	return x86_family(eax);
}
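
/*
 * Illustrative sketch only: an early loader can use the two helpers above
 * to dispatch before boot_cpu_data is available, for example:
 *
 *	void __init load_ucode_bsp(void)
 *	{
 *		unsigned int vendor = x86_cpuid_vendor();
 *		unsigned int family = x86_cpuid_family();
 *
 *		if (vendor == X86_VENDOR_INTEL && family >= 6)
 *			load_ucode_intel_bsp();
 *		else if (vendor == X86_VENDOR_AMD && family >= 0x10)
 *			load_ucode_amd_bsp(family);
 *	}
 *
 * The load_ucode_*_bsp() helpers and the family cutoffs are illustrative;
 * the real dispatch lives in microcode_core.c.
 */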

#ifdef CONFIG_MICROCODE
extern void __init load_ucode_bsp(void);
extern void load_ucode_ap(void);
void reload_early_microcode(void);
extern bool initrd_gone;
#else
static inline void __init load_ucode_bsp(void) { }
static inline void load_ucode_ap(void) { }
static inline void reload_early_microcode(void) { }
#endif

#endif /* _ASM_X86_MICROCODE_H */