/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __ASM_ARM_CPUTYPE_H
#define __ASM_ARM_CPUTYPE_H

#define CPUID_ID	0
#define CPUID_CACHETYPE	1
#define CPUID_TCM	2
#define CPUID_TLBTYPE	3
#define CPUID_MPUIR	4
#define CPUID_MPIDR	5
#define CPUID_REVIDR	6

#ifdef CONFIG_CPU_V7M
#define CPUID_EXT_PFR0	0x40
#define CPUID_EXT_PFR1	0x44
#define CPUID_EXT_DFR0	0x48
#define CPUID_EXT_AFR0	0x4c
#define CPUID_EXT_MMFR0	0x50
#define CPUID_EXT_MMFR1	0x54
#define CPUID_EXT_MMFR2	0x58
#define CPUID_EXT_MMFR3	0x5c
#define CPUID_EXT_ISAR0	0x60
#define CPUID_EXT_ISAR1	0x64
#define CPUID_EXT_ISAR2	0x68
#define CPUID_EXT_ISAR3	0x6c
#define CPUID_EXT_ISAR4	0x70
#define CPUID_EXT_ISAR5	0x74
#else
#define CPUID_EXT_PFR0	"c1, 0"
#define CPUID_EXT_PFR1	"c1, 1"
#define CPUID_EXT_DFR0	"c1, 2"
#define CPUID_EXT_AFR0	"c1, 3"
#define CPUID_EXT_MMFR0	"c1, 4"
#define CPUID_EXT_MMFR1	"c1, 5"
#define CPUID_EXT_MMFR2	"c1, 6"
#define CPUID_EXT_MMFR3	"c1, 7"
#define CPUID_EXT_ISAR0	"c2, 0"
#define CPUID_EXT_ISAR1	"c2, 1"
#define CPUID_EXT_ISAR2	"c2, 2"
#define CPUID_EXT_ISAR3	"c2, 3"
#define CPUID_EXT_ISAR4	"c2, 4"
#define CPUID_EXT_ISAR5	"c2, 5"
#endif
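
/*
 * Note: the CPUID_EXT_* tokens above are consumed by read_cpuid_ext()
 * below.  On CP15 builds they are "CRm, opc2" string fragments that get
 * pasted into an mrc instruction; on V7M they are byte offsets into the
 * System Control Block, where the ID registers are memory mapped.
 */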

#define MPIDR_SMP_BITMASK (0x3 << 30)
#define MPIDR_SMP_VALUE (0x2 << 30)

#define MPIDR_MT_BITMASK (0x1 << 24)

#define MPIDR_HWID_BITMASK 0xFFFFFF

#define MPIDR_INVALID (~MPIDR_HWID_BITMASK)

#define MPIDR_LEVEL_BITS 8
#define MPIDR_LEVEL_MASK ((1 << MPIDR_LEVEL_BITS) - 1)
#define MPIDR_LEVEL_SHIFT(level) (MPIDR_LEVEL_BITS * level)

#define MPIDR_AFFINITY_LEVEL(mpidr, level) \
	((mpidr >> (MPIDR_LEVEL_BITS * level)) & MPIDR_LEVEL_MASK)
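
/*
 * Worked example (hypothetical MPIDR value, for illustration only):
 * for mpidr == 0x80000102,
 *
 *	MPIDR_AFFINITY_LEVEL(mpidr, 0) == 0x02	(typically the CPU within a cluster)
 *	MPIDR_AFFINITY_LEVEL(mpidr, 1) == 0x01	(typically the cluster)
 *	MPIDR_AFFINITY_LEVEL(mpidr, 2) == 0x00
 */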

#define ARM_CPU_IMP_ARM		0x41
#define ARM_CPU_IMP_BRCM	0x42
#define ARM_CPU_IMP_DEC		0x44
#define ARM_CPU_IMP_INTEL	0x69

/* ARM implemented processors */
#define ARM_CPU_PART_ARM1136		0x4100b360
#define ARM_CPU_PART_ARM1156		0x4100b560
#define ARM_CPU_PART_ARM1176		0x4100b760
#define ARM_CPU_PART_ARM11MPCORE	0x4100b020
#define ARM_CPU_PART_CORTEX_A8		0x4100c080
#define ARM_CPU_PART_CORTEX_A9		0x4100c090
#define ARM_CPU_PART_CORTEX_A5		0x4100c050
#define ARM_CPU_PART_CORTEX_A7		0x4100c070
#define ARM_CPU_PART_CORTEX_A12		0x4100c0d0
#define ARM_CPU_PART_CORTEX_A17		0x4100c0e0
#define ARM_CPU_PART_CORTEX_A15		0x4100c0f0
#define ARM_CPU_PART_CORTEX_A53		0x4100d030
#define ARM_CPU_PART_CORTEX_A57		0x4100d070
#define ARM_CPU_PART_CORTEX_A72		0x4100d080
#define ARM_CPU_PART_CORTEX_A73		0x4100d090
#define ARM_CPU_PART_CORTEX_A75		0x4100d0a0
#define ARM_CPU_PART_MASK		0xff00fff0

/* Broadcom implemented processors */
#define ARM_CPU_PART_BRAHMA_B15	0x420000f0
#define ARM_CPU_PART_BRAHMA_B53	0x42001000

/* DEC implemented cores */
#define ARM_CPU_PART_SA1100	0x4400a110

/* Intel implemented cores */
#define ARM_CPU_PART_SA1110	0x6900b110
#define ARM_CPU_REV_SA1110_A0	0
#define ARM_CPU_REV_SA1110_B0	4
#define ARM_CPU_REV_SA1110_B1	5
#define ARM_CPU_REV_SA1110_B2	6
#define ARM_CPU_REV_SA1110_B4	8

#define ARM_CPU_XSCALE_ARCH_MASK	0xe000
#define ARM_CPU_XSCALE_ARCH_V1		0x2000
#define ARM_CPU_XSCALE_ARCH_V2		0x4000
#define ARM_CPU_XSCALE_ARCH_V3		0x6000

/* Qualcomm implemented cores */
#define ARM_CPU_PART_SCORPION	0x510002d0

#ifndef __ASSEMBLY__

#include <linux/stringify.h>
#include <linux/kernel.h>

extern unsigned int processor_id;
struct proc_info_list *lookup_processor(u32 midr);

#ifdef CONFIG_CPU_CP15
#define read_cpuid(reg) \
	({ \
		unsigned int __val; \
		asm("mrc p15, 0, %0, c0, c0, " __stringify(reg) \
		    : "=r" (__val) \
		    : \
		    : "cc"); \
		__val; \
	})

/*
 * The memory clobber prevents gcc 4.5 from reordering the mrc before
 * any is_smp() tests, which can cause undefined instruction aborts on
 * ARM1136 r0 due to the missing extended CP15 registers.
 */
#define read_cpuid_ext(ext_reg) \
	({ \
		unsigned int __val; \
		asm("mrc p15, 0, %0, c0, " ext_reg \
		    : "=r" (__val) \
		    : \
		    : "memory"); \
		__val; \
	})
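
/*
 * Usage sketch (not part of this header): read one of the extended ID
 * registers, e.g. ID_ISAR0,
 *
 *	unsigned int isar0 = read_cpuid_ext(CPUID_EXT_ISAR0);
 *
 * which expands to "mrc p15, 0, %0, c0, c2, 0" on CP15 builds.
 */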

#elif defined(CONFIG_CPU_V7M)

#include <asm/io.h>
#include <asm/v7m.h>

#define read_cpuid(reg) \
	({ \
		WARN_ON_ONCE(1); \
		0; \
	})

static inline unsigned int __attribute_const__ read_cpuid_ext(unsigned offset)
{
	return readl(BASEADDR_V7M_SCB + offset);
}

#else /* ifdef CONFIG_CPU_CP15 / elif defined(CONFIG_CPU_V7M) */

/*
 * read_cpuid and read_cpuid_ext should only ever be called on machines
 * that have cp15, so warn on other usages.
 */
#define read_cpuid(reg) \
	({ \
		WARN_ON_ONCE(1); \
		0; \
	})

#define read_cpuid_ext(reg) read_cpuid(reg)

#endif /* ifdef CONFIG_CPU_CP15 / else */

#ifdef CONFIG_CPU_CP15
/*
 * The CPU ID never changes at run time, so we might as well tell the
 * compiler that it's constant.  Use this function to read the CPU ID
 * rather than reading processor_id or calling read_cpuid() directly.
 */
static inline unsigned int __attribute_const__ read_cpuid_id(void)
{
	return read_cpuid(CPUID_ID);
}

static inline unsigned int __attribute_const__ read_cpuid_cachetype(void)
{
	return read_cpuid(CPUID_CACHETYPE);
}

static inline unsigned int __attribute_const__ read_cpuid_mputype(void)
{
	return read_cpuid(CPUID_MPUIR);
}

#elif defined(CONFIG_CPU_V7M)

static inline unsigned int __attribute_const__ read_cpuid_id(void)
{
	return readl(BASEADDR_V7M_SCB + V7M_SCB_CPUID);
}

static inline unsigned int __attribute_const__ read_cpuid_cachetype(void)
{
	return readl(BASEADDR_V7M_SCB + V7M_SCB_CTR);
}

static inline unsigned int __attribute_const__ read_cpuid_mputype(void)
{
	return readl(BASEADDR_V7M_SCB + MPU_TYPE);
}

#else /* ifdef CONFIG_CPU_CP15 / elif defined(CONFIG_CPU_V7M) */

static inline unsigned int __attribute_const__ read_cpuid_id(void)
{
	return processor_id;
}

#endif /* ifdef CONFIG_CPU_CP15 / else */

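/*
 * For reference, the MIDR fields the accessors below decode:
 * implementer [31:24], variant [23:20], architecture [19:16],
 * primary part number [15:4] and revision [3:0].
 */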
static inline unsigned int __attribute_const__ read_cpuid_implementor(void)
{
	return (read_cpuid_id() & 0xFF000000) >> 24;
}

static inline unsigned int __attribute_const__ read_cpuid_revision(void)
{
	return read_cpuid_id() & 0x0000000f;
}

/*
 * The CPU part number is meaningless without referring to the CPU
 * implementer: implementers are free to define their own part numbers,
 * which are permitted to clash with other implementers' part numbers.
 */
static inline unsigned int __attribute_const__ read_cpuid_part(void)
{
	return read_cpuid_id() & ARM_CPU_PART_MASK;
}
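
/*
 * Usage sketch (illustrative only; apply_a15_workaround() is a
 * hypothetical helper, not a real kernel function): errata and quirk
 * code typically compares the masked part against the ARM_CPU_PART_*
 * values above, e.g.
 *
 *	if (read_cpuid_part() == ARM_CPU_PART_CORTEX_A15)
 *		apply_a15_workaround();
 */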

static inline unsigned int __attribute_const__ __deprecated read_cpuid_part_number(void)
{
	return read_cpuid_id() & 0xFFF0;
}

static inline unsigned int __attribute_const__ xscale_cpu_arch_version(void)
{
	return read_cpuid_id() & ARM_CPU_XSCALE_ARCH_MASK;
}
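
/*
 * Sketch (illustrative only): the masked value above is meant to be
 * compared against the ARM_CPU_XSCALE_ARCH_* constants, e.g.
 *
 *	if (xscale_cpu_arch_version() == ARM_CPU_XSCALE_ARCH_V2)
 *		...
 */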

static inline unsigned int __attribute_const__ read_cpuid_tcmstatus(void)
{
	return read_cpuid(CPUID_TCM);
}

static inline unsigned int __attribute_const__ read_cpuid_mpidr(void)
{
	return read_cpuid(CPUID_MPIDR);
}

/* StrongARM-11x0 CPUs */
#define cpu_is_sa1100() (read_cpuid_part() == ARM_CPU_PART_SA1100)
#define cpu_is_sa1110() (read_cpuid_part() == ARM_CPU_PART_SA1110)

/*
 * Intel's XScale3 core supports some v6 features (supersections, L2 cache)
 * but advertises itself as v5 as it does not support the v6 ISA.  For
 * this reason, we need a way to explicitly test for this type of CPU.
 */
#ifndef CONFIG_CPU_XSC3
#define cpu_is_xsc3()	0
#else
static inline int cpu_is_xsc3(void)
{
	unsigned int id;
	id = read_cpuid_id() & 0xffffe000;
	/* Covers both the Intel and the Marvell IDs */
	if ((id == 0x69056000) || (id == 0x56056000))
		return 1;

	return 0;
}
#endif

#if !defined(CONFIG_CPU_XSCALE) && !defined(CONFIG_CPU_XSC3) && \
    !defined(CONFIG_CPU_MOHAWK)
#define cpu_is_xscale_family() 0
#else
static inline int cpu_is_xscale_family(void)
{
	unsigned int id;
	id = read_cpuid_id() & 0xffffe000;

	switch (id) {
	case 0x69052000: /* Intel XScale 1 */
	case 0x69054000: /* Intel XScale 2 */
	case 0x69056000: /* Intel XScale 3 */
	case 0x56056000: /* Marvell XScale 3 */
	case 0x56158000: /* Marvell Mohawk */
		return 1;
	}

	return 0;
}
#endif

/*
 * Marvell's PJ4 and PJ4B cores are based on the ARMv7 architecture, but
 * require a special sequence for enabling their coprocessors.  For this
 * reason, we need a way to distinguish them.
 */
#if defined(CONFIG_CPU_PJ4) || defined(CONFIG_CPU_PJ4B)
static inline int cpu_is_pj4(void)
{
	unsigned int id;

	id = read_cpuid_id();
	if ((id & 0xff0fff00) == 0x560f5800)
		return 1;

	return 0;
}
#else
#define cpu_is_pj4()	0
#endif

static inline int __attribute_const__ cpuid_feature_extract_field(u32 features,
								   int field)
{
	int feature = (features >> field) & 15;

	/* feature register fields are signed 4-bit values */
	if (feature > 7)
		feature -= 16;

	return feature;
}

#define cpuid_feature_extract(reg, field) \
	cpuid_feature_extract_field(read_cpuid_ext(reg), field)
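
/*
 * Usage sketch (the field offset below is an assumption, for
 * illustration only): ID_ISAR0 advertises divide instructions in bits
 * [27:24], so hardware SDIV/UDIV support could be probed with
 *
 *	if (cpuid_feature_extract(CPUID_EXT_ISAR0, 24) > 0)
 *		...
 *
 * Because fields are sign extended, an all-ones (0xf) field reads back
 * as -1 and is rejected by the test above.
 */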

#endif /* __ASSEMBLY__ */

#endif