// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2013 Huawei Ltd.
 * Author: Jiang Liu <liuj97@gmail.com>
 *
 * Copyright (C) 2014-2016 Zi Shen Lim <zlim.lnx@gmail.com>
 */
#include <linux/bitops.h>
#include <linux/bug.h>
#include <linux/printk.h>
#include <linux/sizes.h>
#include <linux/types.h>

#include <asm/debug-monitors.h>
#include <asm/errno.h>
#include <asm/insn.h>
#include <asm/kprobes.h>

#define AARCH64_INSN_SF_BIT	BIT(31)
#define AARCH64_INSN_N_BIT	BIT(22)
#define AARCH64_INSN_LSL_12	BIT(22)

static const int aarch64_insn_encoding_class[] = {
	AARCH64_INSN_CLS_UNKNOWN,
	AARCH64_INSN_CLS_UNKNOWN,
	AARCH64_INSN_CLS_SVE,
	AARCH64_INSN_CLS_UNKNOWN,
	AARCH64_INSN_CLS_LDST,
	AARCH64_INSN_CLS_DP_REG,
	AARCH64_INSN_CLS_LDST,
	AARCH64_INSN_CLS_DP_FPSIMD,
	AARCH64_INSN_CLS_DP_IMM,
	AARCH64_INSN_CLS_DP_IMM,
	AARCH64_INSN_CLS_BR_SYS,
	AARCH64_INSN_CLS_BR_SYS,
	AARCH64_INSN_CLS_LDST,
	AARCH64_INSN_CLS_DP_REG,
	AARCH64_INSN_CLS_LDST,
	AARCH64_INSN_CLS_DP_FPSIMD,
};

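/*
 * Example (for illustration): bits [28:25] of the opcode index the table
 * above, so NOP (0xd503201f) has (insn >> 25) & 0xf == 0xa and is decoded
 * as AARCH64_INSN_CLS_BR_SYS.
 */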
enum aarch64_insn_encoding_class __kprobes aarch64_get_insn_class(u32 insn)
{
	return aarch64_insn_encoding_class[(insn >> 25) & 0xf];
}

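/*
 * Bits [11:5] (CRm:op2) of a HINT instruction select the specific hint;
 * only the hints listed below are treated as safe to single-step out of
 * line (e.g. by kprobes), everything else is rejected.
 */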
bool __kprobes aarch64_insn_is_steppable_hint(u32 insn)
{
	if (!aarch64_insn_is_hint(insn))
		return false;

	switch (insn & 0xFE0) {
	case AARCH64_INSN_HINT_XPACLRI:
	case AARCH64_INSN_HINT_PACIA_1716:
	case AARCH64_INSN_HINT_PACIB_1716:
	case AARCH64_INSN_HINT_PACIAZ:
	case AARCH64_INSN_HINT_PACIASP:
	case AARCH64_INSN_HINT_PACIBZ:
	case AARCH64_INSN_HINT_PACIBSP:
	case AARCH64_INSN_HINT_BTI:
	case AARCH64_INSN_HINT_BTIC:
	case AARCH64_INSN_HINT_BTIJ:
	case AARCH64_INSN_HINT_BTIJC:
	case AARCH64_INSN_HINT_NOP:
		return true;
	default:
		return false;
	}
}

bool aarch64_insn_is_branch_imm(u32 insn)
{
	return (aarch64_insn_is_b(insn) || aarch64_insn_is_bl(insn) ||
		aarch64_insn_is_tbz(insn) || aarch64_insn_is_tbnz(insn) ||
		aarch64_insn_is_cbz(insn) || aarch64_insn_is_cbnz(insn) ||
		aarch64_insn_is_bcond(insn));
}

bool __kprobes aarch64_insn_uses_literal(u32 insn)
{
	/* ldr/ldrsw (literal), prfm */

	return aarch64_insn_is_ldr_lit(insn) ||
		aarch64_insn_is_ldrsw_lit(insn) ||
		aarch64_insn_is_adr_adrp(insn) ||
		aarch64_insn_is_prfm_lit(insn);
}

bool __kprobes aarch64_insn_is_branch(u32 insn)
{
	/* b, bl, cb*, tb*, ret*, b.cond, br*, blr* */

	return aarch64_insn_is_b(insn) ||
		aarch64_insn_is_bl(insn) ||
		aarch64_insn_is_cbz(insn) ||
		aarch64_insn_is_cbnz(insn) ||
		aarch64_insn_is_tbz(insn) ||
		aarch64_insn_is_tbnz(insn) ||
		aarch64_insn_is_ret(insn) ||
		aarch64_insn_is_ret_auth(insn) ||
		aarch64_insn_is_br(insn) ||
		aarch64_insn_is_br_auth(insn) ||
		aarch64_insn_is_blr(insn) ||
		aarch64_insn_is_blr_auth(insn) ||
		aarch64_insn_is_bcond(insn);
}

static int __kprobes aarch64_get_imm_shift_mask(enum aarch64_insn_imm_type type,
						u32 *maskp, int *shiftp)
{
	u32 mask;
	int shift;

	switch (type) {
	case AARCH64_INSN_IMM_26:
		mask = BIT(26) - 1;
		shift = 0;
		break;
	case AARCH64_INSN_IMM_19:
		mask = BIT(19) - 1;
		shift = 5;
		break;
	case AARCH64_INSN_IMM_16:
		mask = BIT(16) - 1;
		shift = 5;
		break;
	case AARCH64_INSN_IMM_14:
		mask = BIT(14) - 1;
		shift = 5;
		break;
	case AARCH64_INSN_IMM_12:
		mask = BIT(12) - 1;
		shift = 10;
		break;
	case AARCH64_INSN_IMM_9:
		mask = BIT(9) - 1;
		shift = 12;
		break;
	case AARCH64_INSN_IMM_7:
		mask = BIT(7) - 1;
		shift = 15;
		break;
	case AARCH64_INSN_IMM_6:
	case AARCH64_INSN_IMM_S:
		mask = BIT(6) - 1;
		shift = 10;
		break;
	case AARCH64_INSN_IMM_R:
		mask = BIT(6) - 1;
		shift = 16;
		break;
	case AARCH64_INSN_IMM_N:
		mask = 1;
		shift = 22;
		break;
	default:
		return -EINVAL;
	}

	*maskp = mask;
	*shiftp = shift;

	return 0;
}

#define ADR_IMM_HILOSPLIT	2
#define ADR_IMM_SIZE		SZ_2M
#define ADR_IMM_LOMASK		((1 << ADR_IMM_HILOSPLIT) - 1)
#define ADR_IMM_HIMASK		((ADR_IMM_SIZE >> ADR_IMM_HILOSPLIT) - 1)
#define ADR_IMM_LOSHIFT		29
#define ADR_IMM_HISHIFT		5

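/*
 * ADR/ADRP split their 21-bit immediate into immlo (bits [30:29]) and
 * immhi (bits [23:5]); the helpers below reassemble or split it around
 * ADR_IMM_HILOSPLIT. All other immediate types are a single contiguous
 * field described by aarch64_get_imm_shift_mask().
 */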
u64 aarch64_insn_decode_immediate(enum aarch64_insn_imm_type type, u32 insn)
{
	u32 immlo, immhi, mask;
	int shift;

	switch (type) {
	case AARCH64_INSN_IMM_ADR:
		shift = 0;
		immlo = (insn >> ADR_IMM_LOSHIFT) & ADR_IMM_LOMASK;
		immhi = (insn >> ADR_IMM_HISHIFT) & ADR_IMM_HIMASK;
		insn = (immhi << ADR_IMM_HILOSPLIT) | immlo;
		mask = ADR_IMM_SIZE - 1;
		break;
	default:
		if (aarch64_get_imm_shift_mask(type, &mask, &shift) < 0) {
			pr_err("%s: unknown immediate encoding %d\n", __func__,
			       type);
			return 0;
		}
	}

	return (insn >> shift) & mask;
}

u32 __kprobes aarch64_insn_encode_immediate(enum aarch64_insn_imm_type type,
					    u32 insn, u64 imm)
{
	u32 immlo, immhi, mask;
	int shift;

	if (insn == AARCH64_BREAK_FAULT)
		return AARCH64_BREAK_FAULT;

	switch (type) {
	case AARCH64_INSN_IMM_ADR:
		shift = 0;
		immlo = (imm & ADR_IMM_LOMASK) << ADR_IMM_LOSHIFT;
		imm >>= ADR_IMM_HILOSPLIT;
		immhi = (imm & ADR_IMM_HIMASK) << ADR_IMM_HISHIFT;
		imm = immlo | immhi;
		mask = ((ADR_IMM_LOMASK << ADR_IMM_LOSHIFT) |
			(ADR_IMM_HIMASK << ADR_IMM_HISHIFT));
		break;
	default:
		if (aarch64_get_imm_shift_mask(type, &mask, &shift) < 0) {
			pr_err("%s: unknown immediate encoding %d\n", __func__,
			       type);
			return AARCH64_BREAK_FAULT;
		}
	}

	/* Update the immediate field. */
	insn &= ~(mask << shift);
	insn |= (imm & mask) << shift;

	return insn;
}

u32 aarch64_insn_decode_register(enum aarch64_insn_register_type type,
				 u32 insn)
{
	int shift;

	switch (type) {
	case AARCH64_INSN_REGTYPE_RT:
	case AARCH64_INSN_REGTYPE_RD:
		shift = 0;
		break;
	case AARCH64_INSN_REGTYPE_RN:
		shift = 5;
		break;
	case AARCH64_INSN_REGTYPE_RT2:
	case AARCH64_INSN_REGTYPE_RA:
		shift = 10;
		break;
	case AARCH64_INSN_REGTYPE_RM:
		shift = 16;
		break;
	default:
		pr_err("%s: unknown register type encoding %d\n", __func__,
		       type);
		return 0;
	}

	return (insn >> shift) & GENMASK(4, 0);
}

static u32 aarch64_insn_encode_register(enum aarch64_insn_register_type type,
					u32 insn,
					enum aarch64_insn_register reg)
{
	int shift;

	if (insn == AARCH64_BREAK_FAULT)
		return AARCH64_BREAK_FAULT;

	if (reg < AARCH64_INSN_REG_0 || reg > AARCH64_INSN_REG_SP) {
		pr_err("%s: unknown register encoding %d\n", __func__, reg);
		return AARCH64_BREAK_FAULT;
	}

	switch (type) {
	case AARCH64_INSN_REGTYPE_RT:
	case AARCH64_INSN_REGTYPE_RD:
		shift = 0;
		break;
	case AARCH64_INSN_REGTYPE_RN:
		shift = 5;
		break;
	case AARCH64_INSN_REGTYPE_RT2:
	case AARCH64_INSN_REGTYPE_RA:
		shift = 10;
		break;
	case AARCH64_INSN_REGTYPE_RM:
	case AARCH64_INSN_REGTYPE_RS:
		shift = 16;
		break;
	default:
		pr_err("%s: unknown register type encoding %d\n", __func__,
		       type);
		return AARCH64_BREAK_FAULT;
	}

	insn &= ~(GENMASK(4, 0) << shift);
	insn |= reg << shift;

	return insn;
}

static u32 aarch64_insn_encode_ldst_size(enum aarch64_insn_size_type type,
					 u32 insn)
{
	u32 size;

	switch (type) {
	case AARCH64_INSN_SIZE_8:
		size = 0;
		break;
	case AARCH64_INSN_SIZE_16:
		size = 1;
		break;
	case AARCH64_INSN_SIZE_32:
		size = 2;
		break;
	case AARCH64_INSN_SIZE_64:
		size = 3;
		break;
	default:
		pr_err("%s: unknown size encoding %d\n", __func__, type);
		return AARCH64_BREAK_FAULT;
	}

	insn &= ~GENMASK(31, 30);
	insn |= size << 30;

	return insn;
}

static inline long branch_imm_common(unsigned long pc, unsigned long addr,
				     long range)
{
	long offset;

	if ((pc & 0x3) || (addr & 0x3)) {
		pr_err("%s: A64 instructions must be word aligned\n", __func__);
		return range;
	}

	offset = ((long)addr - (long)pc);

	if (offset < -range || offset >= range) {
		pr_err("%s: offset out of range\n", __func__);
		return range;
	}

	return offset;
}

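/*
 * Usage sketch (illustrative only, with hypothetical pc/target values):
 *
 *	u32 insn = aarch64_insn_gen_branch_imm(pc, target,
 *					       AARCH64_INSN_BRANCH_LINK);
 *	if (insn == AARCH64_BREAK_FAULT)
 *		return -EINVAL;
 *
 * This produces a BL whose 26-bit immediate holds (target - pc) >> 2.
 */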
u32 __kprobes aarch64_insn_gen_branch_imm(unsigned long pc, unsigned long addr,
					  enum aarch64_insn_branch_type type)
{
	u32 insn;
	long offset;

	/*
	 * B/BL support [-128M, 128M) offset
	 * ARM64 virtual address arrangement guarantees all kernel and module
	 * texts are within +/-128M.
	 */
	offset = branch_imm_common(pc, addr, SZ_128M);
	if (offset >= SZ_128M)
		return AARCH64_BREAK_FAULT;

	switch (type) {
	case AARCH64_INSN_BRANCH_LINK:
		insn = aarch64_insn_get_bl_value();
		break;
	case AARCH64_INSN_BRANCH_NOLINK:
		insn = aarch64_insn_get_b_value();
		break;
	default:
		pr_err("%s: unknown branch encoding %d\n", __func__, type);
		return AARCH64_BREAK_FAULT;
	}

	return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_26, insn,
					     offset >> 2);
}

u32 aarch64_insn_gen_comp_branch_imm(unsigned long pc, unsigned long addr,
				     enum aarch64_insn_register reg,
				     enum aarch64_insn_variant variant,
				     enum aarch64_insn_branch_type type)
{
	u32 insn;
	long offset;

	offset = branch_imm_common(pc, addr, SZ_1M);
	if (offset >= SZ_1M)
		return AARCH64_BREAK_FAULT;

	switch (type) {
	case AARCH64_INSN_BRANCH_COMP_ZERO:
		insn = aarch64_insn_get_cbz_value();
		break;
	case AARCH64_INSN_BRANCH_COMP_NONZERO:
		insn = aarch64_insn_get_cbnz_value();
		break;
	default:
		pr_err("%s: unknown branch encoding %d\n", __func__, type);
		return AARCH64_BREAK_FAULT;
	}

	switch (variant) {
	case AARCH64_INSN_VARIANT_32BIT:
		break;
	case AARCH64_INSN_VARIANT_64BIT:
		insn |= AARCH64_INSN_SF_BIT;
		break;
	default:
		pr_err("%s: unknown variant encoding %d\n", __func__, variant);
		return AARCH64_BREAK_FAULT;
	}

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RT, insn, reg);

	return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_19, insn,
					     offset >> 2);
}

u32 aarch64_insn_gen_cond_branch_imm(unsigned long pc, unsigned long addr,
				     enum aarch64_insn_condition cond)
{
	u32 insn;
	long offset;

	offset = branch_imm_common(pc, addr, SZ_1M);

	insn = aarch64_insn_get_bcond_value();

	if (cond < AARCH64_INSN_COND_EQ || cond > AARCH64_INSN_COND_AL) {
		pr_err("%s: unknown condition encoding %d\n", __func__, cond);
		return AARCH64_BREAK_FAULT;
	}
	insn |= cond;

	return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_19, insn,
					     offset >> 2);
}

u32 __kprobes aarch64_insn_gen_hint(enum aarch64_insn_hint_cr_op op)
{
	return aarch64_insn_get_hint_value() | op;
}

u32 __kprobes aarch64_insn_gen_nop(void)
{
	return aarch64_insn_gen_hint(AARCH64_INSN_HINT_NOP);
}

u32 aarch64_insn_gen_branch_reg(enum aarch64_insn_register reg,
				enum aarch64_insn_branch_type type)
{
	u32 insn;

	switch (type) {
	case AARCH64_INSN_BRANCH_NOLINK:
		insn = aarch64_insn_get_br_value();
		break;
	case AARCH64_INSN_BRANCH_LINK:
		insn = aarch64_insn_get_blr_value();
		break;
	case AARCH64_INSN_BRANCH_RETURN:
		insn = aarch64_insn_get_ret_value();
		break;
	default:
		pr_err("%s: unknown branch encoding %d\n", __func__, type);
		return AARCH64_BREAK_FAULT;
	}

	return aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn, reg);
}

u32 aarch64_insn_gen_load_store_reg(enum aarch64_insn_register reg,
				    enum aarch64_insn_register base,
				    enum aarch64_insn_register offset,
				    enum aarch64_insn_size_type size,
				    enum aarch64_insn_ldst_type type)
{
	u32 insn;

	switch (type) {
	case AARCH64_INSN_LDST_LOAD_REG_OFFSET:
		insn = aarch64_insn_get_ldr_reg_value();
		break;
	case AARCH64_INSN_LDST_STORE_REG_OFFSET:
		insn = aarch64_insn_get_str_reg_value();
		break;
	default:
		pr_err("%s: unknown load/store encoding %d\n", __func__, type);
		return AARCH64_BREAK_FAULT;
	}

	insn = aarch64_insn_encode_ldst_size(size, insn);

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RT, insn, reg);

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn,
					    base);

	return aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RM, insn,
					    offset);
}

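/*
 * Example (for illustration, assuming the register aliases from
 * <asm/insn.h>): a typical frame push such as "stp x29, x30, [sp, #-16]!"
 * corresponds to
 * aarch64_insn_gen_load_store_pair(AARCH64_INSN_REG_FP,
 * AARCH64_INSN_REG_LR, AARCH64_INSN_REG_SP, -16,
 * AARCH64_INSN_VARIANT_64BIT, AARCH64_INSN_LDST_STORE_PAIR_PRE_INDEX).
 */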
u32 aarch64_insn_gen_load_store_pair(enum aarch64_insn_register reg1,
				     enum aarch64_insn_register reg2,
				     enum aarch64_insn_register base,
				     int offset,
				     enum aarch64_insn_variant variant,
				     enum aarch64_insn_ldst_type type)
{
	u32 insn;
	int shift;

	switch (type) {
	case AARCH64_INSN_LDST_LOAD_PAIR_PRE_INDEX:
		insn = aarch64_insn_get_ldp_pre_value();
		break;
	case AARCH64_INSN_LDST_STORE_PAIR_PRE_INDEX:
		insn = aarch64_insn_get_stp_pre_value();
		break;
	case AARCH64_INSN_LDST_LOAD_PAIR_POST_INDEX:
		insn = aarch64_insn_get_ldp_post_value();
		break;
	case AARCH64_INSN_LDST_STORE_PAIR_POST_INDEX:
		insn = aarch64_insn_get_stp_post_value();
		break;
	default:
		pr_err("%s: unknown load/store encoding %d\n", __func__, type);
		return AARCH64_BREAK_FAULT;
	}

	switch (variant) {
	case AARCH64_INSN_VARIANT_32BIT:
		if ((offset & 0x3) || (offset < -256) || (offset > 252)) {
			pr_err("%s: offset must be multiples of 4 in the range of [-256, 252] %d\n",
			       __func__, offset);
			return AARCH64_BREAK_FAULT;
		}
		shift = 2;
		break;
	case AARCH64_INSN_VARIANT_64BIT:
		if ((offset & 0x7) || (offset < -512) || (offset > 504)) {
			pr_err("%s: offset must be multiples of 8 in the range of [-512, 504] %d\n",
			       __func__, offset);
			return AARCH64_BREAK_FAULT;
		}
		shift = 3;
		insn |= AARCH64_INSN_SF_BIT;
		break;
	default:
		pr_err("%s: unknown variant encoding %d\n", __func__, variant);
		return AARCH64_BREAK_FAULT;
	}

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RT, insn,
					    reg1);

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RT2, insn,
					    reg2);

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn,
					    base);

	return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_7, insn,
					     offset >> shift);
}

u32 aarch64_insn_gen_load_store_ex(enum aarch64_insn_register reg,
				   enum aarch64_insn_register base,
				   enum aarch64_insn_register state,
				   enum aarch64_insn_size_type size,
				   enum aarch64_insn_ldst_type type)
{
	u32 insn;

	switch (type) {
	case AARCH64_INSN_LDST_LOAD_EX:
		insn = aarch64_insn_get_load_ex_value();
		break;
	case AARCH64_INSN_LDST_STORE_EX:
		insn = aarch64_insn_get_store_ex_value();
		break;
	default:
		pr_err("%s: unknown load/store exclusive encoding %d\n", __func__, type);
		return AARCH64_BREAK_FAULT;
	}

	insn = aarch64_insn_encode_ldst_size(size, insn);

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RT, insn,
					    reg);

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn,
					    base);

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RT2, insn,
					    AARCH64_INSN_REG_ZR);

	return aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RS, insn,
					    state);
}

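/*
 * LDADD (an LSE atomic) atomically adds 'value' to the memory word at
 * 'address' and returns the previous memory contents in 'result'.
 */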
u32 aarch64_insn_gen_ldadd(enum aarch64_insn_register result,
			   enum aarch64_insn_register address,
			   enum aarch64_insn_register value,
			   enum aarch64_insn_size_type size)
{
	u32 insn = aarch64_insn_get_ldadd_value();

	switch (size) {
	case AARCH64_INSN_SIZE_32:
	case AARCH64_INSN_SIZE_64:
		break;
	default:
		pr_err("%s: unimplemented size encoding %d\n", __func__, size);
		return AARCH64_BREAK_FAULT;
	}

	insn = aarch64_insn_encode_ldst_size(size, insn);

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RT, insn,
					    result);

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn,
					    address);

	return aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RS, insn,
					    value);
}

u32 aarch64_insn_gen_stadd(enum aarch64_insn_register address,
			   enum aarch64_insn_register value,
			   enum aarch64_insn_size_type size)
{
	/*
	 * STADD is simply encoded as an alias for LDADD with XZR as
	 * the destination register.
	 */
	return aarch64_insn_gen_ldadd(AARCH64_INSN_REG_ZR, address,
				      value, size);
}

static u32 aarch64_insn_encode_prfm_imm(enum aarch64_insn_prfm_type type,
					enum aarch64_insn_prfm_target target,
					enum aarch64_insn_prfm_policy policy,
					u32 insn)
{
	u32 imm_type = 0, imm_target = 0, imm_policy = 0;

	switch (type) {
	case AARCH64_INSN_PRFM_TYPE_PLD:
		break;
	case AARCH64_INSN_PRFM_TYPE_PLI:
		imm_type = BIT(0);
		break;
	case AARCH64_INSN_PRFM_TYPE_PST:
		imm_type = BIT(1);
		break;
	default:
		pr_err("%s: unknown prfm type encoding %d\n", __func__, type);
		return AARCH64_BREAK_FAULT;
	}

	switch (target) {
	case AARCH64_INSN_PRFM_TARGET_L1:
		break;
	case AARCH64_INSN_PRFM_TARGET_L2:
		imm_target = BIT(0);
		break;
	case AARCH64_INSN_PRFM_TARGET_L3:
		imm_target = BIT(1);
		break;
	default:
		pr_err("%s: unknown prfm target encoding %d\n", __func__, target);
		return AARCH64_BREAK_FAULT;
	}

	switch (policy) {
	case AARCH64_INSN_PRFM_POLICY_KEEP:
		break;
	case AARCH64_INSN_PRFM_POLICY_STRM:
		imm_policy = BIT(0);
		break;
	default:
		pr_err("%s: unknown prfm policy encoding %d\n", __func__, policy);
		return AARCH64_BREAK_FAULT;
	}

	/* In this case, imm5 is encoded into Rt field. */
	insn &= ~GENMASK(4, 0);
	insn |= imm_policy | (imm_target << 1) | (imm_type << 3);

	return insn;
}

u32 aarch64_insn_gen_prefetch(enum aarch64_insn_register base,
			      enum aarch64_insn_prfm_type type,
			      enum aarch64_insn_prfm_target target,
			      enum aarch64_insn_prfm_policy policy)
{
	u32 insn = aarch64_insn_get_prfm_value();

	insn = aarch64_insn_encode_ldst_size(AARCH64_INSN_SIZE_64, insn);

	insn = aarch64_insn_encode_prfm_imm(type, target, policy, insn);

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn,
					    base);

	return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_12, insn, 0);
}

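/*
 * Immediates wider than 12 bits are only representable when the low 12
 * bits are zero, in which case the value is shifted down and LSL #12 is
 * set. For example (illustration only), imm = 0x5000 is encoded as
 * imm12 = 0x5 with AARCH64_INSN_LSL_12, while imm = 0x5001 is rejected.
 */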
u32 aarch64_insn_gen_add_sub_imm(enum aarch64_insn_register dst,
				 enum aarch64_insn_register src,
				 int imm, enum aarch64_insn_variant variant,
				 enum aarch64_insn_adsb_type type)
{
	u32 insn;

	switch (type) {
	case AARCH64_INSN_ADSB_ADD:
		insn = aarch64_insn_get_add_imm_value();
		break;
	case AARCH64_INSN_ADSB_SUB:
		insn = aarch64_insn_get_sub_imm_value();
		break;
	case AARCH64_INSN_ADSB_ADD_SETFLAGS:
		insn = aarch64_insn_get_adds_imm_value();
		break;
	case AARCH64_INSN_ADSB_SUB_SETFLAGS:
		insn = aarch64_insn_get_subs_imm_value();
		break;
	default:
		pr_err("%s: unknown add/sub encoding %d\n", __func__, type);
		return AARCH64_BREAK_FAULT;
	}

	switch (variant) {
	case AARCH64_INSN_VARIANT_32BIT:
		break;
	case AARCH64_INSN_VARIANT_64BIT:
		insn |= AARCH64_INSN_SF_BIT;
		break;
	default:
		pr_err("%s: unknown variant encoding %d\n", __func__, variant);
		return AARCH64_BREAK_FAULT;
	}

	/* We can't encode more than a 24bit value (12bit + 12bit shift) */
	if (imm & ~(BIT(24) - 1))
		goto out;

	/* If we have something in the top 12 bits... */
	if (imm & ~(SZ_4K - 1)) {
		/* ... and in the low 12 bits -> error */
		if (imm & (SZ_4K - 1))
			goto out;

		imm >>= 12;
		insn |= AARCH64_INSN_LSL_12;
	}

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, dst);

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn, src);

	return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_12, insn, imm);

out:
	pr_err("%s: invalid immediate encoding %d\n", __func__, imm);
	return AARCH64_BREAK_FAULT;
}

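/*
 * Example (for illustration): the alias "lsr x0, x1, #4" is UBFM with
 * immr = 4 and imms = 63, i.e.
 * aarch64_insn_gen_bitfield(AARCH64_INSN_REG_0, AARCH64_INSN_REG_1, 4, 63,
 * AARCH64_INSN_VARIANT_64BIT, AARCH64_INSN_BITFIELD_MOVE_UNSIGNED).
 */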
u32 aarch64_insn_gen_bitfield(enum aarch64_insn_register dst,
			      enum aarch64_insn_register src,
			      int immr, int imms,
			      enum aarch64_insn_variant variant,
			      enum aarch64_insn_bitfield_type type)
{
	u32 insn;
	u32 mask;

	switch (type) {
	case AARCH64_INSN_BITFIELD_MOVE:
		insn = aarch64_insn_get_bfm_value();
		break;
	case AARCH64_INSN_BITFIELD_MOVE_UNSIGNED:
		insn = aarch64_insn_get_ubfm_value();
		break;
	case AARCH64_INSN_BITFIELD_MOVE_SIGNED:
		insn = aarch64_insn_get_sbfm_value();
		break;
	default:
		pr_err("%s: unknown bitfield encoding %d\n", __func__, type);
		return AARCH64_BREAK_FAULT;
	}

	switch (variant) {
	case AARCH64_INSN_VARIANT_32BIT:
		mask = GENMASK(4, 0);
		break;
	case AARCH64_INSN_VARIANT_64BIT:
		insn |= AARCH64_INSN_SF_BIT | AARCH64_INSN_N_BIT;
		mask = GENMASK(5, 0);
		break;
	default:
		pr_err("%s: unknown variant encoding %d\n", __func__, variant);
		return AARCH64_BREAK_FAULT;
	}

	if (immr & ~mask) {
		pr_err("%s: invalid immr encoding %d\n", __func__, immr);
		return AARCH64_BREAK_FAULT;
	}
	if (imms & ~mask) {
		pr_err("%s: invalid imms encoding %d\n", __func__, imms);
		return AARCH64_BREAK_FAULT;
	}

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, dst);

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn, src);

	insn = aarch64_insn_encode_immediate(AARCH64_INSN_IMM_R, insn, immr);

	return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_S, insn, imms);
}

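/*
 * Usage sketch (illustrative): a 64-bit constant is typically built from
 * one MOVZ plus up to three MOVKs, each inserting 16 bits at shift
 * 0/16/32/48:
 *
 *	emit(aarch64_insn_gen_movewide(reg, imm & 0xffff, 0,
 *				       AARCH64_INSN_VARIANT_64BIT,
 *				       AARCH64_INSN_MOVEWIDE_ZERO));
 *	emit(aarch64_insn_gen_movewide(reg, (imm >> 16) & 0xffff, 16,
 *				       AARCH64_INSN_VARIANT_64BIT,
 *				       AARCH64_INSN_MOVEWIDE_KEEP));
 *
 * where emit() stands for whatever the caller uses to store instructions.
 */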
u32 aarch64_insn_gen_movewide(enum aarch64_insn_register dst,
			      int imm, int shift,
			      enum aarch64_insn_variant variant,
			      enum aarch64_insn_movewide_type type)
{
	u32 insn;

	switch (type) {
	case AARCH64_INSN_MOVEWIDE_ZERO:
		insn = aarch64_insn_get_movz_value();
		break;
	case AARCH64_INSN_MOVEWIDE_KEEP:
		insn = aarch64_insn_get_movk_value();
		break;
	case AARCH64_INSN_MOVEWIDE_INVERSE:
		insn = aarch64_insn_get_movn_value();
		break;
	default:
		pr_err("%s: unknown movewide encoding %d\n", __func__, type);
		return AARCH64_BREAK_FAULT;
	}

	if (imm & ~(SZ_64K - 1)) {
		pr_err("%s: invalid immediate encoding %d\n", __func__, imm);
		return AARCH64_BREAK_FAULT;
	}

	switch (variant) {
	case AARCH64_INSN_VARIANT_32BIT:
		if (shift != 0 && shift != 16) {
			pr_err("%s: invalid shift encoding %d\n", __func__,
			       shift);
			return AARCH64_BREAK_FAULT;
		}
		break;
	case AARCH64_INSN_VARIANT_64BIT:
		insn |= AARCH64_INSN_SF_BIT;
		if (shift != 0 && shift != 16 && shift != 32 && shift != 48) {
			pr_err("%s: invalid shift encoding %d\n", __func__,
			       shift);
			return AARCH64_BREAK_FAULT;
		}
		break;
	default:
		pr_err("%s: unknown variant encoding %d\n", __func__, variant);
		return AARCH64_BREAK_FAULT;
	}

	insn |= (shift >> 4) << 21;

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, dst);

	return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_16, insn, imm);
}

u32 aarch64_insn_gen_add_sub_shifted_reg(enum aarch64_insn_register dst,
					 enum aarch64_insn_register src,
					 enum aarch64_insn_register reg,
					 int shift,
					 enum aarch64_insn_variant variant,
					 enum aarch64_insn_adsb_type type)
{
	u32 insn;

	switch (type) {
	case AARCH64_INSN_ADSB_ADD:
		insn = aarch64_insn_get_add_value();
		break;
	case AARCH64_INSN_ADSB_SUB:
		insn = aarch64_insn_get_sub_value();
		break;
	case AARCH64_INSN_ADSB_ADD_SETFLAGS:
		insn = aarch64_insn_get_adds_value();
		break;
	case AARCH64_INSN_ADSB_SUB_SETFLAGS:
		insn = aarch64_insn_get_subs_value();
		break;
	default:
		pr_err("%s: unknown add/sub encoding %d\n", __func__, type);
		return AARCH64_BREAK_FAULT;
	}

	switch (variant) {
	case AARCH64_INSN_VARIANT_32BIT:
		if (shift & ~(SZ_32 - 1)) {
			pr_err("%s: invalid shift encoding %d\n", __func__,
			       shift);
			return AARCH64_BREAK_FAULT;
		}
		break;
	case AARCH64_INSN_VARIANT_64BIT:
		insn |= AARCH64_INSN_SF_BIT;
		if (shift & ~(SZ_64 - 1)) {
			pr_err("%s: invalid shift encoding %d\n", __func__,
			       shift);
			return AARCH64_BREAK_FAULT;
		}
		break;
	default:
		pr_err("%s: unknown variant encoding %d\n", __func__, variant);
		return AARCH64_BREAK_FAULT;
	}

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, dst);

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn, src);

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RM, insn, reg);

	return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_6, insn, shift);
}

u32 aarch64_insn_gen_data1(enum aarch64_insn_register dst,
			   enum aarch64_insn_register src,
			   enum aarch64_insn_variant variant,
			   enum aarch64_insn_data1_type type)
{
	u32 insn;

	switch (type) {
	case AARCH64_INSN_DATA1_REVERSE_16:
		insn = aarch64_insn_get_rev16_value();
		break;
	case AARCH64_INSN_DATA1_REVERSE_32:
		insn = aarch64_insn_get_rev32_value();
		break;
	case AARCH64_INSN_DATA1_REVERSE_64:
		if (variant != AARCH64_INSN_VARIANT_64BIT) {
			pr_err("%s: invalid variant for reverse64 %d\n",
			       __func__, variant);
			return AARCH64_BREAK_FAULT;
		}
		insn = aarch64_insn_get_rev64_value();
		break;
	default:
		pr_err("%s: unknown data1 encoding %d\n", __func__, type);
		return AARCH64_BREAK_FAULT;
	}

	switch (variant) {
	case AARCH64_INSN_VARIANT_32BIT:
		break;
	case AARCH64_INSN_VARIANT_64BIT:
		insn |= AARCH64_INSN_SF_BIT;
		break;
	default:
		pr_err("%s: unknown variant encoding %d\n", __func__, variant);
		return AARCH64_BREAK_FAULT;
	}

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, dst);

	return aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn, src);
}

u32 aarch64_insn_gen_data2(enum aarch64_insn_register dst,
			   enum aarch64_insn_register src,
			   enum aarch64_insn_register reg,
			   enum aarch64_insn_variant variant,
			   enum aarch64_insn_data2_type type)
{
	u32 insn;

	switch (type) {
	case AARCH64_INSN_DATA2_UDIV:
		insn = aarch64_insn_get_udiv_value();
		break;
	case AARCH64_INSN_DATA2_SDIV:
		insn = aarch64_insn_get_sdiv_value();
		break;
	case AARCH64_INSN_DATA2_LSLV:
		insn = aarch64_insn_get_lslv_value();
		break;
	case AARCH64_INSN_DATA2_LSRV:
		insn = aarch64_insn_get_lsrv_value();
		break;
	case AARCH64_INSN_DATA2_ASRV:
		insn = aarch64_insn_get_asrv_value();
		break;
	case AARCH64_INSN_DATA2_RORV:
		insn = aarch64_insn_get_rorv_value();
		break;
	default:
		pr_err("%s: unknown data2 encoding %d\n", __func__, type);
		return AARCH64_BREAK_FAULT;
	}

	switch (variant) {
	case AARCH64_INSN_VARIANT_32BIT:
		break;
	case AARCH64_INSN_VARIANT_64BIT:
		insn |= AARCH64_INSN_SF_BIT;
		break;
	default:
		pr_err("%s: unknown variant encoding %d\n", __func__, variant);
		return AARCH64_BREAK_FAULT;
	}

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, dst);

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn, src);

	return aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RM, insn, reg);
}

u32 aarch64_insn_gen_data3(enum aarch64_insn_register dst,
			   enum aarch64_insn_register src,
			   enum aarch64_insn_register reg1,
			   enum aarch64_insn_register reg2,
			   enum aarch64_insn_variant variant,
			   enum aarch64_insn_data3_type type)
{
	u32 insn;

	switch (type) {
	case AARCH64_INSN_DATA3_MADD:
		insn = aarch64_insn_get_madd_value();
		break;
	case AARCH64_INSN_DATA3_MSUB:
		insn = aarch64_insn_get_msub_value();
		break;
	default:
		pr_err("%s: unknown data3 encoding %d\n", __func__, type);
		return AARCH64_BREAK_FAULT;
	}

	switch (variant) {
	case AARCH64_INSN_VARIANT_32BIT:
		break;
	case AARCH64_INSN_VARIANT_64BIT:
		insn |= AARCH64_INSN_SF_BIT;
		break;
	default:
		pr_err("%s: unknown variant encoding %d\n", __func__, variant);
		return AARCH64_BREAK_FAULT;
	}

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, dst);

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RA, insn, src);

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn,
					    reg1);

	return aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RM, insn,
					    reg2);
}

u32 aarch64_insn_gen_logical_shifted_reg(enum aarch64_insn_register dst,
					 enum aarch64_insn_register src,
					 enum aarch64_insn_register reg,
					 int shift,
					 enum aarch64_insn_variant variant,
					 enum aarch64_insn_logic_type type)
{
	u32 insn;

	switch (type) {
	case AARCH64_INSN_LOGIC_AND:
		insn = aarch64_insn_get_and_value();
		break;
	case AARCH64_INSN_LOGIC_BIC:
		insn = aarch64_insn_get_bic_value();
		break;
	case AARCH64_INSN_LOGIC_ORR:
		insn = aarch64_insn_get_orr_value();
		break;
	case AARCH64_INSN_LOGIC_ORN:
		insn = aarch64_insn_get_orn_value();
		break;
	case AARCH64_INSN_LOGIC_EOR:
		insn = aarch64_insn_get_eor_value();
		break;
	case AARCH64_INSN_LOGIC_EON:
		insn = aarch64_insn_get_eon_value();
		break;
	case AARCH64_INSN_LOGIC_AND_SETFLAGS:
		insn = aarch64_insn_get_ands_value();
		break;
	case AARCH64_INSN_LOGIC_BIC_SETFLAGS:
		insn = aarch64_insn_get_bics_value();
		break;
	default:
		pr_err("%s: unknown logical encoding %d\n", __func__, type);
		return AARCH64_BREAK_FAULT;
	}

	switch (variant) {
	case AARCH64_INSN_VARIANT_32BIT:
		if (shift & ~(SZ_32 - 1)) {
			pr_err("%s: invalid shift encoding %d\n", __func__,
			       shift);
			return AARCH64_BREAK_FAULT;
		}
		break;
	case AARCH64_INSN_VARIANT_64BIT:
		insn |= AARCH64_INSN_SF_BIT;
		if (shift & ~(SZ_64 - 1)) {
			pr_err("%s: invalid shift encoding %d\n", __func__,
			       shift);
			return AARCH64_BREAK_FAULT;
		}
		break;
	default:
		pr_err("%s: unknown variant encoding %d\n", __func__, variant);
		return AARCH64_BREAK_FAULT;
	}

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, dst);

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn, src);

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RM, insn, reg);

	return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_6, insn, shift);
}

/*
 * MOV (register) is architecturally an alias of ORR (shifted register) where
 * MOV <*d>, <*m> is equivalent to ORR <*d>, <*ZR>, <*m>
 */
u32 aarch64_insn_gen_move_reg(enum aarch64_insn_register dst,
			      enum aarch64_insn_register src,
			      enum aarch64_insn_variant variant)
{
	return aarch64_insn_gen_logical_shifted_reg(dst, AARCH64_INSN_REG_ZR,
						    src, 0, variant,
						    AARCH64_INSN_LOGIC_ORR);
}

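/*
 * ADR encodes a byte offset from the instruction itself, while ADRP
 * encodes a 4K-page offset from the page containing the instruction;
 * both use the same 21-bit signed immediate field, hence the common
 * range check below.
 */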
u32 aarch64_insn_gen_adr(unsigned long pc, unsigned long addr,
			 enum aarch64_insn_register reg,
			 enum aarch64_insn_adr_type type)
{
	u32 insn;
	s32 offset;

	switch (type) {
	case AARCH64_INSN_ADR_TYPE_ADR:
		insn = aarch64_insn_get_adr_value();
		offset = addr - pc;
		break;
	case AARCH64_INSN_ADR_TYPE_ADRP:
		insn = aarch64_insn_get_adrp_value();
		offset = (addr - ALIGN_DOWN(pc, SZ_4K)) >> 12;
		break;
	default:
		pr_err("%s: unknown adr encoding %d\n", __func__, type);
		return AARCH64_BREAK_FAULT;
	}

	if (offset < -SZ_1M || offset >= SZ_1M)
		return AARCH64_BREAK_FAULT;

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, reg);

	return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_ADR, insn, offset);
}

/*
 * Decode the imm field of a branch, and return the byte offset as a
 * signed value (so it can be used when computing a new branch
 * target).
 */
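/*
 * The shift pairs below sign-extend the decoded field within a 32-bit
 * value and scale it by 4 in one step, e.g. for the 26-bit B/BL
 * immediate: (imm << 6) >> 4 == sign_extend32(imm, 25) * 4.
 */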
s32 aarch64_get_branch_offset(u32 insn)
{
	s32 imm;

	if (aarch64_insn_is_b(insn) || aarch64_insn_is_bl(insn)) {
		imm = aarch64_insn_decode_immediate(AARCH64_INSN_IMM_26, insn);
		return (imm << 6) >> 4;
	}

	if (aarch64_insn_is_cbz(insn) || aarch64_insn_is_cbnz(insn) ||
	    aarch64_insn_is_bcond(insn)) {
		imm = aarch64_insn_decode_immediate(AARCH64_INSN_IMM_19, insn);
		return (imm << 13) >> 11;
	}

	if (aarch64_insn_is_tbz(insn) || aarch64_insn_is_tbnz(insn)) {
		imm = aarch64_insn_decode_immediate(AARCH64_INSN_IMM_14, insn);
		return (imm << 18) >> 16;
	}

	/* Unhandled instruction */
	BUG();
}

/*
 * Encode the displacement of a branch in the imm field and return the
 * updated instruction.
 */
u32 aarch64_set_branch_offset(u32 insn, s32 offset)
{
	if (aarch64_insn_is_b(insn) || aarch64_insn_is_bl(insn))
		return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_26, insn,
						     offset >> 2);

	if (aarch64_insn_is_cbz(insn) || aarch64_insn_is_cbnz(insn) ||
	    aarch64_insn_is_bcond(insn))
		return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_19, insn,
						     offset >> 2);

	if (aarch64_insn_is_tbz(insn) || aarch64_insn_is_tbnz(insn))
		return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_14, insn,
						     offset >> 2);

	/* Unhandled instruction */
	BUG();
}

s32 aarch64_insn_adrp_get_offset(u32 insn)
{
	BUG_ON(!aarch64_insn_is_adrp(insn));
	return aarch64_insn_decode_immediate(AARCH64_INSN_IMM_ADR, insn) << 12;
}

u32 aarch64_insn_adrp_set_offset(u32 insn, s32 offset)
{
	BUG_ON(!aarch64_insn_is_adrp(insn));
	return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_ADR, insn,
					     offset >> 12);
}

/*
 * Extract the Op/CR data from a msr/mrs instruction.
 */
u32 aarch64_insn_extract_system_reg(u32 insn)
{
	return (insn & 0x1FFFE0) >> 5;
}

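/*
 * In the T32 (Thumb-2) instruction set, a first halfword of 0xe800 or
 * above introduces a 32-bit ("wide") encoding; anything below that is a
 * 16-bit instruction.
 */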
bool aarch32_insn_is_wide(u32 insn)
{
	return insn >= 0xe800;
}

/*
 * Macros/defines for extracting register numbers from instruction.
 */
u32 aarch32_insn_extract_reg_num(u32 insn, int offset)
{
	return (insn & (0xf << offset)) >> offset;
}

#define OPC2_MASK	0x7
#define OPC2_OFFSET	5
u32 aarch32_insn_mcr_extract_opc2(u32 insn)
{
	return (insn & (OPC2_MASK << OPC2_OFFSET)) >> OPC2_OFFSET;
}

#define CRM_MASK	0xf
u32 aarch32_insn_mcr_extract_crm(u32 insn)
{
	return insn & CRM_MASK;
}

static bool range_of_ones(u64 val)
{
	/* Doesn't handle full ones or full zeroes */
	u64 sval = val >> __ffs64(val);

	/* One of Sean Eron Anderson's bithack tricks */
	return ((sval + 1) & (sval)) == 0;
}

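/*
 * Worked example (for illustration): imm = 0x0f0f0f0f0f0f0f0f repeats
 * every 8 bits, so the element size ends up as 8; the element 0x0f has
 * four ones and no rotation, giving N = 0, immr = 0b000000 and
 * imms = 0b110011.
 */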
static u32 aarch64_encode_immediate(u64 imm,
				    enum aarch64_insn_variant variant,
				    u32 insn)
{
	unsigned int immr, imms, n, ones, ror, esz, tmp;
	u64 mask;

	switch (variant) {
	case AARCH64_INSN_VARIANT_32BIT:
		esz = 32;
		break;
	case AARCH64_INSN_VARIANT_64BIT:
		insn |= AARCH64_INSN_SF_BIT;
		esz = 64;
		break;
	default:
		pr_err("%s: unknown variant encoding %d\n", __func__, variant);
		return AARCH64_BREAK_FAULT;
	}

	mask = GENMASK(esz - 1, 0);

	/* Can't encode full zeroes, full ones, or value wider than the mask */
	if (!imm || imm == mask || imm & ~mask)
		return AARCH64_BREAK_FAULT;

	/*
	 * Inverse of Replicate(). Try to spot a repeating pattern
	 * with a pow2 stride.
	 */
	for (tmp = esz / 2; tmp >= 2; tmp /= 2) {
		u64 emask = BIT(tmp) - 1;

		if ((imm & emask) != ((imm >> tmp) & emask))
			break;

		esz = tmp;
		mask = emask;
	}

	/* N is only set if we're encoding a 64bit value */
	n = esz == 64;

	/* Trim imm to the element size */
	imm &= mask;

	/* That's how many ones we need to encode */
	ones = hweight64(imm);

	/*
	 * imms is set to (ones - 1), prefixed with a string of ones
	 * and a zero if they fit. Cap it to 6 bits.
	 */
	imms = ones - 1;
	imms |= 0xf << ffs(esz);
	imms &= BIT(6) - 1;

	/* Compute the rotation */
	if (range_of_ones(imm)) {
		/*
		 * Pattern: 0..01..10..0
		 *
		 * Compute how many rotate we need to align it right
		 */
		ror = __ffs64(imm);
	} else {
		/*
		 * Pattern: 0..01..10..01..1
		 *
		 * Fill the unused top bits with ones, and check if
		 * the result is a valid immediate (all ones with a
		 * contiguous ranges of zeroes).
		 */
		imm |= ~mask;
		if (!range_of_ones(~imm))
			return AARCH64_BREAK_FAULT;

		/*
		 * Compute the rotation to get a continuous set of
		 * ones, with the first bit set at position 0
		 */
		ror = fls(~imm);
	}

	/*
	 * immr is the number of bits we need to rotate back to the
	 * original set of ones. Note that this is relative to the
	 * element size...
	 */
	immr = (esz - ror) % esz;

	insn = aarch64_insn_encode_immediate(AARCH64_INSN_IMM_N, insn, n);
	insn = aarch64_insn_encode_immediate(AARCH64_INSN_IMM_R, insn, immr);
	return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_S, insn, imms);
}

u32 aarch64_insn_gen_logical_immediate(enum aarch64_insn_logic_type type,
				       enum aarch64_insn_variant variant,
				       enum aarch64_insn_register Rn,
				       enum aarch64_insn_register Rd,
				       u64 imm)
{
	u32 insn;

	switch (type) {
	case AARCH64_INSN_LOGIC_AND:
		insn = aarch64_insn_get_and_imm_value();
		break;
	case AARCH64_INSN_LOGIC_ORR:
		insn = aarch64_insn_get_orr_imm_value();
		break;
	case AARCH64_INSN_LOGIC_EOR:
		insn = aarch64_insn_get_eor_imm_value();
		break;
	case AARCH64_INSN_LOGIC_AND_SETFLAGS:
		insn = aarch64_insn_get_ands_imm_value();
		break;
	default:
		pr_err("%s: unknown logical encoding %d\n", __func__, type);
		return AARCH64_BREAK_FAULT;
	}

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, Rd);
	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn, Rn);
	return aarch64_encode_immediate(imm, variant, insn);
}

u32 aarch64_insn_gen_extr(enum aarch64_insn_variant variant,
			  enum aarch64_insn_register Rm,
			  enum aarch64_insn_register Rn,
			  enum aarch64_insn_register Rd,
			  u8 lsb)
{
	u32 insn;

	insn = aarch64_insn_get_extr_value();

	switch (variant) {
	case AARCH64_INSN_VARIANT_32BIT:
		if (lsb > 31)
			return AARCH64_BREAK_FAULT;
		break;
	case AARCH64_INSN_VARIANT_64BIT:
		if (lsb > 63)
			return AARCH64_BREAK_FAULT;
		insn |= AARCH64_INSN_SF_BIT;
		insn = aarch64_insn_encode_immediate(AARCH64_INSN_IMM_N, insn, 1);
		break;
	default:
		pr_err("%s: unknown variant encoding %d\n", __func__, variant);
		return AARCH64_BREAK_FAULT;
	}

	insn = aarch64_insn_encode_immediate(AARCH64_INSN_IMM_S, insn, lsb);
	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, Rd);
	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn, Rn);
	return aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RM, insn, Rm);
}